hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f5fc1c9185cffc53700f1f1d9e3856530dd7fe5 | 1,075 | py | Python | sportsdataverse/cfb/cfb_teams.py | saiemgilani/sportsdataverse-py | 77ae3accbb071b5308335b931e4e55a65e1500cd | [
"MIT"
] | 12 | 2021-10-15T01:24:18.000Z | 2022-03-15T17:00:22.000Z | sportsdataverse/cfb/cfb_teams.py | saiemgilani/sportsdataverse-py | 77ae3accbb071b5308335b931e4e55a65e1500cd | [
"MIT"
] | 19 | 2021-11-02T05:53:41.000Z | 2022-03-16T14:16:51.000Z | sportsdataverse/cfb/cfb_teams.py | saiemgilani/sportsdataverse-py | 77ae3accbb071b5308335b931e4e55a65e1500cd | [
"MIT"
] | 1 | 2021-12-21T14:49:25.000Z | 2021-12-21T14:49:25.000Z | import pandas as pd
import json
from sportsdataverse.dl_utils import download
def espn_cfb_teams(groups=None) -> pd.DataFrame:
"""espn_cfb_teams - look up the college football teams
Args:
groups (int): Used to define different divisions. 80 is FBS, 81 is FCS.
Returns:
        pd.DataFrame: Pandas dataframe containing team information for the requested group(s).
"""
if groups is None:
groups = '&groups=80'
else:
groups = '&groups=' + str(groups)
    teams = pd.DataFrame()
    url = "http://site.api.espn.com/apis/site/v2/sports/football/college-football/teams?limit=1000{}".format(groups)
resp = download(url=url)
if resp is not None:
events_txt = json.loads(resp)
teams = events_txt.get('sports')[0].get('leagues')[0].get('teams')
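        # drop bulky nested keys before flattening to a table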
del_keys = ['record', 'links']
for team in teams:
for k in del_keys:
team.get('team').pop(k, None)
teams = pd.json_normalize(teams)
return teams
| 32.575758 | 117 | 0.646512 | 146 | 1,075 | 4.691781 | 0.541096 | 0.048175 | 0.035037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015854 | 0.237209 | 1,075 | 32 | 118 | 33.59375 | 0.819512 | 0.214884 | 0 | 0 | 0 | 0.047619 | 0.172794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.190476 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f63663f06ed07834a3d2142ee1d260868e3df08 | 3,636 | py | Python | vnpy/example.py | 0x1be20/vnpy | 51e3439570aefc67986078dd80a452b5f40f5653 | [
"MIT"
] | null | null | null | vnpy/example.py | 0x1be20/vnpy | 51e3439570aefc67986078dd80a452b5f40f5653 | [
"MIT"
] | null | null | null | vnpy/example.py | 0x1be20/vnpy | 51e3439570aefc67986078dd80a452b5f40f5653 | [
"MIT"
] | null | null | null |
import math
from datetime import datetime
import pandas as pd
from vnpy.trader.constant import Exchange
from vnpy.app.cta_strategy.backtesting import BacktestingEngine
from vnpy.app.cta_strategy.base import BacktestingMode
from vnpy.app.cta_strategy import (
    CtaTemplate,
    TickData,
    TradeData,
)
feature_cols = ['custom_feature']
"""
构建自己的tick数据,这样可以通过pandas来向量化计算feature
"""
class MLTickData(TickData):
def __init__(self,**kargs):
for key in feature_cols:
setattr(self,key,kargs[key])
del(kargs[key])
TickData.__init__(self,**kargs)
class MLStrategy(CtaTemplate):
def __init__(self,cta_engine,strategy_name,vt_symbol,setting):
CtaTemplate.__init__(self,cta_engine,strategy_name,vt_symbol,setting)
self.model = setting['model']
self.features = feature_cols
def on_init(self):
print("ml strategy init")
self.load_tick(0)
def on_start(self):
print("ml strategy start")
def on_tick(self,tick:MLTickData):
feature_datas = []
for key in self.features:
feature_datas += [getattr(tick,key)]
predict = self.model.predict([feature_datas])[0]
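        # assuming the model predicts a log-return, exp() converts it to a price ratio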
ret = math.exp(predict)
print('predict',ret)
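        # thresholds around 1.0 (~±3 bps) decide whether to hold, close, or reverse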
if self.pos>0:
if ret>1.0003:
return
elif ret>1 and ret<1.0002:
self.cancel_all()
self.sell(tick.ask_price_1,self.pos)
elif ret<0.9997:
self.cancel_all()
                # close the long position
self.sell(tick.ask_price_1,self.pos)
                # open a new short
self.short(tick.ask_price_1,0.1)
elif self.pos<0:
if ret<0.9997:
return
elif ret>0.9997 and ret<0.9998:
self.cancel_all()
self.cover(tick.bid_price_1,abs(self.pos))
elif ret>1.0003:
self.cancel_all()
self.cover(tick.bid_price_1,abs(self.pos))
                self.buy(tick.bid_price_1,0.1)
elif self.pos==0:
if ret<0.9997:
self.short(tick.ask_price_1,0.1)
elif ret>1.0003:
self.buy(tick.bid_price_1,0.1)
def on_trade(self,trade:TradeData):
self.put_event()
# tick conversion
def mapCol(item)->object:
    """
    Rename the dataframe's order-book columns to vnpy's tick field names.
    """
colMap = {}
for i in range(1,6):
colMap['ask_price_{}'.format(i)] = float(item["ap{}".format(i)])
colMap['ask_volume_{}'.format(i)] = float(item["aq{}".format(i)])
colMap['bid_price_{}'.format(i)] = float(item["bp{}".format(i)])
colMap['bid_volume_{}'.format(i)] = float(item["bq{}".format(i)])
return colMap
# attach the computed features to the custom tick
def mapFeature(item)->object:
featureMap = {}
for key in feature_cols:
featureMap[key] = item[key]
return featureMap
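# NOTE: `testData` (a DataFrame of ticks carrying the feature columns) and `model`
# (a trained regressor used by MLStrategy) are assumed to be defined by the caller.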
data = testData.apply(lambda item:MLTickData(
symbol="BTC",
exchange=Exchange.BINANCE,
datetime=item.timestamp,
**mapFeature(item),
**mapCol(item),
),axis=1)
engine = BacktestingEngine()
engine.set_parameters(
vt_symbol="BTC.BINANCE",
interval="1m",
start=datetime(2020,5,19),
end=datetime(2021,5,22),
rate=0,
slippage=0,
    size=0.1,
pricetick=5,
capital=100000,
mode=BacktestingMode.TICK,
inverse=True,
)
engine.add_strategy(MLStrategy,setting={"model":model})
# engine.load_data()
# set the historical data directly instead of loading it from the database
engine.history_data = data
engine.run_backtesting()
# show per-trade statistics
engine.exhaust_trade_result(engine.trades) | 27.134328 | 84 | 0.596535 | 451 | 3,636 | 4.64745 | 0.310421 | 0.026718 | 0.024809 | 0.024809 | 0.266698 | 0.158397 | 0.158397 | 0.158397 | 0.131679 | 0.06584 | 0 | 0.03619 | 0.278053 | 3,636 | 134 | 85 | 27.134328 | 0.762286 | 0.025303 | 0 | 0.173077 | 0 | 0 | 0.04199 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.096154 | 0 | 0.230769 | 0.028846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f63e32605c44b6294e52e86fda5185cf150b67e | 4,368 | py | Python | tests/test_weekdays_list.py | mafrosis/dataclass_property | bd9680bcbf3221691227c5e056a9b1f5f999c849 | [
"MIT"
] | null | null | null | tests/test_weekdays_list.py | mafrosis/dataclass_property | bd9680bcbf3221691227c5e056a9b1f5f999c849 | [
"MIT"
] | null | null | null | tests/test_weekdays_list.py | mafrosis/dataclass_property | bd9680bcbf3221691227c5e056a9b1f5f999c849 | [
"MIT"
] | null | null | null |
def check_days(weekdays, *valid_days, is_valid=True):
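    """Assert that every day in valid_days is present in weekdays (and every other
    day absent) when is_valid is True, with the expectation inverted otherwise."""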
from dataclass_property.weekdays_list import Weekdays
if len(valid_days) == 0:
valid_days = list(Weekdays.DAYS)
    elif len(valid_days) == 1 and isinstance(valid_days[0], list):
valid_days = valid_days[0]
valid_days = [Weekdays.as_attr(d) for d in valid_days]
for day in Weekdays.DAYS:
should_be_valid = (day in valid_days and is_valid) or (day not in valid_days and not is_valid)
assert (day in weekdays) == should_be_valid, \
'day={} valid_days={} is_valid={}'.format(day, valid_days, is_valid)
# assert ((day in valid_days and day in weekdays and is_valid) or
# (day in valid_days and day not in weekdays and not is_valid) or
# (day not in valid_days and day not in weekdays and is_valid) or
# (day not in valid_days and day in weekdays and not is_valid)
# )
def test_weekdays_init():
from dataclass_property.weekdays_list import Weekdays
# No given inputs should be all True
w = Weekdays()
check_days(w, *list(Weekdays.DAYS))
def check_day_init(day):
w = Weekdays(day)
w2 = Weekdays(**{day: True})
check_days(w, day)
check_days(w2, day)
# Single day as string should be true while all other false
for day in Weekdays.DAYS:
check_day_init(day)
# ===== Case insensitive =====
for day in Weekdays.DAYS:
day = day.upper()
check_day_init(day)
# ===== Abbreviations =====
for day in Weekdays.DAYS:
day = day[:3]
check_day_init(day)
# ===== Abbreviations case insensitive =====
for day in Weekdays.DAYS:
day = day[:3].upper()
check_day_init(day)
def test_weekday_property():
from dataclass_property.weekdays_list import Weekdays
w = Weekdays()
check_days(w)
w.sunday = False
check_days(w, 'sunday', is_valid=False)
w.sunday = True
check_days(w)
checked = []
for day in Weekdays.DAYS:
setattr(w, day, False)
checked.append(day)
check_days(w, *checked, is_valid=False)
checked = []
for day in Weekdays.DAYS:
setattr(w, day, True)
checked.append(day)
check_days(w, *checked, is_valid=True)
def test_weekdays_append_add_remove():
from dataclass_property.weekdays_list import Weekdays
w = Weekdays()
check_days(w)
w.remove('SUnday')
check_days(w, 'sunday', is_valid=False)
# Check append and order
w.append('sunday')
assert all(d1 == d2 for d1, d2 in zip(w, Weekdays.DAYS))
w.mon = False
check_days(w, 'monday', is_valid=False)
w += ['Mon'] # Extend is used in the background so if this works extend works as well
assert all(d1 == d2 for d1, d2 in zip(w, Weekdays.DAYS))
w.pop(2) # Tuesday
check_days(w, 'tuesday', is_valid=False)
w2 = w + ['TUESDAY']
print(w2, type(w2))
assert len(w2) != len(w)
assert all(d1 == d2 for d1, d2 in zip(w2, Weekdays.DAYS))
w3 = ['Tuesday'] + w
assert len(w3) != len(w)
assert all(d1 == d2 for d1, d2 in zip(w3, Weekdays.DAYS))
def test_weekday_pydantic():
from pydantic import BaseModel
from dataclass_property.weekdays_list import Weekdays
# Check pydantic default None
class MyModel(BaseModel):
weekdays: Weekdays = None
class Config:
validate_assignment = True
m = MyModel()
assert m.weekdays is None
# Check pydantic default Weekdays
class MyModel(BaseModel):
weekdays: Weekdays = Weekdays() # Empty fills will all days
class Config:
validate_assignment = True
m = MyModel()
assert isinstance(m.weekdays, Weekdays)
check_days(m.weekdays) # All days should validate
m.weekdays.remove('sunday')
assert isinstance(m.weekdays, Weekdays)
check_days(m.weekdays, 'sunday', is_valid=False) # Make sure sunday is not in the list
# Check mutable default for changed option
m2 = MyModel()
assert isinstance(m2.weekdays, Weekdays)
check_days(m2.weekdays) # All days should validate
if __name__ == '__main__':
test_weekdays_init()
test_weekday_property()
test_weekdays_append_add_remove()
test_weekday_pydantic()
print('All tests finished successfully!')
| 29.12 | 102 | 0.641484 | 611 | 4,368 | 4.415712 | 0.162029 | 0.056709 | 0.040771 | 0.041512 | 0.582283 | 0.422165 | 0.416234 | 0.338769 | 0.279837 | 0.136768 | 0 | 0.011091 | 0.256868 | 4,368 | 149 | 103 | 29.315436 | 0.820086 | 0.181319 | 0 | 0.408163 | 0 | 0 | 0.038829 | 0 | 0 | 0 | 0 | 0 | 0.112245 | 1 | 0.061224 | false | 0 | 0.061224 | 0 | 0.183673 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6542a305f9398e37131670f91383a3e41725e7 | 1,864 | py | Python | Application/datasources/datapod_facebook/settings.py | GraphicalDot/datapod-backend-layer | ab38a5b0e969cd0d762e9d7720ab89174c333c37 | [
"Apache-2.0"
] | null | null | null | Application/datasources/datapod_facebook/settings.py | GraphicalDot/datapod-backend-layer | ab38a5b0e969cd0d762e9d7720ab89174c333c37 | [
"Apache-2.0"
] | null | null | null | Application/datasources/datapod_facebook/settings.py | GraphicalDot/datapod-backend-layer | ab38a5b0e969cd0d762e9d7720ab89174c333c37 | [
"Apache-2.0"
] | null | null | null |
#-*- coding: utf-8 -*-
from playhouse.sqlite_ext import SqliteExtDatabase
import sqlite3
from .db_initialize import initialize
from .api import parse, images, stats, status, get_chats, dashboard, delete_original_path, cancel_parse
import os
from .variables import DATASOURCE_NAME
class Routes:
def __init__(self, db_path):
pragmas = [
('journal_mode', 'wal2'),
('cache_size', -1024*64)]
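        # write-ahead logging plus a 64 MiB page cache (negative cache_size is in KiB)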
self.db_path = os.path.join(db_path, DATASOURCE_NAME, f"{DATASOURCE_NAME}.db")
self.db_object = SqliteExtDatabase(self.db_path, pragmas=pragmas, detect_types=sqlite3.PARSE_DECLTYPES)
creds_table, archives_table, images_table, \
yourposts_table, other_posts, \
content, status_table, stats_table, chats, chat_content, address_table = initialize(self.db_object)
self.datasource_name = DATASOURCE_NAME
self.config = {
"tables": {
"creds_table": creds_table,
"image_table" : images_table,
"archives_table": archives_table,
"yourposts_table": yourposts_table,
"other_posts": other_posts,
"content": content,
"chat_table": chats,
"chat_content": chat_content,
"stats_table": stats_table,
"address_table": address_table,
"status_table": status_table},
"utils":{
"stats": stats,
"status": status
}
}
self.routes = {"GET": [("images", images), ("delete_zip", delete_original_path), ("dashboard", dashboard), ("chats", get_chats), ("stats", stats), ("status", status)],
"POST": [("parse", parse), ("cancel_parse", cancel_parse)]}
| 38.040816 | 177 | 0.578863 | 186 | 1,864 | 5.483871 | 0.33871 | 0.068627 | 0.029412 | 0.033333 | 0.056863 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007752 | 0.30794 | 1,864 | 49 | 178 | 38.040816 | 0.782946 | 0.011266 | 0 | 0 | 0 | 0 | 0.141228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.162162 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6784f614c657d64ff55a4aaa604588f7cc578a | 501 | py | Python | router.py | spookybear0/website | 200eb0e56512d134cd5a52727a073a47077cf280 | [
"MIT"
] | null | null | null | router.py | spookybear0/website | 200eb0e56512d134cd5a52727a073a47077cf280 | [
"MIT"
] | null | null | null | router.py | spookybear0/website | 200eb0e56512d134cd5a52727a073a47077cf280 | [
"MIT"
] | null | null | null | import aiohttp
import importlib
routes = {"/": "index",
"/users/{username}": "user",
"/rankings": "rankings",
"/level/{levelname}": "level",
"/search": "search"}
def add_all_routes(app: web.Application):
for route, modulename in routes.items():
modulepath = modulename.replace("/", ".")
routesplit = modulename.split("/")
app.router.add_get(route, getattr(importlib.import_module("handlers." + modulepath), routesplit[-1])) | 35.785714 | 109 | 0.610778 | 49 | 501 | 6.163265 | 0.673469 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002525 | 0.209581 | 501 | 14 | 109 | 35.785714 | 0.760101 | 0 | 0 | 0 | 0 | 0 | 0.183267 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f69703f8f809116b21206029c85b39cc24652df | 3,967 | py | Python | golly-4.0-win-64bit/Scripts/Python/tile.py | larayeung/gollywithlocusts | e7adbaaa691fe46f22e88fb4d13e42b3d702a871 | [
"Apache-2.0"
] | null | null | null | golly-4.0-win-64bit/Scripts/Python/tile.py | larayeung/gollywithlocusts | e7adbaaa691fe46f22e88fb4d13e42b3d702a871 | [
"Apache-2.0"
] | null | null | null | golly-4.0-win-64bit/Scripts/Python/tile.py | larayeung/gollywithlocusts | e7adbaaa691fe46f22e88fb4d13e42b3d702a871 | [
"Apache-2.0"
] | null | null | null | # Tile current selection with pattern inside selection.
# Author: Andrew Trevorrow (andrew@trevorrow.com), March 2006.
# Updated to use exit command, Nov 2006.
# Updated to handle multi-state patterns, Aug 2008.
from glife import *
import golly as g
selrect = rect( g.getselrect() )
if selrect.empty: g.exit("There is no selection.")
selpatt = pattern( g.getcells(g.getselrect()) )
if len(selpatt) == 0: g.exit("No pattern in selection.")
# determine if selpatt is one-state or multi-state
inc = 2
if len(selpatt) & 1 == 1: inc = 3
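# (two-state cell lists are [x,y,...] pairs, so their length is even; multi-state
# lists are [x,y,state,...] triples padded with a trailing 0 to make the length odd)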
# ------------------------------------------------------------------------------
def clip_left (patt, left):
clist = list(patt)
# remove padding int if present
if (inc == 3) and (len(clist) % 3 == 1): clist.pop()
x = 0
while x < len(clist):
if clist[x] < left:
clist[x : x+inc] = [] # remove cell from list
else:
x += inc
# append padding int if necessary
if (inc == 3) and (len(clist) & 1 == 0): clist.append(0)
return pattern(clist)
# ------------------------------------------------------------------------------
def clip_right (patt, right):
clist = list(patt)
# remove padding int if present
if (inc == 3) and (len(clist) % 3 == 1): clist.pop()
x = 0
while x < len(clist):
if clist[x] > right:
clist[x : x+inc] = [] # remove cell from list
else:
x += inc
# append padding int if necessary
if (inc == 3) and (len(clist) & 1 == 0): clist.append(0)
return pattern(clist)
# ------------------------------------------------------------------------------
def clip_top (patt, top):
clist = list(patt)
# remove padding int if present
if (inc == 3) and (len(clist) % 3 == 1): clist.pop()
y = 1
while y < len(clist):
if clist[y] < top:
clist[y-1 : y-1+inc] = [] # remove cell from list
else:
y += inc
# append padding int if necessary
if (inc == 3) and (len(clist) & 1 == 0): clist.append(0)
return pattern(clist)
# ------------------------------------------------------------------------------
def clip_bottom (patt, bottom):
clist = list(patt)
# remove padding int if present
if (inc == 3) and (len(clist) % 3 == 1): clist.pop()
y = 1
while y < len(clist):
if clist[y] > bottom:
clist[y-1 : y-1+inc] = [] # remove cell from list
else:
y += inc
# append padding int if necessary
if (inc == 3) and (len(clist) & 1 == 0): clist.append(0)
return pattern(clist)
# ------------------------------------------------------------------------------
# find selpatt's minimal bounding box
bbox = getminbox(selpatt)
# first tile selpatt horizontally, clipping where necessary
left = bbox.left
i = 0
while left > selrect.left:
left -= bbox.width
i += 1
if left >= selrect.left:
selpatt.put(-bbox.width * i, 0)
else:
clip_left( selpatt(-bbox.width * i, 0), selrect.left ).put()
right = bbox.right
i = 0
while right < selrect.right:
right += bbox.width
i += 1
if right <= selrect.right:
selpatt.put(bbox.width * i, 0)
else:
clip_right( selpatt(bbox.width * i, 0), selrect.right ).put()
# get new selection pattern and tile vertically, clipping where necessary
selpatt = pattern( g.getcells(g.getselrect()) )
bbox = getminbox(selpatt)
top = bbox.top
i = 0
while top > selrect.top:
top -= bbox.height
i += 1
if top >= selrect.top:
selpatt.put(0, -bbox.height * i)
else:
clip_top( selpatt(0, -bbox.height * i), selrect.top ).put()
bottom = bbox.bottom
i = 0
while bottom < selrect.bottom:
bottom += bbox.height
i += 1
if bottom <= selrect.bottom:
selpatt.put(0, bbox.height * i)
else:
clip_bottom( selpatt(0, bbox.height * i), selrect.bottom ).put()
if not selrect.visible(): g.fitsel()
| 26.986395 | 80 | 0.526594 | 521 | 3,967 | 3.994242 | 0.180422 | 0.046132 | 0.046132 | 0.034599 | 0.56271 | 0.536761 | 0.45507 | 0.45507 | 0.398366 | 0.398366 | 0 | 0.023287 | 0.253088 | 3,967 | 146 | 81 | 27.171233 | 0.679042 | 0.290648 | 0 | 0.565217 | 0 | 0 | 0.016499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.021739 | 0 | 0.108696 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6978f707c126c136ad39f3b45816a9df89b466 | 968 | py | Python | workflow/scripts/process.py | IMS-Bio2Core-Facility/GTExSnake | aefe96f70dd815036b1456c08a7b7068400f79a5 | [
"MIT"
] | 1 | 2021-07-13T09:18:36.000Z | 2021-07-13T09:18:36.000Z | workflow/scripts/process.py | IMS-Bio2Core-Facility/GTExSnake | aefe96f70dd815036b1456c08a7b7068400f79a5 | [
"MIT"
] | 2 | 2021-07-14T09:32:29.000Z | 2021-07-21T07:49:02.000Z | workflow/scripts/process.py | IMS-Bio2Core-Facility/GTExSnake | aefe96f70dd815036b1456c08a7b7068400f79a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Process data to xlsx.
Here,
the input data from the previous three steps is combined and written to XLSX.
The GTEx data is merged with the BioMart data using an outer merge,
so as to keep all entries.
Then,
the MANE data is added using a left merge,
so as to only keep the data from the GTEx query.
"""
if __name__ == "__main__":
import concurrent.futures
import pandas as pd
from gtexquery.data_handling.process import merge_data
from gtexquery.logs.get_logger import get_logger
INS = snakemake.input # noqa: F821
LOGS = snakemake.log[0] # noqa: F821
OUTS = snakemake.output # noqa: F821
THREADS = snakemake.threads # noqa: F821
logger = get_logger(__name__, LOGS)
mane = pd.read_csv(INS["mane"], index_col=0)
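    # merge each GTEx/BioMart file pair (against the shared MANE table) in parallel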
with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as ex:
ex.map(
merge_data, INS["gtex"], INS["bm"], [mane] * len(INS["gtex"]), OUTS["data"]
)
| 30.25 | 87 | 0.682851 | 144 | 968 | 4.444444 | 0.493056 | 0.05 | 0.034375 | 0.034375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019737 | 0.214876 | 968 | 31 | 88 | 31.225806 | 0.822368 | 0.368802 | 0 | 0 | 0 | 0 | 0.043333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6b2102a8e6ad3d81dcdc4da0012a459e68c9a9 | 2,943 | py | Python | gamespy-serverlister.py | cetteup/battlefield-serverlisters | 2176a11fda9e8c5be48dea1daed304db1c83f8a7 | [
"MIT"
] | 1 | 2021-01-04T01:30:37.000Z | 2021-01-04T01:30:37.000Z | gamespy-serverlister.py | cetteup/battlefield-serverlisters | 2176a11fda9e8c5be48dea1daed304db1c83f8a7 | [
"MIT"
] | null | null | null | gamespy-serverlister.py | cetteup/battlefield-serverlisters | 2176a11fda9e8c5be48dea1daed304db1c83f8a7 | [
"MIT"
] | null | null | null | import argparse
import logging
import os
import sys
from src.constants import GSLIST_CONFIGS, GAMESPY_PRINCIPALS
from src.serverlisters import GameSpyServerLister
parser = argparse.ArgumentParser(description='Retrieve a list of game servers for GameSpy-based games '
'and write it to a JSON file')
parser.add_argument('-g', '--gslist', help='Path to gslist binary', type=str, required=True)
parser.add_argument('-b', '--game', help='Game to query servers for', type=str,
choices=list(GSLIST_CONFIGS.keys()), default=list(GSLIST_CONFIGS.keys())[0])
parser.add_argument('-p', '--principal', help='Principal server to query',
type=str, choices=list(GAMESPY_PRINCIPALS.keys()))
parser.add_argument('-f', '--filter', help='Filter to apply to server list', type=str, default='')
parser.add_argument('-t', '--timeout', help='Timeout to use for gslist command', type=int, default=10)
parser.add_argument('-e', '--expired-ttl', help='How long to keep a server in list after it was last seen (in hours)',
type=int, default=24)
parser.add_argument('-d', '--list-dir', help='Path to directory in which servers lists will be stored', type=str,
default='.')
parser.add_argument('-s', '--super-query', help='Query each server in the list for it\'s status', dest='super_query',
action='store_true')
parser.set_defaults(super_query=False)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
# Make sure gslist path is valid
if not os.path.isfile(args.gslist):
sys.exit('Could not find gslist executable, please double check the provided path')
# Set principal
principal = None
availablePrincipals = GSLIST_CONFIGS[args.game]['servers']
if len(availablePrincipals) > 1 and str(args.principal).lower() in GSLIST_CONFIGS[args.game]['servers']:
# More than one principal available and given principal is valid => use given principal
principal = args.principal.lower()
else:
# Only one principal available or given principal is invalid => use default principal
principal = availablePrincipals[0]
logging.info(f'Listing servers for {args.game.lower()} via {principal.lower()}')
# Init GameSpy server lister
lister = GameSpyServerLister(args.game, principal, args.gslist, args.filter, args.super_query,
args.timeout, args.expired_ttl, args.list_dir)
# Init stats dict
stats = {
'serverTotalBefore': len(lister.servers),
'serverTotalAfter': -1,
'expiredServersRemoved': -1
}
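# -1 values are placeholders that get filled in below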
# Run list update
lister.update_server_list()
# Check for and remove any expired servers
stats['expiredServersRemoved'], = lister.remove_expired_servers()
# Write updated list to file
lister.write_to_file()
# Update and log stats
stats['serverTotalAfter'] = len(lister.servers)
logging.info(f'Run stats: {stats}')
| 45.984375 | 118 | 0.706422 | 395 | 2,943 | 5.189873 | 0.36962 | 0.035122 | 0.066341 | 0.017561 | 0.057561 | 0.030244 | 0 | 0 | 0 | 0 | 0 | 0.004082 | 0.167516 | 2,943 | 63 | 119 | 46.714286 | 0.832653 | 0.123004 | 0 | 0 | 0 | 0.044444 | 0.305058 | 0.016342 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6c024bdbf80eb668baada9c5fe81bccdb1140e | 398 | py | Python | cornerstone_widget/__init__.py | betatim/cornerstone_widget | c22fafd4d8fe148f6b2349518188eb0bee5f18f1 | [
"Apache-2.0"
] | 24 | 2018-09-07T10:40:07.000Z | 2022-02-01T21:18:00.000Z | cornerstone_widget/__init__.py | betatim/cornerstone_widget | c22fafd4d8fe148f6b2349518188eb0bee5f18f1 | [
"Apache-2.0"
] | 26 | 2018-09-04T16:32:46.000Z | 2018-10-08T09:11:50.000Z | cornerstone_widget/__init__.py | betatim/cornerstone_widget | c22fafd4d8fe148f6b2349518188eb0bee5f18f1 | [
"Apache-2.0"
] | 3 | 2018-09-17T12:56:16.000Z | 2019-12-03T06:30:34.000Z | from .cs_widget import CornerstoneWidget, CornerstoneToolbarWidget
from .utils import get_bbox_handles
from ._version import get_versions
def _jupyter_nbextension_paths():
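    # Tell Jupyter where to find the bundled static assets for this notebook extension.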
return [{
'section': 'notebook',
'src': 'static',
'dest': 'cornerstone_widget',
'require': 'cornerstone_widget/extension'
}]
__version__ = get_versions()['version']
del get_versions
| 23.411765 | 66 | 0.70603 | 40 | 398 | 6.625 | 0.65 | 0.124528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188442 | 398 | 16 | 67 | 24.875 | 0.820433 | 0 | 0 | 0 | 0 | 0 | 0.221106 | 0.070352 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0.083333 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6cc4d210e49a95321ceda8b2d2d8db78daf491 | 4,872 | py | Python | go/channel/tests.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/channel/tests.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | go/channel/tests.py | lynnUg/vumi-go | 852f906c46d5d26940bd6699f11488b73bbc3742 | [
"BSD-3-Clause"
] | null | null | null | from uuid import uuid4
import urllib
from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
from go.channel.views import get_channel_view_definition
class TestChannelViews(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.user_helper = self.vumi_helper.make_django_user()
self.vumi_helper.setup_tagpool(
u'longcode', [u'default1000%s' % i for i in [1, 2, 3, 4]])
self.user_helper.add_tagpool_permission(u'longcode')
self.client = self.vumi_helper.get_client()
def assert_active_channel_tags(self, expected):
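        # compare the expected (pool, tag) pairs against the user's active channel keys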
self.assertEqual(
set(':'.join(tag) for tag in expected),
set(ch.key for ch in self.user_helper.user_api.active_channels()))
def add_tagpool_permission(self, tagpool, max_keys=None):
        permission = self.vumi_helper.get_vumi_api().account_store.tag_permissions(
            uuid4().hex, tagpool=tagpool, max_keys=max_keys)
permission.save()
account = self.user_helper.user_api.get_user_account()
account.tagpools.add(permission)
account.save()
def get_view_url(self, view, channel_key):
view_def = get_channel_view_definition(None)
return view_def.get_view_url(view, channel_key=channel_key)
def test_index(self):
tag = (u'longcode', u'default10001')
channel_key = u'%s:%s' % tag
response = self.client.get(reverse('channels:index'))
self.assertNotContains(response, urllib.quote(channel_key))
self.user_helper.user_api.acquire_specific_tag(tag)
response = self.client.get(reverse('channels:index'))
self.assertContains(response, urllib.quote(channel_key))
def test_get_new_channel(self):
self.assert_active_channel_tags([])
response = self.client.get(reverse('channels:new_channel'))
self.assertContains(response, 'International')
self.assertContains(response, 'longcode:')
def test_get_new_channel_empty_or_exhausted_tagpool(self):
self.vumi_helper.setup_tagpool(u'empty', [])
self.vumi_helper.setup_tagpool(u'exhausted', [u'tag1'])
self.user_helper.add_tagpool_permission(u'empty')
self.user_helper.add_tagpool_permission(u'exhausted')
tag = self.user_helper.user_api.acquire_tag(u'exhausted')
self.assert_active_channel_tags([tag])
response = self.client.get(reverse('channels:new_channel'))
self.assertContains(response, 'International')
self.assertContains(response, 'longcode:')
self.assertNotContains(response, 'empty:')
self.assertNotContains(response, 'exhausted:')
def test_post_new_channel(self):
self.assert_active_channel_tags([])
response = self.client.post(reverse('channels:new_channel'), {
'country': 'International', 'channel': 'longcode:'})
tag = (u'longcode', u'default10001')
channel_key = u'%s:%s' % tag
self.assertRedirects(response, self.get_view_url('show', channel_key))
self.assert_active_channel_tags([tag])
def test_post_new_channel_no_country(self):
self.assert_active_channel_tags([])
response = self.client.post(reverse('channels:new_channel'), {
'channel': 'longcode:'})
self.assertContains(response, '<li>country<ul class="errorlist">'
'<li>This field is required.</li></ul></li>')
self.assert_active_channel_tags([])
def test_post_new_channel_no_channel(self):
self.assert_active_channel_tags([])
response = self.client.post(reverse('channels:new_channel'), {
'country': 'International'})
self.assertContains(response, '<li>channel<ul class="errorlist">'
'<li>This field is required.</li></ul></li>')
self.assert_active_channel_tags([])
def test_show_channel_missing(self):
response = self.client.get(self.get_view_url('show', u'foo:bar'))
self.assertEqual(response.status_code, 404)
def test_show_channel(self):
tag = (u'longcode', u'default10002')
channel_key = u'%s:%s' % tag
self.user_helper.user_api.acquire_specific_tag(tag)
response = self.client.get(self.get_view_url('show', channel_key))
self.assertContains(response, tag[0])
self.assertContains(response, tag[1])
def test_release_channel(self):
tag = (u'longcode', u'default10002')
channel_key = u'%s:%s' % tag
self.user_helper.user_api.acquire_specific_tag(tag)
self.assert_active_channel_tags([tag])
response = self.client.post(self.get_view_url('release', channel_key))
self.assertRedirects(response, reverse('conversations:index'))
self.assert_active_channel_tags([])
| 43.891892 | 78 | 0.677545 | 602 | 4,872 | 5.227575 | 0.172757 | 0.038132 | 0.066412 | 0.080394 | 0.567525 | 0.512869 | 0.466476 | 0.421989 | 0.407372 | 0.342548 | 0 | 0.009221 | 0.198686 | 4,872 | 110 | 79 | 44.290909 | 0.796875 | 0 | 0 | 0.369565 | 0 | 0 | 0.128489 | 0.009852 | 0 | 0 | 0 | 0 | 0.293478 | 1 | 0.141304 | false | 0 | 0.054348 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f6f22b310867bcb23d6e02764ed6534fb15c5dd | 4,715 | py | Python | analysis/malware/oleDeobf/xdeobf.py | fritzemeier/skeletoncode | 6c2c7de16f588c0856dd2f9770862126979b2620 | [
"MIT"
] | null | null | null | analysis/malware/oleDeobf/xdeobf.py | fritzemeier/skeletoncode | 6c2c7de16f588c0856dd2f9770862126979b2620 | [
"MIT"
] | null | null | null | analysis/malware/oleDeobf/xdeobf.py | fritzemeier/skeletoncode | 6c2c7de16f588c0856dd2f9770862126979b2620 | [
"MIT"
] | null | null | null | import sys
from chardet.universaldetector import UniversalDetector
def parse_args(INFO):
MENU = { \
"OBF":"Obfuscated algorithm file", \
"OF":"Output file (Not implemented)"
}
for NUM in range(1,len(sys.argv)):
CURR = sys.argv[NUM]
if CURR[:2] == "--":
if CURR[2:] == "help":
print_dict(MENU)
sys.exit()
KEY = CURR.split("=")[0]
if KEY in INFO.keys():
INFO[KEY] = CURR[(len(KEY)+1):]
return INFO
def parse_obfs(OBFS,IFILE):
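    # Each input line has the form "src1,src2: VAR = piece1 + piece2 (+ ...)";
    # record the source files and the ordered pieces for every obfuscated variable.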
for LINE in IFILE:
SFILE = LINE.split(":")[0].split(",")
LO = LINE[(len(LINE.split(":")[0])+1):]
KEY = LO.replace(" ","").split("=")[0]
PCS = LO.replace(" ","").replace("\n","").split("=")[1].split("+")
OBFS[KEY] = { \
"SFILE":SFILE, \
"PCS":[], \
"ALG": {}, \
"FULLSTR":"", \
"FILE":""
}
if "Array" in LINE:
OBFS[KEY]["PCS"].append("ARR")
for PC in LINE.replace(" ","").replace(")","").replace("\n","").split("(")[1].split(","):
OBFS[KEY]["PCS"].append(PC)
else:
for PC in LO.replace(" ","").replace("\n","").split("=")[1].split("+"):
OBFS[KEY]["PCS"].append(PC)
return OBFS
def parse_files(OBFS):
for KEY,DATA in OBFS.items():
DET = UniversalDetector()
DET.reset()
for FNAME in DATA["SFILE"]:
with open(FNAME,"rb") as FILE:
for LINE in FILE:
DET.feed(LINE)
if DET.done:
break
DET.close()
with open(FNAME,"r",encoding=DET.result["encoding"]) as FILE:
for LINE in FILE:
DATA["FILE"] += LINE
return OBFS
def begin_deobf(OBFS):
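    # Scan each source file for the VBA Mid() assignments (and the dispatcher
    # function) that produce the pieces referenced by each obfuscated variable.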
for KEY,DATA in OBFS.items():
FILE = DATA["FILE"].split("\n")
for LINE in FILE:
BEGIN = LINE.replace(" ","").split("=")[0]
if BEGIN in DATA["PCS"]:
TYPE = LINE.replace(" ","").split("=")[1].split("(")[0]
if TYPE == "Mid":
OSTR = LINE.replace(" ","").replace("\n","").replace(")","").split("(")[1].split(",")[0]
ENTRY = int(LINE.replace(" ","").replace("\n","").replace(")","").split("(")[1].split(",")[1])
LEN = int(LINE.replace(" ","").replace("\n","").replace(")","").split("(")[1].split(",")[2])
DATA["ALG"][BEGIN] = { \
"TYPE":TYPE, \
"OSTR":OSTR, \
"ENTRY":ENTRY, \
"LEN":LEN, \
"STR":""
}
elif all(VAL in LINE for VAL in (KEY,"(",")")) and not LINE.startswith(KEY) and not LINE.startswith("Dim") and not LINE.startswith("Function"):
NAME = LINE.replace(" ","").split("(")[0].split(",")[1]
DATA["ALG"]["FUNC"] = { \
"NAME":NAME, \
"CONT":[]
}
return OBFS
def construct_deobf(OBFS):
for KEY,DATA in OBFS.items():
FILE = DATA["FILE"].split("\n")
if "ARR" in DATA["PCS"]:
IS_FUNC = False
for CHUNK in FILE:
if "Function "+DATA["ALG"]["FUNC"]["NAME"] in CHUNK and not IS_FUNC:
DATA["FULLSTR"] += "FUNCTION CONTAINING ALGORITHM\n"
DATA["FULLSTR"] += CHUNK+"\n"
DATA["ALG"]["FUNC"]["CONT"].append(CHUNK)
IS_FUNC = True
elif IS_FUNC and "End Function" in CHUNK:
IS_FUNC = False
elif IS_FUNC:
DATA["ALG"]["FUNC"]["CONT"].append(CHUNK)
DATA["FULLSTR"] += CHUNK+"\n"
elif DATA["ALG"]["FUNC"]["NAME"] in CHUNK:
DATA["FULLSTR"] += "LINE CONTAINING VARIABLE\n"
DATA["FULLSTR"] += CHUNK+"\n\n"
else:
for PC1 in DATA["ALG"].keys():
PC2 = DATA["ALG"][PC1]["OSTR"]
for CHUNK in FILE:
if CHUNK.replace(" ","").startswith(PC2):
if DATA["ALG"][PC1]["TYPE"] == "Mid":
ENTRY = DATA["ALG"][PC1]["ENTRY"]
LEN = DATA["ALG"][PC1]["LEN"]
FOUND_STR = CHUNK.replace('"','').split("=")[1][ENTRY:(ENTRY+LEN)]
DATA["ALG"][PC1]["STR"] = FOUND_STR
for PC in DATA["PCS"]:
if PC in OBFS.keys() or PC.startswith("Chr("):
DATA["FULLSTR"] += "<<"+PC+">> "
continue
DATA["FULLSTR"] += DATA["ALG"][PC]["STR"]
return OBFS
def print_results(OBFS):
for KEY,DATA in OBFS.items():
print(" Obfuscated Variable: "+KEY)
print(" Source File: "+str(DATA["SFILE"]))
print(" De-obfuscated String\n------------------------------------------------")
print(OBFS[KEY]["FULLSTR"])
print("\n\n")
def print_dict(INFO):
for KEY,DATA in INFO.items():
print(" "+KEY+" "*(10-len(KEY))+" "+str(DATA))
def main():
cliArgs = { \
"IF":"", \
"OF":""
}
cliArgs = parse_args(cliArgs)
if cliArgs["OF"]:
outFile = open(cliArgs["OF"], "w")
if cliArgs["IF"]:
obfFile = open(cliArgs["IF"], "r")
obfStrs = {}
obfStrs = parse_obfs(obfStrs,obfFile)
obfStrs = parse_files(obfStrs)
obfStrs = begin_deobf(obfStrs)
obfStrs = construct_deobf(obfStrs)
if "TEST" in sys.argv:
sys.exit()
print_results(obfStrs)
if __name__ == "__main__":
main()
| 21.828704 | 146 | 0.532768 | 611 | 4,715 | 4.062193 | 0.184943 | 0.036664 | 0.031023 | 0.024174 | 0.278807 | 0.210314 | 0.156326 | 0.136181 | 0.121273 | 0.074134 | 0 | 0.009212 | 0.217179 | 4,715 | 215 | 147 | 21.930233 | 0.663235 | 0 | 0 | 0.1875 | 0 | 0 | 0.142736 | 0.011877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.013889 | 0 | 0.104167 | 0.069444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f72a5fd650666ecdf104589c2aeeb5a8a97ff0a | 9,184 | py | Python | typescript/libs/node_client.py | fongandrew/TypeScript-Sublime-JSX-Plugin | ee22b220a9874bb365aa84c2ffb3670ac7e9c97a | [
"Apache-2.0"
] | null | null | null | typescript/libs/node_client.py | fongandrew/TypeScript-Sublime-JSX-Plugin | ee22b220a9874bb365aa84c2ffb3670ac7e9c97a | [
"Apache-2.0"
] | null | null | null | typescript/libs/node_client.py | fongandrew/TypeScript-Sublime-JSX-Plugin | ee22b220a9874bb365aa84c2ffb3670ac7e9c97a | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
import threading
import time
import json
import sublime
import sublime_plugin
from .logger import log
from . import json_helpers
from . import global_vars
# queue module name changed from Python 2 to 3
if int(sublime.version()) < 3000:
import Queue as queue
else:
import queue
class CommClient:
def getEvent(self): pass
def postCmd(self, cmd): pass
def sendCmd(self, cmd, cb): pass
def sendCmdSync(self, cmd): pass
def sendCmdAsync(self, cmd, cb): pass
class NodeCommClient(CommClient):
__CONTENT_LENGTH_HEADER = b"Content-Length: "
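    # tsserver frames each message with an HTTP-style Content-Length header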
def __init__(self, scriptPath):
"""
Starts a node client (if not already started) and communicate with it.
The script file to run is passed to the constructor.
"""
self.asyncReq = {}
self.__serverProc = None
# create response and event queues
self.__msgq = queue.Queue()
self.__eventq = queue.Queue()
# start node process
pref_settings = sublime.load_settings('Preferences.sublime-settings')
node_path = pref_settings.get('node_path')
if node_path:
node_path = os.path.expandvars(node_path)
if not node_path:
if os.name == "nt":
node_path = "node"
else:
node_path = NodeCommClient.__which("node")
if not node_path:
path_list = os.environ["PATH"] + os.pathsep + "/usr/local/bin" + os.pathsep + "$NVM_BIN"
print("Unable to find executable file for node on path list: " + path_list)
print("To specify the node executable file name, use the 'node_path' setting")
self.__serverProc = None
else:
global_vars._node_path = node_path
print("Found node executable at " + node_path)
try:
if os.name == "nt":
# linux subprocess module does not have STARTUPINFO
# so only use it if on Windows
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW
self.__serverProc = subprocess.Popen([node_path, scriptPath],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, startupinfo=si)
else:
log.debug("opening " + node_path + " " + scriptPath)
self.__serverProc = subprocess.Popen([node_path, scriptPath],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
except:
self.__serverProc = None
# start reader thread
if self.__serverProc and (not self.__serverProc.poll()):
log.debug("server proc " + str(self.__serverProc))
log.debug("starting reader thread")
readerThread = threading.Thread(target=NodeCommClient.__reader, args=(
self.__serverProc.stdout, self.__msgq, self.__eventq, self.asyncReq, self.__serverProc))
readerThread.daemon = True
readerThread.start()
self.__debugProc = None
self.__breakpoints = []
def serverStarted(self):
return self.__serverProc is not None
# work in progress
def addBreakpoint(self, file, line):
self.__breakpoints.append((file, line))
# work in progress
def debug(self, file):
# TODO: msg if already debugging
self.__debugProc = subprocess.Popen(["node", "--debug", file],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def makeTimeoutMsg(self, cmd, seq):
jsonDict = json_helpers.decode(cmd)
timeoutMsg = {
"seq": 0,
"type": "response",
"success": False,
"request_seq": seq,
"command": jsonDict["command"],
"message": "timeout"
}
return timeoutMsg
def sendCmd(self, cmd, cb, seq):
"""
send single-line command string; no sequence number; wait for response
this assumes stdin/stdout; for TCP, need to add correlation with sequence numbers
"""
if self.postCmd(cmd):
reqSeq = -1
try:
while reqSeq < seq:
data = self.__msgq.get(True, 1)
dict = json_helpers.decode(data)
reqSeq = dict['request_seq']
if cb:
cb(dict)
except queue.Empty:
print("queue timeout")
if (cb):
cb(self.makeTimeoutMsg(cmd, seq))
else:
if (cb):
cb(self.makeTimeoutMsg(cmd, seq))
def sendCmdAsync(self, cmd, cb, seq):
"""
Sends the command and registers a callback
"""
if self.postCmd(cmd):
self.asyncReq[seq] = cb
def sendCmdSync(self, cmd, seq):
"""
Sends the command and wait for the result and returns it
"""
if self.postCmd(cmd):
reqSeq = -1
try:
while reqSeq < seq:
data = self.__msgq.get(True, 1)
dict = json_helpers.decode(data)
reqSeq = dict['request_seq']
return dict
except queue.Empty:
print("queue timeout")
return self.makeTimeoutMsg(cmd, seq)
else:
return self.makeTimeoutMsg(cmd, seq)
def postCmd(self, cmd):
"""
Post command to server; no response needed
"""
log.debug('Posting command: {0}'.format(cmd))
if not self.__serverProc:
log.error("can not send request; node process not running")
return False
else:
cmd = cmd + "\n"
self.__serverProc.stdin.write(cmd.encode())
self.__serverProc.stdin.flush()
return True
def getEvent(self):
"""
Try to get event from event queue
"""
try:
ev = self.__eventq.get(False)
except:
return None
return ev
@staticmethod
def __readMsg(stream, msgq, eventq, asyncReq, proc):
"""
Reader thread helper
"""
state = "init"
body_length = 0
while state != "body":
header = stream.readline().strip()
# log.debug(
# 'Stream state: "{0}". Read header: "{1}"'.format(
# state,
# header if header else 'None'
# )
# )
if len(header) == 0:
if state == 'init':
# log.info('0 byte line in stream when expecting header')
return proc.poll() is not None
else:
# Done reading header
state = "body"
else:
state = 'header'
if header.startswith(NodeCommClient.__CONTENT_LENGTH_HEADER):
body_length = int(header[len(NodeCommClient.__CONTENT_LENGTH_HEADER):])
if body_length > 0:
data = stream.read(body_length)
log.debug('Read body of length: {0}'.format(body_length))
data_json = data.decode("utf-8")
data_dict = json_helpers.decode(data_json)
if data_dict['type'] == "response":
request_seq = data_dict['request_seq']
log.debug('Body sequence#: {0}'.format(request_seq))
if request_seq in asyncReq:
callback = asyncReq.pop(request_seq, None)
if callback:
callback(data_dict)
return False
else:
# Only put in the queue if wasn't an async request
msgq.put(data_json)
else:
eventq.put(data_json)
else:
log.info('Body length of 0 in server stream')
return False
@staticmethod
def __reader(stream, msgq, eventq, asyncReq, proc):
"""
Main function for reader thread
"""
while True:
if NodeCommClient.__readMsg(stream, msgq, eventq, asyncReq, proc):
return
@staticmethod
def __which(program):
def is_executable(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_executable(program):
return program
else:
# /usr/local/bin is not on mac default path
# but is where node is typically installed on mac
path_list = os.environ["PATH"] + os.pathsep + "/usr/local/bin" + os.pathsep + "$NVM_BIN"
for path in path_list.split(os.pathsep):
path = path.strip('"')
programPath = os.path.join(path, program)
if is_executable(programPath):
return programPath
return None
| 34.656604 | 119 | 0.532883 | 965 | 9,184 | 4.927461 | 0.251813 | 0.026919 | 0.007571 | 0.020189 | 0.207992 | 0.154995 | 0.132072 | 0.103891 | 0.103891 | 0.103891 | 0 | 0.003837 | 0.375653 | 9,184 | 264 | 120 | 34.787879 | 0.825427 | 0.120862 | 0 | 0.333333 | 0 | 0 | 0.079323 | 0.003565 | 0 | 0 | 0 | 0.003788 | 0 | 1 | 0.100529 | false | 0.026455 | 0.063492 | 0.010582 | 0.269841 | 0.026455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f72e22697b47412c2e86ac9d6f29061040b3ceb | 3,980 | py | Python | Calculator.py | NOVAglow/basecalc | 3f40a1d59266239edacd80c3046f78731cba9432 | [
"Apache-2.0"
] | null | null | null | Calculator.py | NOVAglow/basecalc | 3f40a1d59266239edacd80c3046f78731cba9432 | [
"Apache-2.0"
] | null | null | null | Calculator.py | NOVAglow/basecalc | 3f40a1d59266239edacd80c3046f78731cba9432 | [
"Apache-2.0"
] | null | null | null | # Command dictionary, used for the help command and the ? command
cmd_dict = {
"help": "Show help.",
"?": "Get help on a command. 1 argument required: name of the command. Example: \"? dec\"",
"dec": "Convert a number to decimal. 2 arguments required: one is the number itself, second is the base.",
"bin": "Convert a number to binary. 2 arguments required: one is the number itself, second is the base.",
"oct": "Convert a number to octal. 2 arguments required: one is the number, second is the base.",
"hex": "Convert a number to hexadecimal. 2 arguments required: one is the number, second is the base.",
"input": "Input a number with its base, save it for later conversion 2 arguments required: the number itself and its base.",
"->": "Convert the number entered upon the input command to another base. One argument required: the type you want to convert to. Example: \"-> hex\"",
"exit": "Exit the calculator."
}
def convert(in_cmd):
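    # `cmd` is the raw input line read by the main loop: "<command> <number> <base>"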
if len(cmd.split()) >= 3:
number = cmd.split()[1]
try:
base = int(cmd.split()[2])
try:
if in_cmd == "dec":
answer = str(int(number, base))
elif in_cmd == "bin":
answer = bin(int(number, base))[2:]
elif in_cmd == "oct":
answer = oct(int(number, base))[2:]
elif in_cmd == "hex":
answer = hex(int(number, base))[2:].upper()
print(">> " + answer)
            except ValueError: # the number is not valid in the given base, e.g. 294 in base 2
                print("Number does not correspond to its base.")
except IndexError: # Missing parameter
print("No base given.")
except ValueError: # Base is invalid
print("Invalid base.")
cmd = None
while cmd != "exit":
cmd = input("> ")
cmd = cmd.lower()
try:
main = cmd.split()[0] # Get command
if main == "help":
for command in cmd_dict:
print(command + ": " + cmd_dict[command])
elif main == "?":
try:
if cmd.split()[1] in cmd_dict:
print(cmd.split()[1] + ": " + cmd_dict[cmd.split()[1]])
else:
print("No such command.")
except IndexError:
print("An argument is required.")
elif main == "dec" or main == "bin" or main == "oct" or main == "hex":
convert(main)
elif main == "input":
try:
in_base = int(cmd.split()[2])
try:
in_num = cmd.split()[1]
test = int(in_num, in_base)
del test
except ValueError:
print("Number is not correspond to its base.")
except IndexError:
print("No base given.")
except ValueError:
print("Invalid base.")
elif main == "->":
try:
target = cmd.split()[1]
try:
if target == "dec":
print(">> " + str(int(in_num, in_base)))
elif target == "bin":
print(">> " + bin(int(in_num, in_base))[2:])
elif target == "oct":
print(">> " + oct(int(in_num, in_base))[2:])
elif target == "hex":
print(">> " + hex(int(in_num, in_base))[2:].upper())
else:
print("Invalid argument \"" + target + "\".")
except NameError:
print("No input has been defined.")
except IndexError:
print("An argument is required.")
elif cmd != "exit":
print("Invalid command.")
except IndexError: # If empty
print("Empty input.")
| 42.795699 | 156 | 0.484673 | 454 | 3,980 | 4.200441 | 0.213656 | 0.041951 | 0.028317 | 0.026219 | 0.331935 | 0.317252 | 0.255899 | 0.231778 | 0.158364 | 0.158364 | 0 | 0.010708 | 0.38995 | 3,980 | 92 | 157 | 43.26087 | 0.774712 | 0.045226 | 0 | 0.313953 | 0 | 0.046512 | 0.298028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011628 | false | 0 | 0 | 0 | 0.011628 | 0.232558 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f72e3f0415454b4d930b826553fdc4856d0a5fd | 1,240 | py | Python | nablapps/com/migrations/0002_committee.py | pettaroni/nablaweb | 5e610698a276884b9cd779a718dfdee641713636 | [
"MIT"
] | null | null | null | nablapps/com/migrations/0002_committee.py | pettaroni/nablaweb | 5e610698a276884b9cd779a718dfdee641713636 | [
"MIT"
] | null | null | null | nablapps/com/migrations/0002_committee.py | pettaroni/nablaweb | 5e610698a276884b9cd779a718dfdee641713636 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('com', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Committee',
fields=[
('group', models.OneToOneField(primary_key=True, serialize=False, to='auth.Group', verbose_name='Gruppe', on_delete=models.CASCADE)),
('mail_list', models.EmailField(blank=True, max_length=254, verbose_name='Epostliste')),
('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
('leader', models.ForeignKey(to=settings.AUTH_USER_MODEL, blank=True, verbose_name='Leder', on_delete=models.CASCADE)),
('page', models.OneToOneField(to='com.ComPage', blank=True, verbose_name='Komitéside', on_delete=models.CASCADE)),
],
options={
'verbose_name': 'Komité',
'verbose_name_plural': 'Komitéer',
},
),
]
| 38.75 | 149 | 0.620968 | 126 | 1,240 | 5.880952 | 0.539683 | 0.103914 | 0.05668 | 0.08502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019169 | 0.242742 | 1,240 | 31 | 150 | 40 | 0.769968 | 0.016935 | 0 | 0 | 0 | 0 | 0.153657 | 0.024651 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f7356dcac238b97ce72ba31683942264a22953e | 543 | py | Python | preprocessing/label_ordianl_encoding.py | Kunal614/Machine-Learning | 26b3e0f3397ddb524c96c5b6c99b173b6fc80501 | [
"MIT"
] | null | null | null | preprocessing/label_ordianl_encoding.py | Kunal614/Machine-Learning | 26b3e0f3397ddb524c96c5b6c99b173b6fc80501 | [
"MIT"
] | null | null | null | preprocessing/label_ordianl_encoding.py | Kunal614/Machine-Learning | 26b3e0f3397ddb524c96c5b6c99b173b6fc80501 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
df = pd.read_csv('train(2).csv')
df2 = df[["KitchenQual" , "BldgType"]]
#LAbel Encoder
le = LabelEncoder()
data = le.fit_transform(df2["BldgType"])
print(data)
df2["BldType_l_enc"] = data
#count number of class in a column
print(df["BldgType"].value_counts())
print(df["KitchenQual"].value_counts())
# Ordinal Encoder
order_label = {"Ex":4 , "Gd":3 , "TA":2 , "Fa":1}
df2["KitchenQual_ordinal_enc"]=df2["KitchenQual"].map(order_label)
print(df2) | 18.1 | 66 | 0.710866 | 81 | 543 | 4.641975 | 0.580247 | 0.069149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023061 | 0.121547 | 543 | 30 | 67 | 18.1 | 0.765199 | 0.112339 | 0 | 0 | 0 | 0 | 0.235417 | 0.047917 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f79226a907e0920d92118e3fb487482028c8ebf | 3,533 | py | Python | src/pyams_utils/session.py | Py-AMS/pyams-utils | 65b166596a8b9f66fb092a69ce5d53ac6675685e | [
"ZPL-2.1"
] | null | null | null | src/pyams_utils/session.py | Py-AMS/pyams-utils | 65b166596a8b9f66fb092a69ce5d53ac6675685e | [
"ZPL-2.1"
] | null | null | null | src/pyams_utils/session.py | Py-AMS/pyams-utils | 65b166596a8b9f66fb092a69ce5d53ac6675685e | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2008-2015 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_utils session module
This helper module is used to add a "session_property" method decorator, which can be used to
store method result into user's session.
It also adds two functions to get and set session data.
"""
from pyams_utils.request import check_request
__docformat__ = 'restructuredtext'
def get_session_data(request, app, key, default=None):
"""Get data associated with current user session
PyAMS session management is based on :py:mod:`Beaker` package session management.
:param request: the request from which session is extracted
:param str app: application name
:param str key: session data key for given application
    :param default: default value
    :return: object; requested session data, or *default* if it can't be found
.. code-block:: python
APPLICATION_KEY = 'MyApp'
SESSION_KEY = 'MyFunction'
def my_function(request):
return get_session_data(request, APPLICATION_KEY, SESSION_KEY)
"""
session = request.session
return session.get('{0}::{1}'.format(app, key), default)
def set_session_data(request, app, key, value):
"""Associate data with current user session
:param request: the request from which session is extracted
:param str app: application name
:param str key: session data key for given application
:param object value: any object that can be pickled can be stored into user session
.. code-block:: python
APPLICATION_KEY = 'MyApp'
SESSION_KEY = 'MyFunction'
def my_function(request):
value = {'key1': 'value1', 'key2': 'value2'}
set_session_data(request, APPLICATION_KEY, SESSION_KEY, value)
"""
session = request.session
session['{0}::{1}'.format(app, key)] = value
_MARKER = object()
def session_property(app, key=None, prefix=None):
"""Define a method decorator used to store result into request's session
If no request is currently running, a new one is created.
:param str app: application identifier used to prefix session keys
:param str key: session's value key; if *None*, the key will be the method's object; if *key*
        is a callable object, it will be called to get the actual session key
:param prefix: str; prefix to use for session key; if *None*, the prefix will be the property
name
"""
def session_decorator(func):
def wrapper(obj, app, key, *args, **kwargs):
request = check_request()
if callable(key):
key = key(obj, *args, **kwargs)
if not key:
key = '{1}::{0!r}'.format(obj, prefix or func.__name__)
data = get_session_data(request, app, key, _MARKER)
if data is _MARKER:
data = func
if callable(data):
data = data(obj, *args, **kwargs)
set_session_data(request, app, key, data)
return data
return lambda x, *args, **kwargs: wrapper(x, app, key, *args, **kwargs)
return session_decorator
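# A usage sketch for the decorator above (assumption: ``MyContent`` and
# ``compute_expensive_value`` are hypothetical; the result is cached per session):
#
#     class MyContent:
#         @session_property('MyApp', key='expensive')
#         def expensive_value(self):
#             return compute_expensive_value(self)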
| 34.300971 | 97 | 0.668271 | 483 | 3,533 | 4.803313 | 0.320911 | 0.047414 | 0.046552 | 0.036207 | 0.265517 | 0.253448 | 0.206897 | 0.17069 | 0.17069 | 0.17069 | 0 | 0.007491 | 0.244268 | 3,533 | 102 | 98 | 34.637255 | 0.861423 | 0.626946 | 0 | 0.076923 | 0 | 0 | 0.03599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.038462 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f793f0055dfba9774cbf31309fc492d7d2ba7e5 | 1,543 | py | Python | Picturedom/tests/photo/views/test_category_photos.py | Azzarox/Picturedom | 69d1b77dcdd89d63b12c7c56e25d43955d906b98 | [
"MIT"
] | null | null | null | Picturedom/tests/photo/views/test_category_photos.py | Azzarox/Picturedom | 69d1b77dcdd89d63b12c7c56e25d43955d906b98 | [
"MIT"
] | null | null | null | Picturedom/tests/photo/views/test_category_photos.py | Azzarox/Picturedom | 69d1b77dcdd89d63b12c7c56e25d43955d906b98 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from Picturedom.photo.models import Category, Photo
class TestCategoryPhotos(TestCase):
def setUp(self):
self.user = User.objects.create_user('user', 'email', 'pass')
self.category = Category.objects.create(title='category')
self.category2 = Category.objects.create(title='cat2')
# first photo
self.photo = Photo.objects.create(
image=SimpleUploadedFile("file.jpeg", b"file_content", content_type="image/jpeg"),
posted_by=self.user,
category=self.category,
)
# second photo
self.photo2 = Photo.objects.create(
image=SimpleUploadedFile("file2.jpeg", b"file_content", content_type="image/jpeg"),
posted_by=self.user,
category=self.category,
)
# 3rd photo different category
self.photo3 = Photo.objects.create(
image=SimpleUploadedFile("file2.jpeg", b"file_content", content_type="image/jpeg"),
posted_by=self.user,
category=self.category2,
)
def test_category_photos_GET__success(self):
response = self.client.get(reverse('photo category', args=[self.category.id]))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'photos/photo_category.html')
self.assertEqual(len(response.context['photos']), 2)
| 41.702703 | 95 | 0.670123 | 175 | 1,543 | 5.811429 | 0.348571 | 0.076696 | 0.053097 | 0.067847 | 0.3353 | 0.294985 | 0.294985 | 0.294985 | 0.294985 | 0.294985 | 0 | 0.009926 | 0.216461 | 1,543 | 36 | 96 | 42.861111 | 0.831266 | 0.034349 | 0 | 0.233333 | 0 | 0 | 0.111709 | 0.017497 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.066667 | false | 0.033333 | 0.166667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f7c838f8641a32bb524bcbc48cd108587b15c84 | 1,636 | py | Python | recommenders/recommenders/models/ranking_model.py | hojinYang/tfrs-movierec-serving | bef4f19a8be99cde510d761082de7602151a7d99 | [
"Apache-2.0"
] | 17 | 2020-11-14T07:03:06.000Z | 2022-02-21T00:56:49.000Z | recommenders/recommenders/models/ranking_model.py | hojinYang/tfrs-movierec-serving | bef4f19a8be99cde510d761082de7602151a7d99 | [
"Apache-2.0"
] | null | null | null | recommenders/recommenders/models/ranking_model.py | hojinYang/tfrs-movierec-serving | bef4f19a8be99cde510d761082de7602151a7d99 | [
"Apache-2.0"
] | 1 | 2021-05-20T06:00:51.000Z | 2021-05-20T06:00:51.000Z | from typing import Callable, Dict, Tuple, Text
from recommenders.datasets import Dataset
import numpy as np
import tensorflow as tf
import tensorflow_recommenders as tfrs
from pathlib import Path
SAVE_PATH = Path(__file__).resolve().parents[1] / "weights"
class RankingModel(tfrs.models.Model):
def __init__(
self,
dataset: Dataset,
network_fn: Callable,
network_args: Dict = None
):
super().__init__()
self._name = f"{self.__class__.__name__}_{network_fn.__name__}"
if network_args is None:
network_args = {}
        self.ranking_model: tf.keras.Model = network_fn(
            unique_user_ids=dataset.unique_user_ids,
            unique_item_ids=dataset.unique_movie_ids, **network_args)
self.task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
prediction = self.ranking_model(**features)
return self.task(prediction, features['rating'])
def call(self, features: Dict[Text, tf.Tensor]):
return self.ranking_model(**features)
    def print_summary(self):
        # tf.keras.Model provides summary(), which prints directly
        self.ranking_model.summary()
    def save_weights(self, save_dir=None):
        if save_dir is None:
            save_dir = SAVE_PATH
        save_dir = Path(save_dir)  # accept both str and Path inputs
        save_dir.mkdir(parents=True, exist_ok=True)
        self.ranking_model.save_weights(str(save_dir / 'ranking'))
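# A rough usage sketch (assumption: ``dataset`` exposes unique_user_ids /
# unique_movie_ids, ``my_network_fn`` builds the tf.keras ranking network, and
# ``train_ds`` is a batched tf.data.Dataset of feature dicts):
#
#     model = RankingModel(dataset, network_fn=my_network_fn)
#     model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))
#     model.fit(train_ds, epochs=3)
#     model.save_weights()  # falls back to the default SAVE_PATH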
| 32.72 | 103 | 0.630196 | 191 | 1,636 | 5.08377 | 0.371728 | 0.056643 | 0.082389 | 0.041195 | 0.057673 | 0.057673 | 0 | 0 | 0 | 0 | 0 | 0.000839 | 0.271394 | 1,636 | 50 | 104 | 32.72 | 0.813758 | 0 | 0 | 0 | 0 | 0 | 0.040929 | 0.028711 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.162162 | 0.027027 | 0.378378 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f7f165be9eb0ca7213a02ea5cc2e9b6e4418888 | 32,951 | py | Python | api/custom_components/custom_nodes.py | phucnh22/mapintel_dev | 492c077177fe96b2c975c350d9f3bd1dc61691dd | [
"MIT"
] | null | null | null | api/custom_components/custom_nodes.py | phucnh22/mapintel_dev | 492c077177fe96b2c975c350d9f3bd1dc61691dd | [
"MIT"
] | null | null | null | api/custom_components/custom_nodes.py | phucnh22/mapintel_dev | 492c077177fe96b2c975c350d9f3bd1dc61691dd | [
"MIT"
] | null | null | null | """
See https://github.com/deepset-ai/haystack/issues/955 for further context
"""
import os
import logging
from copy import deepcopy
from typing import Dict, Generator, List, Optional, Union
import numpy as np
from elasticsearch.helpers import bulk, scan
from tqdm.auto import tqdm
from haystack.utils import get_batches_from_generator
from haystack import Document
from haystack.document_store.base import BaseDocumentStore
from haystack.document_store.elasticsearch import OpenDistroElasticsearchDocumentStore
from haystack.retriever.base import BaseRetriever
from haystack.reader.base import BaseReader
from api.custom_components.bertopic import BERTopic2
from api.custom_components.top2vec import Top2Vec2
dirname = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class TopicRetriever(BaseRetriever):
def __init__(
self,
document_store: BaseDocumentStore,
embedding_model: str,
model_format: str = "bertopic",
umap_args: dict = None,
hdbscan_args: dict = None,
vectorizer_args: dict = None,
top_k: int = 10,
progress_bar: bool = True
):
"""
:param document_store: An instance of DocumentStore from which to retrieve documents.
:param embedding_model: Local path or name of model in Hugging Face's model hub such as ``'deepset/sentence_bert'``
:param model_format: Name of framework that was used for saving the model. Options:
- ``'top2vec'``
- ``'bertopic'``
:param umap_args: Pass custom arguments to UMAP.
:param hdbscan_args: Pass custom arguments to HDBSCAN.
:param hdbscan_args: Pass custom arguments to CountVectorizer. Only needed if model_format="bertopic".
:param top_k: How many documents to return per query.
:param progress_bar: If true displays progress bar during embedding.
"""
# # save init parameters to enable export of component config as YAML
# self.set_config(
# document_store=document_store, embedding_model=embedding_model, umap_args=umap_args,
# hdbscan_args=hdbscan_args, top_k=top_k
# )
self.document_store = document_store
self.embedding_model = embedding_model
self.model_format = model_format
self.umap_args = umap_args
self.hdbscan_args = hdbscan_args
self.vectorizer_args = vectorizer_args
self.top_k = top_k
self.progress_bar = progress_bar
logger.info(f"Init retriever using embeddings of model {embedding_model}")
if self.model_format == "top2vec":
raise NotImplementedError("model_format='top2vec' isn't fully implemented yet.")
# self.embedding_encoder = _Top2VecEncoder(self)
elif self.model_format == "bertopic":
self.embedding_encoder = _BERTopicEncoder(self)
else:
raise ValueError("Argument model_format can only take the values 'top2vec' or 'bertopic'.")
def retrieve(self, query: str, filters: dict = None, top_k: Optional[int] = None, index: str = None) -> List[Document]:
"""
        Scan through documents in DocumentStore and return a small number of documents
that are most relevant to the query.
:param query: The query
:param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field
:param top_k: How many documents to return per query.
:param index: The name of the index in the DocumentStore from which to retrieve documents
"""
if top_k is None:
top_k = self.top_k
if index is None:
index = self.document_store.index
query_emb = self.embed_queries(texts=[query])
documents = self.document_store.query_by_embedding(query_emb=query_emb[0], filters=filters,
top_k=top_k, index=index)
return documents
def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
"""
Create embeddings for a list of queries.
:param texts: Queries to embed
        :return: Embeddings, one per input query
"""
# for backward compatibility: cast pure str input
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list), "Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])"
return self.embedding_encoder.embed_queries(texts)
def embed_queries_umap(self, texts: List[str]) -> List[np.ndarray]:
"""
Create UMAP embeddings for a list of queries.
:param texts: Queries to embed
        :return: Embeddings, one per input query
"""
# for backward compatibility: cast pure str input
if isinstance(texts, str):
texts = [texts]
assert isinstance(texts, list), "Expecting a list of texts, i.e. create_embeddings(texts=['text1',...])"
return self.embedding_encoder.embed_queries_umap(texts)
def embed_passages(self, docs: List[Document], embeddings: np.array = None) -> List[np.ndarray]:
"""
Create embeddings for a list of passages. Produces the original embeddings, the UMAP embeddings,
the topic number and the topic label of each document.
:param docs: List of documents to embed
:return: Embeddings, one per input passage
"""
return self.embedding_encoder.embed_passages(docs, embeddings)
def run_indexing(self, documents: List[dict], **kwargs):
documents = deepcopy(documents)
document_objects = [Document.from_dict(doc) for doc in documents]
embeddings, umap_embeddings, topic_numbers, topic_labels = self.embed_passages(document_objects)
for doc, emb, umap_emb, topn, topl in zip(documents, embeddings, umap_embeddings, topic_numbers, topic_labels):
doc["embedding"] = emb
doc["umap_embeddings"] = umap_emb
doc["topic_number"] = topn
doc["topic_label"] = topl
output = {**kwargs, "documents": documents}
return output, "output_1"
def train(self, docs: List[Document], embeddings: np.array = None):
"""
Trains the underlying embedding encoder model. If model_format="top2vec", a Top2Vec model
will be trained, otherwise, if model_format="bertopic", a BERTopic model will be trained.
:param docs: List of documents to train the model on.
"""
self.embedding_encoder.train(docs, embeddings)
def get_topic_names(self) -> List[str]:
return self.embedding_encoder.topic_names
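# A minimal usage sketch (assumption: ``doc_store`` is an initialised document
# store and the sentence-transformers model name is available locally):
#
#     retriever = TopicRetriever(
#         document_store=doc_store,
#         embedding_model="all-MiniLM-L6-v2",
#         model_format="bertopic",
#     )
#     docs = retriever.retrieve("covid vaccine rollout", top_k=5)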
class _BERTopicEncoder():
def __init__(
self,
retriever: TopicRetriever
):
self.saved_model_path = os.path.join(dirname, '../../artifacts/saved_models/bertopic.pkl')
self.embedding_model = retriever.embedding_model
self.umap_args = retriever.umap_args
self.hdbscan_args = retriever.hdbscan_args
self.vectorizer_args = retriever.vectorizer_args
self.show_progress_bar = retriever.progress_bar
if retriever.document_store.similarity != "cosine":
logger.warning(
f"You are using a Sentence Transformer with the {retriever.document_store.similarity} function. "
f"We recommend using cosine instead. "
f"This can be set when initializing the DocumentStore")
# Initializing the model
try:
logger.info("Loading the BERTopic model from disk.")
self.model = BERTopic2.load(self.saved_model_path, self.embedding_model)
self.topic_names = list(self.model.topic_names.values())
except Exception as e:
logger.info(f"The BERTopic model hasn't been successfuly loaded: {e}")
self.model = None
self.topic_names = None
def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
self._check_is_trained()
# texts can be a list of strings or a list of [title, text]
# emb = self.model.embedding_model.embedding_model.encode(texts, batch_size=200, show_progress_bar=self.show_progress_bar)
emb = self.model.embedding_model.embed(texts, verbose=self.show_progress_bar)
emb = [r for r in emb] # get back list of numpy embedding vectors
return emb
def embed_queries_umap(self, texts: List[str]) -> List[np.ndarray]:
embeddings = self.embed_queries(texts)
umap_embeddings = self.model.umap_model.transform(np.array(embeddings))
umap_embeddings = [i for i in umap_embeddings]
return umap_embeddings
def embed_passages(self, docs: List[Document], embeddings: np.array = None) -> List[np.ndarray]:
self._check_is_trained()
passages = [[d.meta["name"] if d.meta and "name" in d.meta else "", d.text] for d in docs] # type: ignore
embeddings, umap_embeddings, topic_numbers, _ = self.model.transform(passages, embeddings)
topic_labels = [self.model.topic_names[i] for i in topic_numbers]
return [embeddings, umap_embeddings, topic_numbers, topic_labels]
def train(self, docs: List[Document], embeddings: np.array = None):
# Initializing the BERTopic model
from umap import UMAP
from hdbscan import HDBSCAN
from sklearn.feature_extraction.text import CountVectorizer
if self.umap_args:
umap_model = UMAP(**self.umap_args)
else:
umap_model = UMAP(
n_neighbors=15,
n_components=2,
metric='cosine',
random_state=1
)
if self.hdbscan_args:
hdbscan_model = HDBSCAN(**self.hdbscan_args)
else:
hdbscan_model = HDBSCAN(
min_cluster_size=15,
metric='euclidean',
prediction_data=True
)
if self.vectorizer_args:
vectorizer_model = CountVectorizer(**self.vectorizer_args)
            n_gram_range = self.vectorizer_args.get('ngram_range', (1, 1))  # dict.get takes a key, not a list
else:
vectorizer_model = CountVectorizer(
ngram_range=(1, 2),
stop_words="english"
)
n_gram_range = (1, 2)
self.model = BERTopic2(
n_gram_range=n_gram_range,
nr_topics=20,
low_memory=True,
embedding_model=self.embedding_model,
umap_model=umap_model,
hdbscan_model=hdbscan_model,
vectorizer_model=vectorizer_model
)
logger.info(f"Beginning training of BERTopic with {len(docs)} documents.")
self.model = self.model.fit(docs, embeddings)
self.topic_names = list(self.model.topic_names.values())
logger.info(f"Saving fitted BERTopic model to disk.")
self.model.save(self.saved_model_path, save_embedding_model=False)
def _check_is_trained(self):
if self.model is None:
raise ValueError("The BERTopic model isn't either loaded or trained yet.")
class _Top2VecEncoder():
def __init__(
self,
retriever: TopicRetriever
):
self.saved_model_path = os.path.join(dirname, '../../artifacts/saved_models/top2vec.pkl')
self.embedding_model = retriever.embedding_model
self.umap_args = retriever.umap_args
self.hdbscan_args = retriever.hdbscan_args
self.show_progress_bar = retriever.progress_bar
self.document_store = retriever.document_store
if self.document_store.similarity != "cosine":
logger.warning(
f"You are using a Sentence Transformer with the {self.document_store.similarity} function. "
f"We recommend using cosine instead. "
f"This can be set when initializing the DocumentStore")
def embed(self, texts: Union[List[List[str]], List[str], str]) -> List[np.ndarray]:
# texts can be a list of strings or a list of [title, text]
# get back list of numpy embedding vectors
self.model._check_model_status() # Setting the embed attribute based on the embedding_model
emb = self.model.embed(texts, batch_size=200, show_progress_bar=self.show_progress_bar)
emb = [r for r in emb]
return emb
def embed_queries(self, texts: List[str]) -> List[np.ndarray]:
# Initializing the top2vec model
self.init_model()
return self.embed(texts)
def embed_passages(self, docs: List[Document]) -> List[np.ndarray]:
# Initializing the top2vec model
self.init_model(docs)
passages = [[d.meta["name"] if d.meta and "name" in d.meta else "", d.text] for d in docs] # type: ignore
embeddings = self.embed(passages)
umap_embeddings = self.model.get_umap().transform(embeddings)
topic_numbers = self.model.doc_top_reduced
topic_labels = self.create_topic_labels()
return [embeddings, umap_embeddings, topic_numbers, topic_labels]
def create_topic_labels(self):
# TODO: Give more importance to words with higher score and that are unique to a cluster.
# Get topic words
topic_words, _, _ = self.model.get_topics(20, reduced=True)
# Produce topic labels by concatenating top 5 words
topic_labels = ["_".join(words[:5]) for words in topic_words]
return topic_labels
def init_model(self, docs=None):
try:
logger.info("Loading the Top2Vec model from disk.")
self.model = Top2Vec2.load(self.saved_model_path)
# Ensure the embedding model matches
assert self.model.embedding_model == self.embedding_model, \
"The Top2Vec embedding model doesn't match the embedding model in the Retriever."
# TODO: Ensure the umap_args and hdbscan_args match as well
except Exception as e:
logger.info(f"The Top2Vec model hasn't been trained or isn't valid: {e}")
if self.document_store.get_document_count() > 1000:
self.train()
else:
if docs is None:
raise RuntimeError("There isn't enough documents in the database for training the Top2Vec model.")
else:
if len(docs) > 1000:
self.train(docs=list(map(lambda d: d.text, docs))) # training the Top2Vec model with the uploaded documents
else:
raise RuntimeError("There isn't enough documents in the database or in the upload for training the Top2Vec model.")
def train(self, docs=None):
if docs is None:
# Get all documents from Document Store
logger.info("Getting all documents from Document Store.")
docs = self.document_store.get_all_documents(return_embedding=False)
docs = list(map(lambda d: d.text, docs))
logger.info(f"Beginning training of Top2Vec with {len(docs)} internal documents.")
else:
logger.info(f"Beginning training of Top2Vec with {len(docs)} external documents.")
self.model = Top2Vec2(
docs,
embedding_model=self.embedding_model,
keep_documents=False, # we don't need to keep the documents as the search isn't performed through top2vec
workers=None,
use_embedding_model_tokenizer=True,
umap_args=self.umap_args,
hdbscan_args=self.hdbscan_args
)
self.model.hierarchical_topic_reduction(20) # reduce the number of topics
self.model.save(self.saved_model_path)
class CrossEncoderReRanker(BaseReader):
"""
A re-ranker based on a BERT Cross-Encoder. The query and a candidate result are passed
    simultaneously to the transformer network, which then outputs a single score between
0 and 1 indicating how relevant the document is for the given query. Read the article
in https://www.sbert.net/examples/applications/retrieve_rerank/README.html for further
details.
"""
def __init__(
self,
cross_encoder: str = "cross-encoder/ms-marco-TinyBERT-L-6",
top_k: int = 10
):
"""
:param cross_encoder: Local path or name of cross-encoder model in Hugging Face's model hub such as ``'cross-encoder/ms-marco-TinyBERT-L-6'``
:param top_k: The maximum number of answers to return
"""
# # save init parameters to enable export of component config as YAML
# self.set_config(
# cross_encoder=cross_encoder, use_gpu=use_gpu, top_k=top_k
# )
self.top_k = top_k
try:
from sentence_transformers import CrossEncoder
except ImportError:
raise ImportError("Can't find package `sentence-transformers` \n"
"You can install it via `pip install sentence-transformers` \n"
"For details see https://github.com/UKPLab/sentence-transformers ")
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# CrossEncoder uses cuda device if available
self.cross_encoder = CrossEncoder(cross_encoder)
def predict(self, query: str, documents: List[Document], top_k: Optional[int] = None):
"""
Use the cross-encoder to find answers for a query in the supplied list of Document.
Returns dictionaries containing answers sorted by (desc.) probability.
Example:
```python
|{
| 'query': 'What is the capital of the United States?',
| 'answers':[
| {'answer': 'Washington, D.C. (also known as simply Washington or D.C.,
| and officially as the District of Columbia) is the capital of
| the United States. It is a federal district. The President of
| the USA and many major national government offices are in the
| territory. This makes it the political center of the United
| States of America.',
| 'score': 0.717,
| 'document_id': 213
| },...
| ]
|}
```
:param query: Query string
:param documents: List of Document in which to search for the answer
:param top_k: The maximum number of answers to return
:return: Dict containing query and answers
"""
if top_k is None:
top_k = self.top_k
# Score every document with the cross_encoder
cross_inp = [[query, doc.text] for doc in documents]
cross_scores = self.cross_encoder.predict(cross_inp)
answers = [
{
'answer': documents[idx].text,
'score': cross_scores[idx],
'document_id': documents[idx].id,
'meta': documents[idx].meta
}
for idx in range(len(documents))
]
# Sort answers by the cross-encoder scores and select top-k
answers = sorted(
answers, key=lambda k: k["score"], reverse=True
)
answers = answers[:top_k]
results = {"query": query,
"answers": answers}
return results
def predict_batch(self, query_doc_list: List[dict], top_k: Optional[int] = None, batch_size: Optional[int] = None):
raise NotImplementedError("Batch prediction not yet available in CrossEncoderReRanker.")
class OpenDistroElasticsearchDocumentStore2(OpenDistroElasticsearchDocumentStore):
def query_by_embedding(self,
query_emb: np.ndarray,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
top_k: int = 10,
index: Optional[str] = None,
return_embedding: Optional[bool] = None) -> List[Document]:
"""
Find the document that is most similar to the provided `query_emb` by using a vector similarity metric.
:param query_emb: Embedding of the query (e.g. gathered from DPR)
:param filters: Optional filters to narrow down the search space. Follows Open Distro for
Elasticsearch syntax: https://opendistro.github.io/for-elasticsearch-docs/docs/elasticsearch/bool/. Example:
[
{
"terms": {
"author": [
"Alan Silva",
"Mark Costa",
]
}
},
{
"range": {
"timestamp": {
"gte": "01-01-2021",
"lt": "01-06-2021"
}
}
}
]
:param top_k: How many documents to return
:param index: Index name for storing the docs and metadata
:param return_embedding: To return document embedding
:return:
"""
if index is None:
index = self.index
if return_embedding is None:
return_embedding = self.return_embedding
if not self.embedding_field:
raise RuntimeError("Please specify arg `embedding_field` in ElasticsearchDocumentStore()")
else:
# +1 in similarity to avoid negative numbers (for cosine sim)
body = {
"size": top_k,
"query": {
"bool": {
"must": [
self._get_vector_similarity_query(query_emb, top_k)
]
}
}
}
if filters:
body = self._filter_adapter(body, filters)
excluded_meta_data: Optional[list] = None
if self.excluded_meta_data:
excluded_meta_data = deepcopy(self.excluded_meta_data)
if return_embedding is True and self.embedding_field in excluded_meta_data:
excluded_meta_data.remove(self.embedding_field)
elif return_embedding is False and self.embedding_field not in excluded_meta_data:
excluded_meta_data.append(self.embedding_field)
elif return_embedding is False:
excluded_meta_data = [self.embedding_field]
if excluded_meta_data:
body["_source"] = {"excludes": excluded_meta_data}
logger.debug(f"Retriever query: {body}")
result = self.client.search(index=index, body=body, request_timeout=300)["hits"]["hits"]
documents = [
self._convert_es_hit_to_document(hit, adapt_score_for_embedding=True, return_embedding=return_embedding)
for hit in result
]
return documents
def get_document_count(
self,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
index: Optional[str] = None,
only_documents_without_embedding: bool = False
) -> int:
"""
Return the number of documents in the document store.
"""
index = index or self.index
body: dict = {"query": {"bool": {}}}
if only_documents_without_embedding:
body['query']['bool']['must_not'] = [{"exists": {"field": self.embedding_field}}]
if filters:
body = self._filter_adapter(body, filters)
result = self.client.count(index=index, body=body)
count = result["count"]
return count
def get_all_documents(
self,
index: Optional[str] = None,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> List[Document]:
"""
Get documents from the document store.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
result = self.get_all_documents_generator(
index=index, filters=filters, return_embedding=return_embedding, batch_size=batch_size
)
documents = list(result)
return documents
def get_all_documents_generator(
self,
index: Optional[str] = None,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
return_embedding: Optional[bool] = None,
batch_size: int = 10_000,
) -> Generator[Document, None, None]:
"""
Get documents from the document store. Under-the-hood, documents are fetched in batches from the
document store and yielded as individual documents. This method can be used to iteratively process
a large number of documents without having to load all documents in memory.
:param index: Name of the index to get the documents from. If None, the
DocumentStore's default index (self.index) will be used.
:param filters: Optional filters to narrow down the documents to return.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param return_embedding: Whether to return the document embeddings.
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
"""
if index is None:
index = self.index
if return_embedding is None:
return_embedding = self.return_embedding
result = self._get_all_documents_in_index(index=index, filters=filters, batch_size=batch_size)
for hit in result:
document = self._convert_es_hit_to_document(hit, return_embedding=return_embedding)
yield document
def _get_all_documents_in_index(
self,
index: str,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
batch_size: int = 10_000,
only_documents_without_embedding: bool = False,
) -> Generator[dict, None, None]:
"""
Return all documents in a specific index in the document store
"""
body: dict = {"query": {"bool": {}}}
if filters:
body = self._filter_adapter(body, filters)
if only_documents_without_embedding:
body['query']['bool']['must_not'] = [{"exists": {"field": self.embedding_field}}]
result = scan(self.client, query=body, index=index, size=batch_size, scroll="1d")
yield from result
def _filter_adapter(
self,
query_body: dict,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
) -> dict:
# To not disrupt any of the code of Haystack we can accept both
# the old filters format or the new format. The following if-else
# clause deals with the operations for the right format.
if isinstance(filters, dict):
filter_clause = []
for key, values in filters.items():
if type(values) != list:
raise ValueError(
f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. '
'Example: {"name": ["some", "more"], "category": ["only_one"]} ')
filter_clause.append(
{
"terms": {key: values}
}
)
query_body["query"]["bool"]["filter"] = filter_clause
else:
query_body["query"]["bool"]["filter"] = filters
return query_body
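    # The two accepted filter formats, for illustration:
    #   dict style:  {"category": ["news", "sports"]}   -> wrapped in "terms" clauses
    #   list style:  raw Elasticsearch bool-filter clauses, e.g.
    #                [{"range": {"timestamp": {"gte": "01-01-2021"}}}]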
def update_embeddings(
self,
retriever,
index: Optional[str] = None,
filters: Optional[Union[List[dict], Dict[str, List[str]]]] = None,
update_existing_embeddings: bool = True,
batch_size: int = 10_000
):
"""
Updates the embeddings in the the document store using the encoding model specified in the retriever.
        This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config).
:param retriever: Retriever to use to update the embeddings.
:param index: Index name to update
:param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False,
only documents without embeddings are processed. This mode can be used for
incremental updating of embeddings, wherein, only newly indexed documents
get processed.
:param filters: Optional filters to narrow down the documents for which embeddings are to be updated.
Example: {"name": ["some", "more"], "category": ["only_one"]}
:param batch_size: When working with large number of documents, batching can help reduce memory footprint.
:return: None
"""
if index is None:
index = self.index
if self.refresh_type == 'false':
self.client.indices.refresh(index=index)
if not self.embedding_field:
raise RuntimeError("Specify the arg `embedding_field` when initializing ElasticsearchDocumentStore()")
if update_existing_embeddings:
document_count = self.get_document_count(index=index)
logger.info(f"Updating embeddings for all {document_count} docs ...")
else:
document_count = self.get_document_count(index=index, filters=filters,
only_documents_without_embedding=True)
logger.info(f"Updating embeddings for {document_count} docs without embeddings ...")
result = self._get_all_documents_in_index(
index=index,
filters=filters,
batch_size=batch_size,
only_documents_without_embedding=not update_existing_embeddings
)
logging.getLogger("elasticsearch").setLevel(logging.CRITICAL)
with tqdm(total=document_count, position=0, unit=" Docs", desc="Updating embeddings") as progress_bar:
for result_batch in get_batches_from_generator(result, batch_size):
document_batch = [self._convert_es_hit_to_document(hit, return_embedding=False) for hit in result_batch]
embeddings, umap_embeddings, topic_numbers, topic_labels = retriever.embed_passages(document_batch) # type: ignore
assert len(document_batch) == len(embeddings)
if embeddings[0].shape[0] != self.embedding_dim:
raise RuntimeError(f"Embedding dim. of model ({embeddings[0].shape[0]})"
f" doesn't match embedding dim. in DocumentStore ({self.embedding_dim})."
"Specify the arg `embedding_dim` when initializing ElasticsearchDocumentStore()")
doc_updates = []
for doc, emb, umap_emb, topn, topl in zip(document_batch, embeddings, umap_embeddings, topic_numbers, topic_labels):
update = {"_op_type": "update",
"_index": index,
"_id": doc.id,
"doc": {
self.embedding_field: emb.tolist(),
"umap_embeddings": umap_emb.tolist(),
"topic_number": topn,
"topic_label": topl
},
}
doc_updates.append(update)
bulk(self.client, doc_updates, request_timeout=300, refresh=self.refresh_type)
progress_bar.update(batch_size)
| 45.956764 | 149 | 0.603654 | 3,796 | 32,951 | 5.08509 | 0.149368 | 0.006424 | 0.003989 | 0.010517 | 0.417603 | 0.368078 | 0.345439 | 0.307154 | 0.260581 | 0.235197 | 0 | 0.006099 | 0.313314 | 32,951 | 716 | 150 | 46.02095 | 0.846997 | 0.266031 | 0 | 0.3 | 0 | 0 | 0.125393 | 0.021044 | 0 | 0 | 0 | 0.001397 | 0.008889 | 1 | 0.068889 | false | 0.022222 | 0.046667 | 0.002222 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f811161ced4a1cebb10b296dd174d5c70763adb | 886 | py | Python | commands.py | rsmelo/speech_test | ad5fc5605c453f6db33c1a725edae03672951eb1 | [
"MIT"
] | null | null | null | commands.py | rsmelo/speech_test | ad5fc5605c453f6db33c1a725edae03672951eb1 | [
"MIT"
] | null | null | null | commands.py | rsmelo/speech_test | ad5fc5605c453f6db33c1a725edae03672951eb1 | [
"MIT"
] | null | null | null | import subprocess
import os
from get_answer import Fetcher
class Commander:
def __init__(self):
self.confirm = [
"yes", "affimartive", "si", "sure", "do it", "yeah", "confirm"
]
self.cancel = [
"no", "negative", "negative soldier", "don't", "wait", "cancel"
]
def discover(self, text):
if "what" in text and "your name" in text:
if "my" in text:
self.respond("You haven't told me you name yet")
else:
self.respond("My name is python commander. How are you?")
else:
fetcher = Fetcher("https://www.google.com.br/search?q=" + text)
answer = fetcher.lookup()
self.respond(answer)
def respond(self, response):
print(response)
subprocess.call('tts.exe -f 10 -v 1 "' + response + '"', shell=True)
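# A quick usage sketch (assumption: tts.exe and get_answer.Fetcher are
# available on this machine):
#
#     commander = Commander()
#     commander.discover("what is your name")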
| 30.551724 | 76 | 0.538375 | 106 | 886 | 4.45283 | 0.622642 | 0.038136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005017 | 0.325056 | 886 | 28 | 77 | 31.642857 | 0.784281 | 0 | 0 | 0.083333 | 0 | 0 | 0.249436 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.291667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8164659ef1ccd20d9bceb9a6bfd362f063f6e1 | 2,073 | py | Python | day-3/part1.py | bwheel/AdventOfCode2018 | efd5c59c67453e5388a9d38022890affcb3265cf | [
"MIT"
] | null | null | null | day-3/part1.py | bwheel/AdventOfCode2018 | efd5c59c67453e5388a9d38022890affcb3265cf | [
"MIT"
] | null | null | null | day-3/part1.py | bwheel/AdventOfCode2018 | efd5c59c67453e5388a9d38022890affcb3265cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import re
regex = "^(#(?P<ClaimId>[0-9]{0,}) @ (?P<FromLeftEdge>[0-9]{0,}),(?P<FromTopEdge>[0-9]{0,}): (?P<Width>[0-9]{0,4})x(?P<Height>[0-9]{0,4}).*)$"
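# Each input line looks like "#123 @ 3,2: 5x4": claim id 123, offset 3 from the
# left edge and 2 from the top edge, 5 wide and 4 tall (Advent of Code 2018, day 3).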
class Claim(object):
def __init__(self, line):
match = re.search(regex, line)
self.ClaimId = match.group('ClaimId')
self.FromLeftEdge = int(match.group('FromLeftEdge'))
self.FromTopEdge = int(match.group('FromTopEdge'))
self.Width = int(match.group('Width'))
self.Height = int(match.group('Height'))
def __str__(self):
return "ClaimId: " + str(self.ClaimId) + "\n\tFromLeftEdge: " + str(self.FromLeftEdge) + "\tFromTopEdge: " + str(self.FromTopEdge) + "\tWidth: " + str(self.Width) + "\tHeight: " + str(self.Height)
def updateCloth(self, cloth):
for x in range(self.FromLeftEdge, self.FromLeftEdge + self.Width):
for y in range(self.FromTopEdge, self.FromTopEdge + self.Height):
cloth[x][y] = cloth[x][y] + 1
def main():
claims = []
maxWidth = 0
maxHeight = 0
    # read in all the claims and find the maximum extent (width and height) of the cloth
with open("input.txt") as f:
for line in f:
claim = Claim(line)
claims.append(claim)
compareWidth = claim.FromLeftEdge + claim.Width
compareHeight = claim.FromTopEdge + claim.Height
maxWidth = compareWidth if compareWidth > maxWidth else maxWidth
maxHeight = compareHeight if compareHeight > maxHeight else maxHeight
# build up the cloth
cloth = [0] * maxWidth
for x in range(maxWidth):
cloth[x] = [0] * maxHeight
# update the cloth with all the claims
for claim in claims:
claim.updateCloth(cloth)
# find the overlapping claims
overlapCount = 0
for x in range(len(cloth)):
for y in range(len(cloth[x])):
if cloth[x][y] > 1:
overlapCount = overlapCount + 1
print("Overlap Count: " + str(overlapCount))
if __name__ == "__main__":
main() | 36.368421 | 205 | 0.593343 | 261 | 2,073 | 4.651341 | 0.291188 | 0.034596 | 0.012356 | 0.009885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016351 | 0.262422 | 2,073 | 57 | 206 | 36.368421 | 0.777632 | 0.085384 | 0 | 0 | 0 | 0.02439 | 0.140592 | 0.067653 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.02439 | 0.02439 | 0.170732 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f837f1cd2f19a02fd36eec3fc46b1adb29677fc | 3,962 | py | Python | utils.py | vitalProjects/curse_project | 5768d7413db86b1d1054f0ff1102acfbea773fc7 | [
"Apache-2.0"
] | 1 | 2021-03-14T21:38:41.000Z | 2021-03-14T21:38:41.000Z | utils.py | vitalfect/columnsAgent | 5768d7413db86b1d1054f0ff1102acfbea773fc7 | [
"Apache-2.0"
] | null | null | null | utils.py | vitalfect/columnsAgent | 5768d7413db86b1d1054f0ff1102acfbea773fc7 | [
"Apache-2.0"
] | null | null | null | from math import log
import pickle
import numpy
from numpy import array
from math import log10
# beam_search
def beam_search(data: numpy.ndarray, k: int) -> numpy.ndarray:
    # Each candidate is [sequence of column indices, cumulative log-score].
    sequences = [[list(), 0.0]]
    for row in data:
        all_candidates = list()
        # Expand every current candidate with every column of this row.
        for i in range(len(sequences)):
            seq, score = sequences[i]
            for j in range(len(row)):
                # log(-row[j]) assumes the entries are negated probabilities
                candidate = [seq + [j], score + log(-row[j])]
                all_candidates.append(candidate)
        # Keep only the k candidates with the lowest cumulative score.
        ordered = sorted(all_candidates, key=lambda tup: tup[1])
        sequences = ordered[:k]
    return array(sequences, dtype=object)
def main_beam_search() -> None:
best_decision = 3
data = [[-0.1, -0.2, -0.3, -0.4, -0.5],
[-0.5, -0.4, -0.3, -0.2, -0.1],
[-0.1, -0.2, -0.3, -0.4, -0.5],
[-0.5, -0.4, -0.3, -0.2, -0.1],
[-0.1, -0.2, -0.3, -0.4, -0.5],
[-0.5, -0.4, -0.3, -0.2, -0.1],
[-0.1, -0.2, -0.3, -0.4, -0.5],
[-0.5, -0.4, -0.3, -0.2, -0.1],
[-0.1, -0.2, -0.3, -0.4, -0.5],
[-0.5, -0.4, -0.3, -0.2, -0.1]]
data = array(data, dtype=object)
result = beam_search(data, best_decision)
print(f"{best_decision} best decision is:\n")
for seq in result:
print(seq)
# markov chain
def generate_freq_table(data: str, k: int) -> dict:
table = {}
for i in range(len(data) - k):
x = data[i:i + k]
y = data[i + k]
if table.get(x) is None:
table[x] = {}
table[x][y] = 1
else:
if table[x].get(y) is None:
table[x][y] = 1
else:
table[x][y] += 1
return table
def freq_into_prob(table: dict) -> dict:
for symbs in table.keys():
s = float(sum(table[symbs].values()))
for freq in table[symbs].keys():
table[symbs][freq] = table[symbs][freq] / s
return table
def markov_chain(train_filename: str, k: int) -> dict:
with open(train_filename, mode="r") as file:
data = file.read()
freq_table = generate_freq_table(data, k)
prob_table = freq_into_prob(freq_table)
return prob_table
def save_chain(filename, model) -> None:
with open(filename, mode="wb") as file:
pickle.dump(model, file)
def load_chain(filename: str) -> dict:
with open(filename, mode="rb") as file:
model = pickle.load(file)
return model
def sample_next(text: str, model: dict, k: int) -> dict:
text = text[-k:]
prob = dict(zip(list(model[text].keys()), list(model[text].values())))
prob = dict(sorted(prob.items(), key=lambda item: item[1]))
return prob
def main_markov_chain() -> None:
text = "chec"
model = load_chain("./data/statistic/model_3.pkl")
res = sample_next(text, model, 3)
print(f"Possible continuation for {text} is: {res}")
# n-gram statistic
class NgramScore(object):
def __init__(self, ngramfile: str, sep=' '):
self.ngrams = {}
key = None
for line in open(ngramfile, 'r'):
key, count = line.split(sep)
self.ngrams[key] = int(count)
self.L = len(key)
self.N = sum(self.ngrams.values())
for key in self.ngrams.keys():
self.ngrams[key] = log10(float(self.ngrams[key]) / self.N)
self.floor = log10(0.01 / self.N)
def score(self, text: str) -> float:
score = 0
ngrams = self.ngrams.__getitem__
for i in range(len(text) - self.L + 1):
if text[i:i + self.L] in self.ngrams:
score += ngrams(text[i:i + self.L])
else:
score += self.floor
return score
def main_ngram() -> None:
text = "chec"
loader = NgramScore("./data/statistic/english_4grams.txt")
value = loader.score(text)
print(f"For 'chec' value = {value}")
if __name__ == "__main__":
main_beam_search()
main_markov_chain()
main_ngram()
| 28.919708 | 74 | 0.542655 | 589 | 3,962 | 3.55348 | 0.205433 | 0.009556 | 0.014333 | 0.009556 | 0.089823 | 0.047778 | 0.047778 | 0.047778 | 0.047778 | 0.047778 | 0 | 0.043447 | 0.291267 | 3,962 | 136 | 75 | 29.132353 | 0.701923 | 0.010348 | 0 | 0.158879 | 0 | 0 | 0.048251 | 0.016084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.11215 | false | 0 | 0.046729 | 0 | 0.233645 | 0.037383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f85a86c36b65042bb52fa6bb39cd317aa2eeda3 | 488 | py | Python | 02-FaceDetect.py | amandureja/Important-Content | d222bbd65f3cf7b40a6766905baa160576b6d295 | [
"Apache-2.0"
] | null | null | null | 02-FaceDetect.py | amandureja/Important-Content | d222bbd65f3cf7b40a6766905baa160576b6d295 | [
"Apache-2.0"
] | null | null | null | 02-FaceDetect.py | amandureja/Important-Content | d222bbd65f3cf7b40a6766905baa160576b6d295 | [
"Apache-2.0"
] | null | null | null | import cv2
face_data = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)
while True:
    ret, img = cap.read()
    if not ret:  # stop if no frame could be read from the camera
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.3 and minNeighbors=5 are common settings for frontal faces
    faces = face_data.detectMultiScale(gray, 1.3, 5)
for x,y,w,h in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 5)
cv2.imshow('img',img)
    k = cv2.waitKey(30) & 0xFF
    if k == 27:  # Esc key quits
break
cap.release()
cv2.destroyAllWindows()
| 22.181818 | 73 | 0.610656 | 72 | 488 | 4.069444 | 0.611111 | 0.054608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067385 | 0.239754 | 488 | 21 | 74 | 23.238095 | 0.722372 | 0 | 0 | 0 | 0 | 0 | 0.08137 | 0.074946 | 0 | 0 | 0.008565 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f861c4a85065cb19ee05620bdec96aa88122e49 | 12,446 | py | Python | src/models/train_BDRRAA.py | ChristianDjurhuus/RAA | b2eb1db527bcb09f35598c2bbf8dff2689ad599b | [
"MIT"
] | 1 | 2022-03-16T16:09:22.000Z | 2022-03-16T16:09:22.000Z | src/models/train_BDRRAA.py | ChristianDjurhuus/RAA | b2eb1db527bcb09f35598c2bbf8dff2689ad599b | [
"MIT"
] | null | null | null | src/models/train_BDRRAA.py | ChristianDjurhuus/RAA | b2eb1db527bcb09f35598c2bbf8dff2689ad599b | [
"MIT"
] | 1 | 2022-02-18T17:10:27.000Z | 2022-02-18T17:10:27.000Z | import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn import metrics
import seaborn as sns
from torch_sparse import spspmm
import numpy as np
from src.visualization.visualize import Visualization
from src.features.link_prediction import Link_prediction
class BDRRAA(nn.Module, Link_prediction, Visualization):
def __init__(self, k, d, sample_size, data, data_type = "sparse", data2 = None, non_sparse_i = None, non_sparse_j = None, sparse_i_rem = None, sparse_j_rem = None):
super(BDRRAA, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.data_type = data_type
self.sample_size = sample_size
self.k = k
self.d = d
self.sparse_i_idx = data.to(self.device)
self.sparse_j_idx = data2.to(self.device)
self.non_sparse_i_idx_removed = non_sparse_i.to(self.device)
self.non_sparse_j_idx_removed = non_sparse_j.to(self.device)
self.sparse_i_idx_removed = sparse_i_rem.to(self.device)
self.sparse_j_idx_removed = sparse_j_rem.to(self.device)
self.removed_i = torch.cat((self.non_sparse_i_idx_removed, self.sparse_i_idx_removed))
self.removed_j = torch.cat((self.non_sparse_j_idx_removed, self.sparse_j_idx_removed))
self.sample_shape = (len(self.sparse_i_idx), len(self.sparse_j_idx))
self.sampling_i_weights = torch.ones(self.sample_shape[0], device = self.device)
self.sampling_j_weights = torch.ones(self.sample_shape[1], device = self.device)
self.sample_i_size = int(self.sample_shape[0] * self.sample_size)
self.sample_j_size = int(self.sample_shape[1] * self.sample_size)
self.beta = torch.nn.Parameter(torch.randn(self.sample_shape[0], device = self.device))
self.gamma = torch.nn.Parameter(torch.randn(self.sample_shape[1], device = self.device))
self.softplus = nn.Softplus()
self.A = torch.nn.Parameter(torch.randn(self.d, self.k, device = self.device))
self.Z_i = torch.nn.Parameter(torch.randn(self.k, self.sample_shape[0], device = self.device))
self.Z_j = torch.nn.Parameter(torch.randn(self.k, self.sample_shape[1], device = self.device))
self.Gate = torch.nn.Parameter(torch.randn(self.sample_shape[0] + self.sample_shape[1], self.k, device = self.device))
self.losses = []
self.N = self.sample_shape[0] + self.sample_shape[1]
Link_prediction.__init__(self)
Visualization.__init__(self)
def sample_network(self):
# USE torch_sparse lib i.e. : from torch_sparse import spspmm
# sample for undirected network
sample_i_idx = torch.multinomial(self.sampling_i_weights, self.sample_i_size, replacement=False)
sample_j_idx = torch.multinomial(self.sampling_j_weights, self.sample_j_size, replacement=False)
# translate sampled indices w.r.t. to the full matrix, it is just a diagonal matrix
indices_i_translator = torch.cat([sample_i_idx.unsqueeze(0), sample_i_idx.unsqueeze(0)], 0)
indices_j_translator = torch.cat([sample_j_idx.unsqueeze(0), sample_j_idx.unsqueeze(0)], 0)
# adjacency matrix in edges format
edges = torch.cat([self.sparse_i_idx.unsqueeze(0), self.sparse_j_idx.unsqueeze(0)], 0)
# matrix multiplication B = Adjacency x Indices translator
# see spspmm function, it give a multiplication between two matrices
# indexC is the indices where we have non-zero values and valueC the actual values (in this case ones)
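        # For reference, spspmm(indexA, valueA, indexB, valueB, m, k, n) multiplies
        # a sparse (m x k) COO matrix by a sparse (k x n) COO matrix.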
indexC, valueC = spspmm(edges, torch.ones(edges.shape[1]), indices_j_translator,
torch.ones(indices_j_translator.shape[1]), self.sample_shape[0], self.sample_shape[1],
self.sample_shape[1], coalesced=True)
# second matrix multiplication C = Indices translator x B, indexC returns where we have edges inside the sample
indexC, valueC = spspmm(indices_i_translator, torch.ones(indices_i_translator.shape[1]), indexC, valueC,
self.sample_shape[0], self.sample_shape[0], self.sample_shape[1], coalesced=True)
# edge row position
sparse_i_sample = indexC[0, :]
# edge column position
sparse_j_sample = indexC[1, :]
return sample_i_idx, sample_j_idx, sparse_i_sample, sparse_j_sample
def log_likelihood(self):
sample_i_idx, sample_j_idx, sparse_sample_i, sparse_sample_j = self.sample_network()
Z_i = F.softmax(self.Z_i, dim=0) # (K x N)
Z_j = F.softmax(self.Z_j, dim=0)
Z = torch.cat((Z_i[:,sample_i_idx], Z_j[:,sample_j_idx]),1) #Concatenate partition embeddings
Gate = torch.cat((self.Gate[sample_i_idx,:], self.Gate[sample_j_idx,:]), 0)
Gate = torch.sigmoid(Gate) # Sigmoid activation function
C = (Z.T * Gate) / (Z.T * Gate).sum(0) # Gating function
# For the nodes without links
bias_matrix = self.beta[sample_i_idx].unsqueeze(1) + self.gamma[sample_j_idx] # (N x N)
AZC = torch.mm(self.A, torch.mm(Z, C))
mat = (torch.exp(bias_matrix -
((torch.mm(AZC,Z_i[:,sample_i_idx]).T.unsqueeze(1) -
torch.mm(AZC,Z_j[:,sample_j_idx]).T + 1e-06) ** 2).sum(-1) ** 0.5)).sum()
mat_links = ((self.beta[sparse_sample_i] + self.gamma[sparse_sample_j]) -
(((AZC @ Z_i[:,sparse_sample_i]).T -
(AZC @ Z_j[:,sparse_sample_j]).T + 1e-06) ** 2).sum(-1) ** 0.5).sum()
log_likelihood_sparse = mat_links - mat
return log_likelihood_sparse
def train(self, iterations, LR = 0.1, print_loss = False):
optimizer = torch.optim.Adam(params = self.parameters(), lr=LR)
for _ in range(iterations):
loss = - self.log_likelihood() / self.N
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.losses.append(loss.item())
if print_loss:
print('Loss at the',_,'iteration:',loss.item())
if __name__ == "__main__":
seed = 42
torch.random.manual_seed(seed)
k = 3
d = 2
# Data
dataset = "drug-gene"
data = torch.from_numpy(np.loadtxt("../data/train_masks/" + dataset + "/sparse_i.txt")).long()
data2 = torch.from_numpy(np.loadtxt("../data/train_masks/" + dataset + "/sparse_j.txt")).long()
sparse_i_rem = torch.from_numpy(np.loadtxt("../data/train_masks/" + dataset + "/sparse_i_rem.txt")).long()
sparse_j_rem = torch.from_numpy(np.loadtxt("../data/train_masks/" + dataset + "/sparse_j_rem.txt")).long()
non_sparse_i = torch.from_numpy(np.loadtxt("../data/train_masks/" + dataset + "/non_sparse_i.txt")).long()
non_sparse_j = torch.from_numpy(np.loadtxt("../data/train_masks/" + dataset + "/non_sparse_j.txt")).long()
model = BDRRAA(k = k, d = d, sample_size = 0.2, data = data, data2 = data2, non_sparse_i=non_sparse_i, non_sparse_j=non_sparse_j, sparse_i_rem=sparse_i_rem, sparse_j_rem=sparse_j_rem)
iterations = 10000
    model.train(iterations=iterations, print_loss=True)
# Plotting latent space
Z_i = F.softmax(model.Z_i, dim=0)
Z_j = F.softmax(model.Z_j, dim=0)
Z = torch.cat((Z_i,Z_j),1)
G = torch.sigmoid(model.Gate)
C = (Z.T * G) / (Z.T * G).sum(0)
embeddings = torch.matmul(model.A, torch.matmul(torch.matmul(Z, C), Z)).T
archetypes = torch.matmul(model.A, torch.matmul(Z, C))
fig, ([ax1, ax2]) = plt.subplots(nrows=1, ncols=2)
sns.heatmap(Z.detach().numpy(), cmap="YlGnBu", cbar=False, ax=ax1)
sns.heatmap(C.T.detach().numpy(), cmap="YlGnBu", cbar=False, ax=ax2)
#sns.heatmap(Z_j.detach().numpy(), cmap="YlGnBu", cbar=False, ax=ax3)
#sns.heatmap(C_j.T.detach().numpy(), cmap="YlGnBu", cbar=False, ax=ax4)
if embeddings.shape[1] == 3:
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(embeddings[:, 0].detach().numpy(), embeddings[:, 1].detach().numpy(),
embeddings[:, 2].detach().numpy(), c='red')
ax.scatter(archetypes[0, :].detach().numpy(), archetypes[1, :].detach().numpy(),
archetypes[2, :].detach().numpy(), marker='^', c='black')
'''ax.scatter(embeddings_j[:, 0].detach().numpy(), embeddings_j[:, 1].detach().numpy(),
embeddings_j[:, 2].detach().numpy(), c='blue')
ax.scatter(archetypes_j[0, :].detach().numpy(), archetypes_j[1, :].detach().numpy(),
archetypes_j[2, :].detach().numpy(), marker='^', c='purple')'''
ax.set_title(f"Latent space after {iterations} iterations")
else:
fig, (ax1, ax2) = plt.subplots(1, 2)
        # The model exposes sample_shape, not input_size; sample_shape[0] is the size of the first bipartite partition
        ax1.scatter(embeddings[model.sample_shape[0]:, 0].detach().numpy(), embeddings[model.sample_shape[0]:, 1].detach().numpy(), c='red')
        ax1.scatter(embeddings[:model.sample_shape[0], 0].detach().numpy(), embeddings[:model.sample_shape[0], 1].detach().numpy(), c='blue')
ax1.scatter(archetypes[0, :].detach().numpy(), archetypes[1, :].detach().numpy(), marker='^', c='black')
#ax1.scatter(embeddings_j[:, 0].detach().numpy(), embeddings_j[:, 1].detach().numpy(), c='blue')
#ax1.scatter(archetypes_j[0, :].detach().numpy(), archetypes_j[1, :].detach().numpy(), marker='^', c='purple')
ax1.set_title(f"Latent space after {iterations} iterations")
# Plotting learning curve
ax2.plot(model.losses)
ax2.set_yscale("log")
ax2.set_title("Loss")
plt.show()
model.plot_auc()
"""
def link_prediction(self):
with torch.no_grad():
Z_i = F.softmax(self.Z_i, dim=0) # (K x N)
Z_j = F.softmax(self.Z_j, dim=0)
Z = torch.cat((Z_i, Z_j),1) #Concatenate partition embeddings
#Z = F.softmax(Z, dim=0)
G = F.sigmoid(self.G)
C = (Z.T * G) / (Z.T * G).sum(0) # Gating function
M_i = torch.matmul(self.A, torch.matmul(torch.matmul(Z, C), Z[:, self.removed_i])).T # Size of test set e.g. K x N
M_j = torch.matmul(self.A, torch.matmul(torch.matmul(Z, C), Z[:, self.removed_j])).T
z_pdist_test = ((M_i - M_j + 1e-06) ** 2).sum(-1) ** 0.5 # N x N
theta = (self.beta[self.removed_i] + self.gamma[self.removed_j] - z_pdist_test) # N x N
# Get the rate -> exp(log_odds)
rate = torch.exp(theta) # N
            # TODO: Still need to check whether this is the correct order.
target = torch.cat((torch.zeros(self.non_sparse_i_idx_removed.shape[0]), torch.ones(self.sparse_i_idx_removed.shape[0])))
fpr, tpr, threshold = metrics.roc_curve(target, rate.numpy())
# Determining AUC score and precision and recall
auc_score = metrics.roc_auc_score(target, rate.cpu().data.numpy())
return auc_score, fpr, tpr
G = mmread('data/toy_data/divorce/divorce.mtx')
edge_list = torch.tensor([G.row,G.col]).T
edge_list = edge_list.long()
seed = 42
torch.random.manual_seed(seed)
# A = mmread("data/raw/soc-karate.mtx")
# A = A.todense()
k = 3
d = 2
link_pred = True
if link_pred:
num_samples = round(0.2 * ((50 * 9)))
idx_i_test = torch.multinomial(input=torch.arange(0, float(50)), num_samples=num_samples,
replacement=True)
idx_j_test = torch.multinomial(input=torch.arange(0, float(9)), num_samples=num_samples, replacement=True)
test = torch.stack((idx_i_test, idx_j_test))
# TODO: could be a killer.. maybe do it once and save adjacency list ;)
def if_edge(a, edge_list):
a = a.tolist()
edge_list = edge_list.tolist()
a = list(zip(a[0], a[1]))
edge_list = list(zip(edge_list[0], edge_list[1]))
return [a[i] in edge_list for i in range(len(a))]
target = [] #if_edge(test, edge_list)
G = G.todense()
for i in range(len(idx_i_test)):
if G[idx_i_test[i], idx_j_test[i]] == 1:
G[idx_i_test[i], idx_j_test[i]] = 0
target.append(True)
else:
target.append(False)
G = scipy.sparse.coo_matrix(G)
edge_list = torch.tensor([G.row,G.col]).T
edge_list = edge_list.long()
""" | 48.807843 | 187 | 0.625422 | 1,836 | 12,446 | 4.030501 | 0.158497 | 0.037838 | 0.038514 | 0.019459 | 0.448514 | 0.36973 | 0.304189 | 0.269459 | 0.196351 | 0.151486 | 0 | 0.01661 | 0.226016 | 12,446 | 255 | 188 | 48.807843 | 0.751583 | 0.087659 | 0 | 0 | 0 | 0 | 0.048202 | 0 | 0 | 0 | 0 | 0.007843 | 0 | 1 | 0.03125 | false | 0 | 0.078125 | 0 | 0.132813 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8755703317e0969ef5e60e9a3552a3020e20c1 | 1,263 | py | Python | Frame/GameView.py | FCWYzzr/py-entertain | ddc5b5df9962a4b51d15bff805961f0ed151fe23 | [
"MIT"
] | null | null | null | Frame/GameView.py | FCWYzzr/py-entertain | ddc5b5df9962a4b51d15bff805961f0ed151fe23 | [
"MIT"
] | null | null | null | Frame/GameView.py | FCWYzzr/py-entertain | ddc5b5df9962a4b51d15bff805961f0ed151fe23 | [
"MIT"
] | null | null | null | from pygame.display import set_mode, update, set_caption
from pygame.time import Clock
from pygame import init, quit, QUIT
from pygame.event import get as events
from GameStage import GameStages
from sys import exit as end
from typing import Optional


class window:
    def __init__(self):
        init()
        Name = "Title"
        mode = (600, 400)
        self.screen = set_mode(mode)
        set_caption(Name)
        self.currentStage = GameStages['test']  # some stage
        self.currentStage.Enter(mode)

    def Main(self):
        clock = Clock()
        callback: Optional[str] = None
        while True:
            for eve in events():
                if eve.type == QUIT:
                    self.currentStage.Exit()
                    quit()
                    end(0)
                else:
                    print('not quit', end="\r")
                    callback = self.currentStage.Control(eve)
                if callback:
                    self.callbackControl(callback)
                    callback = None
            self.currentStage.Update()
            self.currentStage.Rend(self.screen)
            update()
            clock.tick(10)

    def callbackControl(self, callback):
        pass
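
# Minimal launch sketch, assuming GameStages['test'] implements the
# Enter/Control/Update/Rend/Exit protocol used by the loop above:
if __name__ == "__main__":
    window().Main()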
| 27.456522 | 62 | 0.511481 | 127 | 1,263 | 5.023622 | 0.456693 | 0.15047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012081 | 0.410135 | 1,263 | 45 | 63 | 28.066667 | 0.844295 | 0.007918 | 0 | 0 | 0 | 0 | 0.015755 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.027778 | 0.166667 | 0 | 0.277778 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f87609b319d11eb19d09b2679873ad7399e930b | 1,268 | py | Python | test/test_sources.py | PSD79/avaland | 142547e48b1728db6efe8a6b9f02af18a1b42bc5 | [
"MIT"
] | 27 | 2020-05-12T22:02:57.000Z | 2021-07-27T10:53:24.000Z | test/test_sources.py | PSD79/avaland | 142547e48b1728db6efe8a6b9f02af18a1b42bc5 | [
"MIT"
] | null | null | null | test/test_sources.py | PSD79/avaland | 142547e48b1728db6efe8a6b9f02af18a1b42bc5 | [
"MIT"
] | 2 | 2020-05-13T18:40:03.000Z | 2020-05-14T15:01:07.000Z | import inspect
import unittest
from avaland import sources
from avaland import MusicBase
from avaland.search import SearchResult
def test_search(self):
    query = self.source.search.test
    source = self.source({}).search(query)
    self.assertIsInstance(source, SearchResult)


def test_artist_id(self):
    artist = self.source.get_artist.test
    source = self.source({}).get_artist(artist)
    self.assertIsInstance(source, SearchResult)


def test_album_id(self):
    album = self.source.get_album.test
    source = self.source({}).get_album(album)
    self.assertIsInstance(source, SearchResult)


def test_download_url(self):
    music_id = self.source.get_download_url.test
    data = self.source({}).get_download_url(music_id)
    self.assertEqual(len(data), 3)


def _create_class(name, obj):
    _class = type(name + "Test", (unittest.TestCase,), {
        "source": obj,
        "test_search": test_search,
        "test_artist": test_artist_id,
        "test_album": test_album_id,
        "test_download": test_download_url
    })
    return _class


class_members = inspect.getmembers(sources, inspect.isclass)
for i in class_members:
    if issubclass(i[1], MusicBase) and i[1] != MusicBase:
        globals()[i[0]] = _create_class(i[0], i[1])
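
# Sketch: the generated *Test classes live in globals(), so the standard
# unittest runner discovers them:
if __name__ == "__main__":
    unittest.main()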
| 24.862745 | 60 | 0.702681 | 167 | 1,268 | 5.11976 | 0.269461 | 0.093567 | 0.091228 | 0.070175 | 0.267836 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0.005758 | 0.178233 | 1,268 | 50 | 61 | 25.36 | 0.814779 | 0 | 0 | 0.088235 | 0 | 0 | 0.043444 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.147059 | false | 0 | 0.147059 | 0 | 0.323529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f88a5b5649c62584bb4e5058103dfe3ee699bde | 382 | py | Python | db.py | dcronqvist/restberry-api | 35a2698ae946fc392e5e7d56dbc22b0719d6f5b6 | [
"MIT"
] | 1 | 2020-09-18T23:17:27.000Z | 2020-09-18T23:17:27.000Z | db.py | dcronqvist/restberry-api | 35a2698ae946fc392e5e7d56dbc22b0719d6f5b6 | [
"MIT"
] | 7 | 2020-09-29T14:21:24.000Z | 2021-06-15T22:04:47.000Z | db.py | dcronqvist/restberry-api | 35a2698ae946fc392e5e7d56dbc22b0719d6f5b6 | [
"MIT"
] | null | null | null | from pymongo import MongoClient
import config as config
db_conn = MongoClient(config.get_setting("mongo-db-conn", "null"))
db = db_conn.restberry_api
coll_trans = db.transactions
coll_accounts = db.accounts
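
# Note: config.get_setting("mongo-db-conn", "null") is expected to return a
# standard MongoDB URI, e.g. "mongodb://localhost:27017" (illustrative value).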
def stringify_ids(docs):
"""
Expects docs to be a list of documents, not a cursor.
"""
for doc in docs:
doc["_id"] = str(doc["_id"])
    return docs | 23.875 | 66 | 0.693717 | 57 | 382 | 4.491228 | 0.649123 | 0.070313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196335 | 382 | 16 | 67 | 23.875 | 0.833876 | 0.138743 | 0 | 0 | 0 | 0 | 0.073248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f89d6f19aa8fe7e4a2b47ac44b71ea6a1395fce | 19,122 | py | Python | garnett/shapes.py | glotzerlab/garne | f9cb7bad391299e28feb4010eb77447fdc4512cb | [
"BSD-3-Clause"
] | 4 | 2019-07-30T00:12:44.000Z | 2020-03-03T19:58:34.000Z | garnett/shapes.py | glotzerlab/garne | f9cb7bad391299e28feb4010eb77447fdc4512cb | [
"BSD-3-Clause"
] | 62 | 2019-07-29T20:05:46.000Z | 2022-02-16T15:22:01.000Z | garnett/shapes.py | glotzerlab/garne | f9cb7bad391299e28feb4010eb77447fdc4512cb | [
"BSD-3-Clause"
] | 2 | 2020-03-03T19:59:09.000Z | 2021-03-22T14:48:56.000Z | # Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""Abstract shape definitions used to read/write particle shapes."""
import json
import logging
import numpy as np
__all__ = [
    'FallbackShape',
    'Shape',
    'SphereShape',
    'ArrowShape',
    'SphereUnionShape',
    'PolygonShape',
    'SpheropolygonShape',
    'ConvexPolyhedronShape',
    'ConvexPolyhedronUnionShape',
    'ConvexSpheropolyhedronShape',
    'GeneralPolyhedronShape',
    'EllipsoidShape',
]
logger = logging.getLogger(__name__)
SHAPE_DEFAULT_COLOR = '005984FF'
class _NumpyEncoder(json.JSONEncoder):
    """JSONEncoder class converting NumPy arrays to lists."""

    def default(self, obj):
        if isinstance(obj, np.number):
            return obj.item()
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)
def _json_sanitize(func):
    """Decorator ensuring that returned data is JSON-encodable."""

    def wrapper(*args, **kwargs):
        data = func(*args, **kwargs)
        return json.loads(json.dumps(data, cls=_NumpyEncoder))

    # Ensure that the decorated function inherits the intended docstring.
    wrapper.__doc__ = func.__doc__
    return wrapper
class FallbackShape(str):
    """This shape definition class is used when no specialized Shape class can be applied.

    The fallback shape definition is a string containing the definition."""
    pass
class Shape(object):
    """Parent class of all shape objects.

    :param shape_class:
        Shape class directive, used for POS format (default: :code:`None`).
    :type shape_class:
        str
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, shape_class=None, color=None):
        self.shape_class = shape_class
        self.color = color if color else SHAPE_DEFAULT_COLOR

    def __getitem__(self, key):
        try:
            return getattr(self, key)
        except AttributeError as e:
            raise KeyError(*e.args)

    @property
    def pos_string(self):
        return "{} {}".format(self.shape_class, self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        return {"type": self.shape_class}

    def __str__(self):
        return json.dumps(self.type_shape)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return self.type_shape == other.type_shape
class SphereShape(Shape):
    """Shape class for spheres of a specified diameter.

    :param diameter:
        Diameter of the sphere.
    :type diameter:
        float
    :param orientable:
        Set to True for spheres with orientation (default: :code:`False`).
    :type orientable:
        bool
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, diameter, orientable=False, color=None):
        super(SphereShape, self).__init__(
            shape_class='sphere', color=color)
        self.diameter = diameter
        self.orientable = orientable

    @property
    def pos_string(self):
        return "{} {} {}".format(self.shape_class, self.diameter, self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> SphereShape(2.0).type_shape
            {'type': 'Sphere', 'diameter': 2.0}
        """
        return {'type': 'Sphere',
                'diameter': self.diameter}
class ArrowShape(Shape):
    """Shape class for arrows of a specified thickness.

    :param thickness:
        Thickness of the arrow.
    :type thickness:
        float
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, thickness=0.1, color=None):
        super(ArrowShape, self).__init__(
            shape_class='arrow', color=color)
        self.thickness = thickness

    @property
    def pos_string(self):
        return "{} {} {}".format(self.shape_class, self.thickness, self.color)
class SphereUnionShape(Shape):
    """Shape class for sphere unions, such as rigid bodies of many spheres.

    :param diameters:
        List of sphere diameters.
    :type diameters:
        list
    :param centers:
        List of 3D center vectors.
    :type centers:
        list
    :param colors:
        List of hexadecimal color strings in format :code:`RRGGBBAA` (default: :code:`None`).
    :type colors:
        list
    """

    def __init__(self, diameters, centers, colors=None):
        super(SphereUnionShape, self).__init__(
            shape_class='sphere_union', color='')
        self.diameters = diameters
        self.centers = centers
        self.colors = colors

    @property
    def pos_string(self):
        shape_def = '{} {} '.format(self.shape_class, len(self.centers))
        for d, p, c in zip(self.diameters, self.centers, self.colors):
            shape_def += '{0} '.format(d)
            shape_def += '{0} {1} {2} '.format(*p)
            shape_def += '{0} '.format(c)
        return shape_def

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> SphereUnionShape([0.5, 0.5, 0.5], [[0, 0, 1.0], [0, 1.0, 0], [1.0, 0, 0]]).type_shape
            {'type': 'SphereUnion', 'diameters': [0.5, 0.5, 0.5],
             'centers': [[0, 0, 1.0], [0, 1.0, 0], [1.0, 0, 0]]}
        """
        return {'type': 'SphereUnion',
                'diameters': self.diameters,
                'centers': self.centers}
class PolygonShape(Shape):
    """Shape class for polygons in a 2D plane.

    :param vertices:
        List of 2D vertex vectors.
    :type vertices:
        list
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, vertices, color=None):
        super(PolygonShape, self).__init__(
            shape_class='poly3d', color=color)
        self.vertices = vertices

    @property
    def pos_string(self):
        return "{} {} {} {}".format(
            self.shape_class,
            len(self.vertices),
            ' '.join('{} {} 0'.format(v[0], v[1]) for v in self.vertices),
            self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> PolygonShape([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5]]).type_shape
            {'type': 'Polygon', 'rounding_radius': 0,
             'vertices': [[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5]]}
        """
        return {'type': 'Polygon',
                'rounding_radius': 0,
                'vertices': self.vertices}
class SpheropolygonShape(Shape):
    """Shape class for rounded polygons in a 2D plane.

    :param vertices:
        List of 2D vertex vectors.
    :type vertices:
        list
    :param rounding_radius:
        Rounding radius applied to the spheropolygon (default: 0).
    :type rounding_radius:
        float
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, vertices, rounding_radius=0, color=None):
        super(SpheropolygonShape, self).__init__(
            shape_class='spoly3d', color=color)
        self.vertices = vertices
        self.rounding_radius = rounding_radius

    @property
    def pos_string(self):
        return "{} {} {} {} {}".format(
            self.shape_class,
            self.rounding_radius,
            len(self.vertices),
            ' '.join('{} {} 0'.format(v[0], v[1]) for v in self.vertices),
            self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> SpheropolygonShape([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5]], 0.1).type_shape
            {'type': 'Polygon', 'rounding_radius': 0.1,
             'vertices': [[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5]]}
        """
        return {'type': 'Polygon',
                'rounding_radius': self.rounding_radius,
                'vertices': self.vertices}
class ConvexPolyhedronShape(Shape):
    """Shape class for convex polyhedra.

    :param vertices:
        List of 3D vertex vectors.
    :type vertices:
        list
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, vertices, color=None):
        super(ConvexPolyhedronShape, self).__init__(
            shape_class='poly3d', color=color)
        self.vertices = vertices

    @property
    def pos_string(self):
        return "{} {} {} {}".format(
            self.shape_class,
            len(self.vertices),
            ' '.join((str(v) for xyz in self.vertices for v in xyz)),
            self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> ConvexPolyhedronShape([[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
            ...                        [-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]]).type_shape
            {'type': 'ConvexPolyhedron', 'rounding_radius': 0,
             'vertices': [[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
                          [-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]]}
        """
        return {'type': 'ConvexPolyhedron',
                'rounding_radius': 0,
                'vertices': self.vertices}
class ConvexPolyhedronUnionShape(Shape):
    """Shape class for unions of convex polyhedra.

    :param vertices:
        List of lists of 3D vertex vectors in particle coordinates (each
        polyhedron, each vertex).
    :type vertices:
        list
    :param centers:
        List of 3D polyhedra center vectors.
    :type centers:
        list
    :param orientations:
        Orientations of the polyhedra, as a list of quaternions.
    :type orientations:
        list
    :param colors:
        List of hexadecimal color strings in format :code:`RRGGBBAA` (default: :code:`None`).
    :type colors:
        list
    """

    def __init__(self, vertices, centers, orientations, colors=None):
        super(ConvexPolyhedronUnionShape, self).__init__(
            shape_class='poly3d_union', color='')
        self.vertices = vertices
        self.centers = centers
        self.orientations = orientations
        self.colors = colors

    @property
    def pos_string(self):
        shape_def = '{} {} '.format(self.shape_class, len(self.centers))
        for verts, p, q, c in zip(self.vertices, self.centers, self.orientations, self.colors):
            shape_def += '{0} '.format(len(verts))
            for v in verts:
                shape_def += '{0} {1} {2} '.format(*v)
            shape_def += '{0} {1} {2} '.format(*p)
            shape_def += '{0} {1} {2} {3} '.format(*q)
            shape_def += '{0} '.format(c)
        return shape_def
class ConvexSpheropolyhedronShape(Shape):
    """Shape class for a convex polyhedron extended by a rounding radius.

    :param vertices:
        List of 3D vertex vectors.
    :type vertices:
        list
    :param rounding_radius:
        Rounding radius applied to the spheropolyhedron (default: 0).
    :type rounding_radius:
        float
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, vertices, rounding_radius=0, color=None):
        super(ConvexSpheropolyhedronShape, self).__init__(
            shape_class='spoly3d', color=color)
        self.vertices = vertices
        self.rounding_radius = rounding_radius

    @property
    def pos_string(self):
        return "{} {} {} {} {}".format(
            self.shape_class,
            self.rounding_radius,
            len(self.vertices),
            ' '.join((str(v) for xyz in self.vertices for v in xyz)),
            self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> ConvexSpheropolyhedronShape([[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
            ...                              [-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]], 0.1).type_shape
            {'type': 'ConvexPolyhedron', 'rounding_radius': 0.1,
             'vertices': [[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
                          [-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]]}
        """
        return {'type': 'ConvexPolyhedron',
                'rounding_radius': self.rounding_radius,
                'vertices': self.vertices}
class GeneralPolyhedronShape(Shape):
    """Shape class for general polyhedra, such as arbitrary meshes.

    :param vertices:
        List of 3D vertex vectors.
    :type vertices:
        list
    :param faces:
        List of lists of integers representing vertex indices for each face.
    :type faces:
        list
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    :param facet_colors:
        List of hexadecimal color strings in format :code:`RRGGBBAA` for each facet (default: :code:`None`).
    :type facet_colors:
        list
    """

    def __init__(self, vertices, faces, color=None, facet_colors=None):
        super(GeneralPolyhedronShape, self).__init__(
            shape_class='polyV', color=color)
        self.vertices = vertices
        self.faces = faces
        self.facet_colors = facet_colors

    @property
    def pos_string(self):
        return "{} {} {} {} {} {}".format(
            self.shape_class,
            len(self.vertices),
            ' '.join((str(v) for xyz in self.vertices for v in xyz)),
            len(self.faces),
            ' '.join((str(fv) for f in self.faces for fv in [len(f)]+f)),
            self.color)

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> GeneralPolyhedronShape([[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
            ...                         [-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]]).type_shape
            {'type': 'Mesh',
             'vertices': [[0.5, 0.5, 0.5], [0.5, -0.5, -0.5],
                          [-0.5, 0.5, -0.5], [-0.5, -0.5, 0.5]],
             'indices': [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]}
        """
        return {'type': 'Mesh',
                'vertices': self.vertices,
                'indices': self.faces}
class EllipsoidShape(Shape):
    """Shape class for ellipsoids with principal axes a, b, and c.

    :param a:
        Principal axis a of the ellipsoid (radius in the x direction).
    :type a:
        float
    :param b:
        Principal axis b of the ellipsoid (radius in the y direction).
    :type b:
        float
    :param c:
        Principal axis c of the ellipsoid (radius in the z direction).
    :type c:
        float
    :param color:
        Hexadecimal color string in format :code:`RRGGBBAA` (default: :code:`None`).
    :type color:
        str
    """

    def __init__(self, a, b, c, color=None):
        super(EllipsoidShape, self).__init__(
            shape_class='ellipsoid', color=color)
        self.a = a
        self.b = b
        self.c = c

    @property
    def pos_string(self):
        return "{} {} {} {} {}".format(
            self.shape_class,
            self.a,
            self.b,
            self.c,
            self.color
        )

    @property
    @_json_sanitize
    def type_shape(self):
        """Shape as dictionary. Example:

            >>> EllipsoidShape(7.0, 5.0, 3.0).type_shape
            {'type': 'Ellipsoid',
             'a': 7.0,
             'b': 5.0,
             'c': 3.0}
        """
        return {'type': 'Ellipsoid',
                'a': self.a,
                'b': self.b,
                'c': self.c}
def _parse_type_shape(shape):
    """Parses a shape object from a dictionary.

    This method parses the `GSD Shape Visualization Specification
    <https://gsd.readthedocs.io/en/stable/shapes.html>`_, while including
    backwards compatibility with shape definitions that do not adhere to that
    specification but were previously supported by HOOMD's
    :code:`get_type_shapes()` methods.
    """
    if not shape:
        return FallbackShape('')
    type_name = shape['type'].lower()
    type_shape = None
    if type_name in ('sphere', 'disk'):
        # disk support is for backwards compatibility with get_type_shapes()
        # from HOOMD-blue < 2.7
        diameter = shape.get('diameter', 2*shape.get('rounding_radius', 0.5))
        orientable = shape.get('orientable', False)
        type_shape = SphereShape(diameter=diameter, orientable=orientable, color=None)
    elif type_name == 'ellipsoid':
        type_shape = EllipsoidShape(a=shape['a'], b=shape['b'], c=shape['c'], color=None)
    elif type_name == 'polygon':
        rounding_radius = shape.get('rounding_radius', 0)
        if rounding_radius == 0:
            type_shape = PolygonShape(vertices=shape['vertices'], color=None)
        else:
            type_shape = SpheropolygonShape(vertices=shape['vertices'],
                                            rounding_radius=rounding_radius,
                                            color=None)
    elif type_name == 'convexpolyhedron':
        rounding_radius = shape.get('rounding_radius', 0)
        if rounding_radius == 0:
            type_shape = ConvexPolyhedronShape(vertices=shape['vertices'], color=None)
        else:
            type_shape = ConvexSpheropolyhedronShape(vertices=shape['vertices'],
                                                     rounding_radius=rounding_radius,
                                                     color=None)
    elif type_name == 'mesh':
        type_shape = GeneralPolyhedronShape(vertices=shape['vertices'],
                                            faces=shape['indices'],
                                            facet_colors=shape['colors'],
                                            color=None)
    elif type_name == 'polyhedron':
        # polyhedron support is for backwards compatibility with
        # get_type_shapes() from HOOMD-blue < 2.7
        type_shape = GeneralPolyhedronShape(vertices=shape['vertices'],
                                            faces=shape['faces'],
                                            facet_colors=shape['colors'],
                                            color=None)
    elif type_name == 'sphereunion':
        # SphereUnionShape takes a `colors` list (see its signature above).
        type_shape = SphereUnionShape(diameters=shape['diameters'],
                                      centers=shape['centers'],
                                      colors=None)
    if type_shape is None:
        logger.warning("Failed to parse shape definition: shape {} not supported. "
                       "Using fallback mode.".format(type_name))
        type_shape = FallbackShape(json.dumps(shape))
    return type_shape
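
# A quick round-trip sketch of the parser above; `type_shape` is the
# dictionary form used throughout this module:
if __name__ == "__main__":
    parsed = _parse_type_shape({'type': 'Sphere', 'diameter': 2.0})
    print(parsed.type_shape)  # -> {'type': 'Sphere', 'diameter': 2.0}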
| 31.65894 | 108 | 0.563278 | 2,185 | 19,122 | 4.798627 | 0.119908 | 0.019838 | 0.026896 | 0.034335 | 0.523796 | 0.507773 | 0.471626 | 0.461707 | 0.436815 | 0.4165 | 0 | 0.025248 | 0.308179 | 19,122 | 603 | 109 | 31.711443 | 0.767329 | 0.357076 | 0 | 0.470175 | 0 | 0 | 0.092603 | 0.008639 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136842 | false | 0.003509 | 0.010526 | 0.045614 | 0.301754 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8b06b9284f0d519a5edeab7410a71507203f87 | 7,146 | py | Python | cre8/layout.py | dekarrin/cre8orforge | b93801da140ed245d4a0f7129bde0d3e655752c9 | [
"MIT"
] | null | null | null | cre8/layout.py | dekarrin/cre8orforge | b93801da140ed245d4a0f7129bde0d3e655752c9 | [
"MIT"
] | 4 | 2021-12-06T14:06:34.000Z | 2021-12-21T13:12:17.000Z | cre8/layout.py | dekarrin/cre8mancer | b93801da140ed245d4a0f7129bde0d3e655752c9 | [
"MIT"
] | null | null | null | import math
from .format import format_timer, pad_middle, pad_right, pad_left
from . import format
from .activities import Activity, OwnedActivities
DefaultTextCardWidth = 65
_RightColumnWidth = 14
def progress_bar(
        width: int,
        progress: float,
        end_char: str = '|',
        fill_char: str = '-',
        empty_char: str = ' '
) -> str:
    """
    Draw a progress bar that shows the given progress. Will only be full at exactly 100% progress.

    :param width: The width of the progress bar, including the ends. The actual progress notches
    will have this width - 2 to fill. If width is less than 3, no progress notches will be in the
    returned string.
    :param progress: The current progress to show. This is a float between 0.0 and 1.0.
    :param end_char: Character to use for the end caps.
    :param fill_char: Character to use for filled progress notches.
    :param empty_char: Character to use for unfilled progress notches.
    """
    notches = width - (len(end_char) * 2)  # account for the 'ends' of the prog bar.
    filled = math.floor(notches * progress)
    empty = notches - filled
    text = end_char + (fill_char * filled) + (empty_char * empty) + end_char
    return text
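
# Quick sanity check for progress_bar(), using the default cap/fill/empty
# characters (value computed from the definition above):
#   progress_bar(12, 0.5)  ->  '|-----     |'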
def bar(width=DefaultTextCardWidth) -> str:
    return '+' + ('-' * (width - 2)) + '+'
def make_act_store_listing(act: Activity, count: int, auto_count: int, width=DefaultTextCardWidth) -> str:
    """
    Create a card for the store that shows the price, consumption, and production
    of the next purchased instance of the Activity.

    :param act: The Activity to make the store card for.
    :param count: The current number of owned instances of that activity.
    :param auto_count: Amount of automations that are currently purchased.
    :param width: The width of the card to produce.
    """
    # +--------------------------------------------------------------+
    # | $20 Eat Bagels     - $100/C (0.00J)             | AUTO x16192 |
    # | 999h60m55s         + $100/C (0.003J)            |          x4 |
    # +--------------------------------------------------------------+
    global _RightColumnWidth

    # actual avail is width minus 2 for the borders and minus 2 for padding
    lc_text_space = width - _RightColumnWidth - 2 - 2

    # left column first (lc)
    # need to do calculation out of order bc + and - should left-align, so
    # calculate the size of both and add right padding to the shorter
    mcost_fmt = format.money(act.money_cost(count))
    mrate_fmt = format.money(act.money_rate(count))
    lc_top_right_dollars = "- {:s}/C".format(mcost_fmt)
    lc_bot_right_dollars = "+ {:s}/C".format(mrate_fmt)
    dollars_width = max(len(lc_top_right_dollars), len(lc_bot_right_dollars))
    lc_top_right_dollars = format.pad_right(dollars_width, lc_top_right_dollars)
    lc_bot_right_dollars = format.pad_right(dollars_width, lc_bot_right_dollars)
    lc_top_right = "{:s} ({:.4f}J)".format(lc_top_right_dollars, act.juice_cost(count))
    lc_bot_right = "{:s} ({:.4f}J)".format(lc_bot_right_dollars, act.juice_rate(count))
    if len(lc_top_right) > len(lc_bot_right):
        lc_bot_right += (' ' * (len(lc_top_right) - len(lc_bot_right)))
    else:
        lc_top_right += (' ' * (len(lc_bot_right) - len(lc_top_right)))
    lc_top_left = "{:s} {:s}".format(format.money(act.price(count)), act.name)
    lc_top_text = pad_middle(lc_text_space, lc_top_left, lc_top_right)
    lc_bot_left = format_timer(act.duration)
    lc_bot_text = pad_middle(lc_text_space, lc_bot_left, lc_bot_right)

    # on to the right column
    # right col will only subtract 1 for border bc one border is shared w left col glub
    # still need to subtract 2 for the padding tho
    rc_text_space = _RightColumnWidth - 1 - 2
    rc_top_text = pad_right(rc_text_space, "AUTO x{:d}".format(2 ** auto_count))
    rc_bot_text = pad_right(rc_text_space, "{:d}(i)".format(act.auto_price(auto_count)))

    # now put 'em all together!!!!!!!!
    full_text = ''
    full_text += '| ' + lc_top_text + ' | ' + rc_top_text + ' |\n'
    full_text += '| ' + lc_bot_text + ' | ' + rc_bot_text + ' |'
    return full_text
def make_act_card(oa: OwnedActivities, t: float, width=DefaultTextCardWidth) -> str:
    """
    Create a card that shows the status of an OwnedActivities.

    :param oa: The OwnedActivities to make the card for.
    :param t: The current game time represented in seconds since start.
    :param width: The width of the card to produce.
    """
    # +------------------------------------------------+--------------+
    # | Eat Bagels ($20) x242193:IN                    | (No auto)    |
    # | $100 (0J) $100/C, 0.03CJ/C                     | x{:d}        |
    # | |                            | 999h60m55s      | RUNNING      |
    # +------------------------------------------------+--------------+
    global _RightColumnWidth

    # LEFT COLUMN
    # actual avail is width minus 2 for the borders and minus 2 for padding
    lc_text_space = width - _RightColumnWidth - 2 - 2
    inactive = oa.count - oa.active

    # top line
    lc_top_left = oa.name
    lc_top_right = "({:s}) x{:d}:{:d}".format(format.money(oa.price), oa.active, inactive)
    lc_top_text = pad_middle(lc_text_space, lc_top_left, lc_top_right)

    # mid line
    lc_mid_left = "{:s} ({:.2f}J)".format(format.money(oa.money_cost), oa.juice_cost)
    lc_mid_right = "{:s}/C {:.4f}J/C".format(format.money(oa.money_production), oa.juice_production)
    lc_mid_text = pad_middle(lc_text_space, lc_mid_left, lc_mid_right)

    # bot line
    remaining_duration = oa.activity.duration
    if oa.execution is not None:
        remaining_duration = oa.execution.remaining(t)
        prog = oa.execution.progress(t)
        max_time_len = 10  # assuming three digits for hour
        prog_bar_len = lc_text_space - max_time_len - 1  # extra 1 for padding between
        lc_bot_left = progress_bar(prog_bar_len, prog)
    else:
        lc_bot_left = 'X'
    lc_bot_right = format_timer(remaining_duration)
    lc_bot_text = pad_middle(lc_text_space, lc_bot_left, lc_bot_right)

    # RIGHT COLUMN
    # right col will only subtract 1 for border bc one border is shared w left col glub
    # still need to subtract 2 for the padding tho
    rc_text_space = _RightColumnWidth - 1 - 2
    if oa.automations < 1:
        rc_top_text = pad_left(rc_text_space, "(No auto)")
        rc_mid_text = ' ' * rc_text_space
        rc_bot_text = ' ' * rc_text_space
    else:
        rc_top_text = pad_left(rc_text_space, "AUTO")
        rc_mid_text = pad_left(rc_text_space, "x{:d}".format(oa.automation_bonus))
        if oa.automated:
            rc_bot_text = pad_left(rc_text_space, "RUNNING")
        else:
            rc_bot_text = pad_left(rc_text_space, "(off)")

    # now put 'em all together!!!!!!!!
    full_text = ''
    full_text += '| ' + lc_top_text + ' | ' + rc_top_text + ' |\n'
    full_text += '| ' + lc_mid_text + ' | ' + rc_mid_text + ' |\n'
    full_text += '| ' + lc_bot_text + ' | ' + rc_bot_text + ' |'
    return full_text
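
# Rendering sketch (assumes an OwnedActivities instance `oa` and a game
# time `t` in seconds, as described in the docstring above):
#   print(bar())
#   print(make_act_card(oa, t))
#   print(bar())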
| 42.790419 | 106 | 0.623566 | 1,014 | 7,146 | 4.138067 | 0.198225 | 0.026215 | 0.030982 | 0.020257 | 0.404433 | 0.35796 | 0.310772 | 0.282412 | 0.225214 | 0.208055 | 0 | 0.017238 | 0.236916 | 7,146 | 167 | 107 | 42.790419 | 0.752246 | 0.356143 | 0 | 0.258824 | 0 | 0 | 0.044659 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047059 | false | 0 | 0.047059 | 0.011765 | 0.141176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8dc9d86f37042f9bf2a72c821d1fb648b7c645 | 3,281 | py | Python | app.py | gakunkel/sqlalchemy-challenge | f7fa8ae4e7d5f4ed77446ddd57b5695e8ca69947 | [
"MIT"
] | null | null | null | app.py | gakunkel/sqlalchemy-challenge | f7fa8ae4e7d5f4ed77446ddd57b5695e8ca69947 | [
"MIT"
] | null | null | null | app.py | gakunkel/sqlalchemy-challenge | f7fa8ae4e7d5f4ed77446ddd57b5695e8ca69947 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import datetime as dt
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
# Establishing a connection
engine = create_engine("sqlite:///./resources/hawaii.sqlite")
# To reflect classes
Base = automap_base()
Base.prepare(engine, reflect = True)
# Test out reflection
# Base.classes.keys()
# Alias for measurement and station classes
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create new session of engine
session = Session(engine)
# Flask
app = Flask(__name__)
# Route for home page displays available routes.
@app.route("/")
def home():
return (
f"Hello! Thanks for checking out the Hawaiian Climate API.<br>"
f"Here are the available routes: <br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/temp/start/end"
)
# Precipitation route converts previous year's precipitation to a dictionary using date as the key and prcp as the value.
# Returns the JSON representation of the dictionary.
@app.route("/api/v1.0/precipitation")
def precipitation():
# Data for previous year
previous_year = dt.date(2017, 8, 23)-dt.timedelta(days=365)
# Query to get precipitation and date of measurement
precipitation = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= previous_year).all()
# Save the query results as a dictionary and return it as JSON
prcp_dict = {date: prcp for date, prcp in precipitation}
return jsonify(prcp_dict)
# Stations route returns a JSON list of stations from which measurements were taken
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.station).all()
stations = list(np.ravel(results))
return jsonify(stations)
# TOBS route returns temperature observations for the most active stations for the last year of data
# Returns a JSON list of temperatures
@app.route("/api/v1.0/tobs")
def temp_monthly():
previous_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
results = session.query(Measurement.tobs).\
filter(Measurement.station == 'USC00519281').\
filter(Measurement.date >= previous_year).all()
temps = list(np.ravel(results))
return jsonify(temps)
# These routes return temperature observation data for users if they enter start, or start & end, dates
@app.route("/api/v1.0/temp/<start>")
@app.route("/api/v1.0/temp/<start>/<end>")
def stats(start = None, end = None):
sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]
if not end:
results = session.query(*sel).\
filter(Measurement.date >= start).all()
temps = list(np.ravel(results))
return jsonify(temps)
results = session.query(*sel).\
filter(Measurement.date >= start).\
filter(Measurement.date <= end).all()
temps = list(np.ravel(results))
return jsonify(temps)
if __name__ == "__main__":
app.run()
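
# Smoke-test sketch once the server is running (default Flask host/port assumed):
#   curl http://127.0.0.1:5000/api/v1.0/precipitation
#   curl http://127.0.0.1:5000/api/v1.0/temp/2016-08-23/2017-08-23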
| 28.530435 | 121 | 0.682414 | 445 | 3,281 | 4.979775 | 0.310112 | 0.020307 | 0.024368 | 0.029332 | 0.265794 | 0.217058 | 0.162455 | 0.141697 | 0.098375 | 0.038809 | 0 | 0.017925 | 0.200853 | 3,281 | 114 | 122 | 28.780702 | 0.827231 | 0.266992 | 0 | 0.166667 | 0 | 0 | 0.146812 | 0.076762 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.133333 | 0.016667 | 0.316667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8e9b2d12abccfba6d15231cb1a8950c8371c66 | 6,330 | py | Python | src/Model.py | colombelli/whatsapp-nlp | 64b54b35a2fefc7bed76e4c16841202252ea8a96 | [
"MIT"
] | 2 | 2021-05-31T16:58:15.000Z | 2021-10-02T23:21:01.000Z | src/Model.py | colombelli/whatsapp-nlp | 64b54b35a2fefc7bed76e4c16841202252ea8a96 | [
"MIT"
] | null | null | null | src/Model.py | colombelli/whatsapp-nlp | 64b54b35a2fefc7bed76e4c16841202252ea8a96 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tqdm import tqdm
from DataProcessing import DataProcessing
# Based on MIT's introduction to Deep Learning course
class Model:
    """
    LSTM model for learning from the data and generating a conversation.

    The model recognizes all different possible words and maps them to a number,
    which will serve as a value in the sequence of messages. It stacks an LSTM
    layer with a Dense layer working as the output for each next word given a
    sequence of words.

    Args:
        rnn_units (int): number of neurons in the LSTM
        dropout (float): between 0 and 1, representing the fraction of the units
            to drop for the linear transformation of the inputs.
        recurrent_dropout (float): between 0 and 1, representing the fraction of
            the units to drop for the linear transformation of the
            recurrent state.
        learning_rate (float)
        batch_size (int)
        num_training_iterations (int): number of epochs to train
        seq_length (int): the size of the word sequences for each training example
        checkpoint_prefix (str): the file name of the saved checkpoints
        checkpoint_dir (str): the directory where the checkpoints are to be saved
        embedding_dim (int): the embedding dimension to encode the words to
    """

    def __init__(self, rnn_units: int, dropout: float, recurrent_dropout: float,
                 learning_rate: float, batch_size: int, num_training_iterations: int,
                 seq_length: int, checkpoint_prefix: str, checkpoint_dir: str,
                 embedding_dim: int, data_processing: DataProcessing):
        self.dropout = dropout
        self.recurrent_dropout = recurrent_dropout
        self.rnn_units = rnn_units
        self.optimizer = tf.optimizers.Adam(learning_rate)
        self.batch_size = batch_size
        self.num_training_iterations = num_training_iterations
        self.seq_length = seq_length
        self.checkpoint_prefix = checkpoint_prefix
        self.checkpoint_dir = checkpoint_dir
        self.embedding_dim = embedding_dim
        self.data_processing = data_processing
        self.possible_starts = data_processing.get_possible_starts(seq_length)
        self.model = self.__build_model()

    # Defining the RNN Model
    def __build_model(self, batch_size=None):
        vocab_size = len(self.data_processing.vocabulary)
        if not batch_size:
            batch_size = self.batch_size
        lstm_layer = tf.keras.layers.LSTM(
            self.rnn_units,
            return_sequences=True,
            recurrent_initializer='glorot_uniform',
            recurrent_activation='sigmoid',
            stateful=True,
            dropout=self.dropout,
            recurrent_dropout=self.recurrent_dropout
        )
        model = tf.keras.Sequential([
            # Layer 1: Embedding layer to transform indexes into dense vectors
            # of a fixed embedding size
            tf.keras.layers.Embedding(vocab_size, self.embedding_dim, batch_input_shape=[batch_size, None]),
            # Layer 2: LSTM with `rnn_units` number of units.
            lstm_layer,
            # Layer 3: Dense (fully-connected) layer that transforms the LSTM output
            # into the vocabulary size.
            tf.keras.layers.Dense(vocab_size)
        ])
        return model

    # Defining the loss function
    def compute_loss(self, labels, logits):
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
        return loss

    @tf.function
    def train_step(self, x, y):
        with tf.GradientTape() as tape:
            y_hat = self.model(x)
            loss = self.compute_loss(y, y_hat)
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return loss

    def train_model(self):
        tr_history = []
        if hasattr(tqdm, '_instances'):
            tqdm._instances.clear()  # clear if it exists
        for iter in tqdm(range(self.num_training_iterations)):
            # Grab a batch and propagate it through the network
            x_batch, y_batch = self.data_processing.get_batch(self.possible_starts,
                                                              self.seq_length,
                                                              self.batch_size)
            loss = self.train_step(x_batch, y_batch)
            # Update the progress bar
            tr_history.append(loss.numpy().mean())
            # Update the model with the changed weights!
            if iter % 100 == 0:
                self.model.save_weights(self.checkpoint_prefix)
        # Save the trained model and the weights
        self.model.save_weights(self.checkpoint_prefix)
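
    # Call-sequence sketch (`dp` stands for an assumed DataProcessing
    # instance; the constructor values are illustrative only):
    #   m = Model(1024, 0.2, 0.2, 1e-3, 64, 1000, 50, "ckpt", "./ckpts", 256, dp)
    #   m.train_model()
    #   print(m.generate_text("hello"))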
    def generate_text(self, start_word, generation_length=1000):
        model = self.__build_model(batch_size=1)
        # Restore the model weights for the last checkpoint after training
        model.load_weights(tf.train.latest_checkpoint(self.checkpoint_dir))
        model.build(tf.TensorShape([1, None]))
        input_eval = [self.data_processing.word2idx[start_word]]
        input_eval = tf.expand_dims(input_eval, 0)
        # Empty string to store our results
        text_generated = []
        # Here batch size == 1
        model.reset_states()
        if hasattr(tqdm, '_instances'):  # guard as in train_model() above
            tqdm._instances.clear()
        for _ in tqdm(range(generation_length)):
            predictions = model(input_eval)
            # Remove the batch dimension
            predictions = tf.squeeze(predictions, 0)
            predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()
            # Pass the prediction along with the previous hidden state
            # as the next inputs to the model
            input_eval = tf.expand_dims([predicted_id], 0)
            text_generated.append(" " + self.data_processing.idx2word[predicted_id])
        return (start_word + ''.join(text_generated)) | 38.363636 | 108 | 0.615166 | 745 | 6,330 | 5.034899 | 0.302013 | 0.028792 | 0.027993 | 0.010664 | 0.122634 | 0.099707 | 0.099707 | 0.078379 | 0.078379 | 0.078379 | 0 | 0.006034 | 0.319273 | 6,330 | 165 | 109 | 38.363636 | 0.86447 | 0.296998 | 0 | 0.050633 | 0 | 0 | 0.007372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075949 | false | 0 | 0.037975 | 0 | 0.177215 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8ec6323597a615a6b8ab14e3b12a4175f2d4c5 | 1,989 | py | Python | ionyweb/page_app/page_agenda/tests.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 4 | 2015-09-28T10:07:39.000Z | 2019-10-18T20:14:07.000Z | ionyweb/page_app/page_agenda/tests.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 1 | 2021-03-19T21:41:33.000Z | 2021-03-19T21:41:33.000Z | ionyweb/page_app/page_agenda/tests.py | makinacorpus/ionyweb | 2f18e3dc1fdc86c7e19bae3778e67e28a37567be | [
"BSD-3-Clause"
] | 1 | 2017-10-12T09:25:19.000Z | 2017-10-12T09:25:19.000Z | # -*- coding: utf-8 -*-
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from ionyweb.website.models import WebSite
from ionyweb.page.models import Page
from ionyweb.page_app.page_agenda.models import PageApp_Agenda, Event
from ionyweb.administration.tests import test_reverse, AdministrationTests
class PageAppAgendaTests(AdministrationTests):
    def setUp(self):
        # Create website with a IonywebSubscription home page
        # Create the domain name
        site = Site.objects.get_or_create(pk=1)[0]
        site.domain = "testserver"
        site.name = "Jungleland"
        site.save()
        # Create the website
        website = WebSite.objects.create(
            title="Jungleland", theme="notmyidea",
            default_layout="100", slug="jungleland",
            domain=site)
        website.ndds.add(site)
        page_agenda = PageApp_Agenda.objects.create()
        Page.objects.create(
            website=website, parent=None, title="Home",
            placeholder_slug="content-placeholder-1",
            plugin_order=0, slug="",
            app_page_object=page_agenda)
        user = User.objects.create_user(username="admin", password="admin")
        user.is_staff = True
        user.save()
        birthday = Event.objects.create(app=page_agenda,
                                        title='My Birthday',
                                        description='Remy\'s birthday',
                                        start_date=datetime.datetime(2012, 2, 21))

    def test_get_pages(self):
        url = '/'
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)
        response = self.client.get('/p/2012/02/')
        self.assertContains(response, 'Birthday')
        response = self.client.get('/p/2012/02/21/')
        self.assertContains(response, 'Birthday')
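
# Run through Django's test runner, e.g. (sketch):
#   python manage.py test ionyweb.page_app.page_agenda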
| 33.711864 | 82 | 0.609351 | 215 | 1,989 | 5.539535 | 0.427907 | 0.050378 | 0.04534 | 0.052897 | 0.047019 | 0.047019 | 0.047019 | 0 | 0 | 0 | 0 | 0.022567 | 0.287079 | 1,989 | 58 | 83 | 34.293103 | 0.817348 | 0.057818 | 0 | 0.05 | 0 | 0 | 0.078117 | 0.011236 | 0 | 0 | 0 | 0 | 0.075 | 1 | 0.05 | false | 0.025 | 0.2 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f8fedeabf571a4542f1688e973043c53b7ef3b3 | 8,279 | py | Python | libs/helpers.py | lbahtarliev/MalwareScan | 495e2fd3ceb3498c651ddd360a4cc2eb9571a10b | [
"Unlicense"
] | 3 | 2018-12-06T03:09:16.000Z | 2021-02-25T01:13:05.000Z | libs/helpers.py | lbahtarliev/MalwareScan | 495e2fd3ceb3498c651ddd360a4cc2eb9571a10b | [
"Unlicense"
] | 9 | 2018-12-10T18:44:14.000Z | 2019-02-06T21:13:31.000Z | libs/helpers.py | lbahtarliev/MalwareScan | 495e2fd3ceb3498c651ddd360a4cc2eb9571a10b | [
"Unlicense"
] | 4 | 2019-06-04T13:46:24.000Z | 2021-02-25T02:23:50.000Z | # -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
import time
from datetime import datetime as dtime
from datetime import timedelta as tdelta
from hashlib import md5, sha1, sha256
from dateutil.parser import parse as dtparser
# Some RegExes
re_md5 = re.compile("^([0-9]|[a-f]){32}$", re.I)
re_sha1 = re.compile("^([0-9]|[a-f]){40}$", re.I)
re_sha256 = re.compile("^([0-9]|[a-f]){64}$", re.I)
re_email = re.compile(
    r"^[A-Z0-9._%+-]{1,64}@(?:[A-Z0-9-]{1,63}\.){1,125}[A-Z]{2,63}$", re.I)
re_uuid4 = re.compile(
    r"^([0-9]|[a-f]){8}\-([0-9]|[a-f]){4}\-([0-9]|[a-f]){4}\-([0-9]|[a-f]){4}\-([0-9]|[a-f]){12}$",  # noqa: E501
    re.I,
)  # noqa: E501
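
# Sanity-check sketch for the patterns above:
#   bool(re_sha256.match("a" * 64))  ->  True
#   bool(re_md5.match("a" * 64))     ->  False (MD5 digests are 32 hex chars)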
# Jinja2 Filters
def fltr_elapsedTime(date_f, date_b=None):
    if date_b is None:
        date_b = dtime.now()
    return str(dtparser(str(date_b)) - dtparser(str(date_f)))


def fltr_elapsedTime_secs(value):
    return str(tdelta(seconds=value))
# Nice log messages
def timed(message, level):
    now = time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
    return "%s | %-8s | %-6d | %-13s | %s" % (
        now,
        level,
        os.getpid(),
        sys._getframe(1).f_code.co_name,
        message,
    )
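
# Illustrative output of timed() (actual values depend on runtime context):
#   Mon, 01 Jan 2024 12:00:00 | INFO     | 4242   | my_function   | hello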
# Evaluation helpers
def eval_cleanup(new_eval):
    shutil.rmtree(
        "/tmp/uploads/files/{}".format(new_eval.uuid_f),
        ignore_errors=True,
        onerror=None,
    )


def eval_running(new_eval):
    for file in new_eval.files:
        if file.status_f == "InProgress":
            return True
    return False


def eval_status(new_eval):
    has_score = 0
    has_error = 0
    eval_result = "Complete"
    for file in new_eval.files:
        if file.score > has_score:
            has_score = file.score
        if file.status_f == "Error":
            has_error += 1
    if has_error > 0:
        eval_result = "Error"
    return eval_result, has_score
# File results marshaling helper per OpenAPI specification
def marshal_file(sfile):
    r_file = {
        "fileName": "",
        "malicious": False,
        "message": "",
        "sha256": "",
        "statusDate": "",
        "status": "InProgress",
    }
    # EvaluationFile
    r_file["fileName"] = sfile["name"]
    if sfile["score"] > 5:
        r_file["malicious"] = True
    r_file["message"] = sfile.get("message")
    r_file["sha256"] = sfile["hash"]
    r_file["statusDate"] = sfile["date_b"]
    r_file["status"] = sfile["status_f"]
    if r_file["status"] == "InProgress":
        file_keys = ["malicious", "message"]
        for key in file_keys:
            r_file.pop(key, None)
        dtime_fmt = "%Y-%m-%d %H:%M:%S.%f"
        r_file["statusDate"] = dtime.strptime(
            dtime.now().strftime(dtime_fmt)[:-3], dtime_fmt)
    elif r_file["status"] == "Error":
        r_file.pop("malicious", None)
    return r_file
# Evaluation results marshaling helper per OpenAPI specification
def marshal_eval(sfile):
    r_eval = {
        "id": "",
        "correlationID": "",
        "date": "",
        "elapsedTime": "",
        "statusDate": "",
        "status": "InProgress",
        "malicious": False,
        "files": [],
    }
    # Evaluation
    r_eval["id"] = sfile["uuid_f"]
    r_eval["correlationID"] = sfile["corrid"]
    r_eval["date"] = sfile["date_f"]
    if sfile["score"] > 5:
        r_eval["malicious"] = True
    r_eval["statusDate"] = sfile["date_b"]
    r_eval["status"] = sfile["status_f"]
    if r_eval["status"] == "InProgress":
        eval_keys = ["malicious", "date_b"]
        for key in eval_keys:
            r_eval.pop(key, None)
        r_eval["elapsedTime"] = str(dtime.now() -
                                    dtparser(str(sfile.get("date_f"))))
        dtime_fmt = "%Y-%m-%d %H:%M:%S.%f"
        r_eval["statusDate"] = dtime.strptime(
            dtime.now().strftime(dtime_fmt)[:-3], dtime_fmt)
    elif r_eval["status"] == "Error":
        r_eval.pop("malicious", None)
        r_eval["elapsedTime"] = str(
            dtparser(str(sfile.get("date_b"))) -
            dtparser(str(sfile.get("date_f"))))
    else:
        r_eval["elapsedTime"] = str(
            dtparser(str(sfile.get("date_b"))) -
            dtparser(str(sfile.get("date_f"))))
    return r_eval


def del_none(original):
    filtered = {k: v for k, v in original.items() if v is not None}
    # If you want to update the original one:
    # original.clear()
    # original.update(filtered)
    return filtered
def hash_checksum(alg, filename, block_size=65536):
    if alg.lower() == "md5":
        hash_alg = md5()
    elif alg.lower() == "sha1":
        hash_alg = sha1()
    elif alg.lower() == "sha256":
        hash_alg = sha256()
    else:
        # Guard against an unbound hash_alg further down
        raise ValueError("Unsupported hash algorithm: {}".format(alg))
    with open(filename, "rb") as f:
        for block in iter(lambda: f.read(block_size), b""):
            hash_alg.update(block)
    return hash_alg.hexdigest()


def file_hash(alg, filename, block_size=65536):
    if alg.lower() == "md5":
        hash_alg = md5()
    elif alg.lower() == "sha1":
        hash_alg = sha1()
    elif alg.lower() == "sha256":
        hash_alg = sha256()
    else:
        raise ValueError("Unsupported hash algorithm: {}".format(alg))
    for block in iter(lambda: filename.stream.read(block_size), b""):
        hash_alg.update(block)
    return hash_alg.hexdigest()
def file_config(fullpath, hash, client_id):
    fc = {"fullpath": fullpath, "hash": hash, "client_id": client_id}
    fc["filename"] = fullpath.split("/")[-1]
    # Little delay trick based on filename
    fc["delay"] = 0
    if re.match("^delay_[0-9]{1,3}_.*$", fc["filename"]):
        fc["delay"] = int(fc["filename"].split("_")[1])
    # Backend information
    from flask import current_app
    CAS_CONF = current_app.config["CAS_API"]
    fc["host"] = CAS_CONF["host"]
    fc["token"] = CAS_CONF["token"]
    fc["headers"] = {
        "X-API-TOKEN": fc["token"],
        "X-Response-Wait-MS": CAS_CONF["wait_ms"],
    }
    if fc["client_id"] is None:
        url_base = "https://{host}/rapi/cas/scan?token={token}"
    else:
        url_base = "https://{host}/rapi/cas/scan?token={token}&client-id={client_id}"
    fc["scan_url"] = url_base.format(**fc)
    return fc


def file_result(jdata):
    # Parse and collect meaningful 'message'
    if jdata.get("score") < 6:
        return "Clean"
    out_msg = []
    cas_modules = {
        "file_reputation": "File Reputation",
        "user_hash_list": "Custom Blacklist",
        "policy": "Global Policy",
        "cylance": "Predictive Analysis",
        # "symantec": "Predictive Analysis",
        "symantec": "Antivirus/AML",
        "sophos": "Antivirus",
        "kaspersky": "Antivirus",
        "mcafee": "Antivirus",
        "malware_analysis": "Sandboxing",
        "fireeye": "Sandboxing",
        "lastline": "Sandboxing",
        "cloud_sandboxing": "Sandboxing",
    }
    for k, v in jdata.items():
        if k in cas_modules.keys():
            if v.get("status") == 1 and v.get("score", 0) > 5:
                out_msg.append("Blocked by {}".format(cas_modules[k]))
                if k == "policy":
                    out_msg.append(v.get("details", None))
    if len(out_msg) == 0:
        return "File Reputation (Cached)"
    return "; ".join(out_msg)
class ReverseProxied(object):
    """Wrap the application in this middleware and configure the
    front-end server to add these headers, to let you quietly bind
    this to a URL other than / and to an HTTP scheme that is
    different than what is used locally.

    In nginx:

        location /myprefix {
            proxy_pass http://192.168.0.1:5001;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Scheme $scheme;
            proxy_set_header X-Script-Name /myprefix;
        }

    :param app: the WSGI application
    """

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        script_name = environ.get("HTTP_X_SCRIPT_NAME", "")
        if script_name:
            environ["SCRIPT_NAME"] = script_name
            path_info = environ["PATH_INFO"]
            if path_info.startswith(script_name):
                environ["PATH_INFO"] = path_info[len(script_name):]
        scheme = environ.get("HTTP_X_SCHEME", "")
        if scheme:
            environ["wsgi.url_scheme"] = scheme
        return self.app(environ, start_response)
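
# Wiring sketch (assumes a Flask application object named `app`):
#   app.wsgi_app = ReverseProxied(app.wsgi_app)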
| 30.549815 | 113 | 0.578452 | 1,098 | 8,279 | 4.192168 | 0.257741 | 0.01738 | 0.005214 | 0.006952 | 0.260048 | 0.214208 | 0.191397 | 0.191397 | 0.155333 | 0.139257 | 0 | 0.022269 | 0.256915 | 8,279 | 270 | 114 | 30.662963 | 0.725943 | 0.118251 | 0 | 0.191176 | 0 | 0.009804 | 0.212071 | 0.026855 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073529 | false | 0 | 0.04902 | 0.004902 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f933adecbe017e770aa506f0488152882940d45 | 12,364 | py | Python | pyaviso/cli_aviso.py | mpejcoch/aviso | 250b5646220fae85725278b3ca80fed4e15a103a | [
"Apache-2.0"
] | 6 | 2021-02-03T17:55:05.000Z | 2022-02-20T08:05:42.000Z | pyaviso/cli_aviso.py | mpejcoch/aviso | 250b5646220fae85725278b3ca80fed4e15a103a | [
"Apache-2.0"
] | 1 | 2021-04-26T14:42:39.000Z | 2021-04-26T14:42:39.000Z | pyaviso/cli_aviso.py | mpejcoch/aviso | 250b5646220fae85725278b3ca80fed4e15a103a | [
"Apache-2.0"
] | 2 | 2021-02-09T15:07:41.000Z | 2021-08-13T09:55:30.000Z | # (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import functools
import signal
import sys
import threading
import time
from typing import Dict, List
import click
from pyaviso import __version__, logger
from pyaviso import user_config as conf
from pyaviso.custom_exceptions import (
    EngineException,
    EventListenerException,
    InvalidInputError,
    TriggerException,
)
from pyaviso.engine import EngineType
from pyaviso.notification_manager import NotificationManager
from pyaviso.service_config_manager import ServiceConfigException
# Create the listener manager
manager: NotificationManager = NotificationManager()
# set of known exceptions
KNOWN_EXCEPTION = (
    ServiceConfigException,
    EventListenerException,
    TriggerException,
    EngineException,
    InvalidInputError,
    AssertionError,
    KeyError,
)
def catch_all_exceptions(cls, handler):
    """
    This function is used to pass a child of the click.command class to the click CLI initialisation.
    This new class overrides the default error handling, allowing interception of keyboard interrupts and EOF errors.

    :param cls: click.command
    :param handler: function in charge of error handling
    :return:
    """

    class Cls(cls):
        _original_args = None

        def make_context(self, info_name, args, parent=None, **extra):
            # grab the original command line arguments
            self._original_args = " ".join(args)
            try:
                return super(Cls, self).make_context(info_name, args, parent=parent, **extra)
            except Exception:
                # call the handler
                handler()
                # let the user see the original error
                raise

        def invoke(self, ctx):
            try:
                return super(Cls, self).invoke(ctx)
            except Exception:
                # call the handler
                handler()
                # let the user see the original error
                raise

    return Cls
def ignore_signal(signum, frame):
    """
    This is used to ignore a specific signal sent to this process
    :param signum:
    :param frame:
    :return:
    """
    pass


def ignore_signal_and_sleep(signum, frame, time_sec=0.1):
    """
    This is used to ignore and sleep when a specific signal is sent to this process.
    The sleep is required when the signal is sent multiple times like the SIGTTIN in case of CLICK running in the
    background and trying to read from the stdin.
    :param time_sec: time in second to sleep
    :param signum:
    :param frame:
    :return:
    """
    time.sleep(time_sec)
def stop_listeners(signum=None, frame=None):
    """
    This function takes care of gracefully stopping the listeners.
    :param signum:
    :param frame:
    :return:
    """
    # Stop gracefully the notification listeners
    try:
        logger.debug("Stopping listeners...")
        manager.listener_manager.cancel_listeners()
        logger.info("Listeners stopped")
    except Exception as e:
        logger.error(f"Error while stopping the listeners, {e}")
        logger.debug("", exc_info=True)
        sys.exit(-1)
def stop_listeners_and_exit(signum=None, frame=None):
    """
    This function takes care of gracefully stopping the listeners and then
    exiting the process.
    :param signum:
    :param frame:
    :return:
    """
    # Stop gracefully the notification listeners and exit
    stop_listeners()
    sys.exit()
def notification_server_setup(f):
    @click.option("--host", "-H", help="Notification server host.")
    @click.option("--port", "-P", help="Notification server port.", type=int)
    @click.option("--test", help="Activate TestMode.", is_flag=True, default=False)
    @functools.wraps(f)
    def functor(*args, **kwargs):
        if kwargs["host"]:
            kwargs["configuration"].notification_engine.host = kwargs["host"]
        kwargs.pop("host")
        if kwargs["port"]:
            kwargs["configuration"].notification_engine.port = kwargs["port"]
        kwargs.pop("port")
        if kwargs["test"]:
            kwargs["configuration"].notification_engine.type = EngineType.FILE_BASED
        kwargs.pop("test")
        return f(*args, **kwargs)

    return functor
def user_config_setup(f):
    @click.option("--config", "-c", help="User configuration file path.")
    @click.option("--log", "-l", help="Logging configuration file path.")
    @click.option("--debug", "-d", help="Enable the debug log.", is_flag=True, default=False)
    @click.option(
        "--quiet", "-q", help="Suppress non-error messages from the console output.", is_flag=True, default=False
    )
    @click.option("--no-fail", help="Suppress any error exit code.", is_flag=True, default=False)
    @click.option("--username", "-u", help="Username required to authenticate to the server.")
    @click.option("--key", "-k", help="File path to the key required to authenticate to the server.")
    @functools.wraps(f)
    def functor(*args, **kwargs):
        # CLICK automatically sets the flags; put back None values like for the other parameters
        kwargs["debug"] = None if not kwargs["debug"] else True
        kwargs["quiet"] = None if not kwargs["quiet"] else True
        kwargs["no_fail"] = None if not kwargs["no_fail"] else True
        # create the configuration object
        configuration = conf.UserConfig(
            conf_path=kwargs["config"],
            logging_path=kwargs["log"],
            debug=kwargs["debug"],
            quiet=kwargs["quiet"],
            no_fail=kwargs["no_fail"],
            username=kwargs["username"],
            key_file=kwargs["key"],
        )
        # pass it as an option in the same dictionary but remove the fields used for the configuration
        kwargs["configuration"] = configuration
        kwargs.pop("config")
        kwargs.pop("log")
        kwargs.pop("debug")
        kwargs.pop("quiet")
        kwargs.pop("no_fail")
        kwargs.pop("username")
        kwargs.pop("key")
        logger.debug(f"Running Aviso v.{__version__}")
        logger.debug(f"Configuration loaded: {configuration}")
        return f(*args, **kwargs)

    return functor
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=__version__)
def cli():
    pass
@click.command(cls=catch_all_exceptions(click.Command, handler=stop_listeners), context_settings=CONTEXT_SETTINGS)
@user_config_setup
@notification_server_setup
@click.argument("listener_files", nargs=-1)
@click.option(
    "--from",
    "from_date",
    type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%S.%fZ"]),
    help="Replay notification from this date.",
)
@click.option(
    "--to", "to_date", type=click.DateTime(formats=["%Y-%m-%dT%H:%M:%S.%fZ"]), help="Replay notification to this date."
)
@click.option("--now", "now", is_flag=True, default=False, help="Ignore missed notifications, only listen to new ones.")
@click.option("--catchup", "catchup", is_flag=True, default=False, help="Retrieve first the missed notifications.")
def listen(listener_files: List[str], configuration: conf.UserConfig, from_date, to_date, now, catchup):
    """
    This method allows the user to execute the listeners defined in the YAML listener file
    :param listener_files: YAML files used to define the listeners
    """
    try:
        """
        UNIX Signal handling
        """
        if threading.current_thread() is threading.main_thread():
            # This is needed to avoid the process to be suspended in case it runs in background, we must sleep
            # because we constantly read from the stdin
            signal.signal(signal.SIGTTIN, ignore_signal_and_sleep)
            # this is sent with CTRL + \
            signal.signal(signal.SIGQUIT, stop_listeners_and_exit)
            # this is sent with the default kill command
            signal.signal(signal.SIGTERM, stop_listeners_and_exit)
        # call the main listen method
        manager.listen(
            configuration,
            listeners_file_paths=listener_files,
            from_date=from_date,
            to_date=to_date,
            now=now,
            catchup=catchup,
        )
    except KNOWN_EXCEPTION as e:
        logger.error(f"{e}")
        logger.debug("", exc_info=True)
        stop_listeners()
        sys.exit(-1)
    except Exception as e:
        logger.error(f"Error occurred while running the listeners: {e}")
        logger.debug("", exc_info=True)
        stop_listeners()
        sys.exit(-1)
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument("parameters", required=True)
@user_config_setup
@notification_server_setup
def key(parameters: str, configuration: conf.UserConfig):
"""
Generate the key to send to the notification server according to the current schema using the parameters defined
:param parameters: key1=value1,key2=value2,...
"""
try:
parsed_param = _parse_inline_params(parameters)
# base_key and maintenance are ignored because they are not needed here
key_generated, base_key, maintenance = manager.key(parsed_param, configuration)
print(key_generated)
except KNOWN_EXCEPTION as e:
logger.error(f"{e}")
logger.debug("", exc_info=True)
sys.exit(-1)
except Exception as e:
logger.error(f"Error occurred while generating key from {parameters}, " f"{e}")
logger.debug("", exc_info=True)
sys.exit(-1)
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument("parameters", required=True)
@user_config_setup
@notification_server_setup
def value(parameters: str, configuration: conf.UserConfig):
"""
Return the value stored on the server for the key generated according to the current schema
and the parameters provided
:param parameters: key1=value1,key2=value2,...
"""
try:
parsed_param = _parse_inline_params(parameters)
v = manager.value(parsed_param, configuration)
print(v)
except KNOWN_EXCEPTION as e:
logger.error(f"{e}")
logger.debug("", exc_info=True)
sys.exit(-1)
except Exception as e:
logger.error(f"Error occurred while return value for {parameters}, " f"{e}")
logger.debug("", exc_info=True)
sys.exit(-1)
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument("parameters", required=True)
@user_config_setup
@notification_server_setup
def notify(parameters: str, configuration: conf.UserConfig):
"""
Create a notification with the parameters passed and submit it to the notification server
:param parameters: key1=value1,key2=value2,...
"""
try:
parsed_param = _parse_inline_params(parameters)
manager.notify(parsed_param, config=configuration)
print("Done")
except KNOWN_EXCEPTION as e:
logger.error(f"{e}")
logger.debug("", exc_info=True)
sys.exit(-1)
except Exception as e:
logger.error(f"Error occurred while notifying the notification {parameters}, " f"{e}")
logger.debug("", exc_info=True)
sys.exit(-1)
cli.add_command(listen)
cli.add_command(key)
cli.add_command(value)
cli.add_command(notify)
if __name__ == "__main__":
listen()
def _parse_inline_params(params: str) -> Dict[str, str]:
"""
This helper method parses the notification string into a dictionary
:param params: comma-separated key=value pairs, e.g. key1=value1,key2=value2
:return: notification as dictionary
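Example (key names and values are illustrative, not a real schema)::
>>> _parse_inline_params("class=od,stream=oper")
{'class': 'od', 'stream': 'oper'}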
"""
logger.debug("Parsing the inline parameters...")
parsed_param = {}
ps = params.split(",")
assert len(ps) > 1, "Wrong structure for the notification string, it should be <key_name>=<key_value>,..."
for p in ps:
pair = p.split("=")
assert len(pair) == 2, "Wrong structure for the notification string, it should be <key_name>=<key_value>,..."
parsed_param[pair[0]] = pair[1]
logger.debug("Notification string successfully parsed")
return parsed_param
| 32.882979 | 120 | 0.663054 | 1,547 | 12,364 | 5.184228 | 0.215902 | 0.015711 | 0.013466 | 0.0202 | 0.351746 | 0.298504 | 0.271571 | 0.250499 | 0.243142 | 0.239277 | 0 | 0.003775 | 0.228729 | 12,364 | 375 | 121 | 32.970667 | 0.837248 | 0.232287 | 0 | 0.376106 | 0 | 0 | 0.169667 | 0.010256 | 0 | 0 | 0 | 0 | 0.013274 | 1 | 0.075221 | false | 0.00885 | 0.057522 | 0 | 0.176991 | 0.013274 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f93d54e8a447a4241851eb2750b969c33fadc5a | 1,726 | py | Python | utils/registrar.py | glusa8/navygem | be437f3ce89d2edc5a565d7903e80171abec6929 | [
"MIT"
] | 1 | 2018-05-17T13:05:06.000Z | 2018-05-17T13:05:06.000Z | utils/registrar.py | glusa8/navygem | be437f3ce89d2edc5a565d7903e80171abec6929 | [
"MIT"
] | null | null | null | utils/registrar.py | glusa8/navygem | be437f3ce89d2edc5a565d7903e80171abec6929 | [
"MIT"
] | null | null | null | from navygem.settings import BASE_DIR
import glob
import json
import os
class ResourceRegistrarMeta(type):
file = __file__
def __new__(cls, name, parents, dct):
if 'types' in dct:
files = {}
for root, _, filenames in os.walk(os.path.dirname(os.path.realpath(cls.file))):
for filename in filenames:
_, extension = os.path.splitext(filename)
if extension in dct['types']:
if filename not in files:
files[filename] = os.path.join(root, filename)
else:
raise Exception('Two resource files cannot have the same name.')
dct['files'] = files
return super(ResourceRegistrarMeta, cls).__new__(cls, name, parents, dct)
class ResourceRegistrar(object):
__metaclass__ = ResourceRegistrarMeta
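# NOTE: Python 2-style metaclass hook; under Python 3 the metaclass would have
# to be passed in the class signature (metaclass=ResourceRegistrarMeta) instead.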
# Subclasses should override 'types' array.
types = []
@classmethod
def find(cls, filename, loader):
if filename in cls.files:
_, extension = os.path.splitext(filename)
if extension in cls.types:
return loader(cls.files[filename])
def load_class(class_name):
def load_from(file_full_path):
_, extension = os.path.splitext(file_full_path)
full_path = os.path.relpath(file_full_path, BASE_DIR)
full_path_no_extension = full_path[:-len(extension)]
module_name = full_path_no_extension.replace(os.sep, '.')
# Why we use fromlist:
# http://stackoverflow.com/a/2725668
module = __import__(module_name, fromlist=[class_name])
return getattr(module, class_name)
return load_from
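# Example usage (hypothetical subclass; file and class names are illustrative):
#
#     class PluginRegistrar(ResourceRegistrar):
#         types = ['.py']
#
#     # Find plugin.py anywhere under this package and import its Plugin class.
#     Plugin = PluginRegistrar.find('plugin.py', load_class('Plugin'))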
| 33.192308 | 96 | 0.61124 | 198 | 1,726 | 5.085859 | 0.378788 | 0.041708 | 0.044687 | 0.06852 | 0.12711 | 0.087388 | 0.087388 | 0.087388 | 0 | 0 | 0 | 0.005771 | 0.297219 | 1,726 | 51 | 97 | 33.843137 | 0.824402 | 0.056199 | 0 | 0.054054 | 0 | 0 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.135135 | 0 | 0.486486 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f956d5f8233b1c96361f43fdbe16032f4c6826a | 4,412 | py | Python | rank_predictor/rank_predictor/model/graph_only_models.py | Simsso/Vision-Based-Page-Rank-Estimation | 424d80031501701ebe1ab1473b0fb09ccd6f6453 | [
"MIT"
] | 3 | 2019-05-27T05:59:40.000Z | 2021-06-03T20:10:49.000Z | rank_predictor/rank_predictor/model/graph_only_models.py | Simsso/Vision-Based-Page-Rank-Estimation | 424d80031501701ebe1ab1473b0fb09ccd6f6453 | [
"MIT"
] | null | null | null | rank_predictor/rank_predictor/model/graph_only_models.py | Simsso/Vision-Based-Page-Rank-Estimation | 424d80031501701ebe1ab1473b0fb09ccd6f6453 | [
"MIT"
] | 1 | 2020-02-18T16:27:30.000Z | 2020-02-18T16:27:30.000Z | from copy import deepcopy
import torch
from graph_nets.data_structures.edge import Edge
from graph_nets.functions.aggregation import AvgAggregation, MaxAggregation
from graph_nets.block import GNBlock
from graph_nets.data_structures.graph import Graph
from torch import nn, Tensor
from graph_nets.functions.update import NodeAggregationGlobalStateUpdate, IndependentNodeUpdate
from rank_predictor.model.graph_extractor_full import DecoderGlobalStateUpdate, EncoderEdgeUpdate, \
EncoderGlobalStateUpdate, CoreGlobalStateUpdate, CoreNodeUpdate, CoreEdgeUpdate
from rank_predictor.model.utils import ListModule
class GNAvg(nn.Module):
"""[baseline+avg] model"""
def __init__(self):
super().__init__()
self.dense = nn.Linear(64, 1)
self.core = GNBlock(phi_v=IndependentNodeUpdate(self.dense))
self.dec = GNBlock(rho_vu=AvgAggregation(), phi_u=NodeAggregationGlobalStateUpdate())
def forward(self, g: Graph) -> torch.Tensor:
g: Graph = self.core(g)
return self.dec(g).attr.val
class GNMax(nn.Module):
"""[baseline+max] model"""
def __init__(self):
super().__init__()
self.dense = nn.Linear(64, 1)
self.core = GNBlock(phi_v=IndependentNodeUpdate(self.dense))
self.dec = GNBlock(rho_vu=MaxAggregation(), phi_u=NodeAggregationGlobalStateUpdate())
def forward(self, g: Graph) -> torch.Tensor:
g: Graph = self.core(g)
return self.dec(g).attr.val
class GNDeep(nn.Module):
"""[n-core(-shared)]"""
def __init__(self, drop_p: float, num_core_blocks: int, edge_mode: str, shared_weights: bool = False):
"""
Deep graph network for domain rank estimation.
:param drop_p: Dropout probability
:param num_core_blocks: Number of stacked core blocks, >= 0
:param edge_mode: How to treat the graph edges: keep them ('default'), remove them
('no_edges'), connect every node pair ('all_edges'), or make them bi-directional
('bi_directional'). In any case, the existence of reflexive edges is ensured.
:param shared_weights: If True, all core blocks share the parameters of the first block.
"""
super().__init__()
self.drop_p = drop_p
self.edge_fns = {
'default': GNDeep.default,
'bi_directional': GNDeep.bi_directional,
'no_edges': GNDeep.no_edges,
'all_edges': GNDeep.all_edges
}
assert edge_mode in self.edge_fns, "Invalid edge mode; not in [default, bi_directional, no_edges, all_edges]"
self.edge_mode = edge_mode
self.enc = GNBlock(
phi_e=EncoderEdgeUpdate(),
phi_u=EncoderGlobalStateUpdate(),
rho_eu=AvgAggregation())
assert num_core_blocks >= 0
core_blocks = []
for i in range(num_core_blocks):
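# with shared weights, every core block after the first reuses block 0's parameters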
if shared_weights and i > 0:
block = core_blocks[0]
else:
block = GNBlock(
phi_e=CoreEdgeUpdate(self.drop_p),
phi_v=CoreNodeUpdate(self.drop_p),
phi_u=CoreGlobalStateUpdate(self.drop_p),
rho_ev=AvgAggregation(),
rho_vu=AvgAggregation(),
rho_eu=AvgAggregation())
core_blocks.append(block)
self.core_blocks = ListModule(*core_blocks)
self.dec = GNBlock(phi_u=DecoderGlobalStateUpdate()) # maps global state from vec to scalar
def forward(self, g: Graph) -> torch.Tensor:
# add/remove/keep edges
g = self.edge_fns[self.edge_mode](g)
g = self.enc(g)
for core in self.core_blocks:
g = core(g)
g: Graph = self.dec(g)
return g.attr.val
@staticmethod
def no_edges(g: Graph) -> Graph:
g = deepcopy(g)
g.remove_all_edges()
g.add_reflexive_edges()
return g
@staticmethod
def all_edges(g: Graph) -> Graph:
g = deepcopy(g)
g.remove_all_edges()
g.add_all_edges(reflexive=True)
return g
@staticmethod
def default(g: Graph) -> Graph:
g = deepcopy(g)
g.add_reflexive_edges()
return g
@staticmethod
def bi_directional(g: Graph) -> Graph:
g = deepcopy(g)
new_edges = set()
for e in g.edges:
new_edges.add(Edge(sender=e.receiver, receiver=e.sender, attr=e.attr))
g.add_reflexive_edges()
for e in new_edges:
g.edges.add(e)
return g
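# Minimal construction sketch (hyperparameter values are illustrative, not tuned):
#
#     model = GNDeep(drop_p=0.2, num_core_blocks=3,
#                    edge_mode="bi_directional", shared_weights=True)
#     rank = model(g)  # `g` is a graph_nets Graph; returns a scalar rank estimate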
| 31.741007 | 118 | 0.625793 | 537 | 4,412 | 4.934823 | 0.251397 | 0.041509 | 0.024528 | 0.018113 | 0.277358 | 0.256981 | 0.249057 | 0.229434 | 0.200755 | 0.200755 | 0 | 0.00313 | 0.275839 | 4,412 | 138 | 119 | 31.971014 | 0.826291 | 0.105168 | 0 | 0.368421 | 0 | 0 | 0.028438 | 0 | 0 | 0 | 0 | 0 | 0.021053 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f971fac68abf046c70d17ff216b21ac6dd5fe7d | 3,337 | py | Python | plotting.py | oytundemirbilek/ReMI-Net-Star | fd7412e08bbdc1ee66053f17e4e781bdf319cd73 | [
"MIT"
] | 1 | 2021-12-13T11:16:59.000Z | 2021-12-13T11:16:59.000Z | plotting.py | oytundemirbilek/ReMI-Net-Star | fd7412e08bbdc1ee66053f17e4e781bdf319cd73 | [
"MIT"
] | null | null | null | plotting.py | oytundemirbilek/ReMI-Net-Star | fd7412e08bbdc1ee66053f17e4e781bdf319cd73 | [
"MIT"
] | 1 | 2022-01-03T16:20:14.000Z | 2022-01-03T16:20:14.000Z | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import networkx as nx
from matplotlib._color_data import BASE_COLORS
from nxviz.api import CircosPlot
def save_csv(name, data, columns, index):
saving = pd.DataFrame(data=data,columns=columns,index=index)
saving.to_csv(name)
def plot_cbt(img, fold_num=1, timepoint=0, dataset="simulated",norm="minmax",conv="edge_rnn"):
img = np.repeat(np.repeat(img, 10, axis=1), 10, axis=0)
plt.imshow(img)
plt.title(f"CBT at Fold {fold_num} - Time {timepoint}")
plt.axis('off')
plt.colorbar()
plt.savefig(f"./cbt_plots/{dataset}_{norm}_{conv}_cbt_time{timepoint}_fold{fold_num}.png")
plt.close()
def plot_training_curve(losses1,losses2):
plt.plot(np.arange(31)*5, losses1, label="Avg Train Frob Loss")
plt.plot(np.arange(31)*5, losses2, label="Avg Train Reg Loss")
plt.legend()
plt.title("Mean Regularizer Losses")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
def plot_scores(data, t=0, strategy="Last",data_type="simulated"):
plt.figure()  # figsize=(20, 10)
color_list = list(BASE_COLORS.keys())
color_list.remove("w")
gap = .8 / len(data)
labels = []
for i, row in enumerate(data[0]):
labels.append("Fold " + str(i+1))
# Add average column.
labels.append("Average")
data = np.concatenate((data, data.mean(axis=1,keepdims=True)), axis=1)
barlabels = ["Cyclic Sigmoid Double RNN","Cyclic Weighted Minmax Double RNN", "Cyclic Sigmoid Edge RNN", "Cyclic Weighted Minmax Edge RNN"]
ticks = np.arange(data.shape[1])
for i, row in enumerate(data):
plt.bar(ticks+i*gap, row, width = gap, edgecolor = "k", color = color_list[i % data.shape[0]], label=barlabels[i])
plt.xticks(ticks+(data.shape[0]*gap*1/2)-(gap/2), labels)
plt.title(f"Average Frobenius Loss Time {t+1} - {strategy} Model")
plt.ylim(top=18.0)  # upper y-axis limit
plt.ylim(bottom=15)  # lower y-axis limit
plt.legend()#loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=5)
#plt.show()
name = f"./experiments/final_{data_type}_{strategy.lower()}model_time{t}"
save_csv(name+".csv",data.transpose(),barlabels,labels)
plt.savefig(name + ".png")
plt.close()
def plot_circular_graph(cbt, n_nodes=35, TOPK=5):
cbt[np.tril_indices_from(cbt, -1)] = 0
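# zero the strictly lower triangle so each symmetric connection is counted once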
cbt = np.abs(cbt)
cbt_selected_features = np.unravel_index(np.argsort(cbt.ravel())[-TOPK:], cbt.shape)
print(cbt_selected_features)
node_list=np.arange(n_nodes).tolist()
edge_list=[]
for f in range(TOPK):
i = cbt_selected_features[0][f]
j = cbt_selected_features[1][f]
edge_list.append((i,j,cbt[i,j]*100))
print(edge_list)
G = nx.Graph()
G.add_nodes_from(node_list)
G.add_weighted_edges_from(edge_list)
color_list=["a", "b", "c", "d", "e"]
for n, d in G.nodes(data=True):
G.nodes[n]["class"] = node_list[n-1]
c = CircosPlot(graph=G,node_labels=True,
node_label_rotation=True,
fontsize=15,
group_legend=False,
figsize=(7, 7),node_color="class",edge_width='weight')
c.draw()
plt.title(f"Right Hemisphere\n", fontdict={'fontsize': 20, 'fontweight': 'medium'})
plt.show() | 37.920455 | 144 | 0.641594 | 507 | 3,337 | 4.094675 | 0.360947 | 0.013487 | 0.036609 | 0.013487 | 0.055877 | 0.038536 | 0 | 0 | 0 | 0 | 0 | 0.023525 | 0.197483 | 3,337 | 88 | 145 | 37.920455 | 0.75168 | 0.041355 | 0 | 0.081081 | 0 | 0.013514 | 0.172514 | 0.044094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0 | 0.081081 | 0 | 0.148649 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f97dbd9f97f19001752071be1afe254daea44e0 | 4,925 | py | Python | datasets/single_file_dataset.py | AiPBAND/OmiTrans | 8e5d9198a1ee422eb805e5ead068c1a2523aeed5 | [
"MIT"
] | 3 | 2021-11-26T04:43:05.000Z | 2022-02-23T20:18:10.000Z | datasets/single_file_dataset.py | AiPBAND/OmiTrans | 8e5d9198a1ee422eb805e5ead068c1a2523aeed5 | [
"MIT"
] | 1 | 2022-03-02T03:39:29.000Z | 2022-03-02T03:39:29.000Z | datasets/single_file_dataset.py | AiPBAND/OmiTrans | 8e5d9198a1ee422eb805e5ead068c1a2523aeed5 | [
"MIT"
] | 3 | 2021-11-26T06:25:46.000Z | 2022-03-09T13:16:45.000Z | import torch
import os.path
import numpy as np
import pandas as pd
from util import preprocess
from datasets import load_file
from datasets.basic_dataset import BasicDataset
class SingleFileDataset(BasicDataset):
"""
A dataset class for a single-file paired omics dataset.
The data should be two single files prepared in '/path/to/data/'.
In each matrix file, each column should be a sample and each row a molecular feature.
"""
def __init__(self, param):
"""
Initialize this dataset class.
"""
BasicDataset.__init__(self, param)
self.omics_dims = []
# Load data for A
A_df = load_file(param, 'A')
# Get the min and max of A
self.target_max = A_df.max().max()
self.target_min = A_df.min().min()
# Get the sample list
if param.use_sample_list:
sample_list_path = os.path.join(param.data_root, 'sample_list.tsv') # get the path of sample list
self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
else:
self.sample_list = A_df.columns
# Get the feature list for A
if param.use_feature_lists:
feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv') # get the path of feature list
self.feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
else:
self.feature_list_A = A_df.index
A_df = A_df.loc[self.feature_list_A, self.sample_list]
self.A_dim = A_df.shape[0]
self.sample_num = A_df.shape[1]
A_array = A_df.values
if self.param.add_channel:
# Add one dimension for the channel
A_array = A_array[np.newaxis, :, :]
self.A_tensor_all = torch.Tensor(A_array)
self.omics_dims.append(self.A_dim)
# Load data for B
B_df = load_file(param, 'B')
# Get the feature list for B
if param.use_feature_lists:
feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv') # get the path of feature list
feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
else:
feature_list_B = B_df.index
B_df = B_df.loc[feature_list_B, self.sample_list]
if param.ch_separate:
B_df_list, self.B_dim = preprocess.separate_B(B_df)
self.B_tensor_all = []
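# build one tensor per part; the fixed count of 23 presumably corresponds to chromosomes 1-22 plus X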
for i in range(0, 23):
B_array = B_df_list[i].values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
B_tensor_part = torch.Tensor(B_array)
self.B_tensor_all.append(B_tensor_part)
else:
self.B_dim = B_df.shape[0]
B_array = B_df.values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
self.B_tensor_all = torch.Tensor(B_array)
self.omics_dims.append(self.B_dim)
if param.stratify:
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains A_tensor, B_tensor
A_tensor (tensor) -- input data with the source omics data type
B_tensor (tensor/list) -- output data with the target omics data type
index (int) -- the index of this data point
"""
# Get the tensor of A
if self.param.add_channel:
A_tensor = self.A_tensor_all[:, :, index]
else:
A_tensor = self.A_tensor_all[:, index]
# Get the tensor of B
if self.param.ch_separate:
B_tensor = []
for i in range(0, 23):
if self.param.add_channel:
B_tensor_part = self.B_tensor_all[i][:, :, index]
else:
B_tensor_part = self.B_tensor_all[i][:, index]
B_tensor.append(B_tensor_part)
# Return a list of tensor
else:
if self.param.add_channel:
B_tensor = self.B_tensor_all[:, :, index]
else:
B_tensor = self.B_tensor_all[:, index]
# Return a tensor
return {'A_tensor': A_tensor, 'B_tensor': B_tensor, 'index': index}
def __len__(self):
"""
Return the number of data points in the dataset.
"""
return self.sample_num
| 38.779528 | 117 | 0.586802 | 687 | 4,925 | 3.954876 | 0.176128 | 0.051527 | 0.02834 | 0.036069 | 0.353699 | 0.30806 | 0.267575 | 0.128451 | 0.128451 | 0.077659 | 0 | 0.005386 | 0.321421 | 4,925 | 126 | 118 | 39.087302 | 0.8076 | 0.217868 | 0 | 0.25 | 0 | 0 | 0.028047 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0375 | false | 0 | 0.0875 | 0 | 0.1625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f989baf22457d064a80cc2874de7f09184bd835 | 416 | py | Python | src/logs/logs.py | StevenVuong/twitter_scraper_sentiment_analysis | 6306dcb7e43d53da8d53c9d90d81d70dae442665 | [
"MIT"
] | 2 | 2020-05-11T16:48:40.000Z | 2020-05-11T21:03:10.000Z | src/logs/logs.py | StevenVuong/twitter_scraper_sentiment_analysis | 6306dcb7e43d53da8d53c9d90d81d70dae442665 | [
"MIT"
] | null | null | null | src/logs/logs.py | StevenVuong/twitter_scraper_sentiment_analysis | 6306dcb7e43d53da8d53c9d90d81d70dae442665 | [
"MIT"
] | null | null | null | import logging
formatting = "%(levelname)s: " \
"%(asctime)s -> " \
"%(name)s - " \
"line %(lineno)d: " \
"%(message)s"
def add_stream_handler(logger: logging.Logger):
formatter = logging.Formatter(formatting)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
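# Example usage (logger name is illustrative):
#
#     logger = logging.getLogger("scraper")
#     logger.setLevel(logging.DEBUG)
#     add_stream_handler(logger)
#     logger.debug("stream handler attached")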
| 23.111111 | 47 | 0.588942 | 38 | 416 | 6.394737 | 0.578947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.271635 | 416 | 18 | 48 | 23.111111 | 0.80198 | 0 | 0 | 0 | 0 | 0 | 0.165468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f9cd9fd3b819d0be208a54f23cb4903da617f11 | 1,306 | py | Python | src/utils/data_utils.py | vikigenius/neural_speaker_identification | a723290808d748daf65163b71aef2c5376319db3 | [
"MIT"
] | 1 | 2019-07-27T00:32:02.000Z | 2019-07-27T00:32:02.000Z | src/utils/data_utils.py | vikigenius/neural_speaker_identification | a723290808d748daf65163b71aef2c5376319db3 | [
"MIT"
] | null | null | null | src/utils/data_utils.py | vikigenius/neural_speaker_identification | a723290808d748daf65163b71aef2c5376319db3 | [
"MIT"
] | 1 | 2019-07-27T00:32:06.000Z | 2019-07-27T00:32:06.000Z | #!/usr/bin/env python
import os
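# The helpers below assume VoxCeleb-style paths (an assumption inferred from the
# 'id1'/'id0' prefixes): .../<speaker_id>/<session_hash>/<clip>.wav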
def get_hash(path: str):
hashpath = os.path.dirname(path)
return os.path.basename(hashpath)
def get_cid(path: str):
idpath = os.path.dirname(os.path.dirname(path))
cidstr = os.path.basename(idpath)
cid = int(cidstr.replace('id1', '').replace('id0', ''))
return cid - 1
def get_pid(path: str):
idpath = os.path.dirname(os.path.dirname(path))
cidstr = os.path.basename(idpath)
return cidstr
class M4AStreamer(object):
def __init__(self, data_dir, extensions=['.wav', '.m4a']):
self.extensions = extensions
self.data_dir = data_dir
def __iter__(self):
for (dirpath, dirnames, files) in os.walk(self.data_dir,
followlinks=True):
for filename in files:
if any([filename.endswith(ext) for ext in self.extensions]):
yield os.path.join(dirpath, filename)
def __len__(self):
total_len = 0
for (dirpath, dirnames, files) in os.walk(self.data_dir,
followlinks=True):
for filename in files:
if any([filename.endswith(ext) for ext in self.extensions]):
total_len += 1
return total_len
| 30.372093 | 76 | 0.573507 | 161 | 1,306 | 4.509317 | 0.329193 | 0.07438 | 0.089532 | 0.070248 | 0.523416 | 0.523416 | 0.523416 | 0.523416 | 0.523416 | 0.523416 | 0 | 0.007769 | 0.310107 | 1,306 | 42 | 77 | 31.095238 | 0.798002 | 0.015314 | 0 | 0.387097 | 0 | 0 | 0.010895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.193548 | false | 0 | 0.032258 | 0 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f9ee4cce3e5875aad64421d7f4c59c9aad2f69d | 2,809 | py | Python | zfused_maya/zfused_maya/tool/utility/assemblymanage/assetlistwidget/assetlistwidget.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | 2 | 2019-02-22T03:33:26.000Z | 2019-02-23T03:29:26.000Z | zfused_maya/zfused_maya/tool/utility/assemblymanage/assetlistwidget/assetlistwidget.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | null | null | null | zfused_maya/zfused_maya/tool/utility/assemblymanage/assetlistwidget/assetlistwidget.py | qinningfx/zfused_outsource | bfc5558f05e3d6005653794a47bd863b61b009b1 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
# --author-- lanhua.zhou
from __future__ import print_function
import logging
from qtpy import QtWidgets, QtGui, QtCore
import zfused_api
import zfused_maya.core.record as record
import zfused_maya.core.resource as resource
import zfused_maya.widgets.widgets as widgets
from . import assetlistmodel
from . import assetlistview
from . import assetitemdelegate
from . import searchline
__all__ = ["AssetListWidget"]
logger = logging.getLogger(__name__)
class AssetListWidget(widgets.ShowPanelWidget):
def __init__(self, parent = None):
super(AssetListWidget, self).__init__(parent)
self._build()
self._load()
# self.build_panel()
self.search_line.textChanged.connect(self._search)
# self.asset_list_view.clicked.connect(self._show_panel)
def _show_panel(self, model_index):
""" show asset assembly panel
"""
self.show_panel()
def _search(self):
""" search text
"""
_text = self.search_line.text()
self.asset_proxy_model.search(_text)
def _load(self):
"""
Load the assets of the current project
"""
_interface = record.Interface()
_project_id = _interface.get("current_project_id")
_project_assets = zfused_api.asset.project_assets([_project_id])
if _project_id:
self.asset_model = assetlistmodel.AssetListModel(_project_assets, self.asset_list_view)
self.asset_proxy_model.setSourceModel(self.asset_model)
self.asset_list_view.setModel(self.asset_proxy_model)
def _build(self):
_layout = QtWidgets.QVBoxLayout(self)
_layout.setContentsMargins(0,0,0,0)
_layout.setSpacing(0)
# search box
self.search_widget = QtWidgets.QFrame()
self.search_widget.setMaximumHeight(25)
self.search_widget.setMinimumHeight(25)
self.search_widget.setObjectName("search_widget")
_layout.addWidget(self.search_widget)
self.search_layout = QtWidgets.QHBoxLayout(self.search_widget)
self.search_layout.setContentsMargins(0, 0, 0, 0)
self.search_line = searchline.SearchLine()
self.search_layout.addWidget(self.search_line)
self.search_line.setMinimumWidth(400)
self.search_layout.addStretch(True)
# asset list
self.asset_list_view = assetlistview.AssetListView()
_layout.addWidget(self.asset_list_view)
self.asset_proxy_model = assetlistmodel.AssetListFilterProxyModel()
self.asset_list_view.setItemDelegate(
assetitemdelegate.AssetItemDelegate(self.asset_list_view))
_qss = resource.get("qss", "tool/assemblymanage/assetlistwidget.qss")
with open(_qss) as f:
qss = f.read()
self.setStyleSheet(qss) | 31.561798 | 99 | 0.684229 | 311 | 2,809 | 5.848875 | 0.311897 | 0.093458 | 0.050027 | 0.065421 | 0.102254 | 0.102254 | 0.039582 | 0.039582 | 0 | 0 | 0 | 0.007795 | 0.223567 | 2,809 | 89 | 100 | 31.561798 | 0.826227 | 0.072624 | 0 | 0 | 0 | 0 | 0.034321 | 0.015211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089286 | false | 0 | 0.196429 | 0 | 0.303571 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa05b2a5eaa254783f5987a4ee647edeaf6173a | 2,467 | py | Python | eland/tests/series/test_hist_pytest.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
] | null | null | null | eland/tests/series/test_hist_pytest.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
] | null | null | null | eland/tests/series/test_hist_pytest.py | redNixon/eland | 1b9cb1db6d30f0662fe3679c7bb31e2c0865f0c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Elasticsearch BV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File called _pytest for PyCharm compatability
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_almost_equal
from eland.tests.common import TestData
class TestSeriesFrameHist(TestData):
def test_flight_delay_min_hist(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
num_bins = 10
# pandas data
pd_flightdelaymin = np.histogram(pd_flights["FlightDelayMin"], num_bins)
pd_bins = pd.DataFrame({"FlightDelayMin": pd_flightdelaymin[1]})
pd_weights = pd.DataFrame({"FlightDelayMin": pd_flightdelaymin[0]})
ed_bins, ed_weights = ed_flights["FlightDelayMin"]._hist(num_bins=num_bins)
# Numbers are slightly different
print(pd_bins, ed_bins)
assert_almost_equal(pd_bins, ed_bins)
assert_almost_equal(pd_weights, ed_weights)
def test_filtered_hist(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
num_bins = 10
# pandas data
pd_filteredhist = np.histogram(
pd_flights[pd_flights.FlightDelay == True].FlightDelayMin, num_bins
)
pd_bins = pd.DataFrame({"FlightDelayMin": pd_filteredhist[1]})
pd_weights = pd.DataFrame({"FlightDelayMin": pd_filteredhist[0]})
d = ed_flights[ed_flights.FlightDelay == True].FlightDelayMin
print(d.info_es())
ed_bins, ed_weights = ed_flights[
ed_flights.FlightDelay == True
].FlightDelayMin._hist(num_bins=num_bins)
# Numbers are slightly different
assert_almost_equal(pd_bins, ed_bins)
assert_almost_equal(pd_weights, ed_weights)
def test_invalid_hist(self):
with pytest.raises(ValueError):
assert self.ed_ecommerce()["products.tax_amount"].hist()
| 33.794521 | 83 | 0.689501 | 317 | 2,467 | 5.138801 | 0.381703 | 0.049724 | 0.052179 | 0.066298 | 0.473297 | 0.441375 | 0.417434 | 0.322897 | 0.315531 | 0.249233 | 0 | 0.008399 | 0.227807 | 2,467 | 72 | 84 | 34.263889 | 0.846719 | 0.295906 | 0 | 0.277778 | 0 | 0 | 0.059988 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.083333 | false | 0 | 0.138889 | 0 | 0.25 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa1255aeed1640b6a22639a447638509e1708d4 | 2,108 | py | Python | glitchtip/pagination.py | rh-cssre/glitchtip-backend | ae12fbd54532cff5fd3d7a72631ba18625bbf1de | [
"MIT"
] | null | null | null | glitchtip/pagination.py | rh-cssre/glitchtip-backend | ae12fbd54532cff5fd3d7a72631ba18625bbf1de | [
"MIT"
] | null | null | null | glitchtip/pagination.py | rh-cssre/glitchtip-backend | ae12fbd54532cff5fd3d7a72631ba18625bbf1de | [
"MIT"
] | null | null | null | import logging
import urllib.parse as urlparse
from urllib.parse import parse_qs
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import CursorPagination
from rest_framework.response import Response
logger = logging.getLogger(__name__)
class LinkHeaderPagination(CursorPagination):
"""Inform the user of pagination links via response headers, similar to
what's described in
https://developer.github.com/guides/traversing-with-pagination/.
"""
page_size_query_param = "limit"
max_hits = 1000
def paginate_queryset(self, queryset, request, view=None):
self.count = self.get_count(queryset)
try:
return super().paginate_queryset(queryset, request, view)
except ValueError as err:
# https://gitlab.com/glitchtip/glitchtip-backend/-/issues/136
logging.warning("Pagination received invalid cursor", exc_info=True)
raise ValidationError("Invalid page cursor") from err
def get_count(self, queryset):
"""Count with max limit, to prevent slowdown"""
return queryset[: self.max_hits].count()
def get_paginated_response(self, data):
next_url = self.get_next_link()
previous_url = self.get_previous_link()
links = []
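# A produced header value looks like (URL and cursor are illustrative):
# <https://host/api/0/issues/?cursor=cD0xMjM%3D>; rel="next"; results="true"; cursor="cD0xMjM="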
for url, label in (
(previous_url, "previous"),
(next_url, "next"),
):
if url is not None:
parsed = urlparse.urlparse(url)
cursor = parse_qs(parsed.query).get(self.cursor_query_param, [""])[0]
links.append(
'<{}>; rel="{}"; results="true"; cursor="{}"'.format(
url, label, cursor
)
)
else:
links.append(
'<{}>; rel="{}"; results="false"'.format(self.base_url, label)
)
headers = {"Link": ", ".join(links)} if links else {}
headers["X-Max-Hits"] = self.max_hits
headers["X-Hits"] = self.count
return Response(data, headers=headers)
| 34 | 85 | 0.604839 | 231 | 2,108 | 5.376623 | 0.4329 | 0.022544 | 0.041063 | 0.033816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005295 | 0.283207 | 2,108 | 61 | 86 | 34.557377 | 0.816678 | 0.121442 | 0 | 0.046512 | 0 | 0 | 0.09081 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.139535 | 0 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa2950d2c1947708703dac582e56abc5bd22d7c | 1,336 | py | Python | rzeczownik_zwiazek.py | alkamid/wiktionary | ce242da609a1001ae7462b07da2f6e83f1a7281b | [
"MIT"
] | 3 | 2015-01-06T22:00:22.000Z | 2016-08-14T08:07:32.000Z | rzeczownik_zwiazek.py | alkamid/wiktionary | ce242da609a1001ae7462b07da2f6e83f1a7281b | [
"MIT"
] | 56 | 2015-07-12T10:21:38.000Z | 2020-02-23T18:51:01.000Z | rzeczownik_zwiazek.py | alkamid/wiktionary | ce242da609a1001ae7462b07da2f6e83f1a7281b | [
"MIT"
] | 2 | 2015-01-06T21:25:06.000Z | 2018-01-17T12:03:17.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
sys.path.append('/home/adam/wikt/pywikipedia')
#sys.path.append('/home/alkamid/wikt/pywikipedia')
import pywikibot
from pywikibot import Category
from pywikibot import pagegenerators
import re
from pywikibot import xmlreader
from klasa import *
def main():
data = '20110310'
site = pywikibot.Site()
cat = Category(site,'Kategoria:francuski (indeks)')
lista = pagegenerators.CategorizedPageGenerator(cat)
#lista_stron1 = xmlreader.XmlDump('plwiktionary-%s-pages-articles.xml' % data)
#lista = xmlreader.XmlDump.parse(lista_stron1)
for a in lista:
h = Haslo(a.title())
#h = HasloXML(a.title, a.text)
if h.type != 4 and ' ' in h.title:
h.langs()
for c in h.list_lang:
c.pola()
if c.type != 2 and c.lang == 'hiszpański':
if ('rzeczownik' in c.znaczenia.tresc) and ('rzeczownika' not in c.znaczenia.tresc):
print('\n' + h.title)
text = '*[[%s]]\n' % h.title
file = open("log/rzeczownik.txt", 'a')
file.write (text.encode("utf-8"))
file.close()
if __name__ == '__main__':
try:
main()
finally:
pywikibot.stopme()
| 29.688889 | 104 | 0.567365 | 158 | 1,336 | 4.727848 | 0.493671 | 0.052209 | 0.076305 | 0.045515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014925 | 0.297904 | 1,336 | 44 | 105 | 30.363636 | 0.78145 | 0.178144 | 0 | 0 | 0 | 0 | 0.126374 | 0.024725 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.225806 | 0 | 0.258065 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa541c36336e94cfd1cde616a593d1859f31c6d | 962 | py | Python | scripts/reader/extract_eval_results.py | lixinsu/RCZoo | 37fcb7962fbd4c751c561d4a0c84173881ea8339 | [
"MIT"
] | 166 | 2018-08-07T03:35:02.000Z | 2022-01-11T10:40:09.000Z | scripts/reader/extract_eval_results.py | lixinsu/RCZoo | 37fcb7962fbd4c751c561d4a0c84173881ea8339 | [
"MIT"
] | 16 | 2018-08-17T09:53:37.000Z | 2019-06-17T12:58:00.000Z | scripts/reader/extract_eval_results.py | lixinsu/RCZoo | 37fcb7962fbd4c751c561d4a0c84173881ea8339 | [
"MIT"
] | 45 | 2018-08-27T06:38:42.000Z | 2021-01-17T11:12:39.000Z | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import re
import numpy as np
import pandas as pd
def extract_file(logfile, max_epoch=40):
with open(logfile) as infp:
pat = re.compile(r"Epoch = ([0-9]+) \| EM = ([0-9]+\.[0-9]+) \| F1 = ([0-9]+\.[0-9]+)")
res = []
for line in infp:
if "dev valid official" in line:
m = pat.search(line)
res.append([m.group(1),m.group(2),m.group(3)])
return res[:max_epoch]
def compare_result(files):
results = {}
for ifile in files:
print(ifile.split('/')[-1])
save_name = ifile.split('/')[-1].split('.')[0]
res = extract_file(ifile)
results['%s-EM' % save_name] = [float(ires[1]) for ires in res]
results['%s-F1' % save_name] = [float(ires[2]) for ires in res]
pd.DataFrame.from_dict(results).to_csv('compare.csv', sep=',')
if __name__ == '__main__':
compare_result(sys.argv[1:])
| 28.294118 | 95 | 0.564449 | 146 | 962 | 3.589041 | 0.472603 | 0.019084 | 0.01145 | 0.015267 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033103 | 0.246362 | 962 | 33 | 96 | 29.151515 | 0.689655 | 0.035343 | 0 | 0 | 0 | 0.04 | 0.12635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.2 | 0 | 0.32 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa656207915017887d981061398ba3f4a5e4115 | 1,332 | py | Python | tests/conversion/converters/inside_worker_test/transliterate_sql_function_learning_test.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 27 | 2015-03-30T14:17:26.000Z | 2022-02-19T17:30:44.000Z | tests/conversion/converters/inside_worker_test/transliterate_sql_function_learning_test.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 483 | 2015-03-09T16:58:03.000Z | 2022-03-14T09:29:06.000Z | tests/conversion/converters/inside_worker_test/transliterate_sql_function_learning_test.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 6 | 2015-04-07T07:38:30.000Z | 2020-04-01T12:45:53.000Z | from contextlib import closing
import pytest
import sqlalchemy
from tests.conversion.converters.inside_worker_test.conftest import slow
international_text_strings = [
('ascii', 'some normal ascii', 'some normal ascii'),
('umlaut', 'öäüüäüö', 'öäüüäüö'),
('special_chars', "*+?'^'%ç#", "*+?'^'%ç#"),
('japanese', "大洲南部広域農道", 'dà zhōu nán bù guǎng yù nóng dào'),
('chinese russian', "二连浩特市 Эрээн хот", 'èr lián hào tè shì Éréén hot'),
('arabic', "شارع المنيرة الرئيسي", 'sẖạrʿ ạlmnyrẗ ạlrỷysy'),
# transliteration doesn't work on Eritrean (Ethiopic-script) characters!
('eritrean', 'ጋሽ-ባርካ', 'ጋሽ-ባርካ'),
]
@pytest.fixture(params=international_text_strings)
def international_text(request):
return dict(
variant=request.param[0],
text=request.param[1],
expected=request.param[2],
)
@slow
def test_osml10n_translit_works_as_expected(osmaxx_functions, international_text):
engine = osmaxx_functions
text_escaped = international_text['text']
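# $$...$$ is PostgreSQL dollar-quoting, so the international strings need no quote escaping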
with closing(engine.execute(sqlalchemy.text("select osml10n_translit($${}$$) as label;".format(text_escaped)).execution_options(autocommit=True))) as result:
assert result.rowcount == 1
results = result.fetchall()
assert len(results) == 1
assert results[0]['label'] == international_text['expected']
| 35.052632 | 161 | 0.688438 | 158 | 1,332 | 5.664557 | 0.620253 | 0.113966 | 0.053631 | 0.044693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009025 | 0.168168 | 1,332 | 37 | 162 | 36 | 0.798736 | 0.039039 | 0 | 0 | 0 | 0 | 0.251174 | 0.018779 | 0 | 0 | 0 | 0 | 0.103448 | 1 | 0.068966 | false | 0 | 0.137931 | 0.034483 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa71b0b3f054bcdd23fb5a4941606959b1e3af7 | 1,123 | py | Python | python/example_code/iam/get_pub_keys.py | dlo/aws-doc-sdk-examples | 305e5c4f6cf268cafad7e1603aa5d2909fcd9c0c | [
"Apache-2.0"
] | 9 | 2018-09-29T11:44:19.000Z | 2019-11-06T21:41:34.000Z | python/example_code/iam/get_pub_keys.py | dlo/aws-doc-sdk-examples | 305e5c4f6cf268cafad7e1603aa5d2909fcd9c0c | [
"Apache-2.0"
] | 1 | 2018-10-30T06:11:07.000Z | 2018-10-30T06:11:07.000Z | python/example_code/iam/get_pub_keys.py | dlo/aws-doc-sdk-examples | 305e5c4f6cf268cafad7e1603aa5d2909fcd9c0c | [
"Apache-2.0"
] | 2 | 2018-12-25T10:13:56.000Z | 2021-06-24T11:26:38.000Z | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
user_name = 'your-name'
# Create IAM client
iam = boto3.client('iam')
ssh_public_keys_response = iam.list_ssh_public_keys(
UserName = user_name,
MaxItems = 100,
)
# Get SSH public key
for ssh_public_key in ssh_public_keys_response['SSHPublicKeys']:
key_id = ssh_public_key['SSHPublicKeyId']
ssh_public_key_response = iam.get_ssh_public_key(
UserName = user_name,
SSHPublicKeyId = key_id,
Encoding = 'SSH',
)
print(ssh_public_key_response['SSHPublicKey']['SSHPublicKeyBody'])
| 32.085714 | 78 | 0.741763 | 163 | 1,123 | 4.932515 | 0.521472 | 0.123134 | 0.119403 | 0.052239 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018319 | 0.173642 | 1,123 | 34 | 79 | 33.029412 | 0.84806 | 0.514693 | 0 | 0.133333 | 0 | 0 | 0.132075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fa9f0e8cb2ce25f47bfafc2286cfa382e9eef68 | 62,700 | py | Python | build_rdkit_csharp.py | kazuyaujihara/playfield | 2ef34dcfde820461c1f7fa47415f83dbea4481cc | [
"BSD-3-Clause"
] | null | null | null | build_rdkit_csharp.py | kazuyaujihara/playfield | 2ef34dcfde820461c1f7fa47415f83dbea4481cc | [
"BSD-3-Clause"
] | null | null | null | build_rdkit_csharp.py | kazuyaujihara/playfield | 2ef34dcfde820461c1f7fa47415f83dbea4481cc | [
"BSD-3-Clause"
] | null | null | null | """Script to make RDKit.DotNetWrapper.
Notes:
This is developed with rdkit-Release_2021_09_4.
"""
from enum import Enum
import argparse
import glob
import logging
import os
import platform
import re
import shutil
import subprocess
import sys
import typing
import xml.etree.ElementTree as ET
from os import PathLike
from pathlib import Path
from subprocess import PIPE
from typing import (
Callable,
Collection,
Dict,
Iterable,
List,
Literal,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
from xml.etree.ElementTree import Element, ElementTree, SubElement
logging.basicConfig(level=logging.DEBUG)
project_name: str = "RDKit.DotNetWrap"
VisualStudioVersion = Literal["15.0", "16.0"]
CpuModel = Literal["x86", "x64"]
MSPlatform = Literal["Win32", "x64"]
AddressModel = Literal[32, 64]
MSVCInternalVersion = Literal["14.1", "14.2"]
SupportedSystem = Literal["win", "linux"]
here = Path(__file__).parent.resolve()
class LangType(Enum):
CPlusPlus = 1
Java = 2
CSharp = 3
Python = 4
_LangType_to_str: Mapping[LangType, str] = {
LangType.CPlusPlus: "cpp",
LangType.Java: "java",
LangType.CSharp: "CSharp",
LangType.Python : "python",
}
_platform_system_to_system: Mapping[str, SupportedSystem] = {
"Windows": "win",
"Linux": "linux",
}
_vs_ver_to_cmake_option_catalog: Mapping[VisualStudioVersion, Mapping[CpuModel, Sequence[str]]] = {
"15.0": {
"x86": ['-G"Visual Studio 15 2017"'],
"x64": ['-G"Visual Studio 15 2017 Win64"'],
},
"16.0": {
"x86": ['-G"Visual Studio 16 2019"', "-AWin32"],
"x64": ['-G"Visual Studio 16 2019"'],
},
}
_platform_to_ms_form: Mapping[CpuModel, MSPlatform] = {
"x86": "Win32",
"x64": "x64",
}
_platform_to_address_model: Mapping[CpuModel, AddressModel] = {
"x86": 32,
"x64": 64,
}
_vs_to_msvc_internal_ver: Mapping[VisualStudioVersion, MSVCInternalVersion] = {
"15.0": "14.1",
"16.0": "14.2",
}
def get_os() -> SupportedSystem:
pf = platform.system()
if pf not in _platform_system_to_system:
raise RuntimeError
return _platform_system_to_system[pf]
def get_value(dic: Mapping[str, str], key: Optional[str]) -> str:
if key is None:
raise ValueError
if key not in dic:
raise ValueError
return dic[key]
def make_bak(filename: PathLike) -> None:
bak_filename = f"{filename}.bak"
if not os.path.exists(bak_filename):
shutil.copy2(filename, bak_filename)
def restore_from_bak(filename: PathLike) -> None:
bak_filename = f"{filename}.bak"
if os.path.exists(bak_filename):
shutil.copy2(bak_filename, filename)
def get_as_text(filename: PathLike) -> str:
with open(filename, "r", encoding="utf-8") as file:
filedata = file.read()
return filedata
def get_original_text(filename: PathLike) -> str:
bak_filename = f"{filename}.bak"
if os.path.exists(bak_filename):
filename = Path(bak_filename)
text = get_as_text(filename)
return text
def _replace_file_content(
filename: PathLike, replace_text: Callable[[str], str], make_backup: bool
) -> None:
if make_backup:
make_bak(filename)
curr_text = get_as_text(filename)
original_text = get_original_text(filename)
else:
curr_text = original_text = get_as_text(filename)
filedata = replace_text(original_text)
if filedata != curr_text:
with open(filename, "w", encoding="utf-8") as file:
file.write(filedata)
def replace_file_string(
filename: PathLike, pattern_replace: Sequence[Tuple[str, str]], make_backup: bool
) -> None:
def __replace_text(text: str) -> str:
for pattern, replace in pattern_replace:
text = re.sub(pattern, replace, text, flags=re.MULTILINE | re.DOTALL)
return text
_replace_file_content(filename, __replace_text, make_backup)
def insert_line_after(
filename: PathLike, insert_after: Mapping[str, str], make_backup: bool
) -> None:
def __replace_text(text: str) -> str:
new_lines: List[str] = []
lines = text.split("\n")
for line in lines:
new_lines.append(line)
if line in insert_after:
new_lines.append(insert_after[line])
return "\n".join(new_lines) + "\n"
_replace_file_content(filename, __replace_text, make_backup)
def call_subprocess(cmd: Sequence[str], show_info: bool = True) -> None:
try:
_env: Dict[str, str] = {}
_env.update(os.environ)
_CL_env_for_MSVC: Mapping[str, str] = {
"CL": "/source-charset:utf-8 /execution-charset:utf-8"
}
_env.update(_CL_env_for_MSVC)
logging.info(f"pwd={os.path.abspath(os.curdir)}")
def __t(text: str) -> str:
if '"' in text:
return text
if " " in text:
return '"' + text + '"'
return text
cmdline = " ".join([__t(s) for s in cmd if s])
logging.info(cmdline)
if get_os() == "win":
subprocess.check_call(cmdline, env=_env)
else:
subprocess.check_call(cmd, env=_env)
except subprocess.CalledProcessError as e:
logging.warning(e)
sys.exit(e.returncode)
def remove_if_exist(path: Path) -> None:
if path.exists():
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(path)
def remove_by_pattern(parent: Path, re_pattern: str, delete_on_match: bool) -> None:
pat = re.compile(re_pattern)
for p in parent.iterdir():
if delete_on_match:
if pat.match(p.name):
remove_if_exist(p)
else:
if not pat.match(p.name):
remove_if_exist(p)
def makefile_to_lines(filename: PathLike) -> Iterable[str]:
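# Joins backslash-continued makefile lines into single logical lines and collapses
# runs of whitespace; e.g. "SRC = a.c \" followed by "b.c" yields "SRC = a.c b.c".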
lines: List[str] = []
with open(filename, "r") as f:
for line in f.readlines():
if line.endswith("\n"):
line = line[:-1]
if line.endswith("\\"):
lines.append(line[:-1])
else:
lines.append(line)
yield re.sub("[ \\t]+", " ", "".join(lines))
lines = []
def match_and_add(pattern: re.Pattern, dest: List[str], line: str) -> None:
match = pattern.match(line)
if match:
for name in [s.strip() for s in match["name"].split(" ")]:
if name and name != "$(NULL)":
dest.append(name)
def get_value_from_env(env: str, default: Optional[str] = None) -> Optional[str]:
if env not in os.environ:
return default
return os.environ[env]
def get_vs_ver() -> VisualStudioVersion:
env_name = "VisualStudioVersion"
vs_version = get_value_from_env(env_name)
if not vs_version:
raise ValueError(f"{env_name} is empty.")
if vs_version not in typing.get_args(VisualStudioVersion):
raise ValueError(f"Unknown Visual Studio version: {vs_version}.")
return cast(VisualStudioVersion, vs_version)
def get_msvc_internal_ver() -> MSVCInternalVersion:
return _vs_to_msvc_internal_ver[get_vs_ver()]
def load_msbuild_xml(path: PathLike) -> ElementTree:
ns = {"msbuild": "http://schemas.microsoft.com/developer/msbuild/2003"}
ET.register_namespace("", ns["msbuild"])
tree = ET.parse(path)
return tree
def load_nuspec_xml(path: PathLike) -> ElementTree:
ns = {"nuspec": "http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd"}
ET.register_namespace("", ns["nuspec"])
tree = ET.parse(path)
return tree
def get_elems(parent: Element, name: str, ns: Optional[str] = None) -> Iterable[Element]:
ns_name = name
if ns is not None:
ns_name = "{" + ns + "}" + ns_name
return (e for e in parent if e.tag == ns_name)
def get_elem(parent: Element, name: str, ns: Optional[str] = None) -> Element:
elms = list(get_elems(parent, name, ns))
if len(elms) != 1:
raise RuntimeError(f"Number of <{name}> is {len(elms)}.")
return elms[0]
class Config:
def __init__(self):
self.this_path: Optional[Path] = None
self.rdkit_path: Optional[Path] = None
self.boost_path: Optional[Path] = None
self.eigen_path: Optional[Path] = None
self.zlib_path: Optional[Path] = None
self.libpng_path: Optional[Path] = None
self.pixman_path: Optional[Path] = None
self.cairo_path: Optional[Path] = None
self.freetype_path: Optional[Path] = None
self.minor_version: int = 1
self.cairo_support: bool = False
self.freetype_support: bool = False
self.swig_patch_enabled: bool = True
self.use_boost: bool = False
self.test_enabled: bool = False
self.limit_external: bool = False
self.use_static_libs: bool = False
self.target_lang: LangType = LangType.CPlusPlus
self.more_functions: bool = False
def to_on_off(flag: bool) -> str:
return "ON" if flag else "OFF"
def get_shared_lib_names(binary_path: Path) -> Iterable[str]:
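# List the shared libraries a binary depends on, using dumpbin on Windows and ldd on Linux.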
dependent_dll_names: Set[str] = set()
if get_os() == "win":
cmdline = f"dumpbin.exe /DEPENDENTS {binary_path}"
proc = subprocess.run(cmdline, shell=True, stdout=PIPE, text=True)
if proc.returncode != 0:
raise RuntimeError("Failed to execute dumpbin")
pat = re.compile(" ([a-zA-Z0-9_\\-]+\\.dll) ")
for name in re.findall(pat, proc.stdout, flags=0):
dependent_dll_names.add(name)
elif get_os() == "linux":
cmdline = f"ldd {binary_path}"
proc = subprocess.run(cmdline, shell=True, stdout=PIPE, text=True)
if proc.returncode != 0:
raise RuntimeError("Failed to execute ldd")
pat = re.compile(" ([a-zA-Z0-9_\\-]+\\.so\\.\\d+)\\s+\\=\\>")
for name in re.findall(pat, proc.stdout, flags=0):
dependent_dll_names.add(name)
else:
raise RuntimeError
return dependent_dll_names
def vcxproj_to_vscurr(proj_file: Path) -> None:
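# Retarget a .vcxproj to the Visual Studio version currently in use by rewriting
# its VCProjectVersion and PlatformToolset elements.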
ns = "http://schemas.microsoft.com/developer/msbuild/2003"
tree = load_msbuild_xml(proj_file)
project = tree.getroot()
for prop_grp in get_elems(project, "PropertyGroup", ns):
if "Label" in prop_grp.attrib and prop_grp.attrib["Label"] == "Globals":
vc_proj_ver = get_elem(prop_grp, "VCProjectVersion", ns)
vc_proj_ver.text = get_vs_ver()
break
else:
raise RuntimeError(f"VCProjectVersion is missing in {proj_file}")
for prop_grp in get_elems(project, "PropertyGroup", ns):
for elm in prop_grp:
if elm.tag == "{" + ns + "}" + "PlatformToolset":
elm.text = "v" + get_msvc_internal_ver().replace(".", "")
tree.write(proj_file, "utf-8", True)
class NativeMaker:
def __init__(self, config: Config, build_platform: Optional[CpuModel] = None):
self.build_platform: Optional[CpuModel] = build_platform if build_platform else "x64"
self.config: Config = config
@property
def g_option_of_cmake(self) -> Sequence[str]:
if get_os() == "linux":
return ["-GUnix Makefiles"]
if get_os() == "win":
assert self.build_platform
return _vs_ver_to_cmake_option_catalog[get_vs_ver()][self.build_platform]
raise RuntimeError
@property
def build_dir_name(self) -> str:
"""Returns build path. Typically "buildx86".
Returns:
str: Directory name.
"""
assert self.build_platform
return f"build{self.build_platform}"
@property
def build_dir_name_of_rdkit(self) -> str:
"""Returns build path for RDKit. Typically "buildx86winCSharp".
Returns:
str: Directory name.
"""
assert self.build_platform
return f"build{get_os()}{self.build_platform}{_LangType_to_str[self.config.target_lang]}"
@property
def ms_build_platform(self) -> MSPlatform:
assert self.build_platform
return _platform_to_ms_form[self.build_platform]
@property
def address_model(self) -> AddressModel:
assert self.build_platform
return _platform_to_address_model[self.build_platform]
@property
def this_path(self) -> Path:
assert self.config.this_path
return self.config.this_path
@property
def rdkit_path(self) -> Path:
assert self.config.rdkit_path
return self.config.rdkit_path
@property
def boost_path(self) -> Path:
assert self.config.boost_path
return self.config.boost_path
@property
def eigen_path(self) -> Path:
assert self.config.eigen_path
return self.config.eigen_path
@property
def zlib_path(self) -> Path:
assert self.config.zlib_path
return self.config.zlib_path
@property
def libpng_path(self) -> Path:
assert self.config.libpng_path
return self.config.libpng_path
@property
def pixman_path(self) -> Path:
assert self.config.pixman_path
return self.config.pixman_path
@property
def freetype_path(self) -> Path:
assert self.config.freetype_path
return self.config.freetype_path
@property
def cairo_path(self) -> Path:
assert self.config.cairo_path
return self.config.cairo_path
@property
def boost_bin_path(self) -> Path:
return self.boost_path / f"lib{self.address_model}-msvc-{get_msvc_internal_ver()}"
@property
def rdkit_build_path(self) -> Path:
return self.rdkit_path / self.build_dir_name_of_rdkit
@property
def rdkit_wrapper_path(self) -> Path:
dic: Mapping[LangType, str] = {
LangType.Java: "gmwrapper",
LangType.CSharp: "csharp_wrapper",
}
if self.config.target_lang not in dic:
raise AssertionError
return self.rdkit_path / "Code" / "JavaWrappers" / dic[self.config.target_lang]
@property
def rdkit_swig_csharp_path(self) -> Path:
return self.rdkit_wrapper_path / "swig_csharp"
def get_rdkit_version(self) -> int:
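# e.g. a path ending in "rdkit-Release_2021_09_4" maps to 2021094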
return int(re.sub(r".*_(\d\d\d\d)_(\d\d)_(\d)", r"\1\2\3", str(self.config.rdkit_path)))
def get_version_for_nuget(self) -> str:
return f"0.{self.get_rdkit_version()}.{self.config.minor_version}"
def get_version_for_rdkit_dotnetwrap(self) -> str:
num = self.get_rdkit_version()
major, minor, build, revision = (
0,
(num // 10) % 10000,
num % 10,
self.config.minor_version,
)
return f"{major}.{minor}.{build}.{revision}"
def get_version_for_rdkit(self) -> str:
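# e.g. 2021094 -> "2021_09_4"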
num = self.get_rdkit_version()
return f"{num // 1000}_{('00' + str((num % 1000) // 10))[-2:]}_{num % 10}"
def get_version_for_boost(self) -> str:
return re.sub(r".*(\d+_\d+_\d+)", r"\1", str(self.boost_path))
def get_version_for_eigen(self) -> str:
return re.sub(r".*(\d+\.\d+\.\d+)", r"\1", str(self.eigen_path))
def get_version_for_zlib(self) -> str:
return re.sub(r".*(\d+\.\d+\.\d+)", r"\1", str(self.zlib_path))
def get_version_for_libpng(self) -> str:
return re.sub(r".*lpng(\d)(\d)(\d\d)", r"\1.\2.\3", str(self.libpng_path))
def get_version_for_freetype(self) -> str:
return re.sub(r".*(\d+\.\d+\.\d+)", r"\1", str(self.freetype_path))
def get_version_for_pixman(self) -> str:
return re.sub(r".*(\d+\.\d+\.\d+)", r"\1", str(self.pixman_path))
def get_version_for_cairo(self) -> str:
return re.sub(r".*(\d+\.\d+\.\d+)", r"\1", str(self.cairo_path))
@property
def zlib_lib_path(self):
return (
self.zlib_path
/ self.build_dir_name
/ "Release"
/ ("zlibstatic.lib" if self.config.use_static_libs else "zlib.lib")
)
def run_msbuild(self, proj: Union[PathLike, str], platform: Optional[str] = None) -> None:
if not platform:
platform = self.ms_build_platform
cmd = [
"MSBuild",
str(proj),
f"/p:Configuration=Release,Platform={platform}",
"/maxcpucount",
]
call_subprocess(cmd)
def make_zlib(self) -> None:
build_path = self.zlib_path / self.build_dir_name
build_path.mkdir(exist_ok=True)
_curdir = os.path.abspath(os.curdir)
try:
os.chdir(build_path)
cmd = ["cmake", str(self.zlib_path)] + list(self.g_option_of_cmake)
call_subprocess(cmd)
self.run_msbuild("zlib.sln")
shutil.copy2(build_path / "zconf.h", self.zlib_path)
finally:
os.chdir(_curdir)
def make_libpng(self) -> None:
build_path = self.libpng_path / self.build_dir_name
build_path.mkdir(exist_ok=True)
_curdir = os.path.abspath(os.curdir)
try:
os.chdir(build_path)
cmd = (
["cmake", str(self.libpng_path)]
+ list(self.g_option_of_cmake)
+ [
f'-DZLIB_LIBRARY="{str(self.zlib_lib_path)}"',
f'-DZLIB_INCLUDE_DIR="{str(self.zlib_path)}"',
f"-DPNG_SHARED={to_on_off(not self.config.use_static_libs)}",
f"-DPNG_STATIC={to_on_off(self.config.use_static_libs)}",
]
)
call_subprocess(cmd)
self.run_msbuild("libpng.sln")
finally:
os.chdir(_curdir)
def make_pixman(self) -> None:
_curdir = os.path.abspath(os.curdir)
try:
proj_dir = self.pixman_path / "vc2017"
proj_dir.mkdir(exist_ok=True)
os.chdir(proj_dir)
files_dir = self.this_path / "files" / "pixman"
vcxproj = "pixman.vcxproj"
shutil.copy2(files_dir / vcxproj, proj_dir)
proj_file = proj_dir / vcxproj
shutil.copy2(files_dir / "config.h", self.pixman_path / "pixman")
vcxproj_to_vscurr(proj_file)
makefile_win32 = self.pixman_path / "pixman" / "Makefile.win32"
makefile_sources = self.pixman_path / "pixman" / "Makefile.sources"
c_files: List[str] = []
i_files: List[str] = []
pattern_c = re.compile("^libpixman_sources\\s*\\=(?P<name>.*)$")
pattern_h = re.compile("^libpixman_headers\\s*\\=(?P<name>.*)$")
for line in makefile_to_lines(makefile_sources):
match_and_add(pattern_c, c_files, line)
match_and_add(pattern_h, i_files, line)
pattern_c = re.compile("^\\s*libpixman_sources\\s*\\+\\=(?P<name>.*)$")
for line in makefile_to_lines(makefile_win32):
match_and_add(pattern_c, c_files, line)
tree = load_msbuild_xml(proj_file)
root = tree.getroot()
item_group = SubElement(root, "ItemGroup")
for name in c_files:
node = SubElement(item_group, "ClCompile")
node.attrib["Include"] = f"..\\pixman\\{name}"
for name in i_files:
node = SubElement(item_group, "ClInclude")
node.attrib["Include"] = f"..\\pixman\\{name}"
tree.write(proj_file, "utf-8", True)
self.run_msbuild(proj_file)
finally:
os.chdir(_curdir)
def make_cairo(self) -> None:
# TODO: get file names from src\Makefile.sources
_curdir = os.path.abspath(os.curdir)
try:
proj_dir = self.cairo_path / "vc2017"
proj_dir.mkdir(exist_ok=True)
os.chdir(proj_dir)
files_dir = self.this_path / "files" / "cairo"
vcxproj = "cairo.vcxproj"
shutil.copy2(files_dir / vcxproj, proj_dir)
proj_file = proj_dir / vcxproj
shutil.copy2(files_dir / "cairo-features.h", self.cairo_path / "src")
vcxproj_to_vscurr(proj_file)
replace_file_string(
proj_file,
[
(
"__CAIRODIR__",
str(self.cairo_path).replace("\\", "\\\\"),
),
(
"__LIBPNGDIR__",
str(self.libpng_path).replace("\\", "\\\\"),
),
(
"__ZLIBDIR__",
str(self.zlib_path).replace("\\", "\\\\"),
),
(
"__PIXMANDIR__",
str(self.pixman_path).replace("\\", "\\\\"),
),
(
"__FREETYPEDIR__",
str(self.freetype_path).replace("\\", "\\\\"),
),
],
make_backup=False,
)
self.run_msbuild(vcxproj)
finally:
os.chdir(_curdir)
def make_freetype(self) -> None:
_curdir = os.path.abspath(os.curdir)
try:
os.chdir(self.freetype_path)
shutil.copy2(
self.this_path / "files" / "freetype" / "freetype.vcxproj",
self.freetype_path / "builds" / "windows" / "vc2010",
)
os.chdir(self.freetype_path / "builds" / "windows" / "vc2010")
logging.debug(f"current dir = {os.getcwd()}")
self.run_msbuild("freetype.sln")
finally:
os.chdir(_curdir)
@property
def path_streams_cpp(self) -> Path:
return self.rdkit_path / "Code" / "RDStreams" / "streams.cpp"
@property
def path_streams_h(self) -> Path:
return self.rdkit_path / "Code" / "RDStreams" / "streams.h"
@property
def path_GraphMolCSharp_i(self) -> Path:
return self.rdkit_wrapper_path / "GraphMolCSharp.i"
@property
def path_Descriptors_i(self) -> Path:
return self.rdkit_path / "Code" / "JavaWrappers" / "Descriptors.i"
@property
def path_MolDescriptors_h(self) -> Path:
return self.rdkit_path / "Code" / "GraphMol" / "Descriptors" / "MolDescriptors.h"
@property
def path_MolSupplier_i(self) -> Path:
return self.rdkit_path / "Code" / "JavaWrappers" / "MolSupplier.i"
@property
def path_Streams_i(self) -> Path:
return self.rdkit_path / "Code" / "JavaWrappers" / "Streams.i"
@property
def path_MolDraw2D_i(self) -> Path:
return self.rdkit_path / "Code" / "JavaWrappers" / "MolDraw2D.i"
@property
def path_MolDraw2D_h(self) -> Path:
return self.rdkit_path / "Code" / "GraphMol" / "MolDraw2D" / "MolDraw2D.h"
@property
def _path_csharp_wrapper_CMakeLists_txt(self) -> Path:
return self.rdkit_path / "Code" / "JavaWrappers" / "csharp_wrapper" / "CMakeLists.txt"
@property
def bakable_files(self) -> Iterable[Path]:
return [
self.path_streams_cpp,
self.path_streams_h,
self.path_GraphMolCSharp_i,
self.path_Descriptors_i,
self.path_MolDescriptors_h,
self.path_MolSupplier_i,
self.path_Streams_i,
self.path_MolDraw2D_i,
self.path_MolDraw2D_h,
self._path_csharp_wrapper_CMakeLists_txt,
]
@property
def path_RDKit2DotNet_folder(self):
return self.rdkit_wrapper_path / "RDKit2DotNet"
def build_cmake_rdkit(self) -> Sequence[str]:
self.rdkit_build_path.mkdir(exist_ok=True)
_curdir = os.path.abspath(os.curdir)
os.chdir(self.rdkit_build_path)
try:
self._patch_i_files()
cmd = self._make_rdkit_cmake()
return cmd
finally:
os.chdir(_curdir)
def build_rdkit(self) -> None:
self.rdkit_build_path.mkdir(exist_ok=True)
_curdir = os.path.abspath(os.curdir)
os.chdir(self.rdkit_build_path)
try:
if get_os() == "win":
self.run_msbuild("RDKit.sln")
else:
cmd = ["make", "-j"]
if self.config.target_lang in (LangType.CPlusPlus,):
pass
elif self.config.target_lang in (LangType.CSharp,):
cmd += ["RDKFuncs"]
elif self.config.target_lang in (LangType.Java,):
cmd += ["install"]
else:
raise AssertionError
call_subprocess(cmd)
finally:
os.chdir(_curdir)
def copy_rdkit_dlls(self) -> None:
self._copy_dlls()
def _patch_GraphMolCSharp_i(self):
dic: Dict[str, str] = dict()
_line = r"%shared_ptr(RDKit::QueryOps)"
_insert = r"%shared_ptr(RDKit::MolBundle)" + "\n"
_insert += r"%shared_ptr(RDKit::FixedMolSizeMolBundle)"
dic.update({_line: _insert})
_line = r"%shared_ptr(RDKit::SmilesParseException)"
_insert = r"%shared_ptr(RDKit::MolPicklerException)"
dic.update({_line: _insert})
_line = r'%include "../QueryOps.i"'
_insert = r'%include "../MolBundle.i"'
dic.update({_line: _insert})
_line = r'%include "../Trajectory.i"'
_insert = r'%include "../MolStandardize.i"'
dic.update({_line: _insert})
_line = r'%include "../SubstanceGroup.i"'
_insert = r'%include "../MolEnumerator.i"'
dic.update({_line: _insert})
insert_line_after(self.path_GraphMolCSharp_i, dic, make_backup=True)
if self.config.swig_patch_enabled and self.get_rdkit_version() < 2021032:
replace_file_string(
self.path_GraphMolCSharp_i,
[("boost::int32_t", "int32_t"), ("boost::uint32_t", "uint32_t")],
make_backup=False, # backed up above
)
def _patch_MolDraw2D_i(self):
dic: Dict[str, str] = dict()
_svg_h = "<GraphMol/MolDraw2D/MolDraw2DSVG.h>"
_cairo_h = "<GraphMol/MolDraw2D/MolDraw2DCairo.h>"
_line = f"#include {_svg_h}"
_insert = f"\n#ifdef RDK_BUILD_CAIRO_SUPPORT\n#include {_cairo_h}\n#endif\n"
dic.update({_line: _insert})
_line = f"%include {_svg_h}"
_insert = f"\n#ifdef RDK_BUILD_CAIRO_SUPPORT\n%include {_cairo_h}\n#endif\n"
dic.update({_line: _insert})
_line = "%template(Int_Vect_Vect) std::vector<std::vector<int> >;"
_insert = "\n"
_insert += "%template(UInt_Vect_Vect) std::vector<std::vector<unsigned int> >;\n"
_insert += "%template(Double_Vect_Vect) std::vector<std::vector<double> >;\n"
_insert += "%template(Point3D_Const_Vect) std::vector<const RDGeom::Point3D *>;\n"
_insert += "%template(Point3D_Val_Vect) std::vector<RDGeom::Point3D>;\n"
insert_line_after(self.path_MolDraw2D_i, dic, make_backup=True)
def _patch_MolDraw2D_h(self) -> None:
dic: Dict[str, str] = dict()
_line = r" const MolDrawOptions &drawOptions() const { return options_; }"
_insert = (
r"void setDrawOptions(const RDKit::MolDrawOptions &opts) { drawOptions() = opts; }"
)
dic.update({_line: _insert})
insert_line_after(self.path_MolDraw2D_h, dic, make_backup=True)
def _patch_MolDescriptors_h(self) -> None:
dic: Dict[str, str] = dict()
_line = r"SET(CMAKE_SWIG_OUTDIR ${CMAKE_CURRENT_SOURCE_DIR}/swig_csharp )"
_inserts = [
"if(RDK_BUILD_DESCRIPTORS3D)"
"SET(CMAKE_SWIG_FLAGS \"-DRDK_BUILD_DESCRIPTORS3D\" \"-DRDK_HAS_EIGEN3\" ${CMAKE_SWIG_FLAGS} )",
"endif()",
"if(RDK_BUILD_CAIRO_SUPPORT)",
"SET(CMAKE_SWIG_FLAGS \"-DRDK_BUILD_CAIRO_SUPPORT\" ${CMAKE_SWIG_FLAGS} )",
"endif()",
]
_insert = "\n" + "\n".join(_inserts) + "\n"
dic.update({_line: _insert})
insert_line_after(self._path_csharp_wrapper_CMakeLists_txt, dic, make_backup=True)
_line = r"#include <GraphMol/Descriptors/MolDescriptors.h>"
_inserts = [
"#include <GraphMol/Descriptors/AtomFeat.h>",
"#include <GraphMol/Descriptors/USRDescriptor.h>",
"#include <GraphMol/Depictor/RDDepictor.h>",
"#ifdef RDK_BUILD_DESCRIPTORS3D",
"#include <GraphMol/Descriptors/MolDescriptors3D.h>",
"#endif",
]
_insert = "\n" + "\n".join(_inserts) + "\n"
dic.update({_line: _insert})
_line = r"%include <GraphMol/Descriptors/MQN.h>"
_inserts = [
"%include <GraphMol/Descriptors/AUTOCORR2D.h>",
"%include <GraphMol/Descriptors/AtomFeat.h>",
"%include <GraphMol/Descriptors/USRDescriptor.h>",
"%include <GraphMol/Depictor/RDDepictor.h>",
"#ifdef RDK_HAS_EIGEN3",
"%include <GraphMol/Descriptors/BCUT.h>",
"#endif",
"#ifdef RDK_BUILD_DESCRIPTORS3D",
"%include <GraphMol/Descriptors/CoulombMat.h>",
"%include <GraphMol/Descriptors/EEM.h>",
"%include <GraphMol/Descriptors/PBF.h>",
"%include <GraphMol/Descriptors/RDF.h>",
"%include <GraphMol/Descriptors/MORSE.h>",
"%include <GraphMol/Descriptors/WHIM.h>",
"%include <GraphMol/Descriptors/GETAWAY.h>",
"%include <GraphMol/Descriptors/AUTOCORR3D.h>",
"%include <GraphMol/Descriptors/PMI.h>",
"#endif",
]
_insert = "\n" + "\n".join(_inserts) + "\n"
dic.update({_line: _insert})
insert_line_after(self.path_Descriptors_i, dic, make_backup=True)
dic = dict()
_line = "#include <GraphMol/Descriptors/MQN.h>"
_insert = "#include <GraphMol/Descriptors/BCUT.h>"
dic.update({_line: _insert})
insert_line_after(self.path_MolDescriptors_h, dic, make_backup=True)
def _patch_MolSupplier_i(self):
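        # wrap the ForwardSDMolSupplier %extend block in an ifdef so it is
        # compiled out when boost iostreams support is disabled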
__t0 = "%extend RDKit::ForwardSDMolSupplier {\n"
__t1 = "};\n"
replace_file_string(
self.path_MolSupplier_i,
[
(__t0, "#ifdef RDK_USE_BOOST_IOSTREAMS\n" + __t0),
(__t1, __t1 + "#endif\n"),
],
make_backup=True,
)
def _patch_Streams_i(self):
__t2 = "%extend RDKit::gzstream {\n"
__t3 = "%include <../RDStreams/streams.h>"
replace_file_string(
self.path_Streams_i,
[
(__t2, "#ifdef RDK_USE_BOOST_IOSTREAMS\n" + __t2),
(__t3, "#endif\n" + __t3),
],
make_backup=True,
)
def _patch_i_files(self):
if self.config.target_lang == LangType.CSharp:
if self.config.more_functions:
self._patch_GraphMolCSharp_i()
self._patch_MolDraw2D_i()
self._patch_MolDraw2D_h()
if self.config.more_functions:
self._patch_MolDescriptors_h()
self._patch_MolSupplier_i()
self._patch_Streams_i()
def _make_rdkit_cmake(self) -> Sequence[str]:
cmd: List[str] = self._get_cmake_rdkit_cmd_line()
if get_os() == "win":
cmd = [a.replace("\\", "/") for a in cmd]
call_subprocess(cmd)
return cmd
def _get_cmake_rdkit_cmd_line(self) -> List[str]:
def f_test() -> str:
return to_on_off(self.config.test_enabled)
def f_boost() -> str:
return to_on_off(self.config.use_boost)
def f_no_limit_external() -> str:
return to_on_off(not self.config.limit_external)
args = [f"{str(self.rdkit_path)}"]
args += ["-Wdev"]
args += self.g_option_of_cmake
if self.config.target_lang == LangType.CPlusPlus:
args += [
"-DRDK_BUILD_SWIG_WRAPPERS=OFF",
"-DRDK_BUILD_SWIG_CSHARP_WRAPPER=OFF",
"-DRDK_BUILD_SWIG_JAVA_WRAPPER=OFF",
"-DRDK_BUILD_PYTHON_WRAPPERS=OFF",
]
elif self.config.target_lang == LangType.CSharp:
args += [
"-DRDK_BUILD_SWIG_WRAPPERS=ON",
"-DRDK_BUILD_SWIG_CSHARP_WRAPPER=ON",
"-DRDK_BUILD_SWIG_JAVA_WRAPPER=OFF",
"-DRDK_BUILD_PYTHON_WRAPPERS=OFF",
]
elif self.config.target_lang == LangType.Java:
args += [
"-DRDK_BUILD_SWIG_WRAPPERS=ON",
"-DRDK_BUILD_SWIG_CSHARP_WRAPPER=OFF",
"-DRDK_BUILD_SWIG_JAVA_WRAPPER=ON",
"-DRDK_BUILD_PYTHON_WRAPPERS=OFF",
]
else:
raise RuntimeError(f"Not supported. {self.config.target_lang}")
if self.config.boost_path:
args += [
f"-DBOOST_ROOT={str(self.boost_path)}",
f"-DBOOST_INCLUDEDIR={str(self.boost_path)}",
f"-DBOOST_LIBRARYDIR={str(self.boost_bin_path)}",
]
if self.config.eigen_path:
args += [f"-DEIGEN3_INCLUDE_DIR={str(self.eigen_path)}"]
if self.config.zlib_path:
            zlib_lib_path = self.zlib_lib_path
args += [
f'-DZLIB_LIBRARIES="{zlib_lib_path}"',
f'-DZLIB_INCLUDE_DIRS="{self.zlib_path}"',
]
if self.config.cairo_support:
if self.config.cairo_path:
cairo_lib_path = (
self.cairo_path / "vc2017" / self.ms_build_platform / "Release" / "cairo.lib"
)
args += [
f'-DCAIRO_INCLUDE_DIRS={self.cairo_path / "src"}',
f"-DCAIRO_LIBRARIES={cairo_lib_path}",
]
args += [
"-DRDK_INSTALL_INTREE=ON",
f"-DRDK_BUILD_CPP_TESTS={f_test()}",
f"-DRDK_USE_BOOST_SERIALIZATION={f_boost()}",
f"-DRDK_USE_BOOST_IOSTREAMS={f_boost()}",
f"-DRDK_USE_BOOST_REGEX={f_boost()}",
"-DBoost_NO_BOOST_CMAKE=ON",
f"-DRDK_BUILD_COORDGEN_SUPPORT={f_no_limit_external()}",
f"-DRDK_BUILD_MAEPARSER_SUPPORT={f_no_limit_external()}",
"-DRDK_OPTIMIZE_POPCNT=ON",
f"-DRDK_BUILD_FREESASA_SUPPORT={f_no_limit_external()}",
f"-DRDK_BUILD_CAIRO_SUPPORT={to_on_off(self.config.cairo_support)}",
f"-DRDK_BUILD_FREETYPE_SUPPORT={to_on_off(self.config.cairo_support)}",
"-DRDK_BUILD_THREADSAFE_SSS=ON",
f"-DRDK_BUILD_INCHI_SUPPORT={f_no_limit_external()}",
f"-DRDK_BUILD_AVALON_SUPPORT={f_no_limit_external()}",
# do not install comic fonts because of incorrect md5 checksum.
# see https://salsa.debian.org/debichem-team/rdkit/-/commit/15da2bc1796c507e0c3afa36eecfc1961d16c13e # NOQA
"-DRDK_INSTALL_COMIC_FONTS=OFF",
f"-DRDK_BUILD_TEST_GZIP={f_test()}",
]
if self.get_rdkit_version() >= 2020091:
            # the following is needed from release 2020_09_1 onwards
args += [
"-DRDK_USE_URF=ON",
]
if get_os() == "win":
if self.config.use_static_libs:
args += [
"-DRDK_SWIG_STATIC=ON",
"-DRDK_INSTALL_STATIC_LIBS=ON",
"-DBOOST_USE_STATIC_LIBS=ON",
"-DRDL_WIN_STATIC=ON",
]
else:
args += [
"-DRDK_SWIG_STATIC=OFF",
"-DRDK_INSTALL_STATIC_LIBS=OFF",
"-DRDK_INSTALL_DLLS_MSVC=ON",
]
if get_os() == "linux":
if self.config.use_static_libs:
args += [
"-DRDK_SWIG_STATIC=ON",
"-DRDK_INSTALL_STATIC_LIBS=ON",
"-DBOOST_LIBRARYDIR=/usr/lib/x86_64-linux-gnu",
"-DBOOST_ROOT=/usr",
"-DBOOST_USE_STATIC_LIBS=ON",
]
else:
args += [
"-DRDK_SWIG_STATIC=OFF",
"-DRDK_INSTALL_STATIC_LIBS=OFF",
]
if self.get_rdkit_version() >= 2020091:
            # freetype support starts from 2020_09_1
if self.config.freetype_path:
freetype_lib_path = (
self.freetype_path
/ "objs"
/ self.ms_build_platform
/ "Release"
/ "freetype.lib"
)
freetype_include_path = self.freetype_path / "include"
args += [
f"-DFREETYPE_LIBRARY={freetype_lib_path}",
f"-DFREETYPE_INCLUDE_DIRS={freetype_include_path}",
]
return ["cmake"] + args
def get_RDKFuncs_dll_path(self) -> Path:
a: Path
a = self.rdkit_build_path / "Code" / "JavaWrappers" / "csharp_wrapper"
if get_os() == "win":
a = a / "Release" / "RDKFuncs.dll"
elif get_os() == "linux":
a = a / "RDKFuncs.so"
else:
raise RuntimeError
return a
def _copy_dlls(self) -> None:
assert self.build_platform
dll_dest_path = self.rdkit_wrapper_path / get_os() / self.build_platform
remove_if_exist(dll_dest_path)
os.makedirs(dll_dest_path)
logging.info(f"Copy DLLs to {dll_dest_path}.")
files_to_copy: List[Union[str, PathLike]] = []
if self.config.target_lang == LangType.CSharp:
files_to_copy.append(self.get_RDKFuncs_dll_path())
# pick up dependent DLLs in buildlinux*CSharp/lib or buildwin*CSharp\bin\Release
if get_os() == "win":
lib_path = self.rdkit_build_path / "bin" / "Release"
for file_path in lib_path.glob("*.dll"):
files_to_copy.append(file_path)
elif get_os() == "linux":
lib_path = self.rdkit_build_path / "lib"
for file_path in lib_path.glob("*.so.1"):
files_to_copy.append(file_path)
else:
raise RuntimeError
if not self.config.use_static_libs:
if get_os() == "win":
files_to_copy.append(self.zlib_path / self.build_dir_name / "Release" / "zlib.dll")
# Pick BOOST Dlls
if self.config.use_boost:
# DLLs of boost.
for file_path in self.boost_bin_path.glob("*.dll"):
if re.match(r".*\-vc\d\d\d\-mt\-x(32|64)\-\d_\d\d\.dll", file_path.name):
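                        # matches toolset-tagged names such as
                        # boost_serialization-vc142-mt-x64-1_74.dll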
# boost_python-vc###-mt-x##-#_##.dll is not needed.
if file_path.name.startswith("boost_python"):
continue
files_to_copy.append(file_path)
# DLLs of freetype.
if self.config.freetype_support and self.get_rdkit_version() >= 2020091:
files_to_copy.append(
self.freetype_path
/ "objs"
/ self.ms_build_platform
/ "Release"
/ "freetype.dll"
)
# DLLs of cairo.
if self.config.cairo_support:
files_to_copy += [
self.libpng_path / self.build_dir_name / "Release" / "libpng16.dll",
self.pixman_path
/ "vc2017"
/ self.ms_build_platform
/ "Release"
/ "pixman.dll",
self.cairo_path
/ "vc2017"
/ self.ms_build_platform
/ "Release"
/ "cairo.dll",
]
# Copy files.
for path in files_to_copy:
shutil.copy2(path, dll_dest_path)
def build_wrapper(self) -> None:
if self.config.target_lang == LangType.CSharp:
self._patch_rdkit_swig_created_files()
self._prepare_RDKitDotNet_folder()
self._copy_test_projects()
self._build_RDKit2DotNet()
elif self.config.target_lang == LangType.Java:
_curdir = os.path.abspath(os.curdir)
os.chdir(self.rdkit_build_path)
try:
cmd = ["make", "-j", "GraphMolWrapJar"]
call_subprocess(cmd)
finally:
os.chdir(_curdir)
else:
pass
def _patch_rdkit_swig_created_files(self) -> None:
        # Customize the following if required.
if self.config.swig_patch_enabled:
swig_patches: List[Tuple[Path, Sequence[Tuple[str, str]]]] = []
if self.get_rdkit_version() < 2021032:
swig_patches += [
(
                        # rewrite BOOST_BINARY(...) as a binary literal, e.g. BOOST_BINARY(101) -> 0b101.
self.rdkit_swig_csharp_path / "PropertyPickleOptions.cs",
[("BOOST_BINARY\\(\\s*([01]+)\\s*\\)", "0b\\1")],
),
(
                        # remove duplicated methods.
self.rdkit_swig_csharp_path / "RDKFuncs.cs",
[
(
"public static double DiceSimilarity\\([^\\}]*\\."
"DiceSimilarity__SWIG_(12|13|14)\\([^\\}]*\\}",
"",
)
],
),
]
swig_patches += [
(
self.rdkit_swig_csharp_path / "CXSmilesFields.cs",
[
(
"std\\:\\:numeric_limits\\<\\s*std\\:\\:int32_t\\s*\\>\\:\\:max\\(\\)",
"0x7fffffff",
)
],
)
]
for filepath, patterns in swig_patches:
replace_file_string(filepath, patterns, make_backup=False)
for filepath, patterns in (
(
self.rdkit_swig_csharp_path / "RDKFuncsPINVOKE.cs",
[("(partial )?class RDKFuncsPINVOKE\\s*\\{", "partial class RDKFuncsPINVOKE {")],
),
(
self.rdkit_swig_csharp_path / "RDKFuncsPINVOKE.cs",
[
(
"static SWIGExceptionHelper\\(\\)\\s*\\{",
"static SWIGExceptionHelper() { RDKFuncsPINVOKE.LoadDll();",
)
],
),
):
replace_file_string(filepath, patterns, make_backup=False)
shutil.copy2(
self.this_path / "files" / "rdkit" / "RDKFuncsPINVOKE_Loader.cs",
self.rdkit_swig_csharp_path,
)
def _prepare_RDKitDotNet_folder(self):
remove_if_exist(self.path_RDKit2DotNet_folder)
shutil.copytree(
self.this_path / "files" / "rdkit" / "RDKit2DotNet",
self.rdkit_wrapper_path / "RDKit2DotNet",
)
path_RDKit2DotNet_csproj = self.path_RDKit2DotNet_folder / "RDKit2DotNet.csproj"
rdkit_dotnetwrap_version = self.get_version_for_rdkit_dotnetwrap()
tree = load_msbuild_xml(path_RDKit2DotNet_csproj)
project = tree.getroot()
prop_grp = list(get_elems(project, "PropertyGroup"))[0]
asm_ver = get_elem(prop_grp, "AssemblyVersion")
asm_ver.text = rdkit_dotnetwrap_version
file_ver = get_elem(prop_grp, "FileVersion")
file_ver.text = rdkit_dotnetwrap_version
property_group = SubElement(project, "PropertyGroup")
sign_assembly = SubElement(property_group, "SignAssembly")
sign_assembly.text = "true"
assembly_originator_key_file = SubElement(property_group, "AssemblyOriginatorKeyFile")
assembly_originator_key_file.text = "rdkit2dotnet.snk"
        # below is only for convenience when running the test projects
item_group = SubElement(project, "ItemGroup")
for cpu_model in typing.get_args(CpuModel):
for filename in glob.glob(
str(self.rdkit_wrapper_path / get_os() / cpu_model / "*.dll")
):
dllbasename = os.path.basename(filename)
content = SubElement(item_group, "None")
path_to_dll = f"..\\\\{get_os()}\\\\{cpu_model}\\\\{dllbasename}"
link_to_dll = f"runtimes\\\\{get_os()}-{cpu_model}\\\\native\\\\{dllbasename}"
content.attrib["Include"] = path_to_dll
content.attrib["Link"] = link_to_dll
copy_to_output_directory = SubElement(content, "CopyToOutputDirectory")
copy_to_output_directory.text = "PreserveNewest"
tree.write(path_RDKit2DotNet_csproj, "utf-8", True)
@property
def test_csprojects(self) -> Collection[str]:
return (
"RDKit2DotNetTest",
"RDKit2DotNetTest2",
"NuGetExample",
"NuGetExample2",
)
@property
def test_sln_names(self) -> Collection[str]:
return (
"RDKit2DotNet.sln",
"NuGetExample.sln",
)
def _copy_test_projects(self) -> None:
path_rdkit_files = self.this_path / "files" / "rdkit"
for name in self.test_csprojects:
remove_if_exist(self.rdkit_wrapper_path / name)
shutil.copytree(
path_rdkit_files / name,
self.rdkit_wrapper_path / name,
dirs_exist_ok=True,
)
proj_path = self.rdkit_wrapper_path / name / f"{name}.csproj"
tree = load_msbuild_xml(proj_path)
project = tree.getroot()
for item_grp in get_elems(project, "ItemGroup"):
for pkg_ref in get_elems(item_grp, "PackageReference"):
if (
"Include" in pkg_ref.attrib
and pkg_ref.attrib["Include"] == "RDKit.DotNetWrap"
):
pkg_ref.attrib["Version"] = self.get_version_for_nuget()
tree.write(proj_path, "utf-8", True)
for name in self.test_sln_names:
shutil.copy2(path_rdkit_files / name, self.rdkit_wrapper_path)
print(f"Test slns {self.test_sln_names} are created in {self.rdkit_wrapper_path}.")
print("RDKit2DotNetTest: .NET 5.0 example.")
print("RDKit2DotNetTest2: .NET Framework 4 example.")
print("NuGetExample: NuGet package example for .NET 5.0.")
print("NuGetExample2: NuGet package example for .NET Framework 4.")
def _build_RDKit2DotNet(self) -> None:
_pushd_build_wrapper = os.getcwd()
try:
os.chdir(self.path_RDKit2DotNet_folder)
call_subprocess(["dotnet", "restore"])
call_subprocess(
["dotnet", "build", "RDKit2DotNet.csproj", "/t:Build", "/p:Configuration=Release"]
)
finally:
os.chdir(_pushd_build_wrapper)
def build_nuget_package(self) -> None:
dll_basenames_dic = self._make_dll_basenames_dic()
self._prepare_nuspec_file(dll_basenames_dic)
self._prepare_targets_file(dll_basenames_dic)
self._build_nupkg()
def _make_dll_basenames_dic(self) -> Mapping[str, Mapping[str, Sequence[str]]]:
dll_basenames_dic: Dict[str, Dict[str, List[str]]] = dict()
for _os in typing.get_args(SupportedSystem):
if _os not in dll_basenames_dic:
dll_basenames_dic[_os] = dict()
for cpu_model in typing.get_args(CpuModel):
dlls_path = self.rdkit_wrapper_path / _os / cpu_model
dll_basenames: List[str] = []
for filename in glob.glob(str(dlls_path / "*.*")):
dll_basenames.append(os.path.basename(filename))
dll_basenames_dic[_os][cpu_model] = dll_basenames
if get_os() == "win":
assert dll_basenames_dic["win"]["x86"]
assert dll_basenames_dic["win"]["x64"]
if get_os() == "linux":
assert dll_basenames_dic["linux"]["x64"]
assert not dll_basenames_dic["linux"]["x86"]
return dll_basenames_dic
def get_description_for_nuget(self) -> str:
s = f".NET binding of RDKit Release_{self.get_version_for_rdkit()}."
if get_os() == "linux":
s += " Supports Linux (x64)."
else:
s += " Supports Windows (x86 and x64) and Linux (x64)."
return s
def get_lib_versios(self) -> Sequence[str]:
lib_versions: List[str] = []
lib_versions.append(f"Eigen {self.get_version_for_eigen()}")
if get_os() == "win":
lib_versions.append(f"zlib {self.get_version_for_zlib()}")
if self.config.use_boost:
lib_versions.append(f"Boost {self.get_version_for_boost()}")
if self.config.freetype_support:
lib_versions.append(f"FreeType {self.get_version_for_freetype()}")
if self.config.cairo_support:
lib_versions += [
f"libpng {self.get_version_for_libpng()}",
f"pixman {self.get_version_for_pixman()}",
f"cairo {self.get_version_for_cairo()}",
]
return lib_versions
def get_releaseNotes(self) -> str:
s: str = "This release uses "
s += ", ".join(self.get_lib_versios())
if get_os() != "win":
s += f" built using Visual Studio {self.get_version_for_rdkit()}"
s += " for Windows build"
s += "."
return s
def _prepare_nuspec_file(
self, dll_basenames_dic: Mapping[str, Mapping[str, Sequence[str]]]
) -> None:
origin_file = self.this_path / "files" / "rdkit" / f"{project_name}.nuspec"
nuspec_file = shutil.copy2(origin_file, self.rdkit_wrapper_path / "RDKit2DotNet")
tree: ElementTree = load_nuspec_xml(nuspec_file)
root = tree.getroot()
ns = "http://schemas.microsoft.com/packaging/2010/07/nuspec.xsd"
metadata = get_elem(root, "metadata", ns)
description = get_elem(metadata, "description", ns)
description.text = self.get_description_for_nuget()
version = get_elem(metadata, "version", ns)
version.text = self.get_version_for_nuget()
releaseNotes = get_elem(metadata, "releaseNotes", ns)
releaseNotes.text = self.get_releaseNotes()
files = get_elem(root, "files", ns)
for _os in typing.get_args(SupportedSystem):
for cpu_model in typing.get_args(CpuModel):
for dll_basename in dll_basenames_dic[_os][cpu_model]:
file_element = SubElement(files, "file")
file_element.attrib["src"] = f"../{_os}/{cpu_model}/{dll_basename}"
file_element.attrib[
"target"
] = f"runtimes/{_os}-{cpu_model}/native/{dll_basename}"
tree.write(nuspec_file, "utf-8", True)
def _prepare_targets_file(
self, dll_basenames_dic: Mapping[str, Mapping[str, Sequence[str]]]
) -> None:
origin_file = self.this_path / "files" / "rdkit" / f"{project_name}.targets"
targets_file = shutil.copy2(origin_file, self.rdkit_wrapper_path / "RDKit2DotNet")
tree: ElementTree = load_msbuild_xml(targets_file)
project = tree.getroot()
non_net = (
"!$(TargetFramework.Contains('netstandard')) "
"And !$(TargetFramework.Contains('netcoreapp')) "
"And !$(TargetFramework.Contains('net5.'))"
"And !$(TargetFramework.Contains('net6.'))"
)
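        # restricts the ItemGroups below to classic .NET Framework targets
        # (anything that is not netstandard/netcoreapp/net5/net6)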
_os = "win"
ig: Element
for cpu_model in typing.get_args(CpuModel):
ig = SubElement(project, "ItemGroup")
ig.attrib["Condition"] = f"{non_net} And '$(Platform)' == '{cpu_model}'"
for dllname in dll_basenames_dic[_os][cpu_model]:
none = SubElement(ig, "None")
none.attrib[
"Include"
] = f"$(MSBuildThisFileDirectory)../runtimes/{_os}-{cpu_model}/native/{dllname}"
link = SubElement(none, "Link")
link.text = dllname
copy_to = SubElement(none, "CopyToOutputDirectory")
copy_to.text = "PreserveNewest"
ig = SubElement(project, "ItemGroup")
ig.attrib["Condition"] = f"{non_net} And '$(Platform)' == 'AnyCPU'"
_os = "win"
for cpu_model in typing.get_args(CpuModel):
for dllname in dll_basenames_dic[_os][cpu_model]:
none = SubElement(ig, "None")
none.attrib[
"Include"
] = f"$(MSBuildThisFileDirectory)../runtimes/{_os}-{cpu_model}/native/{dllname}"
link = SubElement(none, "Link")
link.text = f"runtimes/{_os}-{cpu_model}/native/{dllname}"
copy_to = SubElement(none, "CopyToOutputDirectory")
copy_to.text = "PreserveNewest"
tree.write(targets_file, "utf-8", True)
def _build_nupkg(self) -> None:
        _curr_dir = os.path.abspath(os.curdir)  # os.curdir alone is just ".", which would not restore the directory
os.chdir(self.rdkit_wrapper_path / "RDKit2DotNet")
try:
cmd = [
"dotnet",
"pack",
"RDKit2DotNet.csproj",
f"-p:NuspecFile={project_name}.nuspec",
"/p:Configuration=Release",
]
call_subprocess(cmd)
finally:
os.chdir(_curr_dir)
def clean_zlib(self) -> None:
if self.config.zlib_path:
for p in typing.get_args(CpuModel):
remove_if_exist(self.zlib_path / "zconf.h")
remove_if_exist(self.zlib_path / f"build{p}")
def clean_rdkit(self) -> None:
if self.config.rdkit_path:
for path in self.bakable_files:
restore_from_bak(path)
# for C#
for p in (
[
f"{project_name}.nuspec",
f"{project_name}.targets",
"swig_csharp",
"Properties",
"packages",
]
+ list(typing.get_args(SupportedSystem))
+ list(self.test_csprojects)
+ list(self.test_sln_names)
):
remove_if_exist(self.rdkit_wrapper_path / p)
# for native libs
remove_if_exist(self.rdkit_path / "lib")
# build dir
for _os in typing.get_args(SupportedSystem):
for p in typing.get_args(CpuModel):
remove_if_exist(self.rdkit_path / f"build{_os}{p}CSharp")
remove_if_exist(self.rdkit_path / f"build{_os}{p}java")
remove_if_exist(self.rdkit_path / f"build{_os}{p}cpp")
# libs for copy dlls
for wrapper_name in ("gmwrapper", "csharp_wrapper"):
for _os in typing.get_args(SupportedSystem):
dir = self.rdkit_path / "Code" / "JavaWrappers" / wrapper_name / _os
remove_if_exist(dir)
# for java
dir = self.rdkit_path / "Code" / "JavaWrappers" / "gmwrapper" / "doc"
remove_if_exist(dir)
for name in ("build", "build-test"):
dir = self.rdkit_path / "Code" / "JavaWrappers" / "gmwrapper" / name
remove_if_exist(dir)
for name in ("src", "src-test"):
dir = self.rdkit_path / "Code" / "JavaWrappers" / "gmwrapper" / name / "org" / "RDKit"
remove_by_pattern(dir, "\\.gitignore", delete_on_match=False)
dir = self.rdkit_path / "Code" / "JavaWrappers" / "gmwrapper"
remove_by_pattern(dir, ".*\\.jar$", delete_on_match=True)
def clean(self) -> None:
self.clean_rdkit()
if self.config.freetype_path:
for __cpu_model in typing.get_args(CpuModel):
remove_if_exist(self.freetype_path / "objs" / _platform_to_ms_form[__cpu_model])
self.clean_zlib()
if self.config.libpng_path:
for p in typing.get_args(CpuModel):
remove_if_exist(self.libpng_path / f"build{p}")
if self.config.pixman_path:
remove_if_exist(self.pixman_path / "vc2017")
if self.config.cairo_path:
remove_if_exist(self.cairo_path / "vc2017")
def config_file_to_map(path: Path) -> Mapping[str, str]:
    # expects KEY=VALUE lines, e.g. "RDKIT_DIR=C:\\RDKit"; blank lines are skipped
    dic: Dict[str, str] = dict()
    with open(path, "r") as f:
        for line in f.readlines():
            line = line.strip()
            if not line:
                continue
            splitted_line = line.split("=")
            if len(splitted_line) != 2:
                raise RuntimeError(f"Invalid: {line} in {path}")
            dic[splitted_line[0]] = splitted_line[1]
    return dic
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--build_platform", default="all", choices=list(typing.get_args(CpuModel)) + ["all"]
)
parser.add_argument(
"--target_lang", default="csharp", choices=("csharp", "java", "cpp")
)
for opt in (
"build_zlib",
"build_libpng",
"build_pixman",
"build_freetype",
"build_cairo",
"build_rdkit",
"build_wrapper",
"build_nuget",
"build_cmake",
"disable_swig_patch",
"use_boost",
"limit_external",
"no_cairo",
"no_freetype",
"use_static_libs",
"clean",
"clean_zlib",
"clean_rdkit",
"build_rdkit_only",
"show_cmake",
"enable_test",
"more_functions",
):
parser.add_argument(f"--{opt}", default=False, action="store_true")
args = parser.parse_args()
# x86 is supported only for Windows
if get_os() == "linux" and args.build_platform == "x86":
raise RuntimeError("x86 is not supported for Linux system.")
if get_os() == "linux" and (args.build_platform == "all" or not args.build_platform):
args.build_platform = "x64"
default_config: Mapping[str, str] = config_file_to_map(Path("config.txt"))
config = create_config(args, default_config)
if args.target_lang == "csharp":
config.target_lang = LangType.CSharp
if args.target_lang == "cpp":
config.target_lang = LangType.CPlusPlus
if args.target_lang == "java":
config.target_lang = LangType.Java
config.test_enabled = args.enable_test
config.more_functions = args.more_functions
curr_dir = os.getcwd()
try:
if args.clean:
NativeMaker(config).clean()
else:
if args.clean_zlib:
NativeMaker(config).clean_zlib()
if args.clean_rdkit:
NativeMaker(config).clean_rdkit()
for cpu_model in (
typing.get_args(CpuModel) if args.build_platform == "all" else [args.build_platform]
):
maker = NativeMaker(config, cpu_model)
if args.build_freetype:
maker.make_freetype()
if args.build_zlib:
maker.make_zlib()
if args.build_libpng:
maker.make_libpng()
if args.build_pixman:
maker.make_pixman()
if args.build_cairo:
maker.make_cairo()
if args.show_cmake:
cmd = maker.build_cmake_rdkit()
print(" ".join([(('"' + s + '"') if (" " in s or '"' in s) else s) for s in cmd]))
if args.build_cmake:
maker.build_cmake_rdkit()
if args.build_rdkit_only:
maker.build_rdkit()
maker.copy_rdkit_dlls()
if args.build_rdkit:
maker.build_cmake_rdkit()
maker.build_rdkit()
maker.copy_rdkit_dlls()
# if required x64 is used as platform
maker = NativeMaker(config)
if args.build_wrapper:
maker.build_wrapper()
if args.build_nuget:
maker.build_nuget_package()
finally:
os.chdir(curr_dir)
def create_config(args: argparse.Namespace, config_info: Mapping[str, str]) -> Config:
def get_value(env: str) -> Optional[str]:
value: Optional[str]
if env in config_info:
value = config_info[env]
else:
value = get_value_from_env(env)
return value
def path_from_ini(env: str) -> Optional[Path]:
value: Optional[str] = get_value(env)
if value is None:
return None
path = Path(value)
if not path.is_absolute():
path = here / value
return path
def int_from_int(env: str, default: int) -> int:
value: Optional[str] = get_value(env)
if value is None:
return default
return int(value)
config = Config()
config.minor_version = int_from_int("MINOR_VERSION", 1)
config.swig_patch_enabled = not args.disable_swig_patch
config.use_boost = args.use_boost
config.cairo_support = not args.no_cairo
config.freetype_support = not args.no_freetype
config.limit_external = args.limit_external
if config.limit_external:
config.cairo_support = False
config.freetype_support = False
config.this_path = here
config.use_static_libs = args.use_static_libs
config.rdkit_path = path_from_ini("RDKIT_DIR")
if get_os() == "win":
        # These paths are only for Windows.
config.boost_path = path_from_ini("BOOST_DIR")
config.zlib_path = path_from_ini("ZLIB_DIR")
config.libpng_path = path_from_ini("LIBPNG_DIR")
config.pixman_path = path_from_ini("PIXMAN_DIR")
config.freetype_path = path_from_ini("FREETYPE_DIR")
config.cairo_path = path_from_ini("CAIRO_DIR")
config.eigen_path = path_from_ini("EIGEN_DIR")
config.test_enabled = False
return config
if __name__ == "__main__":
main()
# ===== pkg/win/build_pillow-simd.py (repo: milkey-mouse/swood.exe, license: MIT) =====
import subprocess
import requests
import tempfile
import tarfile
import shutil
import sys
import os
if os.name != "nt":
print("Error: Can only build pillow-simd on a Windows system")
repo_url = "https://api.github.com/repos/uploadcare/pillow-simd/tags"
tarball_url = requests.get(repo_url).json()[0]["tarball_url"]
r = requests.head(tarball_url, allow_redirects=True)
for x in r.headers["content-disposition"].split(";"):
if "filename=" in x:
tarball_fn = os.path.join(tempfile.gettempdir(), x.strip()[9:])
if not os.path.isfile(tarball_fn):
    for fp in os.scandir(tempfile.gettempdir()):  # cached tarballs live in the temp dir
if "pillow-simd" in fp.name and fp.name.endswith(".tar.gz"):
print("Removing outdated tarball {}".format(fp.name))
os.remove(fp.path)
print("Downloading tarball {}...".format(tarball_fn))
r = requests.get(tarball_url, stream=True)
with open(tarball_fn, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
print("Using cached tarball {}".format(tarball_fn))
if os.path.isdir("pillow-simd"):
shutil.rmtree("pillow-simd")
with tarfile.open(tarball_fn) as pkg_tar:
for fp in pkg_tar:
if fp.isfile():
outp = os.path.join("pillow-simd", *fp.name.split("/")[1:])
print("{} -> {}".format(fp.name, outp))
os.makedirs(os.path.dirname(outp), exist_ok=True)
with open(outp, "wb") as out:
out.write(pkg_tar.extractfile(fp).read())
# no easy way to specify on the command line to not build default libs
with open("pillow-simd/setup.py") as infile, open("pillow-simd/setup.py.tmp", "w") as outfile:
for line in infile:
if line.startswith(" required ="):
outfile.write(" required = set()\n")
else:
            # the Visual C++ compiler seems to enable SSE4 as well when SSE2 is enabled
outfile.write(line.replace("-msse4", "/arch:SSE2"))
shutil.move("pillow-simd/setup.py.tmp", "pillow-simd/setup.py")
owd = os.getcwd()
os.chdir("pillow-simd")
subprocess.run([sys.executable, "setup.py", "build"], check=True)
os.chdir(owd)
for fp in os.scandir("pillow-simd/build"):
if fp.is_dir() and fp.name.startswith("lib."):
build_dir = fp.path
break
bitness = 64 if "amd64" in os.path.basename(build_dir) else 32
with tarfile.open("pillow-simd-{}bit.tar.gz".format(bitness), "w") as out_tar:
for root, dirs, files in os.walk(build_dir):
for fp in (os.path.join(root, p) for p in files):
outp = os.path.relpath(fp, build_dir)
print("{} -> {}".format(fp, outp))
out_tar.add(fp, outp)
shutil.rmtree("pillow-simd")
| 36.453333 | 94 | 0.63387 | 408 | 2,734 | 4.183824 | 0.377451 | 0.082015 | 0.016403 | 0.039836 | 0.056825 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007867 | 0.209583 | 2,734 | 74 | 95 | 36.945946 | 0.782045 | 0.059985 | 0 | 0.065574 | 0 | 0 | 0.208106 | 0.028059 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.114754 | 0 | 0.114754 | 0.098361 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7faef27d00dc089e93362f99e786f4406c15fa41 | 559 | py | Python | irco/migrations/versions/52b38830e4f1_add_ambiguous_affiliation_flag.py | GaretJax/irco | e5df3cf1a608dc813011a1ee7e920637e5bd155c | [
"MIT"
] | null | null | null | irco/migrations/versions/52b38830e4f1_add_ambiguous_affiliation_flag.py | GaretJax/irco | e5df3cf1a608dc813011a1ee7e920637e5bd155c | [
"MIT"
] | null | null | null | irco/migrations/versions/52b38830e4f1_add_ambiguous_affiliation_flag.py | GaretJax/irco | e5df3cf1a608dc813011a1ee7e920637e5bd155c | [
"MIT"
] | 1 | 2015-12-17T19:18:28.000Z | 2015-12-17T19:18:28.000Z | """add ambiguous affiliation flag
Revision ID: 52b38830e4f1
Revises: None
Create Date: 2014-08-23 20:08:30.702574
"""
# revision identifiers, used by Alembic.
revision = '52b38830e4f1'
down_revision = None
from alembic import op # NOQA
import sqlalchemy as sa # NOQA
def upgrade():
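    # server_default is required so that existing rows satisfy the new
    # NOT NULL constraint when the column is added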
op.add_column(
'publication',
sa.Column('has_ambiguous_affiliations', sa.Boolean(), nullable=False,
server_default=sa.sql.expression.false())
)
def downgrade():
op.drop_column('publication', 'has_ambiguous_affiliations')
| 20.703704 | 77 | 0.699463 | 68 | 559 | 5.632353 | 0.647059 | 0.088773 | 0.125326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084071 | 0.191413 | 559 | 26 | 78 | 21.5 | 0.763274 | 0.288014 | 0 | 0 | 0 | 0 | 0.221649 | 0.134021 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# ===== DSN/config_DSN.py (repo: JerrySchonenberg/DSN, license: MIT) =====
# Configure the DSN based on the velocities of the commands
import configparser
import os
import time
import tensorflow as tf
import subprocess
import numpy as np
import cv2
import sys
import math
import matplotlib.pyplot as plt
from typing import Tuple
sys.path.append("../coppelia_sim")
from API_coppeliasim import CoppeliaSim
from PIL import Image
PATH_EXEC = './coppeliaSim.sh' #symbolic link
COMMAND_INIT = '../config/commands.ini'
VELOCITY = [] #velocity per command, as defined in commands.ini
CS_INIT = '../config/coppeliasim.ini'
HANDLE_NAME = [] #name of the handles
CONFIG_OUT = '../config/DSN.ini'
IMAGES = [] #store all images to compute the angles and zoom from
RESOLUTION_CONFIG = -1
RESOLUTION_ACTUAL = -1
ITER = int(sys.argv[1]) #how many times should every command be handled
#initialize the commands from a configuration file
def command_init() -> None:
config = configparser.ConfigParser()
config.read(COMMAND_INIT)
backwards = True #skip backwards command
for section in config.sections():
if backwards == False:
VELOCITY.append([int(config[section]['leftmotor']), int(config[section]['rightmotor'])])
else:
backwards = False
#start the configuration scene on coppeliasim
def scene_init() -> Tuple[str, str, int]:
config = configparser.ConfigParser()
config.read(CS_INIT)
scene = config['COM']['scene']
address = config['COM']['address']
port = int(config['COM']['port'])
for i in config['HANDLES']:
HANDLE_NAME.append(config.get('HANDLES', i))
global RESOLUTION_CONFIG
RESOLUTION_CONFIG = int(config['IMAGE']['resolution_config'])
global RESOLUTION_ACTUAL
RESOLUTION_ACTUAL = int(config['IMAGE']['resolution_actual'])
return scene, address, port
#get image from coppeliasim robot
def retrieve_image(CS: CoppeliaSim) -> np.ndarray:
resolution, img_list = CS.get_image()
img = np.array(img_list, dtype=np.uint8)
img.resize([resolution[0], resolution[1], 3]) #convert into right format
img = np.flipud(img) #vertically flip img
return img
#write results to configuration file (.ini)
def write_config_init(dx: list, dy: list, DSN_variant: int, tau: float) -> None:
config_command = configparser.ConfigParser()
config_command.read(COMMAND_INIT)
config_DSN = configparser.ConfigParser()
config_DSN['GENERAL'] = {'variant' : str(DSN_variant),
'tau' : str(tau)}
i = 0
backwards = True #used to skip backwards command
for command in config_command.sections():
if backwards == False:
config_DSN[command] = {'shift' : str(dx[i]),
'zoom' : str(dy[i])}
i += 1
else:
backwards = False
with open(CONFIG_OUT, 'w') as configfile:
config_DSN.write(configfile)
#use AKAZE for feature point detection
def AKAZE(DSN_variant: int, tau: float) -> None:
pixel_ratio = RESOLUTION_CONFIG / RESOLUTION_ACTUAL
dx = [0] * len(VELOCITY) #contains amount of horizontal pixels to be shifted
dy = [0] * len(VELOCITY) #same as dx, but for vertical pixels
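    # For each command, dx accumulates the mean horizontal displacement of the
    # matched keypoints (the image shift) and dy the mean vertical displacement
    # in the upper image half (the zoom); both are averaged over ITER runs and
    # written out as the per-command "shift" and "zoom" values.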
for i in range(ITER):
for command in range(len(VELOCITY)):
temp_dx, temp_dy = 0, 0
list_kp1, list_kp2 = [], []
            # convert both frames to grayscale so descriptors are computed consistently
            cv_img1 = cv2.cvtColor(IMAGES[i*len(VELOCITY)+command], cv2.COLOR_RGB2GRAY)
            cv_img2 = cv2.cvtColor(IMAGES[i*len(VELOCITY)+command+1], cv2.COLOR_RGB2GRAY)
#AKAZE feature point detection and matching
akaze = cv2.AKAZE_create()
img1_kp, img1_ds = akaze.detectAndCompute(cv_img1, None)
img2_kp, img2_ds = akaze.detectAndCompute(cv_img2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
img1_ds = np.float32(img1_ds)
img2_ds = np.float32(img2_ds)
matches = flann.knnMatch(img1_ds, img2_ds, 2)
#need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in range(len(matches))]
            # ratio test as per Lowe's paper: keep a match only if the best
            # distance is below 0.7x the second best
for j,(m,n) in enumerate(matches):
if m.distance < 0.7*n.distance:
matchesMask[j]=[1,0]
list_kp1.append([img1_kp[m.queryIdx].pt[0], img1_kp[m.queryIdx].pt[1]])
list_kp2.append([img2_kp[m.trainIdx].pt[0], img2_kp[m.trainIdx].pt[1]])
count = 0
if len(list_kp1) > 0:
for j in range(len(list_kp1)):
temp_dx += list_kp2[j][0] - list_kp1[j][0]
if list_kp1[j][1] >= RESOLUTION_CONFIG/2: #only upper half of image considered
temp_dy += list_kp2[j][1] - list_kp1[j][1]
count += 1
                temp_dx /= len(list_kp1)
                if count > 0:  # all matches may lie in the lower half
                    temp_dy /= count
dx[command] += temp_dx
dy[command] += temp_dy
for i in range(len(VELOCITY)):
dx[i] = (dx[i] / ITER) / pixel_ratio
dy[i] = (dy[i] / ITER) / pixel_ratio
if dx[i] < 0:
dx[i] = math.ceil(dx[i])
else:
dx[i] = math.floor(dx[i])
if dy[i] < 0:
dy[i] = math.ceil(dy[i])
else:
dy[i] = math.floor(dy[i])
write_config_init(dx, dy, DSN_variant, tau)
#simulate use of CNN
def dummy_cnn() -> None:
    img = np.zeros((1, 64, 64, 3), dtype=int)  # np.int is deprecated
model.predict(img)
model.predict(img)
#main loop of program
def main_loop(address: str, port: int, DSN_variant: int, tau: float) -> None:
CS = CoppeliaSim(address, port)
CS.get_handles(HANDLE_NAME[0:2], HANDLE_NAME[2:]) #motor-handle, sensor-handle
CS.check_startup_sim()
print("Configuring DSN...")
#first image is always blank
CS.get_image()
CS.get_image()
#get image of starting point
img = retrieve_image(CS)
IMAGES.append(img)
for i in range(ITER):
for command in range(len(VELOCITY)):
CS.set_velocity(VELOCITY[command][0], VELOCITY[command][1])
dummy_cnn()
CS.set_velocity(0, 0)
img = retrieve_image(CS)
IMAGES.append(img)
CS.stop_simulation()
AKAZE(DSN_variant, tau) #match keypoints
CS.exit_API('Configuration completed, saved in ' + CONFIG_OUT)
#start of script
if __name__ == "__main__":
if len(sys.argv) != 4:
print('insufficient arguments: [iter] [DSN-variant] [tau]')
exit()
#get files with configuration parameters
command_init()
scene, address, port = scene_init()
model = tf.keras.models.load_model('../models/weights/weights_OAH_1.h5')
pid = os.fork()
if pid == 0:
with open(os.devnull, 'wb') as devnull:
subprocess.check_call([PATH_EXEC, '-q', '-h', scene], stdout=devnull, stderr=subprocess.STDOUT)
else:
time.sleep(5) #wait for coppeliasim to start
main_loop(address, port, int(sys.argv[2]), float(sys.argv[3])) #start the configuration
# ===== spammer/vcspam.py (repo: 00-00-00-11/Raid-Toolbox, license: 0BSD) =====
import discord
import asyncio
import sys
import random
import aiohttp
token = sys.argv[1]
tokenno = sys.argv[2]
voice_id = sys.argv[3]
useproxies = sys.argv[4] # proxies for voice chats smh
if useproxies == 'True':
proxy_list = open("proxies.txt").read().splitlines()
proxy = random.choice(proxy_list)
con = aiohttp.ProxyConnector(proxy="http://"+proxy)
client = discord.Client(connector=con)
else:
client = discord.Client()
@client.event
async def on_ready():
await asyncio.sleep(1)
voice_channel = client.get_channel(int(voice_id))
while not client.is_closed():
vc = await voice_channel.connect()
vc.play(discord.FFmpegPCMAudio('spammer/file.wav'))
vc.source = discord.PCMVolumeTransformer(vc.source)
vc.source.volume = 10.0
while vc.is_playing():
await asyncio.sleep(3)
await vc.disconnect(force=True)
try:
client.run(token, bot=False)
except Exception as c:
print(c)
# ===== recommendation_engines/movie_recommendation/movie_recommender.py (repo: DKMalungu/python_ml, license: Apache-2.0) =====
"""
The dataset can be downloaded from: https://grouplens.org/datasets/movielens/100k/
"""
# Importing Libraries
import pandas as pd
import numpy as np
# Step 1: Loading the dataset into python
# Loading users file
"""
This information is from the read me doc of the dataset
u.user -- Demographic information about the users; this is a tab
separated list of
user id | age | gender | occupation | zip code
The user ids are the ones used in the u.data data set."""
# User data column names
user_columns = ["user_id", 'age', 'gender', 'occupation', 'zip_code']
df_user = pd.read_csv(filepath_or_buffer='./data_store/ml-100k/u.user', sep='|', names=user_columns,
encoding='latin-1')
# Loading ratings file:
"""
This information is from the read me doc of the dataset
u.data -- The full u data set, 100000 ratings by 943 users on 1682 items.
Each user has rated at least 20 movies. Users and items are
numbered consecutively from 1. The data is randomly
ordered. This is a tab separated list of
user id | item id | rating | timestamp.
The time stamps are unix seconds since 1/1/1970 UTC """
rating_columns = ['user_id', 'item_id', 'rating', 'timestamp']
df_rating = pd.read_csv(filepath_or_buffer='./data_store/ml-100k/u.data', sep='\t', names=rating_columns,
                        encoding='latin-1')
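
# The README excerpt above notes that the timestamps are unix seconds; as an
# illustrative extra step (not part of the original analysis), they can be
# turned into readable datetimes:
df_rating['date'] = pd.to_datetime(df_rating['timestamp'], unit='s')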
# loading items file:
"""
u.item -- Information about the items (movies); this is a tab separated
list of
movie id | movie title | release date | video release date |
IMDb URL | unknown | Action | Adventure | Animation |
Children's | Comedy | Crime | Documentary | Drama | Fantasy |
Film-Noir | Horror | Musical | Mystery | Romance | Sci-Fi |
Thriller | War | Western |
The last 19 fields are the genres, a 1 indicates the movie
is of that genre, a 0 indicates it is not; movies can be in
several genres at once.
The movie ids are the ones used in the u.data data set.
"""
items_columns = ['movie_id', 'movie_title', 'release_date', 'video_release_date', 'imdb_url', 'unknown', 'action', 'adventure', 'animation',
'children', 'comedy', 'crime', 'documentary', 'drama', 'fantasy',
'film_noir', 'horror', 'musical', 'mystery', 'romance', 'sci_fi',
'thriller', 'war', 'western']
df_items = pd.read_csv(filepath_or_buffer='./data_store/ml-100k/u.item', sep='|', names=items_columns,
encoding='latin-1')
# Step 2: Descriptive Analysis of the data in each file
# user data file
print('Shape of user data file:', df_user.shape)
print("Data types of the different columns in user data file: ", df_user.dtypes)
print("A sample of the data in user data file: ", df_user.sample(5))
"""Results:
a)Shape
Shape of user data file (943, 5) - The user file data is made up of 943 rows and 5 columns
b) Data types
Data types of the different columns in user data fil:
user_id int64
age int64
gender object
occupation object
zip_code object
dtype: object - The dataset is mad up of data of the type object and int64
c) Sample data
A sample of the data in user dat file:
user_id age gender occupation zip_code
743 744 35 M marketing 47024
292 293 24 M writer 60804
460 461 15 M student 98102
897 898 23 M homemaker 61755
582 583 44 M engineer 29631
The above is a sample of five random rows from the dataset
"""
# rating data file
print('Shape of rating data file:', df_rating.shape)
print("Data types of the different columns in rating data file: ", df_rating.dtypes)
print("A sample of the data in rating data file: ", df_rating.sample(5))
"""Results
a) Shape
Shape of user data file (100000, 4) - the dataset is made up of hundred thousand rows and four columns
b) Dtypes
Data types of the different columns in user data fil: user id object
item_id float64
rating float64
timestamp float64
dtype: object - it made up of the float data type
A sample of the data in user dat file: user id item_id rating timestamp
37410 561\t410\t1\t885810117 NaN NaN NaN
93150 314\t1297\t4\t877890734 NaN NaN NaN
39493 378\t1531\t4\t880056423 NaN NaN NaN
69012 89\t301\t5\t879461219 NaN NaN NaN
7593 83\t575\t4\t880309339 NaN NaN NaN
The above is a sample of five random rows from the dataset
"""
# Items data file
print('Shape of items data file:', df_items.shape)
print("Data types of the different columns in items data file: ", df_items.dtypes)
print("A sample of the data in items data file: ", df_items.sample(5))
# ===== 266_palindrome_permutation.py (repo: gengwg/leetcode, license: Apache-2.0) =====
# 266. Palindrome Permutation
#
# Given a string, determine if a permutation of the string could form a palindrome.
#
# Example 1:
#
# Input: "code"
# Output: false
#
# Example 2:
#
# Input: "aab"
# Output: true
#
# Example 3:
#
# Input: "carerac"
# Output: true
# https://leetcode.com/articles/palindrome-permutation/
class Solution:
def canPermutePalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
unpaired_chars = set()
for c in s:
if c not in unpaired_chars:
unpaired_chars.add(c)
else:
unpaired_chars.remove(c)
return len(unpaired_chars) <= 1
# ===== mediumbackup/__init__.py (repo: lucafrance/mediumbackup, license: MIT) =====
import os
import logging
import medium
from markdownify import markdownify as md
from bs4 import BeautifulSoup as bs
import requests
MAX_FILENAME_LENGTH = 30  # Ignores date and extension, e.g. 2020-10-31<-- 30 characters -->.md
FORBIDDEN_FILENAME_CHARS = "*?"
DEFAULT_BACKUP_DIR = "backup"
DEFAULT_FORMAT = "html"
class MediumStory():
def __init__(self, raw):
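        # raw: a single story item as a dict with at least the "pubDate",
        # "title", "link" and "content" fields (e.g. one entry of a parsed
        # Medium RSS feed)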
self.raw = raw
self.pub_date = raw["pubDate"][:len("yyyy-mm-dd")]
self.title = raw["title"]
self.link = raw["link"].split("?")[0]
self.content = raw["content"]
self._html = None
self._markdown = None
def html(self):
if self._html is not None:
return self._html
# Add story title to the content
html = "<h3>{}</h3>{}".format(self.title, self.content)
# Remove placeholder images for stats
# They are used to count views from e.g. rss feeds
soup = bs(html, "html.parser")
for img in soup.find_all("img"):
if img["src"].startswith("https://medium.com/_/stat"):
img.decompose()
html = str(soup)
# Embrace loose links in a figure, otherwise embedded content
# stays on the same line as the next paragraph and looks weird
# especially when converted to markdown. E.g.:
# <a href="https://medium.com/media/abcdef123456/href">
# https://medium.com/media/abcdef123456/href
# </a><p>Lorem Ipsum [...] dolor sit amet.</p>
soup = bs(html, "html.parser")
links_to_replace = []
for a in soup.find_all("a"):
if a.parent.name == "[document]":
links_to_replace.append(str(a))
html = str(soup)
for link in links_to_replace:
html = html.replace(link, "<figure>{}</figure>".format(link))
# Check links starting with https://medium.com/media/, which are
# probably embedded content and redirect to other websites.
# If so, replace the url with the final one
links_redirects = []
soup = bs(html, "html.parser")
for a in soup.find_all("a"):
a_href = a["href"]
if a_href.startswith("https://medium.com/media/"):
r = requests.get(a_href, allow_redirects=True)
if not r.ok:
logging.warning("Could not resolve \"{}\", maybe the link is broken.".format(a_href))
if a_href != r.url:
links_redirects.append((a_href, r.url))
for medium_link, redirect_link in links_redirects:
html = html.replace(medium_link, redirect_link)
# Replace gist links with embedding script
soup = bs(html, "html.parser")
for a in soup.find_all("a"):
a_href = a["href"]
if a_href == a.string and a_href.startswith("https://gist.github.com/"):
embedding_tag = soup.new_tag("script", src=a_href + ".js")
a.replace_with(embedding_tag)
html = str(soup)
self._html = html
return self._html
def download_images(self, images_dir, images_src=None):
""" Download images and update the html to use the local images as source.
Keyword arguments:
images_dir -- the directory where the images should be saved
e.g. /backup/images or /assets/images
images_src -- the source parameter to be entered in html
e.g. /images
"""
# If images_src is missing, assume the directory
# Replace "\" with "/" for Windows
if images_src is None:
images_src = images_dir.replace("\\", "/")
# If the folder doesn't exist yet, create it
os.makedirs(images_dir, exist_ok=True)
# Parse the html source for all images
html = self.html()
soup = bs(html, "html.parser")
img_sources = [img["src"] for img in soup.find_all("img")]
# For each image, download it and update the html source
for img_src in img_sources:
# Build the filename of the image
filename = img_src.split("/")[-1]
for char in FORBIDDEN_FILENAME_CHARS:
filename = filename.replace(char, "")
# Download the image
r = requests.get(img_src)
# Save the image
file_path = os.path.join(images_dir, filename)
with open(file_path, "wb") as f:
f.write(r.content)
logging.info("Downloaded \"{}\" to \"{}\".".format(img_src, file_path))
            # Replace src attributes to point to the downloaded image
new_src = "/".join((images_src, filename))
html = html.replace("src=\"" + img_src + "\"",
"src=\"" + new_src + "\"")
        # Update the html attribute with local source paths
self._html = html
return
def markdown(self, jekyll_front_matter=False):
""" Return the content of the story in markdown.
Keyword arguments:
jekyll_front_matter -- include a front matter to use with jekyll
"""
if self._markdown is not None:
return self._markdown
html = self.html()
# Add two new lines after figures and blockquotes
# to prevent formatting errors with markdown
# https://github.com/matthewwithanm/python-markdownify/pull/25
for closing_tag in ["</figure>", "</blockquote>"]:
html = html.replace(closing_tag, closing_tag + "<br><br>")
# Workaround for ordered lists in markdownify
# https://github.com/matthewwithanm/python-markdownify/issues/8
# https://github.com/matthewwithanm/python-markdownify/pull/23
html = html.replace("\n<li>", "<li>")
# Escape sequence for grave accent
html = html.replace("`", "\\`")
# Workaround for <pre> tags not being converted
html = html.replace("<pre>", "<pre>```").replace("</pre>", "```</pre>")
# Convert to markdown
md_story = md(html, heading_style="ATX")
# Ensure that "```" stays on its own line
md_story = md_story.replace("```", "\n```\n")
        # Remove leading whitespace
# https://github.com/matthewwithanm/python-markdownify/issues/17
md_story = "\n".join([line.strip() for line in md_story.split("\n")])
# Add jekyll front matter
if jekyll_front_matter:
front_matter = "---\ntitle: {}\ncanonicalurl: {}\n---\n\n".format(
self.title, self.link
)
md_story = front_matter + md_story
self._markdown = md_story
return self._markdown
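    # Illustration of the <pre> workaround above (hypothetical input and output):
    #     '<pre>x = 1</pre>'  becomes roughly  '\n```\nx = 1\n```\n'
    # so code blocks survive the html-to-markdown conversion.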
def backup(self, backup_dir, format, download_images=False, images_dir=None, jekyll_front_matter=False):
""" Download the story locally.
Keyword arguments:
backup_dir -- destination directory name, default "backup"
format -- "html" or "md" for markdown, default "html"
download_images -- True to download images and adjust the source, default False
images_dir -- directory to save the images, if different from backup_dir/images
jekyll_front_matter -- Include jekyll front matter, only valid with markdown
"""
logging.info("Downloading story \"{}\" published on \"{}\".".format(self.title, self.pub_date))
# Check user input
        if format not in ["html", "md"]:
            logging.warning("Format {} not recognized, html will be used instead.".format(format))
            format = "html"
if format != "md" and jekyll_front_matter:
logging.warning("Format {} cannot include a jekyll front matter. For that use markdown (\"md\") instead.".format(format))
# Create backup directory if not existent
if not os.path.exists(backup_dir):
os.mkdir(backup_dir)
# Download images if necessary
if download_images:
if images_dir is None:
images_dir = "/".join((backup_dir, "images"))
images_src = "images"
else:
images_src = None
self.download_images(images_dir=images_dir, images_src=images_src)
# Get the content formatted correctly
if format == "md":
content = self.markdown(jekyll_front_matter=jekyll_front_matter)
else:
# html is the default option
content = self.html()
# Find the url path portion of the story url
# (i.e. whatever comes after the last /)
        # and remove invalid filename characters
url_path = self.link.split("/")[-1]
for char in FORBIDDEN_FILENAME_CHARS:
url_path = url_path.replace(char, "")
# Build the filename and save the file
filename = "".join([self.pub_date, "-", url_path[:MAX_FILENAME_LENGTH], ".", format])
with open(os.path.join(backup_dir, filename), "wt", encoding="utf8") as f:
f.write(content)
logging.info("Story \"{}\" downloaded to \"{}\".".format(self.title, filename))
return
def backup_stories(username, backup_dir=DEFAULT_BACKUP_DIR,
format=DEFAULT_FORMAT,
download_images=False,
images_dir=None,
jekyll_front_matter=False,
):
""" Download all public stories by username. """
# Get the stories list through a medium client,
# authentication is not required in this case
mclient = medium.Client()
list_stories = mclient.list_articles(username=username)
    # For each story, create a backup file
for story_raw in list_stories:
story = MediumStory(story_raw)
story.backup(backup_dir, format=format,
download_images=download_images,
images_dir=images_dir,
jekyll_front_matter=jekyll_front_matter,
)
print("Downloaded Medium story: \"{}\"".format(story.title))
return
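# Minimal usage sketch ("some_user" is a placeholder Medium username):
#
#     import mediumbackup as mb
#     mb.backup_stories("some_user", format="md",
#                       download_images=True, jekyll_front_matter=True)
#
# This writes one markdown file per public story into ./backup by default.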
| 39.70566 | 133 | 0.563676 | 1,236 | 10,522 | 4.662621 | 0.237055 | 0.032448 | 0.041298 | 0.012146 | 0.155822 | 0.140205 | 0.11383 | 0.055527 | 0.042686 | 0.042686 | 0 | 0.005385 | 0.32931 | 10,522 | 265 | 134 | 39.70566 | 0.811251 | 0.285497 | 0 | 0.198582 | 0 | 0 | 0.101221 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.042553 | 0 | 0.141844 | 0.007092 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fb8a2d01a0b8780de371becc7f40508be277d45 | 8,596 | py | Python | Algorithms/Off-Policy/DDPG-TD3/main.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | Algorithms/Off-Policy/DDPG-TD3/main.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | Algorithms/Off-Policy/DDPG-TD3/main.py | baturaysaglam/DISCOVER | 423158c84a5935ca5755ccad06ea5fe20fb57d76 | [
"MIT"
] | null | null | null | import argparse
import os
import socket
import gym
import numpy as np
import torch
import DDPG
import TD3
import DISCOVER_DDPG
import DISCOVER_TD3
import utils
# DDPG tuned hyper-parameters are imported from
# RL Baselines3 Zoo: https://github.com/DLR-RM/rl-baselines3-zoo
def hyper_parameter_dict_DDPG(args):
if args.env == "BipedalWalker-v3" or args.env == "LunarLanderContinuous-v2":
args.gamma = 0.98
args.tau = 0.02
if args.env == "Ant-v2" or args.env == "HalfCheetah-v2" or args.env == "LunarLanderContinuous-v2" or args.env == "BipedalWalker-v3":
args.start_steps = 10000
return args
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def evaluate_policy(agent, env_name, seed, eval_episodes=10):
eval_env = gym.make(env_name)
eval_env.seed(seed + 100)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
action = agent.select_action(np.array(state))
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='DDPG, TD3 and their DISCOVER Implementation')
parser.add_argument("--policy", default="DISCOVER_TD3", help='Algorithm (default: DISCOVER_TD3)')
parser.add_argument("--env", default="Hopper-v2", help='OpenAI Gym environment name')
parser.add_argument("--seed", default=0, type=int,
help='Seed number for PyTorch, NumPy and OpenAI Gym (default: 0)')
parser.add_argument("--gpu", default="0", type=int, help='GPU ordinal for multi-GPU computers (default: 0)')
parser.add_argument("--start_time_steps", default=1000, type=int, metavar='N',
help='Number of exploration time steps sampling random actions (default: 1000)')
parser.add_argument("--buffer_size", default=1000000, type=int,
help='Size of the experience replay buffer (default: '
'1000000)')
parser.add_argument("--eval_freq", default=1e3, metavar='N', help='Evaluation period in number of time '
'steps (default: 1000)')
parser.add_argument("--max_time_steps", default=1000000, type=int, metavar='N',
help='Maximum number of steps (default: 1000000)')
parser.add_argument("--exp_regularization", default=0.3, type=float)
parser.add_argument("--exploration_noise", default=0.1, metavar='G', help='Std of Gaussian exploration noise')
parser.add_argument("--batch_size", default=256, metavar='N',
help='Batch size (default: 256)')
parser.add_argument("--discount", default=0.99, metavar='G',
help='Discount factor for reward (default: 0.99)')
parser.add_argument("--tau", default=0.005, type=float, metavar='G',
help='Learning rate in soft/hard updates of the target networks (default: 0.005)')
parser.add_argument("--policy_noise", default=0.2, metavar='G', help='Noise added to target policy during critic '
'update')
parser.add_argument("--noise_clip", default=0.5, metavar='G', help='Range to clip target policy noise')
parser.add_argument("--policy_freq", default=2, type=int, metavar='N', help='Frequency of delayed policy updates')
parser.add_argument("--save_model", action="store_true", help='Save model and optimizer parameters')
parser.add_argument("--load_model", default="", help='Model load file name; if empty, does not load')
args = parser.parse_args()
print(args)
file_name = f"{args.policy}_{args.env}_{args.seed}"
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
print("---------------------------------------")
if "DDPG" in args.policy:
args.batch_size = 64
args.tau = 0.001
# Adjust the hyper-parameters with respect to the environment
args = hyper_parameter_dict_DDPG(args)
if not os.path.exists("./results"):
os.makedirs("./results")
if args.save_model and not os.path.exists("./models"):
os.makedirs("./models")
env = gym.make(args.env)
# Set seeds
env.seed(args.seed)
env.action_space.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
discover = False
device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": args.discount,
"tau": args.tau,
"device": device
}
# Initialize policy
if args.policy == "TD3":
# Target policy smoothing is scaled wrt the action scale
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
agent = TD3.TD3(**kwargs)
elif args.policy == "DDPG":
agent = DDPG.DDPG(**kwargs)
elif args.policy == "DISCOVER_DDPG":
discover = True
kwargs["exp_regularization"] = args.exp_regularization
agent = DISCOVER_DDPG.DISCOVER_DDPG(**kwargs)
elif args.policy == "DISCOVER_TD3":
discover = True
kwargs["exp_regularization"] = args.exp_regularization
agent = DISCOVER_TD3.DISCOVER_TD3(**kwargs)
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
agent.load(f"./models/{policy_file}")
replay_buffer = utils.NoisyExperienceReplayBuffer(state_dim, action_dim, device) if discover \
else utils.ExperienceReplayBuffer(state_dim, action_dim, device)
# Evaluate the untrained policy
evaluations = [f"HOST: {socket.gethostname()}", f"GPU: {torch.cuda.get_device_name(args.gpu)}",
evaluate_policy(agent, args.env, args.seed)]
state, done = env.reset(), False
episode_reward = 0
episode_time_steps = 0
episode_num = 0
for t in range(int(args.max_time_steps)):
episode_time_steps += 1
# Sample action from the action space or policy
if t < args.start_time_steps:
action = env.action_space.sample()
exploration = np.random.normal(0, max_action * args.exp_regularization, size=action_dim)
else:
action = agent.select_action(np.array(state))
            exploration = agent.generate_noise(state) if discover else np.random.normal(
                0, max_action * args.exp_regularization, size=action_dim)
# Take the selected action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_time_steps <= env._max_episode_steps else 0
# Store data in the experience replay buffer
if discover:
replay_buffer.add(state, action, exploration, next_state, reward, done_bool)
else:
replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
episode_reward += reward
# Train the agent after collecting sufficient samples
if t >= args.start_time_steps:
agent.update_parameters(replay_buffer, args.batch_size)
if done:
print(f"Total T: {t + 1} Episode Num: {episode_num + 1} Episode T: {episode_time_steps} Reward: "
f"{episode_reward:.3f}")
# Reset the environment
state, done = env.reset(), False
episode_reward = 0
episode_time_steps = 0
episode_num += 1
# Evaluate the agent over a number of episodes
if (t + 1) % args.eval_freq == 0:
evaluations.append(evaluate_policy(agent, args.env, args.seed))
np.save(f"./results/{file_name}", evaluations)
if args.save_model:
agent.save(f"./models/{file_name}")
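            # Note: because `evaluations` mixes strings (host/GPU info) with
            # floats, np.save coerces everything to strings; the scores can be
            # recovered later with, e.g.:
            #     np.load(f"./results/{file_name}.npy")[2:].astype(float)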
| 40.168224 | 136 | 0.620405 | 1,074 | 8,596 | 4.800745 | 0.211359 | 0.03142 | 0.059348 | 0.0064 | 0.228084 | 0.131885 | 0.100465 | 0.073701 | 0.073701 | 0.073701 | 0 | 0.021145 | 0.246277 | 8,596 | 213 | 137 | 40.356808 | 0.774657 | 0.071894 | 0 | 0.118421 | 0 | 0.013158 | 0.241899 | 0.04308 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013158 | false | 0 | 0.072368 | 0 | 0.098684 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fbb4e52d64906788fb583d0d26e7df5468dcdca | 1,590 | py | Python | setup.py | PlaidCloud/sqlalchemy-greenplum | b40beeee8b775290b262d3b9989e8faeba8b2d20 | [
"BSD-3-Clause"
] | 6 | 2019-05-10T18:31:05.000Z | 2021-09-08T16:59:46.000Z | setup.py | PlaidCloud/sqlalchemy-greenplum | b40beeee8b775290b262d3b9989e8faeba8b2d20 | [
"BSD-3-Clause"
] | 2 | 2018-06-04T23:28:16.000Z | 2022-03-08T14:20:14.000Z | setup.py | PlaidCloud/sqlalchemy-greenplum | b40beeee8b775290b262d3b9989e8faeba8b2d20 | [
"BSD-3-Clause"
] | 1 | 2019-06-13T10:12:44.000Z | 2019-06-13T10:12:44.000Z |
import os
from setuptools import setup, find_packages
source_location = os.path.abspath(os.path.dirname(__file__))
def get_version():
with open(os.path.join(source_location, "VERSION")) as version:
return version.readline().strip()
setup(
name="sqlalchemy-greenplum",
version=get_version(),
license="LICENSE.txt",
url="https://github.com/PlaidCloud/sqlalchemy-greenplum",
author="Patrick Buxton",
author_email="patrick.buxton@tartansolutions.com",
description="SQLAlchemy dialect for Pivotal Greenplum Database",
packages=find_packages(),
zip_safe=False,
install_requires=[
"sqlalchemy"
],
extras_require={
},
setup_requires=["pytest-runner"],
tests_require=["pytest", "mock"],
test_suite="test.test_suite",
classifiers=[ # cf. http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: Other/Proprietary License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: SQL",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries :: Python Modules",
],
entry_points = {
"sqlalchemy.dialects":
["greenplum = sqlalchemy_greenplum.dialect:GreenplumDialect"]
},
)
| 33.829787 | 78 | 0.648428 | 159 | 1,590 | 6.352201 | 0.591195 | 0.075248 | 0.074257 | 0.051485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004804 | 0.214465 | 1,590 | 46 | 79 | 34.565217 | 0.803843 | 0.036478 | 0 | 0.046512 | 0 | 0 | 0.483322 | 0.051668 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.046512 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fbd170ddca2f532e47d512537a2706fb3db9b79 | 4,976 | py | Python | src/cray/cfs/operator/__main__.py | Cray-HPE/cfs-operator | 16cd12155ba52b89e504ed668c49b544b92d3794 | [
"MIT"
] | null | null | null | src/cray/cfs/operator/__main__.py | Cray-HPE/cfs-operator | 16cd12155ba52b89e504ed668c49b544b92d3794 | [
"MIT"
] | 2 | 2021-12-16T19:29:28.000Z | 2022-03-02T22:38:35.000Z | src/cray/cfs/operator/__main__.py | Cray-HPE/cfs-operator | 16cd12155ba52b89e504ed668c49b544b92d3794 | [
"MIT"
] | 1 | 2021-11-10T22:28:36.000Z | 2021-11-10T22:28:36.000Z | #!/usr/bin/env python
#
# MIT License
#
# (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""
CFS Operator - A Python operator for the Cray Configuration Framework Service.
"""
import logging
import threading
import os
from pkg_resources import get_distribution
import time
from urllib3.exceptions import MaxRetryError
from kubernetes import config, client
from kubernetes.config.config_exception import ConfigException
from .events import CFSSessionController
from cray.cfs.operator.cfs.options import options
import cray.cfs.operator.cfs.sessions as sessions
from cray.cfs.operator.liveness.timestamp import Timestamp
LOGGER = logging.getLogger('cray.cfs.operator')
try:
config.load_incluster_config()
except ConfigException: # pragma: no cover
config.load_kube_config() # Development
_api_client = client.ApiClient()
k8sjobs = client.BatchV1Api(_api_client)
def session_cleanup():
"""
Periodically deletes all completed sessions older than the set ttl.
"""
while True:
time.sleep(60 * 5) # Run every 5 minutes
try:
options.update()
ttl = options.session_ttl
if ttl:
sessions.delete_sessions(status='complete', min_age=ttl)
except Exception as e:
LOGGER.warning('Exception during session cleanup: {}'.format(e))
def monotonic_liveliness_heartbeat():
"""
Periodically add a timestamp to disk; this allows for reporting of basic
health at a minimum rate. This prevents the pod being marked as dead if
a period of no events have been monitored from k8s for an extended
period of time.
"""
while True:
Timestamp()
time.sleep(10)
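# A minimal sketch of the liveness pattern used above, assuming Timestamp()
# simply writes the current time to a well-known file that a Kubernetes
# liveness probe can check (the real implementation lives in
# cray.cfs.operator.liveness.timestamp):
#
#     import time
#     def write_heartbeat(path="/tmp/liveness-timestamp"):
#         with open(path, "w") as f:
#             f.write(str(time.time()))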
def main(env):
""" Spawn watch processes of relevant Kubernetes objects """
# Periodically checks for and removes sessions older than the TTL
cleanup = threading.Thread(
target=session_cleanup,
args=(),
name="cfs_session_cleanup",
)
cleanup.start()
# Always periodically heartbeat, even when there isn't work to be
# done.
heartbeat = threading.Thread(target=monotonic_liveliness_heartbeat,
args=())
heartbeat.start()
controller = CFSSessionController(env)
controller.run()
def _init_logging():
# Format logs for stdout
log_format = "%(asctime)-15s - %(levelname)-7s - %(name)s - %(message)s"
requested_log_level = os.environ.get('CFS_OPERATOR_LOG_LEVEL', 'INFO')
log_level = logging.getLevelName(requested_log_level)
    if not isinstance(log_level, int):
LOGGER.warning('Log level %r is not valid. Falling back to INFO', requested_log_level)
log_level = logging.INFO
logging.basicConfig(level=log_level, format=log_format)
def _init_env():
# CFS Environment Variables
cfs_environment = {k: v for k, v in os.environ.items() if 'CFS' in k}
# Ensure the namespace is in the environment
resource_namespace = cfs_environment.get('CRAY_CFS_NAMESPACE', 'services')
cfs_environment['RESOURCE_NAMESPACE'] = resource_namespace
for k, v in cfs_environment.items():
LOGGER.info('CFS Operator runtime environment: %s=%s', k, v)
return cfs_environment
def _wait_for_networking_setup():
# This is an arbitrary kubernetes call to test connectivity
while True:
try:
k8sjobs.get_api_resources()
except MaxRetryError:
LOGGER.info('Waiting for pod networking to complete setup')
time.sleep(1)
continue
LOGGER.info('Networking is available. Continuing with startup')
return
if __name__ == '__main__':
Timestamp() # Initialize our watch timestamp
_init_logging()
env = _init_env()
_wait_for_networking_setup()
version = get_distribution('cray-cfs').version
LOGGER.info('Starting CFS Operator version=%s', version)
main(env)
| 33.395973 | 94 | 0.712018 | 650 | 4,976 | 5.336923 | 0.418462 | 0.020755 | 0.017296 | 0.010954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005851 | 0.210008 | 4,976 | 148 | 95 | 33.621622 | 0.876622 | 0.384244 | 0 | 0.102564 | 0 | 0 | 0.14694 | 0.007397 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fbd6c101c88c86adca08569b698ed439f1e762d | 25,986 | py | Python | disco_aws_automation/disco_metanetwork.py | amplifylitco/asiaq | a1a292f6e9cbf32a30242405e4947b17910e5369 | [
"BSD-2-Clause"
] | 27 | 2016-03-08T16:50:22.000Z | 2018-11-26T06:33:25.000Z | disco_aws_automation/disco_metanetwork.py | amplifylitco/asiaq | a1a292f6e9cbf32a30242405e4947b17910e5369 | [
"BSD-2-Clause"
] | 202 | 2016-03-08T17:13:08.000Z | 2019-02-01T00:49:06.000Z | disco_aws_automation/disco_metanetwork.py | amplify-education/asiaq | fb6004bc4da0acef40e7bc18b148db4f72fa2f32 | [
"BSD-2-Clause"
] | 2 | 2016-03-17T18:52:37.000Z | 2016-10-06T20:36:37.000Z | """
Network abstraction
"""
import logging
from random import choice
from netaddr import IPNetwork, IPAddress
from boto.ec2.networkinterface import (
NetworkInterfaceSpecification,
NetworkInterfaceCollection
)
from boto.exception import EC2ResponseError
from boto.vpc import VPCConnection
import boto3
from disco_aws_automation.network_helper import calc_subnet_offset
from .disco_subnet import DiscoSubnet
from .resource_helper import (
keep_trying,
find_or_create,
throttled_call
)
from .disco_constants import NETWORKS
from .exceptions import (
IPRangeError,
EIPConfigError,
RouteCreationError
)
logger = logging.getLogger(__name__)
class DiscoMetaNetwork(object):
"""
Representation of a disco meta-network. Contains a subnet for each availability zone,
along with a route table which is applied all the subnets.
"""
def __init__(self, name, vpc, network_cidr=None, boto3_connection=None):
self.vpc = vpc
self.name = name
if network_cidr:
self._network_cidr = IPNetwork(network_cidr)
else:
self._network_cidr = None
self._centralized_route_table_loaded = False
self._centralized_route_table = None # lazily initialized
self._security_group = None # lazily initialized
self._connection = VPCConnection()
self._disco_subnets = None # lazily initialized
self._boto3_connection = boto3_connection # Lazily initialized if parameter is None
@property
def network_cidr(self):
"""Get the network_cidr for the meta network"""
if not self._network_cidr:
# if we don't have a network_cidr yet (if it wasn't passed in the constructor)
# then calculate it from the subnets
subnets = self._instantiate_subnets(try_creating_aws_subnets=False)
# calculate how big the meta network must have been if we divided it into the existing subnets
subnet_cidr_offset = calc_subnet_offset(len(subnets.values()))
# pick one of the subnets to do our math from
subnet_network = IPNetwork(subnets.values()[0].subnet_dict['CidrBlock'])
# the meta network cidr is the cidr of one of the subnets but with a smaller prefix
subnet_network.prefixlen = subnet_network.prefixlen - subnet_cidr_offset
self._network_cidr = subnet_network.cidr
return self._network_cidr
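    # Worked example of the prefix math above (assumption: calc_subnet_offset(n)
    # returns the extra prefix bits needed for at least n subnets, i.e.
    # ceil(log2(n))): with three /26 zone subnets the offset is 2, so the meta
    # network CIDR is recovered by shortening the prefix from /26 to /24.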
@property
def boto3_ec2(self):
"""
Lazily creates boto3 EC2 connection
"""
if not self._boto3_connection:
self._boto3_connection = boto3.client('ec2')
return self._boto3_connection
def _resource_name(self, suffix=None):
suffix = "_{0}".format(suffix) if suffix else ""
return "{0}_{1}{2}".format(self.vpc.environment_name, self.name, suffix)
def create(self):
"""
Metanetwork is initialized lazily. This forces creation of all
components.
"""
self._centralized_route_table = self.centralized_route_table
self._security_group = self.security_group
self._disco_subnets = self.disco_subnets
def vpc_filter(self):
""" Returns VPC filter """
vpc_filter = self.vpc.vpc_filters()[0]
return {vpc_filter.get('Name'): vpc_filter.get('Values')[0]}
@property
def _resource_filter(self):
resource_filter = self.vpc_filter()
resource_filter["tag:meta_network"] = self.name
return resource_filter
def _tag_resource(self, resource, suffix=None):
keep_trying(300, resource.add_tag, "Name", self._resource_name(suffix))
keep_trying(300, resource.add_tag, "meta_network", self.name)
@property
def centralized_route_table(self):
'''Returns the centralized route table for our metanetwork,
which could be None'''
if not self._centralized_route_table_loaded:
self._centralized_route_table = self._find_centralized_route_table()
self._centralized_route_table_loaded = True
return self._centralized_route_table
def _find_centralized_route_table(self):
route_tables = throttled_call(
self._connection.get_all_route_tables,
filters=self._resource_filter
)
if len(route_tables) != 1:
# If the number of route tables is more than one, it means there is
# one route table per disco_subnet, therefore don't return anything.
return None
return route_tables[0]
@property
def security_group(self):
'''Finds or creates the security group for our metanetwork'''
if not self._security_group:
self._security_group = find_or_create(
self._find_security_group, self._create_security_group
)
return self._security_group
def _find_security_group(self):
try:
return throttled_call(
self._connection.get_all_security_groups,
filters=self._resource_filter
)[0]
except IndexError:
return None
@property
def sg_description(self):
"""Returns a description of the metanetwork's purpose"""
return NETWORKS[self.name]
def _create_security_group(self):
security_group = throttled_call(
self._connection.create_security_group,
self._resource_name(),
self.sg_description,
self.vpc.get_vpc_id()
)
self._tag_resource(security_group)
logger.debug("%s security_group: %s", self.name, security_group)
return security_group
@property
def disco_subnets(self):
'''Creates the subnets for our metanetwork'''
if not self._disco_subnets:
self._disco_subnets = self._instantiate_subnets()
return self._disco_subnets
@property
def subnet_ip_networks(self):
"""
Return IPNetwork of all subnet CIDRs
"""
return [
IPNetwork(subnet.subnet_dict['CidrBlock'])
for subnet in self.disco_subnets.values()
]
@property
def subnet_ids(self):
"""
Return subnet ids
"""
return [
subnet.subnet_dict['SubnetId']
for subnet in self.disco_subnets.values()
]
def add_nat_gateways(self, allocation_ids=None):
"""
Creates a NAT gateway in each of the metanetwork's subnet, either using the
EIP allocation ids provided, or dynamic EIPs if EIP allocation_ids are not passed in.
:param allocation_ids: Allocation ids of the Elastic IPs that will be
associated with the NAT gateways.
"""
if allocation_ids:
if len(self.disco_subnets.values()) != len(allocation_ids):
raise EIPConfigError("The number of subnets does not match with the "
"number of NAT gateway EIPs provided for {0}: "
"{1} != {2}"
.format(self._resource_name(),
len(self.disco_subnets.values()),
len(allocation_ids)))
self._create_route_table_per_subnet()
for disco_subnet, allocation_id in zip(self.disco_subnets.values(), allocation_ids):
disco_subnet.create_nat_gateway(eip_allocation_id=allocation_id)
else:
self._create_route_table_per_subnet()
for disco_subnet in self.disco_subnets.values():
disco_subnet.create_nat_gateway()
def _create_route_table_per_subnet(self):
if self.centralized_route_table:
for disco_subnet in self.disco_subnets.values():
disco_subnet.recreate_route_table()
throttled_call(self._connection.delete_route_table, self.centralized_route_table.id)
self._centralized_route_table = None
def delete_nat_gateways(self):
""" Deletes all subnets' NAT gateways if any """
for disco_subnet in self.disco_subnets.values():
disco_subnet.delete_nat_gateway()
def _instantiate_subnets(self, try_creating_aws_subnets=True):
        # FIXME: this needs discussion and simplification
logger.debug("instantiating subnets")
zones = throttled_call(self._connection.get_all_zones)[:3]
logger.debug("zones: %s", zones)
# We'll need to split each subnet into smaller ones, one per zone
# offset is how much we need to add to cidr divisor to create at least
# that len(zone) subnets
zone_cidr_offset = calc_subnet_offset(len(zones))
logger.debug("zone_offset: %s", zone_cidr_offset)
if try_creating_aws_subnets:
zone_cidrs = self.network_cidr.subnet(
int(self.network_cidr.prefixlen + zone_cidr_offset)
)
else:
zone_cidrs = ['' for _ in zones]
subnets = {}
for zone, cidr in zip(zones, zone_cidrs):
logger.debug("%s %s", zone, cidr)
disco_subnet = DiscoSubnet(str(zone.name), self, str(cidr),
self.centralized_route_table.id
if self.centralized_route_table else None)
subnets[zone.name] = disco_subnet
logger.debug("%s disco_subnet: %s", self.name, disco_subnet)
return subnets
def subnet_by_ip(self, ip_address):
""" Return the subnet to which the ip address belongs to """
ip_address = IPAddress(ip_address)
for disco_subnet in self.disco_subnets.values():
cidr = IPNetwork(disco_subnet.subnet_dict['CidrBlock'])
if ip_address >= cidr[0] and ip_address <= cidr[-1]:
return disco_subnet.subnet_dict
raise IPRangeError("IP {0} is not in Metanetwork ({1}) range.".format(ip_address, self.name))
def create_interfaces_specification(self, subnet_ids=None, public_ip=False):
"""
Create a network interface specification for an instance -- to be used
with run_instance()
"""
random_subnet_id = choice(subnet_ids if subnet_ids else
[disco_subnet.subnet_dict['SubnetId']
for disco_subnet in self.disco_subnets.values()])
interface = NetworkInterfaceSpecification(
subnet_id=random_subnet_id,
groups=[self.security_group.id],
associate_public_ip_address=public_ip)
interfaces = NetworkInterfaceCollection(interface)
return interfaces
def get_interface(self, private_ip):
"""
        Allocate a 'floating' network interface with static ip --
if it does not already exist.
"""
interface_filter = self.vpc_filter()
interface_filter["private-ip-address"] = private_ip
interfaces = throttled_call(
self._connection.get_all_network_interfaces,
filters=interface_filter
)
if interfaces:
return interfaces[0]
logger.debug("Creating floating ENI %s", private_ip)
aws_subnet = self.subnet_by_ip(private_ip)
return throttled_call(
self._connection.create_network_interface,
subnet_id=aws_subnet['SubnetId'],
private_ip_address=private_ip,
description="floating interface",
groups=[self.security_group.id]
)
@staticmethod
def _convert_sg_rule_tuple_to_dict(sg_rule_tuple):
sg_rule = {
"group_id": sg_rule_tuple[0],
"ip_protocol": sg_rule_tuple[1]
}
if sg_rule_tuple[4]:
sg_rule["src_security_group_group_id"] = sg_rule_tuple[4]
elif sg_rule_tuple[5]:
sg_rule["cidr_ip"] = sg_rule_tuple[5]
sg_rule["from_port"] = sg_rule_tuple[2]
sg_rule["to_port"] = sg_rule_tuple[3]
return sg_rule
def create_sg_rule_tuple(self, protocol, ports, sg_source_id=None, cidr_source=None):
""" Creates a tuple represeting a security group rule with the security groupd ID
of the current meta network added """
return self.security_group.id, protocol, ports[0], ports[1], sg_source_id, cidr_source
def update_sg_rules(self, desired_sg_rules, dry_run=False):
"""
Update the security rules of the meta network so that they conform to
the new rules being passed in. Each rule is a tuple that contains 6 values:
        desired_sg_rules[0]: security group ID
        desired_sg_rules[1]: protocol, e.g. tcp, icmp
        desired_sg_rules[2]: from port
        desired_sg_rules[3]: end port
        desired_sg_rules[4]: source security group ID
        desired_sg_rules[5]: source CIDR
"""
logger.info("Updating security rules for meta network %s", self.name)
current_sg_rules = [
self.create_sg_rule_tuple(
rule.ip_protocol,
[int(rule.from_port) if rule.from_port else 0,
int(rule.to_port) if rule.to_port else 65535],
grant.group_id, grant.cidr_ip)
for rule in self.security_group.rules
for grant in rule.grants]
current_sg_rules = set(current_sg_rules)
desired_sg_rules = set(desired_sg_rules) if desired_sg_rules else set()
sg_rules_to_add = list(desired_sg_rules - current_sg_rules)
sg_rules_to_delete = list(current_sg_rules - desired_sg_rules)
logger.info("Adding new security group rules %s", sg_rules_to_add)
logger.info("Revoking security group rules %s", sg_rules_to_delete)
if not dry_run:
self._add_sg_rules(sg_rules_to_add)
self._revoke_sg_rules(sg_rules_to_delete)
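    # Illustrative dry-run usage (hypothetical values):
    #
    #     rules = [
    #         network.create_sg_rule_tuple("tcp", (443, 443), cidr_source="10.0.0.0/8"),
    #         network.create_sg_rule_tuple("tcp", (22, 22), sg_source_id="sg-12345678"),
    #     ]
    #     network.update_sg_rules(rules, dry_run=True)  # only logs the add/revoke diff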
def _revoke_sg_rules(self, rule_tuples):
""" Revoke the list of security group rules from the current meta network """
for rule in rule_tuples:
rule = DiscoMetaNetwork._convert_sg_rule_tuple_to_dict(rule)
if not throttled_call(self._connection.revoke_security_group, **rule):
logger.warning("Failed to revoke security group %s", rule)
def _add_sg_rules(self, rule_tuples):
""" Add a list of security rules to the current meta network """
for rule in rule_tuples:
rule = DiscoMetaNetwork._convert_sg_rule_tuple_to_dict(rule)
if not throttled_call(self._connection.authorize_security_group, **rule):
logger.warning("Failed to authorize security group %s", rule)
def ip_by_offset(self, offset):
"""
Pass in +10 and get 10th ip of subnet range
Pass in -2 and get 2nd to last ip of subnet
Returns IpAddress object, usually you'll want
to cast this to str.
"""
try:
offset = int(offset)
except ValueError:
raise IPRangeError(
"Cannot find IP in metanetwork {0} by offset {1}."
.format(self.name, offset))
subnets = sorted(self.subnet_ip_networks)
base_address = subnets[0].first if offset >= 0 else subnets[-1].last
desired_address = IPAddress(base_address + offset)
# Lazy check to ensure IP address is in metanetwork range
self.subnet_by_ip(desired_address)
return desired_address
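    # For example, with a sole subnet covering 10.0.0.0/28 (.0 through .15):
    #     ip_by_offset(10) -> IPAddress('10.0.0.10')
    #     ip_by_offset(-2) -> IPAddress('10.0.0.13')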
def add_gateway_routes(self, route_tuples):
""""
Add a list of gateway routes to all the subnets' route tables. Each route
is a tuple that contains 2 values:
new_route_tuples[0]: destination CIDR block
new_route_tuples[1]: gateway ID
"""
for route_tuple in route_tuples:
self._add_gateway_route(route_tuple[0], route_tuple[1])
def _delete_gateway_routes(self, dest_cidr_blocks):
""""
Delete the routes to destination CIDR blocks from all the subnets' route tables.
"""
if self.centralized_route_table:
for dest_cidr_block in dest_cidr_blocks:
throttled_call(
self._connection.delete_route,
route_table_id=self.centralized_route_table.id,
destination_cidr_block=dest_cidr_block
)
else:
for dest_cidr_block in dest_cidr_blocks:
for disco_subnet in self.disco_subnets.values():
disco_subnet.delete_route(dest_cidr_block)
def update_gateways_and_routes(self, desired_route_tuples, dry_run=False):
"""
Update gateways and routes to them in the meta network so that they conform to
the new routes being passed in. Each new route is a tuple that contains 2 values:
desired_route_tuples[0]: destination CIDR block
desired_route_tuples[1]: gateway ID
"""
desired_route_tuples = set(desired_route_tuples) if desired_route_tuples else set()
# Getting the routes currently in the route table(s)
current_route_tuples = set()
if self.centralized_route_table:
for route in self.centralized_route_table.routes:
if route.destination_cidr_block and \
route.gateway_id and route.gateway_id != 'local':
current_route_tuples.add((route.destination_cidr_block, route.gateway_id))
else:
# Only need to get from one subnet since they are the same
for route in self.disco_subnets.values()[0].route_table['Routes']:
if route.get('DestinationCidrBlock') and \
route.get('GatewayId') and route.get('GatewayId') != 'local':
current_route_tuples.add(
(route['DestinationCidrBlock'], route['GatewayId']))
current_cidrs = set([route_tuple[0] for route_tuple in current_route_tuples])
desired_cidrs = set([route_tuple[0] for route_tuple in desired_route_tuples])
common_cidrs = current_cidrs & desired_cidrs
routes_to_replace = set([(common_cidr, route_tuple[1])
for common_cidr in common_cidrs
for route_tuple in desired_route_tuples
if common_cidr == route_tuple[0]])
# Remove the ones that are the same as in the current routes
routes_to_replace -= current_route_tuples
routes_to_be_replaced = set([(common_cidr, route_tuple[1])
for common_cidr in common_cidrs
for route_tuple in current_route_tuples
if common_cidr == route_tuple[0]])
# Remove the ones that are the same as in the desired routes
routes_to_be_replaced -= desired_route_tuples
routes_to_delete = current_route_tuples - desired_route_tuples - routes_to_be_replaced
routes_to_add = desired_route_tuples - current_route_tuples - routes_to_replace
logger.info("Routes to delete: %s", routes_to_delete)
logger.info("Routes to replace existing ones: %s", routes_to_replace)
logger.info("Existing routes to be replaced: %s", routes_to_be_replaced)
logger.info("Routes to add: %s", routes_to_add)
if not dry_run:
self._delete_gateway_routes([route[0] for route in routes_to_delete])
self._replace_gateway_routes(routes_to_replace)
self.add_gateway_routes(routes_to_add)
def _add_gateway_route(self, destination_cidr_block, gateway_id):
""" Add a gateway route to the centralized route table or to all the
subnets' route tables"""
if self.centralized_route_table:
try:
return throttled_call(
self._connection.create_route,
route_table_id=self.centralized_route_table.id,
destination_cidr_block=destination_cidr_block,
gateway_id=gateway_id
)
except EC2ResponseError:
logger.exception("Failed to create route due to conflict. Deleting old route and re-trying.")
throttled_call(
self._connection.delete_route,
self.centralized_route_table.id,
destination_cidr_block
)
new_route = throttled_call(
self._connection.create_route,
route_table_id=self.centralized_route_table.id,
destination_cidr_block=destination_cidr_block,
gateway_id=gateway_id
)
logger.error("Route re-created")
return new_route
else:
# No centralized route table here, so add a route to each disco_subnet
for disco_subnet in self.disco_subnets.values():
if not disco_subnet.add_route_to_gateway(destination_cidr_block, gateway_id):
raise RouteCreationError("Failed to create a route for metanetwork-subnet {0}-{1}:"
"{2} -> {3}".format(self.name,
disco_subnet.name,
destination_cidr_block,
gateway_id))
def _replace_gateway_routes(self, route_tuples):
for route_tuple in route_tuples:
if self.centralized_route_table:
throttled_call(
self._connection.replace_route,
route_table_id=self.centralized_route_table.id,
destination_cidr_block=route_tuple[0],
gateway_id=route_tuple[1]
)
else:
# No centralized route table here, so replace the route in each disco_subnet
for disco_subnet in self.disco_subnets.values():
disco_subnet.replace_route_to_gateway(route_tuple[0], route_tuple[1])
def upsert_nat_gateway_route(self, dest_metanetwork):
""" Add a default route in each of the subnet's route table to the corresponding NAT gateway
of the same AZ in the destination metanetwork if it's not already there. """
self._create_route_table_per_subnet()
for zone in self.disco_subnets.keys():
self.disco_subnets[zone].upsert_route_to_nat_gateway(
'0.0.0.0/0',
dest_metanetwork.disco_subnets[zone].nat_gateway['NatGatewayId']
)
def delete_nat_gateway_route(self):
""" Deletes the default route to NAT gateway """
for disco_subnet in self.disco_subnets.values():
disco_subnet.delete_route('0.0.0.0/0')
def get_nat_gateway_metanetwork(self):
""" If this meta network's default route is going to a NAT gateway, returns the name of
the meta network in which the NAT resides. Otherwise, returns None. """
for route in self.disco_subnets.values()[0].route_table['Routes']:
if route.get('NatGatewayId') and route['DestinationCidrBlock'] == '0.0.0.0/0':
try:
nat_gateway = throttled_call(self.boto3_ec2.describe_nat_gateways,
NatGatewayIds=[route['NatGatewayId']])['NatGateways'][0]
except IndexError:
raise RuntimeError("Phantom NatGatewayId {0} found in meta network {1}."
.format(route['NatGatewayId'], self.name))
subnet = throttled_call(self.boto3_ec2.describe_subnets,
SubnetIds=[nat_gateway['SubnetId']])['Subnets'][0]
for tag in subnet['Tags']:
if tag['Key'] == 'meta_network':
return tag['Value']
raise RuntimeError("The meta_network tag is missing in subnet {0}."
.format(subnet['SubnetId']))
return None
def create_peering_route(self, peering_conn_id, cidr):
""" create/update a route between the peering connection and all the subnets.
If a centralized route table is used, add the route there. If not, add the route
to all the subnets. """
if self.centralized_route_table:
peering_routes_for_cidr = [
_ for _ in self.centralized_route_table.routes
if _.destination_cidr_block == cidr
]
if not peering_routes_for_cidr:
logger.info(
'Create routes for (route_table: %s, dest_cidr: %s, connection: %s)',
self.centralized_route_table.id, cidr, peering_conn_id)
throttled_call(
self._connection.create_route,
route_table_id=self.centralized_route_table.id,
destination_cidr_block=cidr,
vpc_peering_connection_id=peering_conn_id
)
else:
logger.info(
'Update routes for (route_table: %s, dest_cidr: %s, connection: %s)',
self.centralized_route_table.id, cidr, peering_conn_id)
throttled_call(
self._connection.replace_route,
route_table_id=self.centralized_route_table.id,
destination_cidr_block=cidr,
vpc_peering_connection_id=peering_conn_id
)
else:
# No centralized route table here, so add a route to each subnet
for disco_subnet in self.disco_subnets.values():
disco_subnet.create_peering_routes(peering_conn_id, cidr)
| 42.881188 | 109 | 0.619949 | 3,119 | 25,986 | 4.878807 | 0.111254 | 0.038115 | 0.052441 | 0.047644 | 0.366235 | 0.291648 | 0.214563 | 0.17763 | 0.155747 | 0.146941 | 0 | 0.006887 | 0.307165 | 25,986 | 605 | 110 | 42.952066 | 0.838314 | 0.166628 | 0 | 0.244019 | 0 | 0 | 0.06811 | 0.001285 | 0 | 0 | 0 | 0.001653 | 0 | 1 | 0.093301 | false | 0 | 0.028708 | 0 | 0.191388 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fbda92d5e5b22dc691a6f5f033c42ec9d5340cb | 602 | py | Python | examples/docs/example1.py | jzuhone/xija | 1e423d0c48056cc4ea9e4993d28e34794c1420fa | [
"BSD-3-Clause"
] | 2 | 2016-01-05T19:20:43.000Z | 2021-06-04T08:23:08.000Z | examples/docs/example1.py | jzuhone/xija | 1e423d0c48056cc4ea9e4993d28e34794c1420fa | [
"BSD-3-Clause"
] | 61 | 2015-02-24T02:27:11.000Z | 2022-03-23T13:52:15.000Z | examples/docs/example1.py | jzuhone/xija | 1e423d0c48056cc4ea9e4993d28e34794c1420fa | [
"BSD-3-Clause"
] | 1 | 2016-01-04T21:08:17.000Z | 2016-01-04T21:08:17.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Example with a single node (ACA CCD temperature) with solar heating
(2 bins).
"""
import xija
name = __file__[:-3]
model = xija.XijaModel(name, start='2015:001', stop='2015:050')
model.add(xija.Node, 'aacccdpt')
model.add(xija.Pitch)
model.add(xija.Eclipse)
model.add(xija.SolarHeat,
node='aacccdpt',
pitch_comp='pitch',
eclipse_comp='eclipse',
P_pitches=[45, 180],
Ps=[0.0, 0.0],
ampl=0.0,
epoch='2010:001',
)
model.write('{}.json'.format(name))
| 20.066667 | 67 | 0.607973 | 84 | 602 | 4.27381 | 0.595238 | 0.089136 | 0.133705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075594 | 0.230897 | 602 | 29 | 68 | 20.758621 | 0.699784 | 0.232558 | 0 | 0 | 0 | 0 | 0.129956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7fbe5ab33f430353b7e3d2a97fe210687b0799f6 | 856 | py | Python | Flask/Graph/flask_graph.py | stanman71/Python | fe442e421362b22f61d05235e835a568d9ce3aef | [
"MIT"
] | 1 | 2019-02-18T18:56:07.000Z | 2019-02-18T18:56:07.000Z | Flask/Graph/flask_graph.py | stanman71/Python | fe442e421362b22f61d05235e835a568d9ce3aef | [
"MIT"
] | null | null | null | Flask/Graph/flask_graph.py | stanman71/Python | fe442e421362b22f61d05235e835a568d9ce3aef | [
"MIT"
] | null | null | null | # https://technovechno.com/creating-graphs-in-python-using-matplotlib-flask-framework-pythonanywhere/
# https://stackoverflow.com/questions/50728328/python-how-to-show-matplotlib-in-flask
from flask import Flask, render_template
from graph import build_graph
app = Flask(__name__)
@app.route('/') # Change URL
def graphs():
    # These coordinates could be stored in a DB
x1 = [0, 1, 2, 3, 4]
y1 = [10, 30, 40, 5, 50]
x2 = [0, 1, 2, 3, 4]
y2 = [50, 30, 20, 10, 50]
x3 = [0, 1, 2, 3, 4]
y3 = [0, 30, 10, 5, 30]
    graph1_url = build_graph(x1, y1)
    graph2_url = build_graph(x2, y2)
    graph3_url = build_graph(x3, y3)
return render_template('graphs.html',
graph1=graph1_url,
graph2=graph2_url,
graph3=graph3_url)
if __name__ == '__main__':
app.debug = True
app.run() | 27.612903 | 102 | 0.625 | 127 | 856 | 4.023622 | 0.496063 | 0.078278 | 0.017613 | 0.023483 | 0.029354 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107903 | 0.231308 | 856 | 31 | 103 | 27.612903 | 0.668693 | 0.272196 | 0 | 0 | 0 | 0 | 0.033956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
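# A minimal sketch of what the imported build_graph presumably does
# (assumption: it renders a matplotlib plot into a base64 data URI that the
# graphs.html template can drop into an <img src="...">):
#
#     import base64, io
#     import matplotlib.pyplot as plt
#
#     def build_graph(x, y):
#         buf = io.BytesIO()
#         plt.figure()
#         plt.plot(x, y)
#         plt.savefig(buf, format='png')
#         plt.close()
#         buf.seek(0)
#         encoded = base64.b64encode(buf.getvalue()).decode('ascii')
#         return 'data:image/png;base64,{}'.format(encoded)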
f68110d06e89b1eedf0ec730b2d3342465b0961c | 2,687 | py | Python | inventory-check.py | jwnichols3/s3-batch-ops-restore-status-check | 0331cdbf68f5b7dc71042aece8d128af4cd523f0 | [
"Apache-2.0"
] | null | null | null | inventory-check.py | jwnichols3/s3-batch-ops-restore-status-check | 0331cdbf68f5b7dc71042aece8d128af4cd523f0 | [
"Apache-2.0"
] | null | null | null | inventory-check.py | jwnichols3/s3-batch-ops-restore-status-check | 0331cdbf68f5b7dc71042aece8d128af4cd523f0 | [
"Apache-2.0"
] | null | null | null | import argparse
import boto3
from botocore.exceptions import ClientError
from urllib.parse import unquote
import time
from smart_open import open
import os
import sys
s3 = boto3.resource('s3')
parser = argparse.ArgumentParser(
description="Analyze Inventory Files")
parser.add_argument('--inventory_file', '-i',
help='The file that has a csv formatted list of inventory to check. The first column of the CSV is the bucket, the second column is the key. This can be an S3 object or local file. It can also be gzipped.')
parser.add_argument('--inventory_directory',
help='A directory with a set of inventories. this will recursively iterate across all folders/files.')
parser.add_argument(
'--env', action='store_true', help="use the AWS environment variables for aws_access_key_id and aws_secret_access_key values")
parser.add_argument(
"--profile", help='Use a specific AWS Profile'
)
args = parser.parse_args()
start_time = time.localtime()
inventory_file = args.inventory_file
inventory_directory = args.inventory_directory
env = args.env
profile = args.profile
object_list = []
response_list = []
object_count = 0
total_records = 0
if (not inventory_file) and (not inventory_directory):
print("--inventory_file or --inventory_directory is required")
exit()
# I'm sure there is a way to do this more elegantly...
# First priority: If --env is specified, use the environment variables
# Second priority: if --profile is specified, use the profile name
# Last priority: if nothing is specified, use the current user
if env:
try:
s3_client = boto3.client(
's3',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY']
)
print(os.environ)
except Exception as err:
print(err)
exit()
elif profile:
boto3.setup_default_session(profile_name=profile)
s3_client = boto3.client('s3')
else:
s3_client = boto3.client('s3')
if inventory_file:
print(f"Analyzing inventory file...")
total_records = len(open(inventory_file).readlines())
print("Number of records in the " + inventory_file +
" inventory file: " + str(total_records))
if inventory_directory:
print("Walking directory " + os.path.abspath(inventory_directory))
for dirpath, dirs, files in os.walk(inventory_directory):
for f in files:
inv_file = dirpath + "/" + f
# print("Inv File: " + inv_file)
records_files = len(open(inv_file).readlines())
# print("Num records in " + inv_file)
print(f + ": " + str(records_files))
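# Illustrative invocations (paths and profile name are placeholders):
#     python inventory-check.py -i s3://my-bucket/inventory/data.csv.gz --profile prod
#     python inventory-check.py --inventory_directory ./inventories --env
# smart_open's open() transparently handles the s3:// scheme and gzip, which is
# why len(open(...).readlines()) works for both local and S3 inventory files.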
| 33.5875 | 226 | 0.694827 | 369 | 2,687 | 4.897019 | 0.363144 | 0.071942 | 0.037631 | 0.023243 | 0.034864 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007989 | 0.208039 | 2,687 | 79 | 227 | 34.012658 | 0.841165 | 0.116859 | 0 | 0.098361 | 0 | 0.016393 | 0.287828 | 0.035503 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.131148 | 0 | 0.131148 | 0.114754 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6827b188401595203a70b0eec5054bb98bd151d | 999 | py | Python | source/faker_extensions/tattoo_provider.py | UKHomeOffice/PythonFakerExtensions | 7b515956e36608a9344ab2a8a57b48387a1be54c | [
"MIT"
] | null | null | null | source/faker_extensions/tattoo_provider.py | UKHomeOffice/PythonFakerExtensions | 7b515956e36608a9344ab2a8a57b48387a1be54c | [
"MIT"
] | null | null | null | source/faker_extensions/tattoo_provider.py | UKHomeOffice/PythonFakerExtensions | 7b515956e36608a9344ab2a8a57b48387a1be54c | [
"MIT"
] | 1 | 2021-04-11T09:14:48.000Z | 2021-04-11T09:14:48.000Z | from enum import Enum
from random import randint
from faker import Faker
from faker_extensions.abstract_providers import WeightedProvider
from faker_extensions.common_categories import Gender
from faker_extensions.distinguishing_features_provider import BodyArea
class TattooProvider(WeightedProvider):
""" Eye wear distribution in the uk """
tattoo_distributions = {
Gender.FEMALE: 0.47,
Gender.MALE: 0.33
}
def __init__(self, generator):
super().__init__(self.tattoo_distributions, generator)
def tattoo(self):
""" Returns if a person has a tattoo and location """
choice = self.get_choice()
random_body_area = BodyArea(randint(1, len(BodyArea.__members__)))
return {choice, random_body_area}
def main():
""" Get tattoo's by gender and distribution """
fake = Faker(['en_UK'])
fake.add_provider(TattooProvider(fake))
tattoo = fake.tattoo()
print(tattoo)
if __name__ == '__main__':
main()
| 27 | 74 | 0.704705 | 120 | 999 | 5.566667 | 0.5 | 0.053892 | 0.085329 | 0.05988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008783 | 0.202202 | 999 | 36 | 75 | 27.75 | 0.82936 | 0.119119 | 0 | 0 | 0 | 0 | 0.015116 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f68b865589b413c178df9c90f4d9076655b75ea3 | 4,036 | py | Python | hw2/test.py | zeynepCankara/NTU_DLCV2019 | 2dc44584ec7b9e1d84e688551eb8cef48d501b45 | [
"MIT"
] | 1 | 2022-01-17T14:28:46.000Z | 2022-01-17T14:28:46.000Z | hw2/test.py | zeynepCankara/NTU_DLCV2019 | 2dc44584ec7b9e1d84e688551eb8cef48d501b45 | [
"MIT"
] | null | null | null | hw2/test.py | zeynepCankara/NTU_DLCV2019 | 2dc44584ec7b9e1d84e688551eb8cef48d501b45 | [
"MIT"
] | 2 | 2021-11-08T19:05:57.000Z | 2022-01-17T14:28:48.000Z | import os
import parser
import models
import data
import data_test
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import accuracy_score
import skimage
from mean_iou_evaluate import mean_iou_score
# import torch library
import torch
# models
import simple_baseline_model
import baseline_model
def prediction_labeller(idx):
remain = '0' * (4-len(idx))
idx = remain + idx
return str(idx)
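# Equivalent to str(idx).zfill(4), e.g. prediction_labeller("7") -> "0007".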
# change the hard coded path $2 -> output dir
# output_dir = "./preds/"
def evaluate(model, data_loader, mode = "train", output_dir = "./preds/"):
''' set model to evaluate mode '''
model.eval()
preds = []
gts = []
    with torch.no_grad(): # do not need to calculate information for gradient during eval
cnt = 0
for _, (imgs, gt) in enumerate(data_loader):
imgs = imgs.cuda()
pred = model(imgs)
if mode == "val" or mode == "test":
# save images only during test and validation
for p in pred:
p = torch.argmax(p.squeeze(), dim=0).detach().cpu().numpy()
skimage.io.imsave(os.path.join(output_dir, prediction_labeller(str(cnt)) + ".png"), p)
cnt += 1
pass
else:
# no need to save during training
pass
_, pred = torch.max(pred, dim = 1)
pred = pred.cpu().numpy().squeeze()
gt = gt.numpy().squeeze()
preds.append(pred)
gts.append(gt)
gts = np.concatenate(gts)
preds = np.concatenate(preds)
return mean_iou_score(gts, preds)
def evaluate_test(model, data_loader, output_dir = "./preds/"):
''' set model to evaluate mode '''
model.eval()
preds = []
    with torch.no_grad(): # do not need to calculate information for gradient during eval
cnt = 0
for _, imgs in enumerate(data_loader):
imgs = imgs.cuda()
pred = model(imgs)
# save images only during test and validation
for p in pred:
p = torch.argmax(p.squeeze(), dim=0).detach().cpu().numpy()
skimage.io.imsave(os.path.join(output_dir, prediction_labeller(str(cnt)) + ".png"), p)
cnt += 1
pass
_, pred = torch.max(pred, dim = 1)
pred = pred.cpu().numpy().squeeze()
preds.append(pred)
preds = np.concatenate(preds)
return 0
if __name__ == '__main__':
args = parser.arg_parse()
# get input and output directory
input_dir = args.input_dir
''' setup GPU '''
torch.cuda.set_device(args.gpu)
''' prepare data_loader '''
print('===> prepare data loader ...')
if input_dir == "val_test":
test_loader = torch.utils.data.DataLoader(data.DATA(args, mode='val'),
batch_size=args.test_batch,
num_workers=args.workers,
shuffle=False)
else:
test_loader = torch.utils.data.DataLoader(data_test.DATA_TEST(args, mode='test'),
batch_size=args.test_batch,
num_workers=args.workers,
shuffle=False)
    ''' prepare model '''
if args.model == "simple_baseline":
model = simple_baseline_model.SimpleBaselineModel(args).cuda()
else:
model = baseline_model.BaselineModel(args).cuda()
''' resume save model '''
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint)
if input_dir == "val_test":
acc = evaluate(model, test_loader, mode="val", output_dir = args.output_dir)
print('Testing Accuracy: {}'.format(acc))
else:
_ = evaluate_test(model, test_loader, output_dir = args.output_dir)
| 30.345865 | 106 | 0.557483 | 475 | 4,036 | 4.587368 | 0.267368 | 0.041303 | 0.026159 | 0.015603 | 0.509408 | 0.427719 | 0.427719 | 0.392841 | 0.392841 | 0.392841 | 0 | 0.004456 | 0.332755 | 4,036 | 132 | 107 | 30.575758 | 0.804679 | 0.105302 | 0 | 0.488636 | 0 | 0 | 0.038286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034091 | false | 0.034091 | 0.170455 | 0 | 0.238636 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f68c0aed306b7f3cb69cdebd512e4da33319782f | 7,028 | py | Python | config/custom_components/ir_fan/fan.py | Poeschl/home-assistant-config | 380640bc46b14542866fbf8bbdc4218b2d58b55c | [
"MIT"
] | 7 | 2020-05-29T11:54:36.000Z | 2021-11-20T06:24:31.000Z | config/custom_components/ir_fan/fan.py | Poeschl/home-assistant-config | 380640bc46b14542866fbf8bbdc4218b2d58b55c | [
"MIT"
] | null | null | null | config/custom_components/ir_fan/fan.py | Poeschl/home-assistant-config | 380640bc46b14542866fbf8bbdc4218b2d58b55c | [
"MIT"
] | 1 | 2022-02-17T03:13:52.000Z | 2022-02-17T03:13:52.000Z | import logging
import asyncio
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.fan import (
FanEntity, PLATFORM_SCHEMA, SPEED_OFF, SUPPORT_SET_SPEED, SUPPORT_OSCILLATE, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH)
from homeassistant.const import (
CONF_NAME, STATE_ON)
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.util.percentage import ordered_list_item_to_percentage, percentage_to_ordered_list_item
from .const import DEFAULT_NAME, CONF_REMOTE_ENTITY, CONF_COMMAND_ON_OFF, CONF_COMMAND_SPEED, CONF_COMMAND_OSCILLATE, CONF_COMMAND_DEVICE, \
DEFAULT_DELAY
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_REMOTE_ENTITY): cv.string,
vol.Required(CONF_COMMAND_DEVICE): cv.string,
vol.Required(CONF_COMMAND_ON_OFF): cv.string,
vol.Required(CONF_COMMAND_SPEED): cv.string,
vol.Required(CONF_COMMAND_OSCILLATE): cv.string,
})
FAN_SPEEDS = [SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the IR Fan platform."""
async_add_entities([IrFan(hass, config)])
class IrFan(FanEntity, RestoreEntity):
def __init__(self, hass, config):
self.hass = hass
self._name = config.get(CONF_NAME)
self._remote_entity = config.get(CONF_REMOTE_ENTITY)
self._unique_id = f"{self._remote_entity}_{self._name}"
self._device_name = config.get(CONF_COMMAND_DEVICE)
self._commands = {
CONF_COMMAND_ON_OFF: config.get(CONF_COMMAND_ON_OFF),
CONF_COMMAND_SPEED: config.get(CONF_COMMAND_SPEED),
CONF_COMMAND_OSCILLATE: config.get(CONF_COMMAND_OSCILLATE),
}
self._speed = SPEED_LOW
self._last_on_speed = SPEED_LOW
self._oscillating = False
self._current_speed = SPEED_OFF
self._current_oscillating = True
self._current_on = self.is_on
self._temp_lock = asyncio.Lock()
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
last_state = await self.async_get_last_state()
if last_state is not None:
if 'speed' in last_state.attributes:
self._speed = last_state.attributes['speed']
if 'last_on_speed' in last_state.attributes:
self._last_on_speed = last_state.attributes['last_on_speed']
self._current_speed = self._last_on_speed
if 'oscillating' in last_state.attributes:
self._oscillating = last_state.attributes['oscillating']
self._current_oscillating = self._oscillating
@property
def unique_id(self):
return self._unique_id
@property
def name(self):
return self._name
@property
def supported_features(self):
        return SUPPORT_SET_SPEED | SUPPORT_OSCILLATE
@property
def available(self) -> bool:
return self.hass.states.get(self._remote_entity) is not None and self.hass.states.get(self._remote_entity).state == STATE_ON
@property
def is_on(self):
return self._current_speed != SPEED_OFF
@property
def percentage(self):
if self._current_speed == SPEED_OFF:
return 0
else:
return ordered_list_item_to_percentage(FAN_SPEEDS, self._current_speed)
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
return len(FAN_SPEEDS)
@property
def oscillating(self):
return self._current_oscillating
@property
def last_on_speed(self):
return self._last_on_speed
@property
def device_state_attributes(self) -> dict:
"""Platform specific attributes."""
return {
'last_on_speed': self._last_on_speed,
'remote_entity': self._remote_entity,
}
async def async_set_percentage(self, percentage: int):
"""Set the speed of the fan."""
speed = percentage_to_ordered_list_item(FAN_SPEEDS, percentage)
if percentage > 0:
self._last_on_speed = speed
self._speed = speed
else:
self._speed = SPEED_OFF
await self.send_command()
await self.async_update_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation of the fan."""
self._oscillating = oscillating
await self.send_command()
await self.async_update_ha_state()
async def async_turn_on(self, speed: str = None, **kwargs):
"""Turn on the fan."""
if speed is None:
speed = FAN_SPEEDS[0]
await self.async_set_percentage(ordered_list_item_to_percentage(FAN_SPEEDS, speed))
async def async_turn_off(self):
"""Turn off the fan."""
await self.async_set_percentage(0)
async def send_command(self):
async with self._temp_lock:
speed = self._speed.lower()
last_speed = self._current_speed
oscillating = self._oscillating
last_oscillating = self._current_oscillating
if speed == SPEED_OFF:
if self.is_on:
await self.__send(self._commands[CONF_COMMAND_ON_OFF])
self._last_on_speed = last_speed
self._current_speed = SPEED_OFF
return
else:
if not self.is_on:
await self.__send(self._commands[CONF_COMMAND_ON_OFF])
last_speed = self._last_on_speed
if FAN_SPEEDS.index(speed) > FAN_SPEEDS.index(last_speed):
speed_command_times = FAN_SPEEDS.index(speed) - FAN_SPEEDS.index(last_speed)
elif FAN_SPEEDS.index(speed) < FAN_SPEEDS.index(last_speed):
# Go around the speed wheel by adding 3 to the wanted speed
speed_command_times = FAN_SPEEDS.index(speed) + 3 - FAN_SPEEDS.index(last_speed)
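                # Worked example (assuming a 3-speed remote that cycles
                # low -> medium -> high -> low): going from high (index 2)
                # back to low (index 0) needs 0 + 3 - 2 = 1 button press.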
else:
speed_command_times = 0
for _ in range(speed_command_times):
await self.__send(self._commands[CONF_COMMAND_SPEED])
self._current_speed = speed
if self.is_on and oscillating != last_oscillating:
await self.__send(self._commands[CONF_COMMAND_OSCILLATE])
self._current_oscillating = oscillating
async def __send(self, command):
target = {
'entity_id': self._remote_entity
}
service_data = {
'delay_secs': DEFAULT_DELAY,
'device': self._device_name,
'command': command
}
await self.hass.services.async_call(
'remote', 'send_command', target=target, service_data=service_data)
| 33.788462 | 140 | 0.652675 | 858 | 7,028 | 4.972028 | 0.158508 | 0.048992 | 0.030942 | 0.028129 | 0.367089 | 0.257853 | 0.158462 | 0.094233 | 0.080638 | 0.051102 | 0 | 0.001356 | 0.265509 | 7,028 | 207 | 141 | 33.951691 | 0.825068 | 0.019067 | 0 | 0.147651 | 0 | 0 | 0.025071 | 0.005074 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073826 | false | 0 | 0.060403 | 0.04698 | 0.221477 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f68c414cfe15c93050fbef9ed4e125294af5c073 | 2,405 | py | Python | billingclient/v1/client.py | nubeliu/billingclient | f2539e211ca049f3ddc9ce5680932f8f5eff7434 | [
"Apache-2.0"
] | 1 | 2018-01-04T16:20:51.000Z | 2018-01-04T16:20:51.000Z | billingclient/v1/client.py | nubeliu/billingclient | f2539e211ca049f3ddc9ce5680932f8f5eff7434 | [
"Apache-2.0"
] | null | null | null | billingclient/v1/client.py | nubeliu/billingclient | f2539e211ca049f3ddc9ce5680932f8f5eff7434 | [
"Apache-2.0"
] | null | null | null | # NubeliU Billing SDK
# @author: Sergio Colinas
from stevedore import extension
from billingclient import client as ckclient
from billingclient.openstack.common.apiclient import client
from billingclient.v1 import chart
from billingclient.v1 import core
from billingclient.v1 import metric
from billingclient.v1.rating.gnocchi import client as rating_client
from billingclient.v1 import report
from billingclient.v1 import widget
SUBMODULES_NAMESPACE = 'billing.client.modules'
class Client(object):
"""Client for the Billing v1 API.
:param string endpoint: A user-supplied endpoint URL for the billing
service.
:param function token: Provides token for authentication.
:param integer timeout: Allows customization of the timeout for client
http requests. (optional)
"""
def __init__(self, *args, **kwargs):
"""Initialize a new client for the Billing v1 API."""
self.auth_plugin = (kwargs.get('auth_plugin')
or ckclient.get_auth_plugin(*args, **kwargs))
self.client = client.HTTPClient(
auth_plugin=self.auth_plugin,
region_name=kwargs.get('region_name'),
endpoint_type=kwargs.get('endpoint_type'),
original_ip=kwargs.get('original_ip'),
verify=kwargs.get('verify'),
cert=kwargs.get('cert'),
timeout=kwargs.get('timeout'),
timings=kwargs.get('timings'),
keyring_saver=kwargs.get('keyring_saver'),
debug=kwargs.get('debug'),
user_agent=kwargs.get('user_agent'),
http=kwargs.get('http')
)
self.http_client = client.BaseClient(self.client)
self.status = core.BillingStatusManager(self.http_client)
self.charts = chart.ChartManager(self.http_client)
self.rating = rating_client.Client(self.http_client)
self.metrics = metric.MetricManager(self.http_client)
self.reports = report.ReportManager(self.http_client)
self.widgets = widget.WidgetManager(self.http_client)
self._expose_submodules()
def _expose_submodules(self):
extensions = extension.ExtensionManager(
SUBMODULES_NAMESPACE,
)
for ext in extensions:
client = ext.plugin.get_client(self.http_client)
setattr(self, ext.name, client)
| 38.790323 | 74 | 0.664449 | 274 | 2,405 | 5.69708 | 0.343066 | 0.069186 | 0.071749 | 0.069186 | 0.070468 | 0.03075 | 0 | 0 | 0 | 0 | 0 | 0.004396 | 0.243243 | 2,405 | 61 | 75 | 39.42623 | 0.853297 | 0.167983 | 0 | 0 | 0 | 0 | 0.063136 | 0.011202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.209302 | 0 | 0.27907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6908dcaaa5d2842b23cd9c05cdda3c923e0e6a0 | 3,572 | py | Python | main.py | berkurka/chameleon | ee84a5a7da1e49734ffd885606aa39db73e6ae9d | [
"MIT"
] | null | null | null | main.py | berkurka/chameleon | ee84a5a7da1e49734ffd885606aa39db73e6ae9d | [
"MIT"
] | null | null | null | main.py | berkurka/chameleon | ee84a5a7da1e49734ffd885606aa39db73e6ae9d | [
"MIT"
] | 1 | 2020-11-16T01:07:33.000Z | 2020-11-16T01:07:33.000Z | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# pip install pypdf2
# conda install -c conda-forge pypdf2
# In[ ]:
import os
import re
import pandas as pd
import PyPDF2
# In[ ]:
INP_PATH = './input/'
OUT_PATH = './output/'
# In[ ]:
def load_pdf_files(file_path:str):
'''
    Loads pdf files into PyPDF2.pdf.PdfFileReader objects
    and appends them into a dictionary.
Parameters
----------
file_path : str
Location of pdf files.
Returns
----------
inp_files : Dictionary
Example: {file_1.pdf: PyPDF2.PdfFileReader}
'''
inp_files = {}
for file in os.listdir(file_path):
if file.endswith(".pdf"):
# print('Reading', os.path.join(file_path, file))
loaded_file = open(os.path.join(file_path, file), 'rb')
# Creating a pdf reader object
fileReader = PyPDF2.PdfFileReader(loaded_file)
inp_files[file] = fileReader
return inp_files
# In[ ]:
main_dict = load_pdf_files(INP_PATH)
# # Define Rules
# In[ ]:
rules = {'simple_1' : {'type': 'simple',
'contains': 'CNPJ',
'case_sens': False,
'n_char_before': 10,
'n_char_after': 30,
'matches': 'First'
},
'simple_2' : {'type': 'simple',
'contains': 'CPF',
'case_sens': False,
'n_char_before': 10,
'n_char_after': 30,
'matches': 'All'
},
'regex' : {'type': 'regex',
'pattern': 'taxa.{30}',
'matches': 'All'
},
}
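# Note on the rule schema above: 'simple' rules extract a fixed character
# window around a literal match, while 'regex' rules collect re.findall hits.
# The 'matches' field ('First'/'All') is declared but, as written, the loop
# below collects every match regardless of it.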
# # Function to Process rules
# In[ ]:
dfs = []
for fn in main_dict:
df = pd.DataFrame([],columns=['File name', 'Rule', 'Page', 'text'])
fileReader = main_dict[fn]
pg_count = fileReader.numPages
doc = ''
# Applying rules for each page
for i in range(pg_count):
#Pdf reader
page = fileReader.getPage(i).extractText()
doc = page
doc = re.sub(r"[\n\t\r]*", "", doc)
#Applying rules
for rule in rules:
extract_texts = []
###Simple
if rules[rule]['type'] == 'simple':
word = rules[rule]['contains']
matches = [m.start() for m in re.finditer(word, doc, re.IGNORECASE)]
for m in matches:
start = m - rules[rule]['n_char_before']
end = m + rules[rule]['n_char_after'] + len(word)
extract_texts.append(doc[start: end])
###Regex
elif rules[rule]['type'] == 'regex':
pattern = rules[rule]['pattern']
matches = re.findall(r"{}".format(pattern), doc)
extract_texts.append(matches)
#Adding results to a temporary dataframe
df = pd.DataFrame({"File name":fn,
"Rule":rule,
"Page":i+1,
"text":extract_texts
})
#Adding results to the main dataframe
dfs.append(df)
df_final=pd.concat(dfs)
# In[ ]:
df_final=df_final.reset_index(drop=True)
# In[ ]:
df_final.to_excel(OUT_PATH + 'results.xlsx', index=False)
df_final
# In[ ]:
| 22.049383 | 84 | 0.468085 | 373 | 3,572 | 4.343164 | 0.359249 | 0.018519 | 0.02037 | 0.017284 | 0.101235 | 0.082716 | 0.055556 | 0.055556 | 0.055556 | 0.055556 | 0 | 0.009836 | 0.402296 | 3,572 | 161 | 85 | 22.186335 | 0.748946 | 0.191209 | 0 | 0.119403 | 0 | 0 | 0.120215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.059701 | 0 | 0.089552 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f690fdd47205b08324635e05a16d09bdf0637cc9 | 8,913 | py | Python | pydyn/solvers/RungeKuttaFehlberg.py | rwl/PyDyn | 87f89c63fdb1bc9449c05430dd4265eece771739 | [
"Apache-2.0"
] | 4 | 2017-04-12T05:19:19.000Z | 2021-08-28T18:41:53.000Z | pydyn/solvers/RungeKuttaFehlberg.py | rwl/PyDyn | 87f89c63fdb1bc9449c05430dd4265eece771739 | [
"Apache-2.0"
] | null | null | null | pydyn/solvers/RungeKuttaFehlberg.py | rwl/PyDyn | 87f89c63fdb1bc9449c05430dd4265eece771739 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2009 Stijn Cole
# Copyright (C) 2010-2011 Richard Lincoln
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, finfo, r_
from pydyn.models.exciters.Exciter import Exciter
from pydyn.models.governors.Governor import Governor
from pydyn.models.generators.Generator import Generator
from pydyn.SolveNetwork import SolveNetwork
from pydyn.MachineCurrents import MachineCurrents
EPS = finfo(float).eps
def RungeKuttaFehlberg(t0, Xgen0, Pgen, Vgen0, Xexc0, Pexc, Vexc0, Xgov0, Pgov,
Vgov0, U0, invYbus, gbus, genmodel, excmodel, govmodel, tol, maxstepsize, stepsize):
""" Runge-Kutta Fehlberg ODE solver
@see: U{http://www.esat.kuleuven.be/electa/teaching/matdyn/}
"""
##
# Init
accept = False
facmax = 4
failed = 0
## Runge-Kutta coefficients
# c = [0 1/4 3/8 12/13 1 1/2] not used
    a = array([[0,           0,            0,            0,            0],
               [1/4.,        0,            0,            0,            0],
               [3/32.,       9/32.,        0,            0,            0],
               [1932/2197., -7200/2197.,   7296/2197.,   0,            0],
               [439/216.,   -8,            3680/513.,   -845/4104.,    0],
               [-8/27.,      2,            -3544/2565.,  1859/4104.,  -11/40.]])
b1 = array([25/216., 0, 1408/2565., 2197/4104., -1/5., 0])
    b2 = array([16/135., 0, 6656/12825., 28561/56430., -9/50., 2/55.])
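    # b1 and b2 are the embedded Fehlberg weight pairs: b1 yields the 4th-order
    # solution and b2 the 5th-order one. Their difference (computed below as
    # Xexc62 - Xexc6, etc.) is the local error estimate that drives the
    # adaptive step-size update q = 0.84 * (tol / errest)**(1/4.).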
##
i=0
while accept == False:
i += 1
## K1
# EXCITERS
Kexc1 = Exciter(Xexc0, Pexc, Vexc0, excmodel)
Xexc1 = Xexc0 + stepsize * a[1, 0] * Kexc1
# GOVERNORS
Kgov1 = Governor(Xgov0, Pgov, Vgov0, govmodel)
Xgov1 = Xgov0 + stepsize * a[1, 0] * Kgov1
# GENERATORS
Kgen1 = Generator(Xgen0, Xexc1, Xgov1, Pgen, Vgen0, genmodel)
Xgen1 = Xgen0 + stepsize * a[1, 0] * Kgen1
# Calculate system voltages
U1 = SolveNetwork(Xgen1, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id1, Iq1, Pe1 = MachineCurrents(Xgen1, Pgen, U1[gbus], genmodel)
# Update variables that have changed
Vexc1 = abs(U1[gbus])
Vgen1 = r_[Id1, Iq1, Pe1]
Vgov1 = Xgen1[:, 1]
## K2
# EXCITERS
Kexc2 = Exciter(Xexc1, Pexc, Vexc1, excmodel)
Xexc2 = Xexc0 + stepsize * (a[2, 0] * Kexc1 + a[2, 1] * Kexc2 )
# GOVERNORS
Kgov2 = Governor(Xgov1, Pgov, Vgov1, govmodel)
Xgov2 = Xgov0 + stepsize * (a[2, 0] * Kgov1 + a[2, 1] * Kgov2 )
# GENERATORS
Kgen2 = Generator(Xgen1, Xexc2, Xgov2, Pgen, Vgen1, genmodel)
Xgen2 = Xgen0 + stepsize * (a[2, 0] * Kgen1 + a[2, 1] * Kgen2 )
# Calculate system voltages
U2 = SolveNetwork(Xgen2, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id2, Iq2, Pe2 = MachineCurrents(Xgen2, Pgen, U2[gbus], genmodel)
# Update variables that have changed
Vexc2 = abs(U2[gbus])
Vgen2 = r_[Id2, Iq2, Pe2]
Vgov2 = Xgen2[:, 1]
## K3
# EXCITERS
Kexc3 = Exciter(Xexc2, Pexc, Vexc2, excmodel)
Xexc3 = Xexc0 + stepsize * (a[3, 0] * Kexc1 + a[3, 1] * Kexc2 + a[3, 2] * Kexc3)
# GOVERNORS
Kgov3 = Governor(Xgov2, Pgov, Vgov2, govmodel)
Xgov3 = Xgov0 + stepsize * (a[3, 0] * Kgov1 + a[3, 1] * Kgov2 + a[3, 2] * Kgov3)
# GENERATORS
Kgen3 = Generator(Xgen2, Xexc3, Xgov3, Pgen, Vgen2, genmodel)
Xgen3 = Xgen0 + stepsize * (a[3, 0] * Kgen1 + a[3, 1] * Kgen2 + a[3, 2] * Kgen3)
# Calculate system voltages
U3 = SolveNetwork(Xgen3, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id3, Iq3, Pe3 = MachineCurrents(Xgen3, Pgen, U3[gbus], genmodel)
# Update variables that have changed
Vexc3 = abs(U3[gbus])
Vgen3 = r_[Id3, Iq3, Pe3]
Vgov3 = Xgen3[:, 1]
## K4
# EXCITERS
Kexc4 = Exciter(Xexc3, Pexc, Vexc3, excmodel)
Xexc4 = Xexc0 + stepsize * (a[4, 0] * Kexc1 + a[4, 1] * Kexc2 + a[4, 2] * Kexc3 + a[4, 3] * Kexc4)
# GOVERNORS
Kgov4 = Governor(Xgov3, Pgov, Vgov3, govmodel)
Xgov4 = Xgov0 + stepsize * (a[4, 0] * Kgov1 + a[4, 1] * Kgov2 + a[4, 2] * Kgov3 + a[4, 3] * Kgov4)
# GENERATORS
Kgen4 = Generator(Xgen3, Xexc4, Xgov4, Pgen, Vgen3, genmodel)
Xgen4 = Xgen0 + stepsize * (a[4, 0] * Kgen1 + a[4, 1] * Kgen2 + a[4, 2] * Kgen3 + a[4, 3] * Kgen4)
# Calculate system voltages
U4 = SolveNetwork(Xgen4, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id4, Iq4, Pe4 = MachineCurrents(Xgen4, Pgen, U4[gbus], genmodel)
# Update variables that have changed
Vexc4 = abs(U4[gbus])
Vgen4 = r_[Id4, Iq4, Pe4]
Vgov4 = Xgen4[:, 1]
## K5
# EXCITERS
Kexc5 = Exciter(Xexc4, Pexc, Vexc4, excmodel)
Xexc5 = Xexc0 + stepsize * (a[5, 0] * Kexc1 + a[5, 1] * Kexc2 + a[5, 2] * Kexc3 + a[5, 3] * Kexc4 + a[5, 4] * Kexc5)
# GOVERNORS
Kgov5 = Governor(Xgov4, Pgov, Vgov4, govmodel)
Xgov5 = Xgov0 + stepsize * (a[5, 0] * Kgov1 + a[5, 1] * Kgov2 + a[5, 2] * Kgov3 + a[5, 3] * Kgov4 + a[5, 4] * Kgov5)
# GENERATORS
Kgen5 = Generator(Xgen4, Xexc5, Xgov5, Pgen, Vgen4, genmodel)
Xgen5 = Xgen0 + stepsize * (a[5, 0] * Kgen1 + a[5, 1] * Kgen2 + a[5, 2] * Kgen3 + a[5, 3] * Kgen4 + a[5, 4] * Kgen5)
# Calculate system voltages
U5 = SolveNetwork(Xgen5, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id5, Iq5, Pe5 = MachineCurrents(Xgen5, Pgen, U5[gbus], genmodel)
# Update variables that have changed
Vexc5 = abs(U5[gbus])
Vgen5 = r_[Id5, Iq5, Pe5]
Vgov5 = Xgen5[:, 1]
## K6
# EXCITERS
Kexc6 = Exciter(Xexc5, Pexc, Vexc5, excmodel)
Xexc6 = Xexc0 + stepsize * (b1[0] * Kexc1 + b1[1] * Kexc2 + b1[2] * Kexc3 + b1[3] * Kexc4 + b1[4] * Kexc5 + b1[5] * Kexc6)
# GOVERNORS
Kgov6 = Governor(Xgov5, Pgov, Vgov5, govmodel)
Xgov6 = Xgov0 + stepsize * (b1[0] * Kgov1 + b1[1] * Kgov2 + b1[2] * Kgov3 + b1[3] * Kgov4 + b1[4] * Kgov5 + b1[5] * Kgov6)
# GENERATORS
Kgen6 = Generator(Xgen5, Xexc6, Xgov6, Pgen, Vgen5, genmodel)
Xgen6 = Xgen0 + stepsize * (b1[0] * Kgen1 + b1[1] * Kgen2 + b1[2] * Kgen3 + b1[3] * Kgen4 + b1[4] * Kgen5 + b1[5] * Kgen6)
# Calculate system voltages
U6 = SolveNetwork(Xgen6, Pgen, invYbus, gbus, genmodel)
# Calculate machine currents and power
Id6, Iq6, Pe6 = MachineCurrents(Xgen6, Pgen, U6[gbus], genmodel)
# Update variables that have changed
Vexc6 = abs(U6[gbus])
Vgen6 = r_[Id6, Iq6, Pe6]
Vgov6 = Xgen6[:, 1]
## Second, higher order solution
Xexc62 = Xexc0 + stepsize * (b2[0] * Kexc1 + b2[1] * Kexc2 + b2[2] * Kexc3 + b2[3] * Kexc4 + b2[4] * Kexc5 + b2[5] * Kexc6)
Xgov62 = Xgov0 + stepsize * (b2[0] * Kgov1 + b2[1] * Kgov2 + b2[2] * Kgov3 + b2[3] * Kgov4 + b2[4] * Kgov5 + b2[5] * Kgov6)
Xgen62 = Xgen0 + stepsize * (b2[0] * Kgen1 + b2[1] * Kgen2 + b2[2] * Kgen3 + b2[3] * Kgen4 + b2[4] * Kgen5 + b2[5] * Kgen6)
## Error estimate
Xexc = abs((Xexc62 - Xexc6).T)
Xgov = abs((Xgov62 - Xgov6).T)
Xgen = abs((Xgen62 - Xgen6).T)
errest = max( [max(max(Xexc)), max(max(Xgov)), max(max(Xgen)) ])
if errest < EPS:
errest = EPS
q = 0.84 * (tol / errest)**(1/4.)
if errest < tol:
accept = True
U0 = U6
Vgen0 = Vgen6
Vgov0 = Vgov6
Vexc0 = Vexc6
Xgen0 = Xgen6
Xexc0 = Xexc6
Xgov0 = Xgov6
Pgen0 = Pgen
Pexc0 = Pexc
Pgov0 = Pgov
t = t0
else:
failed += 1
facmax = 1
t = t0
Pgen0 = Pgen
Pexc0 = Pexc
Pgov0 = Pgov
stepsize = min(max(q, 0.1), facmax) * stepsize
return Xgen0, Pgen0, Vgen0, Xexc0, Pexc0, Vexc0, Xgov0, Pgov0, Vgov0, U0, errest, failed, t, stepsize
stepsize = min(max(q, 0.1), facmax) * stepsize
if stepsize > maxstepsize:
stepsize = maxstepsize
return Xgen0, Pgen0, Vgen0, Xexc0, Pexc0, Vexc0, Xgov0, Pgov0, Vgov0, U0, errest, failed, t, stepsize
| 32.889299 | 131 | 0.556042 | 1,150 | 8,913 | 4.303478 | 0.248696 | 0.027278 | 0.026874 | 0.027884 | 0.170944 | 0.170944 | 0.160032 | 0.109113 | 0.096585 | 0.029905 | 0 | 0.115611 | 0.312914 | 8,913 | 270 | 132 | 33.011111 | 0.692521 | 0.177718 | 0 | 0.096 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008 | false | 0 | 0.048 | 0 | 0.072 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f697fcf33036f0447c00db52e0fcc8aab5a7c40c | 3,488 | py | Python | utils/build_nfr_dataset.py | chaiyujin/AudioDVP | 1b7a6bc85bda6df16c9709d08d7b1415b449c584 | [
"MIT"
] | 200 | 2020-11-14T16:23:11.000Z | 2022-03-31T17:40:37.000Z | utils/build_nfr_dataset.py | chaiyujin/AudioDVP | 1b7a6bc85bda6df16c9709d08d7b1415b449c584 | [
"MIT"
] | 36 | 2020-11-15T14:17:51.000Z | 2022-01-04T08:22:43.000Z | utils/build_nfr_dataset.py | chaiyujin/AudioDVP | 1b7a6bc85bda6df16c9709d08d7b1415b449c584 | [
"MIT"
] | 42 | 2020-11-14T16:29:18.000Z | 2022-03-20T01:16:39.000Z | """
Following https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/datasets/combine_A_and_B.py
"""
import os
import cv2
import numpy as np
from tqdm import tqdm
import sys
sys.path.append(".")
from models import networks
from options.options import Options
from utils.util import create_dir, load_coef, get_file_list
if __name__ == '__main__':
opt = Options().parse_args()
create_dir(os.path.join(opt.data_dir, 'mask'))
alpha_list = load_coef(os.path.join(opt.data_dir, 'alpha'))
beta_list = load_coef(os.path.join(opt.data_dir, 'beta'))
delta_list = load_coef(os.path.join(opt.data_dir, 'delta'))
gamma_list = load_coef(os.path.join(opt.data_dir, 'gamma'))
angle_list = load_coef(os.path.join(opt.data_dir, 'rotation'))
translation_list = load_coef(os.path.join(opt.data_dir, 'translation'))
mouth_mask = networks.MouthMask(opt)
for i in tqdm(range(len(alpha_list))):
alpha = alpha_list[i].unsqueeze(0).cuda()
beta = beta_list[i].unsqueeze(0).cuda()
delta = delta_list[i].unsqueeze(0).cuda()
gamma = gamma_list[i].unsqueeze(0).cuda()
rotation = angle_list[i].unsqueeze(0).cuda()
translation = translation_list[i].unsqueeze(0).cuda()
mask = mouth_mask(alpha, delta, beta, gamma, rotation, translation)
mask = mask.squeeze(0).detach().cpu().permute(1, 2, 0).numpy() * 255.0
mask = cv2.dilate(mask, np.ones((3,3), np.uint8), iterations=4)
cv2.imwrite(os.path.join(opt.data_dir, 'mask', '%05d.png' % (i+1)), mask)
create_dir(os.path.join(opt.data_dir, 'nfr', 'A', 'train'))
create_dir(os.path.join(opt.data_dir, 'nfr', 'B', 'train'))
masks = get_file_list(os.path.join(opt.data_dir, 'mask'))
crops = get_file_list(os.path.join(opt.data_dir, 'crop'))
renders = get_file_list(os.path.join(opt.data_dir, 'render'))
for i in tqdm(range(len(masks))):
mask = cv2.imread(masks[i])
crop = cv2.imread(crops[i])
render = cv2.imread(renders[i])
masked_crop = cv2.bitwise_and(crop, mask)
masked_render = cv2.bitwise_and(render, mask)
cv2.imwrite(os.path.join(opt.data_dir, 'nfr', 'A', 'train', '%05d.png' % (i+1)), masked_crop)
cv2.imwrite(os.path.join(opt.data_dir, 'nfr', 'B', 'train', '%05d.png' % (i+1)), masked_render)
splits = os.listdir(os.path.join(opt.data_dir, 'nfr', 'A'))
for sp in splits:
image_fold_A = os.path.join(os.path.join(opt.data_dir, 'nfr', 'A'), sp)
image_fold_B = os.path.join(os.path.join(opt.data_dir, 'nfr', 'B'), sp)
image_list = os.listdir(image_fold_A)
image_fold_AB = os.path.join(opt.data_dir, 'nfr', 'AB', sp)
if not os.path.isdir(image_fold_AB):
os.makedirs(image_fold_AB)
for n in tqdm(range(len(image_list))):
name_A = image_list[n]
path_A = os.path.join(image_fold_A, name_A)
name_B = name_A
path_B = os.path.join(image_fold_B, name_B)
if os.path.isfile(path_A) and os.path.isfile(path_B):
name_AB = name_A
path_AB = os.path.join(image_fold_AB, name_AB)
im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
im_AB = np.concatenate([im_A, im_B], 1)
cv2.imwrite(path_AB, im_AB)
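# The horizontal concatenation above follows the pix2pix "aligned" dataset
# convention referenced in the module docstring: each AB image holds the
# masked real crop (A) and the masked render (B) side by side.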
| 39.191011 | 106 | 0.639335 | 553 | 3,488 | 3.811935 | 0.195298 | 0.07685 | 0.113852 | 0.117173 | 0.455408 | 0.374288 | 0.343928 | 0.327324 | 0.287002 | 0.078748 | 0 | 0.018712 | 0.203268 | 3,488 | 88 | 107 | 39.636364 | 0.739834 | 0.064794 | 0 | 0 | 0 | 0 | 0.044882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.126984 | 0 | 0.126984 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f69a0402d79e3e6e23987987a39cea442751a085 | 5,650 | py | Python | src/genotype/mutagen/option.py | sash-a/CoDeepNEAT | 3476078a48986107ea1fc1a7ab1b42a55ac4f62f | [
"MIT"
] | 28 | 2019-10-18T06:44:00.000Z | 2021-11-09T18:54:52.000Z | src/genotype/mutagen/option.py | sash-a/CoDeepNEAT | 3476078a48986107ea1fc1a7ab1b42a55ac4f62f | [
"MIT"
] | 2 | 2019-10-21T07:21:52.000Z | 2022-01-12T22:34:03.000Z | src/genotype/mutagen/option.py | sash-a/CoDeepNEAT | 3476078a48986107ea1fc1a7ab1b42a55ac4f62f | [
"MIT"
] | 11 | 2019-07-01T13:01:32.000Z | 2022-03-31T20:18:56.000Z | import random
from typing import Dict, Any, Union, List
from src.genotype.mutagen import mutagen as MutagenFile
from src.genotype.mutagen.mutagen import Mutagen
from src.genotype.neat.operators.mutators.mutation_report import MutationReport
class _Null:
"""Default current value, allows for an option to be None"""
pass
class Option(Mutagen):
def __init__(self, name: str, *options, current_value=_Null, submutagens: Dict[Any, Dict[str, Mutagen]] = None,
mutation_chance: float = 0.3, probability_weighting: List[float] = None):
if current_value is _Null or current_value == 'auto':
current_value = random.choice(options)
super().__init__(name, mutation_chance)
self.options = options
if probability_weighting is not None:
self.probability_weightings = probability_weighting
else:
self.probability_weightings = [1] * len(options)
# maps an option value to -> a mapping from subvalue name to -> submutagen
self.submutagens: Dict[Any, Dict[str, Union[Mutagen, Option]]] = submutagens
if current_value not in options:
raise Exception("Current value must be option in list: " + repr(current_value) + " not in " + repr(options))
self.current_value = current_value
def __repr__(self):
out: str = self.name + ": " + repr(self.current_value)
if self.submutagens is None or self.value not in self.submutagens:
return out
out += "\n"
subs = self.submutagens[self.value]
i = 0
for sub in subs:
out += repr(subs[sub]) + ("\t" if i % 2 == 0 else "\n")
i+=1
return out
def get_subvalue(self, subvalue_name):
return self.get_submutagen(subvalue_name).value
def get_submutagen(self, subvalue_name):
if self.submutagens is None:
raise Exception("No submutagens on option: " + repr(self.name) + " " + repr(self))
if self.value not in self.submutagens:
print(self.name, "does not have the submutagen", subvalue_name, "does not have any submutagens")
if subvalue_name not in self.submutagens[self.value]:
raise Exception(
self.name + " does not have the submutagen " + subvalue_name + " for value " + repr(self.value))
return self.submutagens[self.value][subvalue_name]
def get_submutagens(self):
if self.submutagens is None:
return []
try:
if self.value not in self.submutagens:
return []
except Exception as e:
print("failed to get submutagens for val",self.value,"subs:",self.submutagens)
raise e
return self.submutagens[self.value].values()
def get_current_value(self):
return self.current_value
def mutate(self) -> MutationReport:
mutation_report = MutationReport()
my_weighting = self.probability_weightings[self.options.index(self())]
my_relative_weighting = my_weighting / sum(self.probability_weightings)
normalised_weighting = len(self.options)*my_relative_weighting
effective_mutation_chance = self.mutation_chance * 1.0/ normalised_weighting
"""
if the probability weightings of an option are not equal, then the mutation rates
should be adjusted such that: if the current option value is weighted less - the option
is more likely to change, and if the current option value is highly weighted - the option
should be less likely to change
"""
if random.random() < effective_mutation_chance:
if len(self.options) < 2:
raise Exception("too few options to mutate")
new_value = random.choices(self.options, weights=self.probability_weightings)[0]
while new_value == self():
new_value = random.choices(self.options, weights=self.probability_weightings)[0]
mutation_report += self.name + " changed from " + repr(self.current_value) + " to " + repr(new_value)
self.current_value = new_value
return mutation_report + self.mutate_sub_mutagens()
def mutate_sub_mutagens(self) -> MutationReport:
mutation_report = MutationReport()
for sub in self.get_submutagens():
mutation_report += sub.mutate()
return mutation_report
def set_value(self, value):
if value not in self.options:
raise InvalidOptionException("trying to set the value of the " + self.name + " mutagen to "
+ repr(value) + " which is not in the options: " + repr(self.options))
self.current_value = value
def set_sub_value(self, submutagen_name, value):
self.get_submutagen(submutagen_name).set_value(value)
def interpolate(self, other: Mutagen):
return Option(self.name, *self.options,
current_value=random.choice([self.current_value, other.get_current_value()]),
submutagens=interpolate_submutagens(self, other))
class InvalidOptionException(Exception):
pass
def interpolate_submutagens(mutagen_a: Option, mutagen_b: Option):
subs = {}
if mutagen_a.submutagens is None:
return subs
for val in mutagen_a.submutagens.keys():
subs[val] = {}
for name in mutagen_a.submutagens[val].keys():
subs[val][name] = MutagenFile.interpolate(mutagen_a.submutagens[val][name],
mutagen_b.submutagens[val][name])
return subs
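# Hypothetical usage sketch (not part of the original module; the option
# values below are illustrative) showing the weighted mutation behaviour
# documented in Option.mutate: with weightings [3, 1], the weight-3 value has
# a normalised weighting of 2 * (3/4) = 1.5, so its effective mutation chance
# is mutation_chance / 1.5, while the weight-1 value mutates three times as
# readily.
if __name__ == '__main__':
    opt = Option('activation', 'relu', 'tanh',
                 mutation_chance=0.3, probability_weighting=[3, 1])
    report = opt.mutate()
    print(opt.get_current_value(), report)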
| 37.417219 | 120 | 0.641239 | 683 | 5,650 | 5.155198 | 0.18448 | 0.064754 | 0.031809 | 0.015905 | 0.182335 | 0.105368 | 0.091167 | 0.061914 | 0.061914 | 0.036921 | 0 | 0.002906 | 0.269204 | 5,650 | 150 | 121 | 37.666667 | 0.849843 | 0.022655 | 0 | 0.161616 | 0 | 0 | 0.065335 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0.020202 | 0.050505 | 0.030303 | 0.333333 | 0.020202 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f69a6a8b42d76e0e9d08c5bd6c7c15447c9d0847 | 1,351 | py | Python | solutions/0169-majority-element/majority-element.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0169-majority-element/majority-element.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | solutions/0169-majority-element/majority-element.py | iFun/Project-G | d33b3b3c7bcee64f93dc2539fd9955a27f321d96 | [
"MIT"
] | null | null | null | # Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always exist in the array.
#
# Example 1:
#
#
# Input: [3,2,3]
# Output: 3
#
# Example 2:
#
#
# Input: [2,2,1,1,1,2,2]
# Output: 2
#
#
#
# @lc app=leetcode id=169 lang=python3
#
# [169] Majority Element
#
# https://leetcode.com/problems/majority-element/description/
#
# algorithms
# Easy (53.44%)
# Likes: 1820
# Dislikes: 153
# Total Accepted: 409.6K
# Total Submissions: 766.5K
# Testcase Example: '[3,2,3]'
#
# Given an array of size n, find the majority element. The majority element is
# the element that appears more than ⌊ n/2 ⌋ times.
#
# You may assume that the array is non-empty and the majority element always
# exist in the array.
#
# Example 1:
#
#
# Input: [3,2,3]
# Output: 3
#
# Example 2:
#
#
# Input: [2,2,1,1,1,2,2]
# Output: 2
#
#
# Check Notes for vote algo
from typing import List


class Solution:
def majorityElement(self, nums: List[int]) -> int:
count = 0
result = nums[0]
for num in nums:
if count == 0:
result = num
count = 1
elif num == result:
count += 1
else:
count -= 1
return result
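# Minimal usage sketch (added; not part of the original submission). The
# Boyer-Moore voting pass above works because each non-majority element can
# cancel at most one occurrence of the majority element, so the surviving
# candidate must be the majority element.
if __name__ == '__main__':
    assert Solution().majorityElement([3, 2, 3]) == 3
    assert Solution().majorityElement([2, 2, 1, 1, 1, 2, 2]) == 2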
| 18.506849 | 129 | 0.592894 | 204 | 1,351 | 3.946078 | 0.397059 | 0.149068 | 0.134161 | 0.034783 | 0.559006 | 0.559006 | 0.559006 | 0.559006 | 0.559006 | 0.559006 | 0 | 0.067358 | 0.285714 | 1,351 | 72 | 130 | 18.763889 | 0.762694 | 0.65433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f69d50b79bb2cf94bbd6f6e08bfe1cc9753de50a | 3,929 | py | Python | tests/st/ops/gpu/test_dropout_nd.py | httpsgithu/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | 1 | 2022-02-23T09:13:43.000Z | 2022-02-23T09:13:43.000Z | tests/st/ops/gpu/test_dropout_nd.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | tests/st/ops/gpu/test_dropout_nd.py | 949144093/mindspore | c29d6bb764e233b427319cb89ba79e420f1e2c64 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
import mindspore.context as context
from mindspore.ops import operations as P
def check_dropout_nd_by_keep_prob(input_x, output, output_mask, keep_prob):
"""
Feature: check mindspore Dropout2D or Dropout3D's output and mask.
    Description: Output shape, mask shape and keep_prob will be checked.
Expectation: match to mindspore Dropout2D or Dropout3D.
"""
# Check input, output, mask all have same shape
assert input_x.shape == output.shape == output_mask.shape
data_type = input_x.dtype
error = 1e-6
if data_type == np.float16:
error = 1e-3
data_shape = input_x.shape
channels = data_shape[0] * data_shape[1]
features = 1
if len(input_x.shape) == 4:
# HW
features = features * data_shape[-2] * data_shape[-1]
else:
# DHW
features = features * data_shape[-3] * data_shape[-2] * data_shape[-1]
if keep_prob == 0.0:
input_x_by_keep_prob = input_x.astype(data_type).reshape(channels, features)
else:
input_x_by_keep_prob = (input_x / keep_prob).astype(data_type).reshape(channels, features)
output_reshape = output.reshape(channels, features)
mask_reshape = output_mask.reshape(channels, features)
# Check each channel is entirely True or False and output match to input_x
for channel in range(channels):
if np.all(output_reshape[channel] == 0):
assert int(np.all(mask_reshape[channel])) == 0
else:
assert np.all(mask_reshape[channel])
np.allclose(input_x_by_keep_prob[channel], output_reshape[channel], error, error)
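# Note (added for clarity): Dropout2D/Dropout3D zero out entire channels, so
# the check above flattens each of the N*C channels to a feature row and
# verifies that every row is either fully kept (scaled by 1/keep_prob) or
# fully zeroed, in agreement with the returned mask.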
class Dropout3DNet(nn.Cell):
def __init__(self, keep_prob):
super(Dropout3DNet, self).__init__()
self.drop3d = P.Dropout3D(keep_prob)
def construct(self, x):
return self.drop3d(x)
class Dropout2DNet(nn.Cell):
def __init__(self, keep_prob):
super(Dropout2DNet, self).__init__()
self.drop2d = P.Dropout2D(keep_prob)
def construct(self, x):
return self.drop2d(x)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("keep_prob", [0.0, 0.4, 1.0])
@pytest.mark.parametrize("data_shape", [(32, 16, 4, 5), (32, 16, 2, 5, 4)])
@pytest.mark.parametrize("data_type", [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64])
def test_dropout_nd(data_shape, data_type, keep_prob):
"""
Feature: Test Dropout2D and Dropout3D.
Description: The input shape is 4d or 5d.
Expectation: check it by function check_dropout_nd_by_keep_prob.
"""
input_data = np.ones(data_shape).astype(data_type)
if len(input_data.shape) == 4:
dropout_nd = Dropout2DNet(keep_prob)
else:
dropout_nd = Dropout3DNet(keep_prob)
output, mask = dropout_nd(Tensor(input_data))
context.set_context(mode=context.GRAPH_MODE)
check_dropout_nd_by_keep_prob(input_data, output.asnumpy(), mask.asnumpy(), keep_prob)
context.set_context(mode=context.PYNATIVE_MODE)
output, mask = dropout_nd(Tensor(input_data))
check_dropout_nd_by_keep_prob(input_data, output.asnumpy(), mask.asnumpy(), keep_prob)
| 39.29 | 114 | 0.694833 | 565 | 3,929 | 4.624779 | 0.304425 | 0.061232 | 0.026789 | 0.034443 | 0.255262 | 0.209721 | 0.166093 | 0.112132 | 0.049751 | 0.049751 | 0 | 0.027162 | 0.18478 | 3,929 | 99 | 115 | 39.686869 | 0.788636 | 0.281497 | 0 | 0.196721 | 0 | 0 | 0.010149 | 0 | 0 | 0 | 0 | 0 | 0.04918 | 1 | 0.098361 | false | 0 | 0.098361 | 0.032787 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f69f531965456d98859d0057da7c70b698a88862 | 4,940 | py | Python | controllers/vessel/vessel_vpn_update.py | gbf-labs/rh-api | 317a812164ad8943ab638c06f61723cb928bfd12 | [
"Apache-2.0"
] | null | null | null | controllers/vessel/vessel_vpn_update.py | gbf-labs/rh-api | 317a812164ad8943ab638c06f61723cb928bfd12 | [
"Apache-2.0"
] | 6 | 2020-03-30T23:11:27.000Z | 2022-03-12T00:21:45.000Z | controllers/vessel/vessel_vpn_update.py | gbf-labs/rh-api | 317a812164ad8943ab638c06f61723cb928bfd12 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=too-many-arguments, too-many-locals, too-many-statements, too-many-instance-attributes, too-many-branches, no-member, no-name-in-module, anomalous-backslash-in-string, too-many-function-args, no-self-use
"""VPN Update"""
import time
import urllib.request
from configparser import ConfigParser
from flask import request
from library.common import Common
from library.couch_database import CouchDatabase
from library.couch_queries import Queries
from library.postgresql_queries import PostgreSQL
from library.config_parser import config_section_parser
class VesselVPNUpdate(Common):
"""Class for VPNUpdate"""
# INITIALIZE
def __init__(self):
"""The Constructor for VPNUpdate class"""
self._couch_db = CouchDatabase()
self.couch_query = Queries()
self.postgres = PostgreSQL()
# INIT CONFIG
self.config = ConfigParser()
# CONFIG FILE
self.config.read("config/config.cfg")
self.vpn_db_build = config_section_parser(self.config, "VPNDB")['build']
super(VesselVPNUpdate, self).__init__()
if self.vpn_db_build.upper() == 'TRUE':
self.my_ip = config_section_parser(self.config, "IPS")['my']
self.my_protocol = config_section_parser(self.config, "IPS")['my_protocol']
self.user_vpn = config_section_parser(self.config, "IPS")['user_vpn']
self.user_protocol = config_section_parser(self.config, "IPS")['user_protocol']
self.vessel_vpn = config_section_parser(self.config, "IPS")['vessel_vpn']
self.vessel_protocol = config_section_parser(self.config, "IPS")['vessel_protocol']
self.vpn_token = '269c2c3706886d94aeefd6e7f7130ab08346590533d4c5b24ccaea9baa5211ec'
def vessel_vpn_update(self):
"""
        This API is for updating a vessel VPN job and retrieving the generated VPN files
---
tags:
- Vessel
produces:
- application/json
parameters:
- name: token
in: header
description: Token
required: true
type: string
- name: jobid
in: header
description: Job ID
required: true
type: string
- name: query
in: body
          description: Updating VPN
required: true
schema:
            id: Updating VPN
properties:
status:
type: string
message:
type: string
directory:
type: string
action:
type: string
ip:
type: string
responses:
500:
description: Error
200:
description: Role
"""
# GET JSON REQUEST
query_json = request.get_json(force=True)
# GET DATA
job_id = request.headers.get('jobid')
token = request.headers.get('token')
print("="*50, " vessel_vpn_update ", "="*50)
print("job_id: ", job_id)
print("token: ", token)
print("query_json: ", query_json)
print("="*50, " vessel_vpn_update ", "="*50)
vnp_server_ip = query_json['ip']
action = query_json['action']
message = query_json['message']
directory = query_json['directory']
status = query_json['status']
created_on = int(time.time())
filename = directory.split("/")[-1]
url = self.my_protocol + '://' + self.vessel_vpn + '/zip_vpn/' + filename
vpn_dir = '/home/admin/all_vpn/' + filename
urllib.request.urlretrieve(url, vpn_dir)
# VESSEL VPN
created_vpn = str(created_on) + "_" + filename
vvpn_dir = '/home/admin/all_vpn/VESSEL_VPN/' + created_vpn
urllib.request.urlretrieve(url, vvpn_dir)
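        # The same archive is fetched twice: once under its original filename
        # and once under a timestamped copy stored in VESSEL_VPN/, presumably
        # to keep a history of issued vessel configurations.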
# UPDATE VESSEL VPN JOB
conditions = []
conditions.append({
"col": "token",
"con": "=",
"val": str(token)
})
conditions.append({
"col": "vessel_vpn_job_id",
"con": "=",
"val": job_id
})
vv_job = {}
vv_job['message'] = message
vv_job['directory'] = directory
vv_job['vnp_server_ip'] = vnp_server_ip
vv_job['status'] = status
vv_job['action'] = action
vv_job['created_on'] = created_on
data = {}
if self.postgres.update('vessel_vpn_job', vv_job, conditions):
data['message'] = "Job successfully updated!"
data['status'] = "ok"
else:
data['message'] = "Invalid query!"
data['status'] = "Failed"
return self.return_data(data)
| 31.666667 | 222 | 0.548583 | 501 | 4,940 | 5.203593 | 0.269461 | 0.037975 | 0.058305 | 0.061757 | 0.157652 | 0.112773 | 0.094361 | 0 | 0 | 0 | 0 | 0.016646 | 0.34332 | 4,940 | 155 | 223 | 31.870968 | 0.786991 | 0.216397 | 0 | 0.108108 | 0 | 0 | 0.156269 | 0.028223 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.121622 | 0 | 0.175676 | 0.067568 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6a31c2011000a082736d9d587f09c391b1c8680 | 900 | py | Python | 5-python-dataviz-notebook/4-matplotlib_decorating_plots.py | reetasharma11/winter2020-code | 6875ca13e4f3d2a95ad36dca386a3a87e8ed08f7 | [
"MIT"
] | 1 | 2020-02-11T04:29:14.000Z | 2020-02-11T04:29:14.000Z | 5-python-dataviz-notebook/4-matplotlib_decorating_plots.py | reetasharma11/winter2020-code | 6875ca13e4f3d2a95ad36dca386a3a87e8ed08f7 | [
"MIT"
] | null | null | null | 5-python-dataviz-notebook/4-matplotlib_decorating_plots.py | reetasharma11/winter2020-code | 6875ca13e4f3d2a95ad36dca386a3a87e8ed08f7 | [
"MIT"
] | 5 | 2020-01-18T21:22:04.000Z | 2020-02-27T23:00:07.000Z | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import os
linear = np.arange(1, 20)
square = linear ** 2
log = np.log(linear)
random = np.random.randint(0, 100, 20)
fig, axes = plt.subplots(2, 1, figsize=(5, 5))
# In order to decorate plots we can set the title, the x/y label and the ticks
axes[0].plot(linear, label='Linear Plot Legend')
axes[0].set_title('Linear Plot')
axes[0].set_xlabel('Index')
# set explicit tick positions so the four labels line up with four ticks
axes[0].set_xticks(range(1, 5))
axes[0].set_xticklabels(['one_x','two_x','three_x','four_x'])
axes[0].set_yticks(range(1, 5))
axes[0].set_yticklabels(['one_y','two_y','three_y','four_y'])
# The function legend will display a legend box with the contents of the label param
axes[0].legend()
# Plotting a separate axe for comparison
axes[1].plot(square)
plt.tight_layout()
os.makedirs('plots/4-matplotlib_decorating_plots', exist_ok=True)
plt.savefig('plots/4-matplotlib_decorating_plots/decorated_plot.png', dpi=300)
plt.close()
| 27.272727 | 84 | 0.735556 | 158 | 900 | 4.06962 | 0.512658 | 0.046656 | 0.049767 | 0.080871 | 0.096423 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033835 | 0.113333 | 900 | 32 | 85 | 28.125 | 0.77193 | 0.244444 | 0 | 0 | 0 | 0 | 0.25 | 0.131657 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6a5f9427bcd35cf17e213bb60dc81bb58e7cdee | 783 | py | Python | forecast/util/univariate_forecast_item.py | jobine/smartAI-plugin | b19709315eccf553518fceed1b369be709b113ce | [
"MIT"
] | null | null | null | forecast/util/univariate_forecast_item.py | jobine/smartAI-plugin | b19709315eccf553518fceed1b369be709b113ce | [
"MIT"
] | null | null | null | forecast/util/univariate_forecast_item.py | jobine/smartAI-plugin | b19709315eccf553518fceed1b369be709b113ce | [
"MIT"
] | null | null | null | from common.util.constant import TIMESTAMP
FORECAST_VALUE = 'forecastValue'
CONFIDENCE = 'confidence'
UPPER_BOUNDARY = 'upperBoundary'
LOWER_BOUNDARY = 'lowerBoundary'
class UnivariateForecastItem:
def __init__(self, forecast_value, lower_boundary, upper_boundary, confidence, timestamp):
self.forecast_value = float(forecast_value)
self.confidence = float(confidence)
self.upper_boundary = float(upper_boundary)
self.lower_boundary = float(lower_boundary)
self.timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
def to_dict(self):
return {FORECAST_VALUE: self.forecast_value, CONFIDENCE: self.confidence,
UPPER_BOUNDARY: self.upper_boundary, LOWER_BOUNDARY: self.lower_boundary, TIMESTAMP: self.timestamp}
| 41.210526 | 116 | 0.739464 | 88 | 783 | 6.318182 | 0.340909 | 0.140288 | 0.091727 | 0.089928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164751 | 783 | 18 | 117 | 43.5 | 0.850153 | 0 | 0 | 0 | 0 | 0 | 0.084291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0.066667 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6a6f96660b59865f95a5c11e6f5b37b62d8c6b8 | 19,911 | py | Python | glycan_profiling/composition_distribution_model/glycome_network_smoothing.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 4 | 2019-04-26T15:47:57.000Z | 2021-04-20T22:53:58.000Z | glycan_profiling/composition_distribution_model/glycome_network_smoothing.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 8 | 2017-11-22T19:20:20.000Z | 2022-02-14T01:49:58.000Z | glycan_profiling/composition_distribution_model/glycome_network_smoothing.py | mstim/glycresoft | 1d305c42c7e6cba60326d8246e4a485596a53513 | [
"Apache-2.0"
] | 3 | 2017-11-21T18:05:28.000Z | 2021-09-23T18:38:33.000Z | import numpy as np
from glypy import GlycanComposition
from glycan_profiling.database.composition_network import (
CompositionGraphNode, NeighborhoodWalker)
from glycan_profiling.task import log_handle
from .constants import (
DEFAULT_LAPLACIAN_REGULARIZATION,
NORMALIZATION,
DEFAULT_RHO,
RESET_THRESHOLD_VALUE)
from .laplacian_smoothing import LaplacianSmoothingModel, ProportionMatrixNormalization
from .graph import (
BlockLaplacian,
assign_network,
network_indices,
weighted_laplacian_matrix)
from .grid_search import (
NetworkReduction,
NetworkTrimmingSearchSolution,
ThresholdSelectionGridSearch)
from .observation import (
GlycanCompositionSolutionRecord,
VariableObservationAggregation)
def _has_glycan_composition(x):
try:
gc = x.glycan_composition
return gc is not None
except AttributeError:
return False
class GlycomeModel(LaplacianSmoothingModel):
"""An implementation of the Glycan Network Smoothing by Laplacian Regularization.
Attributes
----------
block_L: :class:`~.BlockLaplacian`
The block-oriented version of the Weighted Laplacian matrix of :attr:`network`
network: :class:`~.CompositionGraph`
A graph of glycan compositions that has been annotated by their observation confidence.
This network may be pruned during the course of model fitting. The original network is
always available under :attr:`_network`.
observed_compositions: list of :class:`~.GlycanCompositionSolutionRecord`
The observed and scored glycan compositions. These may not be unique, and will be
summarized prior to being projected onto the network by :attr:`observation_aggregator`.
This list may be truncated during the course of model fitting. The original list is always
available under :attr:`_observed_compositions`.
threshold: float
The current threshold applied to the observed score. This value may change during model
fitting.
belongingness_matrix: np.ndarray[float, ndim=2]
A C by N matrix where C is the number of glycan compositions in the network and N is the
number of neighborhoods over the network, with the value at [i, j] corresponding to how much
C_i belongs to N_j. This value is not normalized, see :attr:`normalized_belongingness_matrix`.
normalized_belongingness_matrix: np.ndarray[float, ndim=2]
A normalized version of :attr:`belongingness_matrix`, which follows the normalization paradigm
given by :attr:`_belongingness_normalization`
A0: :class:`np.ndarray[float, ndim=2]`
The subset of :attr:`normalized_belongingness_matrix` corresponding to :attr:`observed_compositions`
Am: :class:`np.ndarray[float, ndim=2]`
The subset of :attr:`normalized_belongingness_matrix` corresponding to nodes in :attr:`network` that
are not in :attr:`observed_compositions`
S0: :class:`np.ndarray[float]`
The summarized scores of the observed nodes in :attr:`network`
C0: list of :class:`CompositionGraphNode`
The observed nodes in :attr:`network`
obs_ix: :class:`np.ndarray[int]`
The indices into :attr:`network` that were observed
miss_ix: :class:`np.ndarray[int]`
The indices into :attr:`network` that were not observed
summarized_state: :class:`~.ObservationWeightState`
A helper intermediary which holds the transformation from :attr:`observed_compositions`
to :attr:`S0`, :attr:`variance_matrix` and :attr:`inverse_variance_matrix`
variance_matrix: :class:`np.ndarray[float, ndim=2]`
The variance of the observed scores, as estimated by :attr:`observation_aggregator`
from :attr:`observed_compositions`.
inverse_variance_matrix: :class:`np.ndarray[float, ndim=2]`
The inverse of :attr:`variance_matrix`, computed separately for efficiency.
"""
def __init__(self, observed_compositions, network, belongingness_matrix=None,
regularize=DEFAULT_LAPLACIAN_REGULARIZATION,
belongingness_normalization=NORMALIZATION,
observation_aggregator=VariableObservationAggregation):
self.observation_aggregator = observation_aggregator
observed_compositions = [
o for o in observed_compositions if _has_glycan_composition(o) and o.score > 0]
self._observed_compositions = observed_compositions
self._configure_with_network(network)
if len(self.miss_ix) == 0:
self._network.add_node(CompositionGraphNode(GlycanComposition(), -1), reindex=True)
self._configure_with_network(self._network)
self.block_L = BlockLaplacian(self.network, regularize=regularize)
self.threshold = self.block_L.threshold
# Initialize Names
self.normalized_belongingness_matrix = None
self.A0 = None
self._belongingness_normalization = None
self.S0 = np.array([])
self.C0 = []
self.variance_matrix = None
self.inverse_variance_matrix = None
# Expensive Step
if belongingness_matrix is None:
self.belongingness_matrix = self.build_belongingness_matrix()
else:
self.belongingness_matrix = np.array(belongingness_matrix)
# Normalize and populate
self.normalize_belongingness(belongingness_normalization)
self._populate(self._observed_compositions)
def _configure_with_network(self, network):
self._network = network
self.network = assign_network(network.clone(), self._observed_compositions)
self.neighborhood_walker = NeighborhoodWalker(self.network)
self.neighborhood_names = self.neighborhood_walker.neighborhood_names()
self.node_names = [str(node) for node in self._network]
self.obs_ix, self.miss_ix = network_indices(self.network)
def __reduce__(self):
return self.__class__, (
self._observed_compositions, self._network, self.belongingness_matrix,
self.block_L.regularize, self._belongingness_normalization,
self.observation_aggregator)
def _populate(self, observations):
var_agg = self.observation_aggregator(self._network)
var_agg.collect(observations)
aggregated_observations, summarized_state = var_agg.build_records()
self.network = assign_network(self._network.clone(), aggregated_observations)
self.obs_ix, self.miss_ix = network_indices(self.network)
self.A0 = self.normalized_belongingness_matrix[self.obs_ix, :]
self.Am = self.normalized_belongingness_matrix[self.miss_ix, :]
self.S0 = np.array([g.score for g in self.network[self.obs_ix]])
self.C0 = ([g for g in self.network[self.obs_ix]])
self.summarized_state = summarized_state
self.variance_matrix = np.diag(summarized_state.variance_matrix[self.obs_ix, self.obs_ix])
self.inverse_variance_matrix = np.diag(summarized_state.inverse_variance_matrix[self.obs_ix, self.obs_ix])
def set_threshold(self, threshold):
accepted = [
g for g in self._observed_compositions if g.score > threshold]
if len(accepted) == 0:
raise ValueError("Threshold %f produces an empty observed set" % (threshold,))
self._populate(accepted)
self.block_L = BlockLaplacian(self.network, threshold=threshold, regularize=self.block_L.regularize)
self.threshold = self.block_L.threshold
def reset(self):
self.set_threshold(RESET_THRESHOLD_VALUE)
def normalize_belongingness(self, method=NORMALIZATION):
self.normalized_belongingness_matrix = ProportionMatrixNormalization.normalize(
self.belongingness_matrix, method)
self._belongingness_normalization = method
self.A0 = self.normalized_belongingness_matrix[self.obs_ix, :]
def apply_belongingness_patch(self):
updated_belongingness = self.get_belongingness_patch()
self.normalized_belongingness_matrix = updated_belongingness
self.A0 = self.normalized_belongingness_matrix[self.obs_ix, :]
def remove_belongingness_patch(self):
self.normalized_belongingness_matrix = ProportionMatrixNormalization.normalize(
self.belongingness_matrix, self._belongingness_normalization)
self.A0 = self.normalized_belongingness_matrix[self.obs_ix, :]
def sample_tau(self, rho, lmda):
sigma_est = np.std(self.S0)
mu_tau = self.estimate_tau_from_S0(rho, lmda)
return np.random.multivariate_normal(mu_tau, np.eye(len(mu_tau)).dot(sigma_est ** 2))
def sample_phi_given_tau(self, tau, lmda):
return np.random.multivariate_normal(self.A0.dot(tau), (1. / lmda) * self.L_oo_inv)
def find_optimal_lambda(self, rho, lambda_max=1, step=0.01, threshold=0.0001, fit_tau=True,
drop_missing=True, renormalize_belongingness=NORMALIZATION):
obs = []
missed = []
network = self.network.clone()
for node in network:
if node.score < threshold:
missed.append(node)
node.marked = True
else:
obs.append(node.score)
lambda_values = np.arange(0.01, lambda_max, step)
press = []
if drop_missing:
for node in missed:
network.remove_node(node, limit=5)
# The network passed into LaplacianSmoothingModel will have its indices changed,
# and will not match the ordering of the belongingness matrix, so make sure the
# observed indices are aligned.
obs_ix, _miss_ix = network_indices(network)
wpl = weighted_laplacian_matrix(network)
lum = LaplacianSmoothingModel(
network, self.normalized_belongingness_matrix[obs_ix, :], threshold,
neighborhood_walker=self.neighborhood_walker,
belongingness_normalization=renormalize_belongingness,
variance_matrix=self.variance_matrix)
ident = np.eye(wpl.shape[0])
for lambd in lambda_values:
if fit_tau:
tau = lum.estimate_tau_from_S0(rho, lambd)
else:
tau = np.zeros(self.A0.shape[1])
T = lum.optimize_observed_scores(lambd, lum.A0.dot(tau))
A = ident + lambd * wpl
H = np.linalg.inv(A)
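            # Leave-one-out PRESS via the hat-matrix diagonal: for the linear
            # smoother A = I + lambda * L, the deleted residual at node i is
            # approximated by (obs_i - T_i) / (1 - H_ii), avoiding n refits.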
press_value = sum(
((obs - T) / (1 - (np.diag(H) - np.finfo(float).eps))) ** 2) / len(obs)
press.append(press_value)
return lambda_values, np.array(press)
def find_threshold_and_lambda(self, rho, lambda_max=1., lambda_step=0.02, threshold_start=0.,
threshold_step=0.2, fit_tau=True, drop_missing=True,
renormalize_belongingness=NORMALIZATION):
r'''Iterate over score thresholds and smoothing factors (lambda), sampling points
from the parameter grid and computing the PRESS residual at each point.
This produces a :class:`NetworkReduction` data structure recording the results for
later local maximum detection.
Parameters
----------
rho: float
The scale of the variance of the observed score
lambda_max: float
The maximum value of lambda to consider on the grid
lambda_step: float
The size of the change in lambda at each iteration
threshold_start: float
The minimum observed score threshold to start the grid search at
threshold_step: float
The size of the change in the observed score threshold at each iteration
fit_tau: bool
Whether or not to estimate :math:`\tau` for each iteration when computing
the PRESS
drop_missing: bool
Whether or not to remove nodes from the graph which are not observed above
the threshold, restructuring the graph, which in turn changes the Laplacian.
renormalize_belongingness: str
A string constant which names the belongingness normalization technique to
use.
Returns
-------
:class:`NetworkReduction`:
The recorded grid of sampled points and snapshots of the model at each point
'''
solutions = NetworkReduction()
limit = max(self.S0)
start = max(min(self.S0) - 1e-3, threshold_start)
current_network = self.network.clone()
thresholds = np.arange(start, limit, threshold_step)
last_solution = None
last_raw_observations = None
last_aggregate = None
for i_threshold, threshold in enumerate(thresholds):
if i_threshold % 10 == 0:
log_handle.log("... Threshold = %r (%0.2f%%)" % (
threshold, (100.0 * i_threshold / len(thresholds))))
# Aggregate the raw observations into averaged, variance reduced records
# and annotate the network with these new scores
raw_observations = [c for c in self._observed_compositions if c.score > threshold]
            # Cache keyed on the explicit raw observations used: the threshold
            # step may be smaller than the gap to the next-highest score, and
            # aggregating observations can be expensive. There is no solution to
            # the general problem, as it calls for inverting a potentially large
            # matrix that is only used within this loop.
if raw_observations == last_raw_observations:
observations, summarized_state, obs_ix = last_aggregate # pylint: disable=unpacking-non-sequence
else:
agg = self.observation_aggregator(self.network)
agg.collect(raw_observations)
observations, summarized_state = agg.build_records()
obs_ix = agg.observed_indices()
last_aggregate = (observations, summarized_state, obs_ix)
last_raw_observations = raw_observations
# Extract pre-calculated variance matrices
variance_matrix = summarized_state.variance_matrix
inverse_variance_matrix = summarized_state.inverse_variance_matrix
variance_matrix = np.diag(variance_matrix[obs_ix, obs_ix])
inverse_variance_matrix = np.diag(inverse_variance_matrix[obs_ix, obs_ix])
# clear the scores from the network
current_network = current_network.clone()
for node in current_network:
node.score = 0
node.internal_score = 0
# assign aggregated scores to the network
network = assign_network(current_network, observations)
# Filter the network, marking nodes for removal and recording observed
# nodes for future use.
obs = []
missed = []
for i, node in enumerate(network):
if node.score < threshold:
missed.append(node)
node.marked = True
else:
obs.append(node.score)
if len(obs) == 0:
break
obs = np.array(obs)
press = []
if drop_missing:
# drop nodes whose score does not exceed the threshold
for node in missed:
network.remove_node(node, limit=5)
if last_solution is not None:
# If after pruning the network, no new nodes have been removed,
# the optimal solution won't have changed from previous iteration
# so just reuse the solution
if last_solution.network == network:
current_solution = last_solution.copy()
current_solution.threshold = threshold
solutions[threshold] = current_solution
last_solution = current_solution
current_network = network
continue
wpl = weighted_laplacian_matrix(network)
ident = np.eye(wpl.shape[0])
# The network passed into LaplacianSmoothingModel will have its indices changed,
# and will not match the ordering of the belongingness matrix, so make sure the
# observed indices are aligned.
lum = LaplacianSmoothingModel(
network, self.normalized_belongingness_matrix[obs_ix, :], threshold,
neighborhood_walker=self.neighborhood_walker,
belongingness_normalization=renormalize_belongingness,
variance_matrix=variance_matrix,
inverse_variance_matrix=inverse_variance_matrix)
updates = []
taus = []
lambda_values = np.arange(0.01, lambda_max, lambda_step)
for lambd in lambda_values:
if fit_tau:
tau = lum.estimate_tau_from_S0(rho, lambd)
else:
tau = np.zeros(self.A0.shape[1])
T = lum.optimize_observed_scores(lambd, lum.A0.dot(tau))
A = ident + lambd * wpl
H = np.linalg.inv(A)
diag_H = np.diag(H)
if len(diag_H) != len(T):
diag_H = diag_H[lum.obs_ix]
assert len(diag_H) == len(T)
press_value = sum(
((obs - T) / (1 - (diag_H - np.finfo(float).eps))) ** 2) / len(obs)
press.append(press_value)
updates.append(T)
taus.append(tau)
current_solution = NetworkTrimmingSearchSolution(
threshold, lambda_values, np.array(press), network, np.array(obs),
updates, taus, lum)
solutions[threshold] = current_solution
last_solution = current_solution
current_network = network
return solutions
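# Illustrative sketch (not used by this module) of the leave-one-out identity
# the PRESS loops above rely on: for a linear smoother T = H s with
# H = (I + lambda * L)^-1, the leave-one-out residual at node i is
# (s_i - T_i) / (1 - H_ii), so PRESS can be computed from a single fit.
# `laplacian` stands in for any weighted graph Laplacian.
def _press_example(scores, laplacian, lambd):
    import numpy as np
    H = np.linalg.inv(np.eye(laplacian.shape[0]) + lambd * laplacian)
    smoothed = H.dot(scores)
    residuals = (scores - smoothed) / (1 - np.diag(H))
    return np.mean(residuals ** 2)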
def smooth_network(network, observed_compositions, threshold_step=0.5, apex_threshold=0.95,
belongingness_matrix=None, rho=DEFAULT_RHO, lambda_max=1,
include_missing=False, lmbda=None, model_state=None,
observation_aggregator=VariableObservationAggregation,
belongingness_normalization=NORMALIZATION, annotate_network=True):
convert = GlycanCompositionSolutionRecord.from_chromatogram
observed_compositions = [
convert(o) for o in observed_compositions if _has_glycan_composition(o)]
model = GlycomeModel(
observed_compositions, network,
belongingness_matrix=belongingness_matrix,
observation_aggregator=observation_aggregator,
belongingness_normalization=belongingness_normalization)
log_handle.log("... Begin Model Fitting")
if model_state is None:
reduction = model.find_threshold_and_lambda(
rho=rho, threshold_step=threshold_step,
lambda_max=lambda_max)
if len(reduction) == 0:
log_handle.log("... No Network Reduction Found")
return None, None, None
search = ThresholdSelectionGridSearch(model, reduction, apex_threshold)
params = search.average_solution(lmbda=lmbda)
if params is None:
log_handle.log("... No Acceptable Solution. Could not fit model.")
return None, None, None
else:
search = ThresholdSelectionGridSearch(model, None, apex_threshold)
model_state.reindex(model)
params = model_state
if lmbda is not None:
params.lmbda = lmbda
if annotate_network:
log_handle.log("... Projecting Solution Onto Network")
annotated_network = search.annotate_network(params, include_missing=include_missing)
else:
annotated_network = None
return annotated_network, search, params
| 46.412587 | 114 | 0.657626 | 2,244 | 19,911 | 5.640374 | 0.175579 | 0.046536 | 0.034368 | 0.02868 | 0.357273 | 0.292328 | 0.240657 | 0.232994 | 0.205815 | 0.166153 | 0 | 0.006121 | 0.2697 | 19,911 | 428 | 115 | 46.521028 | 0.864315 | 0.266134 | 0 | 0.280576 | 0 | 0 | 0.014686 | 0 | 0 | 0 | 0 | 0 | 0.003597 | 1 | 0.053957 | false | 0 | 0.032374 | 0.007194 | 0.125899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6b06e24315b971b76d107d8572119071dcbbe2e | 7,230 | py | Python | heredity/heredity.py | immortal-zeus/CS50-AI_Projects | cac627ea75650960ddc22f9dd60777e3ee64eacf | [
"BSD-3-Clause"
] | 1 | 2021-06-22T06:31:59.000Z | 2021-06-22T06:31:59.000Z | heredity/heredity.py | immortal-zeus/CS50-AI_Projects | cac627ea75650960ddc22f9dd60777e3ee64eacf | [
"BSD-3-Clause"
] | null | null | null | heredity/heredity.py | immortal-zeus/CS50-AI_Projects | cac627ea75650960ddc22f9dd60777e3ee64eacf | [
"BSD-3-Clause"
] | null | null | null | import csv
import itertools
import sys
PROBS = {
# Unconditional probabilities for having gene
"gene": {
2: 0.01,
1: 0.03,
0: 0.96
},
"trait": {
# Probability of trait given two copies of gene
2: {
True: 0.65,
False: 0.35
},
# Probability of trait given one copy of gene
1: {
True: 0.56,
False: 0.44
},
# Probability of trait given no gene
0: {
True: 0.01,
False: 0.99
}
},
# Mutation probability
"mutation": 0.01
}
def main():
# Check for proper usage
if len(sys.argv) != 2:
sys.exit("Usage: python heredity.py data.csv")
people = load_data(sys.argv[1])
# Keep track of gene and trait probabilities for each person
probabilities = {
person: {
"gene": {
2: 0,
1: 0,
0: 0
},
"trait": {
True: 0,
False: 0
}
}
for person in people
}
# Loop over all sets of people who might have the trait
names = set(people)
for have_trait in powerset(names):
# Check if current set of people violates known information
fails_evidence = any(
(people[person]["trait"] is not None and
people[person]["trait"] != (person in have_trait))
for person in names
)
if fails_evidence:
continue
# Loop over all sets of people who might have the gene
for one_gene in powerset(names):
for two_genes in powerset(names - one_gene):
# Update probabilities with new joint probability
p = joint_probability(people, one_gene, two_genes, have_trait)
update(probabilities, one_gene, two_genes, have_trait, p)
# Ensure probabilities sum to 1
normalize(probabilities)
# Print results
for person in people:
print(f"{person}:")
for field in probabilities[person]:
print(f" {field.capitalize()}:")
for value in probabilities[person][field]:
p = probabilities[person][field][value]
print(f" {value}: {p:.4f}")
def load_data(filename):
"""
Load gene and trait data from a file into a dictionary.
File assumed to be a CSV containing fields name, mother, father, trait.
mother, father must both be blank, or both be valid names in the CSV.
trait should be 0 or 1 if trait is known, blank otherwise.
"""
data = dict()
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
name = row["name"]
data[name] = {
"name": name,
"mother": row["mother"] or None,
"father": row["father"] or None,
"trait": (True if row["trait"] == "1" else
False if row["trait"] == "0" else None)
}
return data
def powerset(s):
"""
Return a list of all possible subsets of set s.
"""
s = list(s)
    return [
        set(subset) for subset in itertools.chain.from_iterable(
            itertools.combinations(s, r) for r in range(len(s) + 1)
        )
    ]
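# For example, powerset({"Harry"}) returns [set(), {"Harry"}]; a set of n
# names yields 2 ** n subsets.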
def joint_probability(people, one_gene, two_genes, have_trait):
    """
    Compute and return a joint probability.
    The probability returned should be the probability that
        * everyone in set `one_gene` has one copy of the gene, and
        * everyone in set `two_genes` has two copies of the gene, and
        * everyone not in `one_gene` or `two_genes` does not have the gene, and
        * everyone in set `have_trait` has the trait, and
        * everyone not in set `have_trait` does not have the trait.
    """
    joint_prob_all = 1
    for per in people:
        mother = people[per]["mother"]
        father = people[per]["father"]
        if mother is None and father is None:
            # No parental information: use the unconditional gene distribution.
            genes = 1 if per in one_gene else 2 if per in two_genes else 0
            prob = PROBS["gene"][genes]
        else:
            # Probability that each parent passes a copy of the gene on.
            prob_parent = {mother: 0, father: 0}
            for parent in prob_parent:
                if parent in one_gene:
                    # One of two alleles is passed with probability 0.5, and
                    # mutations in and out cancel: 0.5*(1 - m) + 0.5*m == 0.5
                    prob_parent[parent] = 0.5
                elif parent in two_genes:
                    prob_parent[parent] = 1 - PROBS["mutation"]
                else:
                    prob_parent[parent] = PROBS["mutation"]
            if per in one_gene:
                genes = 1
                prob = (prob_parent[mother] * (1 - prob_parent[father])
                        + prob_parent[father] * (1 - prob_parent[mother]))
            elif per in two_genes:
                genes = 2
                prob = prob_parent[mother] * prob_parent[father]
            else:
                genes = 0
                prob = (1 - prob_parent[mother]) * (1 - prob_parent[father])
        # Condition on whether the person exhibits the trait.
        prob *= PROBS["trait"][genes][per in have_trait]
        joint_prob_all *= prob
    return joint_prob_all
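# Worked example against the PROBS table above: a childless person in
# `one_gene` who exhibits the trait contributes
#     PROBS["gene"][1] * PROBS["trait"][1][True] = 0.03 * 0.56 = 0.0168
# to the joint product.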
def update(probabilities, one_gene, two_genes, have_trait, p):
    """
    Add to `probabilities` a new joint probability `p`.
    Each person should have their "gene" and "trait" distributions updated.
    Which value for each distribution is updated depends on whether
    the person is in `one_gene`/`two_genes` and `have_trait`, respectively.
    """
    for person in probabilities:
        genes = 1 if person in one_gene else 2 if person in two_genes else 0
        probabilities[person]["gene"][genes] += p
        probabilities[person]["trait"][person in have_trait] += p
def normalize(probabilities):
    """
    Update `probabilities` such that each probability distribution
    is normalized (i.e., sums to 1, with relative proportions the same).
    """
    for person in probabilities:
        for field in ("gene", "trait"):
            total = sum(probabilities[person][field].values())
            for val in probabilities[person][field]:
                probabilities[person][field][val] /= total
if __name__ == "__main__":
main()
| 31.572052 | 117 | 0.547303 | 880 | 7,230 | 4.404545 | 0.184091 | 0.083333 | 0.06192 | 0.01548 | 0.2887 | 0.25774 | 0.193756 | 0.160733 | 0.160733 | 0.083849 | 0 | 0.016974 | 0.348133 | 7,230 | 228 | 118 | 31.710526 | 0.805432 | 0.226003 | 0 | 0.25 | 0 | 0 | 0.057735 | 0.003849 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0375 | false | 0 | 0.01875 | 0 | 0.075 | 0.01875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6b0f025ad48f88945b93b850eb2f1bf03a9f188 | 2,977 | py | Python | Lab5_7/Repositories/PickleRepo.py | alexnaiman/Fundamentals-Of-Programming---Lab-assignments | ef066e6036e20b9c686799f507f10e15e50e3285 | [
"MIT"
] | 4 | 2018-02-19T13:57:38.000Z | 2022-01-08T04:10:54.000Z | Lab5_7/Repositories/PickleRepo.py | alexnaiman/Fundamentals-Of-Programming---Lab-assignments | ef066e6036e20b9c686799f507f10e15e50e3285 | [
"MIT"
] | null | null | null | Lab5_7/Repositories/PickleRepo.py | alexnaiman/Fundamentals-Of-Programming---Lab-assignments | ef066e6036e20b9c686799f507f10e15e50e3285 | [
"MIT"
] | null | null | null | import pickle
from Repositories.BaseRepository import Repository
class PickleRepo(Repository):
'''
A generic class for a repository for a given class
'''
def __init__(self, fileName, name):
'''
The constructor of the Repository class
:param fileName: the location of the file we want to read from
:param name: the name of the repository
'''
super().__init__()
self.__fileName = fileName
self.__name = name
    def readAllLines(self):
        """
        You cannot unpickle an empty file
            - EOFError means the file is empty
            - any other error (no file, not accessible and so on) propagates
            - the with-statement makes sure we close the input file,
              regardless of error
        """
        try:
            with open(self.__fileName, "rb") as f:
                self._data = pickle.load(f)
        except EOFError:
            self._data = {}
    def writeAllToFile(self):
        with open(self.__fileName, "wb") as f:
            pickle.dump(self._data, f)
def getItemById(self, itemId):
self.readAllLines()
return self.find(itemId)
def getAllLines(self):
'''
        A function that returns all the lines from the file
:return: a list of lists of form (*params) where params are the attributes of the given class
'''
self.readAllLines()
return self.getAll()
def createItem(self, item):
'''
Create a new item in the repository and adds it to the file as a new line
:param item: object - the item we want to add in the repository
        :return: True if there weren't any errors and the new item was added successfully
'''
self.readAllLines()
self.create(item)
self.writeAllToFile()
return True
def updateItemById(self, itemId, item):
'''
A function that updates an item from the repository by a given id
:param itemId: the id of the item we want to modify
:param item: the item with the new given properties
        :return: True if there weren't any errors and the item was updated successfully
'''
self.readAllLines()
if self.getItemById(itemId) is False:
return False
self.update(item)
self.writeAllToFile()
return True
def deleteItemById(self, itemId):
'''
        A function that deletes an item by a given id
:param itemId: the item's id we want to delete
        :return: True if there weren't any errors and the item was successfully deleted
'''
self.readAllLines()
self.delete(itemId)
self.writeAllToFile()
def __str__(self):
return Repository.__str__(self)
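# Usage sketch -- the method names come from this class, but the create/update
# semantics live in the BaseRepository superclass, so treat the details as
# assumptions:
#   repo = PickleRepo("items.bin", "items")
#   repo.createItem(some_item)      # persists the repository to items.bin
#   print(repo.getAllLines())       # re-reads the file and returns all items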
| 31.336842 | 102 | 0.575076 | 357 | 2,977 | 4.719888 | 0.336134 | 0.024926 | 0.018991 | 0.026706 | 0.189911 | 0.147181 | 0.105638 | 0.077151 | 0.077151 | 0.077151 | 0 | 0 | 0.354384 | 2,977 | 94 | 103 | 31.670213 | 0.876691 | 0.33994 | 0 | 0.266667 | 0 | 0 | 0.002843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.044444 | 0.022222 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6b3328c566ef126d74431b64f1f2183ec8ed890 | 9,565 | py | Python | src/tests/ftest/security/container_security_acl.py | sherintg/daos | a54e347938a0a7b2fe8771c982336f1d28654b1f | [
"BSD-2-Clause-Patent"
] | 2 | 2021-07-14T12:21:50.000Z | 2021-07-14T12:21:52.000Z | src/tests/ftest/security/container_security_acl.py | sherintg/daos | a54e347938a0a7b2fe8771c982336f1d28654b1f | [
"BSD-2-Clause-Patent"
] | null | null | null | src/tests/ftest/security/container_security_acl.py | sherintg/daos | a54e347938a0a7b2fe8771c982336f1d28654b1f | [
"BSD-2-Clause-Patent"
] | 1 | 2021-11-03T05:00:42.000Z | 2021-11-03T05:00:42.000Z | #!/usr/bin/python3
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import os
import security_test_base as secTestBase
from cont_security_test_base import ContSecurityTestBase
from pool_security_test_base import PoolSecurityTestBase
class DaosContainterSecurityTest(ContSecurityTestBase, PoolSecurityTestBase):
# pylint: disable=too-few-public-methods,too-many-ancestors
"""Test daos_container user acls.
:avocado: recursive
"""
def test_container_user_acl(self):
"""
Description:
DAOS-4838: Verify container user security with ACL.
DAOS-4390: Test daos_cont_set_owner
DAOS-4839: Verify container group user with ACL.
DAOS-4840: Verify container user and group access with
ACL grant/remove modification.
DAOS-4841: Verify container ACL works when servers
not sync with client compute hosts.
Test container 5 users enforcement order:
(defined on test.yaml)
OWNER: container owner assigned with the permissions.
user: container user assigned with the permissions.
user-group: container user-group assigned with the permissions.
GROUP: container group assigned with the permissions.
EVERYONE: everyone assigned with the permissions.
Test container user acl permissions:
w - set_container_attribute or data
r - get_container_attribute or data
T - set_container_property
t - get_container_property
a - get_container_acl_list
A - update_container_acl
o - set_container_owner
d - destroy_container
Steps:
(1)Setup
(2)Create pool and container with acl
(3)Verify container permissions rw, rw-attribute
(4)Verify container permissions tT, rw-property
(5)Verify container permissions aA, rw-acl
(6)Verify container permission o, set-owner
(7)Verify container permission d, delete
(8)Cleanup
:avocado: tags=all,full_regression,security,container_acl,
:avocado: tags=cont_user_sec,cont_group_sec,cont_sec
"""
#(1)Setup
self.log.info("(1)==>Setup container user acl test.")
cont_permission, expect_read, expect_write = self.params.get(
"perm_expect", "/run/container_acl/permissions/*")
new_test_user = self.params.get("new_user", "/run/container_acl/*")
new_test_group = self.params.get("new_group", "/run/container_acl/*")
attribute_name, attribute_value = self.params.get(
"attribute", "/run/container_acl/*")
property_name, property_value = self.params.get(
"property", "/run/container_acl/*")
secTestBase.add_del_user(
self.hostlist_clients, "useradd", new_test_user)
secTestBase.add_del_user(
self.hostlist_clients, "groupadd", new_test_group)
acl_file_name = os.path.join(
self.tmp, self.params.get(
"acl_file_name", "/run/container_acl/*", "cont_test_acl.txt"))
test_user = self.params.get(
"testuser", "/run/container_acl/daos_user/*")
test_user_type = secTestBase.get_user_type(test_user)
base_acl_entries = self.get_base_acl_entries(test_user)
if test_user == "user":
test_user = self.current_user
if test_user == "group":
test_user = self.current_group
self.log.info(
"==>(1.1)Start testing container acl on user: %s", test_user)
#(2)Create pool and container with acl
self.log.info("(2)==>Create a pool and a container with acl\n"
" base_acl_entries= %s\n", base_acl_entries)
self.pool_uuid = self.create_pool_with_dmg()
secTestBase.create_acl_file(acl_file_name, base_acl_entries)
self.container_uuid = self.create_container_with_daos(
self.pool, None, acl_file_name)
#(3)Verify container permissions rw, rw-attribute
permission_type = "attribute"
self.log.info("(3)==>Verify container permission %s", permission_type)
self.update_container_acl(
secTestBase.acl_entry(test_user_type, test_user, "rw"))
self.verify_cont_rw_attribute(
"write", "pass", attribute_name, attribute_value)
self.setup_container_acl_and_permission(
test_user_type, test_user, permission_type, cont_permission)
self.log.info(
"(3.1)Verify container_attribute: write, expect: %s", expect_write)
self.verify_cont_rw_attribute(
"write", expect_write, attribute_name, attribute_value)
self.log.info(
"(3.2)Verify container_attribute: read, expect: %s", expect_read)
self.verify_cont_rw_attribute("read", expect_read, attribute_name)
#(4)Verify container permissions tT rw-property
permission_type = "property"
self.log.info("(4)==>Verify container permission tT, rw-property")
self.log.info(
"(4.1)Update container-acl %s, %s, permission_type: %s with %s",
test_user_type, test_user, permission_type, cont_permission)
self.setup_container_acl_and_permission(
test_user_type, test_user, permission_type, cont_permission)
self.log.info(
"(4.2)Verify container_attribute: read, expect: %s", expect_read)
self.verify_cont_rw_property("read", expect_read)
self.log.info(
"(4.3)Verify container_attribute: write, expect: %s", expect_write)
self.verify_cont_rw_property(
"write", expect_write, property_name, property_value)
self.log.info(
"(4.4)Verify container_attribute: read, expect: %s", expect_read)
self.verify_cont_rw_property("read", expect_read)
#(5)Verify container permissions aA, rw-acl
permission_type = "acl"
self.log.info("(5)==>Verify container permission aA, rw-acl ")
self.log.info(
"(5.1)Update container-acl %s, %s, permission_type: %s with %s",
test_user_type, test_user, permission_type, cont_permission)
        expect = "pass"  # User who created the container has full acl access.
self.setup_container_acl_and_permission(
test_user_type, test_user, permission_type, cont_permission)
self.log.info("(5.2)Verify container_acl: write, expect: %s", expect)
self.verify_cont_rw_acl(
"write", expect, secTestBase.acl_entry(
test_user_type, test_user, cont_permission))
self.log.info("(5.3)Verify container_acl: read, expect: %s", expect)
self.verify_cont_rw_acl("read", expect)
#(6)Verify container permission o, set-owner
self.log.info("(6)==>Verify container permission o, set-owner")
permission_type = "ownership"
expect = "deny"
if "w" in cont_permission:
expect = "pass"
self.log.info(
"(6.1)Update container-set ownership %s, %s, permission_type:"
" %s with %s", test_user_type, test_user, permission_type,
cont_permission)
self.setup_container_acl_and_permission(
test_user_type, test_user, permission_type, cont_permission)
self.log.info("(6.2)Verify container_ownership: write, expect: %s",
expect)
self.verify_cont_set_owner(
expect, new_test_user+"@", new_test_group+"@")
#Verify container permission A acl-write after set container
# to a different owner.
if cont_permission == "w":
permission_type = "acl"
expect = "deny"
self.log.info("(6.3)Verify container_acl write after changed "
"ownership: expect: %s", expect)
self.verify_cont_rw_acl("write", expect,
secTestBase.acl_entry(
test_user_type, test_user,
cont_permission))
#(7)Verify container permission d, delete
self.log.info("(7)==>Verify cont-delete on container and pool"
" with/without d permission.")
permission_type = "delete"
c_permission = "rwaAtTod"
p_permission = "rctd"
expect = "pass"
        if "r" not in cont_permission:  # remove d from cont_permission
            c_permission = "rwaAtTo"
        if "w" not in cont_permission:  # remove d from pool_permission
            p_permission = "rct"
        if cont_permission == "":
            expect = "deny"
self.update_container_acl(secTestBase.acl_entry(test_user_type,
test_user,
c_permission))
self.update_pool_acl_entry(self.pool_uuid,
"update",
secTestBase.acl_entry("user",
"OWNER",
p_permission))
self.verify_cont_delete(expect)
#(8)Cleanup
secTestBase.add_del_user(
self.hostlist_clients, "userdel", new_test_user)
secTestBase.add_del_user(
self.hostlist_clients, "groupdel", new_test_group)
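# For reference: the ACL entries exercised above are DAOS ACE strings built by
# secTestBase.acl_entry(); an assumed shape is "A::someuser@:rw" (allow a named
# user read/write). The exact formatting is owned by security_test_base, not
# this test.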
| 46.658537 | 79 | 0.612964 | 1,123 | 9,565 | 4.964381 | 0.15138 | 0.050224 | 0.039462 | 0.034439 | 0.450583 | 0.379193 | 0.35713 | 0.255247 | 0.249507 | 0.249507 | 0 | 0.011868 | 0.295243 | 9,565 | 204 | 80 | 46.887255 | 0.815161 | 0.225196 | 0 | 0.263158 | 0 | 0.015038 | 0.209582 | 0.008762 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007519 | false | 0.030075 | 0.030075 | 0 | 0.045113 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6b3c15f1c2351ca221f6a3291a1eb60723fa6bc | 1,205 | py | Python | get_links.py | jabbalaci/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 73 | 2015-03-31T01:12:26.000Z | 2021-07-10T19:45:04.000Z | get_links.py | doc22940/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 2 | 2017-01-06T17:17:42.000Z | 2017-08-23T18:35:55.000Z | get_links.py | doc22940/Bash-Utils | c6fb115834a221c4aaba8eaa37f650beea45ef29 | [
"MIT"
] | 27 | 2015-01-03T18:51:23.000Z | 2020-11-15T11:49:51.000Z | #!/usr/bin/env python3
"""
Extract all links from a web page
=================================
Author: Laszlo Szathmary, 2011 (jabba.laci@gmail.com)
Website: https://pythonadventures.wordpress.com/2011/03/10/extract-all-links-from-a-web-page/
GitHub: https://github.com/jabbalaci/Bash-Utils
Given a webpage, extract all links.
Usage:
------
./get_links.py <URL>
Last update: 2017-01-08 (yyyy-mm-dd)
"""
import sys
from pathlib import Path
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
user_agent = {'User-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:50.0) Gecko/20100101 Firefox/50.0'}
def process(url):
r = requests.get(url, headers=user_agent)
soup = BeautifulSoup(r.text, "lxml")
    for tag in soup.find_all('a', href=True):
tag['href'] = urljoin(url, tag['href'])
print(tag['href'])
def main():
if len(sys.argv) == 1:
print("Usage: {0} URL [URL]...".format(Path(sys.argv[0]).name))
sys.exit(1)
# else, if at least one parameter was passed
for url in sys.argv[1:]:
process(url)
#############################################################################
if __name__ == "__main__":
main()
| 24.1 | 99 | 0.6 | 169 | 1,205 | 4.207101 | 0.568047 | 0.042194 | 0.063291 | 0.053446 | 0.075949 | 0.075949 | 0.075949 | 0 | 0 | 0 | 0 | 0.048563 | 0.162656 | 1,205 | 49 | 100 | 24.591837 | 0.656095 | 0.365975 | 0 | 0 | 0 | 0.05 | 0.185841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.35 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6b6c0f6b1a695df96a661e8da3cdd75b9963602 | 2,563 | py | Python | texas/views/AnnotationViewSet.py | HeglerTissot/TEXAS | fdcfc4e4042b9c6de49d50324937858d7c1a7c45 | [
"Apache-2.0"
] | null | null | null | texas/views/AnnotationViewSet.py | HeglerTissot/TEXAS | fdcfc4e4042b9c6de49d50324937858d7c1a7c45 | [
"Apache-2.0"
] | null | null | null | texas/views/AnnotationViewSet.py | HeglerTissot/TEXAS | fdcfc4e4042b9c6de49d50324937858d7c1a7c45 | [
"Apache-2.0"
] | 1 | 2021-02-11T15:54:35.000Z | 2021-02-11T15:54:35.000Z | #!/usr/bin/python
#-*- coding: utf-8 -*-
from texas.annotations.AnnotationSet import AnnotationSet
from texas.views.AnnotationView import AnnotationView
from texas.views.CharView import CharView
from texas.views.TokenView import TokenView
from texas.views.SpanView import SpanView
from texas.views.RelationView import RelationView
class AnnotationViewSet:
def __init__(self):
self._anns = {}
    def add(self, pView: AnnotationView):
        if not isinstance(pView, AnnotationView):
            raise Exception("AnnotationViewSet 'pView' parameter class is required to be 'AnnotationView'")
        pViewName = pView.getName()
        if pViewName in self._anns:
            raise Exception("AnnotationViewSet already has an AnnotationView named '" + pViewName + "'")
        self._anns[pViewName] = pView
    def get(self, pViewName: str):
        if pViewName not in self._anns:
            raise Exception("AnnotationView '" + pViewName + "' does NOT exist")
        return self._anns[pViewName]
def size(self):
return len(self._anns)
    def exists(self, pViewName: str):
        return pViewName in self._anns
def TAS(self):
d = {}
for annViewName in self._anns:
d[annViewName] = self._anns[annViewName].TAS()
return d
    def reverse(self, jss: dict):
        self._anns = {}
        if jss is None:
            return
        if not isinstance(jss, dict):
            raise Exception("AnnotationViewSet reverse JSON-Serializable-Schema 'jss' parameter is required to be 'dict'")
        for annViewName in jss:
            annView = jss[annViewName]
            if "type" not in annView:
                raise Exception("Missing 'type' attribute in AnnotationView '" + annViewName + "' during reverse")
if annView["type"].endswith("AnnotationView.CharView"):
self._anns[annViewName] = CharView(annViewName)
elif annView["type"].endswith("AnnotationView.TokenView"):
self._anns[annViewName] = TokenView(annViewName)
elif annView["type"].endswith("AnnotationView.SpanView"):
self._anns[annViewName] = SpanView(annViewName)
elif annView["type"].endswith("AnnotationView.RelationView"):
self._anns[annViewName] = RelationView(annViewName)
else:
self._anns[annViewName] = AnnotationView(annViewName,annView["type"])
self._anns[annViewName].reverse( annView )
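# Usage sketch (constructor signatures for the concrete view classes are
# assumed from reverse() above):
#   views = AnnotationViewSet()
#   views.add(CharView("chars"))
#   snapshot = views.TAS()          # JSON-serializable schema
#   restored = AnnotationViewSet()
#   restored.reverse(snapshot)
#   assert restored.exists("chars")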
| 40.046875 | 123 | 0.633632 | 263 | 2,563 | 6.098859 | 0.269962 | 0.079801 | 0.082918 | 0.082294 | 0.166459 | 0.089776 | 0 | 0 | 0 | 0 | 0 | 0.000532 | 0.266875 | 2,563 | 63 | 124 | 40.68254 | 0.853113 | 0.014436 | 0 | 0.113208 | 0 | 0 | 0.172742 | 0.04794 | 0 | 0 | 0 | 0 | 0 | 1 | 0.132075 | false | 0 | 0.113208 | 0.018868 | 0.377358 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6bd4d588241d1dc77c6591b3c2c3f80f0410cf6 | 2,737 | py | Python | modules/data_handler/__init__.py | tamslo/koala | 9f8bb0e201bd9a773752f1fd70ecbfc2fe98eb5c | [
"MIT"
] | null | null | null | modules/data_handler/__init__.py | tamslo/koala | 9f8bb0e201bd9a773752f1fd70ecbfc2fe98eb5c | [
"MIT"
] | null | null | null | modules/data_handler/__init__.py | tamslo/koala | 9f8bb0e201bd9a773752f1fd70ecbfc2fe98eb5c | [
"MIT"
] | null | null | null | import os
import yaml
import json
import modules.file_utils as file_utils
from .instance_handler import InstanceHandler
from .instances.experiment import Experiment
from .instances.dataset import Dataset
from .cache import Cache
class DataHandler:
def __init__(self, data_directory):
self.experiments_directory = data_directory + "experiments/"
self.datasets_directory = data_directory + "datasets/"
self.error_directory = data_directory + "errored/"
self.reference_directory = data_directory + "references/"
self.experiments = InstanceHandler(self.experiments_directory, Experiment)
self.datasets = InstanceHandler(self.datasets_directory, Dataset)
self.cache = Cache(self.datasets_directory, self.error_directory)
        with open("constants.yml", "r") as constants_file:
            self.constants = yaml.safe_load(constants_file)
def reference_path(self, experiment, alternate_file_ending=None):
reference_id = experiment.get("reference")
file_ending = alternate_file_ending or ".fa"
return self.reference_directory + reference_id + file_ending
def get_references(self):
with open(self.reference_directory + "references.json") as references_file:
return json.load(references_file)
def genome_index_path(self, experiment, aligner):
reference_id = experiment.get("reference")
return self.reference_directory + "{}_{}_index".format(reference_id, aligner)
def clean_up(self):
        # In case of a server stop, clean up references and experiments
for reference in os.listdir(self.reference_directory):
if reference.endswith(".running"):
file_utils.delete(os.path.join(self.reference_directory, reference))
for experiment_id, experiment in self.experiments.all().items():
status = experiment.get("status")
pipeline = experiment.get("pipeline")
error_message = "Server stopped unexpectedly"
errored_action = list(pipeline.keys())[0]
if status == self.constants["experiment"]["WAITING"]:
experiment.mark_error(errored_action, error_message)
if status == self.constants["experiment"]["RUNNING"]:
for action, pipeline_step in pipeline.items():
started = "started" in pipeline_step and pipeline_step["started"]
completed = "completed" in pipeline_step and pipeline_step["completed"]
if started and not completed:
errored_action = action
self.cache.clean_up(experiment, action)
experiment.mark_error(errored_action, error_message)
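# A minimal sketch of what this module expects from constants.yml, inferred
# from the keys used above; the status strings are hypothetical and the real
# file may define more:
#   experiment:
#     WAITING: "waiting"
#     RUNNING: "running"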
| 48.017544 | 91 | 0.679576 | 299 | 2,737 | 6.010033 | 0.270903 | 0.043406 | 0.073456 | 0.026711 | 0.152476 | 0.081247 | 0.048971 | 0 | 0 | 0 | 0 | 0.000476 | 0.232371 | 2,737 | 56 | 92 | 48.875 | 0.854831 | 0.022653 | 0 | 0.083333 | 0 | 0 | 0.080808 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104167 | false | 0 | 0.166667 | 0 | 0.354167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6bfbf21f0ab05b2cba9ffc3d4de3d8629a8d0b9 | 7,512 | py | Python | vmock/methodmock.py | vburenin/vmock | 8af296938328a1178418e479cb60a008111eb1d3 | [
"MIT"
] | 1 | 2015-07-04T05:57:45.000Z | 2015-07-04T05:57:45.000Z | vmock/methodmock.py | vburenin/vmock | 8af296938328a1178418e479cb60a008111eb1d3 | [
"MIT"
] | null | null | null | vmock/methodmock.py | vburenin/vmock | 8af296938328a1178418e479cb60a008111eb1d3 | [
"MIT"
] | null | null | null | """MethodMock class that keeps method data.
"""
import inspect
import functools
from vmock import matchers
from vmock.mockerrors import CallSequenceError
from vmock.mockerrors import InterfaceError
from vmock.mockerrors import UnexpectedCall
class MethodMock(object):
"""Method mock.
Method mock object records all method calls with specific parameters,
and store them in appropriate queue that depends on type of Mock Call.
"""
def __init__(self, func_def, mock_control, display_name):
"""Constructor.
:param func_def: Mocked function definitions.
:param mock_control: Parent MockControl object.
:param display_name: Name that will be displayed as class/module,
name of mocked method/function
"""
# Method/Function argument specification.
self._func_def = func_def
spec = func_def.arg_spec
# If it is class method, let skip first 'self' parameter.
if func_def.kind in ('method', 'class method'):
if func_def.arg_spec.args:
func_def.arg_spec.args.pop(0)
defaults = None if spec.defaults is None else [
0 for _ in spec.defaults]
kwonlydefaults = None if spec.kwonlydefaults is None else {
k: 0 for k in spec.kwonlydefaults}
        # [1:-1] removes the surrounding parentheses.
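        # NOTE: inspect.formatargspec has been deprecated since Python 3.5
        # and was removed in Python 3.11; inspect.signature is the modern
        # replacement.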
txt_args = inspect.formatargspec(
spec.args,
varargs=spec.varargs,
varkw=spec.varkw,
defaults=defaults,
kwonlyargs=spec.kwonlyargs,
kwonlydefaults=kwonlydefaults)[1:-1]
try:
lambda_func = eval('lambda %s: None' % (txt_args,))
except SyntaxError:
# This should never happen unless there is a bug in vmock.
import sys
print(func_def.name, file=sys.stderr)
print('lambda %s: None' % (txt_args,), file=sys.stderr)
raise
self._func = functools.wraps(func_def.func)(lambda_func)
# Parent Mock Control.
self._mc = mock_control
# Name to be displayed for this method mock.
self._display_name = display_name
def __call__(self, *args, **kwargs):
"""Record or execute expected call.
Behavior of MethodMock object call depends on current mode.
In record mode it saves all calls and then reproduces them
in replay mode.
:param args, kwargs: Parameters are variable and depend on mocked
method or function.
"""
# Each mock call records an error if such has happened, since it may be
# handled by function which you are testing. So, each next call of
# other mocks will throw saved error.
self._mc.check_error()
if self._mc.is_recording():
return self._save_call(args, kwargs)
else:
return self._make_call(args, kwargs)
def __str__(self):
if self._display_name:
return '(MethodMock): ' + self._display_name
else:
return '(%s): %s' % (object.__str__(self), self.func_name)
@property
def func_name(self):
"""Mocked method name"""
return self._func_def.name
def _verify_interface(self, args, kwargs):
"""Verify mock call with original function interface.
This method verify that is expected call fits the original
function interface.
:param args: Args how is method mock expected to be called.
:param kwargs: Keyword args how is method mock expected to be called.
"""
if args and isinstance(args[0], matchers.AnyArgsMatcher):
return
try:
self._func(*args, **kwargs)
except TypeError as e:
err_txt = e.args[0].replace('<lambda>', self._func_def.name, 1)
raise InterfaceError(err_txt) from None
def _restore_original(self):
"""Restore original method/function"""
setattr(self._func_def.owner, self.func_name, self._func_def.func)
def _save_call(self, args, kwargs):
"""Save current call"""
self._verify_interface(args, kwargs)
return self._mc.get_new_action(self, args, kwargs)
def _make_call(self, a_args, a_kwargs):
"""Mock call.
Method performs a call of mocked method and returns an appropriate
value. It checks what is going to be call, stub first and mock second.
If there are no such call stub or expected call in the expectation
queue. The CallSequenceError or UnexpectedCall exceptions will be
raised.
        :param a_args: Actual call arguments.
        :param a_kwargs: Actual call keyword arguments.
:return: Expected result.
:raise: CallSequenceError or UnexpectedCall if call is unexpected.
"""
# Find stub first.
e_data = self._mc.find_stub(self, a_args, a_kwargs)
# If there are no stubs, get call from the queue of expectors
if e_data is None:
e_data = self._mc.pop_current_record()
# Failure if there are no stubs and expectors in the queue.
if e_data is None:
error = CallSequenceError(
'No more calls are expected. \n'
'Actual call: %s, with args: %s' %
(str(self), self._args_to_str(a_args, a_kwargs)))
self._mc.raise_error(error)
if e_data.obj != self or not e_data._compare_args(a_args, a_kwargs):
err_str = ('Unexpected method call.\n'
'Expected object: %s\n'
'Expected args: %s\n'
'Actual object: %s\n'
'Actual args: %s\n')
fmt_params = (str(e_data.obj),
self._args_to_str(e_data.args, e_data.kwargs),
str(self), self._args_to_str(a_args, a_kwargs))
error = UnexpectedCall(err_str % fmt_params)
self._mc.raise_error(error)
return e_data._get_result(*a_args, **a_kwargs)
@staticmethod
def _args_to_str(args, kwargs):
"""Format arguments in appropriate way."""
args_str = []
kwargs_str = {}
for arg in args:
if isinstance(arg, int):
args_str.append(arg)
else:
args_str.append(str(arg))
for key in kwargs.keys():
if isinstance(kwargs[key], int):
kwargs_str[key] = kwargs[key]
else:
kwargs_str[key] = str(kwargs[key])
return '(%s, %s)' % (args_str, kwargs_str)
class MethodStub(MethodMock):
"""Used for immediate response after mocking."""
def __call__(self, *args, **kwargs):
self._mc.check_error()
e_data = self._mc.find_static_mock(self, args, kwargs)
if e_data is None:
if self._mc.is_recording():
self._verify_interface(args, kwargs)
return self._mc.get_new_static_action(self, args, kwargs)
else:
self._mc.raise_error(CallSequenceError(
'There is no static mock for this call. \n'
'Actual call: %s, with args: %s' %
(str(self), self._args_to_str(args, kwargs))))
else:
return e_data._get_result(*args, **kwargs)
def redefine(self, *args, **kwargs):
"""Redefine stub action."""
return self._mc.redefine_static_action(self, args, kwargs)
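# A standalone sketch of the interface check MethodMock._verify_interface
# performs, using the modern inspect.signature API instead of a generated
# lambda (illustration only; not used by this module):
def _example_signature_check(func, *args, **kwargs):
    """Return True when *args/**kwargs fit func's signature."""
    import inspect
    try:
        inspect.signature(func).bind(*args, **kwargs)
        return True
    except TypeError:
        return False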
| 34.939535 | 79 | 0.600905 | 942 | 7,512 | 4.605096 | 0.220807 | 0.041494 | 0.029046 | 0.016598 | 0.179806 | 0.067773 | 0.067773 | 0.067773 | 0.067773 | 0.050715 | 0 | 0.001933 | 0.311502 | 7,512 | 214 | 80 | 35.102804 | 0.836814 | 0.283679 | 0 | 0.2 | 0 | 0 | 0.062561 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095652 | false | 0 | 0.06087 | 0 | 0.278261 | 0.017391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6c0e78179bef616d68fd16014e74c5008efe77d | 2,532 | py | Python | in.py | dlinsley/youtube-resource | 82ef203c292a6a6ceb9ac84a9931f1aeec1ebdce | [
"MIT"
] | null | null | null | in.py | dlinsley/youtube-resource | 82ef203c292a6a6ceb9ac84a9931f1aeec1ebdce | [
"MIT"
] | null | null | null | in.py | dlinsley/youtube-resource | 82ef203c292a6a6ceb9ac84a9931f1aeec1ebdce | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from __future__ import unicode_literals
import yt_dlp
import json
import sys
import os
class MyLogger(object):
def __init__(self):
self.vidmeta = {}
def debug(self, msg):
if msg.startswith('{"'):
self.vidmeta = json.loads(msg)
return
print('[DEBUG] '+msg, file=sys.stderr)
return
def warning(self, msg):
print('[WARN] '+msg, file=sys.stderr)
return
def error(self, msg):
print('[ERROR] '+msg, file=sys.stderr)
return
def get_vid_meta(self):
toReturn = []
toReturn.append({'name': 'id','value': self.vidmeta['id']})
toReturn.append({'name': 'uploader','value': self.vidmeta.get('uploader')})
toReturn.append({'name': 'title','value': self.vidmeta.get('title')})
toReturn.append({'name': 'duration','value': str(self.vidmeta.get('duration'))})
toReturn.append({'name': 'view_count','value': str(self.vidmeta.get('view_count'))})
toReturn.append({'name': 'like_count','value': str(self.vidmeta.get('like_count'))})
toReturn.append({'name': 'dislike_count','value': str(self.vidmeta.get('dislike_count'))})
toReturn.append({'name': 'average_rating','value': str(self.vidmeta.get('average_rating'))})
toReturn.append({'name': 'width','value': str(self.vidmeta.get('width'))})
toReturn.append({'name': 'height','value': str(self.vidmeta.get('height'))})
toReturn.append({'name': 'fps','value': str(self.vidmeta.get('fps'))})
toReturn.append({'name': 'ext','value': self.vidmeta.get('ext')})
return toReturn
destination_dir_str = sys.argv[1]
resource_config = json.load(sys.stdin)
os.makedirs(destination_dir_str, exist_ok=True)
os.chdir(destination_dir_str)
ydl_output = MyLogger()
ydl_opts = {
'logger': ydl_output,
'forcejson': True,
'ignoreerrors': True,
'skip_download': False,
'format': '137+140'
}
if 'skip_download' in resource_config['source']:
ydl_opts['skip_download'] = resource_config['source']['skip_download']
if 'format_id' in resource_config['source']:
ydl_opts['format'] = resource_config['source']['format_id']
exit_code = 0
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
ydl.download([resource_config['version']['ref']])
if ydl._download_retcode:
exit_code = ydl._download_retcode
print(json.dumps({'version': resource_config['version'], 'metadata': ydl_output.get_vid_meta()}))
sys.exit(exit_code)
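# Hypothetical example of the Concourse-style payload this script reads on
# stdin (field names match the lookups above; values are made up):
#   {"source": {"format_id": "137+140", "skip_download": false},
#    "version": {"ref": "<video id>"}}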
| 33.315789 | 100 | 0.644945 | 317 | 2,532 | 4.974763 | 0.299685 | 0.097654 | 0.136969 | 0.096386 | 0.205453 | 0.135701 | 0 | 0 | 0 | 0 | 0 | 0.004296 | 0.172591 | 2,532 | 75 | 101 | 33.76 | 0.748449 | 0.008294 | 0 | 0.065574 | 0 | 0 | 0.190837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0.016393 | 0.081967 | 0 | 0.262295 | 0.065574 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6c1cb6598ac1209aec6a6d526452c0ee83bb0ea | 4,579 | py | Python | ossreport/printer/printer.py | craftslab/ossreport | 7c60963af28e9cc22a4c107c58b697e02261d105 | [
"Apache-2.0"
] | null | null | null | ossreport/printer/printer.py | craftslab/ossreport | 7c60963af28e9cc22a4c107c58b697e02261d105 | [
"Apache-2.0"
] | null | null | null | ossreport/printer/printer.py | craftslab/ossreport | 7c60963af28e9cc22a4c107c58b697e02261d105 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import openpyxl
import os
from reportlab.lib import colors
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table
from ossreport.proto.proto import Component, File, Level, Risk
risk_head = {
"A": Risk.__name__.capitalize(),
"B": Level.CRITICAL,
"C": Level.HIGH,
"D": Level.MEDIUM,
"E": Level.LOW,
"F": Level.NONE,
}
component_head = {
"A": Component.COMPONENT,
"B": Component.SOURCE,
"C": Component.MATCH_TYPE,
"D": Component.USAGE,
"E": Component.LICENSE,
"F": Component.SECURITY_RISK,
"G": Component.OPERATIONAL_RISK,
}
file_head = {
"A": File.ID,
"B": File.NAME,
"C": File.LINES,
"D": File.OSS_LINES,
"E": File.MATCHED,
"F": File.PURL,
"G": File.VENDOR,
"H": File.COMPONENT,
"I": File.VERSION,
"J": File.LATEST,
"K": File.URL,
"L": File.RELEASE_DATE,
"M": File.FILE,
"N": File.DEPENDENCIES,
"O": File.LICENSES,
"P": File.VULNERABILITIES,
}
class PrinterException(Exception):
    def __init__(self, info):
        super().__init__(info)
        self._info = info
def __str__(self):
return self._info
class Printer(object):
def __init__(self, risks, components, files, name):
self._risks = risks
self._components = components
self._files = files
self._name = name
def _pdf(self):
def _styling_title(style):
return style["Title"]
def _styling_head(style):
return style["Heading3"]
def _styling_table():
return [
("ALIGN", (0, 0), (-1, -1), "CENTER"),
("FONTSIZE", (0, 0), (-1, 0), 8),
("FONTSIZE", (0, 1), (-1, -1), 6),
("GRID", (0, 0), (-1, -1), 0.1, colors.black),
("VALIGN", (0, 0), (-1, -1), "MIDDLE"),
]
def _write_table(head, data, style):
content = [[head[key] for key in sorted(head.keys())]]
for _, val in data.items():
for v in val:
buf = [v[head[_k]] for _k in sorted(head.keys())]
content.append(buf)
return Table(data=content, style=style, colWidths=["*"])
stylesheet = getSampleStyleSheet()
story = [
Paragraph("SecTrend SCA Report", _styling_title(stylesheet)),
Paragraph("", _styling_head(stylesheet)),
_write_table(risk_head, self._risks, _styling_table()),
Paragraph("", _styling_head(stylesheet)),
_write_table(component_head, self._components, _styling_table()),
]
doc = SimpleDocTemplate(self._name)
doc.build(story)
def _xlsx(self):
def _styling_head(sheet, head):
for item in head.keys():
sheet[item + "1"].alignment = openpyxl.styles.Alignment(
horizontal="center", shrink_to_fit=True, vertical="center"
)
sheet[item + "1"].font = openpyxl.styles.Font(bold=True, name="Calibri")
sheet.freeze_panes = sheet["A2"]
def _styling_content(sheet, head, rows):
for key in head.keys():
for row in range(rows):
sheet[key + str(row + 2)].alignment = openpyxl.styles.Alignment(
horizontal="center", vertical="center"
)
sheet[key + str(row + 2)].font = openpyxl.styles.Font(
bold=False, name="Calibri"
)
def _write_table(book, head, data):
sheet = book.create_sheet()
sheet.append([head[key] for key in sorted(head.keys())])
head_len = 0
for key, val in data.items():
sheet.title = key
for v in val:
buf = [v[head[_k]] for _k in sorted(head.keys())]
head_len = len(buf)
sheet.append(buf)
_styling_head(sheet, head)
_styling_content(sheet, head, head_len)
wb = openpyxl.Workbook()
wb.remove(wb.active)
_write_table(wb, risk_head, self._risks)
_write_table(wb, component_head, self._components)
_write_table(wb, file_head, self._files)
wb.save(filename=self._name)
    def run(self):
        func = Printer.__dict__.get(
            os.path.splitext(self._name)[1].replace(".", "_"), None
        )
        if func is not None:
            func(self)
        else:
            raise PrinterException("unsupported output format: " + self._name)
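# Usage sketch: the output format is selected from the file extension, so
# (assuming risks/components/files follow the dict-of-row-lists shape consumed
# by _write_table above)
#   Printer(risks, components, files, "report.xlsx").run()   # or report.pdf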
| 30.939189 | 88 | 0.540293 | 517 | 4,579 | 4.584139 | 0.305609 | 0.029536 | 0.005063 | 0.027004 | 0.171308 | 0.136709 | 0.05654 | 0.05654 | 0.032068 | 0.032068 | 0 | 0.010611 | 0.320812 | 4,579 | 147 | 89 | 31.14966 | 0.751447 | 0.004586 | 0 | 0.048387 | 0 | 0 | 0.032704 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104839 | false | 0 | 0.048387 | 0.032258 | 0.209677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6c2acd218cca04a11f0f3775ecde0c74f8938eb | 1,179 | py | Python | transformer-openai/parse_output.py | fredriko/tweet-stance-prediction | f12cf924db7c947d5b681c01f492da029c08c415 | [
"MIT"
] | 109 | 2019-01-16T01:47:34.000Z | 2022-03-07T08:02:45.000Z | transformer-openai/parse_output.py | fredriko/tweet-stance-prediction | f12cf924db7c947d5b681c01f492da029c08c415 | [
"MIT"
] | 2 | 2019-11-04T07:15:16.000Z | 2020-09-16T18:45:40.000Z | transformer-openai/parse_output.py | fredriko/tweet-stance-prediction | f12cf924db7c947d5b681c01f492da029c08c415 | [
"MIT"
] | 65 | 2019-01-20T20:51:47.000Z | 2022-03-27T15:50:46.000Z | import pandas as pd
import sys
def output_predictions(test_path, pred_path, out_path, topic):
    test = pd.read_csv(test_path, delimiter='\t', header=0, encoding="latin-1")
if topic is not None:
test = test.loc[test["Target"] == topic].reset_index()
def clean_ascii(text):
# function to remove non-ASCII chars from data
return ''.join(i for i in text if ord(i) < 128)
test['Tweet'] = test['Tweet'].apply(clean_ascii)
#print(test)
pred = pd.read_csv(pred_path, header=0, delimiter='\t')
#print(pred)
pred['prediction'] = pred['prediction'].astype('int64')
df = test.join(pred)
#print(df)
stances = ["AGAINST", "FAVOR", "NONE", "UNKNOWN"]
df["Stance"] = df["prediction"].apply(lambda i: stances[i])
df = df[["index", "Target", "Tweet", "Stance"]]
class_nums = {s: i for i, s in enumerate(stances)}
df.to_csv(out_path, sep='\t', index=False, header=['ID', 'Target', 'Tweet', 'Stance'])
if __name__ == "__main__":
test_path, pred_path, out_path = sys.argv[1:4]
topic = None
if len(sys.argv) > 4:
topic = sys.argv[4]
output_predictions(test_path, pred_path, out_path, topic)
| 39.3 | 90 | 0.631043 | 175 | 1,179 | 4.085714 | 0.411429 | 0.044755 | 0.05035 | 0.067133 | 0.158042 | 0.158042 | 0.125874 | 0.125874 | 0.125874 | 0 | 0 | 0.012672 | 0.196777 | 1,179 | 29 | 91 | 40.655172 | 0.742344 | 0.063613 | 0 | 0 | 0 | 0 | 0.129208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0.043478 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6c39ed6dcaa6f4e4a1e00abaa066ab255bcf56d | 1,333 | py | Python | entifyfishing_client/models/category.py | kairntech/entifyfishing-client | 10a86166cf7895d681fe7e4adf0f01622b48dcb8 | [
"Apache-2.0"
] | null | null | null | entifyfishing_client/models/category.py | kairntech/entifyfishing-client | 10a86166cf7895d681fe7e4adf0f01622b48dcb8 | [
"Apache-2.0"
] | null | null | null | entifyfishing_client/models/category.py | kairntech/entifyfishing-client | 10a86166cf7895d681fe7e4adf0f01622b48dcb8 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="Category")
@attr.s(auto_attribs=True)
class Category:
""" """
category: str
source: Union[Unset, str] = UNSET
weight: Union[Unset, float] = UNSET
page_id: Union[Unset, int] = UNSET
def to_dict(self) -> Dict[str, Any]:
category = self.category
source = self.source
weight = self.weight
page_id = self.page_id
field_dict: Dict[str, Any] = {}
field_dict.update(
{
"category": category,
}
)
if source is not UNSET:
field_dict["source"] = source
if weight is not UNSET:
field_dict["weight"] = weight
if page_id is not UNSET:
field_dict["page_id"] = page_id
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
category = d.pop("category")
source = d.pop("source", UNSET)
weight = d.pop("weight", UNSET)
page_id = d.pop("page_id", UNSET)
category = cls(
category=category,
source=source,
weight=weight,
page_id=page_id,
)
return category
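# Round-trip sketch (illustrative values only):
#   cat = Category(category="Science", source="wikipedia", weight=0.8, page_id=42)
#   assert Category.from_dict(cat.to_dict()).category == "Science"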
| 22.59322 | 63 | 0.546137 | 161 | 1,333 | 4.391304 | 0.254658 | 0.084866 | 0.042433 | 0.063649 | 0.131542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.341335 | 1,333 | 58 | 64 | 22.982759 | 0.805239 | 0 | 0 | 0 | 0 | 0 | 0.047511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6c538bf317f0f8bf220aa7aa6e223706398ea78 | 12,890 | py | Python | gen.py | roj4s/FSRCNN-TensorFlow | 17fec785296a4c125d37432a176d523040a7a26d | [
"MIT"
] | 9 | 2017-10-19T14:20:39.000Z | 2022-02-08T09:45:28.000Z | gen.py | roj4s/FSRCNN-TensorFlow | 17fec785296a4c125d37432a176d523040a7a26d | [
"MIT"
] | 1 | 2019-05-14T08:14:29.000Z | 2019-05-14T08:14:29.000Z | gen.py | roj4s/FSRCNN-TensorFlow | 17fec785296a4c125d37432a176d523040a7a26d | [
"MIT"
] | 7 | 2017-08-06T10:49:46.000Z | 2021-10-03T04:33:39.000Z | import sys
import math
from itertools import islice
radius = 1
def get_line_number(phrase, file_name):
with open(file_name) as f:
for i, line in enumerate(f, 1):
if phrase in line:
return i
return False
def read_weights(file_name, ln, size=1):
content = []
with open(file_name) as f:
for line in islice(f, ln, ln + size):
if line.find('[') != -1:
line = line[line.index('[') + 1:]
if line.find(']') != -1:
line = line[:line.rindex(']')]
content.append(line)
return [x.strip() for x in content]
def format_weights(weights, n, length=4):
return ",".join(['{:.16f}'.format(float(i)) for i in weights.strip(",").split(",")[n:n+length]])
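# Example: format_weights("0.1, 0.2, 0.3, 0.4, 0.5", 1) selects four values
# starting at index 1 and renders them with 16 decimal places:
# "0.2000000000000000,0.3000000000000000,0.4000000000000000,0.5000000000000000"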
def base_header(file):
file.write('//!HOOK LUMA\n')
if scale > 1:
file.write('//!WHEN OUTPUT.w LUMA.w / {0}.400 > OUTPUT.h LUMA.h / {0}.400 > *\n'.format(scale - 1))
def header1(file, n, d):
base_header(file)
file.write('//!DESC feature map {}\n'.format((n//4)%(d//4) + 1))
file.write('//!BIND LUMA\n')
file.write('//!SAVE FEATURE{}\n'.format((n//4)%(d//4) + 1))
file.write('//!COMPONENTS 4\n')
def header2(file, d, n, s):
base_header(file)
file.write('//!DESC shrinking {}\n'.format((n//4)%(s//4) + 1))
for i in range(d//4):
file.write('//!BIND {}{}\n'.format("FEATURE", i + 1))
file.write('//!SAVE SHRINKED{}\n'.format((n//4)%(s//4) + 1))
file.write('//!COMPONENTS 4\n')
def header3(file, r, mi, m, n, s, inp):
base_header(file)
file.write('//!DESC mapping {}_{}\n'.format(mi + 1, (n//4)%(s//4) + 1))
for i in range(s//4):
file.write('//!BIND {}{}\n'.format(inp, i+1 + (0 if (r * m + mi) % 2 == 0 else 20)))
file.write('//!SAVE MODEL{}\n'.format((n//4)%(s//4) + 1 + (20 if (r * m + mi) % 2 == 0 else 0)))
file.write('//!COMPONENTS 4\n')
def header3_1(file, r, mi, m, n, s, inp):
base_header(file)
file.write('//!DESC sub-band residuals {}\n'.format((n//4)%(s//4) + 1))
for i in range(s//4):
file.write('//!BIND MODEL{}\n'.format(i + 1 + (20 if (r * m + mi) % 2 == 0 else 0)))
file.write('//!BIND {}{}\n'.format(inp, (n//4)%(s//4) + 1))
file.write('//!SAVE RES{}\n'.format((n//4)%(s//4) + 1))
file.write('//!COMPONENTS 4\n')
def header4(file, s, m, r, n, d):
base_header(file)
file.write('//!DESC expanding {}\n'.format((n//4)%(d//4) + 1))
for i in range(s//4):
file.write('//!BIND RES{}\n'.format(i + 1))
file.write('//!SAVE EXPANDED{}\n'.format((n//4)%(d//4) + 1))
file.write('//!COMPONENTS 4\n')
def header5(file, n, d, inp):
base_header(file)
file.write('//!DESC sub-pixel convolution {}\n'.format((n//comps) + 1))
for i in range(d//4):
file.write('//!BIND {}{}\n'.format(inp, i + 1))
if scale > 1:
file.write('//!SAVE SUBCONV{}\n'.format((n//comps) + 1))
file.write('//!COMPONENTS {}\n'.format(comps))
def header6(file):
base_header(file)
file.write('//!WIDTH LUMA.w {} *\n'.format(scale))
file.write('//!HEIGHT LUMA.h {} *\n'.format(scale))
file.write('//!DESC aggregation\n')
for i in range(scale**2//comps):
file.write('//!BIND SUBCONV{}\n'.format(i + 1))
def main():
if len(sys.argv) == 2:
fname=sys.argv[1]
d, s, m, r = [int(i) for i in fname[7:fname.index('.')].split("_")]
if s == 0:
s = d
shrinking = False
else:
shrinking = True
global scale, comps
deconv_biases = read_weights(fname, get_line_number("deconv_b", fname))
scale = int(math.sqrt(len(deconv_biases[0].split(","))))
dst = fname.replace("_", "-").replace("weights", "FSRCNNX_x{}_".format(scale)).replace("txt", "glsl")
with open(dst, 'w') as file:
# Feature layer
feature_radius = 2
ln = get_line_number("w1", fname)
weights = read_weights(fname, ln, (feature_radius*2+1)**2)
ln = get_line_number("b1", fname)
biases = read_weights(fname, ln)
for n in range(0, d, 4):
header1(file, n, d)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
p = 0
for l in range(0, len(weights)):
y, x = p%(feature_radius*2+1)-feature_radius, p//(feature_radius*2+1)-feature_radius
p += 1
file.write('res += vec4({}) * float(LUMA_texOff(vec2({},{})));\n'.format(format_weights(weights[l], n), x, y))
if shrinking:
ln = get_line_number("alpha1", fname)
alphas = read_weights(fname, ln)
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res, vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
if shrinking:
# Shrinking layer
ln = get_line_number("w2", fname)
weights = read_weights(fname, ln, d)
ln = get_line_number("b2", fname)
biases = read_weights(fname, ln)
for n in range(0, s, 4):
header2(file, d, n, s)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
for l in range(0, d, 4):
file.write('res += mat4({},{},{},{}) * FEATURE{}_texOff(vec2(0.0));\n'.format(format_weights(weights[l], n), format_weights(weights[l+1], n), format_weights(weights[l+2], n), format_weights(weights[l+3], n), l//4+1))
file.write('return res;\n')
file.write('}\n\n')
# Mapping layers
inp = "SHRINKED" if shrinking else "FEATURE"
for ri in range(r):
for mi in range(m):
tex_name = inp if ri == 0 and mi == 0 else "RES" if ri > 0 and mi == 0 else "MODEL"
ln = get_line_number("w{}".format(mi + 3), fname)
weights = read_weights(fname, ln, s*9)
ln = get_line_number("b{}".format(mi + 3), fname)
biases = read_weights(fname, ln)
for n in range(0, s, 4):
header3(file, ri, mi, m, n, s, tex_name)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
p = 0
for l in range(0, len(weights), 4):
if l % s == 0:
y, x = p%3-1, p//3-1
p += 1
idx = (l//4)%(s//4)
file.write('res += mat4({},{},{},{}) * {}{}_texOff(vec2({},{}));\n'.format(
format_weights(weights[l], n), format_weights(weights[l+1], n),
format_weights(weights[l+2], n), format_weights(weights[l+3], n),
tex_name, idx + 1 + (20 if (ri * m + mi) % 2 == 1 else 0), x, y))
ln = get_line_number("alpha{}".format(m + 3 if mi == m - 1 else mi + 4), fname)
alphas = read_weights(fname, ln)
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res, vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
if mi == m - 1:
ln = get_line_number("w{}".format(m + 3), fname)
weights = read_weights(fname, ln, s*(mi+2))
ln = get_line_number("b{}".format(m + 3), fname)
biases = read_weights(fname, ln)
for n in range(0, s, 4):
header3_1(file, ri, mi, m, n, s, inp)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
for l in range(0, s, 4):
file.write('res += mat4({},{},{},{}) * MODEL{}_texOff(0);\n'.format(
format_weights(weights[l], n), format_weights(weights[l+1], n),
format_weights(weights[l+2], n), format_weights(weights[l+3], n),
l//4 + 1 + (20 if (ri * m + mi) % 2 == 0 else 0)))
file.write('res += {}{}_texOff(0);\n'.format(inp, (n//4)%(s//4) + 1))
if ri == r - 1:
ln = get_line_number("alpha2", fname)
alphas = read_weights(fname, ln)
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res, vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
if shrinking:
# Expanding layer
ln = get_line_number("w{}".format(m + 4), fname)
weights = read_weights(fname, ln, d)
ln = get_line_number("b{}".format(m + 4), fname)
biases = read_weights(fname, ln)
ln = get_line_number("alpha{}".format(m + 4), fname)
alphas = read_weights(fname, ln)
for n in range(0, d, 4):
header4(file, s, m, r, n, d)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec4 res = vec4({});\n'.format(format_weights(biases[0], n)))
for l in range(0, s, 4):
file.write('res += mat4({},{},{},{}) * RES{}_texOff(vec2(0.0));\n'.format(format_weights(weights[l], n), format_weights(weights[l+1], n), format_weights(weights[l+2], n), format_weights(weights[l+3], n),
l//4 + 1))
file.write('res = max(res, vec4(0.0)) + vec4({}) * min(res, vec4(0.0));\n'.format(format_weights(alphas[0], n)))
file.write('return res;\n')
file.write('}\n\n')
# Sub-pixel convolution
ln = get_line_number("deconv_w", fname)
weights = read_weights(fname, ln, d*(radius*2+1)**2)
ln = get_line_number("deconv_b", fname)
biases = read_weights(fname, ln)
inp = "EXPANDED" if shrinking else "RES"
comps = scale if scale % 2 == 1 else 4
for n in range(0, scale**2, comps):
header5(file, n, d, inp)
file.write('vec4 hook()\n')
file.write('{\n')
if scale == 1:
file.write('float res = {};\n'.format(format_weights(biases[0], n, length=comps)))
else:
file.write('vec{0} res = vec{0}({1});\n'.format(comps, format_weights(biases[0], n, length=comps)))
p = 0
for l in range(0, len(weights), 4):
if l % d == 0:
y, x = p%(radius*2+1)-radius, p//(radius*2+1)-radius
p += 1
idx = (l//4)%(d//4)
file.write('res += {}{}({},{},{},{}){} {}{}_texOff(vec2({},{})){};\n'.format(
"mat4x" if scale > 1 else "dot(", comps if scale > 1 else "vec4",
format_weights(weights[l], n, length=comps), format_weights(weights[l+1], n, length=comps),
format_weights(weights[l+2], n, length=comps), format_weights(weights[l+3], n, length=comps),
" *" if scale > 1 else ",", inp, idx + 1, x, y, "" if scale > 1 else ")"))
file.write('return vec4(res{});\n'.format(", 0" * (4 - comps)))
file.write('}\n\n')
if scale > 1:
# Aggregation
header6(file)
file.write('vec4 hook()\n')
file.write('{\n')
file.write('vec2 fcoord = fract(SUBCONV1_pos * SUBCONV1_size);\n')
file.write('vec2 base = SUBCONV1_pos + (vec2(0.5) - fcoord) * SUBCONV1_pt;\n')
file.write('ivec2 index = ivec2(fcoord * vec2({}));\n'.format(scale))
if scale > 2:
file.write('mat{0} res = mat{0}(SUBCONV1_tex(base).{1}'.format(scale, "rgba"[:comps]))
for i in range(scale-1):
file.write(',SUBCONV{}_tex(base).{}'.format(i + 2, "rgba"[:comps]))
file.write(');\n')
file.write('return vec4(res[index.x][index.y], 0, 0, 1);\n')
else:
file.write('vec4 res = SUBCONV1_tex(base);\n')
file.write('return vec4(res[index.x * {} + index.y], 0, 0, 1);\n'.format(scale))
file.write('}\n')
else:
print("Missing argument: You must specify a file name")
return
if __name__ == '__main__':
main()
| 47.389706 | 236 | 0.482933 | 1,780 | 12,890 | 3.417416 | 0.088202 | 0.12576 | 0.044386 | 0.072497 | 0.689956 | 0.607102 | 0.556962 | 0.46145 | 0.409831 | 0.386487 | 0 | 0.039029 | 0.322188 | 12,890 | 271 | 237 | 47.564576 | 0.657205 | 0.007292 | 0 | 0.38843 | 0 | 0.024793 | 0.172805 | 0.019235 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049587 | false | 0 | 0.012397 | 0.004132 | 0.082645 | 0.004132 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6c7c421ad38800a15857692e302afbb85bc977e | 18,997 | py | Python | musegan.py | vab10266/wolfGANg_Vaud | 254634cd2eb9043d282236d3af86ba7938d92319 | [
"MIT"
] | null | null | null | musegan.py | vab10266/wolfGANg_Vaud | 254634cd2eb9043d282236d3af86ba7938d92319 | [
"MIT"
] | null | null | null | musegan.py | vab10266/wolfGANg_Vaud | 254634cd2eb9043d282236d3af86ba7938d92319 | [
"MIT"
] | null | null | null | import time
import math
import argparse
from copy import deepcopy
from progress.bar import IncrementalBar
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import utils as vutils
from itertools import chain
from gan.generator import MuseGenerator
from gan.critic import MuseCritic
from gan.utils import initialize_weights, Normal
from criterion import WassersteinLoss, GradientPenalty
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def get_item(pred):
return pred.mean().item()
dist = Normal()
class MuseGAN():
def __init__(self,
c_dimension=10,
z_dimension=32,
g_channels=1024,
g_features=1024,
c_channels=128,
c_features=1024,
g_lr=0.001,
c_lr=0.001,
device='cpu'):
self.c_dim = c_dimension
self.z_dim = z_dimension
self.device = device
self.one_hot = True
# generator and optimizer
self.generator = MuseGenerator(c_dimension = c_dimension,
z_dimension = z_dimension,
hid_channels = g_channels,
hid_features = g_features,
out_channels = 1).to(device)
self.generator = self.generator.apply(initialize_weights)
self.g_optimizer = torch.optim.Adam(self.generator.parameters(),
lr=g_lr, betas=(0.5, 0.9))
# critic and optimizer
self.critic = MuseCritic(c_dimension = c_dimension,
hid_channels = c_channels,
hid_features = c_features,
out_features = 1).to(device)
self.critic = self.critic.apply(initialize_weights)
self.c_optimizer = torch.optim.Adam(self.critic.parameters(),
lr=c_lr, betas=(0.5, 0.9))
self.opt_info = optim.Adam(chain(self.generator.parameters(), self.critic.pred_c.parameters()), lr=(g_lr+c_lr)/2, betas=(0.5, 0.99))
self.running_avg_g = None
self.real_images = None
self.prob_c = False
self.recon_weight = 1.0
self.onehot_weight = 1.0
# loss functions and gradient penalty (critic is wasserstein-like gan)
self.g_criterion = WassersteinLoss().to(device)
self.c_criterion = WassersteinLoss().to(device)
self.c_penalty = GradientPenalty().to(device)
# dictionary to save history
self.data = {'g_loss':[],
'c_loss':[],
'cf_loss':[],
'cr_loss':[],
'cp_loss':[],
'cs_loss':[],
'o_loss':[]}
print('MuseGAN initialized.')
def generate_random_sample(self, save_path, z=None, c=None, batch_size=16):
backup_para = copy_G_params(self.generator)
load_params(self.generator, self.running_avg_g)
#self.generator.eval()
if self.z_dim > 0 and z is None:
z = torch.randn(batch_size, self.z_dim).to(self.device)
if self.c_dim > 0 and c is None:
c = torch.randn(batch_size, self.c_dim).uniform_(0,1).to(self.device)
#c = torch.randn(1, self.c_dim).uniform_(0,1).repeat(batch_size,1).to(self.device)
with torch.no_grad():
g_img = self.generator(c=c, z=z).cpu()
vutils.save_image(g_img.add_(1).mul_(0.5), save_path.replace(".jpg", "_random.jpg"), pad_value=0)
del g_img
#self.generator.train()
load_params(self.generator, backup_para)
def generate_each_dim(self, save_path, dim=0, z=None, c=None, num_interpolate=10, num_samples=8):
#self.generator.eval()
if self.running_avg_g is not None:
backup_para = copy_G_params(self.generator)
load_params(self.generator, self.running_avg_g)
if self.z_dim > 0 and z is None:
z = torch.randn(num_samples, self.z_dim).to(self.device)
z = z.unsqueeze(1).repeat(1, num_interpolate, 1).view(-1, self.z_dim)
if self.c_dim > 0 and c is None:
c = torch.randn(num_samples, self.c_dim).uniform_(0.2, 0.6).to(self.device)
c = c.unsqueeze(1).repeat(1, num_interpolate, 1) #.view(-1, self.z_dim)
c_line = torch.linspace(0, 1, num_interpolate).to(self.device)
c_line = c_line.unsqueeze(0).repeat(num_samples, 1)
c[:,:,dim] = c_line
c = c.view(-1, self.c_dim)
with torch.no_grad():
g_img = self.generator(c=c, z=z)
vutils.save_image(g_img.add_(1).mul_(0.5), \
save_path.replace(".jpg", "_dim_%d.jpg"%(dim)), pad_value=0,nrow=num_interpolate)
del g_img
if self.running_avg_g is not None:
load_params(self.generator, backup_para)
def neg_log_density(self, sample, params):
constant = torch.Tensor([np.log(2 * np.pi)]).to(self.device)
mu = params[:,:self.c_dim]
logsigma = params[:,self.c_dim:]
inv_sigma = torch.exp(-logsigma)
tmp = (sample - mu) * inv_sigma
return 0.5 * (tmp * tmp + 2 * logsigma + constant)
def sample_hot_c(self, batch_size, c_dim, num_hot=1):
y_onehot = torch.zeros(batch_size, c_dim)
# print("num_hot: ", num_hot)
if num_hot==1:
y = torch.LongTensor(batch_size, 1).random_() % c_dim
# print("y_onehot: ", y_onehot.shape, y_onehot.dtype)
# print("torch.ones_like(y_onehot).to(y_onehot): ", torch.ones_like(y_onehot).to(y_onehot).shape, torch.ones_like(y_onehot).to(y_onehot).dtype)
# print("y: ", y.shape, y.dtype)
# print("torch.ones_like(y): ", torch.ones_like(y).shape, torch.ones_like(y).dtype)
# print("torch.ones_like(y).to(y): ", torch.ones_like(y).to(y).shape, torch.ones_like(y).to(y).dtype)
y_onehot.scatter_(1, y, 1.0)
# print("c_idx: ", y.view(-1).shape, y.view(-1, 1).shape, y.view([-1, 1]).shape)
return y_onehot.to(self.device), y.to(self.device)
else:
for _ in range(num_hot):
y = torch.LongTensor(batch_size,1).random_() % c_dim
y_onehot.scatter_(1, y, torch.ones_like(y).to(y))
return y_onehot.to(self.device)
def sample_z_and_c(self, batch_size, n_iter):
# sample z from Normal distribution
z = None
if self.z_dim > 0:
z = torch.randn(batch_size, 10, self.z_dim).to(self.device)
        # sample c alternately from uniform noise and from one-hot codes
c_idx = None
if n_iter%4==0 and self.one_hot:
c = torch.Tensor(batch_size, self.c_dim).uniform_(0.2,0.6).to(self.device)
# chosen_section = np.random.randint(0, 10)
            chosen_dim = np.random.randint(0, self.c_dim)
            c[:, chosen_dim] = 1
            c_idx = torch.Tensor(batch_size).fill_(chosen_dim).long().to(self.device)
elif n_iter%2==0 and self.one_hot:
c, c_idx = self.sample_hot_c(batch_size, c_dim=self.c_dim, num_hot=1)
else:
c = torch.Tensor(batch_size, self.c_dim).uniform_(0, 1).to(self.device)
return z, c, c_idx
def compute_gradient_penalty(self, real_images, fake_images):
# Compute gradient penalty
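        # WGAN-GP style penalty: evaluate the critic on random interpolates
        # between real and fake batches and push its gradient L2 norm towards 1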
# print("real_images, fake_images: ", real_images.shape, fake_images.shape)
alpha = torch.rand(real_images.size(0), 1, 1, 1, 1).expand_as(real_images).to(self.device)
interpolated = (alpha * real_images + (1 - alpha) * fake_images).clone().detach().requires_grad_(True)
out = self.critic(interpolated)[0]
exp_grad = torch.ones(out.size()).to(self.device)
grad = torch.autograd.grad(outputs=out,
inputs=interpolated,
grad_outputs=exp_grad,
retain_graph=True,
create_graph=True,
only_inputs=True)[0]
grad = grad.view(grad.size(0), -1)
grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
d_loss_gp = torch.mean((grad_l2norm - 1) ** 2)
return d_loss_gp
def compute_total_correlation(self):
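        # minibatch estimate of the total correlation KL(q(c) || prod_j q(c_j))
        # of the inferred code (appears to follow the beta-TCVAE estimator);
        # returns a plain scalar, so nothing is backpropagated through it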
real_images = torch.cat(self.real_images, dim=0)
batch_size = real_images.size(0)
# print(batch_size)
self.critic.eval()
with torch.no_grad():
c_params = self.critic(real_images)[1]
self.critic.train()
sample_c = dist.sample(params=c_params.view(batch_size, self.c_dim, 2))
_logqc = dist.log_density( sample_c.view(-1, 1, self.c_dim), c_params.view(1, -1, self.c_dim, 2) )
        logqc_prodmarginals = (torch.logsumexp(_logqc, dim=1, keepdim=False) - math.log(batch_size)).sum(1)
        logqc = (torch.logsumexp(_logqc.sum(2), dim=1, keepdim=False) - math.log(batch_size))
#print( logqc, logqc_prodmarginals )
self.real_images = None
return (logqc - logqc_prodmarginals).mean().item()
def train_step(self, real_image, n_iter):
cfb_loss, crb_loss, cpb_loss, cb_loss = 0, 0, 0, 0
if self.running_avg_g is None:
self.running_avg_g = copy_G_params(self.generator)
batch_size = real_image.size(0)
c_ratio = 5
for _ in range(c_ratio):
### prepare data part
z, c, c_idx = self.sample_z_and_c(batch_size, n_iter)
g_img = self.generator(c=c, z=z)
r_img = real_image.to(self.device)
### critic part
self.critic.zero_grad()
# pred_r, _ = self.critic(r_img)
# pred_f, _ = self.critic(g_img.detach())
# get critic's `fake` loss
fake_pred, _ = self.critic(g_img)
fake_target = - torch.ones_like(fake_pred)
fake_loss = self.c_criterion(fake_pred, fake_target)
# get critic's `real` loss
real_pred, _ = self.critic(r_img)
real_target = torch.ones_like(real_pred)
real_loss = self.c_criterion(real_pred, real_target)
# mix `real` and `fake` melody
realfake = self.alpha * r_img + (1. - self.alpha) * g_img
# get critic's penalty
realfake_pred, _ = self.critic(realfake)
# print("realfake: ", realfake.shape)
# print("realfake_pred: ", realfake_pred.shape)
penalty = self.c_penalty(realfake, realfake_pred)
# sum up losses
closs = fake_loss + real_loss + 10 * penalty
# retain graph
closs.backward(retain_graph=True)
# update critic parameters
self.c_optimizer.step()
            # divide by the number of critic updates in the loop (c_ratio)
            cfb_loss += fake_loss.item() / c_ratio
            crb_loss += real_loss.item() / c_ratio
            cpb_loss += 10 * penalty.item() / c_ratio
            cb_loss += closs.item() / c_ratio
### prepare data part
z, c, c_idx = self.sample_z_and_c(batch_size, n_iter)
g_img = self.generator(c=c, z=z)
r_img = real_image.to(self.device)
### Generator part
self.generator.zero_grad()
pred_g, _ = self.critic(g_img)
loss_g = -pred_g.mean()
loss_g.backward()
self.g_optimizer.step()
### Mutual Information between c and c' Part
self.generator.zero_grad()
self.critic.zero_grad()
z, c, c_idx = self.sample_z_and_c(batch_size, n_iter)
g_img = self.generator(c=c, z=z)
pred_g, pred_c_params = self.critic(g_img)
# print("c, c_pred: ", c.shape, pred_c_params.shape)
if self.prob_c:
loss_g_recon_c = self.neg_log_density(c, pred_c_params).mean()
else:
loss_g_recon_c = F.l1_loss(pred_c_params, c)
loss_g_onehot = torch.Tensor([0]).to(self.device)
# if n_iter%2==0 and self.one_hot:
# print("cp, c_idx: ", pred_c_params[:,:self.c_dim].shape, c_idx.shape)
if n_iter%4==0 and self.one_hot:
loss_g_onehot = 0.2*F.cross_entropy(pred_c_params[:,:self.c_dim], c_idx.view(-1))
elif n_iter%2==0 and self.one_hot:
loss_g_onehot = 0.8*F.cross_entropy(pred_c_params[:,:self.c_dim], c_idx.view(-1))
loss_info = self.recon_weight * loss_g_recon_c + self.onehot_weight * loss_g_onehot
loss_info.backward()
self.opt_info.step()
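        # Polyak / exponential moving average of the generator weights; the
        # averaged copy is what generate_random_sample swaps in when sampling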
for p, avg_p in zip(self.generator.parameters(), self.running_avg_g):
            avg_p.mul_(0.999).add_(p.data, alpha=0.001)
# print("losses: ", cfb_loss, crb_loss, cpb_loss, cb_loss, loss_g.item(), loss_g_onehot.item(), loss_g_recon_c.item())
return cfb_loss, crb_loss, cpb_loss, cb_loss, loss_g.item(), loss_g_onehot.item(), loss_g_recon_c.item()
def train(self,
dataloader,
epochs=500,
batch_size=64,
display_epoch=10,
device='cpu'):
# alpha parameter for mixing images
self.alpha = torch.rand((batch_size, 1, 1, 1, 1)).requires_grad_().to(device)
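        # note: alpha is sized for a full batch, so a smaller final batch would
        # break the real/fake mixing in train_step (assumes drop_last batching)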
for epoch in range(epochs):
ge_loss, ce_loss = 0, 0
cfe_loss, cre_loss, cpe_loss, oe_loss, cse_loss = 0, 0, 0, 0, 0
start = time.time()
bar = IncrementalBar(f'[Epoch {epoch+1}/{epochs}]', max=len(dataloader))
# print("epoch: ", epoch)
for real in dataloader:
# real: real image batch
# print("real: ", real.shape)
cfb_loss, crb_loss, cpb_loss, cb_loss, gb_loss, ob_loss, csb_loss = self.train_step(real, epoch)
"""
real = real.to(device)
# train Critic
cb_loss=0
cfb_loss, crb_loss, cpb_loss = 0, 0, 0
for _ in range(5):
# create random `noises`
cords = torch.randn(batch_size, 32).to(device)
style = torch.randn(batch_size, 32).to(device)
melody = torch.randn(batch_size, 4, 32).to(device)
groove = torch.randn(batch_size, 4, 32).to(device)
# forward to generator
self.c_optimizer.zero_grad()
with torch.no_grad():
fake = self.generator(cords, style, melody, groove).detach()
# get critic's `fake` loss
fake_pred, c_pred = self.critic(fake)
fake_target = - torch.ones_like(fake_pred)
fake_loss = self.c_criterion(fake_pred, fake_target)
# get critic's `real` loss
real_pred = self.critic(real)
real_target = torch.ones_like(real_pred)
real_loss = self.c_criterion(real_pred, real_target)
# mix `real` and `fake` melody
realfake = self.alpha * real + (1. - self.alpha) * fake
# get critic's penalty
realfake_pred = self.critic(realfake)
penalty = self.c_penalty(realfake, realfake_pred)
# sum up losses
closs = fake_loss + real_loss + 10 * penalty
# retain graph
closs.backward(retain_graph=True)
# update critic parameters
self.c_optimizer.step()
# devide by number of critic updates in the loop (5)
cfb_loss += fake_loss.item()/5
crb_loss += real_loss.item()/5
cpb_loss += 10* penalty.item()/5
cb_loss += closs.item()/5
cfe_loss += cfb_loss/len(dataloader)
cre_loss += crb_loss/len(dataloader)
cpe_loss += cpb_loss/len(dataloader)
ce_loss += cb_loss/len(dataloader)
# train generator
self.g_optimizer.zero_grad()
# create random `noises`
cords = torch.randn(batch_size, 32).to(device)
style = torch.randn(batch_size, 32).to(device)
melody = torch.randn(batch_size, 4, 32).to(device)
groove = torch.randn(batch_size, 4, 32).to(device)
# forward to generator
fake = self.generator(cords, style, melody, groove)
# forward to critic (to make prediction)
fake_pred = self.critic(fake)
# get generator loss (idea is to fool critic)
gb_loss = self.g_criterion(fake_pred, torch.ones_like(fake_pred))
gb_loss.backward()
# update critic parameters
self.g_optimizer.step()
ge_loss += gb_loss.item()/len(dataloader)
"""
cfe_loss += cfb_loss/len(dataloader)
cre_loss += crb_loss/len(dataloader)
cpe_loss += cpb_loss/len(dataloader)
ce_loss += cb_loss/len(dataloader)
ge_loss += gb_loss/len(dataloader)
oe_loss += ob_loss/len(dataloader)
cse_loss += csb_loss/len(dataloader)
bar.next()
bar.finish()
end = time.time()
tm = (end - start)
# save history
self.data['g_loss'].append(ge_loss)
self.data['c_loss'].append(ce_loss)
self.data['cf_loss'].append(cfe_loss)
self.data['cr_loss'].append(cre_loss)
self.data['cp_loss'].append(cpe_loss)
self.data['cs_loss'].append(cse_loss)
self.data['o_loss'].append(oe_loss)
# display losses
if epoch%10==0:
print("[Epoch %d/%d] [G loss: %.3f] [D loss: %.3f] ETA: %.3fs" % (epoch+1, epochs, ge_loss, ce_loss, tm))
print(f"[C loss | (fake: {cfe_loss:.3f}, real: {cre_loss:.3f}, penalty: {cpe_loss:.3f})]")
print(f"[c similarity loss | {cse_loss:.3f}]")
print(f"[onehot loss | {oe_loss:.3f}]")
return self.generator
| 44.804245 | 155 | 0.549245 | 2,495 | 18,997 | 3.943086 | 0.117435 | 0.030189 | 0.01545 | 0.021244 | 0.448567 | 0.396422 | 0.354544 | 0.327506 | 0.292844 | 0.26418 | 0 | 0.020055 | 0.335948 | 18,997 | 423 | 156 | 44.910165 | 0.75981 | 0.103543 | 0 | 0.166667 | 0 | 0.007752 | 0.027654 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050388 | false | 0 | 0.05814 | 0.003876 | 0.151163 | 0.01938 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6cbb638c7d86c902db1e8bf832a0ff78bbdb70f | 523 | py | Python | citybuilder/core.py | Tankernn/citybuilder | aee8ac217d7371df854a151bab4f5345ff1cd9b7 | [
"Apache-2.0"
] | null | null | null | citybuilder/core.py | Tankernn/citybuilder | aee8ac217d7371df854a151bab4f5345ff1cd9b7 | [
"Apache-2.0"
] | null | null | null | citybuilder/core.py | Tankernn/citybuilder | aee8ac217d7371df854a151bab4f5345ff1cd9b7 | [
"Apache-2.0"
] | null | null | null | import yaml
import _thread
from . import server
import time
config = yaml.load(open("config/game.yaml"))['game']
def main_loop():
for player in list(server.players.values()):
player.update(time.time() - main_loop.last_tick)
main_loop.last_tick = time.time()
time.sleep(1)
main_loop.last_tick = time.time()
if __name__ == '__main__':
def run(*args):
server.run_server()
print("Websocket thread terminated.")
_thread.start_new_thread(run, ())
while 1:
main_loop()
| 21.791667 | 56 | 0.661568 | 72 | 523 | 4.513889 | 0.472222 | 0.123077 | 0.110769 | 0.147692 | 0.147692 | 0.147692 | 0 | 0 | 0 | 0 | 0 | 0.004785 | 0.200765 | 523 | 23 | 57 | 22.73913 | 0.772727 | 0 | 0 | 0.111111 | 0 | 0 | 0.107075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6cbe6216c1d3065e771a051dc775fda5c1d5d09 | 14,266 | py | Python | dart_backend/Components/Calibration/EllipseUtils.py | Akkarin007/TeamProject-WiSe-DartImageProcessing | 14ecabb795bbda3c5158ce92750c12baae147294 | [
"MIT"
] | null | null | null | dart_backend/Components/Calibration/EllipseUtils.py | Akkarin007/TeamProject-WiSe-DartImageProcessing | 14ecabb795bbda3c5158ce92750c12baae147294 | [
"MIT"
] | null | null | null | dart_backend/Components/Calibration/EllipseUtils.py | Akkarin007/TeamProject-WiSe-DartImageProcessing | 14ecabb795bbda3c5158ce92750c12baae147294 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import math
import sys
from .Utils import *
def getEllipseLineIntersection(Ellipse, lines_seg, image_proc_img):
x = Ellipse.x
y = Ellipse.y
a = Ellipse.a
b = Ellipse.b
angle = (Ellipse.angle) * math.pi / 180
# build transformation matrix http://math.stackexchange.com/questions/619037/circle-affine-transformation
R1 = np.array([[math.cos(angle), math.sin(angle), 0], [-math.sin(angle), math.cos(angle), 0], [0, 0, 1]])
T1 = np.array([[1, 0, -x], [0, 1, -y], [0, 0, 1]])
D = np.array([[1, 0, 0], [0, a / b, 0], [0, 0, 1]])
M = D.dot(R1.dot(T1))
M_inv = np.linalg.inv(M)
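    # M maps the ellipse onto the circle x^2 + y^2 = a^2 (translate the centre
    # to the origin, rotate by -angle, stretch y by a/b); each line is
    # intersected with that circle and the hits are mapped back through M_inv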
transformed_intersectpoints = []
for line in lines_seg:
x0, y0 = line[0]
x1, y1 = line[1]
p1 = M.dot(np.transpose([x0,y0,1]))
p2 = M.dot(np.transpose([x1,y1,1]))
x0, y0 = p1[0], p1[1]
x1, y1 = p2[0], p2[1]
        # intersect the transformed line with the circle x^2 + y^2 = a^2
slope = (y1 - y0) / (x1 - x0)
intercept = y0 - (slope * x0)
t_0 = 1 + slope**2
t_1 = 2 * slope * intercept
t_2 = intercept**2 - a**2
d = (t_1**2) - (4 * t_0 * t_2)
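        # d < 0 would mean the transformed line misses the circle; the sector
        # lines passed in are assumed to cross the board, so d >= 0 here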
sol_x0 = (-t_1 - math.sqrt(d))/(2 * t_0)
sol_x1 = (-t_1 + math.sqrt(d))/(2 * t_0)
sol_y0 = slope * sol_x0 + intercept
sol_y1 = slope * sol_x1 + intercept
inter_p1 = [sol_x0, sol_y0,1]
inter_p2 = [sol_x1, sol_y1,1]
inter_p1 = M_inv.dot(np.transpose(inter_p1))
inter_p2 = M_inv.dot(np.transpose(inter_p2))
transformed_intersectpoints.append(inter_p1)
transformed_intersectpoints.append(inter_p2)
# for points in transformed_intersectpoints:
# cv2.circle(image_proc_img, (int(points[0]), int(points[1])), 10, (0, 255, 255), -1)
    point1 = (int(transformed_intersectpoints[0][0]), int(transformed_intersectpoints[0][1]))
    point2 = (int(transformed_intersectpoints[1][0]), int(transformed_intersectpoints[1][1]))
    point3 = (int(transformed_intersectpoints[2][0]), int(transformed_intersectpoints[2][1]))
    point4 = (int(transformed_intersectpoints[3][0]), int(transformed_intersectpoints[3][1]))
    cv2.circle(image_proc_img, point1, 5, (255, 0, 0), 3)
    cv2.putText(image_proc_img, str(1), point1, cv2.FONT_HERSHEY_SIMPLEX,
                2, (255, 0, 255), 4, cv2.LINE_AA)
    cv2.circle(image_proc_img, point2, 5, (0, 255, 0), 3)
    cv2.putText(image_proc_img, str(2), point2, cv2.FONT_HERSHEY_SIMPLEX,
                2, (255, 0, 255), 4, cv2.LINE_AA)
    cv2.circle(image_proc_img, point3, 5, (255, 0, 0), 3)
    cv2.putText(image_proc_img, str(3), point3, cv2.FONT_HERSHEY_SIMPLEX,
                2, (255, 0, 255), 4, cv2.LINE_AA)
    cv2.circle(image_proc_img, point4, 5, (0, 255, 0), 3)
    cv2.putText(image_proc_img, str(4), point4, cv2.FONT_HERSHEY_SIMPLEX,
                2, (255, 0, 255), 4, cv2.LINE_AA)
cv2.imshow("intersection points", image_proc_img)
return transformed_intersectpoints, image_proc_img
def calculateDstPoint(i, calData):
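    # destination point on the outer double ring (ring_radius[5]) at the middle
    # of sector i on the normalized board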
dstpoint = [(calData.center_dartboard[0] + calData.ring_radius[5] * math.cos((0.5 + i) * calData.sectorangle)),
(calData.center_dartboard[1] + calData.ring_radius[5] * math.sin((0.5 + i) * calData.sectorangle))]
return dstpoint
def nothing(x):
pass
def createTrackbarsForHoughline():
cv2.namedWindow('houghlines', cv2.WINDOW_NORMAL)
    cv2.createTrackbar('accuracy', 'houghlines', 0, 200, nothing)
    cv2.createTrackbar('votes', 'houghlines', 0, 200, nothing)
    cv2.setTrackbarPos('accuracy', 'houghlines', 160)
cv2.setTrackbarPos('votes', 'houghlines', 90)
cv2.createTrackbar('1 -> Done', 'houghlines', 0, 1, nothing)
def createTrackbars():
cv2.namedWindow('transformation', cv2.WINDOW_NORMAL)
cv2.createTrackbar('p1_x', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p1_y', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p2_x', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p2_y', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p3_x', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p3_y', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p4_x', 'transformation', 0, 20, nothing)
cv2.createTrackbar('p4_y', 'transformation', 0, 20, nothing)
cv2.setTrackbarPos('p1_x', 'transformation', 10)
cv2.setTrackbarPos('p1_y', 'transformation', 10)
cv2.setTrackbarPos('p2_x', 'transformation', 10)
cv2.setTrackbarPos('p2_y', 'transformation', 10)
cv2.setTrackbarPos('p3_x', 'transformation', 10)
cv2.setTrackbarPos('p3_y', 'transformation', 10)
cv2.setTrackbarPos('p4_x', 'transformation', 10)
cv2.setTrackbarPos('p4_y', 'transformation', 10)
cv2.createTrackbar('1 -> Done', 'transformation', 0, 1, nothing)
def getFinalTransformationMatrix(image, calData):
image = image.copy()
intersectPoints = calData.intersectPoints
createTrackbars()
while (1):
# get current positions of four trackbars
s = cv2.getTrackbarPos('1 -> Done', 'transformation')
if s == 1:
cv2.destroyAllWindows()
break
p1_x = cv2.getTrackbarPos('p1_x', 'transformation') - 10
p1_y = cv2.getTrackbarPos('p1_y', 'transformation') - 10
p2_x = cv2.getTrackbarPos('p2_x', 'transformation') - 10
p2_y = cv2.getTrackbarPos('p2_y', 'transformation') - 10
p3_x = cv2.getTrackbarPos('p3_x', 'transformation') - 10
p3_y = cv2.getTrackbarPos('p3_y', 'transformation') - 10
p4_x = cv2.getTrackbarPos('p4_x', 'transformation') - 10
p4_y = cv2.getTrackbarPos('p4_y', 'transformation') - 10
trackings = [(p1_x,p1_y),(p2_x,p2_y),(p3_x,p3_y),(p4_x,p4_y)]
dst_points = []
for dstPoint in calData.destinationPoints:
dst_points.append(calculateDstPoint(dstPoint, calData))
# finalize transformation matrix
src_points = []
for index, point in enumerate(intersectPoints):
src_points.append((point[0] + trackings[index][0], point[1]+ trackings[index][1]))
transformation_matrix = cv2.getPerspectiveTransform(np.array(src_points, np.float32), np.array(dst_points, np.float32))
        normalized_board_image = cv2.warpPerspective(image, transformation_matrix, (800, 800))
        normalized_board_image = getNormalizedBoard(normalized_board_image, calData)
        for dstPoint in dst_points:
            cv2.circle(normalized_board_image, (int(dstPoint[0]), int(dstPoint[1])), 2, (255, 255, 0), 2, 4)
        for index, point in enumerate(dst_points):
            cv2.putText(normalized_board_image, str(index+1), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 4, cv2.LINE_AA)
        cv2.imshow('adjusted_image', normalized_board_image)
        cv2.waitKey(1)
    return transformation_matrix, normalized_board_image
def getSectorAngle(i, calData):
return (0.5 + i) * calData.sectorangle
def getNormalizedBoard(img, calData):
center = 400
for rings in calData.ring_radius:
cv2.circle(img, (center, center), rings, (255, 0, 0), 1) # outside double
for i in range(0,20):
sectorAngle = getSectorAngle(i,calData)
p1 = center + int(calData.ring_radius[1] * math.cos(sectorAngle))
p2 = center + int(calData.ring_radius[1] * math.sin(sectorAngle))
cv2.line(img, (p1, p2), (
int(center + calData.ring_radius[5] * math.cos(sectorAngle)),
int(center + calData.ring_radius[5] * math.sin(sectorAngle))), (255, 0, 0), 1)
return img
def getIntersectionPointsFromEllipse(image_proc_img, pre_processed_lines, pre_processed_ellipse):
# find enclosing ellipse TODO: use HoughEllipse or at least try using it :>
Ellipse, image_proc_img = findEllipse(pre_processed_ellipse, image_proc_img)
cv2.imshow("4-findEllipse", image_proc_img)
waitForKey()
lines_seg, image_proc_img = findSectorLines(pre_processed_lines, image_proc_img, Ellipse)
cv2.imshow("5-detectedLines", image_proc_img)
waitForKey()
intersectPoints, image_proc_img = getEllipseLineIntersection(Ellipse, lines_seg, image_proc_img)
return intersectPoints, image_proc_img
def smoothEllipse(thresh):
# open -> erode then dilate
# close -> dilate then erode
# smooth out board to get an even ellipse
pre_processing_ellipse = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
pre_processing_ellipse = cv2.morphologyEx(pre_processing_ellipse, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (17, 17)))
return pre_processing_ellipse
def findSectorLines(edged, image_proc_img, Ellipse):
original = image_proc_img.copy()
createTrackbarsForHoughline()
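    # interactively tune the Hough transform until two near-perpendicular
    # sector lines through the board centre remain; those two lines are returned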
while True:
s = cv2.getTrackbarPos('1 -> Done', 'houghlines')
if s == 1:
cv2.destroyAllWindows()
break
        accuracy = cv2.getTrackbarPos('accuracy', 'houghlines')
votes = cv2.getTrackbarPos('votes', 'houghlines')
image_proc_img = original.copy()
houghlines = cv2.HoughLines(edged, 1, np.pi / accuracy, votes,100)
horizontal_lines = []
vertical_lines = []
intersectLines_XY_coord = []
        fixed_horizontal_slope = 0
        fixed_vertical_slope = sys.maxsize
filtered_Lines = []
horizontal_temp = 75
vertical_temp = 75
try:
for line in houghlines:
# rho, theta = line[0]
rho, theta = line[0]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 2000 * (-b))
y1 = int(y0 + 2000 * (a))
x2 = int(x0 - 2000 * (-b))
y2 = int(y0 - 2000 * (a))
slope = (y1 - y0) / (x1 - x0)
                c = y0 - slope * x0
                # perpendicular distance from the ellipse centre to the line
                distance = abs(slope * Ellipse.x - Ellipse.y + c) / math.sqrt(slope**2 + 1)
                if distance < 300:
angle_for_vertical_line = abs(math.degrees(math.atan((slope-fixed_horizontal_slope)/(1+ (slope * fixed_horizontal_slope)))))
angle_for_horizontal_line = abs(math.degrees(math.atan((slope-fixed_vertical_slope)/(1+ (slope * fixed_vertical_slope)))))
cv2.line(image_proc_img, (x1,y1),(x2, y2), (255, 0, 255),1)
if angle_for_vertical_line > angle_for_horizontal_line and angle_for_vertical_line > horizontal_temp:
horizontal_temp = angle_for_vertical_line
vertical_lines.append([(x1,y1),(x2,y2)])
filtered_Lines.append([(x1,y1),(x2,y2)])
elif angle_for_vertical_line < angle_for_horizontal_line and angle_for_horizontal_line > vertical_temp:
vertical_temp = angle_for_horizontal_line
horizontal_lines.append([(x1,y1),(x2,y2)])
filtered_Lines.append([(x1,y1),(x2,y2)])
degree_btw_both_lines = 60
h = 0
v = 0
for x_line in horizontal_lines:
(x0,y0), (x1,y1) = x_line
slope_x = (y1 - y0) / (x1 - x0)
for y_line in vertical_lines:
(x2,y2), (x3,y3) = y_line
slope_y = (y3 - y2) / (x3 - x2)
try:
angle_between = abs(math.degrees(math.atan((slope_x-slope_y)/(1+ (slope_x * slope_y)))))
if angle_between > degree_btw_both_lines:
degree_btw_both_lines = angle_between
h = [(x0,y0),(x1,y1)]
v = [(x2,y2),(x3,y3)]
except:
continue
cv2.line(image_proc_img, h[0],h[1], (0, 0, 255),2)
cv2.line(image_proc_img, v[0],v[1], (0, 255, 0),2)
except:
print("no lines found")
cv2.imshow('lines detected', image_proc_img)
cv2.waitKey(1)
# if len(intersectLines) == 2:
# x, y = intersection(intersectLines[0], intersectLines[1])
# else:
# x, y = segmented_intersections(intersectLines)
# cv2.circle(image_proc_img, (int(x), int(y)), 5, (255, 0, 255), -1)
intersectLines_XY_coord.append(h)
intersectLines_XY_coord.append(v)
return intersectLines_XY_coord, image_proc_img
def findEllipse(edged, image_proc_img):
Ellipse = EllipseDef()
contours, _ = cv2.findContours(edged, 1, 2)
# countur = image_proc_img.copy()
# cv2.drawContours(countur, contours, -1, (0, 255, 0), 3)
# cv2.imshow("all-counturs", countur)
minThresE = 100000
maxThresE = 150000
for cnt in contours:
        print(cv2.contourArea(cnt))
try:
            area = cv2.contourArea(cnt)
if minThresE < area < maxThresE:
ellipse = cv2.fitEllipse(cnt)
x, y = ellipse[0]
a, b = ellipse[1]
angle = ellipse[2]
cv2.drawContours(image_proc_img, cnt, -1, (0, 255, 0), 7)
a = a / 2
b = b / 2
cv2.ellipse(image_proc_img, (int(x), int(y)), (int(a), int(b)), int(angle), 0.0, 360.0,
(255, 0, 0), 1)
cv2.circle(image_proc_img, (int(x), int(y)), 5, (255, 255, 0), -1)
Ellipse.a = a
Ellipse.b = b
Ellipse.x = x
Ellipse.y = y
Ellipse.angle = angle
except:
continue
return Ellipse, image_proc_img
def waitForKey():
keyInput = cv2.waitKey(0)
if keyInput == 1:
cv2.destroyAllWindows() | 42.082596 | 156 | 0.589724 | 1,764 | 14,266 | 4.586168 | 0.14229 | 0.041162 | 0.054883 | 0.023733 | 0.328677 | 0.219036 | 0.18665 | 0.117923 | 0.109023 | 0.104821 | 0 | 0.06911 | 0.280878 | 14,266 | 339 | 157 | 42.082596 | 0.719466 | 0.067784 | 0 | 0.105882 | 0 | 0 | 0.055053 | 0 | 0 | 0 | 0 | 0.00295 | 0 | 1 | 0.05098 | false | 0.003922 | 0.019608 | 0.003922 | 0.105882 | 0.007843 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6d2d74f3f54c4f51662dfe6138f6ef14972234c | 1,261 | py | Python | src/largest_rectangle.py | kemingy/daily-coding-problem | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | [
"Unlicense"
] | 3 | 2019-03-06T03:14:56.000Z | 2020-01-07T16:00:48.000Z | src/largest_rectangle.py | kemingy/daily-coding-problem | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | [
"Unlicense"
] | null | null | null | src/largest_rectangle.py | kemingy/daily-coding-problem | 0839311ec0848f8f0b4a9edba817ecceb8f944a0 | [
"Unlicense"
] | null | null | null | # Given an N by M matrix consisting only of 1's and 0's, find the largest
# rectangle containing only 1's and return its area.
# For example, given the following matrix:
# [[1, 0, 0, 0],
# [1, 0, 1, 1],
# [1, 0, 1, 1],
# [0, 1, 0, 0]]
# Return 4.
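# Histogram approach: sweep the matrix row by row, keeping for every column the
# height of the run of 1s ending at the current row together with the widest
# left/right boundaries valid at that height; each row then contributes
# candidate areas height[j] * (right[j] - left[j]), giving O(m * n) overall.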
def largest_rectangle(matrix):
if not matrix or not matrix[0]:
return 0
m, n = len(matrix), len(matrix[0])
ans = 0
left = [0] * n
right = [n] * n
height = [0] * n
for i in range(m):
cur_left = 0
cur_right = n
for j in range(n):
if matrix[i][j] == 1:
height[j] += 1
left[j] = max(left[j], cur_left)
else:
height[j] = 0
left[j] = 0
cur_left = j + 1
for j in range(n-1, -1, -1):
if matrix[i][j] == 1:
right[j] = min(right[j], cur_right)
else:
right[j] = n
cur_right = j
for j in range(n):
ans = max(ans, (right[j] - left[j]) * height[j])
return ans
if __name__ == "__main__":
matrix = [[1, 0, 0, 0], [1, 0, 1, 1], [1, 0, 1, 1], [0, 1, 0, 0]]
print(largest_rectangle(matrix))
| 24.25 | 74 | 0.436955 | 190 | 1,261 | 2.815789 | 0.236842 | 0.037383 | 0.033645 | 0.029907 | 0.190654 | 0.082243 | 0.082243 | 0.082243 | 0.082243 | 0.082243 | 0 | 0.070652 | 0.416336 | 1,261 | 51 | 75 | 24.72549 | 0.65625 | 0.184774 | 0 | 0.193548 | 0 | 0 | 0.008264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0 | 0 | 0.096774 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6d2e5c6ff68cb1bdb35e291b71a6f58705d4f91 | 7,207 | py | Python | anodos/swarm/models.py | abezpalov/anodos.ru | 6b905eb44b6f4a54f6e199b80cd714522deed277 | [
"MIT"
] | 2 | 2020-04-26T07:28:38.000Z | 2022-03-31T14:24:44.000Z | anodos/swarm/models.py | abezpalov/anodos.ru | 6b905eb44b6f4a54f6e199b80cd714522deed277 | [
"MIT"
] | 9 | 2017-12-01T04:43:31.000Z | 2022-01-01T13:26:04.000Z | anodos/swarm/models.py | abezpalov/anodos.ru | 6b905eb44b6f4a54f6e199b80cd714522deed277 | [
"MIT"
] | null | null | null | import os
import uuid
from django.db import models
from django.conf import settings
from django.utils import timezone
class SourceManager(models.Manager):
def take(self, name, **kwargs):
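        # get-or-create by name; the login/password kwargs are only applied
        # when the record is first created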
if not name:
return None
try:
o = self.get(name=name)
except Source.DoesNotExist:
o = Source()
o.name = name[:512]
o.login = kwargs.get('login', None)
o.password = kwargs.get('password', None)
o.save()
return o
class Source(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=512, unique=True)
login = models.TextField(null=True, default=None)
password = models.TextField(null=True, default=None)
objects = SourceManager()
def __str__(self):
return "Source: {}".format(self.name)
class Meta:
ordering = ['name']
class SourceDataManager(models.Manager):
def take(self, source, url=None):
if not source:
return None
try:
o = self.get(source=source, url=url)
except SourceData.DoesNotExist:
o = SourceData()
o.source = source
o.url = url
o.save()
        # check whether the file has already been downloaded
file_name = '{}swarm/{}/{}'.format(settings.MEDIA_ROOT, o.source.name, o.url)
if os.path.isfile(file_name):
o.file_name = file_name
o.save()
return o
class SourceData(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
source = models.ForeignKey('Source', null=True, default=None,
on_delete=models.CASCADE, related_name='+')
url = models.TextField(null=True, default=None, db_index=True)
file_name = models.TextField(null=True, default=None)
content = models.TextField(null=True, default=None)
created = models.DateTimeField(default=timezone.now)
parsed = models.DateTimeField(null=True, default=None)
objects = SourceDataManager()
def save_file(self, data_):
self.file_name = '{}swarm/{}/{}'.format(settings.MEDIA_ROOT, self.source.name, self.url)
        # make sure the target directory exists before writing
        os.makedirs(os.path.dirname(self.file_name), exist_ok=True)
if type(data_) == str:
with open(self.file_name, "w") as f:
f.write(data_)
else:
with open(self.file_name, "wb") as f:
f.write(data_.getbuffer())
self.save()
def load_file(self):
if self.file_name is None:
self.file_name = '{}swarm/{}/{}'.format(settings.MEDIA_ROOT, self.source.name, self.url)
f = open(self.file_name, 'r')
content = f.read()
return content
def set_parsed(self):
self.parsed = timezone.now()
self.save()
def __str__(self):
if self.url:
return 'SourceData: {}'.format(self.url)
else:
return 'SourceData: {}'.format(self.source.name)
class Meta:
ordering = ['created']
class DataManager(models.Manager):
@staticmethod
def add(source_data, content_type, content):
o = Data()
o.source_data = source_data
o.content_type = content_type
o.save()
o.file_name = '{}swarm/data/{}/{}.{}'.format(settings.MEDIA_ROOT,
o.content_type,
o.id,
o.content_type)
        # make sure the target directory exists before writing
        os.makedirs(os.path.dirname(o.file_name), exist_ok=True)
with open(o.file_name, "wb") as f:
f.write(content)
o.save()
return o
class Data(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
source_data = models.ForeignKey('SourceData', null=True, default=None,
on_delete=models.CASCADE, related_name='+')
content_type = models.TextField(null=True, default=None, db_index=True)
file_name = models.TextField(null=True, default=None, db_index=True)
created = models.DateTimeField(default=timezone.now)
parsed = models.DateTimeField(null=True, default=None)
def __str__(self):
        return 'Data: {}'.format(self.file_name)
class Meta:
ordering = ['created']
objects = DataManager()
class OrganisationManager(models.Manager):
def take(self, ogrn, **kwargs):
if not ogrn:
return None
try:
o = self.get(ogrn=ogrn)
except Organisation.DoesNotExist:
o = Organisation()
o.ogrn = ogrn
o.name = kwargs.get('name', None)
o.inn = kwargs.get('inn', None)
o.save()
return o
class Organisation(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
ogrn = models.TextField(unique=True)
name = models.TextField(null=True, default=None)
inn = models.TextField(null=True, default=None)
created = models.DateTimeField(default=timezone.now)
objects = OrganisationManager()
def __str__(self):
return f"{self.name} ({self.ogrn} {self.inn})"
class Meta:
ordering = ['-created']
class ProductManager(models.Manager):
def take(self, register_number, **kwargs):
if not register_number:
return None
try:
o = self.get(register_number=register_number)
o.new = False
except Product.DoesNotExist:
o = Product()
o.register_number = register_number
o.organisation = kwargs.get('organisation', None)
o.name = kwargs.get('name', None)
o.okpd2 = kwargs.get('okpd2', None)
o.tnved = kwargs.get('tnved', None)
o.name_of_regulation = kwargs.get('name_of_regulation', None)
o.save()
o.new = True
return o
class Product(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
register_number = models.TextField(unique=True)
organisation = models.ForeignKey('Organisation',
null=True,
default=None,
on_delete=models.CASCADE,
related_name='+')
name = models.TextField(null=True, default=None)
okpd2 = models.TextField(null=True, default=None)
tnved = models.TextField(null=True, default=None)
name_of_regulation = models.TextField(null=True, default=None)
created = models.DateTimeField(default=timezone.now)
objects = ProductManager()
def __str__(self):
return f"{self.register_number} {self.name}"
class Meta:
ordering = ['-created']
| 29.780992 | 100 | 0.579159 | 813 | 7,207 | 5.02214 | 0.146371 | 0.061964 | 0.066128 | 0.083762 | 0.563801 | 0.460201 | 0.385746 | 0.337742 | 0.337742 | 0.335048 | 0 | 0.003176 | 0.301096 | 7,207 | 241 | 101 | 29.904564 | 0.807425 | 0.005273 | 0 | 0.367232 | 0 | 0 | 0.045347 | 0.006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073446 | false | 0.011299 | 0.028249 | 0.016949 | 0.468927 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6d505231ba879f61e9253759941879591d04805 | 2,098 | py | Python | raxcli/actions.py | racker/python-raxcli | c59d7ef9abca0a7cea56882113bd71feb6c5c6ef | [
"Apache-2.0"
] | 1 | 2020-01-16T09:45:28.000Z | 2020-01-16T09:45:28.000Z | raxcli/actions.py | racker/python-raxcli | c59d7ef9abca0a7cea56882113bd71feb6c5c6ef | [
"Apache-2.0"
] | null | null | null | raxcli/actions.py | racker/python-raxcli | c59d7ef9abca0a7cea56882113bd71feb6c5c6ef | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
class HelpAction(argparse.Action):
"""
Custom HelpAction which recognizes commands in <app> command> <sub command>
format.
"""
def __call__(self, parser, namespace, values, option_string=None):
app = self.default
parser.print_help(app.stdout)
app.stdout.write('\nCommands:\n')
command_manager = app.command_manager
for command, sub_commands in sorted(command_manager):
for sub_command, ep in sub_commands.items():
try:
factory = ep.load()
except Exception as err:
app.stdout.write('Could not load %r\n' % ep)
continue
try:
cmd = factory(self, None)
except Exception as err:
app.stdout.write('Could not instantiate %r: %s\n' %
(ep, err))
continue
one_liner = cmd.get_description().split('\n')[0]
if sub_command == 'index':
name = command
else:
name = '%s %s' % (command, sub_command)
app.stdout.write(' %-13s %s\n' % (name, one_liner))
sys.exit(0)
| 36.807018 | 79 | 0.606292 | 257 | 2,098 | 4.883268 | 0.501946 | 0.047809 | 0.044622 | 0.025498 | 0.066932 | 0.066932 | 0.066932 | 0.066932 | 0.066932 | 0 | 0 | 0.008299 | 0.310772 | 2,098 | 56 | 80 | 37.464286 | 0.859613 | 0.410867 | 0 | 0.214286 | 0 | 0 | 0.07244 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.142857 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6d563aa962d8c0e6ddaba79c71c22e3395f937e | 5,146 | py | Python | backend/app/api/api_v1/endpoints/cars.py | rufusnufus/BTSParking | 3bb6e7fd20943f258e297428ab1624c4f2786444 | [
"MIT"
] | 2 | 2021-11-13T08:05:14.000Z | 2021-12-02T11:36:11.000Z | backend/app/api/api_v1/endpoints/cars.py | rufusnufus/BTSParking | 3bb6e7fd20943f258e297428ab1624c4f2786444 | [
"MIT"
] | 44 | 2021-11-23T10:06:11.000Z | 2021-12-18T07:23:22.000Z | backend/app/api/api_v1/endpoints/cars.py | rufusnufus/BTSParking | 3bb6e7fd20943f258e297428ab1624c4f2786444 | [
"MIT"
] | null | null | null | from fastapi import APIRouter, Body, Depends, HTTPException, Response, status
from fastapi.encoders import jsonable_encoder
from app.core.security import cookie_is_none, oauth2_scheme
from app.logs import logger
from app.models.car import Car as ModelCar
from app.models.user import User as ModelUser
from app.schemas.car import InputCar, OutputCar
router = APIRouter()
@router.post(
"/",
summary="Create a new car",
response_model=OutputCar,
responses={
status.HTTP_200_OK: {
"description": "Car created successfully",
"content": {
"application/json": {
"examples": {
"touareg": {
"summary": "Volkswagen Touareg",
"value": {
"id": 1,
"model": "Volkswagen Touareg",
"license_number": "A000AA",
},
},
}
}
},
},
},
)
async def create_car(
car: InputCar = Body(
...,
examples={
"touareg": {
"summary": "Volkswagen Touareg",
"value": {"model": "Volkswagen Touareg", "license_number": "A000AA"},
},
},
),
auth_token: str = Depends(oauth2_scheme),
):
logger.info(f"function: create_car, params: car={car}")
if cookie_is_none(auth_token):
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
valid_email = await ModelUser.check_cookie(auth_token)
logger.info(f"function: create_car, email: {valid_email}")
if not valid_email:
# user is not authorized
raise HTTPException(status.HTTP_401_UNAUTHORIZED)
logger.info(f"function: create_car, creating car for {valid_email}")
created_car = await ModelCar.create(**car.dict(), email=valid_email)
return OutputCar(**created_car).dict()
@router.get(
"/",
summary="List the saved cars",
responses={
status.HTTP_200_OK: {
"description": "Listing of all added cars of a user",
"content": {
"application/json": {
"examples": {
"cars": {
"summary": "cars",
"value": [
{
"id": 1,
"model": "Volkswagen Touareg",
"license_number": "A000AA",
}
],
},
}
}
},
},
},
)
async def get_cars(auth_token: str = Depends(oauth2_scheme)):
if cookie_is_none(auth_token):
logger.info("function: get_cars, got cookie is None")
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
valid_email = await ModelUser.check_cookie(auth_token)
logger.info(f"function: get_cars, email: {valid_email}")
if not valid_email:
# user is not authorized
raise HTTPException(status.HTTP_401_UNAUTHORIZED)
logger.info(f"function: get_cars, getting all {valid_email}'s cars")
cars = await ModelCar.get_all(valid_email)
json_cars = []
for car in cars:
json_car = jsonable_encoder(car)
json_car.pop("email", None)
json_cars.append(json_car)
return json_cars
@router.delete(
"/{car_id}",
summary="Delete a saved car",
status_code=status.HTTP_204_NO_CONTENT,
responses={
status.HTTP_204_NO_CONTENT: {
"description": "Car deleted successfully",
},
status.HTTP_403_FORBIDDEN: {
"description": "This car isn't owned by this user",
},
status.HTTP_404_NOT_FOUND: {
"description": "This car doesn't exist",
},
},
)
async def delete_car(car_id: int, auth_token: str = Depends(oauth2_scheme)):
logger.info(f"function: delete_car, params: car_id={car_id}")
if cookie_is_none(auth_token):
logger.info("function: delete_car, got cookie is None")
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
valid_email = await ModelUser.check_cookie(auth_token)
logger.info(f"function: delete_car, email: {valid_email}")
if not valid_email:
# user is not authorized
raise HTTPException(status.HTTP_401_UNAUTHORIZED)
logger.info(f"function: delete_car, checking if car: {car_id} exists")
car = await ModelCar.get(car_id)
if not car:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)
logger.info(
f"function: delete_car, deleting car: {car_id} if it is {valid_email}'s car"
)
deleted_car_id = await ModelCar.delete(car_id, valid_email)
if deleted_car_id:
assert deleted_car_id == car_id
return Response(status_code=status.HTTP_204_NO_CONTENT)
else:
# if user asks to get not his car
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
| 34.536913 | 85 | 0.570152 | 561 | 5,146 | 5.00713 | 0.215686 | 0.0534 | 0.035244 | 0.060876 | 0.552154 | 0.53293 | 0.412602 | 0.360627 | 0.360627 | 0.331435 | 0 | 0.017356 | 0.328216 | 5,146 | 148 | 86 | 34.77027 | 0.795198 | 0.019433 | 0 | 0.285714 | 0 | 0 | 0.211863 | 0 | 0 | 0 | 0 | 0 | 0.007519 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.075188 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6e15a7ff5904b518c1e697c2b7f7bd499819849 | 793 | py | Python | boards/tests/test_cell.py | gcmac16/deeptoe | d92ffef553a03640479fb0dcd5e6fa1f4e6d1cd1 | [
"MIT"
] | null | null | null | boards/tests/test_cell.py | gcmac16/deeptoe | d92ffef553a03640479fb0dcd5e6fa1f4e6d1cd1 | [
"MIT"
] | null | null | null | boards/tests/test_cell.py | gcmac16/deeptoe | d92ffef553a03640479fb0dcd5e6fa1f4e6d1cd1 | [
"MIT"
] | null | null | null | import pytest
from ..cell import Cell
from ..exceptions import CellOccupiedError
def test_is_empty():
c = Cell('00')
assert c.is_empty
c.move('X')
assert not c.is_empty
def test_str():
c = Cell('00')
assert str(c) == '-'
c.move('X')
assert str(c) == 'X'
def test_error_on_double_play():
c = Cell('00')
c.move('X')
with pytest.raises(CellOccupiedError):
c.move('O')
def test_bad_input_error():
c = Cell('00')
with pytest.raises(ValueError):
c.move('BAD INPUT')
def test_cells_equal():
c = Cell('00')
c.move('X')
c2 = Cell('01')
c2.move('O')
c3 = Cell('02')
c4 = Cell('10')
c4.move('X')
assert c == c4
assert not c == c2
assert not c == c3
assert not c2 == c3
| 14.418182 | 42 | 0.553594 | 120 | 793 | 3.541667 | 0.291667 | 0.070588 | 0.082353 | 0.061176 | 0.061176 | 0.061176 | 0 | 0 | 0 | 0 | 0 | 0.045694 | 0.282472 | 793 | 54 | 43 | 14.685185 | 0.70123 | 0 | 0 | 0.264706 | 0 | 0 | 0.042875 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.147059 | false | 0 | 0.088235 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6e3fa4794dda049119dfb9a8ab2d410227d18eb | 1,135 | py | Python | day-03/part-1/youyoun.py | TPXP/adventofcode-2019 | ee653d6bfb510d14f2c2b3efc730d328c16b3f71 | [
"MIT"
] | 8 | 2019-12-01T08:56:46.000Z | 2019-12-05T21:21:12.000Z | day-03/part-1/youyoun.py | TPXP/adventofcode-2019 | ee653d6bfb510d14f2c2b3efc730d328c16b3f71 | [
"MIT"
] | 10 | 2019-11-25T09:56:20.000Z | 2021-05-10T19:57:48.000Z | day-03/part-1/youyoun.py | TPXP/adventofcode-2019 | ee653d6bfb510d14f2c2b3efc730d328c16b3f71 | [
"MIT"
] | 5 | 2019-12-01T08:19:57.000Z | 2020-11-23T09:50:19.000Z | from tool.runners.python import SubmissionPy
def get_coords(steps):
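    # trace the wire cell by cell from the origin; each step looks like "R8" or
    # "U5" (direction letter plus distance) and every visited coordinate is
    # collected into a set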
x, y = 0, 0
coords = set()
for step in steps.split(","):
if step[0] == "R":
for i in range(x, x + int(step[1:])):
coords.add((i, y))
x = x + int(step[1:])
elif step[0] == "L":
for i in range(x, x - int(step[1:]), -1):
coords.add((i, y))
x = x - int(step[1:])
elif step[0] == "D":
for i in range(y, y - int(step[1:]), -1):
coords.add((x, i))
y = y - int(step[1:])
elif step[0] == "U":
for i in range(y, y + int(step[1:])):
coords.add((x, i))
y = y + int(step[1:])
return coords
class YouyounSubmission(SubmissionPy):
def run(self, s):
# :param s: input in string format
# :return: solution flag
# Your code goes here
l1, l2 = s.splitlines()
d1 = get_coords(l1)
d2 = get_coords(l2)
intersections = d2.intersection(d1) - {(0, 0)}
return min([abs(e[0]) + abs(e[1]) for e in intersections])
| 29.868421 | 66 | 0.462555 | 163 | 1,135 | 3.202454 | 0.337423 | 0.10728 | 0.122605 | 0.084291 | 0.377395 | 0.377395 | 0.360153 | 0.360153 | 0.360153 | 0.203065 | 0 | 0.039161 | 0.370044 | 1,135 | 37 | 67 | 30.675676 | 0.690909 | 0.066079 | 0 | 0.137931 | 0 | 0 | 0.004735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6e3fc4beae8b1f7a433dcce7cb12b7d34338dc4 | 5,104 | py | Python | niftynet/contrib/regression_weighted_sampler/isample_regression.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 2 | 2019-03-25T18:50:47.000Z | 2019-10-10T01:45:02.000Z | niftynet/contrib/regression_weighted_sampler/isample_regression.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | null | null | null | niftynet/contrib/regression_weighted_sampler/isample_regression.py | elias-1/NiftyNet | 05cd2ffbff5043d9a40b524a6d72f6bd5cd072d2 | [
"Apache-2.0"
] | 2 | 2018-05-13T14:54:48.000Z | 2018-05-26T16:08:09.000Z | import os
import tensorflow as tf
from niftynet.application.regression_application import \
RegressionApplication, SUPPORTED_INPUT
from niftynet.engine.sampler_uniform import UniformSampler
from niftynet.engine.sampler_weighted import WeightedSampler
from niftynet.engine.application_variables import NETWORK_OUTPUT
from niftynet.io.image_reader import ImageReader
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer
class ISampleRegression(RegressionApplication):
#def initialise_weighted_sampler(self):
# if len(self.readers) == 2:
# training_sampler = WeightedSampler(
# reader=self.readers[0],
# data_param=self.data_param,
# batch_size=self.net_param.batch_size,
# windows_per_image=self.action_param.sample_per_volume,
# queue_length=self.net_param.queue_length)
# validation_sampler = UniformSampler(
# reader=self.readers[1],
# data_param=self.data_param,
# batch_size=self.net_param.batch_size,
# windows_per_image=self.action_param.sample_per_volume,
# queue_length=self.net_param.queue_length)
# self.sampler = [[training_sampler, validation_sampler]]
# else:
# RegressionApplication.initialise_weighted_sampler()
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
RegressionApplication.initialise_dataset_loader(
self, data_param, task_param, data_partitioner)
if self.is_training:
return
if not task_param.error_map:
return
file_lists = self.get_file_lists(data_partitioner)
# modifying the original readers in regression application
# as we need ground truth labels to generate error maps
        self.readers = []
for file_list in file_lists:
reader = ImageReader(['image', 'output'])
reader.initialise(data_param, task_param, file_list)
self.readers.append(reader)
mean_var_normaliser = MeanVarNormalisationLayer(image_name='image')
histogram_normaliser = None
if self.net_param.histogram_ref_file:
histogram_normaliser = HistogramNormalisationLayer(
image_name='image',
modalities=vars(task_param).get('image'),
model_filename=self.net_param.histogram_ref_file,
norm_type=self.net_param.norm_type,
cutoff=self.net_param.cutoff,
name='hist_norm_layer')
preprocessors = []
if self.net_param.normalisation:
preprocessors.append(histogram_normaliser)
if self.net_param.whitening:
preprocessors.append(mean_var_normaliser)
if self.net_param.volume_padding_size:
preprocessors.append(PadLayer(
image_name=SUPPORTED_INPUT,
border=self.net_param.volume_padding_size))
self.readers[0].add_preprocessing_layers(preprocessors)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
if self.is_training:
# using the original training pipeline
RegressionApplication.connect_data_and_network(
self, outputs_collector, gradients_collector)
else:
init_aggregator = \
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]
init_aggregator()
# modifying the original pipeline so that
# the error maps are computed instead of the regression output
with tf.name_scope('validation'):
data_dict = self.get_sampler()[0][-1].pop_batch_op()
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=self.is_training)
if self.regression_param.error_map:
# writing error maps to folder without prefix
error_map_folder = os.path.join(
os.path.dirname(self.action_param.save_seg_dir),
'error_maps')
self.output_decoder.output_path = error_map_folder
self.output_decoder.prefix = ''
# computes the squared error between target and prediction
target = tf.cast(data_dict['output'], tf.float32)
net_out = tf.squared_difference(target, net_out)
# window output and locations for aggregating volume results
outputs_collector.add_to_collection(
var=net_out, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
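# Hedged aside (not part of the original file): tf.squared_difference(a, b)
# computes elementwise (a - b) ** 2, so the windows collected above aggregate
# into a per-voxel squared-error volume. Toy example:
#     target  = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#     net_out = tf.constant([[1.5, 2.0], [2.0, 4.5]])
#     tf.squared_difference(target, net_out)  # -> [[0.25, 0.0], [1.0, 0.25]]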
| 43.623932 | 75 | 0.651646 | 545 | 5,104 | 5.812844 | 0.288073 | 0.030934 | 0.049242 | 0.017677 | 0.229167 | 0.217803 | 0.137626 | 0.082702 | 0.082702 | 0.082702 | 0 | 0.002982 | 0.277234 | 5,104 | 116 | 76 | 44 | 0.855787 | 0.22453 | 0 | 0.105263 | 0 | 0 | 0.025426 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.131579 | 0 | 0.197368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6e9ac097d290b36827d579227b511e7d239d092 | 2,037 | py | Python | mayan/apps/navigation/widgets.py | Dave360-crypto/mayan-edms | 9cd37537461347f79ff0429e4b8b16fd2446798d | [
"Apache-2.0"
] | 3 | 2020-02-03T11:58:51.000Z | 2020-10-20T03:52:21.000Z | mayan/apps/navigation/widgets.py | Dave360-crypto/mayan-edms | 9cd37537461347f79ff0429e4b8b16fd2446798d | [
"Apache-2.0"
] | null | null | null | mayan/apps/navigation/widgets.py | Dave360-crypto/mayan-edms | 9cd37537461347f79ff0429e4b8b16fd2446798d | [
"Apache-2.0"
] | 2 | 2020-10-24T11:10:06.000Z | 2021-03-03T20:05:38.000Z | from __future__ import absolute_import
import urlparse
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.template import RequestContext, Variable
from django.template.defaultfilters import capfirst
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from permissions.models import Permission
from .templatetags.navigation_tags import resolve_links
from .utils import resolve_to_name
def button_navigation_widget(request, link):
    """Render a link as a button, or nothing if the user lacks permission."""
if 'permissions' in link:
try:
Permission.objects.check_permissions(request.user, link['permissions'])
return render_widget(request, link)
except PermissionDenied:
return u''
else:
return render_widget(request, link)
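# The gate above is a common pattern: attempt the permission check, render on
# success, and collapse PermissionDenied into an empty string so the button
# simply disappears for unauthorised users. A generic sketch of the same idea
# (hypothetical helper, not used elsewhere in this module):
def _render_if_permitted(check, render, *args):
    try:
        check()
        return render(*args)
    except PermissionDenied:
        return u''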
def render_widget(request, link):
    """Resolve the link for the current request and render it as an HTML button."""
context = RequestContext(request)
request = Variable('request').resolve(context)
current_path = request.META['PATH_INFO']
current_view = resolve_to_name(current_path)
# prefer the current request's query string, falling back to the referer's
query_string = urlparse.urlparse(request.get_full_path()).query or \
    urlparse.urlparse(request.META.get('HTTP_REFERER', u'/')).query
parsed_query_string = urlparse.parse_qs(query_string)
links = resolve_links(context, [link], current_view, current_path, parsed_query_string)
if links:
link = links[0]
return mark_safe(u'<a style="text-decoration:none; margin-right: 10px;" href="%(url)s"><button style="vertical-align: top; padding: 1px; width: 110px; height: 100px; margin: 10px;"><img src="%(static_url)simages/icons/%(icon)s" alt="%(image_alt)s" /><p style="margin: 0px 0px 0px 0px;">%(string)s</p></button></a>' % {
'url': reverse(link['view']) if 'view' in link else link['url'],
'icon': link.get('icon', 'link_button.png'),
'static_url': settings.STATIC_URL,
'string': capfirst(link['text']),
'image_alt': _(u'icon'),
})
else:
return u''
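# Hedged note on the query-string handling in render_widget: with the Python 2
# stdlib used here,
#     urlparse.parse_qs(urlparse.urlparse('/docs/?page=2&sort=name').query)
# evaluates to {'page': ['2'], 'sort': ['name']} -- each parameter maps to a
# list of values, and that mapping is what gets passed to resolve_links above.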
| 39.173077 | 326 | 0.704467 | 259 | 2,037 | 5.366795 | 0.3861 | 0.05036 | 0.048921 | 0.04964 | 0.041727 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009541 | 0.17673 | 2,037 | 51 | 327 | 39.941176 | 0.81932 | 0 | 0 | 0.15 | 0 | 0.025 | 0.203731 | 0.071674 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.3 | 0 | 0.475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
f6e9d127c928e003e7e3f6cc2bec25c1baf82144 | 3,237 | py | Python | src/toja/views/contribute.py | scmmmh/the-old-joke-archive | cfc842de94d092aa43de794154bea7e5edd97b16 | [
"MIT"
] | null | null | null | src/toja/views/contribute.py | scmmmh/the-old-joke-archive | cfc842de94d092aa43de794154bea7e5edd97b16 | [
"MIT"
] | 12 | 2019-12-26T17:40:56.000Z | 2022-02-26T17:21:06.000Z | src/toja/views/contribute.py | scmmmh/the-old-joke-archive | cfc842de94d092aa43de794154bea7e5edd97b16 | [
"MIT"
] | null | null | null | from copy import deepcopy
from math import ceil
from pyramid.httpexceptions import HTTPForbidden
from pyramid.view import view_config
from sqlalchemy import and_
from ..models import Image
from ..session import require_logged_in
from ..config import ANNOTATIONS, JOKE_METADATA
from ..translation import _
@view_config(route_name='contribute', renderer='toja:templates/contribute/index.jinja2')
def index(request):
"""Handle the contribution landing page."""
return {}
@view_config(route_name='contribute.workbench', renderer='toja:templates/contribute/workbench/index.jinja2')
@require_logged_in()
def workbench(request):
"""Handle the source overview list for the transcription workbench."""
if request.current_user.trust == 'full':
try:
    page = int(request.params['page'])
except (KeyError, ValueError):
    page = 0
sources = request.dbsession.query(Image).filter(and_(Image.type == 'source',
Image.status == 'processing'))
total = sources.count()
sources = sources.offset(page * 10).limit(10)
return {'sources': sources,
'pagination': {'start': max(0, page - 2),
'current': page,
'end': min(ceil(total / 10), page + 2),
'total': total}}
else:
raise HTTPForbidden()
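# Standalone sketch (hypothetical helper mirroring the dict built above) of
# the pagination window: with ten sources per page the window spans at most
# two pages either side of the current one.
def _pagination_window(page, total, per_page=10):
    return {'start': max(0, page - 2),
            'current': page,
            'end': min(ceil(total / per_page), page + 2),
            'total': total}
# e.g. _pagination_window(0, 35) == {'start': 0, 'current': 0, 'end': 2, 'total': 35}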
@view_config(route_name='contribute.workbench.edit', renderer='toja:templates/contribute/workbench/edit.jinja2')
@require_logged_in()
def workbench_edit(request):
"""Handle the transcription workbench page for a single source."""
annotations = []
for annotation in ANNOTATIONS:
annotation = deepcopy(annotation)
if 'attrs' in annotation:
for attr in annotation['attrs']:
if 'values' in attr:
attr['values'] = [(value, _(request, value)) for value in attr['values']]
if attr['type'] in ['singletext', 'multitext']:
attr['autosuggest'] = request.route_url('search.autosuggest', category=attr['name'])
annotations.append(annotation)
metadata = []
for entry in JOKE_METADATA:
if entry['type'] in ['multichoice', 'select']:
metadata.append({'name': entry['name'],
'label': entry['label'],
'type': entry['type'],
'values': [(value, _(request, value)) for value in entry['values']]})
elif entry['type'] == 'multitext':
metadata.append({'name': entry['name'],
'label': entry['label'],
'type': entry['type'],
'autosuggest': request.route_url('search.autosuggest', category=entry['name'])})
if request.current_user.trust == 'full':
return {'config': {'baseURL': request.route_url('api'),
'sourceId': int(request.matchdict['sid']),
'userId': request.current_user.id,
'annotations': annotations,
'metadata': metadata}}
else:
raise HTTPForbidden()
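# Why the deepcopy above matters (hedged illustration with toy data): the
# ANNOTATIONS entries are module-level constants, so translating
# attr['values'] in place would leak request-specific text across requests.
# Copying first keeps the shared configuration untouched:
#     entry = deepcopy(CONFIG[0])
#     entry['values'] = [(v, _(request, v)) for v in entry['values']]
#     # CONFIG[0] is unchanged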
| 43.16 | 112 | 0.569045 | 318 | 3,237 | 5.707547 | 0.323899 | 0.022039 | 0.024793 | 0.031405 | 0.323416 | 0.263361 | 0.153168 | 0.060606 | 0.060606 | 0.060606 | 0 | 0.005755 | 0.302132 | 3,237 | 74 | 113 | 43.743243 | 0.797698 | 0.050355 | 0 | 0.21875 | 0 | 0 | 0.170046 | 0.051668 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.140625 | 0 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |