from connect_four.envs import TwoPlayerGameEnvVariables
from connect_four.problem.connecting_group_manager import ConnectingGroupManager
class ConnectFourGroupManager(ConnectingGroupManager):
def __init__(self, env_variables: TwoPlayerGameEnvVariables):
super().__init__(env_variables, num_to_connect=4)
__author__ = 'Felix Simkovic'
__date__ = '2019-05-11'
__license__ = 'MIT License'
import sys
APPLICATION_NAME = 'Pomodoro TaskWarrior'
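# On macOS, patch the bundle's display name so the menu bar shows this app's
# own name instead of the default "Python" (requires the pyobjc Foundation bridge).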
if sys.platform.startswith('darwin'):
try:
from Foundation import NSBundle
bundle = NSBundle.mainBundle()
if bundle:
app_info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
if app_info:
app_info['CFBundleName'] = APPLICATION_NAME
except ImportError:
pass
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103,C0111
import argparse
import sys
from snake.game import PureGame, GameConf
from snake.utils import dotdict
from snake.rl.coach import Coach
from snake.rl.nnet_wrapper import NNetWrapper
import logging
logging.basicConfig(level=logging.INFO)
sys.setrecursionlimit(5001)
args = dotdict({
'lr': 0.001,
'dropout': 0.3,
'epochs': 10,
'batch_size': 64,
'cuda': False,
'num_channels': 128,
'checkpoint': './temp/',
'load_model': False,
'load_folder_file': ('/dev/models/8x100x50','best.pth.tar'),
'numItersForTrainExamplesHistory': 20,
'numIters': 20,
'numEps': 100, # Number of complete self-play games to simulate during a new iteration.
    'tempThreshold': 15,        # Move count after which self-play switches to greedy (temperature 0) move selection.
    'updateThreshold': 0.6,     # During arena playoff, the new neural net is accepted if it wins at least this fraction of games.
    'maxlenOfQueue': 20000,     # Maximum number of game examples retained for training.
    'numMCTSSims': 25,          # Number of MCTS simulations per move.
'cpuct': 1,
})
def main():
logging.info('Loading %s...', PureGame.__name__)
game = PureGame(GameConf())
logging.info('Loading %s...', NNetWrapper.__name__)
nnet = NNetWrapper(game, args)
if args.load_model:
        logging.info('Loading checkpoint "%s/%s"...', args.load_folder_file[0], args.load_folder_file[1])
nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
else:
logging.warning('Not loading a checkpoint!')
logging.info('Loading the Coach...')
coach = Coach(game, nnet, args)
if args.load_model:
logging.info("Loading 'trainExamples' from file...")
coach.loadTrainExamples()
logging.info('Starting the learning process 🎉')
coach.learn()
if __name__ == "__main__":
main()
# flake8: noqa
# This file is autogenerated by /metadata-ingestion/scripts/avro_codegen.py
# Do not modify manually!
# fmt: off
from ......schema_classes import ChartKeyClass
from ......schema_classes import CorpGroupKeyClass
from ......schema_classes import CorpUserKeyClass
from ......schema_classes import DashboardKeyClass
from ......schema_classes import DataFlowKeyClass
from ......schema_classes import DataHubPolicyKeyClass
from ......schema_classes import DataJobKeyClass
from ......schema_classes import DataPlatformKeyClass
from ......schema_classes import DataProcessKeyClass
from ......schema_classes import DatasetKeyClass
from ......schema_classes import GlossaryNodeKeyClass
from ......schema_classes import GlossaryTermKeyClass
from ......schema_classes import MLFeatureKeyClass
from ......schema_classes import MLFeatureTableKeyClass
from ......schema_classes import MLModelDeploymentKeyClass
from ......schema_classes import MLModelGroupKeyClass
from ......schema_classes import MLModelKeyClass
from ......schema_classes import MLPrimaryKeyKeyClass
from ......schema_classes import SchemaFieldKeyClass
from ......schema_classes import TagKeyClass
ChartKey = ChartKeyClass
CorpGroupKey = CorpGroupKeyClass
CorpUserKey = CorpUserKeyClass
DashboardKey = DashboardKeyClass
DataFlowKey = DataFlowKeyClass
DataHubPolicyKey = DataHubPolicyKeyClass
DataJobKey = DataJobKeyClass
DataPlatformKey = DataPlatformKeyClass
DataProcessKey = DataProcessKeyClass
DatasetKey = DatasetKeyClass
GlossaryNodeKey = GlossaryNodeKeyClass
GlossaryTermKey = GlossaryTermKeyClass
MLFeatureKey = MLFeatureKeyClass
MLFeatureTableKey = MLFeatureTableKeyClass
MLModelDeploymentKey = MLModelDeploymentKeyClass
MLModelGroupKey = MLModelGroupKeyClass
MLModelKey = MLModelKeyClass
MLPrimaryKeyKey = MLPrimaryKeyKeyClass
SchemaFieldKey = SchemaFieldKeyClass
TagKey = TagKeyClass
# fmt: on
# // Copyright 2016 The go-vgo Project Developers. See the COPYRIGHT
# // file at the top-level directory of this distribution and at
# // https://github.com/go-vgo/robotgo/blob/master/LICENSE
# //
# // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# // option. This file may not be copied, modified, or distributed
# // except according to those terms.
from __future__ import print_function
import sys
import os
from cffi import FFI
is_64b = sys.maxsize > 2**32
ffi = FFI()
if is_64b:
ffi.cdef("typedef long GoInt;\n")
else:
ffi.cdef("typedef int GoInt;\n")
ffi.cdef("""
typedef struct {
GoInt x;
GoInt y;
} GoRInt;
typedef struct {
char* arr;
char* err;
} GoStr;
char* GetVersion();
void Sleep(GoInt tm);
void MSleep(double tm);
char* GetPixelColor(GoInt x, GoInt y);
char* GetMouseColor();
GoRInt GetScreenSize();
GoRInt GetScaleSize();
void MoveMose(GoInt x, GoInt y);
void DargMose(GoInt x, GoInt y, char* btn);
void MoveSmooth(GoInt x, GoInt y, double low, double high);
GoRInt GetMousePos();
void Click(char* btn, bool double_c);
void MoseToggle(char* key, char* btn);
void Scroll(GoInt x, GoInt y);
char* KeyTap(char* key, char* vals);
char* KeyToggle(char* key, char* vals);
void TypeStr(char* str, double args);
GoStr ReadAll();
char* WriteAll(char* str);
void PasteStr(char* str);
bool AddEvent(char* p0);
void StopEvent();
bool AddEvents(char* p0, char* p1);
void End();
bool AddMouse(char* p0, GoInt p1, GoInt p2);
bool AddMousePos(GoInt p0, GoInt p1);
char* GetTitle(GoInt pid);
GoStr FindIds(char* name);
GoStr FindName(GoInt pid);
GoStr FindNames();
char* ActivePID(GoInt pid);
char* ActiveName(char* name);
char* Kill(GoInt pid);
""")
# Load the compiled robotgo shared library that sits next to this module.
lib_dir = os.path.dirname(__file__)
lib_path = os.path.join(lib_dir, "../robotgo")
lib = ffi.dlopen(lib_path)
def ch(s):
    # Encode a Python string as UTF-8 bytes for the C boundary.
    return s.encode('utf-8')
def f_str(cs):
    # Read a C char* returned by the library back into Python bytes.
    return ffi.string(cs)
def getVersion():
ver = lib.GetVersion()
return f_str(ver)
def sleep(tm):
lib.Sleep(tm)
def MSleep(tm):
lib.MSleep(tm)
# /*
# _______. ______ .______ _______ _______ .__ __.
# / | / || _ \ | ____|| ____|| \ | |
# | (----`| ,----'| |_) | | |__ | |__ | \| |
# \ \ | | | / | __| | __| | . ` |
# .----) | | `----.| |\ \----.| |____ | |____ | |\ |
# |_______/ \______|| _| `._____||_______||_______||__| \__|
# */
def getPixelColor(x, y):
color = lib.GetPixelColor(x, y)
return f_str(color)
def getMouseColor():
color = lib.GetMouseColor()
return f_str(color)
def getScreenSize():
s = lib.GetScreenSize()
return s.x, s.y
def getScaleSize():
s = lib.GetScaleSize()
return s.x, s.y
# /*
# .___ ___. ______ __ __ _______. _______
# | \/ | / __ \ | | | | / || ____|
# | \ / | | | | | | | | | | (----`| |__
# | |\/| | | | | | | | | | \ \ | __|
# | | | | | `--' | | `--' | .----) | | |____
# |__| |__| \______/ \______/ |_______/ |_______|
# */
def moveMose(x, y):
lib.MoveMose(x, y)
def dargMose(x, y, btn="left"):
    lib.DargMose(x, y, ch(btn))
def moveSmooth(x, y, low=1.0, high=3.0):
lib.MoveSmooth(x, y, low, high)
def click(btn="left", double_c=False):
lib.Click(ch(btn), double_c)
def moseToggle(key, btn):
    lib.MoseToggle(ch(key), ch(btn))
def scroll(x, y):
lib.Scroll(x, y)
# /*
# __ ___ ___________ ____ .______ ______ ___ .______ _______
# | |/ / | ____\ \ / / | _ \ / __ \ / \ | _ \ | \
# | ' / | |__ \ \/ / | |_) | | | | | / ^ \ | |_) | | .--. |
# | < | __| \_ _/ | _ < | | | | / /_\ \ | / | | | |
# | . \ | |____ | | | |_) | | `--' | / _____ \ | |\ \----.| '--' |
# |__|\__\ |_______| |__| |______/ \______/ /__/ \__\ | _| `._____||_______/
# */
def arr_add(args):
    # Join the variadic string arguments into a single comma-separated string.
    return ",".join(args)
def keyTap(key, *vals):
arr = arr_add(vals)
s = lib.KeyTap(ch(key), ch(arr))
return f_str(s)
def KeyToggle(key, *vals):
arr = arr_add(vals)
s = lib.KeyToggle(ch(key), ch(arr))
return f_str(s)
def typeStr(s, args=3.0):
lib.TypeStr(ch(s), args)
def errStr(s):
    # Return the payload (split into a list) on success, or the error text otherwise.
    err = str(f_str(s.err))
    if err == "b''":  # an empty error string means the call succeeded
        return arr(s.arr)
    return err
def readAll():
s = lib.ReadAll()
return errStr(s)
def writeAll(s):
return lib.WriteAll(ch(s))
def pasteStr(s):
    lib.PasteStr(ch(s))
# /*
# .______ __ .___________..___ ___. ___ .______
# | _ \ | | | || \/ | / \ | _ \
# | |_) | | | `---| |----`| \ / | / ^ \ | |_) |
# | _ < | | | | | |\/| | / /_\ \ | ___/
# | |_) | | | | | | | | | / _____ \ | |
# |______/ |__| |__| |__| |__| /__/ \__\ | _|
# */
# /*
# ___________ ____ _______ .__ __. .___________.
# | ____\ \ / / | ____|| \ | | | |
# | |__ \ \/ / | |__ | \| | `---| |----`
# | __| \ / | __| | . ` | | |
# | |____ \ / | |____ | |\ | | |
# |_______| \__/ |_______||__| \__| |__|
# */
def addEvent(key):
return lib.AddEvent(ch(key))
def addEvents(key, *vals):
arr = arr_add(vals)
return lib.AddEvents(ch(key), ch(arr))
def end():
lib.End()
def addMouse(btn, x=-1, y=-1):
return lib.AddMouse(ch(btn), x, y)
def addMousePos(x, y):
return lib.AddMousePos(x, y)
# /*
# ____ __ ____ __ .__ __. _______ ______ ____ __ ____
# \ \ / \ / / | | | \ | | | \ / __ \ \ \ / \ / /
# \ \/ \/ / | | | \| | | .--. | | | | \ \/ \/ /
# \ / | | | . ` | | | | | | | | \ /
# \ /\ / | | | |\ | | '--' | `--' | \ /\ /
# \__/ \__/ |__| |__| \__| |_______/ \______/ \__/ \__/
# */
def arr(s):
st = bytes.decode(f_str(s))
return st.split(' ')
def getTitle(pid=-1):
s = lib.GetTitle(pid)
return f_str(s)
def findIds(name):
s = lib.FindIds(ch(name))
return errStr(s)
def findName(pid):
    # FindName returns a GoStr (arr/err pair), so route it through errStr.
    s = lib.FindName(pid)
    return errStr(s)
def findNames():
s = lib.FindNames()
return errStr(s)
def activePID(pid):
err = lib.ActivePID(pid)
return f_str(err)
def activeName(name):
err = lib.ActiveName(ch(name))
return f_str(err)
def kill(pid):
lib.Kill(pid)
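# Illustrative usage sketch: assumes the compiled robotgo shared library is
# present next to this module (loaded above via ffi.dlopen) and uses only the
# wrappers defined in this file.
if __name__ == '__main__':
    print(getVersion())        # library version string
    w, h = getScreenSize()     # primary screen dimensions
    moveMose(w // 2, h // 2)   # move the cursor to the screen center
    typeStr("hello")           # type a string at the current focus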
from typing import List

class Solution:
    def arrayNesting(self, nums: List[int]) -> int:
max_length = -1
visited = [False] * len(nums)
for i in range(0, len(nums)):
if visited[i]:
continue
start, count = nums[i], 0
visited[i] = True
# form the cycle
while True:
start = nums[start]
visited[start] = True
count += 1
if start == nums[i]:
break
max_length = max(max_length, count)
return max_length
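# Quick check (illustrative): in [5, 4, 0, 3, 1, 6, 2] the longest cycle is
# 0 -> 5 -> 6 -> 2 -> 0, so the expected answer is 4.
if __name__ == "__main__":
    print(Solution().arrayNesting([5, 4, 0, 3, 1, 6, 2]))  # -> 4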
from typing import Dict, List
from elasticsearch_dsl.query import Q
from elasticsearch_dsl.response import Response
from elasticsearch_dsl.response.hit import Hit
from elasticsearch_dsl.search import Search
from flask_restful import Resource, reqparse
from meetup_search.models.group import Group
from .argument_validator import date_validator, positive_int_validator
class MeetupSearchApi(Resource):
def __init__(self):
super().__init__()
self.parser = reqparse.RequestParser()
# query
self.parser.add_argument(
"query", type=str, required=True, help="Bad query: {error_msg}"
)
# pagination
self.parser.add_argument(
"page",
type=positive_int_validator,
help="Bad pagination page number: {error_msg}",
default=0,
)
self.parser.add_argument(
"limit",
type=int,
help="Bad pagination limit: {error_msg}",
choices=(5, 10, 25, 100),
default=10,
)
# sort
self.parser.add_argument(
"sort", type=str, help="Bad sorting: {error_msg}",
)
# load events
self.parser.add_argument(
"load_events", type=bool, help="Bad sorting: {error_msg}", default=False,
)
# event time filter
self.parser.add_argument(
"event_time_gte", type=date_validator, help="Bad date: {error_msg}",
)
self.parser.add_argument(
"event_time_lte", type=date_validator, help="Bad date: {error_msg}",
)
# geo_distance
self.parser.add_argument(
"geo_lat", type=float, help="Bad geo latitute: {error_msg}",
)
self.parser.add_argument(
"geo_lon", type=float, help="Bad geo longitute: {error_msg}",
)
self.parser.add_argument(
"geo_distance", type=str, help="Bad distance (example: 100km): {error_msg}",
)
def put(self) -> dict:
"""
search for a group in Elasticsearch
Returns:
dict -- search results
"""
args = self.parser.parse_args()
# init search
search: Search = Group.search()
search_query: dict = {
"bool": {
"should": [
{"query_string": {"query": args["query"], "fields": ["*"]}},
{
"nested": {
"path": "topics",
"score_mode": "avg",
"query": {
"bool": {
"must": [
{
"query_string": {
"query": args["query"],
"fields": ["*"],
}
}
]
}
},
}
},
{
"nested": {
"path": "events",
"score_mode": "avg",
"query": {
"bool": {
"must": [
{
"query_string": {
"query": args["query"],
"fields": ["*"],
}
}
]
}
},
}
},
],
"must": [],
}
}
# set event time filter
if args["event_time_gte"] or args["event_time_lte"]:
range_query: dict = {}
if args["event_time_gte"]:
range_query["gte"] = args["event_time_gte"]
if args["event_time_lte"]:
range_query["lte"] = args["event_time_lte"]
search_query["bool"]["must"].append(
{
"nested": {
"path": "events",
"score_mode": "avg",
"query": {
"bool": {"must": [{"range": {"events.time": range_query}}]}
},
}
}
)
# set geo_distance filter
if args["geo_distance"] and args["geo_lat"] and args["geo_lon"]:
search_query["bool"]["must"].append(
{
"nested": {
"path": "events",
"score_mode": "avg",
"query": {
"bool": {
"must": [
{
"geo_distance": {
"distance": args["geo_distance"],
"events.venue_location": {
"lat": args["geo_lat"],
"lon": args["geo_lon"],
},
}
}
]
}
},
}
}
)
# pagination
        start_entry: int = args["page"] * args["limit"]
        end_entry: int = start_entry + args["limit"]
        search = search[start_entry:end_entry]
        # sort
        if args["sort"]:
            search = search.sort(args["sort"])
# execute search
search = search.query(Q(search_query))
        # set highlight score (Search methods return a copy, so reassign)
        search = search.highlight_options(order="score")
# load response from elasticsearch
results: Response = search.execute()
# get response
found_groups: List[dict] = []
map_center_lat: float = 0
map_center_lon: float = 0
for group in results.hits:
group_dict: dict = {}
if isinstance(group, Hit):
group_object = Group.get_group(urlname=group.to_dict()["urlname"])
group_dict = group_object.to_json_dict(load_events=args["load_events"])
else:
group_dict = group.to_json_dict(load_events=args["load_events"])
if "venue_location_average" in group_dict:
map_center_lat = (
map_center_lat + group_dict["venue_location_average"]["lat"]
)
map_center_lon = (
map_center_lon + group_dict["venue_location_average"]["lon"]
)
else:
map_center_lat = map_center_lat + group_dict["location"]["lat"]
map_center_lon = map_center_lon + group_dict["location"]["lon"]
            # add group dict to array
            found_groups.append({**group_dict})
if len(found_groups) > 0:
map_center_lat = map_center_lat / len(found_groups)
map_center_lon = map_center_lon / len(found_groups)
return {
"results": found_groups,
"hits": results.hits.total["value"],
"map_center": {"lat": map_center_lat, "lon": map_center_lon},
}
class MeetupSearchSuggestApi(Resource):
def __init__(self):
super().__init__()
self.parser = reqparse.RequestParser()
# query
self.parser.add_argument(
"query", type=str, required=True, help="Bad query: {error_msg}"
)
def put(self) -> Dict[str, List[str]]:
"""
        Get suggestions for the query term based on group names.
        Returns:
            Dict[str, List[str]] -- a list of up to 5 suggestions
"""
args = self.parser.parse_args()
# run suggest query
search: Search = Group.search()
search = search.suggest(
"suggestion", args["query"], completion={"field": "name_suggest"},
)
response: Response = search.execute()
# get suggestion
suggestion: List[str] = []
for result in response.suggest.suggestion:
for option in result.options:
suggestion.append(option.text)
return {"suggestions": suggestion}
from a10sdk.common.A10BaseClass import A10BaseClass
class Crl(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param crl_sec: {"minLength": 1, "maxLength": 255, "type": "string", "description": "Secondary CRL File Name or URL (http://www.example.com/ocsp) (only .der filetypes)", "format": "string-rlx"}
:param crl_pri: {"minLength": 1, "maxLength": 255, "type": "string", "description": "Primary CRL File Name or URL (http://www.example.com/ocsp) (only .der filetypes)", "format": "string-rlx"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "crl"
self.DeviceProxy = ""
self.crl_sec = ""
self.crl_pri = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
class Ocsp(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ocsp_pri: {"minLength": 1, "maxLength": 31, "type": "string", "description": "Primary OCSP Authentication Server", "format": "string"}
:param ocsp_sec: {"minLength": 1, "maxLength": 31, "type": "string", "description": "Secondary OCSP Authentication Server", "format": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ocsp"
self.DeviceProxy = ""
self.ocsp_pri = ""
self.ocsp_sec = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
class Revocation(A10BaseClass):
"""Class Description::
IPsec VPN revocation settings.
    Class revocation supports CRUD operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param ca: {"description": "Certificate Authority file name", "format": "string", "minLength": 1, "optional": true, "maxLength": 31, "type": "string"}
:param name: {"description": "Revocation name", "format": "string", "minLength": 1, "optional": false, "maxLength": 31, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/vpn/revocation/{name}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "name"]
self.b_key = "revocation"
self.a10_url="/axapi/v3/vpn/revocation/{name}"
self.DeviceProxy = ""
self.uuid = ""
self.ca = ""
self.name = ""
self.crl = {}
self.ocsp = {}
        for key, value in kwargs.items():
            setattr(self, key, value)
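# Illustrative usage sketch: build a revocation object locally and attach CRL
# settings; wiring a DeviceProxy and issuing the REST call are deployment-
# specific and omitted here.
rev = Revocation(name="vpn-rev1", ca="rootCA")
rev.crl = Crl(crl_pri="http://www.example.com/primary.der")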
"""
Noop migration to test rollback
"""
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('oauth_dispatch', '0010_noop_migration_to_test_rollback'),
]
operations = [
migrations.RunSQL(migrations.RunSQL.noop, reverse_sql=migrations.RunSQL.noop)
]
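# To exercise the rollback this migration exists to test (command sketch;
# adjust the target to the migration preceding this file):
#   python manage.py migrate oauth_dispatch 0010_noop_migration_to_test_rollback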
from senscritiquescraper.utils import survey_utils
def test_get_category_from_survey(survey_movie):
if survey_utils.get_category_from_survey(survey_movie) != "films":
raise AssertionError()
def test_get_rows_from_survey(survey_movie):
rows = survey_utils.get_rows_from_survey(survey_movie)
if len(rows) != 15:
print(len(rows))
raise AssertionError()
def test_get_infos_from_survey(survey_movie):
category = survey_utils.get_category_from_survey(survey_movie)
infos = survey_utils.get_survey_infos(survey_movie, category)
if len(infos) != 15:
raise AssertionError()
if infos[0]["Title"] != "La Haine":
raise AssertionError()
from jira.exceptions import JIRAError
from tests.conftest import JiraTestCase
class VersionTests(JiraTestCase):
def test_create_version(self):
name = "new version " + self.project_b
desc = "test version of " + self.project_b
release_date = "2015-03-11"
version = self.jira.create_version(
name, self.project_b, releaseDate=release_date, description=desc
)
self.assertEqual(version.name, name)
self.assertEqual(version.description, desc)
self.assertEqual(version.releaseDate, release_date)
version.delete()
def test_create_version_with_project_obj(self):
project = self.jira.project(self.project_b)
version = self.jira.create_version(
"new version 2",
project,
releaseDate="2015-03-11",
description="test version!",
)
self.assertEqual(version.name, "new version 2")
self.assertEqual(version.description, "test version!")
self.assertEqual(version.releaseDate, "2015-03-11")
version.delete()
def test_update_version(self):
version = self.jira.create_version(
"new updated version 1",
self.project_b,
releaseDate="2015-03-11",
description="new to be updated!",
)
version.update(name="new updated version name 1", description="new updated!")
self.assertEqual(version.name, "new updated version name 1")
self.assertEqual(version.description, "new updated!")
v = self.jira.version(version.id)
self.assertEqual(v, version)
self.assertEqual(v.id, version.id)
version.delete()
def test_delete_version(self):
version_str = "test_delete_version:" + self.test_manager.jid
version = self.jira.create_version(
version_str,
self.project_b,
releaseDate="2015-03-11",
description="not long for this world",
)
version.delete()
self.assertRaises(JIRAError, self.jira.version, version.id)
# -*- coding: utf-8 -*-
import logging
from _pytest.main import EXIT_OK, EXIT_NOTESTSCOLLECTED, EXIT_INTERRUPTED # NOQA
def assert_fnmatch_lines(output, matches):
if isinstance(output, str):
output = output.split('\n')
missing = []
for match in matches:
if match not in output:
missing.append(match)
assert len(missing) == 0, "The following matches were not found:\n - %s" % '\n - '.join(missing)
def test_debug_logging(testdir, capsys):
    '''verifies that pytest-github emits the expected DEBUG log messages during a test run'''
# setup logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create stderr StreamHandler
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(levelname)s - %(message)s')
sh.setFormatter(formatter)
# add handler to logger
logger.addHandler(sh)
src = """\
def test_foo():
pass
"""
result = testdir.inline_runsource(src)
# Assert py.test exit code
assert result.ret == EXIT_OK
(stdout, stderr) = capsys.readouterr()
fnmatch_lines = [
'DEBUG - pytest_cmdline_main() called',
'DEBUG - pytest_configure() called',
'DEBUG - GitHubPytestPlugin initialized',
'DEBUG - pytest_runtest_setup() called',
]
# Assert stderr logging
assert_fnmatch_lines(stderr, fnmatch_lines)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Deformable ConvNets v2: More Deformable, Better Results
# Modified by: RainbowSecret(yuyua@microsoft.com)
# Select Seg Model for img segmentation.
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from collections import OrderedDict
from lib.models.tools.module_helper import ModuleHelper
from lib.extensions.dcn import (
ModulatedDeformConv,
ModulatedDeformRoIPoolingPack,
DeformConv,
)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False,
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style="pytorch",
with_cp=False,
bn_type=None,
):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
self.relu = nn.ReLU(inplace=False)
self.relu_in = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
assert not with_cp
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu_in(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style="pytorch",
with_cp=False,
with_dcn=False,
num_deformable_groups=1,
dcn_offset_lr_mult=0.1,
use_regular_conv_on_stride=False,
use_modulated_dcn=False,
bn_type=None,
):
"""Bottleneck block.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__()
conv1_stride = 1
conv2_stride = stride
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False
)
self.with_dcn = with_dcn
self.use_modulated_dcn = use_modulated_dcn
if use_regular_conv_on_stride and stride > 1:
self.with_dcn = False
if self.with_dcn:
print(
"--->> use {}dcn in block where c_in={} and c_out={}".format(
"modulated " if self.use_modulated_dcn else "", planes, inplanes
)
)
if use_modulated_dcn:
self.conv_offset_mask = nn.Conv2d(
planes,
num_deformable_groups * 27,
kernel_size=3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
)
self.conv_offset_mask.lr_mult = dcn_offset_lr_mult
self.conv_offset_mask.zero_init = True
self.conv2 = ModulatedDeformConv(
planes,
planes,
3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
deformable_groups=num_deformable_groups,
no_bias=True,
)
else:
self.conv2_offset = nn.Conv2d(
planes,
num_deformable_groups * 18,
kernel_size=3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
)
self.conv2_offset.lr_mult = dcn_offset_lr_mult
self.conv2_offset.zero_init = True
self.conv2 = DeformConv(
planes,
planes,
(3, 3),
stride=conv2_stride,
padding=dilation,
dilation=dilation,
num_deformable_groups=num_deformable_groups,
)
else:
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
bias=False,
)
self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False
)
self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes * self.expansion)
self.relu = nn.ReLU(inplace=False)
self.relu_in = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
def forward(self, x):
def _inner_forward(x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if self.with_dcn:
if self.use_modulated_dcn:
offset_mask = self.conv_offset_mask(out)
offset1, offset2, mask_raw = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((offset1, offset2), dim=1)
mask = torch.sigmoid(mask_raw)
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
                    # add a bias to the offsets to work around the dilation-rate bug within DCN
dilation = self.conv2.dilation[0]
bias_w = torch.cuda.FloatTensor(
[[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]
) * (dilation - 1)
bias_h = bias_w.permute(1, 0)
bias_w.requires_grad = False
bias_h.requires_grad = False
offset += torch.cat([bias_h.reshape(-1), bias_w.reshape(-1)]).view(
1, -1, 1, 1
)
out = self.conv2(out, offset)
else:
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu_in(out)
return out
def make_res_layer(
block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style="pytorch",
with_cp=False,
with_dcn=False,
dcn_offset_lr_mult=0.1,
use_regular_conv_on_stride=False,
use_modulated_dcn=False,
bn_type=None,
):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes * block.expansion),
)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
with_cp=with_cp,
with_dcn=with_dcn,
dcn_offset_lr_mult=dcn_offset_lr_mult,
use_regular_conv_on_stride=use_regular_conv_on_stride,
use_modulated_dcn=use_modulated_dcn,
bn_type=bn_type,
)
)
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
style=style,
with_cp=with_cp,
with_dcn=with_dcn,
dcn_offset_lr_mult=dcn_offset_lr_mult,
use_regular_conv_on_stride=use_regular_conv_on_stride,
use_modulated_dcn=use_modulated_dcn,
bn_type=bn_type,
)
)
return nn.Sequential(*layers)
class DCNResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
def __init__(self, block, layers, deep_base=True, bn_type=None):
super(DCNResNet, self).__init__()
# if depth not in self.arch_settings:
# raise KeyError('invalid depth {} for resnet'.format(depth))
# assert num_stages >= 1 and num_stages <= 4
# block, stage_blocks = self.arch_settings[depth]
# stage_blocks = stage_blocks[:num_stages]
# assert len(strides) == len(dilations) == num_stages
# assert max(out_indices) < num_stages
self.style = "pytorch"
self.inplanes = 128 if deep_base else 64
if deep_base:
self.resinit = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
3, 64, kernel_size=3, stride=2, padding=1, bias=False
),
),
("bn1", ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)),
("relu1", nn.ReLU(inplace=False)),
(
"conv2",
nn.Conv2d(
64, 64, kernel_size=3, stride=1, padding=1, bias=False
),
),
("bn2", ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)),
("relu2", nn.ReLU(inplace=False)),
(
"conv3",
nn.Conv2d(
64, 128, kernel_size=3, stride=1, padding=1, bias=False
),
),
(
"bn3",
ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes),
),
("relu3", nn.ReLU(inplace=False)),
]
)
)
else:
self.resinit = nn.Sequential(
OrderedDict(
[
(
"conv1",
nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False
),
),
(
"bn1",
ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes),
),
("relu1", nn.ReLU(inplace=False)),
]
)
)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = make_res_layer(
block,
self.inplanes,
64,
layers[0],
style=self.style,
with_dcn=False,
use_modulated_dcn=False,
bn_type=bn_type,
)
self.layer2 = make_res_layer(
block,
256,
128,
layers[1],
stride=2,
style=self.style,
with_dcn=False,
use_modulated_dcn=False,
bn_type=bn_type,
)
self.layer3 = make_res_layer(
block,
512,
256,
layers[2],
stride=2,
style=self.style,
with_dcn=True,
use_modulated_dcn=False,
bn_type=bn_type,
)
self.layer4 = make_res_layer(
block,
1024,
512,
layers[3],
stride=2,
style=self.style,
with_dcn=True,
use_modulated_dcn=False,
bn_type=bn_type,
)
def forward(self, x):
x = self.resinit(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class DCNResNetModels(object):
def __init__(self, configer):
self.configer = configer
def deepbase_dcn_resnet50(self, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on Places
"""
model = DCNResNet(
Bottleneck,
[3, 4, 6, 3],
deep_base=True,
bn_type=self.configer.get("network", "bn_type"),
**kwargs
)
model = ModuleHelper.load_model(
model,
all_match=False,
pretrained=self.configer.get("network", "pretrained"),
network="dcnet",
)
return model
def deepbase_dcn_resnet101(self, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on Places
"""
model = DCNResNet(
Bottleneck,
[3, 4, 23, 3],
deep_base=True,
bn_type=self.configer.get("network", "bn_type"),
**kwargs
)
model = ModuleHelper.load_model(
model,
all_match=False,
pretrained=self.configer.get("network", "pretrained"),
network="dcnet",
)
return model
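# Illustrative construction sketch: build the ResNet-50 backbone directly,
# bypassing the configer-driven factories above. Assumes the DCN CUDA
# extensions from lib.extensions.dcn are built (layer3/layer4 enable
# deformable convolutions) and that bn_type=None maps to a standard
# BatchNorm2d inside ModuleHelper.
backbone = DCNResNet(Bottleneck, [3, 4, 6, 3], deep_base=True, bn_type=None)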
class LoggerError(Exception):
""" Base class for all logger error classes.
All exceptions raised by the benchmark runner library should inherit from this class. """
pass
class MethodError(LoggerError):
"""
This class is fot method error
"""
def __init__(self, method_name, exception):
self.message = f'method error: {method_name}, exception: {exception}'
super(MethodError, self).__init__(self.message)
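# Illustrative usage sketch: wrap a failing call and surface it as a
# MethodError; `risky_operation` is a hypothetical stand-in.
def risky_operation():
    raise ValueError("boom")

try:
    risky_operation()
except Exception as err:
    print(MethodError("risky_operation", err))  # -> method error: risky_operation, exception: boom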
# Generated by Django 3.1.7 on 2021-12-24 18:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tracker', '0005_movie_poster'),
]
operations = [
migrations.AddField(
model_name='movie',
name='cast',
field=models.CharField(default='Not Specified', max_length=64),
),
]
"""Coding Quiz: Check for Prime Numbers
Prime numbers are whole numbers that have only two factors: 1 and the number itself. The first few prime numbers are 2, 3, 5, 7.
For instance, 6 has four factors: 1, 2, 3, 6.
1 X 6 = 6
2 X 3 = 6
So we know 6 is not a prime number.
In the following coding environment, write code to check if the numbers provided in the list check_prime are prime numbers.
If the numbers are prime, the code should print "[number] is a prime number."
If the number is NOT a prime number, it should print "[number] is not a prime number", and a factor of that number, other than 1 and the number itself: "[factor] is a factor of [number]".
Example output:
7 IS a prime number
26 is NOT a prime number, because 2 is a factor of 26
"""
check_prime = [26, 37, 39, 51, 53, 57, 73, 79, 85]
# iterate through the check_prime list
for num in check_prime:
# search for factors, iterating through numbers ranging from 2 to the number itself
for i in range(2, num):
        # the number is not prime if the modulo is 0
if (num % i) == 0:
print('{} is not a prime number, because {} is a factor of {}'.format(num, i, num))
break
# otherwise keep checking until we've searched all possible factors, and then declare it prime
        if i == num - 1:
print('{} is a prime number'.format(num))
""" Logic for our solution:
We loop through each number in the check_prime list.
Create a "search-for-factors" loop beginning at 2, and continuing up to the (number-1)
Use a conditional statement with the modulo operator to check if our number when divided by the possible factor yields any remainder besides 0.
If we ever find one factor, we can declare that the number is not prime, and state the factor we found. Then we can break out of the loop for that number.
If we get up to the (number - 1) and haven't broken out of the loop, then we can declare that the number is prime.
"""
import timm
import torchvision.models as models
""""
timm_models = [
'adv_inception_v3',
'cait_m36_384',
'cait_m48_448',
'cait_s24_224',
'cait_s24_384',
'cait_s36_384',
'cait_xs24_384',
'cait_xxs24_224',
'cait_xxs24_384',
'cait_xxs36_224',
'cait_xxs36_384',
'coat_lite_mini',
'coat_lite_small',
'coat_lite_tiny',
'coat_mini',
'coat_tiny',
'convit_base',
'convit_small',
'convit_tiny',
'cspdarknet53',
'cspresnet50',
'cspresnext50',
'deit_base_distilled_patch16_224',
'deit_base_distilled_patch16_384',
'deit_base_patch16_224',
'deit_base_patch16_384',
'deit_small_distilled_patch16_224',
'deit_small_patch16_224',
'deit_tiny_distilled_patch16_224',
'deit_tiny_patch16_224',
'densenet121',
'densenet161',
'densenet169',
'densenet201',
'densenetblur121d',
'dla34',
'dla46_c',
'dla46x_c',
'dla60',
'dla60_res2net',
'dla60_res2next',
'dla60x',
'dla60x_c',
'dla102',
'dla102x',
'dla102x2',
'dla169',
'dm_nfnet_f0',
'dm_nfnet_f1',
'dm_nfnet_f2',
'dm_nfnet_f3',
'dm_nfnet_f4',
'dm_nfnet_f5',
'dm_nfnet_f6',
'dpn68',
'dpn68b',
'dpn92',
'dpn98',
'dpn107',
'dpn131',
'eca_nfnet_l0',
'eca_nfnet_l1',
'eca_nfnet_l2',
'ecaresnet26t',
'ecaresnet50d',
'ecaresnet50d_pruned',
'ecaresnet50t',
'ecaresnet101d',
'ecaresnet101d_pruned',
'ecaresnet269d',
'ecaresnetlight',
'efficientnet_b0',
'efficientnet_b1',
'efficientnet_b1_pruned',
'efficientnet_b2',
'efficientnet_b2_pruned',
'efficientnet_b3',
'efficientnet_b3_pruned',
'efficientnet_b4',
'efficientnet_el',
'efficientnet_el_pruned',
'efficientnet_em',
'efficientnet_es',
'efficientnet_es_pruned',
'efficientnet_lite0',
'efficientnetv2_rw_m',
'efficientnetv2_rw_s',
'ens_adv_inception_resnet_v2',
'ese_vovnet19b_dw',
'ese_vovnet39b',
'fbnetc_100',
'gernet_l',
'gernet_m',
'gernet_s',
'ghostnet_100',
'gluon_inception_v3',
'gluon_resnet18_v1b',
'gluon_resnet34_v1b',
'gluon_resnet50_v1b',
'gluon_resnet50_v1c',
'gluon_resnet50_v1d',
'gluon_resnet50_v1s',
'gluon_resnet101_v1b',
'gluon_resnet101_v1c',
'gluon_resnet101_v1d',
'gluon_resnet101_v1s',
'gluon_resnet152_v1b',
'gluon_resnet152_v1c',
'gluon_resnet152_v1d',
'gluon_resnet152_v1s',
'gluon_resnext50_32x4d',
'gluon_resnext101_32x4d',
'gluon_resnext101_64x4d',
'gluon_senet154',
'gluon_seresnext50_32x4d',
'gluon_seresnext101_32x4d',
'gluon_seresnext101_64x4d',
'gluon_xception65',
'gmixer_24_224',
'hardcorenas_a',
'hardcorenas_b',
'hardcorenas_c',
'hardcorenas_d',
'hardcorenas_e',
'hardcorenas_f',
'hrnet_w18',
'hrnet_w18_small',
'hrnet_w18_small_v2',
'hrnet_w30',
'hrnet_w32',
'hrnet_w40',
'hrnet_w44',
'hrnet_w48',
'hrnet_w64',
'ig_resnext101_32x8d',
'ig_resnext101_32x16d',
'ig_resnext101_32x32d',
'ig_resnext101_32x48d',
'inception_resnet_v2',
'inception_v3',
'inception_v4',
'legacy_senet154',
'legacy_seresnet18',
'legacy_seresnet34',
'legacy_seresnet50',
'legacy_seresnet101',
'legacy_seresnet152',
'legacy_seresnext26_32x4d',
'legacy_seresnext50_32x4d',
'legacy_seresnext101_32x4d',
'levit_128',
'levit_128s',
'levit_192',
'levit_256',
'levit_384',
'mixer_b16_224',
'mixer_b16_224_in21k',
'mixer_b16_224_miil',
'mixer_b16_224_miil_in21k',
'mixer_l16_224',
'mixer_l16_224_in21k',
'mixnet_l',
'mixnet_m',
'mixnet_s',
'mixnet_xl',
'mnasnet_100',
'mobilenetv2_100',
'mobilenetv2_110d',
'mobilenetv2_120d',
'mobilenetv2_140',
'mobilenetv3_large_100',
'mobilenetv3_large_100_miil',
'mobilenetv3_large_100_miil_in21k',
'mobilenetv3_rw',
'nasnetalarge',
'nf_regnet_b1',
'nf_resnet50',
'nfnet_l0',
'pit_b_224',
'pit_b_distilled_224',
'pit_s_224',
'pit_s_distilled_224',
'pit_ti_224',
'pit_ti_distilled_224',
'pit_xs_224',
'pit_xs_distilled_224',
'pnasnet5large',
'regnetx_002',
'regnetx_004',
'regnetx_006',
'regnetx_008',
'regnetx_016',
'regnetx_032',
'regnetx_040',
'regnetx_064',
'regnetx_080',
'regnetx_120',
'regnetx_160',
'regnetx_320',
'regnety_002',
'regnety_004',
'regnety_006',
'regnety_008',
'regnety_016',
'regnety_032',
'regnety_040',
'regnety_064',
'regnety_080',
'regnety_120',
'regnety_160',
'regnety_320',
'repvgg_a2',
'repvgg_b0',
'repvgg_b1',
'repvgg_b1g4',
'repvgg_b2',
'repvgg_b2g4',
'repvgg_b3',
'repvgg_b3g4',
'res2net50_14w_8s',
'res2net50_26w_4s',
'res2net50_26w_6s',
'res2net50_26w_8s',
'res2net50_48w_2s',
'res2net101_26w_4s',
'res2next50',
'resmlp_12_224',
'resmlp_12_distilled_224',
'resmlp_24_224',
'resmlp_24_distilled_224',
'resmlp_36_224',
'resmlp_36_distilled_224',
'resmlp_big_24_224',
'resmlp_big_24_224_in22ft1k',
'resmlp_big_24_distilled_224',
'resnest14d',
'resnest26d',
'resnest50d',
'resnest50d_1s4x24d',
'resnest50d_4s2x40d',
'resnest101e',
'resnest200e',
'resnest269e',
'resnet18',
'resnet18d',
'resnet26',
'resnet26d',
'resnet34',
'resnet34d',
'resnet50',
'resnet50d',
'resnet51q',
'resnet101d',
'resnet152d',
'resnet200d',
'resnetblur50',
'resnetrs50',
'resnetrs101',
'resnetrs152',
'resnetrs200',
'resnetrs270',
'resnetrs350',
'resnetrs420',
'resnetv2_50x1_bit_distilled',
'resnetv2_50x1_bitm',
'resnetv2_50x1_bitm_in21k',
'resnetv2_50x3_bitm',
'resnetv2_50x3_bitm_in21k',
'resnetv2_101x1_bitm',
'resnetv2_101x1_bitm_in21k',
'resnetv2_101x3_bitm',
'resnetv2_101x3_bitm_in21k',
'resnetv2_152x2_bit_teacher',
'resnetv2_152x2_bit_teacher_384',
'resnetv2_152x2_bitm',
'resnetv2_152x2_bitm_in21k',
'resnetv2_152x4_bitm',
'resnetv2_152x4_bitm_in21k',
'resnext50_32x4d',
'resnext50d_32x4d',
'resnext101_32x8d',
'rexnet_100',
'rexnet_130',
'rexnet_150',
'rexnet_200',
'selecsls42b',
'selecsls60',
'selecsls60b',
'semnasnet_100',
'seresnet50',
'seresnet152d',
'seresnext26d_32x4d',
'seresnext26t_32x4d',
'seresnext50_32x4d',
'skresnet18',
'skresnet34',
'skresnext50_32x4d',
'spnasnet_100',
'ssl_resnet18',
'ssl_resnet50',
'ssl_resnext50_32x4d',
'ssl_resnext101_32x4d',
'ssl_resnext101_32x8d',
'ssl_resnext101_32x16d',
'swin_base_patch4_window7_224',
'swin_base_patch4_window7_224_in22k',
'swin_base_patch4_window12_384',
'swin_base_patch4_window12_384_in22k',
'swin_large_patch4_window7_224',
'swin_large_patch4_window7_224_in22k',
'swin_large_patch4_window12_384',
'swin_large_patch4_window12_384_in22k',
'swin_small_patch4_window7_224',
'swin_tiny_patch4_window7_224',
'swsl_resnet18',
'swsl_resnet50',
'swsl_resnext50_32x4d',
'swsl_resnext101_32x4d',
'swsl_resnext101_32x8d',
'swsl_resnext101_32x16d',
'tf_efficientnet_b0',
'tf_efficientnet_b0_ap',
'tf_efficientnet_b0_ns',
'tf_efficientnet_b1',
'tf_efficientnet_b1_ap',
'tf_efficientnet_b1_ns',
'tf_efficientnet_b2',
'tf_efficientnet_b2_ap',
'tf_efficientnet_b2_ns',
'tf_efficientnet_b3',
'tf_efficientnet_b3_ap',
'tf_efficientnet_b3_ns',
'tf_efficientnet_b4',
'tf_efficientnet_b4_ap',
'tf_efficientnet_b4_ns',
'tf_efficientnet_b5',
'tf_efficientnet_b5_ap',
'tf_efficientnet_b5_ns',
'tf_efficientnet_b6',
'tf_efficientnet_b6_ap',
'tf_efficientnet_b6_ns',
'tf_efficientnet_b7',
'tf_efficientnet_b7_ap',
'tf_efficientnet_b7_ns',
'tf_efficientnet_b8',
'tf_efficientnet_b8_ap',
'tf_efficientnet_cc_b0_4e',
'tf_efficientnet_cc_b0_8e',
'tf_efficientnet_cc_b1_8e',
'tf_efficientnet_el',
'tf_efficientnet_em',
'tf_efficientnet_es',
'tf_efficientnet_l2_ns',
'tf_efficientnet_l2_ns_475',
'tf_efficientnet_lite0',
'tf_efficientnet_lite1',
'tf_efficientnet_lite2',
'tf_efficientnet_lite3',
'tf_efficientnet_lite4',
'tf_efficientnetv2_b0',
'tf_efficientnetv2_b1',
'tf_efficientnetv2_b2',
'tf_efficientnetv2_b3',
'tf_efficientnetv2_l',
'tf_efficientnetv2_l_in21ft1k',
'tf_efficientnetv2_l_in21k',
'tf_efficientnetv2_m',
'tf_efficientnetv2_m_in21ft1k',
'tf_efficientnetv2_m_in21k',
'tf_efficientnetv2_s',
'tf_efficientnetv2_s_in21ft1k',
'tf_efficientnetv2_s_in21k',
'tf_inception_v3',
'tf_mixnet_l',
'tf_mixnet_m',
'tf_mixnet_s',
'tf_mobilenetv3_large_075',
'tf_mobilenetv3_large_100',
'tf_mobilenetv3_large_minimal_100',
'tf_mobilenetv3_small_075',
'tf_mobilenetv3_small_100',
'tf_mobilenetv3_small_minimal_100',
'tnt_s_patch16_224',
'tresnet_l',
'tresnet_l_448',
'tresnet_m',
'tresnet_m_448',
'tresnet_m_miil_in21k',
'tresnet_xl',
'tresnet_xl_448',
'tv_densenet121',
'tv_resnet34',
'tv_resnet50',
'tv_resnet101',
'tv_resnet152',
'tv_resnext50_32x4d',
'twins_pcpvt_base',
'twins_pcpvt_large',
'twins_pcpvt_small',
'twins_svt_base',
'twins_svt_large',
'twins_svt_small',
'vgg11',
'vgg11_bn',
'vgg13',
'vgg13_bn',
'vgg16',
'vgg16_bn',
'vgg19',
'vgg19_bn',
'visformer_small',
'vit_base_patch16_224',
'vit_base_patch16_224_in21k',
'vit_base_patch16_224_miil',
'vit_base_patch16_224_miil_in21k',
'vit_base_patch16_384',
'vit_base_patch32_224',
'vit_base_patch32_224_in21k',
'vit_base_patch32_384',
'vit_base_r50_s16_224_in21k',
'vit_base_r50_s16_384',
'vit_huge_patch14_224_in21k',
'vit_large_patch16_224',
'vit_large_patch16_224_in21k',
'vit_large_patch16_384',
'vit_large_patch32_224_in21k',
'vit_large_patch32_384',
'vit_large_r50_s32_224',
'vit_large_r50_s32_224_in21k',
'vit_large_r50_s32_384',
'vit_small_patch16_224',
'vit_small_patch16_224_in21k',
'vit_small_patch16_384',
'vit_small_patch32_224',
'vit_small_patch32_224_in21k',
'vit_small_patch32_384',
'vit_small_r26_s32_224',
'vit_small_r26_s32_224_in21k',
'vit_small_r26_s32_384',
'vit_tiny_patch16_224',
'vit_tiny_patch16_224_in21k',
'vit_tiny_patch16_384',
'vit_tiny_r_s16_p8_224',
'vit_tiny_r_s16_p8_224_in21k',
'vit_tiny_r_s16_p8_384',
'wide_resnet50_2',
'wide_resnet101_2',
'xception',
'xception41',
'xception65',
'xception71']
"""
timm_models = timm.list_models(pretrained=True)
torchvision_models = sorted(name for name in models.__dict__
                            if name.islower() and not name.startswith("__")
                            and callable(models.__dict__[name]))
__all__ = ['get_model', 'get_model_list']
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
root : str, default '~/.encoding/models'
Location for keeping the model parameters.
Returns
-------
Module:
The model.
"""
name = name.lower()
if name in timm_models:
net = timm.create_model(name, **kwargs)
    # elif name in torchvision_models:
    #     net = models.__dict__[name](**kwargs)
else:
raise ValueError('%s\n\t%s' % (str(name), '\n\t'.join(sorted(timm_models))))
return net
def get_model_list():
"""Get the entire list of model names in model_zoo.
Returns
-------
list of str
Entire list of model names in model_zoo.
"""
    return list(timm_models)  # + list(torchvision_models)
if __name__ == '__main__':
# models = get_model_list()
# print(models)
net = get_model("efficientnet_b1", pretrained=False)
print(net)
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Service monitor to instantiate/scale/monitor services like firewall, LB, ...
"""
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import gevent
from gevent import monkey
monkey.patch_all(thread='unittest' not in sys.modules)
from cfgm_common.zkclient import ZookeeperClient
import requests
import ConfigParser
import cStringIO
import argparse
import signal
import random
import hashlib
import os
import logging
import logging.handlers
import cfgm_common
from cfgm_common import importutils
from cfgm_common import svc_info
from cfgm_common import vnc_cgitb
from cfgm_common.utils import cgitb_hook
from cfgm_common.vnc_amqp import VncAmqpHandle
from cfgm_common.exceptions import ResourceExhaustionError
from vnc_api.utils import AAA_MODE_VALID_VALUES
from config_db import *
from pysandesh.sandesh_base import Sandesh, SandeshSystem, SandeshConfig
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from sandesh_common.vns.ttypes import Module
from vnc_api.vnc_api import *
from agent_manager import AgentManager
from db import ServiceMonitorDB
from logger import ServiceMonitorLogger
from module_logger import ServiceMonitorModuleLogger
from loadbalancer_agent import LoadbalancerAgent
from port_tuple import PortTupleAgent
from snat_agent import SNATAgent
from reaction_map import REACTION_MAP
try:
from novaclient import exceptions as nc_exc
except ImportError:
pass
# zookeeper client connection
_zookeeper_client = None
class SvcMonitor(object):
def __init__(self, sm_logger=None, args=None):
self._args = args
# initialize logger
if sm_logger is not None:
self.logger = sm_logger
else:
# Initialize logger
self.logger = ServiceMonitorLogger(args)
# init object_db
self._object_db = ServiceMonitorDB(self._args, self.logger)
DBBaseSM.init(self, self.logger, self._object_db)
# init rabbit connection
rabbitmq_cfg = get_rabbitmq_cfg(args)
self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
DBBaseSM, REACTION_MAP, 'svc_monitor', rabbitmq_cfg,
self._args.trace_file)
self.rabbit.establish()
def post_init(self, vnc_lib, args=None):
# api server
self._vnc_lib = vnc_lib
try:
self._nova_client = importutils.import_object(
'svc_monitor.nova_client.ServiceMonitorNovaClient',
self._args, self.logger)
except Exception as e:
self._nova_client = None
# agent manager
self._agent_manager = AgentManager()
# load vrouter scheduler
self.vrouter_scheduler = importutils.import_object(
self._args.si_netns_scheduler_driver,
self._vnc_lib, self._nova_client,
None, self.logger, self._args)
# load virtual machine instance manager
self.vm_manager = importutils.import_object(
'svc_monitor.virtual_machine_manager.VirtualMachineManager',
self._vnc_lib, self._object_db, self.logger,
self.vrouter_scheduler, self._nova_client, self._agent_manager,
self._args)
# load network namespace instance manager
self.netns_manager = importutils.import_object(
'svc_monitor.instance_manager.NetworkNamespaceManager',
self._vnc_lib, self._object_db, self.logger,
self.vrouter_scheduler, self._nova_client, self._agent_manager,
self._args)
# load a vrouter instance manager
self.vrouter_manager = importutils.import_object(
'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
self._vnc_lib, self._object_db, self.logger,
self.vrouter_scheduler, self._nova_client,
self._agent_manager, self._args)
# load PNF instance manager
self.ps_manager = importutils.import_object(
'svc_monitor.physical_service_manager.PhysicalServiceManager',
self._vnc_lib, self._object_db, self.logger,
self.vrouter_scheduler, self._nova_client,
self._agent_manager, self._args)
# load a loadbalancer agent
self.loadbalancer_agent = LoadbalancerAgent(
self, self._vnc_lib,
self._object_db, self._args)
self._agent_manager.register_agent(self.loadbalancer_agent)
# load a snat agent
self.snat_agent = SNATAgent(self, self._vnc_lib,
self._object_db, self._args,
ServiceMonitorModuleLogger(self.logger))
self._agent_manager.register_agent(self.snat_agent)
# load port tuple agent
self.port_tuple_agent = PortTupleAgent(self, self._vnc_lib,
self._object_db, self._args, ServiceMonitorModuleLogger(self.logger))
self._agent_manager.register_agent(self.port_tuple_agent)
# Read the object_db and populate the entry in ServiceMonitor DB
self.sync_sm()
# create default analyzer template
self._create_default_template('analyzer-template', 'analyzer',
flavor='m1.medium',
image_name='analyzer')
# create default NAT template
self._create_default_template('nat-template', 'firewall',
svc_mode='in-network-nat',
image_name='analyzer',
flavor='m1.medium')
# create default netns SNAT template
self._create_default_template('netns-snat-template', 'source-nat',
svc_mode='in-network-nat',
hypervisor_type='network-namespace',
scaling=True)
# create default loadbalancer template
self._create_default_template('haproxy-loadbalancer-template',
'loadbalancer',
svc_mode='in-network-nat',
hypervisor_type='network-namespace',
scaling=True)
self._create_default_template('docker-template', 'firewall',
svc_mode='transparent',
image_name="ubuntu",
hypervisor_type='vrouter-instance',
vrouter_instance_type='docker',
instance_data={
"command": "/bin/bash"
})
# upgrade handling
self.upgrade()
# check services
self.vrouter_scheduler.vrouters_running()
self.launch_services()
self.rabbit._db_resync_done.set()
def _upgrade_instance_ip(self, vm):
for vmi_id in vm.virtual_machine_interfaces:
vmi = VirtualMachineInterfaceSM.get(vmi_id)
if not vmi:
continue
for iip_id in vmi.instance_ips:
iip = InstanceIpSM.get(iip_id)
if not iip or iip.service_instance_ip:
continue
iip_obj = InstanceIp()
iip_obj.name = iip.name
iip_obj.uuid = iip.uuid
iip_obj.set_service_instance_ip(True)
try:
self._vnc_lib.instance_ip_update(iip_obj)
except NoIdError:
self.logger.error("upgrade instance ip to service ip failed %s" % (iip.name))
continue
def _upgrade_auto_policy(self, si, st):
if st.name != 'netns-snat-template':
return
if not si.params['auto_policy']:
return
si_obj = ServiceInstance()
si_obj.uuid = si.uuid
si_obj.fq_name = si.fq_name
si_props = ServiceInstanceType(**si.params)
si_props.set_auto_policy(False)
si_obj.set_service_instance_properties(si_props)
try:
self._vnc_lib.service_instance_update(si_obj)
self.logger.notice("snat policy upgraded for %s" % (si.name))
except NoIdError:
self.logger.error("snat policy upgrade failed for %s" % (si.name))
return
def upgrade(self):
for lr in LogicalRouterSM.values():
self.snat_agent.upgrade(lr)
for si in ServiceInstanceSM.values():
st = ServiceTemplateSM.get(si.service_template)
if not st:
continue
self._upgrade_auto_policy(si, st)
vm_id_list = list(si.virtual_machines)
for vm_id in vm_id_list:
vm = VirtualMachineSM.get(vm_id)
self._upgrade_instance_ip(vm)
if vm.virtualization_type:
continue
try:
nova_vm = self._nova_client.oper('servers', 'get',
si.proj_name, id=vm_id)
except nc_exc.NotFound:
nova_vm = None
if nova_vm:
vm_name = nova_vm.name
vm.proj_fq_name = nova_vm.name.split('__')[0:2]
else:
vm_name = vm.name
if not vm_name.split('__')[-1].isdigit():
continue
vm.virtualization_type = st.virtualization_type
self.delete_service_instance(vm)
def launch_services(self):
for si in ServiceInstanceSM.values():
self.create_service_instance(si)
def sync_sm(self):
# Read and Sync all DBase
for cls in DBBaseSM.get_obj_type_map().values():
for obj in cls.list_obj():
cls.locate(obj['uuid'], obj)
# Link SI and VM
for vm in VirtualMachineSM.values():
if vm.service_instance:
continue
for vmi_id in vm.virtual_machine_interfaces:
vmi = VirtualMachineInterfaceSM.get(vmi_id)
if not vmi:
continue
self.port_delete_or_si_link(vm, vmi)
# invoke port tuple handling
try:
self.port_tuple_agent.update_port_tuples()
except Exception:
cgitb_error_log(self)
# Load the loadbalancer driver
self.loadbalancer_agent.load_drivers()
# Invoke the health monitors
for hm in HealthMonitorSM.values():
hm.sync()
# Invoke the loadbalancers
for lb in LoadbalancerSM.values():
lb.sync()
# Invoke the loadbalancer listeners
for lb_listener in LoadbalancerListenerSM.values():
lb_listener.sync()
# Invoke the loadbalancer pools
for lb_pool in LoadbalancerPoolSM.values():
lb_pool.sync()
# Audit the lb pools
self.loadbalancer_agent.audit_lb_pools()
# Audit the SNAT instances
self.snat_agent.audit_snat_instances()
# end sync_sm
# create service template
def _create_default_template(self, st_name, svc_type, svc_mode=None,
hypervisor_type='virtual-machine',
image_name=None, flavor=None, scaling=False,
vrouter_instance_type=None,
instance_data=None):
domain_name = 'default-domain'
domain_fq_name = [domain_name]
st_fq_name = [domain_name, st_name]
self.logger.info("Creating %s %s hypervisor %s" %
(domain_name, st_name, hypervisor_type))
domain_obj = None
for domain in DomainSM.values():
if domain.fq_name == domain_fq_name:
domain_obj = Domain()
domain_obj.uuid = domain.uuid
domain_obj.fq_name = domain_fq_name
break
if not domain_obj:
self.logger.error("%s domain not found" % (domain_name))
return
for st in ServiceTemplateSM.values():
if st.fq_name == st_fq_name:
self.logger.info("%s exists uuid %s" %
(st.name, str(st.uuid)))
return
svc_properties = ServiceTemplateType()
svc_properties.set_service_type(svc_type)
svc_properties.set_service_mode(svc_mode)
svc_properties.set_service_virtualization_type(hypervisor_type)
svc_properties.set_image_name(image_name)
svc_properties.set_flavor(flavor)
svc_properties.set_ordered_interfaces(True)
svc_properties.set_service_scaling(scaling)
# set interface list
if svc_type == 'analyzer':
if_list = [['left', False]]
elif hypervisor_type == 'network-namespace':
if_list = [['right', True], ['left', True]]
else:
if_list = [
['management', False], ['left', False], ['right', False]]
for itf in if_list:
if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
if_type.set_service_interface_type(itf[0])
svc_properties.add_interface_type(if_type)
if vrouter_instance_type is not None:
svc_properties.set_vrouter_instance_type(vrouter_instance_type)
if instance_data is not None:
svc_properties.set_instance_data(
json.dumps(instance_data, separators=(',', ':')))
        st_obj = ServiceTemplate(name=st_name, domain_obj=domain_obj)
st_obj.set_service_template_properties(svc_properties)
try:
st_uuid = self._vnc_lib.service_template_create(st_obj)
except Exception as e:
self.logger.error("%s create failed with error %s" %
(st_name, str(e)))
return
# Create the service template in local db
ServiceTemplateSM.locate(st_uuid)
self.logger.info("%s created with uuid %s" %
(st_name, str(st_uuid)))
#_create_default_analyzer_template
def port_delete_or_si_link(self, vm, vmi):
if vmi.port_tuples:
return
if (vmi.service_instances and vmi.virtual_machine == None):
self.vm_manager.cleanup_svc_vm_ports([vmi.uuid])
return
if not vm or vm.service_instance:
return
if not vmi.if_type:
return
if len(vmi.name.split('__')) < 4:
return
si_fq_name = vmi.name.split('__')[0:3]
index = int(vmi.name.split('__')[3]) - 1
for si in ServiceInstanceSM.values():
if si.fq_name != si_fq_name:
continue
st = ServiceTemplateSM.get(si.service_template)
self.vm_manager.link_si_to_vm(si, st, index, vm.uuid)
return
def create_service_instance(self, si):
if si.state == 'active':
return
st = ServiceTemplateSM.get(si.service_template)
if not st:
self.logger.error("template not found for %s" %
((':').join(si.fq_name)))
return
if st.params and st.params.get('version', 1) == 2:
return
self.logger.info("Creating SI %s (%s)" %
((':').join(si.fq_name), st.virtualization_type))
try:
if st.virtualization_type == 'virtual-machine':
self.vm_manager.create_service(st, si)
elif st.virtualization_type == 'network-namespace':
self.netns_manager.create_service(st, si)
elif st.virtualization_type == 'vrouter-instance':
self.vrouter_manager.create_service(st, si)
elif st.virtualization_type == 'physical-device':
self.ps_manager.create_service(st, si)
else:
self.logger.error("Unknown virt type: %s" %
st.virtualization_type)
except Exception:
cgitb_error_log(self)
si.launch_count += 1
self.logger.info("SI %s creation success" % (':').join(si.fq_name))
def delete_service_instance(self, vm):
self.logger.info("Deleting VM %s %s for SI %s" %
((':').join(vm.fq_name), vm.uuid, vm.service_id))
try:
if vm.virtualization_type == svc_info.get_vm_instance_type():
self.vm_manager.delete_service(vm)
elif vm.virtualization_type == svc_info.get_netns_instance_type():
self.netns_manager.delete_service(vm)
elif vm.virtualization_type == 'vrouter-instance':
self.vrouter_manager.delete_service(vm)
elif vm.virtualization_type == 'physical-device':
self.ps_manager.delete_service(vm)
self.logger.info("Deleted VM %s %s for SI %s" %
((':').join(vm.fq_name), vm.uuid, vm.service_id))
except Exception:
cgitb_error_log(self)
# generate UVE
si_fq_name = vm.display_name.split('__')[:-2]
si_fq_str = (':').join(si_fq_name)
self.logger.uve_svc_instance(si_fq_str, status='DELETE',
vms=[{'uuid': vm.uuid}])
return True
def _relaunch_service_instance(self, si):
si.state = 'relaunch'
self.create_service_instance(si)
def _check_service_running(self, si):
st = ServiceTemplateSM.get(si.service_template)
if st.params and st.params.get('version', 1) == 2:
return
if st.virtualization_type == 'virtual-machine':
status = self.vm_manager.check_service(si)
elif st.virtualization_type == 'network-namespace':
status = self.netns_manager.check_service(si)
elif st.virtualization_type == 'vrouter-instance':
status = self.vrouter_manager.check_service(si)
elif st.virtualization_type == 'physical-device':
status = self.ps_manager.check_service(si)
return status
def delete_interface_route_table(self, irt_uuid):
try:
self._vnc_lib.interface_route_table_delete(id=irt_uuid)
InterfaceRouteTableSM.delete(irt_uuid)
except (NoIdError, RefsExistError):
return
def _delete_shared_vn(self, vn_uuid):
try:
self.logger.info("Deleting vn %s" % (vn_uuid))
self._vnc_lib.virtual_network_delete(id=vn_uuid)
VirtualNetworkSM.delete(vn_uuid)
except (NoIdError, RefsExistError):
pass
@staticmethod
def reset():
for cls in DBBaseSM.get_obj_type_map().values():
cls.reset()
def sighup_handler(self):
if self._conf_file:
config = ConfigParser.SafeConfigParser()
config.read(self._conf_file)
if 'DEFAULTS' in config.sections():
try:
collectors = config.get('DEFAULTS', 'collectors')
if type(collectors) is str:
collectors = collectors.split()
new_chksum = hashlib.md5("".join(collectors)).hexdigest()
if new_chksum != self._chksum:
self._chksum = new_chksum
config.random_collectors = random.sample(collectors, len(collectors))
# Reconnect to achieve load-balance irrespective of list
self.logger.sandesh_reconfig_collectors(config)
except ConfigParser.NoOptionError as e:
pass
# end sighup_handler
def skip_check_service(si):
# wait for first launch
if not si.launch_count:
return True
# back off going on
if si.back_off > 0:
si.back_off -= 1
return True
# back off done
if si.back_off == 0:
si.back_off = -1
return False
# set back off
if not si.launch_count % 10:
si.back_off = 10
return True
return False
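
# Note on the back-off scheme above (added summary, not in the original code):
# an SI that has never been launched is always skipped; every 10th relaunch
# attempt arms a back-off of 10 timer ticks during which health checks are
# skipped, after which checking resumes.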
def timer_callback(monitor):
# delete orphan shared iips
iip_delete_list = []
for iip in InstanceIpSM.values():
if not iip.instance_ip_secondary or not iip.service_instance_ip:
continue
if iip.service_instance:
continue
if len(iip.virtual_machine_interfaces):
continue
iip_delete_list.append(iip)
for iip in iip_delete_list:
monitor.port_tuple_agent.delete_shared_iip(iip)
# delete vms without si
vm_delete_list = []
for vm in VirtualMachineSM.values():
si = ServiceInstanceSM.get(vm.service_instance)
if not si and vm.virtualization_type:
vm_delete_list.append(vm)
for vm in vm_delete_list:
monitor.delete_service_instance(vm)
# delete vmis with si but no vms
vmi_delete_list = []
for vmi in VirtualMachineInterfaceSM.values():
for si_uuid in vmi.service_instances:
si = ServiceInstanceSM.get(si_uuid)
if si and not vmi.virtual_machine:
vmi_delete_list.append(vmi.uuid)
if len(vmi_delete_list):
monitor.vm_manager.cleanup_svc_vm_ports(vmi_delete_list)
# check vrouter agent status
monitor.vrouter_scheduler.vrouters_running()
# check status of service
si_list = list(ServiceInstanceSM.values())
for si in si_list:
if skip_check_service(si):
continue
if not monitor._check_service_running(si):
monitor._relaunch_service_instance(si)
if si.max_instances != len(si.virtual_machines):
monitor._relaunch_service_instance(si)
# check vns to be deleted
for project in ProjectSM.values():
if project.service_instances:
continue
vn_id_list = list(project.virtual_networks)
for vn_id in vn_id_list:
vn = VirtualNetworkSM.get(vn_id)
if not vn or vn.virtual_machine_interfaces:
continue
if vn.name in svc_info.get_shared_vn_list():
monitor._delete_shared_vn(vn.uuid)
def launch_timer(monitor):
if not monitor._args.check_service_interval.isdigit():
monitor.logger.emergency("set seconds for check_service_interval "
"in contrail-svc-monitor.conf. \
example: check_service_interval=60")
sys.exit()
monitor.logger.notice("check_service_interval set to %s seconds" %
monitor._args.check_service_interval)
while True:
gevent.sleep(int(monitor._args.check_service_interval))
try:
timer_callback(monitor)
except Exception:
cgitb_error_log(monitor)
def cgitb_error_log(monitor):
string_buf = cStringIO.StringIO()
cgitb_hook(file=string_buf, format="text")
monitor.logger.log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)
def parse_args(args_str):
'''
Eg. python svc_monitor.py --rabbit_server localhost
--rabbit_port 5672
--rabbit_user guest
--rabbit_password guest
--cassandra_server_list 10.1.2.3:9160
--api_server_ip 10.1.2.3
--api_server_port 8082
--api_server_use_ssl False
--zk_server_ip 10.1.2.3
--zk_server_port 2181
--collectors 127.0.0.1:8086
--http_server_port 8090
--log_local
--log_level SYS_DEBUG
--log_category test
--log_file <stdout>
--trace_file /var/log/contrail/svc-monitor.err
--use_syslog
--syslog_facility LOG_USER
--cluster_id <testbed-name>
--check_service_interval 60
[--region_name <name>]
[--reset_config]
'''
# Source any specified config/ini file
# Turn off help, so we show all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file", action='append',
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'rabbit_server': 'localhost',
'rabbit_port': '5672',
'rabbit_user': 'guest',
'rabbit_password': 'guest',
'rabbit_vhost': None,
'rabbit_ha_mode': False,
'cassandra_server_list': '127.0.0.1:9160',
'api_server_ip': '127.0.0.1',
'api_server_port': '8082',
'api_server_use_ssl': False,
'zk_server_ip': '127.0.0.1',
'zk_server_port': '2181',
'collectors': None,
'http_server_port': '8088',
'log_local': False,
'log_level': SandeshLevel.SYS_DEBUG,
'log_category': '',
'log_file': Sandesh._DEFAULT_LOG_FILE,
'trace_file': '/var/log/contrail/svc-monitor.err',
'use_syslog': False,
'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
'region_name': None,
'cluster_id': '',
'logging_conf': '',
'logger_class': None,
'check_service_interval': '60',
'nova_endpoint_type': 'internalURL',
'rabbit_use_ssl': False,
'kombu_ssl_version': '',
'kombu_ssl_keyfile': '',
'kombu_ssl_certfile': '',
'kombu_ssl_ca_certs': '',
}
defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
secopts = {
'use_certs': False,
'keyfile': '',
'certfile': '',
'ca_certs': '',
}
ksopts = {
'auth_host': '127.0.0.1',
'auth_protocol': 'http',
'auth_port': '5000',
'auth_version': 'v2.0',
'auth_insecure': True,
'admin_user': 'user1',
'admin_password': 'password1',
'admin_tenant_name': 'admin'
}
schedops = {
'si_netns_scheduler_driver':
'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
'analytics_server_list': '127.0.0.1:8081',
'availability_zone': None,
'netns_availability_zone': None,
'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE,
}
cassandraopts = {
'cassandra_user': None,
'cassandra_password': None,
}
sandeshopts = SandeshConfig.get_default_options()
saved_conf_file = args.conf_file
config = ConfigParser.SafeConfigParser()
if args.conf_file:
config.read(args.conf_file)
defaults.update(dict(config.items("DEFAULTS")))
if ('SECURITY' in config.sections() and
'use_certs' in config.options('SECURITY')):
if config.getboolean('SECURITY', 'use_certs'):
secopts.update(dict(config.items("SECURITY")))
if 'KEYSTONE' in config.sections():
ksopts.update(dict(config.items("KEYSTONE")))
if 'SCHEDULER' in config.sections():
schedops.update(dict(config.items("SCHEDULER")))
if 'CASSANDRA' in config.sections():
cassandraopts.update(dict(config.items('CASSANDRA')))
SandeshConfig.update_options(sandeshopts, config)
# Override with CLI options
    # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# script description with -h/--help
description=__doc__,
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
)
defaults.update(secopts)
defaults.update(ksopts)
defaults.update(schedops)
defaults.update(cassandraopts)
defaults.update(sandeshopts)
parser.set_defaults(**defaults)
parser.add_argument(
"--cassandra_server_list",
help="List of cassandra servers in IP Address:Port format",
nargs='+')
parser.add_argument(
"--cassandra_use_ssl", action="store_true",
help="Enable TLS for cassandra communication")
parser.add_argument(
"--cassandra_ca_certs",
help="Cassandra CA certs")
parser.add_argument(
"--reset_config", action="store_true",
help="Warning! Destroy previous configuration and start clean")
parser.add_argument("--api_server_ip",
help="IP address of API server")
parser.add_argument("--api_server_port",
help="Port of API server")
parser.add_argument("--api_server_use_ssl",
help="Use SSL to connect with API server")
parser.add_argument("--collectors",
help="List of VNC collectors in ip:port format",
nargs="+")
parser.add_argument("--http_server_port",
help="Port of local HTTP server")
parser.add_argument(
"--log_local", action="store_true",
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level",
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category",
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
help="Filename for the logs to be written to")
parser.add_argument("--trace_file", help="Filename for the error "
"backtraces to be written to")
parser.add_argument("--use_syslog", action="store_true",
help="Use syslog for logging")
parser.add_argument("--syslog_facility",
help="Syslog facility to receive log lines")
parser.add_argument("--aaa_mode",
choices=AAA_MODE_VALID_VALUES,
help="AAA mode")
parser.add_argument("--admin_user",
help="Name of keystone admin user")
parser.add_argument("--admin_password",
help="Password of keystone admin user")
parser.add_argument("--admin_tenant_name",
help="Tenant name for keystone admin user")
parser.add_argument("--region_name",
help="Region name for openstack API")
parser.add_argument("--cluster_id",
help="Used for database keyspace separation")
parser.add_argument(
"--logging_conf",
help=("Optional logging configuration file, default: None"))
parser.add_argument(
"--logger_class",
help=("Optional external logger class, default: None"))
parser.add_argument("--cassandra_user",
help="Cassandra user name")
parser.add_argument("--cassandra_password",
help="Cassandra password")
parser.add_argument("--check_service_interval",
help="Check service interval")
SandeshConfig.add_parser_arguments(parser)
args = parser.parse_args(remaining_argv)
args._conf_file = saved_conf_file
args.config_sections = config
if type(args.cassandra_server_list) is str:
args.cassandra_server_list = args.cassandra_server_list.split()
if type(args.collectors) is str:
args.collectors = args.collectors.split()
if args.region_name and args.region_name.lower() == 'none':
args.region_name = None
if args.availability_zone and args.availability_zone.lower() == 'none':
args.availability_zone = None
if args.netns_availability_zone and \
args.netns_availability_zone.lower() == 'none':
args.netns_availability_zone = None
args.sandesh_config = SandeshConfig.from_parser_arguments(args)
args.cassandra_use_ssl = (str(args.cassandra_use_ssl).lower() == 'true')
return args
def get_rabbitmq_cfg(args):
return {
'servers': args.rabbit_server, 'port': args.rabbit_port,
'user': args.rabbit_user, 'password': args.rabbit_password,
'vhost': args.rabbit_vhost, 'ha_mode': args.rabbit_ha_mode,
'use_ssl': args.rabbit_use_ssl,
'ssl_version': args.kombu_ssl_version,
'ssl_keyfile': args.kombu_ssl_keyfile,
'ssl_certfile': args.kombu_ssl_certfile,
'ssl_ca_certs': args.kombu_ssl_ca_certs
}
def run_svc_monitor(sm_logger, args=None):
sm_logger.notice("Elected master SVC Monitor node. Initializing... ")
sm_logger.introspect_init()
monitor = SvcMonitor(sm_logger, args)
monitor._zookeeper_client = _zookeeper_client
monitor._conf_file = args._conf_file
monitor._chksum = ""
if args.collectors:
monitor._chksum = hashlib.md5("".join(args.collectors)).hexdigest()
""" @sighup
SIGHUP handler to indicate configuration changes
"""
gevent.signal(signal.SIGHUP, monitor.sighup_handler)
# Retry till API server is up
connected = False
monitor.logger.api_conn_status_update(ConnectionStatus.INIT)
api_server_list = args.api_server_ip.split(',')
while not connected:
try:
vnc_api = VncApi(
args.admin_user, args.admin_password, args.admin_tenant_name,
api_server_list, args.api_server_port,
api_server_use_ssl=args.api_server_use_ssl)
connected = True
monitor.logger.api_conn_status_update(ConnectionStatus.UP)
except requests.exceptions.ConnectionError as e:
monitor.logger.api_conn_status_update(
ConnectionStatus.DOWN, str(e))
time.sleep(3)
except (RuntimeError, ResourceExhaustionError):
# auth failure or haproxy throws 503
time.sleep(3)
try:
monitor.post_init(vnc_api, args)
timer_task = gevent.spawn(launch_timer, monitor)
gevent.joinall([timer_task])
except KeyboardInterrupt:
monitor.rabbit.close()
raise
def main(args_str=None):
global _zookeeper_client
if not args_str:
args_str = ' '.join(sys.argv[1:])
args = parse_args(args_str)
if args.cluster_id:
client_pfx = args.cluster_id + '-'
zk_path_pfx = args.cluster_id + '/'
else:
client_pfx = ''
zk_path_pfx = ''
# randomize collector list
args.random_collectors = args.collectors
if args.collectors:
args.random_collectors = random.sample(args.collectors,
len(args.collectors))
# Initialize logger without introspect thread
sm_logger = ServiceMonitorLogger(args, http_server_port=-1)
    # Initialize AMQP handler then close it to be sure any queue remaining from
    # a previous run is cleaned
rabbitmq_cfg = get_rabbitmq_cfg(args)
vnc_amqp = VncAmqpHandle(sm_logger._sandesh, sm_logger, DBBaseSM,
REACTION_MAP, 'svc_monitor', rabbitmq_cfg,
args.trace_file)
vnc_amqp.establish()
vnc_amqp.close()
    sm_logger.debug("Removed remaining AMQP queue")
# Waiting to be elected as master node
_zookeeper_client = ZookeeperClient(
client_pfx+"svc-monitor", args.zk_server_ip)
sm_logger.notice("Waiting to be elected as master...")
_zookeeper_client.master_election(zk_path_pfx+"/svc-monitor", os.getpid(),
run_svc_monitor, sm_logger, args)
# end main
def server_main():
vnc_cgitb.enable(format='text')
main()
# end server_main
if __name__ == '__main__':
server_main()
|
nilq/baby-python
|
python
|
from .db.models import ModelWorker
from .db.connection import DbEngine
ModelWorker.metadata.create_all(DbEngine)
|
nilq/baby-python
|
python
|
import string
def encotel(frase):
teclado = {
'abc' : '2',
'def' : '3',
'ghi': '4',
'jkl': '5',
'mno' : '6',
'pqrs' : '7',
'tuv' : '8',
'wxyz' : '9',
}
numeros = []
for letra in frase:
        if letra not in string.ascii_letters:
numeros.append(letra)
continue
numeros.extend([teclado[chave] for chave in teclado.keys() if letra in chave])
return "".join(numeros)
|
nilq/baby-python
|
python
|
import itertools
import beatbox
import pandas as pd
def query_salesforce(line, query=''):
"""Runs SQL statement against a salesforce, using specified user,password and security token and beatbox.
If no user,password and security token has been given, an error will be raised
Examples::
%%salesforce user,password,security_token
SELECT id FROM task """
assert len(line.split(',')) == 3, 'You should specify 3 arguments:\nuser_id, password, security_token'
user, password, security_token = line.split(',')
sf = Salesforce(user, password, security_token)
df = sf.query(query, deleted_included=True)
return df
class Salesforce(object):
def __init__(self, user_name, password, security_token):
"""Constructor for salesforce api which open session with salesforce with given credentials
Args: * user_name: salesforce user
* password: salesforce password
* security_token: salesforcesecurity_token """
self.sf = beatbox._tPartnerNS
self.svc = beatbox.Client()
self.svc.login(user_name, password + security_token)
def __get_query_results(self, is_actual_query, rest_of_query, deleted_included=False):
""" Function to call the salesforce API given the calculated query
        Args: * is_actual_query: whether this is the first call for the query
              * rest_of_query: if is_actual_query is True it is the query string, else it is the
                query locator returned by the previous iteration
              * deleted_included: should the query bring records from recycle bin (http://spanning.com/blog/what-you-need-to-know-about-salesforces-recycle-bin/)
        Returns: * res_[self.sf.records:] which represents the list of salesforce results and columns
                 * res_.done[0] which indicates whether all records have been fetched for this specific query
                 * res_.queryLocator[0] which is the query locator to be sent to this function for the next page"""
if is_actual_query:
            res_ = self.svc.queryAll(rest_of_query) if deleted_included else self.svc.query(rest_of_query)  # queryAll also returns recycle-bin records
else:
res_ = self.svc.queryMore(rest_of_query)
return res_[self.sf.records:], \
res_.done[0] if hasattr(res_, 'done') else True, \
res_.queryLocator[0] if res_.queryLocator else None
@staticmethod
def get_columns_names(row):
return [str(col._name[1].lower()) for col in row[2:]]
@staticmethod
def get_columns_values(row):
return [str(col) for col in row[2:]]
def query(self, query, deleted_included=False):
""" Function to call the salesforce API given the calculated query
        Args: * query: a given query for salesforce (https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select.htm)
* deleted_included: should the query bring records from recycle bin (http://spanning.com/blog/what-you-need-to-know-about-salesforces-recycle-bin/)
Returns: Dataframe with results from the given query"""
res, done, header = [], 'false', []
rest_of_query = query
for i in itertools.takewhile(lambda c: done == 'false', itertools.count()):
first_iteration = i == 0
sf_results, done, rest_of_query = self.__get_query_results(first_iteration, \
rest_of_query, \
deleted_included)
normalized_sf_results = [self.get_columns_values(row) for row in sf_results]
res.extend(normalized_sf_results)
if first_iteration and sf_results:
header = self.get_columns_names(sf_results[0])
return pd.DataFrame(res, columns=header)
def load_ipython_extension(ipython):
ipython.register_magic_function(query_salesforce, 'cell', 'salesforce')
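
# Hedged usage sketch (credentials and query below are placeholders, not from the
# original source). Outside IPython the Salesforce class can be used directly:
#
#     sf = Salesforce('user@example.com', 'password', 'security_token')
#     df = sf.query('SELECT id FROM task', deleted_included=True)
#     print(df.head())
#
# Inside IPython, after loading this module as an extension, the same query can be
# issued with the %%salesforce cell magic shown in the query_salesforce docstring.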
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import functools
import logging
import queue
import threading
class AsyncCaller:
'''Singleton class which executes function calls in separate thread'''
class _Caller:
class Thread(threading.Thread):
def __init__(self, queue, error_handler):
self.queue = queue
self.error_handler = error_handler
self.logger = logging.getLogger('AsyncCaller')
super().__init__(daemon=True)
def run(self):
while True:
async_job = self.queue.get()
                    if async_job is None: break
try:
async_job()
except Exception as e:
self.error_handler(str(e))
def __init__(self, error_handler):
self.queue = queue.Queue()
self.thread = self.Thread(self.queue, error_handler)
self.thread.start()
def call(self, target):
self.queue.put(target)
_instance = None
    def __new__(cls, error_handler=None):
if AsyncCaller._instance is None:
AsyncCaller._instance = AsyncCaller._Caller(error_handler)
return AsyncCaller._instance
def asynchronous(f):
'''Decorator which allows any function to be called asynchronously'''
@functools.wraps(f)
def _async_call(*args, **kwargs):
AsyncCaller().call(lambda: f(*args, **kwargs))
return _async_call
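
# Minimal usage sketch (names below are illustrative, not from the original source).
# The first AsyncCaller() call builds the singleton worker thread, so it should be
# given an error handler before any @asynchronous function is invoked:
#
#     AsyncCaller(error_handler=lambda msg: logging.error(msg))
#
#     @asynchronous
#     def fetch(url):
#         ...  # body runs on the AsyncCaller worker thread
#
#     fetch('http://example.com')  # returns immediately, work is queued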
|
nilq/baby-python
|
python
|
from pyson0.json0diff import diff
from pyson0.json0 import TypeJSON
|
nilq/baby-python
|
python
|
import uuid
import json
import os
import pytest
import postgraas_server.backends.docker.postgres_instance_driver as pid
import postgraas_server.backends.postgres_cluster.postgres_cluster_driver as pgcd
import postgraas_server.configuration as configuration
from postgraas_server.backends.exceptions import PostgraasApiException
from postgraas_server.create_app import create_app
from postgraas_server.management_resources import DBInstance
DOCKER_CONFIG = {
"metadb":
{
"db_name": "postgraas",
"db_username": "postgraas",
"db_pwd": "postgraas12",
"host": "localhost",
"port": "54321"
},
"backend":
{
"type": "docker"
}
}
CLUSTER_CONFIG = {
"metadb":
{
"db_name": "postgraas",
"db_username": "postgraas",
"db_pwd": "postgraas12",
"host": "localhost",
"port": "54321"
},
"backend":
{
"type": "pg_cluster",
"host": os.environ.get('PGHOST', 'localhost'),
"port": os.environ.get('PGPORT', '5432'),
"database": os.environ.get('PGDATABASE', 'postgres'),
"username": os.environ.get('PGUSER', 'postgres'),
"password": os.environ.get('PGPASSWORD', 'postgres'),
}
}
CONFIGS = {
'docker': DOCKER_CONFIG,
'pg_cluster': CLUSTER_CONFIG,
}
def remove_digits(s):
return ''.join(c for c in s if not c.isdigit())
def delete_all_test_postgraas_container():
c = pid._docker_client()
for container in c.containers.list():
if container.name.startswith("tests_postgraas_"):
container.remove(force=True)
def delete_all_test_database_and_user(config):
con = pgcd._create_pg_connection(config)
cur = con.cursor()
cur.execute(
'''SELECT d.datname, u.usename
FROM pg_database d
JOIN pg_user u ON (d.datdba = u.usesysid);''')
for db in cur:
if db[0].startswith("tests_postgraas_"):
delete_test_database_and_user(db[0], db[1], config)
cur.execute(
'''SELECT u.usename
FROM pg_user u;''')
for db in cur:
if db[0].startswith("tests_postgraas_"):
pgcd.delete_user(db[0], config)
def delete_test_database_and_user(db_name, username, config):
pgcd.delete_database(db_name, config)
pgcd.delete_user(username, config)
@pytest.fixture(params=['docker', 'pg_cluster'])
def parametrized_setup(request, tmpdir):
from postgraas_server.management_resources import db
cfg = tmpdir.join('config')
with open(cfg.strpath, "w") as fp:
json.dump(CONFIGS[request.param], fp)
config = configuration.get_config(cfg.strpath)
this_app = create_app(config)
this_app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite://"
this_app.use_reloader = False
this_app.config['TESTING'] = True
ctx = this_app.app_context()
ctx.push()
db.create_all()
username, db_name = str(uuid.uuid4()).replace('-', '_'), str(uuid.uuid4()).replace('-', '_')
request.cls.this_app = this_app
request.cls.app_client = this_app.test_client()
request.cls.db_name = remove_digits(db_name)
request.cls.username = remove_digits(username)
request.cls.backend = request.param
try:
yield
except Exception:
pass
if request.param == 'docker':
delete_all_test_postgraas_container()
elif request.param == 'pg_cluster':
delete_all_test_database_and_user(config['backend'])
db.drop_all()
ctx.pop()
@pytest.mark.usefixtures('parametrized_setup')
class TestPostgraasApi():
def test_create_and_delete_postgres_instance(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db_username',
"db_pwd": 'test_db_pwd',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port']
)
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
self.this_app.postgraas_backend.delete(db_entry)
assert True
def test_create_postgraas_twice(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db_username',
"db_pwd": 'test_db_pwd',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port']
)
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
with pytest.raises(PostgraasApiException) as excinfo:
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
if self.backend == "pg_cluster":
assert excinfo.value.message == 'db or user already exists'
elif self.backend == "docker":
assert excinfo.value.message == 'Container exists already'
self.this_app.postgraas_backend.delete(db_entry)
assert True
@pytest.mark.xfail(reason='Username now valid due to hardening against SQL injections.')
def test_create_postgraas_bad_username(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db-bad username',
"db_pwd": 'test_db_pwd',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port']
)
if self.backend == "pg_cluster":
with pytest.raises(PostgraasApiException) as excinfo:
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials)
self.this_app.postgraas_backend.delete(db_entry)
assert 'syntax error at or near "-"' in excinfo.value.message
def test_delete_nonexisting_db(self):
db_credentials = {
"db_name": 'tests_postgraas_instance_name',
"db_username": 'tests_postgraas_db-bad username',
"db_pwd": 'test_db_pwd',
"host": pid.get_hostname(),
"port": pid.get_open_port()
}
db_entry = DBInstance(
postgraas_instance_name=db_credentials['db_name'],
db_name=db_credentials['db_name'],
username=db_credentials['db_username'],
password="",
hostname=db_credentials['host'],
port=db_credentials['port'],
container_id="4n8nz48az49prdmdmprmr4doesnotexit"
)
with pytest.raises(PostgraasApiException) as excinfo:
db_entry.container_id = self.this_app.postgraas_backend.delete(db_entry)
assert 'does not exist' in excinfo.value.message
|
nilq/baby-python
|
python
|
import argparse
import ibapi
from ib_tws_server.codegen.asyncio_client_generator import AsyncioWrapperGenerator
from ib_tws_server.codegen import *
from ib_tws_server.api_definition import *
import logging
import os
import shutil
import sys
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
def generate(output_dir: str):
response_class_fname = os.path.join(output_dir, "client_responses.py")
asyncio_client_fname = os.path.join(output_dir, "asyncio_client.py")
asyncio_wrapper_fname = os.path.join(output_dir, "asyncio_wrapper.py")
graphql_schema_fname = os.path.join(output_dir, "schema.graphql")
graphql_resolver_fname = os.path.join(output_dir, "graphql_resolver.py")
shutil.rmtree(output_dir, ignore_errors=True)
os.mkdir(output_dir)
print(f"Generating code for TWS API Version {ibapi.get_version_string()}")
d = ApiDefinition.verify()
ResponseTypesGenerator.generate(response_class_fname)
AsyncioClientGenerator.generate(asyncio_client_fname)
AsyncioWrapperGenerator.generate(asyncio_wrapper_fname)
GraphQLSchemaGenerator.generate(graphql_schema_fname)
GraphQLResolverGenerator.generate(graphql_resolver_fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate wrapper classes from the request definitions")
parser.add_argument('--output-dir', '-o', dest="output_dir", required=True, help='The output directory')
args = parser.parse_args()
generate(args.output_dir)
|
nilq/baby-python
|
python
|
import unittest
from cornflow_client.airflow import dag_utilities as du
from unittest.mock import Mock, patch
class DagUtilities(unittest.TestCase):
@patch("cornflow_client.airflow.dag_utilities.CornFlow")
def test_env_connection_vars(self, CornFlow):
secrets = Mock()
conn_uris = [
(
"cornflow://some_test_user:very_classified_password@devsm.cornflow.baobabsoluciones.app",
("some_test_user", "very_classified_password"),
"http://devsm.cornflow.baobabsoluciones.app",
),
(
"https://some_test_user:very_classified_password@devsm.cornflow.baobabsoluciones.app",
("some_test_user", "very_classified_password"),
"https://devsm.cornflow.baobabsoluciones.app",
),
(
"https://some_test_user:very_classified_password@devsm.cornflow.baobabsoluciones.app/some_dir",
("some_test_user", "very_classified_password"),
"https://devsm.cornflow.baobabsoluciones.app/some_dir",
),
(
"http://airflow:airflow_test_password@localhost:5000",
("airflow", "airflow_test_password"),
"http://localhost:5000",
),
]
client_instance = CornFlow.return_value
client_instance.login.return_value = ""
for (conn_str, user_info, url) in conn_uris:
secrets.get_conn_uri.return_value = conn_str
du.connect_to_cornflow(secrets)
client_instance.login.assert_called_with(
username=user_info[0], pwd=user_info[1]
)
CornFlow.assert_called_with(url=url)
|
nilq/baby-python
|
python
|
import http
import json
from unittest import mock
import pytest
from sqlalchemy import orm
from todos import crud, db, serializers
from todos.db import models
@pytest.fixture()
def exemplary_event_path_parameters(exemplary_task_model: models.Task) -> dict:
return {"task_id": exemplary_task_model.id}
@pytest.fixture()
def exemplary_event(exemplary_headers_with_access_token: dict, exemplary_event_path_parameters: dict) -> dict:
return {"headers": exemplary_headers_with_access_token, "pathParameters": exemplary_event_path_parameters}
@pytest.mark.usefixtures("exemplary_access_token")
def test_should_return_unauthorized_when_access_token_is_missing() -> None:
response = crud.get_task_details({}, {})
assert response["statusCode"] == http.HTTPStatus.UNAUTHORIZED
assert response["body"] is None
def test_should_successfully_return_task_details(
dbsession: orm.Session, exemplary_event: dict, exemplary_task_model: models.Task
) -> None:
with mock.patch.object(db, "get_session", return_value=dbsession):
response = crud.get_task_details(exemplary_event, {})
assert response["statusCode"] == http.HTTPStatus.OK
assert response["body"] == json.dumps(serializers.serialize_task(exemplary_task_model))
def test_should_return_bad_request_when_task_not_found(
dbsession: orm.Session, exemplary_headers_with_access_token: dict
) -> None:
event = {"headers": exemplary_headers_with_access_token, "pathParameters": {"task_id": 999}}
with mock.patch.object(db, "get_session", return_value=dbsession):
response = crud.get_task_details(event, {})
assert response["statusCode"] == http.HTTPStatus.BAD_REQUEST
def test_should_return_service_unavailable_when_unexpected_error_occurs(exemplary_event: dict) -> None:
with mock.patch.object(db, "get_session", side_effect=Exception()):
response = crud.get_task_details(exemplary_event, {})
assert response["statusCode"] == http.HTTPStatus.SERVICE_UNAVAILABLE
assert response["body"] is None
|
nilq/baby-python
|
python
|
'''Standard Simple feedforward model
feedforward takes in a single image
Model-specific config.py options: (inherits from models.base_net):
    'batch_size': An int. The number of input bundles to use in a batch
'hidden_size': An int. The size of representation size before FC layer
In metric network:
'output_size': For discriminative task, the size of output.
Encoder:
        'encoder': A function that takes 'input_placeholder', 'is_training', 'hidden_size', and returns a representation.
-'encoder_kwargs': A Dict of all args to pass to 'encoder'.
'''
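# A minimal illustrative cfg sketch (hypothetical values and builder functions,
# not taken from the original config.py; the real config must also provide the
# optimizer settings consumed by optimizers.ops.build_optimizer):
#
#     cfg = {
#         'batch_size': 32,
#         'hidden_size': 1024,
#         'encoder': my_encoder_fn,            # assumed encoder builder
#         'encoder_kwargs': {'weight_decay': 1e-4},
#         'metric_net': my_metric_net_fn,      # assumed FC head builder
#         'metric_kwargs': {'output_size': 63},
#         'return_accuracy': True,
#     }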
from __future__ import absolute_import, division, print_function
from functools import partial
from models.base_net import BaseNet
import losses.all as losses_lib
import tensorflow as tf
import tensorflow.contrib.slim as slim
from models.sample_models import *
from models.resnet_v1 import *
import optimizers.train_steps as train_steps
import optimizers.ops as optimize
import pdb
class StandardFeedforward(BaseNet):
'''
'''
def __init__(self, global_step, cfg):
'''
Args:
cfg: Configuration.
'''
super(StandardFeedforward, self).__init__(global_step, cfg)
self.cfg = cfg
if 'hidden_size' not in cfg:
raise ValueError("config.py for Feedforward Network must specify 'hidden_size'")
if 'encoder' not in cfg:
raise ValueError("config.py for Feedforward Network must specify 'encoder'")
if 'metric_net' not in cfg:
raise ValueError("config.py for Feedforward Network must specify 'metric_net'")
if 'loss_threshold' in cfg:
self.threshold = tf.constant(cfg['loss_threshold'])
else:
self.threshold = None
self.is_l1 = 'is_l1' in cfg and cfg['is_l1']
def build_encoder(self, input_imgs, is_training):
'''Builds encoder.
Args:
input_img: input image to encode after scaling to [-1, 1]
is_training: flag for whether the model is in training mode.
Returns:
            encoder_output: tensor representing the output of the encoder
'''
encoder_kwargs = {}
if 'encoder_kwargs' in self.cfg:
encoder_kwargs = self.cfg['encoder_kwargs']
else:
print("Not using 'kwargs' arguments for encoder.")
with tf.variable_scope("feedforward") as scope:
encoder_output, end_points = self.cfg['encoder'](
input_imgs,
is_training,
reuse=None,
hidden_size=self.cfg['hidden_size'],
scope=scope,
**encoder_kwargs)
encoder_output = tf.reshape(encoder_output, [-1,16,16,8])
self.encoder_endpoints = end_points
return encoder_output
def build_postprocess(self, encoder_output, is_training):
'''Build the post-process on feedforward network structure output.
        The default approach is a three-layer fully connected network.
Args:
encoder_output: a tensor output representations of input image
            is_training: flag for whether the model is in training mode.
Returns:
final_output: final output for the whole model
'''
metric_kwargs = {}
if 'metric_kwargs' in self.cfg:
metric_kwargs = self.cfg['metric_kwargs']
else:
raise ValueError("config.py for Feedforward Network must specify 'metric_kwargs'")
encoder_output = tf.contrib.layers.flatten(encoder_output)
final_output, end_points = self.cfg['metric_net'](
encoder_output,
is_training,
**metric_kwargs)
self.metric_endpoints = end_points
return final_output
def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None):
'''Builds the model. Assumes that the input is from range [0, 1].
Args:
input_imgs: batch of input images (scaled between -1 and 1) with the
dimensions specified in the cfg
is_training: flag for whether the model is in training mode or not
mask: mask used for computing sum of squares loss. If None, we assume
it is np.ones.
'''
print('building model')
cfg = self.cfg
self.is_training= is_training
self.masks = masks
if self.decoder_only:
encoder_output = input_imgs
else:
encoder_output = self.build_encoder(input_imgs, is_training)
final_output = self.build_postprocess(encoder_output, is_training)
losses = self.get_losses(final_output, targets, is_softmax='l2_loss' not in cfg)
# use weight regularization
if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']:
add_reg = False
else:
add_reg = True
# get losses
regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' )
total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg,
name='losses/total_loss')
self.input_images = input_imgs
self.targets = targets
self.masks = masks
self.encoder_output = encoder_output
self.decoder_output = final_output
self.losses = losses
self.total_loss = total_loss
# add summaries
if self.extended_summaries:
slim.summarize_variables()
slim.summarize_weights()
slim.summarize_biases()
slim.summarize_activations()
slim.summarize_collection(tf.GraphKeys.LOSSES)
slim.summarize_tensor( regularization_loss )
slim.summarize_tensor( total_loss )
self.model_built = True
def get_losses(self, final_output, target, is_softmax=True):
'''Returns the loss for a Siamese Network.
Args:
final_output: tensor that represent the final output of the image bundle.
target: Tensor of target to be output by the siamese network.
Returns:
losses: list of tensors representing each loss component
'''
print('setting up losses...')
self.target = target
self.final_output = final_output
self.predicted = slim.softmax(final_output)
with tf.variable_scope('losses'):
if is_softmax:
if len(target.shape) == len(final_output.shape):
correct_prediction = tf.equal(tf.argmax(final_output,1), tf.argmax(target, 1))
if len(self.masks.shape) == 2:
self.masks = tf.squeeze(self.masks)
siamese_loss = tf.reduce_mean(
losses_lib.get_softmax_loss(
final_output,
target,
self.masks,
scope='softmax_loss'))
else:
correct_prediction = tf.equal(tf.argmax(final_output,1), target)
siamese_loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=final_output,
labels=target,
name='softmax_loss'))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.siamese_loss = siamese_loss
else:
# If it's not softmax, it's l2 norm loss.
self.accuracy = 0
# self.l2_loss = tf.losses.mean_squared_error(
# final_output,
# target,
# scope='d1',
# loss_collection=tf.GraphKeys,
# reduction="none")
target = tf.to_float(target)
final_output = tf.to_float(final_output)
# self.l2_loss = tf.norm(target - final_output, axis=1)
#self.l2_loss_sum = tf.reduce_sum(self.l2_loss, 1)
# print(self.l2_loss)
if self.is_l1:
self.l_loss = losses_lib.get_l1_loss(
final_output,
target,
scope='d1')
print('Using L1 loss.....')
else:
self.l_loss = losses_lib.get_l2_loss(
final_output,
target,
scope='d1')
self.siamese_loss = self.l_loss
self.robust_l_loss = self.l_loss
# siamese_loss = self.l2_loss
# if self.threshold is not None:
# ind = tf.unstack(siamese_loss)
# siamese_loss = [ tf.cond(tf.greater(x, self.threshold),
# lambda: self.threshold + self.threshold * tf.log(x / self.threshold),
# lambda: x) for x in ind ]
# self.robust_l2_loss = siamese_loss
# siamese_loss = tf.stack(siamese_loss)
# self.siamese_loss = tf.reduce_sum(siamese_loss) / self.cfg['batch_size']
tf.add_to_collection(tf.GraphKeys.LOSSES, self.siamese_loss)
losses = [self.siamese_loss]
return losses
def get_train_step_fn( self ):
'''
Returns:
            A train_step function which takes args:
                (sess, train_ops, global_step)
'''
return partial( train_steps.discriminative_train_step_fn,
return_accuracy=self.cfg['return_accuracy'] )
def build_train_op( self, global_step ):
'''
Builds train ops for discriminative task
Args:
global_step: A Tensor to be incremented
Returns:
[ loss_op, accuracy ]
'''
if not self.model_built or self.total_loss is None :
raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
self.model_built, self.losses_built ) )
self.global_step = global_step
t_vars = tf.trainable_variables()
# Create the optimizer train_op for the generator
self.optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
if 'clip_norm' in self.cfg:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True, clip_gradient_norm=self.cfg['clip_norm'])
else:
if self.is_training:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True )
else:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, is_training=False, update_global_step=True )
# Create a train_op for the discriminator
self.train_op = [ self.loss_op, self.accuracy ]
self.train_op_built = True
return self.train_op
|
nilq/baby-python
|
python
|
"""
"""
PROMPT_COLORS = {
"purple": '\033[95m',
"blue": '\033[94m',
"green": '\033[92m',
"yellow": '\033[93m',
"red": '\033[91m',
"bold": '\033[1m',
"underline": '\033[4m'}
PROMPT_TAILER = '\033[0m'
class ColoredPrinter(object):
def __init__(self, color):
        if color not in PROMPT_COLORS:
raise ValueError('unknown color {}'.format(color))
self.print_fmt = PROMPT_COLORS[color] + '{string}' + PROMPT_TAILER
def __str__(self):
"""return a colored version of the representation string"""
return self.format(self.__repr__())
def format(self, *strings):
"""add coloration items to a list of strings
"""
string = " ".join([self.print_fmt.format(string=string) for string in strings])
return string
def __call__(self, *strings, **kwargs):
string = self.format(*strings)
print(string, **kwargs)
printpurple = ColoredPrinter('purple')
printblue = ColoredPrinter('blue')
printgreen = ColoredPrinter('green')
printyellow = ColoredPrinter('yellow')
printred = ColoredPrinter('red')
printbold = ColoredPrinter('bold')
printunderline = ColoredPrinter('underline')
PRINTERS = {color: eval("print{}".format(color)) for color in PROMPT_COLORS}
if __name__ == '__main__':
for color, printer in PRINTERS.items():
print("{:<20s} {} ======> ".format(color, printer), end=" ")
printer('hello world')
|
nilq/baby-python
|
python
|
import math
import os
import random
import re
import sys
n = int(input())
arr = list(map(int, input().rstrip().split()))
numSwaps = 0
i = 0
while(i < len(arr)-1):
if arr[i] != i+1:
tmp = arr[i]
arr[i], arr[tmp-1] = arr[tmp-1], arr[i]
numSwaps += 1
else:
i += 1
print(numSwaps)
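
# Worked example (hypothetical input, not from the original source): for
# arr = [7, 1, 3, 2, 4, 5, 6] the loop above performs 5 swaps, each time placing
# arr[i] at its target position i+1 before advancing, which is the minimum number
# of swaps needed to sort a permutation of 1..n.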
|
nilq/baby-python
|
python
|
"""
This is a reST markup explaining the following code, compatible with
`Sphinx Gallery <https://sphinx-gallery.github.io/>`_.
"""
# You can convert the file to a Jupyter notebook using the
# sphx_glr_python_to_jupyter.py utility from Sphinx Gallery.
import math
sin = math.sin(0.13587)
print(sin)
#%%
# And a sum with itself turns it into two sins, because the following holds:
#
# .. math::
#
# 2 a = a + a
#
two_sins = sin + sin
if two_sins != 2*sin:
print("Assumptions broken. Restart the universe.")
|
nilq/baby-python
|
python
|
import os.path
from os import listdir
import re
from numpy.distutils.core import setup
def find_version(*paths):
fname = os.path.join(os.path.dirname(__file__), *paths)
with open(fname) as fp:
code = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", code, re.M)
if match:
return match.group(1)
raise RuntimeError("Unable to find version string.")
scripts = ['Scripts/' + i for i in listdir('Scripts/')]
setup(
name='obstools',
version=find_version('obstools', '__init__.py'),
description='Python tools for ocean bottom seismic instruments',
author='Pascal Audet, Helen Janiszewski',
author_email='pascal.audet@uottawa.ca',
maintainer='Pascal Audet, Helen Janiszewski',
maintainer_email='pascal.audet@uottawa.ca, hajanisz@hawaii.edu',
url='https://github.com/paudetseis/OBStools',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'],
install_requires=['numpy', 'obspy', 'stdb'],
python_requires='>=3.6',
packages=['obstools','obstools.atacr'],
scripts=scripts)
|
nilq/baby-python
|
python
|
# Simulate a Thomas cluster process on a rectangle.
# Author: H. Paul Keeler, 2018.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
# For more details, see the post:
# hpaulkeeler.com/simulating-a-thomas-cluster-point-process/
import numpy as np; # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # For plotting
plt.close("all"); # close all figures
# Simulation window parameters
xMin = -.5;
xMax = .5;
yMin = -.5;
yMax = .5;
# Parameters for the parent and daughter point processes
lambdaParent = 10; # density of parent Poisson point process
lambdaDaughter = 100; # mean number of points in each cluster
sigma = 0.05; # sigma for normal variables (ie random locations) of daughters
# Extended simulation windows parameters
rExt=6*sigma; # extension parameter
# for rExt, use factor of deviation sigma eg 5 or 6
xMinExt = xMin - rExt;
xMaxExt = xMax + rExt;
yMinExt = yMin - rExt;
yMaxExt = yMax + rExt;
# rectangle dimensions
xDeltaExt = xMaxExt - xMinExt;
yDeltaExt = yMaxExt - yMinExt;
areaTotalExt = xDeltaExt * yDeltaExt; # area of extended rectangle
# Simulate Poisson point process for the parents
numbPointsParent = np.random.poisson(areaTotalExt * lambdaParent);# Poisson number of points
# x and y coordinates of Poisson points for the parent
xxParent = xMinExt + xDeltaExt * np.random.uniform(0, 1, numbPointsParent);
yyParent = yMinExt + yDeltaExt * np.random.uniform(0, 1, numbPointsParent);
# Simulate Poisson point process for the daughters (i.e. the final point process)
numbPointsDaughter = np.random.poisson(lambdaDaughter, numbPointsParent);
numbPoints = sum(numbPointsDaughter); # total number of points
# Generate the (relative) locations in Cartesian coordinates by
# simulating independent normal variables
xx0 = np.random.normal(0, sigma, numbPoints); # (relative) x coordinates
yy0 = np.random.normal(0, sigma, numbPoints); # (relative) y coordinates
# replicate parent points (ie centres of disks/clusters)
xx = np.repeat(xxParent, numbPointsDaughter);
yy = np.repeat(yyParent, numbPointsDaughter);
# translate points (ie parents points are the centres of cluster disks)
xx = xx + xx0;
yy = yy + yy0;
# thin points if outside the simulation window
booleInside = ((xx >= xMin) & (xx <= xMax) & (yy >= yMin) & (yy <= yMax));
# retain points inside simulation window
xx = xx[booleInside];
yy = yy[booleInside];
# Plotting
plt.scatter(xx, yy, edgecolor='b', facecolor='none', alpha=0.5);
plt.xlabel("x");
plt.ylabel("y");
plt.axis('equal');
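
# Sanity check (added note, not in the original script): for a stationary Thomas
# process the point intensity is lambdaParent * lambdaDaughter, so the expected
# number of retained points in the unit window is 10 * 100 * 1 = 1000; len(xx)
# should fluctuate around that value across runs.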
|
nilq/baby-python
|
python
|
#
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.cisco.ios.tests.unit.compat.mock import patch
from ansible_collections.cisco.ios.plugins.modules import ios_ospf_interfaces
from ansible_collections.cisco.ios.tests.unit.modules.utils import (
set_module_args,
)
from .ios_module import TestIosModule, load_fixture
class TestIosOspfInterfacesModule(TestIosModule):
module = ios_ospf_interfaces
def setUp(self):
super(TestIosOspfInterfacesModule, self).setUp()
self.mock_get_config = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config"
)
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config"
)
self.load_config = self.mock_load_config.start()
self.mock_get_resource_connection_config = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base."
"get_resource_connection"
)
self.get_resource_connection_config = (
self.mock_get_resource_connection_config.start()
)
self.mock_get_resource_connection_facts = patch(
"ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module_base."
"get_resource_connection"
)
self.get_resource_connection_facts = (
self.mock_get_resource_connection_facts.start()
)
self.mock_edit_config = patch(
"ansible_collections.cisco.ios.plugins.module_utils.network.ios.providers.providers.CliProvider.edit_config"
)
self.edit_config = self.mock_edit_config.start()
self.mock_execute_show_command = patch(
"ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.ospf_interfaces.ospf_interfaces."
"Ospf_InterfacesFacts.get_ospf_interfaces_data"
)
self.execute_show_command = self.mock_execute_show_command.start()
def tearDown(self):
super(TestIosOspfInterfacesModule, self).tearDown()
self.mock_get_resource_connection_config.stop()
self.mock_get_resource_connection_facts.stop()
self.mock_edit_config.stop()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_execute_show_command.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
return load_fixture("ios_ospf_interfaces.cfg")
self.execute_show_command.side_effect = load_from_file
def test_ios_ospf_interfaces_merged(self):
set_module_args(
dict(
config=[
dict(
name="GigabitEthernet0/2",
address_family=[
dict(
afi="ipv4",
bfd=True,
cost=dict(interface_cost=30),
network=dict(broadcast=True),
priority=60,
resync_timeout=90,
ttl_security=dict(hops=120),
authentication=dict(key_chain="test_key"),
),
dict(
afi="ipv6",
bfd=True,
dead_interval=dict(time=100),
network=dict(manet=True),
priority=50,
),
],
),
dict(
name="GigabitEthernet0/3",
address_family=[
dict(
afi="ipv4",
bfd=True,
cost=dict(interface_cost=50),
priority=50,
ttl_security=dict(hops=150),
)
],
),
],
state="merged",
)
)
commands = [
"interface GigabitEthernet0/3",
"ip ospf bfd",
"ip ospf cost 50",
"ip ospf priority 50",
"ip ospf ttl-security hops 150",
"interface GigabitEthernet0/2",
"ip ospf authentication key-chain test_key",
"ip ospf bfd",
"ip ospf cost 30",
"ip ospf network broadcast",
"ip ospf priority 60",
"ip ospf resync-timeout 90",
"ip ospf ttl-security hops 120",
"ipv6 ospf bfd",
"ipv6 ospf dead-interval 100",
"ipv6 ospf network manet",
"ipv6 ospf priority 50",
]
result = self.execute_module(changed=True)
self.assertEqual(sorted(result["commands"]), sorted(commands))
def test_ios_ospf_interfaces_merged_idempotent(self):
set_module_args(
dict(
config=[
dict(
address_family=[
dict(
afi="ipv4",
adjacency=True,
cost=dict(interface_cost=30),
priority=40,
process=dict(id=10, area_id="20"),
ttl_security=dict(hops=50),
)
],
name="GigabitEthernet0/2",
),
dict(
address_family=[
dict(
afi="ipv6",
adjacency=True,
priority=20,
process=dict(id=55, area_id="105"),
transmit_delay=30,
)
],
name="GigabitEthernet0/3",
),
],
state="merged",
)
)
self.execute_module(changed=False, commands=[])
def test_ios_ospf_interfaces_replaced(self):
set_module_args(
dict(
config=[
dict(
name="GigabitEthernet0/3",
address_family=[
dict(
afi="ipv4",
bfd=True,
cost=dict(interface_cost=50),
priority=50,
ttl_security=dict(hops=150),
)
],
)
],
state="replaced",
)
)
commands = [
"interface GigabitEthernet0/3",
"ip ospf bfd",
"ip ospf cost 50",
"ip ospf priority 50",
"ip ospf ttl-security hops 150",
]
result = self.execute_module(changed=True)
self.assertEqual(sorted(result["commands"]), sorted(commands))
def test_ios_ospf_interfaces_replaced_idempotent(self):
set_module_args(
dict(
config=[
dict(
address_family=[
dict(
afi="ipv4",
adjacency=True,
cost=dict(interface_cost=30),
priority=40,
process=dict(id=10, area_id="20"),
ttl_security=dict(hops=50),
)
],
name="GigabitEthernet0/2",
),
dict(
address_family=[
dict(
afi="ipv6",
adjacency=True,
priority=20,
process=dict(id=55, area_id="105"),
transmit_delay=30,
)
],
name="GigabitEthernet0/3",
),
],
state="replaced",
)
)
self.execute_module(changed=False, commands=[])
def test_ios_ospf_interfaces_overridden(self):
set_module_args(
dict(
config=[
dict(
address_family=[
dict(
afi="ipv6",
manet=dict(cost=dict(percent=10)),
priority=40,
process=dict(id=10, area_id="20"),
transmit_delay=50,
)
],
name="GigabitEthernet0/3",
)
],
state="overridden",
)
)
commands = [
"interface GigabitEthernet0/2",
"no ip ospf 10 area 20",
"no ip ospf adjacency stagger disable",
"no ip ospf cost 30",
"no ip ospf priority 40",
"no ip ospf ttl-security hops 50",
"interface GigabitEthernet0/3",
"ipv6 ospf 10 area 20",
"no ipv6 ospf adjacency stagger disable",
"ipv6 ospf manet peering cost percent 10",
"ipv6 ospf priority 40",
"ipv6 ospf transmit-delay 50" "",
]
result = self.execute_module(changed=True)
self.assertEqual(sorted(result["commands"]), sorted(commands))
def test_ios_ospf_interfaces_overridden_idempotent(self):
set_module_args(
dict(
config=[
dict(
address_family=[
dict(
afi="ipv4",
adjacency=True,
cost=dict(interface_cost=30),
priority=40,
process=dict(id=10, area_id="20"),
ttl_security=dict(hops=50),
)
],
name="GigabitEthernet0/2",
),
dict(
address_family=[
dict(
afi="ipv6",
adjacency=True,
priority=20,
process=dict(id=55, area_id="105"),
transmit_delay=30,
)
],
name="GigabitEthernet0/3",
),
],
state="overridden",
)
)
self.execute_module(changed=False, commands=[])
def test_ios_ospf_interfaces_deleted_interface(self):
set_module_args(
dict(config=[dict(name="GigabitEthernet0/2")], state="deleted")
)
commands = [
"interface GigabitEthernet0/2",
"no ip ospf priority 40",
"no ip ospf adjacency stagger disable",
"no ip ospf ttl-security hops 50",
"no ip ospf 10 area 20",
"no ip ospf cost 30",
]
result = self.execute_module(changed=True)
self.assertEqual(sorted(result["commands"]), sorted(commands))
def test_ios_ospf_interfaces_deleted_all(self):
set_module_args(dict(config=[], state="deleted"))
commands = [
"interface GigabitEthernet0/3",
"no ipv6 ospf 55 area 105",
"no ipv6 ospf adjacency stagger disable",
"no ipv6 ospf priority 20",
"no ipv6 ospf transmit-delay 30",
"interface GigabitEthernet0/2",
"no ip ospf 10 area 20",
"no ip ospf adjacency stagger disable",
"no ip ospf cost 30",
"no ip ospf priority 40",
"no ip ospf ttl-security hops 50",
]
result = self.execute_module(changed=True)
self.assertEqual(sorted(result["commands"]), sorted(commands))
def test_ios_ospf_interfaces_rendered(self):
set_module_args(
dict(
config=[
dict(
name="GigabitEthernet0/2",
address_family=[
dict(
afi="ipv4",
bfd=True,
cost=dict(interface_cost=30),
network=dict(broadcast=True),
priority=60,
resync_timeout=90,
ttl_security=dict(hops=120),
),
dict(
afi="ipv6",
bfd=True,
dead_interval=dict(time=100),
network=dict(manet=True),
priority=50,
),
],
),
dict(
name="GigabitEthernet0/3",
address_family=[
dict(
afi="ipv4",
bfd=True,
cost=dict(interface_cost=50),
priority=50,
ttl_security=dict(hops=150),
)
],
),
],
state="rendered",
)
)
commands = [
"interface GigabitEthernet0/3",
"ip ospf bfd",
"ip ospf cost 50",
"ip ospf priority 50",
"ip ospf ttl-security hops 150",
"interface GigabitEthernet0/2",
"ip ospf bfd",
"ip ospf cost 30",
"ip ospf network broadcast",
"ip ospf priority 60",
"ip ospf resync-timeout 90",
"ip ospf ttl-security hops 120",
"ipv6 ospf bfd",
"ipv6 ospf dead-interval 100",
"ipv6 ospf network manet",
"ipv6 ospf priority 50",
]
result = self.execute_module(changed=False)
self.assertEqual(sorted(result["rendered"]), sorted(commands))
|
nilq/baby-python
|
python
|
# Generated by Django 4.0 on 2021-12-29 18:47
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('games', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='game',
name='genre',
),
migrations.RemoveField(
model_name='game',
name='plataform',
),
migrations.CreateModel(
name='GamePlataform',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.game')),
('plataform', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.plataform')),
],
),
migrations.CreateModel(
name='GameGenre',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.game')),
('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.genre')),
],
),
]
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
import sys
f = sys.stdin
s = f.read()
words = s.split()
n = len(words)
d = {}
for w in words:
if w in d:
d[w] += 1
else:
d[w] = 1
def foo(s):
return d[s]
#sorted_keys = sorted(d.keys(), key=foo, reverse=True)
sorted_keys = sorted(d.keys(), key = lambda x: d[x], reverse = True)
i = 0
for k in sorted_keys:
if i == 20:
break
print("{}: {}".format(k, d[k]))
i += 1
print(d, file=sys.stdout, end='')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
""" Generic tests for all animations.
These tests run against all animation classes found in
earthstar.effects.animations.*.
"""
import glob
import os
import pytest
import earthstar.effects.animations as animations
from earthstar.effects.engine import EffectEngine
from earthstar.frame_utils import FrameConstants
def find_animations():
pkg_folder = os.path.dirname(animations.__file__)
pkg_modules = [
os.path.splitext(os.path.basename(x))[0]
for x in glob.glob(pkg_folder + "/*.py")
if not x.endswith('/__init__.py')
]
return [
animations.import_animation(x) for x in pkg_modules
]
ANIMATIONS = find_animations()
@pytest.mark.parametrize("animation_cls", ANIMATIONS)
@pytest.mark.timeout(2.5) # at least 40 frames per second
def test_generates_one_hundred_frames(animation_cls):
""" Tests that each animation can generate one hundred
frames correctly in a reasonable amount of time.
"""
fc = FrameConstants()
engine = EffectEngine(fc=fc, tick=1. / 10, transition=60)
engine.add_animation_type(animation_cls)
for i in range(100):
frame = engine.next_frame()
assert frame.shape == fc.frame_shape
assert frame.dtype == fc.frame_dtype
|
nilq/baby-python
|
python
|
import pandas as pd
import os
import sys
in_dir = sys.argv[1]
types = ['Right', 'Left']
files = [os.path.join(in_dir, f) for f in os.listdir(in_dir)
         if f.lower().endswith('.csv')]
# Split each input CSV by account_type and write the matching rows
# into a per-type subdirectory.
for account_type in types:
    outdir = account_type.lower()
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    for f in files:
        df = pd.read_csv(f, encoding='utf-8')
        sub = df.loc[df.account_type == account_type]
        sub.to_csv(os.path.join(outdir, account_type + '_' + os.path.basename(f)))
|
nilq/baby-python
|
python
|
""" Contains all the models that can be used to impute missing data. """
from .daema import Daema
from .holoclean import Holoclean
from .mida import MIDA
from .miss_forest import MissForestImpute
from .baseline_imputations import MeanImputation, Identity
MODELS = {
"DAEMA": Daema,
"Holoclean": Holoclean,
"MIDA": MIDA,
"MissForest": MissForestImpute,
"Mean": MeanImputation,
"Real": Identity, # Not a proper imputation algorithm, handled separately in the run.py file
}
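# Hypothetical usage sketch (the real dispatch lives in run.py, which is not
# shown here): look the model class up by name and instantiate it. The
# constructor arguments and the impute call are illustrative assumptions.
#
# model_cls = MODELS["Mean"]
# model = model_cls()
# imputed = model.impute(dataset_with_missing_values)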
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import AdminlteLog, AdminlteLogType
admin.site.register(AdminlteLog)
admin.site.register(AdminlteLogType)
|
nilq/baby-python
|
python
|
from libsvm.python.svmutil import *
from libsvm.python.svm import *
import os
import struct
import numpy
dic={}
# Data-loading function; the kind argument selects which file set (train or test) to read
def loadforSVM(path, kind='train'):
labels_path = os.path.join(path,'%s-labels.idx1-ubyte'% kind)
images_path = os.path.join(path,'%s-images.idx3-ubyte'% kind)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',lbpath.read(8))
labels = numpy.fromfile(lbpath,dtype=numpy.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack('>IIII',imgpath.read(16))
images = numpy.fromfile(imgpath,dtype=numpy.uint8).reshape(len(labels), 784)
    #Some raw values are large enough to overflow activation computations,
    #so the whole dataset is scaled down; every pixel value lies in 0-255,
    #so normalize by dividing by 255.
if kind=='train':
f = open('trainforSVM.txt','w')
if kind=='t10k':
f = open('testforSVM.txt','w')
count=0
for i in range(10):
for j in range(len(images)):
index=1
if labels[j]==i:
string=str(i)+' '
for k in images[j]:
string=string+str(index)+':'+str(k/255)+' '
index+=1
f.writelines(string+'\n')
dic[count]=j
count+=1
f.close()
if __name__ == '__main__':
loadforSVM("C:\\Users\\Anonymous\\Documents\\机器学习\\作业四赵虎201600301325", kind='train')
loadforSVM("C:\\Users\\Anonymous\\Documents\\机器学习\\作业四赵虎201600301325", kind='t10k')
y, x = svm_read_problem('trainforSVM.txt')
yt,xt=svm_read_problem('testforSVM.txt')
model=svm_train(y,x,'-t 0 -m 600')
# print('test:')
p_label, p_acc, p_val = svm_predict(yt, xt, model)
    f = open('classificationforSVM.txt','w')
    for i in range(len(p_label)):
        # f.write(str(int(p_label[dic[i]]))+' ')
        f.write(str(int(p_label[i]))+' ')
    f.close()  # flush the predictions to disk before re-reading the file below
f1=open("classificationforSVM.txt")
s=f1.read().split()
dic1={}
for i in range(10000):
dic1[dic[i]]=i
    f2=open("classificationforlinearSVM.txt",'w')
    for i in range(10000):
        f2.write(s[dic1[i]]+' ')
    f2.close()
|
nilq/baby-python
|
python
|
from abc import abstractmethod, ABC
from typing import Callable, TypeVar
T = TypeVar("T")
class Policy(ABC):
@abstractmethod
def execute(self, function: Callable[[], T]) -> T:
"""
Accepts lambda function and execute it with pre-defined policy parameters
Example: p.execute(lambda: api.call(1, 2))
:param function: lambda function to be executed
:return: function result
"""
raise NotImplementedError
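# Hedged example (not part of the original module): a minimal concrete Policy
# that retries the callable a fixed number of times before giving up. The
# class name and the default attempt count are illustrative assumptions.
class RetryPolicy(Policy):
    def __init__(self, attempts: int = 3) -> None:
        self.attempts = attempts
    def execute(self, function: Callable[[], T]) -> T:
        last_error: Exception = Exception("no attempts made")
        for _ in range(self.attempts):
            try:
                return function()
            except Exception as error:  # broad catch is acceptable for a sketch
                last_error = error
        raise last_error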
|
nilq/baby-python
|
python
|
# Generated by Django 4.0.2 on 2022-03-06 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0002_challenges_game_delete_choice_delete_question_and_more'),
]
operations = [
migrations.AddField(
model_name='game',
name='console',
field=models.CharField(default='N/A', max_length=100),
),
]
|
nilq/baby-python
|
python
|
import os, sys, time
sys.path.append(os.getcwd())
import torch
import torchvision
from torch import nn
from torch import autograd
from torch import optim
import torch.nn.functional as F
import tflib as lib
import tflib.save_images
import tflib.mnist
import tflib.cifar10
import tflib.plot
#import tflib.inception_score
import numpy as np
from tqdm import tqdm
# Download CIFAR-10 (Python version) at
# https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the
# extracted files here!
DATA_DIR = '/mnt/7FC1A7CD7234342C/cifar-10-batches-py/'
OUTPUT_BASE_DIR = '/mnt/7FC1A7CD7234342C/cifar10-results/'
RUN_PATH = '{}{}/'.format(OUTPUT_BASE_DIR, time.strftime('%Y_%m_%d_%H_%M_%S')) #TODO: generate by settings
if not os.path.exists(RUN_PATH):
os.mkdir(RUN_PATH)
#TODO:hack
tflib.plot.log_dir = RUN_PATH
if len(DATA_DIR) == 0:
raise Exception('Please specify path to data directory in gan_cifar.py!')
DIM = 64 # Model dimensionality
CRITIC_DIM = 64 # ambition
INPUT_DIM = 128 # generator input dimension (latent variable dimension)
LAMBDA = 10 # Gradient penalty lambda hyperparameter
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 64 # Batch size
ITERS = 100000 # How many generator iterations to train for
OUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32)
KERNEL_SIZE = 4
CONSTANCY_LOSS = False
CONSTANCY_LAMBDA = 8
LR = 1e-4
GENERATOR_INSTANCE_NORM = nn.BatchNorm2d
ENCODER_INSTANCE_NORM = False # TODO
DISCRIMINATOR_RECONSTRUCTION_LOSS = False
DISCRIMINATOR_RECONSTRUCTION_LAMBDA = 8
GENERATOR_AUTOENCODER_LOSS = False
GENERATOR_AUTOENCODER_LAMBDA = 1
GENERATOR_SCORE_LOSS = False
GENERATOR_SCORE_LAMBDA = 8
AUTOENCODER_GP = False
ONE_SIDED = False
params = dict(
MODE = 'cramer', # Valid options are dcgan, wgan, or wgan-gp
    DIM = DIM, # Model dimensionality
INPUT_DIM = INPUT_DIM, # generator input dimension (latent variable dimension)
LAMBDA = LAMBDA, # Gradient penalty lambda hyperparameter
CRITIC_ITERS = CRITIC_ITERS, # How many critic iterations per generator iteration
BATCH_SIZE = BATCH_SIZE, # Batch size
ITERS = ITERS, # How many generator iterations to train for
OUTPUT_DIM = OUTPUT_DIM, # Number of pixels in CIFAR10 (3*32*32)
KERNEL_SIZE = KERNEL_SIZE,
GENERATOR_INSTANCE_NORM = GENERATOR_INSTANCE_NORM.__name__,
ENCODER_INSTANCE_NORM = ENCODER_INSTANCE_NORM,
DISCRIMINATOR_RECONSTRUCTION_LOSS = DISCRIMINATOR_RECONSTRUCTION_LOSS,
LR=LR,
AUTOENCODER_GP = AUTOENCODER_GP,
ONE_SIDED=ONE_SIDED,
CONSTANCY_LOSS = CONSTANCY_LOSS,
CONSTANCY_LAMBDA = CONSTANCY_LAMBDA,
GENERATOR_SCORE_LOSS = GENERATOR_SCORE_LOSS,
GENERATOR_SCORE_LAMBDA = GENERATOR_SCORE_LAMBDA,
GENERATOR_AUTOENCODER_LOSS = GENERATOR_AUTOENCODER_LOSS,
GENERATOR_AUTOENCODER_LAMBDA = GENERATOR_AUTOENCODER_LAMBDA,
CRITIC_DIM=CRITIC_DIM,
)
with open(RUN_PATH + '/algo_params.txt','w') as f:
import json
json.dump(params, f, indent=2)
def _upscale_resize(in_dim, out_dim, kernel_size):
return nn.Sequential(
nn.InstanceNorm2d(in_dim, affine=True),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1,2,1,2)),
nn.Conv2d(in_dim, out_dim, kernel_size, bias=False)
)
def _upblock(in_dim, out_dim, kernel_size, padding, norm=nn.InstanceNorm2d, non_linearity=lambda: nn.ReLU(True)):
blocks = []
    bias_conv = not norm # if no norm, then add a bias parameter
if norm is not None:
blocks.append(norm(in_dim))
blocks.append(nn.ConvTranspose2d(in_dim, out_dim, kernel_size, stride=2, padding=padding, bias=bias_conv))
blocks.append(non_linearity())
return nn.Sequential(*blocks)
class Generator(nn.Module):
def __init__(self, norm=GENERATOR_INSTANCE_NORM):
super(Generator, self).__init__()
preprocess = nn.Sequential(
#nn.InstanceNorm2d(4 * 4 * 4 * DIM),
nn.Linear(INPUT_DIM, 4 * 4 * 4 * DIM),
nn.ReLU(True),
)
non_linearity = nn.ReLU
#block1 = _upscale_resize(4 * DIM, 2 * DIM, KERNEL_SIZE)
#block2 = _upscale_resize(2 * DIM, DIM, KERNEL_SIZE)
#self.last_norm = nn.InstanceNorm2d(DIM, affine=True)
#deconv_out = nn.ConvTranspose2d(DIM, 3, KERNEL_SIZE, stride=2, padding=1, bias=False)
#self.out_norm = nn.InstanceNorm2d(3, affine=True)
self.preprocess = preprocess
self.block1 = _upblock(4 * DIM, 2 * DIM, KERNEL_SIZE, 1, norm=norm, non_linearity=non_linearity)
self.block2 = _upblock(2 * DIM, DIM, KERNEL_SIZE, 1, norm=norm, non_linearity=non_linearity)
self.block_out = _upblock(DIM, 3, KERNEL_SIZE, 1, norm=norm, non_linearity=nn.Tanh)
#self.deconv_out = deconv_out
#self.tanh = nn.Tanh()
def forward(self, input):
output = self.preprocess(input)
output = output.view(-1, 4 * DIM, 4, 4)
#print(output.size())
output = self.block1(output)
#print(output.size())
output = self.block2(output)
#print(output.size())
output = self.block_out(output)
#output = self.deconv_out(self.last_norm(output))
#output = self.deconv_out(output)
#output = self.tanh(output)
#output = self.out_norm(output)
return output.view(-1, 3, 32, 32)
class Encoder(nn.Module):
def __init__(self, dim):
super().__init__()
if ENCODER_INSTANCE_NORM:
main = nn.Sequential(
nn.Conv2d(3, dim, KERNEL_SIZE, 2, padding=1, bias=False),
nn.InstanceNorm2d(dim),
nn.LeakyReLU(0.2, True),
nn.Conv2d(dim, 2 * dim, KERNEL_SIZE, 2, padding=1, bias=False),
nn.InstanceNorm2d(2 * dim),
nn.LeakyReLU(0.2, True),
nn.Conv2d(2 * dim, 4 * dim, KERNEL_SIZE, 2, padding=1, bias=False),
nn.InstanceNorm2d(4 * dim),
nn.LeakyReLU(0.2, True),
)
else:
main = nn.Sequential(
nn.Conv2d(3, dim, KERNEL_SIZE, 2, padding=1, bias=True),
nn.LeakyReLU(0.2, True),
nn.Conv2d(dim, 2 * dim, KERNEL_SIZE, 2, padding=1, bias=True),
nn.LeakyReLU(0.2, True),
nn.Conv2d(2 * dim, 4 * dim, KERNEL_SIZE, 2, padding=1, bias=True),
nn.LeakyReLU(0.2, True),
)
self.dim = dim
self.main = main
self.linear = nn.Linear(4*4*4*dim, INPUT_DIM)
def forward(self, input):
output = self.main(input)
before_linear = output.view(-1, 4 * 4 * 4 * self.dim)
output = self.linear(before_linear)
return output
def cramer_loss(net_real, independent_encoded):
    """Critic surrogate f from the Cramer GAN paper (Bellemare et al., 2017)."""
    return torch.norm(net_real - independent_encoded, p=2, dim=-1) - \
        torch.norm(net_real, p=2, dim=-1)
def critic_schedule():
for i in range(10):
yield 100
while True:
yield CRITIC_ITERS
def gen_schedule():
for i in range(10):
yield 1
for i in range(100):
yield 1
for i in range(7000):
yield 1
while True:
yield 1
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find('Norm') != -1:
if m.weight is not None:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.01)
m.bias.data.fill_(0)
def print_weights(m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
print(m.weight)
if m.bias is not None:
print(m.bias)
def print_grads(m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
print(m.weight.grad)
if m.bias is not None:
print(m.bias.grad)
netG = Generator()
netD = Encoder(CRITIC_DIM)
netG.apply(weights_init)
netD.apply(weights_init)
print(netG)
print(netD)
use_cuda = torch.cuda.is_available()
mse_loss = torch.nn.MSELoss()
if use_cuda:
gpu = 0
# makes things slower?!
torch.backends.cudnn.benchmark = True
if use_cuda:
netD = netD.cuda(gpu)
netG = netG.cuda(gpu)
mse_loss = mse_loss.cuda(gpu)
one = torch.FloatTensor([1])
mone = one * -1
if use_cuda:
one = one.cuda(gpu)
mone = mone.cuda(gpu)
optimizerD = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.9))
optimizerG = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.9))
netG.train()
netD.train()
def calc_gradient_penalty(netD, netG, real_data, fake_data, encoded):
if AUTOENCODER_GP:
fake_data = netG(encoded) #TODO:investigate
# print "real_data: ", real_data.size(), fake_data.size()
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, 32, 32)
alpha = alpha.cuda(gpu) if use_cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data.data)
if use_cuda:
interpolates = interpolates.cuda(gpu)
interpolates = autograd.Variable(interpolates, requires_grad=True)
# TODO: clashes with autoencoder_gp?
disc_interpolates = cramer_loss(netD(interpolates), encoded)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
if ONE_SIDED:
gradient_penalty = (F.relu(gradients.norm(2, dim=1) - 1, inplace=True) ** 2).mean() * LAMBDA
else:
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
# For generating samples
def generate_image(frame, netG, input):
noisev = autograd.Variable(input, volatile=True)
netG.eval()
samples = netG(noisev)
netG.train()
save_images(samples, RUN_PATH + 'samples_{}.jpg'.format(frame))
def save_images(images_tensor, output_path):
samples = images_tensor.view(-1, 3, 32, 32)
samples = samples.mul(0.5).add(0.5)
samples = samples.cpu().data.numpy()
lib.save_images.save_images(samples, output_path)
# For calculating inception score
def get_inception_score(G):
    all_samples = []
    for i in range(10):
samples_100 = torch.randn(100, INPUT_DIM)
if use_cuda:
samples_100 = samples_100.cuda(gpu)
samples_100 = autograd.Variable(samples_100, volatile=True)
all_samples.append(G(samples_100).cpu().data.numpy())
all_samples = np.concatenate(all_samples, axis=0)
all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32')
all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1)
return lib.inception_score.get_inception_score(list(all_samples))
# Dataset iterator
train_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR, cuda=use_cuda)
def inf_train_gen():
while True:
for images in train_gen():
# yield images.astype('float32').reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1)
yield images
gen = inf_train_gen()
#preprocess = torchvision.transforms.Compose([
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
# ])
preprocess = torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
CRITIC_GEN = critic_schedule()
GEN_ITERS = gen_schedule()
noise = torch.randn(BATCH_SIZE, INPUT_DIM)
noise_independent = torch.randn(BATCH_SIZE, INPUT_DIM)
if use_cuda:
noise = noise.cuda(gpu)
noise_independent = noise_independent.cuda(gpu)
for iteration in tqdm(range(ITERS)):
start_time = time.time()
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters(): # reset requires_grad
p.requires_grad = False # they are set to False below in netG update
#for i in range(CRITIC_ITERS):
netG.eval()
netD.train()
for i in range(next(CRITIC_GEN)):
_data = next(gen)
netD.zero_grad()
noise.normal_(0, 1)
noise_independent.normal_(0, 1)
noisev = autograd.Variable(noise, volatile=True)
noisev_independent = autograd.Variable(noise_independent, volatile=True)
# Generate two independent fake batches
fake = autograd.Variable(netG(noisev).data)
fake_independent = autograd.Variable(netG(noisev_independent).data)
# train with real
_data = _data.view((BATCH_SIZE, 3, 32, 32))
real_data = _data # preprocess(_data)#torch.stack([preprocess(item) for item in _data])
#if use_cuda:
# real_data = real_data.cuda(gpu)
real_data_v = autograd.Variable(real_data)
# import torchvision
# filename = os.path.join("test_train_data", str(iteration) + str(i) + ".jpg")
# torchvision.utils.save_image(real_data, filename)
encoded_independent = netD(fake_independent)
encoded_real = netD(real_data_v)
D_real = cramer_loss(encoded_real, encoded_independent)
encoded_fake = netD(fake)
D_fake = cramer_loss(encoded_fake, encoded_independent)
#print(D_real, D_fake)
loss = (D_fake - D_real).mean()
#netD.apply(print_weights)
#print(fake)
if CONSTANCY_LOSS:
c_loss = CONSTANCY_LAMBDA * mse_loss(encoded_fake, autograd.Variable(noise))
loss += c_loss
# train with gradient penalty
gradient_penalty = calc_gradient_penalty(netD, netG, real_data_v.data, fake, encoded_real)
loss += gradient_penalty
loss.backward()
# print "gradien_penalty: ", gradient_penalty
D_cost = loss.data
# TODO: D_cost = loss.data[0]
Wasserstein_D = (D_real - D_fake).data.mean()
optimizerD.step()
############################
# (2) Update G network
###########################
netG.train()
#netD.eval() # screws up cuda?
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for i in range(next(GEN_ITERS)):
netG.zero_grad()
_data = next(gen)
real = autograd.Variable(_data.view((BATCH_SIZE, 3, 32, 32)))
#if use_cuda:
# real = real.cuda()
noise.normal_(0, 1)
noise_independent.normal_(0, 1)
noisev1 = autograd.Variable(noise)
noisev2 = autograd.Variable(noise_independent)
fake1 = netG(noisev1)
fake2 = netG(noisev2)
real_encoded = netD(real)
fake1_encoded = netD(fake1)
fake2_encoded = netD(fake2)
G = (torch.norm(real_encoded - fake1_encoded, p=2, dim=-1) +
torch.norm(real_encoded - fake2_encoded, p=2, dim=-1) -
torch.norm(fake1_encoded - fake2_encoded, p=2, dim=-1)).mean()
if GENERATOR_SCORE_LOSS or GENERATOR_AUTOENCODER_LOSS:
real_data_v = autograd.Variable(next(gen).view((BATCH_SIZE, 3, 32, 32)), volatile=True)
#if use_cuda:
# real_data_v = real_data_v.cuda()
real_latent = netD(real_data_v)
real_latent = autograd.Variable(real_latent.data)
reconstructed = netG(autograd.Variable(real_latent.data))
if GENERATOR_AUTOENCODER_LOSS:
gen_ae_loss = mse_loss(reconstructed, real_data_v)
G += GENERATOR_AUTOENCODER_LAMBDA * gen_ae_loss
if GENERATOR_SCORE_LOSS:
gen_rec_loss = ((real_latent - netD(reconstructed))**2).mean()
G += GENERATOR_SCORE_LAMBDA * gen_rec_loss
G.backward()
G_cost = G.data
optimizerG.step()
# Write logs and save samples
lib.plot.plot(RUN_PATH + 'train disc cost', D_cost.cpu().numpy())
lib.plot.plot(RUN_PATH + 'time', time.time() - start_time)
lib.plot.plot(RUN_PATH + 'train gen cost', G_cost.cpu().numpy())
lib.plot.plot(RUN_PATH + 'wasserstein distance', Wasserstein_D)
# Calculate inception score every 1K iters
if False and iteration % 1000 == 999:
inception_score = get_inception_score(netG)
lib.plot.plot(RUN_PATH + 'inception score', inception_score[0])
# Calculate dev loss and generate samples every 200 iters
if iteration % 200 == 199:
dev_disc_costs = []
#TODO:
netD.eval()
for images in dev_gen():
images = images.view((BATCH_SIZE, 3, 32, 32))
imgs = images#preprocess(images)
#imgs = preprocess(images)
#if use_cuda:
# imgs = imgs.cuda(gpu)
imgs_v = autograd.Variable(imgs, volatile=True)
D = netD(imgs_v)
_dev_disc_cost = -D.mean().cpu().data.numpy()
dev_disc_costs.append(_dev_disc_cost)
netD.train()
lib.plot.plot(RUN_PATH + 'dev disc cost', np.mean(dev_disc_costs))
fixed_noise_128 = torch.randn(128, INPUT_DIM)
if use_cuda:
fixed_noise_128 = fixed_noise_128.cuda(gpu)
generate_image(iteration, netG, fixed_noise_128)
generate_image("{}_reconstruct".format(iteration), netG, D.data)
save_images(imgs_v, RUN_PATH + 'samples_{}_original.jpg'.format(iteration))
#print(encoded)
#print(fixed_noise_128)
# Save logs every 200 iters
if (iteration < 5) or (iteration % 100 == 99):
lib.plot.flush()
lib.plot.tick()
state_dict = {
'iters': iteration + 1,
'algo_params': params,
'gen_state_dict': netG.state_dict(),
'critic_state_dict': netD.state_dict(),
'optimizerG' : optimizerG.state_dict(),
'optimizerD' : optimizerD.state_dict(),
}
torch.save(state_dict, RUN_PATH + 'final.pth.tar')
|
nilq/baby-python
|
python
|
# @Author: Anas Mazouni <Stormix>
# @Date: 2017-05-17T23:59:31+01:00
# @Email: madadj4@gmail.com
# @Project: PluralSight Scraper V1.0
# @Last modified by: Stormix
# @Last modified time: 2017-05-18T17:08:22+01:00
import selenium as sl
import os,time,inspect
from sys import platform
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import config
from slugify import slugify
from clint.textui import progress
import requests
class PluralCourse:
"""
Course Class.
"""
link = ""
title = ""
browser = ""
delay = 3
Username = config.Username
Password = config.Password
output = "Download" #output folder
def __init__(self,link):
self.link = link
def launchBrowser(self):
assert not self.browser, "Browser already set !"
# Initiate the Browser webdriver
currentfolder = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
# Check which operating system is being used !
if platform == "linux" or platform == "linux2":
# linux
chrome_driver = currentfolder+"/chromedriver"
elif platform == "win32":
# Windows
chrome_driver = currentfolder+"/chromedriver.exe"
self.browser = webdriver.Chrome(chrome_driver)
Browser = self.browser
Website = self.link
# Open Pronote
Browser.get(Website)
print("Browser Initiated !")
print("Loading .. " + Website, end =' ')
time.sleep(self.delay)
print(u'\u2713')
def checkLoginAlert(self):
try:
self.browser.find_element_by_css_selector(".ps-button-primary-md.mr-lg")
except NoSuchElementException:
return False
return True
    def pausePlayback(self):
        body = self.browser.find_element_by_css_selector("body")
        body.send_keys(Keys.SPACE)
def login(self):
assert self.checkLoginAlert(), "Already logged in !"
loginButton = self.browser.find_element_by_css_selector(".ps-button-primary-md.mr-lg")
# Go to login page
loginButton.click()
# Define the login form
Browser = self.browser
usernameInput = "Username"
passwordInput = "Password"
LoginButtonClass = ".button.primary"
# Fill in the login form
username_log = Browser.find_element_by_id(usernameInput)
password_log = Browser.find_element_by_id(passwordInput)
username_log.send_keys(self.Username)
password_log.send_keys(self.Password)
        # Click the connect button
print("Logging in ...",end=" ")
Browser.find_element_by_css_selector(LoginButtonClass).click()
time.sleep(self.delay)
self.pausePlayback()
print(u'\u2713')
def downloadEpisodes(self):
#Create output folder
self.createDir(self.output)
titlesClass = ".m-0.p-0.ps-color-white.ps-type-sm.ps-type-weight-medium"
moduleClass = ".module"
episodesListClass = ".clips.m-0.p-0"
modules = {}
modulesSections = [elt.click() for elt in self.browser.find_elements_by_css_selector(moduleClass)] # Click all sections
ModuleTitles = [element.text for element in self.browser.find_elements_by_css_selector(titlesClass)] # Looping through each title
#Fetching the modules episodes lists
Modules = self.browser.find_elements_by_css_selector(episodesListClass)
for i in range(len(ModuleTitles)):
#Create output folder
self.createDir(self.output+"/"+slugify(ModuleTitles[i]))
#For each list items(li) in the each list(ul) ,Get the titles (h3)
ModuleEpisodesList = [elt.find_element_by_tag_name('h3').text for elt in [elt for elt in Modules[i].find_elements_by_tag_name('li')]]
for j in range(len(ModuleEpisodesList)):
self.createDir(self.output+"/"+slugify(ModuleTitles[i])+"/"+slugify(ModuleEpisodesList[j]))
                # Get the episode element
self.browser.find_element_by_xpath("//*[contains(text(), '"+ModuleEpisodesList[j]+"')]").click()
time.sleep(self.delay*1.5)
self.pausePlayback()
print("Downloading : ",slugify(ModuleEpisodesList[j])+".mp4")
path =self.output+"/"+slugify(ModuleTitles[i])+"/"+slugify(ModuleEpisodesList[j])+"/"+slugify(ModuleEpisodesList[j])+".mp4"
if not os.path.exists(path):
self.download(self.getVideoLink(),path)
else:
print("Already downloaded ... skipping \n")
# Store the module title and episodes list
modules[ModuleTitles[i].replace(" ", "_")] = ModuleEpisodesList
return modules
def getVideoLink(self):
video_elt = self.browser.find_element_by_tag_name('video')
link = video_elt.get_attribute("src")
return link
def createDir(self,Dir):
if not os.path.exists(Dir):
os.makedirs(Dir)
print("<"+Dir+"> folder created !")
def download(self,url,path):
r = requests.get(url, stream=True)
with open(path, 'wb') as f:
total_length = int(r.headers.get('content-length'))
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
if chunk:
f.write(chunk)
f.flush()
|
nilq/baby-python
|
python
|
''' Learning rate schedulers. '''
import json
import torch
import torch.optim.lr_scheduler as lr_sched
from typing import Any
from cosine_scheduler import CosineLRWithRestarts
def step(optimizer, last_epoch, step_size=10, gamma=0.1, **_) -> Any:
return lr_sched.StepLR(optimizer, step_size=step_size, gamma=gamma,
last_epoch=last_epoch)
def multi_step(optimizer, last_epoch, milestones=[500, 5000], gamma=0.1, **_) -> Any:
if isinstance(milestones, str):
milestones = json.loads(milestones)
return lr_sched.MultiStepLR(optimizer, milestones=milestones, gamma=gamma,
last_epoch=last_epoch)
def exponential(optimizer, last_epoch, gamma=0.995, **_) -> Any:
return lr_sched.ExponentialLR(optimizer, gamma=gamma, last_epoch=last_epoch)
def none(optimizer, last_epoch, **_) -> Any:
return lr_sched.StepLR(optimizer, step_size=10000000, last_epoch=last_epoch)
def reduce_lr_on_plateau(optimizer, last_epoch, mode='max', factor=0.1,
patience=10, threshold=0.0001, threshold_mode='rel',
cooldown=0, min_lr=0, **_) -> Any:
return lr_sched.ReduceLROnPlateau(optimizer, mode=mode, factor=factor,
patience=patience, threshold=threshold,
threshold_mode=threshold_mode,
cooldown=cooldown, min_lr=min_lr)
def cyclic_lr(optimizer, last_epoch, base_lr=0.001, max_lr=0.01,
step_size_up=2000, step_size_down=None, mode='triangular',
gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=False,
base_momentum=0.8, max_momentum=0.9, coeff=1, **_) -> Any:
def exp_range_scale_fn(x):
res = gamma ** (x - 1)
return res
return lr_sched.CyclicLR(optimizer, base_lr=base_lr*coeff, max_lr=max_lr*coeff,
step_size_up=step_size_up, step_size_down=
step_size_down, mode=mode, scale_fn=exp_range_scale_fn,
scale_mode=scale_mode, cycle_momentum=
cycle_momentum, base_momentum=base_momentum,
max_momentum=max_momentum, last_epoch=last_epoch)
def get_scheduler(config, optimizer, last_epoch=-1, coeff=1):
func = globals().get(config.name)
return func(optimizer, last_epoch, coeff=coeff, **config.params)
def is_scheduler_continuous(scheduler) -> bool:
    # Compare numeric (major, minor) components; a naive string comparison
    # can misorder versions such as '1.10' vs '1.2'.
    version = tuple(int(p) for p in torch.__version__.split('+')[0].split('.')[:2])
    if version >= (1, 1):
        return type(scheduler) in [lr_sched.ExponentialLR, lr_sched.CosineAnnealingLR,
                                   lr_sched.CyclicLR, CosineLRWithRestarts]
    else:
        return type(scheduler) in [lr_sched.ExponentialLR, lr_sched.CosineAnnealingLR,
                                   CosineLRWithRestarts]
def get_warmup_scheduler(config, optimizer) -> Any:
return lr_sched.CyclicLR(optimizer, base_lr=0, max_lr=config.train.warmup.max_lr,
step_size_up=config.train.warmup.steps, step_size_down=0,
cycle_momentum=False, mode='triangular')
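# Hedged usage sketch (not part of the original module): config.name selects
# one of the factory functions above by name and config.params is splatted
# into it. The SimpleNamespace config shape and the SGD optimizer are
# assumptions for illustration.
#
# from types import SimpleNamespace
# import torch.optim as optim
#
# optimizer = optim.SGD(model.parameters(), lr=0.1)
# config = SimpleNamespace(name='multi_step',
#                          params={'milestones': [30, 80], 'gamma': 0.1})
# scheduler = get_scheduler(config, optimizer)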
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: serializers.py
@time: 2019-04-30 12:23
"""
from rest_framework import serializers
from snippets.models import Snippet
from dicproj.models import Dic, CsvFile
class SnippetSerializer(serializers.ModelSerializer):
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style')
class DicSerializer(serializers.ModelSerializer):
class Meta:
model = Dic
fields = ('code', 'name')
class CsvFileSerializer(serializers.ModelSerializer):
class Meta:
model = CsvFile
fields = '__all__'
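# Hedged usage sketch (not part of the original module): the standard DRF
# pattern for serializing a model instance to primitive data.
#
# dic = Dic.objects.first()
# data = DicSerializer(dic).data  # e.g. {'code': ..., 'name': ...}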
|
nilq/baby-python
|
python
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
import app.core.patch
# The chosen approach has pros and cons. On the plus side, it reuses Django's
# authentication system, so most of the machinery comes for free. Each entity
# that can log in acts as a user "profile", holding information beyond the
# basic data used to authenticate the user.
# Additionally, every time a user is created, whether through registration or
# the admin, its associated profiles are created. (Here comes the downside:
# creating one user creates two profiles, one Desocupado and one Empresa, so
# there is always one unused profile, since an unemployed person is not a
# company, leaving empty records scattered around.)
# On the other hand, a user can be asked whether or not it is a Desocupado,
# or whether or not it is an Empresa, and asked for its "profile", which
# returns either an Empresa or a Desocupado, depending on what was loaded.
class Desocupado(models.Model):
    # Entities that can log in need this extra field.
    # They then act as a user's profile, storing data beyond what a
    # traditional Django user keeps.
user = models.OneToOneField(User, on_delete=models.CASCADE)
    # The remaining fields are whatever I want the profile to hold. Note that
    # some, such as first name, last name, or email, already exist on the
    # Django user, but they can happily be duplicated here.
nombre = models.CharField(max_length=20)
apellido = models.CharField(max_length=20)
fecha_nacimiento = models.DateField(null=True)
localidad = models.CharField(max_length=20,null=True)
estado_ocupacion = models.BooleanField(default=False)
experiencia_laboral = models.TextField(null=True)
formacion = models.TextField(null=True)
habilidades = models.TextField(null=True)
trabajo_realizable = models.CharField(max_length=50, null=True)
dni = models.CharField(max_length=10, null=True)
    # Text representation, i.e. how it shows up in the admin
def __str__(self):
return "Desocupado: " + str(self.nombre) + " " + str(self.apellido) + " de " + str(self.user.username)
# Whenever a User is created, a Desocupado is created automatically
@receiver(post_save, sender=User)
def update_user_desocupado(sender, instance, created, **kwargs):
if created:
Desocupado.objects.create(user=instance, nombre=instance.first_name, apellido=instance.last_name)
instance.desocupado.save()
class Empresa(models.Model):
    # The company can also log in; same idea as Desocupado
user = models.OneToOneField(User, on_delete=models.CASCADE)
    # The remaining fields
cuit = models.IntegerField(default=0)
razon_social = models.CharField(max_length=50, null=True)
rubro = models.CharField(max_length=30, null=True)
# oferta_laboral = models.ForeignKey('OfertaLaboral')
    # Text representation, i.e. how it shows up in the admin
def __str__(self):
return "Empresa" + str(self.razon_social) + " de " + str(self.user.username)
#class EliminarUsuario(models.Model):
# username = models.CharField(max_length=50)
# Whenever a User is created, an Empresa is created automatically
@receiver(post_save, sender=User)
def update_user_empresa(sender, instance, created, **kwargs):
if created:
Empresa.objects.create(user=instance)
instance.empresa.save()
class Oferta(models.Model):
cargo = models.CharField(max_length=200)
trabajo = models.CharField(max_length=200)
horarios = models.CharField(max_length=200)
profesion = models.CharField(max_length=200)
    empresa = models.ForeignKey('core.Empresa')
    def __str__(self):
        # Oferta defines no `nombre` field; `cargo` is the closest human-readable label
        return self.cargo
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" run_file2db is a tool to migrate a labeled dataset in a pickle file to a
mongo db.
It must be invoked using
python run_file2db.py <project_folder>
Created on Dec, 2016
@autor: Jesus Cid.
"""
import ast
import time
import sys
import os
import ipdb
# Local imports
from labelfactory.ConfigCfg import ConfigCfg as Cfg
from labelfactory.Log import Log
from labelfactory.labeling.datamanager import DataManager
CF_FNAME = "config.cf"
CF_DEFAULT_PATH = "./config.cf.default"
def main():
# To complete the migration to python 3, I should replace all "raw_input"
# by "input". Transitorily, to preserve compatibility with python 2, I
# simply rename inut to raw_input
if sys.version_info.major == 3:
raw_input2 = input
else:
raw_input2 = raw_input
#######
# Start
# Check if project folder exists. Otherwise exit.
if len(sys.argv) > 1:
project_path = sys.argv[1]
else:
project_path = raw_input2("Select the (absolute or relative) path to" +
" the labeling project folder: ")
if not project_path.endswith('/'):
project_path = project_path + '/'
# Check if project folder exists. This is necessary to follow
if not os.path.isdir(project_path):
sys.exit("Project folder does not exist")
#########################
# Read configuration data
# Check if configuration file existe
config_path = project_path + CF_FNAME
if not os.path.isfile(config_path):
sys.exit("Configuration file does not exist")
# Read data from the configuation file
cf = Cfg(config_path)
# Data source and destination (options: file, mongodb)
source_type = 'file'
dest_type = 'mongodb'
# Mongo DB settings
db_info = {'name': cf.get('DataPaths', 'db_name'),
'hostname': cf.get('DataPaths', 'db_hostname'),
'user': cf.get('DataPaths', 'db_user'),
'pwd': cf.get('DataPaths', 'db_pwd'),
'label_coll_name': cf.get('DataPaths', 'db_label_coll_name'),
'history_coll_name': cf.get('DataPaths',
'db_history_coll_name'),
'port': cf.get('DataPaths', 'db_port'),
'mode': cf.get('DataPaths', 'db_mode'),
'file2db_mode': cf.get('DataPaths', 'db_file2db_mode'),
'db2file_mode': cf.get('DataPaths', 'db_db2file_mode'),
}
# Folder containing the urls to label
file_info = {'project_path': project_path,
'input_folder': cf.get('DataPaths', 'input_folder'),
'output_folder': cf.get('DataPaths', 'output_folder'),
'used_folder': cf.get('DataPaths', 'used_folder'),
'dataset_fname': cf.get('DataPaths', 'dataset_fname'),
'labelhistory_fname': cf.get(
'DataPaths', 'labelhistory_fname'),
'labels_endname': cf.get('DataPaths', 'labels_endname'),
'preds_endname': cf.get('DataPaths', 'preds_endname'),
'urls_fname': cf.get('DataPaths', 'urls_fname')}
# Type of wid: if 'yes', the wid is computed as a transformed url.
# if 'no', the wid is taken equal to the url.
compute_wid = cf.get('Labeler', 'compute_wid')
# List of categories to label.
categories = ast.literal_eval(cf.get('Labeler', 'categories'))
parentcat = ast.literal_eval(cf.get('Labeler', 'parentcat'))
# Possible labels for each category
yes_label = cf.get('Labeler', 'yes_label')
no_label = cf.get('Labeler', 'no_label')
unknown_label = cf.get('Labeler', 'unknown_label')
error_label = cf.get('Labeler', 'error_label')
alphabet = {'yes': yes_label, 'no': no_label, 'unknown': unknown_label,
'error': error_label}
# In multiclass cases, the reference class is the class used by the active
# learning algorithm to compute the sample scores.
ref_class = cf.get('ActiveLearning', 'ref_class')
##########
# Log file
# Create the log object
log = Log(project_path + 'log')
log.info('*****************************')
log.info('****** WEB LABELER: *********')
#####################
# Create main objects
# Data manager object
data_mgr = DataManager(source_type, dest_type, file_info, db_info,
categories, parentcat, ref_class, alphabet,
compute_wid)
##############
# Read dataset
# Load data from the standard dataset.
log.info('Carga de datos')
df_labels, df_preds, labelhistory = data_mgr.loadData(source_type)
###############
# Migrate to DB
# Save data and label history into db
log.info("-- Saving data in mongodb")
start = time.clock()
data_mgr.migrate2DB(df_labels)
log.info(str(time.clock() - start) + ' seconds')
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from django.contrib.auth.models import User
from django.db import models
import datetime as dt
from tinymce.models import HTMLField
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
@receiver(post_save, sender=User)
def create_user_business(sender, instance, created, **kwargs):
    if created:
        Business.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_business(sender, instance, **kwargs):
    instance.business.save()
class NeighbourHood(models.Model):
neighbourhood_name = models.CharField(max_length =60)
neighbourhood_location = models.CharField(max_length =250)
population_count = models.IntegerField(null=True)
admin = models.ForeignKey(User)
def __str__(self):
return self.neighbourhood_name
def save_neighbourhood(self):
self.save()
def delete_neighborhood(self):
self.delete()
@classmethod
def search_neighbourhood(cls,search_term):
        neighbourhood = cls.objects.filter(neighbourhood_name__icontains = search_term)
return neighbourhood
class Profile(models.Model):
profile_photo = models.ImageField(upload_to='images/')
bio = models.CharField(max_length=300)
user = models.OneToOneField(User)
location = models.ForeignKey(NeighbourHood, null=True)
email = models.EmailField(null = True)
def __str__(self):
return self.email
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Business(models.Model):
business_logo = models.ImageField(upload_to='images/')
business_moto = models.CharField(max_length=300)
user = models.OneToOneField(User)
hood = models.ForeignKey(NeighbourHood, null=True)
email = models.EmailField(null = True)
def __str__(self):
return self.email
def save_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def search_business(cls,search_term):
        business = cls.objects.filter(business_moto__icontains = search_term)  # Business has no `name` field
return business
class JoinHood(models.Model):
user_id = models.OneToOneField(User)
hood_id = models.ForeignKey(NeighbourHood)
    def __str__(self):
        return self.user_id.username
class Allert(models.Model):
title = models.CharField(max_length=300)
body = models.TextField()
user = models.ForeignKey(User)
hood = models.ForeignKey(NeighbourHood)
def __str__(self):
return self.title
def save_allert(self):
self.save()
def delete_allert(self):
self.delete()
class Comment(models.Model):
comment = models.CharField(max_length=500)
user = models.ForeignKey(User)
post = models.ForeignKey(Allert)
def __str__(self):
return self.comment
def save_comment(self):
self.save()
def delete_comment(self):
self.delete()
|
nilq/baby-python
|
python
|
"""
This code is based on these codebases associated with Yuta Saito's research.
- Unbiased Recommender Learning from Missing-Not-At-Random Implicit Feedback: https://github.com/usaito/unbiased-implicit-rec-real
- Unbiased Pairwise Learning from Biased Implicit Feedback: https://github.com/usaito/unbiased-pairwise-rec
- Asymmetric Tri-training for Debiasing Missing-Not-At-Random Explicit Feedback: https://github.com/usaito/asymmetric-tri-rec-real
"""
from typing import Optional
import numpy as np
# Set a lower bound of a propensity score
eps = 1e-3
def dcg_at_k(y_true: np.ndarray, y_score: np.ndarray,
k: int, pscore: Optional[np.ndarray] = None) -> float:
"""Calculate a DCG score for a given user"""
y_true_sorted_by_score = y_true[y_score.argsort()[::-1]]
# If propensity score is provided, put high weight on records whose propensity score is low for unbiased evaluation
# Otherwise, we evaluate each record evenly by setting all propensity scores as 1
if pscore is not None:
pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps)
else:
pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score)
dcg_score = 0.0
final_score = 0.0
k = k if y_true.shape[0] >= k else y_true.shape[0]
if not np.sum(y_true_sorted_by_score) == 0:
dcg_score += y_true_sorted_by_score[0] / pscore_sorted_by_score[0]
for i in np.arange(1, k):
dcg_score += y_true_sorted_by_score[i] / (pscore_sorted_by_score[i] * np.log2(i + 1))
final_score = dcg_score / np.sum(y_true_sorted_by_score) if pscore is None \
else dcg_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0])
return final_score
def recall_at_k(y_true: np.ndarray, y_score: np.ndarray,
k: int, pscore: Optional[np.ndarray] = None) -> float:
"""Calculate a recall score for a given user"""
    # Sort records in descending order by prediction score
y_true_sorted_by_score = y_true[y_score.argsort()[::-1]]
# If propensity score is provided, put high weight on records whose propensity score is low for unbiased evaluation
# Otherwise, we evaluate each record evenly by setting all propensity scores as 1
if pscore is not None:
pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps)
else:
pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score)
final_score = 0.
k = k if y_true.shape[0] >= k else y_true.shape[0]
if not np.sum(y_true_sorted_by_score) == 0:
recall_score = np.sum(y_true_sorted_by_score[:k] / pscore_sorted_by_score[:k])
final_score = recall_score / np.sum(y_true_sorted_by_score) if pscore is None \
else recall_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0])
return final_score
def average_precision_at_k(y_true: np.ndarray, y_score: np.ndarray,
k: int, pscore: Optional[np.ndarray] = None) -> float:
"""Calculate a average precision for a given user"""
y_true_sorted_by_score = y_true[y_score.argsort()[::-1]]
# If propensity score is provided, put high weight on records whose propensity score is low for unbiased evaluation
# Otherwise, we evaluate each record evenly by setting all propensity scores as 1
if pscore is not None:
pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps)
else:
pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score)
average_precision_score = 0.0
final_score = 0.0
k = k if y_true.shape[0] >= k else y_true.shape[0]
if not np.sum(y_true_sorted_by_score) == 0:
for i in np.arange(k):
if y_true_sorted_by_score[i] > 0:
score_ = np.sum(y_true_sorted_by_score[:i + 1] / pscore_sorted_by_score[:i + 1]) / (i + 1)
average_precision_score += score_
final_score = average_precision_score / np.sum(y_true_sorted_by_score) if pscore is None \
else average_precision_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0])
return final_score
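# Hedged usage sketch (not part of the original module): score one user's
# ranking with the metrics above. The toy relevance and prediction arrays
# are made up for illustration.
if __name__ == '__main__':
    y_true = np.array([1, 0, 0, 1, 0])             # observed relevance
    y_score = np.array([0.9, 0.8, 0.1, 0.7, 0.3])  # model prediction scores
    print(dcg_at_k(y_true, y_score, k=3))
    print(recall_at_k(y_true, y_score, k=3))
    print(average_precision_at_k(y_true, y_score, k=3))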
|
nilq/baby-python
|
python
|
def is_super(connection):
    """Return True if the current MySQL user has the SUPER privilege."""
    with connection.cursor() as cursor:
        cursor.execute('show grants for current_user()')
        query_result = cursor.fetchone()
        # fetchone() returns a row tuple; the grant text is its first column,
        # so test substring membership there rather than tuple membership.
        return query_result is not None and 'SUPER' in query_result[0]
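# Hedged usage sketch (not from the original module), assuming a PyMySQL-style
# connection whose cursor supports the context-manager protocol:
#
# import pymysql
#
# connection = pymysql.connect(host='localhost', user='root', password='...')
# if is_super(connection):
#     print('current user has the SUPER privilege')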
|
nilq/baby-python
|
python
|
from pixiedust.display.app import *
@PixieApp
class TestEntity():
@route()
def main_screen(self):
return """
<h1><center>Simple PixieApp with dynamically computed dataframe</center></h1>
<div pd_entity="compute_pdf('prefix')" pd_options="handlerId=dataframe" pd_render_onload></div>
"""
test = TestEntity()
test.run()
|
nilq/baby-python
|
python
|
# --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import pytest
from nebula2.graph import ttypes
from tests.common.nebula_test_suite import NebulaTestSuite
class TestSetQuery(NebulaTestSuite):
@classmethod
def prepare(self):
self.use_nba()
def test_union_all(self):
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", 1997, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL GO FROM "Manu Ginobili" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
        column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
        self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", 1997, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"],
["Manu Ginobili", 2002, "Spurs"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst AS id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Tim Duncan" OVER like YIELD like._dst AS id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL (GO FROM "Tony Parker" OVER like YIELD like._dst AS id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name)'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", 1997, "Spurs"],
["LaMarcus Aldridge", 2015, "Spurs"],
["LaMarcus Aldridge", 2006, "Trail Blazers"],
["Manu Ginobili", 2002, "Spurs"],
["Tim Duncan", 1997, "Spurs"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL GO FROM "Tony Parker" OVER like YIELD like._dst AS id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", 1997, "Spurs"],
["LaMarcus Aldridge", 2015, "Spurs"],
["LaMarcus Aldridge", 2006, "Trail Blazers"],
["Manu Ginobili", 2002, "Spurs"],
["Tim Duncan", 1997, "Spurs"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst AS id \
UNION ALL GO FROM "Tony Parker" OVER like YIELD like._dst AS id) \
| GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"],
["LaMarcus Aldridge", 2015, "Spurs"],
["LaMarcus Aldridge", 2006, "Trail Blazers"],
["Manu Ginobili", 2002, "Spurs"],
["Tim Duncan", 1997, "Spurs"]]
# self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name as name, $$.team.name as player \
UNION ALL \
GO FROM "Tony Parker" OVER serve \
YIELD $^.player.name as name, serve.start_year as player'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["name", "player"]
self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", "Spurs"], ["Tony Parker", 1999],
["Tony Parker", 2018]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name as name, $$.team.name as player \
UNION ALL \
GO FROM "Tony Parker" OVER serve \
YIELD $^.player.name as name, serve.start_year'''
resp = self.execute_query(stmt)
self.check_resp_failed(resp)
# column_names = ["name", "player"]
# self.check_column_names(resp, column_names)
# expected_data = [["Tim Duncan", "Spurs"], ["Tony Parker", "1999"],
# ["Tony Parker", "2018"]]
# self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Nobody" OVER serve YIELD $^.player.name AS player, serve.start_year AS start \
UNION ALL \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name AS player, serve.start_year AS start'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["player", "start"]
self.check_column_names(resp, column_names)
expected_data = [["Tony Parker", 1999], ["Tony Parker", 2018]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''GO FROM "Nobody" OVER serve YIELD $^.player.name AS player, serve.start_year AS start \
UNION ALL \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year'''
resp = self.execute_query(stmt)
self.check_resp_failed(resp)
# column_names = ["player", "start"]
# self.check_column_names(resp, column_names)
# expected_data = [["Tony Parker", 1999], ["Tony Parker", 2018]]
# self.check_out_of_order_result(resp, expected_data)
def test_union_distinct(self):
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
UNION \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION \
GO FROM "Manu Ginobili" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
UNION DISTINCT \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
def test_minus(self):
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
MINUS \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"]]
self.check_result(resp, expected_data)
def test_intersect(self):
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
INTERSECT \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
def test_mix(self):
stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
MINUS \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION \
GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
INTERSECT \
GO FROM "Manu Ginobili" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$^.player.name", "serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"]]
self.check_result(resp, expected_data)
def test_assign(self):
stmt = '''$var = GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name; \
YIELD $var.*'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", 1997, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''$var = (GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION ALL \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name); \
YIELD $var.*'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tim Duncan", 1997, "Spurs"],
["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
stmt = '''$var = (GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
MINUS \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name; \
YIELD $var.*'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Manu Ginobili", 2002, "Spurs"]]
self.check_result(resp, expected_data)
stmt = '''$var = (GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \
GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \
INTERSECT \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name; \
YIELD $var.*'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = [["Tony Parker", 1999, "Spurs"],
["Tony Parker", 2018, "Hornets"]]
self.check_out_of_order_result(resp, expected_data)
def test_empty_input(self):
stmt = '''GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \
UNION \
GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \
MINUS \
GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \
INTERSECT \
GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["serve.start_year", "$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = []
self.check_result(resp, expected_data)
stmt = '''$var = GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \
UNION \
GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \
MINUS \
GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \
INTERSECT \
GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name; \
YIELD $var.*'''
resp = self.execute_query(stmt)
self.check_resp_succeeded(resp)
column_names = ["$var.serve.start_year", "$var.$$.team.name"]
self.check_column_names(resp, column_names)
expected_data = []
self.check_result(resp, expected_data)
def test_syntax_error(self):
stmt = '''GO FROM "123" OVER like \
YIELD like._src as src, like._dst as dst \
| (GO FROM $-.src OVER serve \
UNION GO FROM $-.dst OVER serve)'''
resp = self.execute_query(stmt)
self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR)
def test_execution_error(self):
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \
UNION \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name1, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR)
stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year \
UNION \
GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name'''
resp = self.execute_query(stmt)
self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR)
|
nilq/baby-python
|
python
|
from os import environ
from .app_settings import *
SECRET_KEY=environ.get('SECRET_KEY')
STATIC_ROOT=environ.get('STATIC_ROOT')
ALLOWED_HOSTS = list(environ.get('ALLOWED_HOSTS', default='').split(','))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': environ.get('DB_NAME'),
'HOST': '',
}
}
DEBUG = False
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 63072000
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-23 08:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0020_old_tree_cleanup'),
('articles', '0002_category_placeholder'),
]
operations = [
migrations.CreateModel(
name='CategoryPluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='articles_categorypluginmodel', serialize=False, to='cms.CMSPlugin')),
('number_to_show', models.IntegerField(choices=[(3, '3'), (6, '6'), (9, '9'), (12, '12')], default=6)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.AlterModelOptions(
name='article',
options={'verbose_name': 'Artikel', 'verbose_name_plural': 'Artikel'},
),
]
|
nilq/baby-python
|
python
|
class Solution:
# Solution using Manacher's Algorithm
@staticmethod
def longest_palindromic(s: str) -> str:
if(type(s) != str):
raise ValueError(f"{type(s)} not allowed only string type is allowed")
def adjust_string(s: str) -> str: # method to adjust the string
list_from_s = list(s.strip()) # Create List From {s}
modified_s = "#".join(list_from_s) # Modified {s} By adding Hash After every Char in list
return "#" + modified_s + "#" # return new {s} like : #a#b#b#a#
if(len(s)<=1): # Check is {s} Empty or has length equal 1
return s;
s = adjust_string(s) # Get new {s} adjusted from {adjust_string} method
max_length = 0 # Variable indicate to maximum palindromic length in the string
index = 0 # Variable indicate to the index of CENTER of the palindromic
P = [0] * len(s) # Create Array with length equal to new {s} length and fill it zeros
center = right_boundary = 0 # center and right_boundary variables that indicates to first index
for i in range(0, len(s)): # main loop over the adjusted {s}, from the first to the last character
mirror = 2*center - i # mirror Variable indicate to the mirror index of current string ex: aczbzca the mirror of z is z
if(i < right_boundary): # check if i lower than right_boundary
P[i]= min(right_boundary-i,P[mirror]) # fill the location P[i] minimum value of { right_boundary - i } or value of the P[mirror]
right = i + (P[i]+1) # right Variable is expanding to the right side
left = i - (P[i]+1) # left Variable is expanding to the left side
while(left >= 0 and right < len(s) and s[right] == s[left]): # expand while the characters on both sides match, growing P[i]
left-=1
right+=1
P[i]+=1
if(i + P[i] > right_boundary): # check if value of { i + P[i] > right_boundary}
center = i # set {center} equal to {i}
right_boundary = i + P[i] # set {right_boundary} equal to last index in right expantion
if(P[i] > max_length): # set max_length and index
max_length = P[i]
index=i
start_position = index - max_length + 1
end_position = index + max_length
s = "".join(s[start_position:end_position].split("#"))
return s # return the result after delete hashes
list_of_examples = ["babad","cbbd","a","ac"]
for example in list_of_examples:
print(f"Input : {example} , Output : {Solution.longest_palindromic(example)}")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding=utf-8
# ====================================================
#
# File Name : pc_nd_conv_plot.py
# Creation Date : 17-04-2018
# Created By : Min-Ye Zhang
# Contact : stevezhang@pku.edu.cn
#
# ====================================================
from __future__ import print_function
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser
def __check_column_and_target(df, xtarget_column, ytarget_column):
n_columns = len(df.columns)
# Get the column names and the maximum value for each column
# Here the fact that the calculation is more accurate with larger parameter is assumed.
# Not recommended to use for n_columns >= 7
if n_columns >= 7:
raise ValueError(" data columns >= 7 will be crowded and NOT implemented YET. Remove some data.")
if ytarget_column == 0:
i_ytarget = n_columns - 1
else:
try:
assert ytarget_column <= n_columns
assert ytarget_column > 0
except AssertionError:
raise ValueError("Invalid ytarget")
else:
i_ytarget = ytarget_column - 1
if xtarget_column == 0:
i_xtarget = n_columns - 2
else:
try:
assert xtarget_column <= n_columns
assert xtarget_column > 0
except AssertionError:
raise ValueError("Invalid xtarget")
else:
i_xtarget = xtarget_column - 1
para_names = []
for i in range(n_columns):
if i == i_xtarget or i == i_ytarget:
continue
para_names.append(df.columns[i])
para_max = []
for col in para_names:
para_max.append(df[col].max())
x_name = df.columns[i_xtarget]
y_name = df.columns[i_ytarget]
return n_columns, x_name, y_name, para_names, para_max
# ====================================================
def __set_ax_linewidth(subplot_ax, linewidth=4):
for axis in ['top','bottom','left','right']:
subplot_ax.spines[axis].set_linewidth(linewidth)
subplot_ax.tick_params(axis='both', which='major', length=linewidth*2, \
width=linewidth/2, direction='in')
subplot_ax.tick_params(axis='both', which='minor', length=linewidth, \
width=linewidth/2, direction='in')
# ====================================================
def __init_fig_axs(n_columns, para_names, x_name, y_name):
# N-1 graphs are required for N (n>=2) convergence parameters,
# with the left one as the x-axis
if n_columns == 3:
fig, axs = plt.subplots(figsize=(8,8))
axs.set_xlabel(x_name, size=12)
axs.set_ylabel(y_name,size=12)
__set_ax_linewidth(axs, 4)
else:
if n_columns == 4:
fig, axs = plt.subplots(1,2, figsize=(12,8))
axs[0].set_xlabel(x_name, size=12)
axs[1].set_xlabel(x_name, size=12)
axs[0].set_ylabel(y_name, size=12)
if n_columns == 5:
fig, axs = plt.subplots(1,3, figsize=(16,8))
axs[0].set_xlabel(x_name, size=12)
axs[1].set_xlabel(x_name, size=12)
axs[2].set_xlabel(x_name, size=12)
axs[0].set_ylabel(y_name, size=12)
if n_columns == 6:
fig, axs = plt.subplots(2,2, figsize=(12,12))
#axs[:,:].set_xlabel(x_name, size=12)
#axs[].set_xlabel(x_name, size=12)
axs[0,0].set_ylabel(y_name, size=12)
axs[1,0].set_ylabel(y_name, size=12)
axs[1,0].set_xlabel(x_name, size=12)
axs[1,1].set_xlabel(x_name, size=12)
for ax in axs.flatten():
__set_ax_linewidth(ax, 4)
return fig, axs
# ====================================================
def __init_fig_3d_axs(n_columns, para_names, x_name, y_name):
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(12,9))
if n_columns == 3:
axs = fig.add_subplot(111, projection='3d')
axs.set_xlabel(para_names[0], size=12)
axs.set_ylabel(x_name, size=12)
axs.set_zlabel(y_name, size=12)
else:
raise ValueError("plot3d has not been implemented yet for n_columns >3. Delete some columns")
return fig, axs
# ====================================================
def common_nd_conv_plot(df_all, xtarget_column=0, ytarget_column=0, f_plot3d=False, \
figname='', preview=False, imgres=2):
n_columns, x_name, y_name, para_names, para_max = \
__check_column_and_target(df_all, xtarget_column, ytarget_column)
# TODO:
# if 3D plot is required, import necessary 3D plotting modules first
if f_plot3d:
from matplotlib import cm
fig, axs = __init_fig_3d_axs(n_columns, para_names, x_name, y_name)
if n_columns == 3:
p3d = axs.scatter(xs=df_all[para_names[0]], ys=df_all[x_name], zs=df_all[y_name], \
s=100, c=df_all[y_name], cmap=cm.coolwarm, marker='o', \
depthshade=False)
else:
raise ValueError("--plot3d has not been implemented for n_columns !=3. Sorry :(")
else:
# Group the DataFrame by groupby method
df_all_gpb = df_all.groupby(para_names)
fig, axs = __init_fig_axs(n_columns, para_names, x_name, y_name)
if n_columns == 3:
for group in sorted(df_all_gpb.groups.keys()):  # .keys() so this also runs under Python 3
gp_data = df_all_gpb.get_group(group)
x = gp_data.sort_values(by=x_name)[x_name]
y = gp_data.sort_values(by=x_name)[y_name]
axs.plot(x, y, 'o-', linewidth=2, \
label="%s=%s" % (para_names[0], group))
axs.legend(loc="upper left", shadow=True, fancybox=True)
if n_columns >= 4:
#print(df_all_gpb.groups)
for i in range(len(para_names)):
for group in sorted(df_all_gpb.groups.keys(), key=lambda x: x[i]):
# check the convergence of parameter para_names[i]
# with the other parameters at the best, i.e. max
flag_best_other = True
for j in range(len(para_names)):
if j != i and group[j] != para_max[j]:
flag_best_other = False
break
if not flag_best_other:
continue
gp_data = df_all_gpb.get_group(group)
x = gp_data.sort_values(by=x_name)[x_name]
y = gp_data.sort_values(by=x_name)[y_name]
axs.flatten()[i].plot(x, y, 'o-', linewidth=2, \
label="%s=%s" % (para_names[i], group[i]))
# Generate the title string as the fixed parameters
for i in range(len(para_names)):
title_str_list = ['convergence w.r.t', para_names[i],'\n@ (']
for j in range(len(para_names)):
if j != i:
title_str_list.append("%s = %s" % (para_names[j], para_max[j]))
title_str_list.append(')')
title_str = ' '.join(title_str_list)
axs.flatten()[i].set_title(title_str)
for ax in axs.flatten():
ax.legend(loc="upper left", shadow=True, fancybox=True)
if preview:
if f_plot3d:
fig.colorbar(p3d)
plt.show()
if figname != '':
print("- Saving to %s" % figname)
fig.savefig(figname, dpi=int(imgres)*150)
return
# ====================================================
def Main(ArgList):
description = '''Visualize the data for an N-parameter convergence test. In general N is equal to 2 or 3. Support up to 5.'''
parser = ArgumentParser(description=description)
parser.add_argument(dest="datafile", metavar='file', type=str, nargs=1, help="The name of file storing the data. Better in CSV/Excel format and index is not necessary.")
parser.add_argument("--xt", dest="xtarget_column", metavar="X", type=int, default=0, help="the index of column (>0) which contains the direct test parameter (x). Default is the second to last column.")
parser.add_argument("--yt", dest="ytarget_column", metavar="Y", type=int, default=0, help="the index of column (>0) which contains the quantity to converge (y). Default is the last column.")
parser.add_argument("--plot3d", dest="f_plot3d", action="store_true", help="Flag to use 3D plots. Support 2-parameter test only.")
parser.add_argument("--save", dest="figname", type=str, default='', help="File name (e.g. conv.png) to save the figure. The figure will not be saved unless this option is set other than ''.")
parser.add_argument("--res", dest="resolution", metavar='RES', type=int, default=2, help="Resolution of image, dpi = 150*RES. Default 2 (300 dpi).")
# initialize options as 'opts'
opts = parser.parse_args()
datafile = opts.datafile[0]
df_all = pd.read_table(datafile, delim_whitespace=True)
common_nd_conv_plot(df_all, opts.xtarget_column, opts.ytarget_column, opts.f_plot3d, opts.figname, \
True, opts.resolution)
# ==============================
if __name__ == "__main__":
Main(sys.argv)
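# Example invocation (illustrative; assumes a whitespace-delimited data file
# `conv.dat` whose first row names the columns, e.g. "kmesh ecutwfc etotal"):
#   python pc_nd_conv_plot.py conv.dat --save conv.png --res 2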
|
nilq/baby-python
|
python
|
sandwich_orders = ['pastrami', 'fish', 'pastrami', 'cabbage', 'pastrami', 'sala', 'pig', 'chicken']
finished_sandwich_orders = []
print(sandwich_orders)
print("'pastrami' soled out!")
while 'pastrami' in sandwich_orders:
sandwich_orders.remove('pastrami')
print(sandwich_orders)
while sandwich_orders:
finished = sandwich_orders.pop()
print("I made your " + finished + ' sandwich.')
finished_sandwich_orders.append(finished)
print(sandwich_orders)
print(finished_sandwich_orders)
|
nilq/baby-python
|
python
|
import tensorflow as tf
import src.lib as tl
class DNN:
def __init__(self,conf_data):
n_classes = len(conf_data["classes_list"])
data_size = conf_data["size"]
self.name = "selector"
self.show_kernel_map = []
with tf.name_scope('Input'):
self.input = tf.placeholder(tf.float32, shape=[None, data_size[0] * data_size[1] ], name="x-input")
with tf.name_scope('Labels'):
self.labels = tf.placeholder(tf.float32, shape=[None, n_classes], name="y-input")
with tf.name_scope('DropOut'):
self.keep_prob = tf.placeholder(tf.float32)
with tf.name_scope('model'):
net = tf.reshape(self.input, shape=[-1, data_size[0], data_size[1], 1])
with tf.variable_scope("CONV_1"):
[conv1, W, b] = tl.conv2d(net, 121, 20)
R1 = tf.nn.l2_loss(W)
self.show_kernel_map.append(W) # Create the feature map
with tf.variable_scope("POOL_1"):
pool1 = tl.max_pool_2x2(conv1)
with tf.variable_scope("CONV_2"):
[conv2, W, b] = tl.conv2d(pool1, 16, 10)
R2 = tf.nn.l2_loss(W)
self.show_kernel_map.append(W) # Create the feature map
with tf.variable_scope("POOL_2"):
pool2 = tl.max_pool_2x2(conv2)
with tf.variable_scope("FC_1"):
flat1 = tl.fc_flat(pool2)
h, W, b = tl.fc(flat1, 1024)
R3 = tf.nn.l2_loss(W)
fc1 = tf.nn.relu(h)
with tf.variable_scope("DROPOUT_1"):
drop1 = tf.nn.dropout(fc1, self.keep_prob)
with tf.variable_scope("FC_2"):
h, W, b = tl.fc(drop1, 1024)
R4 = tf.nn.l2_loss(W)
fc2 = tf.nn.relu( h )
with tf.variable_scope("DROPOUT_2"):
drop2 = tf.nn.dropout(fc2, self.keep_prob)
with tf.variable_scope("OUT"):
self.out, W, b = tl.fc(drop2, n_classes)
with tf.name_scope('Cost'):
self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels=self.labels,
logits=self.out) )
self.cost = self.cost + 0.01 * (R1 + R2 + R3 + R4)
self.output = tf.nn.softmax (self.out)
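# Illustrative wiring sketch (not part of the original file). It assumes a
# hypothetical conf_data layout matching the keys read above ("classes_list",
# "size") and TensorFlow 1.x sessions; the commented feed_dict values are placeholders.
if __name__ == "__main__":
    conf_data = {"classes_list": ["cat", "dog"], "size": (32, 32)}
    net = DNN(conf_data)
    train_step = tf.train.AdamOptimizer(1e-4).minimize(net.cost)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # sess.run([train_step, net.cost],
        #          feed_dict={net.input: batch_images,   # shape (N, 32*32)
        #                     net.labels: batch_labels,  # one-hot, shape (N, 2)
        #                     net.keep_prob: 0.5})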
|
nilq/baby-python
|
python
|
"""
Wrap Google Prediction API into something that looks
kind of like the standard scikit-learn interface to
learning models.
Derived from Google API example code examples found here:
https://github.com/google/google-api-python-client
@author: Jed Ludlow
"""
from __future__ import print_function
import argparse
import pprint
import time
import numpy as np
from apiclient import sample_tools
from oauth2client import client
# Time to wait (in seconds) between successive checks of training status.
TRAIN_SLEEP_TIME = 10
# Time to wait (in seconds) between successive prediction calls.
PREDICT_SLEEP_TIME = 0.8
# String to display if OAuth fails.
REAUTH = ("The credentials have been revoked or expired. "
"Please re-instantiate the predictor to re-authorize.")
def print_header(line):
"""
Format and print header block sized to length of line
"""
header_str = '='
header_line = header_str * len(line)
print('\n' + header_line)
print(line)
print(header_line)
class GooglePredictor(object):
"""
Prediction engine from the Google Prediction API wrapped
loosely in the style of sckit-learn.
"""
def __init__(self, project_id, object_name, model_id, client_secrets):
# Take advantage of the Google API example tools for
# credential management which make use of command line
# argument parsing.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'object_name',
help="Full Google Storage path of csv data (ex bucket/object)")
argparser.add_argument(
'model_id',
help="Model Id of your choosing to name trained model")
argparser.add_argument(
'project_id',
help="Project Id as shown in Developer Console")
service, self.flags = sample_tools.init(
['GooglePredictor', object_name, model_id, project_id],
'prediction', 'v1.6', __doc__, client_secrets,
parents=[argparser],
scope=(
'https://www.googleapis.com/auth/prediction',
'https://www.googleapis.com/auth/devstorage.read_only'))
self.papi = service.trainedmodels()
def list(self):
"""
List available models in the current project.
"""
try:
# List models.
print_header("Fetching list of first ten models")
result = self.papi.list(
maxResults=10,
project=self.flags.project_id).execute()
print("List results:")
pprint.pprint(result)
except client.AccessTokenRefreshError:
print(REAUTH)
def get_params(self):
"""
Get description of current model.
"""
try:
# Describe model.
print_header("Fetching model description")
result = self.papi.analyze(
id=self.flags.model_id,
project=self.flags.project_id).execute()
print("Analyze results:")
pprint.pprint(result)
except client.AccessTokenRefreshError:
print(REAUTH)
def fit(self, model_type='CLASSIFICATION'):
"""
Fit a model to training data in the current bucket object.
"""
try:
# Start training request on a data set.
print_header("Submitting model training request")
body = {
'id': self.flags.model_id,
'storageDataLocation': self.flags.object_name,
'modelType': model_type}
start = self.papi.insert(
body=body,
project=self.flags.project_id).execute()
print("Training results:")
pprint.pprint(start)
# Wait for the training to complete.
print_header("Waiting for training to complete")
while True:
status = self.papi.get(
id=self.flags.model_id,
project=self.flags.project_id).execute()
state = status['trainingStatus']
print("Training state: " + state)
if state == 'DONE':
# Job has completed.
print("Training completed:")
pprint.pprint(status)
break
elif state == 'RUNNING':
time.sleep(TRAIN_SLEEP_TIME)
continue
else:
raise Exception("Training Error: " + state)
except client.AccessTokenRefreshError:
print(REAUTH)
def predict(self, X):
"""
Get model predictions for the samples in X.
X is a numpy array where each column is a feature, and
each row is an observation sample.
"""
try:
# Make some predictions using the newly trained model.
print_header("Making some predictions")
out = []
for sample in X:
body = {'input': {'csvInstance': sample.tolist()}}
result = self.papi.predict(
body=body,
id=self.flags.model_id,
project=self.flags.project_id).execute()
if 'outputLabel' in result:
out.append(result['outputLabel'])
elif 'outputValue' in result:
out.append(float(result['outputValue']))
time.sleep(PREDICT_SLEEP_TIME)
return np.array(out)
except client.AccessTokenRefreshError:
print(REAUTH)
def delete(self):
"""
Delete the current model.
"""
try:
# Delete model.
print_header("Deleting model")
result = self.papi.delete(
id=self.flags.model_id,
project=self.flags.project_id).execute()
print("Model deleted.")
return result
except client.AccessTokenRefreshError:
print(REAUTH)
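# Illustrative usage sketch (not from the original file). It assumes a valid
# client_secrets.json for OAuth, a CSV already uploaded to Google Storage, and
# placeholder project/bucket/model names.
if __name__ == '__main__':
    predictor = GooglePredictor(
        project_id='my-project-id',
        object_name='my-bucket/training-data.csv',
        model_id='my-model',
        client_secrets='client_secrets.json')
    predictor.fit(model_type='CLASSIFICATION')
    X = np.array([[5.1, 3.5, 1.4, 0.2]])
    print(predictor.predict(X))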
|
nilq/baby-python
|
python
|
def findDecision(obj): #obj[0]: Coupon, obj[1]: Education, obj[2]: Occupation
# {"feature": "Coupon", "instances": 8147, "metric_value": 0.4744, "depth": 1}
if obj[0]>1:
# {"feature": "Education", "instances": 5889, "metric_value": 0.4676, "depth": 2}
if obj[1]>1:
# {"feature": "Occupation", "instances": 3337, "metric_value": 0.4747, "depth": 3}
if obj[2]<=13.339599828993485:
return 'True'
elif obj[2]>13.339599828993485:
return 'True'
else: return 'True'
elif obj[1]<=1:
# {"feature": "Occupation", "instances": 2552, "metric_value": 0.4568, "depth": 3}
if obj[2]<=19.03559777229008:
return 'True'
elif obj[2]>19.03559777229008:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Occupation", "instances": 2258, "metric_value": 0.4882, "depth": 2}
if obj[2]>2.015213346063521:
# {"feature": "Education", "instances": 1795, "metric_value": 0.4911, "depth": 3}
if obj[1]>0:
return 'False'
elif obj[1]<=0:
return 'True'
else: return 'True'
elif obj[2]<=2.015213346063521:
# {"feature": "Education", "instances": 463, "metric_value": 0.4395, "depth": 3}
if obj[1]<=3:
return 'False'
elif obj[1]>3:
return 'True'
else: return 'True'
else: return 'False'
else: return 'False'
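# Quick illustration with hypothetical feature vectors (obj = [Coupon, Education, Occupation]):
if __name__ == '__main__':
    print(findDecision([2, 0, 15.0]))  # -> 'True'  (Coupon > 1, Education <= 1, Occupation <= 19.04)
    print(findDecision([1, 1, 5.0]))   # -> 'False' (Coupon <= 1, Occupation > 2.02, Education > 0)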
|
nilq/baby-python
|
python
|
from typing import Callable, Dict, Optional
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from kornia.metrics import accuracy, mean_average_precision, mean_iou
from .trainer import Trainer
from .utils import Configuration
class ImageClassifierTrainer(Trainer):
"""Module to be used for image classification purposes.
The module subclasses :py:class:`~kornia.x.Trainer` and overrides the
:py:func:`~kornia.x.Trainer.evaluate` function implementing a standard
:py:func:`~kornia.metrics.accuracy` topk@[1, 5].
.. seealso::
Learn how to use this class in the following
`example <https://github.com/kornia/kornia/blob/master/examples/train/image_classifier/>`__.
"""
def compute_metrics(self, *args: torch.Tensor) -> Dict[str, float]:
if len(args) != 2:
raise AssertionError
out, target = args
acc1, acc5 = accuracy(out, target, topk=(1, 5))
return dict(top1=acc1.item(), top5=acc5.item())
class SemanticSegmentationTrainer(Trainer):
"""Module to be used for semantic segmentation purposes.
The module subclasses :py:class:`~kornia.x.Trainer` and overrides the
:py:func:`~kornia.x.Trainer.evaluate` function implementing IoU :py:func:`~kornia.metrics.mean_iou`.
.. seealso::
Learn how to use this class in the following
`example <https://github.com/kornia/kornia/blob/master/examples/train/semantic_segmentation/>`__.
"""
def compute_metrics(self, *args: torch.Tensor) -> Dict[str, float]:
if len(args) != 2:
raise AssertionError
out, target = args
iou = mean_iou(out.argmax(1), target, out.shape[1]).mean()
return dict(iou=iou.item())
class ObjectDetectionTrainer(Trainer):
"""Module to be used for object detection purposes.
The module subclasses :py:class:`~kornia.x.Trainer` and overrides the
:py:func:`~kornia.x.Trainer.evaluate` function implementing IoU :py:func:`~kornia.metrics.mean_iou`.
.. seealso::
Learn how to use this class in the following
`example <https://github.com/kornia/kornia/blob/master/examples/train/object_detection/>`__.
"""
def __init__(
self,
model: nn.Module,
train_dataloader: DataLoader,
valid_dataloader: DataLoader,
criterion: Optional[nn.Module],
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler.CosineAnnealingLR,
config: Configuration,
num_classes: int,
callbacks: Dict[str, Callable] = None,
loss_computed_by_model: Optional[bool] = None,
) -> None:
if callbacks is None:
callbacks = {}
super().__init__(
model, train_dataloader, valid_dataloader, criterion, optimizer, scheduler, config, callbacks
)
# TODO: auto-detect if the model is from TorchVision
self.loss_computed_by_model = loss_computed_by_model
self.num_classes = num_classes
def on_model(self, model: nn.Module, sample: dict):
if self.loss_computed_by_model and model.training:
return model(sample["input"], sample["target"])
return model(sample["input"])
def compute_loss(self, *args: torch.Tensor) -> torch.Tensor:
if self.loss_computed_by_model:
return torch.stack(list(args[0])).sum()
if self.criterion is None:
raise RuntimeError("`criterion` should not be None if `loss_computed_by_model` is False.")
return self.criterion(*args)
def compute_metrics(self, *args: torch.Tensor) -> Dict[str, float]:
if (
isinstance(args[0], dict) and "boxes" in args[0] and "labels" in args[0] and "scores" in args[0]
and isinstance(args[1], dict) and "boxes" in args[1] and "labels" in args[1]
):
mAP, _ = mean_average_precision(
[a['boxes'] for a in args[0]],
[a['labels'] for a in args[0]],
[a['scores'] for a in args[0]],
[a['boxes'] for a in args[1]],
[a['labels'] for a in args[1]],
n_classes=self.num_classes,
threshold=0.000001
)
return {'mAP': mAP.item()}
return super().compute_metrics(*args)
|
nilq/baby-python
|
python
|
# Created on Mar 07, 2021
# author: Hosein Hadipour
# contact: hsn.hadipour@gmail.com
import os
output_dir = os.path.curdir
str_feedback1 = lambda a24, b15, b0, b1, b2: a24 + ' + ' + b15 + ' + ' + b0 + ' + ' + b1 + '*' + b2
str_feedback2 = lambda b6, a27, a0, a1, a2: b6 + ' + ' + a27 + ' + ' + a0 + ' + ' + a1 + '*' + a2
str_f = lambda b0, b15: b0 + ' + ' + b15
def biviumb(T=177):
cipher_name = 'biviumb'
# 177 clock cycles
recommended_mg = 32
recommended_ms = 65
eqs = '#%s %d clock cycles\n' % (cipher_name, T)
eqs += 'connection relations\n'
for t in range(T):
eqs += 'b_%d, b_%d => bm_%d\n' % (t + 1, t + 2, t)
eqs += 'a_%d, a_%d => am_%d\n' % (t + 1, t + 2 ,t)
eqs += 'algebraic relations\n'
for t in range(T):
eqs += 'a_%d + a_%d + b_%d + b_%d + bm_%d\n' % (t + 93, t + 24, t, t + 15, t)
eqs += 'b_%d + b_%d + a_%d + a_%d + am_%d\n' % (t + 84, t + 6, t, t + 27, t)
eqs += 'b_%d + b_%d + a_%d + a_%d + z_%d\n' % (t, t + 15, t, t + 27 , t)
eqs += 'known\n' + '\n'.join(['z_%d' % i for i in range(T)]) + '\nend'
eqsfile_path = os.path.join(output_dir, 'relationfile_%s_%dclk_mg%d_ms%d.txt' % (
cipher_name, T, recommended_mg, recommended_ms))
with open(eqsfile_path, 'w') as relation_file:
relation_file.write(eqs)
def main():
biviumb(T=177)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from django.utils.translation import ugettext_lazy as _
from django.contrib.comments.models import CommentFlag
from django.contrib.comments.admin import CommentsAdmin
from django.contrib import admin
from scipy_central.comments.models import SpcComment
class SpcCommentAdmin(CommentsAdmin):
"""
Custom admin interface for comments
defined on the top of built-in admin interface
"""
list_display = CommentsAdmin.list_display
fieldsets = (
(None,
{'fields': ('content_type', 'object_pk', 'site')}
),
(_('Content'),
{'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment', 'rest_comment')}
),
(_('Metadata'),
{'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')}
),
)
class SpcCommentFlagAdmin(admin.ModelAdmin):
"""
Admin interface for comment flags
"""
list_display = ('flag', 'user', 'comment', 'flag_date')
search_fields = ['user__username', 'comment__user__username', 'flag_date']
list_filter = ['flag_date']
ordering = ['-flag_date']
admin.site.register(SpcComment, SpcCommentAdmin)
admin.site.register(CommentFlag, SpcCommentFlagAdmin)
|
nilq/baby-python
|
python
|
# 3.11 Random selection
import random
values = [1,2,3,4,5,6]
for i in range(0, 4):
print(random.choice(values))
for i in range(0, 4):
print(random.sample(values, 2))
random.shuffle(values)
print(values)
for i in range(0, 10):
print(random.randint(0, 10))
for i in range(0, 3):
print(random.random())
print(random.getrandbits(200))
random.seed() # Seed based on system time or os.urandom()
random.seed(12345) # Seed based on integer given
random.seed(b'bytedata') # Seed based on byte data
|
nilq/baby-python
|
python
|
import json
from pytorch_pretrained_bert import cached_path
from pytorch_pretrained_bert import OpenAIGPTTokenizer
from keras_gpt_2 import load_trained_model_from_checkpoint, get_bpe_from_files, generate
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
url = "s3://datasets.huggingface.co/personachat/personachat_self_original.json"
# Download and load JSON dataset
personachat_file = cached_path(url)
with open(personachat_file, "r", encoding="utf-8") as f:
dataset = json.loads(f.read())
# with open('dataset.json', "w", encoding="utf-8") as f:
# f.write(json.dumps(dataset))
dataset = dataset['train']
dataset = dataset[:1]
print('\n')
print(dataset[0]['utterances'][1])
print('\n')
print(dataset[0]['utterances'][2])
# Tokenize and encode the dataset using our loaded GPT tokenizer
def tokenize(obj):
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
if isinstance(obj, dict):
return dict((n, tokenize(o)) for n, o in obj.items())
return list(tokenize(o) for o in obj)
dataset = tokenize(dataset)
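# Quick illustration (not in the original script) of what tokenize() returns for
# a plain string, using the tokenizer loaded above:
sample_ids = tokenize("hello world")
print(sample_ids)                                   # BPE vocabulary ids
print(tokenizer.convert_ids_to_tokens(sample_ids))  # back to BPE tokens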
|
nilq/baby-python
|
python
|
import unittest
import pandas as pd
import os
from requests import Response
from computerMetricCollector.metricsCollector.StorageAPI import store_to_database
from computerMetricCollector.crypto import encrypt_data
from computerMetricCollector.test.crypto import read_key, decrypt_data
from computerMetricCollector.config import import_config
from computerMetricCollector.metricsCollector.memoryMetrics import MemoryMetrics
from computerMetricCollector.metricsCollector.computerMetrics import get_computer_id
from computerMetricCollector.test.TestCase.LoggerTest import set_logger
class MemoryTest(unittest.TestCase):
def setUp(self):
self.logger = set_logger("DEBUG")
self.root_dir = os.path.dirname(os.path.dirname(__file__))
self.settings = import_config(self.root_dir)
self.date_format = self.settings.get("date_time_format")
self.meta = self.settings.get("collectors").get("MemoryMetrics")
self.collector = MemoryMetrics(self.logger, get_computer_id(self.logger), self.meta.get("metrics"),
self.meta.get("metrics_to_encrypt"), self.date_format, self.meta.get("url"))
self.collector.fetch_metrics()
self.metrics_df = self.collector.get_metrics_df()
self.sample_df = pd.read_csv(self.root_dir + "/sample_data/MemoryMetrics.csv",
names=self.meta.get("metrics"))
def test_memory_metrics(self):
if len(self.meta.get("metrics_to_match")) > 0:
match_metrics_df = self.metrics_df.filter(items=self.meta.get("metrics_to_match"), axis=1)
match_sample_df = self.sample_df.filter(items=self.meta.get("metrics_to_match"), axis=1)
pd.testing.assert_frame_equal(match_metrics_df, match_sample_df, check_dtype=False)
def test_metrics_type(self):
for idx, rec in self.metrics_df.iterrows():
self.assertLess(int(rec["memory_available"]), int(rec["memory_total"]))
self.assertLess(int(rec["memory_used"]), int(rec["memory_total"]))
self.assertLess(int(rec["swap_used"]), int(rec["swap_total"]))
self.assertLess(int(rec["swap_free"]), int(rec["swap_total"]))
self.assertGreaterEqual(int(rec["swap_byte_in"]), 0)
self.assertGreaterEqual(int(rec["swap_byte_out"]), 0)
self.assertIsInstance(rec["memory_used_percent"], float)
self.assertIsInstance(rec["swap_percent"], float)
def test_encryption(self):
raw_metrics_df = self.metrics_df
encrypt_key = read_key(self.root_dir + self.settings.get("encryption_key_file"))
encrypt_data(self.collector, encrypt_key)
encrypted_metrics_df = self.collector.get_metrics_df()
decrypt_key = read_key(self.root_dir + self.settings.get("decryption_key_file"))
decrypted_metrics_df = decrypt_data(encrypted_metrics_df, self.meta.get("metrics_to_encrypt"), decrypt_key)
pd.testing.assert_frame_equal(raw_metrics_df, decrypted_metrics_df)
def test_store(self):
url = self.meta.get("url")
reg_id = self.settings.get("registration_id")
encrypt_key = read_key(self.root_dir + self.settings.get("encryption_key_file"))
if (url is not None and url != "") and (reg_id is not None and reg_id != ""):
response = store_to_database(self.collector, reg_id, encrypt_key)
self.assertIsInstance(response, Response)
self.assertEqual(response.status_code, 200)
|
nilq/baby-python
|
python
|
import unittest
from util.bean import deepNaviReqToNaviModel
from model import DeepNaviReq
import time
def generateReq():
req = DeepNaviReq()
req.time = int(time.time() * 1000)
print()
# magnetic = req.magneticList.add()
# magnetic.x = 1
# magnetic.y = 2
# magnetic.z = 3
accelerometer = req.accelerometerList.add()
accelerometer.x = 1
accelerometer.y = 2
accelerometer.z = 3
orientation = req.orientationList.add()
orientation.x = 1
orientation.y = 2
orientation.z = 3
gyroscope = req.gyroscopeList.add()
gyroscope.x = 1
gyroscope.y = 2
gyroscope.z = 3
gravity = req.gravityList.add()
gravity.x = 1
gravity.y = 2
gravity.z = 3
linearAcceleration = req.linearAccelerationList.add()
linearAcceleration.x = 1
linearAcceleration.y = 2
linearAcceleration.z = 3
ambientTemperature = req.ambientTemperatureList.add()
ambientTemperature.value = 20
light = req.lightList.add()
light.value = 20
pressure = req.pressureList.add()
pressure.value = 20
proximity = req.proximityList.add()
proximity.value = 20
return req
class TestTo(unittest.TestCase):
def testA(self):
print(deepNaviReqToNaviModel(generateReq()))
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.11 on 2019-12-03 21:08
from django.db import migrations
from qatrack.qatrack_core.dates import (
format_as_date,
format_datetime,
parse_date,
parse_datetime,
)
def datestrings_to_dates(apps, schema):
TestInstance = apps.get_model("qa", "TestInstance")
for ti in TestInstance.objects.filter(unit_test_info__test__type="date"):
ti.date_value = parse_date(ti.string_value)
ti.string_value = ""
ti.save()
for ti in TestInstance.objects.filter(unit_test_info__test__type="datetime"):
ti.datetime_value = parse_datetime(ti.string_value)
ti.string_value = ""
ti.save()
def date_to_datestrings(apps, schema):
TestInstance = apps.get_model("qa", "TestInstance")
for ti in TestInstance.objects.filter(unit_test_info__test__type="date"):
ti.string_value = format_as_date(ti.date_value)
ti.save()
for ti in TestInstance.objects.filter(unit_test_info__test__type="datetime"):
ti.string_value = format_datetime(ti.datetime_value)
ti.save()
class Migration(migrations.Migration):
dependencies = [
('qa', '0045_auto_20191203_1409'),
]
operations = [
migrations.RunPython(datestrings_to_dates, date_to_datestrings),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Software Carpentry Windows Installer
Helps mimic a *nix environment on Windows with as little work as possible.
The script:
* Installs nano and makes it accessible from msysgit
* Provides standard nosetests behavior for msysgit
To use:
1. Install Python, IPython, and Nose. An easy way to do this is with
the Anaconda CE Python distribution
http://continuum.io/anacondace.html
2. Install msysgit
http://code.google.com/p/msysgit/downloads/list?q=full+installer+official+git
3. Run swc_windows_installer.py
You should be able to simply double click the file in Windows
"""
import hashlib
try: # Python 3
from io import BytesIO as _BytesIO
except ImportError: # Python 2
from StringIO import StringIO as _BytesIO
import os
import re
try: # Python 3
from urllib.request import urlopen as _urlopen
except ImportError: # Python 2
from urllib2 import urlopen as _urlopen
import zipfile
def zip_install(url, sha1, install_directory):
"""Download and install a zipped bundle of compiled software"""
r = _urlopen(url)
zip_bytes = r.read()
download_sha1 = hashlib.sha1(zip_bytes).hexdigest()
if download_sha1 != sha1:
raise ValueError(
'downloaded {!r} has the wrong SHA1 hash: {} != {}'.format(
url, download_sha1, sha1))
zip_io = _BytesIO(zip_bytes)
zip_file = zipfile.ZipFile(zip_io)
if not os.path.isdir(install_directory):
os.makedirs(install_directory)
zip_file.extractall(install_directory)
def install_nano(install_directory):
"""Download and install the nano text editor"""
zip_install(
url='http://www.nano-editor.org/dist/v2.2/NT/nano-2.2.6.zip',
sha1='f5348208158157060de0a4df339401f36250fe5b',
install_directory=install_directory)
def create_nosetests_entry_point(python_scripts_directory):
"""Creates a terminal-based nosetests entry point for msysgit"""
contents = '\n'.join([
'#!/usr/bin/env python',
'import sys',
'import nose',
"if __name__ == '__main__':",
' sys.exit(nose.core.main())',
'',
])
if not os.path.isdir(python_scripts_directory):
os.makedirs(python_scripts_directory)
with open(os.path.join(python_scripts_directory, 'nosetests'), 'w') as f:
f.write(contents)
def update_bash_profile(extra_paths=()):
"""Create or append to a .bash_profile for Software Carpentry
Adds nano to the path, sets the default editor to nano, and adds
additional paths for other executables.
"""
lines = [
'',
'# Add paths for Software-Carpentry-installed scripts and executables',
'export PATH=\"$PATH:{}\"'.format(':'.join(
make_posix_path(path) for path in extra_paths),),
'',
'# Make nano the default editor',
'export EDITOR=nano',
'',
]
config_path = os.path.join(os.path.expanduser('~'), '.bash_profile')
with open(config_path, 'a') as f:
f.write('\n'.join(lines))
def make_posix_path(windows_path):
"""Convert a Windows path to a posix path"""
for regex, sub in [
(re.compile(r'\\'), '/'),
(re.compile('^[Cc]:'), '/c'),
]:
windows_path = regex.sub(sub, windows_path)
return windows_path
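# For example (illustrative): make_posix_path(r'C:\Users\swc') returns '/c/Users/swc'.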
def main():
swc_dir = os.path.join(os.path.expanduser('~'), '.swc')
bin_dir = os.path.join(swc_dir, 'bin')
create_nosetests_entry_point(python_scripts_directory=bin_dir)
nano_dir = os.path.join(swc_dir, 'lib', 'nano')
install_nano(install_directory=nano_dir)
update_bash_profile(extra_paths=(nano_dir, bin_dir))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import sqlalchemy as sa
from sqlalchemy import orm
from data.db_session import BaseModel
import datetime
class Post(BaseModel):
__tablename__ = 'posts'
__repr_attrs__ = ["title", "tournament"]
serialize_only = (
"id",
"title",
"content",
"status",
"now",
"tournament.id",
"tournament.title",
"author.id",
"author.email",
"author.fullname",
"created_info"
)
secure_serialize_only = (
"id",
"title",
"content",
"status",
"now",
"tournament.id",
"tournament.title",
"author.id",
"author.fullname",
"created_info"
)
title = sa.Column(sa.String, nullable=False)
content = sa.Column(sa.Text, nullable=False)
status = sa.Column(sa.Integer, nullable=False, default=1)
now = sa.Column(sa.Boolean, nullable=False, default=False)
author_id = sa.Column(sa.Integer, sa.ForeignKey('users.id'))
tournament_id = sa.Column(sa.Integer, sa.ForeignKey('tournaments.id'))
author = orm.relationship('User', backref="posts")
tournament = orm.relationship('Tournament', backref="posts")
@property
def created_info(self):
created_date = datetime.datetime.fromisoformat(str(self.created_at))
return created_date.strftime('%d %B %Y')
def __str__(self):
return self.title
def have_permission(self, user):
return user == self.author or self.tournament.have_permission(user)
|
nilq/baby-python
|
python
|
from geniusweb.issuevalue.Bid import Bid
from geniusweb.issuevalue.Domain import Domain
from geniusweb.issuevalue.Value import Value
from geniusweb.profile.utilityspace.LinearAdditive import LinearAdditive
from tudelft.utilities.immutablelist.AbstractImmutableList import AbstractImmutableList
from tudelft.utilities.immutablelist.FixedList import FixedList
from tudelft.utilities.immutablelist.ImmutableList import ImmutableList
from tudelft.utilities.immutablelist.JoinedList import JoinedList
from tudelft.utilities.immutablelist.MapList import MapList
from tudelft.utilities.immutablelist.Tuple import Tuple
from typing import List, Dict
from geniusweb.bidspace.IssueInfo import IssueInfo
from geniusweb.bidspace.Interval import Interval
from geniusweb.utils import val
from decimal import Decimal
class BidsWithUtility :
'''
WARNING DO NOT USE, NOT YET WORKING CORRECTLY
Tool class containing functions dealing with utilities of all bids in a given
{@link LinearAdditive}. This class caches previously computed values to
accelerate the calls and subsequent calls. Re-use the object to keep/reuse
the cache.
<h2>Rounding</h2> Internally, utilities of bids are rounded to the given
precision. This may cause inclusion/exclusion of some bids in the results.
See {@link #BidsWithUtility(LinearAdditive, int)} for more details
Immutable.
'''
def __init__(self, issuesInfo:List[IssueInfo] , precision:int ) :
'''
@param issuesInfo List of the relevant issues (in order of relevance) and
all info of each issue.
@param precision the number of digits to use for computations. In
practice, 6 seems a good default value.
<p>
All utilities * weight are rounded to this number of
digits. This value should match the max number of
(digits used in the weight of an issue + number of
digits used in the issue utility). To determine the
optimal value, one may consider the step size of the
issues, and the range of interest. For instance if the
utility function has values 1/3 and 2/3, then these have
an 'infinite' number of relevant digits. But if the goal
is to search bids between utility 0.1 and 0.2, then
computing in 2 digits might already be sufficient.
<p>
This algorithm has memory and space complexity O(
|nissues| 10^precision ). For spaces up to 7 issues, 7
digits should be feasible; for 9 issues, 6 digits may be
the maximum.
'''
if issuesInfo is None or len(issuesInfo) == 0:
raise ValueError("issuesInfo list must contain at least 1 element")
self._issueInfo = issuesInfo;
self._precision = precision;
# cache. Key = call arguments for {@link #get(int, Interval)}. Value=return
# value of that call.
self._cache:Dict[Tuple[int, Interval], ImmutableList[Bid]] = {}
@staticmethod
def create(space:LinearAdditive, precision:int=6) -> "BidsWithUtility":
'''
Support constructor, uses default precision 6. This value seems practical
for the common range of issues, utilities and weights. See
{@link #BidsWithUtility(LinearAdditive, int)} for more details on the
precision.
@param space the {@link LinearAdditive} to analyze
@param precision the number of digits to use for computations. Optional, defaults to 6
'''
return BidsWithUtility(BidsWithUtility._getInfo(space, precision), precision);
def getRange(self) ->Interval :
'''
@return the (rounded) utility {@link Interval} of this space: minimum and
maximum achievable utility.
'''
return self._getRange(len(self._issueInfo) - 1)
def getBids(self, range: Interval) -> ImmutableList[Bid] :
'''
@param range the minimum and maximum utility required of the bids. to be
included (both ends inclusive).
@return a list with bids that have a (rounded) utility inside range.
possibly empty.
'''
return self._get(len(self._issueInfo) - 1, range.round(self._precision));
def getInfo(self) -> List[IssueInfo] :
return self._issueInfo.copy()
def getExtremeBid(self, isMax:bool) ->Bid :
'''
@param isMax the extreme bid required
@return the extreme bid, either the minimum if isMax=false or maximum if
isMax=true
'''
map:Dict[str, Value] = {}
for info in self._issueInfo:
map[info.getName()] = info.getExtreme(isMax)
return Bid(map)
def _get(self, n:int , goal:Interval) -> ImmutableList[Bid] :
'''
Create partial BidsWithUtil list considering only issues 0..n, with
utilities in given range.
@param n the number of issueRanges to consider, we consider 0..n here.
The recursion decreases n until n=0
@param goal the minimum and maximum utility required of the bids. to be
included (both ends inclusive)
@return BidsWithUtil list, possibly empty.
'''
if goal is None:
raise ValueError("Interval=null")
# clamp goal into what is reachable. Avoid caching empty
goal = goal.intersect(self._getRange(n))
if (goal.isEmpty()):
return FixedList([])
cachetuple = Tuple(n, goal)
if (cachetuple in self._cache):
return self._cache[cachetuple]
result = self._checkedGet(n, goal)
self._cache[cachetuple]=result
return result
@staticmethod
def _getInfo(space2:LinearAdditive , precision:int) -> List[IssueInfo] :
dom = space2.getDomain()
return [IssueInfo(issue, dom.getValues(issue), \
val(space2.getUtilities().get(issue)), \
space2.getWeight(issue), precision) \
for issue in dom.getIssues()]
def _checkedGet(self, n:int, goal:Interval ) -> ImmutableList[Bid] :
info = self._issueInfo[n]
# issue is the first issuesWithRange.
issue = info.getName()
if n == 0:
return OneIssueSubset(info, goal)
# make new list, joining all sub-lists
fulllist:ImmutableList[Bid] = FixedList([])
for val in info.getValues():
weightedutil = info.getWeightedUtil(val)
subgoal = goal.subtract(weightedutil)
# recurse: get list of bids for the subspace
partialbids = self._get(n - 1, subgoal)
bid = Bid({issue: val})
fullbids = BidsWithUtility.maplist(bid, partialbids)
if fullbids.size() != 0:
fulllist = JoinedList[Bid]([fullbids, fulllist])
return fulllist
@staticmethod
def maplist(bid: Bid, partialbids: ImmutableList[Bid]) -> ImmutableList[Bid]:
'''
this is just to force a scope onto bid
'''
return MapList[Bid, Bid](lambda pbid: pbid.merge(bid), partialbids)
def _getRange(self, n:int) ->Interval :
'''
@param n the maximum issuevalue utility to include. Use n=index of last
issue s= (#issues in the domain - 1) for the full range of this
domain.
@return Interval (min, max) of the total weighted utility Interval of
issues 0..n. All weighted utilities have been rounded to the set
{@link #precision}
'''
value = Interval(Decimal(0),Decimal(0))
for i in range(0,n+1): # include end point
value = value.add(self._issueInfo[i].getInterval())
return value
class OneIssueSubset (AbstractImmutableList[Bid]):
'''
List of all one-issue bids that have utility inside given interval.
'''
def __init__(self, info:IssueInfo , interval:Interval ) :
'''
@param info the {@link IssueInfo}
@param interval a utility interval (weighted)
'''
self._info = info;
self._interval = interval;
self._size = info._subsetSize(interval)
#Override
def get(self, index:int) ->Bid :
return Bid({self._info.getName():
self._info._subset(self._interval)[index]})
#Override
def size(self) ->int:
return self._size
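# Rough usage sketch (commented out on purpose: the class is flagged above as
# not yet working correctly). Assuming some LinearAdditive profile object
# `profile`, the intended flow would look roughly like:
#
#   bidutils = BidsWithUtility.create(profile, precision=6)
#   full_range = bidutils.getRange()
#   good_bids = bidutils.getBids(Interval(Decimal("0.8"), full_range.getMax()))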
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
from WhiteFox.core.config.config import Config
class WhiteFox(commands.Bot):
def __init__(self, token=None, client_id=None, prefixes=None):
self.configs = None
self._init_configs()
if token is not None:
self.configs.discord.token = token
if client_id is not None:
self.configs.discord.client_id = client_id
if prefixes is not None:
self.configs.discord.prefixes = prefixes
super().__init__(command_prefix=commands.when_mentioned_or(*self.configs.fox.prefixes))
def _init_configs(self):
self.configs = Config()
def run(self):
try:
super().run(self.configs.discord.token)
except discord.LoginFailure:
print("Invalid token provided.")
async def on_ready(self):
print(f"{self.user.name}#{self.user.discriminator} Ready!")
print(f"User Id: {self.user.id}")
print("-------")
|
nilq/baby-python
|
python
|
import re
import json
import requests
import time
from urllib.parse import unquote
import os
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36', 'referer': 'https://youtube.com'}
class Caption:
def __init__(self, url, language=None):
for i in re.search(r'watch\?v=(.*?)&|youtu.be/(.*?)&', url+'&').groups():
if i is not None:
vid = i
break
url = 'https://youtube.com/watch?v='+vid
html = unquote(requests.get(url, headers=headers).text).replace('\\"', '"')
title = re.search(r'"videoId":".*?", "title":"(.*?)"', html).groups()[0]
self.caption_details = self.get_caption_details(html)
if language is not None:
try:
captions = self.fetch_captions(self.caption_details[language])
self.convert_to_srt(caption_file=captions, path=os.getcwd(), file_name=title)
except Exception:
raise Exception(f'No captions were found for {language}. Available Captions : {self.caption_details.keys()}')
def get_caption_details(self, html=None):
urls_regex = re.search(r'(\{"captionTracks":\[.*?\])', html)
caption_details = dict()
if urls_regex.groups()[0] is not None:
urls_regex = urls_regex.groups()[0]+'}'
for i in json.loads(urls_regex)['captionTracks']:
caption_details[i['languageCode']] = i['baseUrl']
return caption_details
else:
raise Exception('Captions not available for this Video')
def fetch_captions(self, url):
caption_file = requests.get(url).text.replace('\n', '')
return caption_file
def convert_to_srt(self, caption_file=None, path=None, file_name=None):
if caption_file is not None:
srt_text = ''
lines = 1
for i in re.findall(r'<text start="(.*?)" dur="(.*?)">(.*?)</text>', caption_file):
start = float(i[0])
dur = float(i[1])
end = start+dur
text = i[2]
start_time = time.strftime("%H:%M:%S", time.gmtime(start)) + ",000"
end_time = time.strftime("%H:%M:%S", time.gmtime(end)) + ",000"
text_line = f'{lines}\n{start_time} --> {end_time}\n{text}\n\n'  # trailing blank line separates SRT blocks
srt_text += text_line
lines += 1
if file_name is not None:
file_name = file_name.split('.srt')[0]
open(f'{path}' + os.path.sep + f'{file_name}.srt', 'wb').write(srt_text.encode('utf-8'))
else:
raise Exception('Please provide file name and path to convert_to_srt function')
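# Illustrative usage (hypothetical video URL): downloads the English captions and
# writes "<video title>.srt" into the current working directory.
if __name__ == '__main__':
    Caption('https://www.youtube.com/watch?v=dQw4w9WgXcQ', language='en')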
|
nilq/baby-python
|
python
|
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch._thnn import type2backend
from .thnn.auto import function_by_name
import torch.backends.cudnn as cudnn
MODE_ZEROS = 0
MODE_BORDER = 1
class GridSampler(Function):
@staticmethod
def forward(ctx, input, grid, padding_mode='zeros'):
ctx.save_for_backward(input, grid)
if padding_mode == 'zeros':
ctx.padding_mode = MODE_ZEROS
elif padding_mode == 'border':
ctx.padding_mode = MODE_BORDER
else:
raise ValueError("padding_mode needs to be 'zeros' or 'border', but got {}"
.format(padding_mode))
grid_sz = grid.size()
if cudnn.is_acceptable(input) and padding_mode == 'zeros':
output = input.new(grid_sz[0], input.size(1), grid_sz[1], grid_sz[2])
grid = grid.contiguous()
if 0 in input.stride():
input = input.contiguous()
torch._C._cudnn_grid_sampler_forward(input, grid, output)
else:
backend = type2backend[type(input)]
output = input.new(grid_sz[0], input.size(1), grid_sz[1], grid_sz[2])
backend.SpatialGridSamplerBilinear_updateOutput(
backend.library_state, input, grid, output, ctx.padding_mode)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, grid = ctx.saved_tensors
padding_mode = ctx.padding_mode
if cudnn.is_acceptable(input) and padding_mode == 'zeros':
grad_input = input.new(input.size())
grad_grid = grid.new(grid.size())
grid = grid.contiguous()
if 0 in input.stride():
input = input.contiguous()
# Sometimes grad_output is a scalar (like 1) expanded as a tensor.
# cudnn requires a tensor that has non-zero strides.
if 0 in grad_output.stride():
grad_output = grad_output.contiguous()
torch._C._cudnn_grid_sampler_backward(input, grad_input,
grid, grad_grid,
grad_output)
else:
backend = type2backend[type(input)]
grad_input = input.new(input.size())
grad_grid = grid.new(grid.size())
backend.SpatialGridSamplerBilinear_updateGradInput(
backend.library_state, input, grad_input,
grid, grad_grid, grad_output, padding_mode)
return grad_input, grad_grid, None
class AffineGridGenerator(Function):
@staticmethod
def _enforce_cudnn(input):
if not cudnn.enabled:
raise RuntimeError("AffineGridGenerator needs CuDNN for "
"processing CUDA inputs, but CuDNN is not enabled")
assert cudnn.is_acceptable(input)
@staticmethod
def forward(ctx, theta, size):
assert type(size) == torch.Size
N, C, H, W = size
ctx.size = size
if theta.is_cuda:
ctx.is_cuda = True
AffineGridGenerator._enforce_cudnn(theta)
grid = theta.new(N, H, W, 2)
theta = theta.contiguous()
torch._C._cudnn_affine_grid_generator_forward(theta, grid, N, C, H, W)
else:
ctx.is_cuda = False
base_grid = theta.new(N, H, W, 3)
linear_points = torch.linspace(-1, 1, W) if W > 1 else torch.Tensor([-1])
base_grid[:, :, :, 0] = torch.ger(torch.ones(H), linear_points).expand_as(base_grid[:, :, :, 0])
linear_points = torch.linspace(-1, 1, H) if H > 1 else torch.Tensor([-1])
base_grid[:, :, :, 1] = torch.ger(linear_points, torch.ones(W)).expand_as(base_grid[:, :, :, 1])
base_grid[:, :, :, 2] = 1
ctx.base_grid = base_grid
grid = torch.bmm(base_grid.view(N, H * W, 3), theta.transpose(1, 2))
grid = grid.view(N, H, W, 2)
return grid
@staticmethod
@once_differentiable
def backward(ctx, grad_grid):
N, C, H, W = ctx.size
assert grad_grid.size() == torch.Size([N, H, W, 2])
assert ctx.is_cuda == grad_grid.is_cuda
if grad_grid.is_cuda:
AffineGridGenerator._enforce_cudnn(grad_grid)
grad_theta = grad_grid.new(N, 2, 3)
grad_grid = grad_grid.contiguous()
torch._C._cudnn_affine_grid_generator_backward(grad_theta, grad_grid,
N, C, H, W)
else:
base_grid = ctx.base_grid
grad_theta = torch.bmm(
base_grid.view(N, H * W, 3).transpose(1, 2),
grad_grid.view(N, H * W, 2))
grad_theta = grad_theta.transpose(1, 2)
return grad_theta, None
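# Rough usage sketch (commented out; only meaningful on the legacy torch build
# this module targets). The Functions are applied directly, mirroring what the
# public torch.nn.functional.affine_grid / grid_sample helpers wrap:
#
#   theta = torch.eye(2, 3).unsqueeze(0)                       # identity transform
#   grid = AffineGridGenerator.apply(theta, torch.Size((1, 3, 8, 8)))
#   out = GridSampler.apply(torch.rand(1, 3, 8, 8), grid, 'zeros')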
|
nilq/baby-python
|
python
|
from unittest import TestCase
from starmie import AStarProblem
class Maze(AStarProblem):
WALL = 'O'
START = 'S'
GOAL = 'G'
ROAD = ' '
PATH = '*'
def __init__(self, map_data, allow_slant=True):
self.map = []
self.start = None
self.goal = None
for x, line in enumerate(map_data):
self.map.append([])
for y, char in enumerate(line):
assert char in (self.WALL, self.START, self.GOAL, self.ROAD)
self.map[x].append(char)
if char == self.START: self.start = (x, y)
if char == self.GOAL: self.goal = (x, y)
self.shape = (len(self.map), len(self.map[0]))
self.move = [(0, -1), (0, 1), (-1, 0), (1, 0)]
if allow_slant:
self.move += [(-1, -1), (-1, 1), (1, -1), (1, 1)]
def get_start(self):
return self.start
def is_goal(self, node):
return node == self.goal
def get_neighbors(self, node):
x, y = node
w, h = self.shape
neighbors = [(x + dx, y + dy) for dx, dy in self.move]
neighbors = filter(lambda pos: 0 <= pos[0] < w and 0 <= pos[1] < h, neighbors)
neighbors = filter(lambda pos: self.map[pos[0]][pos[1]] != self.WALL, neighbors)
return neighbors
def get_path_cost(self, from_node, to_node):
dx = from_node[0] - to_node[0]
dy = from_node[1] - to_node[1]
return (dx ** 2 + dy ** 2) ** 0.5
def estimate_heuristic_cost(self, node):
x1, y1 = node
x2, y2 = self.goal
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def solve(self):
path = super().solve()
path_str = ''
for x, line in enumerate(self.map):
for y, char in enumerate(line):
if (x, y) in path and char == self.ROAD:
path_str += self.PATH
else:
path_str += char
path_str += '\n'
return path_str
class TestMaze(TestCase):
def test_solve(self):
map_data = [
'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',
'OS O O O O O',
'O O O O O O O OOOO GO',
'O O O O OOOO O O OOOO',
'OOOOOOOOOOOO OOOOO O O O O',
'O O O O O',
'O OOO O O OOOOOOOOO O',
'O OO O OOOO O O OO O',
'O O O O O O O O',
'O OOO O O O O O',
'O O O O O',
'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',
]
actual = Maze(map_data).solve()
expected = '\n'.join([
'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',
'OS* O ** O O O ***** O',
'O *O *O *O O O **** O *OOOO GO',
'O ** O ** O O *OOOO* O *O OOOO',
'OOOOOOOOOOOO*OOOOO *O *O *O O',
'O * O *O *O **** O',
'O OOO * O *O *OOOOOOOOO* O',
'O OO O *OOOO* O *O *** OO* O',
'O O O **** O *O* O * O* O',
'O OOO O O * O *O* O',
'O O O O * O',
'OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO',
'',
])
self.assertEqual(expected, actual)
|
nilq/baby-python
|
python
|
"""cmlkit exceptions."""
class DependencyMissing(Exception):
"""Raised when an optional dependency is needed."""
...
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
__description__ = \
"""
compareAncestor.py
"""
__author__ = "Michael J. Harms"
__usage__ = "comapreAncestors.py ancestor_file1 ancestor_file2"
__date__ = "100726"
import sys, phyloBase
class CompareAncestorError(Exception):
"""
General error class for this module.
"""
pass
def readAncestorFile(ancestor_file):
"""
"""
f = open(ancestor_file,'r')
lines = f.readlines()
f.close()
# Skip comments and blank lines
lines = [l for l in lines if l.strip() != "" and l[0] != "#"]
out = []
num_states = (len(lines[0].split())-2)/2
for l in lines[1:]:
position = int(l[7:12])
tmp_out = []
for i in range(num_states):
aa = l[12+12*i:18+12*i].strip()
pp = float(l[18+12*i:24+12*i])
tmp_out.append((aa,pp))
out.append((position,tmp_out))
return out
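# (Assumed input layout, inferred from the fixed-width slicing above: each data
# row carries the site number in columns 8-12, followed by repeated 12-character
# blocks holding a state and its posterior probability.)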
def compareAncestors(ancestor1_file,ancestor2_file,ambiguous_cutoff=0.8):
"""
"""
anc1 = readAncestorFile(ancestor1_file)
anc2 = readAncestorFile(ancestor2_file)
anc1_pos = [p[0] for p in anc1]
anc2_pos = [p[0] for p in anc2]
only_in_anc1 = [p for p in anc1_pos if p not in anc2_pos]
only_in_anc2 = [p for p in anc2_pos if p not in anc1_pos]
if len(only_in_anc1) > 0:
print "# Warning: some sites only in ancestor 1:"
print "".join(["# %i\n" % p for p in only_in_anc1]),
if len(only_in_anc2) > 0:
print "# Warning: some sites only in ancestRr 2:"
print "".join(["# %i\n" % p for p in only_in_anc2]),
all_pos = [p for p in anc1_pos if p not in only_in_anc1]
all_pos.extend([p for p in anc2_pos if p not in only_in_anc2 and p not in all_pos])
anc1_dict = dict([a for a in anc1 if a[0] in anc1_pos])
anc2_dict = dict([a for a in anc2 if a[0] in anc2_pos])
out = []
out.append("# pos new_state old_state same? state_type?")
out.append(" ambiguity pp_new pp_old\n")
out.append("#\n# same?\n")
out.append("# \'*\' -> changed\n")
out.append("# \' \' -> no change\n")
out.append("# flipped_with_alternate?\n")
out.append("# \'*\' -> took new state\n")
out.append("# \'~\' -> took alternate state\n")
out.append("# \' \' -> no change in state\n")
out.append("# ambig_state key:\n")
out.append("# \'~\' -> ambiguous in both\n")
out.append("# \'-\' -> newly ambiguous\n")
out.append("# \'+\' -> newly well supported\n")
out.append("# \' \' -> well suppported in both\n")
for p in all_pos:
s1 = anc1_dict[p]
s2 = anc2_dict[p]
# See if the new reconstruction has the same residue at this position
same = "*"
if s1[0][0] == s2[0][0]:
same = " "
# Check to see if new state existed as less likely state in original
# reconstruction
flipped = " "
if same == "*":
if s1[0] in [a[0] for a in s2[1:]]:
flipped = "~"
else:
flipped = "*"
# Remained ambiguous
if s1[0][1] <= ambiguous_cutoff and s2[0][1] <= ambiguous_cutoff:
ambig_state = "~"
        # Newly ambiguous
        elif s1[0][1] <= ambiguous_cutoff and s2[0][1] > ambiguous_cutoff:
            ambig_state = "-"
        # Newly well supported
        elif s1[0][1] > ambiguous_cutoff and s2[0][1] <= ambiguous_cutoff:
            ambig_state = "+"
# Remained well supported
else:
ambig_state = " "
check_me = " "
if ambig_state == "-" or \
(same == "*" and ambig_state == " "):
check_me = "!"
out.append("%5i %s %s %s %s %s %6.2f%6.2f %s\n" % (p,s1[0][0],s2[0][0],
same,flipped,ambig_state,s1[0][1],s2[0][1],check_me))
return "".join(out)
def main(argv=None):
    """
    Parse command-line arguments and run the comparison.
    """
    if argv is None:
        argv = sys.argv[1:]
try:
ancestor1_file = argv[0]
ancestor2_file = argv[1]
except IndexError:
err = "Incorrect number of arguments!\n\n%s\n\n" % __usage__
raise CompareAncestorError(err)
out = compareAncestors(ancestor1_file,ancestor2_file)
print out
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
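# NOTE: the snippet below assumes the aliases `km`/`kl` and the noisy-MNIST
# arrays already exist. A minimal setup sketch (assuming tf.keras and MNIST;
# the noise level is an arbitrary choice) would be:
import numpy as np
from tensorflow import keras
km = keras.models   # assumed alias for keras.models
kl = keras.layers   # assumed alias for keras.layers
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train_conv = x_train.astype('float32').reshape(-1, 28, 28, 1) / 255.0
x_test_conv = x_test.astype('float32').reshape(-1, 28, 28, 1) / 255.0
noise = 0.5
x_train_noisy = np.clip(x_train_conv + noise * np.random.normal(size=x_train_conv.shape), 0.0, 1.0)
x_test_noisy = np.clip(x_test_conv + noise * np.random.normal(size=x_test_conv.shape), 0.0, 1.0)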
conv_encoder = km.Sequential(name="ConvEncoderModel")
conv_encoder.add(kl.Conv2D(16, (3,3) , activation='relu', input_shape=(28,28,1) , padding='same' ))
conv_encoder.add(kl.MaxPooling2D((2, 2), padding='same'))
conv_encoder.add(kl.Conv2D(8, (3, 3), activation='relu', padding='same'))
conv_encoder.add(kl.MaxPooling2D((2, 2), padding='same'))
conv_encoder.add(kl.Conv2D(8, (3, 3), activation='relu', padding='same'))
conv_encoder.add(kl.MaxPooling2D((2, 2), padding='same'))
conv_decoder = km.Sequential(name="ConvDecoderModel")
conv_decoder.add(kl.Conv2D(8, (3, 3), activation='relu', input_shape = (4, 4, 8), padding='same'))
conv_decoder.add(kl.UpSampling2D((2, 2)))
conv_decoder.add(kl.Conv2D(8, (3, 3), activation='relu', padding='same'))
conv_decoder.add(kl.UpSampling2D((2, 2)))
conv_decoder.add(kl.Conv2D(16, (3, 3), activation='relu'))
conv_decoder.add(kl.UpSampling2D((2, 2)))
conv_decoder.add(kl.Conv2D(1, (3, 3), activation='sigmoid', padding='same'))
conv_autoencoder = km.Sequential(name="ConvAutoencoderModel")
conv_autoencoder.add(conv_encoder)
conv_autoencoder.add(conv_decoder)
conv_autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
conv_autoencoder.fit(x_train_noisy, x_train_conv, epochs=10, batch_size=256, validation_data=(x_test_noisy, x_test_conv))
|
nilq/baby-python
|
python
|
"""Tests for appname application."""
from unittest import TestCase
from django.test import TestCase as DjangoTestCase
class TestSuiteTestCase(TestCase):
"""General test to make sure that the setup works."""
def test_test_suite_can_be_run(self):
self.assertTrue(True)
class ExampleTestCase(DjangoTestCase):
"""Tests for Example model class."""
fixtures = ['test_data']
urls = 'appname.tests.urls'
def test_example_view_is_callable(self):
resp = self.client.get('/example/')
self.assertEqual(resp.status_code, 200)
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module EXPAND-NETWORKS-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EXPAND-NETWORKS-SMI
# Produced by pysmi-0.3.4 at Wed May 1 13:07:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, iso, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Unsigned32, Gauge32, enterprises, ModuleIdentity, NotificationType, Integer32, Counter32, Bits, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "iso", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Unsigned32", "Gauge32", "enterprises", "ModuleIdentity", "NotificationType", "Integer32", "Counter32", "Bits", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
expand_networks = MibIdentifier((1, 3, 6, 1, 4, 1, 3405)).setLabel("expand-networks")
expandSystemId = MibScalar((1, 3, 6, 1, 4, 1, 3405, 1), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: expandSystemId.setStatus('mandatory')
if mibBuilder.loadTexts: expandSystemId.setDescription('This object identifier defines the object identifiers that are assigned to the various Expand-Networks operating systems, and hence are returned as values for sysObjectID leaf of MIB 2.')
expandProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 2))
acceleratorOs = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 3))
p2pAccelerator = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 4))
management = MibIdentifier((1, 3, 6, 1, 4, 1, 3405, 10))
mibBuilder.exportSymbols("EXPAND-NETWORKS-SMI", p2pAccelerator=p2pAccelerator, expandSystemId=expandSystemId, management=management, expand_networks=expand_networks, expandProducts=expandProducts, acceleratorOs=acceleratorOs)
|
nilq/baby-python
|
python
|
'''
Approach:
Bitmap 1 answers "has this word been seen?": hash each incoming word and set the corresponding bit to 1.
Bitmap 2 flags duplicates: if an incoming word is already marked in bitmap 1, set its bit to 1.
A queue stores the words that are still unique. The newest unique word sits at the tail; pushes always
happen at the tail, but pops may happen anywhere.
(A queue is used mainly so that, if the large file contains only distinct words, we avoid traversing an
entire hashmap at the end - which would amount to reading the large file twice.)
'''
# Pseudocode
# First pass over the file
for word in largeFile:
    if bitmap1.isExist(word):
        bitmap2.add(word)
        pop word from queue
    else:
        bitmap1.add(word)
        push word to queue
    if len(queue) > maxSize:  # budgeting per-read I/O plus the queue and the two bitmaps at 16GB gives maxSize = 7GB
        write queue to disk
# Collect the result
firstWord = queue[0]  # the first still-unique word held in memory
# read earlier candidates back from disk
while word = read(disk):
    if bitmap2.isExist(word):
        continue
    else:
        break
if word:
    firstWord = word  # if the disk holds an earlier unique word, prefer it
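# A runnable miniature of the same idea (Python sets stand in for the two
# bitmaps, collections.deque for the queue; the disk-spill step is omitted):
from collections import deque
def first_unique_word(words):
    seen = set()       # bitmap 1: word has been seen at least once
    repeated = set()   # bitmap 2: word has been seen more than once
    queue = deque()    # still-unique words, oldest first
    for word in words:
        if word in seen:
            repeated.add(word)
        else:
            seen.add(word)
            queue.append(word)
    # drop candidates that later turned out to be duplicates
    while queue and queue[0] in repeated:
        queue.popleft()
    return queue[0] if queue else None
assert first_unique_word(["a", "b", "a", "c"]) == "b"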
|
nilq/baby-python
|
python
|
import datetime
from django.conf import settings
from rest_framework.settings import APISettings
from .utils import hash_string
USER_SETTINGS = getattr(settings, 'JWT2FA_AUTH', None)
DEFAULTS = {
# Length of the verification code (digits)
'CODE_LENGTH': 7,
# Characters used in the verification code
'CODE_CHARACTERS': '0123456789',
# Secret key to use for signing the Code Tokens
'CODE_TOKEN_SECRET_KEY': hash_string('2fa-code-' + settings.SECRET_KEY),
# Secret string to extend the verification code with
'CODE_EXTENSION_SECRET': hash_string('2fa-ext-' + settings.SECRET_KEY),
# How long the code token is valid
'CODE_EXPIRATION_TIME': datetime.timedelta(minutes=5),
# Throttle limit for code token requests from same IP
'CODE_TOKEN_THROTTLE_RATE': '12/3h',
# How much time must pass between verification attempts, i.e. to
# request authentication token with a with the same code token and a
# verification code
'AUTH_TOKEN_RETRY_WAIT_TIME': datetime.timedelta(seconds=2),
# Function that sends the verification code to the user
'CODE_SENDER': 'drf_jwt_2fa.sending.send_verification_code_via_email',
# From Address used by the e-mail sender
'EMAIL_SENDER_FROM_ADDRESS': settings.DEFAULT_FROM_EMAIL,
# Set to this to a (translated) string to override the default
# message subject of the e-mail sender
'EMAIL_SENDER_SUBJECT_OVERRIDE': None,
# Set to this to a (translated) string to override the default
# message body of the e-mail sender
'EMAIL_SENDER_BODY_OVERRIDE': None,
}
IMPORT_STRINGS = [
'CODE_SENDER',
]
api_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS)
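# Example override in a Django project's settings module; any subset of the
# DEFAULTS above may be given, and unlisted keys keep their defaults
# (values here are illustrative):
#
#     JWT2FA_AUTH = {
#         'CODE_LENGTH': 6,
#         'CODE_EXPIRATION_TIME': datetime.timedelta(minutes=10),
#     }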
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.forms.models import ModelForm, model_to_dict
from .constants import (MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED)
from .utils import django_17
class BaseModeratedObjectForm(ModelForm):
class Meta:
if django_17():
exclude = '__all__'
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
if instance:
try:
if instance.moderated_object.status in\
[MODERATION_STATUS_PENDING, MODERATION_STATUS_REJECTED] and\
not instance.moderated_object.moderator.\
visible_until_rejected:
initial = model_to_dict(
instance.moderated_object.changed_object)
kwargs.setdefault('initial', {})
kwargs['initial'].update(initial)
except ObjectDoesNotExist:
pass
super(BaseModeratedObjectForm, self).__init__(*args, **kwargs)
|
nilq/baby-python
|
python
|
"""Lightly modified build_ext which captures stderr.
isort:skip_file
"""
# IMPORTANT: `import setuptools` MUST come before any module imports `distutils`
# background: https://bugs.python.org/issue23102
import setuptools # noqa: F401
import distutils.command.build_ext
import distutils.core
import io
import os
import sys
import tempfile
from typing import IO, Any, List, TextIO
from httpstan.config import HTTPSTAN_DEBUG
def _get_build_extension() -> distutils.command.build_ext.build_ext: # type: ignore
if HTTPSTAN_DEBUG: # pragma: no cover
distutils.log.set_verbosity(distutils.log.DEBUG) # type: ignore
dist = distutils.core.Distribution()
# Make sure build respects distutils configuration
dist.parse_config_files(dist.find_config_files()) # type: ignore
build_extension = distutils.command.build_ext.build_ext(dist) # type: ignore
build_extension.finalize_options()
return build_extension
def run_build_ext(extensions: List[distutils.core.Extension], build_lib: str) -> str:
"""Configure and call `build_ext.run()`, capturing stderr.
Compiled extension module will be placed in `build_lib`.
All messages sent to stderr will be saved and returned. These
messages are typically messages from the compiler or linker.
"""
# utility functions for silencing compiler output
def _has_fileno(stream: TextIO) -> bool:
"""Returns whether the stream object has a working fileno()
Suggests whether _redirect_stderr is likely to work.
"""
try:
stream.fileno()
except (AttributeError, OSError, IOError, io.UnsupportedOperation): # pragma: no cover
return False
return True
def _redirect_stderr_to(stream: IO[Any]) -> int:
"""Redirect stderr for subprocesses to /dev/null.
Returns
-------
orig_stderr: copy of original stderr file descriptor
"""
sys.stderr.flush()
stderr_fileno = sys.stderr.fileno()
orig_stderr = os.dup(stderr_fileno)
os.dup2(stream.fileno(), stderr_fileno)
return orig_stderr
build_extension = _get_build_extension()
build_extension.build_lib = build_lib
# silence stderr for compilation, if stderr is silenceable
stream = tempfile.TemporaryFile(prefix="httpstan_")
redirect_stderr = _has_fileno(sys.stderr) and not HTTPSTAN_DEBUG
compiler_output = ""
if redirect_stderr:
orig_stderr = _redirect_stderr_to(stream)
build_extension.extensions = extensions
try:
build_extension.run()
finally:
if redirect_stderr:
stream.seek(0)
compiler_output = stream.read().decode()
stream.close()
# restore
os.dup2(orig_stderr, sys.stderr.fileno())
return compiler_output
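# Illustrative use (the extension name and source file are hypothetical):
#
#     ext = distutils.core.Extension("demo", sources=["demo.c"])
#     compiler_messages = run_build_ext([ext], build_lib="build")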
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init
import numpy as np
from unet import *
from utils import *
def weight_init(m):
if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
if isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.fill_(0.0)
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
class DescMatchingModule(nn.Module):
"""
DescMatchingModule
"""
def __init__(self, in_channels, out_channels):
super(DescMatchingModule, self).__init__()
self.fc = nn.Linear(in_channels, out_channels)
self.apply(weight_init)
def forward(self, out1, out2):
b, c, h1, w1 = out1.size()
b, c, h2, w2 = out2.size()
out1 = out1.view(b, c, h1*w1).permute(0, 2, 1).view(b, h1*w1, 1, c)
out2 = out2.view(b, c, h2*w2).permute(0, 2, 1).view(b, 1, h2*w2, c)
# all possible descriptor pairs
out = out1 * out2
out = out.contiguous().view(-1, c)
out = self.fc(out)
# normalize input features
dn1 = torch.norm(out1, p=2, dim=3) # Compute the norm.
out1 = out1.div(1e-6 + torch.unsqueeze(dn1, 3)) # Divide by norm to normalize.
dn2 = torch.norm(out2, p=2, dim=3) # Compute the norm.
out2 = out2.div(1e-6 + torch.unsqueeze(dn2, 3)) # Divide by norm to normalize.
out_norm = torch.norm(out1 - out2, p=2, dim=3)
return out, out_norm
class Net(nn.Module):
"""
What follows is awesomeness redefined
"""
def __init__(self, in_channels=1, out_channels=2, batchnorm=False, threeD=False, depth=4, width=16,\
device="cuda:0", k=512, scale_factor=8):
super(Net, self).__init__()
self.device = device
self.k = k
self.scale_factor = scale_factor
self.CNN_branch = UNet(depth=depth, width=width, growth_rate=2, in_channels=in_channels, out_channels=1)
feature_channels = self.CNN_branch.feature_channels
self.desc_matching_layer = DescMatchingModule(feature_channels, out_channels)
def forward(self, x1, x2):
k = self.k
scale_factor = self.scale_factor
# landmark detection and description
heatmaps1, features1 = self.CNN_branch(x1)
heatmaps2, features2 = self.CNN_branch(x2)
# sampling top k landmark locations and descriptors
landmarks1, landmark_probs1, desc1 = self.sampling_layer(heatmaps1, features1, is_training=True)
landmarks2, landmark_probs2, desc2 = self.sampling_layer(heatmaps2, features2, is_training=True)
# descriptor matching probabilities and descriptor norms
desc_pairs_score, desc_pairs_norm = self.desc_matching_layer(desc1, desc2)
return landmark_probs1, landmark_probs2, landmarks1, landmarks2, desc_pairs_score, desc_pairs_norm
def predict(self, x1, x2, deformation=None, conf_thresh=0.01, k=None):
if k is None:
k = self.k
scale_factor = self.scale_factor
b, _, H, W = x1.shape
# landmark detection and description
heatmaps1, features1 = self.CNN_branch(x1)
heatmaps2, features2 = self.CNN_branch(x2)
# sampling top k landmark locations and descriptors
pts1, _, desc1 = self.sampling_layer(heatmaps1, features1, conf_thresh=conf_thresh, is_training=False)
pts2, _, desc2 = self.sampling_layer(heatmaps2, features2, conf_thresh=conf_thresh, is_training=False)
# descriptor matching probabilities and descriptor norms
desc_pairs_score, desc_pairs_norm = self.desc_matching_layer(desc1, desc2)
# post processing
landmarks1 = convert_points_to_image(pts1, H, W)
landmarks2 = convert_points_to_image(pts2, H, W)
b, k1, _ = landmarks1.shape
_, k2, _ = landmarks2.shape
# two-way (bruteforce) matching
desc_pairs_score = F.softmax(desc_pairs_score, dim=1)[:,1].view(b, k1, k2)
desc_pairs_score = desc_pairs_score.detach().to("cpu").numpy()
desc_pairs_norm = desc_pairs_norm.detach().to("cpu").numpy()
matches = list()
for i in range(b):
pairs_score = desc_pairs_score[i]
pairs_norm = desc_pairs_norm[i]
match_cols = np.zeros((k1, k2))
match_cols[np.argmax(pairs_score, axis=0), np.arange(k2)] = 1
match_rows = np.zeros((k1, k2))
match_rows[np.arange(k1), np.argmax(pairs_score, axis=1)] = 1
match = match_rows * match_cols
match_cols = np.zeros((k1, k2))
match_cols[np.argmin(pairs_norm, axis=0), np.arange(k2)] = 1
match_rows = np.zeros((k1, k2))
match_rows[np.arange(k1), np.argmin(pairs_norm, axis=1)] = 1
match = match * match_rows * match_cols
matches.append(match)
matches = np.array(matches)
if deformation is not None:
deformation = deformation.permute(0, 3, 1, 2) #b, 2, h, w
pts1_projected = F.grid_sample(deformation, pts2) #b, 2, 1, k
pts1_projected = pts1_projected.permute(0, 2, 3, 1) #b, 1, k, 2
landmarks1_projected = convert_points_to_image(pts1_projected, H, W)
return landmarks1, landmarks2, matches, landmarks1_projected
else:
return landmarks1, landmarks2, matches
def sampling_layer(self, heatmaps, features, conf_thresh=0.000001, is_training=True):
k = self.k
scale_factor = self.scale_factor
device = self.device
b, _, H, W = heatmaps.shape
heatmaps = torch.sigmoid(heatmaps)
"""
Convert pytorch -> numpy after maxpooling and unpooling
This is faster way of sampling while ensuring sparsity
One could alternatively apply non-maximum suppresion (NMS)
"""
if is_training:
heatmaps1, indices = F.max_pool2d(heatmaps, (scale_factor, scale_factor), stride=(scale_factor, scale_factor), return_indices=True)
heatmaps1 = F.max_unpool2d(heatmaps1, indices, (scale_factor, scale_factor))
heatmaps1 = heatmaps1.to("cpu").detach().numpy().reshape(b, H, W)
else:
heatmaps1 = heatmaps.to("cpu").detach().numpy().reshape(b, H, W)
# border mask, optional
border = 10
border_mask = np.zeros_like(heatmaps1)
border_mask[:, border : H - border, border : W - border] = 1.
heatmaps1 = heatmaps1 * border_mask
all_pts= []
for heatmap in heatmaps1:
xs, ys = np.where(heatmap >= conf_thresh) # get landmark locations above conf_thresh
if is_training:
if len(xs) < k:
xs, ys = np.where(heatmap >= 0.0)
pts = np.zeros((len(xs), 3))
pts[:, 0] = ys
pts[:, 1] = xs
pts[:, 2] = heatmap[xs, ys]
inds = np.argsort(pts[:, 2])
            pts = pts[inds[::-1], :]  # sort by probability scores
pts = pts[:k, :2] #take top k
# Interpolate into descriptor map using 2D point locations.
samp_pts = convert_points_to_torch(pts, H, W, device=device)
all_pts.append(samp_pts)
all_pts = torch.cat(all_pts, dim=0)
pts_score = F.grid_sample(heatmaps, all_pts) #b, 1, 1, k
pts_score = pts_score.permute(0, 3, 1, 2).view(b, -1)
desc = [F.grid_sample(desc, all_pts) for desc in features]
desc = torch.cat(desc, dim=1)
return all_pts, pts_score, desc
def weight_init(m):
if isinstance(m, nn.Conv3d) or isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
if isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.fill_(0.0)
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
if __name__ == '__main__':
pass
|
nilq/baby-python
|
python
|
# modified jetbot physical implementation
import atexit
import subprocess
import traitlets
from traitlets.config.configurable import Configurable
class Motor(Configurable):
value = traitlets.Float()
# config
alpha = traitlets.Float(default_value=1.0).tag(config=True)
beta = traitlets.Float(default_value=0.0).tag(config=True)
def __init__(self, driver, channel, *args, **kwargs):
super(Motor, self).__init__(*args, **kwargs) # initializes traitlets
self._motor = "J" + str(channel)
atexit.register(self._release)
@traitlets.observe('value')
def _observe_value(self, change):
self._write_value(change['new'])
def _write_value(self, value):
"""Sets motor value between [-3, 3] rad/s"""
mapped_value = float(3 * (self.alpha * value + self.beta))
subprocess.call(["motor_util", "-n", self._motor, "set", "--mode", "4", "--velocity", str(mapped_value)])
def _release(self):
"""Stops motor by releasing control"""
subprocess.call(["motor_util", "-n", self._motor, "set", "--mode", "0"])
|
nilq/baby-python
|
python
|
from gui.contract import IView, IPresenter
from gui.presenter import Presenter
import time
from tkinter import *
from command.queue.buildthread import BuildThread
from command.queue.properties import QueueProperties
from utils.context import Context
from utils.travian_utils import login_to_account, create_browser
from utils.util import getVillagesInfo
from gui.scrolled_view import VerticalScrolledFrame
from gui.disable_frame import dFrame
from command.queue.dataclasses import *
class View(IView):
def __init__(self):
super(View, self).__init__()
self.root: Tk = Tk()
self.root.title("GUI на Python")
self.root.geometry("640x480")
self.root.protocol("WM_DELETE_WINDOW", self.onQuit)
self.root.bind("<Destroy>", self.onDestroy)
self.main_frame = dFrame(self.root)
self.__presenter: IPresenter = Presenter(self)
self.__build_properties: BuildProperties = None
self.__auto_build_vars: list = None
def mainloop(self):
self.showLoginWindow()
self.root.mainloop()
def onQuit(self):
self.__presenter.quit()
def onDestroy(self, event):
pass
        # Called each time a component in the hierarchy is destroyed (including all its children)
# print ('onDestroy')
def authorization(self):
self.__presenter.login('', '', '')
def startBotWork(self):
for index, item in enumerate(self.__auto_build_vars):
self.__build_properties.info_list[index].auto_build_res = bool(item.get())
self.__presenter.startWork(self.__build_properties)
def stopBotWork(self):
self.__presenter.stopWork()
def showLoginWindow(self):
for widget in self.main_frame.winfo_children():
widget.destroy()
server_frame = Frame(self.main_frame)
        server_label = Label(master=server_frame, text='Server')
server_label.pack(side="left")
server_choices = [
'https://ts3.travian.ru',
'test_server_1',
'test_server_2'
]
server = StringVar()
server.set(server_choices[0])
server_choice = OptionMenu(server_frame, server, *server_choices)
server_choice.pack(side="left", fill='x')
server_frame.pack(fill='x')
login_frame = Frame(self.main_frame)
        login_label = Label(master=login_frame, text='Login')
login_label.pack(side="left")
login = StringVar()
login_entry = Entry(master=login_frame, textvariable=login)
login_entry.pack(side="left", fill='x')
login_frame.pack(fill='x')
psw_frame = Frame(self.main_frame)
        psw_label = Label(master=psw_frame, text='Password')
psw_label.pack(side="left")
psw = StringVar()
psw_entry = Entry(master=psw_frame, show='*', textvariable=psw)
psw_entry.pack(side="left", fill="x")
psw_frame.pack(fill='x')
        message_button = Button(master=self.main_frame, text='Authorization', command=self.authorization)
message_button.pack(side="top", fill="x")
self.main_frame.pack(fill=BOTH, expand=YES)
def showVillagePropertiesWindow(self, default_properties: BuildProperties):
self.__build_properties = default_properties
for widget in self.main_frame.winfo_children():
widget.destroy()
width = 640
height = 480
villages_properties_frame = VerticalScrolledFrame(
self.main_frame,
width=width,
height=height
)
info_frame = Frame(villages_properties_frame)
        info_label = Label(master=info_frame, text='Configure bot working parameters')
info_label.pack()
        start_button = Button(master=info_frame, text='Start the bot', command=self.startBotWork)
start_button.pack(fill='x')
info_frame.pack(side='top', fill='x')
props_frame = Frame(villages_properties_frame)
self.__auto_build_vars = []
for info in default_properties.info_list:
build_info: BuildVillageInfo = info
vil_prop_frame = Frame(props_frame)
info_label = build_info.info.name + ' :(' + str(build_info.info.point.x) + '|' + str(build_info.info.point.y) + ')'
vil_info_label = Label(master=vil_prop_frame, text=info_label)
vil_info_label.pack(side='left')
auto_build_var = IntVar()
auto_build_var.set(int(build_info.auto_build_res))
button = Checkbutton(
vil_prop_frame,
                text='Automatically build resources in the village',
variable=auto_build_var
)
self.__auto_build_vars.append(auto_build_var)
button.pack(side='left', fill='x')
vil_prop_frame.pack(side='top', fill='x')
props_frame.pack(side='top', fill=BOTH)
villages_properties_frame.pack(fill=BOTH, expand=YES)
self.main_frame.pack(fill=BOTH, expand=YES)
def showBotWorkingWindow(self):
for widget in self.main_frame.winfo_children():
widget.destroy()
server_frame = Frame(self.main_frame)
        server_label = Label(master=server_frame, text='Bot activity log')
server_label.pack(side="left")
        message_button = Button(master=self.main_frame, text='Stop the bot', command=self.stopBotWork)
message_button.pack(side="top", fill="x")
self.main_frame.pack(fill=BOTH, expand=YES)
def disableWindow(self):
self.main_frame.disable()
def enableWindow(self):
self.main_frame.enable()
def quit(self):
self.root.destroy()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Given a configuration executes p2rank and all components.
#
import json
import os
import logging
import requests
import shutil
import subprocess
import conservation_wrapper
from model import *
from output_prankweb import prepare_output_prankweb
from output_p2rank import prepare_output_p2rank
logger = logging.getLogger("prankweb.executor")
logger.setLevel(logging.DEBUG)
def execute(configuration: Execution) -> ExecutionResult:
# TODO Add configuration validation ...
_prepare_directories(configuration)
_create_execute_command(configuration)
structure = _prepare_structure(configuration)
conservation = _prepare_conservation(structure, configuration)
p2rank_input = _prepare_p2rank_input(
structure, configuration, conservation)
p2rank_output = os.path.join(
configuration.working_directory, "p2rank-output")
_execute_p2rank(p2rank_input, p2rank_output, configuration)
result = _prepare_output(
p2rank_output, structure, conservation, configuration)
logger.info("All done")
return result
def _prepare_directories(configuration: Execution):
os.makedirs(configuration.working_directory, exist_ok=True)
def _create_execute_command(configuration: Execution):
if configuration.execute_command is not None:
return
def execute_command(command: str, ignore_return_code: bool = True):
logger.debug(f"Executing '{command}' ...")
result = subprocess.run(
command,
shell=True,
env=os.environ.copy(),
stdout=configuration.stdout,
stderr=configuration.stderr,
)
# Throw for non-zero (failure) return code.
if not ignore_return_code:
result.check_returncode()
logger.debug(f"Executing '{command}' ... done")
configuration.execute_command = execute_command
# region Prepare structure
def _prepare_structure(configuration: Execution) -> Structure:
metadata = {}
logger.info("Preparing structure ...")
raw_structure_file = _prepare_raw_structure_file(configuration, metadata)
structure_file = _filter_raw_structure_file(
raw_structure_file, configuration)
# Use raw file as we need all chains for the visualisation.
fasta_files = _prepare_fasta_files(raw_structure_file, configuration)
return Structure(
raw_structure_file,
structure_file,
fasta_files,
metadata=metadata
)
def _prepare_raw_structure_file(
        configuration: Execution,
        metadata: typing.Dict[str, any]) -> str:
result = os.path.join(configuration.working_directory, "structure-raw.")
if configuration.lazy_execution and os.path.exists(result):
logger.info("I'm lazy and structure file already exists")
return result
if configuration.structure_code is not None:
configuration.structure_extension = "pdb"
result += configuration.structure_extension
_download_from_pdb(configuration.structure_code, result)
elif configuration.structure_file is not None:
configuration.structure_extension = \
_extension(configuration.structure_file)
result += configuration.structure_extension
shutil.copy(configuration.structure_file, result)
elif configuration.structure_uniprot is not None:
configuration.structure_extension = "cif"
result += configuration.structure_extension
_download_from_alpha_fold(
configuration.structure_uniprot, result, metadata)
else:
raise Exception("Missing structure.")
return result
def _download_from_pdb(code: str, destination: str) -> None:
url = f"https://files.rcsb.org/download/{code}.pdb"
_download(url, destination)
def _download(url: str, destination: str) -> None:
logger.debug(f"Downloading '{url}' to '{destination}' ...")
response = requests.get(url)
    if not 200 <= response.status_code < 300:
raise Exception(f"Download failed with code: {response.status_code}")
with open(destination, "wb") as stream:
stream.write(response.content)
def _extension(file_name: str) -> str:
"""For 'name.ext' return 'ext'."""
return file_name[file_name.rindex(".") + 1:]
def _download_from_alpha_fold(
code: str, destination: str, metadata: typing.Dict[str, any]) -> any:
entry_url = f"https://alphafold.ebi.ac.uk/api/prediction/{code}"
entry_response = requests.get(entry_url)
entry_content = json.loads(entry_response.content)
metadata["alpha-fold"] = entry_content
if len(entry_content) == 0:
raise Exception(f"No Alphafold entry found for: {code}")
assert len(entry_content) == 1, \
f"One entry expected for AlphaFold, found {len(entry_content)}"
cif_url = entry_content[0]["cifUrl"]
_download(cif_url, destination)
def _filter_raw_structure_file(
raw_file: str, configuration: Execution) -> str:
if configuration.structure_sealed:
return raw_file
result = os.path.join(
configuration.working_directory,
"structure." + _extension(raw_file)
)
command = f"{configuration.p2rank} transform reduce-to-chains" + \
f" -f {raw_file}" + \
f" --out_file {result} "
if configuration.chains:
command += "-chains " + ",".join(configuration.chains)
else:
assert False, "Structure is not sealed and no chains were selected."
configuration.execute_command(command)
return result
def _prepare_fasta_files(
structure_file: str, configuration: Execution) \
-> typing.Dict[str, str]:
output = os.path.join(configuration.working_directory, "fasta")
os.makedirs(output, exist_ok=True)
configuration.execute_command(
f"{configuration.p2rank} analyze fasta-masked"
f" --f {structure_file}"
f" --o {output}"
)
return {
        # The part after the underscore is the chain code, for example: 2W83_A.fasta
name[name.rindex("_") + 1:name.rindex(".")]: os.path.join(output, name)
for name in os.listdir(output) if name.endswith(".fasta")
}
# endregion
# region Compute conservation
def _prepare_conservation(
structure: Structure, configuration: Execution) \
-> typing.Dict[str, str]:
if configuration.conservation == ConservationType.NONE:
return {}
logger.info("Computing conservation ...")
output_directory = os.path.join(
configuration.working_directory,
"conservation")
os.makedirs(output_directory, exist_ok=True)
result = {}
cache = {}
for chain, fasta_file in structure.sequence_files.items():
working_directory = os.path.join(
configuration.working_directory,
f"conservation-{chain}")
os.makedirs(working_directory, exist_ok=True)
output_file = os.path.join(output_directory, f"conservation-{chain}")
fasta = _read_fasta(fasta_file)
if fasta in cache:
logger.info("We already have conservation for given chain.")
shutil.copy(cache[fasta], output_file)
else:
_prepare_conservation_for_chain(
fasta_file, working_directory, output_file,
configuration)
cache[fasta] = output_file
result[chain] = output_file
return result
def _prepare_conservation_for_chain(
fasta_file: str,
working_directory: str,
output_file: str,
configuration: Execution):
if os.path.exists(output_file) and configuration.lazy_execution:
logger.info("I'm lazy and conservation file already exists.")
return
conservation_type = configuration.conservation
if conservation_type == ConservationType.ALIGNMENT:
conservation_wrapper.compute_alignment_based_conservation(
fasta_file, working_directory, output_file,
configuration.execute_command)
elif conservation_type == ConservationType.HMM:
conservation_wrapper.compute_hmm_based_conservation(
fasta_file, working_directory, output_file,
configuration.execute_command)
else:
raise Exception("Unknown conservation type!")
def _read_fasta(path):
with open(path, "r") as stream:
stream.readline()
return stream.read()
# endregion
# region Execute p2rank
def _prepare_p2rank_input(
structure: Structure,
configuration: Execution,
conservation: typing.Dict[str, str]) -> str:
directory = os.path.join(configuration.working_directory, "p2rank-input")
os.makedirs(directory, exist_ok=True)
structure_file = os.path.join(
directory, "structure." + configuration.structure_extension)
shutil.copy(structure.structure_file, structure_file)
for chain, file in conservation.items():
shutil.copy(
file,
os.path.join(directory, f"structure{chain.upper()}.hom"))
return structure_file
def _execute_p2rank(
input_structure: str, output_directory: str,
configuration: Execution):
command = (
f"{configuration.p2rank} predict "
f"-c {configuration.p2rank_configuration} "
f"-threads 1 "
f"-f {input_structure} "
f"-o {output_directory} "
f"--log_to_console 1"
)
configuration.execute_command(command)
# endregion
def _prepare_output(
p2rank_output: str,
structure: Structure,
conservation: typing.Dict[str, str],
configuration: Execution) -> ExecutionResult:
logger.info("Collecting output ...")
if configuration.output_type == OutputType.P2RANK:
return prepare_output_p2rank(
p2rank_output, structure, conservation, configuration)
elif configuration.output_type == OutputType.PRANKWEB:
return prepare_output_prankweb(
p2rank_output, structure, conservation, configuration)
else:
raise Exception("Invalid output type!")
|
nilq/baby-python
|
python
|
import ConfigParser
def readConfig():
    config = ConfigParser.ConfigParser()
    config.readfp(open("sharenet.ini"))
    # Return the parsed settings so callers can actually use them.
    return {
        "binDir": config.get("Import", "bin"),
        "inDir": config.get("Import", "in"),
        "workDir": config.get("Import", "work"),
        "doneDir": config.get("Import", "done"),
        "dbHost": config.get("Database", "host"),
        "dbName": config.get("Database", "name"),
        "dbUser": config.get("Database", "uid"),
        "dbPwd": config.get("Database", "pwd"),
    }
def intParse(s):
    """Parse s as an int, falling back through float; empty or invalid input gives 0."""
    if s.replace(" ", "") == "":
        return 0
    try:
        return int(s)
    except ValueError:
        try:
            return int(float(s))
        except (ValueError, TypeError):
            return 0
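# intParse falls back from int() to int(float()) so that, for example,
# intParse("3") == 3, intParse("3.7") == 3, and intParse("  ") == 0.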
|
nilq/baby-python
|
python
|
import bpy
from ..sollumz_properties import SollumType, SOLLUMZ_UI_NAMES, BOUND_POLYGON_TYPES
from ..ybn.collision_materials import create_collision_material_from_index
from ..tools.meshhelper import create_box, create_sphere, create_capsule, create_cylinder
from mathutils import Vector, Matrix
def create_bound_shape(type, aobj):
pobj = create_mesh(type)
# Constrain scale for bound polys
if pobj.sollum_type in BOUND_POLYGON_TYPES and type != SollumType.BOUND_POLY_BOX and type != SollumType.BOUND_POLY_TRIANGLE:
constraint = pobj.constraints.new(type='LIMIT_SCALE')
constraint.use_transform_limit = True
# Why blender? So ugly
constraint.use_min_x = True
constraint.use_min_y = True
constraint.use_min_z = True
constraint.use_max_x = True
constraint.use_max_y = True
constraint.use_max_z = True
constraint.min_x = 1
constraint.min_y = 1
constraint.min_z = 1
constraint.max_x = 1
constraint.max_y = 1
constraint.max_z = 1
if type == SollumType.BOUND_POLY_BOX:
create_box(pobj.data)
elif type == SollumType.BOUND_BOX:
pobj.bound_dimensions = Vector((1, 1, 1))
elif type == SollumType.BOUND_SPHERE or type == SollumType.BOUND_POLY_SPHERE:
pobj.bound_radius = 1
elif type == SollumType.BOUND_POLY_CAPSULE:
pobj.bound_radius = 1
pobj.bound_length = 1
elif type == SollumType.BOUND_CAPSULE:
pobj.bound_radius = 1
pobj.margin = 0.5
elif type == SollumType.BOUND_CYLINDER or type == SollumType.BOUND_POLY_CYLINDER:
pobj.bound_length = 2
pobj.bound_radius = 1
elif type == SollumType.BOUND_DISC:
pobj.margin = 0.04
pobj.bound_radius = 1
if aobj:
if aobj.sollum_type == SollumType.BOUND_GEOMETRY or aobj.sollum_type == SollumType.BOUND_GEOMETRYBVH or aobj.sollum_type == SollumType.BOUND_COMPOSITE:
pobj.parent = aobj
return pobj
def create_bound(sollum_type=SollumType.BOUND_COMPOSITE, aobj=None):
empty = bpy.data.objects.new(SOLLUMZ_UI_NAMES[sollum_type], None)
empty.empty_display_size = 0
empty.sollum_type = sollum_type
bpy.context.collection.objects.link(empty)
bpy.context.view_layer.objects.active = bpy.data.objects[empty.name]
if aobj:
if aobj.sollum_type == SollumType.BOUND_COMPOSITE:
empty.parent = aobj
return empty
def create_mesh(sollum_type):
name = SOLLUMZ_UI_NAMES[sollum_type]
mesh = bpy.data.meshes.new(name)
obj = bpy.data.objects.new(name, mesh)
obj.sollum_type = sollum_type
obj.data.materials.append(create_collision_material_from_index(0))
bpy.context.collection.objects.link(obj)
return obj
def convert_selected_to_bound(objs, use_name, multiple, bvhs, replace_original):
selected = objs
if not multiple:
dobj = create_bound()
dmobj = create_bound(SollumType.BOUND_GEOMETRYBVH) if bvhs else create_bound(
SollumType.BOUND_GEOMETRY)
dmobj.parent = dobj
for obj in selected:
if multiple:
dobj = create_bound()
dmobj = create_bound(SollumType.BOUND_GEOMETRYBVH) if bvhs else create_bound(
SollumType.BOUND_GEOMETRY)
dmobj.parent = dobj
if obj.type == 'MESH':
if use_name:
dobj.name = obj.name
poly_mesh = obj if replace_original else create_mesh(
SollumType.BOUND_POLY_TRIANGLE)
poly_mesh.parent = dmobj
if replace_original:
poly_mesh.name = SOLLUMZ_UI_NAMES[SollumType.BOUND_POLY_TRIANGLE]
# set properties
poly_mesh.sollum_type = SollumType.BOUND_POLY_TRIANGLE
else:
poly_mesh.data = obj.data.copy()
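# Typical use from an operator: create a composite root, then parent a shape
# to it (enum members as defined in sollumz_properties):
#
#     composite = create_bound(SollumType.BOUND_COMPOSITE)
#     box = create_bound_shape(SollumType.BOUND_BOX, composite)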
|
nilq/baby-python
|
python
|
import asyncio
from netschoolapi import NetSchoolAPI
async def main():
login_data = {
"login": "Иван",
"password": "Иван228",
"school": "МАОУ многопрофильный лицей №20"
}
async with NetSchoolAPI("http://sgo.cit73.ru/", **login_data) as api:
print(await api.get_announcements())
asyncio.run(main())
|
nilq/baby-python
|
python
|
import datetime
import unittest
from search.ql import Query, Q, GeoQueryArguments
from search.fields import TextField, GeoField, DateField
from search.indexes import DocumentModel
class FakeDocument(DocumentModel):
foo = TextField()
bar = DateField()
class FakeGeoDocument(DocumentModel):
my_loc = GeoField()
class TestKeywordQuery(unittest.TestCase):
def test_basic_keywords(self):
query = Query(FakeDocument)
query.add_keywords("foo bar")
self.assertEqual(
u"foo bar",
unicode(query))
class TestQuery(unittest.TestCase):
def test_basic_keywords(self):
query = Query(FakeDocument)
query.add_q(Q(foo__gt=42))
self.assertEqual(
u"(foo > 42)",
unicode(query))
def test_add_q_or(self):
"""Test that two Q objects can be added to a query without needing to wrap them in
another Q object
"""
query = Query(FakeDocument)
q_1 = Q(foo=42)
q_2 = Q(foo=128)
query.add_q(q_1)
query.add_q(q_2, conn=Q.OR)
self.assertEqual(
u'((foo:"42") OR (foo:"128"))',
unicode(query))
class TestGeoQuery(unittest.TestCase):
def test_geosearch(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) < 20)",
unicode(query))
def test_geosearch_lt(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_lt=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) < 20)",
unicode(query))
def test_geosearch_lte(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_lte=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) <= 20)",
unicode(query))
def test_geosearch_gt(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_gt=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) > 20)",
unicode(query))
def test_geosearch_gte(self):
query = Query(FakeGeoDocument)
query.add_q(Q(my_loc__geo_gte=GeoQueryArguments(3.14, 6.28, 20)))
self.assertEqual(
u"(distance(my_loc, geopoint(3.140000, 6.280000)) >= 20)",
unicode(query))
class TestDateQuery(unittest.TestCase):
def test_before(self):
query = Query(FakeDocument)
today = datetime.date.today()
query.add_q(Q(bar__lt=today))
self.assertEqual(
u"(bar < {0})".format(today.isoformat()),
unicode(query))
def test_after(self):
query = Query(FakeDocument)
today = datetime.date.today()
query.add_q(Q(bar__gt=today))
self.assertEqual(
u"(bar > {0} AND NOT bar:{1})".format(today.isoformat(), DateField().none_value()),
unicode(query))
|
nilq/baby-python
|
python
|
import copy
import random
import math
import numpy as np
from Higashi_backend.utils import *
from Higashi_backend.Functions import *
import multiprocessing
import time
from torch.nn.utils.rnn import pad_sequence
from sklearn.decomposition import PCA
from sklearn.preprocessing import normalize
from scipy.sparse import diags, vstack
from scipy.stats import norm
cpu_num = multiprocessing.cpu_count()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.set_default_dtype(torch.float32)
activation_func = swish
# Code adapted from scVI
def log_zinb_positive(
x: torch.Tensor, mu: torch.Tensor, theta: torch.Tensor, pi: torch.Tensor, eps=1e-8
):
"""
Log likelihood (scalar) of a minibatch according to a zinb model.
Parameters
----------
x
Data
mu
mean of the negative binomial (has to be positive support) (shape: minibatch x vars)
theta
inverse dispersion parameter (has to be positive support) (shape: minibatch x vars)
pi
logit of the dropout parameter (real support) (shape: minibatch x vars)
eps
numerical stability constant
Notes
-----
We parametrize the bernoulli using the logits, hence the softplus functions appearing.
"""
# theta is the dispersion rate. If .ndimension() == 1, it is shared for all cells (regardless of batch or labels)
# if theta.ndimension() == 1:
# theta = theta.view(
# 1, theta.size(0)
# ) # In this case, we reshape theta for broadcasting
softplus_pi = F.softplus(-pi) # uses log(sigmoid(x)) = -softplus(-x)
log_theta_eps = torch.log(theta + eps)
log_theta_mu_eps = torch.log(theta + mu + eps)
pi_theta_log = -pi + theta * (log_theta_eps - log_theta_mu_eps)
case_zero = F.softplus(pi_theta_log) - softplus_pi
mul_case_zero = torch.mul((x < eps).type(torch.float32), case_zero)
case_non_zero = (
-softplus_pi
+ pi_theta_log
+ x * (torch.log(mu + eps) - log_theta_mu_eps)
+ torch.lgamma(x + theta)
- torch.lgamma(theta)
- torch.lgamma(x + 1)
)
mul_case_non_zero = torch.mul((x > eps).type(torch.float32), case_non_zero)
res = mul_case_zero + mul_case_non_zero
return res
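# Illustrative call on random tensors (shapes are minibatch x vars; values
# are arbitrary but respect the positive support of mu and theta):
#
#     x = torch.poisson(torch.full((4, 8), 2.0))
#     mu = torch.rand(4, 8) + 0.1
#     theta = torch.rand(4, 8) + 0.1
#     pi = torch.randn(4, 8)   # dropout logits, real support
#     ll = log_zinb_positive(x, mu, theta, pi)   # elementwise log-likelihood, shape (4, 8)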
class Wrap_Embedding(torch.nn.Embedding):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, *input):
return super().forward(*input)
def features(self, *input):
return self.forward(*input)
def start_fix(self):
return
def fix_cell(self, cell_list=None, bin_id=None):
return
# Used only for really big adjacency matrix
class SparseEmbedding(nn.Module):
def __init__(self, embedding_weight, sparse=False, cpu=False):
super().__init__()
# print("Initializing embedding, shape", embedding_weight.shape)
self.sparse = sparse
self.cpu_flag = cpu
if self.cpu_flag:
print("CPU mode")
self_device = "cpu"
else:
self_device = device
if self.sparse:
print ("Sparse mode")
self.embedding = embedding_weight
else:
if type(embedding_weight) is torch.Tensor:
self.embedding = embedding_weight.to(self_device)
elif type(embedding_weight) is np.ndarray:
try:
self.embedding = torch.from_numpy(
np.array(embedding_weight.todense())).to(self_device)
except BaseException:
self.embedding = torch.from_numpy(
np.array(embedding_weight)).to(self_device)
else:
print("Sparse Embedding Error", type(embedding_weight))
self.sparse = True
self.embedding = embedding_weight
def forward(self, x):
if self.sparse:
x = x.cpu().numpy()
x = x.reshape((-1))
temp = np.asarray((self.embedding[x, :]).todense())
return torch.from_numpy(temp).to(device, non_blocking=True)
        if self.cpu_flag:
temp = self.embedding[x.cpu(), :]
return temp.to(device, non_blocking=True)
else:
return self.embedding[x, :]
# Deep Auto-encoder with tied or partial tied weights (reduce the number of parameters to be trained)
class TiedAutoEncoder(nn.Module):
def __init__(self, shape_list: list,
use_bias=True,
tied_list=None,
add_activation=False,
dropout=None,
layer_norm=False,
activation=None):
super().__init__()
if tied_list is None:
tied_list = []
self.add_activation = add_activation
self.weight_list = []
self.reverse_weight_list = []
self.bias_list = []
self.use_bias = use_bias
self.recon_bias_list = []
self.shape_list = shape_list
self.activation = activation
if self.activation is None:
self.activation = activation_func
# Generating weights for the tied autoencoder
for i in range(len(shape_list) - 1):
p = nn.parameter.Parameter(torch.FloatTensor(shape_list[i + 1], shape_list[i]).to(device, non_blocking=True))
self.weight_list.append(p)
if i not in tied_list:
self.reverse_weight_list.append(
nn.parameter.Parameter(torch.FloatTensor(shape_list[i + 1], shape_list[i]).to(device, non_blocking=True)))
else:
self.reverse_weight_list.append(p)
self.bias_list.append(nn.parameter.Parameter(torch.FloatTensor(shape_list[i + 1]).to(device, non_blocking=True)))
self.recon_bias_list.append(nn.parameter.Parameter(torch.FloatTensor(shape_list[i]).to(device, non_blocking=True)))
# reverse the order of the decoder.
self.recon_bias_list = self.recon_bias_list[::-1]
self.reverse_weight_list = self.reverse_weight_list[::-1]
self.weight_list = nn.ParameterList(self.weight_list)
self.reverse_weight_list = nn.ParameterList(self.reverse_weight_list)
self.bias_list = nn.ParameterList(self.bias_list)
self.recon_bias_list = nn.ParameterList(self.recon_bias_list)
# Initialize the parameters
self.reset_parameters()
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
if layer_norm:
self.layer_norm = nn.LayerNorm(shape_list[-1])
else:
self.layer_norm = None
self.tied_list = tied_list
self.input_dropout = nn.Dropout(0.1)
def reset_parameters(self):
for i, w in enumerate(self.weight_list):
nn.init.kaiming_uniform_(self.weight_list[i], a=0.0, mode='fan_in', nonlinearity='leaky_relu')
nn.init.kaiming_uniform_(self.reverse_weight_list[i], a=0.0, mode='fan_out', nonlinearity='leaky_relu')
for i, b in enumerate(self.bias_list):
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight_list[i])
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias_list[i], -bound, bound)
temp_weight_list = self.weight_list[::-1]
for i, b in enumerate(self.recon_bias_list):
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(temp_weight_list[i])
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.recon_bias_list[i], -bound, bound)
def untie(self):
new_reverse_weight_list = []
for w in self.reverse_weight_list:
new_reverse_weight_list.append(nn.parameter.Parameter(torch.ones_like(w).to(device, non_blocking=True)))
for i in range(len(new_reverse_weight_list)):
nn.init.kaiming_uniform_(new_reverse_weight_list[i], a=0.0, mode='fan_out', nonlinearity='leaky_relu')
self.reverse_weight_list = nn.ParameterList(new_reverse_weight_list)
for i, b in enumerate(self.recon_bias_list):
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(self.reverse_weight_list[i])
bound = 1 / math.sqrt(fan_out)
torch.nn.init.uniform_(self.recon_bias_list[i], -bound, bound)
def encoder(self, input):
encoded_feats = input
for i in range(len(self.weight_list)):
if self.use_bias:
encoded_feats = F.linear(encoded_feats, self.weight_list[i], self.bias_list[i])
else:
encoded_feats = F.linear(encoded_feats, self.weight_list[i])
if i < len(self.weight_list) - 1:
encoded_feats = self.activation(encoded_feats)
if self.dropout is not None:
encoded_feats = self.dropout(encoded_feats)
if self.layer_norm is not None:
encoded_feats = self.layer_norm(encoded_feats)
if self.add_activation:
encoded_feats = self.activation(encoded_feats)
return encoded_feats
def decoder(self, encoded_feats):
if self.add_activation:
reconstructed_output = encoded_feats
else:
reconstructed_output = self.activation(encoded_feats)
reverse_weight_list = self.reverse_weight_list
recon_bias_list = self.recon_bias_list
for i in range(len(reverse_weight_list)):
reconstructed_output = F.linear(reconstructed_output, reverse_weight_list[i].t(),
recon_bias_list[i])
if i < len(recon_bias_list) - 1:
reconstructed_output = self.activation(reconstructed_output)
return reconstructed_output
def forward(self, input, return_recon=False):
encoded_feats = self.encoder(input)
if return_recon:
if not self.add_activation:
reconstructed_output = self.activation(encoded_feats)
else:
reconstructed_output = encoded_feats
if self.dropout is not None:
reconstructed_output = self.dropout(reconstructed_output)
reconstructed_output = self.decoder(reconstructed_output)
return encoded_feats, reconstructed_output
else:
return encoded_feats
def fit(self, data: np.ndarray,
epochs=10, sparse=True, sparse_rate=None, classifier=False, early_stop=True, batch_size=-1, targets=None):
if self.shape_list[1] < data.shape[1]:
pca = PCA(n_components=self.shape_list[1]).fit(data)
self.weight_list[0].data = torch.from_numpy(pca.components_).float().to(device, non_blocking=True)
self.reverse_weight_list[-1].data = torch.from_numpy(pca.components_).float().to(device, non_blocking=True)
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
data = torch.from_numpy(data).to(device, non_blocking=True)
if batch_size < 0:
batch_size = int(len(data))
bar = trange(epochs, desc="")
no_improve_count = 0
for i in bar:
batch_index = torch.randint(0, int(len(data)), (batch_size,)).to(device, non_blocking=True)
encode, recon = self.forward(data[batch_index], return_recon=True)
optimizer.zero_grad()
if sparse:
loss = sparse_autoencoder_error(recon, targets[batch_index], sparse_rate)
elif classifier:
loss = F.binary_cross_entropy_with_logits(recon, (targets[batch_index] > 0).float())
else:
loss = F.mse_loss(recon, targets[batch_index]) # / len(recon)
if i == 0:
loss_best = float(loss.item())
loss.backward()
optimizer.step()
if early_stop:
if i >= 50:
if loss.item() < loss_best * 0.99:
loss_best = loss.item()
no_improve_count = 0
else:
no_improve_count += 1
if no_improve_count >= 30:
break
bar.set_description("%.3f" % (loss.item()), refresh=False)
if epochs > 0:
print("loss", loss.item(), "loss best", loss_best, "epochs", i)
print()
torch.cuda.empty_cache()
def predict(self, data):
self.eval()
data = torch.from_numpy(data).to(device, non_blocking=True)
with torch.no_grad():
encode = self.forward(data)
self.train()
torch.cuda.empty_cache()
return encode.cpu().detach().numpy()
# Deep Auto-encoder
class AutoEncoder(nn.Module):
def __init__(self, encoder_shape_list, decoder_shape_list,
use_bias=True,
add_activation=False,
dropout=None,
layer_norm=False):
super().__init__()
self.add_activation = add_activation
self.weight_list = []
self.reverse_weight_list = []
self.use_bias = use_bias
# Generating weights for the tied autoencoder
for i in range(len(encoder_shape_list) - 1):
self.weight_list.append(nn.Linear(encoder_shape_list[i], encoder_shape_list[i+1]).to(device, non_blocking=True))
for i in range(len(decoder_shape_list) - 1):
self.reverse_weight_list.append(nn.Linear(decoder_shape_list[i], decoder_shape_list[i+1]).to(device, non_blocking=True))
self.reverse_weight_list = nn.ModuleList(self.reverse_weight_list)
self.weight_list = nn.ModuleList(self.weight_list)
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
if layer_norm:
self.layer_norm_stack = []
for i in range(len(encoder_shape_list) - 1):
self.layer_norm_stack.append(nn.LayerNorm(encoder_shape_list[i+1]).to(device, non_blocking=True))
else:
self.layer_norm_stack = None
def encoder(self, input):
encoded_feats = input
for i in range(len(self.weight_list)):
encoded_feats = self.weight_list[i](encoded_feats)
if i < len(self.weight_list) - 1:
encoded_feats = activation_func(encoded_feats)
if self.dropout is not None:
encoded_feats = self.dropout(encoded_feats)
if self.layer_norm_stack is not None:
encoded_feats = self.layer_norm_stack[i](encoded_feats)
if self.add_activation:
encoded_feats = activation_func(encoded_feats)
return encoded_feats
def decoder(self, encoded_feats):
if self.add_activation:
reconstructed_output = encoded_feats
else:
reconstructed_output = activation_func(encoded_feats)
reverse_weight_list = self.reverse_weight_list
for i in range(len(reverse_weight_list)):
reconstructed_output = reverse_weight_list[i](reconstructed_output)
if i < len(reverse_weight_list) - 1:
reconstructed_output = activation_func(reconstructed_output)
return reconstructed_output
def forward(self, input, return_recon=False):
encoded_feats = self.encoder(input)
if return_recon:
reconstructed_output = encoded_feats
if self.dropout is not None:
reconstructed_output = self.dropout(reconstructed_output)
reconstructed_output = self.decoder(reconstructed_output)
return encoded_feats, reconstructed_output
else:
return encoded_feats
def fit(self, data, epochs=10, sparse=True, sparse_rate=None, classifier=False, early_stop=True, batch_size=-1, targets=None):
optimizer = torch.optim.AdamW(self.parameters(), lr=1e-3)
data = torch.from_numpy(data).to(device, non_blocking=True)
if batch_size < 0:
batch_size = len(data)
bar = trange(epochs, desc="")
if targets is None:
targets=data
no_improve_count = 0
for i in bar:
batch_index = torch.randint(0, len(data), (batch_size,)).to(device, non_blocking=True)
encode, recon = self.forward(data[batch_index], return_recon=True)
optimizer.zero_grad()
if sparse:
loss = sparse_autoencoder_error(recon, targets[batch_index], sparse_rate)
elif classifier:
loss = F.binary_cross_entropy_with_logits(recon, (targets[batch_index] > 0).float())
else:
loss = F.mse_loss(recon, targets[batch_index], reduction="sum") / len(batch_index)
if i == 0:
loss_best = float(loss.item())
loss.backward()
optimizer.step()
if early_stop:
if i >= 50:
if loss.item() < loss_best * 0.99:
loss_best = loss.item()
no_improve_count = 0
else:
no_improve_count += 1
if no_improve_count >= 50:
break
bar.set_description("%.3f" % (loss.item()), refresh=False)
print("loss", loss.item(), "loss best", loss_best, "epochs", i)
print()
torch.cuda.empty_cache()
def predict(self, data):
self.eval()
data = torch.from_numpy(data).to(device, non_blocking=True)
with torch.no_grad():
encode = self.forward(data)
self.train()
torch.cuda.empty_cache()
return encode.cpu().detach().numpy()
# Multiple Embedding is a module that passes nodes to different branch of neural network to generate embeddings
# The neural network to use would be dependent to the node ids (the input num_list parameters)
# If the num_list is [0, 1000, 2000,...,]
# Then node 0~1000 would pass through NN1, 1000~2000 would pass through NN2...
# target weights represent the auxiliary task that the embedding would do.
class MultipleEmbedding(nn.Module):
def __init__(self, embedding_weights, dim, sparse=True, num_list=None, target_weights=None):
super().__init__()
if target_weights is None:
target_weights = embedding_weights
self.dim = dim
self.num_list = torch.tensor([0] + list(num_list)).to(device, non_blocking=True)
        # searchsort_table is a fast mapping between node id and the neural network to use for generating embeddings
self.searchsort_table = torch.zeros(num_list[-1] + 1).long().to(device, non_blocking=True)
for i in range(len(self.num_list) - 1):
self.searchsort_table[self.num_list[i] + 1:self.num_list[i + 1] + 1] = i
self.searchsort_table_one_hot = torch.zeros([len(self.searchsort_table), self.searchsort_table.max() + 1])
        x = torch.arange(len(self.searchsort_table), dtype=torch.long)
self.searchsort_table_one_hot[x, self.searchsort_table] = 1
self.searchsort_table = self.searchsort_table_one_hot
self.searchsort_table[0] = 0
self.searchsort_table = self.searchsort_table.bool().to(device, non_blocking=True)
self.embeddings = []
complex_flag = False
for i, w in enumerate(embedding_weights):
self.embeddings.append(SparseEmbedding(w, sparse))
self.targets = []
complex_flag = False
for i, w in enumerate(target_weights):
self.targets.append(SparseEmbedding(w, sparse))
# Generate a test id to test the output size of each embedding modules.
test = torch.zeros(1, device=device).long()
self.input_size = []
for w in self.embeddings:
result = w(test)
if type(result) == tuple:
result = result[0]
self.input_size.append(result.shape[-1])
self.layer_norm = nn.LayerNorm(self.dim).to(device, non_blocking=True)
self.wstack = []
i = 0
if self.input_size[i] == target_weights[i].shape[-1]:
self.wstack.append(
TiedAutoEncoder([self.input_size[i], self.dim], add_activation=False, tied_list=[]))
else:
self.wstack.append(AutoEncoder([self.input_size[i], self.dim], [self.dim, target_weights[i].shape[-1]],
add_activation=True))
for i in range(1, len(self.embeddings)):
if self.input_size[i] == target_weights[i].shape[-1]:
self.wstack.append(TiedAutoEncoder([self.input_size[i], self.dim],add_activation=True, tied_list=[]))
else:
self.wstack.append(AutoEncoder([self.input_size[i], self.dim],[self.dim, target_weights[i].shape[-1]],add_activation=True))
self.wstack = nn.ModuleList(self.wstack)
self.on_hook_embedding = nn.ModuleList([nn.Sequential(w,
self.wstack[i]
) for i, w in enumerate(self.embeddings)])
self.on_hook_set = set([i for i in range(len(self.embeddings))])
self.off_hook_embedding = [i for i in range(len(self.embeddings))]
self.features = self.forward
def forward(self, x, *args):
if len(x.shape) > 1:
sz_b, len_seq = x.shape
x = x.view(-1)
reshape_flag = True
else:
reshape_flag = False
final = torch.zeros((len(x), self.dim), device=device).float()
# ind is a bool type array
ind = self.searchsort_table[x]
node_type = torch.nonzero(torch.any(ind, dim=0)).view(-1)
for i in node_type:
mask = ind[:, i]
if int(i) in self.on_hook_set:
final[mask] = self.on_hook_embedding[i](x[mask] - self.num_list[i] - 1)
else:
final[mask] = self.off_hook_embedding[i](x[mask] - self.num_list[i] - 1)
if reshape_flag:
final = final.view(sz_b, len_seq, -1)
return final
# No longer do BP through a list of embedding modules.
def off_hook(self, off_hook_list=[]):
if len(off_hook_list) == 0:
off_hook_list = list(range(len(self.wstack)))
for index in off_hook_list:
ae = self.wstack[index]
for w in ae.weight_list:
w.requires_grad = False
for w in ae.reverse_weight_list:
w.requires_grad = False
for b in ae.bias_list:
b.requires_grad = False
for b in ae.recon_bias_list:
b.requires_grad = False
ids = torch.arange(start=0, end=self.num_list[index + 1] - self.num_list[index], device=device)
with torch.no_grad():
embed = self.on_hook_embedding[index](ids).detach()
self.embeddings[index] = self.embeddings[index].cpu()
self.targets[index] = self.targets[index].cpu()
self.off_hook_embedding[index] = SparseEmbedding(embed, False)
try:
self.on_hook_set.remove(index)
except:
pass
def on_hook(self, on_hook_list):
if len(on_hook_list) == 0:
on_hook_list = list(range(len(self.wstack)))
for index in on_hook_list:
ae = self.wstack[index]
for w in ae.weight_list:
w.requires_grad = True
for w in ae.reverse_weight_list:
w.requires_grad = True
for b in ae.bias_list:
b.requires_grad = True
for b in ae.recon_bias_list:
b.requires_grad = True
self.embeddings[index] = self.embeddings[index].to(device, non_blocking=True)
self.targets[index] = self.targets[index].to(device, non_blocking=True)
self.on_hook_set.add(index)
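    # Hedged usage sketch (hypothetical variable name, not part of the original training loop):
    #   embed.off_hook([0])   # freeze autoencoder 0 and cache its outputs as a fixed lookup table
    #   ...                   # train the rest of the model without backprop through module 0
    #   embed.on_hook([0])    # re-enable gradients and move module 0's feature matrices back to the device
    # Passing an empty list re-enables (or freezes) every module because of the empty-list check above.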
def start_fix(self):
return
def fix_cell(self, cell=None, bin_id=None):
return
class Hyper_SAGNN(nn.Module):
def __init__(
self,
n_head,
d_model,
d_k,
d_v,
diag_mask,
bottle_neck,
attribute_dict=None,
cell_feats=None,
encoder_dynamic_nn=None,
encoder_static_nn=None,
chrom_num=1):
super().__init__()
self.pff_classifier = PositionwiseFeedForward(
[d_model, int(d_model / 2), 1])
self.pff_classifier_var = PositionwiseFeedForward(
[d_model, int(d_model / 2), 1])
self.pff_classifier_proba = PositionwiseFeedForward(
[d_model, int(d_model / 2), 1])
self.encode_list = []
self.encode1 = EncoderLayer(
n_head,
d_model,
d_k,
d_v,
dropout_mul=0.3,
dropout_pff=0.4,
diag_mask=diag_mask,
bottle_neck=bottle_neck,
dynamic_nn=encoder_dynamic_nn,
static_nn=encoder_static_nn)
self.diag_mask_flag = diag_mask
self.layer_norm1 = nn.LayerNorm(d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(0.3)
if attribute_dict is not None:
self.attribute_dict = torch.from_numpy(attribute_dict).to(device, non_blocking=True)
input_size = self.attribute_dict.shape[-1] * 2 + cell_feats.shape[-1]
self.extra_proba = FeedForward([input_size, 4, 1])
self.extra_proba2 = FeedForward([input_size, 4, 1])
self.extra_proba3 = FeedForward([input_size, 4, 1])
self.attribute_dict_embedding = nn.Embedding(len(self.attribute_dict), 1, padding_idx=0)
self.attribute_dict_embedding.weight = nn.Parameter(self.attribute_dict)
self.attribute_dict_embedding.weight.requires_grad = False
self.cell_feats = torch.from_numpy(cell_feats).to(device, non_blocking=True)
self.only_distance = False
self.only_model = False
self.chrom_num = chrom_num
self.d_model = d_model
def get_embedding(self, x, x_chrom, slf_attn_mask=None, non_pad_mask=None):
# if slf_attn_mask is None:
# slf_attn_mask = get_attn_key_pad_mask(seq_k=x, seq_q=x)
# non_pad_mask = get_non_pad_mask(x)
dynamic, static, attn = self.encode1(x, x, x_chrom, slf_attn_mask, non_pad_mask)
if torch.sum(torch.isnan(dynamic)) > 0:
print ("nan error", x, dynamic, static)
raise EOFError
return dynamic, static, attn
def forward(self, x, x_chrom, mask=None):
x = x.long()
sz_b, len_seq = x.shape
if self.attribute_dict is not None:
if not self.only_model:
distance = torch.cat([self.attribute_dict_embedding(x[:, 1]), self.attribute_dict_embedding(x[:, 2]), self.cell_feats[x[:, 0]]], dim=-1)
distance_proba = self.extra_proba(distance)
distance_proba2 = self.extra_proba2(distance)
distance_proba3 = self.extra_proba3(distance)
else:
distance = torch.cat([self.attribute_dict_embedding(x[:, 1]), self.attribute_dict_embedding(x[:, 2]),
torch.zeros((len(x), self.cell_feats.shape[-1])).float().to(device, non_blocking=True)], dim=-1)
distance_proba = self.extra_proba(distance)
distance_proba2 = self.extra_proba2(distance)
distance_proba3 = self.extra_proba3(distance)
else:
distance_proba = torch.zeros((len(x), 1), dtype=torch.float, device=device)
distance_proba2 = torch.zeros((len(x), 1), dtype=torch.float, device=device)
distance_proba3 = torch.zeros((len(x), 1), dtype=torch.float, device=device)
if not self.only_distance:
# slf_attn_mask = get_attn_key_pad_mask(seq_k=x, seq_q=x)
# non_pad_mask = get_non_pad_mask(x)
dynamic, static, attn = self.get_embedding(x, x_chrom)
dynamic = self.layer_norm1(dynamic)
static = self.layer_norm2(static)
if self.diag_mask_flag:
output = (dynamic - static) ** 2
else:
output = dynamic
output_proba = self.pff_classifier_proba(static)
# output_proba = torch.sum(output_proba * non_pad_mask, dim=-2, keepdim=False)
# mask_sum = torch.sum(non_pad_mask, dim=-2, keepdim=False)
# output_proba /= mask_sum
output_proba = torch.mean(output_proba, dim=-2, keepdim=False)
output_proba = output_proba + distance_proba
output_mean = self.pff_classifier(output)
# output_mean = torch.sum(output_mean * non_pad_mask, dim=-2, keepdim=False)
# output_mean /= mask_sum
output_mean = torch.mean(output_mean, dim=-2, keepdim=False)
output_var = self.pff_classifier_var(static)
# output_var = torch.sum(output_var * non_pad_mask, dim=-2, keepdim=False)
# output_var /= mask_sum
output_var = torch.mean(output_var, dim=-2, keepdim=False)
output_mean = output_mean + distance_proba2
output_var = output_var + distance_proba3
else:
return distance_proba2, distance_proba3, distance_proba
return output_mean, output_var, output_proba
def predict(self, input, input_chrom, verbose=False, batch_size=96, activation=None, extra_info=None):
self.eval()
with torch.no_grad():
output = []
if verbose:
func1 = trange
else:
func1 = range
if batch_size < 0:
batch_size = len(input)
with torch.no_grad():
for j in func1(math.ceil(len(input) / batch_size)):
x = input[j * batch_size:min((j + 1) * batch_size, len(input))]
if type(input_chrom) is not tuple:
x_chrom = input_chrom[j * batch_size:min((j + 1) * batch_size, len(input))]
x_chrom = torch.from_numpy(x_chrom).long().to(device, non_blocking=True)
else:
a,b = input_chrom
x_chrom = a[j * batch_size:min((j + 1) * batch_size, len(input))], b[j * batch_size:min((j + 1) * batch_size, len(input))]
x = np2tensor_hyper(x, dtype=torch.long)
if len(x.shape) == 1:
x = pad_sequence(x, batch_first=True, padding_value=0).to(device, non_blocking=True)
else:
x = x.to(device, non_blocking=True)
o, _, o_proba = self(x, x_chrom)
if activation is not None:
o = activation(o)
if extra_info is not None:
o = o * extra_info[x[:, 2] - x[:, 1]]
output.append(o.detach().cpu())
output = torch.cat(output, dim=0)
torch.cuda.empty_cache()
self.train()
return output.numpy()
# A custom position-wise MLP.
# dims is a list; it creates one layer per consecutive pair of dims, with tanh between them.
# If dropout is set, it is added at the end, before the residual connection and
# layer-norm.
class PositionwiseFeedForward(nn.Module):
def __init__(
self,
dims,
dropout=None,
reshape=False,
use_bias=True,
residual=False,
layer_norm=False):
super(PositionwiseFeedForward, self).__init__()
self.w_stack = []
self.dims = dims
for i in range(len(dims) - 1):
self.w_stack.append(nn.Conv1d(dims[i], dims[i + 1], 1, bias=use_bias))
# self.w_stack.append(nn.Linear(dims[i], dims[i + 1], bias=use_bias))
self.w_stack = nn.ModuleList(self.w_stack)
self.reshape = reshape
self.layer_norm = nn.LayerNorm(dims[0])
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.residual = residual
self.layer_norm_flag = layer_norm
self.alpha = torch.nn.Parameter(torch.zeros(1))
self.register_parameter("alpha", self.alpha)
def forward(self, x):
if self.layer_norm_flag:
output = self.layer_norm(x)
else:
output = x
output = output.transpose(1, 2)
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = activation_func(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
output = output.transpose(1, 2)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
if self.dims[0] == self.dims[-1]:
# residual
if self.residual:
output = output + x
return output
# A custom position-wise MLP.
# dims is a list; it creates one layer per consecutive pair of dims, with torch.tanh between them.
# We don't do residual connections or layer-norm, because this is only used as the
# final classifier.
class FeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, dims, dropout=None, reshape=False, use_bias=True):
super(FeedForward, self).__init__()
self.w_stack = []
for i in range(len(dims) - 1):
self.w_stack.append(nn.Linear(dims[i], dims[i + 1], use_bias))
self.w_stack = nn.ModuleList(self.w_stack)
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = None
self.reshape = reshape
def forward(self, x):
output = x
for i in range(len(self.w_stack) - 1):
output = self.w_stack[i](output)
output = activation_func(output)
if self.dropout is not None:
output = self.dropout(output)
output = self.w_stack[-1](output)
if self.reshape:
output = output.view(output.shape[0], -1, 1)
return output
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
def masked_softmax(self, vector: torch.Tensor,
mask: torch.Tensor,
dim: int = -1,
memory_efficient: bool = False,
mask_fill_value: float = -1e32) -> torch.Tensor:
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
if not memory_efficient:
# To limit numerical errors from large vector elements outside
# the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
else:
masked_vector = vector.masked_fill(
(1 - mask).bool(), mask_fill_value)
result = torch.nn.functional.softmax(masked_vector, dim=dim)
return result
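    # Note (added for clarity): the non-memory-efficient branch multiplies the logits by the
    # mask before the softmax, masks the result again and renormalizes, so a fully masked row
    # simply yields zeros; the memory-efficient branch fills masked positions with a very
    # negative value before the softmax, which is cheaper but produces a uniform distribution
    # over the masked entries when an entire row is masked.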
def forward(self, q, k, v, diag_mask, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -float('inf'))
attn = self.masked_softmax(
attn, diag_mask, dim=-1, memory_efficient=True)
output = torch.bmm(attn, v)
return output, attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(
self,
n_head,
d_model,
d_k,
d_v,
dropout,
diag_mask,
input_dim):
super().__init__()
self.d_model = d_model
self.input_dim = input_dim
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_ks = nn.Linear(input_dim, n_head * d_k, bias=False)
self.w_vs = nn.Linear(input_dim, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0,
std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(
temperature=np.power(d_k, 0.5))
self.fc1 = FeedForward([n_head * d_v, d_model], use_bias=False)
self.fc2 = FeedForward([n_head * d_v, d_model], use_bias=False)
self.layer_norm1 = nn.LayerNorm(input_dim)
self.layer_norm2 = nn.LayerNorm(input_dim)
self.layer_norm3 = nn.LayerNorm(input_dim)
if dropout is not None:
self.dropout = nn.Dropout(dropout)
else:
self.dropout = dropout
self.diag_mask_flag = diag_mask
self.diag_mask = None
self.alpha_static = torch.nn.Parameter(torch.zeros(1))
self.alpha_dynamic = torch.nn.Parameter(torch.zeros(1))
self.register_parameter("alpha_static", self.alpha_static)
self.register_parameter("alpha_dynamic", self.alpha_dynamic)
def forward(self, q, k, v, diag_mask=None, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
residual_dynamic = q
residual_static = v
q = self.layer_norm1(q)
k = self.layer_norm2(k)
v = self.layer_norm3(v)
sz_b, len_q, _ = q.shape
sz_b, len_k, _ = k.shape
sz_b, len_v, _ = v.shape
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous(
).view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous(
).view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous(
).view(-1, len_v, d_v) # (n*b) x lv x dv
n = sz_b * n_head
if self.diag_mask is not None:
if (len(self.diag_mask) <= n) or (
self.diag_mask.shape[1] != len_v):
self.diag_mask = torch.ones((len_v, len_v), device=device)
if self.diag_mask_flag:
self.diag_mask -= torch.eye(len_v, len_v, device=device)
self.diag_mask = self.diag_mask.repeat(n, 1, 1).bool()
diag_mask = self.diag_mask
else:
diag_mask = self.diag_mask[:n]
else:
self.diag_mask = (torch.ones((len_v, len_v), device=device))
if self.diag_mask_flag:
self.diag_mask -= torch.eye(len_v, len_v, device=device)
self.diag_mask = self.diag_mask.repeat(n, 1, 1).bool()
diag_mask = self.diag_mask
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
dynamic, attn = self.attention(q, k, v, diag_mask, mask=mask)
dynamic = dynamic.view(n_head, sz_b, len_q, d_v)
dynamic = dynamic.permute(
1, 2, 0, 3).contiguous().view(
sz_b, len_q, -1) # b x lq x (n*dv)
static = v.view(n_head, sz_b, len_q, d_v)
static = static.permute(
1, 2, 0, 3).contiguous().view(
sz_b, len_q, -1) # b x lq x (n*dv)
dynamic = self.dropout(self.fc1(dynamic)) if self.dropout is not None else self.fc1(dynamic)
static = self.dropout(self.fc2(static)) if self.dropout is not None else self.fc2(static)
dynamic = dynamic # + residual_dynamic
static = static # + residual_static
return dynamic, static, attn
class EncoderLayer(nn.Module):
'''A self-attention layer + 2 layered pff'''
def __init__(
self,
n_head,
d_model,
d_k,
d_v,
dropout_mul,
dropout_pff,
diag_mask,
bottle_neck,
dynamic_nn=None,
static_nn=None):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.mul_head_attn = MultiHeadAttention(
n_head,
d_model,
d_k,
d_v,
dropout=dropout_mul,
diag_mask=diag_mask,
input_dim=bottle_neck)
self.pff_n1 = PositionwiseFeedForward(
[d_model, d_model, d_model], dropout=dropout_pff, residual=True, layer_norm=True)
residual = True if bottle_neck == d_model else False
self.pff_n2 = PositionwiseFeedForward(
[bottle_neck, d_model, d_model], dropout=dropout_pff, residual=residual, layer_norm=True)
self.dynamic_nn = dynamic_nn
self.static_nn = static_nn
self.dropout = nn.Dropout(0.2)
def forward(self, dynamic, static, chrom_info, slf_attn_mask, non_pad_mask):
if type(chrom_info) is tuple:
chrom_info, to_neighs = chrom_info
else:
to_neighs = chrom_info
if isinstance(self.dynamic_nn, GraphSageEncoder_with_weights) :
dynamic, static = self.dynamic_nn(dynamic, to_neighs)
else:
static = self.static_nn(static, to_neighs)
dynamic = self.dynamic_nn(dynamic, to_neighs)
dynamic, static1, attn = self.mul_head_attn(
dynamic, dynamic, static)
dynamic = self.pff_n1(dynamic) #* non_pad_mask
# static = self.pff_n2(static * non_pad_mask) * non_pad_mask
return dynamic, static1, attn
# Sampling positive triplets.
# The number of triplets drawn from each chromosome is balanced across chromosomes
class DataGenerator():
def __init__(self, edges, edge_chrom, edge_weight, batch_size, flag=False, num_list=None, k=1):
self.batch_size = batch_size
self.flag = flag
self.k = k
self.batch_size = int(self.batch_size)
self.num_list = list(num_list)
self.edges = [[] for i in range(len(self.num_list) - 1)]
self.edge_weight = [[] for i in range(len(self.num_list) - 1)]
self.edge_chrom = [[] for i in range(len(self.num_list) - 1)]
self.chrom_list = np.arange(len(self.num_list) - 1)
self.size_list = []
print ("initializing data generator")
for i in trange(len(self.num_list) - 1):
mask = (edges[:, 1] >= self.num_list[i]+1) & (edges[:, 1] < self.num_list[i+1]+1)
self.size_list.append(np.sum(mask))
self.edges[i] = edges[mask]
self.edge_weight[i] = edge_weight[mask]
self.edge_chrom[i] = edge_chrom[mask]
if len(self.edges[i]) == 0:
print ("The %d th chrom in your chrom_list has no sample in this generator" % i)
continue
while len(self.edges[i]) <= (self.batch_size):
self.edges[i] = np.concatenate([self.edges[i], self.edges[i]])
self.edge_weight[i] = np.concatenate([self.edge_weight[i], self.edge_weight[i]])
self.edge_chrom[i] = np.concatenate([self.edge_chrom[i], self.edge_chrom[i]])
index = np.random.permutation(len(self.edges[i]))
self.edges[i] = (self.edges[i])[index]
self.edge_weight[i] = (self.edge_weight[i])[index]
self.edge_chrom[i] = (self.edge_chrom[i])[index]
self.pointer = np.zeros(int(np.max(self.chrom_list) + 1)).astype('int')
self.size_list /= np.sum(self.size_list)
def next_iter(self):
chroms = np.random.choice(self.chrom_list, size=self.k, replace=True)
e_list = []
c_list = []
w_list = []
batch_size = self.batch_size / self.k
batch_size = int(batch_size)
for chrom in chroms:
if len(self.edges[chrom]) == 0:
continue
self.pointer[chrom] += batch_size
if self.pointer[chrom] > len(self.edges[chrom]):
index = np.random.permutation(len(self.edges[chrom]))
self.edges[chrom] = (self.edges[chrom])[index]
self.edge_weight[chrom] = (self.edge_weight[chrom])[index]
self.edge_chrom[chrom] = (self.edge_chrom[chrom])[index]
self.pointer[chrom] = batch_size
index = range(self.pointer[chrom] - batch_size, min(self.pointer[chrom], len(self.edges[chrom])))
e, c, w = (self.edges[chrom])[index], (self.edge_chrom[chrom])[index], (self.edge_weight[chrom])[index]
e_list.append(e)
c_list.append(c)
w_list.append(w)
e = np.concatenate(e_list, axis=0)
c = np.concatenate(c_list, axis=0)
w = np.concatenate(w_list, axis=0)
return e, c, w
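# Hedged usage sketch (hypothetical variable names, not from the original pipeline):
#   gen = DataGenerator(edges, edge_chrom, edge_weight, batch_size=256,
#                       num_list=num_list, k=4)
#   e, c, w = gen.next_iter()   # ~256 positive triplets drawn from 4 randomly chosen chromosomes
# Each call advances a per-chromosome pointer and reshuffles that chromosome's edges once the
# pointer runs past the end, so chromosomes are sampled uniformly regardless of their size.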
class MeanAggregator(nn.Module):
"""
    Aggregates a node's embedding using the mean of its neighbors' embeddings
"""
def __init__(self, features, gcn=False, num_list=None, start_end_dict=None, pass_pseudo_id=False):
"""
Initializes the aggregator for a specific graph.
features -- function mapping LongTensor of node ids to FloatTensor of feature values.
gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
"""
super(MeanAggregator, self).__init__()
self.features = features
self.gcn = gcn
self.num_list = torch.as_tensor(num_list)
self.mask = None
self.start_end_dict = start_end_dict
# If the feature function comes from a graphsage encoder, use the cell_id * (bin_num+1) + bin_id as the bin_id
self.pass_pseudo_id = pass_pseudo_id
print("pass_pseudo_id", self.pass_pseudo_id)
# nodes_real represents the true bin_id, nodes might represent the pseudo_id generated by cell_id * (bin_num+1) + bin_id
def forward(self, nodes_real, to_neighs, num_sample=10):
"""
nodes --- list of nodes in a batch
to_neighs --- list of sets, each set is the set of neighbors for node in batch
num_sample --- number of neighbors to sample. No sampling if None.
"""
samp_neighs = np.array(to_neighs)
unique_nodes = {}
unique_nodes_list = []
count = 0
column_indices = []
row_indices = []
v = []
for i, samp_neigh in enumerate(samp_neighs):
samp_neigh = set(samp_neigh)
for n in samp_neigh:
if n not in unique_nodes:
unique_nodes[n] = count
unique_nodes_list.append(n)
count += 1
column_indices.append(unique_nodes[n])
row_indices.append(i)
v.append(1 / len(samp_neigh))
unique_nodes_list = torch.LongTensor(unique_nodes_list).to(device, non_blocking=True)
mask = torch.sparse.FloatTensor(torch.LongTensor([row_indices, column_indices]),
torch.tensor(v, dtype=torch.float),
torch.Size([len(samp_neighs), len(unique_nodes_list)])).to(device, non_blocking=True)
embed_matrix = self.features(unique_nodes_list)
to_feats = mask.mm(embed_matrix)
return to_feats
class MeanAggregator_with_weights(nn.Module):
"""
    Aggregates a node's embedding using the mean of its neighbors' embeddings
"""
def __init__(self, features, gcn=False, num_list=None, start_end_dict=None, pass_pseudo_id=False, remove=False, pass_remove=False):
"""
Initializes the aggregator for a specific graph.
features -- function mapping LongTensor of node ids to FloatTensor of feature values.
gcn --- whether to perform concatenation GraphSAGE-style, or add self-loops GCN-style
"""
super(MeanAggregator_with_weights, self).__init__()
self.features = features
self.gcn = gcn
self.num_list = torch.as_tensor(num_list)
self.mask = None
self.start_end_dict = start_end_dict
# If the feature function comes from a graphsage encoder, use the cell_id * (bin_num+1) + bin_id as the bin_id
self.pass_pseudo_id = pass_pseudo_id
self.remove=remove
self.pass_remove = pass_remove
print("pass_pseudo_id", self.pass_pseudo_id)
@staticmethod
def list_pass(x, num_samples):
return x
# nodes_real represents the true bin_id, nodes might represent the pseudo_id generated by cell_id * (bin_num+1) + bin_id
def forward(self, nodes_real, to_neighs, num_sample=10):
"""
nodes --- list of nodes in a batch
to_neighs --- list of sets, each set is the set of neighbors for node in batch
num_sample --- number of neighbors to sample. No sampling if None.
"""
row_indices, column_indices, v, unique_nodes_list = to_neighs
unique_nodes_list = unique_nodes_list.to(device, non_blocking=True)
mask = torch.sparse.FloatTensor(torch.LongTensor([row_indices, column_indices]),
torch.tensor(v, dtype=torch.float),
torch.Size([len(nodes_real), len(unique_nodes_list)])).to(device, non_blocking=True)
embed_matrix = self.features(unique_nodes_list)
to_feats = mask.mm(embed_matrix)
return to_feats
def forward_GCN(self, nodes, adj, moving_range=0):
embed_matrix = self.features(nodes)
adj = moving_avg(adj, moving_range)
adj.data = np.log1p(adj.data)
adj = normalize(adj, norm='l1', axis=1)
Acoo = adj.tocoo()
mask = torch.sparse.FloatTensor(torch.LongTensor([Acoo.row.tolist(), Acoo.col.tolist()]),
torch.FloatTensor(Acoo.data), torch.Size([adj.shape[0], adj.shape[1]])).to(device, non_blocking=True)
to_feats = mask.mm(embed_matrix)
return to_feats
def moving_avg(adj, moving_range):
adj_origin = adj.copy()
adj = adj.copy()
adj = adj * norm.pdf(0)
for i in range(moving_range * 3):
before_list = []
after_list = []
for j in range(i + 1):
before_list.append(adj_origin[0, :])
before_list.append(adj_origin[:-(i+1), :])
adj_before = vstack(before_list)
after_list.append(adj_origin[i+1:, :])
for j in range(i + 1):
after_list.append(adj_origin[-1, :])
adj_after = vstack(after_list)
adj = adj + (adj_after + adj_before) * norm.pdf((i+1) / moving_range)
return adj
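# Note (added for clarity): moving_avg smooths each row of the contact map with a
# Gaussian-weighted sum over up to 3 * moving_range genomic neighbors on either side;
# rows shifted past the ends of the matrix are padded by repeating the first/last row,
# and norm.pdf((i + 1) / moving_range) supplies the weight for offset i + 1.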
class GraphSageEncoder_with_weights(nn.Module):
"""
    Encodes a node using the 'convolutional' GraphSAGE approach
"""
def __init__(self, features, linear_features=None, feature_dim=64,
embed_dim=64,
num_sample=10, gcn=False, num_list=None, transfer_range=0, start_end_dict=None, pass_pseudo_id=False,
remove=False, pass_remove=False):
super(GraphSageEncoder_with_weights, self).__init__()
self.features = features
self.linear_features = linear_features
self.feat_dim = feature_dim
self.pass_pseudo_id = pass_pseudo_id
# aggregator aggregates through hic graph
self.aggregator = MeanAggregator_with_weights(self.features, gcn, num_list, start_end_dict, pass_pseudo_id, remove, pass_remove)
        # linear aggregator aggregates through 1D genomic neighbors
self.linear_aggregator = MeanAggregator(self.linear_features, gcn, num_list, start_end_dict, pass_pseudo_id)
self.num_sample = num_sample
self.transfer_range = transfer_range
self.gcn = gcn
self.embed_dim = embed_dim
self.start_end_dict = start_end_dict
input_size = 1
if not self.gcn:
input_size += 1
if self.transfer_range > 0:
input_size += 1
self.nn = nn.Linear(input_size * self.feat_dim, embed_dim)
self.num_list = torch.as_tensor(num_list)
self.bin_feats = torch.zeros([int(self.num_list[-1]) + 1, self.feat_dim], dtype=torch.float, device=device)
if self.transfer_range > 0:
self.bin_feats_linear = torch.zeros([int(self.num_list[-1]) + 1, self.feat_dim], dtype=torch.float, device=device)
if not self.gcn:
self.bin_feats_self = torch.zeros([int(self.num_list[-1]) + 1, self.feat_dim], dtype=torch.float, device=device)
self.fix = False
self.forward = self.forward_on_hook
def start_fix(self):
self.fix = True
ids = (torch.arange(int(self.num_list[0])) + 1).long().to(device, non_blocking=True).view(-1)
self.cell_feats = self.features(ids)
def fix_cell2(self, cell, bin_ids=None, sparse_matrix=None, local_transfer_range=0):
self.fix = True
with torch.no_grad():
for chrom, bin_id in enumerate(bin_ids):
magic_number = int(self.num_list[-1] + 1)
nodes_flatten = torch.from_numpy(bin_id).long().to(device, non_blocking=True)
neigh_feats = self.aggregator.forward_GCN(nodes_flatten,
sparse_matrix[chrom], local_transfer_range)
self.bin_feats[nodes_flatten] = neigh_feats.detach().clone()
tr = self.transfer_range
if tr > 0:
start = np.maximum(bin_id - tr, self.start_end_dict[bin_id, 0] + 1)
end = np.minimum(bin_id + tr, self.start_end_dict[bin_id, 1] + 1)
to_neighs = np.array([list(range(s, e)) for s, e in zip(start, end)], dtype='object')
neigh_feats_linear = self.linear_aggregator.forward(nodes_flatten,
to_neighs,
2 * tr + 1)
self.bin_feats_linear[nodes_flatten, :] = neigh_feats_linear.detach().clone()
if not self.gcn:
self.bin_feats_self[nodes_flatten, :] = self.features(nodes_flatten)
def forward_on_hook(self, nodes, to_neighs, *args):
"""
Generates embeddings for a batch of nodes.
nodes -- list of nodes
pseudo_nodes -- pseudo_nodes for getting the correct neighbors
"""
tr = self.transfer_range
if len(nodes.shape) == 1:
nodes_flatten = nodes
else:
sz_b, len_seq = nodes.shape
nodes_flatten = nodes[:, 1:].contiguous().view(-1)
if self.fix:
cell_feats = self.cell_feats[nodes[:, 0] - 1, :]
neigh_feats = self.bin_feats[nodes_flatten, :].view(sz_b, len_seq - 1, -1)
if tr > 0:
neigh_feats_linear = self.bin_feats_linear[nodes_flatten, :].view(sz_b, len_seq - 1, -1)
else:
if len(nodes.shape) == 1:
neigh_feats = self.aggregator.forward(nodes_flatten, to_neighs, self.num_sample)
else:
cell_feats = self.features(nodes[:, 0].to(device, non_blocking=True))
neigh_feats = self.aggregator.forward(nodes_flatten, to_neighs,
self.num_sample).view(sz_b, len_seq - 1, -1)
if tr > 0:
nodes_flatten_np = nodes_flatten.cpu().numpy()
start = np.maximum(nodes_flatten_np - tr, self.start_end_dict[nodes_flatten_np, 0])
end = np.minimum(nodes_flatten_np + tr, self.start_end_dict[nodes_flatten_np, 1])
to_neighs = np.array([list(range(s, e)) for s, e in zip(start, end)])
neigh_feats_linear = self.linear_aggregator.forward(nodes_flatten,
to_neighs,
2 * tr + 1)
if len(nodes.shape) > 1:
neigh_feats_linear = neigh_feats_linear.view(sz_b, len_seq - 1, -1)
list1 = [neigh_feats, neigh_feats_linear] if tr > 0 else [neigh_feats]
if not self.gcn:
if self.fix:
self_feats = self.bin_feats_self[nodes_flatten].view(sz_b, len_seq - 1, -1)
else:
if len(nodes.shape) == 1:
self_feats = self.features(nodes_flatten)
else:
sz_b, len_seq = nodes.shape
self_feats = self.features(nodes_flatten).view(sz_b, len_seq - 1, -1)
list1.append(self_feats)
        if len(list1) > 1:
combined = torch.cat(list1, dim=-1)
else:
combined = list1[0]
combined = activation_func(self.nn(combined))
if len(nodes.shape) > 1:
combined = torch.cat([cell_feats[:, None, :], combined], dim=1).view(sz_b, len_seq, -1)
return combined, torch.cat([cell_feats[:, None, :], self_feats], dim=1).view(sz_b, len_seq, -1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models, api
class RemovalStrategy(models.Model):
_name = 'product.removal'
_description = 'Removal Strategy'
name = fields.Char('Name', required=True)
method = fields.Char("Method", required=True, help="FIFO, LIFO...")
class PutAwayStrategy(models.Model):
_name = 'product.putaway'
_description = 'Put Away Strategy'
name = fields.Char('Name', required=True)
fixed_location_ids = fields.One2many(
'stock.fixed.putaway.strat', 'putaway_id',
'Fixed Locations Per Product Category', domain=[('category_id', '!=', False)], copy=True)
product_location_ids = fields.One2many(
'stock.fixed.putaway.strat', 'putaway_id',
'Fixed Locations Per Product', domain=[('product_id', '!=', False)], copy=True)
def putaway_apply(self, product):
put_away = self._get_putaway_rule(product)
if put_away:
return put_away.fixed_location_id
return self.env['stock.location']
def _get_putaway_rule(self, product):
if self.product_location_ids:
put_away = self.product_location_ids.filtered(lambda x: x.product_id == product)
if put_away:
return put_away[0]
if self.fixed_location_ids:
categ = product.categ_id
while categ:
put_away = self.fixed_location_ids.filtered(lambda x: x.category_id == categ)
if put_away:
return put_away[0]
categ = categ.parent_id
return self.env['stock.location']
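    # Note (added for clarity): a product-specific line always wins over category lines;
    # category lines are matched on the product's own category first and then up the parent
    # chain, so e.g. a rule on a parent category such as "All / Consumable" (hypothetical
    # names) applies to products in its child categories when no more specific rule exists.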
class FixedPutAwayStrategy(models.Model):
_name = 'stock.fixed.putaway.strat'
_order = 'sequence'
_description = 'Fixed Putaway Strategy on Location'
product_id = fields.Many2one('product.product', 'Product')
putaway_id = fields.Many2one('product.putaway', 'Put Away Method', required=True)
category_id = fields.Many2one('product.category', 'Product Category')
fixed_location_id = fields.Many2one('stock.location', 'Location', required=True)
    sequence = fields.Integer('Priority', help="Give the more specialized categories a higher priority to put them at the top of the list.")
|
nilq/baby-python
|
python
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 01c_grad_utils.ipynb (unless otherwise specified).
__all__ = ['cg', 'cat_list_to_tensor', 'reverse_unroll', 'reverse', 'fixed_point', 'CG', 'CG_normaleq', 'neumann',
'exact', 'grd', 'list_dot', 'jvp', 'get_outer_gradients', 'cat_list_to_tensor', 'update_tensor_grads',
'grad_unused_zero', 'DifferentiableOptimizer', 'HeavyBall', 'Momentum', 'GradientDescent', 'gd_step',
'heavy_ball_step', 'torch_momentum_step']
# Cell
#export
import torch
from torch.autograd import grad as torch_grad
from torch import Tensor
from typing import List, Callable
from itertools import repeat
# Cell
"""from https://github.com/lrjconan/RBP/blob/9c6e68d1a7e61b1f4c06414fae04aeb43c8527cb/utils/model_helper.py"""
def cg(Ax, b, max_iter=100, epsilon=1.0e-5):
""" Conjugate Gradient
Args:
Ax: function, takes list of tensors as input
b: list of tensors
Returns:
x_star: list of tensors
"""
x_last = [torch.zeros_like(bb) for bb in b]
r_last = [torch.zeros_like(bb).copy_(bb) for bb in b]
p_last = [torch.zeros_like(rr).copy_(rr) for rr in r_last]
for ii in range(max_iter):
Ap = Ax(p_last)
Ap_vec = cat_list_to_tensor(Ap)
p_last_vec = cat_list_to_tensor(p_last)
r_last_vec = cat_list_to_tensor(r_last)
rTr = torch.sum(r_last_vec * r_last_vec)
pAp = torch.sum(p_last_vec * Ap_vec)
alpha = rTr / pAp
x = [xx + alpha * pp for xx, pp in zip(x_last, p_last)]
r = [rr - alpha * pp for rr, pp in zip(r_last, Ap)]
r_vec = cat_list_to_tensor(r)
if float(torch.norm(r_vec)) < epsilon:
break
beta = torch.sum(r_vec * r_vec) / rTr
p = [rr + beta * pp for rr, pp in zip(r, p_last)]
x_last = x
p_last = p
r_last = r
return x_last
def cat_list_to_tensor(list_tx):
return torch.cat([xx.view([-1]) for xx in list_tx])
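# Hedged usage sketch (toy tensors, not part of the original notebook): cg solves A x = b
# where x and b are represented as lists of tensors and the operator Ax is given as a function.
#   A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])
#   b = [torch.tensor([1.0, 2.0])]
#   x_star = cg(lambda xs: [A @ xs[0]], b)   # x_star[0] is approximately A^{-1} b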
# Cell
# noinspection PyUnusedLocal
def reverse_unroll(params: List[Tensor],
hparams: List[Tensor],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
set_grad=True) -> List[Tensor]:
"""
Computes the hypergradient by backpropagating through a previously employed inner solver procedure.
Args:
params: the output of a torch differentiable inner solver (it must depend on hparams in the torch graph)
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
set_grad: if True set t.grad to the hypergradient for every t in hparams
Returns:
the list of hypergradients for each element in hparams
"""
o_loss = outer_loss(params, hparams)
grads = torch.autograd.grad(o_loss, hparams, retain_graph=True)
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# Cell
# noinspection PyUnusedLocal
def reverse(params_history: List[List[Tensor]],
hparams: List[Tensor],
update_map_history: List[Callable[[List[Tensor], List[Tensor]], List[Tensor]]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
set_grad=True) -> List[Tensor]:
"""
Computes the hypergradient by recomputing and backpropagating through each inner update
using the inner iterates and the update maps previously employed by the inner solver.
    Similarly to checkpointing, this allows saving memory w.r.t. reverse_unroll at the cost of increased computation time.
Truncated reverse can be performed by passing only part of the trajectory information, i.e. only the
last k inner iterates and updates.
Args:
params_history: the inner iterates (from first to last)
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
update_map_history: updates used to solve the inner problem (from first to last)
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
set_grad: if True set t.grad to the hypergradient for every t in hparams
Returns:
the list of hypergradients for each element in hparams
"""
params_history = [[w.detach().requires_grad_(True) for w in params] for params in params_history]
o_loss = outer_loss(params_history[-1], hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params_history[-1], hparams)
alphas = grad_outer_w
grads = [torch.zeros_like(w) for w in hparams]
K = len(params_history) - 1
for k in range(-2, -(K + 2), -1):
w_mapped = update_map_history[k + 1](params_history[k], hparams)
bs = grad_unused_zero(w_mapped, hparams, grad_outputs=alphas, retain_graph=True)
grads = [g + b for g, b in zip(grads, bs)]
alphas = torch_grad(w_mapped, params_history[k], grad_outputs=alphas)
grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# Cell
def fixed_point(params: List[Tensor],
hparams: List[Tensor],
K: int ,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True,
stochastic=False) -> List[Tensor]:
"""
Computes the hypergradient by applying K steps of the fixed point method (it can end earlier when tol is reached).
Args:
params: the output of the inner solver procedure.
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
K: the maximum number of fixed point iterations
fp_map: the fixed point map which defines the inner problem
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
tol: end the method earlier when the normed difference between two iterates is less than tol
set_grad: if True set t.grad to the hypergradient for every t in hparams
stochastic: set this to True when fp_map is not a deterministic function of its inputs
Returns:
the list of hypergradients for each element in hparams
"""
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
if not stochastic:
w_mapped = fp_map(params, hparams)
vs = [torch.zeros_like(w) for w in params]
vs_vec = cat_list_to_tensor(vs)
for k in range(K):
vs_prev_vec = vs_vec
if stochastic:
w_mapped = fp_map(params, hparams)
vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=False)
else:
vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)
vs = [v + gow for v, gow in zip(vs, grad_outer_w)]
vs_vec = cat_list_to_tensor(vs)
if float(torch.norm(vs_vec - vs_prev_vec)) < tol:
break
if stochastic:
w_mapped = fp_map(params, hparams)
grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
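# Note (added for clarity): fixed_point approximates the implicit-function hypergradient.
# Writing the inner solution as a fixed point w = Phi(w, lambda), the loop above solves
#   v = (d Phi / d w)^T v + grad_w L_outer
# by fixed-point iteration, i.e. v = (I - J_w Phi^T)^{-1} grad_w L_outer, and the final
# hypergradient is grads = (d Phi / d lambda)^T v + grad_lambda L_outer.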
# Cell
def CG(params: List[Tensor],
hparams: List[Tensor],
K: int ,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True,
stochastic=False) -> List[Tensor]:
"""
Computes the hypergradient by applying K steps of the conjugate gradient method (CG).
It can end earlier when tol is reached.
Args:
params: the output of the inner solver procedure.
hparams: the outer variables (or hyperparameters), each element needs requires_grad=True
K: the maximum number of conjugate gradient iterations
fp_map: the fixed point map which defines the inner problem
outer_loss: computes the outer objective taking parameters and hyperparameters as inputs
tol: end the method earlier when the norm of the residual is less than tol
set_grad: if True set t.grad to the hypergradient for every t in hparams
stochastic: set this to True when fp_map is not a deterministic function of its inputs
Returns:
the list of hypergradients for each element in hparams
"""
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
if not stochastic:
w_mapped = fp_map(params, hparams)
def dfp_map_dw(xs):
if stochastic:
w_mapped_in = fp_map(params, hparams)
Jfp_mapTv = torch_grad(w_mapped_in, params, grad_outputs=xs, retain_graph=False)
else:
Jfp_mapTv = torch_grad(w_mapped, params, grad_outputs=xs, retain_graph=True)
return [v - j for v, j in zip(xs, Jfp_mapTv)]
vs = cg(dfp_map_dw, grad_outer_w, max_iter=K, epsilon=tol) # K steps of conjugate gradient
if stochastic:
w_mapped = fp_map(params, hparams)
grads = torch_grad(w_mapped, hparams, grad_outputs=vs)
grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# Cell
def CG_normaleq(params: List[Tensor],
hparams: List[Tensor],
K: int ,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True) -> List[Tensor]:
""" Similar to CG but the conjugate gradient is applied on the normal equation (has a higher time complexity)"""
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
w_mapped = fp_map(params, hparams)
def dfp_map_dw(xs):
Jfp_mapTv = torch_grad(w_mapped, params, grad_outputs=xs, retain_graph=True)
v_minus_Jfp_mapTv = [v - j for v, j in zip(xs, Jfp_mapTv)]
# normal equation part
Jfp_mapv_minus_Jfp_mapJfp_mapTv = jvp(lambda _params: fp_map(_params, hparams), params, v_minus_Jfp_mapTv)
return [v - vv for v, vv in zip(v_minus_Jfp_mapTv, Jfp_mapv_minus_Jfp_mapJfp_mapTv)]
v_minus_Jfp_mapv = [g - jfp_mapv for g, jfp_mapv in zip(grad_outer_w, jvp(
lambda _params: fp_map(_params, hparams), params, grad_outer_w))]
vs = cg(dfp_map_dw, v_minus_Jfp_mapv, max_iter=K, epsilon=tol) # K steps of conjugate gradient
grads = torch_grad(w_mapped, hparams, grad_outputs=vs, allow_unused=True)
grads = [g + v if g is not None else v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# Cell
def neumann(params: List[Tensor],
hparams: List[Tensor],
K: int ,
fp_map: Callable[[List[Tensor], List[Tensor]], List[Tensor]],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
tol=1e-10,
set_grad=True) -> List[Tensor]:
""" Saves one iteration from the fixed point method"""
    # from https://arxiv.org/pdf/1803.06396.pdf; should return the same gradient as fixed_point with K+1 iterations
params = [w.detach().requires_grad_(True) for w in params]
o_loss = outer_loss(params, hparams)
grad_outer_w, grad_outer_hparams = get_outer_gradients(o_loss, params, hparams)
w_mapped = fp_map(params, hparams)
vs, gs = grad_outer_w, grad_outer_w
gs_vec = cat_list_to_tensor(gs)
for k in range(K):
gs_prev_vec = gs_vec
vs = torch_grad(w_mapped, params, grad_outputs=vs, retain_graph=True)
gs = [g + v for g, v in zip(gs, vs)]
gs_vec = cat_list_to_tensor(gs)
if float(torch.norm(gs_vec - gs_prev_vec)) < tol:
break
grads = torch_grad(w_mapped, hparams, grad_outputs=gs)
grads = [g + v for g, v in zip(grads, grad_outer_hparams)]
if set_grad:
update_tensor_grads(hparams, grads)
return grads
def exact(opt_params_f: Callable[[List[Tensor]], List[Tensor]],
hparams: List[Tensor],
outer_loss: Callable[[List[Tensor], List[Tensor]], Tensor],
set_grad=True) -> List[Tensor]:
"""
    Computes the exact hypergradient using backpropagation, exploiting the closed-form torch differentiable function
that computes the optimal parameters given the hyperparameters (opt_params_f).
"""
grads = torch_grad(outer_loss(opt_params_f(hparams), hparams), hparams)
if set_grad:
update_tensor_grads(hparams, grads)
return grads
# Cell
# UTILS
def grd(a, b):
return torch.autograd.grad(a, b, create_graph=True, retain_graph=True)
def list_dot(l1, l2): # extended dot product for lists
return torch.stack([(a*b).sum() for a, b in zip(l1, l2)]).sum()
def jvp(fp_map, params, vs):
dummy = [torch.ones_like(phw).requires_grad_(True) for phw in fp_map(params)]
g1 = grd(list_dot(fp_map(params), dummy), params)
return grd(list_dot(vs, g1), dummy)
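# Note (added for clarity): jvp computes a Jacobian-vector product J_{fp_map}(params) @ vs
# with the standard "double backward" trick: the first grd() builds J^T @ dummy as a graph
# in dummy, and differentiating <vs, J^T dummy> w.r.t. dummy then yields J @ vs, so only
# reverse-mode autograd is needed.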
def get_outer_gradients(outer_loss, params, hparams, retain_graph=True):
grad_outer_w = grad_unused_zero(outer_loss, params, retain_graph=retain_graph)
grad_outer_hparams = grad_unused_zero(outer_loss, hparams, retain_graph=retain_graph)
return grad_outer_w, grad_outer_hparams
def cat_list_to_tensor(list_tx):
return torch.cat([xx.view([-1]) for xx in list_tx])
def update_tensor_grads(hparams, grads):
for l, g in zip(hparams, grads):
if l.grad is None:
l.grad = torch.zeros_like(l)
if g is not None:
l.grad += g
def grad_unused_zero(output, inputs, grad_outputs=None, retain_graph=False, create_graph=False):
grads = torch.autograd.grad(output, inputs, grad_outputs=grad_outputs, allow_unused=True,
retain_graph=retain_graph, create_graph=create_graph)
def grad_or_zeros(grad, var):
return torch.zeros_like(var) if grad is None else grad
return tuple(grad_or_zeros(g, v) for g, v in zip(grads, inputs))
# Cell
class DifferentiableOptimizer:
def __init__(self, loss_f, dim_mult, data_or_iter=None):
"""
Args:
loss_f: callable with signature (params, hparams, [data optional]) -> loss tensor
data_or_iter: (x, y) or iterator over the data needed for loss_f
"""
self.data_iterator = None
if data_or_iter:
self.data_iterator = data_or_iter if hasattr(data_or_iter, '__next__') else repeat(data_or_iter)
self.loss_f = loss_f
self.dim_mult = dim_mult
self.curr_loss = None
def get_opt_params(self, params):
opt_params = [p for p in params]
opt_params.extend([torch.zeros_like(p) for p in params for _ in range(self.dim_mult-1) ])
return opt_params
def step(self, params, hparams, create_graph):
raise NotImplementedError
def __call__(self, params, hparams, create_graph=True):
with torch.enable_grad():
return self.step(params, hparams, create_graph)
def get_loss(self, params, hparams):
if self.data_iterator:
data = next(self.data_iterator)
self.curr_loss = self.loss_f(params, hparams, data)
else:
self.curr_loss = self.loss_f(params, hparams)
return self.curr_loss
# Cell
class HeavyBall(DifferentiableOptimizer):
def __init__(self, loss_f, step_size, momentum, data_or_iter=None):
super(HeavyBall, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)
self.loss_f = loss_f
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
self.momentum_f = momentum if callable(momentum) else lambda x: momentum
def step(self, params, hparams, create_graph):
n = len(params) // 2
p, p_aux = params[:n], params[n:]
loss = self.get_loss(p, hparams)
sz, mu = self.step_size_f(hparams), self.momentum_f(hparams)
p_new, p_new_aux = heavy_ball_step(p, p_aux, loss, sz, mu, create_graph=create_graph)
return [*p_new, *p_new_aux]
# Cell
class Momentum(DifferentiableOptimizer):
"""
GD with momentum step as implemented in torch.optim.SGD
.. math::
v_{t+1} = \mu * v_{t} + g_{t+1} \\
p_{t+1} = p_{t} - lr * v_{t+1}
"""
def __init__(self, loss_f, step_size, momentum, data_or_iter=None):
super(Momentum, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)
self.loss_f = loss_f
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
self.momentum_f = momentum if callable(momentum) else lambda x: momentum
def step(self, params, hparams, create_graph):
n = len(params) // 2
p, p_aux = params[:n], params[n:]
loss = self.get_loss(p, hparams)
sz, mu = self.step_size_f(hparams), self.momentum_f(hparams)
p_new, p_new_aux = torch_momentum_step(p, p_aux, loss, sz, mu, create_graph=create_graph)
return [*p_new, *p_new_aux]
# Cell
class GradientDescent(DifferentiableOptimizer):
def __init__(self, loss_f, step_size, data_or_iter=None):
super(GradientDescent, self).__init__(loss_f, dim_mult=1, data_or_iter=data_or_iter)
self.step_size_f = step_size if callable(step_size) else lambda x: step_size
def step(self, params, hparams, create_graph):
loss = self.get_loss(params, hparams)
sz = self.step_size_f(hparams)
return gd_step(params, loss, sz, create_graph=create_graph)
def gd_step(params, loss, step_size, create_graph=True):
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
return [w - step_size * g for w, g in zip(params, grads)]
def heavy_ball_step(params, aux_params, loss, step_size, momentum, create_graph=True):
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
return [w - step_size * g + momentum * (w - v) for g, w, v in zip(grads, params, aux_params)], params
def torch_momentum_step(params, aux_params, loss, step_size, momentum, create_graph=True):
"""
GD with momentum step as implemented in torch.optim.SGD
.. math::
v_{t+1} = \mu * v_{t} + g_{t+1} \\
p_{t+1} = p_{t} - lr * v_{t+1}
"""
grads = torch.autograd.grad(loss, params, create_graph=create_graph)
new_aux_params = [momentum*v + g for v, g in zip(aux_params, grads)]
return [w - step_size * nv for w, nv in zip(params, new_aux_params)], new_aux_params
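# Note (added for clarity): heavy_ball_step keeps the previous iterate as the auxiliary state
# and adds momentum * (w_t - w_{t-1}) to the update, whereas torch_momentum_step keeps a
# velocity buffer v and follows the torch.optim.SGD convention
# v_{t+1} = momentum * v_t + g_{t+1}, p_{t+1} = p_t - lr * v_{t+1}.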
|
nilq/baby-python
|
python
|
import bisect
import keyword
import rope.base.simplify
MINIMAL_LEN_FOR_AS = 5
def get_name_at(resource, offset):
source_code = resource.read()
word_finder = Worder(source_code)
return word_finder.get_word_at(offset)
class Worder(object):
"""A class for finding boundaries of words and expressions
Note that in these methods, offset should be the index of the
    character, not the index of the character after it.
    Some of the methods here don't exactly do what their names might lead you
    to think they do; these probably should be fixed. Refer to
    ropetest/codeanalyzetest.py for what these methods return. Note that
    codeanalyzetest.py documents the current behavior, rather than what it
    should have been.
"""
def __init__(self, code, handle_ignores=False):
simplified = rope.base.simplify.real_code(code)
self.code_finder = _RealFinder(simplified, code)
self.handle_ignores = handle_ignores
self.code = code
def _init_ignores(self):
ignores = rope.base.simplify.ignored_regions(self.code)
self.dumb_finder = _RealFinder(self.code, self.code)
self.starts = [ignored[0] for ignored in ignores]
self.ends = [ignored[1] for ignored in ignores]
def _context_call(self, name, offset):
if self.handle_ignores:
if not hasattr(self, "starts"):
self._init_ignores()
start = bisect.bisect(self.starts, offset)
if start > 0 and offset < self.ends[start - 1]:
return getattr(self.dumb_finder, name)(offset)
return getattr(self.code_finder, name)(offset)
def get_primary_at(self, offset):
return self._context_call("get_primary_at", offset)
def get_word_at(self, offset):
return self._context_call("get_word_at", offset)
def get_primary_range(self, offset):
return self._context_call("get_primary_range", offset)
def get_splitted_primary_before(self, offset):
return self._context_call("get_splitted_primary_before", offset)
def get_word_range(self, offset):
return self._context_call("get_word_range", offset)
def is_function_keyword_parameter(self, offset):
return self.code_finder.is_function_keyword_parameter(offset)
def is_a_class_or_function_name_in_header(self, offset):
return self.code_finder.is_a_class_or_function_name_in_header(offset)
def is_from_statement_module(self, offset):
return self.code_finder.is_from_statement_module(offset)
def is_from_aliased(self, offset):
return self.code_finder.is_from_aliased(offset)
def is_import_statement_aliased_module(self, offset):
return self.code_finder.is_import_statement_aliased_module(offset)
def find_parens_start_from_inside(self, offset):
return self.code_finder.find_parens_start_from_inside(offset)
def is_a_name_after_from_import(self, offset):
return self.code_finder.is_a_name_after_from_import(offset)
def is_from_statement(self, offset):
return self.code_finder.is_from_statement(offset)
def get_from_aliased(self, offset):
return self.code_finder.get_from_aliased(offset)
def is_import_statement(self, offset):
return self.code_finder.is_import_statement(offset)
def is_assigned_here(self, offset):
return self.code_finder.is_assigned_here(offset)
def is_a_function_being_called(self, offset):
return self.code_finder.is_a_function_being_called(offset)
def get_word_parens_range(self, offset):
return self.code_finder.get_word_parens_range(offset)
def is_name_assigned_in_class_body(self, offset):
return self.code_finder.is_name_assigned_in_class_body(offset)
def is_on_function_call_keyword(self, offset):
return self.code_finder.is_on_function_call_keyword(offset)
def _find_parens_start(self, offset):
return self.code_finder._find_parens_start(offset)
def get_parameters(self, first, last):
return self.code_finder.get_parameters(first, last)
def get_from_module(self, offset):
return self.code_finder.get_from_module(offset)
def is_assigned_in_a_tuple_assignment(self, offset):
return self.code_finder.is_assigned_in_a_tuple_assignment(offset)
def get_assignment_type(self, offset):
return self.code_finder.get_assignment_type(offset)
def get_function_and_args_in_header(self, offset):
return self.code_finder.get_function_and_args_in_header(offset)
def get_lambda_and_args(self, offset):
return self.code_finder.get_lambda_and_args(offset)
def find_function_offset(self, offset):
return self.code_finder.find_function_offset(offset)
class _RealFinder(object):
def __init__(self, code, raw):
self.code = code
self.raw = raw
def _find_word_start(self, offset):
current_offset = offset
while current_offset >= 0 and self._is_id_char(current_offset):
current_offset -= 1
return current_offset + 1
def _find_word_end(self, offset):
while offset + 1 < len(self.code) and self._is_id_char(offset + 1):
offset += 1
return offset
def _find_last_non_space_char(self, offset):
while offset >= 0 and self.code[offset].isspace():
if self.code[offset] == "\n":
return offset
offset -= 1
return max(-1, offset)
def get_word_at(self, offset):
offset = self._get_fixed_offset(offset)
return self.raw[self._find_word_start(offset) : self._find_word_end(offset) + 1]
def _get_fixed_offset(self, offset):
if offset >= len(self.code):
return offset - 1
if not self._is_id_char(offset):
if offset > 0 and self._is_id_char(offset - 1):
return offset - 1
if offset < len(self.code) - 1 and self._is_id_char(offset + 1):
return offset + 1
return offset
def _is_id_char(self, offset):
return self.code[offset].isalnum() or self.code[offset] == "_"
def _find_string_start(self, offset):
kind = self.code[offset]
try:
return self.code.rindex(kind, 0, offset)
except ValueError:
return 0
def _find_parens_start(self, offset):
offset = self._find_last_non_space_char(offset - 1)
while offset >= 0 and self.code[offset] not in "[({":
if self.code[offset] not in ":,":
offset = self._find_primary_start(offset)
offset = self._find_last_non_space_char(offset - 1)
return offset
def _find_atom_start(self, offset):
old_offset = offset
if self.code[offset] == "\n":
return offset + 1
if self.code[offset].isspace():
offset = self._find_last_non_space_char(offset)
if self.code[offset] in "'\"":
return self._find_string_start(offset)
if self.code[offset] in ")]}":
return self._find_parens_start(offset)
if self._is_id_char(offset):
return self._find_word_start(offset)
return old_offset
def _find_primary_without_dot_start(self, offset):
"""It tries to find the undotted primary start
It is different from `self._get_atom_start()` in that it
follows function calls, too; such as in ``f(x)``.
"""
last_atom = offset
offset = self._find_last_non_space_char(last_atom)
while offset > 0 and self.code[offset] in ")]":
last_atom = self._find_parens_start(offset)
offset = self._find_last_non_space_char(last_atom - 1)
if offset >= 0 and (self.code[offset] in "\"'})]" or self._is_id_char(offset)):
atom_start = self._find_atom_start(offset)
if not keyword.iskeyword(self.code[atom_start : offset + 1]) or (
offset + 1 < len(self.code) and self._is_id_char(offset + 1)
):
return atom_start
return last_atom
def _find_primary_start(self, offset):
if offset >= len(self.code):
offset = len(self.code) - 1
if self.code[offset] != ".":
offset = self._find_primary_without_dot_start(offset)
else:
offset = offset + 1
while offset > 0:
prev = self._find_last_non_space_char(offset - 1)
if offset <= 0 or self.code[prev] != ".":
break
# Check if relative import
# XXX: Looks like a hack...
prev_word_end = self._find_last_non_space_char(prev - 1)
if self.code[prev_word_end - 3 : prev_word_end + 1] == "from":
offset = prev
break
offset = self._find_primary_without_dot_start(prev - 1)
if not self._is_id_char(offset):
break
return offset
def get_primary_at(self, offset):
offset = self._get_fixed_offset(offset)
start, end = self.get_primary_range(offset)
return self.raw[start:end].strip()
def get_splitted_primary_before(self, offset):
"""returns expression, starting, starting_offset
This function is used in `rope.codeassist.assist` function.
"""
if offset == 0:
return ("", "", 0)
end = offset - 1
word_start = self._find_atom_start(end)
real_start = self._find_primary_start(end)
if self.code[word_start:offset].strip() == "":
word_start = end
if self.code[end].isspace():
word_start = end
if self.code[real_start:word_start].strip() == "":
real_start = word_start
if real_start == word_start == end and not self._is_id_char(end):
return ("", "", offset)
if real_start == word_start:
return ("", self.raw[word_start:offset], word_start)
else:
if self.code[end] == ".":
return (self.raw[real_start:end], "", offset)
last_dot_position = word_start
if self.code[word_start] != ".":
last_dot_position = self._find_last_non_space_char(word_start - 1)
last_char_position = self._find_last_non_space_char(last_dot_position - 1)
if self.code[word_start].isspace():
word_start = offset
return (
self.raw[real_start : last_char_position + 1],
self.raw[word_start:offset],
word_start,
)
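    # Illustrative example (based on the docstring above, not taken from the rope test suite):
    # for source "x = a.b.cd" with the offset at the end of the line, this is expected to
    # return roughly ("a.b", "cd", offset_of_c), i.e. the dotted prefix already typed and the
    # word currently being completed.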
def _get_line_start(self, offset):
try:
return self.code.rindex("\n", 0, offset + 1)
except ValueError:
return 0
def _get_line_end(self, offset):
try:
return self.code.index("\n", offset)
except ValueError:
return len(self.code)
def is_name_assigned_in_class_body(self, offset):
word_start = self._find_word_start(offset - 1)
word_end = self._find_word_end(offset) + 1
if "." in self.code[word_start:word_end]:
return False
line_start = self._get_line_start(word_start)
line = self.code[line_start:word_start].strip()
return not line and self.get_assignment_type(offset) == "="
def is_a_class_or_function_name_in_header(self, offset):
word_start = self._find_word_start(offset - 1)
line_start = self._get_line_start(word_start)
prev_word = self.code[line_start:word_start].strip()
return prev_word in ["def", "class"]
def _find_first_non_space_char(self, offset):
if offset >= len(self.code):
return len(self.code)
while offset < len(self.code) and self.code[offset].isspace():
if self.code[offset] == "\n":
return offset
offset += 1
return offset
def is_a_function_being_called(self, offset):
word_end = self._find_word_end(offset) + 1
next_char = self._find_first_non_space_char(word_end)
return (
next_char < len(self.code)
and self.code[next_char] == "("
and not self.is_a_class_or_function_name_in_header(offset)
)
def _find_import_end(self, start):
return self._get_line_end(start)
def is_import_statement(self, offset):
try:
last_import = self.code.rindex("import ", 0, offset)
except ValueError:
return False
line_start = self._get_line_start(last_import)
return (
self._find_import_end(last_import + 7) >= offset
and self._find_word_start(line_start) == last_import
)
def is_from_statement(self, offset):
try:
last_from = self.code.rindex("from ", 0, offset)
from_import = self.code.index(" import ", last_from)
from_names = from_import + 8
except ValueError:
return False
from_names = self._find_first_non_space_char(from_names)
return self._find_import_end(from_names) >= offset
def is_from_statement_module(self, offset):
if offset >= len(self.code) - 1:
return False
stmt_start = self._find_primary_start(offset)
line_start = self._get_line_start(stmt_start)
prev_word = self.code[line_start:stmt_start].strip()
return prev_word == "from"
def is_import_statement_aliased_module(self, offset):
if not self.is_import_statement(offset):
return False
try:
line_start = self._get_line_start(offset)
import_idx = self.code.rindex("import", line_start, offset)
imported_names = import_idx + 7
except ValueError:
return False
# Check if the offset is within the imported names
if (
imported_names - 1 > offset
or self._find_import_end(imported_names) < offset
):
return False
try:
end = self._find_import_main_part_end(offset)
if not self._has_enough_len_for_as(end):
return False
as_end = min(self._find_word_end(end + 1), len(self.code))
as_start = self._find_word_start(as_end)
return self.code[as_start : as_end + 1] == "as"
except ValueError:
return False
def _has_enough_len_for_as(self, end):
return len(self.code) > end + MINIMAL_LEN_FOR_AS
def _find_import_main_part_end(self, offset):
end = self._find_word_end(offset)
while len(self.code) > end + 2 and self.code[end + 1] == ".":
end = self._find_word_end(end + 2)
return end
def is_a_name_after_from_import(self, offset):
try:
if len(self.code) > offset and self.code[offset] == "\n":
line_start = self._get_line_start(offset - 1)
else:
line_start = self._get_line_start(offset)
last_from = self.code.rindex("from ", line_start, offset)
from_import = self.code.index(" import ", last_from)
from_names = from_import + 8
except ValueError:
return False
if from_names - 1 > offset:
return False
return self._find_import_end(from_names) >= offset
def get_from_module(self, offset):
try:
last_from = self.code.rindex("from ", 0, offset)
import_offset = self.code.index(" import ", last_from)
end = self._find_last_non_space_char(import_offset)
return self.get_primary_at(end)
except ValueError:
pass
def is_from_aliased(self, offset):
if not self.is_a_name_after_from_import(offset):
return False
try:
end = self._find_word_end(offset)
as_end = min(self._find_word_end(end + 1), len(self.code))
as_start = self._find_word_start(as_end)
return self.code[as_start : as_end + 1] == "as"
except ValueError:
return False
def get_from_aliased(self, offset):
try:
end = self._find_word_end(offset)
as_ = self._find_word_end(end + 1)
alias = self._find_word_end(as_ + 1)
start = self._find_word_start(alias)
return self.raw[start : alias + 1]
except ValueError:
pass
def is_function_keyword_parameter(self, offset):
word_end = self._find_word_end(offset)
if word_end + 1 == len(self.code):
return False
next_char = self._find_first_non_space_char(word_end + 1)
equals = self.code[next_char : next_char + 2]
if equals == "==" or not equals.startswith("="):
return False
word_start = self._find_word_start(offset)
prev_char = self._find_last_non_space_char(word_start - 1)
return prev_char - 1 >= 0 and self.code[prev_char] in ",("
def is_on_function_call_keyword(self, offset):
stop = self._get_line_start(offset)
if self._is_id_char(offset):
offset = self._find_word_start(offset) - 1
offset = self._find_last_non_space_char(offset)
if offset <= stop or self.code[offset] not in "(,":
return False
parens_start = self.find_parens_start_from_inside(offset)
return stop < parens_start
def find_parens_start_from_inside(self, offset):
stop = self._get_line_start(offset)
while offset > stop:
if self.code[offset] == "(":
break
if self.code[offset] != ",":
offset = self._find_primary_start(offset)
offset -= 1
return max(stop, offset)
def is_assigned_here(self, offset):
return self.get_assignment_type(offset) is not None
def get_assignment_type(self, offset):
# XXX: does not handle tuple assignments
word_end = self._find_word_end(offset)
next_char = self._find_first_non_space_char(word_end + 1)
single = self.code[next_char : next_char + 1]
double = self.code[next_char : next_char + 2]
triple = self.code[next_char : next_char + 3]
if double not in ("==", "<=", ">=", "!="):
for op in [single, double, triple]:
if op.endswith("="):
return op
def get_primary_range(self, offset):
start = self._find_primary_start(offset)
end = self._find_word_end(offset) + 1
return (start, end)
def get_word_range(self, offset):
offset = max(0, offset)
start = self._find_word_start(offset)
end = self._find_word_end(offset) + 1
return (start, end)
def get_word_parens_range(self, offset, opening="(", closing=")"):
end = self._find_word_end(offset)
start_parens = self.code.index(opening, end)
index = start_parens
open_count = 0
while index < len(self.code):
if self.code[index] == opening:
open_count += 1
if self.code[index] == closing:
open_count -= 1
if open_count == 0:
return (start_parens, index + 1)
index += 1
return (start_parens, index)
def get_parameters(self, first, last):
keywords = []
args = []
current = self._find_last_non_space_char(last - 1)
while current > first:
primary_start = current
current = self._find_primary_start(current)
while current != first and (
self.code[current] not in "=," or self.code[current - 1] in "=!<>"
):
current = self._find_last_non_space_char(current - 1)
primary = self.raw[current + 1 : primary_start + 1].strip()
if self.code[current] == "=":
primary_start = current - 1
current -= 1
while current != first and self.code[current] not in ",":
current = self._find_last_non_space_char(current - 1)
param_name = self.raw[current + 1 : primary_start + 1].strip()
keywords.append((param_name, primary))
else:
args.append(primary)
current = self._find_last_non_space_char(current - 1)
args.reverse()
keywords.reverse()
return args, keywords
def is_assigned_in_a_tuple_assignment(self, offset):
start = self._get_line_start(offset)
end = self._get_line_end(offset)
primary_start = self._find_primary_start(offset)
primary_end = self._find_word_end(offset)
prev_char_offset = self._find_last_non_space_char(primary_start - 1)
next_char_offset = self._find_first_non_space_char(primary_end + 1)
next_char = prev_char = ""
if prev_char_offset >= start:
prev_char = self.code[prev_char_offset]
if next_char_offset < end:
next_char = self.code[next_char_offset]
try:
equals_offset = self.code.index("=", start, end)
except ValueError:
return False
if prev_char not in "(," and next_char not in ",)":
return False
parens_start = self.find_parens_start_from_inside(offset)
# XXX: only handling (x, y) = value
return offset < equals_offset and self.code[start:parens_start].strip() == ""
def get_function_and_args_in_header(self, offset):
offset = self.find_function_offset(offset)
lparens, rparens = self.get_word_parens_range(offset)
return self.raw[offset : rparens + 1]
def find_function_offset(self, offset, definition="def "):
while True:
offset = self.code.index(definition, offset)
if offset == 0 or not self._is_id_char(offset - 1):
break
offset += 1
def_ = offset + 4
return self._find_first_non_space_char(def_)
def get_lambda_and_args(self, offset):
offset = self.find_function_offset(offset, definition="lambda ")
lparens, rparens = self.get_word_parens_range(offset, opening=" ", closing=":")
return self.raw[offset : rparens + 1]
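# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# methods above belong to a class named ``Worder`` that is constructed from a
# source-code string, as in rope's worder module; the class name and
# constructor are assumptions, only the method names are taken from above.
def _demo_word_finding():
    code = "import os\nvalue = os.path.join('a', 'b')\n"
    finder = Worder(code)
    offset = code.index("path")
    print(finder.get_word_range(offset))      # offsets spanning the word "path"
    print(finder.get_primary_range(offset))   # offsets spanning the dotted primary (e.g. "os.path")
    print(finder.is_import_statement(code.index("os") + 1))  # True: inside "import os"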
|
nilq/baby-python
|
python
|
def main():
import RPi.GPIO as GPIO
try:
print('UNKNOWN:%d' % GPIO.UNKNOWN)
print('SERIAL:%d' % GPIO.SERIAL)
print('SPI:%d' % GPIO.SPI)
print('I2C:%d' % GPIO.I2C)
print('HARD_PWM:%d' % GPIO.HARD_PWM)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(3, GPIO.OUT)
for pin in range(1, 41):
try:
print('%02d: %d' % (pin, GPIO.gpio_function(pin)))
except ValueError as ex:
print(ex)
finally:
GPIO.cleanup()
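# Helper sketch (not part of the original script): maps the numeric values
# returned by GPIO.gpio_function() back to readable names, using the same
# RPi.GPIO constants printed above plus GPIO.IN/GPIO.OUT for pins configured
# as plain inputs/outputs.
def describe_pin_functions():
    import RPi.GPIO as GPIO
    names = {
        GPIO.IN: 'IN',
        GPIO.OUT: 'OUT',
        GPIO.SPI: 'SPI',
        GPIO.I2C: 'I2C',
        GPIO.HARD_PWM: 'HARD_PWM',
        GPIO.SERIAL: 'SERIAL',
        GPIO.UNKNOWN: 'UNKNOWN',
    }
    GPIO.setmode(GPIO.BOARD)
    try:
        for pin in range(1, 41):
            try:
                print('%02d: %s' % (pin, names.get(GPIO.gpio_function(pin), '?')))
            except ValueError as ex:
                # power/ground pins are not GPIOs and raise ValueError
                print('%02d: %s' % (pin, ex))
    finally:
        GPIO.cleanup()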
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.DEBUG)
main()
|
nilq/baby-python
|
python
|
"""
.. module:: Facemovie
:platform: Unix, Windows
:synopsis: Main class of the application. Contains the core image processing functions, and contains API methods.
.. moduleauthor:: Julien Lengrand-Lambert <jlengrand@gmail.com>
"""
import os
import sys
import logging
import cv
from util import exif
import Guy
from util.Notifier import Observable
from util.Notifier import Observer
class FaceMovie(object, Observable, Observer):
'''
Main class of the whole application.
Contains the core image processing functions.
Takes a bunch of parameters and a list of images and creates the output, depending on what the user asked for.
Contains general methods, aimed at being used through an interface.
'''
def __init__(self, face_params):
"""
Initializes all parameters of the application. Input and output folders
are defined, together with the classifier profile.
:param face_params: the bundle of user settings (input and output folders, sort
method, mode, speed, and the classifier profile used for face detection)
"""
Observable.__init__(self) # used to send notifications to process
Observer.__init__(self, "Lib") # used to receive notification to stop
#self.console_logger = logging.getLogger('ConsoleLog') # Used to send messages to the console
self.my_logger = logging.getLogger('IvolutionFile.Lib') # Used to save events into a file
self.source = face_params.input_folder # Source folder for pictures
# Retrieving parameters for Face Detection
self.face_params = face_params
out_folder = self.face_params.output_folder
self.out_path = "./data"
self.out_name = "ivolution"
self.out_format = "avi"
# updating the out_folder if needed
self.check_out_name(out_folder)
self.sort_method = face_params.sort # sorting by name or using metadata (n or e)
self.mode = face_params.mode # can be crop or conservative.
###
self.guys = [] # List of pictures in source folder
self.center = [0, 0] # Position of the center in output images (x, y)
self.dims = [0, 0] # Size of the final output image (x, y). Depends on selected mode
self.nChannels = 0 # number of channels of the set of images
self.depth = 0 # depth of the set of images
self.weight_steps = 5 # number of images to be inserted between each frame to reduce violent switch
self.speed = [3, 6, 9] # this one should be internal. Number of fps for the video
self.run = True # command used to stop the processing if needed
def update(self, message):
"""
Used to receive system commands, using the Observer pattern
"""
if len(message) == 1: # system command
self.run = False
def list_guys(self):
"""
Aims at populating the guys list, using the source folder as an input.
Guys list can be sorted either by name, or using metadata.
In case the source folder is not found, exits without processing.
Non-image files are automatically skipped.
The source folder is searched recursively; all subfolders are also processed.
.. note:: In case no valid date is found in metadata mode, the images are taken in name order
"""
# os.path.exists/isdir return booleans rather than raising, so test their
# results directly to detect a missing source folder
if not (os.path.exists(self.source) and os.path.isdir(self.source)):
#self.console_logger.critical("Source folder not found ! Exiting. . .")
self.my_logger.critical("Source folder not found ! Exiting. . .")
self.run = False
#sys.exit(0)
return -1
# loading images, create Guys and store it into guys
ptr = 0
for root, _, files in os.walk(self.source):
for a_file in files:
# notifying the Observers
self.notify_progress("Processing file", ptr, len(files))
if self.run: # as long as we want to continue
guy_source = os.path.join(root, a_file)
try:
cv.LoadImage(guy_source) # used to check image is valid
guy_name = os.path.splitext(a_file)[0]
# Tries to extract date from metadata
try:
guy_date = exif.parse(guy_source)['DateTime']
except Exception:
self.my_logger.warning("No metadata found for %s" % (guy_name))
#if self.sort_method == "exif":
#self.console_logger.warning(" No metadata found for %s" % (guy_name))
guy_date = ''
a_guy = Guy.Guy(guy_name, guy_date, guy_source)
ptr += 1 # Adding file only if picture
# populating guys
self.guys.append(a_guy)
self.notify(["Application", ["FILEADD", guy_name]])
except:
#self.console_logger.info("Skipping %s. Not an image file" % (guy_source))
self.my_logger.info("Skipping %s. Not an image file" % (guy_source))
# Checking if we have at least one image
if self.number_guys() > 0:
self.sort_guys()
##self.console_logger.info("%d guys found in source folder." % (self.number_guys()))
self.my_logger.info("%d guys found in source folder." % (self.number_guys()))
return self.number_guys()
def sort_guys(self):
"""
Guys list has just been populated, but elements are not ordered yet.
Sorts the elements of the list either by name or by date extracted from metadata,
depending on the chosen mode.
"""
# Sorting either by exif date or name
if self.sort_method == "exif":
self.guys.sort(key=lambda g: g.date)
else: # default is sort by name
self.guys.sort(key=lambda g: g.name)
def search_faces(self):
"""
Searches for all faces in the guys we have.
Results are stored directly in the guys.
Takes each guy one after the other and searches for a face in its image.
In case no face is found, a warning is logged and the guy is discarded later on.
"""
ptr = 0
for a_guy in self.guys:
ptr += 1
if self.run:
faceres = 0
a_guy.search_face(self.face_params)
# notifying the Observers
self.notify_progress("Processing picture", ptr, self.number_guys())
if a_guy.has_face(): # face(s) have been found
#self.console_logger.info("Face found for %s" % (a_guy.name))
self.my_logger.info("Face found for %s" % (a_guy.name))
faceres = 1 # for notifying
else:
#self.console_logger.warning("No face found for %s. Skipped . . ." % (a_guy.name))
self.my_logger.warning("No face found for %s. Skipped . . ." % (a_guy.name))
self.notify(["Application", ["FILEDONE", a_guy.name, faceres]])
def percent(self, num, den):
"""
Returns a float between 0 and 1, being the percentage given by num / den
"""
if num > den:
raise ArithmeticError
if den <= 0:
raise ZeroDivisionError
return (num / float(den))
def notify_progress(self, message_root, num, den):
"""
A notification scheme to quickly notify most common messages
"""
# notifying the Observers
try:
message = message_root + " %d / %d" % (num, den)
self.notify(["Application", [message, self.percent(num, den)]])
except (ArithmeticError, ZeroDivisionError):
self.my_logger.error("ArithmeticError on %s, %d, %d" % (message_root, num, den))
self.notify(["Application", ["Error", 0]])
def clean_guys(self):
"""
Removes all guys for who no face has been found.
This avoids all has_face loops in the rest of the application
"""
return [a_guy for a_guy in self.guys if a_guy.has_face()]
def prepare_faces(self):
"""
Searches for all faces and keeps only the ones that can be properly used.
Images without face are discarded.
The program is exited in case no face is found.
Searches for the reference size. It will be used later for image resizing, so that
all faces have the same size.
"""
self.search_faces()
# removes guys that have no faces
self.guys = self.clean_guys()
# check that everybody has the same number of channels
self.check_channels()
self.check_depth()
if self.number_guys() == 0:
#self.console_logger.error("No face has been found in the whole repository! Exiting. . . ")
self.my_logger.error("No face has been found in the whole repository! Exiting. . . ")
self.notify(["Error", 0])
sys.exit(0)
# normalize faces to make them clean
self.set_guys_ratio() # sets all faces to the same size, by calculating a ratio to a reference
def check_depth(self):
"""
Checks that the depth of all the images in guys is the same
Sets the depth for the video
"""
my_depth = []
for a_guy in self.guys:
my_depth.append(a_guy.depth)
my_depth = list(set(my_depth)) # remove duplicates
if len(my_depth) != 1:
# We do not have a unique number of channels for all images
#self.console_logger.error("All images must have the same depth")
self.my_logger.error("All images must have the same depth")
else:
self.depth = my_depth[0]
def check_channels(self):
"""
Checks that the number of channels of all the images in guys is the same
Sets the number of channels for the video
"""
my_chans = []
for a_guy in self.guys:
my_chans.append(a_guy.in_channels)
my_chans = list(set(my_chans)) # remove duplicates
if len(my_chans) != 1:
# We do not have a unique number of channels for all images
#self.console_logger.error("All images must have the same number of channels")
self.my_logger.error("All images must have the same number of channels")
else:
self.nChannels = my_chans[0]
def set_guys_ratio(self):
"""
For each Guy, calculates the factor by which the image is going to be resized so that all faces finally have the same size.
"""
ref = self.find_reference()
for a_guy in self.guys:
a_guy.set_ratio(ref)
def find_reference(self):
"""
Searches for the best face size we want to have.
Defined (for now) as the smallest of all found faces.
:returns: int -- the reference size of the bounding square for faces.
"""
references = []
for a_guy in self.guys:
if a_guy.has_face():
references.append(a_guy.faces[0][0][3]) # catch face size (width)
return min(references)
def find_final_dimensions(self, cropdims=(0, 0)):
"""
Finds the final dimensions that will be needed to create the output.
Depending on the desired output, it can be
- (default) the maximal size of the image, by overlapping all images and adding black borders.
- (crop) the maximal size of the image by overlapping all the images, without adding any black borders
- (custom crop) A chosen user size, defined as x * y times the head size.
"""
if self.mode == "conservative":
self.find_default_dims()
elif self.mode == "crop":
self.find_crop_dims()
elif self.mode == "custom crop":
# TODO : implement
#self.console_logger.critical("custom crop is not yet implemented")
self.my_logger.critical("custom crop is not yet implemented")
raise Exception
def find_default_dims(self):
"""
Calculates best output image size and position depending on
faces found in guys.
The system is simple. The output image should be as big as possible,
and faces are always placed in the same position. Depending on that,
the input image is placed in the output at the correct position.
Black borders are set everywhere else.
"""
# TODO: badly done !
x_af = 0
y_af = 0
ptr = 0
for a_guy in self.guys:
if self.run:
ptr += 1
# notifying the Observers
self.notify_progress("Processing picture", ptr, self.number_guys())
(xc, yc) = a_guy.resized_center()
(inx, iny) = a_guy.resized_dims()
# update center
if xc > self.center[0]:
self.center[0] = xc
if yc > self.center[1]:
self.center[1] = yc
# update right part
if (inx - xc) > x_af:
x_af = inx - xc
if (iny - yc) > y_af:
y_af = iny - yc
self.dims = [x_af + self.center[0], y_af + self.center[1]]
def find_crop_dims(self):
"""
Calculates the smallest output image that can be used while avoiding black borders on the image.
It will later be used to create the final image.
"""
# TODO: badly done !
ht = 1000000 # space left above eyes
hb = 1000000 # space left beneath eyes
wl = 1000000 # space left left of eyes
wr = 1000000 # space left right of eyes
#tr = 0
ptr = 0
for a_guy in self.guys:
if self.run:
ptr += 1
# notifying the Observers
self.notify_progress("Processing picture", ptr, self.number_guys())
(xc, yc) = a_guy.resized_center()
(inx, iny) = a_guy.resized_dims()
# finding width
if xc < wl:
wl = xc
if (inx - xc) < wr:
wr = inx - xc
# finding height
if yc < ht:
ht = yc
if (iny - yc) < hb:
hb = iny - yc
self.dims = [wl + wr, ht + hb]
self.center = [wl, ht]
def get_out_file(self):
"""
Reconstructs the final output file for the movie creation
:returns: String -- The output file path to be saved
"""
return os.path.join(self.out_path, (self.out_name + "." + self.out_format))
def save_movie(self):
"""
Creates a movie with all faces found in the inputs.
Guy is skipped if no face is found.
The output location and the frame rate are taken from the parameters given at
construction time (face_params.output_folder and face_params.speed).
"""
speedrate = self.face_params.speed
if "win" in sys.platform:
fourcc = cv.CV_FOURCC('C', 'V', 'I', 'D')
else: # some kind of Linux/Unix platform
fourcc = cv.CV_FOURCC('F', 'M', 'P', '4')
# Corrects frameSize to get a nice video output
frameSize = self.resizes_for_video_codec() # Fixme : Put in global parameter
# We have to resize the out_image to make them fit with the desired size
corr_im = cv.CreateImage(frameSize, self.depth, self.nChannels)
#frameSize = (652, 498)
pace = ["slow", "normal", "fast"]
my_video = cv.CreateVideoWriter(self.get_out_file(),
fourcc,
self.speed[speedrate],
frameSize,
1)
ii = 0
for a_guy in self.guys:
if self.run:
ii += 1
self.notify_progress("Saving frame", ii, self.number_guys())
#self.console_logger.info("Saving frame %d / %d" % (ii, self.number_guys()))
self.my_logger.info("Saving frame %d / %d" % (ii, self.number_guys()))
out_im = self.prepare_image(a_guy)
cv.Resize(out_im, corr_im, cv.CV_INTER_LINEAR)
cv.WriteFrame(my_video, corr_im)
def show_faces(self, mytime=1000):
"""
Show all faces that have been found for the guys.
The time for which each image will be displayed can be chosen.
:param mytime: time for which the image should be displayed (in ms) (1000)
:type mytime: int
"""
win_name = " Face Results"
cv.NamedWindow(win_name, cv.CV_WINDOW_NORMAL)
cv.ResizeWindow(win_name, 640, 480)
for a_guy in self.guys:
if self.run:
out_im = self.prepare_image(a_guy)
cv.ShowImage(win_name, out_im)
cv.WaitKey(mytime)
cv.DestroyWindow(win_name)
def save_faces(self, im_format="png"):
"""
Save all faces into the configured output folder, in the given image format
:param im_format: Format in which the image should be saved ("png")
:type im_format: string
"""
for a_guy in self.guys:
if self.run:
out_im = self.prepare_image(a_guy)
self.save_guy(out_im, a_guy.name, im_format)
def number_guys(self):
"""
Simply returns the number of guys in the current to-be movie
.. note::
Designed for interface use only
"""
return len(self.guys)
def out_display(self, im, name, time=1000, im_x=640, im_y=480):
"""
Displays the output image, for time ms.
Setting time to 0 causes the image to remain open.
Window name slightly changed to match output
:param im: the image to be saved, formatted as an OpenCV Image
:type im: IplImage
:param name: the name of the image to be saved
:type name: string
:param time: time for which the image should be displayed (in ms) (1000)
:type time: int
:param im_x: output size of the displayed image (in pixels) (640)
:type im_x: int
:param im_y: output size of the displayed image (in pixels) (480)
:type im_y: int
"""
win_name = name + " - out"
cv.NamedWindow(win_name, cv.CV_WINDOW_NORMAL)
cv.ResizeWindow(win_name, im_x, im_y)
cv.ShowImage(win_name, im)
cv.WaitKey(time)
cv.DestroyWindow(win_name)
def check_out_name(self, out_folder):
"""
Checks the desired output selected by the user.
It can be either a folder or a file itself.
Checks whether the designated path ends with an extension name.
In case it is, the extension is checked and changed if needed
:param out_folder: the path selected by the user as output location
:type out_folder: String
"""
if len(os.path.splitext(out_folder)[1]) > 0: # if ends up with an extension
self.out_path, complete_name = os.path.split(out_folder)
self.out_name, ext = os.path.splitext(complete_name)
if ext.lstrip(".") != self.out_format:  # splitext keeps the leading dot
# the format is not compliant with what we can do. We refuse it
self.my_logger.info("Changing format to avi")
else:
# no filename is given. We keep the default
self.out_path = os.path.split(out_folder)[0]
def save_guy(self, im, name, ext):
"""
Saves output image to the given format (given in extension)
:param im: the image to be saved, formatted as an OpenCV Image
:type im: IplImage
:param name: the name of the image to be saved
:type name: string
:param ext: Format in which the image should be saved ("png")
:type ext: string
"""
file_name = name + "." + ext
out_name = os.path.join(self.out_path, file_name)
self.my_logger.info("Saving %s" % (out_name))
#self.console_logger.info("Saving %s" % (out_name))
cv.SaveImage(out_name, im)
def prepare_image(self, a_guy):
"""
Takes a Guy and processes its input image. Prepares the final output image for this
Guy, so that it is ready to be saved in the desired output.
:param a_guy: The Guy currently being processed.
:type a_guy: Guy
:returns: IplImage -- The output image, created depending on the chosen mode, ready to be saved
"""
if self.mode == "conservative":
out_im = a_guy.create_default_output(self.dims,
self.center)
elif self.mode == "crop":
out_im = a_guy.create_crop_output(self.dims,
self.center)
return out_im
def resizes_for_video_codec(self):
"""
Searches for the closest frameSize such that each row of the image (width * number of channels) is a multiple of 4, to avoid weird image encoding.
The desired size is taken from self.dims (in pixels) before correction.
:returns: corrected frameSize -- The desired output size after correction. In (x, y) form.
"""
frameSize = (self.dims[0], self.dims[1])
try:
x, y = frameSize
except ValueError:
self.my_logger.error("unknown format for frameSize ")
return (0, 0)
if not isinstance(x, int) or not isinstance(y, int):
self.my_logger.error("method expects two integers")
return (0, 0)
while ((x * self.nChannels) % 4) != 0:
x += 1
return (x, y)
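# ---------------------------------------------------------------------------
# Minimal driving sketch (not part of the original module). _DemoParams is a
# stand-in for the real parameter object: only the attribute names used above
# (input_folder, output_folder, sort, mode, speed) are taken from this file;
# anything else the face detector needs is left out and assumed.
class _DemoParams(object):
    def __init__(self, input_folder, output_folder):
        self.input_folder = input_folder
        self.output_folder = output_folder
        self.sort = "name"     # or "exif" to sort pictures by shooting date
        self.mode = "crop"     # or "conservative" (black borders)
        self.speed = 1         # index into the fps list [3, 6, 9]

def _demo_facemovie():
    params = _DemoParams("./pictures", "./out/ivolution.avi")
    movie = FaceMovie(params)
    movie.list_guys()                  # scan the input folder for pictures
    if movie.number_guys() > 0:
        movie.prepare_faces()          # detect faces, drop faceless pictures
        movie.find_final_dimensions()  # compute the output frame geometry
        movie.save_movie()             # write ./out/ivolution.avi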
|
nilq/baby-python
|
python
|