content stringlengths 0 1.05M | origin stringclasses 2
values | type stringclasses 2
values |
|---|---|---|
from blackbox_mpc.policies.model_free_base_policy import ModelFreeBasePolicy
import tensorflow as tf
class RandomPolicy(ModelFreeBasePolicy):
    """Policy that samples uniformly random actions within the bounds of the
    environment's action space, for each parallel agent."""

    def __init__(self, number_of_agents, env_action_space):
        """
        This is the random policy for controlling the agent

        Parameters
        ---------
        number_of_agents: tf.int32
            Defines the number of runners running in parallel
        env_action_space: gym.ActionSpace
            Defines the action space of the gym environment.
        """
        super(RandomPolicy, self).__init__()
        self._num_of_agents = number_of_agents
        # BUG FIX: the bounds were swapped — the lower bound was taken from
        # env_action_space.high and the upper bound from env_action_space.low,
        # which makes tf.random.uniform sample from an inverted (empty) range.
        self._action_lower_bound = tf.constant(env_action_space.low,
                                               dtype=tf.float32)
        self._action_upper_bound = tf.constant(env_action_space.high,
                                               dtype=tf.float32)

    def act(self, observations, t, exploration_noise=False):
        """
        This is the act function for the random policy, which should be called to provide the action
        to be executed at the current time step.

        Parameters
        ---------
        observations: tf.float32
            Defines the current observations received from the environment.
            (Unused: a random policy ignores the state.)
        t: tf.float32
            Defines the current timestep. (Unused.)
        exploration_noise: bool
            Defines if exploration noise should be added to the action to be executed.
            (Unused: the action is already fully random.)

        Returns
        -------
        action: tf.float32
            The action to be executed for each of the runners (dims = runner X dim_U)
        """
        # minval/maxval are per-dimension tensors broadcast across the agent axis.
        return tf.random.uniform([self._num_of_agents, *self._action_lower_bound.shape],
                                 self._action_lower_bound,
                                 self._action_upper_bound, dtype=tf.float32)
| nilq/baby-python | python |
import ROOT
import Analysis
import AnalysisHelpers as AH
import Constants
#======================================================================
class ZAnalysis(Analysis.Analysis):
    """Analysis searching for events where Z bosons decay to two leptons of same flavour and opposite charge.
    """

    def __init__(self, store):
        super(ZAnalysis, self).__init__(store)

    def initialize(self):
        """Book all histograms filled by this analysis."""
        # Z candidate invariant mass
        self.invMass = self.addStandardHistogram("invMass")

        # Lepton multiplicity and leading-lepton kinematics/quality
        self.hist_leptn = self.addStandardHistogram("lep_n")
        self.hist_leadleptpt = self.addStandardHistogram("leadlep_pt")
        self.hist_leadlepteta = self.addStandardHistogram("leadlep_eta")
        self.hist_leadleptE = self.addStandardHistogram("leadlep_E")
        self.hist_leadleptphi = self.addStandardHistogram("leadlep_phi")
        self.hist_leadleptch = self.addStandardHistogram("leadlep_charge")
        self.hist_leadleptID = self.addStandardHistogram("leadlep_type")
        self.hist_leadleptptc = self.addStandardHistogram("leadlep_ptconerel30")
        self.hist_leadleptetc = self.addStandardHistogram("leadlep_etconerel20")
        self.hist_leadlepz0 = self.addStandardHistogram("leadlep_z0")
        self.hist_leadlepd0 = self.addStandardHistogram("leadlep_d0")

        # Trailing-lepton kinematics/quality
        self.hist_trailleptpt = self.addStandardHistogram("traillep_pt")
        self.hist_traillepteta = self.addStandardHistogram("traillep_eta")
        self.hist_trailleptE = self.addStandardHistogram("traillep_E")
        self.hist_trailleptphi = self.addStandardHistogram("traillep_phi")
        self.hist_trailleptch = self.addStandardHistogram("traillep_charge")
        self.hist_trailleptID = self.addStandardHistogram("traillep_type")
        self.hist_trailleptptc = self.addStandardHistogram("traillep_ptconerel30")
        self.hist_trailleptetc = self.addStandardHistogram("traillep_etconerel20")
        self.hist_traillepz0 = self.addStandardHistogram("traillep_z0")
        self.hist_traillepd0 = self.addStandardHistogram("traillep_d0")

        # Jets, missing transverse energy and vertex information
        self.hist_njets = self.addStandardHistogram("n_jets")
        self.hist_jetspt = self.addStandardHistogram("jet_pt")
        self.hist_jetm = self.addStandardHistogram("jet_m")
        self.hist_jetJVF = self.addStandardHistogram("jet_jvf")
        self.hist_jeteta = self.addStandardHistogram("jet_eta")
        self.hist_jetmv1 = self.addStandardHistogram("jet_MV1")
        self.hist_etmiss = self.addStandardHistogram("etmiss")
        self.hist_vxp_z = self.addStandardHistogram("vxp_z")
        self.hist_pvxp_n = self.addStandardHistogram("pvxp_n")

    def analyze(self):
        """Apply the Z->ll event selection and fill all histograms.

        Returns True when the event passes every cut, False otherwise.
        """
        # retrieving objects
        eventinfo = self.Store.getEventInfo()
        # Data events carry unit weight; simulation is weighted by the scale
        # factors and the generator event weight.
        weight = eventinfo.scalefactor()*eventinfo.eventWeight() if not self.getIsData() else 1
        self.countEvent("no cut", weight)

        # apply standard event based selection
        if not AH.StandardEventCuts(eventinfo): return False
        self.countEvent("EventCuts", weight)

        # Lepton Requirements: exactly two good leptons, pt-ordered
        GoodLeptons = AH.selectAndSortContainer(self.Store.getLeptons(), AH.isGoodLepton, lambda p: p.pt())
        if not (len(GoodLeptons) == 2): return False
        self.countEvent("2 high pt Leptons", weight)

        leadLepton = GoodLeptons[0]
        trailLepton = GoodLeptons[1]

        # test Z candidate: opposite charge and same flavour
        if not (leadLepton.charge() * trailLepton.charge() < 0): return False
        if not (abs(leadLepton.pdgId()) == abs(trailLepton.pdgId())): return False
        # BUG FIX: the mass window was "< -999", which no event can ever pass
        # (an absolute value is never negative), so analyze() rejected every
        # event. Require the dilepton mass to lie within 25% of the Z mass;
        # expressing the window relative to Constants.Z_Mass keeps the cut
        # independent of the unit convention (GeV vs MeV).
        if not (abs((leadLepton.tlv() + trailLepton.tlv()).M() - Constants.Z_Mass) < 0.25 * Constants.Z_Mass): return False

        # Vertex Histograms
        self.hist_vxp_z.Fill(eventinfo.primaryVertexPosition(), weight)
        self.hist_pvxp_n.Fill(eventinfo.numberOfVertices(), weight)

        # Z boson Histograms
        self.invMass.Fill((leadLepton.tlv() + trailLepton.tlv()).M(), weight)

        # Missing Et Histograms
        etmiss = self.Store.getEtMiss()
        self.hist_etmiss.Fill(etmiss.et(), weight)
        self.hist_leptn.Fill(len(GoodLeptons), weight)

        # Leading Lepton Histograms
        self.hist_leadleptpt.Fill(leadLepton.pt(), weight)
        self.hist_leadlepteta.Fill(leadLepton.eta(), weight)
        self.hist_leadleptE.Fill(leadLepton.e(), weight)
        self.hist_leadleptphi.Fill(leadLepton.phi(), weight)
        self.hist_leadleptch.Fill(leadLepton.charge(), weight)
        self.hist_leadleptID.Fill(leadLepton.pdgId(), weight)
        self.hist_leadleptptc.Fill(leadLepton.isoptconerel30(), weight)
        self.hist_leadleptetc.Fill(leadLepton.isoetconerel20(), weight)
        self.hist_leadlepz0.Fill(leadLepton.z0(), weight)
        self.hist_leadlepd0.Fill(leadLepton.d0(), weight)

        # Trailing Lepton Histograms
        self.hist_trailleptpt.Fill(trailLepton.pt(), weight)
        self.hist_traillepteta.Fill(trailLepton.eta(), weight)
        self.hist_trailleptE.Fill(trailLepton.e(), weight)
        self.hist_trailleptphi.Fill(trailLepton.phi(), weight)
        self.hist_trailleptch.Fill(trailLepton.charge(), weight)
        self.hist_trailleptID.Fill(trailLepton.pdgId(), weight)
        self.hist_trailleptptc.Fill(trailLepton.isoptconerel30(), weight)
        self.hist_trailleptetc.Fill(trailLepton.isoetconerel20(), weight)
        self.hist_traillepz0.Fill(trailLepton.z0(), weight)
        self.hist_traillepd0.Fill(trailLepton.d0(), weight)

        # Jet Histograms (plain loop instead of a side-effect list comprehension)
        jets = AH.selectAndSortContainer(self.Store.getJets(), AH.isGoodJet, lambda p: p.pt())
        self.hist_njets.Fill(len(jets), weight)
        for jet in jets:
            self.hist_jetm.Fill(jet.m(), weight)
            self.hist_jetspt.Fill(jet.pt(), weight)
            self.hist_jetJVF.Fill(jet.jvf(), weight)
            self.hist_jeteta.Fill(jet.eta(), weight)
            self.hist_jetmv1.Fill(jet.mv1(), weight)
        return True

    def finalize(self):
        """Nothing to do after the event loop for this analysis."""
        pass
| nilq/baby-python | python |
import uuid
import os
import shutil
def create_tmp_dir() -> str:
    """Create and return a unique scratch directory under /tmp/gitopscli/.

    The leaf directory name is a freshly generated UUID, so concurrent
    callers never collide.
    """
    scratch_path = "/tmp/gitopscli/" + str(uuid.uuid4())
    os.makedirs(scratch_path)
    return scratch_path
def delete_tmp_dir(tmp_dir: str) -> None:
    """Recursively remove *tmp_dir*, silently ignoring missing paths or
    filesystem errors."""
    shutil.rmtree(tmp_dir, ignore_errors=True)
| nilq/baby-python | python |
"""
BeWilder - a *wild* text adventure game :: Main game module
# Make a new player object that is currently in the 'outside' room.
Write a loop that:
- Prints the current room name
- Prints the current description (the textwrap module might be useful here).
- Waits for user input and decides what to do.
- If the user enters a cardinal direction, attempt to move to the room there.
- Print an error message if the movement isn't allowed.
- If the user enters "q", quit the game.
"""
# %%
import sys
from adv_utils import justify_center, table_printer, prompt, link_rooms
from item import Food, Medicine, Artifact, Weapon, Armor
from room import Room
from player import Player
# %%
# === Declare all the rooms === #
# Map of room-key -> Room(title, description) describing the game world.
room = {
    "outside": Room("Outside Cave Entrance", "North of you, the cave mount beckons."),
    "foyer": Room(
        "Foyer",
        """Dim light filters in from the south. Dusty
passages run north and east.""",
    ),
    "overlook": Room(
        "Grand Overlook",
        """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm.""",
    ),
    "narrow": Room(
        "Narrow Passage",
        """The narrow passage bends here from west
to north. The smell of gold permeates the air.""",
    ),
    "treasure": Room(
        "Treasure Chamber",
        """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south.""",
    ),
}
# %%
# === Link rooms together === #
# Exits are attributes named <direction>_to pointing at the neighbouring Room.
# NOTE(review): the imported link_rooms helper is not used here — confirm
# whether these manual assignments were meant to be replaced by it.
room["outside"].n_to = room["foyer"]
room["foyer"].s_to = room["outside"]
room["foyer"].n_to = room["overlook"]
room["foyer"].e_to = room["narrow"]
room["overlook"].s_to = room["foyer"]
room["narrow"].w_to = room["foyer"]
room["narrow"].n_to = room["treasure"]
room["treasure"].s_to = room["narrow"]
# %%
# === Instantiate items === #
# Armor pieces (effect = protection value)
helmet = Armor("Helmet", "Protects the noggin", effect=9)
gauntlets = Armor("Gauntlets", "Protects the hands/wrists", effect=3)
boots = Armor("Boots", "Protects the feet/ankles", effect=4)
shield = Armor("Shield", "All around protection", effect=5)
# Weapons; bow and arrow each require the other to be usable
sword = Weapon("Sword", "Good for close combat encounters", effect=6)
bow = Weapon("Bow", "Good for long-range attacks", effect=3, requires="Arrow")
arrow = Weapon("Arrow", "Missile shot by bow", effect=4, requires="Bow")
dagger = Weapon("Dagger", "Good for close quarters", effect=2)
# Potions: negative effects are harmful, positive heal
potion1 = Medicine("Potion", "May help, may hurt", effect=-12)
potion2 = Medicine("Potion", "May help, may hurt", effect=-2)
potion3 = Medicine("Potion", "May help, may hurt", effect=20)
jerky = Food("Jerky", "A nice slab of jerky", effect=2)
gem1 = Artifact("Gem", "A sparkling gem", ability="confidence", effect=1)
gem2 = Artifact("Gem", "A sparkling gem", ability="confidence", effect=1)
# === Add items to rooms === #
# NOTE(review): boots and dagger are instantiated but never placed in a room.
room["outside"].add_item(helmet)
room["foyer"].add_item(gauntlets)
room["foyer"].add_item(arrow)
room["foyer"].add_item(potion2)
room["narrow"].add_item(sword)
room["narrow"].add_item(potion1)
room["overlook"].add_item(bow)
room["overlook"].add_item(jerky)
room["overlook"].add_item(potion3)
room["treasure"].add_item(shield)
room["treasure"].add_item(gem1)
room["treasure"].add_item(gem2)
# %%
# === Define the key commands === #
# Maps every accepted command word to a short human-readable description.
# NOTE(review): descriptions are presumably rendered by prompt() — confirm.
verbs = {
    "n": "move north",
    "s": "move south",
    "e": "move east",
    "w": "move west",
    "inv": "display inventory",
    "get": "add item to inventory",
    "take": "add item to inventory",
    "drop": "remove item from inventory",
    "q": "quit",
}
# %%
# ====== Main ====== #
def initiate_game(player_name: str, rooms: dict = room):
    """Run the bewilder game REPL.

    Parameters
    ----------
    player_name : str
        Name of the player to create; the player starts in rooms["outside"].
    rooms : dict
        Mapping of room keys to Room objects (defaults to the module-level
        room dict).

    The loop reads a command, validates it against the module-level `verbs`
    table, and either moves the player, shows the inventory, transfers items,
    or quits on "q".
    """
    # Instantiate player, which prints the initial room
    player = Player(player_name, rooms["outside"])

    while True:
        cmd = prompt(verbs).lower().split()
        if not cmd:
            # BUG FIX: an empty input line previously raised IndexError on cmd[0]
            print("Command not available...\nTry again.")
            continue
        verb = cmd[0]
        if verb not in verbs:  # Filter out incorrect key commands
            print("Command not available...\nTry again.")
        elif verb == "q":  # Quit game upon pressing "q"
            print("Exiting game...")
            sys.exit(0)
        elif len(cmd) == 1:  # Single-word commands
            if verb == "inv":  # Display inventory
                player.inventory()
            else:  # Move player: look up destination room in that direction
                destination = getattr(player.current_room, f"{verb}_to")
                if destination:
                    player.move(destination)
                else:
                    print("No room in that direction!")
        else:
            # Allow for multiple items to be acted upon
            for obj in cmd[1:]:
                if verb in ("get", "take"):  # Pick up item
                    try:
                        item = player.current_room.items[obj]
                    except KeyError:
                        print("Item not available.")
                    else:
                        # BUG FIX: this was a `finally:` clause, which ran even
                        # after a KeyError — crashing with an unbound `item` on
                        # the first miss, or re-adding a stale item afterwards.
                        player.add_item(item)
                else:  # Drop item
                    try:
                        item = player.items[obj]
                    except KeyError:
                        print("Item not available to drop.")
                    else:
                        # BUG FIX: same `finally:` -> `else:` correction as above.
                        player.rm_item(item)
# %%
if __name__ == "__main__":
    # Guard the entry point so importing this module does not start the REPL.
    initiate_game("jeopard")
| nilq/baby-python | python |
import inspect
import re
import sys
from builtins import object
from operator import attrgetter
from os import sep, path, mkdir
try:
from os import scandir
except ImportError:
from scandir import scandir
from n_utils.git_utils import Git
from n_utils.aws_infra_util import load_parameters
class Component(object):
    """A project component: a directory under the project root whose
    subdirectories are typed subcomponents (stack-*, docker-*, image*, ...)."""

    # Discovered SC* classes, shared as a cache; populated lazily in __init__.
    subcomponent_classes = []

    def __init__(self, name, project):
        self.name = name
        self.subcomponents = []  # lazily filled by get_subcomponents()
        self.project = project
        if not self.subcomponent_classes:
            # Collect every class in this module whose name starts with "SC";
            # each one represents a subcomponent type.
            self.subcomponent_classes = [name_and_obj for name_and_obj in inspect.getmembers(sys.modules["n_utils.ndt_project"]) if name_and_obj[0].startswith("SC") and inspect.isclass(name_and_obj[1])]

    def get_subcomponents(self):
        """Return this component's subcomponents, discovered once and cached,
        sorted by name."""
        if not self.subcomponents:
            self.subcomponents = sorted(self._find_subcomponents(), key=attrgetter("name"))
        return self.subcomponents

    def _find_subcomponents(self):
        # Scan the component directory; every subdirectory whose name matches
        # one of the SC* types becomes a subcomponent instance.
        ret = []
        for subdir in [de.name for de in scandir(self.project.root + sep + self.name) if self._is_subcomponent(de.name)]:
            for _, obj in self.subcomponent_classes:
                if obj(self, "").match_dirname(subdir):
                    # "image" has no name suffix; everything else is
                    # "<type>-<name>", possibly with dashes inside the name.
                    if subdir == "image":
                        sc_name = ""
                    else:
                        sc_name = "-".join(subdir.split("-")[1:])
                    ret.append(obj(self, sc_name))
        return ret

    def _is_subcomponent(self, dir):
        # True when any registered SC* type claims this directory name.
        for _, obj in self.subcomponent_classes:
            if obj(self, "").match_dirname(dir):
                return True
        return False
class SubComponent(object):
    """A single deployable unit (stack, docker image, ...) inside a component.

    The subcomponent type is derived from the subclass name: "SCStack" maps
    to type "stack", "SCDocker" to "docker", and so on.
    """

    def __init__(self, component, name):
        self.component = component
        self.name = name
        # Strip the leading "SC" from the class name and lowercase the rest.
        self.type = self.__class__.__name__[2:].lower()

    def get_dir(self):
        """Return this subcomponent's directory relative to the project root."""
        return sep.join([self.component.name, self.type + "-" + self.name])

    def match_dirname(self, dir):
        """Return True when *dir* looks like a directory of this type."""
        prefix = self.type + "-"
        return dir.startswith(prefix)

    def list_row(self, branch):
        """Return the colon-separated listing row for this subcomponent."""
        fields = [self.component.name, branch, self.type, self.name]
        return ":".join(fields)

    def job_properties_filename(self, branch, root):
        """Return the job properties-file path for (branch, subcomponent).

        Characters outside [A-Za-z0-9_-] in the branch name are replaced with
        underscores so the result is a safe file name.
        """
        safe_branch = re.sub(r'[^\w-]', '_', branch)
        basename = "-".join([self.type, safe_branch, self.component.name, self.name])
        return sep.join([root, "job-properties", basename + ".properties"])
class SCImage(SubComponent):
    """Subcomponent for image-bake directories: either the bare "image"
    directory (nameless) or "image-<name>"."""

    def get_dir(self):
        """Return the image directory; a nameless image lives in plain "image"."""
        suffix = "image-" + self.name if self.name else "image"
        return self.component.name + sep + suffix

    def match_dirname(self, dir):
        """Accept both the bare "image" directory and "image-<name>"."""
        return dir == "image" or dir.startswith("image-")

    def list_row(self, branch):
        """Listing row; a nameless image is displayed as "-"."""
        display = self.name if self.name else "-"
        return ":".join([self.component.name, branch, self.type, display])

    def job_properties_filename(self, branch, root):
        """Properties path; the name segment is omitted for nameless images."""
        parts = [self.type, re.sub(r'[^\w-]', '_', branch), self.component.name]
        if self.name:
            parts.append(self.name)
        return sep.join([root, "job-properties", "-".join(parts) + ".properties"])
class SCStack(SubComponent):
    """CloudFormation stack subcomponent ("stack-<name>" directories)."""
    pass
class SCDocker(SubComponent):
    """Docker image subcomponent ("docker-<name>" directories)."""
    pass
class SCServerless(SubComponent):
    """Serverless-framework subcomponent ("serverless-<name>" directories)."""
    pass
class SCCDK(SubComponent):
    """AWS CDK subcomponent ("cdk-<name>" directories)."""
    pass
class SCTerraform(SubComponent):
    """Terraform subcomponent ("terraform-<name>" directories)."""
    pass
class Project(object):
    """An ndt project tree: a root directory containing components, each of
    which contains typed subcomponents."""

    def __init__(self, root=".", branch=None):
        # Default to the current git branch when none is given.
        if not branch:
            self.branch = Git().get_current_branch()
        else:
            self.branch = branch
        # Cached Component list. (sic: "componets" misspelling kept as-is —
        # it is an internal attribute referenced consistently below.)
        self.componets = []
        self.root = root if root else guess_project_root()
        self.all_subcomponents = []  # cached flat list across all components

    def get_components(self):
        """Return all components, discovered once and cached, sorted by name."""
        if not self.componets:
            self.componets = sorted(self._find_components(), key=attrgetter("name"))
        return self.componets

    def get_component(self, component):
        """Return the component with the given name, or None when absent."""
        filtered = [c for c in self.get_components() if c.name == component]
        if len(filtered) == 1:
            return filtered[0]
        return None

    def _find_components(self):
        # A component is any direct subdirectory that contains an
        # infra*.properties file (see _is_component).
        return [Component(de.name, self) for de in scandir(self.root) if de.is_dir() and self._is_component(de.path)]

    def get_all_subcomponents(self, sc_type=None):
        """Return every subcomponent in the project, optionally filtered by
        type string (e.g. "stack", "docker")."""
        if not self.all_subcomponents:
            for component in self.get_components():
                self.all_subcomponents.extend(component.get_subcomponents())
        if not sc_type:
            return self.all_subcomponents
        else:
            return [sc for sc in self.all_subcomponents if sc.type == sc_type]

    def _is_component(self, dir):
        # True when the directory holds infra.properties or
        # infra-<something>.properties.
        return len([de for de in scandir(dir) if de.is_file() and (de.name == "infra.properties" or (de.name.startswith("infra-") and de.name.endswith(".properties")))]) > 0
def guess_project_root():
    """Guess the project root by probing likely directories.

    Tries the working directory, the git root, and up to four parent
    directories; the first one containing any subcomponents wins. "." is
    returned as-is, other matches as absolute paths. Returns None when no
    candidate matches.
    """
    candidates = [".", Git().get_git_root(), "..", "../..", "../../..", "../../../.."]
    for candidate in candidates:
        if len(Project(root=candidate).get_all_subcomponents()) > 0:
            return candidate if candidate == "." else path.abspath(candidate)
def list_jobs(export_job_properties=False, branch=None, json=False, component=None):
    """List buildable subcomponent jobs, per branch.

    Parameters
    ----------
    export_job_properties : bool
        When True, also resolve each job's parameters (written to
        job-properties/*.properties files in non-json mode).
    branch : str, optional
        Restrict to one branch; defaults to all branches.
    json : bool
        When True return a nested dict; otherwise a list of rows.
    component : str, optional
        Restrict to a single component by name.

    Returns
    -------
    dict or list
        {"branches": [...]} in json mode, else a list of colon-separated rows.
    """
    ret = {"branches":[]}
    arr = []
    param_files = {}
    with Git() as git:
        current_project = Project(root=guess_project_root())
        if branch:
            branches = [ branch ]
        else:
            branches = git.get_branches()
        components = []
        for c_branch in branches:
            branch_obj = {"name": c_branch, "components": []}
            ret["branches"].append(branch_obj)
            # The current branch can be read in place; other branches are
            # exported by git into a temporary root first.
            if c_branch == git.get_current_branch():
                project = current_project
            else:
                root = git.export_branch(c_branch)
                project = Project(root=root, branch=c_branch)
            if component:
                c_component = project.get_component(component)
                if not c_component:
                    # Requested component missing on this branch: bail out
                    # with an empty result of the requested shape.
                    print("No matching components")
                    if json:
                        return {}
                    else:
                        return []
                branch_obj["components"].append({"name": c_component.name, "subcomponents": []})
                components.append(c_component)
            else:
                for c_component in project.get_components():
                    branch_obj["components"].append({"name": c_component.name, "subcomponents": []})
                    components.append(c_component)
    if not json and export_job_properties:
        # Property files land under <root>/job-properties; create it if needed.
        try:
            mkdir(current_project.root + sep + "job-properties")
        except OSError as err:
            # Directory already exists is ok
            if err.errno == 17:
                pass
            else:
                raise err
    if json:
        _collect_json(components, ret, export_job_properties ,git)
    else:
        arr, param_files = _collect_prop_files(components, export_job_properties, current_project.root, git)
        if export_job_properties:
            _write_prop_files(param_files)
    if json:
        return ret
    else:
        return arr
def _collect_json(components, ret, export_job_properties, git):
    """Fill the json-mode result dict *ret* in place with one entry per
    subcomponent, optionally attaching its resolved parameters."""
    with git:
        for component in components:
            subcomponents = component.get_subcomponents()
            for subcomponent in subcomponents:
                # Locate the pre-created branch/component entries in ret.
                branch_elem = [b for b in ret["branches"] if b["name"] == component.project.branch][0]
                component_elem = [c for c in branch_elem["components"] if c["name"] == component.name][0]
                subc_elem = {"type": subcomponent.type}
                if subcomponent.name:
                    subc_elem["name"] = subcomponent.name
                component_elem["subcomponents"].append(subc_elem)
                if export_job_properties:
                    # load_parameters keys the subcomponent name by its type,
                    # e.g. {"stack": "<name>"}.
                    prop_args = {
                        "component": subcomponent.component.name,
                        subcomponent.type: subcomponent.name,
                        "branch": component.project.branch,
                        "git": git
                    }
                    subc_elem["properties"] = load_parameters(**prop_args)
def _collect_prop_files(components, export_job_properties, root, git):
    """Build the plain-text job listing and, optionally, a mapping of
    properties-file path -> parameter dict for every subcomponent.

    Returns (rows, param_files) where rows is a list of colon-separated
    listing rows and param_files maps file names to parameter dicts.
    """
    arr = []
    param_files = {}
    with git:
        for component in components:
            subcomponents = component.get_subcomponents()
            for subcomponent in subcomponents:
                arr.append(subcomponent.list_row(component.project.branch))
                if export_job_properties:
                    #$TYPE-$GIT_BRANCH-$COMPONENT-$NAME.properties
                    filename = subcomponent.job_properties_filename(component.project.branch, root)
                    # load_parameters keys the subcomponent name by its type.
                    prop_args = {
                        "component": subcomponent.component.name,
                        subcomponent.type: subcomponent.name,
                        "branch": component.project.branch,
                        "git": git
                    }
                    parameters = load_parameters(**prop_args)
                    param_files[filename] = parameters
    return arr, param_files
def _write_prop_files(param_files):
    """Write each parameter dict to its properties file as key=value lines.

    *param_files* maps a target file name to a dict of string parameters.
    Files are opened with mode "w+" and fully rewritten.
    """
    for filename, parameters in list(param_files.items()):
        lines = [key + "=" + value + "\n" for key, value in parameters.items()]
        with open(filename, 'w+') as prop_file:
            prop_file.write("".join(lines))
def list_components(branch=None, json=None):
    """Return the names of all components in the current project.

    Parameters
    ----------
    branch : str, optional
        Git branch to inspect; defaults to the current branch.
    json : unused
        Accepted for interface compatibility with the other list_* helpers.
    """
    # BUG FIX: stray non-Python residue ("| nilq/baby-python | python |") was
    # fused onto the end of this return statement, making the module a
    # syntax error; it has been removed.
    return [c.name for c in Project(branch=branch).get_components()]
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# from abc import ABCMeta, abstractmethod
import numpy as np
from pytest import approx
import torch
from implicitresnet.utils.spectral import spectral_norm
import implicitresnet.utils.calc as utils
###############################################################################
###############################################################################
# Devices for CPU/GPU test variants. Constructing the CUDA device object
# does not itself require a GPU to be present.
_cpu = torch.device('cpu')
_gpu = torch.device('cuda')
# NOTE(review): _batches appears unused in the visible tests — confirm.
_batches = 1
###############################################################################
###############################################################################
class Test_SpectralNorm:
    """Checks that the project's spectral_norm wrapper constrains the largest
    singular value of linear and conv layers to ~1, verified by taking the
    SVD of the exact Jacobian of the wrapped module."""

    def test_matrix_100x100(self):
        """Square linear layer: spectral norm of the normalized weight is ~1."""
        A = spectral_norm(torch.nn.Linear(100, 100, bias=False), name='weight', input_shape=(100,), n_power_iterations=1, eps=1e-12, dim=None)
        y = torch.ones(100)
        # Warm-up loop: presumably each forward call refines the power-iteration
        # estimate (n_power_iterations=1 per call) — TODO confirm. For the
        # square case the output is fed back as the next input.
        for _ in range(1000):
            y = A(y)
        y.requires_grad_(True)
        # Jacobian of the linear map equals its (normalized) weight matrix.
        jacobian = utils.jacobian( A(y), y, True ).reshape(y.numel(),y.numel()).cpu().detach().numpy()
        singvals = np.linalg.svd(jacobian, compute_uv=False)
        print("spectral norm = %.2e"%(np.amax(singvals)))
        assert np.amax(singvals) == approx(1.0, abs=1.e-3)

    def test_matrix_200x100(self):
        """Tall (200x100) linear layer: largest singular value is ~1."""
        A = spectral_norm(torch.nn.Linear(100, 200, bias=False), name='weight', input_shape=(100,), n_power_iterations=1, eps=1e-12, dim=None)
        x = torch.ones(100)
        # Non-square map: the input is kept fixed during warm-up.
        for _ in range(1000):
            y = A(x)
        x.requires_grad_(True)
        jacobian = utils.jacobian( A(x), x, True ).reshape(y.numel(),x.numel()).cpu().detach().numpy()
        singvals = np.linalg.svd(jacobian, compute_uv=False)
        print("spectral norm = %.2e"%(np.amax(singvals)))
        assert np.amax(singvals) == approx(1.0, abs=1.e-3)

    def test_matrix_100x200(self):
        """Wide (100x200) linear layer: largest singular value is ~1."""
        A = spectral_norm(torch.nn.Linear(200, 100, bias=False), name='weight', input_shape=(200,), n_power_iterations=1, eps=1e-12, dim=None)
        x = torch.ones(200)
        for _ in range(1000):
            y = A(x)
        x.requires_grad_(True)
        jacobian = utils.jacobian( A(x), x, True ).reshape(y.numel(),x.numel()).cpu().detach().numpy()
        singvals = np.linalg.svd(jacobian, compute_uv=False)
        print("spectral norm = %.2e"%(np.amax(singvals)))
        assert np.amax(singvals) == approx(1.0, abs=1.e-3)

    def test_conv2d_5_5_28_28(self):
        """Same-shape padded conv layer: spectral norm of its Jacobian is ~1."""
        input_shape = (5,28,28)
        A = spectral_norm(torch.nn.Conv2d(5, 5, kernel_size=3, padding=3//2, bias=False), name='weight', input_shape=input_shape, n_power_iterations=1, eps=1e-12, dim=None)
        x = torch.ones(1,*input_shape)
        for _ in range(1000):
            y = A(x)
        x.requires_grad_(True)
        jacobian = utils.jacobian( A(x), x, True ).reshape(y.numel(),x.numel()).cpu().detach().numpy()
        singvals = np.linalg.svd(jacobian, compute_uv=False)
        print("spectral norm = %.3e"%(np.amax(singvals)))
        assert np.amax(singvals) == approx(1.0, abs=1.e-3)

    def test_conv2d_5_3_28_28(self):
        """Channel- and size-reducing conv layer: spectral norm is ~1."""
        input_shape = (5,28,28)
        A = spectral_norm(torch.nn.Conv2d(5, 3, kernel_size=3, padding=0, bias=False), name='weight', input_shape=input_shape, n_power_iterations=1, eps=1e-12, dim=None)
        x = torch.ones(1,*input_shape)
        for _ in range(1000):
            y = A(x)
        x.requires_grad_(True)
        jacobian = utils.jacobian( A(x), x, True ).reshape(y.numel(),x.numel()).cpu().detach().numpy()
        singvals = np.linalg.svd(jacobian, compute_uv=False)
        print("spectral norm = %.3e"%(np.amax(singvals)))
        assert np.amax(singvals) == approx(1.0, abs=1.e-3)

    def test_save_load_state_dict(self):
        """The state dict of a wrapped module must load into a fresh wrapped
        module without error (round-trip of the spectral-norm buffers)."""
        A = spectral_norm(torch.nn.Linear(100, 200, bias=False), name='weight', input_shape=(100,), n_power_iterations=1, eps=1e-12, dim=None)
        x = torch.ones(100)
        for _ in range(10):
            y = A(x)
        B = spectral_norm(torch.nn.Linear(100, 200, bias=False), name='weight', input_shape=(100,), n_power_iterations=1, eps=1e-12, dim=None)
        B.load_state_dict(A.state_dict())
# a = Test_SpectralNorm()
# a.save_load_state_dict() | nilq/baby-python | python |
# -*- coding: utf-8 -*-
import logging
from .api import PrivoxyAdapter, RetryPrivoxyAdapter # noqa: F401
from .version import __version__ # noqa: F401
# Silence noisy urllib3 connection logs raised to ERROR level only.
logging.getLogger("urllib3").setLevel(logging.ERROR)

# Package metadata.
__author__ = "Alexey Shevchenko"
__email__ = 'otetz@me.com'
__copyright__ = "Copyright 2017, Alexey Shevchenko"
| nilq/baby-python | python |
import os
import unittest
from pathlib import Path
import pytest
from paramak import RotateStraightShape, SweepSplineShape
class TestSweepSplineShape(unittest.TestCase):
    """Unit tests for paramak.SweepSplineShape: defaults, workplane handling,
    relative volumes, forced cross-sections, surface counts and STP export."""

    def setUp(self):
        # Square spline cross-section swept along a spline path.
        self.test_shape = SweepSplineShape(
            points=[(-10, 10), (10, 10), (10, -10), (-10, -10)],
            path_points=[(50, 0), (30, 50), (70, 100), (50, 150)]
        )

    def test_default_parameters(self):
        """Checks that the default parameters of a SweepSplineShape are correct."""
        # assert self.test_shape.rotation_angle == 360
        assert self.test_shape.azimuth_placement_angle == 0
        assert self.test_shape.workplane == "XY"
        assert self.test_shape.path_workplane == "XZ"
        assert self.test_shape.force_cross_section == False

    def test_solid_construction_workplane(self):
        """Checks that SweepSplineShapes can be created in different workplanes."""
        self.test_shape.workplane = "YZ"
        self.test_shape.path_workplane = "YX"
        assert self.test_shape.solid is not None

        self.test_shape.workplane = "XZ"
        self.test_shape.path_workplane = "XY"
        assert self.test_shape.solid is not None

    def test_relative_shape_volume_points(self):
        """Creates two SweepSplineShapes and checks that their relative volumes
        are correct."""
        # Doubling both cross-section side lengths quadruples the volume,
        # so the original shape has a quarter of the enlarged one's volume.
        self.test_shape.points = [(-20, 20), (20, 20), (20, -20), (-20, -20)]
        test_volume = self.test_shape.volume()
        self.test_shape.points = [(-10, 10), (10, 10), (10, -10), (-10, -10)]
        assert self.test_shape.volume() == pytest.approx(
            test_volume * 0.25, rel=0.01)

    def test_relative_shape_volume_azimuthal_placement(self):
        """Creates two SweepSplineShapes and checks that their relative volumes
        are correct."""
        # Four azimuthal copies -> four times the single-shape volume.
        test_volume = self.test_shape.volume()
        self.test_shape.azimuth_placement_angle = [0, 90, 180, 270]
        assert self.test_shape.volume() == pytest.approx(
            test_volume * 4, rel=0.01)

    def test_force_cross_section(self):
        """Checks that a SweepSplineShape with the same cross-section at each path_point
        is created when force_cross_section = True."""
        self.test_shape.force_cross_section = True
        # Both end faces should have the same (minimum) area.
        test_area = round(min(self.test_shape.areas))
        assert self.test_shape.areas.count(
            pytest.approx(test_area, rel=0.01)) == 2

        # Cutting away part of the swept solid must not change the end faces.
        cutting_shape = RotateStraightShape(
            points=[(0, 50), (0, 200), (100, 200), (100, 50)]
        )
        self.test_shape.cut = cutting_shape
        assert self.test_shape.areas.count(
            pytest.approx(test_area, rel=0.01)) == 2

        cutting_shape.points = [(0, 100), (0, 200), (100, 200), (100, 100)]
        self.test_shape.cut = cutting_shape
        assert self.test_shape.areas.count(
            pytest.approx(test_area, rel=0.01)) == 2

    def test_force_cross_section_volume(self):
        """Checks that a SweepSplineShape with a larger volume is created when
        force_cross_section = True than when force_cross_section = False."""
        test_volume = self.test_shape.volume()
        self.test_shape.force_cross_section = True
        assert self.test_shape.volume() > test_volume

    def test_surface_count(self):
        """Creates a SweepSplineShape and checks that it has the correct number
        of surfaces."""
        # Two identical end caps plus the swept lateral surface.
        assert len(self.test_shape.areas) == 3
        assert len(set(round(i) for i in self.test_shape.areas)) == 2

    def test_export_stp(self):
        """Exports and stp file with mode = solid and wire and checks
        that the outputs exist and relative file sizes are correct."""
        os.system("rm test_solid.stp test_solid2.stp test_wire.stp")

        self.test_shape.export_stp('test_solid.stp', mode='solid')
        self.test_shape.export_stp('test_solid2.stp')  # default mode is solid
        self.test_shape.export_stp('test_wire.stp', mode='wire')

        assert Path("test_solid.stp").exists() is True
        assert Path("test_solid2.stp").exists() is True
        assert Path("test_wire.stp").exists() is True

        # Explicit and default solid exports must be identical; a wire export
        # carries less geometry and therefore must be smaller.
        assert Path("test_solid.stp").stat().st_size == \
            Path("test_solid2.stp").stat().st_size
        assert Path("test_wire.stp").stat().st_size < \
            Path("test_solid2.stp").stat().st_size

        os.system("rm test_solid.stp test_solid2.stp test_wire.stp")

    def test_incorrect_points_input(self):
        """Checks that an error is raised when the points are input with the
        connection"""
        def incorrect_points_definition():
            # Per-point connection types are not allowed for this shape.
            self.test_shape.points = [
                (10, 10, 'spline'),
                (10, 30, 'spline'),
                (30, 30, 'spline'),
                (30, 10, 'spline')
            ]

        self.assertRaises(
            ValueError,
            incorrect_points_definition
        )
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
| nilq/baby-python | python |
""" TF sandbox for testing new stuff """
import math
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
# TF1-style command-line flags shared by the test/demo entry points below.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
                     'for unit testing.')
flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
def getTestMatrix(rows=25, cols=20):
    """Return a random standard-normal test matrix.

    Generalized from the original hard-coded 25x20 shape: the defaults keep
    the previous behavior, while callers may now request any shape.

    Parameters
    ----------
    rows, cols : int
        Matrix dimensions (default 25 x 20).
    """
    return np.random.randn(rows, cols)
def runTestSession(feed,graph,golden_res):
    """Evaluate *graph* in a fresh InteractiveSession with *feed* and print a
    side-by-side comparison against *golden_res* (a NumPy reference)."""
    # Do whatever calculation I want to test (build the graph)
    sess = tf.InteractiveSession()
    merged = tf.merge_all_summaries()  # collected but not written here
    writer = tf.train.SummaryWriter('logs', sess.graph_def)
    result = sess.run(graph, feed_dict=feed)
    print("\n\ngolden result:")
    print(golden_res)
    print("result:")
    print(result)
    # Loose tolerances: float32 graph output vs float64 NumPy reference.
    print("match? ",np.allclose(golden_res,result, rtol=1e-03, atol=1e-03,))
def test_sharpOp():
    """Compare a TF sharpOp graph against a NumPy SVD-based reference.

    NOTE(review): sharpOp is not defined anywhere in this module, so calling
    this function raises NameError — confirm where the implementation is
    meant to come from.
    """
    # NumpyArrays
    inputA = getTestMatrix()
    print("Inputs:")
    print(inputA)
    def numpyTest():
        # Reference: U @ V scaled by the sum of the singular values.
        U, s, V = np.linalg.svd(inputA, full_matrices=False)
        return np.dot( U, V ) * np.sum(s)
    tf_inA = tf.placeholder(tf.float32, inputA.shape, name='input1')
    tf_graph=sharpOp(tf_inA)
    feed = {tf_inA : inputA}
    runTestSession(feed,tf_graph,numpyTest())
def logSoftMax(vector):
    """Numerically stable log-softmax computed row-wise over [batch, classes].

    Subtracts the per-row maximum before exponentiating (log-sum-exp trick)
    so large logits do not overflow.
    """
    row_max = tf.reduce_max(vector, reduction_indices=1, keep_dims=True)  # [batch, 1]
    shifted = vector - row_max
    log_sum_exp = tf.log(tf.reduce_sum(tf.exp(shifted), reduction_indices=1, keep_dims=True)) + row_max
    return vector - log_sum_exp
def test_logSoftMax():
    """Compare the TF logSoftMax graph against a NumPy reference.

    Returns nothing; prints the comparison via runTestSession.
    """
    # NumpyArrays
    inputA = getTestMatrix()
    print("Inputs:")
    print(inputA)
    def numpyTest():
        # NumPy reference of the same log-sum-exp computation, row-wise.
        maxLogit = np.apply_along_axis(np.max,1,inputA) # returns [batch]
        print(maxLogit)
        expSubMax = np.exp(np.apply_along_axis(np.subtract,0,inputA,maxLogit)) # returns [batch,classes]
        print(expSubMax)
        lse = np.log( np.sum(expSubMax, axis=1) ) + maxLogit # returns [batch]
        print(lse)
        return np.apply_along_axis(np.subtract,0,inputA,lse) # returns [batch,classes]
    # BUG FIX: the placeholder was hard-coded to shape [4,3], but
    # getTestMatrix() produces a 25x20 matrix, so feeding it would fail the
    # placeholder shape check. Use the input's actual shape (as test_sharpOp
    # already does).
    tf_inA = tf.placeholder(tf.float32, inputA.shape, name='input1')
    tf_graph=logSoftMax(tf_inA)
    feed = {tf_inA : inputA}
    runTestSession(feed,tf_graph,numpyTest())
def test_NNLCriterion():
    """Compare a TF negative-log-likelihood gather against a NumPy loop.

    For each row, the loss is minus the input value at the label's index.
    """
    # NumpyArrays: 4 samples x 3 classes, with one integer label per sample.
    inputA = np.array([[1.,2.,3.],[4.,5.,6.],[7.,8.,9.],[10.,11.,12.]])
    labels = np.array([2,1,0,1], dtype=np.int32)
    def numpyTest():
        # Direct indexing reference: out[i] = -input[i, labels[i]].
        numPyOut = np.empty(inputA.shape[0])
        for currLine in range(inputA.shape[0]):
            numPyOut[currLine] = - inputA[currLine][labels[currLine]]
        return numPyOut
    tf_inA = tf.placeholder(tf.float32, [4,3], name='input1')
    tf_labels = tf.placeholder(tf.int32,4,name='labels')
    def tf_graph(inA, labels):
        # Build a one-hot mask via sparse_to_dense, then select the labelled
        # entry of each row with an elementwise multiply + row sum.
        batch_size = tf.size(labels)
        labels = tf.expand_dims(labels, 1)
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, 3]), 1.0, 0.0)
        return - tf.reduce_sum(tf.mul(inA, onehot_labels), reduction_indices=1)
    feed = {tf_inA : inputA, tf_labels : labels}
    runTestSession(feed,tf_graph(tf_inA,tf_labels),numpyTest())
def test_sandbox():
    """Smoke test: compare tf.matmul on small random matrices against np.dot."""
    # Create a tensor with dummy vaules
    # NumpyArrays
    inputA = np.random.rand(3,2)
    inputB = np.random.rand(2,4)
    print("Inputs:")
    print(inputA)
    print(inputB)
    def numpyTest():
        return np.dot(inputA,inputB)
    in1 = tf.placeholder(tf.float32, [3,2], name='input1')
    in2 = tf.placeholder(tf.float32, [2,4], name='input2')
    #out1 = tf.placeholder(tf.float32, [3,4], name='output')
    with tf.name_scope('test-matmul'):
        out_tf = tf.matmul( in1, in2 )
    #tf.initialize_all_variables().run() # no idea what this does
    # Execute and print result
    feed = {in1: inputA, in2: inputB}
    runTestSession(feed,out_tf,numpyTest())
    #summary_str = result[0]
    #outputGraph = result[1]
    #writer.add_summary(summary_str)
    #print('output of graph: %s' % (outputGraph))
def test_tensorboard(_):
    """Train a softmax-regression MNIST model while logging TensorBoard summaries.

    Legacy TF (<1.0) API throughout (histogram_summary, merge_all_summaries,
    SummaryWriter, initialize_all_variables). Relies on module-level FLAGS and
    input_data (imported elsewhere in this file). Every 10th step evaluates
    accuracy on the test set and records the merged summaries.
    """
    # Import data
    mnist = input_data.read_data_sets('/tmp/data/', one_hot=True,
                                      fake_data=FLAGS.fake_data)
    sess = tf.InteractiveSession()
    # Create the model
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    W = tf.Variable(tf.zeros([784, 10]), name='weights')
    b = tf.Variable(tf.zeros([10], name='bias'))
    # Use a name scope to organize nodes in the graph visualizer
    with tf.name_scope('Wx_b'):
        y = tf.nn.softmax(tf.matmul(x, W) + b)
    # Add summary ops to collect data
    _ = tf.histogram_summary('weights', W)
    _ = tf.histogram_summary('biases', b)
    _ = tf.histogram_summary('y', y)
    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
    # More name scopes will clean up the graph representation
    with tf.name_scope('xent'):
        cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
        _ = tf.scalar_summary('cross entropy', cross_entropy)
    with tf.name_scope('train'):
        train_step = tf.train.GradientDescentOptimizer(
            FLAGS.learning_rate).minimize(cross_entropy)
    with tf.name_scope('test'):
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        _ = tf.scalar_summary('accuracy', accuracy)
    # Merge all the summaries and write them out to /tmp/mnist_logs
    merged = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter('logs', sess.graph_def)
    tf.initialize_all_variables().run()
    # Train the model, and feed in test data and record summaries every 10 steps
    for i in range(FLAGS.max_steps):
        if i % 10 == 0:  # Record summary data and the accuracy
            if FLAGS.fake_data:
                batch_xs, batch_ys = mnist.train.next_batch(
                    100, fake_data=FLAGS.fake_data)
                feed = {x: batch_xs, y_: batch_ys}
            else:
                feed = {x: mnist.test.images, y_: mnist.test.labels}
            result = sess.run([merged, accuracy], feed_dict=feed)
            summary_str = result[0]
            acc = result[1]
            writer.add_summary(summary_str, i)
            print('Accuracy at step %s: %s' % (i, acc))
        else:
            batch_xs, batch_ys = mnist.train.next_batch(
                100, fake_data=FLAGS.fake_data)
            feed = {x: batch_xs, y_: batch_ys}
            sess.run(train_step, feed_dict=feed)
def test_matrix_comp():
    """Print reduced SVD factorisations of random 4x3 and 3x4 matrices.

    Bug fixes vs. the original:
    * ``np.random.radn`` -> ``np.random.randn`` (the transp=False branch raised
      AttributeError the moment it was taken);
    * the "normal" section now actually requests the non-transposed matrix —
      it previously passed ``transp=True`` and duplicated the "transp" section.
    """
    def getTestMatrix(transp=False):
        # 3x4 when transposed is requested, otherwise 4x3.
        return np.random.randn(3, 4) if transp else np.random.randn(4, 3)

    def numpyTestSvd(test_in):
        # Reduced SVD; prints the three factors. (Currently unused helper.)
        U, s, V = np.linalg.svd(test_in, full_matrices=False)
        print("### SVD Test:")
        print("U")
        print(U)
        print("s")
        print(s)
        print("V")
        print(V)

    def numpyTestSvdS(test_in):
        # Return only the singular values. (Currently unused helper.)
        U, s, V = np.linalg.svd(test_in, full_matrices=False)
        return s

    def numpyTestQr(test_in):
        # Full QR decomposition; prints q and r. (Currently unused helper.)
        q, r = np.linalg.qr(test_in, mode='complete')
        print("### QR Test")
        print("q")
        print(q)
        print("r")
        print(r)

    print("normal")
    a = getTestMatrix(False)  # was getTestMatrix(True): "normal" showed the transposed case
    print("a", a.shape, "\n", a)
    U, s, V = np.linalg.svd(a, full_matrices=False)
    print("U", U.shape, "\n", U)
    print("s", s.shape, "\n", s)
    print("V", V.shape, "\n", V)

    print("transp")
    a = getTestMatrix(True)
    print("a", a.shape, "\n", a)
    U, s, V = np.linalg.svd(a, full_matrices=False)
    print("U", U.shape, "\n", U)
    print("s", s.shape, "\n", s)
    print("V", V.shape, "\n", V)
def main(_):
    """Entry point invoked by tf.app.run(); only runs the sharp-operator test."""
    test_sharpOp()  # defined elsewhere in this module


if __name__ == '__main__':
    tf.app.run()
| nilq/baby-python | python |
"""
Example showing how to set up a semi-discretization with the spectral difference method and advect it.
"""
# Import libraries
##################
from nodepy import semidisc
from nodepy import *
import numpy as np
import matplotlib.pyplot as pl
# Create spatial operator L (i.e. u' = L u)
###########################################
orderAcc = 1
spectralDifference = semidisc.load_semidisc('spectral difference advection',order=orderAcc)
# Create time marching
######################
rk4=rk.loadRKM('RK44')
# Solve the problem
###################
t,y=rk4(spectralDifference)
# Plot the soution
##################
pl.plot(spectralDifference.xExact,spectralDifference.uExact,label = 'Exact solution')
pl.plot(spectralDifference.xExact,spectralDifference.uExactInit,label = 'Initial solution')
# Check if we want a 1st-order spectral difference solution. If we want that, prepare some arrays
# for pretty plots
if orderAcc == 1:
# Copy the last element of the list y in temporary array.
# The element is a numpy array.
tmp = y[-1]
# Solution is constant in a cell. Thus two points are enough for plotting a pice-wise constant
# function
nbrPlotPnts = 2*spectralDifference.xCenter.size
x1stSD=np.zeros(nbrPlotPnts)
u1stSD=np.zeros(nbrPlotPnts)
dx = spectralDifference.xCenter[1] - spectralDifference.xCenter[0] # Assume uniform grid spacing
for i in range(0,spectralDifference.xCenter.size):
for j in range(0,2):
# Compute x coordinate
x1stSD[i*2] = spectralDifference.xCenter[i] - 1./2.*dx
x1stSD[i*2+1] = spectralDifference.xCenter[i] + 1./2.*dx
# Set solution
u1stSD[i*2] = tmp[i]
u1stSD[i*2+1] = tmp[i]
# Plot 1st-order numerical solution
pl.plot(x1stSD,u1stSD,label = 'Spectral difference solution')
else:
# Plot orderAcc-order numerical solution
pl.plot(spectralDifference.xSol,y[-1],label = 'Spectral difference solution')
pl.title('1D linear advection equation')
pl.xlabel('x')
pl.ylabel('u')
#pl.legend()
pl.show()
| nilq/baby-python | python |
class pos:
    """A single grid cell: its (row, column) coordinates and whether it is blocked."""
    def __init__(self, r, c, is_blocked):
        self.r = r                    # row index
        self.c = c                    # column index
        self.is_blocked = is_blocked  # True if the robot may not enter this cell
def robot_find_path(matrix, cur_pos=(0, 0), end=None):
    """Return a list of (row, col) coordinates from ``cur_pos`` to ``end``, or None.

    The robot starts at ``cur_pos`` (default the top-left corner), may move
    only right or down, and may not enter blocked cells.  ``matrix`` is a grid
    (list of rows) of cell objects exposing an ``is_blocked`` attribute, such
    as :class:`pos`.  ``end`` defaults to the bottom-right corner.

    Bug fixes vs. the original draft:
    * ``memo_path = dict`` stored the ``dict`` type instead of calling it;
    * ``stack`` was referenced but never defined;
    * the function returned the ``pos`` class object instead of a path.
    """
    if not matrix or not matrix[0]:
        return None
    rows, cols = len(matrix), len(matrix[0])
    if end is None:
        end = (rows - 1, cols - 1)
    target = tuple(end)
    memo = {}  # (r, c) -> cached path list, or None if unreachable from there

    def walk(r, c):
        # Out of bounds or blocked: dead end.
        if r >= rows or c >= cols or matrix[r][c].is_blocked:
            return None
        if (r, c) in memo:
            return memo[(r, c)]
        if (r, c) == target:
            result = [(r, c)]
        else:
            tail = walk(r, c + 1) or walk(r + 1, c)
            result = [(r, c)] + tail if tail else None
        memo[(r, c)] = result
        return result

    return walk(*tuple(cur_pos))
| nilq/baby-python | python |
import maya.cmds as cmds
#### XGEN DESCRIPTIONS - RENDER ONLY #####
def main():
    """Find all xgmSplineDescription nodes under the selection and force them visible."""
    xGen = getXgen()
    setVisibility(xGen)
def getXgen():
    """Return the names of all xgmSplineDescription descendants of the current selection."""
    # NOTE(review): the selection list is captured but never passed to
    # listRelatives — presumably listRelatives operates on the current
    # selection when given no objects; confirm, otherwise `node` is dead.
    node = cmds.ls(selection = True)
    children_nodes = cmds.listRelatives(allDescendents=True, type='xgmSplineDescription')
    if not children_nodes:
        print("No XGEN")
        children_nodes = []
    else:
        print("XGEN Splines found")
    return(children_nodes)
def setVisibility(xGen):
    """Turn on visibility and lodVisibility for every node name in ``xGen``.

    Improvement: the original walked the list with a manual ``current``
    counter; iterating the list directly is equivalent and idiomatic.
    """
    for node in xGen:
        # Maya attribute plugs follow the "<node>.<attr>" convention.
        visibility_attr = str(node) + ".visibility"
        lod_visibility_attr = str(node) + ".lodVisibility"
        print(visibility_attr)
        cmds.setAttr(visibility_attr, 1)
        cmds.setAttr(lod_visibility_attr, 1)
if __name__ == "__main__":
main() | nilq/baby-python | python |
from src.domain.interaction.interaction_phase_state import InteractionPhaseState
class ExitPhase(InteractionPhaseState):
    """Terminal interaction phase: once reached, the conversation stays here."""

    def fetch_next_interaction_phase(self, input_text):
        # Exit is absorbing — no transition out, whatever the input is.
        return self

    def process_input_text(self, input_text, input_text_processor):
        # Delegate producing the exit/goodbye response to the processor.
        return input_text_processor.process_exit_statement(input_text)
# ---------------------------------------------------------------------
# Angtel.Topaz.get_vlans
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetvlans import IGetVlans
class Script(BaseScript):
    """Angtel.Topaz VLAN collector: parses the `show vlan` CLI table."""

    name = "Angtel.Topaz.get_vlans"
    interface = IGetVlans

    # Matches one table row: "<vlan_id> <name> ..." at the start of a line.
    rx_vlan = re.compile(r"^\s*(?P<vlan_id>\d+)\s+(?P<name>\S+)", re.MULTILINE)

    def execute_cli(self):
        # One {"vlan_id": ..., "name": ...} dict per matched row.
        output = self.cli("show vlan")
        return [match.groupdict() for match in self.rx_vlan.finditer(output)]
| nilq/baby-python | python |
"""
makeblastdb -dbtype nucl -in nanoTRF_0.1M.fasta -out nanoTRF_0.1M.fasta
blastn -query merged_TR_rank_all.fasta -outfmt 6 -db nanoTRF_0.1M.fasta -out merged_TR_rank_all_vs_nanoTRF_0.1M.out -window_size 22 -num_threads 100 -evalue 10
"""
from bin.helpers.help_functions import getLog
import os
class run_BLAST():
    """All-vs-all BLAST pipeline: makeblastdb + blastn, then edge extraction.

    Builds a nucleotide database from ``inFile``, BLASTs the file against
    itself, and writes significant non-self hits to ``<outFile>edges.list``
    as tab-separated (query, hit, e-value) edges for downstream clustering.
    """

    def __init__(self,blast_run,makedb,inFile, outFile, threads, wordsize, evalue, log_file):
        # blast_run / makedb are paths to the blastn and makeblastdb binaries.
        self.blast_run,self.makedb,self.inFile, self.outFile, self.threads, self.wordsize, self.evalue = blast_run,makedb,inFile, outFile, threads, wordsize, evalue
        self.bl_log = getLog(log_file, "BLAST module")
        # Edge list is written next to the raw tabular BLAST output.
        self.edge_list_file = outFile + "edges.list"
        self.main()  # the whole pipeline runs at construction time

    def filterOut_table(self):
        """
        :return: list of edges [(query, hit),(),...]
        """
        edge_cnt = 0
        with open(self.outFile) as inFile, open(self.edge_list_file, 'w') as outEdgeList:
            for lines in inFile:
                sp = lines.split("\t")
                # outfmt 6: column 0 = query, 1 = subject, 10 = e-value.
                # Keep non-self hits with e-value below 1e-5.
                if sp[0] != sp[1] and float(sp[10]) < 0.00001:
                    outEdgeList.write("{0}\t{1}\t{2}\n".format(sp[0], sp[1], sp[10]))
                    edge_cnt += 1
        print("NUmber of edges", edge_cnt)
        self.bl_log.info("NUmber of edges: {}".format(edge_cnt))

    def main(self):
        """Build the BLAST database, run all-vs-all blastn, then filter edges."""
        self.bl_log.info("BLAST database is making")
        # NOTE(review): commands are assembled with str.format from
        # caller-supplied paths; paths containing spaces or shell
        # metacharacters would break — confirm inputs are sanitised upstream.
        os.system('{0} -dbtype nucl -in {1} -out {1}'.format(self.makedb,self.inFile))
        self.bl_log.info("BLAST is running")
        os.system('{0} -query {1} -outfmt 6 -db {1} -out {2} -window_size {3} -num_threads {4} -evalue {5}'.format(
            self.blast_run,self.inFile, self.outFile, self.wordsize, self.threads, self.evalue
        ))
        self.filterOut_table()
import argparse
import csv
import itertools as it
from operator import itemgetter
import csv2xml as c2x
def find_targeting(from_char, to_char):
    """Collect every dialogue line of ``from_char`` that targets ``to_char``.

    A case counts as targeting when its conditions include either
    ``target == to_char`` or ``alsoPlaying == to_char``.  Lines are
    de-duplicated by their stripped text (later occurrences win); each
    surviving line becomes a flat row dict ready for CSV output.
    """
    lineset, opponent_meta = c2x.load_character(from_char)
    rows = []
    line_to_row = {}
    for stageset, case in c2x.iter_lineset(lineset):
        targeted_to_char = False
        stage_targeting = None
        if 'target' in case.conditions:
            if case.conditions['target'] == to_char:
                targeted_to_char = True
                stage_targeting = case.conditions.get('targetStage', None)
            else:
                # Targets somebody else entirely — skip this case.
                continue
        if 'alsoPlaying' in case.conditions and case.conditions['alsoPlaying'] == to_char:
            targeted_to_char = True
            stage_targeting = case.conditions.get('alsoPlayingStage', None)
        if not targeted_to_char:
            continue
        tag = case.tag
        # Collapse all must_strip_* variants onto one canonical tag.
        if tag.startswith('must_strip_'):
            tag = 'must_strip_self'
        for state in case.states:
            # Keyed by stripped text so duplicates collapse to one row.
            line_to_row[state.text.strip()] = {
                'from': from_char,
                'from-stage': stageset,
                'to': to_char,
                'to-stage': stage_targeting,
                'case': tag,
                'conditions': case.format_conditions(True),
                'image': state.image,
                'text': state.text,
                'marker': state.marker
            }
    return list(line_to_row.values())
# Case tags where the *speaking* character is the one stripping/masturbating;
# get_stripping_rows() groups rows with these tags by 'from' instead of 'to'.
SELF_STRIPPING_TAGS = [
    'must_strip_self',
    'must_strip_losing',
    'must_strip_normal',
    'must_strip_winning',
    'stripping',
    'stripped',
    'must_masturbate_first',
    'must_masturbate',
    'start_masturbating',
    'masturbating',
    'heavy_masturbating',
    'finished_masturbating',
]
# All stripping/masturbation-related case tags, in canonical game-phase order;
# the list position doubles as the sort key (see get_stripping_case_sort_key).
STRIPPING_TAGS = [
    'must_strip_self',
    'must_strip_losing',
    'must_strip_normal',
    'must_strip_winning',
    'female_must_strip',
    'male_must_strip',
    'female_removing_accessory',
    'male_removing_accessory',
    'female_removing_minor',
    'male_removing_minor',
    'female_removing_major',
    'male_removing_major',
    'female_chest_will_be_visible',
    'male_chest_will_be_visible',
    'female_crotch_will_be_visible',
    'male_crotch_will_be_visible',
    'stripping',
    'female_removed_accessory',
    'male_removed_accessory',
    'female_removed_minor',
    'male_removed_minor',
    'female_removed_major',
    'male_removed_major',
    'male_chest_is_visible',
    'female_small_chest_is_visible',
    'female_medium_chest_is_visible',
    'female_large_chest_is_visible',
    'female_crotch_is_visible',
    'male_small_crotch_is_visible',
    'male_medium_crotch_is_visible',
    'male_large_crotch_is_visible',
    'stripped',
    'must_masturbate_first',
    'must_masturbate',
    'female_must_masturbate',
    'male_must_masturbate',
    'start_masturbating',
    'female_start_masturbating',
    'male_start_masturbating',
    'masturbating',
    'female_masturbating',
    'male_masturbating',
    'heavy_masturbating',
    'female_heavy_masturbating',
    'male_heavy_masturbating',
    'finished_masturbating',
    'female_finished_masturbating',
    'male_finished_masturbating',
]
def is_stripping_case(row):
    """Return True when the row's case tag is one of the stripping-related tags."""
    tag = row['case']
    return tag in STRIPPING_TAGS
def get_stripping_case_sort_key(row):
    """Rank a row by its tag's position in STRIPPING_TAGS; unknown tags sort first (0)."""
    try:
        return STRIPPING_TAGS.index(row['case'])
    except ValueError:
        return 0
def stage_set_key(field):
    """Build a sort key over ``row[field]``: missing (None) stage info sorts last
    (999); otherwise rows order by the sum of their stage numbers."""
    def _sorter(row):
        stages = row[field]
        return 999 if stages is None else sum(stages)
    return _sorter
def stages_to_strings(row):
    """Return a copy of ``row`` with both stage fields rendered as display strings."""
    formatted = row.copy()
    formatted['from-stage'] = c2x.format_stage_set(formatted['from-stage'])
    if formatted['to-stage'] is None:
        formatted['to-stage'] = ''
    else:
        formatted['to-stage'] = c2x.format_interval(formatted['to-stage'])
    return formatted
def get_stripping_rows(rows):
    """Yield stripping-related rows grouped for CSV output.

    Rows are ordered by canonical tag order, then grouped per character
    (the speaker for self-stripping tags, the target otherwise), then per
    stage set; an empty dict is yielded between groups so the CSV writer
    emits a blank separator row.
    """
    stripping_rows = filter(is_stripping_case, rows)
    stripping_rows = sorted(stripping_rows, key=get_stripping_case_sort_key)
    for tag, case_group in it.groupby(stripping_rows, key=itemgetter('case')):
        # Self-stripping tags group by the speaking character; everything
        # else groups by whoever is being stripped/targeted.
        if tag in SELF_STRIPPING_TAGS:
            case_group = sorted(case_group, key=itemgetter('from'))
            char_iter = it.groupby(case_group, key=itemgetter('from'))
        else:
            case_group = sorted(case_group, key=itemgetter('to'))
            char_iter = it.groupby(case_group, key=itemgetter('to'))
        for _, char_group in char_iter:
            if tag in SELF_STRIPPING_TAGS:
                char_group = sorted(char_group, key=stage_set_key('from-stage'))
                stage_iter = it.groupby(char_group, key=itemgetter('from-stage'))
            else:
                char_group = sorted(char_group, key=stage_set_key('to-stage'))
                stage_iter = it.groupby(char_group, key=itemgetter('to-stage'))
            for _, stage_group in stage_iter:
                yield from map(stages_to_strings, stage_group)
            # Blank CSV row between character groups.
            # NOTE(review): original indentation was lost; this separator is
            # placed after each character's stage groups — confirm placement.
            yield {}
def get_other_rows(rows):
    """Yield non-stripping rows sorted by tag order, then speaker, then stages,
    with an empty dict (blank CSV row) after each speaker group.

    NOTE(review): sorting by itemgetter('to-stage') will raise TypeError in
    Python 3 when some rows carry None and others carry intervals — confirm
    the inputs, or this generator is effectively unused (main() only writes
    get_stripping_rows()).
    """
    other_rows = it.filterfalse(is_stripping_case, rows)
    other_rows = sorted(other_rows, key=itemgetter('from-stage'))
    other_rows = sorted(other_rows, key=itemgetter('to-stage'))
    other_rows = sorted(other_rows, key=itemgetter('from'))
    # Final (primary) sort: canonical tag order from the csv2xml module.
    other_rows = sorted(other_rows, key=lambda r: c2x.Case.ALL_TAGS.index(r['case']))
    for tag, case_group in it.groupby(other_rows, key=itemgetter('case')):
        for char, char_group in it.groupby(case_group, key=itemgetter('from')):
            yield from char_group
            yield {}  # blank CSV row between speaker groups
def main(args):
    """Collect mutual targeting between the two characters and write it to CSV.

    NOTE(review): only stripping-related rows are written; get_other_rows()
    exists above but is never called here — confirm that is intentional.
    """
    rows = find_targeting(args.char_1, args.char_2)
    rows.extend(find_targeting(args.char_2, args.char_1))
    fields = ['from', 'from-stage', 'to', 'to-stage', 'case', 'conditions', 'image', 'text', 'marker']
    with open(args.outfile, 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fields, dialect='unix')
        writer.writeheader()
        writer.writerows(get_stripping_rows(rows))
# Command-line entry point: two character identifiers and an output CSV path.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Finds all instances of targetting between two characters.')
    parser.add_argument('char_1', help='The first character to analyze.')
    parser.add_argument('char_2', help='The second character to analyze.')
    parser.add_argument('outfile', help='CSV file to write to.')
    args = parser.parse_args()
    main(args)
| nilq/baby-python | python |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
"""
This module contains actions for retrieving server groups and their policies
from a source cloud and deploying them into a destination cloud
"""
from cloudferry.lib.base.action import transporter
from cloudferry.lib.os.compute import server_groups
from cloudferry.lib.utils import log
LOG = log.getLogger(__name__)
class ServerGroupTransporter(transporter.Transporter):
    """
    Transporter uses server group handlers to retrieve and deploy server
    groups between the configured clouds.

    Required configuration options:
        [src]
        type = os
        auth_url = http://<auth_url>
        user = <admin_user>
        password = <admin_pass>
        tenant = <admin_tenant>

        [dst]
        type = os
        auth_url = http://<auth_url>
        user = <admin_user>
        password = <admin_pass>
        tenant = <admin_tenant>

        [src_compute]
        service = nova
        db_connection = mysql+pymysql
        db_host = <db_host>
        db_port = <db_port>
        db_name = nova
        db_user = <db_user>
        db_password = <db_password>

        [dst_compute]
        service = nova
        db_connection = mysql+pymysql
        db_host = <db_host>
        db_port = <db_port>
        db_name = nova
        db_user = <db_user>
        db_password = <db_password>

    Scenario:
        process:
            - task_server_group_transport:
                -act_server_group_trans: True

    Dependent tasks:
        None
    Required tasks:
        None
    """

    def run(self, **kwargs):
        """Copy all server groups from the source cloud to the destination.

        Returns a dict with the retrieved source groups under
        'server_group_info' (empty list when the source had none).
        """
        src_resource = server_groups.ServerGroupsHandler(self.src_cloud)
        dst_resource = server_groups.ServerGroupsHandler(self.dst_cloud)
        src_server_groups = src_resource.get_server_groups()
        # Idiomatic truthiness check (was `len(...) > 0`, equivalent for lists).
        if src_server_groups:
            dst_resource.deploy_server_groups(src_server_groups)
        else:
            LOG.debug("No server groups found on the source cloud")
        return {'server_group_info': src_server_groups}
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
'''
Easy and basic configure for print log
'''
__author__ = 'lujiaying@baidu.com'
import logging
from logging.handlers import RotatingFileHandler
import os
################################
# Conf to edit
################################
# To print into screen if DebugConf is True
DebugConf = True
#DebugConf = False

################################
# Init Loggers
################################
data_analysis_logger = logging.getLogger('data_analysis')
data_process_logger = logging.getLogger('data_process')
model_logger = logging.getLogger('model')

################################
# Init Handlers
################################
formatter = logging.Formatter('[%(asctime)s][pid:%(process)s] %(module)s.%(funcName)s: %(levelname)s: %(message)s')
# StreamHandler for print log to console
hdr = logging.StreamHandler()
hdr.setFormatter(formatter)
hdr.setLevel(logging.DEBUG)

# RotatingFileHandler
## Set log dir: logs live in <package parent>/log, created on first import.
abs_path = os.path.dirname(os.path.abspath(__file__))
abs_father_path = os.path.dirname(abs_path)
log_dir_path = abs_father_path + '/log'
#log_dir_path = abs_path + '/log'
if not os.path.exists(log_dir_path):
    os.makedirs(log_dir_path)
## Specific file handler (10 MB per file, 3 backups)
fhr_ana = RotatingFileHandler('%s/analysis.log'%(log_dir_path), maxBytes=10*1024*1024, backupCount=3)
fhr_ana.setFormatter(formatter)
fhr_ana.setLevel(logging.DEBUG)
## Specific file handler
fhr_pro = RotatingFileHandler('%s/process.log'%(log_dir_path), maxBytes=10*1024*1024, backupCount=3)
fhr_pro.setFormatter(formatter)
fhr_pro.setLevel(logging.DEBUG)
## Specific file handler
fhr_model = RotatingFileHandler('%s/model.log'%(log_dir_path), maxBytes=10*1024*1024, backupCount=3)
fhr_model.setFormatter(formatter)
fhr_model.setLevel(logging.DEBUG)

################################
# Add Handlers
################################
# Console output is attached only in debug mode; otherwise the logger level
# is raised to ERROR so routine records are dropped entirely.
data_analysis_logger.addHandler(fhr_ana)
if DebugConf:
    data_analysis_logger.addHandler(hdr)
    data_analysis_logger.setLevel(logging.DEBUG) #lowest debug level for logger
else:
    data_analysis_logger.setLevel(logging.ERROR) #lowest debug level for logger
data_process_logger.addHandler(fhr_pro)
if DebugConf:
    data_process_logger.addHandler(hdr)
    data_process_logger.setLevel(logging.DEBUG)
else:
    data_process_logger.setLevel(logging.ERROR)
model_logger.addHandler(fhr_model)
if DebugConf:
    model_logger.addHandler(hdr)
    model_logger.setLevel(logging.DEBUG)
else:
    model_logger.setLevel(logging.ERROR)

if __name__ == '__main__':
    '''
    Usage:
        from tools.log_tools import data_process_logger as logger
        logger.debug('debug debug')
    '''
    data_analysis_logger.debug('My logger configure success')
    data_analysis_logger.info('My logger configure success')
    data_analysis_logger.error('analysis error test')
    data_process_logger.info('My logger configure success~~')
    data_process_logger.error('process error test test')
    model_logger.info('Ohhh model')
    model_logger.error('error model')
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Common system settings
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from __future__ import absolute_import
from .base import BaseFact
class System(BaseFact):
    """System-level facts about a managed object: identity (hostname, domain),
    platform (vendor/platform/version/timezone) and NOC placement metadata.

    All simple attributes are normalised by their setters so that any falsy
    value is stored as ``None``.
    """

    ATTRS = [
        "hostname",
        "domain_name",
        "profile",
        "vendor",
        "platform",
        "version",
        "timezone",
        "[nameservers]",
        "managed_object_name",
        "object_profile",
        "level",
        "location",
    ]

    def __init__(
        self,
        hostname=None,
        # Fixed: was `domain_name=False`, inconsistent with every other
        # default. The setter maps any falsy value to None, so this change
        # is behaviour-identical.
        domain_name=None,
        profile=None,
        vendor=None,
        platform=None,
        version=None,
        timezone=None,
        nameservers=None,
        object_profile=None,
        level=None,
        location=None,
        **kwargs
    ):
        super(System, self).__init__()
        self.hostname = hostname
        self.domain_name = domain_name
        self.profile = profile
        self.vendor = vendor
        self.platform = platform
        self.version = version
        self.timezone = timezone
        self.nameservers = nameservers
        self.managed_object_name = None  # filled in by bind()
        self.object_profile = object_profile
        self.level = level
        self.location = location

    # --- simple attributes, normalised to None when falsy ---

    @property
    def hostname(self):
        return self._hostname

    @hostname.setter
    def hostname(self, value):
        self._hostname = value or None

    @property
    def domain_name(self):
        return self._domain_name

    @domain_name.setter
    def domain_name(self, value):
        self._domain_name = value or None

    @property
    def profile(self):
        return self._profile

    @profile.setter
    def profile(self, value):
        self._profile = value or None

    @property
    def vendor(self):
        return self._vendor

    @vendor.setter
    def vendor(self, value):
        self._vendor = value or None

    @property
    def platform(self):
        return self._platform

    @platform.setter
    def platform(self, value):
        self._platform = value or None

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value or None

    @property
    def timezone(self):
        return self._timezone

    @timezone.setter
    def timezone(self, value):
        self._timezone = value or None

    @property
    def nameservers(self):
        return self._nameservers

    @nameservers.setter
    def nameservers(self, value):
        # Lists normalise to an empty list rather than None.
        self._nameservers = value or []

    # --- placement metadata, stored verbatim ---

    @property
    def managed_object_name(self):
        return self._managed_object_name

    @managed_object_name.setter
    def managed_object_name(self, value):
        self._managed_object_name = value

    @property
    def object_profile(self):
        return self._object_profile

    @object_profile.setter
    def object_profile(self, value):
        self._object_profile = value

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, value):
        self._level = value

    @property
    def location(self):
        return self._location

    @location.setter
    def location(self, value):
        self._location = value

    def bind(self):
        """Populate fields derived from the bound managed object.

        NOTE(review): relies on ``self.managed_object`` being supplied by
        BaseFact / the binding machinery — it is not set in this class.
        """
        self.managed_object_name = self.managed_object.name
        if self.managed_object.object_profile:
            self.object_profile = self.managed_object.object_profile.name
            self.level = self.managed_object.object_profile.level
| nilq/baby-python | python |
import numpy as np
from scipy.optimize import minimize
import dadapy.utils_.utils as ut
def ML_fun_gPAk(params, args):
    """Negative log-likelihood for the gradient-corrected PAk estimator.

    params -- [Fi, a]: log-density at the point and the linear correction
              of Eq. (S1)
    args   -- [kopt, vij, grads_ij]: neighbourhood size, shell volumes and
              per-neighbour gradient terms
    Returns -log L, suitable for scipy.optimize.minimize.
    """
    Fi, a = params[0], params[1]
    kopt, vij, grads_ij = args[0], args[1], args[2]
    # Linear part of the log-likelihood.
    log_like = Fi * kopt + a * np.sum(grads_ij)
    # Subtract the expected counts, one shell at a time.
    for shell in range(kopt):
        expected = vij[shell] * np.exp(Fi + a * grads_ij[shell])
        log_like = log_like - expected
    return -log_like
def ML_fun_gpPAk(params, args):
    """Negative log-likelihood for the gpPAk variant of Eq. (S1).

    params -- [Fi, a]: log-density and linear correction
    args   -- [kopt, vij, grads_ij]: neighbourhood size, shell volumes and
              per-neighbour gradient terms
    Returns -log L, suitable for scipy.optimize.minimize.
    """
    Fi, a = params[0], params[1]
    kopt, vij, grads_ij = args[0], args[1], args[2]
    ga = (kopt + 1) * kopt * 0.5
    # Linear part: density term + gradient terms + linear-rank correction.
    log_like = Fi * kopt + np.sum(grads_ij) + a * ga
    # Subtract the expected counts; the correction grows with neighbour rank.
    for j in range(kopt):
        rank = float(j + 1)
        log_like = log_like - vij[j] * np.exp(Fi + grads_ij[j] + a * rank)
    return -log_like
def ML_fun(params, args):
    """Negative log-likelihood of Eq. (S1) as a function of (b, a).

    params -- [b, a]; b corresponds to log(rho), a is the linear correction
    args   -- [kopt, Vi]: neighbourhood size and shell volumes
    Returns the value to be minimized (i.e. -log L).
    """
    b, a = params[0], params[1]
    kopt = args[0]
    Vi = args[1]
    # Linear part of the log-likelihood.
    log_like = b * kopt + a * ((kopt + 1) * kopt * 0.5)
    # Subtract the expected count of each shell (shell k uses volume Vi[k-1]).
    for k in range(1, kopt):
        log_like = log_like - Vi[k - 1] * np.exp(b + a * float(k))
    return -log_like
def ML_hess_fun(params, args):
    """Asymptotic standard errors of the estimated (b, a) parameters.

    params -- [b, a]; b corresponds to log(rho), a is the linear correction
              of Eq. (S1)
    args   -- [kopt, Vi]: neighbourhood size and shell volumes
    Returns [err_b, err_a]: square roots of the diagonal of the inverse
    observed-information matrix.

    Improvement: the original also accumulated L0 / gb / ga which were never
    used for the result; that dead computation is removed.
    """
    b, a = params[0], params[1]
    kopt = args[0]
    Vi = args[1]
    Cov2 = np.zeros((2, 2))
    for k in range(1, kopt):
        jf = float(k)
        tt = Vi[k - 1] * np.exp(b + a * jf)
        # Second derivatives of log L w.r.t. (b, a).
        Cov2[0, 0] -= tt
        Cov2[0, 1] -= jf * tt
        Cov2[1, 1] -= jf * jf * tt
    Cov2[1, 0] = Cov2[0, 1]  # symmetric
    Covinv2 = np.linalg.inv(-Cov2)
    return [np.sqrt(Covinv2[0, 0]), np.sqrt(Covinv2[1, 1])]
def MLmax(rr, kopt, Vi):
    """Minimise -log L of Eq. (S1) via Nelder-Mead and return the optimal log-density.

    rr   -- initial log-density value from the standard k-NN estimator
    kopt -- optimal neighbourhood size from the likelihood-ratio test
    Vi   -- volumes of the kopt shells between successive nearest neighbours
    """
    outcome = minimize(
        ML_fun,
        [rr, 0.0],
        method="Nelder-Mead",
        tol=1e-6,
        args=([kopt, Vi]),
        options={"maxiter": 1000},
    )
    best_log_density = outcome.x[0]
    print(outcome.message)
    return best_log_density
def MLmax_gPAk(rr, kopt, Vi, grads_ij):
    """Nelder-Mead maximisation of the gPAk likelihood; returns the optimal log-density."""
    outcome = minimize(
        ML_fun_gPAk,
        [rr, 0.0],
        method="Nelder-Mead",
        tol=1e-6,
        args=([kopt, Vi, grads_ij]),
        options={"maxiter": 1000},
    )
    best_log_density = outcome.x[0]
    print(outcome.message)
    return best_log_density
def MLmax_gpPAk(rr, kopt, Vi, grads_ij):
    """Nelder-Mead maximisation of the gpPAk likelihood; returns the optimal log-density."""
    outcome = minimize(
        ML_fun_gpPAk,
        [rr, 0.0],
        method="Nelder-Mead",
        tol=1e-6,
        args=([kopt, Vi, grads_ij]),
        options={"maxiter": 1000},
    )
    best_log_density = outcome.x[0]
    print(outcome.message)
    return best_log_density
def MLmax_kNN_corr(Fis, kstar, Vis, dist_indices, Fij_list, Fij_var_list, alpha):
    """Jointly maximise the corrected kNN log-likelihood over all point densities.

    Uses conjugate-gradient minimisation with an analytic Jacobian and prints
    convergence diagnostics.

    NOTE(review): ML_fun_kNN_corr and ML_fun_grad are not defined in this
    module as shown — confirm they exist elsewhere, otherwise calling this
    raises NameError.
    """
    print("ML maximisation started")
    # methods: 'Nelder-Mead', 'BFGS'
    results = minimize(
        ML_fun_kNN_corr,
        Fis,
        method="CG",
        tol=1e-6,
        jac=ML_fun_grad,
        args=([kstar, Vis, dist_indices, Fij_list, Fij_var_list, alpha]),
        options={"maxiter": 100},
    )
    rr = results.x  # b
    print(results.message)
    print(results.nit)   # iterations
    print(results.nfev)  # function evaluations
    print(results.njev)  # jacobian evaluations
    print(np.mean(abs(results.jac)))  # mean residual gradient magnitude
    return rr


if __name__ == "__main__":
    pass
| nilq/baby-python | python |
class StockSpanner:
    """Online stock-span computation.

    The span of today's price is the number of consecutive days (ending
    today) on which the price was less than or equal to today's.  Uses a
    monotonic stack of (price, span) pairs with strictly decreasing prices.
    """

    def __init__(self):
        self.stack = []  # (price, span)

    def next(self, price: int) -> int:
        days = 1
        # Absorb every prior run whose top price does not exceed today's.
        while self.stack and self.stack[-1][0] <= price:
            _, absorbed = self.stack.pop()
            days += absorbed
        self.stack.append((price, days))
        return days
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This allows easy testing of the `projects` service of the
application server. It can be run interactively or in 'simulation'
mode.
"""
from __future__ import unicode_literals, division, print_function #Py2
import argparse
import random
import time
import requests
import sys
import json
import os
import tempfile
import logging
import codecs
from collections import OrderedDict
try:
from sqlite3 import dbapi2 as sqlite
except ImportError:
from pysqlite2 import dbapi2 as sqlite #for old Python versions
import numpy as np
# Default settings for the project-service tester.
DEF_BASEURL = "http://127.0.0.1:9999/wsgi/"   # application-server root URL
#DEF_BASEURL = "http://rkv-must1.puk.ac.za:88/app/"
DEF_LOGFILE = "project_tester.log"
DEF_LOGLEVEL = 20 #INFO
DEF_TESTFILE = "ptest01.json"                 # test fixture data
DEF_DBFILE = "projects.db"                    # sqlite projects database
DEF_NUSERS = 40                               # simulated users
DEF_NPROCS = 40                               # parallel worker processes
DEF_MINDELAY = 20.0 #seconds
DEF_MAXDELAY = 60.0 #seconds
################################################################################
def setuplog(logname, logfile, loglevel, tid):
    """Create a logger that writes to ``logfile`` and to the console.

    Each record is tagged with the given worker/thread id ``tid``.  On any
    failure the process exits immediately with status 1.
    """
    try:
        record_format = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(name)s on tid:{} in %(funcName)s(): %(levelname)s: %(message)s".replace(
                "%(levelname)s: %(message)s", "%(levelname)s: %(message)s"
            ).format(tid)
        )
        logger = logging.getLogger(logname)
        file_handler = logging.FileHandler(logfile, encoding="utf-8")
        file_handler.setFormatter(record_format)
        logger.addHandler(file_handler)
        logger.setLevel(loglevel)
        # Mirror everything to the console as well.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(record_format)
        logger.addHandler(console_handler)
        return logger
    except Exception as e:
        print("FATAL ERROR: Could not create logging instance: {}".format(e), file=sys.stderr)
        sys.exit(1)
class RequestFailed(Exception):
    """Raised when the application server answers with a non-200 status."""
    pass
def post(service, data, baseurl=DEF_BASEURL):
    """POST ``data`` as JSON to <baseurl>/<service>; return the requests Response."""
    headers = {"Content-Type" : "application/json"}
    # NOTE(review): os.path.join is platform-dependent for URL building —
    # confirm behaviour, or switch to urllib.parse.urljoin.
    servpath = os.path.join(baseurl, service)
    LOG.debug(servpath)
    return requests.post(servpath, headers=headers, data=json.dumps(data))
################################################################################
class Test:
    def __init__(self, testdata, projectdbfile, baseurl=DEF_BASEURL, forever=False, seed=None):
        """Random-walk tester over the project-service API.

        testdata      -- fixture dict; note it *replaces* the instance
                         __dict__, so its keys (user, passw, auser, ...)
                         become attributes.
        projectdbfile -- path to the sqlite projects DB used for checks.
        baseurl       -- service root URL.
        forever       -- keep walking even after the stop state is reached.
        seed          -- RNG seed for reproducible walks.
        """
        self.__dict__ = testdata  # fixture keys become instance attributes
        self.baseurl = baseurl
        self.seed = seed
        LOG.info("SEED: {}".format(self.seed))
        # Current world state: which preconditions hold right now.
        self.state = {"u_notloggedin": True,
                      "u_loggedin": False,
                      "u_hasprojects": False,
                      "p_loaded": False,
                      "p_hasaudio": False,
                      "p_saved": False,
                      "p_unlocked": False,
                      "p_locked": False,
                      "p_unassigned": False,
                      "p_assigned": False,
                      "p_updated": False}
        # Each operation maps to the set of state flags it requires.
        self.ops = OrderedDict([("logout2", {}),
                                ("logout", {"u_loggedin"}),
                                ("login", {"u_notloggedin"}),
                                ("createproject", {"u_loggedin"}),
                                ("deleteproject", {"u_loggedin", "u_hasprojects", "p_loaded"}),
                                ("changepassword", {"u_loggedin"}),
                                ("listcategories", {"u_loggedin"}),
                                ("listlanguages", {"u_loggedin"}),
                                ("listprojects", {"u_loggedin"}),
                                ("loadusers", {"u_loggedin"}),
                                ("loadproject", {"u_loggedin", "u_hasprojects", "p_unlocked"}),
                                ("uploadaudio", {"u_loggedin", "u_hasprojects", "p_loaded", "p_unlocked", "p_unassigned"}),
                                ("getaudio", {"u_loggedin", "u_hasprojects", "p_loaded", "p_hasaudio", "p_unlocked", "p_unassigned"}),
                                ("diarizeaudio", {"u_loggedin", "u_hasprojects", "p_loaded", "p_hasaudio", "p_unlocked", "p_unassigned"}),
                                ("diarizeaudio2", {"u_loggedin", "u_hasprojects", "p_loaded", "p_hasaudio", "p_unlocked", "p_unassigned"}),
                                ("unlockproject", {"u_loggedin", "u_hasprojects", "p_loaded", "p_locked"}),
                                ("saveproject", {"u_loggedin", "u_hasprojects", "p_loaded", "p_hasaudio", "p_unlocked", "p_unassigned"}),
                                ("assigntasks", {"u_loggedin", "u_hasprojects", "p_loaded", "p_hasaudio", "p_saved", "p_unlocked", "p_unassigned"}),
                                ("updateproject", {"u_loggedin", "u_hasprojects", "p_loaded", "p_hasaudio", "p_saved", "p_unlocked", "p_assigned"})])
        self.forever = forever
        # Walk terminates when the state matches this exactly (unless forever).
        self.stopstate = {"u_notloggedin": False,
                          "u_loggedin": True,
                          "u_hasprojects": True,
                          "p_loaded": True,
                          "p_hasaudio": True,
                          "p_saved": True,
                          "p_unlocked": True,
                          "p_locked": False,
                          "p_unassigned": False,
                          "p_assigned": True,
                          "p_updated": True}
        self.db = sqlite.connect(projectdbfile)
        self.db.row_factory = sqlite.Row  # rows accessible by column name
def _possible(self):
possible_ops = set()
possible_ops = [op for op in self.ops if all(self.state[flag] for flag in self.ops[op])]
return possible_ops
    def walkthrough(self, mindelay, maxdelay):
        """Random-walk the API until the stop state is reached (or forever).

        At each step an operation is picked from the currently-possible set
        with probability proportional to (index + 1)^2, so later (more
        "advanced") operations are favoured.  Returns a
        (success, exception-or-None, self) triple.
        """
        random.seed(self.seed)
        np.random.seed(self.seed)
        try:
            while True:
                possible = self._possible()
                LOG.info("POSSIBLE: {}".format(possible))
                idxs = np.arange(len(possible))
                # Quadratic weighting towards later entries in the ops order.
                probs = ((idxs + 1) ** 2) / sum((idxs + 1) ** 2)
                choice = possible[np.random.choice(idxs, p=probs)]
                LOG.info("CHOICE: {}".format(choice))
                getattr(self, choice)()
                # Simulate user think-time between requests.
                stime = random.uniform(mindelay, maxdelay)
                LOG.info("SLEEP: {}".format(stime))
                time.sleep(stime)
                if self.state == self.stopstate and not self.forever:
                    LOG.info("DONE!")
                    return (True, None, self)
        except Exception as e:
            return (False, e, self)
### ADMIN
def adminlin(self, username=None, password=None):
    """Log in as administrator and remember the admin session token."""
    LOG.debug("ENTER")
    payload = {"username": username if username else self.auser,
               "password": password if password else self.apassw,
               "role": "admin"}
    resp = post("admin/login", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.atoken = resp.json()["token"]
def adminlout(self, token=None):
    """Log out the admin session identified by the stored token."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.atoken}
    resp = post("admin/logout", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.atoken = None
def adminlout2(self, username=None, password=None):
    """Log out the admin using credentials instead of a token."""
    LOG.debug("ENTER")
    payload = {"username": username if username else self.auser,
               "password": password if password else self.apassw}
    resp = post("admin/logout2", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.atoken = None
def adduser(self, token=None, username=None, password=None, name=None, surname=None, email=None, role=None):
    """Create a new user account through the admin API."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.atoken,
               "username": username if username else self.user,
               "password": password if password else self.passw,
               "name": name if name else self.name,
               "surname": surname if surname else self.surname,
               "email": email if email else self.email,
               "role": role if role else self.role}
    resp = post("admin/adduser", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def deluser(self, token=None, username=None):
    """Delete a user account through the admin API."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.atoken,
               "username": username if username else self.user}
    resp = post("admin/deluser", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
### NON-ADMIN
def login(self, username=None, password=None):
    """Authenticate the project user and store the session token."""
    LOG.debug("ENTER")
    payload = {"username": username if username else self.user,
               "password": password if password else self.passw,
               "role": "project"}
    resp = post("projects/login", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.token = resp.json()['token']
    self.state.update({"u_notloggedin": False, "u_loggedin": True})
def logout(self, token=None):
    """End the current user session by token and clear login state."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token}
    resp = post("projects/logout", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.token = None
    self.state.update({"u_notloggedin": True, "u_loggedin": False})
def logout2(self, username=None, password=None):
    """End the current user session using credentials rather than a token."""
    LOG.debug("ENTER")
    payload = {"username": username if username else self.user,
               "password": password if password else self.passw}
    resp = post("projects/logout2", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.token = None
    self.state.update({"u_notloggedin": True, "u_loggedin": False})
def changepassword(self, token=None, username=None, password=None):
    """Change the logged-in user's password, then swap the stored old/new values.

    NOTE(review): the ``username`` parameter is accepted but never sent to the
    server -- presumably the endpoint derives the user from the token; confirm.
    """
    LOG.debug("ENTER")
    data = {"token": token or self.token,
            "password": password or self.passw_}
    result = post("projects/changepassword", data)
    LOG.info("SERVSTAT: {}".format(result.status_code))
    LOG.info("SERVMESG: {}".format(result.text))
    if result.status_code != 200:
        raise RequestFailed(result.text)
    # Swap stored passwords so a subsequent call can flip back to the old one.
    self.passw_, self.passw = self.passw, data["password"]
def listcategories(self, token=None):
    """Fetch the available project categories."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token}
    resp = post("projects/listcategories", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def listlanguages(self, token=None):
    """Fetch the list of supported languages."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token}
    resp = post("projects/listlanguages", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def loadusers(self, token=None):
    """Fetch the users visible to the current session."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token}
    resp = post("projects/loadusers", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def createproject(self, token=None, projectname=None, category=None):
    """Create a new project, remember its id, and reset the lifecycle flags."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectname": projectname if projectname else self.projectname,
               "category": category if category else self.projectcat,
               "projectmanager": self.user}
    resp = post("projects/createproject", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.pid = resp.json()['projectid']
    # Fresh project: loaded and unlocked, nothing uploaded/saved/assigned yet.
    self.state.update({"u_hasprojects": True,
                       "p_loaded": True,
                       "p_hasaudio": False,
                       "p_saved": False,
                       "p_unlocked": True,
                       "p_locked": False,
                       "p_unassigned": True,
                       "p_assigned": False,
                       "p_updated": False})
def listprojects(self, token=None):
    """List every project visible to the current user."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token}
    resp = post("projects/listprojects", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def listcreatedprojects(self, token=None):
    """List only the projects created by the current user."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token}
    resp = post("projects/listcreatedprojects", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def loadproject(self, token=None, projectid=None):
    """Fetch the stored state of the current project from the server."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid}
    resp = post("projects/loadproject", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    #DEMIT: set new project parms
def deleteproject(self, token=None, projectid=None):
    """Delete the current project and clear the local project flags."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid}
    resp = post("projects/deleteproject", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.pid = None
    self.state.update({"u_hasprojects": False, "p_loaded": False})
def uploadaudio(self, token=None, projectid=None, filename=None):
    """Upload an audio file to the current project.

    Fix: the audio file handle was opened and never closed; a ``with`` block
    now releases it even when the request raises.
    """
    LOG.debug("ENTER")
    with open(filename or self.audiofile, "rb") as audio_fh:
        data = {"token": token or self.token,
                "projectid": projectid or self.pid,
                "filename": filename or os.path.basename(self.audiofile),
                "file": audio_fh}
        result = requests.post(os.path.join(self.baseurl, "projects/uploadaudio"), files=data)
    LOG.info("SERVSTAT: {}".format(result.status_code))
    LOG.info("SERVMESG: {}".format(result.text))
    if result.status_code != 200:
        raise RequestFailed(result.text)
    self.state["p_hasaudio"] = True
    self.state["p_saved"] = False
def getaudio(self, token=None, projectid=None):
    """Download the project audio and round-trip it through a temp file.

    The temp file is written and immediately deleted -- this only exercises
    the transfer path; nothing is kept.
    """
    LOG.debug("ENTER")
    data = {"token": token or self.token,
            "projectid": projectid or self.pid}
    result = requests.get(os.path.join(self.baseurl, "projects/getaudio"), params=data)
    LOG.info("SERVSTAT: {}".format(result.status_code))
    LOG.info("SERVMESG: {}".format("BINARY"))
    if result.status_code != 200:
        raise RequestFailed(result.text)
    #Write temp audiofile
    # NOTE(review): the descriptor is opened in text mode ("w") but receives
    # result.content (binary). Fine on Python 2 (this script uses raw_input);
    # would need "wb" under Python 3.
    f, fname = tempfile.mkstemp()
    f = os.fdopen(f, "w")
    f.write(result.content)
    f.close()
    os.remove(fname)
def diarizeaudio(self, token=None, projectid=None, ctm=None):
    """Request diarization, then play the external speech server's role.

    After the diarize request succeeds, this looks up the outgoing/incoming
    callback URLs registered for the project in the project DB, GETs the
    audio from the outgoing URL and PUTs a CTM back on the incoming URL --
    simulating the speech server completing the job.
    """
    LOG.debug("ENTER")
    data = {"token": token or self.token,
            "projectid": projectid or self.pid}
    putdata = {"CTM": ctm or self.diarizectm}
    result = post("projects/diarizeaudio", data)
    LOG.info("SERVSTAT: {}".format(result.status_code))
    LOG.info("SERVMESG: {}".format(result.text))
    if result.status_code != 200:
        raise RequestFailed(result.text)
    #SIMULATING SPEECHSERVER JOB
    with self.db:
        # Callback URLs the app server created for the speech server.
        outurl, = self.db.execute("SELECT url "
                                  "FROM outgoing "
                                  "WHERE projectid=?", (data["projectid"],)).fetchone()
        inurl, = self.db.execute("SELECT url "
                                 "FROM incoming "
                                 "WHERE projectid=?", (data["projectid"],)).fetchone()
        ##GET
        result = requests.get(os.path.join(self.baseurl, "projects", outurl), params={})
        LOG.info("SPEECHGETSTAT: {}".format(result.status_code))
        if result.status_code != 200:
            LOG.info("SPEECHGETMESG: {}".format(result.text))
            raise RequestFailed(result.text)
        LOG.info("SPEECHGETMESG: {}".format("BINARY"))
        ###Write temp audiofile
        # Round-trip the audio through a throwaway temp file (nothing kept).
        f, fname = tempfile.mkstemp()
        f = os.fdopen(f, "w")
        f.write(result.content)
        f.close()
        os.remove(fname)
        ##PUT
        result = requests.put(os.path.join(self.baseurl, "projects", inurl), headers={"Content-Type" : "application/json"}, data=json.dumps(putdata))
        LOG.info("SPEECHPUTSTAT: {}".format(result.status_code))
        LOG.info("SPEECHPUTMESG: {}".format(result.text))
    self.state["p_saved"] = False
def diarizeaudio2(self, token=None, projectid=None):
    """Trigger diarization WITHOUT simulating the speech server; the project stays locked."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid}
    resp = post("projects/diarizeaudio", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.state.update({"p_unlocked": False, "p_locked": True})
def saveproject(self, token=None, projectid=None, tasks=None, project=None):
    """Persist the project and its task definitions on the server."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid,
               "tasks": tasks if tasks else self.savetasks,
               "project": project if project else self.saveproj}
    resp = post("projects/saveproject", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.state["p_saved"] = True
def assigntasks(self, token=None, projectid=None, collator=None):
    """Assign the project's tasks to editors, with the given collator."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid,
               "collator": collator if collator else self.user}
    resp = post("projects/assigntasks", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.state.update({"p_unassigned": False, "p_assigned": True})
def updateproject(self, token=None, projectid=None, tasks=None, project=None):
    """Update the project and its associated tasks."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid,
               "tasks": tasks if tasks else self.updatetasks,
               "project": project if project else self.updateproj}
    resp = post("projects/updateproject", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.state["p_updated"] = True
def updateproject2(self, token=None, projectid=None, tasks=None, project=None):
    """Update only the project status, hardcoded to "Assigned".

    NOTE(review): the ``tasks`` and ``project`` parameters are accepted but
    ignored -- the payload always sends {"projectstatus": "Assigned"}; confirm
    whether that is intentional (signature kept for walkthrough dispatch).
    """
    LOG.debug("ENTER")
    data = {"token": token or self.token,
            "projectid": projectid or self.pid,
            "project": {"projectstatus" : "Assigned"}}
    result = post("projects/updateproject", data)
    LOG.info("SERVSTAT: {}".format(result.status_code))
    LOG.info("SERVMESG: {}".format(result.text))
    if result.status_code != 200:
        raise RequestFailed(result.text)
    self.state["p_updated"] = True
def unlockproject(self, token=None, projectid=None):
    """Release the project lock (counterpart to diarizeaudio2)."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.token,
               "projectid": projectid if projectid else self.pid}
    resp = post("projects/unlockproject", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
    self.state.update({"p_unlocked": True, "p_locked": False})
def resetpassword(self, token=None, username=None):
    """Reset a user's password; requires the admin token."""
    LOG.debug("ENTER")
    payload = {"token": token if token else self.atoken,
               "username": username if username else self.user}
    resp = post("projects/resetpassword", payload)
    LOG.info("SERVSTAT: {}".format(resp.status_code))
    LOG.info("SERVMESG: {}".format(resp.text))
    if resp.status_code != 200:
        raise RequestFailed(resp.text)
def runtest(args):
    """Worker entry point: unpack the args tuple, build a Test, walk it through.

    Runs in a separate process, so it rebinds the module-global LOG with a
    per-test logger before constructing the Test. Returns the walkthrough's
    (ok, exception_or_None, test_instance) tuple.
    """
    baseurl, testdata, projectdbfile, mindelay, maxdelay, logfile, loglevel = args
    ################################################################################
    ### LOGGING SETUP
    global LOG
    LOG = setuplog("PTESTER", logfile, loglevel, testdata["testid"])
    ################################################################################
    # Seed the walk with the test id so each simulated user is reproducible.
    t = Test(testdata, projectdbfile, baseurl=baseurl, seed=testdata["testid"])
    return t.walkthrough(mindelay, maxdelay)
# Script entry point: either simulate many concurrent users against the app
# server ("simulate" mode) or drive it by hand ("interactive" mode).
# NOTE(review): uses raw_input, so this script targets Python 2.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('mode', metavar='MODE', type=str, help="Mode of operation (interactive|simulate)")
    parser.add_argument('--baseurl', metavar='BASEURL', type=str, dest="baseurl", default=DEF_BASEURL, help="Base URL for requests")
    parser.add_argument('--logfile', metavar='LOGFILE', type=str, dest="logfile", default=DEF_LOGFILE, help="Log file path")
    parser.add_argument('--loglevel', metavar='LOGLEVEL', type=int, dest="loglevel", default=DEF_LOGLEVEL, help="Log verbosity level")
    parser.add_argument('--testfile', metavar='TESTFILE', type=str, dest="testfile", default=DEF_TESTFILE, help="Test data description file")
    parser.add_argument('--dbfile', metavar='DBFILE', type=str, dest="dbfile", default=DEF_DBFILE, help="Projects DB file path")
    parser.add_argument('--nusers', metavar='NUSERS', type=int, dest="nusers", default=DEF_NUSERS, help="Number of concurrent users (simulation mode)")
    parser.add_argument('--nprocs', metavar='NPROCS', type=int, dest="nprocs", default=DEF_NPROCS, help="Number of concurrent processes (simulation mode)")
    parser.add_argument('--mindelay', metavar='MINDELAY', type=float, dest="mindelay", default=DEF_MINDELAY, help="Minimum delay between user requests (simulation mode)")
    parser.add_argument('--maxdelay', metavar='DURATION', type=float, dest="maxdelay", default=DEF_MAXDELAY, help="Maximum delay between user requests (simulation mode)")
    args = parser.parse_args()
    # Shadow the builtin map with a process-pool map when multiprocessing is
    # available, so the simulation fans out across args.nprocs workers.
    try:
        import multiprocessing
        POOL = multiprocessing.Pool(processes=args.nprocs)
        def map(f, i):
            return POOL.map(f, i, chunksize=1)
    except ImportError:
        pass
    LOG = setuplog("PTESTER", args.logfile, args.loglevel, "admin")
    with codecs.open(args.testfile, encoding="utf-8") as testfh:
        testdata = json.load(testfh)
    if args.mode.startswith("sim"):
        LOG.info("Accessing Docker app server via: {}".format(args.baseurl))
        LOG.info("Creating {} tests/users".format(args.nusers))
        tests = []
        # One admin session creates all the simulated users up front.
        t = Test(testdata, args.dbfile, baseurl=args.baseurl)
        t.adminlin()
        for i in range(args.nusers):
            tdata = dict(testdata)
            tdata["user"] = "user{}".format(str(i).zfill(2))
            tdata["testid"] = i
            t.adduser(username=tdata["user"])
            tests.append(tdata)
        LOG.info("Walking through {} tests {} procs".format(args.nusers, args.nprocs))
        testresults = map(runtest, [(args.baseurl, tdata, args.dbfile, args.mindelay, args.maxdelay, args.logfile, args.loglevel) for tdata in tests])
        LOG.info("Walkthrough results: {} of {} successful".format(len([flag for flag, _, __ in testresults if flag == True]), len(tests)))
        LOG.info("Walkthrough failed for TIDs: {}".format(", ".join([str(teststate.testid) for flag, _, teststate in testresults if flag == False])))
        #force logout all and delete
        for flag, e, teststate in testresults:
            LOG.info("tid:{} Logging out and deleting user: {}".format(teststate.testid, teststate.user))
            LOG.info("tid:{} E-state: {}".format(teststate.testid, e))
            # The walk may have died mid password swap, so try both passwords.
            try:
                t.logout2(username=teststate.user, password=teststate.passw)
            except RequestFailed:
                t.logout2(username=teststate.user, password=teststate.passw_)
            t.deluser(username=teststate.user)
        #logout admin
        t.adminlout2()
    elif args.mode.startswith("int"):
        # Interactive REPL: each typed command dispatches to the Test method
        # of the same name.
        t = Test(testdata, args.dbfile, baseurl=args.baseurl)
        try:
            while True:
                cmd = raw_input("Enter command (type help for list)> ")
                cmd = cmd.lower()
                if cmd == "exit":
                    t.logout2()
                    t.adminlout2()
                    break
                elif cmd in ["help", "list"]:
                    print("ADMINLIN - Admin login")
                    print("ADMINLOUT - Admin logout")
                    print("ADMINLOUT2 - Admin logout (with username & password)")
                    print("ADDUSER - add new user\n")
                    print("DELUSER - delete new user\n")
                    print("LOGIN - user login")
                    print("LOGOUT - user logout")
                    print("LOGOUT2 - user logout (with username & password)")
                    print("CHANGEPASSWORD - change user user password")
                    print("CHANGEBACKPASSWORD - change user user password back")
                    print("LISTCATEGORIES - list project categories")
                    print("LISTLANGUAGES - list languages")
                    print("CREATEPROJECT - create a new project")
                    print("LISTPROJECTS - list projects")
                    print("LOADUSERS - load users")
                    print("LOADPROJECT - load projects")
                    print("UPLOADAUDIO - upload audio to project")
                    print("GETAUDIO - retrieve project audio")
                    print("SAVEPROJECT - update project and create/save tasks for a project")
                    print("ASSIGNTASKS - assign tasks to editors")
                    print("DIARIZEAUDIO - save tasks to a project via diarize request (simulate speech server)\n")
                    print("DIARIZEAUDIO2 - like DIARIZEAUDIO but withouth speech server (project stays locked)\n")
                    print("UPDATEPROJECT - update project and associated tasks")
                    print("UPDATEPROJECT2 - update projectstatus")
                    print("UNLOCKPROJECT - unlock project (can test this against DIARIZEAUDIO2)")
                    print("RESETPASSWORD - reset user's password")
                    print("EXIT - quit")
                else:
                    # Any other input is treated as a Test method name.
                    try:
                        meth = getattr(t, cmd)
                        meth()
                    except Exception as e:
                        print('Error processing command:', e)
        except:
            # Ctrl-C / EOF etc.: try to log out cleanly before exiting.
            t.logout2()
            t.adminlout2()
            print('')
    else:
        parser.print_help()
| nilq/baby-python | python |
#!/usr/bin/python
# Insert one test row into the zambia_weather database.
import MySQLdb

# Open database connection (host, user, password, database).
db = MySQLdb.connect("localhost", "root", "", "zambia_weather")
# Prepare a cursor object using cursor() method.
cursor = db.cursor()

# Parameterized INSERT -- MySQLdb escapes the %s arguments, so values are
# never interpolated into the SQL string directly.
insert = """INSERT INTO test (name, region) VALUES (%s, %s)"""
# Fix: execute() returns the affected row count, not a statement; the old
# unused `sql = ` binding and the stale commented-out code after close()
# were removed.
cursor.execute(insert, ("Hello", "World"))
db.commit()

# Disconnect from server.
db.close()
| nilq/baby-python | python |
import numpy as np
from numpy import log, exp, sqrt
from numpy.lib.shape_base import _expand_dims_dispatcher
# import yahoo finance to pull stock and crypto data from
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as optimization
# bringing in these libraries in order to use a dynamic date selection - see sub_years
import datetime as dt
from datetime import date
from workflow.MCForecastTools import MCSimulation
from CAPM import CAPM
# Capture today's date once at import time; used as the end of the sampling window.
today = date.today()
# sub_years (below) rewinds this date by a whole number of years; the program
# uses a 10-year lookback window.
def sub_years(years):
    """Return today's date shifted *years* whole years into the past.

    Handles the Feb 29 edge case: if the target year has no Feb 29,
    ``date.replace`` raises ValueError, and we fall back to shifting by the
    exact number of days between the two Jan 1sts.
    """
    today = date.today()
    try:
        return today.replace(year = today.year - years)
    except ValueError:
        # Fix: the delta must point BACKWARD (earlier year minus current year).
        # The previous fallback used (today.year + years), moving the date
        # *forward* by `years` instead of back.
        return today + (date(today.year - years, 1, 1) - date(today.year, 1, 1))
def start_end(today):
    """Return (start_date, end_date) ISO strings spanning the last ten years.

    The start is ten years before the current date (via sub_years); the end
    is the date passed in, both formatted for yahoo finance.
    """
    fmt = '%Y-%m-%d'
    return sub_years(10).strftime(fmt), today.strftime(fmt)
# Number of trading days in a year (stocks only) -- used to annualize
# daily means and covariances.
num_tradings_days = 252
# How many random weight vectors (candidate portfolios) to sample.
num_portfolios = 40000
# define a function download_data()
def download_data(stocks):
    """Download 10 years of daily closing prices for *stocks* from yahoo finance.

    Missing values are first filled with a 6-day rolling mean, then any
    remaining NaN rows are dropped. Returns a DataFrame of close prices.
    """
    stock_data = yf.download(
        #tickers list or string as well
        tickers = stocks,
        # use "period" instead of start/end
        # valid periods: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, 10y, ytd, max
        # (optional, default is "1mo")
        period = "10y",
        # fetch data by interval (including intraday if period < 60 days)
        # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
        # (optional, default is '1d')
        interval = '1d',
        # adjust all OHLC automatically
        # (optional, default is False)
        auto_adjust = True,
        # download pre/post regular market hours data
        # (optional, default is False)
        prepost = True,
        # use threads for mass downloading? (True/False/Integer)
        # (optional, default is True)
        threads = True,
        # proxy URL scheme use use when downloading?
        # (optional, default is None)
        proxy = None
    )['Close']
    # Patch short gaps with a rolling mean, then drop rows that are still NaN.
    data_cleaned = stock_data.fillna(stock_data.rolling(6, min_periods=1).mean())
    data_cleaned = data_cleaned.dropna()
    return pd.DataFrame(data_cleaned)
# define a function show_data()
def show_data(data, start_date, end_date):
    """Plot the historical closing prices over the sampled window (blocking)."""
    chart_title = "Historical Price from {} through {}".format(start_date, end_date)
    data.plot(figsize=(20,10), grid=True, xlabel='Date', ylabel="Stock Price", title=chart_title)
    plt.show()
# Calculates the log returns to assist with creating the portfolio weights, average returns and volatility.
def calculate_log_return(data):
    """Log daily returns ln(p_t / p_{t-1}); the leading NaN row is dropped."""
    # NORMALIZATION - measure all assets in a comparable metric.
    ratios = data / data.shift(1)
    log_ret = np.log(ratios)
    # Slice off the first row, which is NaN (no prior price to compare to).
    return log_ret.iloc[1:]
# Calculates the daily return from the data provided
def calculate_return(data):
    """Simple (arithmetic) daily returns; the leading NaN row is dropped."""
    pct = data.pct_change()
    return pct.iloc[1:]
# Define annual statistics
def show_statistics(returns):
    """Print the annualized mean returns and covariance matrix (252 days/yr)."""
    annual_mean = returns.mean() * num_tradings_days
    annual_cov = returns.cov() * num_tradings_days
    print(annual_mean)
    print(annual_cov)
# Calculates the portfolio returns using the weights calculated in a previous function.
# Calculates the portfolio volatility using the weights calculated in a previous function.
def show_mean_variance(returns, weights):
    """Print the annualized return and volatility of a weighted portfolio."""
    annual_ret = np.sum(returns.mean()*weights) * num_tradings_days
    annual_cov = returns.cov() * num_tradings_days
    annual_vol = np.sqrt(np.dot(weights.T, np.dot(annual_cov, weights)))
    print(f"Expected portfolio mean (return): {annual_ret}")
    print(f"Expected portfolio volatility (standard deviation): {annual_vol}")
# Explains what the Efficient Frontier and the model.
# Inputs include the log daily returns and stock picks to output portfolio weights, means and risk.
def generate_portfolios(stocks, returns):
    """Sample num_portfolios random weight vectors and their annual stats.

    Prints an explanation of the Efficient Frontier model, then returns
    (weights, means, risks) as numpy arrays. Uses the global numpy RNG, so
    results depend on any seeding done by the caller.
    """
    print("\n...................................**Efficient Frontier**...................................\n")
    print("")
    print("In Modern Portfolio Theory, the efficient frontier is an investment portfolio which occupies\n")
    print("the 'efficient' parts of the risk-return spectrum. Formally, it is the set of portfolios which\n")
    print("satisfy the condition that no other portfolio exists with a higher expected return but with the\n")
    print(" same amount of risk (standard deviation).\n")
    print("")
    print("..............................................................................................\n")
    print("")
    print("In our model we are using the Efficient Frontier to generate the optimal weights for our portfolio's\n")
    print("capital allocation. The weights generated here will then be passed to our Monte Carlo Simulator so\n")
    print(f" we can determine a range of expected returns with 95% confidence.\n")
    print("")
    print("")
    portfolio_means = []
    portfolio_risks = []
    portfolio_weights = []
    for _ in range(num_portfolios):
        # Progress report every 4000 sampled portfolios.
        if _ % 4000 == 0:
            print(f"Running Modern Portfolio Theory simulation... {round((_ / num_portfolios) * 100,0)}% completed.")
        # Random weights normalized to sum to 1 (fully invested, long-only).
        w = np.random.random(len(stocks))
        w /= np.sum(w)
        portfolio_weights.append(w)
        # Annualized portfolio return and volatility for this weight vector.
        portfolio_means.append(np.sum(returns.mean() * w) * num_tradings_days)
        portfolio_risks.append(np.sqrt(np.dot(w.T, np.dot(returns.cov() * num_tradings_days, w))))
    return np.array(portfolio_weights), np.array(portfolio_means), np.array(portfolio_risks)
# Prints out the Efficient Frontier plot
def show_portfolios(returns, volatilities):
    """Scatter every sampled portfolio in risk/return space, colored by Sharpe ratio."""
    plt.figure(figsize=(20,10))
    plt.style.use(['dark_background'])
    # Color each point by its return/volatility ratio (Sharpe, zero risk-free rate).
    plt.scatter(volatilities, returns, c=returns/volatilities, marker='o')
    plt.grid(True)
    plt.xlabel('Expected Volatility')
    plt.ylabel('Expected Returns')
    plt.colorbar(label='Sharpe Ratio')
    plt.show()
# Prints out the statistics of the portfolio
def statistics(weights, returns):
    """Return [annual return, annual volatility, Sharpe ratio] for *weights*."""
    annual_ret = np.sum(returns.mean() * weights) * num_tradings_days
    annual_cov = returns.cov() * num_tradings_days
    annual_vol = np.sqrt(np.dot(weights.T, np.dot(annual_cov, weights)))
    return np.array([annual_ret, annual_vol, annual_ret / annual_vol])
# scipy optimize module can find the minimum of a given function
# the maximum of a f(x) is the minimum of -f(x)
def min_function_sharpe(weights, returns):
    """Negative Sharpe ratio -- minimizing this maximizes the Sharpe ratio."""
    sharpe = statistics(weights, returns)[2]
    return -sharpe
# what are the constraints? the sum of weights = 1
# f(x)=0 this is the function to minimize
def optimize_portfolio(stocks, weights, returns):
    """Maximize the Sharpe ratio (by minimizing its negative) with SLSQP."""
    # Equality constraint: weights must sum to 1 (fully invested).
    constraint = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}
    # Each weight bounded in [0, 1]: no shorting, no leverage; a weight of 1
    # means 100% of the money in a single stock.
    bounds = tuple((0, 1) for _ in range(len(stocks)))
    return optimization.minimize(
        fun=min_function_sharpe,
        x0=weights[0],
        args=returns,
        method='SLSQP',
        bounds=bounds,
        constraints=constraint
    )
# Prints the optimal portfolio and retun volatility and sharpe ratios
def print_optimum(optimum, returns):
    """Print the optimal weight vector and its return/volatility/Sharpe stats."""
    best_weights = optimum['x']
    print(f"Optimal portfolio: {best_weights}")
    print(f"Expected Return, volatility and Sharpe Ratio: {statistics(best_weights, returns)}")
# print the Stocks and Weights into a manageable pd.DataFrame to be easier to read and export
def print_optimal_portfolio_dataframe(stocks, optimum, returns):
    """Build (metrics, weights) DataFrames describing the optimal portfolio.

    Weights are formatted as percentage strings indexed by ticker; metrics
    hold the annual return, volatility and Sharpe ratio.
    """
    pct_weights = []
    for raw in optimum['x']:
        # Round to 5 decimals first, then scale to a percentage string.
        pct = round(raw, 5) * 100
        pct_weights.append(f"{pct} %")
    weights_df = pd.DataFrame({'Weights %': pct_weights}, index=stocks)
    stat_labels = ['Expected Returns', 'Expected Volatility', 'Expected Sharpe Ratio']
    metrics_df = pd.DataFrame({"Metrics": statistics(optimum['x'], returns)}, index=stat_labels)
    # Weights are in the same order as `stocks`, so the frames line up.
    return metrics_df, weights_df
# Prints out the optimal portfolio plot in the efficient frontier.
def show_optimal_portfolio(opt, rets, portfolio_rets, portfolio_vols, sectors_selected):
    """Scatter all sampled portfolios and mark the optimal one with a red star."""
    plt.figure(figsize=(20,10))
    # plt.style.use(['dark_background'])
    plt.scatter(portfolio_vols, portfolio_rets, c=portfolio_rets/portfolio_vols, marker='o')
    plt.grid(True)
    plt.rcParams.update({'font.size': 18})
    plt.title(f"Modern Portfolio Theory for {sectors_selected} Sectors by Top 5 Market Cap")
    plt.xlabel("Expected Volatility")
    plt.ylabel("Expected Return")
    plt.colorbar(label='Sharpe Ratio')
    # Optimal point: x = volatility, y = return of the optimizer's solution.
    best_stats = statistics(opt['x'], rets)
    plt.plot(best_stats[1], best_stats[0], 'r*', markersize=20.0)
# Cleans the dataframe to use in the monte carlo simulation
def clean_df_monte_carlo(dataset, daily_returns):
    """Merge prices and daily returns into one MultiIndex frame for MCSimulation.

    Note: mutates both input frames' columns (adds a second level tagging the
    value kind), then inner-joins them and sorts columns by ticker.
    """
    dataset.columns = pd.MultiIndex.from_product([dataset.columns, ['close']])
    daily_returns.columns = pd.MultiIndex.from_product([daily_returns.columns, ['daily_return']])
    combined = pd.concat([dataset, daily_returns], axis='columns', join='inner')
    combined.sort_index(axis=1, level=0, inplace=True)
    return pd.DataFrame(combined)
# Runs the monte carlo
def monte_carlo(stocks, dataset, optimum, investment):
    """Run a 500-path, 252-trading-day Monte Carlo simulation with the optimal weights.

    Returns (MC_Stocks, summary_table, upper_95ci_dollars, lower_95ci_dollars)
    for the given initial *investment*.
    """
    print("\n...................................**Monte Carlo Simulation**...................................\n")
    print("A Monte Carlo simulation is a model used to predict the probability of different outcomes when the\n")
    print(" intervention of random variables is present.\n")
    print("\n")
    print("\n")
    num_trading_days=252
    # NOTE(review): the three comments below look stale (mention 30 years and
    # AGG/SPY, but the code runs 252 days with the optimizer's weights) -- confirm.
    # Configure the Monte Carlo simulation to forecast 30 years cumulative returns
    # The weights should be split 40% to AGG and 60% to SPY.
    # Run 500 samples.
    weights = optimum['x']
    # NOTE(review): this weights frame is built but never used below -- confirm
    # whether it was meant to be printed or returned.
    optimal_portfolio_weights_df = pd.DataFrame({'Weights %': weights}, index=stocks)
    # dataset.columns = pd.MultiIndex.from_product([['close'], dataset.columns])
    MC_Stocks = MCSimulation(
        portfolio_data= dataset,
        weights=weights,
        num_simulation=500,
        num_trading_days=num_trading_days
    )
    # Review the simulation input data
    MC_Stocks.calc_cumulative_return()
    mc_stock_tbl = MC_Stocks.summarize_cumulative_return()
    # print(optimal_portfolio_weights_df)
    # print(mc_stock_tbl)
    # Rows 8/9 of the summary hold the 95% CI lower/upper cumulative-return
    # multipliers; scale them by the initial investment.
    mc_ci_lower = round(mc_stock_tbl[8]*investment,2)
    mc_ci_upper = round(mc_stock_tbl[9]*investment,2)
    # investment_return = print(f"There is a 95% chance that an initial investment of ${investment} in the portfolio over the next {round(num_trading_days / 252)} years will end within in the range of ${mc_ci_lower} ({round(((mc_ci_lower - investment) / investment) * 100,2)}%) and ${mc_ci_upper} ({round(((mc_ci_upper - investment) / investment) * 100,2)}%).")
    return MC_Stocks, mc_stock_tbl, mc_ci_upper, mc_ci_lower
def mc_invest_print(investment, mc_ci_upper, mc_ci_lower):
    """Print the 95% confidence-interval dollar range for a 1-year investment."""
    num_trading_days = 252
    print(f"There is a 95% chance that an initial investment of ${investment} in the portfolio over the next {round(num_trading_days / 252)} year will be within a range of ${mc_ci_lower} ({round(((mc_ci_lower - investment) / investment) * 100,2)}%) and ${mc_ci_upper} ({round(((mc_ci_upper - investment) / investment) * 100,2)}%).")
# in order to get both plots to show we had to create a separate function for each plot
# and pass the MC_Stocks dataframe in as a parameter
# ultimately we had to use "plt.show()" in order for the plots to populate individually
def mc_line_plot(MC_Stocks):
    """Render the Monte Carlo simulation traces in its own blocking window.

    Kept as a separate function so each plot gets its own plt.show() call.
    """
    MC_Stocks.plot_simulation()
    plt.show()
# mc_line_plot(MC_Stocks)
# in order to get both plots to show we had to create a separate function for each plot
# and pass the MC_Stocks dataframe in as a parameter
# ultimately we had to use "plt.show()" in order for the plots to populate individually
def mc_dist_plot(MC_Stocks):
    """Render the Monte Carlo outcome distribution in its own blocking window.

    Kept as a separate function so each plot gets its own plt.show() call.
    """
    MC_Stocks.plot_distribution()
    plt.show()
def capm(stocks, start_date, end_date, risk_free_rate, weights):
    """Run the CAPM regression for the portfolio plus the S&P 500 benchmark.

    Fix: previously this appended '^GSPC' to the caller's list in place, so
    repeated calls accumulated extra benchmark tickers. It now works on a
    copy, leaving the caller's *stocks* untouched.
    """
    tickers = list(stocks)
    tickers.append('^GSPC')  # S&P 500 index as the market portfolio proxy
    capm = CAPM(
        tickers,
        start_date,
        end_date,
        risk_free_rate,
        weights
    )
    capm.initialize()
    beta = capm.calculate_beta()
    print(beta)
    capm.regression()
# Copyright (C) 2019 LYNX B.V. All rights reserved.
# Import ibapi deps
from ibapi import wrapper
from ibapi.client import EClient
from ibapi.contract import *
from threading import Thread
from time import sleep
CONTRACT_ID = 4001
class Wrapper(wrapper.EWrapper):
    """EWrapper subclass that prints selected fields of contract details."""
    def __init__(self):
        wrapper.EWrapper.__init__(self)
    def contractDetails(self, reqId:int, contractDetails:ContractDetails):
        """Receives the full contract's definitions. This method will return all
        contracts matching the requested via EEClientSocket::reqContractDetails.
        For example, one can obtain the whole option chain with it."""
        # Print only the fields of interest; see printinstance for a full dump.
        print("marketName: ", contractDetails.marketName, "\nvalidExchanges: ", contractDetails.validExchanges,\
            "\nlongName: ", contractDetails.longName, "\nminTick: ",contractDetails.minTick)
        #printinstance(contractDetails) using this print statement all of the available details will be printed out.
class Client(EClient):
    """EClient subclass that issues a contract-details request and waits for replies."""
    def __init__(self, wrapper):
        EClient.__init__(self, wrapper)
    def get_contractDetails(self, contract, reqId = CONTRACT_ID):
        """Request contract details, then block for a fixed grace period so the
        asynchronous callbacks have time to arrive."""
        self.reqContractDetails(reqId, contract)
        wait_seconds = 5
        print("Getting contract details from the server... can take %d second to complete" % wait_seconds)
        sleep(wait_seconds)
class TestApp(Wrapper, Client):
    """Combines the callback wrapper and the request client into one app."""

    def __init__(self, ipaddress, portid, clientid):
        Wrapper.__init__(self)
        Client.__init__(self, wrapper=self)
        # Connect first, then pump the EClient message loop on a daemon-less
        # background thread so callbacks arrive while the main thread works.
        self.connect(ipaddress, portid, clientid)
        thread = Thread(target=self.run)
        thread.start()
        setattr(self, "_thread", thread)
def printinstance(inst: object):
    """Print every instance attribute of *inst* as 'name: value', one per line.

    Bug fix: the annotation was previously ``Object`` (an undefined name),
    which raised NameError the moment this module was imported, because
    function annotations are evaluated at definition time.
    """
    attrs = vars(inst)
    print('\n'.join("%s: %s" % item for item in attrs.items()))
def main():
    """Connect to a locally running TWS and request EUR.USD contract details.

    Requires TWS/IB Gateway listening on localhost:7496 with API access
    enabled; prints server version and the received contract fields.
    """
    # Init the TestApp(Wrapper, Client)
    app = TestApp("localhost", 7496, clientid = 0)
    print("serverVersion:%s connectionTime:%s" % (app.serverVersion(),
                                                  app.twsConnectionTime()))
    # Define the contract (EUR.USD forex pair on IDEALPRO)
    contract = Contract()
    contract.symbol = "EUR"
    contract.secType = "CASH"
    contract.currency = "USD"
    contract.exchange = "IDEALPRO"
    app.get_contractDetails(contract)

if __name__ == "__main__":
    main()
| nilq/baby-python | python |
import re
# Regex scratch pad.
# Earlier experiments (left out here) used re.split with capture groups and
# character classes such as [a-hA-F0-9].
# NOTE(review): one removed experiment passed `re.I | re.M` as the third
# positional argument of re.split — that slot is maxsplit, not flags.
'''
\d = digits
\D = non-digits
\s = Space
\S = non-Space
\w = alphanumeric
\. = regular period (.)
. = Any character but newline(\n)
* = 0 or more
+ = 1 or more
? = 0 or 1 of ...
{5} = exact number of ...
{1,60} = range on number of ...
'''
# Match a street address: 1-5 digits, a word, another word, then a period.
address_pattern = re.compile(r'\d{1,5}\s\w+\s\w+\.')
print(address_pattern.findall('ocinwe324 main st.asdvce'))
| nilq/baby-python | python |
## AUTHOR: Vamsi Krishna Reddy Satti
##################################################################################
# Data loader
##################################################################################
import numpy as np
class DataLoader:
    """Simple mini-batch iterator over one or more parallel numpy arrays.

    Accepts either a single array or a tuple of arrays that share the same
    first dimension. Iteration yields batches from the END of the arrays
    backwards (the outstanding-batch counter counts down); this order is
    preserved from the original implementation for compatibility. After a
    full epoch the loader resets itself so the same object can be reused.
    """

    def __init__(self, dataset, batch_size=1, shuffle=False):
        self.dataset = dataset if isinstance(dataset, tuple) else (dataset, )
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.dataset_size = self.dataset[0].shape[0]
        self.batches_outstanding = self._num_batches()
        self.shuffle_data()

    def _num_batches(self):
        # Pure-integer ceil division. Bug fix: the original used
        # np.ceil(...).astype(np.long), and np.long was removed in
        # NumPy 1.24, raising AttributeError.
        return -(-self.dataset_size // self.batch_size)

    def __iter__(self):
        return self

    def __len__(self):
        # Number of batches still to be served this epoch.
        return self.batches_outstanding

    def __next__(self):
        if self.batches_outstanding == 0:
            # Reset counters/permutation so the same object works next epoch.
            self.batches_outstanding = self._num_batches()
            self.shuffle_data()
            raise StopIteration
        self.batches_outstanding -= 1
        batch = tuple(data[self.batches_outstanding * self.batch_size:
                           (self.batches_outstanding + 1) * self.batch_size]
                      for data in self.dataset)
        # Unwrap single-array datasets so callers get the array directly.
        return batch if len(batch) > 1 else batch[0]

    def shuffle_data(self):
        """Apply one shared random permutation to every array in the dataset."""
        if self.shuffle:
            indices = np.random.permutation(self.dataset_size)
            # Keep self.dataset a tuple (the original rebound it as a list).
            self.dataset = tuple(data[indices] for data in self.dataset)
| nilq/baby-python | python |
import PHPTraceTokenizer
import PHPProfileParser
import PHPTraceParser
import os
# Directory holding the sample Xdebug capture files.
traceDir = "test-data"

def trace_and_profile_from_timestamp(traceDir, timestamp):
    """Return the (trace .xt, profile .xp) path pair for a capture timestamp."""
    base = "{}".format(timestamp)
    trace_path = os.path.join(traceDir, base + ".xt")
    profile_path = os.path.join(traceDir, base + ".xp")
    return trace_path, profile_path
def create_trace(traceFile, profileFile):
    """Tokenize a PHP trace, resolving functions to files via its profile."""
    mappings = PHPProfileParser.get_function_file_mapping(profileFile)
    trace = PHPTraceTokenizer.Trace(traceFile, mappings)
    return trace
def traceNoExceptionsTest(timestamp):
    """Smoke test: parsing the trace/profile pair for *timestamp* must not raise."""
    traceFile, profileFile = trace_and_profile_from_timestamp(traceDir, timestamp)
    trace = create_trace(traceFile, profileFile)

# Runs at import time against the bundled sample capture.
traceNoExceptionsTest('1541770537')
"""
EVM Instruction Encoding (Opcodes)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
Machine readable representations of EVM instructions, and a mapping to their
implementations.
"""
import enum
from typing import Callable, Dict
from . import arithmetic as arithmetic_instructions
from . import bitwise as bitwise_instructions
from . import block as block_instructions
from . import comparison as comparison_instructions
from . import control_flow as control_flow_instructions
from . import environment as environment_instructions
from . import keccak as keccak_instructions
from . import log as log_instructions
from . import memory as memory_instructions
from . import stack as stack_instructions
from . import storage as storage_instructions
class Ops(enum.Enum):
    """
    Enum for EVM Opcodes

    Each member's value is the one-byte opcode as it appears in EVM
    bytecode; members are grouped by instruction family.
    """

    # Arithmetic Ops
    ADD = 0x01
    MUL = 0x02
    SUB = 0x03
    DIV = 0x04
    SDIV = 0x05
    MOD = 0x06
    SMOD = 0x07
    ADDMOD = 0x08
    MULMOD = 0x09
    EXP = 0x0A
    SIGNEXTEND = 0x0B

    # Comparison Ops
    LT = 0x10
    GT = 0x11
    SLT = 0x12
    SGT = 0x13
    EQ = 0x14
    ISZERO = 0x15

    # Bitwise Ops
    AND = 0x16
    OR = 0x17
    XOR = 0x18
    NOT = 0x19
    BYTE = 0x1A

    # Keccak Op
    KECCAK = 0x20

    # Environmental Ops
    ADDRESS = 0x30
    BALANCE = 0x31
    ORIGIN = 0x32
    CALLER = 0x33
    CALLVALUE = 0x34
    CALLDATALOAD = 0x35
    CALLDATASIZE = 0x36
    CALLDATACOPY = 0x37
    CODESIZE = 0x38
    CODECOPY = 0x39
    GASPRICE = 0x3A
    EXTCODESIZE = 0x3B
    EXTCODECOPY = 0x3C

    # Block Ops
    BLOCKHASH = 0x40
    COINBASE = 0x41
    TIMESTAMP = 0x42
    NUMBER = 0x43
    DIFFICULTY = 0x44
    GASLIMIT = 0x45

    # Control Flow Ops
    STOP = 0x00
    JUMP = 0x56
    JUMPI = 0x57
    PC = 0x58
    GAS = 0x5A
    JUMPDEST = 0x5B

    # Storage Ops
    SLOAD = 0x54
    SSTORE = 0x55

    # Pop Operation
    POP = 0x50

    # Push Operations (PUSH1..PUSH32 push 1..32 immediate bytes)
    PUSH1 = 0x60
    PUSH2 = 0x61
    PUSH3 = 0x62
    PUSH4 = 0x63
    PUSH5 = 0x64
    PUSH6 = 0x65
    PUSH7 = 0x66
    PUSH8 = 0x67
    PUSH9 = 0x68
    PUSH10 = 0x69
    PUSH11 = 0x6A
    PUSH12 = 0x6B
    PUSH13 = 0x6C
    PUSH14 = 0x6D
    PUSH15 = 0x6E
    PUSH16 = 0x6F
    PUSH17 = 0x70
    PUSH18 = 0x71
    PUSH19 = 0x72
    PUSH20 = 0x73
    PUSH21 = 0x74
    PUSH22 = 0x75
    PUSH23 = 0x76
    PUSH24 = 0x77
    PUSH25 = 0x78
    PUSH26 = 0x79
    PUSH27 = 0x7A
    PUSH28 = 0x7B
    PUSH29 = 0x7C
    PUSH30 = 0x7D
    PUSH31 = 0x7E
    PUSH32 = 0x7F

    # Dup operations (duplicate the 1st..16th stack item)
    DUP1 = 0x80
    DUP2 = 0x81
    DUP3 = 0x82
    DUP4 = 0x83
    DUP5 = 0x84
    DUP6 = 0x85
    DUP7 = 0x86
    DUP8 = 0x87
    DUP9 = 0x88
    DUP10 = 0x89
    DUP11 = 0x8A
    DUP12 = 0x8B
    DUP13 = 0x8C
    DUP14 = 0x8D
    DUP15 = 0x8E
    DUP16 = 0x8F

    # Swap operations (swap top with the 2nd..17th stack item)
    SWAP1 = 0x90
    SWAP2 = 0x91
    SWAP3 = 0x92
    SWAP4 = 0x93
    SWAP5 = 0x94
    SWAP6 = 0x95
    SWAP7 = 0x96
    SWAP8 = 0x97
    SWAP9 = 0x98
    SWAP10 = 0x99
    SWAP11 = 0x9A
    SWAP12 = 0x9B
    SWAP13 = 0x9C
    SWAP14 = 0x9D
    SWAP15 = 0x9E
    SWAP16 = 0x9F

    # Memory Operations
    MLOAD = 0x51
    MSTORE = 0x52
    MSTORE8 = 0x53
    MSIZE = 0x59

    # Log Operations (LOG0..LOG4 carry 0..4 indexed topics)
    LOG0 = 0xA0
    LOG1 = 0xA1
    LOG2 = 0xA2
    LOG3 = 0xA3
    LOG4 = 0xA4
# Maps each opcode to the function implementing it. The instruction modules
# are imported at the top of this file.
# Bug fix: the original dict literal listed Ops.SSTORE twice (duplicate key);
# the redundant entry has been removed.
op_implementation: Dict[Ops, Callable] = {
    Ops.STOP: control_flow_instructions.stop,
    Ops.ADD: arithmetic_instructions.add,
    Ops.MUL: arithmetic_instructions.mul,
    Ops.SUB: arithmetic_instructions.sub,
    Ops.DIV: arithmetic_instructions.div,
    Ops.SDIV: arithmetic_instructions.sdiv,
    Ops.MOD: arithmetic_instructions.mod,
    Ops.SMOD: arithmetic_instructions.smod,
    Ops.ADDMOD: arithmetic_instructions.addmod,
    Ops.MULMOD: arithmetic_instructions.mulmod,
    Ops.EXP: arithmetic_instructions.exp,
    Ops.SIGNEXTEND: arithmetic_instructions.signextend,
    Ops.LT: comparison_instructions.less_than,
    Ops.GT: comparison_instructions.greater_than,
    Ops.SLT: comparison_instructions.signed_less_than,
    Ops.SGT: comparison_instructions.signed_greater_than,
    Ops.EQ: comparison_instructions.equal,
    Ops.ISZERO: comparison_instructions.is_zero,
    Ops.AND: bitwise_instructions.bitwise_and,
    Ops.OR: bitwise_instructions.bitwise_or,
    Ops.XOR: bitwise_instructions.bitwise_xor,
    Ops.NOT: bitwise_instructions.bitwise_not,
    Ops.BYTE: bitwise_instructions.get_byte,
    Ops.KECCAK: keccak_instructions.keccak,
    Ops.SLOAD: storage_instructions.sload,
    Ops.SSTORE: storage_instructions.sstore,
    Ops.BLOCKHASH: block_instructions.block_hash,
    Ops.COINBASE: block_instructions.coinbase,
    Ops.TIMESTAMP: block_instructions.timestamp,
    Ops.NUMBER: block_instructions.number,
    Ops.DIFFICULTY: block_instructions.difficulty,
    Ops.GASLIMIT: block_instructions.gas_limit,
    Ops.MLOAD: memory_instructions.mload,
    Ops.MSTORE: memory_instructions.mstore,
    Ops.MSTORE8: memory_instructions.mstore8,
    Ops.MSIZE: memory_instructions.msize,
    Ops.ADDRESS: environment_instructions.address,
    Ops.BALANCE: environment_instructions.balance,
    Ops.ORIGIN: environment_instructions.origin,
    Ops.CALLER: environment_instructions.caller,
    Ops.CALLVALUE: environment_instructions.callvalue,
    Ops.CALLDATALOAD: environment_instructions.calldataload,
    Ops.CALLDATASIZE: environment_instructions.calldatasize,
    Ops.CALLDATACOPY: environment_instructions.calldatacopy,
    Ops.CODESIZE: environment_instructions.codesize,
    Ops.CODECOPY: environment_instructions.codecopy,
    Ops.GASPRICE: environment_instructions.gasprice,
    Ops.EXTCODESIZE: environment_instructions.extcodesize,
    Ops.JUMP: control_flow_instructions.jump,
    Ops.JUMPI: control_flow_instructions.jumpi,
    Ops.PC: control_flow_instructions.pc,
    Ops.GAS: control_flow_instructions.gas_left,
    Ops.JUMPDEST: control_flow_instructions.jumpdest,
    Ops.POP: stack_instructions.pop,
    Ops.PUSH1: stack_instructions.push1,
    Ops.PUSH2: stack_instructions.push2,
    Ops.PUSH3: stack_instructions.push3,
    Ops.PUSH4: stack_instructions.push4,
    Ops.PUSH5: stack_instructions.push5,
    Ops.PUSH6: stack_instructions.push6,
    Ops.PUSH7: stack_instructions.push7,
    Ops.PUSH8: stack_instructions.push8,
    Ops.PUSH9: stack_instructions.push9,
    Ops.PUSH10: stack_instructions.push10,
    Ops.PUSH11: stack_instructions.push11,
    Ops.PUSH12: stack_instructions.push12,
    Ops.PUSH13: stack_instructions.push13,
    Ops.PUSH14: stack_instructions.push14,
    Ops.PUSH15: stack_instructions.push15,
    Ops.PUSH16: stack_instructions.push16,
    Ops.PUSH17: stack_instructions.push17,
    Ops.PUSH18: stack_instructions.push18,
    Ops.PUSH19: stack_instructions.push19,
    Ops.PUSH20: stack_instructions.push20,
    Ops.PUSH21: stack_instructions.push21,
    Ops.PUSH22: stack_instructions.push22,
    Ops.PUSH23: stack_instructions.push23,
    Ops.PUSH24: stack_instructions.push24,
    Ops.PUSH25: stack_instructions.push25,
    Ops.PUSH26: stack_instructions.push26,
    Ops.PUSH27: stack_instructions.push27,
    Ops.PUSH28: stack_instructions.push28,
    Ops.PUSH29: stack_instructions.push29,
    Ops.PUSH30: stack_instructions.push30,
    Ops.PUSH31: stack_instructions.push31,
    Ops.PUSH32: stack_instructions.push32,
    Ops.DUP1: stack_instructions.dup1,
    Ops.DUP2: stack_instructions.dup2,
    Ops.DUP3: stack_instructions.dup3,
    Ops.DUP4: stack_instructions.dup4,
    Ops.DUP5: stack_instructions.dup5,
    Ops.DUP6: stack_instructions.dup6,
    Ops.DUP7: stack_instructions.dup7,
    Ops.DUP8: stack_instructions.dup8,
    Ops.DUP9: stack_instructions.dup9,
    Ops.DUP10: stack_instructions.dup10,
    Ops.DUP11: stack_instructions.dup11,
    Ops.DUP12: stack_instructions.dup12,
    Ops.DUP13: stack_instructions.dup13,
    Ops.DUP14: stack_instructions.dup14,
    Ops.DUP15: stack_instructions.dup15,
    Ops.DUP16: stack_instructions.dup16,
    Ops.SWAP1: stack_instructions.swap1,
    Ops.SWAP2: stack_instructions.swap2,
    Ops.SWAP3: stack_instructions.swap3,
    Ops.SWAP4: stack_instructions.swap4,
    Ops.SWAP5: stack_instructions.swap5,
    Ops.SWAP6: stack_instructions.swap6,
    Ops.SWAP7: stack_instructions.swap7,
    Ops.SWAP8: stack_instructions.swap8,
    Ops.SWAP9: stack_instructions.swap9,
    Ops.SWAP10: stack_instructions.swap10,
    Ops.SWAP11: stack_instructions.swap11,
    Ops.SWAP12: stack_instructions.swap12,
    Ops.SWAP13: stack_instructions.swap13,
    Ops.SWAP14: stack_instructions.swap14,
    Ops.SWAP15: stack_instructions.swap15,
    Ops.SWAP16: stack_instructions.swap16,
    Ops.LOG0: log_instructions.log0,
    Ops.LOG1: log_instructions.log1,
    Ops.LOG2: log_instructions.log2,
    Ops.LOG3: log_instructions.log3,
    Ops.LOG4: log_instructions.log4,
}
| nilq/baby-python | python |
import tensorflow as tf
import sys
def get_id_feature(features, key, len_key, max_len):
    """Fetch an id tensor and its length, with the length capped at max_len."""
    ids = features[key]
    lengths = tf.squeeze(features[len_key], [1])
    capped_lengths = tf.minimum(lengths, tf.constant(max_len, dtype=tf.int64))
    return ids, capped_lengths
def create_train_op(loss, hparams):
    """Build the training op: optimize *loss* with the configured optimizer,
    clipping gradients to a max global norm of 10.0 (TF 1.x contrib API)."""
    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=hparams.learning_rate,
        clip_gradients=10.0,
        optimizer=hparams.optimizer)
    return train_op
def create_model_fn(hparams, model_impl):
    """Return a tf.contrib.learn model_fn wrapping *model_impl*.

    model_impl(hparams, mode, context, context_len, utterance, utterance_len,
    targets) must return (probs, loss). TRAIN adds a training op; INFER
    returns probabilities only; EVAL scores each context against the true
    utterance plus 9 distractors.
    """
    def model_fn(features, targets, mode):
        context, context_len = get_id_feature(
            features, "context", "context_len", hparams.max_context_len)

        utterance, utterance_len = get_id_feature(
            features, "utterance", "utterance_len", hparams.max_utterance_len)

        batch_size = targets.get_shape().as_list()[0]

        if mode == tf.contrib.learn.ModeKeys.TRAIN:
            probs, loss = model_impl(
                hparams,
                mode,
                context,
                context_len,
                utterance,
                utterance_len,
                targets)
            train_op = create_train_op(loss, hparams)
            return probs, loss, train_op

        if mode == tf.contrib.learn.ModeKeys.INFER:
            probs, loss = model_impl(
                hparams,
                mode,
                context,
                context_len,
                utterance,
                utterance_len,
                None)
            return probs, 0.0, None

        if mode == tf.contrib.learn.ModeKeys.EVAL:
            # We have 10 examples per record (1 true utterance + 9
            # distractors), so we accumulate them into one big batch.
            all_contexts = [context]
            all_context_lens = [context_len]
            all_utterances = [utterance]
            all_utterance_lens = [utterance_len]
            all_targets = [tf.ones([batch_size, 1], dtype=tf.int64)]

            for i in range(9):
                distractor, distractor_len = get_id_feature(features,
                    "distractor_{}".format(i),
                    "distractor_{}_len".format(i),
                    hparams.max_utterance_len)
                all_contexts.append(context)
                all_context_lens.append(context_len)
                all_utterances.append(distractor)
                all_utterance_lens.append(distractor_len)
                all_targets.append(
                    tf.zeros([batch_size, 1], dtype=tf.int64)
                )

            probs, loss = model_impl(
                hparams,
                mode,
                tf.concat(all_contexts, 0),
                tf.concat(all_context_lens, 0),
                tf.concat(all_utterances, 0),
                tf.concat(all_utterance_lens, 0),
                tf.concat(all_targets, 0))

            split_probs = tf.split(probs, 10, 0)
            shaped_probs = tf.concat(split_probs, 1)

            # Add summaries. Bug fix: the original called tf.histogram /
            # tf.scalar, which do not exist; the summary ops live under
            # tf.summary.
            tf.summary.histogram("eval_correct_probs_hist", split_probs[0])
            tf.summary.scalar("eval_correct_probs_average", tf.reduce_mean(split_probs[0]))
            tf.summary.histogram("eval_incorrect_probs_hist", split_probs[1])
            tf.summary.scalar("eval_incorrect_probs_average", tf.reduce_mean(split_probs[1]))

            return shaped_probs, loss, None

    return model_fn
| nilq/baby-python | python |
from __future__ import unicode_literals
from celery import shared_task
from isisdata.models import *
from isisdata.tasks import _get_filtered_object_queryset
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
import logging
import smart_open
import csv
from datetime import datetime
from dateutil.tz import tzlocal
import time
from past.utils import old_div
import haystack
import math
# Column headers expected in the attribute-import CSV files processed by
# add_attributes_to_authority.
COLUMN_NAME_ATTR_SUBJ_ID = 'ATT Subj ID'
COLUMN_NAME_ATTR_RELATED_NAME = 'Related Record Name'
COLUMN_NAME_ATTR_TYPE = 'ATT Type'
COLUMN_NAME_ATTR_VALUE = 'ATT Value'
COLUMN_NAME_ATTR_DATE_FREE = 'ATT DateFree'
COLUMN_NAME_ATTR_DATE_BEGIN = 'ATT DateBegin'
COLUMN_NAME_ATTR_DATE_END = 'ATT DateEnd'
COLUMN_NAME_ATTR_PLACE_NAME = 'ATT PlaceName'
COLUMN_NAME_ATTR_PLACE_LINK = 'ATT PlaceLink'
COLUMN_NAME_ATTR_NOTES = 'ATT Notes'

# Module-level logger for all tasks in this file.
logger = logging.getLogger(__name__)
@shared_task
def reindex_authorities(user_id, filter_params_raw, task_id=None, object_type='AUTHORITY'):
    """Rebuild the haystack search index for every Authority matched by the
    filter parameters, optionally reporting progress via an AsyncTask.

    Parameters
    ----------
    user_id : id of the user who triggered the reindex.
    filter_params_raw : raw querystring used to build the filtered queryset.
    task_id : optional AsyncTask pk used for progress reporting.
    object_type : passed through to the queryset builder.
    """
    queryset, _ = _get_filtered_object_queryset(filter_params_raw, user_id, object_type)
    if task_id:
        task = AsyncTask.objects.get(pk=task_id)
        task.max_value = queryset.count()
        # Progress is written roughly every 0.5% (but at least every 2 records).
        _inc = max(2, math.floor(old_div(task.max_value, 200.)))
        task.save()
    else:
        task = None
    try:    # Report all exceptions as a task failure.
        for i, obj in enumerate(queryset):
            if task and (i % _inc == 0 or i == (task.max_value - 1)):
                task.current_value = i
                task.save()
            haystack.connections[settings.HAYSTACK_DEFAULT_INDEX].get_unified_index().get_index(Authority).update_object(obj)
        # Bug fix: the original updated task unconditionally, raising
        # AttributeError whenever task_id was None.
        if task:
            task.state = 'SUCCESS'
            task.save()
    except Exception as E:
        # Bug fix: the original print() mislabelled this task as
        # "bulk_update_citations"; log through the module logger instead.
        logger.exception('reindex_authorities failed for %s: %s', filter_params_raw, E)
        if task:
            task.state = 'FAILURE'
            task.save()
@shared_task
def merge_authorities(file_path, error_path, task_id, user_id):
    """Merge duplicate Authority records listed in a CSV file.

    For each row, all attributes, linked-data entries, and ACRelations of the
    duplicate record are repointed at the master record, and the duplicate is
    turned into a redirect to the master. Per-row results (successes and
    errors) are written to *error_path*; progress is tracked on the AsyncTask
    identified by *task_id*.
    """
    logging.info('Merging duplicate authorities and redirecting.')

    SUCCESS = 'SUCCESS'
    ERROR = 'ERROR'

    # Expected CSV column headers.
    COL_MASTER_AUTH = 'CBA ID Master'
    COL_DUPLICATE_AUTH = 'CBA ID Duplicate'
    COL_NOTE = 'Note'

    with smart_open.open(file_path, 'rb', encoding='utf-8') as f:
        reader = csv.reader(f)
        task = AsyncTask.objects.get(pk=task_id)

        results = []
        row_count = _count_rows(f, results)
        task.max_value = row_count
        task.save()

        current_count = 0
        not_matching_subject_names = []
        current_time_obj = datetime.now(tzlocal())
        try:
            for row in csv.DictReader(f):
                master_id = row[COL_MASTER_AUTH]
                duplicate_id = row[COL_DUPLICATE_AUTH]
                note = row[COL_NOTE]

                try:
                    master = Authority.objects.get(pk=master_id)
                except Exception as e:
                    logger.error('Authority with id %s does not exist. Skipping.' % (master_id))
                    results.append((ERROR, master_id, 'Authority record does not exist.', ""))
                    current_count = _update_count(current_count, task)
                    continue

                try:
                    duplicate = Authority.objects.get(pk=duplicate_id)
                except Exception as e:
                    logger.error('Authority with id %s does not exist. Skipping.' % (duplicate_id))
                    results.append((ERROR, duplicate_id, 'Authority record does not exist.', ""))
                    current_count = _update_count(current_count, task)
                    continue

                # Repoint every dependent object of the duplicate at the master.
                # NOTE(review): _add_change_note is called with field name
                # 'source' for all three relation types, although linked data
                # uses .subject and ACRelations use .authority — looks like a
                # copy-paste; verify intended audit field names.
                for attr in duplicate.attributes.all():
                    attr.source = master
                    _add_change_note(attr, task_id, 'source', 'source', master_id, duplicate_id, user_id, current_time_obj)
                    attr.record_history += '\n' + note
                    attr.save()

                for ld in duplicate.linkeddata_entries.all():
                    ld.subject = master
                    _add_change_note(ld, task_id, 'source', 'source', master_id, duplicate_id, user_id, current_time_obj)
                    ld.record_history += '\n' + note
                    ld.save()

                for acr in duplicate.acrelations.all():
                    acr.authority = master
                    _add_change_note(acr, task_id, 'source', 'source', master_id, duplicate_id, user_id, current_time_obj)
                    acr.record_history += '\n' + note
                    acr.save()

                # change duplicate record to redirect
                duplicate.redirect_to = master
                old_status = duplicate.record_status_value
                duplicate.record_status_value = CuratedMixin.REDIRECT
                _add_change_note(duplicate, task_id, 'record_status_value', 'record_status_value', "Redirect to %s"%(master_id), old_status, user_id, current_time_obj)
                duplicate.record_history += '\n' + note
                duplicate.save()

                results.append((SUCCESS, "Records Merged", "%s and %s were successfully merged. Master is %s."%(master_id, duplicate_id, master_id), ""))
                current_count = _update_count(current_count, task)
        except Exception as e:
            logger.error("There was an unexpected error processing the CSV file.")
            logger.exception(e)
            results.append((ERROR, "unexpected error", "There was an unexpected error processing the CSV file: " + repr(e), ""))

        _save_results(error_path, results, ('Type', 'Title', 'Message', ''))

        task.state = 'SUCCESS'
        task.save()
@shared_task
def add_attributes_to_authority(file_path, error_path, task_id, user_id):
    """Bulk-create Attribute records on Authority records from a CSV file.

    Each row names a subject authority, an attribute type, and a value
    (free-form date, date range, or place). Per-row outcomes are written to
    *error_path*; progress is tracked on the AsyncTask given by *task_id*.
    """
    logging.info('Adding attributes from %s.' % (file_path))
    # this is a hack but the best I can come up with right now :op
    logging.debug('Make AuthorityValue exists in ContentType table...')
    ContentType.objects.get_or_create(model='authorityvalue', app_label='isisdata')

    SUCCESS = 'SUCCESS'
    ERROR = 'ERROR'

    with smart_open.open(file_path, 'rb', encoding='utf-8') as f:
        reader = csv.reader(f)
        task = AsyncTask.objects.get(pk=task_id)

        results = []
        row_count = _count_rows(f, results)
        task.max_value = row_count
        task.save()

        current_count = 0
        not_matching_subject_names = []
        current_time_obj = datetime.now(tzlocal())
        try:
            for row in csv.DictReader(f):
                subject_id = row[COLUMN_NAME_ATTR_SUBJ_ID]
                try:
                    authority = Authority.objects.get(pk=subject_id)
                except Authority.DoesNotExist:
                    logger.error('Authority with id %s does not exist. Skipping attribute.' % (subject_id))
                    results.append((ERROR, subject_id, subject_id, 'Authority record does not exist.'))
                    current_count = _update_count(current_count, task)
                    continue

                # Record (but do not reject) rows whose name column disagrees
                # with the authority's stored name.
                related_name = row[COLUMN_NAME_ATTR_RELATED_NAME]
                if authority.name != related_name:
                    not_matching_subject_names.append((subject_id, authority.name, related_name))

                attribute_type = row[COLUMN_NAME_ATTR_TYPE]
                atype = AttributeType.objects.filter(name=attribute_type)
                if not atype:
                    logger.error('Attribute type with name %s does not exist. Skipping attribute.' % (attribute_type))
                    results.append((ERROR, subject_id, attribute_type, 'Attribute type does not exist.'))
                    current_count = _update_count(current_count, task)
                    continue

                # we can be pretty sure there is just one
                atype = atype.first()
                # get source content type (authority in this case)
                ctype = ContentType.objects.filter(model=type(authority).__name__.lower()).first()

                # content type of value
                vctype = atype.value_content_type
                avmodel_class = vctype.model_class()

                att_init_values = {
                    'type_controlled': atype,
                    'source_content_type': ctype,
                    'source_instance_id': subject_id,
                    'value_freeform': row[COLUMN_NAME_ATTR_DATE_FREE],
                    'administrator_notes': row[COLUMN_NAME_ATTR_NOTES]
                }

                val_init_values = {}
                if row[COLUMN_NAME_ATTR_VALUE]:
                    val_init_values.update({
                        'value': row[COLUMN_NAME_ATTR_VALUE]
                    })

                if row[COLUMN_NAME_ATTR_DATE_BEGIN]:
                    val_init_values.update({
                        'start': ISODateValue.convert(row[COLUMN_NAME_ATTR_DATE_BEGIN])
                    })

                if row[COLUMN_NAME_ATTR_DATE_END]:
                    val_init_values.update({
                        'end': ISODateValue.convert(row[COLUMN_NAME_ATTR_DATE_END])
                    })

                if row[COLUMN_NAME_ATTR_PLACE_NAME]:
                    val_init_values.update({
                        'name': row[COLUMN_NAME_ATTR_PLACE_NAME]
                    })
                    att_init_values['value_freeform'] = row[COLUMN_NAME_ATTR_PLACE_NAME]

                if row[COLUMN_NAME_ATTR_PLACE_LINK]:
                    try:
                        place = Authority.objects.get(pk=row[COLUMN_NAME_ATTR_PLACE_LINK])
                        val_init_values.update({
                            'value': place
                        })
                    except:
                        logger.error('Authority with id %s does not exist.' % (row[COLUMN_NAME_ATTR_PLACE_LINK]))
                        results.append((ERROR, subject_id, row[COLUMN_NAME_ATTR_PLACE_LINK], 'Adding place link. Authority does not exist.'))
                        current_count = _update_count(current_count, task)
                        continue

                _add_creation_note(att_init_values, task_id, user_id, current_time_obj)

                attribute = Attribute(**att_init_values)
                attribute.save()
                results.append((SUCCESS, subject_id, attribute.id, 'Added'))

                val_init_values.update({
                    'attribute': attribute
                })
                value = avmodel_class(**val_init_values)
                value.save()

                current_count = _update_count(current_count, task)
        except Exception as e:
            logger.error("There was an unexpected error processing the CSV file.")
            logger.exception(e)
            results.append((ERROR, "unexpected error", "", "There was an unexpected error processing the CSV file: " + repr(e)))

        _save_results(error_path, results, ('Type', 'ATT Subj ID', 'Affected object', 'Message'))

        task.state = 'SUCCESS'
        task.save()
def _add_creation_note(properties, task_id, user_id, created_on):
    """Stamp bulk-creation provenance (record history + modifier) onto a
    properties dict, in place."""
    user = User.objects.get(pk=user_id)
    timestamp = created_on.strftime("%m/%d/%y %r %Z")
    history = "This record was created as part of the bulk creation #%s by %s on %s."%(task_id, user.username, timestamp)
    properties.update({
        RECORD_HISTORY: history,
        'modified_by_id': user_id,
    })
# Lookup tables driving the bulk-change task (update_elements).

# Names accepted in the CSV "Table" column mapped to model classes.
ELEMENT_TYPES = {
    'Attribute': Attribute,
    'LinkedData': LinkedData,
}

# Model fields that may be edited through the bulk-change CSV.
ALLOWED_FIELDS = {
    Attribute: ['description', 'value_freeform', 'value__value', 'record_status_value', 'record_status_explanation'],
    LinkedData: ['description', 'universal_resource_name', 'resource_name', 'url', 'administrator_notes', 'record_status_value', 'record_status_explanation'],
    ACRelation: ['citation_id', 'authority_id', 'name_for_display_in_citation', 'description', 'type_controlled', 'data_display_order', 'confidence_measure','administrator_notes', 'record_status_value', 'record_status_explanation'],
    CCRelation: ['subject_id', 'object_id', 'name', 'description', 'type_controlled', 'belongs_to_id', 'data_display_order', 'administrator_notes', 'record_status_value', 'record_status_explanation']
}

# CSV column header -> model field. Special prefixes (handled in
# update_elements): 'a__b' targets field b on related object a;
# 'typed:' resolves the value to an Authority/Citation by id prefix;
# 'find:Model:lookup:target[:multi]' resolves via a queryset lookup.
FIELD_MAP = {
    Attribute: {
        'ATT Description': 'description',
        'ATT Value': 'value__value',
        'ATT Value Freeform': 'value_freeform',
        'ATT Status': 'record_status_value',
        'ATT RecordStatusExplanation': 'record_status_explanation',
        'ATT DateFree': 'value_freeform',
        'ATT DateBegin': 'value__start',
        'ATT DateEnd': 'value__end',
        'ATT PlaceName' : 'value__name',
        'ATT PlaceLink' : 'value__value',
        'ATT Notes': 'administrator_notes',
    },
    LinkedData: {
        'LED URN': 'universal_resource_name',
        'LED URL': 'url',
        'LED Resource': 'resource_name',
        'LED Notes': 'administrator_notes',
        'LED Status': 'record_status_value',
        'LED RecordStatusExplanation': 'record_status_explanation',
        'LED Subj ID': 'typed:subject',
    },
    ACRelation: {
        'ACR ID Auth': 'authority_id',
        'ACR ID Cit': 'citation_id',
        'ACR NameDisplay': 'name_for_display_in_citation',
        'ACR Type': 'type_controlled',
        'ACR DataDisplayOrder': 'data_display_order',
        'ACR ConfidenceMeasure': 'confidence_measure',
        'ACR Notes': 'administrator_notes',
        'ACR Status': 'record_status_value',
        'ACR RecordStatusExplanation': 'record_status_explanation',
    },
    CCRelation: {
        'CCR ID Cit Subj': 'subject_id',
        'CCR ID Cit Obj': 'object_id',
        'CCR Name': 'name',
        'CCR Description': 'description',
        'CCR Type': 'type_controlled',
        'CCR DisplayOrder': 'data_display_order',
        'CCR Dataset': 'find:Dataset:name:belongs_to',
        'CCR Notes': 'administrator_notes',
        'CCR Status': 'record_status_value',
        'CCR RecordStatusExplanation': 'record_status_explanation',
    },
    Authority: {
        'CBA Type': 'type_controlled',
        'CBA Name': 'name',
        'CBA Redirect': 'redirect_to_id',
        'CBA ClassCode': 'classification_code',
        'CBA ClassHier': 'classification_hierarchy',
        'CBA ClassSystem': 'classification_system',
        'CBA Description': 'description',
        'CBA Dataset': 'find:Dataset:name:belongs_to',
        'CBA Notes': 'administrator_notes',
        'CBA Status': 'record_status_value',
        'CBA RecordStatusExplanation': 'record_status_explanation',
        'CBA First': 'personal_name_first',
        'CBA Last': 'personal_name_last',
        'CBA Suff': 'personal_name_suffix',
        'CBA Preferred': 'personal_name_preferred',
    },
    Citation: {
        'CBB Type': 'type_controlled',
        'CBB Title': 'title',
        'CBB Abstract': 'abstract',
        'CBB Description': 'description',
        'CBB EditionDetails': 'edition_details',
        'CBB Language': 'find:Language:name:language:multi',
        'CBB PhysicalDetails': 'physical_details',
        'CBB IssueBegin':'part_details__issue_begin',
        'CBB IssueEnd': 'part_details__issue_end',
        'CBB IssueFreeText': 'part_details__issue_free_text',
        'CBB PageBegin': 'part_details__page_begin',
        'CBB PageEnd': 'part_details__page_end',
        'CBB PagesFreeText': 'part_details__pages_free_text',
        'CBB VolumeBegin': 'part_details__volume_begin',
        'CBB VolumeEnd': 'part_details__volume_end',
        'CBB VolumeFreeText': 'part_details__volume_free_text',
        'CBB Extent': 'part_details__extent',
        'CBB ExtentNote': 'part_details__extent_note',
        'CBB Dataset': 'find:Dataset:name:belongs_to',
        'CBB Notes': 'administrator_notes',
        'CBB Status': 'record_status_value',
        'CBB RecordStatusExplanation': 'record_status_explanation',
    }
}

# CSV column headers read by update_elements.
COLUMN_NAME_TYPE = 'Table'
COLUMN_NAME_ID = "Id"
COLUMN_NAME_FIELD = "Field"
COLUMN_NAME_VALUE = "Value"

# Field-name markers with special handling (see FIELD_MAP comment above).
ADMIN_NOTES = 'administrator_notes'
RECORD_HISTORY = 'record_history'
TYPED_PREFIX = 'typed:'
FIND_PREFIX = 'find:'
@shared_task
def update_elements(file_path, error_path, task_id, user_id):
    """Apply field-level bulk changes described in a CSV file.

    Each row names a model (Table), a pk (Id), a CSV field label (Field),
    and a new value (Value). Labels are translated through FIELD_MAP; the
    'a__b', 'typed:' and 'find:' conventions route the change to related
    objects or resolve the value to a model instance. Per-row outcomes are
    written to *error_path*; progress is tracked on the AsyncTask *task_id*.
    """
    logging.info('Updating elements from %s.' % (file_path))

    SUCCESS = 'SUCCESS'
    ERROR = 'ERROR'

    result_file_headers = ('Status', 'Type', 'Element Id', 'Message', 'Modification Date')

    with smart_open.open(file_path, 'rb', encoding='utf-8') as f:
        reader = csv.reader(f)
        task = AsyncTask.objects.get(pk=task_id)

        results = []
        row_count = _count_rows(f, results)
        task.max_value = row_count
        task.save()

        current_count = 0
        try:
            current_time_obj = datetime.now(tzlocal())
            current_time = current_time_obj.isoformat()
            for row in csv.DictReader(f):
                # update timestamp for long running processes
                current_time = datetime.now(tzlocal()).isoformat()
                elem_type = row[COLUMN_NAME_TYPE]
                element_id = row[COLUMN_NAME_ID]
                try:
                    type_class = apps.get_model(app_label='isisdata', model_name=elem_type)
                except Exception as e:
                    results.append((ERROR, elem_type, element_id, '%s is not a valid type.'%(elem_type), current_time))
                    current_count = _update_count(current_count, task)
                    continue

                try:
                    element = type_class.objects.get(pk=element_id)
                    # we need special handling of persons, this is ugly but ahh well
                    if elem_type == "Authority" and element.type_controlled == Authority.PERSON:
                        element = Person.objects.get(pk=element_id)
                except ObjectDoesNotExist:
                    results.append((ERROR, elem_type, element_id, '%s with id %s does not exist.'%(type_class, element_id), current_time))
                    current_count = _update_count(current_count, task)
                    continue

                field_to_change = row[COLUMN_NAME_FIELD]
                new_value = row[COLUMN_NAME_VALUE]

                if field_to_change in FIELD_MAP[type_class]:
                    field_in_csv = field_to_change
                    field_to_change = FIELD_MAP[type_class][field_to_change]

                    # if we change a field that directly belongs to the class
                    if '__' not in field_to_change:
                        # if there are choices make sure they are respected
                        is_valid = _is_value_valid(element, field_to_change, new_value)
                        if not is_valid:
                            results.append((ERROR, elem_type, element_id, '%s is not a valid value.'%(new_value), current_time))
                        else:
                            try:
                                # NOTE(review): in the ADMIN_NOTES branch
                                # old_value is never assigned before the
                                # _add_change_note call below — that path
                                # appears to raise NameError (caught by the
                                # except) — verify.
                                if field_to_change == ADMIN_NOTES:
                                    _add_to_administrator_notes(element, new_value, task.id, user_id, current_time_obj)
                                else:
                                    # in some cases we have authority or citation as relation
                                    # this is in cases like subject of linkeddata
                                    # it needs to be amended if there are objects that can link to other types
                                    # than authorities/citations
                                    if field_to_change.startswith(TYPED_PREFIX):
                                        field_to_change = field_to_change[len(TYPED_PREFIX):]
                                        if new_value.startswith(Authority.ID_PREFIX):
                                            linked_element = Authority.objects.get(pk=new_value)
                                        else:
                                            linked_element = Citation.objects.get(pk=new_value)
                                        new_value = linked_element
                                    if field_to_change.startswith(FIND_PREFIX):
                                        field_to_change, new_value = _find_value(field_to_change, new_value, element)
                                    # check if field to change is a ManyToManyField (IEXP-232)
                                    if isinstance(element.__class__.__dict__[field_to_change], models.fields.related_descriptors.ManyToManyDescriptor):
                                        # all this is really ugly, but we have to store the old list for the
                                        # administrator notes
                                        old_value = element.__getattribute__(field_to_change).all()
                                        old_value_list = list(old_value)
                                        element.__getattribute__(field_to_change).add(new_value)
                                        new_value = list(element.__getattribute__(field_to_change).all())
                                        old_value = old_value_list
                                    else:
                                        old_value = getattr(element, field_to_change)
                                        setattr(element, field_to_change, new_value)
                                    # some fields need special handling
                                    _specific_post_processing(element, field_to_change, new_value, old_value)
                                _add_change_note(element, task.id, field_in_csv, field_to_change, new_value, old_value, user_id, current_time_obj)
                                setattr(element, 'modified_by_id', user_id)
                                element.save()
                                results.append((SUCCESS, element_id, field_in_csv, 'Successfully updated', element.modified_on))
                            except Exception as e:
                                logger.error(e)
                                logger.exception(e)
                                results.append((ERROR, elem_type, element_id, 'Something went wrong. %s was not changed.'%(field_to_change), current_time))
                    # otherwise: field lives on a related object ('a__b')
                    else:
                        # NOTE(review): `object` shadows the builtin here.
                        object, field_name = field_to_change.split('__')
                        try:
                            object_to_change = getattr(element, object)
                            object_to_update_timestamp = object_to_change
                            # if we have an attribute, we need to convert the value first
                            if type_class == Attribute:
                                object_to_change = object_to_change.get_child_class()
                                object_to_update_timestamp = element
                                if field_name in ['value', 'start', 'end']:
                                    new_value = object_to_change.__class__.convert(new_value)
                            # this is a hack, but ahh well
                            if type(object_to_change) == PartDetails:
                                object_to_update_timestamp = element
                            # if there are choices make sure they are respected
                            is_valid = _is_value_valid(object_to_change, field_name, new_value)
                            if not is_valid:
                                results.append((ERROR, elem_type, element_id, '%s is not a valid value.'%(new_value), current_time))
                            else:
                                old_value = getattr(object_to_change, field_name)
                                if field_to_change == ADMIN_NOTES:
                                    _add_to_administrator_notes(object_to_change, new_value, task.id, user_id, current_time_obj)
                                    old_value = old_value[:10] + "..."
                                else:
                                    setattr(object_to_change, field_name, new_value)
                                object_to_change.save()
                                _add_change_note(object_to_update_timestamp, task.id, field_in_csv, field_name, new_value, old_value, user_id, current_time_obj)
                                setattr(object_to_update_timestamp, 'modified_by_id', user_id)
                                object_to_update_timestamp.save()
                                results.append((SUCCESS, element_id, field_in_csv, 'Successfully updated', object_to_update_timestamp.modified_on))
                        except Exception as e:
                            logger.error(e)
                            logger.exception(e)
                            # NOTE(review): `type` here is the builtin, not the
                            # row's type name — probably meant elem_type.
                            results.append((ERROR, type, element_id, 'Field %s cannot be changed. %s does not exist.'%(field_to_change, object), current_time))
                else:
                    results.append((ERROR, elem_type, element_id, 'Field %s cannot be changed.'%(field_to_change), current_time))
                current_count = _update_count(current_count, task)
        except KeyError as e:
            logger.exception("There was a column error processing the CSV file.")
            results.append((ERROR, "column error", "", "There was a column error processing the CSV file. Have you provided the correct column headers? " + repr(e), current_time))
        except Exception as e:
            logger.error("There was an unexpected error processing the CSV file.")
            logger.exception(e)
            results.append((ERROR, "unexpected error", "", "There was an unexpected error processing the CSV file: " + repr(e), current_time))

        _save_csv_file(error_path, result_file_headers, results)

        task.state = 'SUCCESS'
        task.save()
def _specific_post_processing(element, field_name, new_value, old_value):
    """Apply model-specific follow-up actions after a bulk field change.

    Two cases are handled:
    * An Authority whose type becomes PERSON gains a Person child row.
    * A Citation that becomes an article/book/review/chapter/thesis gets an
      empty PartDetails attached so page/volume data can be stored.
    """
    # turn authority non-person into person
    if type(element) == Authority and field_name == 'type_controlled':
        if new_value == Authority.PERSON and old_value != Authority.PERSON:
            try:
                # is object already a person
                # NOTE(review): bare attribute access is used as an existence
                # probe — presumably Person is a multi-table-inheritance child
                # of Authority, so this raises Person.DoesNotExist when absent.
                element.person
            except Person.DoesNotExist:
                # if not make it one: create the child row sharing the pk and
                # copy over the parent's field values
                person = Person(authority_ptr_id=element.pk)
                person.__dict__.update(element.__dict__)
                person.save()
    if type(element) == Citation and field_name == 'type_controlled':
        if new_value in [Citation.ARTICLE, Citation.BOOK, Citation.REVIEW, Citation.CHAPTER, Citation.THESIS]:
            # attach an (unsaved) PartDetails holder if none exists yet
            if not hasattr(element, 'part_details'):
                element.part_details = PartDetails()
# to specify a find operation, fields need to be in format find:type:field:linking_field (e.g. find:Dataset:name:belongs_to_id)
def _find_value(field_to_change, new_value, element):
    """Resolve a "find:" field spec to (linking_field_name, linked_object).

    *field_to_change* has the form ``find:Model:lookup_field:linking_field[:multi]``.
    The first ``isisdata.Model`` instance whose ``lookup_field`` equals
    *new_value* is returned together with the name of the field it should be
    linked through.  Returns ``(linking_field, None)`` when nothing matches.
    """
    field_parts = field_to_change.split(":")
    model = apps.get_model("isisdata." + field_parts[1])
    filter_params = {field_parts[2]: new_value}
    linked_element = model.objects.filter(**filter_params).first()
    # IEXP-232: for "multi" fields the old list must not be replaced wholesale;
    # callers add the returned element to the existing relation instead.  The
    # dead read of the old value that supported the abandoned replace approach
    # has been removed.
    return field_parts[3], linked_element
def _get_old_multi_value(field_to_change, element):
field_parts = field_to_change.split(":")
print(field_parts)
if len(field_parts) <= 4 or field_parts[4] != "multi":
return None
print(field_parts[3])
getattr(element, field_parts[3])
def _add_to_administrator_notes(element, value, task_nr, modified_by, modified_on):
    """Append *value* to the element's administrator notes with an attribution line."""
    user = User.objects.get(pk=modified_by)
    stamp = modified_on.strftime("%m/%d/%y %r %Z")
    header = "%s added the following in bulk change #%s on %s:" % (user.username, task_nr, stamp)
    # separate from any existing notes with a blank line
    note = getattr(element, ADMIN_NOTES)
    if note:
        note += '\n\n'
    setattr(element, ADMIN_NOTES, note + header + '\n' + value)
def _add_change_note(element, task_nr, field, field_name, value, old_value, modified_by, modified_on):
    """Append a human-readable change entry to the element's record history."""
    user = User.objects.get(pk=modified_by)
    stamp = modified_on.strftime("%m/%d/%y %r %Z")
    existing = getattr(element, RECORD_HISTORY)
    prefix = existing + '\n\n' if existing else ''
    entry = 'This record was changed as part of bulk change #%s. "%s" was changed from "%s" to "%s" by %s on %s.' % (task_nr, field, old_value, value, user.username, stamp)
    setattr(element, RECORD_HISTORY, prefix + entry)
    # record the acting user for the history framework
    element._history_user = user
def _is_value_valid(element, field_to_change, new_value):
if ":" in field_to_change:
return True
choices = element._meta.get_field(field_to_change).choices
if choices:
if new_value not in dict(choices):
return False
return True
def _update_count(current_count, task):
current_count += 1
task.current_value = current_count
task.save()
return current_count
def _count_rows(f, results):
# we want to avoid loading everything in memory, in case it's a large file
# we do not count the header, so we start at -1
row_count = -1
try:
for row in csv.DictReader(f):
row_count += 1
except Exception as e:
logger.error("There was an unexpected error processing the CSV file.")
logger.exception(e)
results.append(('ERROR', "unexpected error", "", "There was an unexpected error processing the CSV file: " + repr(e)))
# reset file cursor to first data line
f.seek(0)
return row_count
def _save_csv_file(path, headers, data):
    """Write *headers* followed by the *data* rows as CSV at *path*."""
    with smart_open.open(path, 'w') as out:
        writer = csv.writer(out)
        writer.writerow(headers)
        writer.writerows(data)
def _save_results(path, results, headings):
    """Write *headings* followed by the *results* rows as CSV at *path*."""
    with smart_open.open(path, 'w') as out:
        writer = csv.writer(out)
        writer.writerow(headings)
        writer.writerows(results)
| nilq/baby-python | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import time
import functools
import random
from multiprocessing import Pool
import cProfile
import pstats
try:
import networkx as nx
except ImportError:
print('This script requires NetworkX to be installed.')
exit(1)
try:
import vkontakte
except ImportError:
print('This script requires vkontakte package to be installed.')
print('Download and install it from https://github.com/budnyjj/vkontakte3')
exit(1)
import graph.io as io
import utils.print as gprint
INIT_TIME_TO_SLEEP_MIN = 0.2
INIT_TIME_TO_SLEEP_MAX = 2
TIME_TO_SLEEP_MAX = 5
TIME_TO_SLEEP_FACTOR = 2
def write_time_profiling_data(profiler, filename):
    """Dump the profiler's accumulated statistics to *filename* (pstats format)."""
    stats = pstats.Stats(profiler)
    print('Write time profiling information '
          'to: {0}.\n'.format(filename))
    stats.dump_stats(filename)
def args_are_valid(args):
    """Validate cli arguments.

    Raise ValueError if they are not correct.
    """
    # guard clauses: reject each bad value outright
    if args.recursion_level <= 0:
        print('Recursion level should be greater than zero!\n')
        raise ValueError
    if args.pool_size <= 0:
        print('Pool size should be greater than zero!\n')
        raise ValueError
    print('Provided arguments are seem to be correct...\n')
def get_profile(uid, req_fields='first_name, last_name, sex',
                max_err_count=5):
    """Get information (profile) about user with specified uid.

    Retries vk.com rate-limit errors (error code 6) with exponentially
    increasing sleep, up to *max_err_count* attempts.

    Returns the profile dict on success, or None on any failure.
    """
    answer = None
    error_count = 0
    # used to delay request with errors
    time_to_sleep = random.uniform(INIT_TIME_TO_SLEEP_MIN,
                                   INIT_TIME_TO_SLEEP_MAX)
    while True:
        try:
            # get only first element of list
            answer = VK.getProfiles(uids=uid,
                                    fields=req_fields)[0]
        except vkontakte.VKError as e:
            print('E: profile {}:'.format(uid))
            # code 6: too many requests per second
            if e.code == 6:
                error_count += 1
                print(' Vk.com bandwith limitations. ', end='')
                if error_count <= max_err_count:
                    print('Lets try again in {0}s '
                          '(#{1})...'.format(time_to_sleep, error_count))
                    # Need to sleep due to vk.com bandwidth limitations
                    time.sleep(time_to_sleep)
                    if time_to_sleep <= TIME_TO_SLEEP_MAX:
                        # exponentially increase time_to_sleep
                        time_to_sleep *= TIME_TO_SLEEP_FACTOR
                else:
                    print('Reached maximal bandwith error count ({0})! '
                          'Skip...'.format(error_count))
                    return None
            else:
                # any other API error is fatal for this uid
                print(' {}.'.format(e.description))
                return None
        except Exception as e:
            print('E: profile {}:'.format(uid))
            print(' {}.'.format(e))
            return None
        else:
            print('S: profile {uid}: '
                  '{first_name} {last_name}.'.format(**answer))
            return answer
def get_friends(profile, req_fields='first_name, last_name, sex',
                max_err_count=5):
    """Get list with friend profiles of user with specified profile.

    Retries vk.com rate-limit errors (error code 6) with exponentially
    increasing sleep, up to *max_err_count* attempts.

    Returns a list of friend profile dicts, or an empty list on failure.
    """
    answer = None
    error_count = 0
    # used to delay request with errors
    time_to_sleep = random.uniform(INIT_TIME_TO_SLEEP_MIN,
                                   INIT_TIME_TO_SLEEP_MAX)
    while True:
        try:
            # get only first element of list
            answer = VK.friends.get(uid=profile['uid'],
                                    fields=req_fields)
        except vkontakte.VKError as e:
            print('E: friends of {uid} '
                  '({first_name} {last_name}):'.format(**profile))
            if e.code == 6:  # bandwith limitations
                error_count += 1
                print(' Vk.com bandwith limitations. ', end='')
                if error_count <= max_err_count:
                    print('Lets try again in '
                          '{0}s (#{1})...'.format(time_to_sleep, error_count))
                    # Need to sleep due to vk.com bandwidth limitations
                    time.sleep(time_to_sleep)
                    if time_to_sleep <= TIME_TO_SLEEP_MAX:
                        # exponentially increase time_to_sleep
                        time_to_sleep *= TIME_TO_SLEEP_FACTOR
                else:
                    print(' Reached maximal bandwith error count ({0})! '
                          'Skip...'.format(error_count))
                    return []
            else:
                # any other API error is fatal for this profile
                print(' {}.'.format(e.description))
                return []
        except Exception as e:  # unknown error occured
            print('E: friends of {uid} '
                  '({first_name} {last_name}):'.format(**profile))
            print(' {}.'.format(e))
            return []
        else:  # got friends without errors
            print('S: {number} friends of {uid}: '
                  '({first_name} {last_name}).'.format(
                      number=len(answer), **profile))
            return answer
def get_num_followers(uid, max_err_count=5):
    """Get number of followers of user with specified UID.

    Retries vk.com rate-limit errors (error code 6) with exponentially
    increasing sleep, up to *max_err_count* attempts.

    Return -1 if cannot do so.
    """
    answer = None
    error_count = 0
    # used to delay request with errors
    time_to_sleep = random.uniform(INIT_TIME_TO_SLEEP_MIN,
                                   INIT_TIME_TO_SLEEP_MAX)
    while True:
        try:
            # count=0 asks only for the total, not the follower list itself
            answer = VK.subscriptions.getFollowers(uid=uid,
                                                   count=0)['count']
        except vkontakte.VKError as e:
            print('E: followers of {}:'.format(uid))
            if e.code == 6:
                error_count += 1
                print(' Vk.com bandwith limitations. ', end='')
                if error_count <= max_err_count:
                    print('Lets try again in '
                          '{0}s (#{1})...'.format(time_to_sleep, error_count))
                    # Need to sleep due to vk.com bandwidth limitations
                    time.sleep(time_to_sleep)
                    if time_to_sleep <= TIME_TO_SLEEP_MAX:
                        # exponentially increase time_to_sleep
                        time_to_sleep *= TIME_TO_SLEEP_FACTOR
                else:
                    print('Reached maximal bandwith error count ({0})! '
                          'Skip...'.format(error_count))
                    return -1
            else:
                # any other API error is fatal for this uid
                print(' {}.'.format(e.description))
                return -1
        except Exception as e:
            print('E: followers of {}:'.format(uid))
            print(' {}.'.format(e))
            return -1
        else:
            print('S: user {} has {} followers.'.format(uid, answer))
            return answer
def strip_attributes(node, preserve_attrs):
    """Remove every data attribute not listed in *preserve_attrs* (in place)."""
    data = node[1]
    # snapshot the keys so the dict can be mutated while scanning
    for key in list(data):
        if key not in preserve_attrs:
            data.pop(key)
    return node
def profile_to_node(src_profile):
    """Convert a vk.com profile dict into an (uid, attributes) graph node."""
    uid = src_profile['uid']
    return uid, src_profile
def build_edges(src_profile, dst_profiles):
    """Create the set of (src_uid, dst_uid) edges, compatible with NX graph format."""
    src_uid = src_profile['uid']
    return {(src_uid, dst['uid']) for dst in dst_profiles}
def construct_graph(uids, required_attributes=('first_name',
                                               'last_name',
                                               'sex'),
                    with_num_followers=False,
                    max_recursion_level=1, pool_size=1,
                    time_profiler=None):
    """Get and build the friendship graph for the specified uids.

    Parameters
    ----------
    uids : list of vk.com user ids to start from.
    required_attributes : profile fields requested for every node.
    with_num_followers : also fetch the follower count per expanded user.
    max_recursion_level : how many friend-of-friend levels to expand.
    pool_size : number of worker processes for the API requests.
    time_profiler : optional cProfile.Profile; profiling is suspended while
        worker pools run (the forked children would skew the numbers).

    Returns
    -------
    networkx.Graph with uid nodes and friendship edges.

    BUG FIXES relative to the previous version:
    * the initial profiles were fetched from the global ``args.uids`` instead
      of the ``uids`` parameter, making the parameter dead;
    * follower counts were written back via ``all_uids.index(all_uids[i])``
      (always ``i``), attaching counts to the wrong nodes whenever some nodes
      lacked 'friends_total'.
    """
    # get list of profiles using get_profile() in multiple processes
    def _get_init_profiles(uids, attrs_string):
        print('Get init profiles...\n')
        # get_profile() with required data attributes
        req_get_profile = functools.partial(get_profile,
                                            req_fields=attrs_string)
        init_profiles = []
        if pool_size == 1:
            # no need to organize pool
            init_profiles = list(map(req_get_profile, uids))
        else:
            # disable profiling, because of new fork processes
            if time_profiler:
                time_profiler.disable()
            # organize multiprocessing calculations
            with Pool(processes=pool_size) as pool:
                init_profiles = list(pool.map(req_get_profile, uids))
            # enable profiling
            if time_profiler:
                time_profiler.enable()
        return init_profiles

    # get list of friend profiles, indexed by init_profiles,
    # using get_friends() in multiple processes
    def _get_friend_profiles(init_profiles, attrs_string):
        # get_friends() with required data attributes
        req_get_friends = functools.partial(get_friends,
                                            req_fields=attrs_string)
        friend_profiles = []
        if pool_size == 1:
            # no need to organize pool
            friend_profiles = list(map(req_get_friends,
                                       init_profiles))
        else:
            # disable profiling, because of new fork processes
            if time_profiler:
                time_profiler.disable()
            # organize multiprocess calculations
            with Pool(processes=pool_size) as pool:
                friend_profiles = list(pool.map(req_get_friends,
                                                init_profiles))
            # enable profiling
            if time_profiler:
                time_profiler.enable()
        print('\nThere are {0} obtained friend profiles on current level '
              'of recursion.\n'.format(sum(map(len, friend_profiles))))
        return friend_profiles

    # get information about user (node) followers and append it to nodes
    # using get_num_followers in multiple processes
    def _get_num_followers(nodes):
        # full list of user uids
        all_uids = [node[0] for node in nodes]
        # uids of users with 'friends_total' (i.e. users that were expanded)
        uids_with_friends_total = [node[0]
                                   for node in nodes if 'friends_total' in node[1]]
        num_followers_per_uid = []
        if pool_size == 1:
            # no need to organize pool
            num_followers_per_uid = list(
                map(get_num_followers, uids_with_friends_total))
        else:
            # disable profiling, because of new fork processes
            if time_profiler:
                time_profiler.disable()
            # organize multiprocess calculations
            with Pool(processes=pool_size) as pool:
                num_followers_per_uid = list(pool.map(get_num_followers,
                                                      uids_with_friends_total))
            # enable profiling
            if time_profiler:
                time_profiler.enable()
        # append number of followers to nodes (-1 marks a failed request)
        for i, num_followers in enumerate(num_followers_per_uid):
            if num_followers >= 0:
                # FIX: map the i-th queried uid back to its node position
                req_index = all_uids.index(uids_with_friends_total[i])
                nodes[req_index][1]['followers_total'] = num_followers

    # convert list of lists to list
    def _flatten(list_of_lists):
        return [e for l in list_of_lists for e in l]

    # append information about number of friends
    # it cannot be multiprocessed for unknown reasons
    def _append_num_friends(init_profiles, friend_profiles):
        for i, init_profile in enumerate(init_profiles):
            init_profile['friends_total'] = len(friend_profiles[i])

    # append only NEW nodes from src_list to dst_list
    # without duplicates and cut data
    def _append_nodes(src_list, dst_list):
        # UID: index of node with UID in dst_list
        dst_node_indexes = {node[0]: i for i, node in enumerate(dst_list)}
        for node in src_list:
            # check,
            # if uid of source node not in list of destination uids,
            if node[0] not in dst_node_indexes:
                dst_list.append(node)
                dst_node_indexes[node[0]] = len(dst_list) - 1
            # if there is total number of friends in node,
            # then this node is newer,
            # so we need to replace older node by this
            elif 'friends_total' in node[1]:
                # replace node in dst_list with actual data
                dst_list[dst_node_indexes[node[0]]] = node

    # strip unnecessary attributes using strip_attributes(),
    # but preserve 'friends_total' and multiprocessing capabilities
    def _strip_attributes(nodes, preserve_attrs):
        # keep 'friends_total' in addition to the requested attributes
        mod_attrs = tuple(list(preserve_attrs) + ['friends_total'])
        # strip_attributes() with required data attributes
        req_strip_attributes = functools.partial(strip_attributes,
                                                 preserve_attrs=mod_attrs)
        if pool_size == 1:
            # no need to organize pool
            nodes[:] = map(req_strip_attributes, nodes)
        else:
            # disable profiling, because of new fork processes
            if time_profiler:
                time_profiler.disable()
            # organize multiprocess calculations
            with Pool(processes=pool_size) as pool:
                nodes[:] = pool.map(req_strip_attributes, nodes)
            # enable profiling
            if time_profiler:
                time_profiler.enable()
        return nodes

    # Enable profiling
    if time_profiler:
        time_profiler.enable()
    # Current level of recursion
    cur_level = 0
    # Contains all data required to build graph
    gd_accumulator = {'nodes': [], 'edges': set()}
    # Build required attributes string.
    req_attrs_string = ', '.join(required_attributes)
    # List of user profiles with requested UIDs.
    # FIX: use the 'uids' parameter, not the global 'args'.
    init_profiles = _get_init_profiles(uids, req_attrs_string)
    while cur_level < max_recursion_level:
        print('\nGet friend profiles...')
        print('Current level of recursion is {0}.\n'.format(cur_level))
        # list of friends of users, which specified in init_profiles
        friend_profiles = _get_friend_profiles(init_profiles, req_attrs_string)
        # append information about total number of friends to
        # profiles in init_profiles
        _append_num_friends(init_profiles, friend_profiles)
        print('Merge obtained friend profiles into graph data...\n')
        # temporary storage for nodes and edges, use it
        # because of optimization purpouses
        all_obtained_nodes = []
        all_obtained_edges = set()
        # iterate by init list of profile
        for i, init_profile in enumerate(init_profiles):
            all_obtained_edges.update(build_edges(init_profile,
                                                  friend_profiles[i]))
            all_obtained_nodes.extend(map(profile_to_node, friend_profiles[i]))
            all_obtained_nodes.append(profile_to_node(init_profile))
        # append obtained data to graph data accumulator
        _append_nodes(all_obtained_nodes, gd_accumulator['nodes'])
        gd_accumulator['edges'].update(all_obtained_edges)
        init_profiles = _flatten(friend_profiles)
        # disable profiling
        # NOTE(review): the profiler is disabled at the end of every loop
        # iteration but only re-enabled after the loop, so iterations after
        # the first run unprofiled — confirm whether this is intentional.
        if time_profiler:
            time_profiler.disable()
        cur_level += 1
    # Enable profiling
    if time_profiler:
        time_profiler.enable()
    _strip_attributes(gd_accumulator['nodes'], required_attributes)
    # Get number of followers
    if with_num_followers:
        print('Get number of followers per user...\n')
        _get_num_followers(gd_accumulator['nodes'])
    print('\nBuild graph with obtained data...\n')
    graph = nx.Graph()
    graph.add_nodes_from(gd_accumulator['nodes'])
    graph.add_edges_from(gd_accumulator['edges'])
    # Disable profiling
    if time_profiler:
        time_profiler.disable()
    return graph
DESCRIPTION = 'Get information about friends of user ' \
              'with specified UID in social network vk.com'

# SECURITY NOTE(review): API token hard-coded in source — it should be moved
# to an environment variable / config file and the committed token revoked.
TOKEN_VK = '2e27464b84d9a9833248daa69ac07ec4e9ef98a05' \
           '1ad62dd18dc4a51513281a8de4249170a575d40f1332'
VK = vkontakte.API(token=TOKEN_VK)

# Profile fields requested from vk.com when none are given on the CLI.
DEFAULT_ATTRIBUTES = ['first_name', 'last_name', 'sex']

# cProfile.Profile instance, created in __main__ when --time-profiling is set.
time_profiler = None
if __name__ == '__main__':
    # set cli options
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('uids', metavar='UID', type=int, nargs='+',
                        help='UID of vk.com user.')
    parser.add_argument('-w', '--write-to', metavar='PATH', type=str,
                        required=True,
                        help='file to write graph data. '
                             'It currently supports YAML and pickle formats, '
                             'swithing between them by extension.')
    parser.add_argument('-p', '--pool-size', metavar='N', type=int,
                        default=1, help='number of downloading '
                                        'threads in pool.')
    parser.add_argument('-r', '--recursion-level', metavar='N', type=int,
                        default=1, help='recursion deepness, '
                                        'use it to get friends of friends, etc.')
    parser.add_argument('--data-attributes', metavar='ATTR', type=str,
                        nargs='+', default=DEFAULT_ATTRIBUTES,
                        help='attributes for requesting from vk.com')
    parser.add_argument('--with-num-followers', action='store_true',
                        help='get number of followers per user')
    # NOTE(review): the two help fragments below concatenate to
    # "pStatscompatible" — a trailing space is missing in the first literal.
    parser.add_argument('--time-profiling', metavar='PATH', type=str,
                        help='write speed profile in pStats'
                             'compatible format to file, specified by PATH')
    # parse cli options
    args = parser.parse_args()
    try:
        # ValueError from here aborts the run via the handler below
        args_are_valid(args)
        start_time = time.time()
        if args.time_profiling:
            time_profiler = cProfile.Profile()
        print('Start constructing graph for vk.com users with UIDs:',
              ', '.join(map(str, args.uids)))
        print('Requested data attributes:', ', '.join(args.data_attributes))
        print('Recursion level:', args.recursion_level)
        print('Pool size:', args.pool_size, '\n')
        G = construct_graph(uids=args.uids,
                            required_attributes=tuple(args.data_attributes),
                            with_num_followers=args.with_num_followers,
                            max_recursion_level=args.recursion_level,
                            pool_size=args.pool_size,
                            time_profiler=time_profiler)
        print(nx.info(G), '\n')
        io.write_graph(G, args.write_to)
        if args.time_profiling:
            write_time_profiling_data(time_profiler, args.time_profiling)
    except ValueError:
        print('ValueError happened! Quitting...')
    except IOError:
        print('IOError happened! Quitting...')
    else:
        # success path only: report total elapsed time
        gprint.print_elapsed_time(time.time() - start_time)
| nilq/baby-python | python |
#!/env/bin/python
import hashlib
import json
import random
import string
import sys
import time
import zmq
from termcolor import colored
import fnode
# def check_files_node(node, my_id):
# files_my_id = {}
# delete = {}
# for i in node['file']:
# print i[0:7] + '-->>' + node['lower_bound']
# print 'i --> ' + i
# if my_id > node['lower_bound']:
# if (i <= my_id and i >= 0) or (i > node['lower_bound'] and i <= 0):
# # print i
# files_my_id[i] = node['file'][i]
# delete[i] = i
# else:
# if i <= my_id and i > node['lower_bound']:
# # print i
# files_my_id[i] = node['file'][i]
# delete[i] = i
#
# for i in delete:
# print ' DEL --> ' + i
# del node['file'][i]
#
# files_my_id = json.dumps(files_my_id)
#
# return files_my_id
def add(node, req, socket_send):
    """Handle an 'add' request: insert a joining node into the ring.

    If the new id falls inside this node's range (check == 0), send the
    joining node its bounds via an 'update' request and adopt it as the new
    lower bound; otherwise (check == -1) forward the 'add' down the ring.
    """
    fnode.printJSON(req)
    check = fnode.check_rank(node['id'], node['lower_bound'], req['msg']['id'])
    print 'CHECK --> ' + str(check)
    if check == 0:
        # files_my_id = check_files_node(node, req['msg']['id'])
        # # print files_my_id
        #
        # req_update_files = fnode.create_req('update_file',
        #                                     node['ip'] + ':' + node['port'],
        #                                     req['msg']['origin'],
        #                                     json.loads(files_my_id))
        # req_update_files_json = json.loads(req_update_files)
        # print 'Update to ' + 'tcp://' + req_update_files_json['to']
        # time.sleep(2)
        # socket_send.connect('tcp://' + req_update_files_json['to'])
        # # fnode.printJSON(req_update_json)
        # socket_send.send(req_update_files)
        # message = socket_send.recv()
        # print message
        req_update = fnode.create_req(
            'update', node['ip'] + ':' + node['port'], req['msg']['origin'], {
                'lower_bound': node['lower_bound'],
                'lower_bound_ip': node['lower_bound_ip']
            })
        req_update_json = json.loads(req_update)
        print 'Update to ' + 'tcp://' + req_update_json['to']
        # small delay so the joining node's socket is ready to receive
        time.sleep(2)
        socket_send.connect('tcp://' + req_update_json['to'])
        socket_send.send(req_update)
        message = socket_send.recv()
        print message
        # adopt the joining node as our new lower bound
        node['lower_bound'] = req['msg']['id']
        node['lower_bound_ip'] = req['msg']['origin']
        fnode.node_info(node)
    elif check == -1:
        # not our range: forward the add request to the lower-bound neighbour
        req_add = fnode.create_req(
            'add', node['ip'] + ':' + node['port'], node['lower_bound_ip'],
            {'origin': req['msg']['origin'],
             'id': req['msg']['id']})
        req_add_json = json.loads(req_add)
        socket_send.connect('tcp://' + req_add_json['to'])
        # fnode.printJSON(req_add_json)
        socket_send.send(req_add)
        message = socket_send.recv()
        print message
def update(node, req):
    """Handle an 'update' request: adopt the lower bound sent by a peer."""
    fnode.printJSON(req)
    node['lower_bound'] = req['msg']['lower_bound']
    node['lower_bound_ip'] = req['msg']['lower_bound_ip']
    print '############ UPDATE OK'
    fnode.node_info(node)
def save(node, req, socket_send):
    """Handle a 'save' request.

    Store the file locally if its id is inside this node's range (check == 0);
    otherwise forward the request to the lower-bound neighbour.
    """
    fnode.printJSON(req)
    check = fnode.check_rank(node['id'], node['lower_bound'], req['id'])
    print 'CHECK --> ' + str(check)
    if check == 0:
        fnode.file_to_ring(node, req['name'], req['data'], req['id'])
        fnode.node_info(node)
    elif check == -1:
        req_save = json.dumps({
            'req': 'save',
            'from': node['ip'] + ':' + node['port'],
            'to': node['lower_bound_ip'],
            'data': req['data'],
            'name': req['name'],
            'id': req['id']
        })
        req_save_json = json.loads(req_save)
        socket_send.connect('tcp://' + req_save_json['to'])
        # fnode.printJSON(req_add_json)
        socket_send.send(req_save)
        message = socket_send.recv()
        print message
def remove_file(node, req, socket_send):
    """Handle a 'remove' request.

    Delete the file locally if its id is inside this node's range (check == 0);
    otherwise forward the request to the lower-bound neighbour.
    """
    fnode.printJSON(req)
    check = fnode.check_rank(node['id'], node['lower_bound'], req['id'])
    print 'CHECK --> ' + str(check)
    if check == 0:
        fnode.remove_file_ring(node, req['id'])
        fnode.node_info(node)
    elif check == -1:
        req_remove = json.dumps({
            'req': 'remove',
            'from': node['ip'] + ':' + node['port'],
            'to': node['lower_bound_ip'],
            'id': req['id']
        })
        req_remove_json = json.loads(req_remove)
        socket_send.connect('tcp://' + req_remove_json['to'])
        # fnode.printJSON(req_add_json)
        socket_send.send(req_remove)
        message = socket_send.recv()
        print message
def check_file(node, file_id):
    """Return the stored info for *file_id* from the file table, else 'No file'."""
    for i in node:
        print i
        if i == file_id:
            return node[i]
            # NOTE(review): this break is unreachable after the return above
            break
    return 'No file'
def get_file(node, req, socket_send):
    """Handle a 'get' request.

    If the file is stored here, answer the requesting client directly.
    Otherwise forward the request around the ring; once it has gone full
    circle (the originating node is our lower bound) report 'No' to the
    client instead.
    """
    fnode.printJSON(req)
    check = check_file(node['file'], req['id'])
    if check != 'No file':
        # file found locally — reply straight to the client
        print colored(check, 'cyan')
        # fnode.node_info(node)
        req_send = json.dumps({
            'from': node['ip'] + ':' + node['port'],
            'to': req['client_origin'],
            'info': check
        })
        req_send_json = json.loads(req_send)
        socket_send.connect('tcp://' + req_send_json['to'])
        socket_send.send(req_send)
        message = socket_send.recv()
        print message
    else:
        print colored('File does not exist in this node :(', 'red')
        if req['node_origin'] == node['lower_bound_ip']:
            # request has traversed the whole ring — tell the client it's missing
            req_send = json.dumps({
                'from': node['ip'] + ':' + node['port'],
                'to': req['client_origin'],
                'info': 'No'
            })
            req_send_json = json.loads(req_send)
            socket_send.connect('tcp://' + req_send_json['to'])
            socket_send.send(req_send)
            message = socket_send.recv()
            print message
        else:
            # keep searching: forward the request to the lower-bound neighbour
            get_req = json.dumps({
                'req': 'get',
                'from': req['from'],
                'to': node['lower_bound_ip'],
                'id': req['id'],
                'node_origin': req['node_origin'],
                'client_origin': req['client_origin']
            })
            get_req_json = json.loads(get_req)
            socket_send.connect('tcp://' + get_req_json['to'])
            socket_send.send(get_req)
            message = socket_send.recv()
            print colored(message, 'green')
def pass_data(node, req_json):
    """Merge the file table carried in *req_json* into this node's file table."""
    for i in req_json['msg']:
        node['file'][i] = req_json['msg'][i]
    fnode.node_info(node)
def search_new_connection(node, info, socket_send):
    """Handle a departing node: relink around it or forward the notice.

    If the departing node is our lower bound, adopt its bounds; otherwise
    pass the 'new_connection' notice along the ring.
    """
    if node['lower_bound'] == info['node_id']:
        node['lower_bound'] = info['lower_bound']
        node['lower_bound_ip'] = info['lower_bound_ip']
        fnode.node_info(node)
    else:
        new_req = fnode.create_req('new_connection',
                                   node['ip'] + ':' + node['port'],
                                   node['lower_bound_ip'], info)
        new_req_json = json.loads(new_req)
        socket_send.connect('tcp://' + new_req_json['to'])
        socket_send.send(new_req)
        message = socket_send.recv()
        print colored(message, 'green')
# def update_file_list(node, req):
# for i in req['msg']:
# # print i
# node['file'][i] = req['msg'][i]
#
# fnode.node_info(node)
| nilq/baby-python | python |
from django import forms
from django.forms import SelectDateWidget
from mopga.modules.project.models import Project
class NewProject(forms.Form):
    """Form for creating a new crowd-funding project."""
    title = forms.CharField(max_length=200)
    donationGoal = forms.IntegerField(label='Donation goal', min_value=0)
    description = forms.CharField(max_length=5000, widget=forms.Textarea(),
                                  help_text='Write here a description of your project (5000 characters)')
    deadline = forms.DateField(widget=SelectDateWidget(empty_label=("Year", "Month", "Day"),
                                                       attrs=({
                                                           'style': 'width: 32%; display: inline-block; margin: 5px;'})))
    image = forms.ImageField(allow_empty_file=False)

    def clean(self):
        """Reject a submission in which every field is empty."""
        cleaned_data = super(NewProject, self).clean()
        donationGoal = cleaned_data.get('donationGoal')
        description = cleaned_data.get('description')
        # BUG FIX: the form has no 'name' field — the old lookup of 'name'
        # always returned None; the intended field is 'title'.
        title = cleaned_data.get('title')
        image = cleaned_data.get('image')
        if not title and not donationGoal and not description and not image:
            raise forms.ValidationError('Please fill all fields.')
class AddNote(forms.Form):
    """Form for rating a project with an integer note between 0 and 5."""
    note = forms.IntegerField(min_value=0, max_value=5)

    def clean(self):
        """Re-validate the note range (field validators already enforce it)."""
        cleaned_data = super(AddNote, self).clean()
        note = cleaned_data.get('note')
        # BUG FIX: clean() also runs when the field itself failed validation,
        # in which case 'note' is missing and the old unconditional comparison
        # raised TypeError ('<' between None and int) on Python 3.
        if note is not None and (note < 0 or note > 5):
            raise forms.ValidationError('The note must be between 0 and 5')
class NewComment(forms.Form):
    """Form for posting a comment; both fields are optional at field level."""
    title = forms.CharField(max_length=50, required=False)
    content = forms.CharField(max_length=200, widget=forms.Textarea(attrs={'rows': 2}), required=False)

    def clean(self):
        """Reject a completely empty submission."""
        cleaned_data = super(NewComment, self).clean()
        title = cleaned_data.get('title')
        content = cleaned_data.get('content')
        # NOTE(review): the error text says "all fields" but validation only
        # fails when BOTH are empty (either one alone passes) — confirm intent.
        if not title and not content:
            raise forms.ValidationError('Please fill all fields.')
class AddFundsProject(forms.Form):
    """Form for adding funds (in euros) to a project."""
    addfunds = forms.IntegerField(required=False, label='Funds Project ? (€)')

    class Meta:
        model = Project
        # BUG FIX: ('addfunds') is just a parenthesised string, not a tuple;
        # a one-element tuple needs a trailing comma.
        fields = ('addfunds',)
        # NOTE(review): Meta.model/Meta.fields are only honoured by ModelForm;
        # on a plain forms.Form this Meta is ignored — confirm intent.
| nilq/baby-python | python |
from ..systems import ContinuousSys
from ..tools import nd_rand_init, sigmoid
import numpy as np
class JansenRit(ContinuousSys):
    """Jansen-Rit neural mass model as a 6-dimensional continuous system.

    State layout: (x0, x1, x2) are postsynaptic potentials and (x3, x4, x5)
    their time derivatives; the optional input *p* may be a callable p(t).
    """

    def __init__(self, A=3.25, B=22, a_inv=10, b_inv=20, C=135, Crep=[1., 0.8, 0.25, 0.25], vmax=5, v0=6, r=0.56, n_points=5000, t_min=0, t_max=30):
        # A, B: excitatory / inhibitory synaptic gains.
        # a_inv, b_inv: time constants, stored inverted as rates a and b.
        # C: global connectivity constant; Crep: relative weights for C1..C4.
        # NOTE(review): Crep is a mutable default argument; it is only read
        # here so it is harmless, but a tuple would be safer.
        self.A = A
        self.B = B
        self.C = C
        self.a = 1. / a_inv
        self.b = 1. / b_inv
        # NOTE(review): duplicate assignment of self.C (same value) — harmless.
        self.C = C
        self.C1 = Crep[0] * C
        self.C2 = Crep[1] * C
        self.C3 = Crep[2] * C
        self.C4 = Crep[3] * C
        # Sigmoid parameters: vmax = max firing rate, v0 = threshold, r = slope.
        self.vmax = vmax
        self.v0 = v0
        self.r = r

        def sigm(x):
            # Potential-to-firing-rate sigmoid.
            return self.vmax * sigmoid(self.r * (x - v0))

        def ode(X, t, p=None):
            # p may be a callable external input p(t); otherwise 0 is used.
            x0, x1, x2, x3, x4, x5 = X
            p = p(t) if callable(p) else 0
            # NOTE(review): the standard Jansen-Rit formulation feeds
            # sigm(C1 * x0) into the excitatory equation; here it is
            # sigm(x1 * self.C1) — confirm this variant against the reference.
            return np.asarray([
                x3, x4, x5,
                self.A * self.a * sigm(x1 - x2) - 2 * self.a * x3 - self.a**2 * x0,
                self.A * self.a * (p + self.C2 * sigm(x1 * self.C1)) - 2 * self.a * x4 - self.a**2 * x1,
                self.B * self.b * self.C4 * sigm(self.C3 * x0) - 2 * self.b * x5 - self.b ** 2 * x2
            ])

        def rand_init():
            # Uniform random start in [-5, 5] for each of the 6 dimensions.
            return nd_rand_init(*[(-5,5)]*6)

        super().__init__(dim=6, map_func=ode, init_func=rand_init, n_points=n_points, t_min=t_min, t_max=t_max)
| nilq/baby-python | python |
# Names re-exported as the public API of this package module.
__all__ = ['Design', 'DesignTools', 'DesignSep2Phase', 'DesignSep3Phase']
| nilq/baby-python | python |
def kfilter(ar, kf):
    """Low-pass filter a 3-D array in Fourier space.

    Zeroes every Fourier mode whose wavenumber magnitude rounds to a value
    greater than *kf* (in index units), then transforms back.

    Parameters
    ----------
    ar : 3-D array-like of real values.
    kf : cutoff wavenumber.

    Returns
    -------
    Real-valued ndarray of the same shape as *ar*.

    Notes
    -----
    BUG FIX: the original tested ``np.round(kp[x, y, z])`` where ``kp`` was
    never defined (NameError at runtime); the wavenumber grid is ``km``.
    The triple Python loops are replaced with broadcast operations.
    """
    import numpy as np

    nx, ny, nz = ar.shape
    kx = np.fft.fftshift(np.fft.fftfreq(nx)) * nx
    ky = np.fft.fftshift(np.fft.fftfreq(ny)) * ny
    kz = np.fft.fftshift(np.fft.fftfreq(nz)) * nz
    # |k| on the full 3-D grid, built by broadcasting instead of loops
    km = np.sqrt(kx[:, None, None] ** 2 +
                 ky[None, :, None] ** 2 +
                 kz[None, None, :] ** 2)
    fbx = np.fft.fftshift(np.fft.fftn(ar))
    # zero all modes beyond the (rounded) cutoff
    fbx[np.round(km) > kf] = 0
    return np.real(np.fft.ifftn(np.fft.ifftshift(fbx)))
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import yaml
from pymodm import connect, fields, MongoModel, EmbeddedMongoModel
def setup_db(environment):
    """Connect pymodm to the MongoDB instance configured for *environment*."""
    cfg = parse_db_config(environment)
    default_client = cfg['clients']['default']
    uri = "mongodb://{0}/{1}".format(default_client['hosts'][0],
                                     default_client['database'])
    connect(uri)
def parse_db_config(environment):
    """Load config/mongodb.yml and return the section for *environment*.

    Uses ``yaml.safe_load``: ``yaml.load`` without an explicit Loader can
    instantiate arbitrary Python objects from the file and raises TypeError
    on PyYAML >= 6.
    """
    with open('config/mongodb.yml') as f:
        config = yaml.safe_load(f)
    return config[environment]
| nilq/baby-python | python |
import psutil
import gc
# check if memory usage is over limit
def check_memory_limit(limit: float = 90.):
    """Return True when system memory usage (percent) exceeds *limit*."""
    usage_percent = psutil.virtual_memory()[2]
    return usage_percent > limit
def memory_circuit_breaker(limit: float = 90.):
    """Garbage-collect when memory usage exceeds *limit*; abort if that fails.

    Raises:
        MemoryError: if usage is still above *limit* after a gc pass.
    """
    # if over limit, garbage collect
    if check_memory_limit(limit):
        gc.collect()
    # if still above limit, stop execution
    if check_memory_limit(limit):
        # BUG FIX: the message was a plain string, so "{limit}" was never
        # interpolated; it must be an f-string.
        raise MemoryError(f"The execution of this cell has reached {limit} %. Stopping Execution.")
# Boilerplate stuff:
from pyspark import SparkConf, SparkContext

# Single local Spark context shared by the whole job.
conf = SparkConf().setMaster('local').setAppName('DegreesOfSeparation')
sc = SparkContext(conf=conf)

# The characters we wish to find the degree of separation between:
START_CHARACTER_ID = 5306  # SpiderMan
TARGET_CHARACTER_ID = 14  # ADAM 3,031 (who?)

# Our accumulator, used to signal when we find the target character during
# our BFS traversal.
hit_counter = sc.accumulator(0)
def convert_to_bfs(line):
    """Parse one adjacency line into a BFS node: (hero_id, (connections, distance, color))."""
    fields = line.split()
    hero_id = int(fields[0])
    connections = [int(c) for c in fields[1:]]
    # The start character is the gray frontier at distance 0; every other
    # node begins white (unvisited) at "infinite" distance.
    if hero_id == START_CHARACTER_ID:
        return (hero_id, (connections, 0, 'GRAY'))
    return (hero_id, (connections, 9999, 'WHITE'))
def create_starting_rdd():
    """Load the Marvel adjacency file and map each line to a BFS node tuple."""
    input_file = sc.textFile(
        'file:///Users/brian/code/from_courses/SparkCourse/Marvel-Graph')
    return input_file.map(convert_to_bfs)
def bfs_map(node):
    """Expand a gray node: emit each neighbor at distance+1, then re-emit the
    node itself colored black.  Non-gray nodes pass through unchanged."""
    character_id, (connections, distance, color) = node
    results = []
    # If this node needs to be expanded...
    if color == 'GRAY':
        for neighbor in connections:
            # signal the driver when the target enters the frontier
            if neighbor == TARGET_CHARACTER_ID:
                hit_counter.add(1)
            results.append((neighbor, ([], distance + 1, 'GRAY')))
        # We've processed this node, so color it black
        color = 'BLACK'
    # Emit the input node so we don't lose it.
    results.append((character_id, (connections, distance, color)))
    return results
def bfs_reduce(data1, data2):
    """Merge two BFS records for the same hero: keep all edges, the minimum
    distance, and the darkest color (ties keep the first record's color)."""
    edges1, distance1, color1 = data1
    edges2, distance2, color2 = data2

    # One record may be the original node carrying the adjacency list;
    # concatenation preserves it (empty lists contribute nothing).
    edges = [*edges1, *edges2]

    # Shortest known distance wins; 9999 acts as "infinity".
    distance = min(distance1, distance2, 9999)

    # Darkest color wins: WHITE < GRAY < BLACK.
    darkness = {'WHITE': 0, 'GRAY': 1, 'BLACK': 2}
    if darkness.get(color2, -1) > darkness.get(color1, 3):
        color = color2
    else:
        color = color1

    return (edges, distance, color)
# Main program here:
iteration_rdd = create_starting_rdd()

# Up to 10 BFS rounds; each round expands the gray frontier by one hop.
for iteration in range(0, 10):
    print('Running BFS iteration# ' + str(iteration+1))

    # Create new vertices as needed to darken or reduce distances in the
    # reduce stage. If we encounter the node we're looking for as a GRAY
    # node, increment our accumulator to signal that we're done.
    mapped = iteration_rdd.flatMap(bfs_map)

    # Note that mapped.count() action here forces the RDD to be evaluated, and
    # that's the only reason our accumulator is actually updated.
    print('Processing ' + str(mapped.count()) + ' values.')

    if (hit_counter.value > 0):
        print('Hit the target character! From ' + str(hit_counter.value)
              + ' different direction(s).')
        break

    # Reducer combines data for each character ID, preserving the darkest
    # color and shortest path.
    iteration_rdd = mapped.reduceByKey(bfs_reduce)
| nilq/baby-python | python |
from aiogram.types import Message, ReplyKeyboardRemove
from aiogram.dispatcher.filters import ChatTypeFilter
from app.loader import dp
from app.keyboards import reply_bot_menu
@dp.message_handler(ChatTypeFilter("private"), commands='menu')
async def show_menu_command(msg: Message):
    # /menu (private chats only): send the bot's command menu as a reply
    # keyboard, built per-user by reply_bot_menu.
    return await msg.answer(
        'Командное меню',
        reply_markup=reply_bot_menu(msg.from_user.id)
    )
@dp.message_handler(commands='close_menu')
async def close_menu_command(msg: Message):
    # /close_menu: remove the reply keyboard from the chat.
    # Bug fix: this handler was also named show_menu_command, which
    # shadowed the /menu handler's name at module level; renamed so both
    # functions remain importable. Registration behaviour is unchanged.
    return await msg.answer(
        'Меню закрыто',
        reply_markup=ReplyKeyboardRemove()
    )
| nilq/baby-python | python |
from __future__ import print_function
from __future__ import unicode_literals
import errno
import os
from . import config
def _get_address(instance_ip):
    """Build the ssh destination string for *instance_ip*.

    Prepends "user@" only when the configured username differs from the
    current shell user (ssh would default to $USER anyway).
    """
    user = config.get('ssh.username_prefix', '') + config.get('ssh.username', '')
    if user == os.environ.get('USER'):
        # Same as the invoking user: adding it would make no difference.
        user = None
    return user + '@' + instance_ip if user else instance_ip
def connect(instance, bastion, command):
    """Resolve target (and optional bastion) addresses, assemble the ssh
    command line, publish the pieces into the config renderer, and run it.

    instance / bastion are EC2-style instance description dicts; command,
    when given, is a config template executed via os.system instead of
    the default interactive ssh.
    """
    bastion_hostname = config.get('bastion.hostname')
    if not bastion_hostname and bastion:
        # No explicit bastion hostname configured: derive it from the
        # bastion instance itself (always its public-facing IP).
        bastion_hostname = get_ip(bastion, connect_through_bastion=False)
    if bastion_hostname:
        config.add('bastion.address', _get_address(bastion_hostname))
    # With a bastion in play we target the instance's private IP.
    instance_ip = get_ip(instance, connect_through_bastion=bool(bastion_hostname))
    config.add('hostname', instance_ip)
    instance_address = _get_address(instance_ip)
    config.add('address', instance_address)
    ssh_command = ['ssh']
    if config.get('verbose'):
        ssh_command += ['-v']
    user_known_hosts_file = config.get('ssh.user_known_hosts_file')
    if user_known_hosts_file:
        ssh_command += ['-o', 'UserKnownHostsFile={}'.format(user_known_hosts_file)]
    if bastion_hostname:
        # Tunnel through the bastion via the configured ProxyCommand.
        proxy_command = config.get('ssh.proxy_command')
        ssh_command += ['-o', 'ProxyCommand={}'.format(proxy_command)]
    ssh_command += [instance_address]
    config.add('ssh.cmd', format_command(ssh_command))
    if command:
        command = config.render(command)
        print('[ssha] running {}'.format(command))
        # NOTE(review): this branch returns os.system's status while the
        # interactive branch below returns None — confirm no caller relies
        # on the return value.
        return os.system(command)
    else:
        print('[ssha] running {}'.format(config.get('ssh.cmd')))
        run(ssh_command)
def format_command(command):
    """Render an argv list as a display string, double-quoting any
    argument that contains a space."""
    quoted = [('"' + part + '"') if ' ' in part else part for part in command]
    return ' '.join(quoted)
def get_ip(instance, connect_through_bastion):
    """Pick the IP to connect to from an instance description dict.

    Through a bastion the private IP is used; otherwise the public IP,
    falling back to the private one when no public IP is assigned.
    """
    if connect_through_bastion:
        return instance['PrivateIpAddress']
    public_ip = instance.get('PublicIpAddress')
    return public_ip if public_ip else instance['PrivateIpAddress']
def run(command):
    """Fork and exec *command*, waiting for the child to exit.

    execlp is used directly (instead of subprocess) so the child fully
    inherits the controlling terminal for an interactive ssh session.
    The wait loop retries on EINTR, which occurs when the terminal is
    resized (SIGWINCH interrupts waitpid).
    """
    child_pid = os.fork()
    if child_pid == 0:
        # Child: replace this process image with the command.
        os.execlp(command[0], *command)
    else:
        while True:
            try:
                os.waitpid(child_pid, 0)
                # Fix: the child has been reaped — stop here instead of
                # looping into a guaranteed ECHILD error on the next call.
                break
            except OSError as error:
                if error.errno == errno.ECHILD:
                    # No child processes.
                    # It has exited already.
                    break
                elif error.errno == errno.EINTR:
                    # Interrupted system call.
                    # This happens when resizing the terminal.
                    pass
                else:
                    # An actual error occurred.
                    raise
| nilq/baby-python | python |
import unittest
from unittest import mock
from betfairlightweight.streaming.stream import BaseStream, MarketStream, OrderStream
from tests.unit.tools import create_mock_json
class BaseStreamTest(unittest.TestCase):
    """Unit tests for BaseStream: construction, clock handling, cache
    snapshots, latency calculation and listener-backed properties."""
    def setUp(self):
        # A mocked listener with a finite max_latency backs every test.
        self.listener = mock.Mock()
        self.listener.max_latency = 0.5
        self.stream = BaseStream(self.listener)
    def test_init(self):
        # A fresh stream mirrors its listener and starts with empty state.
        assert self.stream._listener == self.listener
        assert self.stream._initial_clk is None
        assert self.stream._clk is None
        assert self.stream._caches == {}
        assert self.stream._updates_processed == 0
        assert self.stream.time_created is not None
        assert self.stream.time_updated is not None
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._process")
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._update_clk")
    def test_on_subscribe(self, mock_update_clk, mock_process):
        # An empty message only advances the clock; "mc" payloads are processed.
        self.stream.on_subscribe({})
        mock_update_clk.assert_called_once_with({})
        self.stream.on_subscribe({"mc": {123}})
        mock_process.assert_called_once_with({123}, None)
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._update_clk")
    def test_on_heartbeat(self, mock_update_clk):
        # Heartbeats only advance the clock.
        self.stream.on_heartbeat({})
        mock_update_clk.assert_called_once_with({})
    @mock.patch("betfairlightweight.streaming.stream.BaseStream.on_update")
    def test_on_resubscribe(self, mock_on_update):
        # Resubscription is delegated to on_update.
        self.stream.on_resubscribe({})
        mock_on_update.assert_called_once_with({})
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._process")
    @mock.patch(
        "betfairlightweight.streaming.stream.BaseStream._calc_latency", return_value=0.1
    )
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._update_clk")
    def test_on_update(self, mock_update_clk, mock_calc_latency, mock_process):
        # Normal update path: clock, latency check, then processing.
        mock_response = create_mock_json("tests/resources/streaming_mcm_update.json")
        self.stream.on_update(mock_response.json())
        mock_update_clk.assert_called_with(mock_response.json())
        mock_calc_latency.assert_called_with(mock_response.json().get("pt"))
        mock_process.assert_called_with(
            mock_response.json().get("mc"), mock_response.json().get("pt")
        )
        # Latency above listener.max_latency should not raise.
        mock_calc_latency.return_value = 10
        self.stream.on_update(mock_response.json())
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._process")
    @mock.patch(
        "betfairlightweight.streaming.stream.BaseStream._calc_latency", return_value=0.1
    )
    @mock.patch("betfairlightweight.streaming.stream.BaseStream._update_clk")
    def test_on_update_no_latency(
        self, mock_update_clk, mock_calc_latency, mock_process
    ):
        # max_latency=None disables the latency warning entirely.
        data = {"pt": 12345, "mc": "trainer"}
        self.listener.max_latency = None
        self.stream.on_update(data)
        mock_update_clk.assert_called_with(data)
        mock_calc_latency.assert_called_with(data.get("pt"))
        mock_process.assert_called_with(data.get("mc"), data.get("pt"))
    def test_clear_cache(self):
        self.stream._caches = {1: "abc"}
        self.stream.clear_cache()
        assert self.stream._caches == {}
    def test_snap(self):
        # snap() returns resources for all caches, or only those whose
        # market_id is in the supplied filter list.
        market_books = self.stream.snap()
        assert market_books == []
        mock_cache = mock.Mock()
        mock_cache.market_id = "1.1"
        self.stream._caches = {"1.1": mock_cache}
        market_books = self.stream.snap()
        assert market_books == [mock_cache.create_resource()]
        market_books = self.stream.snap(["1.2"])
        assert market_books == []
        market_books = self.stream.snap(["1.1"])
        assert market_books == [mock_cache.create_resource()]
    def test_snap_dict_size_err(self):
        # Regression: snap() must tolerate the cache dict growing while it
        # iterates (create_resource mutates _caches here on purpose).
        mock_cache = mock.Mock()
        mock_cache.market_id = "1.1"
        def _change_dict(*_, **__):
            self.stream._caches["1.{}".format(len(self.stream._caches))] = mock_cache
        mock_cache.create_resource = _change_dict
        self.stream._caches = {"1.{}".format(i): mock_cache for i in range(2)}
        self.stream.snap()
    def test_on_creation(self):
        # Base hook: must be a no-op that doesn't raise.
        self.stream._on_creation()
    def test_process(self):
        # Base hook: must accept (data, publish_time) without raising.
        self.stream._process(None, None)
    def test_on_process(self):
        # Processed output is pushed onto the listener's output queue.
        self.stream.on_process([1, 2])
        self.stream.output_queue.put.assert_called_with([1, 2])
    def test_update_clk(self):
        self.stream._update_clk({"initialClk": 1234})
        assert self.stream._initial_clk == 1234
        self.stream._update_clk({"clk": 123})
        assert self.stream._clk == 123
    def test_unique_id(self):
        assert self.stream.unique_id == self.listener.stream_unique_id
    def test_output_queue(self):
        assert self.stream.output_queue == self.listener.output_queue
    def test_max_latency(self):
        assert self.stream._max_latency == self.listener.max_latency
    def test_lightweight(self):
        assert self.stream._lightweight == self.listener.lightweight
    @mock.patch("time.time", return_value=1485554805.107185)
    def test_calc_latency(self, mock_time):
        # Latency is (now - publish_time); pt is in milliseconds.
        pt = 1485554796455
        assert self.stream._calc_latency(pt) is not None
        assert abs(self.stream._calc_latency(pt) - 8.652184) < 1e-5
    def test_len(self):
        assert len(self.stream) == 0
    def test_str(self):
        assert str(self.stream) == "BaseStream"
    def test_repr(self):
        assert repr(self.stream) == "<BaseStream [0]>"
class MarketStreamTest(unittest.TestCase):
    """Unit tests for MarketStream."""
    def setUp(self):
        self.listener = mock.Mock()
        self.stream = MarketStream(self.listener)
    # Bug fix: this test was defined twice verbatim; the second definition
    # silently shadowed the first, so the redundant copy has been removed.
    @mock.patch("betfairlightweight.streaming.stream.MarketStream._process")
    @mock.patch("betfairlightweight.streaming.stream.MarketStream._update_clk")
    def test_on_subscribe(self, mock_update_clk, mock_process):
        # An empty message only advances the clock; "mc" payloads are processed.
        self.stream.on_subscribe({})
        mock_update_clk.assert_called_once_with({})
        self.stream.on_subscribe({"mc": {123}})
        mock_process.assert_called_once_with({123}, None)
    @mock.patch("betfairlightweight.streaming.stream.MarketBookCache")
    @mock.patch("betfairlightweight.streaming.stream.MarketStream.on_process")
    def test_process(self, mock_on_process, mock_cache):
        # A SUB_IMAGE creates one cache entry per market in the payload.
        sub_image = create_mock_json("tests/resources/streaming_mcm_SUB_IMAGE.json")
        data = sub_image.json()["mc"]
        self.stream._process(data, 123)
        self.assertEqual(len(self.stream), len(data))
    @mock.patch("betfairlightweight.streaming.stream.MarketBookCache")
    @mock.patch("betfairlightweight.streaming.stream.MarketStream.on_process")
    def test_process_no_market_definition(self, mock_on_process, mock_cache):
        # Markets lacking a marketDefinition must be skipped, not cached.
        sub_image_error = create_mock_json(
            "tests/resources/streaming_mcm_SUB_IMAGE_no_market_def.json"
        )
        data = sub_image_error.json()["mc"]
        self.stream._process(data, 123)
        self.assertEqual(len(data), 137)
        self.assertEqual(len(self.stream), 135)  # two markets missing marketDef
    def test_str(self):
        assert str(self.stream) == "MarketStream"
    def test_repr(self):
        assert repr(self.stream) == "<MarketStream [0]>"
class OrderStreamTest(unittest.TestCase):
    """Unit tests for OrderStream."""
    def setUp(self):
        self.listener = mock.Mock()
        self.stream = OrderStream(self.listener)
    @mock.patch("betfairlightweight.streaming.stream.OrderStream._process")
    @mock.patch("betfairlightweight.streaming.stream.OrderStream._update_clk")
    def test_on_subscribe(self, mock_update_clk, mock_process):
        # An empty message only advances the clock.
        self.stream.on_subscribe({})
        mock_update_clk.assert_called_once_with({})
        # An "oc" payload is forwarded for processing with no publish time.
        self.stream.on_subscribe({"oc": {123}})
        mock_process.assert_called_once_with({123}, None)
    @mock.patch("betfairlightweight.streaming.stream.OrderBookCache")
    @mock.patch("betfairlightweight.streaming.stream.OrderStream.on_process")
    def test_process(self, mock_on_process, mock_cache):
        # A FULL_IMAGE creates one cache entry per order book in the payload.
        fixture = create_mock_json("tests/resources/streaming_ocm_FULL_IMAGE.json")
        payload = fixture.json()["oc"]
        self.stream._process(payload, 123)
        self.assertEqual(len(self.stream), len(payload))
    def test_str(self):
        self.assertEqual(str(self.stream), "OrderStream")
    def test_repr(self):
        self.assertEqual(repr(self.stream), "<OrderStream [0]>")
| nilq/baby-python | python |
#! /usr/bin/env python3
# Copyright 2018 Red Book Connect LLC. operating as HotSchedules
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a wrapper around the jdeps tool that ships with jdk 8 or later
# The simplest way to use it is to go to a directory with jar files in it and
# call the script passing in the name of one of the jar files. The script will
# return the jar files in the directory or the jre upon which the argument jar
# file depends.
#
# More than one jar file may be passed as an argument, in which case the each
# jar file on which at least one of the arguments depends will be returned, but
# each depended-upon jar file will only be listed once.
import argparse
import subprocess
import sys

parser = argparse.ArgumentParser()
parser.add_argument(
    "target_jar",
    help="the jar whose dependencies you want",
    nargs="*")
parser.add_argument("-cp", "--classpath", default="*",
                    help="the classpath for jdeps to search for dependencies")
parser.add_argument("-f", "--file", help="the file containing the name(s) of\
 jar file(s) whose dependencies you want")
args = parser.parse_args()

# At least one jar must be supplied, either positionally or via --file.
if (not args.target_jar) and (not args.file):
    parser.print_help()
    sys.exit("at least one of target_jar and file must be specified")

jdeps_command = ["jdeps", "-cp", args.classpath, "-summary"]
# add jar names passed on command line
jdeps_command.extend(args.target_jar)
# add jar names from file (context manager closes the handle reliably)
if args.file:
    with open(args.file, 'r') as file:
        jar_file_names = file.read().splitlines()
    jdeps_command.extend(jar_file_names)

jdeps_output = subprocess.check_output(jdeps_command)
lines = jdeps_output.decode("utf-8").splitlines()
# Robustness fix: jdeps summary output can contain lines without an
# " -> " arrow (warnings, blank lines); skip them instead of raising
# IndexError on the split.
depended_jars = [line.split(" -> ")[1] for line in lines if " -> " in line]
# Each depended-upon jar is printed once, in sorted order.
for jar in sorted(set(depended_jars)):
    print(jar)
| nilq/baby-python | python |
#!/usr/bin/env python3
from baselines.common import tf_util as U
from baselines import logger
from env.LaneChangeEnv import LaneChangeEnv
from ppo_new import ppo_sgd
import random, sys, os
import numpy as np
import tensorflow as tf
# Make SUMO's bundled python tools (traci etc.) importable; SUMO_HOME
# must point at the SUMO installation directory.
if 'SUMO_HOME' in os.environ:
    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
    sys.path.append(tools)
    print('success')
else:
    sys.exit("please declare environment variable 'SUMO_HOME'")
def train(num_timesteps, is_train):
    """Build the SUMO lane-change environment and run PPO on it.

    num_timesteps: total environment steps passed to ppo_sgd.learn.
    is_train: forwarded to the learner; with False the policy graph is
    only constructed (so a checkpoint can be restored by the caller).
    Returns the resulting policy object.
    """
    from baselines.ppo1 import mlp_policy
    U.make_session(num_cpu=1).__enter__()
    def policy_fn(name, ob_space, ac_space):
        # Two hidden layers of 64 units, matching the baselines MLP default.
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                    hid_size=64, num_hid_layers=2)
    env = LaneChangeEnv()
    pi = ppo_sgd.learn(env, policy_fn,
                       max_timesteps=num_timesteps,
                       timesteps_per_actorbatch=512,
                       clip_param=0.1, entcoeff=0.0,
                       optim_epochs=16,
                       optim_stepsize=1e-4,
                       optim_batchsize=64,
                       gamma=0.99,
                       lam=0.95,
                       schedule='constant',
                       is_train=is_train)
    env.close()
    return pi
def main():
    """Either train a fresh PPO policy or replay the latest checkpoint in
    the SUMO GUI, depending on the is_train switch below."""
    # logger.configure()
    is_train = True
    model_dir = '../../tf_model/11'
    latest_checkpoint = tf.train.latest_checkpoint(model_dir)
    model_path = latest_checkpoint
    EP_MAX = 10
    EP_LEN_MAX = 1000
    if is_train:
        # train the model
        train(num_timesteps=1000000, is_train=True)
    else:
        # animate trained results
        pi = train(num_timesteps=1, is_train=False)
        U.load_state(model_path)
        env = LaneChangeEnv()
        for ep in range(EP_MAX):
            # Pick a random ego vehicle on lane 1 and highlight it.
            egoid = 'lane1.' + str(random.randint(1, 5))
            # todo set sumoseed and randomseed as fixed
            ob = env.reset(egoid=egoid, tlane=0, tfc=2, is_gui=True, sumoseed=None, randomseed=None)
            traci.vehicle.setColor(egoid, (255, 69, 0))
            ob_np = np.asarray(ob).flatten()
            for t in range(EP_LEN_MAX):
                # Deterministic (greedy) action during replay.
                ac = pi.act(stochastic=False, ob=ob_np)[0]
                ob, reward, done, info = env.step(ac)  # need modification
                ob_np = np.asarray(ob).flatten()
                is_end_episode = done and info['resetFlag']
                if is_end_episode:
                    break
# Script entry point.
if __name__ == '__main__':
    main()
| nilq/baby-python | python |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import LineCollection
# In order to efficiently plot many lines in a single set of axes,
# Matplotlib has the ability to add the lines all at once. Here is a
# simple example showing how it is done.
# Demo: draw 50 lines in one LineCollection, colour-mapped by line number.
N = 50
x = np.arange(N)
# Here are many sets of y to plot vs x
ys = [x + i for i in x]
# We need to set the plot limits, they will not autoscale
ax = plt.axes()
ax.set_xlim((np.amin(x), np.amax(x)))
ax.set_ylim((np.amin(np.amin(ys)), np.amax(np.amax(ys))))
# colors is sequence of rgba tuples
# linestyle is a string or dash tuple. Legal string values are
# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)
# where onoffseq is an even length tuple of on and off ink in points.
# If linestyle is omitted, 'solid' is used
# See matplotlib.collections.LineCollection for more information
# Make a sequence of x,y pairs
line_segments = LineCollection([list(zip(x, y)) for y in ys],
                               linewidths=(0.5, 1, 1.5, 2),
                               linestyles='solid')
# set_array supplies the scalar values used by the colormap.
line_segments.set_array(x)
ax.add_collection(line_segments)
fig = plt.gcf()
axcb = fig.colorbar(line_segments)
axcb.set_label('Line Number')
ax.set_title('Line Collection with mapped colors')
plt.sci(line_segments)  # This allows interactive changing of the colormap.
plt.show()
| nilq/baby-python | python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import configparser
import os
class ConfigParser:
    """Typed accessors over the project's config/config.ini file
    (read as UTF-8, tolerating a BOM)."""

    def __init__(self, filePath=os.getcwd() + os.sep + 'config' + os.sep + 'config.ini'):
        # Note: the default path is evaluated once, at class-definition time.
        self.fp = filePath
        self.conf = configparser.ConfigParser()
        self.conf.read(self.fp, encoding="utf-8-sig")

    def driveType(self):
        """Lower-cased [drive] type value."""
        return self.conf.get('drive', 'type').lower().strip()

    def driveVersion(self):
        """[drive] version string."""
        return self.conf.get('drive', 'version').strip()

    def filePath(self):
        """[file] path, defaulting to ./script when unset."""
        configured = self.conf.get('file', 'path').strip()
        return configured if configured else os.getcwd() + os.sep + 'script'

    def fileName(self):
        """[file] name ('' when unset)."""
        return self.conf.get('file', 'name').strip()

    def fileOutput(self):
        """[file] output directory; False when set to 'false', ./output/
        when unset."""
        configured = self.conf.get('file', 'output').strip()
        if configured == 'false':
            return False
        if not configured:
            return os.getcwd() + os.sep + 'output' + os.sep
        return configured

    def WinSize(self):
        """[window] size string."""
        return self.conf.get('window', 'size').strip()

    def WinFrequency(self):
        """[window] poll frequency, 0.5 when unset."""
        configured = self.conf.get('window', 'frequency').strip()
        return configured if configured else 0.5

    def WinImplicitly(self):
        """[window] implicit-wait value."""
        return self.conf.get('window', 'implicitly').strip()

    def PlugsFile(self):
        """[plugs] filepath."""
        return self.conf.get('plugs', 'filepath').strip()
if __name__ == '__main__':
    # Smoke test when run directly: resolve the configured script path.
    # Fix: stripped dataset-extraction residue ("| nilq/baby-python |...")
    # that was fused onto this line and made it a syntax error.
    ConfigParser().filePath()
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Clone of Nmap's first generation OS fingerprinting.
"""
import os
from scapy.data import KnowledgeBase
from scapy.config import conf
from scapy.arch import WINDOWS
# Default location of nmap's first-generation fingerprint database;
# overridable by the user via conf.nmap_base.
if WINDOWS:
    conf.nmap_base=os.environ["ProgramFiles"] + "\\nmap\\nmap-os-fingerprints"
else:
    conf.nmap_base ="/usr/share/nmap/nmap-os-fingerprints"
######################
## nmap OS fp stuff ##
######################
class NmapKnowledgeBase(KnowledgeBase):
    """KnowledgeBase that lazily parses an nmap-os-fingerprints file into
    a list of (os_name, {test_name: {field: value}}) entries."""
    def lazy_init(self):
        # Parse self.filename on first access; a missing file leaves the
        # base unset (silently).
        try:
            f=open(self.filename)
        except IOError:
            return

        self.base = []
        name = None
        try:
            for l in f:
                l = l.strip()
                # Skip blank lines and comments.
                if not l or l[0] == "#":
                    continue
                if l[:12] == "Fingerprint ":
                    # Start of a new fingerprint record: flush the previous one.
                    if name is not None:
                        self.base.append((name,sig))
                    name = l[12:].strip()
                    sig={}
                    p = self.base  # NOTE(review): apparently a dead assignment — confirm before removing
                    continue
                elif l[:6] == "Class ":
                    continue
                # Test lines look like: Tname(key=val%key=val...)
                op = l.find("(")
                cl = l.find(")")
                if op < 0 or cl < 0:
                    warning("error reading nmap os fp base file")
                    continue
                test = l[:op]
                s = map(lambda x: x.split("="), l[op+1:cl].split("%"))
                si = {}
                for n,v in s:
                    si[n] = v
                sig[test]=si
            # Flush the final record.
            if name is not None:
                self.base.append((name,sig))
        except:
            # NOTE(review): bare except deliberately treats any parse error
            # as "incompatible database"; `warning` is presumably provided
            # by scapy's namespace at runtime (it is not imported here) —
            # confirm.
            self.base = None
            warning("Can't read nmap database [%s](new nmap version ?)" % self.filename)
        f.close()
# Module-level singleton; the fingerprint file is parsed lazily on first use.
nmap_kdb = NmapKnowledgeBase(conf.nmap_base)
def TCPflags2str(f):
    """Decode a TCP flags bitmask into its letter string, most-significant
    flag first (e.g. 0x12 / SYN+ACK -> "AS")."""
    letters = "FSRPAUEC"
    decoded = ""
    for bit, letter in enumerate(letters):
        if f & (1 << bit):
            decoded = letter + decoded
    return decoded
def nmap_tcppacket_sig(pkt):
    """Build the nmap test-result dict for one TCP probe response.

    pkt is the IP layer of the reply, or None when the probe went
    unanswered (in which case only Resp=N is reported).
    """
    if pkt is None:
        return {"Resp": "N"}
    # r["Resp"] = "Y"
    return {
        "DF": "Y" if pkt.flags & 2 else "N",
        "W": "%X" % pkt.window,
        "ACK": "S++" if pkt.ack == 2 else ("S" if pkt.ack == 1 else "O"),
        "Flags": TCPflags2str(pkt.payload.flags),
        # First letter of each TCP option name, in order.
        "Ops": "".join(opt[0][0] for opt in pkt.payload.options),
    }
def nmap_udppacket_sig(S,T):
    """Build the nmap "PU" test dict from a sent UDP probe S and the ICMP
    port-unreachable reply T (None when unanswered).

    NOTE(review): IPerror/UDPerror/conf.raw_layer come from scapy's layer
    namespace; this module relies on them being available at runtime.
    """
    r={}
    if T is None:
        r["Resp"] = "N"
    else:
        # Compare the header fields echoed inside the ICMP error against
        # what was actually sent, to fingerprint quirks of the target's
        # IP stack (checksum zeroing, IP id echoing, etc.).
        r["DF"] = (T.flags & 2) and "Y" or "N"
        r["TOS"] = "%X" % T.tos
        r["IPLEN"] = "%X" % T.len
        r["RIPTL"] = "%X" % T.payload.payload.len
        r["RID"] = S.id == T.payload.payload.id and "E" or "F"
        r["RIPCK"] = S.chksum == T.getlayer(IPerror).chksum and "E" or T.getlayer(IPerror).chksum == 0 and "0" or "F"
        r["UCK"] = S.payload.chksum == T.getlayer(UDPerror).chksum and "E" or T.getlayer(UDPerror).chksum ==0 and "0" or "F"
        r["ULEN"] = "%X" % T.getlayer(UDPerror).len
        r["DAT"] = T.getlayer(conf.raw_layer) is None and "E" or S.getlayer(conf.raw_layer).load == T.getlayer(conf.raw_layer).load and "E" or "F"
    return r
def nmap_match_one_sig(seen, ref):
    """Score how well one observed test dict matches a reference entry.

    Reference values may list alternatives separated by "|".  Returns the
    fraction of observed fields that match; an unanswered probe
    (Resp=N, nothing else matching) scores a weak 0.7 against anything.
    """
    matched = sum(
        1 for key in seen.keys()
        if key in ref and seen[key] in ref[key].split("|")
    )
    if matched == 0 and seen.get("Resp") == "N":
        return 0.7
    return 1.0 * matched / len(seen.keys())
def nmap_sig(target, oport=80, cport=81, ucport=1):
res = {}
tcpopt = [ ("WScale", 10),
("NOP",None),
("MSS", 256),
("Timestamp",(123,0)) ]
tests = [ IP(dst=target, id=1)/TCP(seq=1, sport=5001, dport=oport, options=tcpopt, flags="CS"),
IP(dst=target, id=1)/TCP(seq=1, sport=5002, dport=oport, options=tcpopt, flags=0),
IP(dst=target, id=1)/TCP(seq=1, sport=5003, dport=oport, options=tcpopt, flags="SFUP"),
IP(dst=target, id=1)/TCP(seq=1, sport=5004, dport=oport, options=tcpopt, flags="A"),
IP(dst=target, id=1)/TCP(seq=1, sport=5005, dport=cport, options=tcpopt, flags="S"),
IP(dst=target, id=1)/TCP(seq=1, sport=5006, dport=cport, options=tcpopt, flags="A"),
IP(dst=target, id=1)/TCP(seq=1, sport=5007, dport=cport, options=tcpopt, flags="FPU"),
IP(str(IP(dst=target)/UDP(sport=5008,dport=ucport)/(300*"i"))) ]
ans, unans = sr(tests, timeout=2)
ans += map(lambda x: (x,None), unans)
for S,T in ans:
if S.sport == 5008:
res["PU"] = nmap_udppacket_sig(S,T)
else:
t = "T%i" % (S.sport-5000)
if T is not None and T.haslayer(ICMP):
warning("Test %s answered by an ICMP" % t)
T=None
res[t] = nmap_tcppacket_sig(T)
return res
def nmap_probes2sig(tests):
tests=tests.copy()
res = {}
if "PU" in tests:
res["PU"] = nmap_udppacket_sig(*tests["PU"])
del(tests["PU"])
for k in tests:
res[k] = nmap_tcppacket_sig(tests[k])
return res
def nmap_search(sigs):
guess = 0,[]
for os,fp in nmap_kdb.get_base():
c = 0.0
for t in sigs.keys():
if t in fp:
c += nmap_match_one_sig(sigs[t], fp[t])
c /= len(sigs.keys())
if c > guess[0]:
guess = c,[ os ]
elif c == guess[0]:
guess[1].append(os)
return guess
@conf.commands.register
def nmap_fp(target, oport=80, cport=81):
"""nmap fingerprinting
nmap_fp(target, [oport=80,] [cport=81,]) -> list of best guesses with accuracy
"""
sigs = nmap_sig(target, oport, cport)
return nmap_search(sigs)
@conf.commands.register
def nmap_sig2txt(sig):
torder = ["TSeq","T1","T2","T3","T4","T5","T6","T7","PU"]
korder = ["Class", "gcd", "SI", "IPID", "TS",
"Resp", "DF", "W", "ACK", "Flags", "Ops",
"TOS", "IPLEN", "RIPTL", "RID", "RIPCK", "UCK", "ULEN", "DAT" ]
txt=[]
for i in sig.keys():
if i not in torder:
torder.append(i)
for t in torder:
sl = sig.get(t)
if sl is None:
continue
s = []
for k in korder:
v = sl.get(k)
if v is None:
continue
s.append("%s=%s"%(k,v))
txt.append("%s(%s)" % (t, "%".join(s)))
return "\n".join(txt)
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Ben Lindsay <benjlindsay@gmail.com>
from distutils.core import setup
desc = 'A module for automating flat or hierarchical job creation and submission'
with open('README.rst', 'r') as f:
long_desc = f.read()
setup(
name = 'job_tree',
packages = ['job_tree'],
version = '0.4.2',
description = desc,
long_description = long_desc,
requires = ['pandas'],
install_requires = ['pandas'],
scripts = ['bin/job_tree'],
author = 'Ben Lindsay',
author_email = 'benjlindsay@gmail.com',
url = 'https://github.com/benlindsay/job_tree',
keywords = ['workflow', 'simulations'],
classifiers = [],
)
| nilq/baby-python | python |
#-------------------------------------#
# Python script for BEST address #
# Author: Marc Bruyland (FOD BOSA) #
# Contact: marc.bruyland@bosa.fgov.be #
# June 2019 #
#-------------------------------------#
from BEST_Lib import *
def createTestFile(inputFile, outputFile):
fileIn=open(inputFile,"r", encoding=PREFERRED_ENCODING)
fileOut=open(outputFile,"w", encoding=PREFERRED_ENCODING)
address = fileIn.readline()
cnt = 0
while address:
cnt += 1
if 'B0429918' in address or 'B0404482' in address :
fileOut.write(address)
try:
address = fileIn.readline()
except:
print("SERIOUS ERROR at line ", cnt)
break
fileIn.close()
fileOut.close()
inputFile = "RR_B.txt"
outputFile = "RR_B_Test.txt"
createTestFile(inputFile, outputFile)
| nilq/baby-python | python |
import logging
import re
from string import punctuation
import unicodedata
from nltk.corpus import stopwords
import contractions
from spellchecker import SpellChecker
from .article import get_article
from .dictionary import get_extended_dictionary, valid_one_letter_words
logger = logging.getLogger('django')
def check_spelling(page, settings):
# Validate that this is for English; currently only English is supported
language = 'en' if 'lang' not in settings else settings['lang']
article = get_article(page, settings)
custom_known_words = [] if 'known_words' not in settings else settings['known_words']
dictionary = set(list(get_extended_dictionary()) + list(custom_known_words))
if article.text:
raw_text = u'%s. %s' % (article.title, article.text)
misspelled = get_misspelled_words(raw_text, language, dictionary)
found_misspellings = len(misspelled) > 0
message = "No misspellings found" if not found_misspellings else u'Found %s misspelling(s): "%s"' % (len(misspelled), '", "'.join(misspelled))
return found_misspellings, message, {'misspelled_words': misspelled}
return False, 'No article found', {}
def is_in_dictionary(word, dictionary):
if len(word) == 1:
if word.lower() in valid_one_letter_words:
return True
else:
return (word in dictionary)
def get_simplification_options(word):
suffixes = [
{'able': ''},
{'acy': ''},
{'ant': ''},
{'al': ''},
{'ance': ''},
{'ate': ''},
{'bed': ''},
{'bility': ''},
{'bility': 'ble'},
{'bio': ''},
{'dom': ''},
{'cced': 'c'},
{'cces': 'c'},
{'ccing': 'c'},
{'dded': 'd'},
{'ddes': 'd'},
{'dding': 'd'},
{'ed': ''},
{'ed': 'e'},
{'ee': ''},
{'en': ''},
{'en': 'e'},
{'ence': ''},
{'ence': 'e'},
{'ent': ''},
{'er': ''},
{'er': 'e'},
{'erizer': ''},
{'es': ''},
{'es': 'e'},
{'esque': ''},
{'est': ''},
{'ffed': 'f'},
{'ffes': 'f'},
{'ffing': 'f'},
{'ful': ''},
{'fy': ''},
{'gged': 'g'},
{'gges': 'g'},
{'gging': 'g'},
{'hood': ''},
{'ible': ''},
{'ic': ''},
{'ical': ''},
{'ied': ''},
{'ied': 'y'},
{'ier': ''},
{'ier': 'y'},
{'ies': ''},
{'ies': 'y'},
{'iest': ''},
{'iest': 'y'},
{'ify': ''},
{'ily': ''},
{'iness': ''},
{'iness': 'y'},
{'ing': ''},
{'ing': 'e'},
{'ious': ''},
{'ise': ''},
{'ish': ''},
{'ism': ''},
{'ist': ''},
{'ity': ''},
{'ity': 'y'},
{'ive': ''},
{'ize': ''},
{'izer': ''},
{'jjed': 'j'},
{'jjes': 'j'},
{'jjing': 'j'},
{'kked': 'k'},
{'kkes': 'k'},
{'kking': 'k'},
{'less': ''},
{'like': ''},
{'lled': 'l'},
{'lles': 'l'},
{'lling': 'l'},
{'long': ''},
{'ly': ''},
{'mate': ''},
{'ment': ''},
{'mmed': 'm'},
{'mmes': 'm'},
{'mming': 'm'},
{'ness': ''},
{'nned': 'n'},
{'nnes': 'n'},
{'nning': 'n'},
{'ologist': ''},
{'ologist': 'ology'},
{'ous': ''},
{'ped': ''},
{'pped': 'p'},
{'ppes': 'p'},
{'pping': 'p'},
{'qqed': 'q'},
{'qqes': 'q'},
{'qqing': 'q'},
{'red': ''},
{'red': 're'},
{'rred': 'r'},
{'rres': 'r'},
{'rring': 'r'},
{'s': ''},
{'sion': ''},
{'ssed': 's'},
{'sses': 's'},
{'ssing': 's'},
{'tion': ''},
{'tion': 'te'},
{'tize': ''},
{'tize': 'ty'},
{'tize': 't'},
{'tted': 't'},
{'ttes': 't'},
{'tting': 't'},
{'ty': ''},
{'vved': 'v'},
{'vves': 'v'},
{'vving': 'v'},
{'ward': ''},
{'wards': ''},
{'wide': ''},
{'wise': ''},
{'worthy': ''},
{'y': ''},
{'zzed': 'z'},
{'zzes': 'z'},
{'zzing': 'z'},
]
prefixes = [
{'ante': ''},
{'anti': ''},
{'auto': ''},
{'bi': ''},
{'bio': ''},
{'bis': ''},
{'co': ''},
{'de': ''},
{'dis': ''},
{'en': ''},
{'ex': ''},
{'extra': ''},
{'hyper': ''},
{'ig': ''},
{'im': ''},
{'in': ''},
{'inter': ''},
{'ir': ''},
{'macro': ''},
{'mal': ''},
{'mega': ''},
{'micro': ''},
{'mini': ''},
{'mis': ''},
{'mono': ''},
{'multi': ''},
{'neo': ''},
{'neuro': ''},
{'non': ''},
{'omni': ''},
{'over': ''},
{'penta': ''},
{'per': ''},
{'poly': ''},
{'post': ''},
{'pre': ''},
{'pro': ''},
{'quad': ''},
{'re': ''},
{'retro': ''},
{'semi': ''},
{'socio': ''},
{'sub': ''},
{'super': ''},
{'tran': ''},
{'tri': ''},
{'un': ''},
{'under': ''},
{'uni': ''}
]
# Sort prefixes and suffixes from longest to shortest
suffixes.sort(key=lambda s: len(next(iter(s))))
suffixes.reverse()
prefixes.sort(key=lambda s: len(next(iter(s))))
prefixes.reverse()
output = []
for prefix_item in prefixes:
prefix = next(iter(prefix_item))
if word.startswith(prefix):
output.append({
'type': 'prefix',
'search': prefix,
'replace': prefix_item[prefix]
})
for suffix_item in suffixes:
suffix = next(iter(suffix_item))
if word.endswith(suffix):
output.append({
'type': 'suffix',
'search': suffix,
'replace': suffix_item[suffix]
})
return output
def apply_simplification(word, simplification):
if simplification['type'] == 'prefix':
if word.startswith(simplification['search']):
word = simplification['replace'] + word[len(simplification['search']):]
if simplification['type'] == 'suffix':
if word.endswith(simplification['search']):
word = word[:-len(simplification['search'])] + simplification['replace']
return word
def simplify_word(word, dictionary, debug=False):
log_level = logging.WARNING if debug else logging.DEBUG
logger.log(log_level, u"\n--------- Simplifying %s ---------" % (word))
possible_simplifications = get_simplification_options(word)
logger.log(log_level, "Possible simplifications: %s " % (possible_simplifications))
if len(possible_simplifications) == 0:
logger.log(log_level, "No more simplification options found, returning %s" % (word))
return word
for simplification in possible_simplifications:
applied = apply_simplification(word, simplification)
logger.log(log_level, "Applied simplification %s replaced --> %s" % (simplification, applied))
if is_in_dictionary(applied, dictionary):
logger.log(log_level, "Simplification yielded valid word %s" % (applied))
return applied
else:
drilled_down = simplify_word(applied, dictionary, debug)
if is_in_dictionary(drilled_down, dictionary):
logger.log(log_level, "Drilled down yielded valid word %s" % (drilled_down))
return drilled_down
return word
def remove_emails(input):
return re.sub(r"\S*@\S*\s?", " ", input)
def remove_hashes(input):
return re.sub(r"#(\w+)", " ", input)
def remove_phonenumbers(input):
# TODO
# intl_removed = re.sub(r'(\+[0-9]+\s*)?(\([0-9]+\))?[\s0-9\-]+[0-9]+', ' ', input)
# intl_removed = input
intl_removed = re.sub(r"(\d{1,3}[-\.\s]??\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})", " ", input)
us_removed = re.sub(r"(\d{1,3}[-\.\s]??\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})", " ", intl_removed)
return us_removed
def remove_urls(input):
# return re.sub(r'\s*(?:https?://)?\S*\.[A-Za-z]{2,5}\s*', " ", input)
removed_full_links = re.sub(r'(http|https|ftp|telnet):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?', " ", input)
remove_partial_links = re.sub(r"([\w\.]+\.(?:com|org|net|us|co|edu|gov|uk)[^,\s]*)", " ", removed_full_links)
remove_mailtos = re.sub(r'((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)', " ", remove_partial_links)
ips_removed = re.sub(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", " ", remove_mailtos)
intl_removed = re.sub(r'(tel):(\+[0-9]+\s*)?(\([0-9]+\))?[\s0-9\-]+[0-9]+', ' ', ips_removed)
us_removed = re.sub(r"(tel):(\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})", " ", intl_removed)
filenames_removed = re.sub(r"([\w\d\-.]+\.(pdf|PDF|doc|DOC|docx|DOCX|zip|ZIP|xlsx|XLSX|csv|CSV))", " ", us_removed)
return filenames_removed
def remove_acronyms(input):
return re.sub(r"\b[A-Z\.]{2,}s?\b", "", input)
def remove_direct_quotation_brackets(input):
return input.replace("[", "").replace("]", "")
def get_misspelled_words(raw_text, language, dictionary, debug=False):
    """Return a list of misspelled (root-simplified) words found in raw_text.

    Pipeline: strip non-word tokens (urls, emails, hashtags, phone numbers),
    normalize typography, expand contractions, drop stopwords/numbers/
    punctuation/acronyms, discard assumed proper nouns, then check the
    remaining unique words against `dictionary` and the SpellChecker word
    list; unknown words get one last prefix/suffix simplification pass via
    simplify_word() before being reported.

    raw_text:   the text to check.
    language:   SpellChecker language code (only 'en' appears supported —
                see the commented-out guard below).
    dictionary: corpus word collection used by is_in_dictionary()/simplify_word().
    debug:      when True, log each intermediate stage at WARNING level.
    """
    log_level = logging.WARNING if debug else logging.DEBUG
    # if language != 'en':
    #     return True, 'Language "%s" not supported' % (language)
    logger.log(log_level, ">> raw_text:")
    logger.log(log_level, raw_text)
    # Remove email addresses, hashes, urls, phone numbers...
    urls_removed = remove_urls(raw_text)
    emails_removed = remove_emails(urls_removed)
    hashes_removed = remove_hashes(emails_removed)
    phonenumbers_removed = remove_phonenumbers(hashes_removed)
    logger.log(log_level, ">> after email, hashes, urls, phone numbers removed:")
    logger.log(log_level, phonenumbers_removed)
    # Replace fancy typographic characters like curly quotes and em dashes
    typographic_translation_table = dict([(ord(x), ord(y)) for x, y in zip(u"‘’´'“”–-—⁃‐…●•∙©", u"''''\"\"-----.----")])
    typography_removed = phonenumbers_removed.translate(typographic_translation_table)
    hyphens_removed = typography_removed.replace("-", " ").replace("/", " ")
    newlines_removed = hyphens_removed.replace("\n", " ").replace("\r", " ")
    logger.log(log_level, ">> after fancy typographic characters and newlines removed:")
    logger.log(log_level, newlines_removed)
    contractions_removed = contractions.fix(newlines_removed)
    possessives_removed = re.sub("\'s ", " ", contractions_removed)
    hyphens_removed = possessives_removed.replace("-", " ")
    acronyms_removed = remove_acronyms(hyphens_removed)
    # \u200b is a zero-width space; condense all runs of spaces/tabs to one space
    whitespace_condensed = re.sub("[ \t]+", " ", acronyms_removed.replace(u'\u200b', ' '))
    logger.log(log_level, ">> after contractions, posessives, hyphens and acronyms removed:")
    logger.log(log_level, whitespace_condensed)
    # Split text into words
    check_words_raw = whitespace_condensed.split(' ')
    logger.log(log_level, ">> check_words_raw:")
    logger.log(log_level, check_words_raw)
    # Remove stopwords for faster processing
    stop_words = set(stopwords.words('english'))
    stopwords_removed = [word for word in check_words_raw if (word.lower() not in stop_words)]
    logger.log(log_level, ">> stopwords_removed:")
    logger.log(log_level, stopwords_removed)
    # Remove any numbers and punctuation
    normalzized_words = [unicodedata.normalize('NFKC', word) for word in stopwords_removed]
    punctuation_removed = [remove_direct_quotation_brackets(word.strip(punctuation)) for word in normalzized_words if (word and not word[0].isdigit())]
    # Apply twice in case there is punctuation around digits
    punctuation_removed = [remove_direct_quotation_brackets(word.strip(punctuation)) for word in punctuation_removed if (word and not word[0].isdigit())]
    logger.log(log_level, ">> punctuation_removed:")
    logger.log(log_level, punctuation_removed)
    remove_empty_words = [word for word in punctuation_removed if word]
    # Gather list of assumed proper nouns.
    # Assume anything capitalized in article is a local proper noun
    proper_nouns = []
    for word in remove_empty_words:
        # Capitalized AND not a dictionary word (even after simplification) => proper noun
        if word[0].isupper() and not is_in_dictionary(simplify_word(word.lower(), dictionary), dictionary):
            proper_nouns.append(word.strip(punctuation))
    proper_nouns_lower = [word.lower() for word in proper_nouns]
    logger.log(log_level, ">> proper_nouns:")
    logger.log(log_level, proper_nouns)
    # Remove anything matching a proper noun from above
    remove_proper_nounds = [item for item in remove_empty_words if item.lower() not in proper_nouns_lower]
    # Reduce to unique set of words
    check_words = list(set(remove_proper_nounds))
    logger.log(log_level, ">> check_words:")
    logger.log(log_level, check_words)
    # First check the corpus dictionary:
    words_not_in_dict = [word for word in check_words if not is_in_dictionary(word.lower(), dictionary)]
    logger.log(log_level, ">> words_not_in_dict:")
    logger.log(log_level, words_not_in_dict)
    # Next use spelling library
    spell = SpellChecker(language=language, distance=1)
    unknown = [item for item in list(spell.unknown(words_not_in_dict))]
    logger.log(log_level, ">> unknown:")
    logger.log(log_level, unknown)
    # Finally, removing prefix and suffixes to unearth a valid root word
    misspelled = []
    for word in unknown:
        simplified_word = simplify_word(word, dictionary)
        if not is_in_dictionary(simplified_word, dictionary):
            misspelled.append(simplified_word)
    logger.log(log_level, ">> misspelled:")
    logger.log(log_level, misspelled)
    return misspelled
| nilq/baby-python | python |
import logging; log = logging.getLogger(__name__)
import OpenGL.GL as gl
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, \
glBindVertexArray
class VertexArray:
    """Wrapper around an OpenGL vertex array object (VAO).

    All GL calls are routed through `ctx`, the owning context wrapper.
    The VAO is created and immediately bound on construction.
    """

    class _Binding:
        """Context manager handed out by VertexArray.bound()."""

        def __init__(self, arr):
            self.arr = arr

        def __enter__(self):
            self.arr.bind()
            return self.arr

        def __exit__(self, type, value, traceback):
            self.arr.unbind()

    def __init__(self, ctx):
        self.ctx = ctx
        self.id = ctx.glGenVertexArrays(1)
        self.bind()

    def bind(self):
        """Make this VAO the active vertex array."""
        self.ctx.glBindVertexArray(self.id)

    def unbind(self):
        """Unbind any VAO (binds array 0)."""
        self.ctx.glBindVertexArray(0)

    def bound(self):
        """Create a temporary binding: `with myArray.bound() ...`"""
        return self._Binding(self)

    def render(self, mode, count=None, offset=0):
        """Draw this array with glDrawArrays.

        mode:   primitive type; eg GL_POINTS, GL_LINES...
        count:  number of indices to render (passed through as-is).
        offset: first index offset to render.
        """
        self.bind()
        # XXX count=None should mean "all", but the total length is unknown here.
        self.ctx.glDrawArrays(mode, offset, count)

    def renderInstanced(self, mode, offset=0, length=0, count=0):
        """Draw `count` instances of the index range [offset, offset+length).

        mode:   primitive type; eg GL_POINTS, GL_LINES...
        offset: first index offset to render.
        length: number of indices to render per instance.
        count:  number of instances to render.
        """
        self.bind()
        self.ctx.glDrawArraysInstanced(mode, offset, length, count)
| nilq/baby-python | python |
# Connection URL for the Redis service ("redis" is presumably the
# docker-compose/k8s service hostname — confirm against the deployment config).
REDIS_URL = 'redis://redis:6379'
| nilq/baby-python | python |
# This program is public domain.
"""
Support for rarely varying instrument configuration parameters.
Instrument configuration parameters will change throughout the
lifetime of an instrument. For example, the properties of the
beam such as wavelength and wavelength divergence will change
when a new monochromator is installed on the instrument. Ideally,
all such parameters would be encoded in the data file (this is
one goal of the NeXus file format), but this is not the case for
all instrument formats available today.
We cannot simply hard code the current value of the instrument
parameters in the file reader for the data file format. Such a
reader will give bad values for old data files and for new data
files after the format has changed. Nor should we burden the user
with knowing and entering values for such parameters on their own.
Instead, we provide support for dated values. Each instrument has
a table of values and the date the values come into effect. When
a file is loaded, the software scans the list of values, extracting
all that are in effect on the file date.
As instrument parameters change add additional lines to the configuration
file indicating the new value and the date of the change. The order of
the entries does not matter. The timestamp on the file will
determine which value will be used.
The format of the entries should be::
default.NAME = (VALUE, 'YYYY-MM-DD') # value after MM/DD/YYYY
default.NAME = (VALUE, '') # value at commissioning
[Not implemented] Each data reader has an associated URL which
contains the configuration file for the instrument. On file
load, the program will fetch dated values from the URL and use
them to populate the configuration data for the instrument. This
gives control of the instrument parameters to the instrument
scientist where it belongs.
Example
=======
The following parameters are needed for the NG-7 reflectometer::
config = properties.DatedValues()
config.wavelength = (4.76,'') # in case ICP records the wrong value
# Detector response is uniform below 15000 counts/s. The efficiency
# curve above 15000 has not been measured.
config.saturation = (numpy.array([[1,15000,0]]),'')
config.detector_distance = (36*25.4, '') # mm
config.psd_width = (20, '') # mm
config.slit1_distance = (-75*25.4, '') # mm
config.slit2_distance = (-14*25.4, '') # mm
config.slit3_distance = (9*25.4, '') # mm
config.slit4_distance = (42*25.4, '') # mm
config.detector_distance = (48*25.4, '2004-02-15')
The defaults are used as follows::
class Data:
def load(filename):
data = readheaders(filename)
self.config = config(str(data.date))
self.detector.distance = self.config.detector_distance
...
"""
# TODO: provide URI for the instrument configuration
# Check the URI if the file date is newer than the configuration date. This
# will normally be true for the user, but there is no other way to make sure
# that they are using the most up-to-date values available. The URI will
# be given on the constructor as DatedValues('URI').
# TODO: optimize repeated lookups.
# Currently we scan the table once for each file. A cheap optimization is to
# identify the range of dates surrounding the current date for which the
# value is correct and check if the new file falls in that range. The next
# level is to cache a set of these ordered by date. A third option is to
# build an in-memory database while the configuration values are registered
# so they don't need to be scanned on file load.
# TODO: identify data reader version required
# As the data format for the instrument evolves, old data readers may not
# be sufficient to read the new data. For example, if the reflectometer
# gets a 2-D detector but the reflectometry data format does not yet
# support 2-D detectors, then a new reader will be required.
# TODO: property sheet editor
# Once the file parameters are loaded the values are displayed to
# the user on a property sheet. Values different from the default
# are highlighted. Users can edit the values, with changes noted in
# the reduction log so that data history is preserved.
# TODO: XML support
# We should probably support to/from xml for the purposes of
# saving and reloading corrections.
import re

# Effective dates must be ISO formatted (YYYY-MM-DD), or "" for values in
# effect since commissioning.
datepattern = re.compile(r'^(19|20)\d\d-\d\d-\d\d$')


class DatedValuesInstance:
    """Plain namespace holding the parameter values in effect on one date."""
    pass


class DatedValues(object):
    """Registry of instrument parameters whose values change over time.

    Assign ``registry.name = (value, 'YYYY-MM-DD')`` to record that *name*
    took *value* starting on that date ('' means "at commissioning").
    Call ``registry(date)`` to get an object whose attributes are the values
    in effect on *date*.
    """

    def __init__(self):
        # Bypass our own __setattr__ so _parameters itself is not recorded
        # as a dated value.
        self.__dict__['_parameters'] = {}

    def __setattr__(self, name, pair):
        """
        Record the parameter value and the date it was set. The pair should
        contain the value and the date. The assignment will look like:

            datedvalue.name = (value, 'yyyy-mm-dd')
        """
        value, date = pair
        # Check that the date is valid (empty string = commissioning value)
        assert date == "" or datepattern.match(date), \
            "Expected default.%s = (value,'YYYY-MM-DD')" % (name)
        # Append the value-date pair to the history for that parameter
        self._parameters.setdefault(name, []).append(pair)

    def __call__(self, date):
        """
        Recover the parameter values in effect on a specific date.

        Returns a DatedValuesInstance; a parameter with no entry dated on or
        before *date* is simply absent from the result.
        """
        instance = DatedValuesInstance()
        for name, values in self._parameters.items():
            # Sort entries by effective date ('' sorts first), then apply
            # every entry whose date is on or before the requested date so
            # the latest applicable value wins.  (The original sorted by
            # value, not date — a bug whenever values are not monotonic.)
            for v, d in sorted(values, key=lambda pair: pair[1]):
                if d <= date:
                    setattr(instance, name, v)
                else:
                    break
        return instance
def test():
    """Exercise DatedValues date resolution on a three-entry history."""
    default = DatedValues()
    default.a = (1, '')
    default.a = (2, '2000-12-15')
    default.a = (3, '2004-02-05')
    expected = {
        '1993-01-01': 1,
        '2000-12-14': 1,
        '2000-12-15': 2,
        '2000-12-16': 2,
        '2006-02-19': 3,
    }
    for date, value in expected.items():
        assert default(date).a == value
if __name__ == "__main__": test()
| nilq/baby-python | python |
import cv2
import glob, os
import numpy as np
import pandas as pd
import tensorflow as tf
from preprocessing import Preprocess
import Model
from matplotlib import pyplot as plt
# Containers for (image, grade) pairs; grades are stored as folder index + 1 (1..5).
df_train = []
df_test = []
df_val = []
if os.path.exists("./dataset/train.npy"):
    # Reuse cached arrays from a previous run.
    # NOTE(review): the arrays below are saved to the CWD ('train.npy'), not to
    # './dataset/train.npy', so this cache branch may never be hit — confirm.
    df_train = np.load("./dataset/train.npy")
    df_test = np.load("./dataset/test.npy")
    df_val = np.load("./dataset/val.npy")
else:
    #TRAIN
    for grade in range(5):
        images=[ cv2.imread(file) for file in glob.glob(r'C:/Users/Gaurav/Desktop/Minor_Project/MinorProject/dataset/train/'+str(grade)+'/*.png')]
        path_input = r'C:/Users/Gaurav/Desktop/Minor_Project/MinorProject/dataset/train/'+str(grade)
        fnames = os.listdir(path_input)
        for f in fnames:
            # Read as grayscale (flag 0)
            img = cv2.imread(os.path.join(path_input,f),0)
            #img = images[i]
            #i += 1
            img1 = np.array(img, dtype=np.uint8)
            img_pre,img_CLAHE = Preprocess(img1)
            med= cv2.medianBlur(img_CLAHE, 3)
            w= os.path.split(f)[1].split('.')[0]
            # Filenames containing 'L' are mirrored before being written back
            # (presumably left-side images flipped for consistency — confirm).
            if (w.find('L') != -1):
                cv2.imwrite(r'C:/Users/Gaurav/Desktop/Minor_Project/MinorProject/dataset/train/'+str(grade)+'/'+w+'.png', np.fliplr(med))
            else:
                cv2.imwrite(r'C:/Users/Gaurav/Desktop/Minor_Project/MinorProject/dataset/train/'+str(grade)+'/'+w+'.png', med)
            #img_pre:grayScale->roi->CLAHE->edgeDetection->contour
            #img_CLAHE:grayScale->CLAHE
            img_CLAHE = img_CLAHE/255.0
            df_train.append([img_CLAHE,grade+1])
    #TEST
    for grade in range(5):
        images=[ cv2.imread(file) for file in glob.glob(r'C:/Users/Gaurav/Desktop/Minor_Project/MinorProject/dataset/test/'+str(grade)+'/*.png')]
        for img in images:
            img1 = np.array(img, dtype=np.uint8)/255.0
            df_test.append([img1,grade+1])
    #VAL
    for grade in range(5):
        images=[ cv2.imread(file) for file in glob.glob(r'C:/Users/Gaurav/Desktop/Minor_Project/MinorProject/dataset/val/'+str(grade)+'/*.png')]
        for img in images:
            img1 = np.array(img, dtype=np.uint8)/255.0
            # NOTE(review): this appends validation images to df_test, not
            # df_val — looks like a bug; val.npy is saved empty below.
            df_test.append([img1,grade+1])
    np.save('train.npy',df_train)
    np.save('test.npy',df_test)
    np.save('val.npy',df_val)
print("*****Loading Done!*****")
'''
#shuffle
df_train = df_train.sample(frac = 1)
X_train, Y_train = df_train['Image'], df_train['Grade']
X_test, Y_test = df_test['Image'], df_test['Grade']
X_val, Y_val = df_val['Image'], df_val['Grade']
print("Splitting Done!")
#df has two coloumns Image and Grade
#don't paste the code directly rather make a different .py file and use functions
model_1 = Model.ConvPoolModel(inputShape)
history_1 = model_1.fit(X_train, Y_train,batch_size=32,epochs = 5,verbose = 1)
model_2 = Model.SimpleModel(inputShape)
filepath = 'Simple_Model.hdf5'
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True,mode='auto', save_frequency=1)
history_2 = model_2.fit(X_train, Y_train,batch_size = 32,epochs = 5,verbose = 1,validation_split = 0.2,validation_data = (X_val, Y_val),callbacks = [checkpoint],shuffle=True)
'''
print("DONE")
import sys
import numpy as np
import h5py
import random
import os
from subprocess import check_output
# 1. h5 i/o
def readh5(filename, datasetname):
    """Read an entire HDF5 dataset into a numpy array.

    The file is opened read-only and closed deterministically (the original
    left the file handle dangling).
    """
    with h5py.File(filename, 'r') as fid:
        data = np.array(fid[datasetname])
    return data
def writeh5(filename, datasetname, dtarray):
    """Write `dtarray` to a new HDF5 file as one gzip-compressed dataset."""
    with h5py.File(filename, 'w') as fid:
        ds = fid.create_dataset(datasetname, dtarray.shape, compression="gzip", dtype=dtarray.dtype)
        ds[:] = dtarray
def readh5k(filename, datasetname):
    """Read several datasets from one HDF5 file.

    filename:    path to the HDF5 file.
    datasetname: iterable of dataset names to read.
    Returns {name: ndarray}.

    Fixes vs. the original: `array` was an undefined name (now np.array),
    the file is opened with an explicit read-only mode, and the handle is
    closed even if a dataset is missing.
    """
    data = {}
    with h5py.File(filename, 'r') as fid:
        for kk in datasetname:
            data[kk] = np.array(fid[kk])
    return data
def writeh5k(filename, datasetname, dtarray):
    """Write dtarray[name] for each name in `datasetname` to one HDF5 file,
    each as a gzip-compressed dataset."""
    with h5py.File(filename, 'w') as fid:
        for kk in datasetname:
            ds = fid.create_dataset(kk, dtarray[kk].shape, compression="gzip", dtype=dtarray[kk].dtype)
            ds[:] = dtarray[kk]
def resizeh5(path_in, path_out, dataset, ratio=(0.5,0.5), interp=2, offset=[0,0,0]):
    """Rescale an HDF5 volume in-plane and optionally crop a border.

    path_in/path_out: source and destination files; if path_out is None the
        resized array is returned instead of written.
    dataset: dataset name in both files.
    ratio:   (y, x) zoom factors for the last two axes.
    interp:  spline order passed to scipy's zoom.
    offset:  [z, y, x] border to crop; z slices are dropped before zooming,
        y/x borders after.  (The mutable default list is never mutated here,
        so it is kept for interface compatibility.)

    Fixes vs. the original: `xrange` (Python 2) replaced with `range`, and
    the input file handle is closed deterministically.
    """
    from scipy.ndimage.interpolation import zoom
    # for half-res
    with h5py.File(path_in, 'r') as fid:
        im = fid[dataset][:]
    shape = im.shape
    if len(shape) == 3:
        im_out = np.zeros((shape[0]-2*offset[0], int(np.ceil(shape[1]*ratio[0])), int(np.ceil(shape[2]*ratio[1]))), dtype=im.dtype)
        for i in range(shape[0]-2*offset[0]):
            im_out[i,...] = zoom(im[i+offset[0],...], zoom=ratio, order=interp)
        if offset[1] != 0:
            im_out = im_out[:, offset[1]:-offset[1], offset[2]:-offset[2]]
    elif len(shape) == 4:
        im_out = np.zeros((shape[0]-2*offset[0], shape[1], int(shape[2]*ratio[0]), int(shape[3]*ratio[1])), dtype=im.dtype)
        for i in range(shape[0]-2*offset[0]):
            for j in range(shape[1]):
                im_out[i,j,...] = zoom(im[i+offset[0],j,...], ratio, order=interp)
        if offset[1] != 0:
            # NOTE: requires offset to have 4 entries for 4-D data (as in the original)
            im_out = im_out[:, offset[1]:-offset[1], offset[2]:-offset[2], offset[3]:-offset[3]]
    if path_out is None:
        return im_out
    writeh5(path_out, dataset, im_out)
def writetxt(filename, dtarray):
    """Write the string `dtarray` to `filename`.

    Uses a context manager so the file is closed even if the write raises.
    """
    with open(filename, 'w') as fh:
        fh.write(dtarray)
# 2. segmentation wrapper
def segToAffinity(seg):
    """Convert a segmentation volume into a 3-D affinity graph via malis."""
    from ..lib import malis_core as malisL
    neighborhood = malisL.mknhood3d()
    return malisL.seg_to_affgraph(seg, neighborhood)
def bwlabel(mat):
    """Histogram of label counts in `mat`.

    Returns an array `out` of length max-min+1 where out[k] is the number of
    elements equal to label min+k.

    Fix vs. the original: the output was indexed with the raw label value,
    which raised IndexError (or misaligned counts) whenever mat.min() != 0;
    counts are now offset by the minimum label.
    """
    lo, hi = int(mat.min()), int(mat.max())
    out = np.zeros(hi - lo + 1)
    for label in range(lo, hi + 1):
        out[label - lo] = np.count_nonzero(mat == label)
    return out
def genSegMalis(gg3, iter_num):
    """Widen segment borders: zero out voxels near in-plane label boundaries.

    gg3:      unsigned-int segmentation volume laid out (z, y, x).
    iter_num: number of binary-dilation iterations applied per z-slice.
    Returns a copy of gg3 with the dilated boundary voxels set to 0.
    """
    from scipy.ndimage import morphology as skmorph
    shape = gg3.shape
    # Forward differences mark where the label changes along each axis.
    dz = np.zeros(shape).astype(np.uint32)
    dz[1:, :, :] = np.diff(gg3, axis=0)  # kept for parity with the original; unused below
    dy = np.zeros(shape).astype(np.uint32)
    dy[:, 1:, :] = np.diff(gg3, axis=1)
    dx = np.zeros(shape).astype(np.uint32)
    dx[:, :, 1:] = np.diff(gg3, axis=2)
    # Only in-plane (y/x) boundaries participate.
    boundary = (dx + dy) > 0
    struct = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]).astype(bool)
    dilated = np.zeros(boundary.shape)
    for z in range(boundary.shape[0]):
        dilated[z, :, :] = skmorph.binary_dilation(boundary[z, :, :], structure=struct, iterations=iter_num)
    out = gg3.copy()
    out[dilated == 1] = 0
    return out
# 3. evaluation
"""
def runBash(cmd):
fn = '/tmp/tmp_'+str(random.random())[2:]+'.sh'
print('tmp bash file:',fn)
writetxt(fn, cmd)
os.chmod(fn, 0755)
out = check_output([fn])
os.remove(fn)
print(out)
"""
| nilq/baby-python | python |
# Shell sort driver: read element count n, then n integers (one per line).
n = int(input())
lst = [int(input()) for _ in range(n)]
def insertionSort(A, n, g):
    """g-gapped insertion sort of A[:n] in place.

    Every element shift is counted in the module-level global `cnt`.
    """
    global cnt
    for i in range(g, n):
        value = A[i]
        j = i - g
        while j >= 0 and A[j] > value:
            A[j + g] = A[j]
            cnt += 1
            j -= g
        A[j + g] = value
def shellSort(A, n):
    """Shell sort A[:n] using the Knuth gap sequence (1, 4, 13, ...).

    Prints the number of gaps and the gap sequence (largest first) before
    running the gapped insertion-sort passes.
    """
    gaps = []
    h = 1
    while h <= len(A):
        gaps.append(h)
        h = 3 * h + 1
    gaps.reverse()
    print(len(gaps))
    print(' '.join(map(str, gaps)))
    for gap in gaps:
        insertionSort(A, n, gap)
# Global move counter incremented by insertionSort.
cnt = 0
shellSort(lst, n)
# Output: total number of element moves, then the sorted values one per line.
print(cnt)
print(*lst, sep="\n")
| nilq/baby-python | python |
import json
from flask import request, make_response, jsonify, session as loginSession
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from config.flask_config import app
from model.entities import Category
from model.repository import CategoryRepo
from exception.exception_helper import InvalidUsage
@app.route('/api/category', methods=['POST'])
def addCategoryJSON():
    """Create a new category owned by the logged-in user; return it as JSON.

    Raises InvalidUsage for a missing/empty title or a duplicate title.
    """
    payload = request.json
    try:
        if not payload["title"]:
            raise InvalidUsage("Title is a required field")
        new_category = Category(title=payload["title"],
                                userId=loginSession.get('user_id'))
        CategoryRepo.createOrUpdate(new_category)
        return jsonify(new_category.serialize)
    except IntegrityError:
        # Unique constraint on title violated
        raise InvalidUsage("There is another category with title '%s'." % payload["title"])
@app.route('/api/category/<int:categoryId>')
def getCategoryJSON(categoryId):
    """Return one category (with its items) as JSON; InvalidUsage if absent."""
    try:
        category = CategoryRepo.findByIdWithItems(categoryId)
        return jsonify(category.serialize)
    except NoResultFound:
        raise InvalidUsage("No category found")
@app.route('/api/category/<int:categoryId>', methods=['PUT'])
def updateCategoryJSON(categoryId):
    """Rename an existing category; the title must be non-empty and unique."""
    payload = request.json
    try:
        if not payload["title"]:
            raise InvalidUsage("Title is a required field")
        category = CategoryRepo.findById(categoryId)
        category.title = payload["title"]
        CategoryRepo.createOrUpdate(category)
        return jsonify(category.serialize)
    except NoResultFound:
        raise InvalidUsage("Category %s not found." % categoryId)
    except IntegrityError:
        raise InvalidUsage("There is another category with title '%s'." % payload["title"])
@app.route('/api/category/<int:categoryId>', methods=['DELETE'])
def removeCategoryJSON(categoryId):
    """Delete a category; responds with an empty 200 body on success."""
    try:
        CategoryRepo.delete(CategoryRepo.findById(categoryId))
        return make_response()
    except NoResultFound:
        raise InvalidUsage("Category %s not found" % categoryId)
@app.route('/api/categories')
def getAllCategoriesJSON():
    """Return every category as a JSON array."""
    serialized = [category.serialize for category in CategoryRepo.findAll()]
    response = make_response(json.dumps(serialized), 200)
    response.headers["Content-Type"] = "application/json"
    return response
| nilq/baby-python | python |
# Print the length of the substring spanning the FIRST 'A' to the LAST 'Z'.
s = input()
index_A = float('inf')
index_Z = float('inf')
# Index of the first 'A'
for i in range(len(s)):
    if s[i] == 'A':
        index_A = i
        break
# Index of the last 'Z' (scan from the right)
for i in range(len(s) - 1, -1, -1):
    if s[i] == 'Z':
        index_Z = i
        break
# Assumes the input contains both 'A' and 'Z' (typical contest guarantee);
# otherwise slicing with float('inf') raises TypeError.
print(len(s[index_A:index_Z + 1]))
| nilq/baby-python | python |
def evaluate_policy(env, model, render, turns = 3):
    """Run `turns` deterministic episodes and return the mean episode reward.

    env:    gym-style environment (reset()/step()/render()).
    model:  agent exposing select_action(state, deterministic=True).
    render: when True, render every step.
    Returns the average episode reward, truncated to int.
    """
    total = 0
    for _ in range(turns):
        state = env.reset()
        done, episode_reward = False, 0
        while not done:
            # Deterministic actions at evaluation time
            action = model.select_action(state, deterministic=True)
            state, reward, done, info = env.step(action)
            episode_reward += reward
            if render:
                env.render()
        total += episode_reward
    return int(total / turns)
#You can just ignore this funciton. Is not related to the RL.
def str2bool(v):
    """argparse-friendly conversion of a string (or bool) to bool.

    Accepts yes/true/t/y/1 and no/false/f/n/0 in any letter case; raises
    argparse.ArgumentTypeError for anything else.

    Fixes vs. the original: argparse was never imported in this module, so
    the error path raised NameError; also the mixed-case entries ('True',
    'TRUE', ...) were dead code after v.lower().
    """
    import argparse  # local import: this module has no top-level imports
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
import os,sys
import re
import sympy
import math
import cmath
from math import factorial as fact
from sympy import factorial as symb_fact
from sympy import factorial2 as symb_fact2
from scipy.special import binom as binomial
from sympy import exp as symb_exp
from sympy import I as symb_I
def generate_cartesian_ls( L ):
    """Enumerate cartesian angular-momentum triples [lx, ly, lz] with lx+ly+lz == L.

    Ordering matches the conventional double loop:
    (L,0,0), (L-1,1,0), (L-1,0,1), ..., (0,0,L).

    The original built each triple through k-fold `+ 1` increment loops plus a
    final conditional increment — arithmetically identical to simply storing
    [lx, ly, lz]; that indirection is removed here.
    """
    triples = []
    for i in range(L + 1):
        lx = L - i
        for j in range(i + 1):
            ly = i - j
            lz = L - lx - ly
            triples.append([lx, ly, lz])
    return triples
def generate_spherical_coeff( l, m, lx, ly, lz ):
    """Numeric expansion coefficient of the (complex) spherical harmonic (l, m)
    over the cartesian monomial x^lx y^ly z^lz.

    Returns 0.0 when lx+ly-|m| is odd; otherwise a float, or a complex value
    when the imaginary part survives the 1e-10 snap-to-zero threshold.
    """
    j = lx + ly - abs(m)
    if j % 2 != 0:
        # Coefficient vanishes unless lx+ly-|m| is even
        return 0.0
    j = j // 2

    # All factorial arguments are kept integral: math.factorial rejects
    # non-integer floats on Python >= 3.10 (the original passed 2.*lx etc.,
    # which raised TypeError there).
    prefactor = fact(2*lx) * fact(2*ly) * fact(2*lz) * fact(l)
    prefactor = prefactor * fact(l - abs(m))
    prefactor = prefactor / (fact(2*l) * fact(lx) * fact(ly) * fact(lz))
    prefactor = prefactor / fact(l + abs(m))
    prefactor = math.sqrt(prefactor)

    term1 = 0.0
    for i in range(int((l - abs(m))/2) + 1):
        term1 = term1 + binomial(l, i) * binomial(i, j) * \
                math.pow(-1, i) * fact(2*l - 2*i) / \
                fact(l - abs(m) - 2*i)
    term1 = term1 / math.pow(2, l) / fact(l)

    # Sign of m selects the phase direction of the complex exponential
    m_fact = 1.
    if m < 0:
        m_fact = -1.

    term2 = 0.0 + 0.0j
    for k in range(j + 1):
        z = cmath.exp(m_fact * math.pi / 2. * (abs(m) - lx + 2*k) * 1.j)
        term2 = term2 + binomial(j, k) * binomial(abs(m), lx - 2*k) * z

    val = prefactor * term1 * term2
    # Snap numerical noise to zero; return a plain float when purely real
    if abs(val.real) < 1e-10:
        val = 0.0 + val.imag*1j
    if abs(val.imag) < 1e-10:
        val = val.real
    return val
def generate_spherical_coeff_symb( l, m, lx, ly, lz, unnorm = False ):
    """Exact (sympy) expansion coefficient of the spherical harmonic (l, m)
    over the cartesian monomial x^lx y^ly z^lz.

    Symbolic analogue of generate_spherical_coeff(); returns sympy.Integer(0)
    when lx+ly-|m| is odd, otherwise an exact sympy expression (possibly
    complex via symb_I).  With unnorm=True an extra double-factorial ratio is
    folded in to match an unnormalized cartesian convention.
    """
    j = (lx + ly - abs(m))
    if j%2 == 0:
        j = int(j / 2)
    else:
        # Coefficient vanishes unless lx+ly-|m| is even
        return sympy.Integer(0)
    # Promote all integer inputs to sympy Integers so arithmetic stays exact
    j_symb = sympy.Integer(j)
    l_symb = sympy.Integer(l)
    m_symb = sympy.Integer( abs(m) )
    lx_symb = sympy.Integer(lx)
    ly_symb = sympy.Integer(ly)
    lz_symb = sympy.Integer(lz)
    prefactor = symb_fact(2*lx_symb) * symb_fact(2*ly_symb) * symb_fact(2*lz_symb) * symb_fact(l_symb)
    prefactor = prefactor * symb_fact( l_symb - m_symb )
    prefactor = prefactor / (symb_fact(2*l_symb) * symb_fact(lx_symb) * symb_fact(ly_symb) * symb_fact(lz_symb))
    prefactor = prefactor / symb_fact( l_symb + m_symb )
    # Optional unnormalized convention: divide out cartesian double-factorial norms
    if unnorm:
        prefactor = prefactor * symb_fact2( 2*l - 1 ) / symb_fact2( 2*lx - 1 ) / symb_fact2(2*ly - 1) / symb_fact2( 2*lz - 1 )
    prefactor = sympy.sqrt( prefactor )
    term1 = sympy.Integer(0)
    for i in range( int((l - abs(m))/2)+1 ):
        term1 = term1 + sympy.Integer(binomial(l,i)) * sympy.Integer(binomial(i,j)) * \
                sympy.Integer(math.pow(-1,i)) * symb_fact( 2*l_symb - sympy.Integer(2*i) ) / \
                symb_fact( l_symb - m_symb - sympy.Integer(2*i) )
    term1 = term1 / (2**l_symb) / symb_fact(l)
    # Sign of m selects the phase direction of the complex exponential
    m_fact_symb = sympy.Integer(1)
    if m < 0:
        m_fact_symb = - m_fact_symb
    term2 = sympy.Integer(0)
    for k in range( j+1 ):
        z = sympy.exp( m_fact_symb * sympy.pi / 2 * (m_symb - lx_symb + sympy.Integer(2*k)) * symb_I )
        term2 = term2 + sympy.Integer(binomial(j,k)) * sympy.Integer(binomial( abs(m), lx - 2*k )) * z
    return prefactor * term1 * term2
def generate_cartesian_angular( ls ):
    """Build sympy monomials x**lx * y**ly * z**lz for each triple in `ls`.

    Each term is assembled as r * (monomial) / r, matching the symbolic form
    produced by the original repeated-multiplication construction (sympy
    cancels the r factors automatically).
    """
    [x, y, z, r] = sympy.symbols('x y z r', real=True)
    monomials = []
    for lx, ly, lz in ls:
        term = r
        term = term * x**lx * y**ly * z**lz
        monomials.append(term / r)
    return monomials
def generate_spherical_angular( L, unnorm = False ):
    """Build the 2L+1 real spherical-harmonic angular factors for shell L.

    Expands each real harmonic over the cartesian monomials of degree L using
    the exact symbolic coefficients, forming the usual real +/- combinations
    of the complex +-m pair, and returns simplified sympy expressions ordered
    m = -L .. +L.
    """
    ls = generate_cartesian_ls( L )
    angs = generate_cartesian_angular( ls )
    #r = sympy.symbols( 'r' )
    sph_angs = []
    for m in range( L + 1 ):
        tmp_p = 0
        tmp_m = 0
        for i in range(len(ls)):
            l = ls[i]
            ang = angs[i]
            #c = generate_spherical_coeff( L, m, l[0],l[1],l[2] )
            c = generate_spherical_coeff_symb( L, m, l[0],l[1],l[2], unnorm )
            if m == 0:
                tmp_p = tmp_p + c * ang
            else:
                # Real (+m) and imaginary (-m) combinations of the complex pair
                c_p = ( c + sympy.conjugate(c) ) / sympy.sqrt(2)
                c_m = ( c - sympy.conjugate(c) ) / sympy.sqrt(2) / symb_I
                tmp_p = tmp_p + c_p * ang
                tmp_m = tmp_m + c_m * ang
        sph_angs.append( (m, tmp_p) )
        if m > 0:
            sph_angs.append( (-m, tmp_m) )
    # Sort by m so the output runs -L .. +L, then strip the m tags
    sph_angs = sorted( sph_angs, key=lambda x: x[0] )
    sph_angs_bare = []
    for a in sph_angs:
        sph_angs_bare.append( sympy.simplify(a[1]) )
    return sph_angs_bare
def generate_eval_lines( L, ang ):
    """Emit C assignment strings eval[j] / eval_{x,y,z}[j] for each angular factor.

    L:   shell angular momentum (controls power-unrolling below).
    ang: list of sympy angular expressions in x, y, z.

    bf / bf_x / bf_y / bf_z are symbols standing for the radial basis value
    and its cartesian gradient; the product rule is applied symbolically and
    integer powers (x**k, k >= 2) are unrolled into repeated products so the
    emitted strings are valid C.  Returns four lists of code strings
    (value, d/dx, d/dy, d/dz).
    """
    [x,y,z,r] = sympy.symbols('x y z r', real=True)
    [bf,bf_x,bf_y,bf_z] = sympy.symbols('bf bf_x bf_y bf_z',real=True)
    bf_eval_strs = []
    bf_x_eval_strs = []
    bf_y_eval_strs = []
    bf_z_eval_strs = []
    for j in range(len(ang)):
        a = ang[j]
        a_x = sympy.diff( a, x )
        a_y = sympy.diff( a, y )
        a_z = sympy.diff( a, z )
        # Product rule: d/dx (a * bf) = a_x * bf + a * bf_x, etc.
        bf_eval = sympy.simplify( a * bf )
        bf_x_eval = sympy.simplify( a_x * bf + a * bf_x )
        bf_y_eval = sympy.simplify( a_y * bf + a * bf_y )
        bf_z_eval = sympy.simplify( a_z * bf + a * bf_z )
        bf_eval_str = 'eval[{}] = {};'.format(j,bf_eval)
        bf_x_eval_str = 'eval_x[{}] = {};'.format(j,bf_x_eval)
        bf_y_eval_str = 'eval_y[{}] = {};'.format(j,bf_y_eval)
        bf_z_eval_str = 'eval_z[{}] = {};'.format(j,bf_z_eval)
        if L >= 2:
            # Replace python-style x**k with k-fold products (C has no ** operator)
            for k in range(2,L+1):
                for X in ('x','y','z'):
                    pow_str = X + '**' + str(k)
                    repl_str = ''
                    for K in range(k-1): repl_str = repl_str + X + '*'
                    repl_str = repl_str + X
                    bf_eval_str = bf_eval_str.replace(pow_str,repl_str)
                    bf_x_eval_str = bf_x_eval_str.replace(pow_str,repl_str)
                    bf_y_eval_str = bf_y_eval_str.replace(pow_str,repl_str)
                    bf_z_eval_str = bf_z_eval_str.replace(pow_str,repl_str)
        bf_eval_strs .append(bf_eval_str )
        bf_x_eval_strs.append(bf_x_eval_str)
        bf_y_eval_strs.append(bf_y_eval_str)
        bf_z_eval_strs.append(bf_z_eval_str)
    return (bf_eval_strs, bf_x_eval_strs, bf_y_eval_strs, bf_z_eval_strs)
cart_header_fname = "gaueval_angular_cartesian.hpp"
sphr_header_fname = "gaueval_angular_spherical.hpp"
cons_header_fname = "gaueval_device_constants.hpp"
cart_header_file = open( cart_header_fname, 'w' )
sphr_header_file = open( sphr_header_fname, 'w' )
cons_header_file = open( cons_header_fname, 'w' )
L_max = 4
do_libint_norm = False
#do_libint_norm = True
preamble = """
#pragma once
#include "gaueval_device_constants.hpp"
#define GPGAUEVAL_INLINE __inline__
namespace GauXC {
"""
cart_header_file.write( preamble )
sphr_header_file.write( preamble )
cartesian_bf_template = """
GPGAUEVAL_INLINE __device__ void generate_cartesian_angular{}(
const double bf,
const double x,
const double y,
const double z,
double* eval
) {{
"""
cartesian_bf_deriv1_template = """
GPGAUEVAL_INLINE __device__ void generate_cartesian_angular{}_deriv1(
const double bf,
const double bf_x,
const double bf_y,
const double bf_z,
const double x,
const double y,
const double z,
double* eval_x,
double* eval_y,
double* eval_z
) {{
"""
spherical_bf_template = cartesian_bf_template.replace('cartesian','spherical')
spherical_bf_deriv1_template = cartesian_bf_deriv1_template.replace('cartesian','spherical')
constant_lines = []
for L in range( L_max + 1 ):
sph_ang = generate_spherical_angular(L, do_libint_norm)
car_ang = generate_cartesian_angular( generate_cartesian_ls(L) )
sph_bf_eval_strs, sph_bf_x_eval_strs, sph_bf_y_eval_strs, sph_bf_z_eval_strs = generate_eval_lines( L, sph_ang )
car_bf_eval_strs, car_bf_x_eval_strs, car_bf_y_eval_strs, car_bf_z_eval_strs = generate_eval_lines( L, car_ang )
cartesian_bf_prototype = cartesian_bf_template.format( "_" + str(L) )
spherical_bf_prototype = spherical_bf_template.format( "_" + str(L) )
cartesian_bf_deriv1_prototype = cartesian_bf_deriv1_template.format( "_" + str(L) )
spherical_bf_deriv1_prototype = spherical_bf_deriv1_template.format( "_" + str(L) )
spherical_bf_func = spherical_bf_prototype + "\n"
for s in sph_bf_eval_strs: spherical_bf_func = spherical_bf_func + " " + s + "\n"
spherical_bf_func = spherical_bf_func + "\n}\n"
spherical_bf_deriv1_func = spherical_bf_deriv1_prototype + "\n"
for s in sph_bf_x_eval_strs: spherical_bf_deriv1_func = spherical_bf_deriv1_func + " " + s + "\n"
spherical_bf_deriv1_func = spherical_bf_deriv1_func + "\n"
for s in sph_bf_y_eval_strs: spherical_bf_deriv1_func = spherical_bf_deriv1_func + " " + s + "\n"
spherical_bf_deriv1_func = spherical_bf_deriv1_func + "\n"
for s in sph_bf_z_eval_strs: spherical_bf_deriv1_func = spherical_bf_deriv1_func + " " + s + "\n"
spherical_bf_deriv1_func = spherical_bf_deriv1_func + "\n}\n"
cartesian_bf_func = cartesian_bf_prototype + "\n"
for s in car_bf_eval_strs: cartesian_bf_func = cartesian_bf_func + " " + s + "\n"
cartesian_bf_func = cartesian_bf_func + "\n}\n"
cartesian_bf_deriv1_func = cartesian_bf_deriv1_prototype + "\n"
for s in car_bf_x_eval_strs: cartesian_bf_deriv1_func = cartesian_bf_deriv1_func + " " + s + "\n"
cartesian_bf_deriv1_func = cartesian_bf_deriv1_func + "\n"
for s in car_bf_y_eval_strs: cartesian_bf_deriv1_func = cartesian_bf_deriv1_func + " " + s + "\n"
cartesian_bf_deriv1_func = cartesian_bf_deriv1_func + "\n"
for s in car_bf_z_eval_strs: cartesian_bf_deriv1_func = cartesian_bf_deriv1_func + " " + s + "\n"
cartesian_bf_deriv1_func = cartesian_bf_deriv1_func + "\n}\n"
sqrt_regex = "sqrt\([0-9]+\)"
sqrt_finds = re.findall( sqrt_regex, spherical_bf_func )
sqrt_finds = sqrt_finds + ( re.findall( sqrt_regex, spherical_bf_deriv1_func ) )
sqrt_finds = sqrt_finds + ( re.findall( sqrt_regex, cartesian_bf_func ) )
sqrt_finds = sqrt_finds + ( re.findall( sqrt_regex, cartesian_bf_deriv1_func ) )
sqrt_finds = list(set(sqrt_finds))
for x in sqrt_finds:
arg = x.strip('sqrt(').strip(')')
new_str = 'sqrt_' + arg
spherical_bf_func = spherical_bf_func.replace( x, new_str )
spherical_bf_deriv1_func = spherical_bf_deriv1_func.replace( x, new_str )
cartesian_bf_func = cartesian_bf_func.replace( x, new_str )
cartesian_bf_deriv1_func = cartesian_bf_deriv1_func.replace( x, new_str )
new_str = "constexpr double " + new_str + " = " + str( math.sqrt(int(arg)) ) + ";"
constant_lines.append( new_str )
cart_header_file.write( cartesian_bf_func )
cart_header_file.write( cartesian_bf_deriv1_func )
sphr_header_file.write( spherical_bf_func )
sphr_header_file.write( spherical_bf_deriv1_func )
# Generate calling routines
cartesian_bf_calling_func = cartesian_bf_template.format('')
spherical_bf_calling_func = spherical_bf_template.format('')
cartesian_bf_deriv1_calling_func = cartesian_bf_deriv1_template.format('')
spherical_bf_deriv1_calling_func = spherical_bf_deriv1_template.format('')
am_dispatch_template = "switch( shell.l ) {{\n"
am_dispatch_template_deriv1 = "switch( shell.l ) {{\n"
for L in range( L_max + 1 ):
bf_template = """
case {0}:
gaueval_{{0}}_angular_{0}(tmp, xc, yc, zc, bf_eval);
break;
""".format(L)
deriv1_template = """
case {0}:
gaueval_{{0}}_angular_{0}(tmp, xc, yc, zc, bf_eval);
gaueval_{{0}}_angular_{0}_deriv1(tmp, tmp_x, tmp_y, tmp_z, xc, yc, zc, bf_eval, bf_x_eval, bf_y_eval, bf_z_eval);
break;
""".format(L)
am_dispatch_template = am_dispatch_template + bf_template
am_dispatch_template_deriv1 = am_dispatch_template_deriv1 + deriv1_template
am_dispatch_template = am_dispatch_template + "}}\n"
am_dispatch_template_deriv1 = am_dispatch_template_deriv1 + "}}\n"
print(am_dispatch_template_deriv1.format('cartesian'))
print(am_dispatch_template_deriv1.format('spherical'))
footer = "} // namespace GauXC"
cart_header_file.write( footer )
sphr_header_file.write( footer )
constant_lines = list(set(constant_lines))
preamble = """
#pragma once
namespace GauXC {
"""
cons_header_file.write( preamble )
for s in constant_lines: cons_header_file.write( " " + s + "\n" )
cons_header_file.write(footer)
| nilq/baby-python | python |
#Embedded file name: cmstop_inj.py
import re
if 0:
i11iIiiIii
def assign(service, arg):
    """Accept only 'cmstop' targets; return (True, arg) for them, None otherwise."""
    if service == 'cmstop':
        return (True, arg)
    return None
if 0:
O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
def audit(arg):
    # NOTE(review): obfuscated vulnerability-scanner routine. The probe path
    # appended to `arg` and the success-detection regex are hidden inside
    # decode() blobs defined elsewhere ("dummy" module) — their plaintext
    # cannot be confirmed from this file.
    o0OO00 = arg + decode('\xc5Y\x05K\xc5\xa8\x80\xac\x13\xc3=\r\x93S\x11\xe7S\xbb\x93\x02\xa0j8i\xed3\xb8\xaeo\xc5\xb0\x81\xd3^\x1a\x0f\xcd\xbe\x9f\xbc@\xc8x\x13\x9a\x12\x0f\xeeS\xbb\x9c\x0f\xe6=')
    # curl.curl presumably returns (status, headers?, body, ?, ?) — only the
    # HTTP status (oo) and body (iIiiiI1IiI1I1) are used; verify against dummy.curl.
    oo, i1iII1IiiIiI1, iIiiiI1IiI1I1, o0OoOoOO00, I11i = curl.curl(o0OO00)
    if oo == 200:
        # Match the decoded pattern against the start of the response body.
        O0O = re.match(decode('\xfahF%\x8a\xe1\xb1\xeb\x0b\x89'), iIiiiI1IiI1I1.strip())
        if O0O:
            security_hole(o0OO00)
if 0:
i11ii11iIi11i.oOoO0oo0OOOo + IiiI / Iii1ii1II11i
if 0:
I1iII1iiII + I1Ii111 / OOo
if __name__ == '__main__':
from dummy import *
#KEY---a13b6776facce2ce24b9407fe76b7d9a2ac9f97fd11b4c03da49c5dc1bfdd4ed--- | nilq/baby-python | python |
# encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import numpy as np
import math
import random
from .backbones import *
from .losses.cosface import AddMarginProduct
from .utils import *
# Changed by Xinchen Liu
class Normalize(nn.Module):
    """Scale each row of the input by its Lp "norm" along dim 1.

    Note: the scale is computed as (sum_i x_i**p)**(1/p) without taking
    absolute values, matching the original formulation (identical to the
    true Lp norm for even p such as the default p=2).
    """

    def __init__(self, power=2):
        super(Normalize, self).__init__()
        self.power = power

    def forward(self, x):
        scale = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power)
        return x.div(scale)
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    One propagation step: X' = BN(A @ X @ W + b), where A is a batched
    adjacency matrix over `adj_size` nodes. BatchNorm is applied over the
    flattened (node * feature) axis, so it is tied to a fixed adj_size.
    """
    def __init__(self, in_features, out_features, adj_size=9, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.adj_size = adj_size
        # Allocated uninitialized; values are set by reset_parameters() below.
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        #self.bn = nn.BatchNorm2d(self.out_features)
        # 1D BatchNorm over all node features concatenated (out_features * adj_size).
        self.bn = nn.BatchNorm1d(out_features * adj_size)
    def reset_parameters(self):
        # Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)].
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, input, adj):
        # input: (batch, adj_size, in_features); adj: (batch, adj_size, adj_size)
        # — assumed from the bmm/view usage below; TODO confirm with callers.
        support = torch.matmul(input, self.weight)
        output_ = torch.bmm(adj, support)
        if self.bias is not None:
            output_ = output_ + self.bias
        # Flatten nodes+features for BatchNorm1d, then restore the 3D shape.
        output = output_.view(output_.size(0), output_.size(1)*output_.size(2))
        output = self.bn(output)
        output = output.view(output_.size(0), output_.size(1), output_.size(2))
        return output
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
class GCN(nn.Module):
    """Two-layer GCN producing both mean-pooled and concatenated node features.

    NOTE(review): `isMeanPooling` is stored but never read — forward() always
    returns both the mean-pooled and the concatenated aggregation.
    """
    def __init__(self, adj_size, nfeat, nhid, isMeanPooling = True):
        super(GCN, self).__init__()
        self.adj_size = adj_size
        self.nhid = nhid
        self.isMeanPooling = isMeanPooling
        self.gc1 = GraphConvolution(nfeat, nhid ,adj_size)
        self.gc2 = GraphConvolution(nhid, nhid, adj_size)
    def forward(self, x, adj):
        # Dropout (p=0.5) before each graph convolution and on both outputs;
        # active only in training mode via self.training.
        x_ = F.dropout(x, 0.5, training=self.training)
        x_ = F.relu(self.gc1(x_, adj))
        x_ = F.dropout(x_, 0.5, training=self.training)
        x_ = F.relu(self.gc2(x_, adj))
        x_mean = torch.mean(x_, 1) # aggregate features of nodes by mean pooling
        x_cat = x_.view(x.size()[0], -1) # aggregate features of nodes by concatenation
        x_mean = F.dropout(x_mean, 0.5, training=self.training)
        x_cat = F.dropout(x_cat, 0.5, training=self.training)
        return x_mean, x_cat
class Baseline_SelfGCN(nn.Module):
    """Two-branch re-id model: a global ResNet branch plus a part-based
    Self-GCN branch that aggregates per-part (mask-pooled) features with a
    GCN, including a random-part-drop "self" pathway for augmentation.
    """
    # Channel width of the final ResNet feature map fed to GAP / GCN.
    gap_planes = 2048
    def __init__(self,
                 backbone,
                 num_classes,
                 num_parts,
                 last_stride,
                 with_ibn,
                 gcb,
                 stage_with_gcb,
                 pretrain=True,
                 model_path=''):
        super().__init__()
        try:
            self.base = ResNet.from_name(backbone, last_stride, with_ibn, gcb, stage_with_gcb)
            self.base_gcn = ResNet.from_name(backbone, last_stride, with_ibn, gcb, stage_with_gcb)
        # NOTE(review): bare except only prints — a bad backbone name leaves
        # self.base undefined and fails later at load_pretrain/forward.
        except:
            print(f'not support {backbone} backbone')
        if pretrain:
            self.base.load_pretrain(model_path)
            self.base_gcn.load_pretrain(model_path)
        # GCN runs over num_parts-1 nodes (part 0 / background is excluded).
        self.gcn = GCN(num_parts-1, self.gap_planes, self.gap_planes, isMeanPooling = True)
        self.num_classes = num_classes
        self.num_parts = num_parts # 1 for only foreground, 10 for masks of ten parts
        # Global Branch
        self.gap = nn.AdaptiveAvgPool2d(1)
        # Global head
        self.bottleneck = nn.BatchNorm1d(self.gap_planes)
        self.bottleneck.bias.requires_grad_(False)  # no shift
        self.classifier = nn.Linear(self.gap_planes, self.num_classes, bias=False)
        self.bottleneck.apply(weights_init_kaiming)
        self.classifier.apply(weights_init_classifier)
        # GCN head (shared between the full-part and part-dropped pathways).
        self.bottleneck_gcn = nn.BatchNorm1d(self.gap_planes)
        self.bottleneck_gcn.bias.requires_grad_(False)  # no shift
        self.classifier_gcn = nn.Linear(self.gap_planes, self.num_classes, bias=False)
        self.bottleneck_gcn.apply(weights_init_kaiming)
        self.classifier_gcn.apply(weights_init_classifier)
    def forward(self, inputs_global, inputs_gcn, mask, adj):
        """Return classification scores and features for the global, GCN,
        and part-dropped (self) pathways; score entries are None in eval mode.
        `mask` holds integer part labels per pixel; `adj` is the part
        adjacency used by the GCN.
        """
        # Global Branch
        x_global = self.base(inputs_global)
        feat_global = self.gap(x_global)  # (b, 2048, 1, 1)
        feat_global = feat_global.view(-1, feat_global.size()[1])
        bnfeat_global = self.bottleneck(feat_global)  # normalize for angular softmax
        # Self-GCN Branch
        x_gcn = self.base_gcn(inputs_gcn)
        h, w = x_gcn.size(2), x_gcn.size(3)
        mask_resize = F.interpolate(input=mask.float(), size=(h, w), mode='nearest')
        # random part drop
        x_self_list = list()
        for i in range(x_gcn.size(0)):  # randomly drop one part for each sample
            mask_self = mask_resize[i]
            part_list = []
            for c in range(1, self.num_parts):
                part = (mask_self.long() == c)
                if part.any():
                    part_list.append(c)
            drop_part = random.choice(part_list)
            mask_self = (mask_self.long() != drop_part)
            x_self = mask_self.float()*x_gcn[i]
            x_self = x_self.unsqueeze(0)
            x_self_list.append(x_self)
        x_self = torch.cat(x_self_list, dim=0)
        mask_list = list()
        # Index 0 = whole foreground; indices 1..num_parts-1 = individual parts.
        mask_list.append((mask_resize.long() > 0))
        for c in range(1, self.num_parts):
            mask_list.append((mask_resize.long() == c))  # split mask of each class
        x_list = list()
        x_self_list = list()
        for c in range(self.num_parts):
            x_list.append(mask_list[c].float() * x_gcn)  # split feature map by mask of each class
            x_self_list.append(mask_list[c].float() * x_self)
        for c in range(1, self.num_parts):
            # Masked average pooling per part; +1e-8 guards empty parts.
            x_list[c] = (x_list[c].sum(dim=2).sum(dim=2)) / \
                        (mask_list[c].squeeze(dim=1).sum(dim=1).sum(dim=1).float().unsqueeze(dim=1)+1e-8) # GAP feature of each part
            x_list[c] = x_list[c].unsqueeze(1)  # keep 2048
            x_self_list[c] = (x_self_list[c].sum(dim=2).sum(dim=2)) / \
                             (mask_list[c].squeeze(dim=1).sum(dim=1).sum(dim=1).float().unsqueeze(dim=1)+1e-8) # GAP feature of each part
            x_self_list[c] = x_self_list[c].unsqueeze(1)  # keep 2048
        mask_feat = torch.cat(x_list[1:], dim=1)  # concat all parts to feat matrix b*part*feat
        self_feat = torch.cat(x_self_list[1:], dim=1)
        feat_gcn_mean, feat_gcn_cat = self.gcn(mask_feat, adj)  # feat*9 to feat by gcn
        feat_gcn = feat_gcn_mean.view(-1, feat_gcn_mean.size()[1])
        feat_gcn_cat = feat_gcn_cat.view(-1, feat_gcn_cat.size()[1])
        feat_self_mean, feat_self_cat = self.gcn(self_feat, adj)  # feat*9 to feat by gcn
        feat_self = feat_self_mean.view(-1, feat_self_mean.size()[1])
        feat_self_cat = feat_self_cat.view(-1, feat_self_cat.size()[1])
        bnfeat_gcn = self.bottleneck_gcn(feat_gcn)
        bnfeat_self = self.bottleneck_gcn(feat_self)
        if self.training:
            cls_score = self.classifier(bnfeat_global)
            cls_score_gcn = self.classifier_gcn(bnfeat_gcn)
            cls_score_self = self.classifier_gcn(bnfeat_self)
            return cls_score, feat_global, cls_score_gcn, bnfeat_gcn, cls_score_self, bnfeat_self, feat_gcn_cat, feat_self_cat
            # return cls_score, feat_global, cls_score_gcn, feat_gcn, cls_score_self, feat_self, feat_gcn_cat, feat_self_cat
        else:
            cls_score = None
            cls_score_gcn = None
            cls_score_self = None
            return cls_score, bnfeat_global, cls_score_gcn, bnfeat_gcn, cls_score_self, bnfeat_self, feat_gcn_cat, feat_self_cat
    def load_params_wo_fc(self, state_dict):
        """Load weights while discarding the classifier heads (class-count mismatch)."""
        state_dict.pop('classifier.weight')
        state_dict.pop('classifier_gcn.weight')
        res = self.load_state_dict(state_dict, strict=False)
        print("Loading Pretrained Model ... Missing Keys: ", res.missing_keys)
    def load_params_w_fc(self, state_dict):
        """Load weights including the classifier heads."""
        res = self.load_state_dict(state_dict, strict=False)
        print("Loading Pretrained Model ... Missing Keys: ", res.missing_keys)
| nilq/baby-python | python |
# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.
from .person_routines import DefaultPersonRoutineAssignment
from ..environment import Home, GroceryStore, Office, School, Hospital, RetailStore, HairSalon, Restaurant, Bar, \
PandemicSimConfig, LocationConfig
__all__ = ['town_config', 'small_town_config', 'test_config',
'tiny_town_config', 'medium_town_config',
'above_medium_town_config']
"""
A few references for the numbers selected:
http://www.worldcitiescultureforum.com/data/number-of-restaurants-per-100.000-population (Austin)
"""
# Full-size town (~10k residents); the other configs below scale these
# counts down roughly proportionally.
town_config = PandemicSimConfig(
    num_persons=10000,
    location_configs=[
        LocationConfig(Home, num=3000),
        LocationConfig(GroceryStore, num=40, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Office, num=50, num_assignees=150, state_opts=dict(visitor_capacity=0)),
        LocationConfig(School, num=100, num_assignees=4, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Hospital, num=10, num_assignees=30, state_opts=dict(patient_capacity=10)),
        LocationConfig(RetailStore, num=40, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(HairSalon, num=40, num_assignees=3, state_opts=dict(visitor_capacity=5)),
        LocationConfig(Restaurant, num=20, num_assignees=6, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Bar, num=20, num_assignees=5, state_opts=dict(visitor_capacity=30)),
    ],
    person_routine_assignment=DefaultPersonRoutineAssignment())
# 40% scale of town_config (4k residents).
above_medium_town_config = PandemicSimConfig(
    num_persons=4000,
    location_configs=[
        LocationConfig(Home, num=1200),
        LocationConfig(GroceryStore, num=16, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Office, num=20, num_assignees=150, state_opts=dict(visitor_capacity=0)),
        LocationConfig(School, num=40, num_assignees=4, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Hospital, num=4, num_assignees=30, state_opts=dict(patient_capacity=10)),
        LocationConfig(RetailStore, num=16, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(HairSalon, num=16, num_assignees=3, state_opts=dict(visitor_capacity=5)),
        LocationConfig(Restaurant, num=8, num_assignees=6, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Bar, num=8, num_assignees=4, state_opts=dict(visitor_capacity=30))
    ],
    person_routine_assignment=DefaultPersonRoutineAssignment())
# 20% scale (2k residents).
medium_town_config = PandemicSimConfig(
    num_persons=2000,
    location_configs=[
        LocationConfig(Home, num=600),
        LocationConfig(GroceryStore, num=8, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Office, num=10, num_assignees=150, state_opts=dict(visitor_capacity=0)),
        LocationConfig(School, num=20, num_assignees=4, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Hospital, num=2, num_assignees=30, state_opts=dict(patient_capacity=10)),
        LocationConfig(RetailStore, num=8, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(HairSalon, num=8, num_assignees=3, state_opts=dict(visitor_capacity=5)),
        LocationConfig(Restaurant, num=4, num_assignees=6, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Bar, num=4, num_assignees=3, state_opts=dict(visitor_capacity=30))
    ],
    person_routine_assignment=DefaultPersonRoutineAssignment())
# 10% scale (1k residents).
small_town_config = PandemicSimConfig(
    num_persons=1000,
    location_configs=[
        LocationConfig(Home, num=300),
        LocationConfig(GroceryStore, num=4, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Office, num=5, num_assignees=150, state_opts=dict(visitor_capacity=0)),
        LocationConfig(School, num=10, num_assignees=4, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Hospital, num=1, num_assignees=30, state_opts=dict(patient_capacity=10)),
        LocationConfig(RetailStore, num=4, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(HairSalon, num=4, num_assignees=3, state_opts=dict(visitor_capacity=5)),
        LocationConfig(Restaurant, num=2, num_assignees=6, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Bar, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
    ],
    person_routine_assignment=DefaultPersonRoutineAssignment())
# 5% scale (500 residents). Note: School count stays at 10 (not scaled down);
# presumably intentional to keep class sizes small — TODO confirm.
tiny_town_config = PandemicSimConfig(
    num_persons=500,
    location_configs=[
        LocationConfig(Home, num=150),
        LocationConfig(GroceryStore, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Office, num=2, num_assignees=150, state_opts=dict(visitor_capacity=0)),
        LocationConfig(School, num=10, num_assignees=2, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Hospital, num=1, num_assignees=15, state_opts=dict(patient_capacity=5)),
        LocationConfig(RetailStore, num=2, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(HairSalon, num=2, num_assignees=3, state_opts=dict(visitor_capacity=5)),
        LocationConfig(Restaurant, num=1, num_assignees=6, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Bar, num=1, num_assignees=3, state_opts=dict(visitor_capacity=30))
    ],
    person_routine_assignment=DefaultPersonRoutineAssignment())
# Minimal config for unit tests (100 residents; no retail/hair-salon locations).
test_config = PandemicSimConfig(
    num_persons=100,
    location_configs=[
        LocationConfig(Home, num=30),
        LocationConfig(GroceryStore, num=1, num_assignees=5, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Office, num=1, num_assignees=150, state_opts=dict(visitor_capacity=0)),
        LocationConfig(School, num=10, num_assignees=2, state_opts=dict(visitor_capacity=30)),
        LocationConfig(Hospital, num=1, num_assignees=30, state_opts=dict(patient_capacity=2)),
        LocationConfig(Restaurant, num=1, num_assignees=3, state_opts=dict(visitor_capacity=10)),
        LocationConfig(Bar, num=1, num_assignees=3, state_opts=dict(visitor_capacity=10)),
    ],
    person_routine_assignment=DefaultPersonRoutineAssignment())
| nilq/baby-python | python |
from django.urls import path, include
from .views import registration_view, login_view, logout_view
# Account-related routes.
# NOTE(review): login/logout have no `name=` kwarg — reverse()/`{% url %}`
# lookups for them will fail; confirm nothing references them by name.
urlpatterns = [
    path("registration/", registration_view, name="registration_view"),
    path("login/", login_view),
    path("logout/", logout_view)
]
from holdings.position import Position
from collections import OrderedDict
from holdings.transaction import Transaction
class PositionHandler:
    """
    Helper class to handle position operations in a Portfolio object.
    """
    def __init__(self):
        # Positions keyed by security name; insertion order is preserved.
        self.positions = OrderedDict()

    def transact_position(self,
                          trans: Transaction) -> None:
        """
        Execute transaction and update position.
        :param trans: Transaction.
        :return: None.
        """
        security = trans.name
        if security in self.positions:
            self.positions[security].transact(trans)
        else:
            # First transaction for this security: open a new position.
            position = Position()
            position.transact(trans)
            self.positions[security] = position

    def total_market_value(self) -> float:
        """
        Calculate total market value for all positions.
        :return: Market value.
        """
        # Iterate .values() directly — the keys were previously bound and unused.
        return sum(pos.market_value for pos in self.positions.values())

    def total_unrealized_pnl(self) -> float:
        """
        Calculate total unrealized PnL for all positions.
        :return: Unrealized PnL.
        """
        return sum(pos.unrealized_pnl for pos in self.positions.values())

    def total_realized_pnl(self) -> float:
        """
        Calculate total realized PnL for all positions.
        :return: Realized PnL.
        """
        return sum(pos.realized_pnl for pos in self.positions.values())

    def total_pnl(self) -> float:
        """
        Calculate total PnL for all positions.
        :return: PnL.
        """
        return sum(pos.total_pnl for pos in self.positions.values())

    def total_commission(self) -> float:
        """
        Calculate total commission for all positions.
        :return: Total commission.
        """
        return sum(pos.total_commission for pos in self.positions.values())
| nilq/baby-python | python |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from concurrent import futures
import contextlib
import distutils.spawn
import errno
import importlib
import os
import os.path
import pkgutil
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
import platform
import grpc
from grpc_tools import protoc
from tests.unit.framework.common import test_constants
_MESSAGES_IMPORT = b'import "messages.proto";'
_SPLIT_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing.split;'
_COMMON_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing;'
@contextlib.contextmanager
def _system_path(path):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path + sys.path[1:]
yield
sys.path = old_system_path
class DummySplitServicer(object):
    """Trivial servicer: Call() ignores its input and returns a fresh,
    default-constructed response message."""

    def __init__(self, request_class, response_class):
        self.request_class = request_class
        self.response_class = response_class

    def Call(self, request, context):
        make_response = self.response_class
        return make_response()
class SeparateTestMixin(object):
    """Shared assertions for protoc runs whose *_pb2 and *_pb2_grpc modules
    are emitted to *separate* output directories.

    Concrete TestCases must set in setUp(): python_out_directory,
    grpc_python_out_directory, pb2_import, pb2_grpc_import and
    should_find_services_in_pb2.
    """

    def testImportAttributes(self):
        # Message types live in *_pb2; servicer only in *_pb2_grpc (unless
        # the legacy layout also puts services into *_pb2).
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        pb2.Request
        pb2.Response
        if self.should_find_services_in_pb2:
            pb2.TestServiceServicer
        else:
            with self.assertRaises(AttributeError):
                pb2.TestServiceServicer
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        pb2_grpc.TestServiceServicer
        with self.assertRaises(AttributeError):
            pb2_grpc.Request
        with self.assertRaises(AttributeError):
            pb2_grpc.Response

    def testCall(self):
        # End-to-end: serve the generated service locally and round-trip one RPC.
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
        pb2_grpc.add_TestServiceServicer_to_server(
            DummySplitServicer(pb2.Request, pb2.Response), server)
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        stub = pb2_grpc.TestServiceStub(channel)
        request = pb2.Request()
        expected_response = pb2.Response()
        response = stub.Call(request)
        self.assertEqual(expected_response, response)
class CommonTestMixin(object):
    """Shared assertions for protoc runs whose *_pb2 and *_pb2_grpc modules
    share one output directory.

    NOTE(review): body is byte-identical to SeparateTestMixin (the two
    directory attributes simply alias the same path in "common" TestCases);
    the mixins could be unified.
    """

    def testImportAttributes(self):
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        pb2.Request
        pb2.Response
        if self.should_find_services_in_pb2:
            pb2.TestServiceServicer
        else:
            with self.assertRaises(AttributeError):
                pb2.TestServiceServicer
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        pb2_grpc.TestServiceServicer
        with self.assertRaises(AttributeError):
            pb2_grpc.Request
        with self.assertRaises(AttributeError):
            pb2_grpc.Response

    def testCall(self):
        # End-to-end: serve the generated service locally and round-trip one RPC.
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
        pb2_grpc.add_TestServiceServicer_to_server(
            DummySplitServicer(pb2.Request, pb2.Response), server)
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        stub = pb2_grpc.TestServiceStub(channel)
        request = pb2.Request()
        expected_response = pb2.Response()
        response = stub.Call(request)
        self.assertEqual(expected_response, response)
@unittest.skipIf(platform.python_implementation() == "PyPy",
                 "Skip test if run with PyPy")
class SameSeparateTest(unittest.TestCase, SeparateTestMixin):
    """same.proto compiled with messages and services in separate out dirs."""

    def setUp(self):
        same_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
        self.directory = tempfile.mkdtemp(suffix='same_separate', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = os.path.join(self.directory,
                                                      'grpc_python_out')
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        os.makedirs(self.grpc_python_out_directory)
        same_proto_file = os.path.join(self.proto_directory,
                                       'same_separate.proto')
        # Write via a context manager — open(...).write(...) leaked the
        # file handle (ResourceWarning / delayed close on some platforms).
        with open(same_proto_file, 'wb') as proto_file:
            proto_file.write(
                same_proto_contents.replace(
                    _COMMON_NAMESPACE,
                    b'package grpc_protoc_plugin.invocation_testing.same_separate;'
                ))
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out=grpc_2_0:{}'.format(
                self.grpc_python_out_directory),
            same_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        # Make both output directories importable packages.
        for out_directory in (self.grpc_python_out_directory,
                              self.python_out_directory):
            with open(os.path.join(out_directory, '__init__.py'),
                      'w') as init_file:
                init_file.write('')
        self.pb2_import = 'same_separate_pb2'
        self.pb2_grpc_import = 'same_separate_pb2_grpc'
        self.should_find_services_in_pb2 = False

    def tearDown(self):
        shutil.rmtree(self.directory)
@unittest.skipIf(platform.python_implementation() == "PyPy",
                 "Skip test if run with PyPy")
class SameCommonTest(unittest.TestCase, CommonTestMixin):
    """same.proto compiled with messages and services in one shared out dir."""

    def setUp(self):
        same_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
        self.directory = tempfile.mkdtemp(suffix='same_common', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = self.python_out_directory
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        same_proto_file = os.path.join(self.proto_directory,
                                       'same_common.proto')
        # Context managers close the handles deterministically (the original
        # open(...).write(...) calls leaked them).
        with open(same_proto_file, 'wb') as proto_file:
            proto_file.write(
                same_proto_contents.replace(
                    _COMMON_NAMESPACE,
                    b'package grpc_protoc_plugin.invocation_testing.same_common;'))
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out={}'.format(self.grpc_python_out_directory),
            same_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        with open(os.path.join(self.python_out_directory, '__init__.py'),
                  'w') as init_file:
            init_file.write('')
        self.pb2_import = 'same_common_pb2'
        self.pb2_grpc_import = 'same_common_pb2_grpc'
        self.should_find_services_in_pb2 = True

    def tearDown(self):
        shutil.rmtree(self.directory)
@unittest.skipIf(platform.python_implementation() == "PyPy",
                 "Skip test if run with PyPy")
class SplitCommonTest(unittest.TestCase, CommonTestMixin):
    """Split messages/services protos compiled into one shared out dir."""

    def setUp(self):
        services_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_services',
            'services.proto')
        messages_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_messages',
            'messages.proto')
        self.directory = tempfile.mkdtemp(suffix='split_common', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = self.python_out_directory
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        services_proto_file = os.path.join(self.proto_directory,
                                           'split_common_services.proto')
        messages_proto_file = os.path.join(self.proto_directory,
                                           'split_common_messages.proto')
        # Context managers close the handles deterministically (the original
        # open(...).write(...) calls leaked them).
        with open(services_proto_file, 'wb') as services_file:
            services_file.write(
                services_proto_contents.replace(
                    _MESSAGES_IMPORT, b'import "split_common_messages.proto";')
                .replace(
                    _SPLIT_NAMESPACE,
                    b'package grpc_protoc_plugin.invocation_testing.split_common;'))
        with open(messages_proto_file, 'wb') as messages_file:
            messages_file.write(
                messages_proto_contents.replace(
                    _SPLIT_NAMESPACE,
                    b'package grpc_protoc_plugin.invocation_testing.split_common;'))
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out={}'.format(self.grpc_python_out_directory),
            services_proto_file,
            messages_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        with open(os.path.join(self.python_out_directory, '__init__.py'),
                  'w') as init_file:
            init_file.write('')
        self.pb2_import = 'split_common_messages_pb2'
        self.pb2_grpc_import = 'split_common_services_pb2_grpc'
        self.should_find_services_in_pb2 = False

    def tearDown(self):
        shutil.rmtree(self.directory)
@unittest.skipIf(platform.python_implementation() == "PyPy",
                 "Skip test if run with PyPy")
class SplitSeparateTest(unittest.TestCase, SeparateTestMixin):
    """Split messages/services protos compiled into separate out dirs."""

    def setUp(self):
        services_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_services',
            'services.proto')
        messages_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_messages',
            'messages.proto')
        self.directory = tempfile.mkdtemp(suffix='split_separate', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = os.path.join(self.directory,
                                                      'grpc_python_out')
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        os.makedirs(self.grpc_python_out_directory)
        services_proto_file = os.path.join(self.proto_directory,
                                           'split_separate_services.proto')
        messages_proto_file = os.path.join(self.proto_directory,
                                           'split_separate_messages.proto')
        # Context managers close the handles deterministically (the original
        # open(...).write(...) calls leaked them).
        with open(services_proto_file, 'wb') as services_file:
            services_file.write(
                services_proto_contents.replace(
                    _MESSAGES_IMPORT, b'import "split_separate_messages.proto";')
                .replace(
                    _SPLIT_NAMESPACE,
                    b'package grpc_protoc_plugin.invocation_testing.split_separate;'
                ))
        with open(messages_proto_file, 'wb') as messages_file:
            messages_file.write(
                messages_proto_contents.replace(
                    _SPLIT_NAMESPACE,
                    b'package grpc_protoc_plugin.invocation_testing.split_separate;'
                ))
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out=grpc_2_0:{}'.format(
                self.grpc_python_out_directory),
            services_proto_file,
            messages_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        # NOTE(review): only python_out gets an __init__.py here (matching the
        # original behavior); the grpc_python_out dir is left without one.
        with open(os.path.join(self.python_out_directory, '__init__.py'),
                  'w') as init_file:
            init_file.write('')
        self.pb2_import = 'split_separate_messages_pb2'
        self.pb2_grpc_import = 'split_separate_services_pb2_grpc'
        self.should_find_services_in_pb2 = False

    def tearDown(self):
        shutil.rmtree(self.directory)
if __name__ == '__main__':
unittest.main(verbosity=2)
| nilq/baby-python | python |
''' Handling the data io '''
import argparse
import torch
import sys
def read_vocab_idx(vocab_path):
    """Build a word -> index map from a tab-separated vocab file.

    Each line is "<word>\t<index>"; index 0 is reserved for the "_PAD" token.
    """
    word2idx = {"_PAD": 0}
    with open(vocab_path) as handle:
        for line in handle:
            fields = line.strip("\n").split("\t")
            word2idx[fields[0]] = int(fields[1])
    print('[Info] Trimmed vocabulary size = {},'.format(len(word2idx)))
    return word2idx
def read_ent_des(inst_file):
    """Load entity descriptions: "<entity> <tok> <tok> ..." per line.

    Returns (ent_des, ent_des_dict): ent_des[i] is the token-id list for the
    entity on line i (1-based; row 0 is an all-zero padding row, lists are
    truncated to 20 tokens), and ent_des_dict maps entity name -> row index.
    """
    ent_des_dict = dict()
    ent_des = [[0] * 20]
    with open(inst_file) as handle:
        for row, line in enumerate(handle, start=1):
            tokens = line.strip().split()
            ent_des_dict[tokens[0]] = row
            ent_des.append([int(tok) for tok in tokens[1:]][:20])
    return ent_des, ent_des_dict
def read_ent_car(inst_file):
    """Load entity character/word ids: "<entity> <tok> <tok> ..." per line.

    Same layout as read_ent_des but rows are truncated to 10 tokens; row 0 is
    an all-zero padding row and ent_wrd_dict maps entity name -> row index.
    """
    ent_wrd_dict = dict()
    ent_wrd = [[0] * 10]
    with open(inst_file) as handle:
        for row, line in enumerate(handle, start=1):
            tokens = line.strip().split()
            ent_wrd_dict[tokens[0]] = row
            ent_wrd.append([int(tok) for tok in tokens[1:]][:10])
    return ent_wrd, ent_wrd_dict
def main():
    ''' Main function.

    Reads the three vocab files and the entity description/character files
    given on the command line, then pickles everything into one torch-saved
    dict at -save_data.
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-ent_des', required=True)
    parser.add_argument('-ent_car', required=True)
    parser.add_argument('-save_data', required=True)
    parser.add_argument('-wrd_vocab', required=True)
    parser.add_argument('-ent_vocab', required=True)
    parser.add_argument('-car_vocab', required=True)
    opt = parser.parse_args()
    # Token-name -> id maps for words, entities and characters.
    wrd2idx = read_vocab_idx(opt.wrd_vocab)
    ent2idx = read_vocab_idx(opt.ent_vocab)
    car2idx = read_vocab_idx(opt.car_vocab)
    # Entity -> row-index dicts plus the padded id matrices themselves.
    ent_des, ent_des_dict = read_ent_des(opt.ent_des)
    ent_wrd, ent_wrd_dict = read_ent_car(opt.ent_car)
    data = {
        'settings': opt,
        'wrd2idx': wrd2idx,
        'ent2idx': ent2idx,
        'car2idx': car2idx,
        'ent_des_dict' : ent_des_dict,
        'ent_des' : ent_des,
        'ent_wrd_dict': ent_wrd_dict,
        'ent_wrd': ent_wrd}
    print('[Info] Dumping the processed data to pickle file', opt.save_data)
    torch.save(data, opt.save_data)
    print('[Info] Finish.')
if __name__ == '__main__':
#reload(sys)
#sys.setdefaultencoding('utf-8')
main() | nilq/baby-python | python |
class Serie:
    """Lightweight model of a room series built from raw API data."""

    # BUG FIX: "_badge" was missing from __slots__, so _sync()'s assignment
    # to self._badge raised AttributeError on every construction.
    __slots__ = ("__weakref__", "_state", "_rooms", "_badge", "id", "code",
                 "name", "description", "difficulty", )

    def __init__(self, state, data):
        self._state = state
        self._from_data(data)

    def _from_data(self, data):
        """Populate attributes from the raw payload dict."""
        self.id = data.get("_id")
        self.code = data.get("id")
        self.name = data.get("name")
        self.description = data.get("description")
        self.difficulty = data.get("difficulty")
        self._rooms = data.get("rooms")
        self._sync(data)

    def _sync(self, data):
        # Cache the badge object through the shared state store.
        self._badge = self._state.store_badge(data.get("badge"))

    @property
    def rooms(self):
        # NOTE(review): assumes the payload included a "rooms" list —
        # iterating raises TypeError if it was absent (self._rooms is None).
        return [self._state.get_room(room.get("code")) for room in self._rooms]
| nilq/baby-python | python |
import pathlib
#import numpy as np
# Advent-of-Code day 13 ("Transparent Origami") input parsing.
# Set test_data = 1 to read data_test.csv instead of data.csv.
test_data = 0
points = set()   # set of (x, y) dot coordinates
folds = []       # list of ('x'|'y', line) fold instructions
path = str(pathlib.Path(__file__).parent.resolve())
with open(path+"/data{}.csv".format("_test" if test_data else ""), 'r') as file:
    for line in file.read().splitlines():
        if line.startswith("fold"):
            # "fold along x=655": char 11 is the axis, chars 13+ the value.
            folds.append((line[11], int(line[13:])))
        elif line:
            points.add(tuple(map(int,line.split(","))))
# for p in points: print(p)
# for f in folds: print(f)
print("###### TASK 1 ######")
def tf(x, f):
    """Reflect coordinate x across fold line f; unchanged on the kept side.

    Equivalent to: x if x <= f else 2*f - x (min picks the reflected value
    exactly when x lies beyond the fold line).
    """
    return min(x, 2 * f - x)
def fold(f):
    # Apply one fold instruction f = (axis, line) to the module-global
    # `points` set in place, reflecting every dot on the far side of the
    # line via tf(). Iterates a snapshot (list(points)) because the set is
    # mutated during the loop. NOTE(review): local name `np` shadows the
    # conventional numpy alias (the numpy import above is commented out).
    for p in list(points):
        np = (p[0] if f[0] == 'y' else tf(p[0],f[1]), p[1] if f[0] == 'x' else tf(p[1],f[1]))
        if not p == np:
            points.remove(p)
            points.add(np)
# Task 1: dot count after the first fold only.
fold(folds[0])
answer = len(points)
print("Answer: ", answer)
print("###### TASK 2 ######")
# Task 2: apply the remaining folds; the surviving dots draw letters.
for f in folds[1:]: fold(f)
answer = len(points)
for p in points: print(p)
print("Answer: ", answer)
# Render the dots as an ASCII grid so the code letters can be read.
grid = []
for y in range(max(points, key=lambda p: p[1])[1]+1):
    grid.append([ '#' if (x,y) in points else '.' for x in range(max(points, key=lambda p: p[0])[0]+1)])
for p in grid: print("".join(p))
class Attr(object):
    """Type-checked data descriptor storing its value in the owner's __dict__."""

    def __init__(self, name, type_):
        self.name = name      # key used in the instance __dict__
        self.type_ = type_    # type enforced on assignment

    def __get__(self, instance, cls):
        # BUG FIX: class-level access passes instance=None; per the
        # descriptor protocol return the descriptor itself instead of
        # crashing on None.__dict__.
        if instance is None:
            return self
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        if not isinstance(value, self.type_):
            raise TypeError('expected an %s' % self.type_)
        instance.__dict__[self.name] = value

    def __delete__(self, instance):
        raise AttributeError("can't delete this attr")
class Person(object):
    # Each field is a type-checked Attr descriptor: assignment raises
    # TypeError unless the value matches the declared type.
    name = Attr('name', str)
    age = Attr('age', int)
    height = Attr('height', float)
    weight = Attr('weight', float)
# Demo usage: all assignments satisfy their declared types.
s = Person()
s.name = 'Bob'
s.age = 17
s.height = 1.82
s.weight = 52.5
| nilq/baby-python | python |
from django.urls import path, include
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from rest_framework import routers, urlpatterns
from .views import *
# Single route exposing the user-registration endpoint.
# NOTE(review): `admin`, `include`, `csrf_exempt` and `routers` are imported
# above but unused here — confirm before cleaning up.
urlpatterns = [
    path("register/", Register.as_view(), name="register-user"),
]
| nilq/baby-python | python |
import re
import storage
import args
import requests
import concurrent.futures
from bs4 import BeautifulSoup
from urllib.parse import urlparse, urljoin
class NaiveCrawler:
    def __init__(self, initial_url, allowed_domains, depth, database, init=True):
        """Set up crawl state, restore any persisted session from `database`,
        and print the start banner.

        :param initial_url: seed URL (overridden if a previous crawl is restored)
        :param allowed_domains: domains (netlocs) the crawler may visit
        :param depth: crawl depth limit (stored; enforcement not visible here)
        :param database: storage backend exposing json_load() (see recall_last_crawl)
        :param init: False once a previous session has been restored
        """
        self.init = init
        self.initial_url = initial_url
        self.current_url = ""
        self.allowed_domains = allowed_domains
        self.depth = depth
        self.links_to_visit = set()   # frontier of URLs not yet fetched
        self.visited_links = set()    # URLs already fetched
        self.db = database
        self.recall_last_crawl()
        self.display_status_on_init()
    @staticmethod
    def is_absolute(url):
        # Absolute URLs carry a network location (host); relative ones do not.
        return bool(urlparse(url).netloc)
def recall_last_crawl(self):
try:
prev_state = self.db.json_load()
if prev_state:
self.current_url = prev_state["current_url"]
self.visited_links = set(prev_state["visited_links"])
self.links_to_visit = set(prev_state["links_to_visit"])
self.initial_url = self.current_url
self.init = False
else:
pass
except Exception as ex:
return ex
def display_status_on_init(self):
print(f"\U0001F7E2\tCrawler starting at:\n{self.current_url}\n")
print(f"\U0001F645\tRestricted to crawl {len(self.allowed_domains)} domain(s):\n{self.allowed_domains} for depth: {self.depth}")
def is_valid(self, candidate):
if candidate in self.visited_links:
return False
if re.search('tel:', candidate)\
or re.search('mailto:', candidate)\
or re.search('#', candidate):
return False
# Fetch domain name (including potential subdomain)
current_domain_name = urlparse(candidate).netloc
# try:
# current_subdomain = current_domain_name.split('.')[0]
# except Exception:
# # No subdomain
# pass
# Validate if traversal is restricted
if current_domain_name not in self.allowed_domains:
return False
url_ojbect = urlparse(candidate)
return any([url_ojbect.scheme, url_ojbect.netloc, url_ojbect.path])
@staticmethod
def get_relative_path(href):
if href.startswith("/"):
return href[1:len(href)]
return href
def get_links(self):
try:
if self.init:
self.links_to_visit.add(self.initial_url)
self.init = False
# Pop out an arbitrary element from the set
self.current_link = self.links_to_visit.pop()
current_page = requests.get(self.current_link)
print(f"\n\U0001F577\U0001F578\tCrawler \U0001F440 at:\n{self.current_link}")
self.visited_links.add(self.current_link)
soup = BeautifulSoup(current_page.content, 'html.parser')
return soup.find_all('a')
except Exception:
print("\U0001F6AB Invalid URL.")
return False
def crawl(self):
links = self.get_links()
if links:
for i, link in enumerate(links):
if link is not None:
link_href = link.get('href')
if not self.is_absolute(link_href):
relative_path = self.get_relative_path(link_href)
parsed_linked_href = urlparse(link_href)
scheme = parsed_linked_href.scheme
current_domain_name = urlparse(self.current_link).netloc
if not scheme: scheme = 'http'
link_href = f"{scheme}://{current_domain_name}/{relative_path}"
if not self.is_valid(link_href):
continue
self.links_to_visit.add(link_href)
print(f"Links to visit: {len(self.links_to_visit)}")
def initium(self):
try:
if self.init:
threads = 1
else:
threads = min(32, len(self.links_to_visit)+1)
for i in range(self.depth):
# print(f'\n\U0001F577\U0001F578\tCrawler_{i}')
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as dominus:
dominus.submit(self.crawl())
print(f"\U0001F534\tCrawler stopped after crawling {len(self.visited_links)} link(s).")
print(f"\U0001F481\tFound {len(self.links_to_visit)} page(s) to crawl.\n")
# Save the state
self.salvare()
except Exception as ex:
print(f"The following error occured:\n{ex}")
return
def salvare(self):
state = {
"current_url": self.current_link,
"visited_links": list(self.visited_links),
"links_to_visit": list(self.links_to_visit)
}
self.db.json_save(state)
| nilq/baby-python | python |
from django.urls import path
from .views import DocumentAPIView, DocumentDetails
# Routes for the documents API: the collection endpoint and a detail
# endpoint keyed by the integer document id.
urlpatterns = [
    path('document/', DocumentAPIView.as_view()),
    path('document/<int:id>/', DocumentDetails.as_view()),
]
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
import os
import sys
import topology_sdk.api.view.create_view_pb2
import topology_sdk.api.view.delete_view_pb2
import topology_sdk.api.view.fetch_cmdb_business_view_pb2
import topology_sdk.model.topology.view_pb2
import topology_sdk.api.view.fetch_origin_view_pb2
import topology_sdk.api.view.get_view_pb2
import topology_sdk.api.view.list_view_pb2
import topology_sdk.api.view.update_view_pb2
import topology_sdk.utils.http_util
import google.protobuf.json_format
class ViewClient(object):
    """HTTP client for the topology view service.

    Each public method wraps one API contract. Request/response payloads are
    protobuf messages, converted to and from JSON dicts at the HTTP boundary.
    All seven endpoints share the same call pattern, factored into _invoke().
    """
    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """
        Initialize the client.
        :param server_ip: explicit server ip; empty routes via the naming service
        :param server_port: explicit server port; must be set together with server_ip
        :param service_name: explicit service name for routing; empty routes by
            contract name (service_name takes precedence over server_ip routing)
        :param host: host header for requests, e.g. cmdb.easyops-only.com
        """
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host
    def _route_name(self, contract_name):
        """Resolve the routing destination for one call.

        service_name takes precedence; otherwise route by the API contract name
        when an explicit server_ip is configured; otherwise fall back to
        naming-service routing (empty string).
        """
        if self._service_name != "":
            return self._service_name
        elif self._server_ip != "":
            return contract_name
        return ""
    def _invoke(self, method, uri, contract_name, request, response, org, user, timeout):
        """Perform one API call and parse the JSON payload into *response*.

        :param method: HTTP verb ("GET"/"POST"/"PUT"/"DELETE")
        :param uri: request path, already formatted
        :param contract_name: API contract used for routing when server_ip is set
        :param request: protobuf request message (serialized to a JSON dict)
        :param response: protobuf response message to fill from rsp["data"]
        :param org: customer org id (int)
        :param user: calling user name
        :param timeout: request timeout in seconds
        :return: *response*, populated
        """
        headers = {"org": org, "user": user}
        rsp_obj = topology_sdk.utils.http_util.do_api_request(
            method=method,
            src_name="logic.topology_sdk",
            dst_name=self._route_name(contract_name),
            server_ip=self._server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                request, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        google.protobuf.json_format.ParseDict(rsp_obj["data"], response, ignore_unknown_fields=True)
        return response
    def create_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.create_view_pb2.CreateViewRequest, int, str, int) -> topology_sdk.api.view.create_view_pb2.CreateViewResponse
        """Create a view."""
        return self._invoke(
            "POST", "/api/v1/view", "easyops.api.topology.view.CreateView",
            request, topology_sdk.api.view.create_view_pb2.CreateViewResponse(),
            org, user, timeout)
    def delete_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.delete_view_pb2.DeleteViewRequest, int, str, int) -> topology_sdk.api.view.delete_view_pb2.DeleteViewResponse
        """Delete a view by id."""
        uri = "/api/v1/view/{id}".format(
            id=request.id,
        )
        return self._invoke(
            "DELETE", uri, "easyops.api.topology.view.DeleteView",
            request, topology_sdk.api.view.delete_view_pb2.DeleteViewResponse(),
            org, user, timeout)
    def fetch_cmdb_business_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.fetch_cmdb_business_view_pb2.FetchCmdbBusinessViewRequest, int, str, int) -> topology_sdk.model.topology.view_pb2.View
        """Fetch the special topology data rooted at a CMDB business origin."""
        return self._invoke(
            "POST", "/api/v1/view/cmdb/business",
            "easyops.api.topology.view.FetchCmdbBusinessView",
            request, topology_sdk.model.topology.view_pb2.View(),
            org, user, timeout)
    def fetch_origin_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.fetch_origin_view_pb2.FetchOriginViewRequest, int, str, int) -> topology_sdk.model.topology.view_pb2.View
        """Fetch topology data rooted at an origin node."""
        return self._invoke(
            "POST", "/api/v1/view/origin",
            "easyops.api.topology.view.FetchOriginView",
            request, topology_sdk.model.topology.view_pb2.View(),
            org, user, timeout)
    def get_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.get_view_pb2.GetViewRequest, int, str, int) -> topology_sdk.model.topology.view_pb2.View
        """Get the detailed data of one view by id."""
        uri = "/api/v1/view/{id}".format(
            id=request.id,
        )
        return self._invoke(
            "GET", uri, "easyops.api.topology.view.GetView",
            request, topology_sdk.model.topology.view_pb2.View(),
            org, user, timeout)
    def list_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.list_view_pb2.ListViewRequest, int, str, int) -> topology_sdk.api.view.list_view_pb2.ListViewResponse
        """List views."""
        return self._invoke(
            "GET", "/api/v1/view", "easyops.api.topology.view.ListView",
            request, topology_sdk.api.view.list_view_pb2.ListViewResponse(),
            org, user, timeout)
    def update_view(self, request, org, user, timeout=10):
        # type: (topology_sdk.api.view.update_view_pb2.UpdateViewRequest, int, str, int) -> topology_sdk.api.view.update_view_pb2.UpdateViewResponse
        """Update a view by id."""
        uri = "/api/v1/view/{id}".format(
            id=request.id,
        )
        return self._invoke(
            "PUT", uri, "easyops.api.topology.view.UpdateView",
            request, topology_sdk.api.view.update_view_pb2.UpdateViewResponse(),
            org, user, timeout)
| nilq/baby-python | python |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v1.proto.resources import mobile_device_constant_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_mobile__device__constant__pb2
from google.ads.google_ads.v1.proto.services import mobile_device_constant_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_mobile__device__constant__service__pb2
# NOTE: gRPC-generated code (see file header) — regenerate from the .proto
# rather than hand-editing.
class MobileDeviceConstantServiceStub(object):
  """Proto file describing the mobile device constant service.
  Service to fetch mobile device constants.
  """
  def __init__(self, channel):
    """Constructor.
    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: serializes the request message, deserializes the
    # MobileDeviceConstant resource returned by the service.
    self.GetMobileDeviceConstant = channel.unary_unary(
        '/google.ads.googleads.v1.services.MobileDeviceConstantService/GetMobileDeviceConstant',
        request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_mobile__device__constant__service__pb2.GetMobileDeviceConstantRequest.SerializeToString,
        response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_mobile__device__constant__pb2.MobileDeviceConstant.FromString,
        )
# NOTE: gRPC-generated code — regenerate from the .proto rather than hand-editing.
class MobileDeviceConstantServiceServicer(object):
  """Proto file describing the mobile device constant service.
  Service to fetch mobile device constants.
  """
  def GetMobileDeviceConstant(self, request, context):
    """Returns the requested mobile device constant in full detail.
    """
    # Default behaviour until a concrete servicer overrides this method.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
# Registers *servicer*'s RPC handlers on *server* under the fully-qualified
# service name. gRPC-generated code — regenerate rather than hand-edit.
def add_MobileDeviceConstantServiceServicer_to_server(servicer, server):
  rpc_method_handlers = {
      'GetMobileDeviceConstant': grpc.unary_unary_rpc_method_handler(
          servicer.GetMobileDeviceConstant,
          request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_mobile__device__constant__service__pb2.GetMobileDeviceConstantRequest.FromString,
          response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_mobile__device__constant__pb2.MobileDeviceConstant.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.ads.googleads.v1.services.MobileDeviceConstantService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| nilq/baby-python | python |
# Author: Aqeel Anwar(ICSRL)
# Created: 10/14/2019, 12:50 PM
# Email: aqeel.anwar@gatech.edu
import numpy as np
import os, subprocess, psutil
import math
import random
import time
import airsim
import pygame
from configs.read_cfg import read_cfg
import matplotlib.pyplot as plt
def close_env(env_process):
    """Kill the Unreal environment process together with all its children."""
    parent = psutil.Process(env_process.pid)
    children = parent.children(recursive=True)
    for child in children:
        child.kill()
    parent.kill()
def save_network_path(cfg, algorithm_cfg):
    """Compute (and create) the directory where trained weights are stored.

    Sets algorithm_cfg.network_path to
    models/trained/<env_type>/<env_name>/<CustomLoad|Imagenet>/<train_type>/<train_type>
    and creates that directory when it does not exist.

    :param cfg: global config with env_type and env_name attributes.
    :param algorithm_cfg: algorithm config with custom_load and train_type
        attributes; its network_path attribute is overwritten.
    :return: the (cfg, algorithm_cfg) pair with network_path updated.
    """
    # Save the network to the directory network_path
    weights_type = 'Imagenet'
    if algorithm_cfg.custom_load == True:
        algorithm_cfg.network_path = 'models/trained/' + cfg.env_type + '/' + cfg.env_name + '/' + 'CustomLoad/' + algorithm_cfg.train_type + '/' + algorithm_cfg.train_type
    else:
        # Fix: the original concatenated a stray '/' here, producing an
        # inconsistent 'models/trained//<env_type>/...' path for this branch.
        algorithm_cfg.network_path = 'models/trained/' + cfg.env_type + '/' + cfg.env_name + '/' + weights_type + '/' + algorithm_cfg.train_type + '/' + algorithm_cfg.train_type
    if not os.path.exists(algorithm_cfg.network_path):
        os.makedirs(algorithm_cfg.network_path)
    return cfg, algorithm_cfg
def communicate_across_agents(agent, name_agent_list, algorithm_cfg):
    """Synchronize network weights across agents per the distributed algorithm.

    :param agent: dict mapping agent names to agent objects.
    :param name_agent_list: names of the agents participating.
    :param algorithm_cfg: config carrying distributed_algo and average_connectivity.
    :return: True only for GlobalLearningGlobalUpdate; False otherwise
        (the two Local* branches never set update_done — kept as-is).
    """
    name_agent = name_agent_list[0]
    update_done = False
    if algorithm_cfg.distributed_algo == 'GlobalLearningGlobalUpdate':
        # No need to do anything
        update_done = True
    elif algorithm_cfg.distributed_algo == 'LocalLearningGlobalUpdate':
        # Average every agent's graph into one shared set of weights.
        agent_on_same_network = name_agent_list
        agent[name_agent].initialize_graphs_with_average(agent, agent_on_same_network)
    elif algorithm_cfg.distributed_algo == 'LocalLearningLocalUpdate':
        # Randomly partition agents into groups of size average_connectivity
        # and average weights within each group.
        agent_connectivity_graph = []
        for j in range(int(np.floor(len(name_agent_list) / algorithm_cfg.average_connectivity))):
            div1 = random.sample(name_agent_list, algorithm_cfg.average_connectivity)
            # print(div1)
            agent_connectivity_graph.append(div1)
            name_agent_list = list(set(name_agent_list) - set(div1))
        if name_agent_list:
            agent_connectivity_graph.append(name_agent_list)
        # NOTE(review): averaging below is always invoked on the first agent
        # (name_agent) for every group — confirm this is intentional.
        for agent_network in agent_connectivity_graph:
            agent_on_same_network = agent_network
            agent[name_agent].initialize_graphs_with_average(agent, agent_on_same_network)
    return update_done
def start_environment(env_name):
    """Launch the packaged Unreal environment executable for *env_name*.

    :return: (env_process, env_folder) — the Popen handle and the folder path.
    """
    print_orderly('Environment', 80)
    env_folder = os.path.dirname(os.path.abspath(__file__)) + "/unreal_envs/" + env_name + "/"
    path = env_folder + env_name + ".exe"
    # env_process = []
    env_process = subprocess.Popen(path)
    # Give the simulator time to boot before clients try to connect.
    time.sleep(5)
    print("Successfully loaded environment: " + env_name)
    return env_process, env_folder
def initialize_infer(env_cfg, client, env_folder):
    """Prepare the inference plots (altitude trace + navigation map).

    Heights are shifted so the floor maps to 0 and converted from cm to m
    (the /100 factors). Returns plotting handles used during inference.

    :return: (p_z, f_z, fig_z, ax_z, line_z, fig_nav, ax_nav, nav)
    """
    if not os.path.exists(env_folder+'results'):
        os.makedirs(env_folder+'results')
    # Mapping floor to 0 height
    f_z = env_cfg.floor_z/100
    c_z = (env_cfg.ceiling_z-env_cfg.floor_z)/100
    p_z = (env_cfg.player_start_z-env_cfg.floor_z)/100
    plt.ion()
    # Altitude-over-time figure, clamped between floor (0) and ceiling.
    fig_z = plt.figure()
    ax_z = fig_z.add_subplot(111)
    line_z, = ax_z.plot(0, 0)
    ax_z.set_ylim(0, c_z)
    plt.title("Altitude variation")
    # start_posit = client.simGetVehiclePose()
    # Top-down navigation figure over the environment floorplan image.
    fig_nav = plt.figure()
    ax_nav = fig_nav.add_subplot(111)
    img = plt.imread(env_folder+ env_cfg.floorplan)
    ax_nav.imshow(img)
    plt.axis('off')
    plt.title("Navigational map")
    plt.plot(env_cfg.o_x, env_cfg.o_y, 'b*', linewidth=20)
    nav, = ax_nav.plot(env_cfg.o_x, env_cfg.o_y)
    return p_z,f_z, fig_z, ax_z, line_z, fig_nav, ax_nav, nav
def translate_action(action, num_actions):
    """Map a flat action index to a direction word such as 'U2-L1'.

    The action space is a square grid of sqrt(num_actions) x sqrt(num_actions)
    moves. The letters encode vertical (U/F/D) and horizontal (L/F/R)
    direction, and each digit is the ceil distance from the grid centre.
    """
    grid_side = np.sqrt(num_actions)
    half = (grid_side - 1) / 2
    if grid_side % 2 == 0:
        vertical = list('U' * int(half) + 'D' * int(half))
        horizontal = list('L' * int(half) + 'R' * int(half))
    else:
        vertical = list('U' * int(grid_side / 2) + 'F' + 'D' * int(grid_side / 2))
        horizontal = list('L' * int(grid_side / 2) + 'F' + 'R' * int(grid_side / 2))
    row = int(action[0] / grid_side)
    col = int(action[0] % grid_side)
    row_dist = str(int(np.ceil(abs(half - row))))
    col_dist = str(int(np.ceil(abs(half - col))))
    return vertical[row] + row_dist + '-' + horizontal[col] + col_dist
def get_errors(data_tuple, choose, ReplayMemory, input_size, agent, target_agent, gamma, Q_clip):
    """Return only the TD-error terms for *data_tuple* (element 3 of the
    minibatch_double result)."""
    result = minibatch_double(data_tuple, len(data_tuple), choose, ReplayMemory,
                              input_size, agent, target_agent, gamma, Q_clip)
    return result[3]
def minibatch_double(data_tuple, batch_size, choose, ReplayMemory, input_size, agent, target_agent, gamma, Q_clip):
    """Assemble a minibatch and compute double-DQN targets.

    :param data_tuple: transitions (state, action, next_state, reward, crash);
        used directly when batch_size == 1, otherwise sampled from ReplayMemory.
    :param choose: selects which of agent/target_agent plays the role of the
        online vs. target network in the double-Q update.
    :param gamma: discount factor; Q_clip: clip TD terms to [-1, 1] when True.
    :return: (curr_states, Q_target, actions, err, idx) where err = |TD| for
        prioritized-replay updates and idx are replay-buffer indices (None for
        a single-sample batch).
    NOTE(review): transitions with reward == -1 are treated as terminal.
    """
    # Needs NOT to be in DeepAgent
    # NO TD error term, and using huber loss instead
    # Bellman Optimality equation update, with less computation, updated
    if batch_size==1:
        train_batch = data_tuple
        idx=None
    else:
        batch = ReplayMemory.sample(batch_size)
        train_batch = np.array([b[1][0] for b in batch])
        idx = [b[0] for b in batch]
    # Unpack the sampled transitions into contiguous arrays.
    actions = np.zeros(shape=(batch_size), dtype=int)
    crashes = np.zeros(shape=(batch_size))
    rewards = np.zeros(shape=batch_size)
    curr_states = np.zeros(shape=(batch_size, input_size, input_size, 3))
    new_states = np.zeros(shape=(batch_size, input_size, input_size, 3))
    for ii, m in enumerate(train_batch):
        curr_state_m, action_m, new_state_m, reward_m, crash_m = m
        curr_states[ii, :, :, :] = curr_state_m[...]
        actions[ii] = action_m
        new_states[ii,:,:,:] = new_state_m
        rewards[ii] = reward_m
        crashes[ii] = crash_m
    #
    # oldQval = np.zeros(shape = [batch_size, num_actions])
    # Double-DQN: network A selects the argmax action, network B evaluates it.
    if choose:
        oldQval_A = target_agent.Q_val(curr_states)
        newQval_A = target_agent.Q_val(new_states)
        newQval_B = agent.Q_val(new_states)
    else:
        oldQval_A = agent.Q_val(curr_states)
        newQval_A = agent.Q_val(new_states)
        newQval_B = target_agent.Q_val(new_states)
    TD = np.zeros(shape=[batch_size])
    err = np.zeros(shape=[batch_size])
    Q_target = np.zeros(shape=[batch_size])
    term_ind = np.where(rewards==-1)[0]
    nonterm_ind = np.where(rewards!=-1)[0]
    # Non-terminal: r + gamma * Q_B(s', argmax_a Q_A(s', a)) - Q_A(s, a).
    TD[nonterm_ind] = rewards[nonterm_ind] + gamma* newQval_B[nonterm_ind, np.argmax(newQval_A[nonterm_ind], axis=1)] - oldQval_A[nonterm_ind, actions[nonterm_ind].astype(int)]
    TD[term_ind] = rewards[term_ind]
    if Q_clip:
        TD_clip = np.clip(TD, -1, 1)
    else:
        TD_clip = TD
    Q_target[nonterm_ind] = oldQval_A[nonterm_ind, actions[nonterm_ind].astype(int)] + TD_clip[nonterm_ind]
    Q_target[term_ind] = TD_clip[term_ind]
    err=abs(TD) # or abs(TD_clip)
    return curr_states, Q_target, actions, err, idx
def policy(epsilon, curr_state, iter, b, epsilon_model, wait_before_train, num_actions, agent):
    """Epsilon-schedule action selection.

    epsilon is annealed (linearly or exponentially) from 0 toward epsilon_ceil
    over iterations [wait_before_train, b]. A random action is taken when
    random.random() > epsilon, a network prediction otherwise — i.e. here
    epsilon is the probability of the *greedy* action, the inverse of the
    textbook epsilon-greedy convention (NOTE(review): confirm intent).

    :return: (action, action_type, epsilon, qvals) where action_type is
        'Rand' or 'Pred' and qvals is always [] in this implementation.
    """
    qvals=[]
    #epsilon_ceil = 0.99/0.8/0.7
    epsilon_ceil=0.8
    if epsilon_model=='linear':
        epsilon = epsilon_ceil* (iter-wait_before_train) / (b-wait_before_train)
        if epsilon > epsilon_ceil:
            epsilon = epsilon_ceil
    elif epsilon_model=='exponential':
        epsilon = 1- math.exp(-2/(b-wait_before_train) * (iter-wait_before_train) )
        if epsilon > epsilon_ceil:
            epsilon = epsilon_ceil
    if random.random() > epsilon:
        # One random action per environment in the batch.
        sss =curr_state.shape
        action = np.random.randint(0, num_actions, size = sss[0], dtype=np.int32)
        action_type = 'Rand'
    else:
        # Use NN to predict action
        action = agent.action_selection(curr_state)
        action_type = 'Pred'
    # print(action_array/(np.mean(action_array)))
    return action, action_type, epsilon, qvals
def reset_to_initial(level, reset_array, client, vehicle_name):
    """Teleport *vehicle_name* to the stored reset pose for *level*.

    NOTE: the 'ignore_collison' spelling matches the AirSim API keyword of the
    version this code targets — do not "correct" it without checking the
    installed airsim package.
    """
    # client.moveByVelocityAsync(vx=0, vy=0, vz=0, duration=0.01, vehicle_name=vehicle_name)
    reset_pos = reset_array[vehicle_name][level]
    # reset_pos = p
    client.simSetVehiclePose(reset_pos, ignore_collison=True, vehicle_name=vehicle_name)
    # Brief pause to let the simulator apply the new pose.
    time.sleep(0.05)
def print_orderly(str, n):
    """Print *str* centred in a hyphen ruler roughly *n* characters wide."""
    print('')
    pad = '-' * int((n - len(str)) / 2)
    print(pad + ' ' + str + ' ' + pad)
def connect_drone(ip_address='127.0.0.0', phase='infer', num_agents=1):
    """Connect to AirSim, arm and take off num_agents drones named drone0..N-1.

    :return: (client, old_posit, initZ) — the AirSim client, a dict of initial
        poses per agent name, and the takeoff z of the *last* agent.
    NOTE(review): initZ is only assigned inside the loop, so num_agents == 0
    would raise NameError on return.
    """
    print_orderly('Drone', 80)
    client = airsim.MultirotorClient(ip=ip_address, timeout_value=10)
    client.confirmConnection()
    # old_posit = client.simGetVehiclePose()
    # if phase == 'train':
    #     client.simSetVehiclePose(
    #         airsim.Pose(airsim.Vector3r(0, 0, 0), old_posit.orientation),
    #         ignore_collison=True)
    # elif phase == 'infer':
    #     print("Yes")
    old_posit = {}
    for agents in range(num_agents):
        name_agent = "drone"+ str(agents)
        client.enableApiControl(True, name_agent)
        client.armDisarm(True, name_agent)
        client.takeoffAsync(vehicle_name=name_agent).join()
        old_posit[name_agent] = client.simGetVehiclePose(vehicle_name=name_agent)
        initZ = old_posit[name_agent].position.z_val
    # client.enableApiControl(True)
    # client.armDisarm(True)
    # client.takeoffAsync().join()
    return client, old_posit, initZ
def blit_text(surface, text, pos, font, color=pygame.Color('black')):
    """Render multi-line *text* onto *surface*, word-wrapping at its width."""
    lines = [line.split(' ') for line in text.splitlines()]  # words per line
    space_width = font.size(' ')[0]
    max_width, max_height = surface.get_size()
    x, y = pos
    for line in lines:
        for word in line:
            word_surface = font.render(word, 0, color)
            word_width, word_height = word_surface.get_size()
            if x + word_width >= max_width:
                # Wrap: back to the left margin, down one row.
                x, y = pos[0], y + word_height
            surface.blit(word_surface, (x, y))
            x += word_width + space_width
        # Explicit newline in the source text.
        x, y = pos[0], y + word_height
def pygame_connect(phase):
    """Open the pygame window showing the key bindings for *phase*.

    :param phase: 'train' or 'infer' — selects which key-map image to show.
    :return: the pygame display surface.
    """
    pygame.init()
    if phase == 'train':
        img_path = 'images/train_keys.png'
    elif phase == 'infer':
        img_path = 'images/infer_keys.png'
    img = pygame.image.load(img_path)
    screen = pygame.display.set_mode(img.get_rect().size)
    screen.blit(img, (0, 0))
    pygame.display.set_caption('DLwithTL')
    # screen.fill((21, 116, 163))
    # text = 'Supported Keys:\n'
    # font = pygame.font.SysFont('arial', 32)
    # blit_text(screen, text, (20, 20), font, color = (214, 169, 19))
    # pygame.display.update()
    #
    # font = pygame.font.SysFont('arial', 24)
    # text = 'R - Reconnect unreal\nbackspace - Pause/play\nL - Update configurations\nEnter - Save Network'
    # blit_text(screen, text, (20, 70), font, color=(214, 169, 19))
    pygame.display.update()
    return screen
def check_user_input(active, automate, agent, client, old_posit, initZ, fig_z, fig_nav, env_folder,cfg, algorithm_cfg):
    """Process pygame events and apply the train/infer keyboard controls.

    Train mode: L reloads config, Enter saves the model, Backspace toggles
    automation, R reconnects the drone; when automation is off, arrow/letter
    keys select manual actions. Infer mode: S saves the figures, Backspace
    halts the drone and toggles automation.

    :return: (active, automate, algorithm_cfg, client), possibly updated.
    NOTE(review): the manual-control branch assigns a local `action` that is
    never consumed (the take_action call is commented out).
    """
    # algorithm_cfg.learning_rate, algorithm_cfg.epsilon,algorithm_cfg.network_path,cfg.mode,
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            active = False
            pygame.quit()
        # Training keys control
        if event.type == pygame.KEYDOWN and cfg.mode =='train':
            if event.key == pygame.K_l:
                # Load the parameters - epsilon
                path = 'configs/' + cfg.algorithm + '.cfg'
                algorithm_cfg = read_cfg(config_filename=path, verbose=False)
                cfg, algorithm_cfg = save_network_path(cfg=cfg, algorithm_cfg=algorithm_cfg)
                print('Updated Parameters')
            if event.key == pygame.K_RETURN:
                # take_action(-1)
                automate = False
                print('Saving Model')
                # agent.save_network(iter, save_path, ' ')
                agent.save_network(algorithm_cfg.network_path)
                # agent.save_data(iter, data_tuple, tuple_path)
                print('Model Saved: ', algorithm_cfg.network_path)
            if event.key == pygame.K_BACKSPACE:
                # XOR with True flips the automation flag.
                automate = automate ^ True
            if event.key == pygame.K_r:
                client, old_posit, initZ = connect_drone(ip_address=cfg.ip_address, phase=cfg.mode,
                                                         num_agents=cfg.num_agents)
                agent.client = client
            # Set the routine for manual control if not automate
            if not automate:
                # print('manual')
                # action=[-1]
                if event.key == pygame.K_UP:
                    action = 0
                elif event.key == pygame.K_RIGHT:
                    action = 1
                elif event.key == pygame.K_LEFT:
                    action = 2
                elif event.key == pygame.K_d:
                    action = 3
                elif event.key == pygame.K_a:
                    action = 4
                elif event.key == pygame.K_DOWN:
                    action = -2
                elif event.key == pygame.K_y:
                    # Climb to 3x the takeoff altitude.
                    pos = client.getPosition()
                    client.moveToPosition(pos.x_val, pos.y_val, 3 * initZ, 1)
                    time.sleep(0.5)
                elif event.key == pygame.K_h:
                    client.reset()
                # agent.take_action(action)
        elif event.type == pygame.KEYDOWN and cfg.mode == 'infer':
            if event.key == pygame.K_s:
                # Save the figures
                file_path = env_folder + 'results/'
                fig_z.savefig(file_path+'altitude_variation.png', dpi=1000)
                fig_nav.savefig(file_path+'navigation.png', dpi=1000)
                print('Figures saved')
            if event.key == pygame.K_BACKSPACE:
                # Stop the drone before pausing/resuming automation.
                client.moveByVelocityAsync(vx=0, vy=0, vz=0, duration=0.1)
                automate = automate ^ True
    return active, automate, algorithm_cfg, client
| nilq/baby-python | python |
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p "[python3] ++ (with pkgs.python37Packages; [ requests future ws4py pytest pylint coveralls twine wheel ])"
# <<END Extended Shebang>>
import json
from pywebostv.discovery import *
from pywebostv.connection import *
from pywebostv.controls import *
# Load the stored TV credentials/registration state and connect to the TV.
with open('/home/camus/.lgtv.json') as f:
    store = json.load(f)
client = WebOSClient(store['hostname'])
client.connect()
# Registration is interactive the first time (the TV shows a prompt).
for status in client.register(store):
    if status == WebOSClient.PROMPTED:
        print("Please accept the connect on the TV!")
    elif status == WebOSClient.REGISTERED:
        print("Registration successful!")
# Control handles for the registered session.
# NOTE(review): ctrl and inp are both InputControl instances — ctrl appears unused.
ctrl = InputControl(client)
system = SystemControl(client)
media = MediaControl(client)
app = ApplicationControl(client)
inp = InputControl(client)
inp.connect_input()
# vim: set filetype=python :
| nilq/baby-python | python |
# Simulation configuration (see the Parameters docstring below).
NSIDE = 16
STRENGTH = 500
BACKGROUND = 1000
TILT = 45
ALTERNATING = False
TEST = False
TALK = True
plot = True
#%matplotlib inline only a notebook feature.
"""
Parameters
----------
NSIDE : int
    Must be a power of 2, corresponding to the number of pixels to occupy TSM (ie NSIDE = 8 => 768 pixels, etc.)
STRENGTH : float
    The desired strength of the incident GRBs.
BACKGROUND : float
    The desired background in the detectors.
TILT : float
    Angle in degrees to bend the detectors. Optimal range is somewhere between 30 and 45 degrees.
ALTERNATING : bool
    Condition on whether or not you want to alternate the tilt pattern of the detectors.
TEST : bool
    Condition on whether or not you are testing over the entire sky, or just one for testing purposes.
TALK : bool
    Condition on whether or not you want simulation to tell you the sky localization for every point, as it is running.
"""
from NoahCube import Sky, BurstCube
sim1 = Sky(NSIDE,STRENGTH)
#run this file, and you immediately get
testcube = BurstCube(BACKGROUND,TILT,alternating =ALTERNATING)
if TALK:
    print("Initializing...")
_ = testcube.initialize #supress output, this creates the ideal response database for reference.
if TALK:
    print("done!")
# Simulate the detector response to every GRB in the sky model.
offsets , errors = testcube.response2GRB(sim1,talk=TALK,test = TEST)
if plot:
    #Only difference is the graphs are opened in the notebook, as opposed to saved.
    from healpy import newvisufunc
    import matplotlib.pyplot as plt
    # All-sky map of localization offsets.
    newvisufunc.mollview(offsets,min=0, max=15,unit='Localization Offset (degrees)',graticule=True,graticule_labels=True)
    if type(ALTERNATING) == int:
        plt.title('All Sky Localization Accuracy for BurstCube with Orientation ' + str(TILT) +' by '+str(ALTERNATING) +' deg' ) #should add something about design too!
        #plt.savefig('offset'+'tilt'+str(TILT)+'s'+str(STRENGTH)+'bg'+str(BACKGROUND)+'.png')
        plt.savefig('offset'+str(TILT)+'by'+str(ALTERNATING)+'s'+str(STRENGTH)+'bg'+str(BACKGROUND)+'.png')
    else:
        plt.title('All Sky Localization Offsets for BurstCube with Orientation ' + str(TILT) + ' deg' ) #should add something about design too!
        plt.savefig('offset'+str(TILT)+'s'+str(STRENGTH)+'bg'+str(BACKGROUND)+'.png')
    plt.figure()
    # All-sky map of localization errors.
    newvisufunc.mollview(errors,min=0, max=100,unit='Localization Error (degrees)',graticule=True,graticule_labels=True)
    if type(ALTERNATING) == int:
        plt.title('All Sky Localization Errors for BurstCube with Orientation ' + str(TILT) +' by '+str(ALTERNATING) +' deg' ) #should add something about design too!
        plt.savefig('error'+str(TILT)+'by'+str(ALTERNATING)+'s'+str(STRENGTH)+'bg'+str(BACKGROUND)+'.png')
        #plt.savefig('error'+'tilt'+str(TILT)+'s'+str(STRENGTH)+'bg'+str(BACKGROUND)+'.png')
    else:
        plt.title('All Sky Localization Errors for BurstCube with Orientation ' + str(TILT) + ' deg' )
        plt.savefig('error'+str(TILT)+'s'+str(STRENGTH)+'bg'+str(BACKGROUND)+'.png')
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""Script to run the experiment for anglicisms with different parameters"""
import experiment_context_window_comparative as ecwc
# Sweep the experiment over several context-window sizes and score functions.
score_fns = ['binary', 'raw_count', 'chi_sq', 'dice']
score_lists = {}
for window_size in [4,25,60,100]:
# for window_size in [90,100,110]:
    for score_fn in score_fns:
        if not score_fn in score_lists:
            score_lists[score_fn] = {}
        # NOTE(review): conduct()'s return value is discarded, so score_lists
        # is never populated (the aggregation code below is commented out).
        ecwc.conduct(verbose = False, window_size = window_size, score_fn = score_fn)
        # for i, score in enumerate(scores):
        #     if not score in score_lists[score_fn]:
        #         score_lists[score_fn][score] = []
        #     score_lists[score_fn][score].append(results[1,i])
# print(json.dumps(score_lists, sort_keys=True, indent = 4)) | nilq/baby-python | python |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 12:50:46 2017
@author: Sergio Cristauro Manzano
"""
from ..DB.MySQL_Aena import MySQLAccessAena as DBContext #Server
#from self.db.MySQL import MySQLAccess as DBContext #Local
class RepositoryVuelosEntrantesAena():
    """Read-only repository for AENA inbound-flight ("vuelos entrantes") queries.

    Every method follows the same three-step pattern:
      1. open a fresh database context and keep it on ``self.db``;
      2. store the column labels describing the result set on ``self.labels``;
      3. return a 2-tuple ``(rows, labels)`` where ``rows`` is whatever the
         identically-named DBContext query method returns.

    NOTE(review): year/month arguments are forwarded as ``str(...)`` --
    presumably the underlying SQL layer expects string parameters; confirm
    against MySQLAccessAena.
    """
    #########################################################################
    # Inbound flights ("vuelos entrantes") queries
    #########################################################################
    def ObtenerPaisOrigenYVuelosEntrantesAenaDadoPaisDestinoAnio(self, paisDestino, anio):
        # Origin countries + inbound flight counts for one destination country and year.
        self.db = DBContext()
        self.labels = ['Pais_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisOrigenYVuelosEntrantesAenaDadoPaisDestinoAnio(paisDestino, str(anio)), self.labels)

    def ObtenerPaisOrigenYVuelosEntrantesAenaDadoPaisDestinoCiudadDestinoAnio(self, paisDestino, CiudadDestino, anio):
        # Same as above, narrowed to one destination city.
        self.db = DBContext()
        self.labels = ['Pais_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisOrigenYVuelosEntrantesAenaDadoPaisDestinoCiudadDestinoAnio(paisDestino, CiudadDestino, str(anio)), self.labels)

    def ObtenerPaisesOrigenYVuelosEntrantesMensualmenteDuranteAniosAenaDadoPaisDestinoAnio(self, paisDestino, anio):
        # Monthly breakdown of origin countries for one destination country and year.
        self.db = DBContext()
        self.labels = ['Mes', 'Pais_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisesOrigenYVuelosEntrantesMensualmenteDuranteAniosAenaDadoPaisDestinoAnio(paisDestino, str(anio)), self.labels)

    def ObtenerPaisesOrigenYVuelosEntrantesAnualmenteAenaDadoPaisDestinoAnioMinMax(self, paisDestino, anioInicio, anioFin):
        # Yearly breakdown of origin countries over a [anioInicio, anioFin] range.
        self.db = DBContext()
        self.labels = ['Anio', 'Pais_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisesOrigenYVuelosEntrantesAnualmenteAenaDadoPaisDestinoAnioMinMax(paisDestino, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerPaisesOrigenCiudadesOrigenYVuelosEntrantesDuranteAnioAenaDadoPaisDestinoAnio(self, paisDestino, anio):
        # Origin country + origin city breakdown for one destination country and year.
        self.db = DBContext()
        self.labels = ['Pais_Origen', 'Ciudad_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisesOrigenCiudadesOrigenYVuelosEntrantesDuranteAnioAenaDadoPaisDestinoAnio(paisDestino, str(anio)), self.labels)

    def ObtenerPaisesOrigenCiudadesOrigenYVuelosEntrantesAnualmenteAenaDadoPaisDestinoAnioMinMax(self, paisDestino, anioInicio, anioFin):
        # Yearly origin country + origin city breakdown over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Pais_Origen', 'Ciudad_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisesOrigenCiudadesOrigenYVuelosEntrantesAnualmenteAenaDadoPaisDestinoAnioMinMax(paisDestino, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerPaisesOrigenCiudadesOrigenYVuelosEntrantesAnualmenteAenaDadoPaisDestinoMesAnioMinMax(self, paisDestino, Mes, anioInicio, anioFin):
        # Yearly origin-country breakdown for a fixed month over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Pais_Origen', 'Numero_Vuelos']
        return (self.db.ObtenerPaisesOrigenCiudadesOrigenYVuelosEntrantesAnualmenteAenaDadoPaisDestinoMesAnioMinMax(paisDestino, Mes, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoAnioMinMax(self, paisDestino, anioInicio, anioFin):
        # Total inbound flights per year over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoAnioMinMax(paisDestino, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaMensualmenteDadoPaisDestinoAnioMinMax(self, paisDestino, anioInicio, anioFin):
        # Total inbound flights per year and month over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Mes', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaMensualmenteDadoPaisDestinoAnioMinMax(paisDestino, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaEnUnMesDadoPaisDestinoMesAnioMinMax(self, paisDestino, mes, anioInicio, anioFin):
        # Total inbound flights for a fixed month, per year over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaEnUnMesDadoPaisDestinoMesAnioMinMax(paisDestino, mes, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaMensualmenteDadoPaisDestinoAnio(self, paisDestino, anio):
        # Monthly inbound flight totals for one destination country and year.
        self.db = DBContext()
        self.labels = ['Mes', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaMensualmenteDadoPaisDestinoAnio(paisDestino, str(anio)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaDivididosPorCiudadesDadoPaisDestinoAnioMinMax(self, paisDestino, anioInicio, anioFin):
        # Yearly inbound flights split by destination city over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Ciudad', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaDivididosPorCiudadesDadoPaisDestinoAnioMinMax(paisDestino, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesEnUnMesAenaDivididosPorCiudadesDadoPaisDestinoMesAnioMinMax(self, paisDestino, mes, anioInicio, anioFin):
        # Inbound flights by city for a fixed month, per year over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Ciudad', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesEnUnMesAenaDivididosPorCiudadesDadoPaisDestinoMesAnioMinMax(paisDestino, mes, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaEnUnAnioDivididosPorCiudadDadoPaisDestinoAnio(self, paisDestino, anio):
        # Inbound flights by destination city for a single year.
        self.db = DBContext()
        self.labels = ['Ciudad', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaEnUnAnioDivididosPorCiudadDadoPaisDestinoAnio(paisDestino, str(anio)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaMensualmenteDivididosPorCiudadDadoPaisDestinoMesAnio(self, paisDestino, mes, Anio):
        # Inbound flights by destination city for a single month of a year.
        self.db = DBContext()
        self.labels = ['Ciudad', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaMensualmenteDivididosPorCiudadDadoPaisDestinoMesAnio(paisDestino, mes, str(Anio)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoCiudadDestinoAnioMinMax(self, paisDestino, CiudadDestino, anioInicio, anioFin):
        # Yearly inbound flights for one destination city over a year range.
        self.db = DBContext()
        self.labels = ['Anio', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoCiudadDestinoAnioMinMax(paisDestino, CiudadDestino, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaEnUnMesDadoPaisDestinoCiudadDestinoMesAnioMinMax(self, paisDestino, CiudadDestino, mes, anioInicio, anioFin):
        # Yearly inbound flights for one destination city in a fixed month.
        self.db = DBContext()
        self.labels = ['Anio', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaEnUnMesDadoPaisDestinoCiudadDestinoMesAnioMinMax(paisDestino, CiudadDestino, mes, str(anioInicio), str(anioFin)), self.labels)

    def ObtenerDatosVuelosEntrantesAenaEnUnAnioEnUnaCiudadMensualmenteDadoPaisDestinoCiudadAnio(self, paisDestino, CiudadDestino, Anio):
        # Monthly inbound flight totals for one destination city and year.
        self.db = DBContext()
        self.labels = ['Mes', 'Numero_Vuelos']
        return (self.db.ObtenerDatosVuelosEntrantesAenaEnUnAnioEnUnaCiudadMensualmenteDadoPaisDestinoCiudadAnio(paisDestino, CiudadDestino, str(Anio)), self.labels)
| nilq/baby-python | python |
import datetime
import requests
import lxml.html as lh
import pandas as pd
## VARS -- user-tweakable settings
# Code of the Meteocat XEMA meteo station to query.
station_code = 'CE'
# (year, month, day) of the first day to retrieve data for.
meteodate = (2021, 5, 13)
# How many consecutive days of data to retrieve.
meteodays = 62
# Name of the Excel file the combined table is written to.
excelfile = r'meteo_data.xlsx'
# Worksheet name inside that Excel file.
sheet_name = r'HostaletsPierola'
## CONSTANTS - DON'T MODIFY BEYOND THIS LINE
# URL template: first placeholder is the station code, second the ISO date.
meteocat_url_template = "https://www.meteo.cat/observacions/xema/dades?codi={}&dia={}T00:00Z"
# Column layout of the meteocat daily-observation table; since data from
# several days is combined, an extra "fecha" (date) column is added first.
column_headers = ["fecha", "periodo", "tm", "tx", "tn", "hrm", "ppt", "vvm", "dvm", "vvx", "pm", "rs"]
final_data = pd.DataFrame(columns=column_headers)
## FUNCTIONS
def generate_date_range(startdate, days):
    """Return `days` consecutive ISO dates ('YYYY-MM-DD') starting at `startdate`.

    Parameters
    ----------
    startdate : tuple of int
        (year, month, day) of the first date.
    days : int
        Number of consecutive days to generate; an empty list if <= 0.

    Notes
    -----
    Fixes the original's no-op ``a_date.format()`` call (format() with no
    arguments returns the string unchanged) and replaces the manual
    append-loop with a comprehension.
    """
    start_date = datetime.date(startdate[0], startdate[1], startdate[2])
    # date.isoformat() already yields the 'yyyy-mm-dd' form we want.
    return [(start_date + datetime.timedelta(days=offset)).isoformat()
            for offset in range(days)]
# Scrape one observation table per day and accumulate every row; the frame is
# built once at the end instead of calling DataFrame.append per row (append is
# deprecated since pandas 1.4 and quadratic in the number of rows).
all_rows = []
for currentmeteodate in generate_date_range(meteodate, meteodays):
    meteocat_url_formatted = meteocat_url_template.format(station_code, currentmeteodate)
    print(f"Obteniendo información meteorológica de la estación {station_code} para el dia {currentmeteodate}...")
    html = requests.get(meteocat_url_formatted)
    htmlcontent = lh.fromstring(html.content)
    # Each <tr> of the 'tblperiode' table is one observation period (11 cells).
    meteodata_elements = htmlcontent.xpath("//table[@class='tblperiode']//tr")
    # Skip the header row; prefix every data row with its date.
    for row_contents in meteodata_elements[1:]:
        data = [currentmeteodate]
        for column_contents in row_contents.iterchildren():
            data.append(column_contents.text_content().strip())
        all_rows.append(data)
final_data = pd.DataFrame(all_rows, columns=column_headers)
final_data.to_excel(excelfile, sheet_name=sheet_name, index=False, startrow=1, startcol=1, header=True)
# Bug fix: the original message called .format(excelfile) on a string with no
# placeholder, so the file name was never shown.
print(f'Los datos se han volcado al fichero {excelfile}.')
| nilq/baby-python | python |
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# AUTHOR: César Miranda Meza
# COMPLETITION DATE: November 23, 2021.
# LAST UPDATE: November 27, 2021.
#
# This code is used to apply the classification evaluation metric known as the
# F1 score. This is done with the two databases for linear equation systems,
# that differ only because one has a random bias value and the other does not.
# In addition, both of these databases have 1'000'000 samples each. Moreover,
# the well known scikit-learn library will be used to calculate the F1 score
# metric (https://bit.ly/32rKQ0t) and then its result will be compared with
# the one obtained with the CenyML library as a means of validating the code
# of CenyML.
# --------------------------------------------------------------------------- #
# --------------------------------------------------------------------------- #
# Python version 3.9.7
# ----------------------------------- #
# ----- Importing the Libraries ----- #
# ----------------------------------- #
import pandas as pd # version 1.3.3
import numpy as np # version 1.21.2
import time
from sklearn.metrics import f1_score # version 1.0.1
# -------------------------------------------- #
# ----- Define the user variables values ----- #
# -------------------------------------------- #
m = 1  # number of independent variables in the system under study
p = 1  # number of dependent variables in the system under study
columnIndexOfOutputDataInCsvFile = 2;  # index of the first CSV column that holds output values (Y and/or Y_hat)
# ------------------------------ #
# ----- Import the dataset ----- #
# ------------------------------ #
# Read the .csv file containing the results of the CenyML library.
print("Innitializing data extraction from .csv file containing the CenyML results ...")
startingTime = time.time()
dataset_CenyML_getF1scoreResults = pd.read_csv('CenyML_getF1score_Results.csv')
elapsedTime = time.time() - startingTime
print("Data extraction from .csv file with the CenyML results elapsed " + format(elapsedTime) + " seconds.")
print("")
# Read the .csv file containing the real output data.
print("Innitializing data extraction from .csv file containing the real output data ...")
startingTime = time.time()
dataset_rLES1000S1000SPS = pd.read_csv("../../../../databases/classification/randLinearEquationSystem/100systems_100samplesPerAxisPerSys.csv")
elapsedTime = time.time() - startingTime
n = len(dataset_rLES1000S1000SPS)
csvColumns = len(dataset_rLES1000S1000SPS.iloc[0])
print("Data extraction from .csv file containing " + format(n) + " samples for each of the " + format(csvColumns) + " columns (total samples = " + format(n*csvColumns) + ") elapsed " + format(elapsedTime) + " seconds.")
print("")
# Read the .csv file containing the predicted output data.
print("Innitializing data extraction from .csv file containing the predicted output data ...")
startingTime = time.time()
dataset_lES1000S1000SPS = pd.read_csv("../../../../databases/classification/linearEquationSystem/100systems_100samplesPerAxisPerSys.csv")
elapsedTime = time.time() - startingTime
n = len(dataset_lES1000S1000SPS)
csvColumns = len(dataset_lES1000S1000SPS.iloc[0])
print("Data extraction from .csv file containing " + format(n) + " samples for each of the " + format(csvColumns) + " columns (total samples = " + format(n*csvColumns) + ") elapsed " + format(elapsedTime) + " seconds.")
print("")
# ------------------------------------- #
# ----- Preprocessing of the data ----- #
# ------------------------------------- #
# Copy the real output columns into the n x p matrix Y.
print("Innitializing real output data with " + format(n) + " samples for each of the " + format(p) + " columns (total samples = " + format(n*p) + ") ...")
startingTime = time.time()
Y = np.zeros((n, 0))
for currentColumn in range(0, p):
    temporalRow = dataset_rLES1000S1000SPS.iloc[:,(currentColumn + columnIndexOfOutputDataInCsvFile)].values.reshape(n, 1)
    Y = np.append(Y, temporalRow, axis=1)
elapsedTime = time.time() - startingTime
print("Real output data innitialization elapsed " + format(elapsedTime) + " seconds.")
print("")
# Copy the predicted output columns into the n x p matrix Y_hat.
print("Innitializing predicted output data with " + format(n) + " samples for each of the " + format(p) + " columns (total samples = " + format(n*p) + ") ...")
startingTime = time.time()
Y_hat = np.zeros((n, 0))
for currentColumn in range(0, p):
    temporalRow = dataset_lES1000S1000SPS.iloc[:,(currentColumn + columnIndexOfOutputDataInCsvFile)].values.reshape(n, 1)
    Y_hat = np.append(Y_hat, temporalRow, axis=1)
elapsedTime = time.time() - startingTime
print("Predicted output data innitialization elapsed " + format(elapsedTime) + " seconds.")
print("")
# ------------------------------------- #
# ----- Apply the F1 score metric ----- #
# ------------------------------------- #
# Reference computation with scikit-learn, to be compared against CenyML.
print("Innitializing scikit-learn F1 score metric calculation ...")
startingTime = time.time()
F1score = f1_score(Y, Y_hat)
elapsedTime = time.time() - startingTime
print("scikit-learn F1 score metric elapsed " + format(elapsedTime) + " seconds.")
print("")
# ---------------------------------------------------------------- #
# ----- Determine if the CenyML Library's method was correct ----- #
# ---------------------------------------------------------------- #
# Compare the results from the CenyML Library and the ones obtained in Python.
# epsilon is the maximum tolerated absolute difference between both results.
print("The results will begin their comparation process...")
startingTime = time.time()
epsilon = 3.88e-7
isMatch = 1
for currentColumn in range(0, p):
    differentiation = abs(dataset_CenyML_getF1scoreResults.iloc[0][currentColumn] - F1score)
    if (differentiation > epsilon):
        isMatch = 0
        print("The absolute differentiation of the Column: " + dataset_CenyML_getF1scoreResults.columns.tolist()[currentColumn] + " and the Row: " + format(0) + " exceeded the value defined for epsilon.")
        print("The absolute differentiation obtained was: " + format(differentiation))
        break
if (isMatch == 1):
    print("The results obtained in Python and in the CenyML Library matched !!!.")
elapsedTime = time.time() - startingTime
print("The comparation process elapsed " + format(elapsedTime) + " seconds.")
| nilq/baby-python | python |
import csv
import pickle
# Load the pipe-delimited call-state table into a dict keyed by callsign
# (first field), with the remaining fields kept as a list.
callstate = {}
with open('call_state.dat') as fin:
    for record in csv.reader(fin, skipinitialspace=True, delimiter='|', quotechar="'"):
        callstate[record[0]] = record[1:]
print('Done')
print("Saving Object")
# Persist the dictionary with pickle so later runs can load it quickly.
with open('callstate.dictionary', 'wb') as callstate_dictionary_file:
    pickle.dump(callstate, callstate_dictionary_file)
# Spot-check a few known callsigns.
print(callstate["N3KA"])
print(callstate["N3LGA"])
print(callstate["WA6SM"])
| nilq/baby-python | python |
import json
import directory
def parse_key_group_name(key_group_name = 'Group.basic'):
    """Parse a '<Type>.<value>' key-group name into a (type, value) pair.

    The type part is matched case-insensitively. Returns
    ('folder', value) for 'Folder.*' names (value kept as-is) and
    ('group', value.lower()) for 'Group.*' names.

    Raises
    ------
    ValueError
        If the name is not exactly two non-empty dot-separated parts, or
        the type part is neither 'Folder' nor 'Group'.
    """
    line = key_group_name.split('.')
    if len(line) != 2 or not line[0] or not line[1]:
        raise ValueError('key_group_name not correct, please see dtsk_python_load_demo.py')
    name_type, name_value = line
    if name_type.lower() == 'folder':
        return 'folder', name_value
    elif name_type.lower() == 'group':
        return 'group', name_value.lower()
    else:
        # Bug fix: the message was previously passed as a second constructor
        # argument ("...{0}", name_type) instead of being interpolated.
        raise ValueError('key_group_name not support {0}'.format(name_type))
def get_key_list(key_type, key_value, config_json_content):
    """Resolve a parsed key group to the list of keys it denotes.

    'folder' wraps the single value in a list; 'group' looks the value up
    in the configuration mapping. Raises ValueError for any other type.
    """
    if key_type == 'folder':
        return [key_value]
    elif key_type == 'group':
        return config_json_content[key_value]
    else:
        # Bug fix: the message was previously passed as a second constructor
        # argument ("...{0}", key_type) instead of being interpolated.
        raise ValueError('key_type {0} not supported'.format(key_type))
def load(key_group = 'Group.basic', remote_root = 'Default', local_cache_root = ''):
    """Load the list of keys named by `key_group` from the key-group config.

    Opens StockInfo/dtsk_key_group.json via directory.open_prioritized_file
    (remote with a local cache -- presumably; confirm against the directory
    module), parses the '<Type>.<value>' group name, and returns the
    resolved list of keys.
    """
    key_group_file = directory.open_prioritized_file(\
        file_relative_path = 'StockInfo/dtsk_key_group.json',\
        remote_root = remote_root, local_cache_root = local_cache_root)
    config_json_content = json.load(key_group_file)
    key_type, key_value = parse_key_group_name(key_group)
    key_list = get_key_list(key_type, key_value, config_json_content)
    return key_list
| nilq/baby-python | python |
# coding: utf-8
import unittest
from problems.power_of_two import Solution
from problems.power_of_two import Solution2
from problems.power_of_two import Solution3
class TestCase(unittest.TestCase):
    """Checks Solution.isPowerOfTwo on known powers and non-powers of two."""

    def setUp(self):
        self.solution = Solution()

    def test(self):
        cases = [
            {'n': 0, 'expected': False},
            {'n': 1, 'expected': True},
            {'n': 2, 'expected': True},
            {'n': 1024, 'expected': True},
            {'n': 100000, 'expected': False},
        ]
        for case in cases:
            with self.subTest(n=case['n']):
                self.assertEqual(self.solution.isPowerOfTwo(case['n']), case['expected'])
class TestCase2(unittest.TestCase):
    """Checks Solution2.isPowerOfTwo on known powers and non-powers of two."""

    def setUp(self):
        self.solution = Solution2()

    def test(self):
        cases = [
            {'n': 0, 'expected': False},
            {'n': 1, 'expected': True},
            {'n': 2, 'expected': True},
            {'n': 1024, 'expected': True},
            {'n': 100000, 'expected': False},
        ]
        for case in cases:
            with self.subTest(n=case['n']):
                self.assertEqual(self.solution.isPowerOfTwo(case['n']), case['expected'])
class TestCase3(unittest.TestCase):
    """Checks Solution3.isPowerOfTwo on known powers and non-powers of two."""

    def setUp(self):
        self.solution = Solution3()

    def test(self):
        cases = [
            {'n': 0, 'expected': False},
            {'n': 1, 'expected': True},
            {'n': 2, 'expected': True},
            {'n': 1024, 'expected': True},
            {'n': 100000, 'expected': False},
        ]
        for case in cases:
            with self.subTest(n=case['n']):
                self.assertEqual(self.solution.isPowerOfTwo(case['n']), case['expected'])


if __name__ == '__main__':
    unittest.main()
| nilq/baby-python | python |
import unittest
import numpy as np
from pax import core, plugin
from pax.datastructure import Event, Peak
class TestPosRecTopPatternFunctionFit(unittest.TestCase):
    """Integration test for pax's PosRecTopPatternFunctionFit S2 position
    reconstruction, seeded by the PosRecWeightedSum guess."""

    def setUp(self):
        # Build a minimal pax processor that only loads the two position
        # reconstruction plugins under test.
        self.pax = core.Processor(config_names='XENON1T',
                                  just_testing=True,
                                  config_dict={'pax': {'plugin_group_names': ['test'],
                                                       'look_for_config_in_runs_db': False,
                                                       'test': ['WeightedSum.PosRecWeightedSum',
                                                                'TopPatternFit.PosRecTopPatternFunctionFit'],
                                                       'logging_level': 'debug'}})
        self.guess_plugin = self.pax.get_plugin_by_name('PosRecWeightedSum')
        self.plugin = self.pax.get_plugin_by_name('PosRecTopPatternFunctionFit')

    def tearDown(self):
        delattr(self, 'pax')
        delattr(self, 'plugin')
        delattr(self, 'guess_plugin')

    @staticmethod
    def example_event():
        # Hit pattern over the 127 top-array PMTs; bottom-array channels
        # (up to 254 total) are padded with zeros below.
        top_hits = [7, 8, 8, 5, 8, 10, 6, 9, 3, 7, 6, 4, 5, 2, 1, 0, 7, 1, 3, 1, 4, 2, 5, 1, 4, 3,
                    1, 3, 2, 4, 3, 0, 4, 4, 1, 6, 2, 4, 9, 12, 8, 10, 9, 6, 9, 1, 2, 1, 2, 1, 4, 10,
                    0, 0, 1, 2, 1, 0, 2, 3, 6, 1, 3, 2, 3, 5, 2, 6, 30, 18, 24, 10, 8, 3, 4, 2, 4, 2,
                    1, 4, 3, 4, 5, 5, 2, 1, 2, 2, 2, 4, 12, 48, 139, 89, 19, 9, 3, 4, 2, 3, 1, 1, 6,
                    0, 3, 1, 2, 4, 12, 97, 87, 15, 6, 3, 4, 4, 0, 2, 3, 6, 13, 21, 3, 4, 3, 1, 7]
        hits = np.append(top_hits, np.zeros(254 - 127))
        e = Event.empty_event()
        # A single synthetic S2 peak carrying the hit pattern above.
        e.peaks.append(Peak({'left': 5,
                             'right': 9,
                             'type': 'S2',
                             'detector': 'tpc',
                             'area': 123,
                             'area_per_channel': hits}))
        return e

    def test_posrec(self):
        self.assertIsInstance(self.plugin, plugin.TransformPlugin)
        self.assertEqual(self.plugin.__class__.__name__, 'PosRecTopPatternFunctionFit')
        e = self.example_event()
        # The weighted-sum guess must run first: it seeds the fit.
        e = self.guess_plugin.transform_event(e)
        e = self.plugin.transform_event(e)
        self.assertIsInstance(e, Event)
        self.assertEqual(len(e.peaks), 1)
        self.assertEqual(len(e.S2s()), 1)
        # Two reconstructed positions: the guess plus the fit result.
        self.assertEqual(len(e.peaks[0].reconstructed_positions), 2)
        rp = e.peaks[0].reconstructed_positions[1]
        self.assertEqual(rp.algorithm, 'PosRecTopPatternFunctionFit')
        # Expected position for this hit pattern; 3 cm tolerance.
        x_truth = 11.0882
        y_truth = 18.7855
        self.assertAlmostEqual(rp.x, x_truth, delta=3)
        self.assertAlmostEqual(rp.y, y_truth, delta=3)
        cts = rp.confidence_tuples
        self.assertEqual(len(cts), 2)


if __name__ == '__main__':
    unittest.main()
| nilq/baby-python | python |
# Exercise: read a car's speed; above 80 km/h the driver is fined
# R$7.00 for every km/h over the limit.
velocidade = int(input('Quantos Km/h você estava dirigindo ? '))
if velocidade <= 80:
    # Within the limit: just wish the driver a good day.
    print('Otímo, dirija com segurança e BOM DIA! S2')
else:
    print('QUER VOAR ???')
    multa = 7 * (velocidade - 80)
    print('Mutado no valor de R${} REAIS!'.format(multa))
"""
Unit tests for `dh.ejson`.
"""
import fractions
import unittest
import dh.ejson
class Test(unittest.TestCase):
    """Round-trip serialization tests for dh.ejson."""

    def test_bytes(self):
        """Byte arrays survive a dumps/loads round trip with their type."""
        original = bytes([225, 127, 98, 213])
        restored = dh.ejson.loads(dh.ejson.dumps(original))
        self.assertIsInstance(restored, bytes)
        self.assertEqual(original, restored)

    def test_fraction(self):
        """Fractions survive a dumps/loads round trip with their type."""
        original = fractions.Fraction(22, 7)
        restored = dh.ejson.loads(dh.ejson.dumps(original))
        self.assertIsInstance(restored, fractions.Fraction)
        self.assertEqual(original, restored)
| nilq/baby-python | python |
# Universal Power Supply Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 03_24_2018_NW
#
######################################################
# Import Libraries
import Parameters
from Initialization import *
import time
from PWM_Wrapper import *
# Main UPS loop: (re)initialize the hardware, then serve duty-cycle commands.
while True:
    # Run initialization to set up VFD and converter controls.
    Run_Initialization()
    # UPS control loop.
    while True:
        # Boost converter relation over the 48-96 V range:
        #   Vo = Vin * (1 / (1 - D))   (equivalently Vo = Vin / D')
        # Bug fix: input() returns a string in Python 3, so the arithmetic
        # below previously raised a TypeError; convert to float first.
        D = float(input('Enter duty cycle: '))
        print('Setting duty cycle to: ', D)
        time.sleep(1)
        # Map the duty cycle onto the 0-96 scale expected by the PWM driver;
        # assumes 0 <= D <= 1 -- TODO confirm against the hardware spec.
        Val = 96 * (1 - D)
        PWM.PWM_Write(Parameters.Pin, int(Val))
        print(Parameters.Pin)
        time.sleep(5)
import tensorflow as tf
from tensorflow.keras.layers import (Add, Conv2D, Input, Concatenate,
TimeDistributed)
from tensorflow.keras.models import Model
from .blocks import (RecurrentConvBlock, ResidualBlock, ConvBlock,
DenseBlock, TransitionBlock, LocalizedConvBlock,
get_dropout_layer)
from ..utils import checkarg_backbone, checkarg_dropout_variant
def recnet_pin(
        backbone_block,
        n_channels,
        n_aux_channels,
        hr_size,
        time_window,
        # ----- below are parameters that shall be tweaked by the user -----
        n_channels_out=1,
        n_filters=8,
        n_blocks=6,
        normalization=None,
        dropout_rate=0,
        dropout_variant=None,
        attention=False,
        activation='relu',
        output_activation=None,
        localcon_layer=False):
    """
    Recurrent deep neural network with different backbone architectures
    (according to the ``backbone_block``) and pre-upsampling via interpolation
    (the samples are expected to be interpolated to the HR grid). This model is
    capable of exploiting spatio-temporal samples.

    The interpolation method depends on the ``interpolation`` argument used in
    the training procedure (which is passed to the DataGenerator).

    Parameters
    ----------
    backbone_block : str
        Backbone type. One of dl4ds.BACKBONE_BLOCKS. WARNING: this parameter is
        not supposed to be set by the user. It's set internallly through
        dl4ds.Trainers.
    n_channels : int
        Number of channels/variables in each sample. WARNING: this parameter is
        not supposed to be set by the user. It's set internallly through
        dl4ds.Trainers.
    n_aux_channels : int
        Number of auxiliary channels. WARNING: this parameter is not supposed to
        be set by the user. It's set internallly through dl4ds.Trainers.
    hr_size : tuple
        Height and width of the HR grid. WARNING: this parameter is not supposed
        to be set by the user. It's set internallly through dl4ds.Trainers.
    time_window : int
        Temporal window or number of time steps in each sample. WARNING: this
        parameter is not supposed to be set by the user. It's set internallly
        through dl4ds.Trainers.
    n_channels_out : int, optional
        Number of output channels/variables produced by the last ConvBlock.
    n_filters : int, optional
        Number of convolutional filters in RecurrentConvBlock. `n_filters` sets
        the number of output filters in the convolution inside the ConvLSTM unit.
    n_blocks : int, optional
        Number of recurrent convolutional blocks (RecurrentConvBlock).
        Sets the depth of the network.
    normalization : str or None, optional
        Normalization method in the residual or dense block. Can be either 'bn'
        for BatchNormalization or 'ln' for LayerNormalization. If None, then no
        normalization is performed (eg., for the 'resnet' backbone this results
        in the EDSR-style residual block).
    dropout_rate : float, optional
        Float between 0 and 1. Fraction of the input units to drop. If 0 then no
        dropout is applied.
    dropout_variant : str or None, optional
        Type of dropout. Defined in dl4ds.DROPOUT_VARIANTS variable.
    attention : bool, optional
        If True, dl4ds.ChannelAttention2D is used in convolutional blocks.
    activation : str, optional
        Activation function to use, as supported by tf.keras. E.g., 'relu' or
        'gelu'.
    output_activation : str, optional
        Activation function to use in the last ConvBlock. Useful to constraint
        the values distribution of the output grid.
    localcon_layer : bool, optional
        If True, the LocalizedConvBlock is activated in the output module.
    """
    backbone_block = checkarg_backbone(backbone_block)
    dropout_variant = checkarg_dropout_variant(dropout_variant)
    auxvar_array_is_given = True if n_aux_channels > 0 else False
    h_hr, w_hr = hr_size
    # With the localized conv layer the spatial dims must be fixed to the HR
    # grid; otherwise they stay fully dynamic (None).
    if not localcon_layer:
        x_in = Input(shape=(None, None, None, n_channels))
    else:
        x_in = Input(shape=(None, h_hr, w_hr, n_channels))
    init_n_filters = n_filters
    # First recurrent block; `x` keeps the skip-connection input.
    x = b = RecurrentConvBlock(n_filters, activation=activation,
                               normalization=normalization)(x_in)
    for i in range(n_blocks):
        b = RecurrentConvBlock(n_filters, activation=activation,
                               normalization=normalization, dropout_rate=dropout_rate,
                               dropout_variant=dropout_variant, name_suffix=str(i + 2))(b)
    b = get_dropout_layer(dropout_rate, dropout_variant, dim=3)(b)
    # Merge the backbone output with the skip branch according to the backbone.
    if backbone_block == 'convnet':
        x = b
    elif backbone_block == 'resnet':
        x = Add()([x, b])
    elif backbone_block == 'densenet':
        x = Concatenate()([x, b])
    #---------------------------------------------------------------------------
    # HR aux channels are processed and broadcast over the temporal dimension
    if auxvar_array_is_given:
        s_in = Input(shape=(None, None, n_aux_channels))
        s = ConvBlock(n_filters, activation=activation, dropout_rate=0,
                      normalization=None, attention=attention)(s_in)
        s = tf.expand_dims(s, 1)
        s = tf.repeat(s, time_window, axis=1)
        x = Concatenate()([x, s])
    #---------------------------------------------------------------------------
    # Localized convolutional layer (per-pixel weights), applied per time step
    if localcon_layer:
        lcb = LocalizedConvBlock(filters=2, use_bias=True)
        lws = TimeDistributed(lcb, name='localized_conv_block')(x)
        x = Concatenate()([x, lws])
    #---------------------------------------------------------------------------
    # Last conv layers: transition back to init_n_filters, then project to the
    # desired number of output channels with the optional output activation.
    x = TransitionBlock(init_n_filters, name='TransitionLast')(x)
    x = ConvBlock(init_n_filters, activation=None, dropout_rate=dropout_rate,
                  normalization=normalization, attention=True)(x)
    x = ConvBlock(n_channels_out, activation=output_activation, dropout_rate=0,
                  normalization=normalization, attention=False)(x)
    model_name = 'rec' + backbone_block + '_pin'
    if auxvar_array_is_given:
        return Model(inputs=[x_in, s_in], outputs=x, name=model_name)
    else:
        return Model(inputs=[x_in], outputs=x, name=model_name)
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from vistrails.core.data_structures.point import Point
from vistrails.db.domain import DBLocation
import unittest
import copy
import random
from vistrails.db.domain import IdScope
import vistrails.core
class Location(DBLocation, Point):
    """A 2-D module location, bridging the DB-layer DBLocation with the
    vector semantics of core's Point (addition, scaling, negation)."""

    ##########################################################################
    # Constructors and copy

    def __init__(self, *args, **kwargs):
        DBLocation.__init__(self, *args, **kwargs)
        # DB objects may come back with no id; normalize to -1 ("unset").
        if self.id is None:
            self.id = -1

    def __copy__(self):
        return Location.do_copy(self)

    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        # Copy through the DB layer, then re-tag the instance as a Location.
        cp = DBLocation.do_copy(self, new_ids, id_scope, id_remap)
        cp.__class__ = Location
        return cp

    ##########################################################################
    # DB Conversion

    @staticmethod
    def convert(_location):
        # In-place upgrade of a raw DBLocation into a Location.
        _location.__class__ = Location

    ##########################################################################
    # Properties

    id = DBLocation.db_id
    x = DBLocation.db_x
    y = DBLocation.db_y

    ##########################################################################
    # Operators

    def __str__(self):
        """__str__() -> str - Returns a string representation of a Location
        object.
        """
        rep = "<location id=%s x=%s y=%s/>"
        return rep % (str(self.id), str(self.x), str(self.y))

    # Per-coordinate tolerance; the squared-distance test below uses
    # eq_delta ** 2 == 1e-8.
    eq_delta = 0.0001

    def __eq__(self, other):
        """ __eq__(other: Location) -> boolean
        Returns True if self and other have the same attributes. Used by ==
        operator.

        Bug fix: the original expression had a misplaced parenthesis and
        computed (dx**2 + dy)**2 instead of dx**2 + dy**2, so equality was
        wrong whenever the y-difference was non-negligible.
        """
        if type(other) != type(self):
            return False
        # Skip property lookup for performance
        return ((self._db_x - other._db_x) ** 2 +
                (self._db_y - other._db_y) ** 2) < 1e-8

    def __ne__(self, other):
        return not self.__eq__(other)

    def __neg__(self):
        """ __neg__() -> Location
        Compute a point p such that: self + p == Location(0,0),
        and return a Location
        """
        return Location(x=-self.db_x, y=-self.db_y)

    def __add__(self, other):
        """ __add__(other: Location) -> Location
        Returns a point p such that: self + other == p, and return a Location
        """
        return Location(x=(self.db_x + other.db_x), y=(self.db_y + other.db_y))

    def __sub__(self, other):
        """ __sub__(other: Location) -> Location
        Returns a point p such that: self - other == p, and return a Location
        """
        return Location(x=(self.db_x - other.db_x), y=(self.db_y - other.db_y))

    def __mul__(self, other):
        """ __mul__(other: float) -> Location
        Interprets self as a vector to perform a scalar multiplication and
        return a Location
        """
        return Location(x=(self.db_x * other), y=(self.db_y * other))

    def __rmul__(self, other):
        """ __rmul__(other: float) -> Location
        Interprets self as a vector to perform a scalar multiplication and
        return a Location
        """
        return Location(x=(self.db_x * other), y=(self.db_y * other))
################################################################################
# Testing
class TestLocation(unittest.TestCase):
    """Unit tests for Location: copying, serialization, and vector laws."""

    @staticmethod
    def assert_double_equals(a, b, eps = 0.00001):
        # Approximate float comparison used by the vector-space tests.
        assert abs(a - b) < eps

    def create_location(self, id_scope=IdScope()):
        # Build a Location with a freshly allocated id and fixed coordinates.
        return Location(id=id_scope.getNewId(Location.vtType),
                        x=12.34567,
                        y=14.65431)

    def test_copy(self):
        scope = IdScope()
        original = self.create_location(scope)
        shallow = copy.copy(original)
        self.assertEquals(original, shallow)
        self.assertEquals(original.id, shallow.id)
        # A do_copy with new_ids must be equal in value but re-identified.
        remapped = original.do_copy(True, scope, {})
        self.assertEquals(original, remapped)
        self.assertNotEquals(original.id, remapped.id)

    def test_serialization(self):
        import vistrails.core.db.io
        original = self.create_location()
        as_xml = vistrails.core.db.io.serialize(original)
        restored = vistrails.core.db.io.unserialize(as_xml, Location)
        self.assertEquals(original, restored)
        self.assertEquals(original.id, restored.id)

    def test_add_length(self):
        """Uses triangle inequality to exercise add and length"""
        for _ in xrange(100):
            u = Location(x=random.uniform(-1.0, 1.0), y=random.uniform(-1.0, 1.0))
            v = Location(x=random.uniform(-1.0, 1.0), y=random.uniform(-1.0, 1.0))
            assert (u + v).length() <= u.length() + v.length()

    def test_mul_length(self):
        """Uses vector space properties to exercise mul, rmul and length"""
        for _ in xrange(100):
            v = Location(x=random.uniform(-1.0, 1.0), y=random.uniform(-1.0, 1.0))
            scale = random.uniform(0.0, 10.0)
            self.assert_double_equals(scale * v.length(), (scale * v).length())
            self.assert_double_equals(scale * v.length(), (v * scale).length())

    def test_comparison_operators(self):
        """ Test comparison operators """
        first = Location(x=0, y=1)
        second = Location(x=0, y=1)
        assert first == second
        assert first is not None
        second = Location(x=0, y=0.1)
        assert first != second
| nilq/baby-python | python |
from server import Server
def start(port):
    """Create a Server bound to *port* and run it."""
    Server(port).start()


if __name__ == '__main__':
    # NOTE(review): the argument is an address string (":9090"), not an
    # int -- presumably Server expects that form; confirm against Server.
    start(":9090")
import logging
from flask import (
Blueprint,
jsonify,
request,
abort,
current_app
)
from onepiece.exceptions import (
ComicbookException,
NotFoundError,
SiteNotSupport
)
from . import crawler
from . import task
from .const import ConfigKey
logger = logging.getLogger(__name__)
app = Blueprint("api", __name__, url_prefix='/api')
aggregate_app = Blueprint("aggregate", __name__, url_prefix='/aggregate')
manage_app = Blueprint("task", __name__, url_prefix='/manage')
def handle_404(error):
    """Map crawler exceptions onto HTTP status codes with a JSON body."""
    if isinstance(error, NotFoundError):
        status = 404
    elif isinstance(error, SiteNotSupport):
        status = 400
    else:
        status = 500
    return jsonify(dict(message=str(error))), status
# Route every ComicbookException raised inside these blueprints through
# handle_404 so clients always receive a JSON error payload.
app.register_error_handler(ComicbookException, handle_404)
aggregate_app.register_error_handler(ComicbookException, handle_404)
manage_app.register_error_handler(ComicbookException, handle_404)
def check_manage_secret(request):
    """Abort with 403 unless the API-Secret header matches the configured secret.

    When no MANAGE_SECRET is configured, management endpoints stay open.
    """
    expected = current_app.config.get(ConfigKey.MANAGE_SECRET)
    if not expected:
        return
    provided = request.headers.get('API-Secret', '')
    if provided != expected:
        abort(403)
@app.route("/<site>/comic/<comicid>")
def get_comicbook_info(site, comicid):
    """Return metadata for one comic on one source site."""
    result = crawler.get_comicbook_info(site=site, comicid=comicid)
    return jsonify(result)
@app.route("/<site>/comic/<comicid>/<int:chapter_number>")
def get_chapter_info(site, comicid, chapter_number):
    """Return image/page details for one chapter of a comic."""
    result = crawler.get_chapter_info(site=site, comicid=comicid, chapter_number=chapter_number)
    return jsonify(result)
@app.route("/<site>/search")
def search(site):
    """Search one site by comic name; 400 when ``name`` is missing."""
    name = request.args.get('name')
    page = request.args.get('page', default=1, type=int)
    if not name:
        abort(400)
    # NOTE(review): the crawler function name is misspelled ("resuult");
    # kept as-is here because renaming belongs in the crawler module.
    result = crawler.get_search_resuult(site=site, name=name, page=page)
    return jsonify(dict(search_result=result))
@app.route("/<site>/tags")
def tags(site):
    """Return all tags/categories known for *site*."""
    result = crawler.get_tags(site)
    return jsonify(dict(tags=result))
@app.route("/<site>/list")
def tag_list(site):
    """List comics under one tag on *site*, paginated."""
    tag = request.args.get('tag')
    page = request.args.get('page', default=1, type=int)
    result = crawler.get_tag_result(site=site, tag=tag, page=page)
    return jsonify(dict(list=result))
@app.route("/<site>/latest")
def latest(site):
    """Return the latest-updated comics on *site*, paginated."""
    page = request.args.get('page', default=1, type=int)
    result = crawler.get_latest(site=site, page=page)
    return jsonify(dict(latest=result))
@aggregate_app.route("/search")
def aggregate_search():
    """Search across sites (optionally restricted to one via ``site``)."""
    site = request.args.get('site')
    name = request.args.get('name')
    if not name:
        abort(400)
    result = crawler.aggregate_search(site=site, name=name)
    return jsonify(dict(list=result))
@manage_app.route("/cookies/<site>", methods=['GET'])
def get_cookies(site):
    """Return the cookies stored for *site* (requires API-Secret header)."""
    check_manage_secret(request)
    cookies = crawler.get_cookies(site=site)
    return jsonify(dict(cookies=cookies))
@manage_app.route("/cookies/<site>", methods=['POST'])
def update_cookies(site):
    """Store cookies for *site*; JSON body: {"cookies": [...], "cover": bool}.

    ``cover`` presumably replaces the stored set instead of merging --
    confirm against crawler.update_cookies.
    """
    check_manage_secret(request)
    content = request.json or {}
    cookies = content.get('cookies')
    cover = content.get('cover', False)
    # A non-empty list is required; anything else is a client error.
    if not cookies or not isinstance(cookies, list):
        abort(400)
    ret = crawler.update_cookies(site=site, cookies=cookies, cover=cover)
    return jsonify(dict(cookies=ret))
@manage_app.route("/task/add")
def add_task():
    """Queue a crawl task for a comic (optionally mail it / build a PDF).

    Consistency fix: the secret check now runs before any parameter
    handling, matching the cookie endpoints; unauthenticated callers are
    rejected up front.  Observable behavior is unchanged because the
    argument parsing has no side effects.
    """
    check_manage_secret(request)
    site = request.args.get('site')
    comicid = request.args.get('comicid')
    chapter = request.args.get('chapter', default='-1')
    send_mail = request.args.get('send_mail', default=0, type=int)
    gen_pdf = request.args.get('gen_pdf', default=0, type=int)
    receivers = request.args.get('receivers', default="")
    is_all = 1 if request.args.get('is_all') == '1' else 0
    result = task.add_task(site=site,
                           comicid=comicid,
                           chapter=chapter,
                           is_all=is_all,
                           send_mail=send_mail,
                           gen_pdf=gen_pdf,
                           receivers=receivers)
    return jsonify(dict(data=result))
@manage_app.route("/task/list")
def list_task():
    """Return one page (fixed size of 20) of queued tasks.

    Consistency fix: the secret check runs first, matching the other
    management endpoints; behavior is otherwise unchanged.
    """
    check_manage_secret(request)
    page = request.args.get('page', default=1, type=int)
    size = 20
    result = task.list_task(page=page, size=size)
    return jsonify(dict(list=result))
| nilq/baby-python | python |
from flask import render_template,flash, redirect, request, jsonify
from flask_wtf import FlaskForm
from wtforms import TextField, validators, SubmitField, DecimalField, IntegerField, RadioField
from app import app, controller
#from .models import
from random import randint
import json
from .controller import plotMeteogramFile
from base64 import b64encode
import os
class searchForm(FlaskForm):
    """Meteogram request form: location (name or lat/lon), length in days,
    and which plot type to render."""
    search = TextField("Search", [validators.Optional()])
    lat = DecimalField("Latitude", [validators.Optional()])
    lon = DecimalField("Longitude",[validators.Optional()])
    days = IntegerField("Length of Meteogram in Days", default=3)
    plotType = RadioField("Plottype", choices=[
        ('ensemble', "Pure Ensemble Data"),
        ('enhanced-hres', "HRES Enhanced Ensemble Data")],
        default = 'ensemble', validators=[validators.Required()])
    submit = SubmitField('Go!')
@app.route('/', methods=("GET", "POST"))
def index():
    """Landing page: show the search form; on a valid submit go to /search."""
    form = searchForm()
    if form.validate_on_submit():
        return redirect('/search')
    return render_template("index.html",
                           title='VSUP - Meteogram',
                           form = form)
@app.route('/search', methods=("GET", "POST"))
def search():
    """Render a meteogram for the requested location.

    Parameters arrive either as GET query args or as a submitted form;
    the plot is produced to a temp file, base64-embedded into the page,
    and the temp file removed.

    NOTE(review): this view relies on ``"latitude" in locals()`` to
    detect whether parameters were parsed -- fragile, and the debug
    ``print`` calls should eventually become proper logging.
    """
    #print('latitude: ' + request.form['latitude'])
    #print('longitude: ' + request.form['longitude'])
    #form = searchForm(csrf_enable=False)
    #print(form)
    print(request.args)
    print([key for key in request.args.keys()])
    #print('latitude: ' + request.form['lat'])
    #print('longitude: ' + request.form['lon'])
    form = searchForm()
    if request.method == 'GET':
        # NOTE(review): request.args['lon'] raises 400 if absent -- all
        # query parameters are effectively required on GET; confirm.
        print("lon", request.args['lon'])
        if request.args['search']:
            searchLocation = str(request.args['search'])
            form.search.data = searchLocation
            print(searchLocation)
        else:
            searchLocation = ""
        if request.args['lat']:
            latitude = float(request.args['lat'])
            form.lat.data = latitude
        else:
            latitude = None
        if request.args['lon']:
            longitude = float(request.args['lon'])
            form.lon.data = longitude
        else:
            longitude = None
        days = int(request.args['days'])
        form.days.data = days
        plotType = str(request.args['plotType'])
        form.plotType.data = plotType
    if form.validate_on_submit():
        #print(form.search.data)
        #print(form.days.data)
        searchLocation = form.search.data
        latitude = form.lat.data
        longitude = form.lon.data
        days = form.days.data
        plotType = form.plotType.data
        print('location: ' + searchLocation)
    else:
        print("invalid form")
    if "latitude" in locals():
        filename = plotMeteogramFile(latitude = latitude, longitude = longitude,
                                     location = searchLocation,
                                     days = days,
                                     plotType = plotType)
        with open("/tmp/"+filename, "rb") as fp:
            fileContent = b64encode(fp.read())
        #return jsonify( filename )
        os.remove("/tmp/"+filename)
        return render_template("meteogram.html",
                               form = form,
                               plotType = form.plotType.data,
                               image = 'data:image/png;base64,{}'.format(fileContent.decode())
                               )
    return render_template("index.html",
                           title = 'VSUP - Meteogram',
                           form = form)
| nilq/baby-python | python |
import inspect
from typing import Any, Dict
import pytest
from di.utils.inspection.abstract import AbstractInspector
from tests.di.utils.inspection.module_abstract import (
CanonicalAbstract,
DuckAbstract1,
DuckAbstract2,
DuckAbstract3,
DuckAbstract4,
DuckAbstract5,
NormalClass,
abstract_async_fn,
abstract_fn,
normal_async_fn,
normal_fn,
)
def test_abstract_functions():
    """Plain and async functions are each classified correctly."""
    assert not AbstractInspector.is_abstract_function(normal_fn)
    assert AbstractInspector.is_abstract_function(abstract_fn)
    assert not AbstractInspector.is_abstract_function(normal_async_fn)
    assert AbstractInspector.is_abstract_function(abstract_async_fn)
def test_abstract_classes():
    """Canonical ABCs and every duck-typed variant register as abstract."""
    assert not AbstractInspector.is_abstract_class(NormalClass)
    abstract_variants = (
        CanonicalAbstract,
        DuckAbstract1,
        DuckAbstract2,
        DuckAbstract3,
        DuckAbstract4,
        DuckAbstract5,
    )
    for variant in abstract_variants:
        assert AbstractInspector.is_abstract_class(variant)
@pytest.fixture(scope="module")
def module_globals():
    """Re-execute module_abstract's source into a fresh namespace.

    Lets tests verify that abstractness detection also works on classes
    created dynamically via exec(), not just statically imported ones.
    """
    _globals = {}
    from tests.di.utils.inspection import module_abstract
    # noinspection PyTypeChecker
    exec(inspect.getsource(module_abstract), _globals)
    return _globals
def test_abstract_dynamic(module_globals: Dict[str, Any]):
    """Same expectations as test_abstract_classes, on exec()-created classes."""
    assert not AbstractInspector.is_abstract_class(module_globals[NormalClass.__name__])
    for original in (
        CanonicalAbstract,
        DuckAbstract1,
        DuckAbstract2,
        DuckAbstract3,
        DuckAbstract4,
        DuckAbstract5,
    ):
        assert AbstractInspector.is_abstract_class(module_globals[original.__name__])
| nilq/baby-python | python |
"""
Surface Boolean Logic
~~~~~~~~~~~~~~~~~~~~~
Use a surface inside a volume to set scalar values on an array in the volume.
Adopted from https://docs.pyvista.org/examples/01-filter/clipping-with-surface.html
"""
import numpy as np
import pyvista as pv
from pyvista import _vtk as vtk
###############################################################################
# Make a gridded volume: a (51, 51, 51) rectilinear grid spanning roughly
# [-1, 1] on each axis.
n = 51
xx = yy = zz = 1 - np.linspace(0, n, n) * 2 / (n - 1)
dataset = pv.RectilinearGrid(xx, yy, zz)

###############################################################################
# Define a surface within the volume: an open (uncapped) downward cone.
surface = pv.Cone(direction=(0, 0, -1), height=3.0, radius=1, resolution=50, capping=False)

###############################################################################
# Preview the problem
p = pv.Plotter()
p.add_mesh(surface, color="w", label="Surface")
p.add_mesh(dataset, color="gold", show_edges=True, opacity=0.75, label="To Clip")
p.add_legend()
p.show()

###############################################################################
# Compute an implicit distance inside the volume using this surface, then
# inject new data arrays.  Negative distances are points inside the surface.
dataset.compute_implicit_distance(surface, inplace=True)

###############################################################################
# Take note of the new ``implicit_distance`` scalar array. We will use this
# to fill in regions inside the surface with the value 3.0 and regions outside
# the surface with the value 2.0
dataset["my_array"] = np.zeros(dataset.n_points)
dataset["my_array"][dataset["implicit_distance"] >= 0] = 2.0
dataset["my_array"][dataset["implicit_distance"] < 0] = 3.0
dataset.plot(scalars="my_array", n_colors=2, clim=[1.5, 3.5])
| nilq/baby-python | python |
import logging
from typing import Union
from xml.dom.minidom import Element
import requests
from huaweisms.api.config import MODEM_HOST
from huaweisms.xml.util import get_child_text, parse_xml_string, get_dictionary_from_children
logger = logging.getLogger(__name__)
class ApiCtx:
    """Mutable session state for talking to the modem's HTTP API."""

    def __init__(self, modem_host=None) -> None:
        # Authentication state, populated by the login flow.
        self.session_id = None
        self.logged_in = False
        self.login_token = None
        # Verification tokens, consumed one per request (LIFO).
        self.tokens = []
        self.__modem_host = modem_host if modem_host else MODEM_HOST

    def __unicode__(self):
        host = self.__modem_host
        return '<{} modem_host={}>'.format(type(self).__name__, host)

    def __repr__(self):
        return self.__unicode__()

    def __str__(self):
        return self.__unicode__()

    @property
    def api_base_url(self):
        return 'http://{}/api'.format(self.__modem_host)

    @property
    def token(self):
        # Pop the most recent token; warn and return None when exhausted.
        try:
            return self.tokens.pop()
        except IndexError:
            logger.warning('You ran out of tokens. You need to login again')
            return None
def common_headers():
    """Headers sent with every request to the modem API."""
    headers = {}
    headers["X-Requested-With"] = "XMLHttpRequest"
    return headers
def check_error(elem: Element) -> Union[dict, None]:
    """Translate an ``<error>`` XML element into an error dict, else None."""
    if elem.nodeName != "error":
        return None
    error_details = {
        "code": get_child_text(elem, "code"),
        "message": get_child_text(elem, "message"),
    }
    return {"type": "error", "error": error_details}
def api_response(r: requests.Response) -> dict:
    """Parse a modem HTTP response into a result dict.

    Raises for non-200 statuses; returns an error dict for ``<error>``
    payloads, otherwise a {"type": "response", "response": ...} dict built
    from the XML children.
    """
    # NOTE(review): setting encoding to '' presumably forces requests to
    # re-detect the charset from the body -- confirm against requests docs.
    r.encoding = ''
    if r.status_code != 200:
        r.raise_for_status()
    xmldoc = parse_xml_string(r.text)
    err = check_error(xmldoc.documentElement)
    if err:
        return err
    return {
        "type": "response",
        "response": get_dictionary_from_children(xmldoc.documentElement)
    }
def check_response_headers(resp, ctx: ApiCtx):
    """Harvest verification tokens and the session cookie from *resp* into *ctx*."""
    if '__RequestVerificationToken' in resp.headers:
        # Tokens arrive '#'-joined; drop empty fragments.
        toks = [x for x in resp.headers['__RequestVerificationToken'].split("#") if x != '']
        if len(toks) > 1:
            # NOTE(review): the first two fragments are discarded here --
            # presumably metadata rather than usable tokens; confirm against
            # the modem's token format.
            ctx.tokens = toks[2:]
        elif len(toks) == 1:
            ctx.tokens.append(toks[0])
    if 'SessionID' in resp.cookies:
        ctx.session_id = resp.cookies['SessionID']
def post_to_url(url: str, data: str, ctx: ApiCtx = None, additional_headers: dict = None, proxy=None) -> dict:
    """POST *data* to *url* with session cookies/headers, updating *ctx* tokens."""
    cookies = build_cookies(ctx)
    headers = common_headers()
    if additional_headers:
        headers.update(additional_headers)
    r = requests.post(url, data=data, headers=headers, cookies=cookies, proxies=proxy)
    # Capture any refreshed tokens/session before parsing the body.
    check_response_headers(r, ctx)
    return api_response(r)
def get_from_url(url: str, ctx: ApiCtx = None, additional_headers: dict = None,
                 timeout: int = None, proxy=None) -> dict:
    """GET *url* with session cookies/headers, updating *ctx* tokens."""
    cookies = build_cookies(ctx)
    headers = common_headers()
    if additional_headers:
        headers.update(additional_headers)
    r = requests.get(url, headers=headers, cookies=cookies, timeout=timeout, proxies=proxy)
    # Capture any refreshed tokens/session before parsing the body.
    check_response_headers(r, ctx)
    return api_response(r)
def build_cookies(ctx: ApiCtx):
    """Return the cookie dict for *ctx*, or None when there is no session."""
    if not (ctx and ctx.session_id):
        return None
    return {'SessionID': ctx.session_id}
| nilq/baby-python | python |
'''
Created on 16.3.2012
@author: Antti Vainio
'''
from leader import leader
from follower import follower
from vector import vector
from thinker import unitType
class simulation():
    '''
    This class handles the calculation of simulation.
    A single frame can be calculated and executed just by calling calculateFrame() once.
    When a single frame is calculated,
    first the new positions for all the thinkers are calculated
    and only after that they are moved to their new positions.
    This is because if they were moved instantly
    the thinkers that were calculated after the first ones would use their new positions instead of their old ones.
    New thinkers can also be created and old ones removed with simple function calls to this class.
    Also when the window is resized this class should be informed for that
    as the random placement of thinkers use that information.
    This class also handles "moving the camera".
    This is done so that first the new "position" for the camera is determined
    and then all the thinkers are displaced so that they are in the middle of the window.
    This way the simulation area is practically infinite.
    '''

    def __init__(self, x, y, amount, force_leader = False):
        '''
        x and y are the current dimensions of the window.
        amount is the amount of followers to be created.
        If any followers are to be created a leader will also be created,
        otherwise no leader will be created.
        if force_leader is set a leader will be created even if no followers are created.
        '''
        self.framecount = 0 #1 is added to this every time calculateFrame() is called
        self.window_middle = vector(x, y) / 2.0
        self.mean_position = vector(0, 0)
        self.thinkers = []
        self.leader = None
        self.thinker_near_mouse = None
        self.active_thinker = None
        if amount or force_leader:
            self.thinkers.append(leader(False, x, y))
            self.leader = self.thinkers[0]
            for i in range(amount): self.thinkers.append(follower(self.leader, x, y))
        # Trail of recent camera positions (debug drawing) and total offset.
        self.cam_movement = []
        self.cam_offset = vector(0, 0)
        self.camera_follow = True
        self.user_controlled_leader = False

    def setWindowSize(self, x, y):
        '''
        This should be called every time the window is resized.
        '''
        self.window_middle = vector(x, y) / 2.0
        # The old trail is meaningless relative to the new window size.
        del self.cam_movement[:]

    def move_camera(self, x, y):
        '''
        Is used for user forced camera movement.
        '''
        offset_vector = vector(x, y)
        for i in self.thinkers: i.displace(offset_vector)
        '''
        -1 is added there to "fix" an error
        This used to throw an out of range error probably because:
        len(self.cam_movement) is 35 at first which is also the maximum
        then in calculateFrame() cam_movement gets popped
        then the following 'for' reaches the end where there is no object anymore
        and throws an error
        this can happen because these two functions can be called simultaneously because of threading
        This "fix" only makes the last one of the camera trail dots (that is also soon to be deleted)
        not to move in the debug-drawing mode when user is moving the camera
        '''
        for i in range(len(self.cam_movement) - 1): self.cam_movement[i]+= offset_vector
        self.cam_offset+= offset_vector

    def setMousePosition(self, x = -1000, y = -1000):
        '''
        This is used to inform this class about the position of the mouse.
        '''
        # Find the thinker within sqrt(300) pixels closest to the cursor.
        best_thinker = None
        best_value = 300
        for i in self.thinkers:
            value = (i.pos.x - x) ** 2 + (i.pos.y - y) ** 2
            if value < best_value:
                best_thinker = i
                best_value = value
        self.thinker_near_mouse = best_thinker

    def chooseThinker(self):
        '''
        Sets the active thinker.
        '''
        self.active_thinker = self.thinker_near_mouse
        if not self.active_thinker: return unitType.none
        elif self.active_thinker.is_leader: return unitType.leader
        return unitType.follower

    def removeThinker(self, thinker):
        # Removing the leader orphans all followers.
        if thinker.is_leader:
            for i in self.thinkers: i.leader = None
            self.leader = None
        self.thinkers.remove(thinker)

    def createLeader(self, max_speed, max_force, size, random_position = True, x = 0, y = 0):
        # A new leader replaces any existing one; followers are re-pointed.
        old_leader = self.leader
        if random_position: self.thinkers.append(leader(self.user_controlled_leader, self.window_middle.x * 2.0, self.window_middle.y * 2.0, max_speed, max_force, size))
        else: self.thinkers.append(leader(self.user_controlled_leader, x, y, max_speed, max_force, size, False))
        self.leader = self.thinkers[-1]
        for i in range(len(self.thinkers) - 1): self.thinkers[i].leader = self.leader
        if old_leader: self.thinkers.remove(old_leader)

    def createFollower(self, max_speed, max_force, size, random_position = True, x = 0, y = 0):
        if random_position: self.thinkers.append(follower(self.leader, self.window_middle.x * 2.0, self.window_middle.y * 2.0, max_speed, max_force, size))
        else: self.thinkers.append(follower(self.leader, x, y, max_speed, max_force, size, False))

    def calculateFrame(self):
        '''
        First lets every thinker determine their new position.
        Then lets them move to their new positions
        and also displaces them so that they are in the middle of the window.
        Then calculates the new displacement values for the next frame.
        Finally handles camera trail and its displacement.
        '''
        if not len(self.thinkers): return
        self.framecount+= 1
        # Offset computed last frame, re-centering the swarm's mean position.
        if self.camera_follow: offset_vector = self.mean_position
        else: offset_vector = vector(0, 0)
        self.mean_position = vector(0, 0)
        # Phase 1: everyone decides based on everyone's OLD positions.
        for i in self.thinkers:
            i.think(self.thinkers)
        # Phase 2: apply the moves and accumulate the new mean position.
        for i in self.thinkers:
            i.move()
            i.displace(offset_vector)
            self.mean_position+= i.pos
        self.mean_position/= len(self.thinkers)
        self.mean_position = self.window_middle - self.mean_position
        #camera movement trail and offset
        if self.framecount % 20 == 0:
            if len(self.cam_movement) == 35: self.cam_movement.pop()
        for i in range(len(self.cam_movement)): self.cam_movement[i]+= offset_vector
        #for i in self.cam_movement: i+= offset_vector
        if self.framecount % 20 == 0: self.cam_movement.insert(0, self.window_middle + self.window_middle / 3.0)
        self.cam_offset+= offset_vector
| nilq/baby-python | python |
# NOTE(review): this script appears to print characters of ``s`` while
# skipping/reordering a region between 1-based positions n1 and n2, but it
# is broken: ``k`` is read in the ``elif j<=k`` branch before its first
# assignment (``k=n2-1``), so some inputs raise NameError.  The intended
# behaviour needs to be confirmed before a real fix is possible; the
# indentation below is a best-effort reconstruction of the original.
s=str(input())
n1,n2=[int(e) for e in input().split()]
j=0
for i in range(len(s)):
    if j<n1-1:
        print(s[j],end="")
        j+=1
    elif j>=n1-1:
        j=n2
        if j>=n1:
            print(s[j],end="")
            j-=1
        elif j<=k:
            print(s[j],end="")
            j+=1
        k=n2-1
| nilq/baby-python | python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.