content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
import torch
class ModelPipeline:
    """Chain a preprocessing callable with a model call, handling tensor conversion.

    The preprocessor's output (a dict of array-likes or a single array-like) is
    converted to ``torch.tensor``s and fed to the model — as keyword arguments
    when it is a dict.  When ``return_numpy`` is true, outputs are detached and
    converted back to NumPy arrays.
    """

    def __init__(self, preprocessor, model, return_numpy=True):
        # Callable producing raw model inputs from the caller's arguments.
        self.preprocessor = preprocessor
        # Callable (typically an nn.Module) mapping tensors to outputs.
        self.model = model
        # When True, convert outputs to NumPy via detach().numpy().
        self.return_numpy = return_numpy

    def __call__(self, *args, **kwargs):
        raw = self.preprocessor(*args, **kwargs)
        if isinstance(raw, dict):
            tensors = {name: torch.tensor(value) for name, value in raw.items()}
            outputs = self.model(**tensors)
        else:
            outputs = self.model(torch.tensor(raw))
        if not self.return_numpy:
            return outputs
        if isinstance(outputs, dict):
            return {name: value.detach().numpy() for name, value in outputs.items()}
        return outputs.detach().numpy()
|
nilq/baby-python
|
python
|
#!/Users/rblount/.pyenv/versions/AdOfCode/bin/python
import sys
import os
import numpy as np
from TerminalColors import BRED, BGREEN, ENDCOLOR
from AOC import AOC
testing = False
days = 100
def parse_input(data_input: list) -> np.ndarray:
    """Parse lines of digit strings into a 2-D integer array, one digit per cell."""
    # delimiter=1 makes genfromtxt read fixed-width fields of one character.
    return np.genfromtxt(data_input, dtype=int, delimiter=1)
def print_octupuses(array: np.array):
    """Render the grid; cells >= 10 (flashing) in red, cells == 0 (just reset) in green."""
    _, cols = array.shape
    for (_, col), value in np.ndenumerate(array):
        if value >= 10:
            color = BRED
        elif value == 0:
            color = BGREEN
        else:
            color = ""
        print(f"{color}{value:>4}{ENDCOLOR}", end="")
        if col == cols - 1:
            print()  # newline at the end of each row
    print()
def process_cycle(array: np.array):
    """Advance the octopus grid one step (AoC 2021 day 11 rules).

    Every cell gains 1 energy; any cell above 9 flashes, boosting its
    neighbourhood, possibly cascading.  Flashed cells reset to 0.
    Returns ``(new_array, flash_count)``.  Note the input array is
    incremented in place before the reset copy is made.
    """
    height, width = array.shape
    flashes = 0
    array += 1
    pending = [(r, c) for r, c in np.argwhere(array > 9)]
    already_flashed = []
    while pending:
        for r, c in pending:
            # Clamp the 3x3 neighbourhood (including the cell itself) to the grid.
            top = r - 1 if r > 0 else 0
            bottom = r + 1 if r < height - 1 else height - 1
            left = c - 1 if c > 0 else 0
            right = c + 1 if c < width - 1 else width - 1
            array[top:bottom + 1, left:right + 1] += 1
        # Total cells that have reached flash level so far this step.
        flashes = np.sum(array >= 10)
        already_flashed += pending
        # Only newly-triggered cells flash on the next pass.
        pending = list(set((r, c) for r, c in np.argwhere(array > 9)).difference(already_flashed))
    # Reset every flashed cell to zero.
    array = array * (array < 10)
    return array, flashes
def part1(array: np.array):
    """Run the grid for `days` cycles, printing the cumulative flash total each cycle."""
    total = 0
    for cycle in range(1, days + 1):
        array, flash_count = process_cycle(array)
        total += flash_count
        print(f"After {cycle} Days: Total Flashes: {total}")
def part2(array: np.array):
    """Cycle until every octopus flashes in the same step, then report that cycle.

    Prints the first cycle number on which the whole grid flashes at once.
    """
    all_syncd = array.shape[0] * array.shape[1]
    count = 0
    # Fix: was initialised to 1 while also being incremented once per cycle,
    # which over-reported the synchronisation cycle by one.
    cycle = 0
    while count < all_syncd:
        array, count = process_cycle(array)
        cycle += 1
    print(f"After {cycle} Days: Total Flashes: {count}")
def main():
    """Derive the puzzle's year/day from the script path and run both parts."""
    # Directory layout is .../<year>/day<NN>, so parse both from the path.
    code_path = os.path.dirname(sys.argv[0])
    code_date = int(code_path.split("/")[-1][3:])
    code_year = int(code_path.split("/")[-2])
    print(f"Running Advent of Code for Year: {code_year} - Day {code_date}")
    code_data = AOC(code_date, code_year, test=testing)
    grid = parse_input(code_data.read_lines())
    # NOTE(review): process_cycle mutates its input in place, so part2
    # continues from the grid state part1 left behind — confirm intended.
    part1(grid)
    part2(grid)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from .realtime import interface, urlib
################################################################
## Simulated robot implementation
################################################################
class SimConnection:
    """Implements functionality to read simulated robot state (arm and F/T sensor) and command the robot in real-time."""
    def __init__(self, env):
        # Keep the simulation environment and share it globally with urlib.
        self.env = env
        urlib.sim = env
    def connect(self):
        # No handshake needed for the simulation; just report readiness.
        print('System ready.')
    def disconnect(self):
        # Nothing to tear down for the simulated link.
        pass
    def execute(self, cmd, state):
        """Sends the command to control layer and reads back the state, emulating the wire protocol used with the real robot."""
        # Copy the result in place so the caller's buffer is updated.
        state[:] = interface.execute_arm_command(cmd, 0)
        self.env.update() # note that the sim update is called twice, once here and once by the hand's sim_connection
|
nilq/baby-python
|
python
|
import sys
import time
from networktables import NetworkTables
import logging
logging.basicConfig(level=logging.DEBUG)
# Connect to the local NetworkTables server and poll the vision table forever.
NetworkTables.initialize(server = "localhost")
sd = NetworkTables.getTable("/vision")
while True:
    try:
        # Center x-coordinates and widths of the detected vision targets.
        x = sd.getNumberArray('centerX')
        width = sd.getNumberArray('width')
        try:
            # Midpoint between the inner edges of the two targets, compared
            # to the desired image center (assumes 200 px — TODO confirm).
            firstEdge = x[1] - (width[1]/2)
            secondEdge = x[0] + (width[0]/2)
            edgeDiff = secondEdge - firstEdge
            location = firstEdge + (edgeDiff/2)
            locationError = location - 200
        except IndexError:
            # Fewer than two targets visible: treat as perfectly centered.
            locationError = 0
        if (locationError == 0):
            neededDirection = "Straight"
        elif (locationError > 5):
            neededDirection = "Right"
        elif (locationError < -5):
            neededDirection = "Left"
        elif (-5 <= locationError <= 5):
            # Small non-zero error: close enough, stop correcting.
            neededDirection = "Stop"
        else:
            # NOTE(review): unreachable — the branches above cover all values.
            neededDirection = "Unknown"
        print(neededDirection)
    except KeyError:
        # Table/keys not published yet; retry once per second.
        print('Waiting for Connection...')
        time.sleep(1)
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.5 on 2020-09-02 22:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter questiontable's count1..count4 to DecimalField(max_digits=5, decimal_places=2)."""

    dependencies = [
        ('polls', '0020_auto_20200903_0339'),
    ]

    # The four counter columns get the identical field definition, so build
    # the AlterField operations with a comprehension instead of repeating them.
    operations = [
        migrations.AlterField(
            model_name='questiontable',
            name='count%d' % i,
            field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
        )
        for i in range(1, 5)
    ]
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class HarvesterConfig(AppConfig):
    """Django application configuration for the `harvester` app."""
    # Keep implicit primary keys as plain AutoField.
    default_auto_field = 'django.db.models.AutoField'
    name = 'harvester'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.7
"""
The copyrights of this software are owned by Duke University.
Please refer to the LICENSE and README.md files for licensing instructions.
The source code can be found on the following GitHub repository: https://github.com/wmglab-duke/ascent
"""
import json
import os
from typing import Union
from .enums import TemplateMode
class TemplateOutput:
    """Read and write the JSON template files addressed by :class:`TemplateMode`."""

    @staticmethod
    def read(mode: TemplateMode) -> Union[list, dict]:
        """Load and return the JSON template associated with *mode*."""
        path = os.path.join(TemplateMode.path.value, mode.value)
        with open(path, "r") as handle:
            return json.load(handle)

    @staticmethod
    def write(data: Union[list, dict], dest_path):
        """Serialize *data* to *dest_path* as JSON indented by two spaces."""
        with open(dest_path, "w") as handle:
            json.dump(data, handle, indent=2)
|
nilq/baby-python
|
python
|
"""ssoadmin module initialization; sets value for base decorator."""
from .models import ssoadmin_backends
from ..core.models import base_decorator
mock_ssoadmin = base_decorator(ssoadmin_backends)
|
nilq/baby-python
|
python
|
import unittest
from page.thread_page import Page
import time
class threadDemo(unittest.TestCase):
    """UI smoke test: logs into the app via the thread page and verifies the check."""
    def __repr__(self):
        # Short identifier shown when the test object is printed in reports.
        return 'appdemo'
    @classmethod
    def setUpClass(cls):
        # One shared Page driver instance for the whole class.
        cls.page = Page()
    def test_a_thread(self):
        """Walk through the login flow and assert the page check passes."""
        # Fixed sleeps give each screen time to load; timings are empirical.
        time.sleep(6)
        self.page.login_btn()
        time.sleep(2)
        self.page.account()
        time.sleep(2)
        self.page.password()
        time.sleep(2)
        self.page.login()
        self.assertTrue(self.page.check(self.test_a_thread.__name__), 'msg')
        # self.page.url()
        # time.sleep(2)
        # self.page.enter()
        # self.assertTrue(self.page.check(self.test_a_thread.__name__), 'msg')
    @classmethod
    def tearDownClass(cls):
        # Shut the driver down after all tests in the class finish.
        cls.page.quit()
|
nilq/baby-python
|
python
|
import falcon
from falcon.testing import TestResource as ResourceMock
from tests import RestTestBase
from monitorrent.rest import no_auth, AuthMiddleware
def is_auth_enabled():
    """Auth-enabled predicate handed to AuthMiddleware.init: always disabled here."""
    return False
class TestAuthMiddleware(RestTestBase):
    """Tests for the cookie/JWT authentication middleware of the REST API."""
    def setUp(self, disable_auth=False):
        # Keep auth enabled by default so the 401 paths can be exercised.
        super(TestAuthMiddleware, self).setUp(disable_auth)
    def test_auth_success(self):
        """A request carrying a valid auth cookie is let through (200)."""
        self.api.add_route(self.test_route, ResourceMock())
        self.simulate_request(self.test_route, headers={'Cookie': self.get_cookie()})
        self.assertEqual(falcon.HTTP_OK, self.srmock.status)
    def test_no_auth_success(self):
        """A resource wrapped in no_auth skips authentication entirely."""
        self.api.add_route(self.test_route, no_auth(ResourceMock()))
        self.simulate_request(self.test_route)
        self.assertEqual(falcon.HTTP_OK, self.srmock.status)
    def test_authenticate(self):
        """authenticate() must set the JWT cookie with expected name, value and path."""
        resp = falcon.Response()
        AuthMiddleware.authenticate(resp)
        self.assertIsNotNone(resp._cookies)
        jwt = resp._cookies[AuthMiddleware.cookie_name]
        self.assertEqual(jwt.key, AuthMiddleware.cookie_name)
        self.assertEqual(jwt.value, self.auth_token_verified)
        self.assertEqual(jwt['path'], '/')
    def test_auth_failed_without_cookie(self):
        """Missing auth cookie yields 401."""
        self.api.add_route(self.test_route, ResourceMock())
        self.simulate_request(self.test_route)
        self.assertEqual(falcon.HTTP_UNAUTHORIZED, self.srmock.status)
    def test_auth_failed_with_modified_cookie(self):
        """A modified cookie (get_cookie(True)) fails verification with 401."""
        self.api.add_route(self.test_route, ResourceMock())
        self.simulate_request(self.test_route, headers={'Cookie': self.get_cookie(True)})
        self.assertEqual(falcon.HTTP_UNAUTHORIZED, self.srmock.status)
    def test_auth_failed_with_random_cookie(self):
        """A garbage JWT value yields 401."""
        self.api.add_route(self.test_route, ResourceMock())
        self.simulate_request(self.test_route, headers={'Cookie': 'jwt=random; HttpOnly; Path=/'})
        self.assertEqual(falcon.HTTP_UNAUTHORIZED, self.srmock.status)
    def test_disabled_auth(self):
        """When auth is disabled via init(), even a garbage cookie is accepted."""
        self.api.add_route(self.test_route, ResourceMock())
        AuthMiddleware.init('secret!', 'monitorrent', is_auth_enabled)
        self.simulate_request(self.test_route, headers={'Cookie': 'jwt=random; HttpOnly; Path=/'})
        self.assertEqual(falcon.HTTP_OK, self.srmock.status)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import String, Bool
from burger_war_dev.msg import war_state
from actionlib_msgs.msg import GoalStatusArray
class StateControlBot():
    """Publishes the robot's high-level state based on war-state and enemy detection."""
    def __init__(self):
        # Publishes the current high-level state string for other nodes.
        self.pub = rospy.Publisher("main_state",String, queue_size=10)
        self.sub = rospy.Subscriber("war_state_info", war_state, self.warStateCallback)
        self.sub_navi_status = rospy.Subscriber('move_base/status', GoalStatusArray, self.navStateCallback)
        self.sub_detectingEnemy = rospy.Subscriber('detect_enemy', Bool, self.detectEnemyCallback)
        # Latest enemy-detection flag from the detector node.
        self.detecting_enemy = False
        # NOTE(review): assigned here but never updated or read — possibly vestigial.
        self.detected_time = None
        self.state = "UNDEFINED"
        self.navi_status = None
        self.war_state = war_state()
    def strategy(self):
        """Main 1 Hz loop mapping war-state and enemy detection onto state transitions."""
        self.publish_state("IDLING")
        self.rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            if self.state == "IDLING":
                if self.war_state.state == "running":
                    self.publish_state("GO")
                elif self.war_state.state == "stop":
                    # Match over: compare points to pick the terminal state.
                    if self.war_state.my_point < self.war_state.enemy_point:
                        self.publish_state("LOSE")
                    elif self.war_state.my_point > self.war_state.enemy_point:
                        self.publish_state("WIN")
                    else:
                        self.publish_state("EVEN")
            elif self.state == "GO" and self.detecting_enemy:
                # Enemy spotted: escape for 10 seconds, then resume GO.
                self.publish_state("ESCAPE")
                rospy.sleep(rospy.Duration(10))
                self.publish_state("GO")
            self.rate.sleep()
    def navStateCallback(self, data):
        """Track move_base status; log only when the status actually changes."""
        if len(data.status_list) > 0:
            status = data.status_list[0]
            if status == self.navi_status:
                return
            self.navi_status = status
            rospy.logdebug("Navi Status : {}".format(status))
    def detectEnemyCallback(self,msg):
        # Cache the Bool published by the enemy detector.
        self.detecting_enemy = msg.data
    def publish_state(self, state):
        """Record *state* locally and publish it on main_state."""
        rospy.loginfo("STATE : {}".format(state))
        self.state = state
        msg = String(data=state)
        self.pub.publish(msg)
    def warStateCallback(self, msg):
        # Store the latest war_state message for the strategy loop to read.
        self.war_state = msg
        rospy.logdebug("msg.state {}".format(msg.state))
def main():
    """Initialize the state_control node and run the strategy loop until shutdown."""
    rospy.init_node('state_control')
    StateControlBot().strategy()
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
valor1 = 0
acumu1 = 0
valor2 = 10
acumu2 = 10
# Count valor1 from 0 up to 8 while accumulating the running total in acumu1.
# Fix: the original referenced undefined names (`valor`, `acumulador`) and
# never advanced the counter, so it raised NameError / could not terminate.
while valor1 <= 8:
    print(acumu1, valor1)
    acumu1 += valor1
    valor1 += 1
else:
    print('terminou o laço')
|
nilq/baby-python
|
python
|
from ._pyg_decoders import (
LogSoftmaxDecoderMaintainer,
SumPoolMLPDecoderMaintainer,
DiffPoolDecoderMaintainer,
DotProductLinkPredictionDecoderMaintainer
)
|
nilq/baby-python
|
python
|
import random
def sort_by_length(words):
    """Return *words* sorted from longest to shortest.

    Ties are broken by reverse alphabetical order because the whole
    (length, word) pairs are compared.
    """
    t = [(len(word), word) for word in words]
    # Fix: the original reversed the list (t[::-1]) instead of sorting it,
    # so the result was merely the input in reverse order.
    t.sort(reverse=True)
    return [word for _, word in t]
def sort_by_length_random(words):
    """Return *words* sorted longest-first, equal-length words in random order.

    Shuffle first, then stable-sort on length only, so words of the same
    length keep their randomized relative order.
    """
    t = [(len(word), word) for word in words]
    random.shuffle(t)
    # Fix: the original shuffled but never sorted, so the output was not
    # ordered by length at all, contradicting the documented intent.
    t.sort(key=lambda pair: pair[0], reverse=True)
    return [word for _, word in t]
# Demo: equal-length words — deterministic vs. randomized tie ordering.
print(sort_by_length(["milan", "jovan", "maksa"]))
print(sort_by_length_random(["milan", "jovan", "maksa"]))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# fix sys path so we don't need to setup PYTHONPATH
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'userena.runtests.settings'
import django
if django.VERSION >= (1, 7, 0):
# starting from 1.7.0 we need to run setup() in order to populate
# app config
django.setup()
from django.conf import settings
from django.db.models import get_app
from django.test.utils import get_runner
def usage():
    """Return the command-line help text for this test runner."""
    return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that
class.
"""
def main():
    """Run the userena test suite, adapting module names to the Django version."""
    TestRunner = get_runner(settings)
    test_runner = TestRunner(verbosity=2, failfast=False)
    if len(sys.argv) > 1:
        # Positional arguments select specific test modules/classes/methods.
        test_modules = sys.argv[1:]
    elif len(sys.argv) == 1:
        test_modules = []
    else:
        # NOTE(review): len(sys.argv) is always >= 1, so this branch is unreachable.
        print(usage())
        sys.exit(1)
    if django.VERSION >= (1, 6, 0):
        # this is a compat hack because in django>=1.6.0 you must provide
        # module like "userena.contrib.umessages" not "umessages"
        test_modules = [
            # be more strict by adding .tests to not run umessages tests twice
            # if both userena and umessages are tested
            get_app(module_name).__name__[:-7] + ".tests"
            for module_name
            in test_modules
        ]
    if django.VERSION < (1, 7, 0):
        # starting from 1.7.0 built in django migrations are run
        # for older releases this patch is required to enable testing with
        # migrations
        from south.management.commands import patch_for_test_db_setup
        patch_for_test_db_setup()
    # Default to the whole `userena` app when no modules were requested.
    failures = test_runner.run_tests(test_modules or ['userena'])
    sys.exit(failures)
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import pytest
from omniscient.utils.query_graph_utils import QueryGraphUtils
@pytest.fixture(scope="class")
def setup():
sparql = """
PREFIX ns: <http://rdf.freebase.com/ns/>
SELECT DISTINCT ?x
WHERE {
FILTER (?x != ?c)
FILTER (!isLiteral(?x) OR lang(?x) = '' OR langMatches(lang(?x), 'en'))
?c ns:location.country.administrative_divisions ns:m.010vz .
?c ns:government.governmental_jurisdiction.governing_officials ?y .
?y ns:government.government_position_held.office_holder ?x .
?y ns:government.government_position_held.basic_title ns:m.060c4 .
FILTER(NOT EXISTS {?y ns:government.government_position_held.from ?sk0} ||
EXISTS {?y ns:government.government_position_held.from ?sk1 .
FILTER(xsd:gYear(?sk1) <= \"1980\"^^xsd:gYear) })
FILTER(NOT EXISTS {?y ns:government.government_position_held.to ?sk2} ||
EXISTS {?y ns:government.government_position_held.to ?sk3 .
FILTER(xsd:gYear(?sk3) >= \"1980\"^^xsd:gYear) })}
"""
utils = QueryGraphUtils(use_tdb_query=True, kb_type="freebase", kb_index_path="/tuna1/indexes/d-freebase")
parse, graph = utils.sparql_to_graph(sparql=sparql, is_grounding=True)
return graph, utils
class TestQueryGraphUtils(object):
    """Tests for SPARQL-to-graph conversion and staged query-graph generation."""
    def test_sparql_to_graph(self, setup):
        """The example query should convert into a graph with exactly 4 edges."""
        graph, utils = setup
        assert len(graph.get_edges()) == 4
    def test_query_graph_stage_generation(self, setup):
        """Stage generation should emit one stage per graph edge for the sentence."""
        graph, utils = setup
        query_graph_stages = utils.query_graph_stage_generation(
            sentence="Who was the president in 1980 of the country that has Azad Kashmir?",
            query_graph=graph)
        assert len(query_graph_stages) == 4
        for stage in query_graph_stages:
            # NOTE(review): results are not asserted on; this only verifies the
            # example conversions run without raising.
            stage_testing_examples = stage.to_testing_example(utils=utils)
            stage_training_examples = stage.to_training_example(utils=utils)
|
nilq/baby-python
|
python
|
# Demo data: items in A paired with their sort keys in B.
A = ['C', 'D', "E", "F", "G"]
B = [3, 0, 4, 1, 2]


def sort(A, B):
    """Return the items of A as a tuple, ordered by the matching keys in B."""
    t = zip(A, B)
    t = sorted(t, key=lambda x: x[1])
    A, B = zip(*t)
    return A


# Fix: use the print() function — the original used the Python 2 `print`
# statement, which is a SyntaxError on Python 3.
print(sort(A, B))
|
nilq/baby-python
|
python
|
""""""
# Standard library modules.
import os
# Third party modules.
import pytest
import pyxray
# Local modules.
from pymontecarlo_penepma.importer import PenepmaImporter
# Globals and constants variables.
@pytest.fixture
def importer():
    """Provide a fresh PenepmaImporter instance per test."""
    return PenepmaImporter()
@pytest.mark.asyncio
async def test_import(event_loop, importer, options, testdatadir):
    """Import the `sim1` PENEPMA results and spot-check Cu (Z=29) K-line intensities."""
    dirpath = os.path.join(testdatadir, "sim1")
    results = await importer.import_(options, dirpath)
    assert len(results) == 2
    result = results[0]
    assert len(result) == 7 + 5
    # Ka1 photon intensity and its uncertainty (values from the sim1 fixture).
    intensity = result[(29, "Ka1")]
    assert intensity.n == pytest.approx(2.861705e-6, rel=1e-4)
    assert intensity.s == pytest.approx(2.44e-6 / 3, rel=1e-4)
    # Ka is the sum of the two component line intensities.
    intensity = result[(29, "Ka")]
    assert intensity.n == pytest.approx(2.861705e-6 + 1.040620e-6, rel=1e-4)
    # K adds a third component on top of Ka (presumably the Kb line — verify).
    intensity = result[(29, "K")]
    assert intensity.n == pytest.approx(
        2.861705e-6 + 1.040620e-6 + 2.601550e-7, rel=1e-4
    )
|
nilq/baby-python
|
python
|
import os
import streamlit as st
import pandas as pd
import plotly.express as px
from PIL import Image
# Page chrome: favicon, title, and help/bug/about menu links.
favicon = Image.open("media/favicon.ico")
st.set_page_config(
    page_title = "AICS Results",
    page_icon = favicon,
    menu_items={
        'Get Help': 'https://github.com/All-IISER-Cubing-Society/Results',
        'Report a bug': "https://github.com/All-IISER-Cubing-Society/Results/issues",
        'About': "AICS Results is a Streamlit app to visualize data of weekly event results. Contact Purva at AICS for any issues or help."
    }
)
# NOTE(review): this variable appears unused below — the literal "results" is used instead.
results = "results/"
@st.cache
def load_data():
    """Load every per-event CSV under results/ into one combined DataFrame.

    Each file is named `<event>.csv`; an `Event` column is derived from the
    file name and the `Date` column is parsed to datetime.
    """
    # Get all files in the results directory
    files = os.listdir("results")
    frames = []
    # Loop through all files and append dataframes to a list
    for f in files:
        df = pd.read_csv(os.path.join("results", f))
        # Convert Date column to datetime field
        df['Date'] = pd.to_datetime(df['Date'])
        # Fix: rstrip(".csv") strips any trailing '.', 'c', 's', 'v' characters
        # (e.g. "success.csv" -> "succe"); strip the extension properly instead.
        event = os.path.splitext(f)[0]
        df['Event'] = [event for i in range(len(df))]
        # Append to list
        frames.append(df)
    # Create combined data frame
    cdf = pd.concat(frames)
    return cdf
@st.cache
def load_event_data(data, name, events):
    """Return the rows of *data* belonging to *events*, grouped event by event.

    *name* is kept for cache-key/interface compatibility but not used here.
    """
    per_event = [data[data['Event'] == ev] for ev in events]
    return pd.concat(per_event)
cdf = load_data()
st.sidebar.title("AICS - Results")
# Top-level navigation: each category renders a different page below.
category = st.sidebar.radio(
    'Select Category',
    ('Individual Results', 'Best Results', 'Institute-wise Results', 'Institute Leaderboard'))
if category == 'Individual Results':
    # Get list of names in sorted order
    names = sorted(cdf['Name'].unique())
    # Sidebar name selector
    name = st.sidebar.selectbox('Name', names)
    # Person specific data
    df = cdf[cdf['Name'] == name]
    institute = df['Institute'].iloc[0]
    st.header(name)
    st.subheader(institute)
    # Get events
    events = df['Event'].unique()
    selected_events = st.multiselect('Events', events, '3x3')
    if len(selected_events) > 0:
        selected_events_df = load_event_data(df, name, selected_events)
        st.write("The graph is interactive. Feel free to play around with it.")
        if 'FMC' in selected_events and len(selected_events) > 1:
            st.write("FMC results are in Moves, and others in seconds. It would be better to plot FMC as a single graph.")
        # One line per event over time for the selected person.
        fig = px.line(selected_events_df, x='Date', y='Result', color='Event', markers=True)
        st.plotly_chart(fig, use_container_width=True)
    else:
        st.write("Please select some events.")
        st.write("If on mobile, select name from sidebar on top left.")
    st.header("Event Participation")
    # How many times the person participated in each event.
    participation_df = df['Event'].value_counts().reset_index()
    participation_df.columns = ['Event', 'Count']
    participation_df = participation_df.sort_values('Count', ascending=False)
    st.dataframe(participation_df)
elif category == 'Best Results':
    events = sorted(cdf['Event'].unique())
    event = st.sidebar.selectbox('Event', events)
    df = cdf[cdf['Event'] == event]
    # First sort by Result, then do a stable sort on Name
    df = df.sort_values('Result').sort_values('Name', kind='stable')
    # Drop duplicates, then sort by result again
    # (after the stable sort each person's best result comes first, so the
    # shift-compare keeps exactly one row per person)
    df = df.loc[df['Name'] != df['Name'].shift()].sort_values('Result')
    df['Result'] = df['Result'].astype(str)
    df['Date'] = df['Date'].astype(str)
    df = df.reset_index().drop(columns=['index', 'Event'])
    st.header(event)
    st.write(df)
elif category == "Institute-wise Results":
institutes = sorted(cdf['Institute'].unique())
institute = st.sidebar.selectbox('Institute', institutes)
idf = cdf[cdf['Institute'] == institute]
st.header("Institute-wise Results")
st.subheader(institute)
events = sorted(idf['Event'].unique())
event = st.selectbox('Eevnt', events)
df = idf[idf['Event'] == event]
# First sort by Result, then do a stable sort on Name
df = df.sort_values('Result').sort_values('Name', kind='stable')
# Drop duplicates, then sort by result again
df = df.loc[df['Name'] != df['Name'].shift()].sort_values('Result')
df['Result'] = df['Result'].astype(str)
df['Date'] = df['Date'].astype(str)
df = df.reset_index().drop(columns=['index', 'Event'])
st.write(df)
elif category == "Institute Leaderboard":
events = sorted(cdf['Event'].unique())
event = st.sidebar.selectbox('Event', events)
df = cdf[cdf['Event'] == event]
# First sort by Result, then do a stable sort on Institute
df = df.sort_values('Result').sort_values('Institute', kind='stable')
# Drop duplicates, then sort by result again
df = df.loc[df['Institute'] != df['Institute'].shift()].sort_values('Result')
df['Result'] = df['Result'].astype(str)
df['Date'] = df['Date'].astype(str)
df = df.reset_index().drop(columns=['index', 'Event'])
st.header("Institute Leaderboard")
st.subheader(event)
st.write(df)
image = Image.open("media/AICS-Logo-Dark.png")
st.sidebar.image(image)
st.sidebar.markdown("[Website](https://all-iiser-cubing-society.github.io/#/) | [Instagram](https://www.instagram.com/all.iiser.cubing.society/) | [YouTube](https://www.youtube.com/channel/UCXOIh4FS48Dwy3BC9_FhprA)")
|
nilq/baby-python
|
python
|
"""
Create a movie
==============
This example shows how to create a movie, which is only possible if `ffmpeg` is
installed in a standard location.
"""
from pde import UnitGrid, ScalarField, DiffusionPDE, MemoryStorage, movie_scalar
grid = UnitGrid([16, 16])  # generate grid
state = ScalarField.random_uniform(grid, 0.2, 0.3)  # generate initial condition
storage = MemoryStorage()  # create storage
tracker = storage.tracker(interval=1)  # create associated tracker (stores one frame per time unit)
eq = DiffusionPDE()  # define the physics
# Integrate the diffusion equation for two time units, recording into storage.
eq.solve(state, t_range=2, dt=0.005, tracker=tracker)
# create movie from stored data (requires ffmpeg, as noted above)
movie_scalar(storage, '/tmp/diffusion.mov')
|
nilq/baby-python
|
python
|
import warnings
warnings.simplefilter('ignore')
import pytest
import numpy as np
import keras
from hand_classifier.hand_cnn import HandCNN
@pytest.mark.parametrize("img_shape, target_shape", [((512, 512, 3), (224, 224, 3)), ((820, 430, 3), (96, 96, 3)), ((400, 800, 3), (114, 114, 3))])
def test_preprocessing(img_shape, target_shape):
    """Preprocessing must resize to the target shape and scale values into [-1, 1]."""
    warnings.simplefilter('ignore')
    # Random image with 8-bit value range in the source shape.
    input_img = np.random.random_sample(img_shape) * 255
    preprocessed_img = HandCNN.preprocess_input(input_img, target_shape[0], target_shape[1])
    # Fix: the first assertion message said "below 1" while the bound checked is -1.
    assert (np.asarray(preprocessed_img) < -1).sum() == 0, "preprocessed image contains values below -1"
    assert (np.asarray(preprocessed_img) > 1).sum() == 0, "preprocessed image contains values above 1"
    assert preprocessed_img.shape == target_shape, "preprocessed image doesn't have target shape"
@pytest.mark.parametrize("n_classes", [3, 6])
def test_model(n_classes):
    """The model must build, compile, and complete one training step on a dummy batch."""
    warnings.simplefilter('ignore')
    # Single all-zero sample; this only exercises graph construction and fit.
    inputs = np.zeros((1, 224, 224, 3), dtype=np.float32)
    targets = np.zeros((1, n_classes), np.float32)
    model = HandCNN.get_model(n_classes, 224, 224)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=keras.optimizers.Adam(lr=1e-5))
    model.fit(inputs, targets, batch_size=1)
@pytest.mark.parametrize("img_path", ["tests/hand_classifier/testdataset/fist/closeup1_0.jpg",
                                      "tests/hand_classifier/testdataset/spok/closeup1_0.jpg",
                                      "tests/hand_classifier/testdataset/palm/closeup1_0.jpg"])
def test_predictions(img_path):
    """After a one-epoch train, predictions must form a distribution over all labels."""
    warnings.simplefilter('ignore')
    hand_cnn = HandCNN()
    hand_cnn.LABELS = ["fist", "palm", "pointer", "spok", "thumb_down", "thumb_up"]
    # One tiny epoch just to obtain a usable model; accuracy is irrelevant here.
    hand_cnn.train("tests/hand_classifier/testdataset/", batch_size=1, epochs=1, learning_rate=0.01,
                   checkpoints_callback=False)
    res = hand_cnn.predict_img_path(img_path)
    assert len(res[0]) == len(hand_cnn.LABELS)
    # Predicted class scores should sum to 1 (to 5 decimal places).
    np.testing.assert_almost_equal(np.sum(res[0]), 1, 5)
|
nilq/baby-python
|
python
|
from flask import Blueprint
# Blueprint holding the app's main routes.
main=Blueprint("main",__name__)
# Imported last so the view functions can register themselves on `main`.
from .views import *
|
nilq/baby-python
|
python
|
# # scan_test.py
# # Author: Thomas MINIER - MIT License 2017-2018
# from query_engine.sage_engine import SageEngine
# from query_engine.iterators.scan import ScanIterator
# from query_engine.iterators.union import BagUnionIterator, RandomBagUnionIterator
# from database.hdt_file_connector import HDTFileConnector
#
# hdtDoc = HDTFileConnector('tests/data/test.hdt')
# engine = SageEngine()
# triple1 = {
# 'subject': 'http://example.org/s1',
# 'predicate': '?p',
# 'object': '?o'
# }
# triple2 = {
# 'subject': 'http://example.org/s2',
# 'predicate': '?p',
# 'object': '?o'
# }
#
#
# def test_bag_union_read():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = BagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e7)
# assert len(results) == card1 + card2
# assert done
#
#
# def test_bag_union_interrupt():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = BagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e-4)
# assert len(results) < card1 + card2
# assert not done
#
#
# def test_random_union_read():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = RandomBagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e7)
# assert len(results) == card1 + card2
# assert done
#
#
# def test_random_union_interrupt():
# iterator1, card1 = hdtDoc.search(triple1['subject'], triple1['predicate'], triple1['object'])
# iterator2, card2 = hdtDoc.search(triple2['subject'], triple2['predicate'], triple2['object'])
# left = ScanIterator(iterator1, triple1, card1)
# right = ScanIterator(iterator2, triple2, card2)
# union = RandomBagUnionIterator(left, right)
# (results, saved, done) = engine.execute(union, 10e-4)
# assert len(results) < card1 + card2
# assert not done
|
nilq/baby-python
|
python
|
# Copyright 2018 Sabino Miranda Jimenez
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import ConceptModelling
# Single-source the version from the package itself.
version = ConceptModelling.__version__
setup(
    name="ConceptModelling",
    description="""ConceptModelling""",
    version=version,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        'Programming Language :: Python :: 3',
        "Topic :: Scientific/Engineering :: Artificial Intelligence"],
    url='https://github.com/ingeotec/ConceptModelling',
    author="Sabino Miranda Jimenez",
    author_email="mgraffg@ieee.org",
    packages=['ConceptModelling', 'ConceptModelling/tests'],
    include_package_data=True,
    zip_safe=False,
    # package_data={'ConceptModelling/conf': ['default_parameters.json'],
    #               'ConceptModelling/tests': ['tweets.json']},
    # install_requires=['B4MSA', 'EvoDAG'],
    # entry_points={
    #     'console_scripts': ['ConceptModelling-train=ConceptModelling.command_line:train',
    #                         'ConceptModelling-predict=ConceptModelling.command_line:predict',
    #                         'ConceptModelling-utils=ConceptModelling.command_line:utils',
    #                         'ConceptModelling-performance=ConceptModelling.command_line:performance']
    # }
)
|
nilq/baby-python
|
python
|
from ._helpers import export_data, ExportScope
from . import orders, nested_orders
|
nilq/baby-python
|
python
|
RESNET = "resnet"
XCEPTION = "xception"
INCEPTIONV3 = "inceptionv3"
VGG16 = "vgg16"
IMAGENET = "imagenet"
CONFIG_FILE = "config.json"
MODEL_INFO_FILE = "model_info.json"
SCORING = "scoring"
RETRAINING = "retraining"
BEFORE_TRAIN = "before_train"
RETRAINED_SUFFIX="_retrained"
CUSTOM_TOP_SUFFIX = "_customtop"
RETRAINED = "retrained"
RETRAINED_PARAMS = "retrained_params"
TOP_PARAMS = "top_params"
NOTOP_SUFFIX = "_notop"
TENSORBOARD_LOGS = "tensorboard_logs"
LABEL = "__dku__image_label"
FILENAME = "__dku__image_filename"
MODEL_LABELS_FILE = "model_labels.csv"
PLUGIN_NAME = "dl-image-toolbox"
TENSORFLOW_VERSION_FOR_TENSORBOARD="tensorflow==1.13.1"
|
nilq/baby-python
|
python
|
import datetime
from dateutil import tz
def identity(x):
    '''Return the input value unchanged (useful as a default transform).'''
    return x
def local_timestamp(ts):
    '''Return a DST-aware `datetime` in the local timezone for Unix timestamp `ts`.'''
    return datetime.datetime.fromtimestamp(ts, tz.tzlocal())
def strftime(ts):
    """Format a timestamp or datetime as 'YYYY-MM-DD HH:MM:SS +zzzz'.

    Accepts None (returns the string 'None'), a numeric Unix timestamp
    (converted via local_timestamp), or a datetime-like object.
    """
    if ts is None:
        return 'None'
    # Generalized: also accept float timestamps (time.time() returns float).
    if isinstance(ts, (int, float)):
        ts = local_timestamp(ts)
    return ts.strftime('%F %T %z')
|
nilq/baby-python
|
python
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import pickle
from functools import partial
from collections import OrderedDict
import numpy as np
from ..base_evaluator import BaseEvaluator
from ..quantization_model_evaluator import create_dataset_attributes
from ...adapters import create_adapter
from ...config import ConfigError
from ...launcher import create_launcher
from ...utils import contains_all, contains_any, extract_image_representations, get_path
from ...progress_reporters import ProgressReporter
from ...logging import print_info
def generate_name(prefix, with_prefix, layer_name):
    """Prepend *prefix* when *with_prefix* is true; otherwise strip it from *layer_name*."""
    if with_prefix:
        return prefix + layer_name
    return layer_name.split(prefix)[-1]
class SuperResolutionFeedbackEvaluator(BaseEvaluator):
    def __init__(self, dataset_config, launcher, model):
        """Store evaluation collaborators; dataset and metrics are created later via select_dataset."""
        self.dataset_config = dataset_config
        self.preprocessing_executor = None
        self.preprocessor = None
        self.dataset = None
        self.postprocessor = None
        self.metric_executor = None
        self.launcher = launcher
        # Super-resolution feedback model wrapper (SRFModel instance).
        self.srmodel = model
        self._metrics_results = []
    @classmethod
    def from_configs(cls, config, delayed_model_loading=False):
        """Build the evaluator (launcher + SRFModel) from a configuration dictionary."""
        dataset_config = config['datasets']
        launcher_config = config['launchers'][0]
        # dlsdk launchers default to CPU when no device is specified.
        if launcher_config['framework'] == 'dlsdk' and 'device' not in launcher_config:
            launcher_config['device'] = 'CPU'
        launcher = create_launcher(launcher_config, delayed_model_loading=True)
        model = SRFModel(
            config.get('network_info', {}), launcher, config.get('_models', []), config.get('_model_is_blob'),
            delayed_model_loading
        )
        return cls(dataset_config, launcher, model)
def process_dataset(
self, subset=None,
num_images=None,
check_progress=False,
dataset_tag='',
output_callback=None,
allow_pairwise_subset=False,
dump_prediction_to_annotation=False,
calculate_metrics=True,
**kwargs):
if self.dataset is None or (dataset_tag and self.dataset.tag != dataset_tag):
self.select_dataset(dataset_tag)
self._annotations, self._predictions = [], []
self._create_subset(subset, num_images, allow_pairwise_subset)
metric_config = self.configure_intermediate_metrics_results(kwargs)
compute_intermediate_metric_res, metric_interval, ignore_results_formatting = metric_config
if 'progress_reporter' in kwargs:
_progress_reporter = kwargs['progress_reporter']
_progress_reporter.reset(self.dataset.size)
else:
_progress_reporter = None if not check_progress else self._create_progress_reporter(
check_progress, self.dataset.size
)
self.srmodel.init_feedback(self.dataset.data_reader)
for batch_id, (batch_input_ids, batch_annotation, batch_inputs, batch_identifiers) in enumerate(self.dataset):
self.srmodel.fill_feedback(batch_inputs)
batch_inputs = self.preprocessor.process(batch_inputs, batch_annotation)
batch_inputs_extr, _ = extract_image_representations(batch_inputs)
callback = None
if callback:
callback = partial(output_callback,
metrics_result=None,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids)
batch_raw_prediction, batch_prediction = self.srmodel.predict(
batch_identifiers, batch_inputs_extr, callback=callback
)
annotation, prediction = self.postprocessor.process_batch(batch_annotation, batch_prediction)
self.srmodel.feedback(prediction)
metrics_result = None
if self.metric_executor and calculate_metrics:
metrics_result, _ = self.metric_executor.update_metrics_on_batch(
batch_input_ids, annotation, prediction
)
if self.metric_executor.need_store_predictions:
self._annotations.extend(annotation)
self._predictions.extend(prediction)
if output_callback:
output_callback(
batch_raw_prediction[0],
metrics_result=metrics_result,
element_identifiers=batch_identifiers,
dataset_indices=batch_input_ids
)
if _progress_reporter:
_progress_reporter.update(batch_id, len(prediction))
if compute_intermediate_metric_res and _progress_reporter.current % metric_interval == 0:
self.compute_metrics(
print_results=True, ignore_results_formatting=ignore_results_formatting
)
if _progress_reporter:
_progress_reporter.finish()
if self.srmodel.store_predictions:
self.srmodel.save_predictions()
def compute_metrics(self, print_results=True, ignore_results_formatting=False):
if self._metrics_results:
del self._metrics_results
self._metrics_results = []
for result_presenter, evaluated_metric in self.metric_executor.iterate_metrics(
self._annotations, self._predictions):
self._metrics_results.append(evaluated_metric)
if print_results:
result_presenter.write_result(evaluated_metric, ignore_results_formatting)
return self._metrics_results
def extract_metrics_results(self, print_results=True, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(False, ignore_results_formatting)
result_presenters = self.metric_executor.get_metric_presenters()
extracted_results, extracted_meta = [], []
for presenter, metric_result in zip(result_presenters, self._metrics_results):
result, metadata = presenter.extract_result(metric_result)
if isinstance(result, list):
extracted_results.extend(result)
extracted_meta.extend(metadata)
else:
extracted_results.append(result)
extracted_meta.append(metadata)
if print_results:
presenter.write_result(metric_result, ignore_results_formatting)
return extracted_results, extracted_meta
def print_metrics_results(self, ignore_results_formatting=False):
if not self._metrics_results:
self.compute_metrics(True, ignore_results_formatting)
return
result_presenters = self.metric_executor.get_metric_presenters()
for presenter, metric_result in zip(result_presenters, self._metrics_results):
presenter.write_result(metric_result, ignore_results_formatting)
@property
def dataset_size(self):
return self.dataset.size
def release(self):
self.srmodel.release()
self.launcher.release()
def reset(self):
if self.metric_executor:
self.metric_executor.reset()
if hasattr(self, '_annotations'):
del self._annotations
del self._predictions
del self._input_ids
del self._metrics_results
self._annotations = []
self._predictions = []
self._input_ids = []
self._metrics_results = []
if self.dataset:
self.dataset.reset(self.postprocessor.has_processors)
@staticmethod
def get_processing_info(config):
module_specific_params = config.get('module_config')
model_name = config['name']
dataset_config = module_specific_params['datasets'][0]
launcher_config = module_specific_params['launchers'][0]
return (
model_name, launcher_config['framework'], launcher_config['device'], launcher_config.get('tags'),
dataset_config['name']
)
def _create_subset(self, subset=None, num_images=None, allow_pairwise=False):
if self.dataset.batch is None:
self.dataset.batch = 1
if subset is not None:
self.dataset.make_subset(ids=subset, accept_pairs=allow_pairwise)
elif num_images is not None:
self.dataset.make_subset(end=num_images, accept_pairs=allow_pairwise)
@staticmethod
def configure_intermediate_metrics_results(config):
compute_intermediate_metric_res = config.get('intermediate_metrics_results', False)
metric_interval, ignore_results_formatting = None, None
if compute_intermediate_metric_res:
metric_interval = config.get('metrics_interval', 1000)
ignore_results_formatting = config.get('ignore_results_formatting', False)
return compute_intermediate_metric_res, metric_interval, ignore_results_formatting
def load_network(self, network=None):
self.srmodel.load_network(network, self.launcher)
def load_network_from_ir(self, models_list):
self.srmodel.load_model(models_list, self.launcher)
def get_network(self):
return self.srmodel.get_network()
def get_metrics_attributes(self):
if not self.metric_executor:
return {}
return self.metric_executor.get_metrics_attributes()
def register_metric(self, metric_config):
if isinstance(metric_config, str):
self.metric_executor.register_metric({'type': metric_config})
elif isinstance(metric_config, dict):
self.metric_executor.register_metric(metric_config)
else:
raise ValueError('Unsupported metric configuration type {}'.format(type(metric_config)))
def register_postprocessor(self, postprocessing_config):
pass
def register_dumped_annotations(self):
pass
def select_dataset(self, dataset_tag):
if self.dataset is not None and isinstance(self.dataset_config, list):
return
dataset_attributes = create_dataset_attributes(self.dataset_config, dataset_tag)
self.dataset, self.metric_executor, self.preprocessor, self.postprocessor = dataset_attributes
@staticmethod
def _create_progress_reporter(check_progress, dataset_size):
pr_kwargs = {}
if isinstance(check_progress, int) and not isinstance(check_progress, bool):
pr_kwargs = {"print_interval": check_progress}
return ProgressReporter.provide('print', dataset_size, **pr_kwargs)
class BaseModel:
    """Common interface for launcher-specific model wrappers."""
    def __init__(self, network_info, launcher, delayed_model_loading=False):
        # delayed_model_loading is accepted for signature compatibility with
        # subclasses; the base class itself loads nothing.
        self.network_info = network_info
        self.launcher = launcher
    def predict(self, identifiers, input_data):
        """Run inference; must be implemented by subclasses."""
        raise NotImplementedError
    def release(self):
        """Free held resources; no-op by default."""
        pass
# pylint: disable=E0203
class BaseDLSDKModel:
    """Shared OpenVINO (DLSDK) helpers: model file discovery, network loading
    and input/output introspection.

    Subclasses are expected to provide `self.network`, `self.exec_network`
    and the class attribute `default_model_suffix`.
    """
    def print_input_output_info(self):
        """Log layer names, precisions and shapes of the loaded network."""
        print_info('{} - Input info:'.format(self.default_model_suffix))
        # Newer IE API exposes `input_info`; older API exposes `inputs`.
        has_info = hasattr(self.network if self.network is not None else self.exec_network, 'input_info')
        if self.network:
            if has_info:
                network_inputs = OrderedDict(
                    [(name, data.input_data) for name, data in self.network.input_info.items()]
                )
            else:
                network_inputs = self.network.inputs
            network_outputs = self.network.outputs
        else:
            # No readable network (e.g. imported blob): inspect the executable one.
            if has_info:
                network_inputs = OrderedDict([
                    (name, data.input_data) for name, data in self.exec_network.input_info.items()
                ])
            else:
                network_inputs = self.exec_network.inputs
            network_outputs = self.exec_network.outputs
        for name, input_info in network_inputs.items():
            print_info('\tLayer name: {}'.format(name))
            print_info('\tprecision: {}'.format(input_info.precision))
            print_info('\tshape {}\n'.format(input_info.shape))
        print_info('{} - Output info'.format(self.default_model_suffix))
        for name, output_info in network_outputs.items():
            print_info('\tLayer name: {}'.format(name))
            print_info('\tprecision: {}'.format(output_info.precision))
            print_info('\tshape: {}\n'.format(output_info.shape))
    def automatic_model_search(self, network_info):
        """Resolve the model (and weights) path from `network_info['srmodel']`.

        When the configured path is a directory, search it for a suffixed
        .xml/.blob pair first, falling back to any .xml/.blob file.
        :return: (model_path, weights_path) — weights_path is None for blobs
        :raises ConfigError: if zero or several candidate models are found
        """
        model = Path(network_info['srmodel'])
        if model.is_dir():
            is_blob = network_info.get('_model_is_blob')
            if is_blob:
                model_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list:
                    model_list = list(model.glob('*.blob'))
            else:
                model_list = list(model.glob('*{}.xml'.format(self.default_model_suffix)))
                blob_list = list(model.glob('*{}.blob'.format(self.default_model_suffix)))
                if not model_list and not blob_list:
                    model_list = list(model.glob('*.xml'))
                    blob_list = list(model.glob('*.blob'))
                if not model_list:
                    model_list = blob_list
            if not model_list:
                raise ConfigError('Suitable model for {} not found'.format(self.default_model_suffix))
            if len(model_list) > 1:
                raise ConfigError('Several suitable models for {} found'.format(self.default_model_suffix))
            model = model_list[0]
            print_info('{} - Found model: {}'.format(self.default_model_suffix, model))
        if model.suffix == '.blob':
            return model, None
        # Default weights file sits next to the .xml with a .bin extension.
        weights = get_path(network_info.get('weights', model.parent / model.name.replace('xml', 'bin')))
        print_info('{} - Found weights: {}'.format(self.default_model_suffix, weights))
        return model, weights
    def load_network(self, network, launcher):
        """Store *network* and (re)create its executable counterpart."""
        self.network = network
        self.exec_network = launcher.ie_core.load_network(network, launcher.device)
    def update_inputs_outputs_info(self):
        # Subclass hook: refresh cached input/output names after (re)loading.
        raise NotImplementedError
    def load_model(self, network_info, launcher, log=False):
        """Find, read and load the model; blobs are imported directly."""
        model, weights = self.automatic_model_search(network_info)
        if weights is not None:
            self.network = launcher.read_network(str(model), str(weights))
            self.exec_network = launcher.ie_core.load_network(self.network, launcher.device)
        else:
            self.exec_network = launcher.ie_core.import_network(str(model))
        self.update_inputs_outputs_info()
        if log:
            self.print_input_output_info()
def create_model(model_config, launcher, delayed_model_loading=False):
    """Instantiate the model wrapper matching the launcher's framework."""
    framework_to_class = {
        'dlsdk': ModelDLSDKModel,
        'tf': ModelTFModel,
    }
    framework = launcher.config['framework']
    # Replaying previously stored predictions requests a 'dummy' framework.
    # NOTE(review): the mapping has no 'dummy' entry, so this path always
    # raises ValueError below — confirm whether a replay model class is missing.
    if 'predictions' in model_config and not model_config.get('store_predictions', False):
        framework = 'dummy'
    wrapper_cls = framework_to_class.get(framework)
    if not wrapper_cls:
        raise ValueError('model for framework {} is not supported'.format(framework))
    return wrapper_cls(model_config, launcher, delayed_model_loading)
class SRFModel(BaseModel):
    """Composite wrapper for the super-resolution feedback network.

    Re-exports the wrapped model's feedback hooks and optionally accumulates
    predictions so they can be dumped to a pickle file after a run.
    """
    def __init__(self, network_info, launcher, models_args, is_blob, delayed_model_loading=False):
        super().__init__(network_info, launcher)
        if models_args and not delayed_model_loading:
            model = network_info.get('srmodel', {})
            if not contains_any(model, ['model', 'onnx_model']) and models_args:
                model['srmodel'] = models_args[0]
                model['_model_is_blob'] = is_blob
            # BUG FIX: the model description was previously stored under the
            # key 'sr_model', so the 'srmodel' presence check below failed even
            # when a model path was supplied via command-line args.
            network_info.update({'srmodel': model})
        if not contains_all(network_info, ['srmodel']) and not delayed_model_loading:
            raise ConfigError('network_info should contain srmodel field')
        self.srmodel = create_model(network_info['srmodel'], launcher, delayed_model_loading)
        # Expose the wrapped model's feedback protocol directly.
        self.feedback = self.srmodel.feedback
        self.init_feedback = self.srmodel.init_feedback
        self.fill_feedback = self.srmodel.fill_feedback
        self.store_predictions = network_info['srmodel'].get('store_predictions', False)
        self._predictions = [] if self.store_predictions else None
        self._part_by_name = {'srmodel': self.srmodel}
        self._raw_outs = OrderedDict()
    def predict(self, identifiers, input_data, callback=None):
        """Run the wrapped model per input item.

        :return: (list of raw outputs, list of adapted predictions)
        """
        predictions, raw_outputs = [], []
        for data in input_data:
            output, prediction = self.srmodel.predict(identifiers, data)
            if self.store_predictions:
                self._predictions.append(prediction)
            raw_outputs.append(output)
            predictions.append(prediction)
        return raw_outputs, predictions
    def reset(self):
        """Clear per-run state, including stored predictions."""
        self.processing_frames_buffer = []
        if self._predictions is not None:
            self._predictions = []
    def release(self):
        self.srmodel.release()
    def save_predictions(self):
        """Dump accumulated predictions to the configured pickle file."""
        if self._predictions is not None:
            prediction_file = Path(self.network_info['srmodel'].get('predictions', 'model_predictions.pickle'))
            with prediction_file.open('wb') as file:
                pickle.dump(self._predictions, file)
    def load_network(self, network_list, launcher):
        # NOTE(review): SRFModel defines no update_inputs_outputs_info —
        # confirm the call below resolves (it is not inherited from BaseModel).
        for network_dict in network_list:
            self._part_by_name[network_dict['name']].load_network(network_dict['srmodel'], launcher)
        self.update_inputs_outputs_info()
    def load_model(self, network_list, launcher):
        for network_dict in network_list:
            self._part_by_name[network_dict['name']].load_model(network_dict, launcher)
        self.update_inputs_outputs_info()
    def _add_raw_predictions(self, prediction):
        for key, output in prediction.items():
            if key not in self._raw_outs:
                self._raw_outs[key] = []
            self._raw_outs[key].append(output)
    def get_network(self):
        return [{'name': 'srmodel', 'model': self.srmodel.network}]
class FeedbackMixin:
    """Wires one network input up as a feedback channel fed from predictions."""
    def configure_feedback(self):
        """Build name<->index maps and locate the configured feedback input."""
        self._idx_to_name = {}
        self._name_to_idx = {}
        feedback_name = self.network_info['feedback_input']
        self._feedback_name = feedback_name
        self._feedback_data = {feedback_name: None}
        self._first_step = True
        inputs = self.network_info['inputs']
        self._inputs = inputs
        matching = [entry for entry in inputs if entry['name'] == feedback_name]
        self._feedback_inputs = {feedback_name: matching[0]}
        for entry in inputs:
            position = int(entry['value'])
            self._idx_to_name[position] = entry['name']
            self._name_to_idx[entry['name']] = position
        self._feedback_idx = self._name_to_idx[feedback_name]
    def init_feedback(self, reader):
        """Seed the feedback input from its configured initializer image."""
        descriptor = self._feedback_inputs[self._feedback_name]
        self._feedback_data[self._feedback_name] = reader.read(descriptor['initializer'])
    def feedback(self, data):
        """Remember the latest prediction as the next iteration's feedback."""
        first_batch = data[0]
        self._feedback_data[self._feedback_name] = first_batch[0].value
    def fill_feedback(self, data):
        """Inject the stored feedback into the batch at the feedback slot."""
        data[0].data[self._feedback_idx] = self._feedback_data[self._feedback_name]
        return data
class ModelDLSDKModel(BaseModel, BaseDLSDKModel, FeedbackMixin):
    """DLSDK (OpenVINO) implementation of the feedback super-resolution model."""
    default_model_suffix = 'srmodel'
    def __init__(self, network_info, launcher, delayed_model_loading=False):
        super().__init__(network_info, launcher)
        self.input_blob, self.output_blob = None, None
        self.with_prefix = None
        if not delayed_model_loading:
            self.load_model(network_info, launcher, log=True)
        self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
        self.configure_feedback()
    def predict(self, identifiers, input_data):
        """Run one synchronous inference and adapt raw outputs.

        :return: (raw result dict, adapted prediction list)
        """
        input_data = self.fit_to_input(input_data)
        raw_result = self.exec_network.infer(input_data)
        result = self.adapter.process([raw_result], identifiers, [{}])
        return raw_result, result
    def release(self):
        del self.exec_network
        del self.launcher
    def fit_to_input(self, input_data):
        """Map positional inputs to named NCHW batches expected by the network.

        Assumes each input arrives as an HWC image (the transpose below adds a
        batch axis and reorders to NCHW).
        """
        has_info = hasattr(self.exec_network, 'input_info')
        if has_info:
            input_info = self.exec_network.input_info
        else:
            input_info = self.exec_network.inputs
        fitted = {}
        for name, info in input_info.items():
            data = input_data[self._name_to_idx[name]]
            data = np.expand_dims(data, axis=0)
            data = np.transpose(data, [0, 3, 1, 2])
            # NOTE(review): `info.input_data` only exists on the new-style
            # input_info objects — confirm the old-API branch above reaches here.
            assert tuple(info.input_data.shape) == np.shape(data)
            fitted[name] = data
        return fitted
    def update_inputs_outputs_info(self):
        """Rewrite configured layer names when the network prefixes them
        with '<suffix>_' (happens for some converted/composite models)."""
        has_info = hasattr(self.exec_network, 'input_info')
        input_info = self.exec_network.input_info if has_info else self.exec_network.inputs
        input_blob = next(iter(input_info))
        with_prefix = input_blob.startswith(self.default_model_suffix + '_')
        if (with_prefix != self.with_prefix) and with_prefix:
            self.network_info['feedback_input'] = '_'.join([self.default_model_suffix,
                                                            self.network_info['feedback_input']])
            for inp in self.network_info['inputs']:
                inp['name'] = '_'.join([self.default_model_suffix, inp['name']])
                if 'blob' in inp.keys():
                    inp['blob'] = '_'.join([self.default_model_suffix, inp['blob']])
            self.network_info['adapter']['target_out'] = '_'.join([self.default_model_suffix,
                                                                   self.network_info['adapter']['target_out']])
        self.with_prefix = with_prefix
class ModelTFModel(BaseModel, FeedbackMixin):
    """TensorFlow-launcher implementation of the feedback super-resolution model."""
    default_model_suffix = 'srmodel'
    def __init__(self, network_info, launcher, *args, **kwargs):
        super().__init__(network_info, launcher)
        model_path = self.automatic_model_search(network_info)
        self.inference_session = launcher.create_inference_session(str(model_path))
        self.adapter = create_adapter(network_info.get('adapter', 'super_resolution'))
        self.configure_feedback()
    def predict(self, identifiers, input_data):
        """Run one inference and adapt raw outputs into prediction objects."""
        feed = self.fit_to_input(input_data)
        raw_result = self.inference_session.predict([feed])
        adapted = self.adapter.process(raw_result, identifiers, [{}])
        return raw_result, adapted
    def fit_to_input(self, input_data):
        """Map positional inputs to named, batch-expanded arrays."""
        return {
            self._idx_to_name[position]: np.expand_dims(tensor, axis=0)
            for position, tensor in enumerate(input_data)
        }
    def release(self):
        del self.inference_session
    @staticmethod
    def automatic_model_search(network_info):
        """Return the configured model path (no directory search for TF)."""
        return Path(network_info['model'])
|
nilq/baby-python
|
python
|
"""
This module details user input api
"""
import time
from queue import Queue, Empty
from pubsub import pub
from fixate.config import RESOURCES
from collections import OrderedDict
# Choice sets presented to the user by the request helpers below.
USER_YES_NO = ("YES", "NO")
USER_RETRY_ABORT_FAIL = ("RETRY", "ABORT", "FAIL")
def _user_req_input(msg, target=None, attempts=5, **kwargs):
    """
    Block until the UI returns the user's response.

    :param msg:
        message shown to the user
    :param target:
        optional callable the UI uses to validate the input
    :param attempts:
        number of validation attempts the UI should allow
    :param kwargs:
        extra keyword arguments forwarded to the target
    :return:
        the user's response
    """
    reply_queue = Queue()
    pub.sendMessage("UI_block_start")
    pub.sendMessage(
        "UI_req_input",
        msg=msg,
        q=reply_queue,
        target=target,
        attempts=attempts,
        kwargs=kwargs,
    )
    response = reply_queue.get()
    pub.sendMessage("UI_block_end")
    return response
def _user_req_choices(msg, choices, target=None, attempts=5):
    """
    Block until the user selects one of the provided choices.

    :param msg:
        A message that will be shown to the user
    :param choices:
        Sequence of at least two valid choices
    :param target:
        A function that will verify the user input
    :param attempts:
        Number of validation attempts the UI should allow
    :raises ValueError:
        If fewer than two choices are supplied
    :return:
        Returns the user response
    """
    if len(choices) < 2:
        raise ValueError(
            "Requires at least two choices to work, {} provided".format(choices)
        )
    q = Queue()
    pub.sendMessage("UI_block_start")
    pub.sendMessage(
        "UI_req_choices",
        msg=msg,
        q=q,
        choices=choices,
        target=target,
        attempts=attempts,
    )
    resp = q.get()
    pub.sendMessage("UI_block_end")
    return resp
def user_info(msg):
    """Publish an informational message for the UI to display."""
    pub.sendMessage("UI_display", msg=msg)
def user_info_important(msg):
    """Publish a message for the UI to display with emphasis."""
    pub.sendMessage("UI_display_important", msg=msg)
def user_input(msg):
    """
    Request free-form text input from the user
    :param msg:
        text string indicating the request to the user
    :return:
        user response
    """
    # TODO - fix validation, bring it all into one method?? or move validation into target function for consistency
    return _user_req_input(msg)
def _float_validate(entry):
try:
return float(entry)
except ValueError:
user_info("Please enter a number")
return False
def user_input_float(msg):
    """
    Get a floating point number from the user
    :param msg:
        text string indicating the request to the user
    :return:
        user response if valid
    """
    return _user_req_input(msg, target=_float_validate)
def user_action(msg, target):
    """
    Prompts the user to complete an action.
    Actively monitors the target infinitely until the event is detected or a user fail event occurs
    :param msg:
        Message to display to the user
    :param target: A function that will be called until the user action is cancelled. The function
        should return False if it hasn't completed. If the action is finished return True.
    :return: True if target returns True to finish the loop, False if user
        cancels via the UserActionCallback
    """
    class UserActionCallback:
        """Contract object handed to the UI for cancelling / cleanup."""
        def __init__(self):
            # The UI implementation must provide queue.Queue object. We
            # monitor that object. If it is non-empty, we get the message
            # in the q and cancel the target call.
            self.user_cancel_queue = None
            # In the case that the target exits the user action instead
            # of the user, we need to tell the UI to do any clean up that
            # might be required (e.g. return GUI buttons to the default state).
            # Does not need to be implemented by the UI.
            # Function takes no args and should return None.
            self.target_finished_callback = lambda: None
        def set_user_cancel_queue(self, cancel_queue):
            self.user_cancel_queue = cancel_queue
        def set_target_finished_callback(self, callback):
            self.target_finished_callback = callback
    callback_obj = UserActionCallback()
    pub.sendMessage("UI_action", msg=msg, callback_obj=callback_obj)
    try:
        while True:
            # User cancellation wins over target completion each pass.
            try:
                callback_obj.user_cancel_queue.get_nowait()
                return False
            except Empty:
                pass
            if target():
                return True
            # Yield control for other threads but don't slow down target
            time.sleep(0)
    finally:
        # No matter what, if we exit, we want to reset the UI
        callback_obj.target_finished_callback()
def user_ok(msg):
    """
    Display the provided message and wait for the user to acknowledge it.
    :param msg:
        A message that will be shown to the user
    """
    ack_queue = Queue()
    pub.sendMessage("UI_block_start")
    pub.sendMessage("UI_req", msg=msg, q=ack_queue)
    acknowledgement = ack_queue.get()
    pub.sendMessage("UI_block_end")
    return acknowledgement
def user_image(path):
    """Ask the UI to display the image located at *path*."""
    pub.sendMessage("UI_image", path=path)
def user_image_clear():
    """Ask the UI to clear any currently displayed image."""
    pub.sendMessage("UI_image_clear")
# TODO: This is used by the sequencer. Should make internal. Doesn't make
# sense that a test script would call this.
def user_retry_abort_fail(msg):
    """Ask the user to choose between RETRY, ABORT and FAIL."""
    return _user_req_choices(msg, target=_user_choices, choices=USER_RETRY_ABORT_FAIL)
def user_yes_no(msg, attempts=1):
    """Ask the user a yes/no question, allowing *attempts* tries."""
    return _user_req_choices(
        msg, attempts=attempts, target=_user_choices, choices=USER_YES_NO
    )
def _user_choices(response, choices):
if len(response) > 0:
for choice in choices:
if choice.startswith(response.upper()):
return choice
return False
def _ten_digit_serial(response):
return (len(response) == 10) and int(response)
def user_serial(msg, target=_ten_digit_serial, attempts=5):
    """Prompt for a serial number validated by *target* (default: 10 digits)."""
    return _user_req_input(msg, attempts=attempts, target=target)
def user_post_sequence_info_pass(msg):
    """
    Queue *msg* for display at the end of the sequence when it passes.
    Messages appear in the order they were queued; re-adding a message
    overwrites the previous entry (removing duplicates).
    :param msg: String as it should be displayed
    :return:
    """
    context = RESOURCES["SEQUENCER"].context_data
    info = context.setdefault("_post_sequence_info", OrderedDict())
    info[msg] = "PASSED"
def user_post_sequence_info_fail(msg):
    """
    Queue *msg* for display at the end of the sequence when it fails or errors.
    Messages appear in the order they were queued; re-adding a message
    overwrites the previous entry (removing duplicates).
    :param msg: String as it should be displayed
    :return:
    """
    context = RESOURCES["SEQUENCER"].context_data
    info = context.setdefault("_post_sequence_info", OrderedDict())
    info[msg] = "FAILED"
def user_post_sequence_info(msg):
    """
    Queue *msg* for display at the end of the sequence regardless of outcome.
    Messages appear in the order they were queued; re-adding a message
    overwrites the previous entry (removing duplicates).
    :param msg: String as it should be displayed
    :return:
    """
    context = RESOURCES["SEQUENCER"].context_data
    info = context.setdefault("_post_sequence_info", OrderedDict())
    info[msg] = "ALL"
|
nilq/baby-python
|
python
|
"""
NOTE: Здесь можно описывать и другие аспекты, которые идут параллельно основному использованию.
Если слишком длинно - можно и ссылками на офиц. доку
"""
def example_1():
    """Placeholder example; intentionally empty."""
    pass
if __name__ == "__main__":
    # Run the example when executed as a script.
    example_1()
|
nilq/baby-python
|
python
|
import os.path
from data.base_dataset import BaseDataset, get_transforms_reid, get_transforms_LR_reid, get_transforms_norm_reid
from data.image_folder import make_reid_dataset
from PIL import Image
from scipy.io import loadmat
import numpy as np
class SingleMarketDataset(BaseDataset):
    """Single-direction Market-1501 person re-ID dataset with HR/LR pairs.

    Dataset type 'A' serves high-resolution gallery images (with a
    down-scaled copy as ground truth); type 'B' serves low-resolution query
    images (with the high-resolution original as ground truth) plus the
    per-identity attribute vector.
    """
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Register Market-1501-specific command-line options and defaults."""
        # parser.add_argument('--dataset_type', type=str, default='A', help='the A set')
        # Class counts per attribute: the first has 4 classes, the remaining
        # 26 are binary (27 attributes in total).
        Market_attr_class_num = [4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
        parser.add_argument('--up_scale', type=int, default=4, help='up_scale of the image super-resolution')
        parser.add_argument('--num_attr', type=int, default=27, help='the number of the attributes')
        parser.add_argument('--resize_h', type=int, default=256, help='the size of the height should be resized')
        parser.add_argument('--resize_w', type=int, default=128, help='the size of the width should be resized')
        parser.add_argument('--num_classes', type=int, default=751, help='the total num of the id classes')
        parser.add_argument('--attr_class_num', nargs='+', type=int, help='the number of classes of each attributes')
        parser.set_defaults(attr_class_num=Market_attr_class_num)
        return parser
    def initialize(self, opt):
        """Index images, labels and attribute vectors for the selected split."""
        self.opt = opt
        # NOTE(review): hard-coded cluster path — consider making it an option.
        self.dataPath = '/home/share/jiening/dgd_datasets/raw'
        # self.root = opt.dataroot # opt.dataroot = Market-1501-v15.09.15
        # NOTE(review): self.root is only assigned for dataroot == 'Market';
        # any other value leaves it unset and the joins below would fail.
        if opt.dataroot == 'Market':
            self.root = 'Market-1501-v15.09.15'
        self.dataset_type = opt.dataset_type
        # load the attributes from the formatted attributes file, total 27 attributes
        self.attrFile = os.path.join(self.dataPath, self.root, 'Market_attributes.mat')  # get the attributes mat file
        self.total_attr = loadmat(self.attrFile)
        self.train_attr = self.total_attr['train_attr']  # 751 * 27
        self.test_attr = self.total_attr['test_attr']  # 750 * 27
        # load the attributes index from the index file, total 27 attributes
        self.attrIndexFile = os.path.join(self.dataPath, self.root, 'Market_index.mat')
        self.total_attrIndex = loadmat(self.attrIndexFile)
        self.train_attrIndex = self.total_attrIndex['train_index'][0]  # 751
        self.test_attrIndex = self.total_attrIndex['test_index'][0]  # 750
        # -----------------------------------------
        # query (test B) LR
        dir_query = os.path.join(self.dataPath, self.root, 'query')  # images in the query
        query_paths, query_labels = make_reid_dataset(dir_query)
        query_num = len(query_paths)  # 2228
        print('total %d images in query' % query_num)
        # -----------------------------------------
        # gallery (test A) HR
        dir_gallery = os.path.join(self.dataPath, self.root, 'bounding_box_test')
        gallery_paths, gallery_labels = make_reid_dataset(dir_gallery)
        gallery_num = len(gallery_paths)  # 17661
        print('total %d images in bounding_box_test' % gallery_num)
        # Map identity label -> row index in test_attr.
        self.test_attr_map = {}
        # the query_labels are included in the gallery_labels
        for i, label in enumerate(self.test_attrIndex):
            self.test_attr_map[label] = i
        if self.dataset_type == 'A':
            self.img_paths = gallery_paths
            self.img_labels = gallery_labels
        else:
            self.img_paths = query_paths
            self.img_labels = query_labels
        # NOTE(review): attribute vectors are always built from query labels,
        # even for type 'A' (where __getitem__ never reads img_attrs) —
        # confirm this asymmetry is intended.
        self.img_attrs = []
        for i in query_labels:
            # obtain the according id
            attr_id = self.test_attr_map[i]
            self.img_attrs.append(self.test_attr[attr_id])
        # A: high-resolution, B: low-resolution
        self.transform = get_transforms_reid(opt)
        self.transform_LR = get_transforms_LR_reid(opt)
        self.transform_norm = get_transforms_norm_reid()
    def __getitem__(self, index):
        """Return a dict with the image, its ground-truth counterpart,
        attribute vector (type 'B' only) and identity label."""
        img_path = self.img_paths[index]
        img = Image.open(img_path).convert('RGB')
        # img = self.transform_A(img)
        img_label = self.img_labels[index]
        # A: high-resolution, B: low-resolution
        if self.dataset_type == 'A':
            # high-resolution image
            img = self.transform(img)
            GT_img = self.transform_LR(img)  # ground-truth low-resolution image
            img = self.transform_norm(img)
            GT_img = self.transform_norm(GT_img)
            # do not need the attributes, do not have the attributes
            img_attr = img_label
        else:
            # low-resolution image
            GT_img = self.transform(img)  # ground-truth high-resolution image
            img = self.transform_LR(GT_img)
            GT_img = self.transform_norm(GT_img)
            img = self.transform_norm(img)
            img_attr = self.img_attrs[index]
        if self.opt.direction == 'BtoA':
            input_nc = self.opt.output_nc
        else:
            input_nc = self.opt.input_nc
        if input_nc == 1:  # RGB to gray
            # ITU-R 601 luma weights.
            tmp = img[0, ...] * 0.299 + img[1, ...] * 0.587 + img[2, ...] * 0.114
            img = tmp.unsqueeze(0)
        return {'img': img, 'img_paths': img_path,
                'GT_img': GT_img,
                'img_attr': img_attr,
                'img_label': img_label}
    def __len__(self):
        """Number of images in the selected split."""
        return len(self.img_paths)
    def name(self):
        """Human-readable dataset name."""
        return 'SingleMarketDataset'
|
nilq/baby-python
|
python
|
from pm4pymdl.algo.mvp.utils import succint_mdl_to_exploded_mdl, clean_objtypes
import pandas as pd
def preprocess(df, parameters=None):
    """Normalize an MVP dataframe before further processing.

    Converts a succint dataframe to exploded form and guarantees the result
    has at least the 'event_id' and 'event_activity' columns.

    :param df: input dataframe (succint or already-exploded MDL)
    :param parameters: optional settings dict (currently unused here)
    :return: the normalized dataframe
    """
    if parameters is None:
        parameters = {}
    conversion_needed = False
    # `df.type` is an attribute only set on succint MDL dataframes.
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # narrowed to what the probe can actually raise:
    #   AttributeError — plain dataframes without the marker attribute
    #   ValueError     — a 'type' *column* makes the comparison a Series,
    #                    whose truth value is ambiguous
    try:
        if df.type == "succint":
            conversion_needed = True
    except (AttributeError, ValueError):
        pass
    if len(df) == 0:
        df = pd.DataFrame({"event_id": [], "event_activity": []})
    if conversion_needed:
        df = succint_mdl_to_exploded_mdl.apply(df)
        #df = clean_objtypes.perfom_cleaning(df, parameters=parameters)
    if len(df) == 0:
        df = pd.DataFrame({"event_id": [], "event_activity": []})
    return df
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import torchvision
import torch
import matplotlib.pyplot as plt
from pathlib import Path
import logging
import time
import pickle
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import Dataset
from mingpt.utils import set_seed, sample
from mingpt.model import GPT, GPTConfig
from mingpt.trainer import Trainer, TrainerConfig
import os
# Configure root logging for the whole script.
# BUG FIX: datefmt used '%Y-%d-%d' (year-day-day); '%Y-%m-%d' is the intended
# year-month-day timestamp format.
logging.basicConfig(
    format='%(asctime)s|%(levelname)s|%(name)s|%(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.INFO,
)
set_seed(42)  # make deterministic
# "Small" GPT hyper-parameter preset: 24 transformer layers, 8 attention
# heads, 512-dim embeddings, all dropout disabled.
# NOTE(review): not referenced in the visible part of this script — confirm
# it is consumed elsewhere.
GPT_S = dict(
    embd_pdrop=0.0,
    resid_pdrop=0.0,
    attn_pdrop=0.0,
    n_layer=24,
    n_head=8,
    n_embd=512,
)
def now_utc():  # unix time
    """Return the current unix timestamp in milliseconds (second resolution).

    time.time() is rounded to whole seconds first, so the value is always a
    multiple of 1000.
    """
    return round(time.time()) * 1000
def load_pickle(f_path):
    """Deserialize and return the object stored in the pickle at *f_path*."""
    with open(f_path, 'rb') as handle:
        return pickle.load(handle)
def get_train_test_split(X, y, test_size, random_state=42, verbose=False):
    """Split arrays into reproducible train/test subsets.

    Thin wrapper around sklearn's train_test_split with optional shape logging.
    """
    split = train_test_split(
        X, y,
        test_size=test_size,
        random_state=random_state  # reproducible results
    )
    X_train, X_test, y_train, y_test = split
    if verbose:
        log = logging.getLogger(__name__)
        log.info('train data: X ~ {}, y ~ {}'.format(X_train.shape, y_train.shape))
        log.info('test data: X ~ {}, y ~ {}'.format(X_test.shape, y_test.shape))
    return X_train, X_test, y_train, y_test
def get_data(file_path, max_imgs=2000):
    """Load a pickled image dataset and return train/test TensorDatasets.

    :param file_path: path to a pickle holding either (images, masks) or a
        plain list of images
    :param max_imgs: cap on image count in the unsupervised case
        (NOTE(review): the supervised (images, masks) branch is not capped —
        confirm the asymmetry is intended)
    :return: (train TensorDataset, test TensorDataset, raw X_train array)
    """
    dataset = load_pickle(Path(file_path).expanduser())
    if len(dataset) == 2:  # (images, masks)
        X = dataset[0]  # list of images
        y = dataset[1]  # list of corresponding masks
    else:  # unsupervised list of images
        X = np.array(dataset, dtype='float32')[:max_imgs]
        y = np.zeros(len(X))
    # Scale pixels into the [0, 255] token range.
    # (BUG FIX: removed the unused local `pixel_size`.)
    X = np.array(np.ceil(X * 255), dtype='float32')
    y = np.array(np.ceil(y * 255), dtype='float32')
    X_train, X_test, y_train, y_test = get_train_test_split(X, y, 0.3, verbose=True)
    tensor_X_train = torch.Tensor(X_train)  # tensors
    tensor_y_train = torch.Tensor(y_train)
    tensor_X_test = torch.Tensor(X_test)
    tensor_y_test = torch.Tensor(y_test)
    t_train_dataset = TensorDataset(tensor_X_train, tensor_y_train)
    t_test_dataset = TensorDataset(tensor_X_test, tensor_y_test)
    return t_train_dataset, t_test_dataset, X_train
class ImageDataset(Dataset):
    """Serve 32x32 images as flattened pixel sequences for next-pixel GPT.

    Each item is the image's first 1023 pixels paired with the sequence
    shifted by one (the prediction targets). An optional fixed permutation
    reshuffles pixel order.
    """
    def __init__(self, pt_dataset, perm=None):
        self.pt_dataset = pt_dataset
        flattened_image_size = 32 * 32
        self.perm = torch.arange(flattened_image_size) if perm is None else perm
        self.vocab_size = 256  # possible values for pixels
        self.block_size = flattened_image_size - 1
    def __len__(self):
        return len(self.pt_dataset)
    def __getitem__(self, idx):
        grayscale_channels = 1
        image, _ = self.pt_dataset[idx]
        # Flatten all pixels, apply the fixed permutation and cast to float.
        pixels = torch.from_numpy(np.array(image)).view(-1, grayscale_channels)
        pixels = pixels[self.perm].float()
        sequence = pixels[:, 0]
        # Always just predict the next pixel in the sequence.
        return sequence[:-1], sequence[1:]
def get_model(mconf):
    """Build and return a GPT model from the given GPTConfig."""
    model = GPT(mconf)
    return model
def train(model, n_epochs, train_dataset, test_dataset, checkpoint_path):
    """Train `model` for n_epochs, checkpointing to checkpoint_path.

    Returns the Trainer instance so callers can reuse its device and state.
    """
    tokens_per_epoch = len(train_dataset) * train_dataset.block_size
    trainer_kwargs = dict(
        max_epochs=n_epochs,
        batch_size=4,
        learning_rate=3e-3,
        betas=(0.9, 0.95),
        weight_decay=0,
        lr_decay=True,
        # Warm up over exactly one epoch's worth of tokens.
        warmup_tokens=tokens_per_epoch,
        final_tokens=n_epochs * tokens_per_epoch,
        ckpt_path=checkpoint_path,
        num_workers=1,
    )
    trainer = Trainer(model, train_dataset, test_dataset,
                      TrainerConfig(**trainer_kwargs))
    trainer.train()
    return trainer
def model_first_token(dataset, X_train, n_clusters=256):
    """Estimate the empirical distribution of the first pixel token.

    Samples half of the training images in random order and counts their
    first token, with add-one (Laplace) smoothing so no token gets zero
    probability. Returns a length-`n_clusters` tensor summing to 1.
    """
    counts = torch.ones(n_clusters)  # start at 1, i.e. "smoothing"
    order = torch.randperm(len(dataset))
    n_samples = X_train.shape[0] // 2  # how many images to use for the estimation
    for idx in order[:n_samples]:
        seq, _ = dataset[int(idx)]
        first_token = int(seq[0].item())  # first token in the sequence
        counts[first_token] += 1
    return counts / counts.sum()  # normalize into a probability distribution
def sample_some(trainer, model, dataset, X_train, n_samples=40, out_path='./samples.png'):
    """Sample n_samples images from the model and save them as a PNG grid.

    The first pixel of each sample is drawn from the empirical first-token
    distribution; the rest are generated autoregressively by `sample`.
    """
    prob = model_first_token(dataset, X_train)
    start_pixel = np.random.choice(np.arange(dataset.vocab_size), size=(n_samples, 1), replace=True, p=prob.numpy())
    start_pixel = torch.from_numpy(start_pixel).to(trainer.device)
    flattened_image_size = 32 * 32
    pixels = sample(model, start_pixel, flattened_image_size - 1, temperature=1.0, sample=True, top_k=40)
    # for visualization we have to invert the permutation used to produce the pixels
    iperm = torch.argsort(dataset.perm)
    pixel_size = 32
    n_cols = 8
    # assumes n_samples is a multiple of n_cols — TODO confirm
    n_rows = n_samples // n_cols
    fig, axis = plt.subplots(n_rows, n_cols, figsize=(16, 8))
    for i, ax in enumerate(axis.ravel()):
        pxi = pixels[i][iperm]  # undo the encoding permutation
        pxi = pxi.view(pixel_size, pixel_size).cpu().numpy().astype(np.uint8)  # grayscale -> 2D
        ax.imshow(pxi, cmap='magma')
        ax.axis('off')
    plt.savefig(out_path)
def fine_tune(model):
    """Fine-tune `model` on a downstream task. Not implemented yet (stub)."""
    pass
def do_it(data_path, n_embd, use_embd, folder_out):
    """Run one experiment: train a GPT on data_path, save checkpoint + samples.

    NOTE(review): os.makedirs without exist_ok=True fails when folder_out
    already exists — confirm this is the intended guard against overwrites.
    """
    os.makedirs(folder_out)
    # Redirect the root logger to a per-experiment log file.
    filename = './{}/log_{}.log'.format(folder_out, now_utc())
    fileh = logging.FileHandler(filename, 'a')
    log = logging.getLogger()  # root logger
    for hdlr in log.handlers[:]:  # remove all old handlers
        log.removeHandler(hdlr)
    log.addHandler(fileh)  # set the new handler
    t_train_dataset, t_test_dataset, X_train = get_data(data_path)  # raw data
    train_dataset = ImageDataset(t_train_dataset)  # build dataset
    test_dataset = ImageDataset(t_test_dataset)
    MY_GPT = dict(
        n_layer=16,
        n_embd=n_embd
    )
    MY_GPT = {**GPT_S, **MY_GPT}  # inherit all other params
    mconf = GPTConfig(
        train_dataset.vocab_size,
        train_dataset.block_size,
        **MY_GPT,
        bert=False,
        use_embd=use_embd,
    )
    model = get_model(mconf)
    checkpoint_path = './{}/latest_model.pt'.format(folder_out)
    trainer = train(model, 10, train_dataset, test_dataset, checkpoint_path)
    # NOTE(review): map_location='cuda:0' contradicts the "also on CPU" note —
    # torch.load would fail on a CPU-only machine; confirm intended.
    checkpoint = torch.load(checkpoint_path, map_location=torch.device('cuda:0'))  # also on CPU
    model.load_state_dict(checkpoint)
    out_path='./{}/samples.png'.format(folder_out)
    sample_some(trainer, model, train_dataset, X_train, out_path=out_path)
def do_them():
    """Run do_it for every configured experiment."""
    experiments = [
        dict(
            data_path='./data/brain.pkl',
            n_embd=256,
            use_embd=False,
            folder_out='./results/cremi/ll_256/',
        ),
    ]
    for experiment in experiments:
        do_it(**experiment)
# Script entry point: run all configured experiments.
if __name__ == "__main__":
    do_them()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# Meta-info
Author: Nelson Brochado
Created: 08/10/2017
Updated: 02/04/2018
# Description
Unit tests for the functions in the ands.algorithms.numerical.barycentric
module.
"""
import unittest
from ands.algorithms.numerical.barycentric import barycentric, compute_weights
from tests.algorithms.numerical.polynomial_interpolation_tests import *
class TestBarycentric(unittest.TestCase, PolynomialInterpolationTests):
    """Unit tests for the barycentric interpolation algorithm."""

    def __init__(self, method_name="__init__"):
        # Both bases need explicit initialization; the mixin is parameterized
        # with the algorithm under test (barycentric).
        unittest.TestCase.__init__(self, method_name)
        PolynomialInterpolationTests.__init__(self, barycentric)

    def test_when_weights_are_provided(self):
        # n points, so polynomial would be of degree n - 1.
        xs = [8, 16, 64]
        n = len(xs)
        # Given that we want to call barycentric multiple times with different y
        # values and different points of evaluation of the polynomial, i.e.
        # different x0's, then we pre-compute the weights and pass them to the
        # function barycentric.
        ws = compute_weights(xs)
        # f and g are functions.
        for h in [f, g]:
            ys = [h(x) for x in xs]  # Evaluate the function at all xs points.
            for x0 in [-2, 2]:
                y0 = barycentric(xs, ys, x0, ws)
                # Compare against scipy's reference implementation.
                bi0 = barycentric_interpolate(xs, ys, x0)
                self.assertAlmostEqual(bi0, np.array(y0))
|
nilq/baby-python
|
python
|
# -*- coding=utf-8 -*-
# Public dataloader factories exported by this module.
__all__ = [
    'tiny_imagenet',
    'imagewoof2',
    'imagenette2'
]
import os
import torch
import torchvision
# Defaults shared by all dataloader factories below.
_default_batch_size = 32
_default_num_workers = 4
def _transform(train=True):
    """Return the torchvision preprocessing pipeline.

    Training applies random-crop/flip augmentation; evaluation uses a plain
    center crop. Both normalize with the standard ImageNet statistics.
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    if train:
        steps = [
            torchvision.transforms.RandomResizedCrop(224),
            torchvision.transforms.RandomHorizontalFlip(),
        ]
    else:
        steps = [
            torchvision.transforms.CenterCrop(224),
        ]
    steps += [
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std),
    ]
    return torchvision.transforms.Compose(steps)
def tiny_imagenet(name='train',
                  batch_size=_default_batch_size,
                  num_workers=_default_num_workers):
    """DataLoader over the tiny-imagenet-200 split named `name`."""
    is_train = name == 'train'
    root = os.path.join('datasets', 'tiny-imagenet-200', name)
    dataset = torchvision.datasets.ImageFolder(root, transform=_transform(is_train))
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=True,  # keep batch shapes constant
        shuffle=is_train,
    )
def imagewoof2(name='train',
               batch_size=_default_batch_size,
               num_workers=_default_num_workers):
    """DataLoader over the imagewoof2 split named `name`."""
    is_train = name == 'train'
    root = os.path.join('datasets', 'imagewoof2', name)
    dataset = torchvision.datasets.ImageFolder(root, transform=_transform(is_train))
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=True,  # keep batch shapes constant
        shuffle=is_train,
    )
def imagenette2(name='train',
                batch_size=_default_batch_size,
                num_workers=_default_num_workers):
    """DataLoader over the imagenette2 split named `name`."""
    is_train = name == 'train'
    root = os.path.join('datasets', 'imagenette2', name)
    dataset = torchvision.datasets.ImageFolder(root, transform=_transform(is_train))
    return torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=True,  # keep batch shapes constant
        shuffle=is_train,
    )
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import util
# Tag-dimension sets attached to the exported Vitess metrics below.
TAG_LIST_1 = ['keyspace', 'shard', 'type']
TAG_LIST_2 = ['type']
TAG_LIST_3 = ['method', 'keyspace', 'shard', 'type']
TAG_LIST_4 = ['method', 'keyspace', 'type']
def process_data(json_data):
    """Convert a Vitess vars JSON payload into published metrics."""
    epoch_time = util.get_epoch_time()
    # Plain counter metrics: (metric name, JSON key, tag list).
    counters = [
        ("vitess.healthcheckConnections", 'HealthcheckConnections', TAG_LIST_1),
        ("vitess.healthcheckErrors", 'HealthcheckErrors', TAG_LIST_1),
        ("vitess.vtgateApiErrorCounts", 'VtgateApiErrorCounts', TAG_LIST_4),
        ("vitess.vtgateApiRowsReturned", 'VtgateApiRowsReturned', TAG_LIST_4),
        ("vitess.vtgateInfoErrorCounts", 'VtgateInfoErrorCounts', TAG_LIST_2),
        ("vitess.vtgateInternalErrorCounts", 'VtgateInternalErrorCounts', TAG_LIST_2),
        ("vitess.vttabletCallErrorCount", 'VttabletCallErrorCount', TAG_LIST_3),
    ]
    for metric_name, key, tags in counters:
        util.create_metric(epoch_time, metric_name, json_data[key], tags)
    # Histogram metrics also publish their total count as a separate metric.
    util.publish_metric(epoch_time, "vitess.vtgateApi.totalCount",
                        json_data['VtgateApi']['TotalCount'], None)
    util.create_metric_histogram(epoch_time, "vitess.vtgateApi.count",
                                 json_data['VtgateApi'], TAG_LIST_4)
    util.publish_metric(epoch_time, "vitess.vttabletCall.totalCount",
                        json_data['VttabletCall']['TotalCount'], None)
    util.create_metric_histogram(epoch_time, "vitess.vttabletCall.count",
                                 json_data['VttabletCall'], TAG_LIST_3)
def main():
    """Fetch the Vitess stats endpoint and publish its metrics."""
    json_data = util.get_json_data(util.get_url())
    process_data(json_data)
# Script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import requests
import os
# Target: a specific DeviceType resource on a local development API server.
API_URL = 'http://127.0.0.1:8000/api/devices/devicetype/1/'
# Raises KeyError if TESTAUTH is not set in the environment.
API_KEY = os.environ['TESTAUTH']
headers = {'Authorization': f'Token {API_KEY}'}
# Issue the DELETE and print the resulting HTTP status code.
r = requests.delete(API_URL, headers=headers)
print(r.status_code)
|
nilq/baby-python
|
python
|
from django.test import TestCase
class AnalyzerTasksTestCase(TestCase):
    """Placeholder test case for analyzer tasks; no tests written yet."""

    @classmethod
    def setUpTestData(cls):
        # No shared fixtures needed yet.
        pass
|
python
|
#!/usr/bin/env python
import sys,argparse
import numpy
import os
import time, datetime
import h5py
import scipy.misc
import configobj
def get_valid_stacks(f_names):
    """Return only the HDF5 files that contain a top-level "mean" dataset."""
    valid = []
    for f_name in f_names:
        with h5py.File(f_name, "r") as h5:
            if "mean" in h5.keys():
                valid.append(f_name)
    return valid
def get_dims(f_name):
    """Return the per-frame shape of the "mean" dataset in `f_name`.

    The first axis of the stored dataset indexes the stack of frames, so it
    is dropped and only the frame dimensions are returned.

    Bug fix: the original `list(s).pop(0)` popped from a throwaway copy and
    left `s` untouched, so the stack axis leaked into the returned shape.
    """
    with h5py.File(f_name, "r") as f:
        s = numpy.shape(f["mean"])
    return tuple(s[1:])  # drop the leading stack axis
def get_max_mask(f_names, ds_name, threshold):
    """Boolean mask: True where the across-file mean of `ds_name` is < threshold."""
    stacks = []
    for f_name in f_names:
        with h5py.File(f_name, "r") as h5:
            stacks.append(numpy.array(h5[ds_name]))
    return numpy.mean(stacks, axis=0) < threshold
def get_min_mask(f_names, ds_name, threshold):
    """Boolean mask: True where the across-file mean of `ds_name` is > threshold."""
    stacks = []
    for f_name in f_names:
        with h5py.File(f_name, "r") as h5:
            stacks.append(numpy.array(h5[ds_name]))
    return numpy.mean(stacks, axis=0) > threshold
def get_badpixelmask(f_name):
    """Load a bad-pixel mask from an HDF5 (.h5) or PNG (.png) file.

    PNG input is scaled from [0, 255] down to [0, 1].

    Fixes: the h5py branch opened the file without binding it (`as f` was
    missing), so `f["/data/data"]` raised NameError; an unsupported
    extension now raises ValueError instead of returning an unbound local.
    """
    if f_name[-3:] == ".h5":
        with h5py.File(f_name, "r") as f:
            m = numpy.array(f["/data/data"])
    elif f_name[-4:] == ".png":
        # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — consider
        # migrating to imageio.imread.
        m = scipy.misc.imread(f_name, flatten=True) / 255.
    else:
        raise ValueError("Unsupported bad pixel mask format: %s" % f_name)
    return m
# Command-line entry point: build a pixel mask from the stack files in the
# current directory, using thresholds from the given configuration file.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Hummingbird mask tool. Creates mask from stack files in current directory and given configuration file.')
    parser.add_argument('config', type=str,
                        help="Configuration file")
    parser.add_argument('-l', '--link', type=str, help="Creates symbolic link to the H5 mask from given path")
    if(len(sys.argv) == 1):
        # NOTE(review): print_help() does not exit; parse_args() below will
        # still error out on the missing 'config' argument — confirm intended.
        parser.print_help()
    args = parser.parse_args()
    C = configobj.ConfigObj(args.config)
    # Collect candidate .h5 stack files from the working directory.
    files = os.listdir(".")
    files = [f for f in files if len(f) > 3]
    files = [f for f in files if f[-3:] == ".h5"]
    files = get_valid_stacks(files)
    if len(files) == 0:
        sys.exit(0)
    s = get_dims(files[0])
    # Start from an all-True mask and AND in each configured criterion.
    mask = numpy.ones(shape=s, dtype="bool")
    if C["mean_max"].lower() != 'none':
        mask *= get_max_mask(files, "mean", float(C["mean_max"]))
    if C["std_max"].lower() != 'none':
        mask *= get_max_mask(files, "std", float(C["std_max"]))
    if C["median_max"].lower() != 'none':
        mask *= get_max_mask(files, "median", float(C["median_max"]))
    if C["mean_min"].lower() != 'none':
        mask *= get_min_mask(files, "mean", float(C["mean_min"]))
    if C["std_min"].lower() != 'none':
        mask *= get_min_mask(files, "std", float(C["std_min"]))
    if C["median_min"].lower() != 'none':
        mask *= get_min_mask(files, "median", float(C["median_min"]))
    if C["badpixelmask"].lower() != 'none':
        mask *= get_badpixelmask(C["badpixelmask"])
    fn_root = files[-1].split("/")[-1][:-3]
    outdir = C["outdir"]
    os.system("mkdir -p %s" % outdir)
    # NOTE(review): bool() of any non-empty string is True, so
    # output_png = 'False' still triggers PNG output — confirm intended.
    if bool(C["output_png"].lower()):
        import matplotlib.pyplot as pypl
        pypl.imsave("%s/mask_%s.png" % (outdir,fn_root), mask, cmap="binary_r", vmin=0, vmax=1)
    with h5py.File("%s/mask_%s.h5" % (outdir,fn_root), "w") as f:
        f["data/data"] = mask
    # Keep a copy of the configuration next to the produced mask.
    os.system("cp %s %s/mask_%s.conf" % (args.config,outdir,fn_root))
    if args.link:
        os.system("ln -s -f %s/mask_%s.h5 %s" % (outdir, fn_root, args.link))
|
nilq/baby-python
|
python
|
#coding=utf-8
# Copyright (c) 2018 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import solr_tools
# Thin CLI dispatcher around solr_tools: argv[1] selects the operation and
# the remaining arguments are forwarded positionally.
if sys.argv[1] == "add_engine":
    solr_tools.add_engine(sys.argv[2], sys.argv[3], sys.argv[4],
                          shard=1, replica=1, maxshardpernode=5, conf='myconf')
elif sys.argv[1] == "delete_engine":
    solr_tools.delete_engine(sys.argv[2], sys.argv[3], sys.argv[4])
elif sys.argv[1] == "upload_doc":
    solr_tools.upload_documents(sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], num_thread=1)
elif sys.argv[1] == "clear_doc":
    solr_tools.clear_documents(sys.argv[2], sys.argv[3], sys.argv[4])
|
nilq/baby-python
|
python
|
import json
import requests
import code
class Demand():
    """Fetch the on-demand EC2 hourly price for an instance type in a region.

    Prices are read from the public AWS pricing "offers" JSON endpoint.
    """

    def __init__(self, region='ap-southeast-1', instanceType='m4.large', operatingSystem='Linux'):
        self.url = 'https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/{}/index.json'.format(region)
        self.instanceType = instanceType
        self.operatingSystem = operatingSystem

    def get_price(self):
        """Return the USD hourly on-demand price as a string.

        Raises IndexError if no product matches the instance type / OS.
        """
        response = requests.get(self.url)
        offers = json.loads(response.text)
        products = offers['products']
        # First SKU whose attributes match the requested instance type and OS.
        SKU = [sku for sku in products
               if products[sku]['attributes'].get('instanceType') == self.instanceType
               and products[sku]['attributes'].get('operatingSystem') == self.operatingSystem][0]
        on_demand = offers['terms']['OnDemand'][SKU]
        SKU_TERM = [sku_term for sku_term in on_demand
                    if on_demand[sku_term]['sku'] == SKU][0]
        # Bug fix: dict.keys() is a view and not subscriptable on Python 3;
        # take the first key via next(iter(...)) instead of keys()[0].
        priceDimensionKey = next(iter(on_demand[SKU_TERM]['priceDimensions']))
        price = on_demand[SKU_TERM]['priceDimensions'][priceDimensionKey]['pricePerUnit']['USD']
        return price
|
nilq/baby-python
|
python
|
"""
Our exception hierarchy:
* HTTPError
x RequestError
+ TransportError
- TimeoutException
· ConnectTimeout
· ReadTimeout
· WriteTimeout
· PoolTimeout
- NetworkError
· ConnectError
· ReadError
· WriteError
· CloseError
- ProtocolError
· LocalProtocolError
· RemoteProtocolError
- ProxyError
- UnsupportedProtocol
+ DecodingError
+ TooManyRedirects
+ RequestBodyUnavailable
x HTTPStatusError
* NotRedirectResponse
* CookieConflict
* StreamError
x StreamConsumed
x ResponseNotRead
x RequestNotRead
x ResponseClosed
"""
import contextlib
import typing
import httpcore
if typing.TYPE_CHECKING:
from ._models import Request, Response # pragma: nocover
class HTTPError(Exception):
    """
    Base class for `RequestError` and `HTTPStatusError`.

    Useful for `try...except` blocks when issuing a request,
    and then calling `.raise_for_status()`.

    For example:

    ```
    try:
        response = httpx.get("https://www.example.com")
        response.raise_for_status()
    except httpx.HTTPError as exc:
        print(f"HTTP Exception for {exc.request.url} - {exc.message}")
    ```
    """

    def __init__(self, message: str, *, request: "Request") -> None:
        super().__init__(message)
        # The request that was being issued when the error occurred.
        self.request = request
class RequestError(HTTPError):
    """
    Base class for all exceptions that may occur when issuing a `.request()`.
    """

    def __init__(self, message: str, *, request: "Request") -> None:
        # Identical to HTTPError.__init__; the override keeps `request`
        # keyword-only on this branch of the hierarchy as well.
        super().__init__(message, request=request)


class TransportError(RequestError):
    """
    Base class for all exceptions that occur at the level of the Transport API.

    All of these exceptions also have an equivalent mapping in `httpcore`.
    """
# Timeout exceptions...


class TimeoutException(TransportError):
    """
    The base class for timeout errors.

    An operation has timed out.
    """


class ConnectTimeout(TimeoutException):
    """
    Timed out while connecting to the host.
    """


class ReadTimeout(TimeoutException):
    """
    Timed out while receiving data from the host.
    """


class WriteTimeout(TimeoutException):
    """
    Timed out while sending data to the host.
    """


class PoolTimeout(TimeoutException):
    """
    Timed out waiting to acquire a connection from the pool.
    """
# Core networking exceptions...


class NetworkError(TransportError):
    """
    The base class for network-related errors.

    An error occurred while interacting with the network.
    """


class ReadError(NetworkError):
    """
    Failed to receive data from the network.
    """


class WriteError(NetworkError):
    """
    Failed to send data through the network.
    """


class ConnectError(NetworkError):
    """
    Failed to establish a connection.
    """


class CloseError(NetworkError):
    """
    Failed to close a connection.
    """
# Other transport exceptions...


class ProxyError(TransportError):
    """
    An error occurred while establishing a proxy connection.
    """


class UnsupportedProtocol(TransportError):
    """
    Attempted to make a request to an unsupported protocol.

    For example issuing a request to `ftp://www.example.com`.
    """


class ProtocolError(TransportError):
    """
    The protocol was violated.
    """


class LocalProtocolError(ProtocolError):
    """
    A protocol was violated by the client.

    For example if the user instantiated a `Request` instance explicitly,
    failed to include the mandatory `Host:` header, and then issued it directly
    using `client.send()`.
    """


class RemoteProtocolError(ProtocolError):
    """
    The protocol was violated by the server.

    For example, returning malformed HTTP.
    """
# Other request exceptions...


class DecodingError(RequestError):
    """
    Decoding of the response failed, due to a malformed encoding.
    """


class TooManyRedirects(RequestError):
    """
    Too many redirects.
    """


class RequestBodyUnavailable(RequestError):
    """
    Had to send the request again, but the request body was streaming, and is
    no longer available.
    """
# Client errors


class HTTPStatusError(HTTPError):
    """
    The response had an error HTTP status of 4xx or 5xx.

    May be raised when calling `response.raise_for_status()`
    """

    def __init__(
        self, message: str, *, request: "Request", response: "Response"
    ) -> None:
        super().__init__(message, request=request)
        # The response whose status triggered the error.
        self.response = response
class NotRedirectResponse(Exception):
    """
    Response was not a redirect response.

    May be raised if `response.next()` is called without first
    properly checking `response.is_redirect`.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)


class CookieConflict(Exception):
    """
    Attempted to lookup a cookie by name, but multiple cookies existed.

    Can occur when calling `response.cookies.get(...)`.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)


# Stream exceptions...

# These may occur as the result of a programming error, by accessing
# the request/response stream in an invalid manner.


class StreamError(Exception):
    """
    The base class for stream exceptions.

    The developer made an error in accessing the request stream in
    an invalid way.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
class StreamConsumed(StreamError):
    """
    Attempted to read or stream response content, but the content has already
    been streamed.
    """

    def __init__(self) -> None:
        message = (
            "Attempted to read or stream response content, but the content has "
            "already been streamed."
        )
        super().__init__(message)


class ResponseNotRead(StreamError):
    """
    Attempted to access response content, without having called `read()`
    after a streaming response.
    """

    def __init__(self) -> None:
        message = (
            "Attempted to access response content, without having called `read()` "
            "after a streaming response."
        )
        super().__init__(message)


class RequestNotRead(StreamError):
    """
    Attempted to access request content, without having called `read()`.
    """

    def __init__(self) -> None:
        message = "Attempted to access request content, without having called `read()`."
        super().__init__(message)


class ResponseClosed(StreamError):
    """
    Attempted to read or stream response content, but the request has been
    closed.
    """

    def __init__(self) -> None:
        message = (
            "Attempted to read or stream response content, but the request has "
            "been closed."
        )
        super().__init__(message)
# The `InvalidURL` class is no longer required. It was being used to enforce only
# 'http'/'https' URLs being requested, but is now treated instead at the
# transport layer using `UnsupportedProtocol()`.`
# We are currently still exposing this class, but it will be removed in 1.0.
# Deprecated alias kept for backwards compatibility until 1.0.
InvalidURL = UnsupportedProtocol
@contextlib.contextmanager
def map_exceptions(
    mapping: typing.Mapping[typing.Type[Exception], typing.Type[Exception]],
    **kwargs: typing.Any,
) -> typing.Iterator[None]:
    """Context manager translating raised exceptions according to `mapping`.

    If the wrapped code raises an exception matching one of the mapping's
    source classes, the most specific matching target class is raised in its
    place (constructed from the original message plus `kwargs`); anything
    unmapped propagates unchanged.
    """
    try:
        yield
    except Exception as exc:
        # Collect every target whose source class matches the raised exception.
        candidates = [
            target for source, target in mapping.items() if isinstance(exc, source)
        ]
        best = None
        for target in candidates:
            # We want to map to the most specific exception we can find.
            # Eg if `exc` is an `httpcore.ReadTimeout`, we want to map to
            # `httpx.ReadTimeout`, not just `httpx.TimeoutException`.
            if best is None or issubclass(target, best):
                best = target
        if best is None:
            raise
        raise best(str(exc), **kwargs) from None  # type: ignore
# Mapping from low-level httpcore exceptions to their httpx equivalents,
# intended for use with map_exceptions() at the transport boundary.
HTTPCORE_EXC_MAP = {
    httpcore.TimeoutException: TimeoutException,
    httpcore.ConnectTimeout: ConnectTimeout,
    httpcore.ReadTimeout: ReadTimeout,
    httpcore.WriteTimeout: WriteTimeout,
    httpcore.PoolTimeout: PoolTimeout,
    httpcore.NetworkError: NetworkError,
    httpcore.ConnectError: ConnectError,
    httpcore.ReadError: ReadError,
    httpcore.WriteError: WriteError,
    httpcore.CloseError: CloseError,
    httpcore.ProxyError: ProxyError,
    httpcore.UnsupportedProtocol: UnsupportedProtocol,
    httpcore.ProtocolError: ProtocolError,
    httpcore.LocalProtocolError: LocalProtocolError,
    httpcore.RemoteProtocolError: RemoteProtocolError,
}
|
nilq/baby-python
|
python
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Juniper MS-MPC generator for capirca."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
from capirca.lib import aclgenerator
from capirca.lib import juniper
from capirca.lib import nacaddr
import six
# Cap on generated identifier length (application-set names).
MAX_IDENTIFIER_LEN = 55  # It is really 63, but leaving room for added chars
class Term(juniper.Term):
  """Representation of an individual Juniper MS-MPC term.

  The __str__ method must be implemented.

  Args: term policy.Term object
  """
  _PLATFORM = 'msmpc'
  _DEFAULT_INDENT = 20
  # Mapping from policy actions to MS-MPC rule action keywords.
  _ACTIONS = {'accept': 'accept', 'deny': 'discard', 'reject': 'reject'}

  def __init__(self, term, term_type, noverbose, filter_name):
    # term: the policy.Term to render.
    # term_type: address family — 'inet', 'inet6' or 'mixed'.
    # noverbose: when True, suppress all comment output.
    # filter_name: name of the enclosing filter; used to derive the
    #   application-set identifier referenced from this term.
    self.term = term
    self.term_type = term_type
    self.noverbose = noverbose
    self.filter_name = filter_name

  def __str__(self):
    """Render this term as MS-MPC configuration text (may be empty)."""
    # Verify platform specific terms. Skip whole term if platform does not
    # match.
    if self.term.platform:
      if self._PLATFORM not in self.term.platform:
        return ''
    if self.term.platform_exclude:
      if self._PLATFORM in self.term.platform_exclude:
        return ''
    ret_str = juniper.Config(indent=self._DEFAULT_INDENT)

    # COMMENTS
    # this deals just fine with multi line comments, but we could probably
    # output them a little cleaner; do things like make sure the
    # len(output) < 80, etc. Note, if 'noverbose' is set for the filter, skip
    # all comment processing.
    if not self.noverbose:
      if self.term.owner:
        self.term.comment.append('Owner: %s' % self.term.owner)
      if self.term.comment:
        ret_str.Append('/*')
        for comment in self.term.comment:
          for line in comment.split('\n'):
            ret_str.Append('** ' + line)
        ret_str.Append('*/')
    # Term verbatim output - this will skip over normal term creation
    # code. Warning generated from policy.py if appropriate.
    if self.term.verbatim:
      for next_term in self.term.verbatim:
        if next_term[0] == self._PLATFORM:
          ret_str.Append(str(next_term[1]), verbatim=True)
      return str(ret_str)
    # Determine whether there are any match conditions for the term.
    has_match_criteria = (
        self.term.address or self.term.dscp_except or self.term.dscp_match or
        self.term.destination_address or self.term.destination_port or
        self.term.destination_prefix or self.term.destination_prefix_except or
        self.term.encapsulate or self.term.ether_type or
        self.term.flexible_match_range or self.term.forwarding_class or
        self.term.forwarding_class_except or self.term.fragment_offset or
        self.term.hop_limit or self.term.next_ip or self.term.port or
        self.term.precedence or self.term.protocol or
        self.term.protocol_except or self.term.source_address or
        self.term.source_port or self.term.source_prefix or
        self.term.source_prefix_except or self.term.traffic_type or
        self.term.ttl)

    # For 'mixed' terms, decide which families actually need to be emitted;
    # when both do, the term is duplicated with '-inet'/'-inet6' suffixes.
    suffixes = []
    duplicate_term = False
    if self.term_type == 'mixed':
      if not (self.term.GetAddressOfVersion('source_address',
                                            self.AF_MAP.get('inet6')) or
              self.term.GetAddressOfVersion('source_address_exclude',
                                            self.AF_MAP.get('inet6')) or
              self.term.GetAddressOfVersion('destination_address',
                                            self.AF_MAP.get('inet6')) or
              self.term.GetAddressOfVersion('destination_address_exclude',
                                            self.AF_MAP.get('inet6'))):
        suffixes = ['inet']
      elif not (self.term.GetAddressOfVersion('source_address',
                                              self.AF_MAP.get('inet')) or
                self.term.GetAddressOfVersion('source_address_exclude',
                                              self.AF_MAP.get('inet')) or
                self.term.GetAddressOfVersion('destination_address',
                                              self.AF_MAP.get('inet')) or
                self.term.GetAddressOfVersion('destination_address_exclude',
                                              self.AF_MAP.get('inet'))):
        suffixes = ['inet6']
      else:
        suffixes = ['inet', 'inet6']
        duplicate_term = True
    if not suffixes:
      suffixes = [self.term_type]
    for suffix in suffixes:
      source_address = self.term.GetAddressOfVersion('source_address',
                                                     self.AF_MAP.get(suffix))
      source_address_exclude = self.term.GetAddressOfVersion(
          'source_address_exclude', self.AF_MAP.get(suffix))
      source_address, source_address_exclude = self._MinimizePrefixes(
          source_address, source_address_exclude)
      destination_address = self.term.GetAddressOfVersion(
          'destination_address', self.AF_MAP.get(suffix))
      destination_address_exclude = self.term.GetAddressOfVersion(
          'destination_address_exclude', self.AF_MAP.get(suffix))
      destination_address, destination_address_exclude = self._MinimizePrefixes(
          destination_address, destination_address_exclude)
      # Skip this family when the term has addresses but none in this family.
      if ((not source_address) and self.term.GetAddressOfVersion(
          'source_address', self.AF_MAP.get('mixed')) and
          not source_address_exclude) or (
              (not destination_address) and self.term.GetAddressOfVersion(
                  'destination_address', self.AF_MAP.get('mixed')) and
              not destination_address_exclude):
        continue
      # Skip the family when the ICMP protocol does not match it.
      if ('icmp' in self.term.protocol and
          suffix == 'inet6') or ('icmpv6' in self.term.protocol and
                                 suffix == 'inet'):
        logging.debug(
            self.NO_AF_LOG_PROTO.substitute(
                term=self.term.name,
                proto=', '.join(self.term.protocol),
                af=suffix))
        continue
      # NAME
      # if the term is inactive we have to set the prefix
      if self.term.inactive:
        term_prefix = 'inactive:'
      else:
        term_prefix = ''
      ret_str.Append(
          '%s term %s%s {' %
          (term_prefix, self.term.name, '-' + suffix if duplicate_term else ''))
      # We only need a "from {" clause if there are any conditions to match.
      if has_match_criteria:
        ret_str.Append('from {')
        # SOURCE ADDRESS
        if source_address or source_address_exclude:
          ret_str.Append('source-address {')
          if source_address:
            for saddr in source_address:
              for comment in self._Comment(saddr):
                ret_str.Append('%s' % comment)
              # Short IPv6 prefixes (<16 bits) are expanded to /16 subnets.
              if saddr.version == 6 and 0 < saddr.prefixlen < 16:
                for saddr2 in saddr.subnets(new_prefix=16):
                  ret_str.Append('%s;' % saddr2)
              else:
                if saddr == nacaddr.IPv6('0::0/0'):
                  saddr = 'any-ipv6'
                elif saddr == nacaddr.IPv4('0.0.0.0/0'):
                  saddr = 'any-ipv4'
                ret_str.Append('%s;' % saddr)
          # SOURCE ADDRESS EXCLUDE
          if source_address_exclude:
            for ex in source_address_exclude:
              for comment in self._Comment(ex):
                ret_str.Append('%s' % comment)
              if ex.version == 6 and 0 < ex.prefixlen < 16:
                for ex2 in ex.subnets(new_prefix=16):
                  ret_str.Append('%s except;' % ex2)
              else:
                if ex == nacaddr.IPv6('0::0/0'):
                  ex = 'any-ipv6'
                elif ex == nacaddr.IPv4('0.0.0.0/0'):
                  ex = 'any-ipv4'
                ret_str.Append('%s except;' % ex)
          ret_str.Append('}')  # source-address {...}
        # DESTINATION ADDRESS
        if destination_address or destination_address_exclude:
          ret_str.Append('destination-address {')
          if destination_address:
            for daddr in destination_address:
              for comment in self._Comment(daddr):
                ret_str.Append('%s' % comment)
              if daddr.version == 6 and 0 < daddr.prefixlen < 16:
                for daddr2 in daddr.subnets(new_prefix=16):
                  ret_str.Append('%s;' % daddr2)
              else:
                if daddr == nacaddr.IPv6('0::0/0'):
                  daddr = 'any-ipv6'
                elif daddr == nacaddr.IPv4('0.0.0.0/0'):
                  daddr = 'any-ipv4'
                ret_str.Append('%s;' % daddr)
          # DESTINATION ADDRESS EXCLUDE
          if destination_address_exclude:
            for ex in destination_address_exclude:
              for comment in self._Comment(ex):
                ret_str.Append('%s' % comment)
              if ex.version == 6 and 0 < ex.prefixlen < 16:
                for ex2 in ex.subnets(new_prefix=16):
                  ret_str.Append('%s except;' % ex2)
              else:
                if ex == nacaddr.IPv6('0::0/0'):
                  ex = 'any-ipv6'
                elif ex == nacaddr.IPv4('0.0.0.0/0'):
                  ex = 'any-ipv4'
                ret_str.Append('%s except;' % ex)
          ret_str.Append('}')  # destination-address {...}
        # source prefix <except> list
        if self.term.source_prefix or self.term.source_prefix_except:
          for pfx in self.term.source_prefix:
            ret_str.Append('source-prefix-list ' + pfx + ';')
          for epfx in self.term.source_prefix_except:
            ret_str.Append('source-prefix-list ' + epfx + ' except;')
        # destination prefix <except> list
        if self.term.destination_prefix or self.term.destination_prefix_except:
          for pfx in self.term.destination_prefix:
            ret_str.Append('destination-prefix-list ' + pfx + ';')
          for epfx in self.term.destination_prefix_except:
            ret_str.Append('destination-prefix-list ' + epfx + ' except;')
        # APPLICATION
        if (self.term.source_port or self.term.destination_port or
            self.term.icmp_type or self.term.protocol):
          if hasattr(self.term, 'replacement_application_name'):
            ret_str.Append('application-sets ' +
                           self.term.replacement_application_name + '-app;')
          else:
            # Identifier is built from truncated filter + term names to stay
            # within MAX_IDENTIFIER_LEN.
            ret_str.Append('application-sets ' +
                           self.filter_name[:((MAX_IDENTIFIER_LEN) // 2)] +
                           self.term.name[-((MAX_IDENTIFIER_LEN) // 2):] +
                           '-app;')
        ret_str.Append('}')  # from {...}
      ret_str.Append('then {')
      # ACTION
      for action in self.term.action:
        ret_str.Append(self._ACTIONS.get(str(action)) + ';')
      if self.term.logging and 'disable' not in [
          x.value for x in self.term.logging
      ]:
        ret_str.Append('syslog;')
      ret_str.Append('}')  # then {...}
      ret_str.Append('}')  # term {...}
    return str(ret_str)
class JuniperMSMPC(aclgenerator.ACLGenerator):
  """Juniper MSMPC rendering class.

  This class takes a policy object and renders output into
  a syntax which is understood by Juniper routers with MS-MPC cards.

  Args:
    pol: policy.Policy object
  """
  _PLATFORM = 'msmpc'
  SUFFIX = '.msmpc'
  _SUPPORTED_AF = frozenset(('inet', 'inet6', 'mixed'))
  _AF_MAP = {'inet': 4, 'inet6': 6, 'mixed': None}
  _AF_ICMP_MAP = {'icmp': 'inet', 'icmpv6': 'inet6'}
  # Maps header direction keywords to MS-MPC rule-set directions.
  _SUPPORTED_DIRECTION = {
      '': 'input-output',
      'ingress': 'input',
      'egress': 'output',
  }
  _OPTIONAL_SUPPORTED_KEYWORDS = frozenset([
      'expiration',
  ])
  def __init__(self, pol, exp_info):
    """Initialize the generator; applications are collected per filter."""
    self.applications = {}
    super(JuniperMSMPC, self).__init__(pol, exp_info)
  def _BuildTokens(self):
    """Build supported tokens for platform.

    Returns:
      tuple containing both supported tokens and sub tokens
    """
    supported_tokens, supported_sub_tokens = super(JuniperMSMPC,
                                                   self)._BuildTokens()
    # Tokens supported beyond the generic ACLGenerator baseline.
    supported_tokens |= {
        'destination_prefix', 'destination_prefix_except', 'icmp_code',
        'logging', 'owner', 'source_prefix', 'source_prefix_except'
    }
    supported_sub_tokens.update({
        'option': {
            'established',
            # TODO(sneakywombat): add all options to lex.
            '.*',  # make ArbitraryOptions work, yolo.
            'tcp-established',
            'inactive'
        }
    })
    return supported_tokens, supported_sub_tokens
def _BuildPort(self, ports):
"""Transform specified ports into list and ranges.
Args:
ports: a policy terms list of ports
Returns:
port_list: list of ports and port ranges
"""
port_list = []
for p in ports:
if p[0] == p[1]:
port_list.append(str(p[0]))
else:
port_list.append('%s-%s' % (str(p[0]), str(p[1])))
return port_list
def _GenerateApplications(self, filter_name):
target = []
apps_set_list = []
target.append('applications {')
done_apps = []
for app in sorted(self.applications[filter_name], key=lambda x: x['name']):
app_list = []
if app in done_apps:
continue
if app['protocol'] or app['sport'] or app['dport'] or app['icmp-type']:
# generate ICMP statements
if app['icmp-type']:
if app['timeout']:
timeout = app['timeout']
else:
timeout = 60
num_terms = len(app['protocol']) * len(app['icmp-type'])
apps_set_list.append('application-set ' + app['name'] + '-app {')
for i in range(num_terms):
apps_set_list.append('application ' + app['name'] + '-app%d' %
(i + 1) + ';')
apps_set_list.append('}') # application-set {...}
term_counter = 0
for i, code in enumerate(app['icmp-type']):
for proto in app['protocol']:
target.append('application ' + app['name'] + '-app%d' %
(term_counter + 1) + ' {')
if proto == 'icmp':
target.append('application-protocol %s;' % proto)
target.append('protocol %s;' % proto)
target.append('%s-type %s;' % (proto, str(code)))
if app['icmp-code']:
target.append('%s-code %s;' %
(proto, self._Group(app['icmp-code'])))
if int(timeout):
target.append('inactivity-timeout %s;' % int(timeout))
target.append('}') # application {...}
term_counter += 1
# generate non-ICMP statements
else:
i = 1
apps_set_list.append('application-set ' + app['name'] + '-app {')
for proto in app['protocol'] or ['']:
for sport in app['sport'] or ['']:
for dport in app['dport'] or ['']:
chunks = []
if proto:
# MSMPC does not like proto vrrp
if proto == 'vrrp':
proto = '112'
chunks.append('protocol %s;' % proto)
if sport and ('udp' in proto or 'tcp' in proto):
chunks.append('source-port %s;' % sport)
if dport and ('udp' in proto or 'tcp' in proto):
chunks.append('destination-port %s;' % dport)
if app['timeout']:
chunks.append(' inactivity-timeout %d;' % int(app['timeout']))
if chunks:
apps_set_list.append('application ' + app['name'] +
'-app%d;' % i)
app_list.append('application ' + app['name'] + '-app%d {' % i)
for chunk in chunks:
app_list.append(chunk)
app_list.append('}')
i += 1
apps_set_list.append('}')
done_apps.append(app)
if app_list:
for item in app_list:
target.append(item)
for item in apps_set_list:
target.append(item)
target.append('}')
# Return the output only if there is content inside of
# the "applications {\n}" lines, otherwise return nothing.
if len(target) > 2:
return target
else:
return []
def _TranslatePolicy(self, pol, exp_info):
current_date = datetime.date.today()
exp_info_date = current_date + datetime.timedelta(weeks=exp_info)
self.junipermsmpc_policies = []
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)
filter_name = header.FilterName(self._PLATFORM)
filter_options.remove(filter_name)
filter_direction = None
filter_type = None
noverbose = 'noverbose' in filter_options
self.applications[filter_name] = []
if noverbose:
# noverbose is a strict boolean, remove it
# prior to iterating through the other options
# that require additional processing.
filter_options.remove('noverbose')
for filter_opt in filter_options:
# validate address families
if filter_opt in self._SUPPORTED_AF:
if not filter_type:
filter_type = filter_opt
continue
else:
raise ConflictingTargetOptionsError(
'only one address family can be '
'specified per header "%s"' % ' '.join(filter_options))
# validate direction
if filter_opt in self._SUPPORTED_DIRECTION.keys():
if not filter_direction:
filter_direction = self._SUPPORTED_DIRECTION.get(filter_opt)
continue
else:
raise ConflictingTargetOptionsError('only one direction can be '
'specified per header "%s"' %
' '.join(filter_options))
raise UnsupportedHeaderError(
'MSMPC Generator currently does not support '
'%s as a header option "%s"' %
(filter_opt, ' '.join(filter_options)))
if not filter_direction:
filter_direction = self._SUPPORTED_DIRECTION.get('')
if not filter_type:
filter_type = 'mixed'
term_names = set()
new_terms = []
for term in terms:
# Application sets need to be unique system-wide, so we construct
# a name from a combination of the filter and term names, shortening
# to the roughly half of the max identifier length for each part.
# When shortening, we take the start of the filter name and the end of
# the term name in a hope that we omit the most common bits
# like -inbound and accept-.
modified_term_name = filter_name[:(
(MAX_IDENTIFIER_LEN) // 2)] + term.name[-(
(MAX_IDENTIFIER_LEN) // 2):]
if term.stateless_reply:
logging.warning(
"WARNING: Term %s is a stateless reply term and will not be "
"rendered.", term.name)
continue
if set(['established', 'tcp-established']).intersection(term.option):
logging.debug(
'Skipping established term %s because MSMPC is stateful.',
term.name)
continue
# if inactive is set, deactivate the term and remove the option.
if 'inactive' in term.option:
term.inactive = True
term.option.remove('inactive')
if term.name in term_names:
raise JuniperMSMPCFilterError('Duplicate term name')
term_names.add(term.name)
if term.expiration:
if term.expiration <= exp_info_date:
logging.info(
'INFO: Term %s in policy %s expires '
'in less than two weeks.', term.name, filter_name)
if term.expiration <= current_date:
logging.warning(
'WARNING: Term %s in policy %s is expired and '
'will not be rendered.', term.name, filter_name)
continue
new_term = Term(term, filter_type, noverbose, filter_name)
new_terms.append(new_term)
# Because MSMPC terms can contain inet and inet6 addresses. We have to
# have ability to recover proper AF for ICMP type we need.
# If protocol is empty or we cannot map to inet or inet6 we insert bogus
# af_type name which will cause new_term.NormalizeIcmpTypes to fail.
if not term.protocol:
icmp_af_type = 'unknown_af_icmp'
else:
icmp_af_type = self._AF_ICMP_MAP.get(term.protocol[0],
'unknown_af_icmp')
tmp_icmptype = new_term.NormalizeIcmpTypes(term.icmp_type,
term.protocol, icmp_af_type)
# NormalizeIcmpTypes returns [''] for empty, convert to [] for eval
normalized_icmptype = tmp_icmptype if tmp_icmptype != [''] else []
# rewrites the protocol icmpv6 to icmp6
if 'icmpv6' in term.protocol:
protocol = list(term.protocol)
protocol[protocol.index('icmpv6')] = 'icmp6'
else:
protocol = term.protocol
# MSMPC requires tcp and udp to specify ports, rather than imply all
# ports
if 'udp' in term.protocol or 'tcp' in term.protocol:
if not term.source_port and not term.destination_port:
term.destination_port = [[1, 65535]]
new_application_set = {
'sport': self._BuildPort(term.source_port),
'dport': self._BuildPort(term.destination_port),
'protocol': protocol,
'icmp-type': normalized_icmptype,
'icmp-code': term.icmp_code,
'timeout': term.timeout
}
for application_set in self.applications[filter_name]:
if all(
item in list(application_set.items())
for item in new_application_set.items()):
new_application_set = ''
term.replacement_application_name = application_set['name']
break
if (modified_term_name == application_set['name'] and
new_application_set != application_set):
raise ConflictingApplicationSetsError(
'Application set %s has a conflicting entry' %
modified_term_name)
if new_application_set:
new_application_set['name'] = modified_term_name
self.applications[filter_name].append(new_application_set)
self.junipermsmpc_policies.append(
(header, filter_name, filter_direction, new_terms))
def _Group(self, group, lc=True):
"""If 1 item return it, else return [ item1 item2 ].
Args:
group: a list. could be a list of strings (protocols) or a list of tuples
(ports)
lc: return a lower cased result for text. Default is True.
Returns:
rval: a string surrounded by '[' and '];' if len(group) > 1
or with just ';' appended if len(group) == 1
"""
def _FormattedGroup(el, lc=True):
"""Return the actual formatting of an individual element.
Args:
el: either a string (protocol) or a tuple (ports)
lc: return lower cased result for text. Default is True.
Returns:
string: either the lower()'ed string or the ports, hyphenated
if they're a range, or by itself if it's not.
"""
if isinstance(el, str) or isinstance(el, six.text_type):
if not lc:
return el
else:
return el.lower()
elif isinstance(el, int):
return str(el)
# type is a tuple below here
elif el[0] == el[1]:
return '%d' % el[0]
else:
return '%d-%d' % (el[0], el[1])
if len(group) > 1:
rval = '[ ' + ' '.join([_FormattedGroup(x, lc=lc) for x in group]) + ' ];'
else:
rval = _FormattedGroup(group[0], lc=lc) + ';'
return rval
def __str__(self):
target = juniper.Config()
for (header, filter_name, filter_direction,
terms) in self.junipermsmpc_policies:
target.Append('groups {')
target.Append('replace:')
target.Append('/*')
# we want the acl to contain id and date tags, but p4 will expand
# the tags here when we submit the generator, so we have to trick
# p4 into not knowing these words. like taking c-a-n-d-y from a
# baby.
for line in aclgenerator.AddRepositoryTags('** '):
target.Append(line)
target.Append('**')
for comment in header.comment:
for line in comment.split('\n'):
target.Append('** ' + line)
target.Append('*/')
target.Append('%s {' % filter_name)
target.Append('services {')
target.Append('stateful-firewall {')
target.Append('rule %s {' % filter_name)
target.Append('match-direction %s;' % filter_direction)
for term in terms:
term_str = str(term)
if term_str:
target.Append(term_str, verbatim=True)
target.Append('}') # rule { ... }
target.Append('}') # stateful-firewall { ... }
target.Append('}') # services { ... }
for line in self._GenerateApplications(filter_name):
target.Append(line)
target.Append('}') # filter_name { ... }
target.Append('}') # groups { ... }
target.Append('apply-groups %s;' % filter_name)
return str(target) + '\n'
class Error(Exception):
  """Base error for the MSMPC generator module."""
class JuniperMSMPCFilterError(Error):
  """Raised for invalid filter contents, e.g. duplicate term names."""
class ConflictingApplicationSetsError(Error):
  """Raised when two different application sets collide on one name."""
class ConflictingTargetOptionsError(Error):
  """Raised when mutually exclusive header options are combined."""
class UnsupportedHeaderError(Error):
  """Raised for header options the MSMPC generator does not support."""
|
nilq/baby-python
|
python
|
"""
GMail! Woo!
"""
__title__ = 'gmail'
__version__ = '0.1'
__author__ = 'Charlie Guo'
__build__ = 0x0001
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Charlie Guo'
from gmail import Gmail
from mailbox import Mailbox
from message import Message
from utils import login, authenticate
|
nilq/baby-python
|
python
|
import pytest
from pype import *
from timeseries import *
__author__ = "Mynti207"
__copyright__ = "Mynti207"
__license__ = "mit"
def test_lexer():
    """Feed several code samples through the lexer; every produced token
    must be a ply LexToken."""

    def _assert_all_lex_tokens(source):
        lexer.input(source)
        for token in lexer:
            assert isinstance(token, ply.lex.LexToken)

    # arithmetic expression
    _assert_all_lex_tokens('''
3 + 4 * 10
+ -20 *2
''')

    # assignment with a comment
    _assert_all_lex_tokens('''
# sample comment
x := 3 + 42 * (s - t)
''')

    # string literal with a comment
    _assert_all_lex_tokens('''
# sample comment
sample_string = "bla"
''')
|
nilq/baby-python
|
python
|
# -*- coding:utf-8; -*-
class SolutionV1:
    """First take at LeetCode 17 (letter combinations of a phone number).

    Depth-first recursion: at depth ``i`` every partial string is extended
    with each letter mapped to ``digits[i]``; a string is complete once its
    length equals ``len(digits)``.
    """

    def letterCombinations(self, digits):
        """Return all letter combinations for a digit string (digits 2-9).

        Bug fix: the original returned [''] for an empty input, because the
        termination test ``len(s) == len(digits)`` is immediately true for
        ``s == ''``.  An empty input must yield no combinations at all.
        """
        if not digits:
            return []
        result = []
        # Phone-keypad letters for each digit.
        digit_alpha = {
            "2": ["a", "b", "c"],
            "3": ["d", "e", "f"],
            "4": ["g", "h", "i"],
            "5": ["j", "k", "l"],
            "6": ["m", "n", "o"],
            "7": ["p", "q", "r", "s"],
            "8": ["t", "u", "v"],
            "9": ["w", "x", "y", "z"],
        }

        def helper(i, s):
            # Terminate once one letter has been chosen for every digit.
            if len(s) == len(digits):
                result.append(s)
                return
            # Extend the partial string with every letter of digit i and
            # recurse one level deeper (depth-first, keypad order).
            for c in digit_alpha[digits[i]]:
                helper(i + 1, s + c)

        helper(0, "")
        return result
class Solution:
    """Iterative variant: expand the combination list one digit at a time."""

    def letterCombinations(self, digits):
        """Return every letter combination for a string of digits 2-9."""
        if not digits:
            return []
        keypad = {
            "2": ["a", "b", "c"],
            "3": ["d", "e", "f"],
            "4": ["g", "h", "i"],
            "5": ["j", "k", "l"],
            "6": ["m", "n", "o"],
            "7": ["p", "q", "r", "s"],
            "8": ["t", "u", "v"],
            "9": ["w", "x", "y", "z"],
        }
        combos = [""]
        # Each pass appends every letter of the current digit to every
        # prefix built so far; the order matches the recursive DFS.
        for digit in digits:
            combos = [prefix + letter
                      for prefix in combos
                      for letter in keypad[digit]]
        return combos
|
nilq/baby-python
|
python
|
##
## This file is part of the libsigrok project.
##
## Copyright (C) 2013 Martin Ling <martin-sigrok@earth.li>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from functools import partial
from fractions import Fraction
from .lowlevel import *
from . import lowlevel
import itertools
__all__ = ['Error', 'Context', 'Driver', 'Device', 'Session', 'Packet', 'Log',
'LogLevel', 'PacketType', 'Quantity', 'Unit', 'QuantityFlag', 'ConfigKey',
'ProbeType', 'Probe', 'ProbeGroup']
class Error(Exception):
    """Exception carrying a libsigrok result code as args[0]."""

    def __str__(self):
        error_code = self.args[0]
        return sr_strerror(error_code)
def check(result):
    """Raise Error if a libsigrok call did not return SR_OK."""
    if result == SR_OK:
        return
    raise Error(result)
def gvariant_to_python(value):
    """Convert a GVariant to the corresponding Python value.

    Handles uint64 ('t'), bool ('b'), double ('d'), string ('s') and the
    uint64 pair '(tt)', which becomes a Fraction.
    """
    type_string = g_variant_get_type_string(value)
    if type_string == 't':
        return g_variant_get_uint64(value)
    elif type_string == 'b':
        return g_variant_get_bool(value)
    elif type_string == 'd':
        return g_variant_get_double(value)
    elif type_string == 's':
        return g_variant_get_string(value, None)
    elif type_string == '(tt)':
        numerator = g_variant_get_uint64(g_variant_get_child_value(value, 0))
        denominator = g_variant_get_uint64(g_variant_get_child_value(value, 1))
        return Fraction(numerator, denominator)
    raise NotImplementedError(
        "Can't convert GVariant type '%s' to a Python type." % type_string)
def python_to_gvariant(value):
    """Convert a Python value to a GVariant.

    Bug fix: bool must be tested BEFORE int, because bool is a subclass of
    int -- in the original order True/False always matched the int branch
    and were boxed as uint64, making the boolean branch unreachable.
    """
    if isinstance(value, bool):
        return g_variant_new_boolean(value)
    if isinstance(value, int):
        return g_variant_new_uint64(value)
    if isinstance(value, float):
        return g_variant_new_double(value)
    if isinstance(value, str):
        return g_variant_new_string(value)
    if isinstance(value, Fraction):
        # A Fraction is encoded as a (numerator, denominator) uint64 tuple.
        array = new_gvariant_ptr_array(2)
        gvariant_ptr_array_setitem(array, 0,
                                   g_variant_new_uint64(value.numerator))
        gvariant_ptr_array_setitem(array, 1,
                                   g_variant_new_uint64(value.denominator))
        result = g_variant_new_tuple(array, 2)
        delete_gvariant_ptr_array(array)
        return result
    raise NotImplementedError(
        "Can't convert Python '%s' to a GVariant." % type(value))
def callback_wrapper(session, callback, device_ptr, packet_ptr):
    """Adapt a raw C datafeed callback into the Python (device, packet) API."""
    # Map the C device pointer back to the interned Python Device via the
    # context's address cache (populated in Device.__new__).
    device = session.context._devices[int(device_ptr.this)]
    packet = Packet(session, packet_ptr)
    callback(device, packet)
class Context(object):
    """Top-level libsigrok context; owns the driver and device registries."""

    def __init__(self):
        context_ptr_ptr = new_sr_context_ptr_ptr()
        check(sr_init(context_ptr_ptr))
        self.struct = sr_context_ptr_ptr_value(context_ptr_ptr)
        self._drivers = None  # lazily-built name -> Driver map
        self._devices = {}    # struct address -> Device (see Device.__new__)
        self.session = None   # at most one Session per context

    def __del__(self):
        # Tear down the underlying libsigrok context on garbage collection.
        sr_exit(self.struct)

    @property
    def drivers(self):
        """Mapping of driver name to Driver, built on first access."""
        if not self._drivers:
            self._drivers = {}
            driver_list = sr_driver_list()
            for i in itertools.count():
                driver_ptr = sr_dev_driver_ptr_array_getitem(driver_list, i)
                if driver_ptr:
                    self._drivers[driver_ptr.name] = Driver(self, driver_ptr)
                else:
                    # The C driver array is NULL-terminated.
                    break
        return self._drivers
class Driver(object):
    """Wrapper around a libsigrok hardware driver."""

    def __init__(self, context, struct):
        self.context = context
        self.struct = struct
        self._initialized = False  # sr_driver_init is deferred until scan()

    @property
    def name(self):
        return self.struct.name

    def scan(self, **kwargs):
        """Scan for devices; kwargs become ConfigKey scan options.

        Keyword names are upper-cased and resolved on ConfigKey, e.g.
        scan(conn=...) uses ConfigKey.CONN.
        """
        if not self._initialized:
            check(sr_driver_init(self.context.struct, self.struct))
            self._initialized = True
        options = []
        for name, value in kwargs.items():
            key = getattr(ConfigKey, name.upper())
            src = sr_config()
            src.key = key.id
            src.data = python_to_gvariant(value)
            options.append(src.this)
        option_list = python_to_gslist(options)
        device_list = sr_driver_scan(self.struct, option_list)
        g_slist_free(option_list)
        devices = [Device(self, gpointer_to_sr_dev_inst_ptr(ptr))
                   for ptr in gslist_to_python(device_list)]
        g_slist_free(device_list)
        return devices
class Device(object):
    """A libsigrok device, interned per underlying C struct address.

    __new__ caches instances on the owning context so the same sr_dev_inst
    pointer always yields the same Python object.  Unknown attributes are
    proxied to sr_config_get / sr_config_set.
    """

    def __new__(cls, driver, struct):
        address = int(struct.this)
        if address not in driver.context._devices:
            # Bug fix: object.__new__() accepts no extra arguments (TypeError
            # on Python 3); the constructor args are handled by __init__.
            device = super(Device, cls).__new__(cls)
            driver.context._devices[address] = device
        return driver.context._devices[address]

    def __init__(self, driver, struct):
        self.driver = driver
        self.struct = struct
        self._probes = None
        self._probe_groups = None

    def __getattr__(self, name):
        """Read config key `name` (e.g. device.samplerate) via sr_config_get."""
        key = getattr(ConfigKey, name.upper())
        data = new_gvariant_ptr_ptr()
        try:
            check(sr_config_get(self.driver.struct, self.struct, None,
                                key, data))
        except Error as error:
            # Bug fix: Error stores the result code in args[0]; Exception
            # instances have no 'errno' attribute, so the original
            # comparison raised AttributeError instead of inspecting
            # the code.
            if error.args[0] == SR_ERR_NA:
                raise NotImplementedError(
                    "Device does not implement %s" % name)
            else:
                raise AttributeError
        value = gvariant_ptr_ptr_value(data)
        return gvariant_to_python(value)

    def __setattr__(self, name, value):
        """Write config key `name` via sr_config_set; names that are not
        config keys fall back to normal attribute assignment."""
        try:
            key = getattr(ConfigKey, name.upper())
        except AttributeError:
            super(Device, self).__setattr__(name, value)
            return
        check(sr_config_set(self.struct, None, key, python_to_gvariant(value)))

    @property
    def vendor(self):
        return self.struct.vendor

    @property
    def model(self):
        return self.struct.model

    @property
    def version(self):
        return self.struct.version

    @property
    def probes(self):
        """Mapping of probe name -> Probe, built lazily from the C list."""
        if self._probes is None:
            self._probes = {}
            probe_list = self.struct.probes
            while (probe_list):
                probe_ptr = void_ptr_to_sr_probe_ptr(probe_list.data)
                self._probes[probe_ptr.name] = Probe(self, probe_ptr)
                probe_list = probe_list.next
        return self._probes

    @property
    def probe_groups(self):
        """Mapping of group name -> ProbeGroup, built lazily from the C list."""
        if self._probe_groups is None:
            self._probe_groups = {}
            probe_group_list = self.struct.probe_groups
            while (probe_group_list):
                probe_group_ptr = void_ptr_to_sr_probe_group_ptr(
                    probe_group_list.data)
                self._probe_groups[probe_group_ptr.name] = ProbeGroup(
                    self, probe_group_ptr)
                probe_group_list = probe_group_list.next
        return self._probe_groups
class Probe(object):
    """A single probe (channel) on a device."""

    def __init__(self, device, struct):
        self.device = device
        self.struct = struct

    @property
    def type(self):
        # Wrap the raw C enum value in the ProbeType enum class.
        return ProbeType(self.struct.type)

    @property
    def enabled(self):
        return self.struct.enabled

    @property
    def name(self):
        return self.struct.name
class ProbeGroup(object):
    """A named group of probes on a device; iterable over its probes.

    Unknown attributes are proxied to per-group sr_config_get /
    sr_config_set, mirroring Device.
    """

    def __init__(self, device, struct):
        self.device = device
        self.struct = struct
        self._probes = None

    def __iter__(self):
        return iter(self.probes)

    def __getattr__(self, name):
        """Read a per-group config key via sr_config_get.

        Bug fix: the original called an undefined helper config_key(),
        which raised NameError for every lookup; use the same ConfigKey
        resolution as Device.__getattr__.  Also compare the error code via
        args[0] -- Error instances define no 'errno' attribute.
        """
        key = getattr(ConfigKey, name.upper())
        data = new_gvariant_ptr_ptr()
        try:
            check(sr_config_get(self.device.driver.struct, self.device.struct,
                                self.struct, key, data))
        except Error as error:
            if error.args[0] == SR_ERR_NA:
                raise NotImplementedError(
                    "Probe group does not implement %s" % name)
            else:
                raise AttributeError
        value = gvariant_ptr_ptr_value(data)
        return gvariant_to_python(value)

    def __setattr__(self, name, value):
        """Write a per-group config key; non-config names are assigned
        normally (same fix as __getattr__: config_key() did not exist)."""
        try:
            key = getattr(ConfigKey, name.upper())
        except AttributeError:
            super(ProbeGroup, self).__setattr__(name, value)
            return
        check(sr_config_set(self.device.struct, self.struct,
                            key, python_to_gvariant(value)))

    @property
    def name(self):
        return self.struct.name

    @property
    def probes(self):
        """Probes in this group, built lazily from the C linked list."""
        if self._probes is None:
            self._probes = []
            probe_list = self.struct.probes
            while (probe_list):
                probe_ptr = void_ptr_to_sr_probe_ptr(probe_list.data)
                self._probes.append(Probe(self, probe_ptr))
                probe_list = probe_list.next
        return self._probes
class Session(object):
    """A libsigrok acquisition session (at most one per Context)."""

    def __init__(self, context):
        # libsigrok supports a single session at a time; guard against a
        # second one on the same context.
        assert context.session is None
        self.context = context
        self.struct = sr_session_new()
        context.session = self

    def __del__(self):
        # NOTE(review): sr_session_destroy() tears down the global session;
        # this assumes the wrapper still owns it at collection time.
        check(sr_session_destroy())

    def add_device(self, device):
        check(sr_session_dev_add(device.struct))

    def open_device(self, device):
        check(sr_dev_open(device.struct))

    def add_callback(self, callback):
        """Register callback(device, packet) for datafeed packets."""
        wrapper = partial(callback_wrapper, self, callback)
        check(sr_session_datafeed_python_callback_add(wrapper))

    def start(self):
        check(sr_session_start())

    def run(self):
        check(sr_session_run())

    def stop(self):
        check(sr_session_stop())
class Packet(object):
    """A datafeed packet; its payload is decoded lazily by packet type."""

    def __init__(self, session, struct):
        self.session = session
        self.struct = struct
        self._payload = None

    @property
    def type(self):
        return PacketType(self.struct.type)

    @property
    def payload(self):
        """Typed payload (Logic or Analog), decoded and cached on first use."""
        if self._payload is None:
            pointer = self.struct.payload
            if self.type == PacketType.LOGIC:
                self._payload = Logic(self,
                    void_ptr_to_sr_datafeed_logic_ptr(pointer))
            elif self.type == PacketType.ANALOG:
                self._payload = Analog(self,
                    void_ptr_to_sr_datafeed_analog_ptr(pointer))
            else:
                raise NotImplementedError(
                    "No Python mapping for packet type %s" % self.struct.type)
        return self._payload
class Logic(object):
    """Payload of a LOGIC packet: a raw byte buffer of samples."""

    def __init__(self, packet, struct):
        self.packet = packet
        self.struct = struct
        self._data = None

    @property
    def data(self):
        # Copy the C buffer once and cache the result.
        if self._data is None:
            self._data = cdata(self.struct.data, self.struct.length)
        return self._data
class Analog(object):
    """Payload of an ANALOG packet: float samples plus measurement info."""

    def __init__(self, packet, struct):
        self.packet = packet
        self.struct = struct
        self._data = None

    @property
    def num_samples(self):
        return self.struct.num_samples

    @property
    def mq(self):
        # Measured quantity, wrapped in the Quantity enum.
        return Quantity(self.struct.mq)

    @property
    def unit(self):
        return Unit(self.struct.unit)

    @property
    def mqflags(self):
        # Decode the C bitmask into a set of QuantityFlag values.
        return QuantityFlag.set_from_mask(self.struct.mqflags)

    @property
    def data(self):
        if self._data is None:
            self._data = float_array.frompointer(self.struct.data)
        return self._data
class Log(object):
    """Accessors for libsigrok's global log level and log domain."""

    @property
    def level(self):
        return LogLevel(sr_log_loglevel_get())

    @level.setter
    def level(self, l):
        check(sr_log_loglevel_set(l.id))

    @property
    def domain(self):
        return sr_log_logdomain_get()

    @domain.setter
    def domain(self, d):
        check(sr_log_logdomain_set(d))
class EnumValue(object):
    """Interned enum value: EnumValue(id) returns one object per (class, id).

    Instances are cached in a class-keyed registry so identity comparison
    works for equal ids of the same subclass.
    """

    # subclass -> {id -> instance}
    _enum_values = {}

    def __new__(cls, id):
        per_class = cls._enum_values.setdefault(cls, {})
        if id not in per_class:
            instance = super(EnumValue, cls).__new__(cls)
            instance.id = id
            per_class[id] = instance
        return per_class[id]
class LogLevel(EnumValue):
    """Enum for SR_LOG_* log levels."""
class PacketType(EnumValue):
    """Enum for SR_DF_* datafeed packet types."""
class Quantity(EnumValue):
    """Enum for SR_MQ_* measured quantities."""
class Unit(EnumValue):
    """Enum for SR_UNIT_* measurement units."""
class QuantityFlag(EnumValue):
    """Enum for SR_MQFLAG_* single-bit measurement flags."""

    @classmethod
    def set_from_mask(cls, mask):
        """Decompose a bitmask into a set of single-bit flag values."""
        flags = set()
        while mask:
            lowest_bit = mask & -mask  # isolate the lowest set bit
            flags.add(cls(lowest_bit))
            mask -= lowest_bit
        return flags
class ConfigKey(EnumValue):
    """Enum for SR_CONF_* configuration keys."""
class ProbeType(EnumValue):
    """Enum for SR_PROBE_* probe types."""
# Populate the enum classes above with named values derived from the
# lowlevel SWIG constants, e.g. SR_LOG_WARN becomes LogLevel.WARN.
for symbol_name in dir(lowlevel):
    for prefix, cls in [
            ('SR_LOG_', LogLevel),
            ('SR_DF_', PacketType),
            ('SR_MQ_', Quantity),
            ('SR_UNIT_', Unit),
            ('SR_MQFLAG_', QuantityFlag),
            ('SR_CONF_', ConfigKey),
            ('SR_PROBE_', ProbeType)]:
        if symbol_name.startswith(prefix):
            # Strip the prefix and intern the constant as a class attribute.
            name = symbol_name[len(prefix):]
            value = getattr(lowlevel, symbol_name)
            setattr(cls, name, cls(value))
|
nilq/baby-python
|
python
|
import os
import shutil
import audeer
import audformat
import audiofile as af
import pandas as pd
# Source material lives in ./src; the audformat output goes to ./build.
src_dir = 'src'
build_dir = audeer.mkdir('build')
# Prepare functions for getting information from file names
def parse_names(names, from_i, to_i, is_number=False, mapping=None):
    """Yield the [from_i:to_i] slice of each name.

    With is_number=True the slice is converted to int first; when a
    (non-empty) mapping is given, the slice/int is looked up in it.
    """
    for name in names:
        token = name[from_i:to_i]
        if is_number:
            token = int(token)
        if mapping:
            yield mapping[token]
        else:
            yield token
# Gather metadata
description = (
    'Berlin Database of Emotional Speech. '
    'A German database of emotional utterances '
    'spoken by actors '
    'recorded as a part of the DFG funded research project '
    'SE462/3-1 in 1997 and 1999. '
    'Recordings took place in the anechoic chamber '
    'of the Technical University Berlin, '
    'department of Technical Acoustics. '
    'It contains about 500 utterances '
    'from ten different actors '
    'expressing basic six emotions and neutral.'
)
# Relative paths ('wav/<file>') double as the table index below.
files = sorted(
    [os.path.join('wav', f) for f in os.listdir(os.path.join(src_dir, 'wav'))]
)
names = [audeer.basename_wo_ext(f) for f in files]
# emodb file names encode metadata positionally: chars 0-1 speaker id,
# 2-4 sentence code, 5 emotion letter (German initials, mapped below).
emotion_mapping = {
    'W': 'anger',
    'L': 'boredom',
    'E': 'disgust',
    'A': 'fear',
    'F': 'happiness',
    'T': 'sadness',
    'N': 'neutral',
}
emotions = list(parse_names(names, from_i=5, to_i=6, mapping=emotion_mapping))
# Recognition rates from the perception test: whitespace-delimited,
# Latin-1 encoded, decimal commas.
# NOTE(review): the 'squeeze' keyword was removed in pandas 2.0; this
# script assumes an older pandas version.
y = pd.read_csv(
    os.path.join(src_dir, 'erkennung.txt'),
    usecols=['Satz', 'erkannt'],
    index_col='Satz',
    delim_whitespace=True,
    encoding='Latin-1',
    decimal=',',
    converters={'Satz': lambda x: os.path.join('wav', x)},
    squeeze=True,
)
y = y.loc[files]
# Strip non-breaking spaces and normalize decimal commas before the cast.
y = y.replace(to_replace=u'\xa0', value='', regex=True)
y = y.replace(to_replace=',', value='.', regex=True)
confidences = y.astype('float').values
male = audformat.define.Gender.MALE
female = audformat.define.Gender.FEMALE
language = audformat.utils.map_language('de')
# Speaker id -> metadata, as documented on the emodb download page.
speaker_mapping = {
    3: {'gender': male, 'age': 31, 'language': language},
    8: {'gender': female, 'age': 34, 'language': language},
    9: {'gender': male, 'age': 21, 'language': language},
    10: {'gender': female, 'age': 32, 'language': language},
    11: {'gender': male, 'age': 26, 'language': language},
    12: {'gender': female, 'age': 30, 'language': language},
    13: {'gender': male, 'age': 32, 'language': language},
    14: {'gender': female, 'age': 35, 'language': language},
    15: {'gender': male, 'age': 25, 'language': language},
    16: {'gender': female, 'age': 31, 'language': language},
}
speakers = list(parse_names(names, from_i=0, to_i=2, is_number=True))
# Sentence code -> German transcription.
transcription_mapping = {
    'a01': 'Der Lappen liegt auf dem Eisschrank.',
    'a02': 'Das will sie am Mittwoch abgeben.',
    'a04': 'Heute abend könnte ich es ihm sagen.',
    'a05': 'Das schwarze Stück Papier befindet sich da oben neben dem '
           'Holzstück.',
    'a07': 'In sieben Stunden wird es soweit sein.',
    'b01': 'Was sind denn das für Tüten, die da unter dem Tisch '
           'stehen.',
    'b02': 'Sie haben es gerade hochgetragen und jetzt gehen sie '
           'wieder runter.',
    'b03': 'An den Wochenenden bin ich jetzt immer nach Hause '
           'gefahren und habe Agnes besucht.',
    'b09': 'Ich will das eben wegbringen und dann mit Karl was '
           'trinken gehen.',
    'b10': 'Die wird auf dem Platz sein, wo wir sie immer hinlegen.',
}
transcriptions = list(parse_names(names, from_i=2, to_i=5))
# Read each file's duration in parallel worker threads.
durations = audeer.run_tasks(
    task_func=lambda x: pd.to_timedelta(
        af.duration(os.path.join(src_dir, x)),
        unit='s',
    ),
    params=[([f], {}) for f in files],
    num_workers=12,
)
# Convert to audformat
db = audformat.Database(
    name='emodb',
    author=(
        'Felix Burkhardt, '
        'Astrid Paeschke, '
        'Miriam Rolfes, '
        'Walter Sendlmeier, '
        'Benjamin Weiss'
    ),
    organization='audEERING',
    license=audformat.define.License.CC0_1_0,
    source='http://emodb.bilderbar.info/download/download.zip',
    usage=audformat.define.Usage.UNRESTRICTED,
    languages=[language],
    description=description,
    meta={
        'pdf': (
            'http://citeseerx.ist.psu.edu/viewdoc/'
            'download?doi=10.1.1.130.8506&rep=rep1&type=pdf'
        ),
    },
)
# Media
db.media['microphone'] = audformat.Media(
    format='wav',
    sampling_rate=16000,
    channels=1,
)
# Raters
db.raters['gold'] = audformat.Rater()
# Schemes
db.schemes['emotion'] = audformat.Scheme(
    labels=[str(x) for x in emotion_mapping.values()],
    description='Six basic emotions and neutral.',
)
db.schemes['confidence'] = audformat.Scheme(
    audformat.define.DataType.FLOAT,
    minimum=0,
    maximum=1,
    description='Confidence of emotion ratings.',
)
db.schemes['speaker'] = audformat.Scheme(
    labels=speaker_mapping,
    description=(
        'The actors could produce each sentence as often as '
        'they liked and were asked to remember a real '
        'situation from their past when they had felt this '
        'emotion.'
    ),
)
db.schemes['transcription'] = audformat.Scheme(
    labels=transcription_mapping,
    description='Sentence produced by actor.',
)
db.schemes['duration'] = audformat.Scheme(dtype=audformat.define.DataType.TIME)
# Tables: one filewise table for per-file metadata, one for the gold
# emotion annotation.
index = audformat.filewise_index(files)
db['files'] = audformat.Table(index)
db['files']['duration'] = audformat.Column(scheme_id='duration')
db['files']['duration'].set(durations, index=index)
db['files']['speaker'] = audformat.Column(scheme_id='speaker')
db['files']['speaker'].set(speakers)
db['files']['transcription'] = audformat.Column(scheme_id='transcription')
db['files']['transcription'].set(transcriptions)
db['emotion'] = audformat.Table(index)
db['emotion']['emotion'] = audformat.Column(
    scheme_id='emotion',
    rater_id='gold',
)
db['emotion']['emotion'].set(emotions)
db['emotion']['emotion.confidence'] = audformat.Column(
    scheme_id='confidence',
    rater_id='gold',
)
# Recognition rates come as percentages; the scheme expects [0, 1].
db['emotion']['emotion.confidence'].set(confidences / 100.0)
# Save database to build folder
shutil.copytree(
    os.path.join(src_dir, 'wav'),
    os.path.join(build_dir, 'wav'),
)
db.save(build_dir)
|
nilq/baby-python
|
python
|
def _ring_reactance_matrix():
    """Return a fresh 5-node reactance matrix (edges 0-1, 1-2, 2-3, 1-4).

    All four players share the same topology; a new list is built on every
    call so the per-player matrices remain independent objects, exactly as
    with the original literals.
    """
    return [
        [0, 0.1, 0, 0, 0],
        [0.1, 0, 0.1, 0, 0.1],
        [0, 0.1, 0, 0.1, 0],
        [0, 0, 0.1, 0, 0],
        [0, 0.1, 0, 0, 0],
    ]


def _make_node(min_load=0, max_load=0, min_power=0, max_power=0,
               load_coeff=0, load_ref=0, power_coeff_a=0.1,
               power_coeff_b=1, power_coeff_c=0,
               gen_ramp_up=5, gen_ramp_down=5):
    """Build one node parameter dict.

    Defaults correspond to the most common passive node in the original
    literals; callers override only the fields that differ.
    """
    return {
        'min_load': min_load,
        'max_load': max_load,
        'min_power': min_power,
        'max_power': max_power,
        'load_coeff': load_coeff,
        'load_ref': load_ref,
        'power_coeff_a': power_coeff_a,
        'power_coeff_b': power_coeff_b,
        'power_coeff_c': power_coeff_c,
        'gen_ramp_up': gen_ramp_up,
        'gen_ramp_down': gen_ramp_down,
    }


# --- Player 0 -------------------------------------------------------------
X_raw_0 = _ring_reactance_matrix()
# Node 0 carries the generator/load pair; nodes 1-4 are passive.
node_info_0 = [
    _make_node(max_load=30, max_power=15, load_coeff=10, load_ref=20,
               power_coeff_b=2.5, gen_ramp_up=50, gen_ramp_down=50),
    _make_node(power_coeff_a=1, power_coeff_c=1),
    _make_node(load_ref=1, power_coeff_a=1, power_coeff_c=1),
    _make_node(load_coeff=1, power_coeff_a=10.1, power_coeff_c=1),
    _make_node(load_coeff=1, power_coeff_a=10.1, power_coeff_c=1),
]  # 9 - 13 - 11 5 - 10
connection_info_0 = {
    'connection_index': [1],
    'connection_x': [0.1],
    'connection_area': [1],
    'connection_exchange_max': [100],
}
player0_info = {
    'index': 0,
    'X_raw': X_raw_0,
    'node_info': node_info_0,
    'connection_info': connection_info_0,
}

# --- Player 1 -------------------------------------------------------------
X_raw_1 = _ring_reactance_matrix()
node_info_1 = [
    _make_node(max_load=25, max_power=40, load_coeff=10, load_ref=20,
               power_coeff_b=2, gen_ramp_up=50, gen_ramp_down=50),
    _make_node(load_coeff=1, power_coeff_c=1),
    _make_node(load_coeff=1, power_coeff_c=1),
    _make_node(load_coeff=4, power_coeff_c=1),
    _make_node(load_coeff=2, power_coeff_c=1),
]  # 8 - 10 - 9 4 - 19
connection_info_1 = {
    'connection_index': [0, 2, 3],
    'connection_x': [0.1, 0.1, 0.1],
    'connection_area': [0, 2, 3],
    'connection_exchange_max': [100, 100, 100],
}
player1_info = {
    'index': 1,
    'X_raw': X_raw_1,
    'node_info': node_info_1,
    'connection_info': connection_info_1,
}

# --- Player 2 -------------------------------------------------------------
X_raw_2 = _ring_reactance_matrix()
node_info_2 = [
    _make_node(max_load=25, max_power=20, load_coeff=5, load_ref=15,
               power_coeff_b=3, gen_ramp_up=50, gen_ramp_down=50),
    _make_node(load_coeff=4),
    _make_node(load_coeff=3),
    _make_node(load_coeff=1),
    _make_node(load_coeff=1),
]  # 9 - 13 - 11 5 - 8
connection_info_2 = {
    'connection_index': [1, 4],
    'connection_x': [0.1, 0.1],
    'connection_area': [1, 4],
    'connection_exchange_max': [100, 100],
}
player2_info = {
    'index': 2,
    'X_raw': X_raw_2,
    'node_info': node_info_2,
    'connection_info': connection_info_2,
}

# --- Player 3 -------------------------------------------------------------
X_raw_3 = _ring_reactance_matrix()
node_info_3 = [
    _make_node(max_load=25, max_power=10, load_coeff=5, load_ref=20,
               power_coeff_b=2, gen_ramp_up=50, gen_ramp_down=50),
    _make_node(load_coeff=4),
    _make_node(load_coeff=3),
    _make_node(load_coeff=1),
    _make_node(load_coeff=1),
]  # 9 - 13 - 11 5 - 8
connection_info_3 = {
    'connection_index': [1],
    'connection_x': [0.1],
    'connection_area': [1],
    'connection_exchange_max': [100],
}
player3_info = {
'index': 3,
'X_raw': X_raw_3,
'node_info': node_info_3,
'connection_info': connection_info_3
}
X_raw_4 = [
[0, 0.1, 0, 0, 0],
[0.1, 0, 0.1, 0, 0.1],
[0, 0.1, 0, 0.1, 0],
[0, 0, 0.1, 0, 0],
[0, 0.1, 0, 0, 0]
]
node_info_4 = [
{
'min_load': 0,
'max_load': 25,
'min_power': 0,
'max_power': 10,
'load_coeff': 5,
'load_ref': 20,
'power_coeff_a': 0.1,
'power_coeff_b': 3,
'power_coeff_c': 0,
'gen_ramp_up': 50,
'gen_ramp_down': 50
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 4,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 3,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
{
'min_load': 0,
'max_load': 0,
'min_power': 0,
'max_power': 0,
'load_coeff': 1,
'load_ref': 0,
'power_coeff_a': 0.1,
'power_coeff_b': 1,
'power_coeff_c': 0,
'gen_ramp_up': 5,
'gen_ramp_down': 5
},
] # 9 - 13 - 11 5 - 8
connection_info_4 = {
'connection_index': [2],
'connection_x': [0.1],
'connection_area': [2],
'connection_exchange_max': [100]
}
player4_info = {
'index': 4,
'X_raw': X_raw_4,
'node_info': node_info_4,
'connection_info': connection_info_4
}
namejqy = 'jqy'
|
nilq/baby-python
|
python
|
from typing import List
from pydantic import BaseModel, Field
__all__ = [
"ArticleRankDTO",
]
class ArticleRankDTO(BaseModel):
    """DTO for one article-ranking entry: the title and its view count.

    NOTE(review): the Field descriptions are user-facing schema text in
    Chinese ("article title" / "article view count"); they are runtime
    values and are deliberately left untranslated.
    """
    # Article title.
    articleTitle: str = Field(
        ... ,
        description = "文章标题"
    )
    # Number of times the article was viewed.
    viewCount: int = Field(
        ... ,
        description = "文章浏览量"
    )
|
nilq/baby-python
|
python
|
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
"""
Created on 4 Dec 2013
@author: George
"""
"""
test script to test the generator
"""
from SimPy.Simulation import now, activate, simulate, infinity, initialize
from .EventGenerator import EventGenerator
from .Machine import Machine
from .Source import Source
from .Exit import Exit
from .Part import Part
from .Queue import Queue
from .Globals import G
from . import ExcelHandler
from . import Globals
# Enable trace output so ExcelHandler can dump it at the end of the run.
G.trace = "Yes"
# Topology: Source -> Machine1 -> Queue1 -> Machine2 -> Queue2; the event
# generator periodically moves finished entities from Q2 to the Exit.
S = Source("S1", "Source", mean=1, item=Part)
M1 = Machine("M1", "Machine1", mean=0.75)
Q1 = Queue("Q1", "Queue1", capacity=infinity)
M2 = Machine("M2", "Machine2", mean=0.75)
Q2 = Queue("Q2", "Queue2", capacity=infinity)
E = Exit("E1", "Exit")
# define predecessors and successors for the objects
S.defineRouting([M1])
M1.defineRouting([S], [Q1])
Q1.defineRouting([M1], [M2])
M2.defineRouting([Q1], [Q2])
Q2.defineRouting([M2])
# Arguments for Globals.moveExcess: drain Q2 into the exit whenever the
# safety stock is exceeded, consuming 20 entities at a time.
argumentDict = {"from": "Q2", "to": "E1", "safetyStock": 70, "consumption": 20}
EG = EventGenerator(
    id="EV",
    name="ExcessEntitiesMover",
    start=60,
    interval=60,
    method=Globals.moveExcess,
    argumentDict=argumentDict,
)
G.ObjList = [S, M1, M2, E, Q1, Q2, EG]
initialize()  # initialize the simulation (SimPy method)
# Initialize and activate every simulation object before running.
for object in G.ObjList:
    object.initialize()
for object in G.ObjList:
    activate(object, object.run())
G.maxSimTime = 400
simulate(until=G.maxSimTime)  # run the simulation
# carry on the post processing operations for every object in the topology
for object in G.ObjList:
    object.postProcessing()
ExcelHandler.outputTrace("TRACE")
# 2to3 artifact: each print receives a single tuple argument on purpose.
print(("the system produced", E.numOfExits, "parts"))
print(
    (
        "the waiting ratio of",
        M1.objName,
        "is",
        (M1.totalWaitingTime / G.maxSimTime) * 100,
        "%",
    )
)
print(
    (
        "the waiting ratio of",
        M2.objName,
        "is",
        (M2.totalWaitingTime / G.maxSimTime) * 100,
        "%",
    )
)
|
nilq/baby-python
|
python
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
# (c) 2017, Peter Sprygada <psprygad@redhat.com>
# (c) 2017 Ansible Project
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import gettext
import os
import shlex
from abc import abstractmethod, abstractproperty
from functools import wraps
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins import AnsiblePlugin
from ansible.plugins.loader import shell_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['ConnectionBase', 'ensure_connect']
BUFSIZE = 65536
def ensure_connect(func):
    """Decorator for connection-plugin methods that need a live connection.

    The wrapped method lazily opens the connection (via ``self._connect()``)
    the first time it is invoked, then delegates to the original method.
    """
    @wraps(func)
    def _connected_call(self, *args, **kwargs):
        # Open the transport on demand; subsequent calls skip this.
        if not self._connected:
            self._connect()
        return func(self, *args, **kwargs)
    return _connected_call
class ConnectionBase(AnsiblePlugin):
    '''
    A base class for connections to contain common code.
    '''

    # Capability flags that concrete connection plugins override.
    has_pipelining = False
    has_native_async = False  # eg, winrm
    always_pipeline_modules = False  # eg, winrm
    become_methods = C.BECOME_METHODS
    # When running over this connection type, prefer modules written in a certain language
    # as discovered by the specified file extension.  An empty string as the
    # language means any language.
    module_implementation_preferences = ('',)
    allow_executable = True

    # the following control whether or not the connection supports the
    # persistent connection framework or not
    supports_persistence = False
    force_persistence = False

    default_user = None

    def __init__(self, play_context, new_stdin, shell=None, *args, **kwargs):
        super(ConnectionBase, self).__init__()

        # All these hasattrs allow subclasses to override these parameters
        if not hasattr(self, '_play_context'):
            self._play_context = play_context
        if not hasattr(self, '_new_stdin'):
            self._new_stdin = new_stdin
        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        if not hasattr(self, '_display'):
            self._display = display
        if not hasattr(self, '_connected'):
            self._connected = False

        self.success_key = None
        self.prompt = None
        self._connected = False
        self._socket_path = None

        if shell is not None:
            self._shell = shell

        # load the shell plugin for this action/connection
        if play_context.shell:
            shell_type = play_context.shell
        elif hasattr(self, '_shell_type'):
            shell_type = getattr(self, '_shell_type')
        else:
            # Derive the shell family from the basename of the configured
            # remote executable (e.g. /bin/bash -> bash), falling back to a
            # compatibility scan across all known shell plugins.
            shell_type = 'sh'
            shell_filename = os.path.basename(self._play_context.executable)
            try:
                shell = shell_loader.get(shell_filename)
            except Exception:
                shell = None
            if shell is None:
                for shell in shell_loader.all():
                    if shell_filename in shell.COMPATIBLE_SHELLS:
                        break
            shell_type = shell.SHELL_FAMILY

        self._shell = shell_loader.get(shell_type)
        if not self._shell:
            raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)

    @property
    def connected(self):
        '''Read-only property holding whether the connection to the remote host is active or closed.'''
        return self._connected

    @property
    def socket_path(self):
        '''Read-only property holding the connection socket path for this remote host'''
        return self._socket_path

    def _become_method_supported(self):
        ''' Checks if the current class supports this privilege escalation method '''
        if self._play_context.become_method in self.become_methods:
            return True
        raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % self._play_context.become_method)

    @staticmethod
    def _split_ssh_args(argstring):
        """
        Takes a string like '-o Foo=1 -o Bar="foo bar"' and returns a
        list ['-o', 'Foo=1', '-o', 'Bar=foo bar'] that can be added to
        the argument list. The list will not contain any empty elements.
        """
        try:
            # Python 2.6.x shlex doesn't handle unicode type so we have to
            # convert args to byte string for that case.  More efficient to
            # try without conversion first but python2.6 doesn't throw an
            # exception, it merely mangles the output:
            # >>> shlex.split(u't e')
            # ['t\x00\x00\x00', '\x00\x00\x00e\x00\x00\x00']
            return [to_text(x.strip()) for x in shlex.split(to_bytes(argstring)) if x.strip()]
        except AttributeError:
            # In Python3, shlex.split doesn't work on a byte string.
            return [to_text(x.strip()) for x in shlex.split(argstring) if x.strip()]

    @abstractproperty
    def transport(self):
        """String used to identify this Connection class from other classes"""
        pass

    @abstractmethod
    def _connect(self):
        """Connect to the host we've been initialized with"""
        # Check if PE is supported
        if self._play_context.become:
            self._become_method_supported()

    @ensure_connect
    @abstractmethod
    def exec_command(self, cmd, in_data=None, sudoable=True):
        """Run a command on the remote host.

        :arg cmd: byte string containing the command
        :kwarg in_data: If set, this data is passed to the command's stdin.
            This is used to implement pipelining.  Currently not all
            connection plugins implement pipelining.
        :kwarg sudoable: Tell the connection plugin if we're executing
            a command via a privilege escalation mechanism.  This may affect
            how the connection plugin returns data.  Note that not all
            connections can handle privilege escalation.
        :returns: a tuple of (return code, stdout, stderr)  The return code is
            an int while stdout and stderr are both byte strings.

        When a command is executed, it goes through multiple commands to get
        there.  It looks approximately like this::

            [LocalShell] ConnectionCommand [UsersLoginShell (*)] ANSIBLE_SHELL_EXECUTABLE [(BecomeCommand ANSIBLE_SHELL_EXECUTABLE)] Command
        :LocalShell: Is optional.  It is run locally to invoke the
            ``Connection Command``.  In most instances, the
            ``ConnectionCommand`` can be invoked directly instead.  The ssh
            connection plugin which can have values that need expanding
            locally specified via ssh_args is the sole known exception to
            this.  Shell metacharacters in the command itself should be
            processed on the remote machine, not on the local machine so no
            shell is needed on the local machine.  (Example, ``/bin/sh``)
        :ConnectionCommand: This is the command that connects us to the remote
            machine to run the rest of the command.  ``ansible_ssh_user``,
            ``ansible_ssh_host`` and so forth are fed to this piece of the
            command to connect to the correct host (Examples ``ssh``,
            ``chroot``)
        :UsersLoginShell: This shell may or may not be created depending on
            the ConnectionCommand used by the connection plugin.  This is the
            shell that the ``ansible_ssh_user`` has configured as their login
            shell.  In traditional UNIX parlance, this is the last field of
            a user's ``/etc/passwd`` entry   We do not specifically try to run
            the ``UsersLoginShell`` when we connect.  Instead it is implicit
            in the actions that the ``ConnectionCommand`` takes when it
            connects to a remote machine.  ``ansible_shell_type`` may be set
            to inform ansible of differences in how the ``UsersLoginShell``
            handles things like quoting if a shell has different semantics
            than the Bourne shell.
        :ANSIBLE_SHELL_EXECUTABLE: This is the shell set via the inventory var
            ``ansible_shell_executable`` or via
            ``constants.DEFAULT_EXECUTABLE`` if the inventory var is not set.
            We explicitly invoke this shell so that we have predictable
            quoting rules at this point.  ``ANSIBLE_SHELL_EXECUTABLE`` is only
            settable by the user because some sudo setups may only allow
            invoking a specific shell.  (For instance, ``/bin/bash`` may be
            allowed but ``/bin/sh``, our default, may not).  We invoke this
            twice, once after the ``ConnectionCommand`` and once after the
            ``BecomeCommand``.  After the ConnectionCommand, this is run by
            the ``UsersLoginShell``.  After the ``BecomeCommand`` we specify
            that the ``ANSIBLE_SHELL_EXECUTABLE`` is being invoked directly.
        :BecomeComand ANSIBLE_SHELL_EXECUTABLE: Is the command that performs
            privilege escalation.  Setting this up is performed by the action
            plugin prior to running ``exec_command``.  So we just get passed
            :param:`cmd` which has the BecomeCommand already added.
            (Examples: sudo, su)  If we have a BecomeCommand then we will
            invoke a ANSIBLE_SHELL_EXECUTABLE shell inside of it so that we
            have a consistent view of quoting.
        :Command: Is the command we're actually trying to run remotely.
            (Examples: mkdir -p $HOME/.ansible, python $HOME/.ansible/tmp-script-file)
        """
        pass

    @ensure_connect
    @abstractmethod
    def put_file(self, in_path, out_path):
        """Transfer a file from local to remote"""
        pass

    @ensure_connect
    @abstractmethod
    def fetch_file(self, in_path, out_path):
        """Fetch a file from remote to local"""
        pass

    @abstractmethod
    def close(self):
        """Terminate the connection"""
        pass

    def check_become_success(self, b_output):
        # True when some output line equals the become success marker.
        b_success_key = to_bytes(self._play_context.success_key)
        for b_line in b_output.splitlines(True):
            if b_success_key == b_line.rstrip():
                return True
        return False

    def check_password_prompt(self, b_output):
        # True when any output line starts with the configured become prompt;
        # the prompt may alternatively be a callable predicate.
        if self._play_context.prompt is None:
            return False
        elif isinstance(self._play_context.prompt, string_types):
            b_prompt = to_bytes(self._play_context.prompt).strip()
            b_lines = b_output.splitlines()
            return any(l.strip().startswith(b_prompt) for l in b_lines)
        else:
            return self._play_context.prompt(b_output)

    def check_incorrect_password(self, b_output):
        # Localized "incorrect password" string for the active become method.
        b_incorrect_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_ERROR_STRINGS[self._play_context.become_method]))
        return b_incorrect_password and b_incorrect_password in b_output

    def check_missing_password(self, b_output):
        # Localized "password required" string for the active become method.
        b_missing_password = to_bytes(gettext.dgettext(self._play_context.become_method, C.BECOME_MISSING_STRINGS[self._play_context.become_method]))
        return b_missing_password and b_missing_password in b_output

    def connection_lock(self):
        # Serialize access across forked workers via an exclusive advisory
        # lock on the play context's lock file descriptor.
        f = self._play_context.connection_lockfd
        display.vvvv('CONNECTION: pid %d waiting for lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)
        fcntl.lockf(f, fcntl.LOCK_EX)
        display.vvvv('CONNECTION: pid %d acquired lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)

    def connection_unlock(self):
        # Release the advisory lock taken by connection_lock().
        f = self._play_context.connection_lockfd
        fcntl.lockf(f, fcntl.LOCK_UN)
        display.vvvv('CONNECTION: pid %d released lock on %d' % (os.getpid(), f), host=self._play_context.remote_addr)

    def reset(self):
        # Default no-op; persistent connection plugins override this.
        display.warning("Reset is not implemented for this connection")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#encoding=utf-8
#-----------------------------------------
# Usage: python3 4-getattr-builtins.py
# Description: compare __getattr__ and __getattribute__
#-----------------------------------------
class GetAttr:
    """Demo class: ``__getattr__`` fields only *missing* attribute lookups."""
    eggs = 88  # eggs stored on class, spam on instance

    def __init__(self):
        self.spam = 77

    def __len__(self):
        # len here, else __getattr__ called with __len__
        print('__len__: 42')
        return 42

    def __getattr__(self, attr):
        # Provide __str__ if asked, else dummy func
        print('getattr: ' + attr)
        result = '[Getattr str]' if attr == '__str__' else None
        return lambda *args, _r=result: _r
class GetAttribute(object):
    """Demo class: ``__getattribute__`` intercepts *every* attribute lookup."""
    # object required in 2.X, implied in 3.X; in 2.X all new-style tools
    # (incl __getattribute__, some __X__ defaults) need the derivation.
    eggs = 88

    def __init__(self):
        self.spam = 77

    def __len__(self):
        print('__len__: 42')
        return 42

    def __getattribute__(self, attr):
        print('getattribute: ' + attr)
        value = '[GetAttribute str]' if attr == '__str__' else None
        return lambda *args, _v=value: _v
if __name__ == '__main__':
    # Drive both classes through the same lookups to contrast which hooks
    # fire for explicit attribute access vs. implicit built-in dispatch.
    for Class in GetAttr, GetAttribute:
        print('\n' + Class.__name__.ljust(50, '='))
        X = Class()
        X.eggs  # Class attr
        X.spam  # Instance attr
        X.other  # Missing attr
        len(X)  # __len__ defined explicitly
        # New-styles must support [], +, call directly: redefine
        try:
            X[0]  # __getitem__?
        except:
            print('fail []')
        try:
            X + 99  # __add__?
        except:
            print('fail +')
        try:
            X()  # __call__? (implicit via built-in)
        except:
            print('fail ()')
        X.__call__()  # __call__? (explicit, not inherited)
        print(X.__str__())  # __str__? (explicit, inherited from type)
        print(X)  # __str__? (implicit via built-in)
|
nilq/baby-python
|
python
|
# Python Exercise 024
# Read the name of a city and report whether it starts with 'Santo'.
cidade = str(input('Digite o nome de uma cidade: ')).strip()
minusculo = cidade.lower()  # lowercase copy for a case-insensitive check
santo = 'santo'in minusculo[0:5]
print(santo)
# Alternative: the same check written as a single expression.
print(cidade[:5].lower() == 'santo')
|
nilq/baby-python
|
python
|
import numpy
import pytest
import helpers
import meshio
@pytest.mark.parametrize(
    "mesh",
    [
        helpers.tri_mesh,
        helpers.tri_mesh_2d,
        helpers.tet_mesh,
        helpers.add_cell_data(helpers.tri_mesh, 1, dtype=float),
        helpers.add_cell_data(helpers.tri_mesh, 1, dtype=numpy.int32),
    ],
)
def test_dolfin(mesh):
    # Round-trip each mesh through the DOLFIN XML writer/reader, comparing
    # coordinates with a 1.0e-15 tolerance.
    helpers.write_read(meshio.dolfin.write, meshio.dolfin.read, mesh, 1.0e-15)
def test_generic_io():
    # Exercise extension-based format dispatch for .xml (DOLFIN) files.
    helpers.generic_io("test.xml")
    # With additional, insignificant suffix:
    helpers.generic_io("test.0.xml")
|
nilq/baby-python
|
python
|
"""
LINK: https://leetcode.com/problems/factorial-trailing-zeroes/
Given an integer n, return the number of trailing zeroes in n!.
Follow up: Could you write a solution that works in logarithmic time complexity?
Example 1:
Input: n = 3
Output: 0
Explanation: 3! = 6, no trailing zero.
Example 2:
Input: n = 5
Output: 1
Explanation: 5! = 120, one trailing zero.
Example 3:
Input: n = 0
Output: 0
Constraints:
0 <= n <= 104
"""
def trailingZeroes(n):
    """Return the number of trailing zeros in n! in O(log n) time.

    Each trailing zero comes from a factor 10 = 2 * 5, and factors of 5 are
    the scarce ones, so count the multiples of 5, 25, 125, ... up to n.
    """
    zeros = 0
    power_of_five = 5
    while power_of_five <= n:
        zeros += n // power_of_five
        power_of_five *= 5
    return zeros
def trailingZeroes_recursive(n):
    """Recursive variant: z(n) = n//5 + z(n//5), with z(0) = 0."""
    return 0 if not n else n // 5 + trailingZeroes_recursive(n // 5)
nilq/baby-python
|
python
|
from generators import *
from laws import (monoid_laws, functor_laws, applicative_laws, monad_laws, trans_laws)
from fplib.maybe import Maybe
from fplib.transformer import trans
from fplib.ident_t import IdT
T = trans(IdT, Maybe)
def cmpidt(idt0, idt1):
    """Compare two IdT-wrapped values by their unwrapped contents."""
    lhs, rhs = idt0.unwrap, idt1.unwrap
    return lhs == rhs
def test_idt_functor():
    # Functor laws for IdT-wrapped Maybe values over random string payloads.
    xs = map(T.unit, random_strings(10))
    functor_laws(xs, 100, cmp_fun=cmpidt)
def test_idt_applicative():
    # Applicative laws for the IdT/Maybe transformer stack.
    xs = map(T.unit, random_strings(10))
    applicative_laws(T, xs, 100, cmp_fun=cmpidt)
def test_idt_monad():
    # Monad laws for the IdT/Maybe transformer stack.
    xs = map(T.unit, random_strings(10))
    monad_laws(T, xs, 100, cmp_fun=cmpidt)
def test_idt_transformer():
    # Transformer laws: lifting raw Maybe values into IdT must commute.
    xs = random_maybes(random_strings(10))
    trans_laws(T, xs, 100, cmp_fun=cmpidt)
|
nilq/baby-python
|
python
|
"""
Energy level and transitions classes
"""
import numpy as np
import astropy.units as u
import astropy.constants as const
from fiasco.util import vectorize_where
__all__ = ['Level', 'Transitions']
class Level(object):
    # One atomic energy level, backed by a single row (self._index) of the
    # parsed CHIANTI "elvlc" table passed in as `elvlc`.
    def __init__(self, index, elvlc):
        self._index = index
        self._elvlc = elvlc

    def __repr__(self):
        return f"""Level: {self.level}
Configuration: {self.configuration}
Orbital Angular Momentum: {self.orbital_angular_momentum_label}
Energy: {self.energy.to(u.eV)}"""

    @property
    def level(self):
        # Level index as listed in the elvlc file.
        return self._elvlc['level'][self._index]

    @property
    def configuration(self):
        # Electron configuration label.
        return self._elvlc['config'][self._index]

    @property
    def multiplicity(self):
        return self._elvlc['multiplicity'][self._index]

    @property
    def total_angular_momentum(self):
        return self._elvlc['J'][self._index]

    @property
    def orbital_angular_momentum_label(self):
        return self._elvlc['L_label'][self._index]

    @property
    @u.quantity_input
    def energy(self) -> u.erg:
        # Prefer the observed energy; a negative E_obs is the sentinel for
        # "not observed", in which case the theoretical value is used.
        # Table values are wavenumbers, hence the h*c conversion to energy.
        key = 'E_th' if self._elvlc['E_obs'][self._index] < 0 else 'E_obs'
        return self._elvlc[key][self._index]*const.h*const.c
class Transitions(object):
    # Radiative transition data for one ion, combining the parsed "elvlc"
    # (levels) and "wgfa" (transition) CHIANTI tables.
    def __init__(self, elvlc, wgfa):
        self._elvlc = elvlc
        self._wgfa = wgfa

    @property
    def is_twophoton(self):
        """
        True if the transition is a two-photon decay
        """
        # Two-photon decays are flagged with a zero wavelength in wgfa.
        return self._wgfa['wavelength'] == 0.*u.angstrom

    @property
    def is_observed(self):
        """
        True for transitions that connect two observed energy levels
        """
        # Theoretical-only transitions are stored with negative wavelengths.
        return self._wgfa['wavelength'] > 0.*u.angstrom

    @property
    @u.quantity_input
    def A(self) -> u.s**(-1):
        """
        Spontaneous transition probability due to radiative decay
        """
        return self._wgfa['A']

    @property
    @u.quantity_input
    def wavelength(self) -> u.angstrom:
        # Absolute value strips the negative sign used to mark
        # theoretical (unobserved) wavelengths.
        return np.fabs(self._wgfa['wavelength'])

    @property
    def upper_level(self):
        return self._wgfa['upper_level']

    @property
    def lower_level(self):
        return self._wgfa['lower_level']

    @property
    @u.quantity_input
    def delta_energy(self) -> u.erg:
        # Energy difference between the upper and lower level of every
        # transition, substituting theoretical energies where E_obs == -1.
        energy = u.Quantity(np.where(
            self._elvlc['E_obs'].value == -1, self._elvlc['E_th'].value,
            self._elvlc['E_obs'].value), self._elvlc['E_obs'].unit)
        # Row 0: lower-level energies, row 1: upper-level energies.
        indices = np.vstack([vectorize_where(self._elvlc['level'], self.lower_level),
                             vectorize_where(self._elvlc['level'], self.upper_level)])
        return np.diff(energy[indices], axis=0).flatten() * const.h * const.c
|
nilq/baby-python
|
python
|
"""
# -*- coding: utf-8 -*-
__author__ = "Akash"
__email__ = "akashjio6666@gmail.com"
__version__ = "1.0.0"
__copyright__ = "Copyright (c) 2004-2020 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
Description:
Py-Insta Is A Python Library
Created By Akash Pattnaik From
India..
Py-Insta Helps Users To Easily
Scrape Instagram Data
And Print It Or You Can Define It Into A Variable...
If You Find Bugs Then Please Report To
@AKASH_AM1 On Telegram...
Pre-Requests:
from bs4 import BeautifulSoup
import requests
Documentation:
Github: https://github.com/BLUE-DEVIL1134/Py-Insta
PyPi: https://pypi.org/user/AkashPattnaik/
"""
__version__ = 1.0
import requests
from bs4 import BeautifulSoup
__url__ = "https://www.instagram.com/{}/"
def Insta(username):
    """Scrape public profile stats for an Instagram username.

    Returns a dict with 'Followers', 'Following', 'Posts' and 'Name' on
    success, or the strings 'No Such Username' / 'No InterNet Connection'
    on failure (original API contract preserved).
    """
    try:
        # Strip a leading '@' in case someone types @UserName.
        response = requests.get(__url__.format(username.replace('@', '')), timeout=5)
        if '404' in str(response):  # invalid username -> 404 page
            return 'No Such Username'
        soup = BeautifulSoup(response.text, "html.parser")
        meta = soup.find("meta", property="og:description")
        # BUGFIX: soup.find returns None when the og:description tag is
        # missing; the old code crashed with AttributeError on meta.attrs.
        if meta is None:
            return 'No Such Username'
        # og:description looks like "N Followers, N Following, N Posts - ..."
        # NOTE(review): the fixed word offsets (0/2/4/13) assume Instagram's
        # current description layout — verify against a live page.
        s = meta.attrs['content'].split(' ')
        return {
            'Followers': s[0],
            'Following': s[2],
            'Posts': s[4],
            'Name': s[13]
        }
    except requests.exceptions.InvalidURL:
        # Malformed usernames can make the built URL invalid.
        return 'No Such Username'
    except (requests.ConnectionError, requests.Timeout):
        return 'No InterNet Connection'
|
nilq/baby-python
|
python
|
import scrython
import time
# Ask for a set name, then scan Scryfall's set list for an exact match.
query = input("Type the name of the set: ")
time.sleep(0.05)  # brief pause before hitting the Scryfall API
sets = scrython.sets.Sets()
for i in range(sets.data_length()):
    if sets.set_name(i) == query:
        # First exact name match wins; print its code and stop.
        print("Set code:", sets.set_code(i).upper())
        break
    else:
        continue  # no-op branch kept from the original control flow
|
nilq/baby-python
|
python
|
import json
import binascii
import struct
import random
from io import BytesIO
import sys
from operator import itemgetter
class Item():
    """Base record for one drop slot: an item id plus quantity and drop rate."""

    def __init__(self, name, index, quantity, rate):
        self.name = name          # item id (as parsed from the dump)
        self.index = index        # enum index of the item id
        self.quantity = quantity  # how many drop at once
        self.rate = rate          # drop chance

    def __repr__(self):
        details = "({}, index={}, quantity={}, rate={})".format(
            self.name, self.index, self.quantity, self.rate)
        return self.__class__.__name__ + details
class CommonItem(Item):
    """Common-item drop slot; behaves exactly like Item (the old __init__
    override only forwarded to super and has been removed)."""
class RareItem(Item):
    """Rare-item drop slot; behaves exactly like Item (the old __init__
    override only forwarded to super and has been removed)."""
class CommonIngredient(Item):
    """Common-ingredient drop slot; behaves exactly like Item (the old
    __init__ override only forwarded to super and has been removed)."""
class RareIngredient(Item):
    """Rare-ingredient drop slot; behaves exactly like Item (the old
    __init__ override only forwarded to super and has been removed)."""
class Shard(Item):
    """Shard drop slot; behaves exactly like Item (the old __init__
    override only forwarded to super and has been removed)."""
class Coin():
    """A coin drop slot: coin type, drop rate, and an override amount."""

    def __init__(self, name, index, rate, override):
        self.name = name          # coin type id
        self.index = index        # enum index of the coin type
        self.rate = rate          # drop chance
        self.override = override  # override value for the coin amount

    def __repr__(self):
        return f"Coin({self.name}, index={self.index}, rate={self.rate}, override={self.override})"
def getNameFromEntry(entry):
    """Return the row's name string stored under entry["Key"]["Value"]["Value"]."""
    key_field = entry["Key"]["Value"]
    return key_field["Value"]
def getRareItemFromEntry(entry):
    # Decode the rare-item slot (id, enum index, quantity, rate) of one
    # serialized drop-table row.  Property keys carry trailing NULs as
    # produced by the asset dump; the [1] element holds the parsed value.
    name = entry["Properties"]["RareItemId\x00"][1]["Value"]
    index = entry["Properties"]["RareItemId\x00"][1]["Index"]
    quantity = entry["Properties"]["RareItemQuantity\x00"][1]
    rate = entry["Properties"]["RareItemRate\x00"][1]
    return RareItem(name, index, quantity, rate)
def getCommonItemFromEntry(entry):
    # Decode the common-item slot of one serialized drop-table row.
    # Note the rate key is "CommonRate", not "CommonItemRate".
    name = entry["Properties"]["CommonItemId\x00"][1]["Value"]
    index = entry["Properties"]["CommonItemId\x00"][1]["Index"]
    quantity = entry["Properties"]["CommonItemQuantity\x00"][1]
    rate = entry["Properties"]["CommonRate\x00"][1]
    return CommonItem(name, index, quantity, rate)
def getRareIngredientFromEntry(entry):
    # Decode the rare-ingredient slot of one serialized drop-table row.
    name = entry["Properties"]["RareIngredientId\x00"][1]["Value"]
    index = entry["Properties"]["RareIngredientId\x00"][1]["Index"]
    quantity = entry["Properties"]["RareIngredientQuantity\x00"][1]
    rate = entry["Properties"]["RareIngredientRate\x00"][1]
    return RareIngredient(name, index, quantity, rate)
def getCommonIngredientFromEntry(entry):
    # Decode the common-ingredient slot of one serialized drop-table row.
    name = entry["Properties"]["CommonIngredientId\x00"][1]["Value"]
    index = entry["Properties"]["CommonIngredientId\x00"][1]["Index"]
    quantity = entry["Properties"]["CommonIngredientQuantity\x00"][1]
    rate = entry["Properties"]["CommonIngredientRate\x00"][1]
    return CommonIngredient(name, index, quantity, rate)
def getShardFromEntry(entry):
    # Decode the shard slot; shards always drop singly, hence quantity 1.
    name = entry["Properties"]["ShardId\x00"][1]["Value"]
    index = entry["Properties"]["ShardId\x00"][1]["Index"]
    rate = entry["Properties"]["ShardRate\x00"][1]
    return Shard(name, index, 1, rate)
def getCoinFromEntry(entry):
    # Decode the coin slot (type, enum index, rate, override amount).
    name = entry["Properties"]["CoinType\x00"][1]["Value"]
    index = entry["Properties"]["CoinType\x00"][1]["Index"]
    override = entry["Properties"]["CoinOverride\x00"][1]
    rate = entry["Properties"]["CoinRate\x00"][1]
    return Coin(name, index, rate, override)
def getAllFromEntry(entry):
    # Decode every drop slot of one serialized drop-table row at once;
    # the tuple order matches DropLocation's constructor arguments.
    name = getNameFromEntry(entry)
    shard = getShardFromEntry(entry)
    ritem = getRareItemFromEntry(entry)
    citem = getCommonItemFromEntry(entry)
    ring = getRareIngredientFromEntry(entry)
    cing = getCommonIngredientFromEntry(entry)
    coin = getCoinFromEntry(entry)
    return (name, shard, ritem, citem, ring, cing, coin)
class DropLocation():
    """One drop-table row: every drop slot tied to a named location."""

    def __init__(self, name, shard, rare_item, common_item, rare_ingredient, common_ingredient, coin):
        self.name = name
        self.shard = shard
        self.rare_item = rare_item
        self.common_item = common_item
        self.rare_ingredient = rare_ingredient
        self.common_ingredient = common_ingredient
        self.coin = coin

    def __repr__(self):
        fields = (self.name, self.shard, self.rare_item, self.common_item,
                  self.rare_ingredient, self.common_ingredient, self.coin)
        return "DropLocation(\n\t" + ",\n\t".join(str(f) for f in fields) + "\n)"
#Yield all chests
def allChests(locs):
    # Generator: every treasure-box location whose contents pass the
    # chest filter (i.e. are safe for the randomizer to shuffle).
    for loc in locs:
        if "Treasurebox" in loc.name and filterChests(loc):
            yield loc
#True: accept item into randomizer logic
#False: reject item from randomizer logic
def filterChests(loc):
    """Return True when a chest's items are safe to include in the
    randomizer logic, False when the chest must keep its contents.

    Stat upgrades and progression items are rejected so the randomizer
    cannot move or lose them.
    """
    # Substrings that mark a chest item as off-limits.
    bad_item_names = [
        "MaxHPUP", "MaxMPUP", "MaxBulletUP",  # Max HP/MP/Bullet upgrades
        "ChangeHP",       # Dunno what this is
        "Silverbromide",  # Progression item
        "SpikeBreast"     # Spike Aegis needed for progression, lock for now
    ]
    for name in bad_item_names:
        for slot in (loc.rare_item, loc.common_item):
            if name in slot.name["Value"]:
                print("Rejecting chest item: {}".format(name))
                return False
    return True
#Yield all shard entries
def allMobs(locs):
    # Generator: shard-bearing mob entries that pass the shard filter,
    # plus the carpenter treasure entries, which must stay in the pool.
    for loc in locs:
        if "_Shard" in loc.name and filterMobs(loc):
            yield loc
        other_good_names = [
            "_1ST_Treasure", #Carpenter
            "_2ND_Treasure" #Also Carpenter
        ]
        for other in other_good_names:
            if other in loc.name:
                yield loc
#True/False whether to include this specific shard in random pool
def filterMobs(loc):
    """Return True when a mob's shard drop may be randomized, False when
    the shard is progression-critical and must stay on its original mob.
    """
    progression_shard_names = [
        "Reflectionray",   # Reflect Ray
        "Dimensionshift",  # Dimension Shift
        "Invert",          # Invert
        "Doublejump",      # Double Jump
        "Demoniccapture",  # Craftwork
        "Aquastream",      # Only to make sure water access is available
        "Bloodsteel",      # Blood Steal
        "SwingTentacle",   # Galleon Minerva boss drop, must be valid
        "Ceruleansplash",  # really just to keep N3006_OpeningDemo's shard drop valid
    ]
    for fragment in progression_shard_names:
        if fragment in loc.shard.name["Value"]:
            print("Rejecting shard: {}".format(loc.shard.name))
            return False
    return True
def allWalls(locs):
    # Generator: every breakable-wall location whose drops pass the
    # wall filter.
    for loc in locs:
        if "Wall_" in loc.name and filterWalls(loc):
            yield loc
def filterWalls(loc):
    """Return True when a breakable-wall drop is safe to shuffle.

    Stat-upgrade items must stay put, mirroring filterChests.
    """
    bad_item_names = [
        # Max HP/MP/Bullet upgrades.  BUGFIX: "MaxMPUP" was previously
        # misspelled "MaxMPUp", which never matched the id spelling used
        # by filterChests, so MP upgrades in walls were not filtered.
        "MaxHPUP", "MaxMPUP", "MaxBulletUP",
        "ChangeHP",  # Dunno what this is
    ]
    for name in bad_item_names:
        if name in loc.rare_item.name["Value"]:
            print("Rejecting item: {}".format(name))
            return False
        if name in loc.common_item.name["Value"]:
            print("Rejecting item: {}".format(name))
            return False
    return True
class Patch():
    """A single binary edit: write ``value`` at byte ``offset``."""

    def __init__(self, offset, value):
        self.offset = offset  # absolute byte offset into the raw file
        self.value = value    # int or float payload to pack there

    def __repr__(self):
        return f"Patch(offset={self.offset}, value={self.value})"
def clearAllDrops(locs):
    # Build patches that blank out every drop slot of every location:
    # item/shard ids become the empty sentinel with zero quantity/rate,
    # and the coin slot becomes the empty coin at 100% rate.
    # NOTE(review): relies on module-level `empty_drop` / `empty_coin`
    # sentinels defined elsewhere in this file.
    patches = []
    for loc in locs:
        patches.append(Patch(loc.shard.index["offset"], empty_drop.index["Value"]))
        patches.append(Patch(loc.shard.rate["offset"], 0.0))
        patches.append(Patch(loc.rare_item.index["offset"], empty_drop.index["Value"]))
        patches.append(Patch(loc.rare_item.quantity["offset"], 0))
        patches.append(Patch(loc.rare_item.rate["offset"], 0.0))
        patches.append(Patch(loc.common_item.index["offset"], empty_drop.index["Value"]))
        patches.append(Patch(loc.common_item.quantity["offset"], 0))
        patches.append(Patch(loc.common_item.rate["offset"], 0.0))
        patches.append(Patch(loc.rare_ingredient.index["offset"], empty_drop.index["Value"]))
        patches.append(Patch(loc.rare_ingredient.quantity["offset"], 0))
        patches.append(Patch(loc.rare_ingredient.rate["offset"], 0.0))
        patches.append(Patch(loc.common_ingredient.index["offset"], empty_drop.index["Value"]))
        patches.append(Patch(loc.common_ingredient.quantity["offset"], 0))
        patches.append(Patch(loc.common_ingredient.rate["offset"], 0.0))
        patches.append(Patch(loc.coin.index["offset"], empty_coin.index["Value"]))
        patches.append(Patch(loc.coin.override["offset"], empty_coin.override["Value"]))
        patches.append(Patch(loc.coin.rate["offset"], 100.0))
    return patches
def assignShards(origs, news):
    # Overwrite each original location's shard slot (id + rate) with the
    # corresponding new shard; pairs are matched positionally via zip.
    patchset = []
    for orig, new in zip(origs,news):
        patchset.append( Patch(orig.shard.index["offset"], new.index["Value"]) )
        patchset.append( Patch(orig.shard.rate["offset"], new.rate["Value"]))
    return patchset
def assignRareItems(origs, news):
    """Copy rare-item index/quantity/rate from *news* onto the rare-item slot
    of the positionally-paired location in *origs*."""
    patchset = []
    for orig, new in zip(origs, news):
        slot = orig.rare_item
        patchset.extend([
            Patch(slot.index["offset"], new.index["Value"]),
            Patch(slot.quantity["offset"], new.quantity["Value"]),
            Patch(slot.rate["offset"], new.rate["Value"]),
        ])
    return patchset
def assignCommonItems(origs, news):
    """Copy common-item index/quantity/rate from *news* onto the common-item
    slot of the positionally-paired location in *origs*."""
    patchset = []
    for orig, new in zip(origs, news):
        slot = orig.common_item
        patchset.extend([
            Patch(slot.index["offset"], new.index["Value"]),
            Patch(slot.quantity["offset"], new.quantity["Value"]),
            Patch(slot.rate["offset"], new.rate["Value"]),
        ])
    return patchset
def assignRareIngredients(origs, news):
    """Copy rare-ingredient index/quantity/rate from *news* onto the
    rare-ingredient slot of the positionally-paired location in *origs*."""
    patchset = []
    for orig, new in zip(origs, news):
        slot = orig.rare_ingredient
        patchset.extend([
            Patch(slot.index["offset"], new.index["Value"]),
            Patch(slot.quantity["offset"], new.quantity["Value"]),
            Patch(slot.rate["offset"], new.rate["Value"]),
        ])
    return patchset
def assignCommonIngredients(origs, news):
    """Copy common-ingredient index/quantity/rate from *news* onto the
    common-ingredient slot of the positionally-paired location in *origs*."""
    patchset = []
    for orig, new in zip(origs, news):
        slot = orig.common_ingredient
        patchset.extend([
            Patch(slot.index["offset"], new.index["Value"]),
            Patch(slot.quantity["offset"], new.quantity["Value"]),
            Patch(slot.rate["offset"], new.rate["Value"]),
        ])
    return patchset
def assignCoins(origs, news):
    """Copy coin index/override/rate from *news* onto the coin slot of the
    positionally-paired location in *origs*.

    Coins with a 0.0 drop rate are skipped, leaving the cleared placeholder
    coin already written by ``clearAllDrops`` in place.
    """
    patchset = []
    for orig, new in zip(origs, news):
        if new.rate["Value"] == 0.0:
            continue
        patchset.extend([
            Patch(orig.coin.index["offset"], new.index["Value"]),
            Patch(orig.coin.override["offset"], new.override["Value"]),
            Patch(orig.coin.rate["offset"], new.rate["Value"]),
        ])
    return patchset
def applyPatches(raw, patches):
    """Apply *patches* to the raw uasset bytes and return the patched buffer.

    Values are packed in native byte order: ints as 4-byte ``"i"``, floats as
    4-byte ``"f"``.

    Args:
        raw: original file contents as ``bytes``.
        patches: iterable of objects with an ``offset`` (int) and a ``value``
            (int or float) attribute, e.g. :class:`Patch`.

    Returns:
        A ``memoryview`` over the patched bytes.

    Raises:
        NotImplementedError: if a patch value is neither int nor float.
    """
    stream = BytesIO(raw)
    for patch in patches:
        stream.seek(patch.offset)
        if isinstance(patch.value, int):
            stream.write(struct.pack("i", patch.value))
        elif isinstance(patch.value, float):
            stream.write(struct.pack("f", patch.value))
        else:
            # Bug fix: report the offending *value* type.  Previously this
            # raised type(patch.offset), which is always int and made the
            # error message useless for debugging bad patch values.
            raise NotImplementedError(type(patch.value))
    return stream.getbuffer()
#Set drop rates to 100% for mobs that can only be fought once
#TODO: Untested!
def handleNonRepeatableMobs(locs):
    """Return patches that force a 100% drop rate on every item/shard slot of
    each one-time-only mob location found in *locs*."""
    # Mob IDs that can only ever be fought once.
    one_shot_mobs = ('N1001', 'N1011', 'N1003', 'N2004', 'N1005',
                     'N2001', 'N1006', 'N1012', 'N1002', 'N2014',
                     'N2007', 'N2006', 'N1004', 'N1008', 'N1009',
                     'N1013', 'N2012')
    patchset = []
    for loc in locs:
        for mob_id in one_shot_mobs:
            if mob_id not in loc.name:
                continue
            for slot in (loc.shard, loc.common_item, loc.rare_item,
                         loc.common_ingredient, loc.rare_ingredient):
                patchset.append(Patch(slot.rate["offset"], 100.0))
    return patchset
if __name__ == "__main__":
    # Entry point: parse args, dump the original uasset to JSON, shuffle every
    # drop across all locations, then write the patched uasset and pack a mod.
    import argparse
    import os
    from uasset_dt_to_json import dumper as udump
    parser = argparse.ArgumentParser( \
        description="Bloodstained drop randomizer",
        usage="%(prog)s --input [infile]"
    )
    parser.add_argument("--debug", help="Enable debug output", action='store_true', default=False)
    parser.add_argument("--input", help="Original 'PB_DT_DropRateMaster.uasset' file", \
        action='store', required=True)
    parser.add_argument("--seed", help="Seed for randomizer", action='store', default=random.random())
    #Parse arguments
    args = parser.parse_args()
    #Create JSON from original input file
    with open(args.input, "rb") as original_file:
        uasset = udump.UAsset(original_file)
        items = [udump.Item(obj) for obj in uasset.Summary.Exports[0].Object.ObjectData.Data]
        drop_rate_master = json.loads(json.dumps(items, cls=udump.UAssetEncoder))
    #Set random seed
    random.seed(args.seed)
    #get all possible locations with associated drops
    all_locations = [DropLocation(*getAllFromEntry(entry)) for entry in drop_rate_master]
    #get just chests
    all_chests = [loc for loc in allChests(all_locations)]
    #get just mobs
    all_mobs = [loc for loc in allMobs(all_locations)]
    #get just walls
    all_walls = [loc for loc in allWalls(all_locations)]
    #Find empty/low drops to use if needed.
    #Since they can be copied endlessly without breaking anything it's a safe default drop. Usually.
    #find empty coin to copy into all chests without a valid drop
    #FIXME: empty coin still screws up, using low-value coin instead
    empty_coin = [c.coin for c in all_chests if "D10\u0000" in c.coin.name["Value"]][0]
    #find empty drop
    empty_drop = [e.common_item for e in all_chests if "None" in e.common_item.name["Value"]][0]
    #Get list of all locations to be entered into the randomization pool
    combined = all_chests + all_mobs + all_walls
    #list of patches to apply to the final file
    patches = []
    #Clear all drop slots
    patches += clearAllDrops(combined)
    #Get all items
    shards = [loc.shard for loc in combined]
    rare_items = [loc.rare_item for loc in combined]
    common_items = [loc.common_item for loc in combined]
    rare_ingredients = [loc.rare_ingredient for loc in combined]
    common_ingredients = [loc.common_ingredient for loc in combined]
    coins = [loc.coin for loc in combined]
    #shuffle them all around
    random.shuffle(shards)
    random.shuffle(rare_items)
    random.shuffle(common_items)
    random.shuffle(rare_ingredients)
    random.shuffle(common_ingredients)
    random.shuffle(coins)
    #shuffle locations
    random.shuffle(combined)
    #re-assign random shards to first len(shards) locations
    patches += assignShards(combined[: len(shards)], shards)
    #'' '' '' first len(rare_items) locations
    patches += assignRareItems(combined[: len(rare_items)], rare_items)
    #etc etc
    patches += assignCommonItems(combined[: len(common_items)], common_items)
    patches += assignRareIngredients(combined[: len(rare_ingredients)], rare_ingredients)
    patches += assignCommonIngredients(combined[: len(common_ingredients)], common_ingredients)
    patches += assignCoins(combined[: len(coins)], coins)
    #Should result in all shards/items/coins being re-assigned to somewhere.
    #Does nothing to guarantee things intended to be re-aquired like ingredients are infinitely available.
    #For mobs that are single-fight only, set drop rates to 100% for any none-None items/shards
    #TODO: UNTESTED
    patches += handleNonRepeatableMobs(combined)
    #with open("PB_DT_DropRateMaster.uasset", "rb") as file:
    with open(args.input, "rb") as file:
        raw = file.read()
    mod = applyPatches(raw, patches)
    #NOTE(review): not a raw string; "\R", "\B" etc. survive only because
    #Python passes unknown escapes through -- consider r"..." (confirm).
    outputfile = "unrealpak\Randomizer\BloodstainedRotN\Content\Core\DataTable\PB_DT_DropRateMaster.uasset"
    with open(outputfile, "wb") as file:
        file.write(mod)
    #create mod .pak file
    os.system(r".\unrealpak\UnrealPak-With-Compression.bat Randomizer")
    os.system(r"move .\unrealpak\Randomizer.pak .")
    sys.exit()
|
nilq/baby-python
|
python
|
import os
from pytest import fixture
from zpz.filesys.path import relative_path
from zpz.spark import PySparkSession, ScalaSparkSession, SparkSession, SparkSessionError
# Livy server endpoint used by every session fixture below.
# NOTE(review): left as None here -- presumably must be set to a real URL
# before these tests can run; confirm against the zpz.spark constructors.
livy_server_url = None

@fixture(scope='module')
def pysession():
    """Module-scoped PySpark (Livy) session fixture."""
    return PySparkSession(livy_server_url)

@fixture(scope='module')
def scalasession():
    """Module-scoped Scala Spark (Livy) session fixture."""
    return ScalaSparkSession(livy_server_url)
pi_py = """\
import random
NUM_SAMPLES = 100000
def sample(p):
x, y = random.random(), random.random()
return 1 if x*x + y*y < 1 else 0
count = sc.parallelize(range(0, NUM_SAMPLES)).map(sample).reduce(lambda a, b: a + b)
pi = 4.0 * count / NUM_SAMPLES
mylist = [1, 3, 'abc']
mytuple = ('a', 'b', 'c', 1, 2, 3)
mydict = {'a': 13, 'b': 'usa'}
# spark 2.0
# from pyspark.sql import Row
# pi_df = spark.createDataFrame([Row(value=pi)])
# spark 1.6:
from pyspark.sql import SQLContext, Row
pi_df = SQLContext(sc).createDataFrame([Row(value=pi)])
"""
def test_py(pysession):
    """Round-trip scalars, containers, and a DataFrame through the remote
    PySpark session via run()/read()."""
    print()
    pysession.run('z = 1 + 3')
    z = pysession.read('z')
    assert z == 4
    pysession.run(pi_py)
    pi = pysession.read('pi')
    print('printing a number:')
    print(pi)
    assert 3.0 < pi < 3.2
    code = '''pip2 = pi + 2'''
    pysession.run(code)
    pip2 = pysession.read('pip2')
    assert 3.0 < pip2 - 2 < 3.2
    # containers survive the round trip with their Python types intact
    mylist = pysession.read('mylist')
    assert mylist == [1, 3, 'abc']
    mytuple = pysession.read('mytuple')
    assert mytuple == ('a', 'b', 'c', 1, 2, 3)
    mydict = pysession.read('mydict')
    assert mydict == {'a': 13, 'b': 'usa'}
    # Spark DataFrame comes back as a local (pandas-style) frame
    local_df = pysession.read('pi_df')
    print()
    print('printing a {}:'.format(type(local_df)))
    print(local_df)
    pi = local_df.iloc[0, 0]
    assert 3.0 < pi < 3.2
    # read() also evaluates bare expressions
    assert pysession.read('3 + 6') == 9
    print()
    print('printing in Spark session:')
    z = pysession.run('''print(type(pip2))''')
    # `run` does not print.
    # printouts in Spark are collected in the return of `run`.
    print(z)
    # `str` comes out as `str`
    print()
    print(pysession.read('str(type(pi))'))
    print(pysession.read('type(pi_df).__name__'))
    # `bool` comes out as `bool`
    z = pysession.read('''isinstance(pi, float)''')
    print()
    print('printing boolean:')
    print(z)
    print(type(z))
    assert z is True
    assert pysession.read('str(isinstance(pi, float))') == 'True'
    # `bool` comes out as `numpy.bool_`
    # assert session.read(
    #     '''isinstance(pi_df, pyspark.sql.dataframe.DataFrame)''')
py_error = """\
class MySparkError(Exception):
pass
a = 3
b = 4
raise MySparkError('some thing is so wrong!)
print('abcd')
"""
def test_py_error(pysession):
    """A payload that fails remotely should surface SparkSessionError."""
    try:
        pysession.run(py_error)
    except SparkSessionError as err:
        print(err)
def test_file(pysession):
    """run_file executes a local script remotely; the script defines ``magic``
    which we read back and range-check."""
    pysession.run_file(relative_path('./spark_test_scripts/script_a.py'))
    z = pysession.read('magic')
    assert 6.0 < z < 7.0
def test_func(pysession):
    """run_function calls a remotely-defined function with positional and
    keyword arguments and marshals the dict result back."""
    # Source of the function to define inside the Spark session.
    f = '''\
def myfunc(a, b, names, squared=False):
    assert len(a) == 3
    assert len(b) == 3
    assert len(names) == 3
    c = [aa + bb for (aa, bb) in zip(a, b)]
    if squared:
        c = [x*x for x in c]
    d = {k:v for (k,v) in zip(names, c)}
    return d
'''
    pysession.run(f)
    z = pysession.run_function('myfunc', [1, 2, 3], [4, 6, 8], [
        'first', 'second', 'third'])
    assert {k: z[k]
            for k in sorted(z)} == {'first': 5, 'second': 8, 'third': 11}
    z = pysession.run_function('myfunc', [1, 2, 3], [4, 6, 8], squared=True, names=[
        'first', 'second', 'third'])
    assert {k: z[k]
            for k in sorted(z)} == {'first': 25, 'second': 64, 'third': 121}
pi_scala = """
val NUM_SAMPLES = 100000;
val count = sc.parallelize(1 to NUM_SAMPLES).map { i =>
val x = Math.random();
val y = Math.random();
if (x*x + y*y < 1) 1 else 0
}.reduce(_ + _);
val pi = 4.0 * count / NUM_SAMPLES;
println(\"Pi is roughly \" + pi)
"""
def test_scala(scalasession):
    """The Scala session returns REPL-formatted results as strings."""
    z = scalasession.run('1 + 1')
    assert z == 'res0: Int = 2'
    z = scalasession.run(pi_scala)
    assert 'Pi is roughly 3.1' in z
scala_error = """
val NUM = 1000
val count = abc.NUM
"""
def test_scala_error(scalasession):
    """A payload with a remote compile error should surface SparkSessionError."""
    try:
        scalasession.run(scala_error)
    except SparkSessionError as err:
        print(err)
def test_pyspark():
    """A generic SparkSession with kind='pyspark' returns raw string results."""
    session = SparkSession(livy_server_url, kind='pyspark')
    assert session.run('1 + 1') == '2'
    result = session.run('import math; math.sqrt(2.0)')
    assert result.startswith('1.4142')
|
nilq/baby-python
|
python
|
from benchbuild.projects.benchbuild.group import BenchBuildGroup
from benchbuild.utils.wrapping import wrap
from benchbuild.settings import CFG
from benchbuild.utils.compiler import lt_clang, lt_clang_cxx
from benchbuild.utils.downloader import Git
from benchbuild.utils.run import run
from benchbuild.utils.versions import get_git_hash
from plumbum import local
from benchbuild.utils.cmd import make, mkdir, tar
from functools import partial
from os import path
class SpiderMonkey(BenchBuildGroup):
    """
    SpiderMonkey requires a legacy version of autoconf: autoconf-2.13
    """

    NAME = 'js'
    DOMAIN = 'compilation'

    src_uri = "https://github.com/mozilla/gecko-dev.git"
    src_dir = "gecko-dev.git"
    version = get_git_hash(src_uri)
    # VERSION is the short (at most 7-char) commit hash, or None when the
    # hash could not be determined.
    # Bug fix: compare against None with ``is``, not ``==`` (PEP 8); the
    # len<=7 / truncate branches collapse to a single slice since slicing
    # never lengthens the string.
    if version is None:
        VERSION = None
    else:
        VERSION = str(version)[:7]

    def download(self):
        """Clone the gecko-dev repository into src_dir."""
        Git(self.SRC_FILE, self.src_dir)

    def configure(self):
        """Build the mozjs source package and configure it with the
        experiment's instrumented clang/clang++ wrappers."""
        js_dir = path.join(self.src_dir, "js", "src")
        clang = lt_clang(self.cflags, self.ldflags, self.compiler_extension)
        clang_cxx = lt_clang_cxx(self.cflags, self.ldflags,
                                 self.compiler_extension)
        with local.cwd(js_dir):
            make_src_pkg = local["./make-source-package.sh"]
            # Pin the package version to 0.0.0 so later steps can rely on
            # the "mozjs-0.0.0" directory/tarball name.
            with local.env(DIST=self.builddir,
                           MOZJS_MAJOR_VERSION=0,
                           MOZJS_MINOR_VERSION=0,
                           MOZJS_PATCH_VERSION=0):
                make_src_pkg()
        mozjs_dir = "mozjs-0.0.0"
        tar("xfj", mozjs_dir + ".tar.bz2")
        with local.cwd(path.join(mozjs_dir, "js", "src")):
            mkdir("obj")
            # SpiderMonkey's configure script requires legacy autoconf-2.13.
            autoconf = local["autoconf-2.13"]
            autoconf()
            with local.cwd("obj"):
                with local.env(CC=str(clang),
                               CXX=str(clang_cxx)):
                    configure = local["../configure"]
                    run(configure)

    def build(self):
        """Compile the configured mozjs tree."""
        mozjs_dir = path.join("mozjs-0.0.0", "js", "src", "obj")
        with local.cwd(mozjs_dir):
            run(make["-j", CFG["jobs"].value()])

    def run_tests(self, experiment, run):
        """Wrap the js shell with the experiment and run the jstests suite."""
        mozjs_dir = path.join("mozjs-0.0.0", "js", "src", "obj")
        wrap(path.join(mozjs_dir, "js", "src", "shell", "js"),
             partial(experiment, may_wrap=False))
        with local.cwd(mozjs_dir):
            run(make["check-jstests"])
|
nilq/baby-python
|
python
|
# Elaine Laguerta (github: @elaguerta)
# LBNL GIG
# File created: 28 May 2021
# Smell tests to verify Solution API functions
from gigpower.solution import Solution
from gigpower.solution_dss import SolutionDSS
from gigpower.solution_fbs import SolutionFBS
from gigpower.solution_nr3 import SolutionNR3
from gigpower.utils import get_nominal_bus_powers
import pytest
from pathlib import Path
import opendssdirect as dss
import pandas as pd
# Directory holding the IEEE test-feeder .dss files used below.
DSS_FILE_DIR = Path('./tests/test_feeders/')

# Every test in the class runs once per (feeder file, solver class) pair.
@pytest.mark.parametrize(
    "dss_file",
    [
        ('IEEE_13_Bus_allwye.dss'),
        ('IEEE_13_Bus_allwye_noxfm_noreg.dss'),
        ('IEEE_34_Bus_allwye.dss'),
        ('IEEE_34_Bus_allwye_noxfm_noreg.dss'),
        ('IEEE_37_Bus_allwye.dss'),
        ('IEEE_37_Bus_allwye_noxfm_noreg.dss')
    ]
)
@pytest.mark.parametrize(
    "algorithm",
    [
        (SolutionNR3),
        (SolutionFBS),
        (SolutionDSS)
    ]
)
class TestSolutionDFs:
    def get_solution(self, dss_file, algorithm):
        # Helper (not a test): build and solve the given feeder with the
        # given solver class.
        fp = str(Path(DSS_FILE_DIR, dss_file))
        solution = algorithm(str(fp))
        solution.solve()
        return solution

    def test_dfs(self, dss_file, algorithm):
        """
        Run calls to get Solution.V, Solution.I, Solution.sV, Solution.VMag
        as data frames
        """
        solution = self.get_solution(dss_file, algorithm)
        for param in Solution.SOLUTION_PARAMS:
            df = solution.get_data_frame(param)
            pytest.assume(not(df.empty))  # make sure df is not empty

    def test_dfs_orient(self, dss_file, algorithm):
        """
        Run calls to get solution params (Solution.V, Solution.I, Solution.sV,
        Solution.VMag, Solution.Stx, Solution.Srx)
        as data frames with both orientations (rows, columns) and make sure
        that they have transposed shapes
        """
        solution = self.get_solution(dss_file, algorithm)
        for param in Solution.SOLUTION_PARAMS:
            df_rows = solution.get_data_frame(param, orient='rows')
            df_cols = solution.get_data_frame(param, orient='cols')
            pytest.assume(df_rows.shape[-1::-1] == df_cols.shape)
            # check that 3 phases are oriented correctly
            pytest.assume(df_rows.shape[1] == 3)
            pytest.assume(df_cols.shape[0] == 3)

    def test_nominals(self, dss_file, algorithm):
        """
        Make sure that Circuit class's nominal powers match those from
        opendss' api
        """
        solution = self.get_solution(dss_file, algorithm)
        solution_nominals = solution.get_nominal_bus_powers(orient='rows')
        # get a fresh dss object for each new dss file
        fp = str(Path(DSS_FILE_DIR, dss_file))
        dss.run_command('Redirect ' + fp)
        dss.Solution.Solve()
        dss_nominals = get_nominal_bus_powers(dss)
        pd.testing.assert_frame_equal(solution_nominals, dss_nominals)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Django Models documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 29 06:50:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.insert(0, PROJECT_ROOT)
from django_models import __version__
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
"sphinx.ext.intersphinx",
"sphinx.ext.ifconfig",
"sphinx.ext.graphviz",
"sphinx.ext.githubpages",
"sphinxjp.themes.basicstrap",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Django Models'
copyright = '2021, Rafael Henter'
author = 'Rafael Henter'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'pt_BR'
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
pygments_style = None
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# if not on_rtd:
# import sphinx_rtd_theme
#
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- Options for HTML output -------------------------------------------------
html_theme = 'basicstrap'
html_theme_options = {
# Set the lang attribute of the html tag. Defaults to 'en'
'lang': language,
# Disable showing the sidebar. Defaults to 'false'
'nosidebar': False,
# Show the header search box. Defaults to false; works only when "nosidebar=True",
'header_searchbox': False,
# Put the sidebar on the right side. Defaults to false.
'rightsidebar': False,
# Set the width of the sidebar. Defaults to 3
'sidebar_span': 3,
# Fix navbar to top of screen. Defaults to true
'nav_fixed_top': True,
# Fix the width of the sidebar. Defaults to false
'nav_fixed': False,
# Set the width of the sidebar. Defaults to '900px'
'nav_width': '900px',
# Fix the width of the content area. Defaults to false
'content_fixed': False,
# Set the width of the content area. Defaults to '900px'
'content_width': '900px',
# Fix the width of the row. Defaults to false
'row_fixed': False,
# Disable the responsive design. Defaults to false
'noresponsive': False,
# Disable the responsive footer relbar. Defaults to false
'noresponsiverelbar': False,
# Disable flat design. Defaults to false.
# Works only "bootstrap_version = 3"
'noflatdesign': False,
# Enable Google Web Font. Defaults to false
'googlewebfont': False,
# Set the URL of Google Web Font's CSS.
# Defaults to 'http://fonts.googleapis.com/css?family=Text+Me+One'
'googlewebfont_url': 'http://fonts.googleapis.com/css?family=Lily+Script+One', # NOQA
# Set the Style of Google Web Font's CSS.
# Defaults to "font-family: 'Text Me One', sans-serif;"
'googlewebfont_style': u"font-family: 'Lily Script One' cursive;",
# Set 'navbar-inverse' attribute to header navbar. Defaults to false.
'header_inverse': False,
# Set 'navbar-inverse' attribute to relbar navbar. Defaults to false.
'relbar_inverse': False,
# Enable inner theme by Bootswatch. Defaults to false
'inner_theme': False,
# Set the name of the inner theme. Defaults to 'bootswatch-simplex'
'inner_theme_name': 'bootswatch-simplex',
# Select Twitter bootstrap version 2 or 3. Defaults to '3'
'bootstrap_version': '3',
# Show "theme preview" button in header navbar. Defaults to false.
'theme_preview': False,
# Set the Size of Heading text. Defaults to None
# 'h1_size': '3.0em',
# 'h2_size': '2.6em',
# 'h3_size': '2.2em',
# 'h4_size': '1.8em',
# 'h5_size': '1.4em',
# 'h6_size': '1.1em',
}
# html_sidebars = {"**": ["sidebar.html"]}
show_related = True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# ]
# }
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-api-client-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoApiClient.tex', 'Django Models Documentation',
'Rafael Henter', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'DjangoApiClient', 'Django Models Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoApiClient', 'Django Models Documentation',
author, 'DjangoApiClient',
'Django Models is a client for APIs in general, which allows iterating with the API as if they were using a Local model in their project, through a client and Custom CBV (Class based Views).',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
nilq/baby-python
|
python
|
load("@bazel_tools//tools/jdk:toolchain_utils.bzl", "find_java_runtime_toolchain", "find_java_toolchain")
def _proto_path(proto):
    """
    The proto path is not really a file path
    It's the path to the proto that was seen when the descriptor file was generated.
    """
    result = proto.path
    # Strip the generated-files root, then the workspace root, removing a
    # leading "/" after each strip.
    for prefix in (proto.root.path, proto.owner.workspace_root):
        if result.startswith(prefix):
            result = result[len(prefix):]
        if result.startswith("/"):
            result = result[1:]
    return result
def _protoc_cc_output_files(proto_file_sources):
    """Names of the .pb/.pb.validate headers and sources protoc will emit,
    headers first, in source order."""
    hdrs = []
    srcs = []
    for proto in proto_file_sources:
        stem = proto.basename[:-len(".proto")]
        hdrs += [stem + ".pb.h", stem + ".pb.validate.h"]
        srcs += [stem + ".pb.cc", stem + ".pb.validate.cc"]
    return hdrs + srcs
def _proto_sources(ctx):
    """Direct .proto sources of every dep's ProtoInfo, flattened in dep order."""
    return [f for dep in ctx.attr.deps for f in dep[ProtoInfo].direct_sources]
def _output_dir(ctx):
    """Genfiles output root, extended with the workspace root when set."""
    if ctx.label.workspace_root:
        return ctx.genfiles_dir.path + "/" + ctx.label.workspace_root
    return ctx.genfiles_dir.path
def _protoc_gen_validate_cc_impl(ctx):
    """Generate C++ protos using protoc-gen-validate plugin"""
    protos = _proto_sources(ctx)
    dir_out = _output_dir(ctx)
    out_files = [
        ctx.actions.declare_file(name)
        for name in _protoc_cc_output_files(protos)
    ]
    return _protoc_gen_validate_impl(
        ctx = ctx,
        lang = "cc",
        protos = protos,
        out_files = out_files,
        protoc_args = [
            "--cpp_out=" + dir_out,
            "--validate_out=lang=cc:" + dir_out,
        ],
        package_command = "true",
    )
def _protoc_python_output_files(proto_file_sources):
    """Names of the *_pb2.py modules protoc will emit for the given sources.

    protoc turns dashes in proto file names into underscores when forming the
    generated module name, so we mirror that here.
    """
    python_srcs = []
    for p in proto_file_sources:
        basename = p.basename[:-len(".proto")]
        # Bug fix: replace() accepts no `maxsplit` keyword in Python or
        # Starlark; the old call failed at analysis time.  Replace every
        # dash unconditionally.
        python_srcs.append(basename.replace("-", "_") + "_pb2.py")
    return python_srcs
def _protoc_gen_validate_python_impl(ctx):
    """Generate Python protos using protoc-gen-validate plugin"""
    protos = _proto_sources(ctx)
    dir_out = _output_dir(ctx)
    out_files = [
        ctx.actions.declare_file(name)
        for name in _protoc_python_output_files(protos)
    ]
    return _protoc_gen_validate_impl(
        ctx = ctx,
        lang = "python",
        protos = protos,
        out_files = out_files,
        protoc_args = ["--python_out=" + dir_out],
        package_command = "true",
    )
def _protoc_gen_validate_impl(ctx, lang, protos, out_files, protoc_args, package_command):
    # Shared driver: runs protoc with the protoc-gen-validate plugin for the
    # given language, then runs `package_command` in the same shell to
    # post-process the generated files ("true" means no post-processing).
    protoc_args.append("--plugin=protoc-gen-validate=" + ctx.executable._plugin.path)
    dir_out = ctx.genfiles_dir.path
    if ctx.label.workspace_root:
        dir_out += "/" + ctx.label.workspace_root
    # Transitive descriptor sets let protoc resolve imports without needing
    # the imported .proto source files on disk.
    tds = depset([], transitive = [dep[ProtoInfo].transitive_descriptor_sets for dep in ctx.attr.deps])
    descriptor_args = [ds.path for ds in tds.to_list()]
    if len(descriptor_args) != 0:
        protoc_args += ["--descriptor_set_in=%s" % ctx.configuration.host_path_separator.join(descriptor_args)]
    package_command = package_command.format(dir_out = dir_out)
    ctx.actions.run_shell(
        outputs = out_files,
        inputs = protos + tds.to_list(),
        tools = [ctx.executable._plugin, ctx.executable._protoc],
        command = " && ".join([
            ctx.executable._protoc.path + " $@",
            package_command,
        ]),
        arguments = protoc_args + [_proto_path(proto) for proto in protos],
        mnemonic = "ProtoGenValidate" + lang.capitalize() + "Generate",
        use_default_shell_env = True,
    )
    # Legacy struct provider exposing the generated files.
    return struct(
        files = depset(out_files),
    )
# Rule: generate C++ sources (pb + pb.validate) from proto_library deps.
cc_proto_gen_validate = rule(
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            providers = [ProtoInfo],
        ),
        # protoc compiler, built for the host.
        "_protoc": attr.label(
            cfg = "host",
            default = Label("@com_google_protobuf//:protoc"),
            executable = True,
            allow_single_file = True,
        ),
        # protoc-gen-validate plugin binary.
        "_plugin": attr.label(
            cfg = "host",
            default = Label("@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate"),
            allow_files = True,
            executable = True,
        ),
    },
    output_to_genfiles = True,
    implementation = _protoc_gen_validate_cc_impl,
)
# Provider used by the Java aspect to carry generated source jars up the
# proto dependency chain.
_ProtoValidateSourceInfo = provider(
    fields = {
        "sources": "Depset of sources created by protoc with protoc-gen-validate plugin",
    },
)
def _create_include_path(include):
    # Map an imported proto to a --proto_path=<virtual path>=<real path> flag.
    return "--proto_path={0}={1}".format(_proto_path(include), include.path)
def _java_proto_gen_validate_aspect_impl(target, ctx):
    # Aspect step: run protoc-gen-validate on one proto_library target and
    # collect the resulting source jar plus those of its deps.
    proto_info = target[ProtoInfo]
    includes = proto_info.transitive_imports
    srcs = proto_info.direct_sources
    options = ",".join(["lang=java"])
    srcjar = ctx.actions.declare_file("%s-validate-gensrc.jar" % ctx.label.name)
    args = ctx.actions.args()
    args.add(ctx.executable._plugin.path, format = "--plugin=protoc-gen-validate=%s")
    # protoc writes the generated Java sources directly into the .srcjar.
    args.add("--validate_out={0}:{1}".format(options, srcjar.path))
    args.add_all(includes, map_each = _create_include_path)
    args.add_all(srcs, map_each = _proto_path)
    ctx.actions.run(
        inputs = depset(transitive = [proto_info.transitive_imports]),
        outputs = [srcjar],
        executable = ctx.executable._protoc,
        arguments = [args],
        tools = [ctx.executable._plugin],
        progress_message = "Generating %s" % srcjar.path,
    )
    # Merge this target's srcjar with those propagated from its deps.
    return [_ProtoValidateSourceInfo(
        sources = depset(
            [srcjar],
            transitive = [dep[_ProtoValidateSourceInfo].sources for dep in ctx.rule.attr.deps],
        ),
    )]
# Aspect that walks proto_library deps and generates validation source jars.
_java_proto_gen_validate_aspect = aspect(
    _java_proto_gen_validate_aspect_impl,
    provides = [_ProtoValidateSourceInfo],
    attr_aspects = ["deps"],
    attrs = {
        "_protoc": attr.label(
            cfg = "host",
            default = Label("@com_google_protobuf//:protoc"),
            executable = True,
            allow_single_file = True,
        ),
        "_plugin": attr.label(
            cfg = "host",
            default = Label("@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate"),
            allow_files = True,
            executable = True,
        ),
    },
)
def _java_proto_gen_validate_impl(ctx):
    # Compile every aspect-generated source jar into one Java library.
    source_jars = [source_jar for dep in ctx.attr.deps for source_jar in dep[_ProtoValidateSourceInfo].sources.to_list()]
    # Non-strict deps: generated code references transitive proto classes.
    deps = [java_common.make_non_strict(dep[JavaInfo]) for dep in ctx.attr.java_deps]
    deps += [dep[JavaInfo] for dep in ctx.attr._validate_deps]
    java_info = java_common.compile(
        ctx,
        source_jars = source_jars,
        deps = deps,
        output_source_jar = ctx.outputs.srcjar,
        output = ctx.outputs.jar,
        java_toolchain = find_java_toolchain(ctx, ctx.attr._java_toolchain),
        host_javabase = find_java_runtime_toolchain(ctx, ctx.attr._host_javabase),
    )
    return [java_info]
"""Bazel rule to create a Java protobuf validation library from proto sources files.
Args:
deps: proto_library rules that contain the necessary .proto files
java_deps: the java_proto_library of the protos being compiled.
"""
java_proto_gen_validate = rule(
attrs = {
"deps": attr.label_list(
providers = [ProtoInfo],
aspects = [_java_proto_gen_validate_aspect],
mandatory = True,
),
"java_deps": attr.label_list(
providers = [JavaInfo],
mandatory = True,
),
"_validate_deps": attr.label_list(
default = [
Label("@com_envoyproxy_protoc_gen_validate//validate:validate_java"),
Label("@com_google_re2j//jar"),
Label("@com_google_protobuf//:protobuf_java"),
Label("@com_google_protobuf//:protobuf_java_util"),
Label("@com_envoyproxy_protoc_gen_validate//java/pgv-java-stub/src/main/java/io/envoyproxy/pgv"),
Label("@com_envoyproxy_protoc_gen_validate//java/pgv-java-validation/src/main/java/io/envoyproxy/pgv"),
],
),
"_java_toolchain": attr.label(default = Label("@bazel_tools//tools/jdk:current_java_toolchain")),
"_host_javabase": attr.label(
cfg = "host",
default = Label("@bazel_tools//tools/jdk:current_host_java_runtime"),
),
},
fragments = ["java"],
provides = [JavaInfo],
outputs = {
"jar": "lib%{name}.jar",
"srcjar": "lib%{name}-src.jar",
},
implementation = _java_proto_gen_validate_impl,
)
# Rule: generate Python (*_pb2.py) sources from proto_library deps.
python_proto_gen_validate = rule(
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            # Consistency fix: require the modern ProtoInfo provider object,
            # matching cc_proto_gen_validate above; the implementation already
            # reads dep[ProtoInfo], so the legacy "proto" string form was
            # inconsistent with actual usage.
            providers = [ProtoInfo],
        ),
        # protoc compiler, built for the host.
        "_protoc": attr.label(
            cfg = "host",
            default = Label("@com_google_protobuf//:protoc"),
            executable = True,
            allow_single_file = True,
        ),
        # protoc-gen-validate plugin binary.
        "_plugin": attr.label(
            cfg = "host",
            default = Label("@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate"),
            allow_files = True,
            executable = True,
        ),
    },
    output_to_genfiles = True,
    implementation = _protoc_gen_validate_python_impl,
)
|
nilq/baby-python
|
python
|
"""
Methods for working with releases, including the releaseObject class
definition live here.
"""
# standard library imports
from datetime import datetime, timedelta
import json
# second party imports
from bson import json_util
from bson.objectid import ObjectId
import flask
import pymongo
# local imports
from app import API, models, utils
def public_router(action):
    """ Our "broker" method for accepting public API requests to perform an
    action. The endpoints we support here are relatively basic, but we do
    support one that handles OIDs, so that gets kind of sticky.

    Args:
        action (str): The endpoint name, or (for single-release lookups)
            a release OID string.

    Returns:
        flask.Response: JSON for known endpoints, 404 for an unknown OID,
            405 for anything else.
    """
    # set platforms first, since actions below depend on knowing what platforms
    # we support
    platforms = []
    for key, app_dict in API.config['KEYS'].items():
        platforms.append(
            {
                'app': app_dict['owner'],
                'api_key': key
            }
        )

    # 1.) first handle misc./public actions that return lists
    output = None
    if action in ['dump', 'releases', 'all']:
        output = list(utils.mdb.releases.find().sort('created_on', -1))
    elif action in ['latest', 'current']:
        if flask.request.method == 'POST':
            # POST: caller names one platform; output stays None (-> 405
            # below) if the 'platform' key is absent
            platform = flask.request.get_json().get('platform', None)
            if platform is not None:
                output = utils.mdb.releases.find_one(
                    {'platform': platform, 'published': True},
                    sort=[('published_on', pymongo.DESCENDING)]
                )
        else:
            # GET: newest published release for every known platform
            output = []
            for platform in platforms:
                latest = utils.mdb.releases.find_one(
                    {'platform': platform['app'], 'published': True},
                    sort=[('published_on', pymongo.DESCENDING)]
                )
                if latest is not None:
                    output.append(latest)
    elif action in ['upcoming']:
        # releases that are explicitly unpublished or never published
        output = []
        for platform in platforms:
            upcoming = utils.mdb.releases.find(
                {
                    'platform': platform['app'],
                    '$or': [
                        {'published': False},
                        {'published': None}
                    ],
                },
                sort=[('created_on', pymongo.DESCENDING)]
            )
            if upcoming is not None:
                output.extend(upcoming)
    elif action == 'platforms':
        output = platforms

    if output is not None:
        return flask.Response(
            json.dumps(output, default=json_util.default),
            status=200,
            mimetype="application/json"
        )

    # finally, check and see if we're looking for a specific release.
    # Bug fix: validate the OID string *before* constructing an ObjectId --
    # the previous code called ObjectId(action) unconditionally, which raises
    # bson.errors.InvalidId for arbitrary strings and made the 405 below
    # unreachable.
    if ObjectId.is_valid(action):
        record = utils.mdb.releases.find_one({'_id': ObjectId(action)})
        if record is not None:
            return flask.Response('got it!', 200)
        return flask.Response('Release not found!', 404)

    err = "'%s' method not allowed!" % action
    return flask.Response(err, status=405)
def private_router(action):
    """ The private version of the previous method. This one handles routes
    where we require, at a minimum, a user that is recognized by the API as a
    registered user. We also check to see if they're an admin. """

    def _json_response(payload):
        # serialize with bson's json_util so ObjectIds/datetimes survive
        return flask.Response(
            json.dumps(payload, default=json_util.default),
            status=200,
            mimetype="application/json"
        )

    # we need to be an admin to get into here
    if not flask.request.User.user.get('admin', False):
        return utils.http_403

    if action == 'new':
        return _json_response(releaseObject().record)

    # JSON is required below, so sanity check for it here:
    request_json = flask.request.get_json()
    if request_json is None:
        err = (
            "The '%s' action requires valid JSON in the POST (or is not a "
            "valid endpoint)!"
        )
        raise utils.InvalidUsage(err % action, 422)

    release_oid = request_json.get('_id', None)
    if release_oid is None:
        raise utils.InvalidUsage('_id is required!', 422)
    release = releaseObject(_id=release_oid['$oid'])

    if action == 'update':
        release.update()
        return _json_response(release.record)
    if action == 'delete':
        return _json_response(release.delete().raw_result)

    # if we're still here, throw an error, because obviously we've got POST
    # data to some oddball/unknown endpoint...
    return flask.Response("'%s' method not allowed!" % action, status=405)
class releaseObject(models.StructuredObject):
    """ The releaseObject class definition. Initialize one of these to work
    with a release. Initialize with no arguments to use the values in the
    request.json. """

    def __init__(self, *args, **kwargs):
        """ Initialize with no args to create a new one. """
        # first, execute the init of our base class. Bug fix: super() binds
        # 'self' implicitly -- the previous explicit 'self' argument handed
        # the instance to StructuredObject.__init__ a second time.
        super().__init__(*args, **kwargs)
        self.request = flask.request.get_json()
        self.logger = utils.get_logger(log_name='admin')
        self.mdb = utils.mdb.releases
        # canonical shape (and types) of a release record
        self.data_model = {
            'created_on': datetime,
            'created_by': ObjectId,
            'modified_on': datetime,
            'platform': str,
            'version': dict,
            'summary': str,
            'sections': list,
            'items': list,
            'details': list,
            'published': bool,
            'published_on': datetime,
        }
        self.load()  # sets self._id if it isn't set

    def __repr__(self):
        """ A nice repr string that shows the platform and version. """
        return "%s release (%s)" % (self.platform, self.get_version_string())

    def load(self):
        """ Load a release record, creating one first if we have no _id. """
        if getattr(self, '_id', None) is None:
            self.new()
        self.record = self.mdb.find_one({'_id': self._id})
        if self.record is None:
            err = "Release OID '%s' not found!" % self._id
            raise utils.InvalidUsage(err, status_code=400)
        # hydrate attributes from the record; missing keys become None
        for key, value in self.data_model.items():
            setattr(self, key, self.record.get(key, None))

    def new(self):
        """ Create a new release record. Requires 'platform' in the request
        JSON. Raises InvalidUsage (422) if it is missing. """
        platform = self.request.get('platform', None)
        if platform is None:
            raise utils.InvalidUsage(
                'Platform must be specified when creating a new release!',
                status_code=422
            )
        self.logger.info("Creating a new release for '%s'" % platform)
        self._id = self.mdb.insert({})
        self.created_on = datetime.now()
        self.created_by = flask.request.User._id
        self.platform = platform
        self.set_latest_version()
        self.save()

    def update(self):
        """ Updates attributes, saves. Uses the request JSON! """
        published_pre_update = getattr(self, 'published', False)
        # call the base class method; update attrs
        super().update(source=flask.request.get_json(), verbose=True)
        published_post_update = getattr(self, 'published', False)
        # handle published_on logic: stamp on publish, clear on un-publish
        if not published_pre_update and published_post_update:
            self.published_on = datetime.now()
        elif published_pre_update and not published_post_update:
            self.published_on = None
        # sort things we want to sort
        # NOTE(review): assumes the request JSON always carries 'sections';
        # sorted(None) would raise TypeError -- confirm against callers
        self.sections = sorted(self.sections)
        self.modified_on = datetime.now()
        self.save(verbose=True)

    #
    #   gets/sets
    #

    def get_version_string(self):
        """ Returns the version dict as a 'major.minor.patch' string. """
        if self.version is None:
            self.version = {}
        return "%s.%s.%s" % (
            self.version.get('major', 0),
            self.version.get('minor', 0),
            self.version.get('patch', 0),
        )

    def set_latest_version(self):
        """ Uses self.platform to get the latest release for that platform and
        set the current self.version to that release's version. """
        # set default
        self.version = {'major': 0, 'minor': 0, 'patch': 0}
        # try to get latest
        latest = self.mdb.find_one(
            {'platform': self.platform},
            sort=[('created_on', pymongo.DESCENDING)]
        )
        # if latest a.) exists and b.) has a version, use it:
        if latest is not None and latest.get('version', None) is not None:
            for bit in ['major', 'minor', 'patch']:
                self.version[bit] = latest['version'].get(bit, 0)
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import os
import sys
import signal
import time
from datetime import datetime
from datetime import timedelta
# import cv2 as cv
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt  # import matplotlib.pyplot, abbreviated as plt
import numpy as np  # import numpy, abbreviated as np
import csv
# Configure matplotlib so Chinese (CJK) characters render instead of boxes
mpl.matplotlib_fname()
mpl.rcParams[u'font.sans-serif'] = ['simhei']
mpl.rcParams['axes.unicode_minus'] = False
# Module-level signal table, column-major: 14 parallel (initially empty) lists
# holding code, name, tag, meta, date, then prices at +0..+5, +10, +15 and
# +22 days. Transposed to rows and written to ./statistics.csv by Show.show().
statistics = [[0 for row in range(0)] for col in range(14)]
class Show(object):
    """Plot a stock's price history with moving averages and buy/sell markers.

    Reads '<path>/<code>_price_<freq>.csv' (CSV columns 2, 3, 10: date,
    price, amount), draws an annotated matplotlib chart, and appends any
    detected signal rows to the module-level ``statistics`` table.
    """

    def __init__(self, data=None, code='', path='./stocks/', freq='D', name=''):
        # let Ctrl-C terminate the process even while plotting
        signal.signal(signal.SIGINT, self.signal_handler)
        if path == '':
            self.path = './'
        else:
            self.path = path + '/'
        self.name = name
        self.code = code
        csv_data = pd.read_csv(self.path + self.code + '_price_' +
                               freq + '.csv', usecols=[2, 3, 10], header=None)  # read the data
        self.data = csv_data.values.tolist()
        self.freq = freq
        # line colors for each moving-average series
        self.colors = {'ma4': 'gold', 'ma9': 'pink',
                       'ma18': 'blueviolet', 'ma60': 'cyan'}

    def signal_handler(self, signal, frame):
        # SIGINT handler: exit immediately
        sys.exit(0)

    def get_position(self):
        """Return (dates, prices, amounts) in chronological order.

        The CSV rows are newest-first, hence the reverse() calls. Dates are
        parsed from the first 8 chars of the numeric date field (YYYYMMDD).
        """
        x = [i[0] for i in self.data]
        x.reverse()
        # print(x)
        # print(len(x))
        xs = [datetime.strptime(str(d)[0:-2], '%Y%m%d').date() for d in x]
        # print(xs)
        y = [i[1] for i in self.data]
        y = [round(i, 2) for i in y]
        y.reverse()
        # print(y)
        amount = [i[2] for i in self.data]
        amount = [round(i, 2) for i in amount]
        amount.reverse()
        return xs, y, amount

    def get_point(self, xs, y):
        """Locate local maxima/minima of *y*.

        Returns (high_x, high_y, low_x, low_y); the endpoints of the series
        are classified explicitly.
        """
        price_last = 0
        price = 0
        high_x = []
        high_y = []
        low_x = []
        low_y = []
        for i in range(len(y)):
            if i == 1:
                # classify the very first point
                if price >= y[i]:
                    high_x.append(xs[i-1])
                    high_y.append(price)
                elif price <= y[i]:
                    low_x.append(xs[i-1])
                    low_y.append(price)
            if i == len(y) - 1:
                # classify the very last point
                if price <= y[i]:
                    high_x.append(xs[i])
                    high_y.append(y[i])
                elif price >= y[i]:
                    low_x.append(xs[i])
                    low_y.append(y[i])
            # interior turning points: previous price is a local max/min
            if price >= y[i] and price >= price_last and price_last != 0:
                high_x.append(xs[i-1])
                high_y.append(price)
            if price <= y[i] and price <= price_last and price_last != 0:
                low_x.append(xs[i-1])
                low_y.append(price)
            price_last = price
            price = y[i]
        return high_x, high_y, low_x, low_y

    def draw_point(self, high_x, high_y, low_x, low_y):
        """Scatter-plot highs (red) and lows (green) with price labels."""
        # plot the high points
        for i in range(len(high_y)):
            plt.scatter(high_x[i], high_y[i], s=25,
                        color='red')  # s is the marker size
            plt.annotate(str(high_y[i]), color='red', xy=(
                high_x[i], high_y[i]+0.003*high_y[i]), fontsize=10, xycoords='data')  # label slightly above the point
        # plot the low points
        for i in range(len(low_y)):
            plt.scatter(low_x[i], low_y[i], s=25,
                        color='green')  # s is the marker size
            plt.annotate(str(low_y[i]), color='green', xy=(
                low_x[i], low_y[i]-0.007*low_y[i]), fontsize=10, xycoords='data')  # label slightly below the point
        # plt.text(3.3, 5, "this point very important",
        #          fontdict={'size': 12, 'color': 'green'})  # xycoords='data' positions by data coordinates

    def draw_high_line(self, high_x, high_y):
        """Draw successively thicker trend lines through the highs."""
        plt.plot(high_x, high_y, color='red',
                 linewidth=1.0, linestyle="--", label="y")
        x = high_x
        y = high_y
        linewidth = 1.0
        # repeatedly extract the highs-of-the-highs until too few remain
        while len(y) >= 2:
            high_x, high_y, temp_x, temp_y = self.get_point(x, y)
            x = high_x
            y = high_y
            linewidth += 0.75
            plt.plot(x, y, color='red', linewidth=linewidth,
                     linestyle="--", label="y")

    def draw_low_line(self, low_x, low_y):
        """Draw successively thicker trend lines through the lows."""
        plt.plot(low_x, low_y, color='green',
                 linewidth=1.0, linestyle="--", label="y")
        x = low_x
        y = low_y
        linewidth = 1.0
        # repeatedly extract the lows-of-the-lows until too few remain
        while len(x) >= 2:
            temp_x, temp_y, low_x, low_y = self.get_point(x, y)
            x = low_x
            y = low_y
            linewidth += 0.75
            plt.plot(x, y, color='green', linewidth=linewidth,
                     linestyle="--", label="y")

    def get_statistics(self, xs, ys, index, tag, meta):
        """Append one signal row to the module-level `statistics` table.

        Records code/name/tag/meta/date plus prices at the signal day and at
        +1..+5, +10, +15 and +22 days. Skipped when fewer than 22 days of
        context exist on either side of *index*.
        """
        if index+22 > len(ys)-1 or index < 22:
            return
        statistics[0].append(self.code)
        statistics[1].append(self.name)
        statistics[2].append(tag)
        statistics[3].append(meta)
        statistics[4].append(datetime.strftime(xs[index], "%Y%m%d"))
        statistics[5].append(ys[index])
        statistics[6].append(ys[index+1])
        statistics[7].append(ys[index+2])
        statistics[8].append(ys[index+3])
        statistics[9].append(ys[index+4])
        statistics[10].append(ys[index+5])
        statistics[11].append(ys[index+10])
        statistics[12].append(ys[index+15])
        statistics[13].append(ys[index+22])
        # print(statistics)

    def amount_price_select(self, xs, ys, amount):
        """Print a signal when price+volume shrank 4 days running, then both jumped.

        "Shrank" means volume below 90% of the prior day; the breakout day
        needs volume above 120% of the prior day. Only the most recent two
        days are announced.
        """
        code = self.code + ':'
        for i in range(5, len(ys)):
            if(ys[i-4] < ys[i-5]) and amount[i-4] < amount[i-5]*0.9:
                if(ys[i-3] < ys[i-4]) and amount[i-3] < amount[i-4]*0.9:
                    if(ys[i-2] < ys[i-3]) and amount[i-2] < amount[i-3]*0.9:
                        # self.get_statistics(xs, ys, i, 'amount0')
                        if(ys[i-1] < ys[i-2]) and amount[i-1] < amount[i-2]*0.9:
                            # self.get_statistics(xs, ys, i, 'amount1')
                            if(ys[i] > ys[i-1]) and amount[i] > amount[i-1]*1.2:
                                # self.get_statistics(xs, ys, i, 'amount2')
                                if (len(ys) - i - 1) < 2:
                                    print(code, self.name,
                                          xs[i], 'amount_price rush!!!')

    def price_select(self, xs, ys):
        """Record RSI-crossover entries ('rush') and 5%-drawdown exits ('run')."""
        code = self.code + ':'
        max = 0  # running high-water mark (NOTE: shadows the builtin max())
        rush = False
        rsi6 = self.get_rsi(ys, 6)
        rsi12 = self.get_rsi(ys, 12)
        for i in range(0, len(ys)):
            if ys[i] >= max:
                max = ys[i]
                # if rush == False:
                #     rush = True
                #     self.get_statistics(xs, ys, i, 'price__', 'rush')
                #     if (len(ys) - i - 1) < 2:
                #         print(code, self.name, xs[i], 'price rush!!!')
            if rsi6[i] > rsi12[i]:
                # fast RSI above slow RSI while slow RSI is in the 30-40 band
                if rush == False and rsi12[i] < 40 and rsi12[i] > 30:
                    rush = True
                    self.get_statistics(xs, ys, i, 'test', 'rush')
                    max = ys[i]
                    if (len(ys) - i - 1) < 2:
                        print(code, self.name, xs[i], 'rsi rush!!!')
            # exit when price drops 5% below the high-water mark
            if ys[i] < max*0.95 and rush == True:
                rush = False
                self.get_statistics(xs, ys, i, 'test', 'run')
                if (len(ys) - i - 1) < 2:
                    print(code, self.name, xs[i], 'test run!!!')

    def get_smooth(self, price, number):
        """Exponential-style smoothing: 1/number of the new value plus
        (number-1)/number of the previous smoothed value."""
        smooth = [0]
        for i in range(1, len(price)):
            p = price[i]/number+smooth[i-1]*(number-1)/number
            smooth.append(p)
        return smooth

    def get_rsi(self, price, number):
        """Compute an RSI series over *number* periods (0-100, rounded to 2dp)."""
        rsi = [0]
        up = [0]
        down = [0]
        # split day-over-day changes into gains and losses
        for i in range(1, len(price)):
            temp = price[i] - price[i-1]
            if temp >= 0:
                up.append(temp)
                down.append(0)
            else:
                down.append(abs(temp))
                up.append(0)
        up_smooth = self.get_smooth(up, number)
        down_smooth = self.get_smooth(down, number)
        for i in range(1, len(price)):
            if up_smooth[i] == 0 and down_smooth[i] == 0:
                # no movement at all: carry the previous RSI forward
                r = rsi[i-1]
            else:
                r = up_smooth[i]/(up_smooth[i]+down_smooth[i])*100
            rsi.append(round(r, 2))
        return rsi

    def rsi_select(self, xs, ys):
        """Record RSI6/RSI12 crossover signals in `statistics` and print
        recent ones."""
        code = self.code + ':'
        rsi6 = self.get_rsi(ys, 6)
        rsi12 = self.get_rsi(ys, 12)
        # rsi24 = self.get_rsi(ys, 24)
        rush = False
        run = False
        for i in range(0, len(ys)):
            if rsi6[i] > rsi12[i]:
                run = False
                # entry: fast over slow while the slow RSI is in 30-40
                if rush == False and rsi12[i] < 40 and rsi12[i] > 30:
                    rush = True
                    self.get_statistics(xs, ys, i, 'rsi6_12', 'rush')
                    max = ys[i]
                    if (len(ys) - i - 1) < 2:
                        print(code, self.name, xs[i], 'rsi rush!!!')
            if rsi6[i] < rsi12[i]:
                rush = False
                # exit: fast under slow while the fast RSI is in 60-70
                if run == False and rsi6[i] > 60 and rsi6[i] < 70:
                    run = True
                    self.get_statistics(xs, ys, i, 'rsi6_12', 'run')
                    if (len(ys) - i - 1) < 2:
                        print(code, self.name, xs[i], 'rsi run!!!')

    def get_average(self, price, number):
        """Simple moving average over *number* points (shorter at the start),
        rounded to 2 decimal places."""
        average = []
        index = 0
        for i in range(len(price)):
            if i < number:
                index = 0
            else:
                index = i-(number-1)
            p = price[index:i+1]
            average.append(round(np.mean(p), 2))
        return average

    def average_line_select(self, xs, ys):
        """Plot MA4/MA9/MA18 and mark crossover signals.

        Returns True when a ma9/ma18 'rush' crossover occurred (used by
        show() to decide whether the chart is worth saving).
        """
        ma4 = self.get_average(ys, 4)
        ma9 = self.get_average(ys, 9)
        ma18 = self.get_average(ys, 18)
        # ma60 = self.get_average(ys, 60)
        pre_rush = False
        rush = False
        pre_run = False
        run = False
        ret = False
        code = self.code + ':'
        for i in range(0, len(ys)):
            # rush
            if ma4[i] > ma9[i]:
                if pre_rush == False:
                    pre_rush = True
                    self.get_statistics(xs, ys, i, 'ma4___9', 'rush')
                    # if (len(ys) - i - 1) < 2:
                    #     print(code, self.name, xs[i], 'average pre_rush!')
                if ma9[i] > ma18[i]:
                    if rush == False:
                        rush = True
                        self.get_statistics(xs, ys, i, 'ma9__18', 'rush')
                        plt.scatter(xs[i], ys[i], s=50,
                                    color='red')  # s is the marker size
                        if (len(ys) - i - 1) < 2:
                            print(code, self.name, xs[i], 'average rush!!!')
                            ret = True
            if ma9[i] < ma18[i]:
                rush = False
            if ma4[i] < ma9[i]:
                if rush == False:
                    pre_rush = False
            # run
            if ma4[i] < ma9[i]:
                if pre_run == False:
                    pre_run = True
                    self.get_statistics(xs, ys, i, 'ma4___9', 'run')
                    # print(code, xs[i], 'pre_run!')
                if ma9[i] < ma18[i]:
                    if run == False:
                        run = True
                        self.get_statistics(xs, ys, i, 'ma9__18', 'run')
                        plt.scatter(xs[i], ys[i], s=50,
                                    color='green')  # s is the marker size
                        if (len(ys) - i - 1) < 2:
                            print(code, self.name, xs[i], 'average run!!!')
            if ma9[i] > ma18[i]:
                run = False
            if ma4[i] > ma9[i]:
                if run == False:
                    pre_run = False
        plt.plot(xs, ma4, color=self.colors['ma4'],
                 linewidth=1.5, linestyle="-", label='ma4')
        plt.plot(xs, ma9, color=self.colors['ma9'],
                 linewidth=1.5, linestyle="-", label='ma9')
        plt.plot(xs, ma18, color=self.colors['ma18'],
                 linewidth=1.5, linestyle="-", label='ma18')
        # plt.plot(xs, ma60, color=self.colors['ma60'], linewidth=1.5, linestyle="-", label='ma60')
        return ret

    def show(self):
        """Render the chart, run all selectors and flush `statistics` to CSV."""
        # Create a figure window; 24 x 13.5 inches at 80 dpi
        plt.figure(figsize=(24, 13.5), dpi=80)
        # create a single 1 x 1 subplot
        plt.subplot(111)
        # fig1, ax = plt.subplots()
        plt.title(self.name)
        xs, ys, amount = self.get_position()
        flag = False
        flag = self.average_line_select(xs, ys)
        # self.rsi_select(xs, ys)
        self.price_select(xs, ys)
        self.amount_price_select(xs, ys, amount)
        high_x, high_y, low_x, low_y = self.get_point(xs, ys)
        self.draw_point(high_x, high_y, low_x, low_y)
        # self.draw_high_line(high_x, high_y)
        # self.draw_low_line(low_x, low_y)
        plt.plot(xs, ys, color='blue', linewidth=1.0,
                 linestyle="-", label="price")
        plt.legend(loc='upper left', ncol=2)  # legend
        # x-axis limits
        # plt.xlim(20160818, 20200901)
        # y-axis limits
        # plt.ylim(30, 500)
        # x-axis label
        plt.xlabel("X")
        # y-axis label
        # plt.ylabel("Y")
        # explicit x-axis ticks
        # plt.xticks([-1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5])
        # explicit y-axis ticks
        # plt.yticks([-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        # tick every 5th date, plus the final date
        xticks = list(range(0, len(xs), 5))
        xlabels = [xs[x] for x in xticks]
        xlabels.append(xs[-1])
        plt.xticks(xlabels, rotation=-90)
        # # explicit y-axis ticks
        # plt.yticks([-2, 0, 2, 4, 6, 8, 10],
        #            ["-2m", "0m", "2m", "4m", "6m", "8m", "10m"])
        if flag is True:
            # only save charts that produced a ma9/ma18 'rush' signal
            plt.savefig(self.path + self.code + '_' +
                        self.name + '_' + self.freq + '.png')
        # flush accumulated signal rows (transposed to row-major) to CSV;
        # both branches write the same data -- only the open mode differs
        filename = './statistics.csv'
        if not os.path.exists(filename):
            with open(filename, 'w') as f:
                f_csv = csv.writer(f)
                ar2 = [[row[i] for row in statistics]
                       for i in range(len(statistics[0]))]
                f_csv.writerows(ar2)
        else:
            with open(filename, 'a') as f:
                f_csv = csv.writer(f)
                ar2 = [[row[i] for row in statistics]
                       for i in range(len(statistics[0]))]
                f_csv.writerows(ar2)
        # plt.show(block=False)
        # while plt.waitforbuttonpress() == False:
        #     time.sleep(0.1)
if __name__ == "__main__":
csv_file = sys.argv[1]
freq = 'D'
name = ''
path = './stocks/'
if len(sys.argv) == 5:
path = sys.argv[4]
freq = sys.argv[3]
name = sys.argv[2]
elif len(sys.argv) == 4:
freq = sys.argv[3]
name = sys.argv[2]
elif len(sys.argv) == 3:
name = sys.argv[2]
show = Show(code=csv_file, name=name, freq=freq, path=path)
show.show()
|
nilq/baby-python
|
python
|
"""
Class to represent the results of a prediction.
"""
import codecs
import logging
import os
import warnings
from numpy import ndarray
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.metrics import \
confusion_matrix, \
recall_score, \
precision_score, \
f1_score, \
accuracy_score
from tabulate import tabulate
from .argmin_components import evaluate_argmin_components
from .argmin_post_processing import relative_2_absolute
from .argmin_relations import evaluate_argmin_relations
from .metrics import compute_f1, compute_precision, compute_recall, pre_process
from .seq_2_seq_metrics import word_accuracy, edit_distance
from ..config.TaskConfig import TaskConfig
from ..constants import ENCODING_NONE, METRIC_ACCURACY, METRIC_F1, METRIC_PRECISION, METRIC_RECALL, TASK_TYPE_AM, \
METRIC_WORD_ACCURACY, METRIC_F1_O, METRIC_F1_B, \
METRIC_PRECISION_O, METRIC_PRECISION_B, METRIC_RECALL_O, METRIC_RECALL_B, METRIC_AM_COMPONENTS_05, \
METRIC_AM_COMPONENTS_0999, METRIC_AM_RELATIONS_05, METRIC_AM_RELATIONS_0999, METRIC_AVG_EDIT_DISTANCE, \
METRIC_MEDIAN_EDIT_DISTANCE
from ..data.Sample import Sample
from ..util import swap_dict
warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
class ResultList(list):
    """
    Class to represent the results of a prediction.
    """
    def __init__(self, result_tuples, label_2_idx, task=None):
        """
        Initialize a result list.
        Creates swapped mapping functions and populates the internal list.
        The list contains tuples with the following entries:
            * Sentence with actual tokens
            * Predicted labels as strings
            * Gold labels as strings
            * Sentence with indices
            * Predicted labels with indices
            * Gold labels with indices
            * Sample object
        Args:
            result_tuples (`list` of `tuple` of object): A list of results represented as tuples consisting of
                (sentence, gold label, predicted label, sample object). The sample object can be used to restore the
                original sentence (words).
            label_2_idx (`dict` of int): A mapping from label names to indices.
            task (TaskConfig): The task to which the results belong to
        """
        assert isinstance(label_2_idx, dict)
        # Bug fix: allow the documented default of task=None -- the previous
        # unconditional isinstance assert rejected it.
        assert task is None or isinstance(task, TaskConfig)

        logger = logging.getLogger("shared.result_list.init")
        list.__init__(self)
        self.label_2_idx = label_2_idx
        self.idx_2_label = swap_dict(label_2_idx)
        self.task = task

        logger.debug("Initializing a result list for %d sentences", len(result_tuples))
        for sentence, gold_labels, predicted_labels, sample in result_tuples:
            assert isinstance(sample, Sample)
            assert len(sentence) == len(gold_labels) == len(predicted_labels)
            word_sentence = sample.raw_tokens
            word_gold_labels = sample.raw_labels
            word_predicted_labels = [self.idx_2_label[idx] for idx in predicted_labels]

            # Removal of padding if necessary
            if len(word_sentence) != len(sentence):
                # The raw sentence as stored in the sample object has the true length
                true_length = len(word_sentence)
                sentence = sentence[:true_length]
                gold_labels = gold_labels[:true_length]
                predicted_labels = predicted_labels[:true_length]

            self.append((
                word_sentence,
                word_predicted_labels,
                word_gold_labels,
                sentence,
                predicted_labels,
                gold_labels,
                sample
            ))

    def get_true_and_pred(self):
        """
        From the unmasked data in the result list, create a list of predictions and a list of truths.
        Returns:
            `tuple` of `list` of str: A tuple consisting of the truths and the predictions (in this order).
        """
        y_true = []
        y_pred = []
        for _, pred, gold, _, _, _, _ in self:
            for pred_label, gold_label in zip(pred, gold):
                y_true.append(gold_label)
                y_pred.append(pred_label)
        return y_true, y_pred

    def get_true_and_pred_sentences(self, word=False):
        """
        Retrieve all true and predicted sentence labels. If `word` is True, retrieve the word representation for labels.
        Otherwise, retrieve the index representation. The latter is required for calculating metrics on BIO.
        Args:
            word (bool): Whether to use word or index representations for the labels.
        Returns:
            `tuple` of `list` of `list` of str or `tuple` of `list` of `list` of int: A tuple consisting of gold label
                sentences and predictions (in this order).
        """
        true_sentences = []
        predicted_sentences = []
        for entry in self:
            if word:
                predicted_sentences.append(entry[1])
                true_sentences.append(entry[2])
            else:
                predicted_sentences.append(entry[4])
                true_sentences.append(entry[5])
        return true_sentences, predicted_sentences

    def confusion_matrix(self):
        """
        Compute the confusion matrix for the result list.
        Returns:
            Confusion matrix
        """
        y_true, y_pred = self.get_true_and_pred()
        return confusion_matrix(y_true, y_pred, labels=list(self.idx_2_label.values()))

    def print_confusion_matrix(self, matrix=None):
        """
        Generate a ASCII representation for the confusion matrix.
        Args:
            matrix: A confusion matrix.
        Returns:
            A well-formatted confusion matrix.
        """
        if matrix is None:
            matrix = self.confusion_matrix()
        if isinstance(matrix, ndarray):
            matrix = matrix.tolist()
        labels = list(self.idx_2_label.values())
        for row_idx in range(len(matrix)):
            # Prepend label for rows
            matrix[row_idx] = [labels[row_idx]] + matrix[row_idx]
        print(tabulate(matrix, headers=labels))

    def accuracy(self):
        """
        See http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.accuracy_score
        Returns:
            float: accuracy score
        """
        y_true, y_pred = self.get_true_and_pred()
        return accuracy_score(y_true, y_pred)

    def precision(self, correct_bio_errors="No"):
        """
        Calculate the precision. If the task uses BIO, IOB or IOBES encoding, a special calculation method is used.
        Otherwise, we fall back to the scikit learn implementation.
        Args:
            correct_bio_errors (str): If this is set to "O" or "B", a correction of incorrect "I-" labels is performed.
                See `metrics.py` for further details.
        Returns:
            float: precision score
        """
        if self.task is None or self.task.encoding == ENCODING_NONE:
            # Not BIO, IOB or IOBES
            y_true, y_pred = self.get_true_and_pred()
            return precision_score(y_true, y_pred, labels=list(self.idx_2_label.values()), average="macro")
        else:
            y_true, y_pred = self.get_true_and_pred_sentences(word=False)
            y_true, y_pred = pre_process(
                y_pred,
                y_true,
                self.idx_2_label,
                correct_bio_errors=correct_bio_errors,
                encoding_scheme=self.task.encoding
            )
            return compute_precision(y_pred, y_true)

    def recall(self, correct_bio_errors="No"):
        """
        Calculate the recall. If the task uses BIO, IOB or IOBES encoding, a special calculation method is used.
        Otherwise, we fall back to the scikit learn implementation.
        Args:
            correct_bio_errors (str): If this is set to "O" or "B", a correction of incorrect "I-" labels is performed.
                See `metrics.py` for further details.
        Returns:
            float: recall score
        """
        if self.task is None or self.task.encoding == ENCODING_NONE:
            # Not BIO, IOB or IOBES
            y_true, y_pred = self.get_true_and_pred()
            return recall_score(y_true, y_pred, labels=list(self.idx_2_label.values()), average="macro")
        else:
            y_true, y_pred = self.get_true_and_pred_sentences(word=False)
            y_true, y_pred = pre_process(
                y_pred,
                y_true,
                self.idx_2_label,
                correct_bio_errors=correct_bio_errors,
                encoding_scheme=self.task.encoding
            )
            return compute_recall(y_pred, y_true)

    def f1(self, correct_bio_errors="No"):
        """
        Calculate the f1 score. If the task uses BIO, IOB or IOBES encoding, a special calculation method is used.
        Otherwise, we fall back to the scikit learn implementation.
        Args:
            correct_bio_errors (str): If this is set to "O" or "B", a correction of incorrect "I-" labels is performed.
                See `metrics.py` for further details.
        Returns:
            float: f1 score
        """
        if self.task is None or self.task.encoding == ENCODING_NONE:
            # Not BIO, IOB or IOBES
            y_true, y_pred = self.get_true_and_pred()
            return f1_score(y_true, y_pred, labels=list(self.idx_2_label.values()), average="macro")
        else:
            y_true, y_pred = self.get_true_and_pred_sentences(word=False)
            y_true, y_pred = pre_process(
                y_pred,
                y_true,
                self.idx_2_label,
                correct_bio_errors=correct_bio_errors,
                encoding_scheme=self.task.encoding
            )
            return compute_f1(y_pred, y_true)

    def argmin_components(self, ratio=0.5):
        """
        Calculate the AM components score at the specified ratio.
        Args:
            ratio (float): Ratio for score calculation.
        Returns:
            float: f1 score
        """
        conll_list = self.as_conll_list()
        prediction_list = relative_2_absolute(conll_list, 0, 2)
        truth_list = relative_2_absolute(conll_list, 0, 1)
        result = evaluate_argmin_components(prediction_list, truth_list, 2, 2, ratio=ratio)
        return result[3]

    def argmin_relations(self, ratio=0.5):
        """
        Calculate the AM relations score at the specified ratio.
        Args:
            ratio (float): Ratio for score calculation.
        Returns:
            float: f1 score
        """
        conll_list = self.as_conll_list()
        prediction_list = relative_2_absolute(conll_list, 0, 2)
        truth_list = relative_2_absolute(conll_list, 0, 1)
        result = evaluate_argmin_relations(prediction_list, truth_list, 2, 2, ratio=ratio)
        return result[3]

    def word_accuracy(self):
        """
        Calculate the word accuracy.
        Use this only for seq2seq tasks.
        Returns:
            float: word accuracy
        """
        y_true, y_pred = self.get_true_and_pred_sentences(word=True)
        return word_accuracy(y_pred, y_true)

    def edit_distance(self, mode="avg"):
        """
        Calculate the edit distance.
        Use this only for seq2seq tasks.
        Args:
            mode (str, optional): How to combine the edit distances of the words. Valid options are "avg" and "median".
                Defaults to "avg".
        Returns:
            float: average edit distance
        """
        assert mode in ["avg", "median"]
        y_true, y_pred = self.get_true_and_pred_sentences(word=True)
        return edit_distance(y_pred, y_true, mode)

    def compute_metric_by_name(self, metric_name):
        """
        Compute the metric identified by `metric_name`. If the metric name is unknown,
        a value error is raised.
        Args:
            metric_name (str): The name of a metric.
        Returns:
            float: metric value
        """
        if metric_name == METRIC_ACCURACY:
            return self.accuracy()
        elif metric_name == METRIC_F1:
            return self.f1()
        elif metric_name == METRIC_F1_O:
            return self.f1(correct_bio_errors="O")
        elif metric_name == METRIC_F1_B:
            return self.f1(correct_bio_errors="B")
        elif metric_name == METRIC_PRECISION:
            return self.precision()
        elif metric_name == METRIC_PRECISION_O:
            return self.precision(correct_bio_errors="O")
        elif metric_name == METRIC_PRECISION_B:
            return self.precision(correct_bio_errors="B")
        elif metric_name == METRIC_RECALL:
            return self.recall()
        elif metric_name == METRIC_RECALL_O:
            return self.recall(correct_bio_errors="O")
        elif metric_name == METRIC_RECALL_B:
            return self.recall(correct_bio_errors="B")
        elif metric_name == METRIC_AM_COMPONENTS_05:
            return self.argmin_components(ratio=0.5)
        elif metric_name == METRIC_AM_COMPONENTS_0999:
            return self.argmin_components(ratio=0.999)
        elif metric_name == METRIC_AM_RELATIONS_05:
            return self.argmin_relations(ratio=0.5)
        elif metric_name == METRIC_AM_RELATIONS_0999:
            # Bug fix: previously delegated to argmin_components(), silently
            # returning the components score for the relations metric.
            return self.argmin_relations(ratio=0.999)
        elif metric_name == METRIC_WORD_ACCURACY:
            return self.word_accuracy()
        elif metric_name == METRIC_AVG_EDIT_DISTANCE:
            return self.edit_distance(mode="avg")
        elif metric_name == METRIC_MEDIAN_EDIT_DISTANCE:
            return self.edit_distance(mode="median")
        else:
            raise ValueError("Metric with name %s is not supported by this method." % metric_name)

    def as_conll_list(self, delimiter="\t"):
        """
        Build a document in CoNNL format, but each line is a separate string within
        a list.
        Args:
            delimiter (str, optional): Which character is used as a column separator. Defaults to tab (`\t`).
        Returns:
            `list` of str: A list of lines in CoNLL format (token truth prediction).
        """
        output = []
        for x, y, gold, _, _, _, sample in self:
            docid = ""
            if sample.docid is not None:
                docid = sample.docid
            output.append(docid)
            for i in range(len(x)):
                output.append(delimiter.join([x[i], gold[i], y[i]]))
            # Add empty line to separate sentences
            output.append("")
        return output

    def __str__(self):
        """
        Build a string representation for an instance of the result list class.
        Returns:
            Data in CONLL format with predicted labels in the last row.
        """
        return "\n".join(self.as_conll_list())

    def predictions_to_file(self, prediction_dir_path, filename):
        """
        Write predictions to a file.
        If the task is AM, two files are written that adhere to the format used by SE and JD.
        Args:
            prediction_dir_path (str): Path to prediction directory.
            filename (str): Prediction filename
        """
        assert os.path.exists(prediction_dir_path), "Expected that prediction directory path exists"
        assert os.path.isdir(prediction_dir_path), "Expected that prediction directory path points to a directory"

        logger = logging.getLogger("shared.result_list.predictions_to_file")
        logger.debug("Writing predictions to file(s)")

        if self.task and self.task.type == TASK_TYPE_AM:
            pred_file_path = os.path.join(prediction_dir_path, filename + ".pred.corr.abs")
            gold_file_path = os.path.join(prediction_dir_path, filename + ".truth.corr.abs")
            logger.debug("Files: %s", [pred_file_path, gold_file_path])

            conll_list = self.as_conll_list()
            prediction_list = relative_2_absolute(conll_list, 0, 2)
            truth_list = relative_2_absolute(conll_list, 0, 1)

            with codecs.open(pred_file_path, mode="w", encoding="utf8") as f:
                f.write("\n".join(prediction_list))
            with codecs.open(gold_file_path, mode="w", encoding="utf8") as f:
                f.write("\n".join(truth_list))
        else:
            file_path = os.path.join(prediction_dir_path, filename)
            logger.debug("File: %s", file_path)
            with codecs.open(file_path, mode="w", encoding="utf8") as f:
                f.write(self.__str__())

    def metrics_as_list(self):
        """
        Provides the performance metrics for the result list as a list (useful for storing in CSV format).
        Entries in the list:
            * Number of performed predictions
            * Number of correct predictions
            * Number of incorrect predictions
            * Accuracy
            * Precision
            * Recall
            * F1 score
        Returns:
            `list` of int or `list` of float: List of metrics
        """
        y_true, y_pred = self.get_true_and_pred()
        num_total = len(y_true)
        num_correct = len([1 for t, p in zip(y_true, y_pred) if t == p])
        num_false = num_total - num_correct
        return [
            num_total,
            num_correct,
            num_false,
            self.accuracy(),
            self.precision(),
            self.recall(),
            self.f1()
        ]
|
nilq/baby-python
|
python
|
import server_socket
import threading
class Microphone(object):
    """Receives speech-recognition text over a socket and forwards each
    phrase to the steer controller on a background thread."""

    def __init__(self, host, port, steer):
        # steer: controller object whose microphone state we keep updated
        self.steer = steer
        self.socket = server_socket.Server(host, port)
        self.client = self.socket.Get_Client()

    def Recv(self):
        """Blocking receive loop; runs forever on the worker thread."""
        while True:
            # Loop on this thread, refreshing the steer object's
            # microphone variable with each phrase received
            speech = self.client.recv(128).decode()
            print('speech', speech)
            self.steer.Set_Microphone(speech)

    def Run(self):
        """Start Recv() on a background thread so the caller is not blocked."""
        # Declare a thread so the receive loop runs in parallel;
        # the thread executes the function given as `target`
        # (parameters, if any, would be passed via `args`)
        mic_thread = threading.Thread(target=self.Recv, args=())
        mic_thread.start()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""This simulates a real job by producing a lot of output."""
from __future__ import print_function
__author__ = 'asharif@google.com (Ahmad Sharif)'
import time
def Main():
    """The main function: emit bursts of output to simulate a real job."""
    for j in range(10):
        # One burst of 10k lines, then a minute of idle time.
        for i in range(10000):
            print('%dThe quick brown fox jumped over the lazy dog.%d' % (j, i))
        time.sleep(60)
    return 0


if __name__ == '__main__':
    Main()
|
nilq/baby-python
|
python
|
############## Configurator for command line programs
#### tests
# 2016 Portia Frances Limited for UBS
# Author: Thomas Haederle
import logging
logger = logging.getLogger(__name__)
import pytest
#from nose.tools import *
from configurator import Configurator
def test_configurator_initialize():
    """The description passed to the constructor is stored verbatim."""
    cfg = Configurator("This is a test")
    assert cfg.description == "This is a test"


def test_configurator_standardparse():
    """Default argument parsing yields the standard cobdate/rundate/mode options."""
    cfg = Configurator()
    parsed = cfg.parser.parse_args()
    print(parsed)
    assert parsed.cobdate
    assert parsed.rundate
    assert parsed.mode


def test_configurator_commandconfig():
    """commandconfig is empty until configureCommandline() populates it."""
    cfg = Configurator()
    assert cfg.commandconfig is None
    cfg.configureCommandline()
    assert cfg.commandconfig


def test_configurator_setupAppconfig():
    """setupAppconfig() populates the appconfig attribute."""
    cfg = Configurator()
    cfg.setupAppconfig()
    assert cfg.appconfig


def test_configurator_setuplogger():
    """setupLogger() returns the root logger."""
    cfg = Configurator()
    rootlogger = cfg.setupLogger()
    assert type(rootlogger) == logging.RootLogger  # is returned logger of type rootlogger
def test_cobdate():
    """configureCommandline() exposes cobdate/rundate through commandconfig."""
    conf = Configurator()
    args = conf.configureCommandline()
    assert conf.commandconfig["cobdate"]
    assert conf.commandconfig["rundate"]


def test_cobdate_appconfig():
    """setupAppconfig() exposes cobdate/rundate as appconfig attributes.

    NOTE: this function was previously also named ``test_cobdate``, which
    silently shadowed the definition above so pytest never collected it.
    """
    conf = Configurator()
    args = conf.setupAppconfig()
    assert conf.appconfig.cobdate
    assert conf.appconfig.rundate
|
nilq/baby-python
|
python
|
def signed8(b):
    """Reinterpret an unsigned byte (0-255) as a signed 8-bit value."""
    if b > 127:
        return -256 + b
    else:
        return b


def signed16(v):
    """Reinterpret the low 16 bits of *v* as a signed 16-bit value."""
    v &= 0xFFFF
    if v > 0x7FFF:
        return -0x10000 + v
    else:
        return v


def signed24(v):
    """Reinterpret the low 24 bits of *v* as a signed 24-bit value."""
    v &= 0xFFFFFF
    if v > 0x7FFFFF:
        return -0x1000000 + v
    else:
        return v


def read_signed(stream, n):
    """Read *n* bytes from *stream*; return them as a list of signed ints."""
    byte = bytearray(stream.read(n))
    signed_bytes = []
    for b in byte:
        signed_bytes.append(signed8(b))
    return signed_bytes


# BUG FIX (all readers below): length checks used ``is`` (identity) instead of
# ``==`` on ints -- a CPython small-int accident that raises SyntaxWarning on
# Python 3.8+ and is not guaranteed by the language.

def read_sint_8(stream):
    """Read one signed byte; return None on short read."""
    byte = bytearray(stream.read(1))
    if len(byte) == 1:
        return signed8(byte[0])
    return None


def read_int_8(stream):
    """Read one unsigned byte; return None on short read."""
    byte = bytearray(stream.read(1))
    if len(byte) == 1:
        return byte[0]
    return None


def read_int_16le(stream):
    """Read an unsigned 16-bit little-endian int; None on short read."""
    byte = bytearray(stream.read(2))
    if len(byte) == 2:
        return (byte[0] & 0xFF) + ((byte[1] & 0xFF) << 8)
    return None


def read_int_16be(stream):
    """Read an unsigned 16-bit big-endian int; None on short read."""
    byte = bytearray(stream.read(2))
    if len(byte) == 2:
        return (byte[1] & 0xFF) + ((byte[0] & 0xFF) << 8)
    return None


def read_int_24le(stream):
    """Read an unsigned 24-bit little-endian int; None on short read."""
    b = bytearray(stream.read(3))
    if len(b) == 3:
        return (b[0] & 0xFF) + ((b[1] & 0xFF) << 8) + \
               ((b[2] & 0xFF) << 16)
    return None


def read_int_24be(stream):
    """Read an unsigned 24-bit big-endian int; None on short read."""
    b = bytearray(stream.read(3))
    if len(b) == 3:
        return (b[2] & 0xFF) + ((b[1] & 0xFF) << 8) + \
               ((b[0] & 0xFF) << 16)
    return None


def read_int_32le(stream):
    """Read an unsigned 32-bit little-endian int; None on short read."""
    b = bytearray(stream.read(4))
    if len(b) == 4:
        return (b[0] & 0xFF) + ((b[1] & 0xFF) << 8) + \
               ((b[2] & 0xFF) << 16) + ((b[3] & 0xFF) << 24)
    return None


def read_int_32be(stream):
    """Read an unsigned 32-bit big-endian int; None on short read."""
    b = bytearray(stream.read(4))
    if len(b) == 4:
        return (b[3] & 0xFF) + ((b[2] & 0xFF) << 8) + \
               ((b[1] & 0xFF) << 16) + ((b[0] & 0xFF) << 24)
    return None


def read_string_8(stream, length):
    """Read *length* bytes and decode as UTF-8; None when decoding fails."""
    byte = stream.read(length)
    try:
        return byte.decode('utf8')
    except UnicodeDecodeError:
        return None  # invalid UTF-8 sequence


def read_string_16(stream, length):
    """Read *length* bytes and decode as UTF-16; None when decoding fails."""
    byte = stream.read(length)
    try:
        return byte.decode('utf16')
    except UnicodeDecodeError:
        return None
|
nilq/baby-python
|
python
|
# import geopandas
# from utils.common import load_shape
# from pathlib import Path
# import sys
# sys.path.append(str(Path(__file__).parent.parent))
# from configs import server_config
# # from shapely.geometry import shape
# from db_connection import DBConnection
# from alchemy import Eez
# import shapely.geometry as sh
# eez = load_shape(server_config.EEZ_GEOJSON)
# eez = geopandas.GeoDataFrame(eez)
# # eez['geometry'] = [shape(e) for e in eez['geometry']]
# db = DBConnection() # Database Object
# for row in eez.itertuples():
# sovs = [row.properties[sov] for sov in ['SOVEREIGN1', 'SOVEREIGN2', 'SOVEREIGN3'] if row.properties[sov] is not None]
# geom = row.geometry
# # geom = geom.update({"crs" : {"properties" : {"name" : "urn:ogc:def:crs:EPSG:8.8.1:4326"}}}) # This is equivalent to the existing projectionn, but is recognized by postgres as mappable, so slightly preferred.
# e = Eez(
# mrgid=int(row.properties['MRGID']),
# geoname=row.properties['GEONAME'],
# pol_type=row.properties['POL_TYPE'],
# sovereigns=sovs,
# geometry="SRID=4326;"+sh.shape(row.geometry).wkt
# )
# db.sess.add(e)
# db.sess.commit()
# db.sess.close()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
__all__ = ("main",)
def main():
    """Entry point: delegate to the console's main function."""
    from lyricli.console import main as console_main
    console_main()


if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
import nextcord
from util.mongo import Document
class afk_utils:
    """Mongo-backed helpers for tracking AFK users."""

    def __init__(self, bot):
        self.db = bot.db
        self.afk_db = Document(self.db, "afk_user_db")

    async def create_afk(self, user, guild_id, reason):
        """Insert or update the AFK record for *user* in *guild_id*."""
        # Renamed from ``dict`` -- the old local shadowed the builtin.
        record = {
            "_id": user.id,
            "guild_id": guild_id,
            "name": user.name,
            "reason": reason
        }
        await self.afk_db.upsert(record)

    async def fetch_afk(self, id):
        """Return the stored AFK record for *id*, or None if absent."""
        data = await self.afk_db.find_by_id(id)
        return data

    async def delete_afk(self, id):
        """Remove the AFK record for *id*."""
        await self.afk_db.delete_by_id(id)
|
nilq/baby-python
|
python
|
from .core.serializers import *
|
nilq/baby-python
|
python
|
# firstline
# Foo header content
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# Foo foo foo foo foo foo foo foo foo foo foo foo foo
# lastline
import os
a = 1
|
nilq/baby-python
|
python
|
from app.schemas.game_schema import Positions, Action
from .action_handler import ActionHandler
class MoveActionHandler(ActionHandler):
    """Validates and applies a player's move action."""

    @property
    def activity_text(self):
        return f"{self.player} moved"

    def execute(self):
        destination = self.payload.move_where
        current = self.game.players_position.get(self.player)
        # The destination square must be unoccupied.
        assert self.game.is_empty(destination)
        # Legal destinations depend on where the player currently stands.
        if current in Positions.jr_positions():
            assert destination == Positions.JR_B
        elif current in Positions.fd_positions():
            assert destination == Positions.FD_B
        elif current in Positions.tr_positions():
            assert destination in [Positions.FD_B, Positions.JR_B]
        elif current == Positions.JR_B:
            assert destination in [Positions.TR, Positions.JR]
        elif current == Positions.FD_B:
            assert destination in [Positions.FD, Positions.TR]
        # Commit the move, advance the turn, and record it as the last action.
        self.game.set_position(self.player, destination)
        self.game.next_turn()
        self.game.last_action = Action(
            action_type=Action.ActionType.MOVE,
        )
|
nilq/baby-python
|
python
|
import logging
import os
import re
from scanapi.errors import BadConfigurationError
from scanapi.evaluators.code_evaluator import CodeEvaluator
logger = logging.getLogger(__name__)
class StringEvaluator:
    """Expands ``${VAR}`` environment and custom variables inside strings."""

    # Matches ``${<variable>}`` with optional word chars on either side.
    variable_pattern = re.compile(
        r"(?P<something_before>\w*)(?P<start>\${)(?P<variable>[\w|-]*)(?P<end>})(?P<something_after>\w*)"
    )  # ${<variable>}

    @classmethod
    def evaluate(cls, sequence, spec_vars, is_a_test_case=False):
        """Resolve env vars, then custom vars, then any embedded code."""
        resolved = cls._evaluate_env_var(sequence)
        resolved = cls._evaluate_custom_var(resolved, spec_vars)
        return CodeEvaluator.evaluate(resolved, spec_vars, is_a_test_case)

    @classmethod
    def _evaluate_env_var(cls, sequence):
        """Replace every all-uppercase ``${NAME}`` with os.environ[NAME].

        Raises BadConfigurationError when the environment variable is unset.
        """
        for found in cls.variable_pattern.finditer(sequence):
            name = found.group("variable")
            # Any lowercase letter means this is a custom var, not an env var.
            if any(ch.islower() for ch in name):
                continue
            try:
                value = os.environ[name]
            except KeyError as e:
                raise BadConfigurationError(e)
            sequence = cls.replace_var_with_value(sequence, found.group(), value)
        return sequence

    @classmethod
    def _evaluate_custom_var(cls, sequence, spec_vars):
        """Replace every non-uppercase ``${name}`` with its spec_vars value."""
        for found in cls.variable_pattern.finditer(sequence):
            name = found.group("variable")
            # All-uppercase names are env vars and handled elsewhere.
            if name.isupper():
                continue
            if not spec_vars.get(name):
                continue
            sequence = cls.replace_var_with_value(
                sequence, found.group(), spec_vars.get(name)
            )
        return sequence

    @classmethod
    def replace_var_with_value(cls, sequence, variable, variable_value):
        """Substitute *variable* inside *sequence* with *variable_value*."""
        if variable == sequence:
            # Whole-string match: return the raw value, preserving its type.
            return variable_value
        return re.sub(re.escape(variable), str(variable_value), sequence)
|
nilq/baby-python
|
python
|
"""
A small tool to resize all Frames in a ByteBlower GUI project.
"""
import sys
import lxml.etree as ET
import random
if len(sys.argv) != 4:
    # BUG FIX: the message said "2 arguments" while three are required
    # (argv[0] is the script name itself).
    print('Expected 3 arguments: <src bbp> <target bpp> <new frame size>')
    sys.exit(-1)

filename = sys.argv[1]
target_name = sys.argv[2]

try:
    new_size = int(sys.argv[3])
except ValueError:
    print('The new frame size should be an integer, not "%s"' % sys.argv[3])
    # BUG FIX: without exiting, execution continued and later hit a
    # NameError because new_size was never bound.
    sys.exit(-1)

try:
    with open(filename, 'r') as f:
        tree = ET.parse(f)
except Exception:
    print("Can't parse '%s'" % filename)
    # BUG FIX: same as above -- tree would be unbound further down.
    sys.exit(-1)
def resize_string(in_str, target_size, filler_char='a'):
    """Truncate or pad *in_str* with *filler_char* to exactly *target_size* chars."""
    clipped = in_str[:target_size]
    return clipped + filler_char * (target_size - len(clipped))
# Resize every frame payload; hex encoding uses two characters per byte.
for fr in tree.iterfind('Frame'):
    fr.attrib['bytesHexString'] = resize_string(fr.attrib['bytesHexString'], 2 * new_size)

tree.write(target_name)
|
nilq/baby-python
|
python
|
# This Source Code Form is subject to the terms of the MIT
# License. If a copy of the same was not distributed with this
# file, You can obtain one at
# https://github.com/akhilpandey95/altpred/blob/master/LICENSE.
import sys
import json
import certifi
import urllib3
import requests
import numpy as np
import pandas as pd
from tqdm import tqdm
from ast import literal_eval
from preprocessing import LDA
from bs4 import BeautifulSoup as BS
from collections import defaultdict
# function for computing sigmoid of a value
def sigmoid(value, derivative=False):
    """
    Return the sigmoid of a numeric value

    Parameters
    ----------
    arg1 | value: int
        The numeric value intended to convert into a continuos range
    arg2 | derivative: bool
        When True, return sigmoid(value) * (1 - sigmoid(value)) instead

    Returns
    -------
    Float
        float
    """
    try:
        # BUG FIX: previously computed np.exp(-x) with an undefined name
        # ``x``, so every call raised NameError, fell into the except
        # branch, and returned 0.
        result = 1. / (1. + np.exp(-value))
        # check if derivative is required
        if derivative:
            return result * (1. - result)
        # return the sigmoid
        return result
    except Exception:
        # best-effort fallback: return zero on any numeric failure
        return np.zeros(1)[0]
# function for downloading the content from a URI
def obtain_content(uri):
    """
    Return the decoded response after making a GET request to the URI

    Parameters
    ----------
    arg1 | uri: str
        The URI to fetch

    Returns
    -------
    String
        str, or None when the request or decoding fails
    """
    try:
        # certificate-verified connection pool
        pool = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
        response = pool.request('GET', uri)
        # decode the body and hand it back
        return response.data.decode('utf-8')
    except:
        # best-effort: any network/decoding failure yields None
        return None
# fetch the altmetric top-100 page for a year and extract its articles
def soupify(year):
    """
    Prepare a soup object holding all the articles for *year*.

    Parameters
    ----------
    arg1 | year: int
        The year from which we want to extrapolate information

    Returns
    -------
    list of article tags, or None on failure
    """
    try:
        # fetch https://www.altmetric.com/top100/<year> and parse it
        html = obtain_content('https://www.altmetric.com/top100/' + str(year))
        return BS(html, 'html.parser').find_all('article')
    except:
        return None
# function for extracting article information from the soup object
def extract_article_information_year_2014(soup):
    """
    Collect article information from the soup object

    Parameters
    ----------
    arg1 | soup: bs4.element.Tag
        The specific article we are looking to extrapolte information

    Returns
    -------
    Dictionary
        collections.defaultdict, or None when any lookup fails
    """
    try:
        # accumulator for the scraped fields
        data = defaultdict(dict)
        # add the article rank
        data['ranking'] = int(soup.find('div', class_='ranking').text)
        # add the altmetric id (parsed out of the details link query string)
        data['altmetric_id'] = int(soup.find('div', class_='metrics').find('a')['href'].split('=')[1])
        # add the DOI of the article
        data['doi'] = soup.find('h2').find('a')['href']
        # add the title of the article
        data['title'] = soup.find('h2').find('a').getText()
        # add the author information of the article
        data['authors'] = soup.find('div', class_='subtitle').text.strip()
        # add the journal name of the article (from the 'Journal' row)
        data['journal'] = [x.find_next('td').text for x in \
            soup.find('div', class_='details').find('table', class_='article-data') \
            .find_all('th') if 'Journal' in x.text][0]
        # add the category of the article (same table, 'Category' row)
        data['category'] = [x.find_next('td').text for x in \
            soup.find('div', class_='details').find('table', class_='article-data') \
            .find_all('th') if 'Category' in x.text][0]
        # add the tweet count of the article (first token of the twitter entry)
        data['tweet_count'] = int([x.next_sibling.text.split(' ') \
            for x in \
            soup.find('div', class_='mentions').find_all('dt') if 'twitter' in x.text][0][0])
        # return the data
        return data
    except:
        # best-effort scraper: any missing element yields None
        return None
# function for iterating the information extraction from the soup object
def get_info_top_n(n, year, function, data, save=False):
    """
    Iterate and collect article information for the top *n* articles
    belonging to a given year.

    Parameters
    ----------
    arg1 | n: int
        Number of articles we are looking to extrapolte information
    arg2 | year: int
        The specific year we are looking to extrapolte information
    arg3 | function: function
        Extractor applied to each article soup
    arg4 | data: function
        Callable (e.g. soupify) producing the article list for *year*
    arg5 | save: bool
        When True, also write the result to a CSV file

    Returns
    -------
    Dataframe
        pandas.DataFrame, or None on failure
    """
    try:
        # NOTE: data(year) is re-evaluated per article, mirroring the
        # original call pattern (one page fetch per article).
        rows = [function(data(year)[idx]) for idx in tqdm(range(n))]
        frame = pd.DataFrame(rows)
        # optionally persist the dataframe as <extractor>_<year>.csv
        if save:
            frame.to_csv(str(function) + '_' + str(year) + '.csv', encoding='utf-8')
        return frame
    except:
        return None
if __name__ == '__main__':
    # scrape and print the top-3 2014 articles as a smoke test
    print(get_info_top_n(3, 2014, extract_article_information_year_2014, soupify))
    # read a dataframe of previously collected 2014 altmetrics data
    data = pd.read_csv('altmetrics_j2014_full_gamma.csv')
    # parse the stringified subject lists back into Python lists
    data = data.assign(pub_subjects = list(map(literal_eval, data['pub_subjects'])))
    # remove rows with empty subject lists
    data = data.loc[data.pub_subjects.apply(len) != 0].reset_index(drop=True)
    # obtain the X samples (comma-joined subject strings)
    X = [', '.join(x) for x in data['pub_subjects']]
    # init the LDA class object
    model = LDA()
    # tokenize and normalize the input (only the first 10 docs -- presumably a demo run)
    input = [model.normalize(doc).split() for doc in tqdm(X[:10])]
    # train the LDA model -- assumes args are (corpus, num_topics, passes); TODO confirm
    output = model.train(input, 10, 5)
    # print the topics
    print(output.print_topics(num_topics=10, num_words=5))
else:
    # NOTE(review): calling sys.exit on plain import is unusual -- confirm intended
    sys.exit(0)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, sys, json, re, shutil
from utils.queryBuilder import postQuery
def prep_inputs(ml_dir, ctx_file, in_file):
    """Prepare the working directory and input JSON for a classifier run.

    Reads the job context from *ctx_file*, derives a classifier name from
    the classmap/feature versions plus the submitting user and rule name,
    queries for input urls, then creates a product directory named after
    the classifier, chdirs into it, and writes the input file plus copies
    of the classmap and feature files there.

    :param ml_dir: base dir holding 'classmaps' and 'features' subdirectories
    :param ctx_file: path to the context JSON file
    :param in_file: filename of the input JSON to write in the product dir
    :raises RuntimeError: if the classmap or feature version cannot be parsed
    """
    # get context
    with open(ctx_file) as f:
        j = json.load(f)

    # get kwargs
    kwargs = j #mstarch - with containerization, "kwargs" are in context at top level #json.loads(j['rule']['kwargs'])

    # get classmap file and version (classmap_<datavX>.json -> datavX)
    cm_file = os.path.basename(kwargs['classmap_file'].strip())
    match = re.search(r'classmap_(datav.*?)\.json', cm_file)
    if not match:
        raise RuntimeError("Failed to extract classmap version: %s" % cm_file)
    cm_version = match.group(1)

    # get features file and version (<featvX>.json -> featvX)
    ft_file = os.path.basename(kwargs['feat_file'].strip())
    match = re.search(r'(featv.*?)\.json', ft_file)
    if not match:
        raise RuntimeError("Failed to extract feature version: %s" % ft_file)
    ft_version = match.group(1)

    # set classifier ID
    clf_version = kwargs['clf_version']
    clf_type = kwargs['clf_type']
    username = j['username'] #mstarch - username is a paramemter
    rule_name = j['name'] #mstarch - rule_name is a parameter
    clf_name = "predictor_model-phunw_clfv%s_%s_%s-%s-%s" % (clf_version, cm_version,
                                                             ft_version, username, rule_name)

    # get urls
    ret, status = postQuery({ 'query': j['query']}) #mstarch - passthrough is now a parameter
    urls = [i['url'] for i in ret]

    # create input json
    input = {
        "clf_name": clf_name,
        "clf_type": clf_type,
        "classmap_file": cm_file,
        "feat_file": ft_file,
        "crossvalidate": 0,
        "saveclf": 1,
        "cacheoutput": 0,
        "urls": urls,
    }

    # create product directory and chdir
    # NOTE(review): os.chdir mutates process-wide state and is never undone here.
    os.makedirs(clf_name)
    os.chdir(clf_name)

    # write input file
    with open(in_file, 'w') as f:
        json.dump(input, f, indent=2)

    # copy classmap and feature files
    shutil.copy(os.path.join(ml_dir, 'classmaps', cm_file), cm_file)
    shutil.copy(os.path.join(ml_dir, 'features', ft_file), ft_file)


if __name__ == "__main__":
    # args: <ml_dir> <context json path> <input filename>
    prep_inputs(sys.argv[1], sys.argv[2], sys.argv[3])
|
nilq/baby-python
|
python
|
# Read the name and weight of several people, storing everything in a
# list of [name, weight] pairs.  At the end show:
# A)- How many people were registered
# B)- A listing of the heaviest people
# C)- A listing of the lightest people
temp = []
pessoas = []   # accumulated [name, weight] entries ("pessoas" = people)
mai = men = 0  # running maximum ("maior") and minimum ("menor") weight
while True:
    temp.append(str(input('Nome: ')))
    temp.append(float(input('Peso: ')))
    pessoas.append(temp[:])  # copy, because temp is cleared each round
    if len(pessoas) == 1:
        # the first entry initializes both extremes
        mai = men = temp[1]
    else:
        if temp[1] > mai:
            mai = temp[1]
        if temp[1] < men:
            men = temp[1]
    temp.clear()
    # ask whether to continue; any answer containing N/n stops the loop
    esc = str(input('Deseja continuar? [S/N]: '))
    if esc in 'Nn':
        break
print(f'Foram cadastradas {len(pessoas)} pessoas')
print(f'O maior peso foi de {mai}Kg. Peso de ', end='')
for p in pessoas:
    if p[1] == mai:
        print(f'{p[0]} ', end='')
print()
print(f'O menor peso foi de {men}Kg. Peso de ', end='')
for p in pessoas:
    if p[1] == men:
        print(f'{p[0]} ', end='')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*_
#
# Copyright (c) 2020, Pureport, Inc.
# All Rights Reserved
"""
The credentials module handles loading, parsing and returning a valid
object that can be passed into a :class:`pureport.session.Session`
instance to authenticate to the Pureport API. This module will search
for credentials in well-known locations as well as attempt to load
credentials from the current environment.
The method of precedence for credentials is:
1) Environment
2) Profile in ~/.pureport/credentials
3) "default" profile in ~/.pureport/credentials
If no valid API key and/or API secret could be loaded, then a
:class:`pureport.exceptions.PureportError` exception is raised.
"""
from __future__ import absolute_import
import os
import json
import logging
from collections import namedtuple
import yaml
from pureport import defaults
from pureport.exceptions import PureportError
log = logging.getLogger(__name__)
__all__ = ('default',)
def default():
    """Attempts to discover the configured credentials

    Resolution order for each setting:

    1) Environment (exposed through :mod:`pureport.defaults`):
       PUREPORT_API_KEY, PUREPORT_API_SECRET, PUREPORT_API_BASE_URL
    2) The profile named by ``current_profile`` (default: ``'default'``)
       in ~/.pureport/credentials.yml, .yaml or .json, checked in that
       order; an unknown profile falls back to the ``'default'`` profile.

    The credentials file has the following structure:

    .. code-block:: yaml

        ---
        current_profile: <string, default='default'>
        profiles:
          <string>:
            api_url: <string>
            api_key: <string>
            api_secret: <string>

    Returns a tuple whose first element is a Credentials namedtuple and
    whose second element is the Pureport API base url -- the required
    arguments for :class:`pureport.session.Session`.

    :return: a valid credentials instance, an api base url
    :rtype: tuple
    :raises: :class:`pureport.exceptions.PureportError` when no API
        key/secret can be found
    """
    file_path = defaults.credentials_path
    file_name = defaults.credentials_filename

    for ext in ('yml', 'yaml', 'json'):
        deserializer = json.loads if ext == 'json' else yaml.safe_load
        fp = os.path.join(file_path, '{}.{}'.format(file_name, ext))
        if os.path.exists(fp):
            with open(fp) as f:
                log.info("loading credentials file {}".format(fp))
                content = deserializer(f.read())
            break
    else:
        content = None

    values = {}
    if content:
        profile = content.get('current_profile', 'default')
        profiles = content.get('profiles', {})
        # BUG FIX: ``profiles.get(profile, profiles.get('default'))`` could
        # return None (unknown profile and no 'default' profile), which then
        # crashed below with AttributeError instead of the intended
        # PureportError.  Fall back explicitly instead.
        values = profiles.get(profile)
        if values is None:
            values = profiles.get('default')
        if values is None:
            values = {}

    kwargs = {
        'key': defaults.api_key or values.get('api_key'),
        'secret': defaults.api_secret or values.get('api_secret')
    }

    base_url = defaults.api_base_url or values.get('api_url')

    if any((kwargs['key'] is None, kwargs['secret'] is None)):
        raise PureportError("missing or invalid credentials")

    return namedtuple('Credentials', kwargs)(**kwargs), base_url
|
nilq/baby-python
|
python
|
import sublime, sublime_plugin
import winreg, subprocess
import re
from os import path
CONEMU = "C:\\Program Files\\ConEmu\\ConEmu64.exe"
CONEMUC = "C:\\Program Files\\ConEmu\\ConEmu\\ConEmuC64.exe"

# Try to locate ConEmu via the registry App Paths; keep the defaults above
# when anything fails.
apps = None
try:  # can we find ConEmu from App Paths?
    apps = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths")
    subkeys, _, _ = winreg.QueryInfoKey(apps)
    for k in range(subkeys):
        app = winreg.EnumKey(apps, k)
        if app.startswith("ConEmu"):
            cemu = winreg.QueryValue(apps, app)
            if path.exists(cemu):
                CONEMU = cemu
                dirName, fileName = path.split(cemu)
                filePath = path.join(dirName, "ConEmu", fileName.replace('ConEmu', 'ConEmuC'))
                if path.exists(filePath):
                    CONEMUC = filePath
            break
except OSError:
    # Registry discovery is best-effort; a failure here previously killed
    # plugin loading because there was no except clause at all.
    pass
finally:
    # BUG FIX: when OpenKey itself failed, ``apps`` was unbound and the old
    # bare ``finally`` raised NameError.  Only close a key we actually opened.
    if apps is not None:
        winreg.CloseKey(apps)

# TODO: bundle Expand-Alias with functions to save it to disk and/or send it to sublime
# TODO: cmder style bundle including ConEmu, Sublime, PSReadLine and these macros

# Hide the console windows spawned by the ConEmuC calls below.
si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = subprocess.SW_HIDE
### For best results, we use PSReadLine and rely on it's hotkeys:
### We need KillLine and Yank set so we can copy/paste any existing command
# Set-PSReadlineKeyHandler Ctrl+k KillLine
# Set-PSReadlineKeyHandler Ctrl+i Yank
# { "keys": ["f5"], "command": "conemu_script" }
class ConemuScriptCommand(sublime_plugin.TextCommand):
    """Run the current file (or unsaved buffer contents) in ConEmu, ISE-style."""

    def run(self, edit):
        # Saved files are sent by path; unsaved buffers are pasted verbatim.
        if self.view.file_name():
            if self.view.is_dirty():
                self.view.run_command("save")
            payload = self.view.file_name()
        else:
            payload = self.view.substr(sublime.Region(0, self.view.size()))
        payload = re.sub(r'\\', r'\\\\', payload)
        # Stash any half-typed command with PSReadline's KillLine hotkey...
        subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "Home", "^k"], startupinfo=si)
        subprocess.call([CONEMUC, "-GUIMACRO:0", "PASTE", "2", payload + "\\n"], startupinfo=si)
        # ...and restore it afterwards with the Yank hotkey.
        subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "End", "^i"], startupinfo=si)
        subprocess.call([CONEMU, "-SHOWHIDE"], startupinfo=si)
# { "keys": ["f8"], "command": "conemu_selection" }
class ConemuSelectionCommand(sublime_plugin.TextCommand):
    """Run the selected text (or the caret's lines) in ConEmu."""

    def run(self, edit):
        chunks = []
        for region in self.view.sel():
            if region.empty():
                # Unlike ISE we use the caret's whole line without selecting it.
                chunks.append(self.view.substr(self.view.line(region)))
            else:
                chunks.append(self.view.substr(region))
        payload = re.sub(r'\\', r'\\\\', "\n".join(chunks) + "\n")
        # Stash any half-typed command with PSReadline's KillLine hotkey...
        subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "Home", "^k"], startupinfo=si)
        subprocess.call([CONEMUC, "-GUIMACRO:0", "PASTE", "2", payload], startupinfo=si)
        # ...and restore it afterwards with the Yank hotkey.
        subprocess.call([CONEMUC, "-GUIMACRO:0", "KEYS", "End", "^i"], startupinfo=si)
        subprocess.call([CONEMU, "-SHOWHIDE"], startupinfo=si)
|
nilq/baby-python
|
python
|
"""
Created on Sat Mar 09 16:33:01 2020
@author: Pieter Cawood
"""
from mesa import Model
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from mesa_agents import Parking, Wall, Space, Robot
from ta_world import MAPNODETYPES
class Warehouse(Model):
    """Mesa model of a warehouse floor: walls, endpoints, parking and robots.

    Built from a pre-computed world (map grid plus per-agent planned paths)
    and per-agent TSP task sequences; stepping the model replays the paths
    and retires tasks as robots reach their delivery endpoints.
    """

    def __init__(self, world, tsp_seqs, last_sim_step):
        # world: map grid plus per-agent planned paths
        # tsp_seqs: per-agent queues of tasks (with release times / endpoints)
        # last_sim_step: step index at which the simulation stops
        self.schedule = RandomActivation(self)
        self.world = world
        self.tsp_seq = tsp_seqs
        self.last_sim_step = last_sim_step
        self.time_step = 0
        self.task_count = 0
        self.grid = MultiGrid(world.width, world.height, torus=False)
        self.data_collector = DataCollector(
            {"task_count": "task_count"}
        )
        self.robot_count = 0
        # Set up MultiGrid from csv map
        for element in world:
            if world[element] == MAPNODETYPES.WALL:
                # Wall
                agent = Wall(element, self)
                self.grid.place_agent(agent, element)
                self.schedule.add(agent)
            # Task endpoint
            elif world[element] == MAPNODETYPES.TASK_ENDPOINT:
                agent = Space(element, self)
                self.grid.place_agent(agent, element)
                self.schedule.add(agent)
            # Robot spawn endpoint
            elif world[element] == MAPNODETYPES.PARKING:
                # Parking location
                agent = Parking(element, self)
                self.grid.place_agent(agent, element)
                self.schedule.add(agent)
                # Robot location (At park initially)
                # NOTE(review): the counter is incremented before indexing,
                # so world.agents appears to be 1-indexed -- confirm.
                self.robot_count += 1
                agent = Robot(element, self, world.agents[self.robot_count].path)
                self.grid.place_agent(agent, element)
                self.schedule.add(agent)
        self.running = True

    def step(self):
        """Advance one tick: retire delivered tasks, recount the backlog,
        stop once last_sim_step is reached, then step agents and collect data."""
        new_task_count = 0
        # Update tasks counter
        for seq_id in self.tsp_seq:
            if self.tsp_seq[seq_id].qsize() > 0:
                # a task becomes live once its release time has passed
                if self.time_step >= self.tsp_seq[seq_id].queue[0].release_time:
                    if self.time_step in self.world.agents[seq_id].path:
                        # pop the task when the robot sits on its delivery endpoint
                        if self.tsp_seq[seq_id].queue[0].delivery_endpoint == \
                                self.world.agents[seq_id].path[self.time_step]:
                            self.tsp_seq[seq_id].get()
            new_task_count += self.tsp_seq[seq_id].qsize()
        self.task_count = new_task_count
        # Stop running once finished
        if self.time_step >= self.last_sim_step:
            self.running = False
        # Next step
        self.time_step += 1
        self.schedule.step()
        self.data_collector.collect(self)
|
nilq/baby-python
|
python
|
'''
Implement strStr().
Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.
Example 1:
Input: haystack = "hello", needle = "ll"
Output: 2
Example 2:
Input: haystack = "aaaaa", needle = "bba"
Output: -1
Clarification:
What should we return when needle is an empty string? This is a great question to ask during an interview.
For the purpose of this problem, we will return 0 when needle is an empty string. This is consistent to C's strstr() and Java's indexOf().
'''
class Solution:
    def strStr(self, haystack: str, needle: str) -> int:
        """Return the index of the first occurrence of *needle* in *haystack*.

        Returns -1 when absent (or when haystack is None); an empty needle
        returns 0, matching C's strstr() and Java's indexOf().
        """
        if haystack is None:  # FIX: identity check; ``== None`` is unidiomatic
            return -1
        if needle == '':
            return 0
        # Slide a needle-sized window across every feasible start position.
        for i in range(0, len(haystack) - len(needle) + 1):
            if needle == haystack[i:i + len(needle)]:
                return i
        return -1
if __name__ == "__main__":
    # Quick smoke test: searching "a" inside "a" should print 0.
    print(Solution().strStr('a', 'a'))
|
nilq/baby-python
|
python
|
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
# Scrape player headshot image URLs from pro-football-reference.com:
# walk the A-Z player index, visit every player page, and record a
# {Player, Player_img} entry for each page that has a media item.
players_image_urls = []

url = 'https://www.pro-football-reference.com/players/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) Gecko/20100101 Firefox/92.0'}
page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
soup = bs(page.content, 'html.parser')

# letter links of the alphabetical player index
ref_alphabet = soup.find('ul',{'class':'page_index'})
ref_li = ref_alphabet.find_all('li')

for j in ref_li:
    # NOTE(review): the ``while True`` wrappers appear to exist only to turn
    # scraping errors into silent ``break``s; the reconstructed nesting below
    # is the break-count-consistent reading -- confirm against the original
    # file's indentation.
    while True:
        try:
            ref_li_letter = j.find('a', href=True)
            for a_href in j.find_all('a', href=True):
                alphabet_letter_ref = a_href['href']
                base = 'https://www.pro-football-reference.com'
                url = base + str(alphabet_letter_ref)
                page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
                soup = bs(page.content, 'html.parser')
                # section listing all players for this letter
                players_section = soup.find('div',{'id':'div_players'})
                for a_href_players in players_section.find_all('a', href=True):
                    player_link = a_href_players['href']
                    base = 'https://www.pro-football-reference.com'
                    url = base + str(player_link)
                    page = requests.get(url,headers=headers, timeout=2, allow_redirects = True )
                    soup = bs(page.content, 'html.parser')
                    while True:
                        try:
                            if soup.find('div', {'class': 'media-item'}):
                                player_img = soup.find('div', {'class': 'media-item'})
                                img = player_img.find('img')
                                img_src = img['src']
                                # Player Name
                                player_name = soup.find('h1', {'itemprop': 'name'})
                                player_name_span = player_name.find('span')
                                player_name_text = player_name_span.text
                                player_image = {
                                    "Player": player_name_text,
                                    "Player_img": img_src
                                }
                                players_image_urls.append(player_image)
                            if not soup.find('div', {'class': 'media-item'}):
                                break
                        except:
                            break
                        break
        except:
            break

print('process done')
# collect results and persist them to CSV
player_img_df = pd.DataFrame(players_image_urls)
print(player_img_df.head)
player_img_df.to_csv('players_img_edited.csv', index=False)
|
nilq/baby-python
|
python
|
import pytest
from ethdata import ethdata
class TestAccountSetters(object):
    """Property-setter behavior of ethdata.Account."""

    def test_setter_1_address(self):
        acct = ethdata.Account("0x1cB424cB77B19143825004d0bd0a4BEE2c5e91A8")
        # Addresses are normalized to lowercase on assignment.
        assert acct.address == "0x1cb424cb77b19143825004d0bd0a4bee2c5e91a8"
        # An empty address is rejected.
        with pytest.raises(ValueError):
            acct.address = ""

    def test_setter_2_transaction_receipts(self):
        acct = ethdata.Account("0x1cB424cB77B19143825004d0bd0a4BEE2c5e91A8")
        acct.transaction_receipts = "tx"
        assert acct.transaction_receipts == "tx"

    def test_setter_3_query_range(self):
        acct = ethdata.Account("0x1cB424cB77B19143825004d0bd0a4BEE2c5e91A8")
        assert acct.query_range == {}
        # Full range, start-only and end-only are each stored as given.
        acct.query_range = {"start": "2018-01-01", "end": "2018-01-02"}
        assert acct.query_range == {"start": "2018-01-01", "end": "2018-01-02"}
        acct.query_range = {"start": "2018-01-03"}
        assert acct.query_range == {"start": "2018-01-03"}
        acct.query_range = {"end": "2018-01-04"}
        assert acct.query_range == {"end": "2018-01-04"}
        # Unknown keys are discarded, resetting the range.
        acct.query_range = {"key": "value"}
        assert acct.query_range == {}
nilq/baby-python
|
python
|
import struct
from binascii import b2a_hex, a2b_hex
from pymodbus.exceptions import ModbusIOException
from pymodbus.utilities import checkLRC, computeLRC
from pymodbus.framer import ModbusFramer, FRAME_HEADER, BYTE_ORDER
ASCII_FRAME_HEADER = BYTE_ORDER + FRAME_HEADER
# --------------------------------------------------------------------------- #
# Logging
# --------------------------------------------------------------------------- #
import logging
_logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------- #
# Modbus ASCII Message
# --------------------------------------------------------------------------- #
class ModbusAsciiFramer(ModbusFramer):
"""
Modbus ASCII Frame Controller::
[ Start ][Address ][ Function ][ Data ][ LRC ][ End ]
1c 2c 2c Nc 2c 2c
* data can be 0 - 2x252 chars
* end is '\\r\\n' (Carriage return line feed), however the line feed
character can be changed via a special command
* start is ':'
This framer is used for serial transmission. Unlike the RTU protocol,
the data in this framer is transferred in plain text ascii.
"""
def __init__(self, decoder, client=None):
    """ Initializes a new instance of the framer

    :param decoder: The decoder implementation to use
    :param client: Optional client reference
    """
    self.decoder = decoder
    self.client = client
    # parsing state: raw byte buffer plus the current frame header
    self._buffer = b''
    self._header = {'lrc': '0000', 'len': 0, 'uid': 0x00}
    self._hsize = 0x02
    self._start = b':'
    self._end = b"\r\n"
# ----------------------------------------------------------------------- #
# Private Helper Functions
# ----------------------------------------------------------------------- #
def decode_data(self, data):
    """Extract the unit id and function code from an ascii frame prefix."""
    if len(data) <= 1:
        return dict()
    # chars 1-2 hold the unit id, chars 3-4 the function code (hex digits)
    return dict(unit=int(data[1:3], 16), fcode=int(data[3:5], 16))
def checkFrame(self):
""" Check and decode the next frame
:returns: True if we successful, False otherwise
"""
start = self._buffer.find(self._start)
if start == -1:
return False
if start > 0: # go ahead and skip old bad data
self._buffer = self._buffer[start:]
start = 0
end = self._buffer.find(self._end)
if end != -1:
self._header['len'] = end
self._header['uid'] = int(self._buffer[1:3], 16)
self._header['lrc'] = int(self._buffer[end - 2:end], 16)
data = a2b_hex(self._buffer[start + 1:end - 2])
return checkLRC(data, self._header['lrc'])
return False
def advanceFrame(self):
""" Skip over the current framed message
This allows us to skip over the current message after we have processed
it or determined that it contains an error. It also has to reset the
current frame header handle
"""
self._buffer = self._buffer[self._header['len'] + 2:]
self._header = {'lrc': '0000', 'len': 0, 'uid': 0x00}
def isFrameReady(self):
""" Check if we should continue decode logic
This is meant to be used in a while loop in the decoding phase to let
the decoder know that there is still data in the buffer.
:returns: True if ready, False otherwise
"""
return len(self._buffer) > 1
def addToFrame(self, message):
""" Add the next message to the frame buffer
This should be used before the decoding while loop to add the received
data to the buffer handle.
:param message: The most recent packet
"""
self._buffer += message
def getFrame(self):
""" Get the next frame from the buffer
:returns: The frame data or ''
"""
start = self._hsize + 1
end = self._header['len'] - 2
buffer = self._buffer[start:end]
if end > 0:
return a2b_hex(buffer)
return b''
def resetFrame(self):
""" Reset the entire message frame.
This allows us to skip ovver errors that may be in the stream.
It is hard to know if we are simply out of sync or if there is
an error in the stream as we have no way to check the start or
end of the message (python just doesn't have the resolution to
check for millisecond delays).
"""
self._buffer = b''
self._header = {'lrc': '0000', 'len': 0, 'uid': 0x00}
def populateResult(self, result):
""" Populates the modbus result header
The serial packets do not have any header information
that is copied.
:param result: The response packet
"""
result.unit_id = self._header['uid']
# ----------------------------------------------------------------------- #
# Public Member Functions
# ----------------------------------------------------------------------- #
def processIncomingPacket(self, data, callback, unit, **kwargs):
"""
The new packet processing pattern
This takes in a new request packet, adds it to the current
packet stream, and performs framing on it. That is, checks
for complete messages, and once found, will process all that
exist. This handles the case when we read N + 1 or 1 // N
messages at a time instead of 1.
The processed and decoded messages are pushed to the callback
function to process and send.
:param data: The new packet data
:param callback: The function to send results to
:param unit: Process if unit id matches, ignore otherwise (could be a
list of unit ids (server) or single unit id(client/server))
:param single: True or False (If True, ignore unit address validation)
"""
if not isinstance(unit, (list, tuple)):
unit = [unit]
single = kwargs.get('single', False)
self.addToFrame(data)
while self.isFrameReady():
if self.checkFrame():
if self._validate_unit_id(unit, single):
frame = self.getFrame()
result = self.decoder.decode(frame)
if result is None:
raise ModbusIOException("Unable to decode response")
self.populateResult(result)
self.advanceFrame()
callback(result) # defer this
else:
_logger.error("Not a valid unit id - {}, "
"ignoring!!".format(self._header['uid']))
self.resetFrame()
else:
break
def buildPacket(self, message):
""" Creates a ready to send modbus packet
Built off of a modbus request/response
:param message: The request/response to send
:return: The encoded packet
"""
encoded = message.encode()
buffer = struct.pack(ASCII_FRAME_HEADER, message.unit_id,
message.function_code)
checksum = computeLRC(encoded + buffer)
packet = bytearray()
params = (message.unit_id, message.function_code)
packet.extend(self._start)
packet.extend(('%02x%02x' % params).encode())
packet.extend(b2a_hex(encoded))
packet.extend(('%02x' % checksum).encode())
packet.extend(self._end)
return bytes(packet).upper()
# __END__
|
nilq/baby-python
|
python
|
#
# Copyright (c) nexB Inc. and others.
# SPDX-License-Identifier: Apache-2.0
#
# Visit https://aboutcode.org and https://github.com/nexB/ for support and download.
# ScanCode is a trademark of nexB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from os.path import dirname
from os.path import exists
from os.path import join
from commoncode.testcase import FileBasedTesting
from commoncode import fileutils
from extractcode import new_name
class TestNewName(FileBasedTesting):
    """Tests for extractcode.new_name unique-name generation."""

    test_data_dir = join(dirname(__file__), 'data')

    def _assert_new_name(self, base_dir, name, is_dir, expected):
        # Compute a candidate new name, check it is not taken, and compare
        # its file name component against the expectation.
        renamed = new_name(join(base_dir, name), is_dir=is_dir)
        assert not exists(renamed)
        assert fileutils.file_name(renamed) == expected

    def test_new_name_without_extensions(self):
        test_dir = self.get_test_loc('new_name/noext', copy=True)
        self._assert_new_name(test_dir, 'test', False, 'test_4')
        self._assert_new_name(test_dir, 'TEST', False, 'TEST_4')
        self._assert_new_name(test_dir, 'test_1', True, 'test_1_1')

    def test_new_name_with_extensions(self):
        test_dir = self.get_test_loc('new_name/ext', copy=True)
        self._assert_new_name(test_dir, 'test.txt', False, 'test_3.txt')
        self._assert_new_name(test_dir, 'TEST.txt', False, 'TEST_3.txt')
        self._assert_new_name(test_dir, 'TEST.tXt', False, 'TEST_3.tXt')
        self._assert_new_name(test_dir, 'test.txt', True, 'test.txt_2')
        self._assert_new_name(test_dir, 'teST.txt', True, 'teST.txt_2')

    def test_new_name_with_empties(self):
        base_dir = self.get_temp_dir()
        self.assertRaises(AssertionError, new_name, '', is_dir=False)

        renamed = new_name(base_dir + '/', is_dir=False)
        assert renamed
        assert not exists(renamed)

        renamed = new_name(join(base_dir, '.'), is_dir=False)
        assert not exists(renamed)
        assert fileutils.file_name(renamed) == '_'

        renamed = new_name(base_dir + '/', is_dir=True)
        assert not exists(renamed)
        assert fileutils.file_name(renamed)

        renamed = new_name(join(base_dir, '.'), is_dir=True)
        assert not exists(renamed)
        assert fileutils.file_name(renamed) == '_'
|
nilq/baby-python
|
python
|
b='You Yi Xue Sa Xu Li Li Yuan Dui Huo Sha Leng Pou Hu Guo Bu Rui Wei Sou An Yu Xiang Heng Yang Xiao Yao Fan Bi Ci Heng Tao Liu Fei Zhu Tou Xi Zan Yi Dou Yuan Jiu Zai Bo Ti Ying Tou Yi Nian Shao Ben Gou Ban Mo Gai En She Caan Zhi Yang Jian Yuan Shui Ti Wei Xun Zhi Yi Ren Shi Hu Ne Ye Jian Sui Ying Bao Hu Hu Ye Yang Lian Xi En Dui Zan Zhu Ying Ying Jin Chuang Dan Kuai Yi Ye Jian En Ning Ci Qian Xue Bo Mi Shui Mo Liang Qi Qi Shou Fu Bo Beng Bie Yi Wei Huan Fan Qi Mao Fu Ang Ang Fu Qi Qun Tuo Yi Bo Pian Ba Keoi Xuan Baai Yu Chi Lu Yi Li Zaau Niao Xi Wu Gwing Lei Pu Zhuo Zui Zhuo Chang An Er Yu Leng Fu Zha Hun Chun Sou Bi Bi Zha Song He Li Giu Han Zai Gu Cheng Lou Mo Mi Mai Ao Zhe Zhu Huang Fan Deng Tong Du Wo Wei Ji Chi Lin Biao Long Jian Nie Luo Shen Ngon Gua Nie Yi Ku Wan Wa Qia Bo Kao Ling Gan Gua Hai Kuang Heng Kui Ze Ting Lang Bi Huan Po Yao Wan Ti Sui Kua Dui Ao Jian Mo Kui Kuai An Ma Qing Qiao Kao Hao Duo Xian Nai Suo Jie Pi Pa Song Chang Nie Man Song Ci Xian Kuo Gai Di Pou Tiao Zu'
|
nilq/baby-python
|
python
|
import json
from rest_framework.test import APITestCase
from django.urls import reverse
from rest_framework import status
from django.contrib.auth import get_user_model
from authors.apps.articles.models import Articles
from authors.apps.profiles.models import Profile
class TestGetEndpoint(APITestCase):
    """Exercise the article retrieval (GET) endpoints."""

    def setUp(self):
        """ Prepares table for tests """
        self.token = self.get_user_token()
        self.slug = "life_love_death"
        self.title = "Life Love and Death"
        self.description = "What is life?"
        self.body = "This is the real life body."
        self.tagList = "life,love,death"
        self.author = 'TestAuthor'
        author_profile = Profile.objects.get(username=self.author)
        self.article = Articles(
            slug=self.slug,
            title=self.title,
            description=self.description,
            body=self.body,
            tagList=self.tagList,
            author=author_profile)
        self.article.save()

    def _authed_get(self, url_name, **reverse_kwargs):
        # Attach the auth token, resolve the named route and issue a GET.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
        return self.client.get(reverse(url_name, kwargs=reverse_kwargs or None))

    def test_get_all_articles(self):
        """
        This tests getting all articles successfully
        """
        resp = self._authed_get('articles')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_successfully_not_getting_articles_if_token_not_used(self):
        """
        Unauthorized error returned if no token is passed in
        """
        resp = self.client.get(reverse('articles'))
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_get_article_id(self):
        """
        Tests the pk of the article is true
        """
        resp = self._authed_get('articles')
        self.assertIn(b"1", resp.content)

    def test_articles_are_paginated(self):
        """
        This tests if the returned articles are paginated
        """
        resp = self._authed_get('articles').render()
        # this checks the number of articles in the database
        self.assertIn(b"1", resp.content)
        # next is null since there is only one article posted
        self.assertIn(b"null", resp.content)
        # previous is null since only one article has been posted
        # the page_size holds ten articles per page
        self.assertIn(b"null", resp.content)  # previous

    def test_get_specific_article(self):
        """
        This gets a specific article
        """
        resp = self._authed_get('articleSpecific', slug='life_love_death')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_getting_and_checking_articles_content(self):
        """
        This checks if the right content of an article is returned
        """
        resp = self._authed_get('articles').render()
        # checks if the body passed during posting is the one returned
        self.assertIn(b"This is the real life body.", resp.content)
        # checks if id returned is 1
        self.assertIn(b"1", resp.content)

    def test_wrong_request(self):
        """
        Checks request for a non existing article
        """
        resp = self._authed_get('articleSpecific', slug='life_love_death_live')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
        resp.render()
        self.assertIn(b"Article does not exist", resp.content)

    def get_user_token(self):
        # Register a user, force-activate the account, and return the JWT.
        payload = {
            "user": {
                "username": "TestAuthor",
                "email": "test_user@email.com",
                "password": "test123user#Password"
            }
        }
        response = self.client.post(
            reverse('register'), data=payload, format='json')
        user_model = get_user_model()
        account = user_model.objects.get(username="TestAuthor")
        account.is_active = True
        account.save()
        response.render()
        return json.loads(response.content.decode('utf-8'))['user']['token']
|
nilq/baby-python
|
python
|
from sympy import Wild, Indexed
from contextlib import contextmanager
class DestructuringError(ValueError):
    '''
    Represents an error due to the impossibility of destructuring a given term.

    At present we provide neither meaningful error messages nor objects
    related to the context in which this exception was raised; moreover, we
    do not distinguish the operator in the tackled combination term (Add, Mul,...).
    '''
    pass
# only for keep the same api, delete it when refactoring is finished,
# a good name to use could be: "destructuring_monomial_with_coeff_subscripts"
@contextmanager
def bind_Mul_indexed(term, indexed, forbidden_terms=[]):
    '''
    Destructure `term` against pattern `coeff * f[i j ...]`, binding `coeff`, `i` and `j ...`.

    I attempt to destructure the given term respect the `Mul` operator, aiming to isolate
    term `indexed`, which should be an instance of `Indexed` class, from a coefficient `coeff`,
    which collect everything but `indexed` and, optionally, objects appearing in `forbidden_terms`.
    If such destructuring fails, then I raise `DestructuringError`.

    Examples
    ========

    >>> from sympy import *

    Main track, everything is good:
    >>> f, n, k, j = IndexedBase('f'), *symbols('n k j')
    >>> term = 3 * f[n,k,j]
    >>> with bind_Mul_indexed(term, f) as (coeff, subscripts):
    ...     print('{} * {}'.format(coeff, subscripts))
    3 * [n, k, j]

    Failure, not a vanilla product:
    >>> term = 3 * f[n] + 1
    >>> try:
    ...     with bind_Mul_indexed(term, f) as (coeff, subscripts):
    ...         print('{} * {}'.format(coeff, subscripts))
    ... except DestructuringError:
    ...     print('something else')
    something else

    Failure, `f` not indexed at all:
    >>> term = 3 * f
    >>> try:
    ...     with bind_Mul_indexed(term, f) as (coeff, subscripts):
    ...         print('{} * {}'.format(coeff, subscripts))
    ... except DestructuringError:
    ...     print('something else')
    something else
    '''
    coeff_pattern = Wild('coeff', exclude=[indexed] + forbidden_terms)
    indexed_pattern = Wild('ind')
    bindings = term.match(coeff_pattern * indexed_pattern)
    # if no indexing applied then `isinstance(bindings[indexed_pattern], IndexedBase)` holds
    matched_ok = (bindings
                  and indexed_pattern in bindings
                  and coeff_pattern in bindings
                  and isinstance(bindings[indexed_pattern], Indexed))
    if not matched_ok:
        raise DestructuringError()
    _, *subscripts = bindings[indexed_pattern].args
    # do not splice subscripts, give them packed
    yield bindings[coeff_pattern], subscripts
|
nilq/baby-python
|
python
|
"""
**download.py**
A commandline utility to retrieve test data from
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/ for use in evaluating
LSAnamoly.
**usage**: download.py [-h] --params YML_PARAMS --data-dir DATA_DIR
[--sc-url SC_URL] [--mc-url MC_URL]
Retrieve datasets for LsAnomaly evaluation. By default, data is retrieved from
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/
**Arguments**
-h, --help
show this help message and exit
--params YML_PARAMS, -p YML_PARAMS
YAML file with evaluation parameters
--data-dir DATA_DIR, -d DATA_DIR
directory to store retrieved data sets
--sc-url SC_URL
optional: single class test data URL; default:
https:/ /www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/
--mc-url MC_URL
optional: Multi-class test data URL; default:
https:// www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/
"""
# The MIT License
#
# Copyright 2019 Chris Skiscim
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import bz2
import logging
import os
import requests
import yaml
# Module logger: NullHandler keeps the module silent when imported as a
# library; basicConfig below sets a verbose default format for CLI use.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
fmt = "[%(asctime)s %(levelname)-8s] [%(filename)s:%(lineno)4s - %(funcName)s()] %(message)s"  # noqa
logging.basicConfig(level=logging.INFO, format=fmt)
def unzip_write(file_path):
    """
    Reads and inflates a .bz2 file and writes the result back to disk.

    The inflated copy is written next to the original, at the same path
    minus the final 4 characters (i.e. the ".bz2" suffix). The compressed
    file is retained. Used internally.

    Args:
        file_path (str): file to inflate; expected to end with ".bz2"

    Raises:
        FileNotFoundError: if `file_path` does not exist.
    """
    try:
        with open(file_path[:-4], "wb") as new_file, bz2.BZ2File(
            file_path, "rb"
        ) as file:
            # Stream in 100 KiB chunks to keep memory bounded on large files.
            for data in iter(lambda: file.read(100 * 1024), b""):
                new_file.write(data)
    except Exception as e:
        # Fixed: `except (FileNotFoundError, Exception)` was redundant --
        # Exception already subsumes FileNotFoundError. Log with traceback,
        # then re-raise unchanged.
        logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
        raise
def write_contents(file_path, get_request):
    """
    Writes the contents of the get request to the specified file path.

    If `file_path` ends with "bz2" the payload is additionally inflated
    in place via `unzip_write()`.

    Args:
        file_path (str): file path
        get_request (requests.Response): response object (any object with a
            bytes `content` attribute works)

    Raises:
        IOError
    """
    try:
        # Fixed: the original `open(file_path, "wb").write(...)` leaked the
        # file handle until garbage collection; the context manager closes
        # it deterministically even if the write fails.
        with open(file_path, "wb") as out_file:
            out_file.write(get_request.content)
        if file_path.endswith("bz2"):
            unzip_write(file_path)
    except Exception as e:
        # (IOError, Exception) was redundant -- Exception covers IOError.
        logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
        raise
def get_request(dataset, file_path, sc_url, mc_url):
    """
    Retrieve *dataset* trying first at `sc_url` and failing that, at
    `mc_url`. If a data set cannot be retrieved, it is skipped.

    The contents are written to `file_path` with the data set name as the
    file name.

    Args:
        dataset (str): Dataset name as referenced in
            https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/
        file_path (str): Directory where `dataset` will be written.
        sc_url (str): single class data set URL
        mc_url (str): multiclass data set URL
    """
    url_get = sc_url + dataset
    try:
        get_req = requests.get(url_get, allow_redirects=True)
    except Exception as e:
        # (requests.exceptions.InvalidURL, Exception) was redundant --
        # Exception already covers it. Log and re-raise.
        logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
        raise
    if get_req.status_code == 200:
        write_contents(file_path, get_req)
    else:
        # Fall back to the multi-class repository.
        url_get = mc_url + dataset
        get_req = requests.get(url_get, allow_redirects=True)
        if get_req.status_code == 200:
            write_contents(file_path, get_req)
        else:
            logger.error("\tunable to retrieve {}".format(dataset))
            return
    # Fixed two bugs: the original `logger.info("\tsuccess".format(dataset))`
    # dropped the dataset argument (no placeholder in the format string) AND
    # reported success unconditionally, even after a failed retrieval.
    logger.info("\tsuccess: %s", dataset)
def main(param_file, sc_url, mc_url, data_fp):
    """
    The main show. Tries to retrieve and store all the configured data-sets.

    Args:
        param_file (str): `.yml` File containing the evaluation parameters
        sc_url (str): single class data set URL
        mc_url (str): multiclass data set URL
        data_fp (str): Directory where the datasets will be written

    Raises:
        ValueError: If `data_fp` is not a valid directory.
        FileNotFoundError: If `param_file` does not exist.
    """
    # Fixed: the original wrapped this open in a try/except that only
    # re-raised (a no-op); the same exceptions propagate unchanged without it.
    with open(param_file) as yml_file:
        params = yaml.safe_load(yml_file)
    datasets = params["evaluation"]["datasets"]
    if not os.path.isdir(data_fp):
        raise ValueError("no directory named {}".format(data_fp))
    try:
        for dataset in sorted(datasets):
            logger.info("retrieving {}".format(dataset))
            write_path = os.path.join(data_fp, dataset)
            get_request(dataset, write_path, sc_url, mc_url)
    except Exception as e:
        logger.exception("{}: {}".format(type(e), str(e)), exc_info=True)
        raise
# Command-line entry point: parse arguments and run the downloader.
if __name__ == "__main__":
    import argparse
    import sys

    # Default repositories for single-class (binary) and multi-class data.
    _sc_url = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/"
    _mc_url = (
        "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/"
    )
    parser = argparse.ArgumentParser(
        description="Retrieve datasets for LsAnomaly evaluation. "
        "By default, data is retrieved from "
        "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/"
    )
    parser.add_argument(
        "--params",
        "-p",
        dest="yml_params",
        required=True,
        help="YAML file with evaluation parameters",
    )
    parser.add_argument(
        "--data-dir",
        "-d",
        dest="data_dir",
        required=True,
        help="directory to store retrieved data sets",
    )
    parser.add_argument(
        "--sc-url",
        dest="sc_url",
        required=False,
        default=_sc_url,
        help="optional: single class test data URL; default: {}".format(
            _sc_url
        ),
    )
    parser.add_argument(
        "--mc-url",
        dest="mc_url",
        required=False,
        default=_mc_url,
        help="optional: Multi-class test data URL; default: {}".format(
            _mc_url
        ),
    )
    args = parser.parse_args()
    try:
        # main() returns None, so this exits with status 0 on success.
        sys.exit(
            main(args.yml_params, args.sc_url, args.mc_url, args.data_dir)
        )
    except SystemExit:
        pass
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from queries import SparqlQuery
class event_precis(SparqlQuery):
    """
    SPARQL query wrapper returning a precis for a single event URI: a
    distillation of the event graph (situations, actors, places, cleaned
    time, document count) rather than a verbatim report.
    """

    def __init__(self, *args, **kwargs):
        super(event_precis, self).__init__(*args, **kwargs)
        self.query_title = 'Get event precis'
        self.description = 'Get precis for event which is a distillation of the event graph rather than verbatim report.'
        self.url = 'event_precis'
        # Example query strings for each corpus exposed by the API docs.
        self.world_cup_example = 'event_precis?uris.0=http://www.newsreader-project.eu/data/cars/2003/06/02/48RT-R260-009F-R155.xml%23ev18'
        self.cars_example = 'event_precis?uris.0=http://www.newsreader-project.eu/data/cars/2003/06/02/48RT-R260-009F-R155.xml%23ev18'
        self.ft_example = 'event_precis?uris.0=http://www.newsreader-project.eu/data/2013/10/312013/10/312013/10/31/11779884.xml%23ev7'
        self.wikinews_example = 'event_precis?uris.0=http://en.wikinews.org/wiki/Vettel_becomes_youngest_Formula_One_champion%23ev27_1'
        # Doubled braces ({{ }}) are literal braces; {uri_0} is substituted
        # with the requested event URI at query-build time.
        self.query_template = ("""
            SELECT DISTINCT ?subject ?predicate ?object ?graph
            WHERE {{
              {{
                {uri_0} eso:hasPreSituation|eso:hasPostSituation|eso:hasDuringSituation ?graph .
                GRAPH ?graph {{ ?subject ?predicate ?object }}
              }} UNION {{
                BIND ({uri_0} as ?subject)
                {{
                  GRAPH ?graph {{ {uri_0} ?predicate ?object }}
                  FILTER (?predicate = sem:hasActor ||
                          ?predicate = sem:hasPlace ||
                          ?predicate = rdf:type && EXISTS {{ ?object rdfs:isDefinedBy eso: }} ||
                          EXISTS {{ ?predicate rdfs:isDefinedBy eso: }} )
                }} UNION {{
                  GRAPH ?graph {{ {uri_0} sem:hasTime ?t }}
                  ?t owltime:inDateTime ?object .
                  BIND (nwr:cleanedTime as ?predicate)
                }} UNION {{
                  SELECT ("number of documents" AS ?predicate) ("graph" AS ?graph)
                         (COUNT(DISTINCT STRBEFORE(STR(?m), "#")) AS ?object)
                  WHERE {{ {uri_0} gaf:denotedBy ?m }}
                }}
              }}
            }}
            """)
        # Same query wrapped in COUNT(*) for result pagination.
        self.count_template = ("""
            SELECT (COUNT(*) as ?count)
            WHERE{{
              SELECT DISTINCT ?subject ?predicate ?object ?graph
              WHERE {{
                {{
                  {uri_0} eso:hasPreSituation|eso:hasPostSituation|eso:hasDuringSituation ?graph .
                  GRAPH ?graph {{ ?subject ?predicate ?object }}
                }} UNION {{
                  BIND ({uri_0} as ?subject)
                  {{
                    GRAPH ?graph {{ {uri_0} ?predicate ?object }}
                    FILTER (?predicate = sem:hasActor ||
                            ?predicate = sem:hasPlace ||
                            ?predicate = rdf:type && EXISTS {{ ?object rdfs:isDefinedBy eso: }} ||
                            EXISTS {{ ?predicate rdfs:isDefinedBy eso: }} )
                  }} UNION {{
                    GRAPH ?graph {{ {uri_0} sem:hasTime ?t }}
                    ?t owltime:inDateTime ?object .
                    BIND (nwr:cleanedTime as ?predicate)
                  }} UNION {{
                    SELECT ("number of documents" AS ?predicate) ("graph" AS ?graph)
                           (COUNT(DISTINCT STRBEFORE(STR(?m), "#")) AS ?object)
                    WHERE {{ {uri_0} gaf:denotedBy ?m }}
                  }}
                }}
              }}
            }}
            """)
        self.jinja_template = 'table.html'
        self.headers = ['subject', 'predicate', 'object', 'graph']
        self.required_parameters = ["uris"]
        self.optional_parameters = ["output"]
        self.number_of_uris_required = 1
        self._make_uri_filter_block()
        self.query = self._build_query()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2021 the HERA Project
# Licensed under the MIT License
import pytest
import glob
from pyuvdata import UVData
from pyuvdata import UVCal
from ..data import DATA_PATH
from .. import chunker
from hera_qm.utils import apply_yaml_flags
import numpy as np
import sys
def test_chunk_data_files(tmpdir):
    """Chunked uvh5 files, recombined, must match the original data set."""
    # list of data files:
    tmp_path = tmpdir.strpath
    data_files = sorted(glob.glob(DATA_PATH + '/zen.2458044.*.uvh5'))
    nfiles = len(data_files)
    # form chunks with three samples.
    for chunk in range(0, nfiles, 2):
        output = tmp_path + f'/chunk.{chunk}.uvh5'
        chunker.chunk_files(data_files, data_files[chunk], output, 2,
                            polarizations=['ee'], spw_range=[0, 32],
                            throw_away_flagged_ants=True, ant_flag_yaml=DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml')
    # test that chunked files contain identical data (when combined)
    # to original combined list of files.
    # load in chunks
    chunks = sorted(glob.glob(tmp_path + '/chunk.*.uvh5'))
    uvd = UVData()
    uvd.read(chunks)
    # load in original file
    uvdo = UVData()
    uvdo.read(data_files, freq_chans=range(32))
    apply_yaml_flags(uvdo, DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml', throw_away_flagged_ants=True,
                     flag_freqs=False, flag_times=False, ant_indices_only=True)
    assert np.all(np.isclose(uvdo.data_array, uvd.data_array))
    assert np.all(np.isclose(uvdo.flag_array, uvd.flag_array))
    assert np.all(np.isclose(uvdo.nsample_array, uvd.nsample_array))
    # Repeat test with no spw_range or pols provided.
    for chunk in range(0, nfiles, 2):
        output = tmp_path + f'/chunk.{chunk}.uvh5'
        chunker.chunk_files(data_files, data_files[chunk], output, 2,
                            polarizations=None, spw_range=None, clobber=True,
                            throw_away_flagged_ants=True, ant_flag_yaml=DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml')
    # test that chunked files contain identical data (when combined)
    # to original combined list of files.
    # load in chunks
    chunks = sorted(glob.glob(tmp_path + '/chunk.*.uvh5'))
    uvd = UVData()
    uvd.read(chunks)
    # load in original file
    uvdo = UVData()
    uvdo.read(data_files)
    apply_yaml_flags(uvdo, DATA_PATH + '/test_input/a_priori_flags_sample_noflags.yaml', throw_away_flagged_ants=True,
                     flag_freqs=False, flag_times=False, ant_indices_only=True)
    assert np.all(np.isclose(uvdo.data_array, uvd.data_array))
    assert np.all(np.isclose(uvdo.flag_array, uvd.flag_array))
    assert np.all(np.isclose(uvdo.nsample_array, uvd.nsample_array))
def test_chunk_cal_files(tmpdir):
    """Chunked calfits files, recombined, must match the original gains."""
    # list of data files:
    tmp_path = tmpdir.strpath
    cal_files = sorted(glob.glob(DATA_PATH + '/test_input/*.abs.calfits_54x_only.part*'))
    nfiles = len(cal_files)
    # test ValueError
    pytest.raises(ValueError, chunker.chunk_files, cal_files, cal_files[0], 'output', 2, spw_range=[0, 32], type='arglebargle')
    # form chunks with three samples.
    for chunk in range(0, nfiles, 2):
        output = tmp_path + f'/chunk.{chunk}.calfits'
        chunker.chunk_files(cal_files, cal_files[chunk], output, 2, spw_range=[0, 32], type='gains')
    # test that chunked files contain identical data (when combined)
    # to original combined list of files.
    # load in chunks
    chunks = sorted(glob.glob(tmp_path + '/chunk.*.calfits'))
    uvc = UVCal()
    uvc.read_calfits(chunks)
    # load in original file
    uvco = UVCal()
    uvco.read_calfits(cal_files)
    uvco.select(freq_chans=range(32))
    assert np.all(np.isclose(uvco.gain_array, uvc.gain_array))
    assert np.all(np.isclose(uvco.flag_array, uvc.flag_array))
    # repeat test with None provided for spw_range and pols
    for chunk in range(0, nfiles, 2):
        output = tmp_path + f'/chunk.{chunk}.calfits'
        chunker.chunk_files(cal_files, cal_files[chunk], output, 2, type='gains', clobber=True)
    # test that chunked files contain identical data (when combined)
    # to original combined list of files.
    # load in chunks
    chunks = sorted(glob.glob(tmp_path + '/chunk.*.calfits'))
    uvc = UVCal()
    uvc.read_calfits(chunks)
    # load in original file
    uvco = UVCal()
    uvco.read_calfits(cal_files)
    assert np.all(np.isclose(uvco.gain_array, uvc.gain_array))
    assert np.all(np.isclose(uvco.flag_array, uvc.flag_array))
def test_chunk_parser():
    """chunk_parser: positional and --type arguments parse correctly."""
    # Simulate a command line invocation via sys.argv.
    sys.argv = [sys.argv[0], 'a', 'b', 'c', 'input', 'output', '3', '--type', 'gains']
    parsed = chunker.chunk_parser().parse_args()
    assert parsed.filenames == ['a', 'b', 'c']
    assert parsed.inputfile == 'input'
    assert parsed.outputfile == 'output'
    assert parsed.chunk_size == 3
    assert parsed.type == 'gains'
|
nilq/baby-python
|
python
|
import argparse
import codecs
import json
import math
import os.path
import numpy as np
import tensorflow as tf
__all__ = ["create_default_hyperparams", "load_hyperparams",
"generate_search_lookup", "search_hyperparams", "create_hyperparams_file"]
def create_default_hyperparams(config_type):
    """create default hyperparameters

    :param config_type: model configuration identifier; only "dam" is
        currently supported
    :returns: a tf.contrib.training.HParams with the default settings
    :raises ValueError: for any unsupported config type
    """
    if config_type == "dam":
        hyperparams = tf.contrib.training.HParams(
            # --- data pipeline: input files, sizes and vocabularies ---
            data_train_contextual_file="",
            data_train_contextual_file_type="",
            data_eval_contextual_file="",
            data_eval_contextual_file_type="",
            data_embedding_file="",
            data_full_embedding_file="",
            data_context_utterance_size=10,
            data_context_word_size=50,
            data_context_char_size=16,
            data_response_candidate_size=3,
            data_response_word_size=50,
            data_response_char_size=16,
            data_word_vocab_file="",
            data_word_vocab_size=50000,
            data_word_vocab_threshold=0,
            data_word_unk="<unk>",
            data_word_pad="<pad>",
            data_char_vocab_file="",
            data_char_vocab_size=1000,
            data_char_vocab_threshold=0,
            data_char_unk="*",
            data_char_pad="#",
            data_pipeline_mode="default",
            data_num_parallel=4,
            data_log_output_dir="",
            data_result_output_dir="",
            # --- training loop, checkpointing and optimizer settings ---
            train_random_seed=100,
            train_enable_shuffle=True,
            train_shuffle_buffer_size=30000,
            train_batch_size=32,
            train_eval_batch_size=100,
            train_eval_metric=["cp_auc@1", "precision@1"],
            train_num_epoch=3,
            train_ckpt_output_dir="",
            train_summary_output_dir="",
            train_step_per_stat=10,
            train_step_per_ckpt=1000,
            train_step_per_eval=1000,
            train_clip_norm=5.0,
            train_enable_debugging=False,
            train_ema_enable=True,
            train_ema_decay_rate=0.9999,
            train_ema_enable_debias=False,
            train_ema_enable_dynamic_decay=False,
            train_regularization_enable=True,
            train_regularization_type="l2",
            train_regularization_scale=3e-7,
            train_optimizer_type="adam",
            train_optimizer_learning_rate=0.001,
            train_optimizer_warmup_enable=False,
            train_optimizer_warmup_mode="exponential_warmup",
            train_optimizer_warmup_rate=0.01,
            train_optimizer_warmup_end_step=1000,
            train_optimizer_decay_enable=False,
            train_optimizer_decay_mode="exponential_decay",
            train_optimizer_decay_rate=0.95,
            train_optimizer_decay_step=1000,
            train_optimizer_decay_start_step=10000,
            train_optimizer_momentum_beta=0.9,
            train_optimizer_rmsprop_beta=0.999,
            train_optimizer_rmsprop_epsilon=1e-8,
            train_optimizer_adadelta_rho=0.95,
            train_optimizer_adadelta_epsilon=1e-8,
            train_optimizer_adagrad_init_accumulator=0.1,
            train_optimizer_adam_beta_1=0.8,
            train_optimizer_adam_beta_2=0.999,
            train_optimizer_adam_epsilon=1e-07,
            # --- model architecture: representation layer ---
            model_type="dam",
            model_scope="contextual_modeling",
            model_representation_word_embed_dim=300,
            model_representation_word_dropout=0.1,
            model_representation_word_embed_pretrained=True,
            model_representation_word_feat_trainable=False,
            model_representation_word_feat_enable=True,
            model_representation_char_embed_dim=8,
            model_representation_char_unit_dim=100,
            model_representation_char_window_size=[5],
            model_representation_char_hidden_activation="relu",
            model_representation_char_dropout=0.1,
            model_representation_char_pooling_type="max",
            model_representation_char_feat_trainable=True,
            model_representation_char_feat_enable=True,
            model_representation_fusion_type="highway",
            model_representation_fusion_num_layer=2,
            model_representation_fusion_unit_dim=400,
            model_representation_fusion_hidden_activation="relu",
            model_representation_fusion_dropout=0.1,
            model_representation_fusion_trainable=True,
            # --- model architecture: context/response understanding ---
            model_understanding_context_num_layer=5,
            model_understanding_context_num_head=8,
            model_understanding_context_unit_dim=128,
            model_understanding_context_hidden_activation="relu",
            model_understanding_context_dropout=0.1,
            model_understanding_context_attention_dropout=0.0,
            model_understanding_context_layer_dropout=0.1,
            model_understanding_context_trainable=True,
            model_understanding_response_num_layer=5,
            model_understanding_response_num_head=8,
            model_understanding_response_unit_dim=128,
            model_understanding_response_hidden_activation="relu",
            model_understanding_response_dropout=0.1,
            model_understanding_response_attention_dropout=0.0,
            model_understanding_response_layer_dropout=0.1,
            model_understanding_response_trainable=True,
            model_understanding_enable_sharing=False,
            # --- model architecture: cross-attention interaction ---
            model_interaction_context2response_num_layer=5,
            model_interaction_context2response_num_head=8,
            model_interaction_context2response_unit_dim=128,
            model_interaction_context2response_hidden_activation="relu",
            model_interaction_context2response_dropout=0.1,
            model_interaction_context2response_attention_dropout=0.0,
            model_interaction_context2response_layer_dropout=0.1,
            model_interaction_context2response_trainable=True,
            model_interaction_response2context_num_layer=5,
            model_interaction_response2context_num_head=8,
            model_interaction_response2context_unit_dim=128,
            model_interaction_response2context_hidden_activation="relu",
            model_interaction_response2context_dropout=0.1,
            model_interaction_response2context_attention_dropout=0.0,
            model_interaction_response2context_layer_dropout=0.1,
            model_interaction_response2context_trainable=True,
            # --- model architecture: matching/aggregation head ---
            model_matching_aggregation_num_layer=2,
            model_matching_aggregation_unit_dim=[32, 16],
            model_matching_aggregation_hidden_activation=["relu", "relu"],
            model_matching_aggregation_conv_window=[3,3],
            model_matching_aggregation_conv_stride=[1,1],
            model_matching_aggregation_pool_window=[3,3],
            model_matching_aggregation_pool_stride=[3,3],
            model_matching_aggregation_pooling_type=["max", "max"],
            model_matching_aggregation_dropout=[0.1, 0.1],
            model_matching_aggregation_trainable=[True, True],
            model_matching_projection_dropout=0.1,
            model_matching_projection_trainable=True,
            # --- device placement / GPU memory settings ---
            device_num_gpus=1,
            device_default_gpu_id=0,
            device_log_device_placement=False,
            device_allow_soft_placement=False,
            device_allow_growth=False,
            device_per_process_gpu_memory_fraction=0.8
        )
    else:
        raise ValueError("unsupported config type {0}".format(config_type))
    return hyperparams
def load_hyperparams(config_file):
    """Load hyperparameters from a JSON config file.

    The file must contain a "model_type" key; defaults for that model type
    are created first and then overridden with every value in the file.
    Raises FileNotFoundError when the config file does not exist.
    """
    if not tf.gfile.Exists(config_file):
        raise FileNotFoundError("config file not found")
    with codecs.getreader("utf-8")(tf.gfile.GFile(config_file, "rb")) as file:
        config_dict = json.load(file)
    hyperparams = create_default_hyperparams(config_dict["model_type"])
    hyperparams.override_from_dict(config_dict)
    return hyperparams
def generate_search_lookup(search,
                           search_lookup=None):
    """Draw one random sample described by a hyperparameter search spec.

    search: dict with "stype" ("uniform" | "log" | "discrete" | "lookup"),
        "dtype" ("int" | "float" | "string" | "boolean" | "list"), a
        type-specific field ("range", "set" or "key"), and optional
        "scale" / "shift" applied as scale * sample + shift.
    search_lookup: optional table consulted by "lookup" specs.
    Returns the sample converted to the requested dtype.
    Raises ValueError for unsupported stype/dtype or a missing lookup key.
    """
    search_lookup = search_lookup if search_lookup else {}
    search_type = search["stype"]
    data_type = search["dtype"]
    if search_type == "uniform":
        low, high = search["range"]
        if data_type == "int":
            sample = np.random.randint(low, high)
        elif data_type == "float":
            sample = (high - low) * np.random.random_sample() + low
        else:
            raise ValueError("unsupported data type {0}".format(data_type))
    elif search_type == "log":
        # sample uniformly in log10-space, then map back
        log_low = math.log(search["range"][0], 10)
        log_high = math.log(search["range"][1], 10)
        if data_type == "float":
            sample = math.pow(10, (log_high - log_low) * np.random.random_sample() + log_low)
        else:
            raise ValueError("unsupported data type {0}".format(data_type))
    elif search_type == "discrete":
        choices = search["set"]
        sample = choices[np.random.choice(len(choices))]
    elif search_type == "lookup":
        search_key = search["key"]
        if search_key not in search_lookup:
            raise ValueError("search key {0} doesn't exist in look-up table".format(search_key))
        sample = search_lookup[search_key]
    else:
        raise ValueError("unsupported search type {0}".format(search_type))
    # optional affine rescaling; only meaningful for the numeric dtypes
    scale = search.get("scale", 1.0)
    shift = search.get("shift", 0.0)
    if data_type == "int":
        sample = int(scale * sample + shift)
    elif data_type == "float":
        sample = float(scale * sample + shift)
    elif data_type == "string":
        sample = str(sample)
    elif data_type == "boolean":
        sample = bool(sample)
    elif data_type == "list":
        sample = list(sample)
    else:
        raise ValueError("unsupported data type {0}".format(data_type))
    return sample
def search_hyperparams(hyperparams,
                       config_file,
                       num_group,
                       random_seed):
    """Sample `num_group` hyperparameter sets from a search config file.

    The config JSON holds two sections: "variables" (sampled first, then
    exposed to "lookup" specs) and "hyperparams" (the values that override
    the base `hyperparams`). Sampling is seeded with `random_seed`.
    Returns a list of HParams objects; raises FileNotFoundError when the
    config file does not exist.
    """
    if not tf.gfile.Exists(config_file):
        raise FileNotFoundError("config file not found")
    with codecs.getreader("utf-8")(tf.gfile.GFile(config_file, "rb")) as file:
        np.random.seed(random_seed)
        search_setting = json.load(file)
    hyperparams_search_setting = search_setting["hyperparams"]
    variables_search_setting = search_setting["variables"]
    hyperparams_group = []
    for _ in range(num_group):
        # variables first: hyperparam specs of stype "lookup" resolve against them
        variables_search_lookup = {
            key: generate_search_lookup(spec)
            for key, spec in variables_search_setting.items()
        }
        hyperparams_search_lookup = {
            key: generate_search_lookup(spec, variables_search_lookup)
            for key, spec in hyperparams_search_setting.items()
        }
        hyperparams_sample = tf.contrib.training.HParams(hyperparams.to_proto())
        hyperparams_sample.override_from_dict(hyperparams_search_lookup)
        hyperparams_group.append(hyperparams_sample)
    return hyperparams_group
def create_hyperparams_file(hyperparams_group, config_dir):
    """Write each hyperparameter set to its own JSON file under `config_dir`.

    Files are named config_hyperparams_<index>.json; the directory is
    created when missing.
    """
    if not tf.gfile.Exists(config_dir):
        tf.gfile.MakeDirs(config_dir)
    for index, hyperparams in enumerate(hyperparams_group):
        config_file = os.path.join(config_dir, "config_hyperparams_{0}.json".format(index))
        with codecs.getwriter("utf-8")(tf.gfile.GFile(config_file, "w")) as file:
            file.write(json.dumps(hyperparams.values(), indent=4))
|
nilq/baby-python
|
python
|
from fractions import Fraction
def isPointinPolygon(pointlist, rangelist):
    """Ray-casting test: return True iff every point in `pointlist` lies
    strictly inside the polygon `rangelist`.

    rangelist: closed polygon vertices (first point repeated as the last).
    pointlist: points to test (here: vertices of the small polygon).
    Prints a (Chinese) verdict and returns False as soon as any point is
    outside, coincides with a polygon vertex, or lies on an edge.
    """
    # Quick reject: bounding box of the polygon (last vertex duplicates the first).
    xlist = []
    ylist = []
    for i in range(len(rangelist) - 1):
        xlist.append(rangelist[i][0])
        ylist.append(rangelist[i][1])
    maxx = max(xlist)
    minx = min(xlist)
    maxy = max(ylist)
    miny = min(ylist)
    for point in pointlist:
        if (point[0] > maxx or point[0] < minx or
                point[1] > maxy or point[1] < miny):
            print('小图形不在大图形里面')
            return False
    # Cast a horizontal ray to the left of each point and count edge crossings.
    for point in pointlist:
        # BUG FIX: `count` and `point1` were initialized once for ALL points,
        # so crossing counts accumulated across points and the parity test
        # was wrong for every point after the first. Reset them per point.
        count = 0
        point1 = rangelist[0]
        for i in range(1, len(rangelist)):
            point2 = rangelist[i]
            # Point coincides with a polygon vertex -> treated as "not inside".
            if ((point[0] == point1[0] and point[1] == point1[1]) or
                    (point[0] == point2[0] and point[1] == point2[1])):
                print('小图形不在大图形里面')
                return False
            # Edge straddles the ray's y level? (half-open rule avoids double counting)
            if (point1[1] < point[1] and point2[1] >= point[1]) or (point1[1] >= point[1] and point2[1] < point[1]):
                # x coordinate where the edge meets the horizontal through the point
                point12lng = point2[0] - (point2[1] - point[1]) * (point2[0] - point1[0]) / (point2[1] - point1[1])
                # Point lies exactly on the edge -> treated as "not inside".
                if point12lng == point[0]:
                    print("小图形不在大图形里面")
                    return False
                if point12lng < point[0]:
                    count += 1
            point1 = point2
        # Even crossing count means this point is outside.
        if count % 2 == 0:
            print('小图形不在大图形里面')
            return False
    print('点在大图形里面')
    return True
def line(line):
    """Convert a closed polyline into per-edge descriptors.

    Each edge (p[i], p[i+1]) yields [a, b, x1, x2, y1, y2] where y = a*x + b
    describes the edge's supporting line and (x1, x2), (y1, y2) give its
    coordinate ranges. A vertical edge has no slope, so `a` is the sentinel
    string '不存在系数' (checked by islineinPolygon) and `b` is 0.

    NOTE: the original comments had the horizontal/vertical labels swapped;
    equal y-coordinates give a horizontal edge y = b, equal x-coordinates a
    vertical one.
    """
    result = []
    for i in range(len(line) - 1):
        x1, y1 = line[i][0], line[i][1]
        x2, y2 = line[i + 1][0], line[i + 1][1]
        if y1 == y2:
            # horizontal edge: y = b with slope 0
            result.append([0, y1, x1, x2, y1, y2])
        elif x1 == x2:
            # vertical edge: x = const, slope undefined
            result.append(['不存在系数', 0, x1, x2, y1, y2])
        else:
            # general edge: y = a*x + b
            a = (y2 - y1) / (x2 - x1)
            b = y1 - a * x1
            result.append([a, b, x1, x2, y1, y2])
    return result
def islineinPolygon(pointlist, rangelist):
    """Check whether any edge of the small polygon crosses an edge of the
    large polygon; prints the (Chinese) verdict and always returns None.

    NOTE(review): each intersection test only checks the crossing point
    against one coordinate range per segment (y-range for vertical cases,
    x-range otherwise), never both — some non-intersecting pairs may be
    reported as intersecting. Confirm intended behavior before tightening.
    """
    pointline = line(pointlist)
    rangeline = line(rangelist)
    for i in pointline:
        for j in rangeline:
            if i[0] == '不存在系数' and j[0] == '不存在系数':
                # Both edges vertical (x = const): parallel or collinear.
                # BUG FIX: the original used independent `if`s, so this case
                # fell through to the next branch and evaluated
                # '不存在系数' * i[2] + j[1], raising TypeError whenever both
                # polygons had a vertical edge. Treat as non-intersecting.
                continue
            elif i[0] == '不存在系数':
                # Small-polygon edge is vertical at x = i[2].
                y = j[0] * i[2] + j[1]
                if y > min(j[4:]) and y < max(j[4:]) and y > min(i[4:]) and y < max(i[4:]):
                    return print('小图形不在大图形里面')
            elif j[0] == '不存在系数':
                # Large-polygon edge is vertical at x = j[2].
                y = i[0] * j[2] + i[1]
                if y > min(j[4:]) and y < max(j[4:]) and y > min(i[4:]) and y < max(i[4:]):
                    return print('小图形不在大图形里面')
            elif i[0] != j[0]:
                # Two non-parallel, non-vertical lines: a1*x + b1 = a2*x + b2.
                x = (j[1] - i[1]) / (i[0] - j[0])
                if x > min(j[2:4]) and x < max(j[2:4]) and x > min(i[2:4]) and x < max(i[2:4]):
                    return print('小图形不在大图形里面')
    print('小图形在大图形里面')
if __name__ == '__main__':
    # Outer (large) polygon; closed: the first vertex is repeated at the end.
    outer_polygon = [[0, 4], [3, 2], [1, 0], [3, -2], [0, -4], [-3, -2], [-1, 0], [-3, 2], [0, 4]]
    # Inner (small) polygon; also closed the same way.
    inner_polygon = [[-2, 2], [2, -2], [-2, -2], [-2, 2]]
    # Only run the edge-intersection test when every vertex is inside.
    if isPointinPolygon(inner_polygon, outer_polygon):
        islineinPolygon(inner_polygon, outer_polygon)
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.