blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
65db83dd6e22faf2d53cb9f01ba52bff4adfef6f | 5e089154239b09a0d75745d4710d42de72876480 | /app/main/views.py | 089cb02dd28646b92158f47b6648eea771e12e2f | [] | no_license | hui-jz/Social_Blog | 7cc84f45459131e784859d9d9db9f04790272461 | 8faa9769f7b497e092c1e696d1b5e7ff79c9931c | refs/heads/main | 2023-01-10T07:18:47.836590 | 2020-11-09T03:16:04 | 2020-11-09T03:16:04 | 310,169,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | from datetime import datetime
from flask import current_app, redirect, render_template, session, url_for

from . import main
from .forms import NameForm
from .. import db
from ..models import User
@main.route('/', methods=['GET', 'POST'])
def index():
    """Landing page: record the submitted name and whether it was seen before.

    On a valid POST, looks the name up in the User table; unknown names are
    persisted and flagged as new in the session, then the request is
    redirected (Post/Redirect/Get) so a refresh cannot re-submit the form.
    """
    form = NameForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.name.data).first()
        if user is None:
            # First time this name is seen: persist it and mark it new.
            user = User(username=form.name.data)
            db.session.add(user)
            db.session.commit()
            session['known'] = False
            # Fix: the original referenced an undefined global ``app``;
            # use Flask's ``current_app`` proxy for the active application.
            if current_app.config['FLASKY_ADMIN']:
                # NOTE(review): send_email is not imported in this module --
                # confirm it is provided (e.g. ``from ..email import send_email``).
                send_email(current_app.config['FLASKY_ADMIN'],
                           'New User',
                           'mail/new_user',
                           user=user)
        else:
            session['known'] = True
        session['name'] = form.name.data
        form.name.data = ''
        # Redirect after POST so refreshing the page does not re-submit.
        return redirect(url_for('.index'))
    return render_template('index.html',
                           form=form, name=session.get('name'),
                           known=session.get('known', False),
                           current_time=datetime.utcnow())
"2223136131@qq.com"
] | 2223136131@qq.com |
9bc8312a278238aacd0923da6d1080c78d3f5ca8 | ca06d322433e734ab0239d6766f4e502afc310e3 | /tests/unit/test_fp16.py | 0c0ef3edd3a85533bd4281b47d568a807b0d803f | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | AnandEmbold/DeepSpeed | 5b125f91aa8166b6433e6972b1df203083754c5f | 9ff8a1278088564199103820553e0e034e5e68b0 | refs/heads/master | 2023-07-15T09:32:33.192470 | 2021-08-17T05:54:07 | 2021-08-17T05:54:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,415 | py | import torch
import deepspeed
import argparse
import pytest
import json
import os
from deepspeed.ops.adam import FusedAdam
from common import distributed_test
from simple_model import SimpleModel, SimpleOptimizer, random_dataloader, args_from_dict, create_deepspeed_args
from deepspeed.ops.op_builder import CPUAdamBuilder
# Detect whether NVIDIA apex/amp is importable so amp-specific tests can be
# skipped on installations that lack it.
try:
    from apex import amp
    _amp_available = True
except ImportError:
    _amp_available = False
# Fix: the original used pytest.mark.skip(_amp_available, reason=...), which
# misuses skip() (its only argument is the reason string) and does not gate on
# the flag at all; skipif with the negated flag runs the amp tests exactly
# when apex is installed.
amp_available = pytest.mark.skipif(not _amp_available, reason="apex/amp is not installed")
def test_lamb_fp32_grad_clip(tmpdir):
    """Train a small FP32 model with the Lamb optimizer and gradient clipping."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Lamb", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1, 2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        # FP32 run: request float data explicitly.
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device,
                                   dtype=torch.float)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
def test_lamb_fp16_basic(tmpdir):
    """FP16 smoke test with the Lamb optimizer and gradient clipping."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Lamb", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "fp16": {"enabled": True},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1, 2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
def test_lamb_fp16_empty_grad(tmpdir):
    """FP16 Lamb training where the model contains a parameter with no gradient."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Lamb", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "fp16": {"enabled": True},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    # empty_grad=True leaves one parameter without a gradient on purpose.
    model = SimpleModel(hidden_dim, empty_grad=True)

    @distributed_test(world_size=[2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
def test_adam_fp32_empty_grad(tmpdir):
    """FP32 Adam training where the model contains a parameter with no gradient."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "fp16": {"enabled": False},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    # empty_grad=True leaves one parameter without a gradient on purpose.
    model = SimpleModel(hidden_dim, empty_grad=True)

    @distributed_test(world_size=[2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        # FP32 run: request float data explicitly.
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device,
                                   dtype=torch.float)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
def test_adamw_fp16_basic(tmpdir):
    """FP16 smoke test with a client-provided torch AdamW optimizer."""
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
        "fp16": {"enabled": True},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1])
    def _run(args, model, hidden_dim):
        client_optimizer = torch.optim.AdamW(params=model.parameters())
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               optimizer=client_optimizer)
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
def test_dict_config_adamw_fp16_basic():
    """FP16 AdamW smoke test configured via an in-memory dict instead of a config file."""
    config = {"train_batch_size": 1, "steps_per_print": 1, "fp16": {"enabled": True}}
    args = create_deepspeed_args()
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1])
    def _run(args, model, hidden_dim, config):
        client_optimizer = torch.optim.AdamW(params=model.parameters())
        # The config dict is handed straight to initialize().
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               optimizer=client_optimizer,
                                               config=config)
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim, config=config)
def test_adamw_fp16_empty_grad(tmpdir):
    """FP16 AdamW training run (despite the name, the model here has no empty-grad parameter)."""
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
        "fp16": {"enabled": True},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1])
    def _run(args, model, hidden_dim):
        client_optimizer = torch.optim.AdamW(params=model.parameters())
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               optimizer=client_optimizer)
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
@pytest.mark.parametrize('zero_stage, use_cpu_offload',
                         [(1, False),
                          (2, False),
                          (2, True),
                          (3, False),
                          (3, True)])
def test_adam_fp16_zero_onecycle_compatibility(tmpdir, zero_stage, use_cpu_offload):
    """FP16 + ZeRO (all stages, with/without CPU offload) combined with the OneCycle scheduler."""
    if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
        pytest.skip("cpu-adam is not compatible")
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "scheduler": {
            "type": "OneCycle",
            "params": {
                "cycle_first_step_size": 16000,
                "cycle_first_stair_count": 8000,
                "decay_step_size": 16000,
                "cycle_min_lr": 1e-06,
                "cycle_max_lr": 3e-05,
                "decay_lr_rate": 1e-07,
                "cycle_min_mom": 0.85,
                "cycle_max_mom": 0.99,
                "decay_mom_rate": 0.0,
            },
        },
        "fp16": {"enabled": True},
        "zero_optimization": {"stage": zero_stage, "cpu_offload": use_cpu_offload},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    @distributed_test(world_size=[1])
    def _run(args, zero_stage, hidden_dim):
        model = SimpleModel(hidden_dim)
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, zero_stage=zero_stage, hidden_dim=hidden_dim)
@pytest.mark.parametrize('zero_stage, use_cpu_offload',
                         [(1, False),
                          (2, False),
                          (2, True),
                          (3, False),
                          (3, True)])
def test_zero_static_scale(tmpdir, zero_stage, use_cpu_offload):
    """ZeRO with a fixed (static) loss scale: check the scaler config, then train briefly."""
    if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
        pytest.skip("cpu-adam is not compatible")
    config_dict = {
        "train_batch_size": 4,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "fp16": {"enabled": True, "loss_scale": 138.},
        "zero_optimization": {"stage": zero_stage, "cpu_offload": use_cpu_offload},
    }
    args = args_from_dict(tmpdir, config_dict)

    @distributed_test(world_size=2)
    def _run(args, zero_stage, hidden_dim):
        model = SimpleModel(hidden_dim)
        engine, optim, _, _ = deepspeed.initialize(args=args,
                                                   model=model,
                                                   model_parameters=model.parameters())
        # The static scaler must be active, with the configured scale.
        assert optim.dynamic_loss_scale == False
        assert optim.loss_scaler.loss_scale == 138.
        loader = random_dataloader(model=engine,
                                   total_samples=10,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    # hidden_dim=9 is not divisible by the DP world size; 10 is.
    _run(args=args, zero_stage=zero_stage, hidden_dim=9)
    _run(args=args, zero_stage=zero_stage, hidden_dim=10)
def test_zero_static_scale_deprecated_format(tmpdir):
    """Static loss scale still works with the older ZeRO config layout."""
    config_dict = {
        "train_batch_size": 4,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "fp16": {"enabled": True, "loss_scale": 138.},
        "zero_optimization": {"stage": 1},
    }
    args = args_from_dict(tmpdir, config_dict)

    @distributed_test(world_size=2)
    def _run(args):
        hidden_dim = 10
        model = SimpleModel(hidden_dim)
        engine, optim, _, _ = deepspeed.initialize(args=args,
                                                   model=model,
                                                   model_parameters=model.parameters())
        # The static scaler must be active, with the configured scale.
        assert optim.dynamic_loss_scale == False
        assert optim.loss_scaler.loss_scale == 138.
        loader = random_dataloader(model=engine,
                                   total_samples=10,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args)
@pytest.mark.parametrize('zero_stage, use_cpu_offload',
                         [(1, False),
                          (2, False),
                          (2, True),
                          (3, False),
                          (3, True)])
def test_zero_allow_untested_optimizer(tmpdir, zero_stage, use_cpu_offload):
    """With zero_allow_untested_optimizer=False, an unrecognized client optimizer must be rejected."""
    if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
        pytest.skip("cpu-adam is not compatible")
    config_dict = {
        "train_batch_size": 4,
        "steps_per_print": 1,
        "fp16": {"enabled": True},
        "zero_optimization": {"stage": zero_stage, "cpu_offload": use_cpu_offload},
        "zero_allow_untested_optimizer": False,
    }
    args = args_from_dict(tmpdir, config_dict)

    @distributed_test(world_size=[1])
    def _run(args, zero_stage):
        hidden_dim = 10
        model = SimpleModel(hidden_dim)
        client_optimizer = SimpleOptimizer(model.parameters())
        # Initialization must fail for an untested client optimizer.
        with pytest.raises(AssertionError):
            deepspeed.initialize(args=args,
                                 model=model,
                                 optimizer=client_optimizer,
                                 model_parameters=model.parameters())

    _run(args, zero_stage)
@pytest.mark.parametrize('zero_stage, use_cpu_offload',
                         [(1, False),
                          (2, False),
                          (2, True),
                          (3, False),
                          (3, True)])
def test_zero_empty_partition(tmpdir, zero_stage, use_cpu_offload):
    """ZeRO must tolerate ranks whose parameter partition is empty (2 params across DP=3)."""
    if use_cpu_offload and not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
        pytest.skip("cpu-adam is not compatible")
    if zero_stage == 3:
        pytest.skip("skip for now")
    config_dict = {
        "train_micro_batch_size_per_gpu": 1,
        "gradient_accumulation_steps": 1,
        "fp16": {"enabled": True, "initial_scale_power": 8},
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "zero_optimization": {
            "stage": zero_stage,
            "cpu_offload": use_cpu_offload,
            "reduce_bucket_size": 100,
            "allgather_bucket_size": 100,
        },
    }
    args = args_from_dict(tmpdir, config_dict)

    @distributed_test(world_size=[3])
    def _run(args, zero_stage):
        hidden_dim = 1
        model = SimpleModel(hidden_dim)
        # Two parameters over three ranks guarantees one empty partition.
        assert len(list(model.parameters())) == 2
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=1,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, zero_stage=zero_stage)
@amp_available
def test_adam_amp_basic(tmpdir):
    """Minimal apex/amp run with a client torch Adam optimizer."""
    config_dict = {"train_batch_size": 1, "steps_per_print": 1, "amp": {"enabled": True}}
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1])
    def _run(args, model, hidden_dim):
        client_optimizer = torch.optim.Adam(params=model.parameters())
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               optimizer=client_optimizer)
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
@amp_available
def test_lamb_amp_basic(tmpdir):
    """apex/amp run with the Lamb optimizer and gradient clipping."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Lamb", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "amp": {"enabled": True},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1, 2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
@amp_available
def test_adam_amp_o2(tmpdir):
    """apex/amp opt_level O2 run with the configured Adam optimizer."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "amp": {"enabled": True, "opt_level": "O2"},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1, 2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
@amp_available
def test_adam_amp_o2_empty_grad(tmpdir):
    """apex/amp opt_level O2 run on two ranks (note: model here has no empty-grad parameter)."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "amp": {"enabled": True, "opt_level": "O2"},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
@pytest.mark.parametrize('zero_stage, optimizer_constructor',
                         [(1, FusedAdam),
                          (2, torch.optim.Adam),
                          (2, FusedAdam),
                          (3, torch.optim.Adam),
                          (3, FusedAdam)])
def test_zero_supported_client_optimizer(tmpdir, zero_stage, optimizer_constructor):
    """Each ZeRO stage must accept its supported client optimizer classes."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "fp16": {"enabled": True},
        "zero_optimization": {"stage": zero_stage},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    @distributed_test(world_size=[1])
    def _run(args, zero_stage, optimizer_constructor):
        model = SimpleModel(hidden_dim)
        client_optimizer = optimizer_constructor(params=model.parameters())
        # Success criterion is simply that initialization does not raise.
        deepspeed.initialize(args=args, model=model, optimizer=client_optimizer)

    _run(args=args,
         zero_stage=zero_stage,
         optimizer_constructor=optimizer_constructor)
def test_zero2_reduce_scatter_off(tmpdir):
    """ZeRO stage 2 with reduce-scatter disabled (plain allreduce path)."""
    config_dict = {
        "train_batch_size": 2,
        "steps_per_print": 1,
        "optimizer": {"type": "Adam", "params": {"lr": 0.00015}},
        "gradient_clipping": 1.0,
        "zero_optimization": {
            "stage": 2,
            "contiguous_gradients": True,
            "allgather_bucket_size": 2000000000,
            "reduce_bucket_size": 200000000,
            "overlap_comm": False,
            "reduce_scatter": False,
        },
        "fp16": {"enabled": True},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[2])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
@pytest.mark.parametrize('adam_type, torch_impl',
                         [('Adam', True),
                          ('Adam', False),
                          ('AdamW', True),
                          ('AdamW', False)])
def test_fp16_adam_types(tmpdir, adam_type, torch_impl):
    """FP16 training with Adam/AdamW via both the torch and the fused implementations."""
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
        "fp16": {"enabled": True, "initial_scale_power": 10},
        "optimizer": {
            "type": adam_type,
            "torch_adam": torch_impl,
            "params": {"lr": 0.00015},
        },
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1])
    def _run(args, model, hidden_dim):
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=10,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
def test_zero3_lazyscatter(tmpdir):
    """ZeRO stage 3 run where the model is constructed inside the distributed worker."""
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
        "fp16": {"enabled": True, "initial_scale_power": 10},
        "optimizer": {"type": "AdamW", "params": {"lr": 0.00015}},
        "zero_optimization": {"stage": 3},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10

    @distributed_test(world_size=[1])
    def _run(args):
        model = SimpleModel(hidden_dim)
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               model_parameters=model.parameters())
        loader = random_dataloader(model=engine,
                                   total_samples=10,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args)
@pytest.mark.parametrize('stage', [1, 2, 3])
def test_zero_empty_grad(tmpdir, stage):
    """Each ZeRO stage with a client torch Adam optimizer over a short FP16 run."""
    config_dict = {
        "train_batch_size": 1,
        "steps_per_print": 1,
        "fp16": {"enabled": True},
        "zero_optimization": {"stage": stage},
    }
    args = args_from_dict(tmpdir, config_dict)
    hidden_dim = 10
    model = SimpleModel(hidden_dim)

    @distributed_test(world_size=[1])
    def _run(args, model, hidden_dim):
        client_optimizer = torch.optim.Adam(model.parameters())
        engine, _, _, _ = deepspeed.initialize(args=args,
                                               model=model,
                                               optimizer=client_optimizer)
        loader = random_dataloader(model=engine,
                                   total_samples=50,
                                   hidden_dim=hidden_dim,
                                   device=engine.device)
        for batch in loader:
            loss = engine(batch[0], batch[1])
            engine.backward(loss)
            engine.step()

    _run(args=args, model=model, hidden_dim=hidden_dim)
| [
"noreply@github.com"
] | noreply@github.com |
b5962536fc3196f30074e0c239782e8e6a4f6112 | 906e90e9d7fa7970ed74358ab5f1a9284341520f | /framework/View.py | 860bec3db1db6de80d78ef54f5a6f1da12adfc59 | [] | no_license | gprolog/uniwallet | 1b02a250a34e36ca4a793b48ed14c3d04ddbc4d1 | 020bd669344ecc305d95f7d7d31fad81ca032adc | refs/heads/master | 2020-03-10T03:56:25.979909 | 2017-07-02T17:18:15 | 2017-07-02T17:18:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from framework.Core import Core
class View(Core):
def __init__(self, request):
super().__init__(request)
def get_html(self, html_file_name):
"""
get_html(): It reads and returns the content of some html file placed on modules/{current_module}/html
"""
return self.get_file("html/{}.html".format(html_file_name)) | [
"gresendesa@gmail.com"
] | gresendesa@gmail.com |
6a0da6b5363ec06cb44c9f1faeba31dcc126eba2 | 4d3c948e97a13f8db16a095752743ebd1f78514f | /Bipolar_Stepper_Motor_Class.py | b17e01b938d60289494f40c241f8e259046b6bcc | [] | no_license | gwydyon/Engraver | b891566888a508a0ec560a8b4ad7b5bed0548b8a | 5ce4ca1f65776fc990556d20e2cc67255af1c277 | refs/heads/master | 2021-01-23T03:20:39.421166 | 2015-01-14T14:37:24 | 2015-01-14T14:37:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | import RPi.GPIO as GPIO
import time
#sequence for a1, b2, a2, b1
#phase_seq=[[1,1,0,0],[0,1,1,0],[0,0,1,1],[1,0,0,1]];
#full step sequence. maximum torque
phase_seq=[[1,0,0,0],[1,1,0,0],[0,1,0,0],[0,1,1,0],[0,0,1,0],[0,0,1,1],[0,0,0,1],[1,0,0,1]]
#half-step sequence. double resolution. But the torque of the stepper motor is not constant
num_phase=len(phase_seq);
class Bipolar_Stepper_Motor:
phase=0;
dirction=0;
position=0;
a1=0;#pin numbers
a2=0;
b1=0;
b2=0;
def __init__(self,a1,a2,b1,b2):
#initial a Bipolar_Stepper_Moter objects by assigning the pins
GPIO.setmode(GPIO.BOARD);
self.a1=a1;
self.a2=a2;
self.b1=b1;
self.b2=b2;
GPIO.setup(self.a1,GPIO.OUT);
GPIO.setup(self.a2,GPIO.OUT);
GPIO.setup(self.b1,GPIO.OUT);
GPIO.setup(self.b2,GPIO.OUT);
self.phase=0;
self.dirction=0;
self.position=0;
def move(self, dirction, steps, delay=0.2):
for _ in range(steps):
next_phase=(self.phase+dirction) % num_phase;
GPIO.output(self.a1,phase_seq[next_phase][0]);
GPIO.output(self.b2,phase_seq[next_phase][1]);
GPIO.output(self.a2,phase_seq[next_phase][2]);
GPIO.output(self.b1,phase_seq[next_phase][3]);
self.phase=next_phase;
self.dirction=dirction;
self.position+=dirction;
time.sleep(delay);
def unhold(self):
GPIO.output(self.a1,0);
GPIO.output(self.a2,0);
GPIO.output(self.b1,0);
GPIO.output(self.b2,0);
| [
"imantz1@lsu.edu"
] | imantz1@lsu.edu |
4f3b17b0fee81e45c0772fff4e006c947578d44e | 356021ab75d994e0f23561b027a9f6ed48218e3d | /hypothesis/nn/amortized_ratio_estimation/multi_layered_perceptron/likelihood_to_evidence.py | d202427880fc37abb4115a026e2a5d1df51aa71a | [
"BSD-3-Clause"
] | permissive | juryowl/hypothesis | e15ff63105aa4918cc5d68b9ac805999ed9b4902 | 29a2b7b4649db345d43a8d3bf98aa5d817b43f1b | refs/heads/master | 2023-06-22T16:06:42.116124 | 2021-05-06T19:21:18 | 2021-05-06T19:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py | import hypothesis
import hypothesis.nn
import torch
from hypothesis.nn import MultiLayeredPerceptron
from hypothesis.nn.amortized_ratio_estimation import BaseLikelihoodToEvidenceRatioEstimator
from hypothesis.nn.neuromodulation import BaseNeuromodulatedModule
from hypothesis.nn.neuromodulation import allocate_neuromodulated_activation
from hypothesis.nn.neuromodulation import list_neuromodulated_modules
from hypothesis.nn.util import compute_dimensionality
class LikelihoodToEvidenceRatioEstimatorMLP(BaseLikelihoodToEvidenceRatioEstimator):
def __init__(self,
shape_inputs,
shape_outputs,
activation=hypothesis.default.activation,
dropout=hypothesis.default.dropout,
layers=hypothesis.default.trunk):
super(LikelihoodToEvidenceRatioEstimatorMLP, self).__init__()
dimensionality = compute_dimensionality(shape_inputs) + compute_dimensionality(shape_outputs)
self.mlp = MultiLayeredPerceptron(
shape_xs=(dimensionality,),
shape_ys=(1,),
activation=activation,
dropout=dropout,
layers=layers,
transform_output=None)
def log_ratio(self, inputs, outputs):
features = torch.cat([inputs, outputs], dim=1)
return self.mlp(features)
class LikelihoodToEvidenceRatioEstimatorNeuromodulatedMLP(BaseLikelihoodToEvidenceRatioEstimator):
def __init__(self,
shape_outputs,
controller_allocator,
activation=hypothesis.default.activation,
dropout=hypothesis.default.dropout,
layers=hypothesis.default.trunk):
super(LikelihoodToEvidenceRatioEstimatorNeuromodulatedMLP, self).__init__()
# Allocate the neuromodulated activation.
neuromodulated_activation = allocate_neuromodulated_activation(
activation=activation,
allocator=controller_allocator)
# Check if the specified activation is an i
self.mlp = MultiLayeredPerceptron(
shape_xs=shape_outputs,
shape_ys=(1,),
activation=neuromodulated_activation,
dropout=dropout,
layers=layers,
transform_output=None)
# List the neuromodulated modules.
self.neuromodulated_modules = list_neuromodulated_modules(self)
def log_ratio(self, inputs, outputs):
for module in self.neuromodulated_modules:
module.update(context=inputs)
return self.mlp(outputs)
| [
"noreply@github.com"
] | noreply@github.com |
f9893af57d00443f5d09d48216b23de7f7b60a2e | 930893b9f087a00a7200a3354bd0ec974b241e94 | /app/datamanager.py | da05464b6641b1321586a3185df5299077dcd4ea | [] | no_license | radix07/m2m_obs | 13819dafddeefbc898a49aae1400fd3576d45abe | 5213d48272995ae253bd5e1c49e84a6640b7895b | refs/heads/master | 2021-09-28T19:16:23.823757 | 2018-11-19T20:59:00 | 2018-11-19T20:59:00 | 17,032,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,647 | py | from app import db, models
from models import User, ROLE_USER, ROLE_ADMIN
from sqlalchemy import func
import sqlalchemy
import time
#######################GET################################
##Device
def removeAllDevices():
print "Deleting All Devices"
devices = models.device.query.all()
for dev in devices:
db.session.delete(dev)
db.session.commit()
def getDeviceListFormatted():
    """Return devices as rows of
    [connectware_id, lat, long, group (always empty), connection_status,
     global_ip, last_disconnect_time]."""
    return [[dev.dev_connectware_id,
             dev.dp_map_lat,
             dev.dp_map_long,
             "",
             dev.dp_connection_status,
             dev.dp_global_ip,
             dev.dp_last_disconnect_time]
            for dev in models.device.query.all()]
def getDeviceList():
    # Return every device record known to the application.
    return models.device.query.all()
def getDeviceByID(id):
    # Look up one device by its exact connectware id; returns None if absent.
    # (Parameter name shadows the builtin ``id``.)
    return models.device.query.filter_by(dev_connectware_id=id).first()
def getStreamListByDeviceID(dev_id):
    # NOTE(review): this definition is shadowed by a later function of the
    # same name (exact-match variant) further down this module, so this
    # case-insensitive version is dead code at import time -- confirm which
    # one is intended.
    print dev_id  # debug output left in place
    # fixDevID (defined elsewhere) returns a tuple; the second element
    # shadows the builtin ``set`` here.
    dev_id,set = fixDevID(dev_id)
    return models.latestDataStreamPoints.query.filter(models.latestDataStreamPoints.dev_id.ilike("%"+dev_id.lower()+"%")).all()
def addOrGetUser(username,password):
    """Return the existing User for ``username``, or create and stage a new one.

    Fix: the original ignored the ``password`` argument entirely and always
    stored an empty string; the supplied value is now persisted.
    NOTE(review): the password is stored as given -- it should be hashed
    (e.g. werkzeug ``generate_password_hash``) before production use.
    A newly created user is only added to the session; the caller commits.
    """
    user = User.query.filter_by(username=username).first()
    if user is None:
        user = models.User(username=username, password=password)
        db.session.add(user)
    return user
##Stream
def getStreamListByDeviceIDAndstream_id(did,sid):
    """Return the latest stream point for one (device, stream) pair, or None."""
    normalized_id, _ = fixDevID(did)
    query = models.latestDataStreamPoints.query
    return query.filter_by(stream_id=sid, dev_id=normalized_id).first()
def getStreamListByDeviceID(did):
    # Exact-match variant; because it shares a name with the earlier
    # ilike-based definition above, this one wins at import time.  Unlike
    # most siblings it neither normalizes the id via fixDevID nor
    # materializes the query with .all(), so callers receive a query object
    # to iterate.  NOTE(review): confirm the duplicate definition is
    # intentional.
    return models.latestDataStreamPoints.query.filter_by(dev_id=did)
def getStreamList():
    # Return every latest-data-stream-point record.
    return models.latestDataStreamPoints.query.all()
def getStreamListBystream_id(id):
    # All latest stream points for one stream id, across every device.
    # (Parameter name shadows the builtin ``id``.)
    return models.latestDataStreamPoints.query.filter_by(stream_id=id).all()
##DataPoints
def getDataPoint(dev_id,stream_id,timestamp,datapoint):
    """Return the exactly-matching datapoint record, or None if absent."""
    normalized_id, _ = fixDevID(dev_id)
    return models.dataPointRecords.query.filter_by(
        dev_id=normalized_id,
        stream_id=stream_id,
        timestamp=timestamp,
        datapoint=datapoint).first()
def getMostRecentTSDataPoint(dev_id=0,stream_id=0):
    # Latest timestamp for one (device, stream) pair, or -- when either
    # argument is falsy -- the maximum timestamp across all records.
    # Returns 0 when no matching record exists (the .timestamp access on a
    # None result raises and is caught below).
    if dev_id and stream_id:
        dev_id,set = fixDevID(dev_id)  # second element shadows builtin ``set``
        try:
            lastrecord = db.session.query(models.dataPointRecords).filter(models.dataPointRecords.dev_id==dev_id.strip(),models.dataPointRecords.stream_id==stream_id).order_by(models.dataPointRecords.timestamp.desc()).first()
            # Debug print; timestamps appear to be epoch milliseconds
            # (divided by 1000 before formatting) -- TODO confirm.
            print dev_id,stream_id,":",lastrecord.timestamp,str(time.strftime('%B %d, %Y %H:%M:%S', time.localtime((float(lastrecord.timestamp)/1000))))
            return lastrecord.timestamp
        except Exception, e:
            # Broad catch: any failure (typically AttributeError on a None
            # ``lastrecord``) is treated as "no data yet".
            print "Exception, None exist returning 0 TS, e:",e
            return 0
    else:
        # Global maximum timestamp over the whole table.
        return db.session.query(func.max(models.dataPointRecords.timestamp)).all()[0][0]
def getAnyDatapoint():
    # Return an arbitrary single datapoint record as a one-element list
    # (empty list when the table is empty).
    return models.dataPointRecords.query.limit(1).all()
def getAllDatapoints():
    # Every datapoint record, newest first.
    return models.dataPointRecords.query.order_by(models.dataPointRecords.timestamp.desc()).all()
def getAllDatapointsFiltered(dev_id,sinceTS,stream=None):
    """Datapoints for one device at or after ``sinceTS``, ascending by
    timestamp; optionally restricted to a single stream."""
    criteria = [models.dataPointRecords.dev_id == dev_id,
                models.dataPointRecords.timestamp >= sinceTS]
    if stream is not None:
        criteria.append(models.dataPointRecords.stream_id == stream)
    query = models.dataPointRecords.query.filter(*criteria)
    return query.order_by(models.dataPointRecords.timestamp.asc()).all()
def getDecimatedDatapointsByID(dev_id,stream_id,interval):
    """Return every ``interval``-th (id-ordered) datapoint for one device/stream.

    Fix: the original executed invalid SQL (``ROW_NUMBER()`` without an
    ``OVER`` clause), ignored all three arguments, printed the rows and
    returned nothing.  The window-function query that was left commented out
    is now used, with bound parameters instead of string interpolation, and
    the result proxy is returned so callers can iterate the decimated rows.
    ``MOD`` is used instead of ``%`` to avoid DBAPI paramstyle escaping.
    """
    query = sqlalchemy.text(
        "SELECT id, timestamp, datapoint, stream_id FROM ("
        "SELECT id, timestamp, datapoint, stream_id, "
        "ROW_NUMBER() OVER (ORDER BY id) AS rownum "
        "FROM data_point_records "
        "WHERE stream_id LIKE :sid AND dev_id LIKE :did) AS t "
        "WHERE MOD(t.rownum, :step) = 0 ORDER BY t.id")
    return db.engine.execute(query, sid=stream_id, did=dev_id, step=interval)
def getAllDatapointsByIDRaw(dev_id,stream_id):
    """Raw-SQL fetch of (timestamp, datapoint) rows for one device/stream.

    The query is now parameterised instead of built by string concatenation,
    so a malicious dev_id/stream_id can no longer inject SQL.
    NOTE(review): '%s' is the DB-API paramstyle for psycopg2/MySQLdb; the
    double-quoted identifiers suggest PostgreSQL here -- confirm if the
    backend ever changes (SQLite uses '?').
    """
    query = ('SELECT "timestamp",datapoint FROM data_point_records '
             'WHERE "stream_id" LIKE %s AND "dev_id" LIKE %s')
    return db.engine.execute(query, (stream_id, dev_id))
def getAllDatapointsByID(dev_id,stream_id):
    """All datapoints for one device and stream; the device id is normalised
    and then matched case-insensitively as a substring."""
    normalised_id, _changed = fixDevID(dev_id)
    record = models.dataPointRecords
    pattern = "%" + normalised_id.lower() + "%"
    return record.query.filter(record.dev_id.ilike(pattern),
                               record.stream_id == stream_id).all()
def getAllEventOccurances(count=10, dev_id=None):
    """Most recent 'EventList' occurrences, newest first, with timestamps
    humanised by formatEpochTimeofList.

    count: maximum number of rows returned (default 10). Bug fix: this
    parameter was previously ignored -- the limit was hard-coded to 10.
    dev_id: when given, restrict to that device.
    """
    record = models.dataPointRecords
    criteria = [record.stream_id == "EventList"]
    if dev_id is not None:
        criteria.append(record.dev_id == dev_id)
    query = (record.query.filter(*criteria)
             .order_by(record.timestamp.desc())
             .limit(count))
    return formatEpochTimeofList(query)
####################ADD#############################
def addNewDevice(dev_connectware_id,dp_map_lat,dp_map_long,dp_connection_status,dp_global_ip,dp_last_disconnect_time):
    # Create and commit a new device row; every field is stored as a string.
    # NOTE(review): despite the comment below, no existence check is actually
    # performed -- calling this twice for the same id inserts a duplicate.
    #query if exists, then if doesnt
    recordItem = models.device(dev_connectware_id=str(dev_connectware_id),
                    dp_map_lat=str(dp_map_lat),dp_map_long=str(dp_map_long),
                    dp_connection_status=str(dp_connection_status),
                    dp_global_ip=str(dp_global_ip),
                    dp_last_disconnect_time=str(dp_last_disconnect_time))
    try:
        # session.save() only exists on very old SQLAlchemy versions
        db.session.save(recordItem) #was add()
    except:
        db.session.add(recordItem)
    print "Pre Commit Changes"
    db.session.commit()
    print "Commit Change"
    return recordItem
def addNewStream(dev_id,stream_id,timestamp,datapoint,commit=0):
    """Stage a latestDataStreamPoints row for the given stream.

    commit: when truthy, commit the session immediately; otherwise the
    caller is expected to commit later (e.g. via commitDB()).
    """
    dev_id, _changed = fixDevID(dev_id)
    recordItem = models.latestDataStreamPoints(dev_id=dev_id, stream_id=stream_id,
                                               timestamp=timestamp, datapoint=datapoint)
    # Very old SQLAlchemy exposed session.save(); fall back to add() otherwise.
    # (Replaces a bare try/except that silently swallowed *all* errors.)
    stage = getattr(db.session, "save", db.session.add)
    stage(recordItem)
    if commit:
        db.session.commit()
def fastaddDataPoints(dev_id,stream_id,pointList,commit=0):
    """Bulk-insert (timestamp, datapoint) pairs for one device/stream via a
    single SQLAlchemy Core insert, bypassing the ORM unit-of-work for speed.

    pointList: iterable of (timestamp, datapoint) tuples.
    commit: accepted for signature parity with addDataPoint(); the Core
    insert executes immediately, so no session commit is needed.
    Returns 1 on completion.

    (Removed unreachable statements that followed the return.)
    """
    rows = [{"dev_id": dev_id, "stream_id": stream_id,
             "timestamp": ts, "datapoint": dp} for ts, dp in pointList]
    db.engine.execute(
        models.dataPointRecords.__table__.insert(),
        rows
    )
    return 1
def addDataPoint(dev_id,stream_id,timestamp,datapoint,commit=0):
    """Stage a single dataPointRecords row.

    NOTE: normalising the device id per datapoint is the known bottleneck
    here; prefer fastaddDataPoints() for bulk loads.
    commit: when truthy, commit the session immediately.
    """
    dev_id, _changed = fixDevID(dev_id)
    recordItem = models.dataPointRecords(dev_id=dev_id, stream_id=stream_id,
                                         timestamp=timestamp, datapoint=datapoint)
    # Very old SQLAlchemy exposed session.save(); fall back to add() otherwise.
    # (Replaces a bare try/except that silently swallowed *all* errors.)
    stage = getattr(db.session, "save", db.session.add)
    stage(recordItem)
    if commit:
        db.session.commit()
########################DATA MANAGE##################
def cleanOldDataForDBThreshold(limit):
    # Trim data_point_records down to at most 'limit' rows by bulk-deleting
    # the oldest (lowest-id) rows, then commit.
    #NEED TO ENSURE SOMEHOW THAT LAST DATA POINT IN STREAM IS NOT REMOVED!!!!!!!!!!!!!!
    #recordCount = db.session.execute('select count(*) from data_point_records')
    recordCount = db.session.query(models.dataPointRecords).count()
    #print "Datapoint Record Count:",recordCount
    #group by stream, delete record count per group down to limit/(stream count)
    if recordCount > limit:
        result = db.session.execute('DELETE FROM data_point_records WHERE id IN (select id from data_point_records ORDER BY id ASC LIMIT '+str(long(recordCount) - limit)+")")
        #while recordCount > limit:
        #result = db.session.execute("DELETE FROM data_point_records WHERE id IN(SELECT MIN(id) FROM data_point_records GROUP BY 'stream_id')")
        #recordCount = db.session.query(models.dataPointRecords).count()
        # NOTE: recordCount below is the pre-delete count
        print "Rec Count:",recordCount, "\tRemoved:",str(recordCount - limit),"\tLimit:",limit
    db.session.commit()
def normalizeDataStreamRecords():
    # Rewrite every latestDataStreamPoints.dev_id into canonical form via
    # fixDevID(), committing once at the end if needed.
    query = db.session.query(models.latestDataStreamPoints)
    comFlag=0
    for row in query:
        row.dev_id,comFlag = fixDevID(row.dev_id)
    # NOTE(review): comFlag only reflects the *last* row processed, so edits
    # to earlier rows are committed only when the final row was also changed.
    if comFlag:
        print "Commit Changes"
        commitDB()
def normalizeDataPointRecords():
    # Rewrite every dataPointRecords.dev_id into canonical form via
    # fixDevID(), committing once at the end if needed.
    query = db.session.query(models.dataPointRecords)
    #rows = query.statement.execute().fetchall()
    comFlag=0
    for row in query:
        row.dev_id,comFlag = fixDevID(row.dev_id)
    # NOTE(review): same caveat as normalizeDataStreamRecords -- comFlag only
    # reflects the last row.
    if comFlag:
        print "Commit Changes"
        commitDB()
def commitDB():
    # Commit the current SQLAlchemy session (shared helper used by the
    # normalize* functions above).
    db.session.commit()
##############Utility#################
def formatEpochTimeofList(records):
    """Convert each record's epoch-milliseconds timestamp to a readable
    local-time string, in place, and return the (mutated) sequence.

    Records whose timestamp is already a non-numeric string are left alone;
    numeric timestamps (int/float, which have no .isdigit()) are converted.
    Fixes: parameter no longer shadows the builtin 'list', the bare
    catch-all except is narrowed to AttributeError, and the duplicated
    format string is hoisted.
    """
    fmt = '%B %d, %Y %H:%M:%S'
    for st in records:
        try:
            convert = st.timestamp.isdigit()
        except AttributeError:
            convert = True  # numeric timestamp: no .isdigit() method
        if convert:
            st.timestamp = time.strftime(fmt, time.localtime(float(st.timestamp) / 1000))
    return records
def fixDevID(dev_id):
    """Normalise a device id to the canonical upper-case
    '00000000-00000000-XXXXXXXX-XXXXXXXX' form.

    Returns (normalised_id, changed) where changed is 1 only when a missing
    leading '0' was prepended (flag kept for backward compatibility with
    callers that unpack it).

    Fixes: the strip loop previously used '> 7', which removed one character
    too many and left a 7-character first group (the canonical form, as used
    throughout this module, has the first dash at index 8); the local flag no
    longer shadows the builtin 'set'; the lowercase-fixup branch was dead
    (the id is upper-cased up front) and has been removed.
    """
    changed = 0
    if "python" in dev_id:  # special test ids pass through untouched
        return dev_id, changed
    dev_id = dev_id.upper()
    if dev_id.find("-") > 8:
        # Extra leading characters: strip until the first dash is at index 8.
        while dev_id.find("-") > 8:
            dev_id = dev_id[1:]
    elif dev_id[8] != "-":
        # Missing leading zero in the first 8-character group.
        dev_id = "0" + dev_id
        changed = 1
    return dev_id, changed
#def normalizeLatestDataStreamDeviceID():
#make all items same case and length for similar
#remove inherint potential duplicates
if __name__ == '__main__':
datapoints = getAllDatapointsByID(str(dev_id),stList[int(streamIndex)].stream_id) | [
"rsatterlee@precisiongovernors.com"
] | rsatterlee@precisiongovernors.com |
e13f9f5b75ee68f118ff8dd164171ae5c45197a8 | e06d6a38f835ff105eae9e55b60b08c6fea8b6dd | /rango/forms.py | c41604b0465ab1797018a5d3747f9041202a7c42 | [] | no_license | DiegoCardoso/tango_django | 0703759f5ce9b07c905d100315cd17e304ffa307 | 6218defa412688ecbd194cc1d99e6c6d7f54155e | refs/heads/master | 2016-09-10T19:35:57.166637 | 2014-10-04T23:22:41 | 2014-10-04T23:22:41 | 24,800,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | from django import forms
from rango.models import Page, Category
class CategoryForm(forms.ModelForm):
    # Form for creating a Category: views/likes are hidden fields initialised
    # to 0, so the user only types the category name.
    name = forms.CharField(max_length=128, help_text='Please, enter the category name.')
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    #An inline class to provide additional information on the form
    class Meta:
        model = Category
        # NOTE(review): no 'fields'/'exclude' declared -- Django >= 1.8
        # rejects a ModelForm Meta without one; fine on the older Django
        # this project targets (PageForm below does declare fields).
class PageForm(forms.ModelForm):
    # Form for creating a Page; the owning category is set in the view,
    # so only title/url are user-visible and views is hidden at 0.
    title = forms.CharField(max_length=128, help_text='Please, enter the title of the page.')
    url = forms.URLField(max_length=200, help_text='Please, enter the URL of the page.')
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    class Meta:
        model = Page
        fields = ('title', 'url', 'views')
| [
"diegopc86@gmail.com"
] | diegopc86@gmail.com |
b7d144399c83854e3f8c36fc9d5d76812b6472ff | 43713debb41a39c4bbe7f497d07fe83a7945cc93 | /CalcCovWeight.py | 33834d640a0c4bbe284d7f7d32354fd35a70d261 | [] | no_license | ebonnassieux/Scripts | a94d045522a9f2dcd1978ddf933d8528a284def1 | b46e86792ce9e58a02464734c109d3d253e7a954 | refs/heads/master | 2023-08-03T10:40:58.046773 | 2023-07-10T07:55:32 | 2023-07-10T07:55:32 | 81,810,071 | 3 | 0 | null | 2020-06-16T12:30:38 | 2017-02-13T09:56:15 | Jupyter Notebook | UTF-8 | Python | false | false | 8,126 | py | import os
from pyrap.tables import table
import numpy as np
import pylab
from numpy import ma
import sys
import warnings
import time
import math
import argparse
class CovWeights:
    """Compute antenna-based, calibration-quality imaging weights for a
    measurement set (Python 2 / pyrap).

    FindWeights() derives one noise coefficient per antenna per timestep
    from residual visibilities and saves them to <MS>/CoeffArray.npy;
    SaveWeights() turns the coefficients into per-visibility weights
    w = 1 / (c_a * c_b + 0.01) and writes them to a weights column.
    """
    def __init__(self,MSName,ntsol=1,SaveDataProducts=0,modelms=""):
        # strip a trailing slash so derived paths like MSName+"/CoeffArray.npy" work
        if MSName[-1]=="/":
            self.MSName=MSName[0:-1]
        else:
            self.MSName=MSName
        self.MaxCorrTime=0
        self.SaveDataProducts=SaveDataProducts
        # calibration solution interval, in timesteps
        self.ntSol=ntsol
        # optional separate MS holding the MODEL_DATA column
        self.modelms=modelms
    def FindWeights(self,tcorr=0):
        # Returns an (nt, nAnt) array of per-antenna noise coefficients,
        # also saved to <MS>/CoeffArray.npy.
        ms=table(self.MSName)
        # open antennas
        ants=table(ms.getkeyword("ANTENNA"))
        # open antenna tables
        antnames=ants.getcol("NAME")
        nAnt=len(antnames)
        # load ant indices
        A0=ms.getcol("ANTENNA1")
        A1=ms.getcol("ANTENNA2")
        Times=ms.getcol("TIME")
        # number of baselines = rows sharing the first timestamp
        nbl=np.where(Times==Times[0])[0].size
        warnings.filterwarnings("ignore")
        warnings.filterwarnings("default")
        # NOTE(review): this tests the *global* name 'modelms', not
        # self.modelms -- it only works because the __main__ block below
        # defines a global of the same name; confirm before reuse as a library.
        if modelms!="":
            mdms=table(self.modelms)
            modeldata=mdms.getcol("MODEL_DATA")
            corrdata=ms.getcol("CORRECTED_DATA")
            residualdata=modeldata-corrdata
            mdms.close()
        else:
            try:
                residualdata=ms.getcol("RESIDUAL_DATA")
            except RuntimeError:
                # NOTE(review): 'verb' is not defined anywhere in this file.
                if verb: print "RESIDUAL_DATA not in ms; try building it"
                try:
                    modeldata=ms.getcol("MODEL_DATA")
                except RuntimeError:
                    if verb: print "MODEL_DATA not in ms; reading CORRECTED_DATA while assuming it's residuals..."
                    residualdata=ms.getcol("CORRECTED_DATA")
        ### if you do want residual data saved, uncomment below ###
        #if "RESIDUAL_DATA" not in ms.colnames():
        #    desc=ms.getcoldesc("CORRECTED_DATA")
        #    desc["name"]="RESIDUAL_DATA"
        #    desc['comment']=desc['comment'].replace(" ","_")
        #    ms.addcols(desc)
        #    ms.putcol("RESIDUAL_DATA",residualdata)
        ### stop uncommenting now ###
        flags=ms.getcol("FLAG")
        # NOTE(review): this unconditional re-read clobbers the residualdata
        # built above (and re-reads flags) -- it assumes RESIDUAL_DATA always
        # exists by this point; confirm.
        residualdata=ms.getcol("RESIDUAL_DATA")
        flags=ms.getcol("FLAG")
        # apply flags to data
        residualdata[flags==1]=0
        # exit files gracefully
        ants.close()
        # initialise
        nChan=residualdata.shape[1]
        nPola=residualdata.shape[2]
        nt=residualdata.shape[0]/nbl
        # reshape antennas and data columns
        residualdata=residualdata.reshape((nt,nbl,nChan,nPola))
        # average residual data within calibration cells
        if self.ntSol>1:
            tspill=nt%self.ntSol
            nt1=nt+self.ntSol-tspill
            for i in range(nt1/self.ntSol):
                residualdata[i*self.ntSol:(i+1)*self.ntSol,:,:,:]=np.mean(residualdata[i*self.ntSol:(i+1)*self.ntSol,:,:,:],axis=0)
        A0=A0.reshape((nt,nbl))
        A1=A1.reshape((nt,nbl))
        ant1=np.arange(nAnt)
        # make rms array
        darray=ms.getcol("CORRECTED_DATA").reshape((nt,nbl,nChan,nPola))
        ms.close()
        # cross-hand polarisations (XY/YX) used as a noise estimate
        rmsarray=np.zeros((nt,nbl,nChan,2),dtype=np.complex64)
        residuals=np.zeros_like(rmsarray,dtype=np.complex64)
        rmsarray[:,:,:,0]=darray[:,:,:,1]
        rmsarray[:,:,:,1]=darray[:,:,:,2]
        # make proper residual array (parallel hands XX/YY)
        residuals[:,:,:,0]=darray[:,:,:,0]
        residuals[:,:,:,1]=darray[:,:,:,3]
        # antenna coefficient array
        CoeffArray=np.zeros((nt,nAnt))
        # start calculating the weights
        print "Begin calculating antenna-based coefficients"
        warnings.filterwarnings("ignore")
        print "Find variance-only weights"
        for t_i in range(nt):
            # build weights for each antenna at time t_i
            for ant in ant1:
                # set of vis for baselines ant-ant_i
                # NOTE(review): compares against the whole antenna range
                # (ant1) whereas set2 compares against 'ant' -- looks like it
                # was meant to be A0[t_i]==ant; confirm before changing.
                set1=np.where(A0[t_i]==ant1)[0]
                # set of vis for baselines ant_i-ant
                set2=np.where(A1[t_i]==ant)[0]
                CoeffArray[t_i,ant] = np.sqrt(np.mean(np.append(residuals[t_i,set1,:,:],residuals[t_i,set2,:,:])*np.append(residuals[t_i,set1,:,:],residuals[t_i,set2,:,:]).conj())\
                                              - np.std( (np.append(rmsarray[t_i,set1,:,:], rmsarray[t_i,set2,:,:]))) )
            PrintProgress(t_i,nt)
        warnings.filterwarnings("default")
        # floor each antenna's coefficients at a quarter of its median so a
        # few very quiet timesteps cannot produce runaway weights
        for i in range(nAnt):
            thres=0.25*np.median(CoeffArray[:,i])
            CoeffArray[CoeffArray[:,i]<thres,i]=thres
        coeffFilename=self.MSName+"/CoeffArray.npy"
        print "Save coefficient array as %s"%coeffFilename
        np.save(coeffFilename,CoeffArray)
        return CoeffArray
    def SaveWeights(self,CoeffArray,colname="VAR_WEIGHT",AverageOverChannels=True,tcorr=0):
        # Convert the (nt, nAnt) coefficient array into per-visibility
        # weights and write them into 'colname' (created if absent,
        # modelled on IMAGING_WEIGHT's column description).
        print "Begin saving the data"
        ms=table(self.MSName,readonly=False)
        # open antennas
        ants=table(ms.getkeyword("ANTENNA"))
        # open antenna tables
        antnames=ants.getcol("NAME")
        nAnt=len(antnames)
        tarray=ms.getcol("TIME")
        darray=ms.getcol("DATA")
        tvalues=np.array(sorted(list(set(tarray))))
        nt=tvalues.shape[0]
        nbl=tarray.shape[0]/nt
        nchan=darray.shape[1]
        A0=np.array(ms.getcol("ANTENNA1").reshape((nt,nbl)))
        A1=np.array(ms.getcol("ANTENNA2").reshape((nt,nbl)))
        if colname in ms.colnames():
            print "%s column already present; will overwrite"%colname
        else:
            W=np.ones((nt*nbl,nchan))
            desc=ms.getcoldesc("IMAGING_WEIGHT")
            desc["name"]=colname
            desc['comment']=desc['comment'].replace(" ","_")
            ms.addcols(desc)
            ms.putcol(colname,W)
        # create weight array
        w=np.zeros((nt,nbl,nchan))
        ant1=np.arange(nAnt)
        print "Fill weights array"
        # baseline -> antenna-pair maps (assumed constant over time)
        A0ind=A0[0,:]
        A1ind=A1[0,:]
        warnings.filterwarnings("ignore")
        for i in range(nbl):
            for j in range(nchan):
                # 0.01 regularises against division by ~zero coefficients
                w[:,i,j]=1./(CoeffArray[:,A0ind[i]]*CoeffArray[:,A1ind[i]] + 0.01)
            PrintProgress(i,nbl)
        warnings.filterwarnings("default")
        w=w.reshape(nt*nbl,nchan)
        w[np.isnan(w)]=0
        w[np.isinf(w)]=0
        # normalise
        w=w/np.mean(w)
        # save in weights column
        ms.putcol(colname,w)
        ants.close()
        ms.close()
### auxiliary functions ###
def PrintProgress(currentIter,maxIter,msg=""):
    """Render an in-place progress percentage on stdout, ending the line
    once the final iteration is reached."""
    sys.stdout.flush()
    label = msg if msg else "Progress:"
    percent = 100 * (currentIter + 1.) / maxIter
    sys.stdout.write("\r%s %5.1f %% " % (label, percent))
    if currentIter == maxIter - 1:
        sys.stdout.write("\n")
def invSVD(A):
    """Regularised SVD-based inverse of A.

    Singular values are floored at 1e-6 * s_max before inversion so that
    near-singular matrices do not blow up. Returns v . diag(1/s) . conj(u).

    Fix: removed the unused local 'v0' left over from an earlier variant.
    NOTE(review): a textbook pseudo-inverse would be
    v.conj().T @ diag(1/s) @ u.conj().T; this variant is kept as-is since
    downstream code may rely on its exact output.
    """
    u, s, v = np.linalg.svd(A)
    floor = 1.e-6 * s.max()
    s[s < floor] = floor
    ssq = np.abs(1. / s)
    # rebuild matrix
    return np.dot(v, np.dot(np.diag(ssq), np.conj(u)))
def readArguments():
    """Parse the command-line options for the weight-calculation script and
    return them as a plain dict (via vars())."""
    parser=argparse.ArgumentParser("Calculate visibility imagin weights based on calibration quality")
    parser.add_argument("-v","--verbose",help="Be verbose, say everything program does. Default is False",required=False,action="store_true")
    parser.add_argument("--filename",type=str,help="Name of the measurement set for which weights want to be calculated",required=True,nargs="+")
    parser.add_argument("--ntsol",type=int,help="Solution interval, in timesteps, for your calibration",required=True)
    # Fixed copy-pasted help string: this option names the measurement set
    # holding MODEL_DATA, it is not a solution interval.
    parser.add_argument("--ModelDataMS",type=str,help="Measurement set containing the MODEL_DATA column",required=False,default="")
    return vars(parser.parse_args())
### if program is called as main ###
if __name__=="__main__":
    # Entry point: compute covariance-based weights for every MS listed on
    # the command line and report the total runtime in minutes.
    start_time=time.time()
    args = readArguments()
    msname = args["filename"]
    ntsol = args["ntsol"]
    modelms = args["ModelDataMS"]
    for ms in msname:
        print "Finding time-covariance weights for: %s"%ms
        covweights=CovWeights(MSName=ms,ntsol=ntsol,modelms=modelms)
        coefficients=covweights.FindWeights(tcorr=0)
    print "Total runtime: %f min"%((time.time()-start_time)/60.)
| [
"etienne.bonnassieux@obspm.fr"
] | etienne.bonnassieux@obspm.fr |
ccc2f49e98f08dbc682a1d406eb665062d64515c | 5528aa52b90648b7e531a19ebd0fb798891803fe | /shop/migrations/0015_auto_20200202_1135.py | eb8c0e2cb5d2c52b51801cb023623298474e78d1 | [] | no_license | UditTarini/Django-ShopperSpace | 2f7b8fdb4d2130a155c00205c49bf4360525bd5e | fa4ceb9673a833dcc341207977eb2147f0f1586e | refs/heads/master | 2022-12-11T17:20:30.756757 | 2020-12-30T14:17:19 | 2020-12-30T14:17:19 | 233,062,072 | 0 | 0 | null | 2022-12-08T06:23:02 | 2020-01-10T14:18:05 | CSS | UTF-8 | Python | false | false | 390 | py | # Generated by Django 3.0.2 on 2020-02-02 06:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0014_auto_20200202_1115'),
]
operations = [
migrations.AlterField(
model_name='product',
name='productName',
field=models.CharField(max_length=100),
),
]
| [
"udit.tarini937@gmail.com"
] | udit.tarini937@gmail.com |
d4ae3524bd27211a73bfde9ce0cd40e6d625abb0 | d5964fb10bdf774650776acd942d82e1cd56ded9 | /20161222_Note - defining arguments, functions, and list operations.py | 726365d13df23e368c23623d60a66c8351b57606 | [] | no_license | maxmatical/Machine-Learning | 93754ebd6677f45cafc089e3b2b631360ffe4f31 | 064d094d8f1957e54e502cccf96e2b572db5c747 | refs/heads/master | 2021-01-23T12:52:56.529621 | 2019-03-20T18:23:09 | 2019-03-20T18:23:09 | 93,205,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | # defining a function
########################################
# def function(argument, argument):
# blah blah blah
# return
# pass
# note: don't have to enter type of argument
# operations on elements in a list
#######################################
list_1 = [1,2,3]
# method 1: using for loops
# for loop element i in list
def add_1 (list):
    """Return a new list with every element incremented by one
    (method 2 style: a comprehension instead of an explicit append loop)."""
    return [element + 1 for element in list]
add_1(list_1)
# method 2: no need to append
def subtract_1(list):
    """Return a new list with every element decremented by one
    (method 1 style: build via an explicit loop and append)."""
    result = []
    for element in list:
        result.append(element - 1)
    return result
subtract_1(list_1)
# note: always define a new list/value and return that new list/value
# working with arrays
#######################################
# 1) taking log (ln) of column k of an array in a dataframe df
k = 2
# Use k on both sides: the original hard-coded column 2 on the left-hand
# side, which silently breaks as soon as k is changed. (Note-to-self
# snippet: np_df is assumed to be a 2-D numpy array defined elsewhere.)
np_df[:, k] = [np.log(i) for i in np_df[:, k]]
| [
"noreply@github.com"
] | noreply@github.com |
95c93e4de114c812fa4ee1372e95c482ec213dc7 | 9a580ab3f32441c431f5e4b2d2e8d33315b46ec3 | /buzzer.py | 40fe0a2ae8a51c2209a65e984163826672c81ca2 | [] | no_license | Riwanlm/RASPBERRY_WEB | d2b4280c19f1e4b9dbfbad6550da033f57f8af24 | 7731f4407e07e134960c4fb5c4132c56cc0bd405 | refs/heads/main | 2023-01-09T09:05:11.838104 | 2020-11-11T20:01:29 | 2020-11-11T20:01:29 | 312,076,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | #import des utilistaires python
import RPi.GPIO as GPIO
import time
class Buzzer:
    # Driver for a buzzer wired to a single GPIO pin. (Comments translated
    # from French; the caller is responsible for GPIO.setmode().)
    def __init__(self, numGPIO):
        self.shouldCancel = False
        # constructor: remember which GPIO pin the buzzer is attached to
        self.numGPIO = numGPIO
        # configure the pin as an output so we can drive it
        GPIO.setup(self.numGPIO, GPIO.OUT)
    # method "on": switch the buzzer on
    def on(self):
        print('Buzzer '+str(self.numGPIO)+' on')
        # drive the pin high
        GPIO.output(self.numGPIO, GPIO.HIGH)
    # method "off": switch the buzzer off
    def off(self):
        print('Buzzer '+str(self.numGPIO)+' off')
        # stop driving the pin
        GPIO.output(self.numGPIO, GPIO.LOW)
    def cancel(self):
        # ask a running blink() loop to stop at its next iteration
        self.shouldCancel = True
    def blink(self, numBlink, sleepTime):
        # beep numBlink times, sleepTime seconds on then off per cycle;
        # can be interrupted early via cancel()
        i = 0
        self.shouldCancel = False
        while i < numBlink and not self.shouldCancel:
            self.on()
            time.sleep(sleepTime)
            self.off()
            time.sleep(sleepTime)
            i += 1
    def beep(self, beepTime):
        # single beep lasting beepTime seconds
        self.on()
        time.sleep(beepTime)
        self.off()
    # beep out a morse-code string such as '.-'
    # e.g. Bonjour = -... --- -. .--- --- ..- .-.
    def morse(self, code):
        pulse = 0.3
        for character in code:
            if character == '.':
                self.beep(pulse)
            elif character == '-':
                self.beep(pulse * 3)
            else:
                # any other character is treated as a gap between letters
                time.sleep(pulse * 2)
            time.sleep(pulse)
"riwanlm@gmail.com"
] | riwanlm@gmail.com |
1f7510b21885d46305972e2d02ebab34663ae54a | 22e2accb0543a7119754bb24df0a449c760903b1 | /corToCity.py | a549e676d5079a576cd1735c049a8b6050069c5a | [] | no_license | aalexx-S/LBTAS-2018 | f2852c2edb165e1207479f4bc6f1869050a6aba4 | b819873383592e60f5d5615a5bc75d82748ac4d0 | refs/heads/master | 2020-04-05T19:45:56.632617 | 2018-12-31T02:30:03 | 2018-12-31T02:30:03 | 157,149,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | import copy
import fiona
from shapely.geometry import shape, Point
from pyproj import Proj, transform
# Shapefile configuration handle; a caller must assign an object exposing
# .shppath and .shpname before cor_to_city() is used.
config = None
def cor_to_city(all_queries):
    # For each query dict carrying 'longitude'/'latitude' keys, find the
    # administrative-area polygon containing that point and attach its
    # county name under the 'city' key. Points outside every polygon are
    # returned unchanged (no 'city' key added). Input dicts are deep-copied,
    # never mutated.
    # read shapefile
    ad_area_shp = fiona.open(config.shppath + '/' + config.shpname + '.shp', encoding='utf-8')
    shapes = {}
    attr = {}
    for area in ad_area_shp:
        city_id = int(area['properties']['COUNTYCODE'])
        shapes[city_id] = shape(area['geometry'])
        attr[city_id] = area['properties']
    # search location for each push and poster
    re = []
    for query in all_queries:
        # NOTE(review): an earlier comment said "transfer to TWD97", but
        # trans() is never called here -- the point is tested in the
        # shapefile's own coordinate system; confirm the CRS matches.
        tmp = copy.deepcopy(query)
        # find matching shape, return the first one found. Whatever.
        for city, sh in shapes.items():
            if sh.contains(Point(tmp['longitude'], tmp['latitude'])):
                tmp['city'] = attr[city]['COUNTYNAME']
                break
        re.append(tmp)
    return re
return re
# transfer longitude and latitude (EPSG:4326) to TWD97 (EPSG:3826)
def trans(lon, lat):
    # Project a WGS84 lon/lat pair into TWD97 / TM2 metres and return the
    # transformed (x, y) tuple.
    inp = Proj(init='epsg:4326')
    oup = Proj(init='epsg:3826')
    return transform(inp, oup, lon, lat)
| [
"alex530519@gmail.com"
] | alex530519@gmail.com |
deb56472c890832c3e7ee3dae8b4a62f9590c3d3 | 74863206d868c63d73ed927c5d4559fe4e2320fd | /week 5/wk 5 q 2.py | 4e92daa6b065054e24c2e2d95ebeb2cbd758f5ac | [] | no_license | Shubhanshu-Nishad/210-Coursework-Amanjit-S-Phull | e58a622b9b0bd2da3259f318944d1164c9f3fd93 | 01ed9eb426d3af180cb486503ab8bfcdf6694e90 | refs/heads/master | 2022-12-18T06:08:58.172949 | 2020-10-01T14:27:44 | 2020-10-01T14:27:44 | 300,308,089 | 1 | 0 | null | 2020-10-01T14:26:13 | 2020-10-01T14:26:12 | null | UTF-8 | Python | false | false | 1,155 | py | class Node(object):
def __init__(self, value):
self.value=value
self.next=None
self.prev=None
class List(object):
def __init__(self):
self.head=None
self.tail=None
def insert(self,n,x):
if n!=None:
x.next=n.next
n.next=x
x.prev=n
if x.next!=None:
x.next.prev=x
if self.head==None:
self.head=self.tail=x
x.prev=x.next=None
elif self.tail==n:
self.tail=x
def delete(self,n): #Remove pointers to an element
if n.prev != None:
n.prev.next = n.next
else:
self.head = n.next
if n.next != None:
n.next.prev = n.prev
else:
self.tail = n.prev
def display(self):
values=[]
n=self.head
while n!=None:
values.append(str(n.value))
n=n.next
print ("List: ",",".join(values))
if __name__ == '__main__':
    # Demo: build the list 4 -> 8 -> 6 (each insert goes right after the
    # head), drop the tail, and print "List:  4,8".
    l=List()
    l.insert(None, Node(4))
    l.insert(l.head,Node(6))
    l.insert(l.head,Node(8))
    l.delete(l.tail)
    l.display()
"noreply@github.com"
] | noreply@github.com |
3c9b1f5735b73000613123f11f7489b4966ddcdf | 95fa7024211ed10a22e392bc07256a46814bc5ff | /config/settings/production.py | 2742546718df32718f0ec25fb47f1ea7bb0b802b | [] | no_license | mark-cunningham/test | 353346a351fce27ec7992e6fcedb6af71e5c77d0 | 68d080eb60a80dc60d9706fa51b787a5c46b1780 | refs/heads/master | 2020-04-23T12:12:52.743174 | 2019-02-17T19:22:12 | 2019-02-17T19:22:12 | 171,148,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,493 | py | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['hashtag-learning.co.uk'])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES['default'] = env.db('DATABASE_URL') # noqa F405
DATABASES['default']['ATOMIC_REQUESTS'] = True # noqa F405
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': env('REDIS_URL'),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
'IGNORE_EXCEPTIONS': True,
}
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool('DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ['storages'] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': f'max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate',
}
# STATIC
# ------------------------
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MEDIA_URL = f'https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['loaders'] = [ # noqa F405
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
'DJANGO_DEFAULT_FROM_EMAIL',
default='test <noreply@hashtag-learning.co.uk>'
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[test]')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ['anymail'] # noqa F405
EMAIL_BACKEND = 'anymail.backends.mailgun.EmailBackend'
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
'MAILGUN_API_KEY': env('MAILGUN_API_KEY'),
'MAILGUN_SENDER_DOMAIN': env('MAILGUN_DOMAIN')
}
# Gunicorn
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['gunicorn'] # noqa F405
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE.insert(1, 'whitenoise.middleware.WhiteNoiseMiddleware') # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
# Your stuff...
# ------------------------------------------------------------------------------
| [
"cunniman13@gmail.com"
] | cunniman13@gmail.com |
b15b87aebf2cf07b8e065a527f31b2b55377fa13 | d7ee76b7f1d6cd038982335792f15959a58a8395 | /SWEA/4615. 재미있는 오셀로 게임.py | e557fb8ef44326abc668927b3051576baa6bd26d | [] | no_license | min1378/-algorithm | 1c5dea6b2f03e4d376275cfccbf11b240bc659d9 | bfb720277160077a816deec21469a7e597c62d14 | refs/heads/master | 2021-08-02T06:54:10.478501 | 2021-07-31T14:03:01 | 2021-07-31T14:03:01 | 202,688,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | #import sys
from pprint import pprint
#sys.stdin = open('4615.txt', 'r')
# 벽체크 함수
def isWall(x, y):
if x > N or x < 1 :
return True
if y > N or y < 1 :
return True
return False
# 색을 바꿔야할 돌의 위치 체크.
def enermy_check(x, y, mode, color):
check_enermy = []
dx = [0, 1, 1, 1, 0, -1, -1, -1]
dy = [-1, -1, 0, 1, 1, 1, 0, -1]
while True:
# 전달받은 mode로 한 발자국 나아간다.
test_x = x+dx[mode]
test_y = y+dy[mode]
# 벽이라면 그 전까지 체크한 위치는 무시하고 빈 리스트 []를 반환
if isWall(test_x, test_y) == True:
return []
# 같은 색을 만났다면 그동안 체크한 좌표의 리스트를 반환
if data[test_y-1][test_x-1] == color:
return check_enermy
# 0을 만났다면 비어 있는 공간이므로 빈리스트 [] 반환
if data[test_y-1][test_x-1] == 0:
return []
# 나머지 조건들은 좌표를 체크하여 check_enermy에 저장한다.
else :
check_enermy.append([test_x, test_y])
# 좌표를 체크하였다면 갱신시킨다.
x = test_x
y = test_y
# 검사하는 함수
def inspect(x, y, color):
# 8방향 모드의 반복문을 실행한다.
for mode in range(8):
# enermy_check의 리턴 값을 받아온다.
result = enermy_check(x, y, mode, color)
# 만약 빈리스트가 아니라면
if result != []:
# result에서 좌표를 꺼내 data에 색칠한다.
for check_x, check_y in result:
data[check_y-1][check_x-1] = color
TC=int(input())
for test_case in range(1, TC+1):
N, M = map(int, input().split())
data = [[0]*N for _ in range(N)]
# 흑은 1 백은 2
data[N // 2 - 1][N // 2 - 1] = 2
data[N // 2 - 1][N // 2] = 1
data[N // 2][N // 2 - 1] = 1
data[N // 2][N // 2] = 2
check = [list(map(int, input().split())) for _ in range(M)]
while True:
if check == []:
break
#check에서 앞에서 하나씩 꺼내서 돌을 놓는다.
x, y, color = check.pop(0)
data[y-1][x-1] = color
# 돌을 놓았을 때 어떻게 변화할 지 확인한다.
inspect(x, y, color)
# 반복문이 끝나면 모든 돌을 놓았다는 것이므로 흑돌과 백돌의 개수를 체크한다.
black = 0
white = 0
for line in data:
black += line.count(1)
white += line.count(2)
print("#{} {} {}".format(test_case, black, white)) | [
"qwes123@naver.com"
] | qwes123@naver.com |
fdcfdfd429431291ef3a98faf19e4dc7d4ffcdb2 | 841c0df958129bef4ec456630203992a143c7dc7 | /src/1/1297.py | 8c9a783bb90ccd8c2f495c94b1b79838d0b82fc5 | [
"MIT"
] | permissive | xCrypt0r/Baekjoon | da404d3e2385c3278a1acd33ae175c2c1eb82e5e | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | refs/heads/master | 2022-12-25T18:36:35.344896 | 2021-11-22T20:01:41 | 2021-11-22T20:01:41 | 287,291,199 | 16 | 25 | MIT | 2022-12-13T05:03:49 | 2020-08-13T13:42:32 | C++ | UTF-8 | Python | false | false | 385 | py | """
1297. TV 크기
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 72 ms
해결 날짜: 2020년 9월 20일
"""
def main():
    """BOJ 1297: read diagonal d and aspect ratio h:w, print the largest
    integer height and width of a screen with that ratio whose diagonal
    does not exceed d."""
    import math  # local import keeps this single-file judge solution self-contained

    d, h, w = map(int, input().split())
    diag_sq = h ** 2 + w ** 2
    # floor(sqrt(floor(x))) == floor(sqrt(x)) for integer x, and math.isqrt
    # is exact for arbitrary-size ints (float pow(x, 0.5) can misround).
    hk = math.isqrt(d ** 2 * h ** 2 // diag_sq)
    wk = math.isqrt(d ** 2 * w ** 2 // diag_sq)
    print(f'{hk} {wk}')

if __name__ == '__main__':
    main()
| [
"fireintheholl@naver.com"
] | fireintheholl@naver.com |
812b33798a282b1ce8b7d31e14999b7e5d629e07 | 9255068b7b45348a084555b8c413fd55a4b12013 | /odfdo/link.py | 43d15ef07e9f2067b7636723ff4a05076ec64545 | [
"Apache-2.0"
] | permissive | mat-m/odfdo | fdf9752f0273760deb59403f23dbc20eac3de753 | a4a509a056517ecf91449e029b36fe9a8ffa8ed0 | refs/heads/master | 2020-03-18T05:04:16.263647 | 2018-05-21T21:46:13 | 2018-05-21T21:54:11 | 134,322,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,931 | py | # Copyright 2018 Jérôme Dumonteil
# Copyright (c) 2009-2013 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): jerome.dumonteil@gmail.com
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: Hervé Cauwelier <herve@itaapy.com>
# Jerome Dumonteil <jerome.dumonteil@itaapy.com>
from .element import Element, register_element_class
from .paragraph_base import ParagraphBase
class Link(ParagraphBase):
    """Hyperlink element, the ODF ``<text:a>`` element.

    Attribute access (``url``, ``name``, ``title``, ...) is generated from
    ``_properties`` by ``_define_attribut_property()`` below.
    """
    _tag = 'text:a'
    # (python attribute name, ODF attribute name) pairs used to generate properties.
    _properties = (('url', 'xlink:href'),
                   ('name', 'office:name'),
                   ('title', 'office:title'),
                   ('target_frame', 'office:target-frame-name'),
                   ('show', 'xlink:show'),
                   ('visited_style', 'text:visited-style-name'),
                   ('style', 'text:style-name'))

    def __init__(self,
                 url='',
                 name=None,
                 title=None,
                 text=None,
                 target_frame=None,
                 style=None,
                 visited_style=None,
                 **kwargs):
        """
        Arguments:

            url -- str, link target (``xlink:href``)

            name -- str

            title -- str

            text -- str, visible text of the link

            target_frame -- '_self', '_blank', '_parent', '_top'

            style -- string

            visited_style -- string

        return: Link
        """
        super().__init__(**kwargs)
        # _do_init is False when the element is built from existing XML;
        # only set attributes when creating a fresh element.
        if self._do_init:
            self.url = url
            if name is not None:
                self.name = name
            if title is not None:
                self.title = title
            if text is not None:
                self.text = text
            if target_frame is not None:
                self.target_frame = target_frame
                # xlink:show can be: 'new' or 'replace'
                if target_frame == '_blank':
                    self.show = 'new'
                else:
                    self.show = 'replace'
            if style is not None:
                self.style = style
            if visited_style is not None:
                self.visited_style = visited_style

# Generate the property accessors declared in _properties.
Link._define_attribut_property()

register_element_class(Link)
| [
"jerome.dumonteil@gmail.com"
] | jerome.dumonteil@gmail.com |
1c7ed19f2aaacdb47b9e5eefd21dd227de5cb2ed | d024ccbb4cc04af3866a4db1ac1d8c1d7395d909 | /boj/3040.py | 7340db9c6af2e68f61e4fb313c8b4a7a0a8b412e | [] | no_license | demetoir/ps-solved-code | ff0418dddd10f3b053c9b8d32af48027b10c8481 | f4d4fd2183176b083f2287c9d89c6d5a1e983cc5 | refs/heads/master | 2022-10-14T20:11:34.581439 | 2020-06-12T11:24:11 | 2020-06-12T11:24:11 | 68,782,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | import itertools
# BOJ 3040 (Python 2): of nine numbers, exactly seven sum to 100.
# Print those seven, one per line (any valid combination).
l=[]
for i in range(9):l+=[input()]  # Python 2 input() evaluates the line -> ints
for s in itertools.combinations(range(9),7):
    if sum(l[i] for i in s)==100:
        print "\n".join(str(l[i]) for i in s)
"wnsqlehlswk@gmail.com"
] | wnsqlehlswk@gmail.com |
0d4374fc859560faca1bf38f60793e519cb4ea39 | e9173667eec2576782863a51ee63672f9b419297 | /k2.py | b0c3e665770160fd0f0a9f0e424c8e55fafe7c96 | [] | no_license | sabareesh123/pythonprogamming | d41c23ddae183ded09eafde445273126c6b56fcf | 004f248aa2e25f2855d6ccafbb9244447bfb5873 | refs/heads/master | 2020-05-30T06:28:54.901030 | 2019-08-06T11:50:10 | 2019-08-06T11:50:10 | 189,580,451 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | e=int(input(""))
if(e%2==0):
print("Even")
else:
print("Odd")
| [
"noreply@github.com"
] | noreply@github.com |
50df324554bfc102953f18d1e3fac8b9dbff04d3 | 93bcd08541193bdbfdd2921d2462f8485db91753 | /src/services/map.py | cd9d454a637dbe07521ead7e0b7c19c72ea858c2 | [
"MIT"
] | permissive | fpaupier/samu_social | 258b8db00a743538da664203b00b4f08ba44d9eb | aa20763c632743ec25d8b6802c62dd5ce121d89f | refs/heads/master | 2020-04-23T04:42:08.513157 | 2019-09-29T17:56:00 | 2019-09-29T17:56:00 | 170,916,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,373 | py | import math
import requests
class Map(object):
    """Geocoding (api-adresse.data.gouv.fr) and great-circle distance helper."""

    def __init__(self):
        self.url = 'https://api-adresse.data.gouv.fr/search/'
        self.radius = 6371  # mean Earth radius, km

    def distance(self, departure, arrival):
        """Return the haversine distance in km between two points.

        Each point is a dict whose 'latitude' / 'longitude' values are
        convertible to float; returns None when a value is not numeric.
        """
        try:
            departure_latitude, departure_longitude = float(departure['latitude']), float(departure['longitude'])
            arrival_latitude, arrival_longitude = float(arrival['latitude']), float(arrival['longitude'])
        except ValueError:
            return None

        latitude_distance = math.radians(arrival_latitude - departure_latitude)
        longitude_distance = math.radians(arrival_longitude - departure_longitude)
        # Haversine: the cosine factors must be the two *latitudes*.
        # (The original multiplied cos(longitude) * cos(latitude), skewing
        # every computed distance.)
        a = (math.sin(latitude_distance / 2) * math.sin(latitude_distance / 2) +
             math.cos(math.radians(departure_latitude)) * math.cos(math.radians(arrival_latitude)) *
             math.sin(longitude_distance / 2) * math.sin(longitude_distance / 2))
        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
        return self.radius * c

    def point(self, location):
        """Geocode ``location`` ({'address': ..., 'postcode': ...}) to a
        {'latitude': ..., 'longitude': ...} dict, or None when the address
        is missing or nothing matches."""
        # TODO: fallback to the name of the hotel and other complementary
        # operations if we don't have the address.
        if not location.get('address'):
            return None
        geographic_information = self.get(location)
        features = geographic_information['features']
        if not features:
            return None
        # Keep the feature with the *highest* score; sorting ascending and
        # taking [0] previously returned the worst match.
        best = max(features, key=lambda k: k['properties']['score'])
        # GeoJSON coordinates are ordered [longitude, latitude].
        longitude, latitude = best['geometry']['coordinates']
        return {'latitude': latitude, 'longitude': longitude}

    def get(self, parameters):
        """Query the geocoding API; raises for HTTP errors, returns parsed JSON."""
        payload = {'q': parameters.get('address'), 'postcode': parameters.get('postcode')}
        request = requests.get(self.url, params=payload)
        request.raise_for_status()
        response = request.json()
        return response
if __name__ == '__main__':
    # Manual smoke test: geocode two French addresses and print the distance.
    # Requires network access to api-adresse.data.gouv.fr.
    map = Map()  # NOTE(review): shadows the builtin `map` inside this block
    address1 = {'address': 'Avenue Winston Churchill', 'postcode': 27000}
    address2 = {'address': 'Rue Gay Lussac', 'postcode': 60000}
    point1 = map.point(address1)
    point2 = map.point(address2)
    distance = map.distance(point1, point2)
    print('{} kms'.format(distance))
| [
"brisemeric@gmail.com"
] | brisemeric@gmail.com |
ce1521069237c2ec7956f0d3bb95031f704516fd | 5e7eaac8b6488370cafa86622a4b07aa30aa8ca2 | /maze.py | 01d5f71c6c9804645fd4187b3a8e9b7eca4de3bb | [] | no_license | VioletFlamingo/MazeGenerator | 3ced2b7e04eea25b4013562f477657ddaeaf3d4e | 39c447b969932ad9f602b696b35616c139af3bd7 | refs/heads/master | 2021-01-10T15:19:43.563343 | 2016-04-08T21:10:50 | 2016-04-08T21:10:50 | 55,809,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | class Maze:
def __init__(self, width, height, start, finish, paths=None, difficulty=None):
self.width = width
self.height = height
self.start = start
self.finish = finish
self.paths = paths
self.difficulty = difficulty | [
"paulina.winkowska@gmail.com"
] | paulina.winkowska@gmail.com |
438a534b66b835b18edc0a542fc5499bae377670 | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /cache_replacement/policy_learning/cache/main.py | 5cbddf2a4c41057f1d91d6f6838f52f0665a237d | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 5,923 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=line-too-long
r"""Runs cache simulation.
Example usage:
python3 -m cache_replacement.policy_learning.cache.main \
--experiment_base_dir=/tmp \
--experiment_name=sample_belady_llc \
--cache_configs=cache_replacement/policy_learning/cache/configs/default.json \
--cache_configs=cache_replacement/policy_learning/cache/configs/eviction_policy/belady.json \
--memtrace_file=cache_replacement/policy_learning/cache/traces/sample_trace.csv
Simulates a cache configured by the cache configs with Belady's as the
replacement policy on the sample trace.
"""
# pylint: enable=line-too-long
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tqdm
from cache_replacement.policy_learning.cache import cache as cache_mod
from cache_replacement.policy_learning.cache import evict_trace as evict
from cache_replacement.policy_learning.cache import memtrace
from cache_replacement.policy_learning.common import config as cfg
from cache_replacement.policy_learning.common import utils
FLAGS = flags.FLAGS
flags.DEFINE_multi_string(
"cache_configs",
[
"cache_replacement/policy_learning/cache/configs/default.json", # pylint: disable=line-too-long
"cache_replacement/policy_learning/cache/configs/eviction_policy/lru.json" # pylint: disable=line-too-long
],
"List of config paths merged front to back for the cache.")
flags.DEFINE_multi_string(
"config_bindings", [],
("override config with key=value pairs "
"(e.g., eviction_policy.policy_type=greedy)"))
flags.DEFINE_string(
"experiment_base_dir", "/tmp/experiments",
"Base directory to store all experiments in. Should not frequently change.")
flags.DEFINE_string(
"experiment_name", "unnamed",
"All data related to this experiment is written to"
" experiment_base_dir/experiment_name.")
flags.DEFINE_string(
"memtrace_file",
"cache_replacement/policy_learning/cache/traces/omnetpp_train.csv",
"Memory trace file path to use.")
flags.DEFINE_integer(
"tb_freq", 10000, "Number of cache reads between tensorboard logs.")
flags.DEFINE_integer(
"warmup_period", int(2e3), "Number of cache reads before recording stats.")
flags.DEFINE_bool(
"force_overwrite", False,
("If true, overwrites directory at "
" experiment_base_dir/experiment_name if it exists."))
def log_scalar(tb_writer, key, value, step):
  """Write one scalar summary point (tag `key`, x-axis `step`) via the TF1 API."""
  summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])
  tb_writer.add_summary(summary, step)
def main(_):
  """Simulate the configured cache over FLAGS.memtrace_file.

  Writes under the experiment directory: the merged cache config, the flag
  values, a miss trace (misses.csv), an eviction trace (evictions.txt) and
  TensorBoard hit-rate scalars (every FLAGS.tb_freq reads).
  """
  # Set up experiment directory
  exp_dir = os.path.join(FLAGS.experiment_base_dir, FLAGS.experiment_name)
  utils.create_experiment_directory(exp_dir, FLAGS.force_overwrite)
  tensorboard_dir = os.path.join(exp_dir, "tensorboard")
  tf.disable_eager_execution()
  tb_writer = tf.summary.FileWriter(tensorboard_dir)
  miss_trace_path = os.path.join(exp_dir, "misses.csv")
  evict_trace_path = os.path.join(exp_dir, "evictions.txt")

  # Persist the fully-merged configs so the run is reproducible.
  cache_config = cfg.Config.from_files_and_bindings(
      FLAGS.cache_configs, FLAGS.config_bindings)
  with open(os.path.join(exp_dir, "cache_config.json"), "w") as f:
    cache_config.to_file(f)

  flags_config = cfg.Config({
      "memtrace_file": FLAGS.memtrace_file,
      "tb_freq": FLAGS.tb_freq,
      "warmup_period": FLAGS.warmup_period,
  })
  with open(os.path.join(exp_dir, "flags.json"), "w") as f:
    flags_config.to_file(f)

  logging.info("Config: %s", str(cache_config))
  logging.info("Flags: %s", str(flags_config))

  cache_line_size = cache_config.get("cache_line_size")
  with memtrace.MemoryTrace(
      FLAGS.memtrace_file, cache_line_size=cache_line_size) as trace:
    with memtrace.MemoryTraceWriter(miss_trace_path) as write_trace:
      with evict.EvictionTrace(evict_trace_path, False) as evict_trace:
        # Callback invoked by the cache on every eviction decision.
        def write_to_eviction_trace(cache_access, eviction_decision):
          evict_trace.write(
              evict.EvictionEntry(cache_access, eviction_decision))

        cache = cache_mod.Cache.from_config(cache_config, trace=trace)

        # Warm up cache
        for _ in tqdm.tqdm(range(FLAGS.warmup_period), desc="Warming up cache"):
          pc, address = trace.next()
          hit = cache.read(pc, address, [write_to_eviction_trace])
          if not hit:
            write_trace.write(pc, address)
          # The trace must outlast the warm-up period.
          if trace.done():
            raise ValueError()

        # Discard warm-up cache statistics
        cache.hit_rate_statistic.reset()

        num_reads = 0
        with tqdm.tqdm(desc="Simulating cache on MemoryTrace") as pbar:
          while not trace.done():
            pc, address = trace.next()
            hit = cache.read(pc, address, [write_to_eviction_trace])
            if not hit:
              write_trace.write(pc, address)
            num_reads += 1
            # Periodically log the running hit rate to TensorBoard.
            if num_reads % FLAGS.tb_freq == 0:
              log_scalar(tb_writer, "cache_hit_rate",
                         cache.hit_rate_statistic.success_rate(), num_reads)
            pbar.update(1)

        # Final data point for the full trace.
        log_scalar(tb_writer, "cache_hit_rate",
                   cache.hit_rate_statistic.success_rate(), num_reads)
        # Force flush, otherwise last writes will be lost.
        tb_writer.flush()
if __name__ == "__main__":
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
539e3168ff7838dd2857165753faf10c932bc548 | 21f0ad238f9380ce5346570aaa6deacd5219d92e | /code.py | aceb238a4bf31cc0ff9db51db34296f744a85f6e | [] | no_license | Mahesh2877/Nth-Element-of-LinkedList | 1f53ed3709eac80ca5069ee30e433b601d403099 | 3e56c7e3282cc3e79b621f95973b36190788246d | refs/heads/main | 2023-01-25T03:14:25.935662 | 2020-12-05T14:01:02 | 2020-12-05T14:01:02 | 318,800,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | # Use this class to create linked lists.
class Node:
def __init__(self, value, child=None):
self.value = value
self.child = child
# The string representation of this node.
# Will be used for testing.
def __str__(self):
return str(self.value)
# Implement your function below.
def nth_from_last_two_pointers(head, n):
    """Return the nth node from the end of the list, or None when the list
    holds fewer than n nodes (two-pointer / runner technique)."""
    lead = head
    # Put the lead pointer n links ahead of the start.
    for _ in range(n):
        if lead is None:
            return None  # fewer than n nodes
        lead = lead.child
    trail = head
    # Advance both pointers until the lead falls off the end; the trail is
    # then exactly n nodes from the tail.
    while lead is not None:
        trail = trail.child
        lead = lead.child
    return trail
# The following function converts the given linked list into an easy-to-read string format.
def linked_list_to_string(head):
    """Render the list as 'v1 -> v2 -> ... -> (None)'."""
    parts = []
    node = head
    while node:
        parts.append(str(node.value))
        node = node.child
    parts.append('(None)')
    return ' -> '.join(parts)
# NOTE: The following input values will be used for testing your solution.
# Build the first list by repeated prepending.
current = Node(1)
for i in range(2, 8):
    current = Node(i, current)
head = current
# head = 7 -> 6 -> 5 -> 4 -> 3 -> 2 -> 1 -> (None)
# Second, shorter list, built the same way.
current2 = Node(4)
for i in range(3, 0, -1):
    current2 = Node(i, current2)
head2 = current2
# head2 = 1 -> 2 -> 3 -> 4 -> (None)
print("Test case 1:-")
print(linked_list_to_string(head))
print("Expected result is 1")
print(nth_from_last_two_pointers(head, 1))# should return 1.
print("Test case 2:-")
print(linked_list_to_string(head))
print("Expected result is 5")
print(nth_from_last_two_pointers(head, 5))# should return 5.
print("Test case 3:-")
print(linked_list_to_string(head2))
print("Expected result is 3")
print(nth_from_last_two_pointers(head2, 2))# should return 3.
print("Test case 4:-")
print(linked_list_to_string(head2))
print("Expected result is 1")
print(nth_from_last_two_pointers(head2, 4))# should return 1.
print("Test case 5:-")
print(linked_list_to_string(head2))
print("Expected result is None")
print(nth_from_last_two_pointers(head2, 5))# should return None.
print("Test case 6:-")
print(linked_list_to_string(None))
print("Expected result is None")
print(nth_from_last_two_pointers(None, 1))# should return None.
| [
"noreply@github.com"
] | noreply@github.com |
092653579244e4f4c095d89145e7b1090c29b97a | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /other_practices/JOI2009_ho2.py | bf6da9af81b0852342546b9a6414ba07ece8d743 | [] | no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import bisect
d = int(input())
n = int(input())
m = int(input())
dl = [int(input()) for _ in range(n-1)]
ml = [int(input()) for _ in range(m)]
dl.append(0)
dl.append(d)
dl.sort()
dis = 0
for m in ml:
ind = bisect.bisect_left(dl, m)
dis += min(abs(dl[ind]-m), abs(dl[ind-1]-m))
print(dis)
| [
"yinoue.1996787@gmail.com"
] | yinoue.1996787@gmail.com |
d64da998023218aa16722fe26d0d30d0d3edb18b | c5f9cb6619d4e98ecd8e7d952439adcce41c23f5 | /week 10/Project1/Proj1/submission/urls.py | 25ade1db478212781418c4a4dcf52fa008f0001e | [] | no_license | Zhanarik/webdev2019 | 8b94ce365bff63e5bad9bfb0c8eb8a51ecaaa0cc | f94f06a782204a532f1a2520beb381b4eabb65bd | refs/heads/master | 2022-01-09T22:52:40.738650 | 2019-05-14T05:32:39 | 2019-05-14T05:32:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from django.urls import path, include
from submission import views
urlpatterns = [
    # Handler for assignment submissions (see submission.views.submission).
    path('submission/', views.submission)
]
"zhanar991025@gmail.com"
] | zhanar991025@gmail.com |
c470796fe293d6d5817ad4d4cffa65f393d9db24 | b3bbce6d774e3b6d86b5bc18ab6ffaad90fe4e51 | /drafthub/asgi.py | 25c0bfb7de290180043c49697af3919792dd4f55 | [
"MIT"
] | permissive | felipelincoln/drafthub | 20227ae309b0c4ad1abbeeb8aa2fdf9c4fbcf469 | e8a6055205646d45c26419938c3277f544931925 | refs/heads/master | 2023-02-02T21:20:06.243615 | 2020-12-11T11:49:46 | 2020-12-11T11:49:46 | 251,270,142 | 2 | 0 | MIT | 2020-06-06T09:05:42 | 2020-03-30T10:18:57 | Python | UTF-8 | Python | false | false | 393 | py | """
ASGI config for drafthub project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drafthub.settings')
application = get_asgi_application()
| [
"felipelincoln@usp.br"
] | felipelincoln@usp.br |
976eab4c20ccc6d97267a0e261e856efb42bac17 | 9a393d5dae8147088b1c9b78987197c60a6618cf | /0828/모의2.py | 5afbfdf130cc6d42c69e1a772ee5ab0f6d43cf74 | [] | no_license | bumbum9944/bumpycharm | 5444440379f6d5142130bc8a7a4b69276f23f991 | b487eb433d41ff0d2f6e1ca4f723225b114b96c0 | refs/heads/master | 2020-07-05T16:04:35.153231 | 2019-10-02T00:14:00 | 2019-10-02T00:14:00 | 202,693,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | def shuffle(cards, card_u, card_d, N):
X = list(range(N))
global cnt
if cards != card_u and cards != card_d:
if cnt > 5:
return -1
else:
cnt += 1
for x in X:
if x > N // 2:
x = x - N // 2
for change in range(N // 2 - 1 - x, N // 2 + x, 2):
cards[change], cards[change + 1] = cards[change + 1], cards[change]
return shuffle(cards, card_u, card_d, N)
else:
return cnt
T = int(input())
for tc in range(1, T+1):
cnt = 0
N = int(input())
cards = list(map(int, input().split()))
card_u = sorted(cards)
card_d = card_u[::-1]
ans = shuffle(cards, card_u, card_d, N)
print('#{} {}'.format(tc, ans))
| [
"tong940526@gmail.com"
] | tong940526@gmail.com |
9aad506a33f74e07c9c4c1365fcc30229bae5b27 | 7923e0f415938efbe16482d53a4835a9e79e8741 | /MTRelation/models/model0.py | 97dd8e1b63696ba5fb9770b79e85eda94b90df8c | [] | no_license | AdamMeyers/Web-of-Law | 93b0b8d05955d12386ec5af24dc8bb02081bea26 | 49f7ae18dbe9fbfaa350dff8617ab32190c880eb | refs/heads/master | 2020-09-11T23:51:41.353592 | 2019-07-17T14:04:14 | 2019-07-17T14:04:14 | 94,456,676 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | from utils import *
class Model0(nn.Module):
    """Relation classifier: embedding -> bidirectional GRU encoder ->
    relational layer -> linear + log-softmax over relation classes."""

    def __init__(self, params, embeddings):
        # params: hyper-parameter dict with keys 'dim_emb', 'dim_hid',
        # 'num_rnn_layer', 'dropout', 'n_classes'.
        # embeddings: dict of embedding modules keyed by field name ('src').
        super(Model0, self).__init__()
        self.l_emb = embeddings['src']
        # GRUBiRNNWord / RelationalNet come from `utils` (wildcard import);
        # see that module for their exact contracts.
        self.l_birnn = GRUBiRNNWord(params['dim_emb'], params['dim_hid'], params['num_rnn_layer'])
        self.l_relational = RelationalNet(params['dim_hid'], params['dropout'])
        self.l_out = nn.Sequential(
            torch.nn.Linear(params['dim_hid'], params['n_classes']),
            torch.nn.LogSoftmax(1)
        )

    def forward(self, x):
        # x: batch dict; x['src'] feeds the embedding layer, which returns
        # (embedded sequences, sequence lengths).
        x, seq_lens = self.l_emb(x['src'])
        x = self.l_birnn(x, seq_lens)
        # The relational layer relates the encoded sequence against itself.
        x = self.l_relational(x, seq_lens, x, seq_lens)
        x = self.l_out(x)
        return x  # log-probabilities over n_classes -- TODO confirm shape
| [
"wl1191@nyu.edu"
] | wl1191@nyu.edu |
e46dc93d73ecf40cad70b8beb65f250556b4fc5c | eab0f708b8abd1f1bfc12496344c516bbfe2581d | /fxtec.xyz/fxtecapp/migrations/0006_robot_timecheck.py | 8b4f1c2288496b90682678b7cad4c1e85686b450 | [] | no_license | minhdang741/FxtecWeb | 91dfd4db9b38d6d6e2083f157d56733fe42ef9bc | 821d6efadc85137080a0952f4c2cef011afbfe8c | refs/heads/master | 2023-04-21T09:33:49.976436 | 2021-05-29T11:45:52 | 2021-05-29T11:45:52 | 358,831,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # Generated by Django 2.2.12 on 2021-04-17 07:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the `timecheck` integer column to `robot`."""

    dependencies = [
        ('fxtecapp', '0005_robot_eqtcheck'),
    ]

    operations = [
        migrations.AddField(
            model_name='robot',
            name='timecheck',
            # NOTE(review): default=True on an IntegerField backfills 1 into
            # existing rows; the default is then dropped from the schema
            # (preserve_default=False). Confirm 1 is the intended backfill.
            field=models.IntegerField(default=True),
            preserve_default=False,
        ),
    ]
| [
"minhdang741@github.com"
] | minhdang741@github.com |
ea7db6646783c4f5b7190aa6fb3fa228a8266c5b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03061/s160632113.py | 8ebe292df9e3f2e8d6f104cfca93a5b226a41bb0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | from fractions import gcd
N = int(input())
A = list(map(int, input().split()))
L = [-1] * (N-1)
L[0] = A[0]
R = [-1] * (N-1)
R[0] = A[-1]
for i in range(1, N-1):
L[i] = gcd(L[i-1], A[i])
for i in range(1, N-1):
R[i] = gcd(R[i-1], A[-i-1])
ans = 0
for i in range(1, N-1):
tmp = gcd(L[i-1], R[N-i-2])
ans = max(ans, tmp)
ans = max(ans, L[N-2])
ans = max(ans, R[N-2])
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b81834a4e5b70d6c47b6e437dbeeb5406935421f | 320e216f7713c15556874cbb19308c5343f05681 | /project/data/co_ocurrence_tweets.py | b9f7d126b689927daa69abd654ef69eb87693968 | [] | no_license | anamestre/MIIS-SocialAnalytics | d3125af36afaf5d4cdb39b0244f5dbd8139870ab | 8cf26a966a5f0ba22fa1606359dac261bc926322 | refs/heads/master | 2022-05-26T05:10:28.468927 | 2020-04-23T16:56:32 | 2020-04-23T16:56:32 | 235,383,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import json, csv, re
# Count how often pairs of @usernames are mentioned together in the same
# tweet. Keys are unordered pairs: (a, b) and (b, a) share one counter.
tweets = {}
i = 0
with open('tweets.json', encoding="utf8") as f:
    data = json.load(f)
    for tweet in data["tweets"]:
        i += 1
        id_tweet = tweet['id']
        text = tweet["text"]
        usernames = re.findall("@[a-zA-Z0-9_]{0,15}", text) # Look up for twitter usernames
        # Every unordered pair of mentions in this tweet co-occurs once.
        for p1 in range(len(usernames)):
            for p2 in range(p1 + 1, len(usernames)):
                user1 = usernames[p1].lower()
                user2 = usernames[p2].lower()
                pair = (user1, user2)
                pair2 = (user2, user1)
                if pair in tweets:
                    tweets[pair] += 1
                elif pair2 in tweets:
                    tweets[pair2] += 1
                else:
                    tweets[pair] = 1
        # Progress indicator: tweets processed so far.
        print(i)
"""
numRTs = 10
with open('tweets_usernames10.csv', mode = 'w', encoding="utf8") as f:
    writer = csv.writer(f, delimiter=';', quotechar='"')
    writer.writerow(["Source", "Target", "Weight"])
    for (x, y), val in tweets.items():
        if val >= numRTs:
            writer.writerow([x, y, val])
"""
"anamestree@gmail.com"
] | anamestree@gmail.com |
25c4a482b9654eb9753046ccadb427ef2aa7c38d | aa521dd3bb0e9d0b58ce8aa7f452dafefa006ebe | /app/middleware/db.py | ee942347066d6448e71af6e8103f9ebd3bbbbffb | [
"MIT"
] | permissive | Jonatha-Varjao/fast-api-nosql-template | 77ecf9de48027c6a348069e9e38dc026dd12de45 | 67987a91a87fdbd58c46b0be8fc95b5bf4c8ed97 | refs/heads/master | 2020-12-09T12:38:03.373925 | 2020-01-20T13:45:23 | 2020-01-20T13:45:23 | 233,305,656 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | from app.db.mongodb import db
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
class DBConnection(BaseHTTPMiddleware):
    """Starlette middleware exposing the Mongo client as request.state.db."""

    async def dispatch(self, request, call_next):
        # Make the client available to downstream handlers / get_db().
        request.state.db = db.client
        response = await call_next(request)
        # NOTE(review): this closes the *shared* client after every request;
        # confirm that is intended rather than closing a per-request session.
        request.state.db.close()
        return response
def get_db(request: Request):
    """FastAPI dependency: return the DB client stored by DBConnection."""
    return request.state.db
| [
"jonathavarjao@gmail.com"
] | jonathavarjao@gmail.com |
801ea0d4ddf339896969cca9b7abbdd7ab33f820 | 6f63dd99a1ce80fa06b9f5b02e694a96b9d45a2d | /Semester_1/Lesson_04/Lesson_04_ex_01.py | 62aa67cacdc3def01ebb08bfca389ed356f503ec | [] | no_license | mayakota/KOTA_MAYA | b1d4f6f45694fe6680de2fb46dd6a38f16b892f5 | f2cfc4c80b5e73e2e6a5c409d9daaf659c42bf9d | refs/heads/master | 2020-09-21T17:01:15.435826 | 2017-05-12T17:24:13 | 2017-05-12T17:24:13 | 67,815,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | def format(item, price):
print("{:<12}\t{:>15f}".format(item, price))
# Read three items with their prices.
item1 = input("Please enter item 1:")
price1 = float(input("Please enter the price:"))
item2 = input("Please enter item 2:")
price2 = float(input("Please enter the price:"))
item3 = input("Please enter item 3:")
price3 = float(input("Please enter the price:"))
# Totals: 7% sales tax on the subtotal.
subtotal = price1 + price2 + price3
tax = .07 * subtotal
total = subtotal + tax
# Print the receipt using the column formatter defined above.
print("<<<<<<<<<<<__Receipt__>>>>>>>>>>")
format(item1, price1)
format(item2, price2)
format(item3, price3)
format("Subtotal: ", subtotal)
format("Tax: ", tax )
format("Total: ", total)
print("___________________________________")
print("Thank you for your support")
| [
"Richard.Robinette@sduhsd.lan"
] | Richard.Robinette@sduhsd.lan |
cfa7164dda49a23369a3eed3c7bfa7e894f5ac5a | a4b66d32405249ef12267d0d0dee6a04edefc913 | /dobro.py | 75b052ff8c3c29a8b33a7628ccf2fd3e801e5565 | [] | no_license | sergiofagundesb/PythonRandomStuff | 0abd8ed1f79a09708fcb672f7f34be25f7bec411 | c5445509a27cedd3cfb6103739af6b5e679df770 | refs/heads/main | 2023-01-29T16:52:59.816812 | 2020-12-09T11:27:16 | 2020-12-09T11:27:16 | 319,809,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | n=float(input('Digite um número'))
print('O número {} tem seu dobro {}, triplo {}, e raiz quadrada {}'.format(n,(2*n),(3*n),(n**(1/2))))
print('='*20)
n1=float(input('Digite o primeiro número'))
n2=float(input('Digite o segundo número'))
media=(n1+n2)/2
if media >= 5:
print('Aprovado')
if media < 5:
print('Reprovadíssimo')
print('A média aritimética entre {:.1f} e {:.1f} é {:.1f}'.format(n1,n2,media))
| [
"sergio.fagundesbarros@gmail.com"
] | sergio.fagundesbarros@gmail.com |
f692cc6732f887e77bd04e996b84fec82f5ff7a6 | 0d0da8a94ce91f236fbe63a2d613ce246bb57c05 | /khdict/urls.py | 65c82cae3d1f35cccd058e0aa64dee718034cde5 | [] | no_license | buganini/khdict-web | 45c4cb8aa47f755ccae2a37403d40f383e9f951c | 61f2587619c2394af92645f3cafa7a7a0f0c2274 | refs/heads/master | 2021-01-18T23:02:15.079360 | 2016-07-08T09:11:57 | 2016-07-08T09:11:57 | 23,557,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Landing page.
    url(r'^$', 'khdict.views.index'),
    # Dictionary lookup; `key` captures the (optional) term after /q/.
    url(r'^q/(?P<key>.*)?$', 'khdict.views.query'),
)
| [
"buganini@gmail.com"
] | buganini@gmail.com |
92a05238afd3189143bdf1d508e8b2205b46dabe | 917c0949dd410439e7f882e20a3fb744b7b4bd6e | /Pandas/obesity.py | bf7e8dceb6a5561f1b97151d830ba938469e350c | [
"MIT"
] | permissive | daveaseeman/PyEng | 229d01df85c2959b4333d5bd19ba15029b11ee38 | 31403a7f0e557456eeaad865295213cf27847bf9 | refs/heads/master | 2020-12-28T19:11:49.210811 | 2017-05-15T23:13:42 | 2017-05-15T23:13:42 | 43,885,548 | 0 | 0 | null | 2015-10-08T12:03:50 | 2015-10-08T12:03:49 | null | UTF-8 | Python | false | false | 1,221 | py | import pandas as pd
import matplotlib.pyplot as plt
# Python 2 / pandas walkthrough of the HSCIC "Obesity, Physical Activity and
# Diet, England 2014" workbook: plots admissions by gender, then by age band.
data = pd.ExcelFile("Obes-phys-acti-diet-eng-2014-tab.xls")
print data.sheet_names
# Read section 7.1 from the Excel file
# Define the columns to be read
columns1 = ['year', 'total', 'males', 'females']
data_gender = data.parse(u'7.1', skiprows=4, skipfooter=14, names=columns1)
#print data_gender
# Remove the N/A from the data
data_gender.dropna(inplace = True)
#print data_gender
data_gender.set_index('year', inplace=True)
# Plot admissions by gender over time.
data_gender.plot()
plt.show()
# Read 2nd section, by age
data_age = data.parse(u'7.2', skiprows=4, skipfooter=14)
print data_age
# Rename the unnamed first column to Year.
data_age.rename(columns={u'Unnamed: 0': u'Year'}, inplace=True)
# Drop empties and reset index
data_age.dropna(inplace=True)
data_age.set_index('Year', inplace=True)
#plot
data_age.plot()
plt.show()
# Plotting everything makes Total dwarf the other series, so drop it.
# Drop the total column and plot
data_age_minus_total = data_age.drop('Total', axis = 1)
data_age_minus_total.plot()
plt.show()
plt.close()
#Plot children vs adults
data_age['Under 16'].plot(label = "Under 16")
data_age['25-34'].plot(label = "25-34")
plt.legend(loc="upper right")
plt.show()
| [
"a@a.com"
] | a@a.com |
1219af206b732c884b33f773fac0863e4183221e | 98876f5afc4839d9355da82359a5fd0a50829bd2 | /Curso de Python/Mundo 1/Aula 07/Exercicios/ex015.py | 6e28fbd09a48dba0627f4317129348dc8d515aed | [
"MIT"
] | permissive | tarcisioribeiro-zz/Python | bc1e15e0af45262a65f80aa2d7d2ea6a0669ec8f | 4d89b1e392f7c7fd743d6e5bcab1d387cdba398d | refs/heads/master | 2023-03-21T09:15:59.831050 | 2021-03-04T23:37:37 | 2021-03-04T23:37:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # Corrigido
print('Exercício 015')
print()
# Bloco de entrada
dias = int(input('Informe por quantos dias você alugou o carro: '))
dist = float(input('Informe a distância em Km percorridos no carro: '))
print()
# Bloco de cálculo
valor = (dias * 60) + (dist * 0.15)
# Bloco de saída
print('O valor pago pelo aluguel do carro é de R$ {}.'.format(valor))
print()
| [
"tarcisio.ribeiro.1840@hotmail.com"
] | tarcisio.ribeiro.1840@hotmail.com |
9ef04a08bc10dea64e0d9e928d37a877bfa39cc1 | 603ed82854e5b67af76d9bbdf4d2183419c6167c | /pages/views.py | 05b88e646d6daa37ff170c9d948d5fc2c442c219 | [] | no_license | IbrahimAlAzhar/Basic-CRUD | 26a209433fefb3da38d742602e54abeff83daa8d | 2e9d68537270fc72b44757b39eea845d78602902 | refs/heads/master | 2022-12-14T11:34:20.784724 | 2020-09-05T21:18:51 | 2020-09-05T21:18:51 | 293,155,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.http import HttpResponse
def home_view(request, *args, **kwargs):
    """Render the home page (debug-prints the request and current user)."""
    print(args, kwargs)
    print(request)
    print(request.user)
    # return HttpResponse("<h1>Hello world</h1>")
    return render(request, "products/home.html", {})
def contact_view(request, *args, **kwargs):
    """Render the static contact page."""
    # return HttpResponse("<h1>Contact page</h1>")
    return render(request, "products/contact.html", {})
def about_view(request, *args, **kwargs):
    """Render the about page with a demo context of assorted value types."""
    print(request.user)
    # return HttpResponse("<h1>Hello from the other side</h1>")
    my_context = {
        "my_text": "this is about us",
        "my_name": "ibrahim al azhar",
        "my_number": 123,
        "my_list": [12,23,23,44,"abc","azhar"],
        # Rendered escaped unless the template applies the |safe filter.
        "my_html": "<h1>This one is html tag</h1>"
    }
    return render(request, "products/about.html", my_context)
def social_view(request, *args, **kwargs):
    """Render the static social page."""
    # return HttpResponse("<h1>Social page</h1>")
    return render(request, "products/social.html", {})
"ibrahimalazhar264@gmail.com"
] | ibrahimalazhar264@gmail.com |
011d718f746b1d5d536a562ff729e525b1912cf1 | 55192826c866d85c1ad08d024f6a10e3c75d6e2a | /scaninout/interface/__init__.py | 916fcde91f914f89ba17e301d1bf6e2a6222d01e | [] | no_license | gh2o/ScanInOut | b3a3cae45c27e93fd30fc6efa9d0123426dbda2b | e8d6620fed1f6a1ac3b908fcacbdee6a3e2ad52a | refs/heads/master | 2021-01-01T08:53:37.104501 | 2013-01-26T06:14:01 | 2013-01-26T06:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,644 | py | import os
import sys
client = None
def patch ():
	"""Monkey-patch Gtk.TreeModelRow so a row iterates over its column values."""
	from gi.overrides import Gtk

	def _row_iter (row):
		# Yield each column value, 0 .. n_columns-1.
		n_cols = row.model.get_n_columns ()
		return iter (row[0:n_cols])

	Gtk.TreeModelRow.__iter__ = _row_iter
def main (args):
	"""Interface entry point.

	Parses CLI options, installs a GUI error handler, wires up the RPC
	client, scanner and main window, then runs the Gtk main loop.

	args: argv-style list handed to OptionParser.
	"""
	### PARSE OPTIONS
	from optparse import OptionParser, IndentedHelpFormatter
	from .. import DEFAULT_SOCKET_PATH
	parser = OptionParser (
		usage="%prog [options]",
		formatter=IndentedHelpFormatter (max_help_position=40)
	)
	parser.add_option ('-s', '--socket', dest='socket',
		help="socket path", metavar="SOCKET", default=DEFAULT_SOCKET_PATH)
	parser.add_option ('-i', '--simulate', dest='simulate',
		help="simulate scanner", default=False, action="store_true")
	parser.add_option ('-f', '--fullscreen', dest='fullscreen',
		help="run in fullscreen", default=False, action="store_true")
	options, args = parser.parse_args (args)
	### DEFAULT SIGNAL HANDLER
	# Restore default SIGINT so Ctrl-C kills the app instead of being
	# swallowed by the Gtk main loop.
	import signal
	signal.signal (signal.SIGINT, signal.SIG_DFL)
	### ERROR HANDLER
	# Chain onto the existing excepthook, then show the traceback in a
	# scrollable Gtk dialog so GUI users see failures.
	from gi.repository import Gtk, Gdk, GLib, Pango
	import traceback
	old_excepthook = sys.excepthook
	def new_excepthook (tp, val, tb):
		old_excepthook (tp, val, tb)
		dialog = Gtk.MessageDialog (buttons=Gtk.ButtonsType.CLOSE, message_format="Error occurred!")
		dialog.format_secondary_text ("%s: %s" % (tp.__name__, val))
		tbbuf = Gtk.TextBuffer ()
		tbbuf.set_text ("".join (traceback.format_tb (tb)).rstrip ())
		tbtv = Gtk.TextView ()
		tbtv.set_buffer (tbbuf)
		tbtv.set_editable (False)
		tbtv.modify_font (Pango.font_description_from_string ("monospace 8"))
		tbscr = Gtk.ScrolledWindow ()
		tbscr.add (tbtv)
		tbscr.set_shadow_type (Gtk.ShadowType.IN)
		dialog.get_content_area ().pack_end (tbscr, True, True, 4)
		dialog.set_size_request (600, 500)
		dialog.show_all ()
		dialog.run ()
		dialog.destroy ()
	sys.excepthook = new_excepthook
	### MONKEY PATCH
	patch ()
	### SET UP CLIENT
	# Publishes the client in this module's global so other modules can use it.
	from ..client import Client
	global client
	client = Client ()
	### SET UP SCANNER
	from .scanner import Scanner
	global scanner
	scanner = Scanner ()
	### SCANNER SIMULATOR
	# With --simulate, each line typed on stdin is emitted as a 'scan' event.
	if options.simulate:
		def scanner_simulate (channel, condition):
			data = sys.stdin.readline ().strip ()
			scanner.emit ('scan', data)
			return True
		iochannel = GLib.IOChannel (0)
		iochannel.add_watch (GLib.IOCondition.IN, scanner_simulate)
	### SET UP WINDOW
	from .main_window import MainWindow
	window = MainWindow (client)
	window.connect ("destroy", lambda win: Gtk.main_quit ())
	window.show_all ()
	if options.fullscreen:
		window.fullscreen ()
	### ENUMERATE SCANNER
	scanner.attach_to_window (window)
	scanner.enumerate ()
	### START APPLICATION
	Gtk.main ()
| [
"git@thegavinli.com"
] | git@thegavinli.com |
e8214136d0a4a03ca532f5f4dd6c3bdeb95b8b3c | e4745b21e5365d64e5c839bd668fb012304305b6 | /blog_project/blog/models.py | cffd75b62a732e8b06fc162369dbb3ce0c44538d | [] | no_license | shubham897/Django-Blogging-website | b8b022d194fd960519e0227574bd2281ec16311c | be82b22f9850e4e4bbd5489865487591fa96d4d4 | refs/heads/master | 2022-11-14T23:32:42.537555 | 2020-07-15T16:46:40 | 2020-07-15T16:46:40 | 279,905,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | from django.db import models
from django.utils import timezone
from django.urls import reverse
class Post(models.Model):
    """A blog post written by a site user."""
    # The User row is not cascaded on delete (DO_NOTHING).
    author = models.ForeignKey('auth.User',on_delete=models.DO_NOTHING,)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Stays null until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def approve_comments(self):
        """Return only this post's moderator-approved comments."""
        return self.comments.filter(approved_comment=True)
    def get_absolute_url(self):
        """Canonical URL: the detail page for this post."""
        return reverse("post_detail",kwargs={'pk':self.pk})
    def __str__(self):
        return self.title
class Comment(models.Model):
    """A reader comment attached to a Post (reverse accessor: post.comments)."""
    post = models.ForeignKey('blog.Post', related_name='comments',on_delete=models.DO_NOTHING,)
    author = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Hidden from Post.approve_comments() until a moderator approves it.
    approved_comment = models.BooleanField(default=False)
    def approve(self):
        """Mark the comment approved and persist it."""
        self.approved_comment = True
        self.save()
    def get_absolute_url(self):
        """After comment actions, redirect back to the post list."""
        return reverse("post_list")
    def __str__(self):
        return self.text
| [
"56882368+shubham897@users.noreply.github.com"
] | 56882368+shubham897@users.noreply.github.com |
aff1a5f925b9a5fb61aa23bc3c7204c9d0b2fdf8 | 98f730ec6a43d8be4a34b0f2a44a9d35989d2287 | /tests/unit/entity/test_flow_file_entity.py | c96730ce6075f70da6c024829667d2c0880046c9 | [] | no_license | scottwr98/pynifi-client | 9337a4f322536ee466d419a788b8b5948cdc62d7 | 013ac2ffa591284a0d6cbb9ed552681cc6f91165 | refs/heads/master | 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import pynifi_client
from pynifi_client.models.flow_file_entity import FlowFileEntity # noqa: E501
from pynifi_client.rest import ApiException
class TestFlowFileEntity(unittest.TestCase):
    """FlowFileEntity unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testFlowFileEntity(self):
        """Test FlowFileEntity"""
        # FIXME: construct object with mandatory attributes with example values
        # model = pynifi_client.models.flow_file_entity.FlowFileEntity()  # noqa: E501
        pass
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| [
"ajish@rootedinsights.com"
] | ajish@rootedinsights.com |
837d305186f7d06dfd75bd4bc40e076ae5c90495 | 5e6d161f1636db521db63f5c55d0e3882616b79f | /ScanningChooser.py | 24911b1110bc125b5bd4ec81cb66772a75b415dc | [] | no_license | SergeySave/RBMC | 706077b951246f98834d51b2be57fbb23fe1f6c4 | 7b27520036f1c6477ce960a3d6f2259d197665b6 | refs/heads/master | 2023-04-07T12:53:54.465845 | 2020-04-29T16:18:56 | 2020-04-29T16:18:56 | 167,582,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,233 | py | import itertools
import chess
from Information import ViewportInformation
from collections import Counter
import random
def perform_scan(start_file, start_rank, num_files, num_ranks, game):
all_pairs = itertools.product(range(start_file, start_file + num_files),
range(start_rank, start_rank + num_ranks))
return ViewportInformation({square: game.board.piece_at(square) for square in
(chess.square(file, rank) for file, rank in all_pairs)})
# 36 possible viewports
# n states
# 36*n checks
#
# How can you check all viewports at the same time
# 9*4 bits can be used to represent any viewport (36 bits so it needs to be a long (or 2 ints or 3 shorts))
# this creates an injective mapping from 3x3 to a 36 bit string
# if this big string is stored as 3 shorts it can be created for each row by bit shifting each tile from the last one
# and then bit masking it, makes each row take only 8 computations
# then by combining the rows by combining the shorts on adjacent tiles of adjacent rows
# total 64 computations vs 324 computations if you just did a "brute force" mapping
# perform these computations for each state
# now combining all of the viewports that resulted from each state, will take n amount of time
# The heuristic can be done at this point
#
# so if the heuristic is the viewport with the most different possible configurations given the states
def most_configuration_heuristic(viewports):
max_size = max(len(counter) for loc, counter in viewports.items())
return random.choice([loc for loc, counter in viewports.items() if len(counter) == max_size])
def biggest_same_heuristic(viewports):
max_size = max(len(counter.most_common(1)) for loc, counter in viewports.items())
return random.choice([loc for loc, counter in viewports.items() if len(counter) == max_size])
def get_piece_code(piece):
if piece is None:
return 6
return piece.piece_type * (1 if piece.color == chess.WHITE else -1) + 6
def heuristic_scan3x3_loc(now_states):
if len(now_states) == 0:
return random.randrange(6), random.randrange(6)
viewports = {}
for board, count in now_states.items():
result = {}
# Original for loop code
for file, rank in itertools.product(range(8), range(8)):
this = get_piece_code(board.board.piece_at(chess.square(file, rank)))
up = (0, 0, 0) if file == 0 else result[(file - 1, rank)]
left = (0 if rank == 0 else result[(file, rank - 1)][2])
encoding = (up[1], up[2], this | ((left << 4) & 4095))
result[(file, rank)] = encoding
if file >= 2 and rank >= 2:
viewport_location = (file - 2, rank - 2)
if viewport_location not in viewports.keys():
viewports[viewport_location] = Counter()
viewports[viewport_location][encoding] += count
return most_configuration_heuristic(viewports)
def heuristic_scan3x3(now_states, game):
location = heuristic_scan3x3_loc(now_states)
# start_file and start_rank must be in the range [0, 5] because the scan is 3x3
return perform_scan(location[0], location[1], 3, 3, game)
| [
"sergeysav.nn@gmail.com"
] | sergeysav.nn@gmail.com |
c570ee9849bd9f6570218d86553e22d114fc0308 | 0130c8b14927097663157846adc4b146d67d2fda | /tests/common/test_op/conv_ad_v2.py | 8938d4b68efd6d2d80bc3ba6342cb407603a7729 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSD-3-Clause",
"NCSA",
"LLVM-exception",
"Zlib",
"BSD-2-Clause",
"MIT"
] | permissive | Shigangli/akg | e8be3e0ee1eafe3e42b4cc4d424c28f08ef4c0bc | 3766c54e0b109541932d147a6b5643a334b82403 | refs/heads/master | 2023-09-06T05:13:40.571583 | 2021-11-23T03:44:54 | 2021-11-23T03:44:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,379 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: conv_ad_v2"""
import akg.topi
import akg.tvm
import akg
import akg.lang.cce
from akg import dim
from akg.ops.nn import conv as conv_origin
from akg.tvm import truncdiv, truncmod, floordiv
from akg.utils import kernel_exec as utils
def set_dims(fmap_shape, filter_shape, pad_, stride_, dilation_, tile_hh,
             tile_coco, tile_mm, tile_kk, tile_nn, block_size):
    """Build the AKG "dim" (tiling) attribute string for a convolution.

    Shapes are NCHW; channel counts are rounded up to multiples of
    block_size (the C0 packing width).  One tile entry is emitted per
    non-trivial output/reduction axis.  Returns str(dim.Dim()).
    """
    def _align(value):
        # Round up to the next multiple of block_size.
        return (value + block_size - 1) // block_size * block_size

    in_n, in_c, in_h, in_w = fmap_shape
    in_c1 = _align(in_c) // block_size

    # Kernel shape (NCHW -> NC1HWC0 -> Fractal): align both channel dims.
    k_n, k_c, k_h, k_w = filter_shape
    k_c = _align(k_c)
    k_n = _align(k_n)

    p_top, p_bottom, p_left, p_right = pad_[0], pad_[0], pad_[1], pad_[1]
    s_h, s_w = stride_[0], stride_[1]
    d_h, d_w = dilation_[0], dilation_[1]

    # A tile spanning the whole input height grows to cover the padding too.
    if tile_hh == in_h:
        tile_hh += p_top + p_bottom
    tile_coco = _align(tile_coco)
    tile_mm = _align(tile_mm)
    tile_kk = _align(tile_kk)
    tile_nn = _align(tile_nn)

    # Effective (dilated) kernel extent and resulting output sizes.
    k_h_d = (k_h - 1) * d_h + 1
    k_w_d = (k_w - 1) * d_w + 1
    out_n = in_n
    out_c1 = k_n // block_size
    out_c0 = block_size
    out_h = (in_h + p_top + p_bottom - k_h_d) // s_h + 1
    out_w = (in_w + p_left + p_right - k_w_d) // s_w + 1
    tile_out_h = (tile_hh - k_h_d) // s_h + 1

    # Output-channel cut defaults to the full C1 extent when no tile given.
    c1_cut = tile_coco // block_size if tile_coco > 0 else out_c1

    info = dim.Dim()
    for enabled, axis, tile_l1 in (
            (out_n > 1, 0, 1),             # n
            (out_c1 > 1, 0, c1_cut),       # c1
            (out_h > 1, "H", tile_out_h),  # h
            (out_w > 1, 3, out_w),         # w
            (out_c0 > 1, 4, out_c0),       # c0
            (in_c1 > 1, 5, in_c1),         # kc1 (reduction)
            (k_h > 1, 5, k_h),             # kh (reduction)
            (k_w > 1, 5, k_w)):            # kw (reduction)
        if enabled:
            info.setdim(index=0, axis=axis, tilel1=tile_l1, tilel0=0)
    return str(info)
def expr_to_int(A):
    """Convert a sequence of TVM const expressions to plain Python values.

    Each element is expected to expose a ``.value`` attribute (as
    ``tvm.expr.IntImm`` does); values are returned in input order.

    Args:
        A: sequence of objects with a ``.value`` attribute.

    Returns:
        list of the ``.value`` of each element (empty list for empty input).
    """
    return [item.value for item in A]
@akg.tvm.register_func("akg.autodiff.conv_compute_forward")
def conv_compute_forward(fmap_shape, filter_shape, pad_, stride_, dilation_, A, B, bias_value=None,
                         tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
                         use_bias=False, block_size=16, conv_dtype='float16'):
    """Forward convolution compute: NC1HWC0 feature map x fractal weight.

    Registered with TVM ("akg.autodiff.conv_compute_forward") so the
    autodiff machinery can rebuild the forward op.

    Args:
        fmap_shape: input shape, NCHW (Python ints or TVM const exprs).
        filter_shape: kernel shape, NCHW (ints or TVM const exprs).
        pad_: (pad_h, pad_w), applied symmetrically top/bottom, left/right.
        stride_: (stride_h, stride_w).
        dilation_: (dilation_h, dilation_w).
        A: feature-map tensor in NC1HWC0 layout.
        B: weight tensor in fractal layout.
        bias_value: optional bias; NOTE when use_bias is True this argument
            is replaced below by a freshly created placeholder.
        tile_hh/tile_coco/tile_mm/tile_kk/tile_nn: tiling sizes, rounded up
            to block_size multiples; exported through the pragma attrs.
        bypass_l1: exported as pragma_conv_bypass_l1.
        use_bias: add a bias term after the mmad reduction.
        block_size: C0 packing width (default 16).
        conv_dtype: compute dtype.

    Returns:
        The NC1HWC0 output tensor (bias added when use_bias is True).
    """
    # Arguments may arrive as TVM expressions when invoked from autodiff;
    # normalize them to plain ints first.
    if (not isinstance(fmap_shape[0], int)):
        fmap_shape = expr_to_int(fmap_shape)
    if (not isinstance(filter_shape[0], int)):
        filter_shape = expr_to_int(filter_shape)
    if (not isinstance(pad_[0], int)):
        pad_ = expr_to_int(pad_)
    if (not isinstance(stride_[0], int)):
        stride_ = expr_to_int(stride_)
    if (not isinstance(dilation_[0], int)):
        dilation_ = expr_to_int(dilation_)
    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    # padding((padding_h, padding_w) -> (padding_top, padding_bottom, padding_left, padding_right))
    padding = (pad_[0], pad_[0], pad_[1], pad_[1])
    p_top, p_bottom, p_left, p_right = padding
    # stride (stride_h, stride_w)
    s_h, s_w = stride_
    # dilation (dilation_h, dilation_w)
    d_h, d_w = dilation_
    # A tile spanning the whole input height grows to cover padding too.
    if (tile_hh == in_h):
        tile_hh += p_top + p_bottom
    tile_coco = (tile_coco + block_size - 1) // block_size * block_size
    tile_mm = (tile_mm + block_size - 1) // block_size * block_size
    tile_kk = (tile_kk + block_size - 1) // block_size * block_size
    tile_nn = (tile_nn + block_size - 1) // block_size * block_size
    h_window_cut = (tile_hh - k_h) // s_h + 1
    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    in_n, _, in_h, in_w, _ = input_shape_nc1hwc0
    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, k_c1, k_h, k_w, k_c0 = kernel_shape_nc1hwc0
    # bias shape
    bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
    if use_bias:
        # NOTE(review): the caller-supplied bias_value is discarded here and
        # replaced by a new placeholder — confirm this is intentional.
        bias_name = 'input2'
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
    else:
        bias_name = 'None'
        bias_value = None
    # Create reduction variables
    kc1 = akg.tvm.reduce_axis((0, k_c1), name='kc1')
    kh = akg.tvm.reduce_axis((0, k_h), name='kh')
    kw = akg.tvm.reduce_axis((0, k_w), name='kw')
    kc0 = akg.tvm.reduce_axis((0, k_c0), name='kc0')
    # Effective (dilated) kernel extent.
    k_h_d = (k_h - 1) * d_h + 1
    k_w_d = (k_w - 1) * d_w + 1
    out_h = (in_h + p_top + p_bottom - k_h_d) // (s_h) + 1
    out_w = (in_w + p_left + p_right - k_w_d) // (s_w) + 1
    out_shape_nc1hwc0 = (in_n, k_n // block_size, out_h, out_w, block_size)
    _, out_c1, out_h, out_w, _ = out_shape_nc1hwc0
    if (tile_coco > 0):
        c1_cut = tile_coco // block_size
    else:
        c1_cut = out_c1
    # set dim
    # NOTE(review): `info` is computed but never attached to the op; tiling
    # is conveyed through the pragma attrs below — confirm this is intended.
    info = set_dims(fmap_shape, filter_shape, pad_, stride_, dilation_,
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    # Compute the convolution
    output_name = "output0"
    output_bias_name = "output1"
    # Padding is handled inline: out-of-bounds taps read as 0.0 via
    # if_then_else, so A is never physically padded.
    C = akg.tvm.compute(out_shape_nc1hwc0,
                        lambda n, c1, h, w, c0: akg.lang.cce.mmad(
                            akg.tvm.if_then_else(akg.tvm.any((h * s_h + kh) < p_top, (h * s_h + kh) > (in_h + p_top - 1),
                                                             (w * s_w + kw) < p_left, (w * s_w + kw) > (in_w + p_left - 1)),
                                                 akg.tvm.const(0.0, 'float16'),
                                                 A[n, kc1, (h * s_h + (kh * d_h) - p_top), (w * s_w + (kw * d_w) - p_left), kc0])
                            * B[(kc1 * k_h + kh) * k_w + kw, c1, c0, kc0],
                            axis=[kc1, kh, kw, kc0]), name=output_name,
                        attrs={
                            "pragma_conv_kernel_n": k_n,
                            "pragma_conv_kernel_h": k_h,
                            "pragma_conv_kernel_w": k_w,
                            "pragma_conv_padding_top": p_top,
                            "pragma_conv_padding_bottom": p_bottom,
                            "pragma_conv_padding_left": p_left,
                            "pragma_conv_padding_right": p_right,
                            "pragma_conv_bypass_l1": 1 if bypass_l1 else 0,
                            "pragma_conv_stride_h": s_h,
                            "pragma_conv_stride_w": s_w,
                            "pragma_conv_dilation_h": d_h,
                            "pragma_conv_dilation_w": d_w,
                            "pragma_conv_fm_n": in_n,
                            "pragma_conv_fm_c": in_c,
                            "pragma_conv_fm_h": in_h,
                            "pragma_conv_fm_w": in_w,
                            "pragma_conv_h_cut": (h_window_cut - 1) * s_h + k_h_d,
                            "pragma_conv_w_cut": (in_w + p_left + p_right),
                            "pragma_conv_co_cut": c1_cut * k_c0,
                            "pragma_conv_m_cut": tile_mm,
                            "pragma_conv_k_cut": tile_kk,
                            "pragma_conv_n_cut": tile_nn,
                            "feature": A.op.name,
                            "filter": B.op.name,
                            "bias": bias_name,
                            "res": output_name,
                            "res_bias": output_bias_name})
    if use_bias:
        # Broadcast-add the per-channel bias over N, H, W.
        cube = akg.tvm.compute(out_shape_nc1hwc0,
                               lambda n, c1, h, w, c0: C[n, c1, h, w, c0] + bias_value[0, c1, 0, 0, c0],
                               name=output_bias_name)
    else:
        cube = C
    return cube
def conv_01(fmap_shape, filter_shape, pad_, stride_, dilation_,
            tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0,
            use_bias=False, block_size=16, conv_dtype='float16'):
    """Build the conv backward kernels via autodiff of the forward op.

    Constructs the forward conv (from conv_origin), differentiates it
    w.r.t. the input A and the weight B, and compiles both to CCE.

    Args:
        fmap_shape, filter_shape: NCHW shapes (channels aligned to block_size).
        pad_, stride_, dilation_: conv parameters as (h, w) pairs.
        tile_*: tiling sizes forwarded to set_dims.
        use_bias: include a bias placeholder in the forward op.
        block_size: C0 packing width.
        conv_dtype: compute dtype.

    Returns:
        (mod_backward, mod_backward2): the backward-data ("conv_ad") and
        backward-weight ("conv_backward_weight") modules.
    """
    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size
    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size, block_size, block_size)
    # A placeholder (NC1HWCO)
    A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name="input0")
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal, dtype=conv_dtype, name="input1")
    data = [A, B]
    if use_bias:
        bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
        bias_name = "input2"
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
        data.append(bias_value)
    else:
        bias_name = 'None'
        bias_value = None
    conv, _ = conv_origin.conv(data, fmap_shape, filter_shape, pad_, stride_, dilation_, use_bias)
    kernel_name = 'conv_ad'
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size
    k_hw = k_h * k_w
    const_shift = k_hw - 1
    # B in Fractal format; result in Fractal format
    # Reverses the combined kh*kw index (180-degree spatial flip) and swaps
    # the two 16x16 block axes (i3, i2) — the weight layout the data-grad
    # convolution expects.
    def flip_weight(B, k_c, k_hw, const_shift):
        out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size, block_size)
        B_flip = akg.tvm.compute(out_shape,
                                 lambda i0, i1, i2, i3: B[i1 * k_hw + const_shift - truncmod(i0, k_hw),
                                                          floordiv(i0, k_hw), i3, i2],
                                 name=B.name + "_flipped")
        return B_flip
    # Dilates H with zeros: inserts (s_h - 1) / (s_w - 1) zero rows/cols
    # between neighboring elements (used when the forward stride > 1).
    def strided_head(H, s_h, s_w):
        n, c1, h, w, c0 = H.shape
        out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
        H_strided = akg.tvm.compute(out_shape, lambda i0, i1, i2, i3, i4:
                                    akg.tvm.expr.Select(akg.tvm.any(truncmod(i2, s_h) != 0,
                                                                    truncmod(i3, s_w) != 0),
                                                        akg.tvm.const(0.0, dtype="float16"),
                                                        H[i0, i1, floordiv(i2, s_h), floordiv(i3, s_w), i4]),
                                    name=H.name + "_strided")
        return H_strided
    B_flip = flip_weight(B, k_c, k_hw, const_shift)
    pld_B_flip = akg.tvm.placeholder(B_flip.shape, name="inp1_flipped", dtype='float16')
    HEAD = akg.tvm.placeholder(conv.shape, name="Head", dtype='float16')
    HEAD_n, HEAD_c1, HEAD_h, HEAD_w, HEAD_c0 = HEAD.shape
    info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value, HEAD_w.value),
                    (k_c, k_n, k_h, k_w), (2, 2), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    s_h, s_w = stride_
    # Backward-data: stride == 1 uses HEAD directly; otherwise the
    # zero-dilated head feeds the differentiation.
    if (s_h == 1) and (s_w == 1):
        ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
        jacs = list(akg.differentiate(conv, [A], HEAD, ad_attrs, [HEAD, pld_B_flip, None]))
        sjac = akg.tvm.create_schedule([jacs[0].op])
        op_vars = [HEAD, pld_B_flip, jacs[0]]
        info = set_dims((HEAD_n.value, HEAD_c1.value * HEAD_c0.value, HEAD_h.value, HEAD_w.value),
                        (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
                        tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    else:
        Head_strided = strided_head(HEAD, s_h, s_w)
        pld_Head_strided = akg.tvm.placeholder(Head_strided.shape, name="head_strided", dtype='float16')
        ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
        jacs = list(akg.differentiate(conv, [A], HEAD, ad_attrs, [pld_Head_strided, pld_B_flip, None]))
        sjac = akg.tvm.create_schedule([jacs[0].op])
        op_vars = [pld_Head_strided, pld_B_flip, jacs[0]]
        h_n, h_c1, h_h, h_w, h_c0 = pld_Head_strided.shape
        info = set_dims((h_n.value, h_c1.value * h_c0.value, h_h.value, h_w.value), (k_c, k_n, k_h, k_w),
                        (k_h - 1, k_w - 1), (1, 1), (1, 1), tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_backward = akg.build(sjac, op_vars, "cce", name=kernel_name, attrs={"dim": str(info)}, polyhedral=True)
    # Swaps the N and C axes of a 5D tensor (keeps NC1HWC0 packing).
    def transpose_data(A):
        out_shape = (A.shape[1] * block_size, truncdiv(A.shape[0], block_size), A.shape[2], A.shape[3], block_size)
        A_transpose = akg.tvm.compute(out_shape,
                                      lambda j0, j1, j2, j3, j4:
                                      A[j1 * block_size + j4, truncdiv(j0, block_size), j2, j3, truncmod(j0, block_size)],
                                      name=A.name + "_transposed")
        return A_transpose
    # Head is in 5D format
    # Output is in Fractal format
    def transpose_convert_head(Head):
        out_shape = ((floordiv(Head.shape[0].value, block_size)) * Head.shape[2].value * Head.shape[3].value,
                     Head.shape[1].value, block_size, block_size)
        tmp_6D_shape = (floordiv(Head.shape[0].value, block_size),
                        block_size, Head.shape[1].value, Head.shape[2].value, Head.shape[3].value, block_size)
        Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
        # Transpose from (N//block_size_N, block_size_N, C//block_size_C, H, W, block_size_C)
        # to (N//block_size_N, H, W, C//block_size_C, block_size_C, block_size_N,)
        Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
        Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
        return Head_transpose_convert
    X_transposed = transpose_data(A)
    pld_X_transposed = akg.tvm.placeholder(X_transposed.shape, name="inp0_transposed", dtype='float16')
    if (s_h > 1) or (s_w > 1):
        Head_transposed_converted = strided_head(HEAD, s_h, s_w)
    else:
        Head_transposed_converted = HEAD
    strided_head_n, strided_head_c1, strided_head_h, strided_head_w, strided_head_c0 = Head_transposed_converted.shape
    Head_transposed_converted = transpose_convert_head(Head_transposed_converted)
    # NOTE(review): this schedule is never built — appears unused; confirm.
    s_transposed_converted = akg.tvm.create_schedule(Head_transposed_converted.op)
    pld_Head_transposed_converted = akg.tvm.placeholder(Head_transposed_converted.shape,
                                                        name="head_transposed",
                                                        dtype='float16')
    # Backward-weight: differentiate w.r.t. B using the transposed input
    # and the transposed/converted head.
    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(akg.differentiate(conv, [B], HEAD, ad_attrs, [pld_X_transposed, pld_Head_transposed_converted, None]))
    sjac = akg.tvm.create_schedule([jacs[0].op])
    op_vars = [HEAD, pld_X_transposed, pld_Head_transposed_converted, jacs[0]]
    in_n, in_c1, in_h, in_w, in_c0 = A.shape
    info = set_dims((in_c1.value * in_c0.value, in_n.value, in_h.value, in_w.value),
                    (strided_head_c1.value * strided_head_c0.value, strided_head_n.value,
                     strided_head_h.value, strided_head_w.value),
                    (0, 0), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_backward2 = akg.build(sjac, op_vars, "cce",
                                  name="conv_backward_weight",
                                  attrs={"dim": str(info)},
                                  polyhedral=True)
    return mod_backward, mod_backward2
def conv_02(fmap_shape, filter_shape, pad_, stride_, dilation_,
            tile_hh=0, tile_coco=0, tile_mm=0, tile_kk=0, tile_nn=0, bypass_l1=False,
            use_bias=False, block_size=16, conv_dtype='float16'):
    """Build conv backward kernels plus the layout-transform helper kernels.

    Like conv_01 but based on the locally defined conv_compute_forward,
    and also compiles the standalone transform kernels (weight flip, head
    stride, input transpose, head transpose/convert) needed to feed the
    backward ops.

    Args:
        fmap_shape, filter_shape: NCHW shapes (channels aligned to block_size).
        pad_, stride_, dilation_: conv parameters as (h, w) pairs.
        tile_*: tiling sizes forwarded to set_dims / the forward pragmas.
        bypass_l1: forwarded to conv_compute_forward.
        use_bias: include a bias placeholder in the forward op.
        block_size: C0 packing width.
        conv_dtype: compute dtype.

    Returns:
        (mod_AD_data, mod_AD_weight, mod_transposed,
         mod_head_transposed_converted, mod_head_strided, mod_weight_flipped)
    """
    # input shape (NCHW -> NC1HWC0)
    in_n, in_c, in_h, in_w = fmap_shape
    in_c = (in_c + block_size - 1) // block_size * block_size
    # kernel shape (NCHW -> NC1HWC0 -> Fractal)
    k_n, k_c, k_h, k_w = filter_shape
    k_c = (k_c + block_size - 1) // block_size * block_size
    k_n = (k_n + block_size - 1) // block_size * block_size
    input_shape_nc1hwc0 = (in_n, in_c // block_size, in_h, in_w, block_size)
    in_n, _, in_h, in_w, _ = input_shape_nc1hwc0
    kernel_shape_nc1hwc0 = (k_n, k_c // block_size, k_h, k_w, block_size)
    k_n, _, k_h, k_w, _ = kernel_shape_nc1hwc0
    kernel_shape_fractal = (k_c // block_size * k_h * k_w, k_n // block_size, block_size, block_size)
    # A placeholder (NC1HWCO)
    A = akg.tvm.placeholder(input_shape_nc1hwc0, dtype=conv_dtype, name="input0")
    # B_placeholder (fractal)
    B = akg.tvm.placeholder(kernel_shape_fractal, dtype=conv_dtype, name="input1")
    if use_bias:
        bias_shape_nc1hwc0 = (1, k_n // block_size, 1, 1, block_size)
        bias_name = "input2"
        bias_value = akg.tvm.placeholder(bias_shape_nc1hwc0, dtype=conv_dtype, name=bias_name)
    else:
        bias_name = 'None'
        bias_value = None
    conv_forward = conv_compute_forward(fmap_shape, filter_shape, pad_, stride_, dilation_, A, B, bias_value,
                                        tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                        use_bias, block_size, conv_dtype)
    k_hw = k_h * k_w
    const_shift = k_hw - 1
    # B in Fractal format; result in Fractal format
    # 180-degree spatial flip of the combined kh*kw index plus a swap of
    # the two 16x16 block axes (i3, i2).
    def flip_weight(B, k_c, k_hw, const_shift):
        out_shape = (B.shape[1].value * k_hw, k_c // block_size, block_size, block_size)
        B_flip = akg.tvm.compute(out_shape,
                                 lambda i0, i1, i2, i3:
                                 B[i1 * k_hw + const_shift - truncmod(i0, k_hw), floordiv(i0, k_hw), i3, i2],
                                 name=B.name + "_flipped")
        return B_flip
    # H in 5D format; result in 5D format
    # Zero-dilates H/W by the forward stride (gradient "stretching").
    def strided_head(H, s_h, s_w):
        n, c1, h, w, c0 = H.shape
        out_shape = (n, c1, (h - 1) * s_h + 1, (w - 1) * s_w + 1, c0)
        H_strided = akg.tvm.compute(out_shape,
                                    lambda i0, i1, i2, i3, i4:
                                    akg.tvm.expr.Select(akg.tvm.any(truncmod(i2, s_h) != 0, truncmod(i3, s_w) != 0),
                                                        akg.tvm.const(0.0, dtype="float16"),
                                                        H[i0, i1, floordiv(i2, s_h), floordiv(i3, s_w), i4]),
                                    name=H.name + "_strided")
        return H_strided
    # A in 5D format; result in 5D format
    # Swaps the N and C axes while preserving the NC1HWC0 packing.
    def transpose_data(A):
        out_shape = (A.shape[1].value * block_size, A.shape[0].value // block_size,
                     A.shape[2].value, A.shape[3].value, block_size)
        A_transpose = akg.tvm.compute(out_shape,
                                      lambda j0, j1, j2, j3, j4:
                                      A[j1 * block_size + j4, floordiv(j0, block_size), j2, j3, truncmod(j0, block_size)],
                                      name=A.name + "_transposed")
        return A_transpose
    # Head is in 5D format; result in Fractal format
    def transpose_convert_head(Head):
        out_shape = ((Head.shape[0].value // block_size) * Head.shape[2].value * Head.shape[3].value,
                     Head.shape[1].value, block_size, block_size)
        tmp_6D_shape = (Head.shape[0].value // block_size, block_size,
                        Head.shape[1].value, Head.shape[2].value, Head.shape[3].value, block_size)
        Head_6D = akg.topi.reshape(Head, tmp_6D_shape)
        Head_6D_transpose = akg.topi.transpose(Head_6D, (0, 3, 4, 2, 5, 1))
        Head_transpose_convert = akg.topi.reshape(Head_6D_transpose, out_shape)
        return Head_transpose_convert
    HEAD = akg.tvm.placeholder(conv_forward.shape, name="Head", dtype='float16')
    # NCHW-equivalent shapes used to derive tiling info for each kernel.
    Head_transposed_NCHW = (HEAD.shape[1].value * HEAD.shape[4].value, HEAD.shape[0].value,
                            HEAD.shape[2].value, HEAD.shape[3].value)
    s_h, s_w = stride_
    Head_strided_NCHW = (HEAD.shape[0].value, HEAD.shape[1].value * HEAD.shape[4].value,
                         (HEAD.shape[2].value - 1) * s_h + 1, (HEAD.shape[3].value - 1) * s_w + 1)
    A_transposed_NCHW = (in_c, in_n, in_h, in_w)
    K_flip_rot_NCHW = (k_c, k_n, k_h, k_w)
    Head_transposed_converted = transpose_convert_head(HEAD)
    pld_Head_transposed_converted = akg.tvm.placeholder(Head_transposed_converted.shape,
                                                        name="Head_trans_fractal", dtype=conv_dtype)
    A_transposed = transpose_data(A)
    pld_A_transposed = akg.tvm.placeholder(A_transposed.shape, name="A_trans", dtype=conv_dtype)
    # Trivial 1x1x1x1 tiling used for the pure layout-transform kernels.
    info = dim.Dim()
    info.setdim(index=0, axis=0, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=1, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=2, tilel1=1, tilel0=1)
    info.setdim(index=0, axis=3, tilel1=1, tilel0=1)
    B_flip = flip_weight(B, k_c, k_hw, const_shift)
    pld_B_flipped = akg.tvm.placeholder(B_flip.shape, name="B_flip", dtype=conv_dtype)
    s_flipped = akg.tvm.create_schedule(B_flip.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_weight_flipped = akg.build(s_flipped, [B, B_flip], "cce", name=B.name + "_flipped",
                                       attrs={"dim": str(info)}, polyhedral=True)
    s_transposed_converted = akg.tvm.create_schedule(Head_transposed_converted.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_head_transposed_converted = akg.build(s_transposed_converted, [HEAD, Head_transposed_converted],
                                                  "cce", name="H_trans_converted",
                                                  attrs={"dim": str(info)},
                                                  polyhedral=True)
    Head_strided = strided_head(HEAD, s_h, s_w)
    pld_Head_strided = akg.tvm.placeholder(Head_strided.shape, name="Head_trans_5D", dtype=conv_dtype)
    s_strided = akg.tvm.create_schedule(Head_strided.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_head_strided = akg.build(s_strided, [HEAD, Head_strided],
                                     "cce", name="H_strided", attrs={"dim": str(info)}, polyhedral=True)
    s_transposed = akg.tvm.create_schedule(A_transposed.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_transposed = akg.build(s_transposed, [A, A_transposed], "cce",
                                   name="A_transposed", attrs={"dim": str(info)}, polyhedral=True)
    # Backward-data via autodiff: strided head convolved with flipped weight.
    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(akg.differentiate(conv_forward, [A], HEAD, ad_attrs, [pld_Head_strided, pld_B_flipped, None]))
    info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    sjac = akg.tvm.create_schedule([jacs[0].op])
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_AD_data = akg.build(sjac, [pld_Head_strided, pld_B_flipped, jacs[0]], "cce",
                                name="conv_AD_data", attrs={"dim": str(info)}, polyhedral=True)
    # NOTE(review): conv_data/mod_data below are built but not returned —
    # presumably a direct-conv reference for the AD kernel; confirm.
    conv_data = conv_compute_forward(Head_strided_NCHW, K_flip_rot_NCHW,
                                     (k_h - 1, k_h - 1, k_w - 1, k_w - 1), (1, 1), (1, 1),
                                     pld_Head_strided, pld_B_flipped, None,
                                     tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                     use_bias, block_size, conv_dtype)
    info = set_dims(Head_strided_NCHW, (k_c, k_n, k_h, k_w), (k_h - 1, k_w - 1), (1, 1), (1, 1),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    s_data = akg.tvm.create_schedule(conv_data.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_data = akg.build(s_data, [pld_Head_strided, pld_B_flipped, conv_data], "cce",
                             name="conv_data", attrs={"dim": str(info)}, polyhedral=True)
    # Backward-weight via autodiff: transposed input x transposed head.
    ad_attrs = {"ad_conv_enable": 1, "ad_conv_reuse_conv": 1}
    jacs = list(akg.differentiate(conv_forward, [B], HEAD, ad_attrs, [pld_A_transposed, pld_Head_transposed_converted, None]))
    info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1), (s_h, s_w),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    sjac = akg.tvm.create_schedule([jacs[0].op])
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_AD_weight = akg.build(sjac, [pld_A_transposed, pld_Head_transposed_converted, jacs[0]], "cce",
                                  name="conv_AD_weight", attrs={"dim": str(info)}, polyhedral=True)
    # NOTE(review): conv_weight/mod_weight are likewise built but not
    # returned; confirm whether they are still needed.
    conv_weight = conv_compute_forward(A_transposed_NCHW, Head_transposed_NCHW,
                                       (0, 0, 0, 0), (1, 1), (s_h, s_w),
                                       pld_A_transposed, pld_Head_transposed_converted, None,
                                       tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, bypass_l1,
                                       use_bias, block_size, conv_dtype)
    info = set_dims(A_transposed_NCHW, Head_transposed_NCHW, (0, 0), (1, 1), (s_h, s_w),
                    tile_hh, tile_coco, tile_mm, tile_kk, tile_nn, block_size)
    s_weight = akg.tvm.create_schedule(conv_weight.op)
    with akg.build_config(add_lower_pass=utils.debug_mode(0), dump_pass_ir=True):
        mod_weight = akg.build(s_weight, [pld_A_transposed, pld_Head_transposed_converted, conv_weight], "cce",
                               name="conv_weight", attrs={"dim": str(info)}, polyhedral=True)
    return mod_AD_data, mod_AD_weight, mod_transposed, mod_head_transposed_converted, mod_head_strided, mod_weight_flipped
| [
"1027252281@qq.com"
] | 1027252281@qq.com |
b6dca720f4e38f83363c8812d81b4850e23b7786 | 3f31750f4e90cbfd931e914101fb097c94452769 | /urls.py | c5160a2b4610a9508266ffd4189db794e3055cc6 | [] | no_license | shivaprasadNeginaha/ltti | 90a242f49cfb2876f27f6396c6d7cb5924611d15 | 5c32368622243a5f2ef59a57824738693ed4c34d | refs/heads/main | 2023-05-31T10:48:48.131728 | 2021-06-14T17:09:08 | 2021-06-14T17:09:08 | 376,890,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from django.contrib import admin
from django.urls import path
# we need to import views from our contact app
from contact import views
urlpatterns = [
    # Root path ('') is handled by the contact app's index view.
    path('', views.index, name='index'),
]
"shivaprasadne@gmail.com"
] | shivaprasadne@gmail.com |
169d2f9eaf9c0b81731413c8b44f2ef351665f7c | 2f95fffcc7eca186516e76a39c537e89849794ed | /oled.py | 2c75ea4e8fbdfb8e011a04c9e4350bf0deb8dbe7 | [] | no_license | DELL3580/test | 2acfba6096eecb664d5e2144af2cdb646c3a432f | 88f2bf2df0c79055fb286426a08563112b56127b | refs/heads/main | 2023-03-21T02:06:31.107366 | 2021-03-19T09:51:50 | 2021-03-19T09:51:50 | 348,711,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | import busio
import subprocess
import os
import time
from board import SCL, SDA
from oled_text import OledText, Layout32, BigLine, SmallLine
i2c = busio.I2C(SCL, SDA)
# Create the display, pass its pixel dimensions
oled = OledText(i2c, 128, 32)
# Select a layout with a FontAwesome font
# To print unicode characters, prefix them with \u
oled.text('\uf240', 1)
time.sleep(2)
while True:
    # '\' continues the long shell pipelines across source lines; the extra
    # leading whitespace ends up inside the command string but is ignored
    # by the shell.
    ipaddress = os.popen("ifconfig wlan0 \
        | grep 'inet addr' \
        | awk -F: '{print $2}' \
        | awk '{print $1}'").read()
    ssid = os.popen("iwconfig wlan0 \
        | grep 'ESSID' \
        | awk '{print $4}' \
        | awk -F\\\" '{print $2}'").read()
    print("ssid: " + ssid)
    print("ipaddress: " + ipaddress)
    # Bug fix: the original called draw.rectangle((0,0,width,height), ...)
    # here, but no PIL 'draw'/'width'/'height' objects exist anywhere in this
    # script (leftover from the Adafruit SSD1306 stats example) -- it raised
    # NameError on the first iteration, so the line is removed.
    cmd = "hostname -I | cut -d\' \' -f1"
    # check_output() returns bytes; decode+strip so the OLED shows clean text
    # instead of the b'...' repr the original produced via str().
    IP = subprocess.check_output(cmd, shell=True).decode().strip()
    cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
    CPU = subprocess.check_output(cmd, shell=True).decode().strip()
    cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'"
    MemUsage = subprocess.check_output(cmd, shell=True).decode().strip()  # computed but never displayed below
    cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'"
    Disk = subprocess.check_output(cmd, shell=True).decode().strip()  # computed but never displayed below
    # Three-line layout: small header line plus two larger Arimo lines.
    oled.layout = {
        1: SmallLine(0, 0),
        2: BigLine(5, 15, font="Arimo.ttf", size=24),
        3: BigLine(5, 40, font="Arimo.ttf", size=18)
    }
    # Rotate through ssid / IP / CPU readings, three seconds each.
    oled.text("ssid:" + " " + str(ssid))
    time.sleep(3)
    oled.clear()
    oled.text("IP:" + " " + str(IP))
    time.sleep(3)
    oled.clear()
    oled.text("CPU:" + " " + str(CPU))
    time.sleep(3)
    oled.clear()
    time.sleep(4)
    oled.clear()
| [
"noreply@github.com"
] | noreply@github.com |
2438dc850e5d62d640bcdc86236a89bc67376373 | 1d60c5a7b8ce6277bff514e376f79848f706344c | /Data Engineer with Python/04. Writing Efficient Python Code/04. Basic pandas optimizations/08. Bringing it all together: Predict win percentage.py | cf12b3bebb30941ce6308446c58c8d8a439da8bb | [] | no_license | DidiMilikina/DataCamp | 338c6e6d3b4f5b6c541c1aba155a36e9ee24949d | 3bf2cf3c1430190a7f8e54efda7d50a5fd66f244 | refs/heads/master | 2020-12-15T13:16:54.178967 | 2020-05-06T17:30:54 | 2020-05-06T17:30:54 | 235,113,616 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | '''
Bringing it all together: Predict win percentage
A pandas DataFrame (baseball_df) has been loaded into your session. For convenience, a dictionary describing each column within baseball_df has been printed into your console. You can reference these descriptions throughout the exercise.
You'd like to attempt to predict a team's win percentage for a given season by using the team's total runs scored in a season ('RS') and total runs allowed in a season ('RA') with the following function:
def predict_win_perc(RS, RA):
prediction = RS ** 2 / (RS ** 2 + RA ** 2)
return np.round(prediction, 2)
Let's compare the approaches you've learned to calculate a predicted win percentage for each season (or row) in your DataFrame.
Instructions 1/4
25 XP
1
Use a for loop and .itertuples() to predict the win percentage for each row of baseball_df with the predict_win_perc() function. Save each row's predicted win percentage as win_perc_pred and append each to the win_perc_preds_loop list.
2
Apply predict_win_perc() to each row of the baseball_df DataFrame using a lambda function. Save the predicted win percentage as win_perc_preds_apply.
3
Calculate the predicted win percentages by passing the underlying 'RS' and 'RA' arrays from baseball_df into predict_win_perc(). Save these predictions as win_perc_preds_np.
'''
SOLUTION
1
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
    runs_scored = row.RS
    runs_allowed = row.RA
    # Bug fix: the original passed runs_scored for BOTH arguments; the
    # prediction needs runs allowed too (solutions 2 and 3 below do this).
    win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
    win_perc_preds_loop.append(win_perc_pred)
2
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Apply predict_win_perc to each row of the DataFrame
win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
3
win_perc_preds_loop = []
# Use a loop and .itertuples() to collect each row's predicted win percentage
for row in baseball_df.itertuples():
runs_scored = row.RS
runs_allowed = row.RA
win_perc_pred = predict_win_perc(runs_scored, runs_allowed)
win_perc_preds_loop.append(win_perc_pred)
# Apply predict_win_perc to each row of the DataFrame
win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1)
# Calculate the win percentage predictions using NumPy arrays
win_perc_preds_np = predict_win_perc(baseball_df['RS'].values, baseball_df['RA'].values)
baseball_df['WP_preds'] = win_perc_preds_np
print(baseball_df.head()) | [
"didimilikina8@gmail.com"
] | didimilikina8@gmail.com |
3f126799ab9a40abdd2ebaae9d63469bf925c350 | 65381b8dffa1ade89746f6fc3a4979a7eb548d34 | /analytic_structure/models/analytic_dimension.py | 3e0c1f79cf94b69f49c82b31d834c963f9d7f218 | [] | no_license | ff4f/AISJ-13 | a4240d1952c3854dd5b21a62cf7dbfdebb16fde5 | a2f2183e0f753100842877efecc844bdc72f8bd4 | refs/heads/master | 2023-05-08T22:54:43.972954 | 2021-06-03T14:44:10 | 2021-06-03T14:48:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # -*- coding: utf-8 -*-
from odoo import models, fields
class AnalyticDimension(models.Model):
    """Odoo model for a named analytic dimension.

    A dimension carries a mandatory display ``name`` and may optionally
    declare that it depends on another dimension via the self-referential
    ``dependency_id`` link.
    """
    ######################
    # Private attributes #
    ######################
    _name = "account.analytic.dimension"
    ###################
    # Default methods #
    ###################
    ######################
    # Fields declaration #
    ######################
    # Human-readable name of the dimension (required).
    name = fields.Char(string="Dimension Name",
                       required=True)
    # Optional link to the dimension this one depends on (same model).
    dependency_id = fields.Many2one(comodel_name="account.analytic.dimension",
                                    string="Dependent On")
    ##############################
    # Compute and search methods #
    ##############################
    ############################
    # Constrains and onchanges #
    ############################
    #########################
    # CRUD method overrides #
    #########################
    ##################
    # Action methods #
    ##################
"LuisAngelMalaveMora@gmail.com"
] | LuisAngelMalaveMora@gmail.com |
6a64c23916508a771fed5e4e09ebb9f4d2e9e1dc | 02444f07cd8363624eebf6aaf0d6b1c658008a65 | /compare_classifiers.py | e8c48a4348d8f142ea133816cb9940d7484f29a6 | [
"MIT"
] | permissive | haoran-c/hotel_reviews | 2792135f801e6b39268305b2a002c04dc7650a4c | 393a860b2202046c2b95d6f304ab7e9ebcf0db27 | refs/heads/master | 2020-11-26T13:35:11.647673 | 2019-12-20T06:10:10 | 2019-12-20T06:10:10 | 229,088,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,604 | py | import json
import os
import numpy
from joblib import dump, load
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report
def svm_classifier(x_train, y_train):
    """Train a linear-kernel SVM on the review vectors, persist it, return it.

    :param x_train: vectorized review text
    :param y_train: labels, 1 for positive, 0 for negative
    :return: the fitted classifier
    """
    print('************ Support Vector Machine Model************')
    # fit() returns the estimator itself, so train and bind in one step.
    model = SVC(kernel='linear', probability=True).fit(x_train, y_train)
    dump(model, 'classifiers/SvmClassifier.jbl')
    return model
def nb_classifier(x_train, y_train):
    """Train a multinomial naive Bayes model, persist it, return it.

    :param x_train: vectorized review text
    :param y_train: labels, 1 for positive, 0 for negative
    :return: the fitted classifier
    """
    print('************** Naive Bayes Model ********************')
    model = MultinomialNB(alpha=0.01)
    model.fit(x_train, y_train)
    dump(model, 'classifiers/NbClassifier.jbl')
    return model
def logistic_classifier(x_train, y_train):
    """Train an L2-regularized logistic regression, persist it, return it.

    :param x_train: vectorized review text
    :param y_train: labels, 1 for positive, 0 for negative
    :return: the fitted classifier
    """
    print('************** Logistic Regression ******************')
    # fit() returns the estimator itself, so train and bind in one step.
    model = LogisticRegression(penalty='l2').fit(x_train, y_train)
    dump(model, 'classifiers/LogisticClassifier.jbl')
    return model
def knn_classifier(x_train, y_train):
    """Train a k-nearest-neighbors model, persist it, return it.

    :param x_train: vectorized review text
    :param y_train: labels, 1 for positive, 0 for negative
    :return: the fitted classifier
    """
    print('************ K-nearest Neighbors Model **************')
    # fit() returns the estimator itself, so train and bind in one step.
    model = KNeighborsClassifier().fit(x_train, y_train)
    dump(model, 'classifiers/KnnClassifier.jbl')
    return model
def decision_classifier(x_train, y_train):
    """Train a decision tree, persist it, return it.

    :param x_train: vectorized review text
    :param y_train: labels, 1 for positive, 0 for negative
    :return: the fitted classifier
    """
    print('************** Decision Tree Model ******************')
    # fit() returns the estimator itself, so train and bind in one step.
    model = tree.DecisionTreeClassifier().fit(x_train, y_train)
    dump(model, 'classifiers/DeciClassifier.jbl')
    return model
def random_forest_class(x_train, y_train):
    """Train a random forest, persist it, return it.

    :param x_train: vectorized review text
    :param y_train: labels, 1 for positive, 0 for negative
    :return: the fitted classifier
    """
    print('************** Random Forest Model ******************')
    # n_estimators is the number of trees used in the forest.
    model = RandomForestClassifier(n_estimators=8).fit(x_train, y_train)
    dump(model, 'classifiers/RandomFClassifier.jbl')
    return model
def precision(clf):
    """Print accuracy and a per-class precision/recall report for *clf*.

    Relies on the module-level test split ``x_test`` / ``y_test`` created in
    ``__main__``.  The detailed report thresholds the positive-class
    probability at 0.5 and labels the classes 'neg'/'pos'.
    """
    from sklearn.metrics import accuracy_score
    # One prediction pass is enough -- the original ran clf.predict(x_test)
    # twice and also computed a precision_recall_curve whose result was
    # never used (and whose unpacking shadowed this function's own name).
    predicted = clf.predict(x_test)
    positive_proba = clf.predict_proba(x_test)[:, 1]
    report_labels = positive_proba > 0.5
    print('Precision: %.2f' % accuracy_score(y_test, predicted))
    print("--------------------")
    print(classification_report(y_test, report_labels, target_names=['neg', 'pos']))
if __name__ == '__main__':
    # Ensure the output directory for the serialized models exists.
    if not os.path.isdir('classifiers'):
        os.mkdir('classifiers')
    data = []
    labels = []
    print('Reading training data set...\n')
    # Each JSON entry maps an id to [review_text, label].
    with open("data/train_set.json", "r", encoding='utf-8') as read_file:
        dataset = json.loads(read_file.read())
    for line in dataset:
        labels.append(dataset[line][1])
        data.append(dataset[line][0])
    x = numpy.array(data)
    labels = numpy.array(labels)
    labels = [int(i) for i in labels]
    movie_target = labels
    # convert the data into vectors
    vec = TfidfVectorizer(binary=False)
    # load the data set, set 80% of the data for training and the rest 20% for test
    x_train, x_test, y_train, y_test = train_test_split(x, movie_target, test_size=0.2)
    # Fit the vectorizer on the training split only, then reuse it for test.
    x_train = vec.fit_transform(x_train)
    x_test = vec.transform(x_test)
    dump(vec, 'classifiers/vectorizer.jbl')
    print('Trained vectorizer is saved to data/vectorizer.jbl\n')
    # Train each model and print its accuracy / classification report.
    precision(svm_classifier(x_train, y_train))
    precision(nb_classifier(x_train, y_train))
    precision(knn_classifier(x_train, y_train))
    precision(logistic_classifier(x_train, y_train))
    precision(decision_classifier(x_train, y_train))
    precision(random_forest_class(x_train, y_train))
| [
"haoranchen@brandeis.edu"
] | haoranchen@brandeis.edu |
22f07dc730bca851d7ff57229e0897b077701ede | f44e440f6d422825749228aaa8208afeef65068c | /peticiones.py | 6cf570e1b408f1cc5b1329edc459e0a68c6ff2a8 | [] | no_license | Andregal/Orion-The-Game | 1ae94251d41963a4a6707663da6500dcf5caf737 | d71735d6e2bb90a2ddb8278351d71e3964b0ebce | refs/heads/master | 2020-03-08T17:48:30.955483 | 2018-07-13T06:02:26 | 2018-07-13T06:02:26 | 128,278,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,957 | py | # Modulos necesarios
import json # Convertir a JSON
import http.client # Realizar peticiones
from subprocess import call #Ejecutar servidor
##### FUNCIONES INTERNAS #####
# Convertir a String
def convertirString(binario):
    """Decode a UTF-8 byte payload and swap single quotes for double quotes."""
    texto = binario.decode('utf8')
    return texto.replace("'", '"')
# Convertir a JSON
def convertirJSON(objeto):
    """Normalize a byte payload via convertirString and parse it as JSON."""
    return json.loads(convertirString(objeto))
##### PETICIONES #####
# Peticion validarExistencia
def validarExistencia(username):
    """Ask the backend whether *username* already exists.

    Issues GET /usuario/validarExistencia/<username> and returns the value
    under the "result" key of the JSON response (a boolean, per the example
    below).
    """
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("GET", "/usuario/validarExistencia/" + username)
    response = convertirJSON(conn.getresponse().read())
    conn.close()
    return response["result"]
#Ejemplo:
# Metodo: validarExistencia("cesar")
# Respuesta: True
# Peticion crearUsuario
def crearUsuario(username, password):
    """Create a new user via POST /usuario/crear.

    Skips the request (and prints a message) when validarExistencia reports
    the username is already taken.
    """
    if not validarExistencia(username):
        usuario = {
            "username": username,
            "password": password
        }
        conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
        # NOTE(review): the response is never read and the connection is
        # closed immediately after request(); confirm the server reliably
        # processes the request in this pattern.
        conn.request("POST", "/usuario/crear", body=json.dumps(usuario), headers={
            "Content-Type": "application/json"
        })
        conn.close()
        print("Usuario creado")
    else:
        print("El usuario ya existe")
#Ejemplo:
# Metodo: crearUsuario("juan", "juan1234")
# Respuesta: None
# Peticion validarContrasena
def validarContrasena(username, password):
    """Check the given credentials via POST /usuario/validarContrasena.

    Returns the value under the "result" key of the JSON response (a
    boolean, per the example below).
    """
    usuario = {
        "username": username,
        "password": password
    }
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("POST", "/usuario/validarContrasena", body=json.dumps(usuario), headers={
        "Content-Type": "application/json"
    })
    response = convertirJSON(conn.getresponse().read())
    conn.close()
    return response["result"]
#Ejemplo:
# Metodo: validarContrasena("cesar", "cesar1234")
# Respuesta: False
# Peticion actualizarContrasena
def actualizarContrasena(username, password):
    """Update the user's password via PUT /usuario/actualizarContrasena.

    The response is not read or checked; success is assumed and a
    confirmation is printed.  (NOTE(review): the printed message has a
    typo -- "actualiza" -- kept as-is here since it is runtime output.)
    """
    usuario = {
        "username": username,
        "password": password
    }
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("PUT", "/usuario/actualizarContrasena", body=json.dumps(usuario), headers={
        "Content-Type": "application/json"}
    )
    conn.close()
    print("Contraseña actualiza")
#Ejemplo:
# Metodo: actualizarContrasena("rodrigo", "galindo1234")
# Respuesta: None
# Peticion obtenerScore
def obtenerScore(username):
    """Fetch the user's best score via GET /usuario/obtenerScore/<username>.

    Returns the value under the "score" key of the JSON response.
    """
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("GET", "/usuario/obtenerScore/" + username)
    response = convertirJSON(conn.getresponse().read())
    conn.close()
    return response["score"]
#Ejemplo:
# Metodo: obtenerScore("bacini")
# Respuesta: 1000
# Peticion actualizarScore
def actualizarScore(username, score):
    """Store *score* for the user only if it beats their current best.

    Performs a read (obtenerScore) followed by a conditional PUT to
    /usuario/actualizarScore.  NOTE(review): this read-then-write is not
    atomic -- a concurrent update between the two requests could be lost.
    """
    if score > obtenerScore(username):
        usuario = {
            "username": username,
            "score": score
        }
        conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
        conn.request("PUT", "/usuario/actualizarScore", body=json.dumps(usuario), headers={
            "Content-Type": "application/json"
        })
        conn.close()
        print("Score actualizado")
#Ejemplo:
# Metodo: actualizarScore("rodrigo", 600)
# Respuesta: None
# Peticion mostrarRanking
def mostrarRanking():
    """Fetch the full ranking via GET /ranking/mostrar.

    Returns the parsed JSON response (a list of {"username", "score"}
    objects, per the example below).
    """
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("GET", "/ranking/mostrar")
    response = convertirJSON(conn.getresponse().read())
    conn.close()
    return response
#Ejemplo:
# Metodo: mostrarRanking()
# Respuesta: [{"username": "cesar", "score": 1000}, {"username": "rodrigo", "score": 500}, ...]
# Peticion mostrarUsuarioSegunPuesto
def mostrarUsuarioSegunPuesto(puesto):
    """Fetch the user holding ranking position *puesto*.

    Issues GET /ranking/mostrar/<puesto> and returns the parsed JSON
    response (a {"username", "score"} object, per the example below).
    """
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("GET", "/ranking/mostrar/" + str(puesto))
    response = convertirJSON(conn.getresponse().read())
    conn.close()
    return response
#Ejemplo:
# Metodo: mostrarUsuarioSegunPuesto(2)
# Respuesta: {"username": "bacini", "score": 200}
# Peticion mostrarPosUsuario
def mostrarPosUsuario(username):
    """Fetch the ranking position of *username*.

    Issues GET /ranking/posicion/<username> and returns the value under the
    "pos" key of the JSON response.
    """
    conn = http.client.HTTPConnection("hidden-atoll-65961.herokuapp.com")
    conn.request("GET", "/ranking/posicion/" + username)
    response = convertirJSON(conn.getresponse().read())
    conn.close()
    return response["pos"]
#Ejemplo:
# Metodo: mostrarPosUsuario("cesar")
# Respuesta: 5
#Prender servidor
##print(mostrarRanking())
##print(validarExistencia("cesar"))
# Estas funciones se van a integrar con los archivos principales del juego
# en sus respectivas pantallas.
# Todas las funciones realizan las peticiones sin problemas.
| [
"20130514@aloe.ulima.edu.pe"
] | 20130514@aloe.ulima.edu.pe |
cc7a68cd0a40518011c93f4e0ea7416c9ae20dc9 | b62551c83cbb990033584b9254f330491c237e9a | /pset4/ps4c.py | 24e708be6d63710359ef08673ef1396c83ed1a2d | [] | no_license | Carpetoid/6.0001_sols | 75cb356115b072b4531d918dcbf08e440dc1a007 | 65dd76c9236be1b036b169eaabc458bcab19d1be | refs/heads/master | 2023-03-13T22:36:46.478434 | 2021-03-14T17:47:38 | 2021-03-14T17:47:38 | 304,158,360 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,188 | py | # Problem Set 4C
# Name: <your name here>
# Collaborators:
# Time Spent: x:xx
import string
from ps4a import get_permutations
### HELPER CODE ###
def load_words(file_name):
    '''
    file_name (string): the name of the file containing
    the list of words to load

    Returns: a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    '''
    print("Loading word list from file...")
    # wordlist: list of strings
    wordlist = []
    # 'with' guarantees the handle is closed -- the original opened the file
    # and never closed it, leaking the descriptor.
    with open(file_name, 'r') as in_file:
        for line in in_file:
            wordlist.extend(word.lower() for word in line.split(' '))
    print("  ", len(wordlist), "words loaded.")
    return wordlist
def is_word(word_list, word):
    '''
    Determines if word is a valid word, ignoring
    capitalization and punctuation

    word_list (list): list of words in the dictionary.
    word (string): a possible word.

    Returns: True if word is in word_list, False otherwise

    Example:
    >>> is_word(word_list, 'bat') returns
    True
    >>> is_word(word_list, 'asdf') returns
    False
    '''
    # Lowercase first, then shave surrounding punctuation before the lookup.
    cleaned = word.lower().strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
    return cleaned in word_list
### END HELPER CODE ###
WORDLIST_FILENAME = 'words.txt'
# you may find these constants helpful
VOWELS_LOWER = 'aeiou'
VOWELS_UPPER = 'AEIOU'
CONSONANTS_LOWER = 'bcdfghjklmnpqrstvwxyz'
CONSONANTS_UPPER = 'BCDFGHJKLMNPQRSTVWXYZ'
class SubMessage(object):
    """A plaintext message that can be enciphered with a vowel-substitution
    cipher: consonants keep their identity while the vowels a/e/i/o/u are
    remapped according to a caller-supplied permutation."""

    def __init__(self, text):
        '''
        Initializes a SubMessage object

        text (string): the message's text

        A SubMessage object has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        self.message_text = text
        self.valid_words = load_words(WORDLIST_FILENAME)

    def get_message_text(self):
        '''
        Used to safely access self.message_text outside of the class

        Returns: self.message_text
        '''
        return self.message_text

    def get_valid_words(self):
        '''
        Used to safely access a copy of self.valid_words outside of the class.
        This helps you avoid accidentally mutating class attributes.

        Returns: a COPY of self.valid_words
        '''
        # list() builds a fresh shallow copy, protecting the attribute.
        return list(self.valid_words)

    def build_transpose_dict(self, vowels_permutation):
        '''
        vowels_permutation (string): a string containing a permutation of
        vowels (a, e, i, o, u); position i supplies the substitute for the
        i-th vowel of 'aeiou'.

        Returns: a dictionary with 52 keys mapping every uppercase and
        lowercase ASCII letter to its cipher letter.  Consonants map to
        themselves; case is preserved.

        Example: "eaiuo" gives a->e, e->a, i->i, o->u, u->o, so
        "Hello World!" maps to "Hallu Wurld!".
        '''
        vowel_map = dict(zip('aeiou', vowels_permutation))
        transpose = {}
        for letter in string.ascii_lowercase:
            substitute = vowel_map.get(letter, letter)
            transpose[letter] = substitute
            transpose[letter.upper()] = substitute.upper()
        return transpose

    def apply_transpose(self, transpose_dict):
        '''
        transpose_dict (dict): a transpose dictionary

        Returns: an encrypted version of the message text, based
        on the dictionary.  Characters absent from the dictionary
        (spaces, digits, punctuation) pass through unchanged.
        '''
        return ''.join(transpose_dict.get(ch, ch) for ch in self.message_text)
class EncryptedSubMessage(SubMessage):
    def __init__(self, text):
        '''
        Initializes an EncryptedSubMessage object

        text (string): the encrypted message text

        An EncryptedSubMessage object inherits from SubMessage and has two attributes:
            self.message_text (string, determined by input text)
            self.valid_words (list, determined using helper function load_words)
        '''
        SubMessage.__init__(self, text)

    def decrypt_message(self):
        '''
        Attempt to decrypt the encrypted message.

        Tries every permutation of the vowels, decrypts the message with
        each, and keeps the decryption yielding the most valid English
        words.  Ties keep the later permutation (matching the original's
        >= comparison).  Returns the original text when no permutation
        produces at least one valid word.

        Returns: the best decrypted message (string)
        '''
        # Perf fix: copy the word list ONCE.  The original called
        # self.get_valid_words() -- which copies the entire list -- for
        # every word of every candidate decryption, making the scan
        # needlessly quadratic in the word-list size.
        valid_words = self.get_valid_words()
        best_permutation = None
        best_count = 0
        for permutation in get_permutations('aeiou'):
            candidate = self.apply_transpose(self.build_transpose_dict(permutation))
            count = sum(1 for word in candidate.split(" ")
                        if is_word(valid_words, word))
            if count >= best_count:
                best_count = count
                best_permutation = permutation
        if best_count == 0:
            return self.message_text
        return self.apply_transpose(self.build_transpose_dict(best_permutation))
if __name__ == '__main__':
# Example test case
message = SubMessage("Hello World!")
permutation = "eaiuo"
enc_dict = message.build_transpose_dict(permutation)
print("Original message:", message.get_message_text(), "Permutation:", permutation)
print("Expected encryption:", "Hallu Wurld!")
print("Actual encryption:", message.apply_transpose(enc_dict))
enc_message = EncryptedSubMessage(message.apply_transpose(enc_dict))
print("Decrypted message:", enc_message.decrypt_message())
#TODO: WRITE YOUR TEST CASES HERE
message = SubMessage("Almost done")
permutation = "eioua"
enc_dict = message.build_transpose_dict(permutation)
print("Original message:", message.get_message_text(), "Permutation:", permutation)
print("Expected encryption:", 'Elmost duni')
print("Actual encryption:", message.apply_transpose(enc_dict))
enc_message = EncryptedSubMessage(message.apply_transpose(enc_dict))
print("Decrypted message:", enc_message.decrypt_message())
message = SubMessage("Hi person reading this!")
permutation = "ioeau"
enc_dict = message.build_transpose_dict(permutation)
print("Original message:", message.get_message_text(), "Permutation:", permutation)
print("Expected encryption:", 'He porsan roideng thes!')
print("Actual encryption:", message.apply_transpose(enc_dict))
enc_message = EncryptedSubMessage(message.apply_transpose(enc_dict))
print("Decrypted message:", enc_message.decrypt_message())
| [
"abdulrahmanfayoumi2@gmail.com"
] | abdulrahmanfayoumi2@gmail.com |
bc99bcdcc7488fddaf39428a7b0c74063fba673f | 5745358f6dd4246c4e1e86fc70cb6b532fc80a3b | /AnInterestingProject/algos/Dfs.py | 65e3a1fc39821a08454839ebf324fdd4326bba80 | [] | no_license | sandro101/interestingProject | e52decef582d1c1e911f7357c056fc25517c4d17 | 826ad07f0391640b398c157baf2bdfc453b2f7e9 | refs/heads/master | 2018-09-17T20:20:34.636897 | 2018-06-05T21:25:58 | 2018-06-05T21:25:58 | 104,572,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | from collections import deque
from AnInterestingProject.jcollection.TreeNode import build_some_tree, TreeNode
def dfs(tree, order=None):
    """Recursive depth-first traversal over nodes exposing .data, .children
    and a mutable .discovered flag.

    order="preorder" prints a node before its children, "postorder" prints
    it after; any other value traverses silently.  Nodes are printed at
    most once, guarded by the `discovered` flag.
    """
    if tree is None:
        return
    def visit(node):
        if not node.discovered:
            print(node.data + " visited")
            node.discovered = True
    if order == "preorder":
        visit(tree)
    for subtree in tree.children:
        dfs(subtree, order=order)
    if order == "postorder":
        visit(tree)
def dfs_non_recursive_preorder(tree):
    """Iterative preorder DFS; prints each node the first time it is seen."""
    stack = [tree]
    while stack:
        node = stack.pop()
        if not node.discovered:
            print(node.data + " visited")
            node.discovered = True
        # Push right-to-left so the leftmost child is popped first.
        stack.extend(child for child in reversed(node.children)
                     if not child.discovered)
def dfs_non_recursive_postorder(tree):
    """Iterative postorder-style DFS: record nodes in pop order, then print
    them back to front."""
    stack = [tree]
    visit_order = []
    while stack:
        node = stack.pop()
        visit_order.append(node)
        stack.extend(child for child in node.children if not child.discovered)
    # Popping from the end replays the recorded order in reverse.
    while visit_order:
        print(visit_order.pop().data + " visited")
##dfs(build_some_tree(), order='preorder')
##dfs(build_some_tree(), order='postorder')
##dfs(build_some_tree(), order='inorder')
##dfs_non_recursive_postorder(build_some_tree())
##dfs_non_recursive_preorder(build_some_tree())
| [
"robert.sanderson@teahcfirst.org.uk"
] | robert.sanderson@teahcfirst.org.uk |
a1560bbfbb2664b258598e195a2ece3bcf4f135d | 8ab60ae62d5dc19b1480a2a4ed08149888d12794 | /tests/influx_mock.py | 9f1b0c82c9a48de80c05ed4c6393d545eeb63194 | [
"MIT"
] | permissive | CVi/influxdb_aggregation | 44f41241ef2adcfdae2ce4693b25e13502ca23d6 | 082946da3066d5ca908076d61069336cd5b98c0f | refs/heads/master | 2020-03-28T07:43:04.069267 | 2018-09-08T17:14:36 | 2018-09-08T17:14:36 | 147,919,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | from mock import Mock
def make_client(measurements=None, continuous_queries=None,
                retention_policies=None, show=None):
    """Build a Mock InfluxDB client whose .query() dispatches on the query verb.

    SHOW <key> queries return the canned points registered under that key in
    *show* (augmented with the measurements / continuous_queries /
    retention_policies shortcuts); CREATE/ALTER/DROP and anything else are
    recorded on helper mocks (client.create_query, client.alter_query,
    client.drop_query, client.other_query) so tests can assert on them.

    NOTE(review): when a *show* dict is passed in, it is mutated in place by
    the shortcut parameters -- callers sharing the dict should be aware.
    """
    client = Mock()
    if show is None:
        show = {}
    # Shortcut parameters fold into the SHOW lookup table.
    if measurements is not None:
        show["MEASUREMENTS"] = measurements
    if continuous_queries is not None:
        show["CONTINUOUS QUERIES"] = continuous_queries
    if retention_policies is not None:
        show["RETENTION POLICIES"] = retention_policies
    def mock_query_result(q):
        # Slice off the verb; the remainder is either a SHOW key or the
        # statement recorded on the corresponding helper mock.
        if q.startswith("SHOW "):
            key = q[5:]
            points = []
            if key in show:
                points = show[key]
        elif q.startswith("CREATE "):
            client.create_query(q[7:])
            points = Mock()
        elif q.startswith("ALTER "):
            client.alter_query(q[6:])
            points = Mock()
        elif q.startswith("DROP "):
            client.drop_query(q[5:])
            points = Mock()
        else:
            client.other_query(q)
            points = Mock()
        # Mirror the influxdb client API: result.get_points() yields points.
        query_result = Mock()
        query_result.get_points.return_value = points
        return query_result
    client.query.side_effect = mock_query_result
    return client
| [
"christoffer@viken.me"
] | christoffer@viken.me |
da09133fb4518ae1ebcb754d0dc8624e28e1a2a8 | a073416203ed779459842e6c59b36173906f3bac | /laundry/settings.py | cc02a3b28f2301c8175716e61b02cdd1fca74c88 | [] | no_license | paulinakaminska/laundry | 3ade728e6b3ff8bddddae66b59618748b7636579 | a5210186fd3950647691bac4c4c74c244b8bdf51 | refs/heads/master | 2020-09-16T09:41:21.743517 | 2019-11-28T19:40:41 | 2019-11-28T19:40:41 | 223,728,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | """
Django settings for laundry project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'p^=us!tdac25)y!)0m6nly&4z^ca(=5t=0+2^4ycp4_tf8tjf6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Must list the served hostnames once DEBUG is turned off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'laundry.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'laundry.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"paulinakaminska@hotmail.com"
] | paulinakaminska@hotmail.com |
bf099cc75161c182c20b815f9d7a22cdc614fc7b | eaf8b302348e2775d4f66c49a6c0a1dd21bf58e9 | /Prometheus插件开发/pmweb/alarmconfig/migrations/0001_initial.py | f56f11e650c68f96383123cb607e176d1b74187c | [] | no_license | whantt/PrometheusMonitor | a2105dd1a1c523ae7aa73dc75e2d5a44b5798683 | 6e615738b94fbc8e1fc46ca8dbddfbee1cc0f303 | refs/heads/master | 2020-05-05T04:08:04.888606 | 2019-04-02T11:31:09 | 2019-04-02T11:31:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2019-03-28 03:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='PrometheusAlarm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100, verbose_name='\u544a\u8b66\u65b9\u5f0f')),
('url', models.CharField(max_length=1000, verbose_name='\u5bf9\u63a5\u5730\u5740')),
('name', models.CharField(max_length=100, verbose_name='\u544a\u8b66\u7ec4')),
('comment', models.CharField(max_length=1000, verbose_name='\u8bf4\u660e')),
],
options={
'db_table': 'alarm_url',
},
),
]
| [
"30825490+1182640071@users.noreply.github.com"
] | 30825490+1182640071@users.noreply.github.com |
386b87b23a4abb72e8025a74ef4beb8cda822341 | c2bcf42e04a1e2146b41b250ff14e62fddcdf589 | /docs/examples/plot_gpr.py | b38412ecdc8bb8734c124690fb196f341c3f89ea | [
"Apache-2.0"
] | permissive | onnx/sklearn-onnx | 0f958e1c090572fbe11e15f95bec975d1780cf8d | 895c3a76a315c7a6567a1a07a96dc658994ec16a | refs/heads/main | 2023-08-18T18:49:25.164433 | 2023-08-17T09:52:31 | 2023-08-17T09:52:31 | 162,340,939 | 455 | 92 | Apache-2.0 | 2023-08-31T16:04:13 | 2018-12-18T20:18:48 | Python | UTF-8 | Python | false | false | 6,674 | py | # SPDX-License-Identifier: Apache-2.0
"""
.. _l-gpr-example:
Discrepencies with GaussianProcessorRegressor: use of double
============================================================
The `GaussianProcessRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.
GaussianProcessRegressor.html>`_ involves
many matrix operations which may requires double
precisions. *sklearn-onnx* is using single floats by default
but for this particular model, it is better to use double.
Let's see how to create an ONNX file using doubles.
Train a model
+++++++++++++
A very basic example using *GaussianProcessRegressor*
on the Boston dataset.
"""
import pprint
import numpy
import sklearn
from sklearn.datasets import load_diabetes
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import DotProduct, RBF
from sklearn.model_selection import train_test_split
import onnx
import onnxruntime as rt
import skl2onnx
from skl2onnx.common.data_types import FloatTensorType, DoubleTensorType
from skl2onnx import convert_sklearn
dataset = load_diabetes()
X, y = dataset.data, dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y)
gpr = GaussianProcessRegressor(DotProduct() + RBF(), alpha=1.0)
gpr.fit(X_train, y_train)
print(gpr)
###########################
# First attempt to convert a model into ONNX
# ++++++++++++++++++++++++++++++++++++++++++
#
# The documentation suggests the following way to
# convert a model into ONNX.
initial_type = [("X", FloatTensorType([None, X_train.shape[1]]))]
onx = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
try:
pred_onx = sess.run(None, {"X": X_test.astype(numpy.float32)})[0]
except RuntimeError as e:
print(str(e))
###########################
# Second attempt: variable dimensions
# +++++++++++++++++++++++++++++++++++
#
# Unfortunately, even though the conversion
# went well, the runtime fails to compute the prediction.
# The previous snippet of code imposes fixed dimension
# on the input and therefore let the runtime assume
# every node output has outputs with fixed dimensions
# And that's not the case for this model.
# We need to disable these checkings by replacing
# the fixed dimensions by an empty value.
# (see next line).
initial_type = [("X", FloatTensorType([None, None]))]
onx = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess = rt.InferenceSession(onx.SerializeToString())
pred_onx = sess.run(None, {"X": X_test.astype(numpy.float32)})[0]
pred_skl = gpr.predict(X_test)
print(pred_skl[:10])
print(pred_onx[0, :10])
###################################
# The differences seems quite important.
# Let's confirm that by looking at the biggest
# differences.
diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) - numpy.squeeze(pred_onx)))[-5:]
print(diff)
print("min(Y)-max(Y):", min(y_test), max(y_test))
###########################
# Third attempt: use of double
# ++++++++++++++++++++++++++++
#
# The model uses a couple of matrix computations
# and matrices have coefficients with very different
# order of magnitude. It is difficult to approximate
# the prediction made with scikit-learn if the converted
# model sticks to float. Double precision is needed.
#
# The previous code requires two changes. The first
# one indicates that inputs are now of type
# ``DoubleTensorType``. The second change
# is the extra parameter ``dtype=numpy.float64``
# tells the conversion function that every real
# constant matrix such as the trained coefficients
# will be dumped as doubles and not as floats anymore.
initial_type = [("X", DoubleTensorType([None, None]))]
onx64 = convert_sklearn(gpr, initial_types=initial_type, target_opset=12)
sess64 = rt.InferenceSession(onx64.SerializeToString())
pred_onx64 = sess64.run(None, {"X": X_test})[0]
print(pred_onx64[0, :10])
################################
# The new differences look much better.
diff = numpy.sort(numpy.abs(numpy.squeeze(pred_skl) - numpy.squeeze(pred_onx64)))[-5:]
print(diff)
print("min(Y)-max(Y):", min(y_test), max(y_test))
####################################
# Size increase
# +++++++++++++
#
# As a result, the ONNX model is almost twice bigger
# because every coefficient is stored as double and
# and not as floats anymore.
size32 = len(onx.SerializeToString())
size64 = len(onx64.SerializeToString())
print("ONNX with floats:", size32)
print("ONNX with doubles:", size64)
#################################
# return_std=True
# +++++++++++++++
#
# `GaussianProcessRegressor <https://scikit-learn.org/stable/modules/
# generated/sklearn.gaussian_process.GaussianProcessRegressor.html>`_
# is one model which defined additional parameter to the predict function.
# If call with ``return_std=True``, the class returns one more results
# and that needs to be reflected into the generated ONNX graph.
# The converter needs to know that an extended graph is required.
# That's done through the option mechanism
# (see :ref:`l-conv-options`).
initial_type = [("X", DoubleTensorType([None, None]))]
options = {GaussianProcessRegressor: {"return_std": True}}
try:
onx64_std = convert_sklearn(
gpr, initial_types=initial_type, options=options, target_opset=12
)
except RuntimeError as e:
print(e)
######################################
# This error highlights the fact that the *scikit-learn*
# computes internal variables on first call to method predict.
# The converter needs them to be initialized by calling method
# predict at least once and then converting again.
gpr.predict(X_test[:1], return_std=True)
onx64_std = convert_sklearn(
gpr, initial_types=initial_type, options=options, target_opset=12
)
sess64_std = rt.InferenceSession(onx64_std.SerializeToString())
pred_onx64_std = sess64_std.run(None, {"X": X_test[:5]})
pprint.pprint(pred_onx64_std)
###############################
# Let's compare with *scikit-learn* prediction.
pprint.pprint(gpr.predict(X_test[:5], return_std=True))
#######################################
# It looks good. Let's do a better checks.
pred_onx64_std = sess64_std.run(None, {"X": X_test})
pred_std = gpr.predict(X_test, return_std=True)
diff = numpy.sort(
numpy.abs(numpy.squeeze(pred_onx64_std[1]) - numpy.squeeze(pred_std[1]))
)[-5:]
print(diff)
#################################
# There are some discrepencies but it seems reasonable.
#
# **Versions used for this example**
print("numpy:", numpy.__version__)
print("scikit-learn:", sklearn.__version__)
print("onnx: ", onnx.__version__)
print("onnxruntime: ", rt.__version__)
print("skl2onnx: ", skl2onnx.__version__)
| [
"noreply@github.com"
] | noreply@github.com |
f69ff6497829390c33f93193168cccea268172d5 | 32ce76e47881f7fd1bad2b92771e55f6e8235334 | /app/forms.py | 944de38b022585b8c831d18bab7006693cc17e07 | [] | no_license | Larisa1992/E9_TEST | 29560266d8e3ed5b9c8cada2170b4d991df87d36 | d75980634b5b2a11d6ff4e6cfc1827e46efe5bc1 | refs/heads/master | 2023-01-05T23:08:34.910779 | 2020-11-05T20:21:33 | 2020-11-05T20:21:33 | 310,408,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from wtforms.fields.html5 import DateField
from wtforms.validators import DataRequired
class EventsForm(FlaskForm):
city = StringField('city')
date = StringField('date')
from_date = DateField('date', format='%d-%m-%y', validators=[DataRequired()])
to_date = DateField('date', format='%d-%m-%y', validators=[DataRequired()])
temperature = StringField('temperature') | [
"pelar1992@mail.ru"
] | pelar1992@mail.ru |
f48df9f0bb342216d37d09d9789d9bc31d5bf374 | d63338e7b697f1ada2ef0da9dcccbdc2922fe358 | /cli/diaspora/models.py | b7204082a5272fc8079f011eb9720c2c368c9f09 | [] | no_license | panchorifa/diaspora | 39eabd8036242244c49ee5fdca45e06db95e2bbb | b28cabd994bbf0fadd6a900851a8b9be6b09437d | refs/heads/master | 2021-07-09T04:06:51.667980 | 2017-10-02T11:59:09 | 2017-10-02T11:59:09 | 104,109,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,901 | py | """
diaspora models
"""
import datetime
import operator
class StreamPost(object):
def __init__(self, post):
self.author = post['author_name'].encode('utf-8').strip()
self.time = post['post_time']
self.likes = "{}".format(post['post_likes'])
self.reshares = "{}".format(post['post_reshares'])
def __str__(self):
return '{} {} {} {}'.format(
self.time.rjust(35),
self.author.rjust(45),
self.likes.rjust(15),
self.reshares.rjust(15))
class Stream(object):
def __init__(self, res):
posts = res.json()['body']['stream']
self.posts = [StreamPost(post) for post in posts]
def __str__(self):
print '{} {} {} {}'.format(
'time'.rjust(35),
'author'.rjust(45),
'likes'.rjust(15),
'reshares'.rjust(15))
print "="*120
for post in self.posts:
print post
return "="*120
class Stats(object):
def __init__(self, res):
posts = res.json()['body']['stream']
self.posts = [StreamPost(post) for post in posts]
self.stats = {}
for post in self.posts:
if post.author not in self.stats:
self.stats[post.author] = []
self.stats[post.author].append(post)
def __str__(self):
print "\n\n="*80
print '{} {}'.format(
'author'.rjust(55),
'posts'.rjust(15))
print "="*80
unsorted = {author: len(posts) for author, posts in self.stats.iteritems()}
sortedStats = sorted(unsorted.iteritems(),
key=operator.itemgetter(1), reverse=True)
for author, posts in sortedStats:
print "{} {}".format(
author.rjust(55),
"{}".format(posts).rjust(15)
)
return "="*80
| [
"victor@1910.io"
] | victor@1910.io |
44465a4a6db8996eacce62966259ef8c47a0909e | 1915774790a77a630c00e70738ac41a315f5a2cb | /doorscalc/migrations/0034_order.py | 0f5f4e76a216a981d62729e85601dd332467b201 | [] | no_license | coconutcake/hajduktools | 842948646d2e8d3368b4d420d73bba981d649d43 | 6f9e678a1168195d77d1163bc9145205d03bb141 | refs/heads/master | 2020-07-02T20:02:19.914649 | 2019-09-13T17:44:05 | 2019-09-13T17:44:05 | 201,648,138 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | # Generated by Django 2.1.11 on 2019-08-21 11:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('doorscalc', '0033_auto_20190821_0947'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('w', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Width')),
('h', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Height')),
('d', models.DecimalField(decimal_places=0, max_digits=3, verbose_name='Depth')),
('status', models.CharField(choices=[('Pending', 'Pending'), ('Accepted', 'Accepted'), ('Ordered', 'Ordred')], default='Pending', help_text='Status zamówienia', max_length=50, null=True, verbose_name='Status')),
('published_date', models.DateTimeField(blank=True, null=True)),
('door', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='doorscalc.Door', verbose_name='Type')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"contact@mign.pl"
] | contact@mign.pl |
67032eeb4f2ea4129a550c569f568b99688b109e | c9952dcac5658940508ddc139344a7243a591c87 | /lab/lecture/reader1.py | d3885dfb767c08b06da281dc2472b7af0b93c299 | [] | no_license | wongcyrus/ite3101_introduction_to_programming | 5da1c15212528423b3df91997327fe148abef4de | 7cd76d0861d5355db5a6e2e171735bee2e78f829 | refs/heads/master | 2023-08-31T17:27:06.193049 | 2023-08-21T08:30:26 | 2023-08-21T08:30:26 | 136,574,036 | 3 | 2 | null | 2023-08-21T08:30:28 | 2018-06-08T06:06:49 | Python | UTF-8 | Python | false | false | 44 | py | message = input('Message? ')
print(message)
| [
"cywong@vtc.edu.hk"
] | cywong@vtc.edu.hk |
007f23698d6912bc77866da737020febfb52d7a3 | 084b7fbcaecf589546d89b825d03e8861fec0f07 | /mdx_bib/styles.py | ec47b9ea2cadd6b74c08ae1be7c144aa6b85aa2c | [
"MIT"
] | permissive | tramebleue/mdx_bib | b9e6d3f5cd696ff0baca69eeb84c114db1da8aae | 34e978ef4804d22b40ecc3438f1ebe266bfa9225 | refs/heads/master | 2020-04-20T13:11:15.436648 | 2019-02-02T20:11:15 | 2019-02-02T20:11:15 | 168,862,224 | 0 | 0 | null | 2019-02-02T18:14:08 | 2019-02-02T18:14:08 | null | UTF-8 | Python | false | false | 1,446 | py | from pybtex.style.formatting.unsrt import Style, date, pages
from pybtex.richtext import Symbol, Text
from pybtex.style.formatting import toplevel
from pybtex.richtext import Text, Tag
from pybtex.style.template import (
field, first_of, href, join, names, optional, optional_field, sentence,
tag, together, words, _format_list
)
from pybtex.style.names import lastfirst
class APAStyle(Style):
def __init__(self, name_style=lastfirst.NameStyle, abbreviate_names=True, *args, **kwargs):
kwargs['name_style'] = name_style
kwargs['abbreviate_names'] = abbreviate_names
super().__init__(*args, **kwargs)
def get_article_template(self, e):
volume_and_pages = first_of [
# volume and pages, with optional issue number
optional [
join [
field('volume'),
optional['(', field('number'),')'],
':', pages
],
],
# pages only
words ['pages', pages],
]
template = toplevel [
self.format_names('author'),
join ['(', field('year'), ').'],
self.format_title(e, 'title'),
sentence [
tag('em') [field('journal')],
optional[ volume_and_pages ]],
sentence [ optional_field('note') ],
self.format_web_refs(e),
]
return template
| [
"christophe.rousson@gmail.com"
] | christophe.rousson@gmail.com |
fd02c276130026302ff9adbde59149419c9895ca | dd60abfce153fe980443e19ca830e314e0d2bdde | /pandas/core/array_algos/take.py | 30529ae62f56f5025010c26a3797019014f7d559 | [
"BSD-3-Clause"
] | permissive | datapythonista/pandas | f4111081e22d2fdd608f5ad6b05d76f9f08dbf6c | 54bf475fd4d38a08a353a47e44dfecce24cdfb4b | refs/heads/main | 2023-06-26T01:03:42.644407 | 2023-06-23T21:20:49 | 2023-06-23T21:20:49 | 29,217,872 | 2 | 1 | BSD-3-Clause | 2021-07-05T20:58:13 | 2015-01-13T23:53:20 | Python | UTF-8 | Python | false | false | 20,920 | py | from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import numpy as np
from pandas._libs import (
algos as libalgos,
lib,
)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_dtype,
)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
if TYPE_CHECKING:
from pandas._typing import (
ArrayLike,
AxisInt,
npt,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.arrays.base import ExtensionArray
@overload
def take_nd(
arr: np.ndarray,
indexer,
axis: AxisInt = ...,
fill_value=...,
allow_fill: bool = ...,
) -> np.ndarray:
...
@overload
def take_nd(
arr: ExtensionArray,
indexer,
axis: AxisInt = ...,
fill_value=...,
allow_fill: bool = ...,
) -> ArrayLike:
...
def take_nd(
arr: ArrayLike,
indexer,
axis: AxisInt = 0,
fill_value=lib.no_default,
allow_fill: bool = True,
) -> ArrayLike:
"""
Specialized Cython take which sets NaN values in one pass
This dispatches to ``take`` defined on ExtensionArrays. It does not
currently dispatch to ``SparseArray.take`` for sparse ``arr``.
Note: this function assumes that the indexer is a valid(ated) indexer with
no out of bound indices.
Parameters
----------
arr : np.ndarray or ExtensionArray
Input array.
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
indices are filed with fill_value
axis : int, default 0
Axis to take from
fill_value : any, default np.nan
Fill value to replace -1 values with
allow_fill : bool, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
Returns
-------
subarray : np.ndarray or ExtensionArray
May be the same type as the input, or cast to an ndarray.
"""
if fill_value is lib.no_default:
fill_value = na_value_for_dtype(arr.dtype, compat=False)
elif isinstance(arr.dtype, np.dtype) and arr.dtype.kind in "mM":
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if arr.dtype != dtype:
# EA.take is strict about returning a new object of the same type
# so for that case cast upfront
arr = arr.astype(dtype)
if not isinstance(arr, np.ndarray):
# i.e. ExtensionArray,
# includes for EA to catch DatetimeArray, TimedeltaArray
if not is_1d_only_ea_dtype(arr.dtype):
# i.e. DatetimeArray, TimedeltaArray
arr = cast("NDArrayBackedExtensionArray", arr)
return arr.take(
indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
)
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
arr = np.asarray(arr)
return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
def _take_nd_ndarray(
arr: np.ndarray,
indexer: npt.NDArray[np.intp] | None,
axis: AxisInt,
fill_value,
allow_fill: bool,
) -> np.ndarray:
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.intp)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = ensure_platform_int(indexer)
dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
arr, indexer, fill_value, allow_fill
)
flip_order = False
if arr.ndim == 2 and arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out_shape_ = list(arr.shape)
out_shape_[axis] = len(indexer)
out_shape = tuple(out_shape_)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order="F")
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
def take_1d(
arr: ArrayLike,
indexer: npt.NDArray[np.intp],
fill_value=None,
allow_fill: bool = True,
mask: npt.NDArray[np.bool_] | None = None,
) -> ArrayLike:
"""
Specialized version for 1D arrays. Differences compared to `take_nd`:
- Assumes input array has already been converted to numpy array / EA
- Assumes indexer is already guaranteed to be intp dtype ndarray
- Only works for 1D arrays
To ensure the lowest possible overhead.
Note: similarly to `take_nd`, this function assumes that the indexer is
a valid(ated) indexer with no out of bound indices.
Parameters
----------
arr : np.ndarray or ExtensionArray
Input array.
indexer : ndarray
1-D array of indices to take (validated indices, intp dtype).
fill_value : any, default np.nan
Fill value to replace -1 values with
allow_fill : bool, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
mask : np.ndarray, optional, default None
If `allow_fill` is True, and the mask (where indexer == -1) is already
known, it can be passed to avoid recomputation.
"""
if not isinstance(arr, np.ndarray):
# ExtensionArray -> dispatch to their method
return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
if not allow_fill:
return arr.take(indexer)
dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
arr, indexer, fill_value, True, mask
)
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out = np.empty(indexer.shape, dtype=dtype)
func = _get_take_nd_function(
arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
)
func(arr, indexer, out, fill_value)
return out
def take_2d_multi(
arr: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
fill_value=np.nan,
) -> np.ndarray:
"""
Specialized Cython take which sets NaN values in one pass.
"""
# This is only called from one place in DataFrame._reindex_multi,
# so we know indexer is well-behaved.
assert indexer is not None
assert indexer[0] is not None
assert indexer[1] is not None
row_idx, col_idx = indexer
row_idx = ensure_platform_int(row_idx)
col_idx = ensure_platform_int(col_idx)
indexer = row_idx, col_idx
mask_info = None
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype:
# check if promotion is actually required based on indexer
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if not (row_needs or col_needs):
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is not None:
func(arr, indexer, out=out, fill_value=fill_value)
else:
# test_reindex_multi
_take_2d_multi_object(
arr, indexer, out, fill_value=fill_value, mask_info=mask_info
)
return out
@functools.lru_cache
def _get_take_nd_function_cached(
ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt
):
"""
Part of _get_take_nd_function below that doesn't need `mask_info` and thus
can be cached (mask_info potentially contains a numpy ndarray which is not
hashable and thus cannot be used as argument for cached function).
"""
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
# We get here with string, uint, float16, and complex dtypes that could
# potentially be handled in algos_take_helper.
# Also a couple with (M8[ns], object) and (m8[ns], object)
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
return None
def _get_take_nd_function(
ndim: int,
arr_dtype: np.dtype,
out_dtype: np.dtype,
axis: AxisInt = 0,
mask_info=None,
):
"""
Get the appropriate "take" implementation for the given dimension, axis
and dtypes.
"""
func = None
if ndim <= 2:
# for this part we don't need `mask_info` -> use the cached algo lookup
func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
if func is None:
def func(arr, indexer, out, fill_value=np.nan) -> None:
indexer = ensure_platform_int(indexer)
_take_nd_object(
arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
)
return func
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
) -> None:
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
# FIXME: if we get here with dt64/td64 we need to be sure we have
# matching resos
if fill_value.dtype.kind == "m":
fill_value = fill_value.astype("m8[ns]")
else:
fill_value = fill_value.astype("M8[ns]")
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
) -> None:
if conv_dtype == object:
# GH#39755 avoid casting dt64/td64 to integers
arr = ensure_wrapped_if_datetimelike(arr)
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
_take_1d_dict = {
("int8", "int8"): libalgos.take_1d_int8_int8,
("int8", "int32"): libalgos.take_1d_int8_int32,
("int8", "int64"): libalgos.take_1d_int8_int64,
("int8", "float64"): libalgos.take_1d_int8_float64,
("int16", "int16"): libalgos.take_1d_int16_int16,
("int16", "int32"): libalgos.take_1d_int16_int32,
("int16", "int64"): libalgos.take_1d_int16_int64,
("int16", "float64"): libalgos.take_1d_int16_float64,
("int32", "int32"): libalgos.take_1d_int32_int32,
("int32", "int64"): libalgos.take_1d_int32_int64,
("int32", "float64"): libalgos.take_1d_int32_float64,
("int64", "int64"): libalgos.take_1d_int64_int64,
("int64", "float64"): libalgos.take_1d_int64_float64,
("float32", "float32"): libalgos.take_1d_float32_float32,
("float32", "float64"): libalgos.take_1d_float32_float64,
("float64", "float64"): libalgos.take_1d_float64_float64,
("object", "object"): libalgos.take_1d_object_object,
("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
),
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
),
}
_take_2d_axis0_dict = {
("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
("object", "object"): libalgos.take_2d_axis0_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_axis0_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
_take_2d_axis1_dict = {
("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
("object", "object"): libalgos.take_2d_axis1_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_axis1_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
_take_2d_multi_dict = {
("int8", "int8"): libalgos.take_2d_multi_int8_int8,
("int8", "int32"): libalgos.take_2d_multi_int8_int32,
("int8", "int64"): libalgos.take_2d_multi_int8_int64,
("int8", "float64"): libalgos.take_2d_multi_int8_float64,
("int16", "int16"): libalgos.take_2d_multi_int16_int16,
("int16", "int32"): libalgos.take_2d_multi_int16_int32,
("int16", "int64"): libalgos.take_2d_multi_int16_int64,
("int16", "float64"): libalgos.take_2d_multi_int16_float64,
("int32", "int32"): libalgos.take_2d_multi_int32_int32,
("int32", "int64"): libalgos.take_2d_multi_int32_int64,
("int32", "float64"): libalgos.take_2d_multi_int32_float64,
("int64", "int64"): libalgos.take_2d_multi_int64_int64,
("int64", "float64"): libalgos.take_2d_multi_int64_float64,
("float32", "float32"): libalgos.take_2d_multi_float32_float32,
("float32", "float64"): libalgos.take_2d_multi_float32_float64,
("float64", "float64"): libalgos.take_2d_multi_float64_float64,
("object", "object"): libalgos.take_2d_multi_object_object,
("bool", "bool"): _view_wrapper(
libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
),
("bool", "object"): _view_wrapper(
libalgos.take_2d_multi_bool_object, np.uint8, None
),
("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
),
}
def _take_nd_object(
arr: np.ndarray,
indexer: npt.NDArray[np.intp],
out: np.ndarray,
axis: AxisInt,
fill_value,
mask_info,
) -> None:
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(indexer, axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
def _take_2d_multi_object(
arr: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value,
mask_info,
) -> None:
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer # both np.intp
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i, u_ in enumerate(row_idx):
if u_ != -1:
for j, v in enumerate(col_idx):
if v != -1:
out[i, j] = arr[u_, v]
def _take_preprocess_indexer_and_fill_value(
arr: np.ndarray,
indexer: npt.NDArray[np.intp],
fill_value,
allow_fill: bool,
mask: npt.NDArray[np.bool_] | None = None,
):
mask_info: tuple[np.ndarray | None, bool] | None = None
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype:
# check if promotion is actually required based on indexer
if mask is not None:
needs_masking = True
else:
mask = indexer == -1
needs_masking = bool(mask.any())
mask_info = mask, needs_masking
if not needs_masking:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
return dtype, fill_value, mask_info
| [
"noreply@github.com"
] | noreply@github.com |
f3c1cedc3effb5981e3dc9a4b38161f343dc0da1 | 27877bd13142480f8f23f344519ba8851d97dce5 | /pyheos/source.py | a3de6c06e0f14243691456e4a3b8eadfcf2b663e | [
"MIT"
] | permissive | nellering/pyheos | 48a4ee3a62b6bb997cadf6cae9fb2e6ac51300c6 | f66115e8ef432adf729e98ab3fbffb748ab0c03e | refs/heads/master | 2020-05-16T08:00:44.970190 | 2019-04-19T00:55:11 | 2019-04-19T00:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,927 | py | """Define the heos source module."""
from typing import Optional, Sequence # pylint: disable=unused-import
class InputSource:
    """An input source (e.g. an AUX jack) attached to a specific HEOS player."""

    def __init__(self, player_id: int, name: str, input_name: str):
        """Record the owning player id and the two names of the source."""
        self._player_id = player_id
        self._name = name
        self._input_name = input_name

    def __str__(self):
        """Return a short human-readable description of the source."""
        return f"<{self._name} ({self._input_name})>"

    def __repr__(self):
        """Return a debug description including the owning player id."""
        return f"<{self._name} ({self._input_name}) on {self._player_id}>"

    @property
    def name(self) -> str:
        """The friendly display name of the source."""
        return self._name

    @property
    def input_name(self) -> str:
        """The raw HEOS input source name."""
        return self._input_name

    @property
    def player_id(self) -> int:
        """Identifier of the player this source belongs to."""
        return self._player_id
class HeosSource:
    """Define an individual heos source."""
    def __init__(self, commands, data: Optional[dict] = None):
        """Init the source class.

        commands: command executor used by browse(); its interface is
        defined elsewhere in the package.
        data: optional HEOS payload dict; when given, attributes are
        populated immediately via _from_data.
        """
        self._commands = commands
        self._name = None  # type: str
        self._image_url = None  # type: str
        self._type = None  # type: str
        self._source_id = None  # type: int
        self._available = None  # type: bool
        self._service_username = None  # type: str
        self._container = None  # type: bool
        self._media_id = None  # type: str
        self._playable = None  # type: bool
        if data:
            self._from_data(data)
    def _from_data(self, data: dict):
        """Populate attributes from a HEOS payload dict.

        'name', 'image_url' and 'type' are accessed directly and so are
        required keys (KeyError if absent); the remaining keys are optional.
        """
        self._name = data['name']
        self._image_url = data['image_url']
        self._type = data['type']
        source_id = data.get('sid')
        if source_id:
            self._source_id = int(source_id)
        # HEOS encodes booleans as strings; note 'available' uses 'true'
        # while 'container'/'playable' use 'yes'.
        self._available = data.get('available') == 'true'
        self._service_username = data.get('service_username')
        self._container = data.get('container') == 'yes'
        self._media_id = data.get('mid')
        self._playable = data.get('playable') == 'yes'
    def __str__(self):
        """Get a user-readable representation of the source."""
        return "<{} ({})>".format(self._name, self._type)
    def __repr__(self):
        """Get a debug representation of the source."""
        return "<{} ({}) {}>".format(self._name, self._type, self._source_id)
    async def browse(self) -> 'Sequence[HeosSource]':
        """Browse the contents of the current source."""
        items = await self._commands.browse(self._source_id)
        return [HeosSource(self._commands, item) for item in items]
    @property
    def name(self) -> str:
        """Get the name of the source."""
        return self._name
    @property
    def image_url(self) -> str:
        """Get the image url of the source."""
        return self._image_url
    @property
    def type(self) -> str:
        """Get the type of the source."""
        return self._type
    @property
    def source_id(self) -> int:
        """Get the id of the source."""
        return self._source_id
    @property
    def available(self) -> bool:
        """Return True if the source is available."""
        return self._available
    @property
    def service_username(self) -> str:
        """Get the service username."""
        return self._service_username
    @property
    def media_id(self) -> str:
        """Get the media id."""
        return self._media_id
    @property
    def container(self) -> bool:
        """Return True if the source is a container."""
        return self._container
    @property
    def playable(self) -> bool:
        """Return True if the source is playable."""
        return self._playable
| [
"6730289+andrewsayre@users.noreply.github.com"
] | 6730289+andrewsayre@users.noreply.github.com |
bd9b3e6313ea6387c3ad74cf5a29de28b414e98c | c24cc0544ff838b8eb837b941bf2425ce7895293 | /eggs/__init__.py | 5587e8f54e093b901242ce4f0b28eb5a8d58da44 | [] | no_license | mscroggs/readthedocstest | 16d9fc35f841a4398fc2e47e36016dc86f102564 | f0750170fe8aab0e69281eb8c98d9c513f9832c8 | refs/heads/master | 2021-01-01T16:22:31.053270 | 2017-07-21T08:01:18 | 2017-07-21T08:01:18 | 97,815,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | from eggs import *
from oil import *
| [
"matthew.w.scroggs@gmail.com"
] | matthew.w.scroggs@gmail.com |
1d1da31e0e1695d3f6ffc21265e6affb90838a5c | 921914140ffa6960a6fa012712edcb8017e89835 | /src/models/pcfg.py | 2e7843ccadea679a0f851ccf42695a76ee49de5d | [
"Apache-2.0"
] | permissive | plai-group/tvo_playground | 255cfd414399527dc238fc686e100476f1f18c43 | 47ecb62eaa01741deca57009eade754d40a04cb9 | refs/heads/master | 2023-04-27T11:20:30.626656 | 2023-04-14T16:45:31 | 2023-04-14T16:45:31 | 344,283,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,216 | py | import torch
import torch.nn as nn
import src.models.pcfg_util as util
from torch.distributions import *
from src.models.base import ProbModelBaseClass
import numpy as np
class GenerativeModel(nn.Module):
    """PCFG generative model p(tree) * p(obs | tree).

    Holds one learnable logit vector per non-terminal (its production
    distribution).  Likelihoods are ABC-style distances rather than true
    log densities (see the individual methods).
    """
    def __init__(self, grammar, production_probs_init=None, max_depth=30):
        super(GenerativeModel, self).__init__()
        self.grammar = grammar
        if self.grammar['name'] == 'polynomial':
            self.xs = torch.linspace(-10, 10, 100)
        if production_probs_init is None:
            # Random init: one logit per production of each non-terminal.
            self.production_logits = nn.ParameterDict({
                k: nn.Parameter(torch.randn((len(v),)))
                for k, v in grammar['productions'].items()})
        else:
            # Initialize logits to log of the provided probabilities.
            self.production_logits = nn.ParameterDict({
                k: nn.Parameter(torch.log(v))
                for k, v in production_probs_init.items()})
        self.max_depth = max_depth
    def sample_tree(self, symbol=None, depth=0):
        """Sample tree from prior.
        Args: start symbol
        Returns: list of lists or string
        """
        if symbol is None:
            symbol = self.grammar['start_symbol']
        if symbol in self.grammar['terminals']:
            return symbol
        elif depth > self.max_depth:
            # Truncate runaway recursion: return the bare non-terminal.
            return symbol
        else:
            dist = Categorical(logits=self.production_logits[symbol])
            production_index = dist.sample().detach()
            production = self.grammar['productions'][symbol][production_index]
            return [symbol] + \
                [self.sample_tree(s, depth=depth + 1) for s in production]
    def sample_tree_and_obs(self):
        """Samples a (tree, obs) tuple from prior."""
        tree = self.sample_tree()
        if self.grammar['name'] == 'astronomers':
            sentence = util.get_leaves(tree)
            obs = sentence
        elif self.grammar['name'] == 'polynomial':
            ys = util.eval_polynomial(tree, self.xs)
            obs = ys
        return tree, obs
    def sample_obs(self):
        """Samples obs from prior."""
        return self.sample_tree_and_obs()[1]
    def get_tree_log_prob(self, tree):
        """Log probability of tree.
        Args:
            tree: list of lists or string
        Returns: scalar tensor
        """
        if isinstance(tree, list):
            non_terminal = tree[0]
            subtrees = tree[1:]
            production = [util.get_root(subtree) for subtree in subtrees]
            production_index = util.get_production_index(
                non_terminal, production, self.grammar['productions'])
            dist = Categorical(logits=self.production_logits[non_terminal])
            log_prob = dist.log_prob(torch.tensor(production_index))
            subtree_log_probs = [self.get_tree_log_prob(subtree)
                                 for subtree in subtrees]
            return log_prob + sum(subtree_log_probs)
        else:
            # Terminals (and truncated non-terminals) contribute probability 1.
            return torch.zeros(())
    def get_sentence_log_likelihood(self, sentence, tree):
        """Minus ABC distance instead of log p(sentence | tree). ABC distance
        is the Levenshtein distance.
        Args:
            sentence: list of strings
            tree: list of lists or string
        Returns: scalar tensor"""
        sentence_from_tree = util.get_leaves(tree)
        levenshtein_distance = torch.tensor(
            util.get_levenshtein_distance(sentence_from_tree, sentence,
                                          self.grammar['terminals']),
            dtype=torch.float)
        # Alternative distances that were tried are kept below for reference.
        # if levenshtein_distance.item() == 0:
        #     return levenshtein_distance
        # else:
        #     return torch.tensor(float('-inf'))
        # return -(torch.exp(levenshtein_distance) - 1)
        # return -levenshtein_distance
        return -levenshtein_distance**2
    def get_polynomial_log_likelihood(self, ys, tree):
        """Minus ABC distance instead of log p(ys | tree, xs) where xs is
        torch.linspace(-10, 10, 100). ABC distance is log(1 + mse).
        Args:
            ys: torch.tensor of shape [100]
            tree: list of lists or string
        Returns: -log(1 + mse(ys, eval(tree))); scalar tensor
        """
        return -torch.log(
            1 + util.mse(ys, util.eval_polynomial(tree, self.xs)))
    def get_log_prob(self, tree, obs, sum_prior_and_likelihood=True):
        """Joint log probability p(obs, tree).
        Args:
            tree: list of lists or string
            obs: sentence (list of strings) or ys (torch.tensor of shape [100])
        Returns: scalar tensor (or a (prior, likelihood) pair when
        sum_prior_and_likelihood is False)
        """
        if self.grammar['name'] == 'astronomers':
            sentence = obs
            if sum_prior_and_likelihood:
                return self.get_tree_log_prob(tree) + \
                    self.get_sentence_log_likelihood(sentence, tree)
            else:
                return self.get_tree_log_prob(tree), self.get_sentence_log_likelihood(sentence, tree)
            # The following is the non-ABC version for which p(sentence | tree)
            # is 1 if tree's leaves match the sentence and 0 otherwise
            #
            # if util.get_leaves(tree) == sentence:
            #     return self.get_tree_log_prob(tree)
            # else:
            #     return torch.tensor(float('-inf'))
        elif self.grammar['name'] == 'polynomial':
            ys = obs
            if sum_prior_and_likelihood:
                return self.get_tree_log_prob(tree) + \
                    self.get_polynomial_log_likelihood(ys, tree)
            else:
                return self.get_tree_log_prob(tree), self.get_polynomial_log_likelihood(ys, tree)
class InferenceNetwork(nn.Module):
    """Amortized inference network q(tree | obs) for the PCFG.

    Embeds the observation once, then walks the tree top-down with a GRU
    cell whose input at each node is (obs embedding, previous production
    one-hot, non-terminal address one-hot); per-non-terminal MLP heads map
    the GRU state to production logits.
    """
    def __init__(self, grammar, obs_embedding_dim=100,
                 inference_hidden_dim=100, max_depth=30):
        super(InferenceNetwork, self).__init__()
        self.grammar = grammar
        self.obs_embedding_dim = obs_embedding_dim
        self.inference_hidden_dim = inference_hidden_dim
        self.max_depth = max_depth
        self.sample_address_embedding_dim = len(grammar['non_terminals'])
        self.word_embedding_dim = len(self.grammar['terminals'])
        if grammar['name'] == 'astronomers':
            # Sentences are embedded with a GRU over word one-hots.
            self.sentence_embedder_gru = nn.GRU(
                input_size=self.word_embedding_dim,
                hidden_size=self.obs_embedding_dim,
                num_layers=1)
        elif grammar['name'] == 'polynomial':
            # (xs, ys) curves are rendered to a 100x100 image and embedded
            # with a small CNN + MLP.
            self.xs = torch.linspace(-10, 10, 100)
            self.gray_embedder_cnn = nn.Sequential(
                nn.Conv2d(1, 20, 3),
                nn.ReLU(),
                nn.MaxPool2d(2),
                nn.Conv2d(20, 20, 3),
                nn.ReLU(),
                nn.MaxPool2d(2),
                nn.Conv2d(20, 20, 3),
                nn.ReLU(),
                nn.MaxPool2d(2),
                nn.Conv2d(20, 10, 3),
                nn.ReLU())
            self.gray_embedder_mlp = nn.Sequential(
                nn.Linear(640, 320),
                nn.ReLU(),
                nn.Linear(320, 160),
                nn.ReLU(),
                nn.Linear(160, obs_embedding_dim))
        # Production one-hots are padded to the largest production count.
        self.sample_embedding_dim = max(
            [len(v) for _, v in self.grammar['productions'].items()])
        self.inference_gru = nn.GRUCell(
            input_size=self.obs_embedding_dim + self.sample_embedding_dim
            + self.sample_address_embedding_dim,
            hidden_size=self.inference_hidden_dim)
        self.proposal_layers = nn.ModuleDict({
            k: nn.Sequential(nn.Linear(inference_hidden_dim, 50),
                             nn.ReLU(),
                             nn.Linear(50, 25),
                             nn.ReLU(),
                             nn.Linear(25, len(v)))
            for k, v in grammar['productions'].items()})
    def get_sentence_embedding(self, sentence):
        """Args:
            sentence: list of strings
        Returns: tensor of shape [obs_embedding_dim]
        """
        output, _ = self.sentence_embedder_gru(util.sentence_to_one_hots(
            sentence, self.grammar['terminals']).unsqueeze(1))
        return output[-1][0]
    def get_ys_embedding(self, ys):
        """Args:
            ys: tensor of shape [100]
        Returns: tensor of shape [obs_embedding_dim]
        """
        gray = util.xsys2gray(self.xs, ys)
        input_to_mlp = self.gray_embedder_cnn(
            gray.view(1, 1, 100, 100)).view(-1).squeeze(0)
        return self.gray_embedder_mlp(input_to_mlp).squeeze(0)
    def get_obs_embedding(self, obs):
        """Args:
            obs: sentence (list of strings) or ys (torch.tensor of shape [100])
        Returns: tensor of shape [obs_embedding_dim]
        """
        if self.grammar['name'] == 'astronomers':
            sentence = obs
            return self.get_sentence_embedding(sentence)
        elif self.grammar['name'] == 'polynomial':
            ys = obs
            return self.get_ys_embedding(ys)
    def get_logits_from_inference_gru_output(self, inference_gru_output,
                                             non_terminal):
        """Args:
            inference_gru_output: tensor of shape [inference_hidden_dim]
            non_terminal: string
        Returns: logits for Categorical distribution
        """
        input_ = inference_gru_output.unsqueeze(0)
        return self.proposal_layers[non_terminal](input_).squeeze(0)
    def get_sample_embedding(self, production_index):
        """Args: int
        Returns: one hot vector of shape [sample_embedding_dim]
        """
        return util.one_hot(torch.tensor([production_index]),
                            self.sample_embedding_dim)[0]
    def get_inference_gru_output(self, obs_embedding,
                                 previous_sample_embedding,
                                 sample_address_embedding, inference_hidden):
        """Args:
            obs_embedding: tensor [obs_embedding_dim]
            previous_sample_embedding: tensor [sample_embedding_dim]
            sample_address_embedding: tensor [sample_embedding_address_dim]
            inference_hidden: tensor [inference_hidden_dim]
        Returns: tensor [inference_hidden_dim]
        """
        return self.inference_gru(
            torch.cat([obs_embedding,
                       previous_sample_embedding,
                       sample_address_embedding]).unsqueeze(0),
            inference_hidden.unsqueeze(0)).squeeze(0)
    def get_tree_log_prob(self, tree, obs_embedding=None,
                          previous_sample_embedding=None,
                          inference_hidden=None, obs=None):
        """Log probability of tree given obs.
        Args:
            tree: list or string
            obs_embedding: tensor [obs_embedding_dim]
            previous_sample_embedding: tensor [sample_embedding_dim]
            inference_hidden: tensor [inference_hidden_dim]
            obs: sentence (list of strings) or ys (torch.tensor of shape [100])
        Returns: log_prob (scalar tensor)"""
        # Defaults: embed obs on demand; start with zero sample embedding
        # and zero hidden state at the root.
        if obs_embedding is None:
            obs_embedding = self.get_obs_embedding(obs)
        if previous_sample_embedding is None:
            previous_sample_embedding = torch.zeros(
                (self.sample_embedding_dim,))
        if inference_hidden is None:
            inference_hidden = torch.zeros((self.inference_hidden_dim,))
        if isinstance(tree, list):
            non_terminal = tree[0]
            sample_address_embedding = util.get_sample_address_embedding(
                non_terminal, self.grammar['non_terminals'])
            inference_gru_output = self.get_inference_gru_output(
                obs_embedding, previous_sample_embedding,
                sample_address_embedding, inference_hidden)
            subtrees = tree[1:]
            production = [util.get_root(subtree) for subtree in subtrees]
            production_index = util.get_production_index(
                non_terminal, production, self.grammar['productions'])
            sample_embedding = self.get_sample_embedding(production_index)
            logits = self.get_logits_from_inference_gru_output(
                inference_gru_output, non_terminal)
            dist = Categorical(logits=logits)
            log_prob = dist.log_prob(torch.tensor(production_index))
            subtree_log_probs = [
                self.get_tree_log_prob(subtree, obs_embedding,
                                       sample_embedding, inference_gru_output)
                for subtree in subtrees]
            return log_prob + sum(subtree_log_probs)
        else:
            # Terminals contribute probability 1 under q.
            return torch.zeros(())
    def sample_tree(self, symbol=None, obs_embedding=None,
                    previous_sample_embedding=None, inference_hidden=None,
                    obs=None, depth=0):
        """Samples a tree given a obs and a start symbol (can be terminal
        or non-terminal).
        Args:
            symbol: string
            obs_embedding: tensor [obs_embedding_dim]
            previous_sample_embedding: tensor [sample_embedding_dim]
            inference_hidden: tensor [inference_hidden_dim]
            obs: sentence (list of strings) or ys (torch.tensor of shape [100])
        Returns: tree
        """
        if symbol is None:
            symbol = self.grammar['start_symbol']
        if obs_embedding is None:
            obs_embedding = self.get_obs_embedding(obs)
        if previous_sample_embedding is None:
            previous_sample_embedding = torch.zeros(
                (self.sample_embedding_dim,))
        if inference_hidden is None:
            inference_hidden = torch.zeros((self.inference_hidden_dim,))
        if symbol in self.grammar['terminals']:
            return symbol
        elif depth > self.max_depth:
            # Truncate runaway recursion, mirroring the generative model.
            return symbol
        else:
            # import ipdb; ipdb.set_trace()
            sample_address_embedding = util.get_sample_address_embedding(
                symbol, self.grammar['non_terminals'])
            inference_gru_output = self.get_inference_gru_output(
                obs_embedding, previous_sample_embedding,
                sample_address_embedding, inference_hidden)
            logits = self.get_logits_from_inference_gru_output(
                inference_gru_output, symbol)
            dist = Categorical(logits=logits)
            production_index = dist.sample().detach()
            sample_embedding = self.get_sample_embedding(production_index)
            production = self.grammar['productions'][symbol][production_index]
            return [symbol] + [
                self.sample_tree(s, obs_embedding, sample_embedding,
                                 inference_gru_output, depth=depth + 1)
                for s in production]
    def sample_tree_relax(self, symbol=None, obs_embedding=None,
                          previous_sample_embedding=None,
                          inference_hidden=None, obs=None, depth=0):
        """Samples a tree given a obs and a start symbol (can be terminal
        or non-terminal), using the RELAX reparameterization: each node
        also returns the continuous auxiliary samples (z, z-tilde) that
        the control variate consumes.
        Args:
            symbol: string
            obs_embedding: tensor [obs_embedding_dim]
            previous_sample_embedding: tensor [sample_embedding_dim]
            inference_hidden: tensor [inference_hidden_dim]
            obs: sentence (list of strings) or ys (torch.tensor of shape [100])
        Returns:
            tree: e.g.
                ['S', ['NP', 'astronomers'],
                ['VP', ['V' 'saw'],
                ['NP' 'stars']]]
                or 'stars'
            tree_aux: e.g.
                [[0.5], [[.9, 1., .2, .1, -.1, .1], None],
                [[-0.3 0.8], [[0.3], None]
                [[.9, -.1, .2, .1, 1., .1], None]]]
                or None
            tree_aux_tilde: similar to tree_aux
        """
        if symbol is None:
            symbol = self.grammar['start_symbol']
        if obs_embedding is None:
            obs_embedding = self.get_obs_embedding(obs)
        if previous_sample_embedding is None:
            previous_sample_embedding = torch.zeros(
                (self.sample_embedding_dim,))
        if inference_hidden is None:
            inference_hidden = torch.zeros((self.inference_hidden_dim,))
        if symbol in self.grammar['terminals']:
            return symbol, None, None
        elif depth > self.max_depth:
            return symbol, None, None
        else:
            sample_address_embedding = util.get_sample_address_embedding(
                symbol, self.grammar['non_terminals'])
            inference_gru_output = self.get_inference_gru_output(
                obs_embedding, previous_sample_embedding,
                sample_address_embedding, inference_hidden)
            logits = self.get_logits_from_inference_gru_output(
                inference_gru_output, symbol)
            oh_production_index, production_index_aux, \
                production_index_aux_tilde = util.sample_relax(logits=logits)
            # The discrete choice is the argmax of the relaxed one-hot.
            production_index = torch.argmax(oh_production_index)
            sample_embedding = self.get_sample_embedding(production_index)
            production = self.grammar['productions'][symbol][production_index]
            tree = [symbol]
            tree_aux = [production_index_aux]
            tree_aux_tilde = [production_index_aux_tilde]
            for s in production:
                subtree, subtree_aux, subtree_aux_tilde = \
                    self.sample_tree_relax(
                        s, obs_embedding, sample_embedding,
                        inference_gru_output, depth=depth + 1)
                tree.append(subtree)
                tree_aux.append(subtree_aux)
                tree_aux_tilde.append(subtree_aux_tilde)
            return tree, tree_aux, tree_aux_tilde
class ControlVariate(nn.Module):
    """Learned control variate c(tree, aux, obs) for RELAX gradients.

    Embeds the observation with a GRU, recursively folds the tree and its
    auxiliary (relaxed) samples into a fixed-size vector with a GRU cell,
    and maps that vector to a scalar with an MLP.
    """
    def __init__(self, grammar, obs_embedding_dim=100,
                 tree_obs_embedding_dim=100):
        super(ControlVariate, self).__init__()
        self.grammar = grammar
        self.obs_embedding_dim = obs_embedding_dim
        self.word_embedding_dim = len(self.grammar['terminals'])
        self.tree_obs_embedding_dim = tree_obs_embedding_dim
        self.obs_embedder_gru = nn.GRU(
            input_size=self.word_embedding_dim,
            hidden_size=self.obs_embedding_dim,
            num_layers=1)
        self.sample_address_embedding_dim = len(grammar['non_terminals'])
        self.sample_embedding_dim = max(
            [len(v) for _, v in grammar['productions'].items()])
        self.tree_obs_embedder_gru = nn.GRUCell(
            input_size=self.obs_embedding_dim + self.sample_embedding_dim +
            self.sample_address_embedding_dim,
            hidden_size=tree_obs_embedding_dim)
        self.tree_obs_mlp = nn.Sequential(
            nn.Linear(tree_obs_embedding_dim, 50),
            nn.ReLU(),
            nn.Linear(50, 25),
            nn.ReLU(),
            nn.Linear(25, 1))
    def get_obs_embedding(self, obs):
        """Args:
            obs: list of strings
        Returns: tensor of shape [obs_embedding_dim]
        """
        output, _ = self.obs_embedder_gru(util.sentence_to_one_hots(
            obs, self.grammar['terminals']).unsqueeze(1))
        return output[-1][0]
    def get_tree_obs_gru_output(self, obs_embedding, sample_embedding,
                                sample_address_embedding, tree_obs_hidden):
        """Args:
            obs_embedding: tensor [obs_embedding_dim]
            sample_embedding: tensor [sample_embedding_dim]
            sample_address_embedding: tensor [sample_embedding_address_dim]
            tree_obs_hidden: tensor [tree_obs_embedding_dim]
        Returns: tensor of shape [tree_obs_embedding_dim]
        """
        return self.tree_obs_embedder_gru(
            torch.cat([obs_embedding,
                       sample_embedding,
                       sample_address_embedding]).unsqueeze(0),
            tree_obs_hidden.unsqueeze(0)).squeeze(0)
    def get_tree_obs_embedding(self, tree, tree_aux, obs_embedding):
        """Args:
            tree: e.g.
                ['S', ['NP', 'astronomers'],
                ['VP', ['V' 'saw'],
                ['NP' 'stars']]]
                or 'stars'
            tree_aux: e.g.
                [[0.5], [[.9, 1., .2, .1, -.1, .1], None],
                [[-0.3 0.8], [[0.3], None]
                [[.9, -.1, .2, .1, 1., .1], None]]]
                or None
            obs_embedding: tensor of shape [obs_embedding_dim]
        Returns: tensor of shape [tree_obs_embedding_dim]
        """
        if isinstance(tree, list):
            non_terminal = tree[0]
            sample_address_embedding = util.get_sample_address_embedding(
                non_terminal, self.grammar['non_terminals'])
            # Auxiliary sample is padded so all productions share one width.
            sample_embedding = util.pad_zeros(tree_aux[0],
                                             self.sample_embedding_dim)
            subtrees = tree[1:]
            subtrees_aux = tree_aux[1:]
            # Children embeddings are summed to form the hidden state.
            tree_obs_hidden = 0
            for subtree, subtree_aux in zip(subtrees, subtrees_aux):
                tree_obs_hidden += self.get_tree_obs_embedding(
                    subtree, subtree_aux, obs_embedding)
            return self.get_tree_obs_gru_output(
                obs_embedding, sample_embedding, sample_address_embedding,
                tree_obs_hidden)
        else:
            return torch.zeros((self.tree_obs_embedding_dim,))
    def control_variate_single(self, tree, tree_aux, obs_embedding):
        """Args:
            tree: e.g.
                ['S', ['NP', 'astronomers'],
                ['VP', ['V' 'saw'],
                ['NP' 'stars']]]
                or 'stars'
            tree_aux: e.g.
                [[0.5], [[.9, 1., .2, .1, -.1, .1], None],
                [[-0.3 0.8], [[0.3], None]
                [[.9, -.1, .2, .1, 1., .1], None]]]
                or None
            obs_embedding: tensor of shape [obs_embedding_dim]
        Returns: scalar tensor
        """
        return self.tree_obs_mlp(self.get_tree_obs_embedding(
            tree, tree_aux, obs_embedding).unsqueeze(0)).squeeze(0)
    def forward(self, trees, trees_aux, obs_embeddings):
        """Args:
            trees_aux: list of lists of shape [num_obs, num_particles] where
                each element is either a tree_aux or tree_aux_tilde
            obs_embeddings: list of tensors of length num_obs where each tensor
                is of shape [obs_embedding_dim]
        Returns: tensor of shape [num_obs]
        """
        num_obs = len(obs_embeddings)
        num_particles = len(trees_aux[0])
        c = torch.zeros(num_obs, num_particles)
        for obs_idx in range(num_obs):
            for particle_idx in range(num_particles):
                c[obs_idx, particle_idx] = self.control_variate_single(
                    trees[obs_idx][particle_idx],
                    trees_aux[obs_idx][particle_idx], obs_embeddings[obs_idx])
        # log-mean-exp over particles.
        return torch.logsumexp(c, dim=1) - np.log(num_particles)
class PCFG(ProbModelBaseClass):
    """Probabilistic model wrapper pairing the PCFG generative model with
    its amortized inference network.

    set_internals draws S trees per observation from q and caches the
    per-sample log prior / log likelihood / log q tensors that the base
    class's objectives consume.
    """
    def __init__(self, grammar, args, **kwargs):
        D = None # trees have no fixed dimension
        super(PCFG, self).__init__(D, args)
        self.generative_model = GenerativeModel(grammar, **kwargs)
        self.inference_network = InferenceNetwork(grammar, **kwargs)
        self._log_q = None
        self._log_prior = None
        self._log_likelihood = None
    def set_internals(self, data, S):
        """Sample S trees per obs in data[0] and cache their log terms."""
        self.x = data[0]
        self.y = False # unsupervised
        self.z = False # we compute log_prior, log_likelihood, log_guide directly below
        log_q = torch.zeros(len(self.x), S)
        log_prior = torch.zeros(len(self.x), S)
        log_likelihood = torch.zeros(len(self.x), S)
        # this is painful, batching difficult b/c of different length trees.
        # therefore iterate once and save log_prior, log_guide, log_likelihood
        # manually
        for obs_idx, obs in enumerate(self.x):
            for particle_idx in range(S):
                tree = self.inference_network.sample_tree(obs=obs)
                log_q_ = self.inference_network.get_tree_log_prob(tree, obs=obs)
                log_prior_, log_likelihood_ = self.generative_model.get_log_prob(tree, obs, sum_prior_and_likelihood=False)
                log_q[obs_idx, particle_idx] = log_q_
                log_prior[obs_idx, particle_idx] = log_prior_
                log_likelihood[obs_idx, particle_idx] = log_likelihood_
        self._log_q = log_q
        self._log_prior = log_prior
        self._log_likelihood = log_likelihood
        self.check_internals()
    def check_internals(self):
        super().check_internals()
        assert self._log_q is not None, "self._log_q not set"
        assert self._log_prior is not None, "self._log_prior not set"
        assert self._log_likelihood is not None, "self._log_likelihood not set"
    def sample_latent(self, S, sample=True):
        # Latents are trees sampled inside set_internals instead.
        raise ValueError("Sample latent not used in PCFG")
    def log_prior(self):
        return self._log_prior
    def log_guide(self):
        return self._log_q
    def log_likelihood(self):
        return self._log_likelihood
    def evaluate_pq(self, data_loader, epoch):
        """Compare learned p and q against the dataset's true model."""
        true_generative_model = data_loader.dataset.true_generative_model
        metrics = {
            "p_error":util.get_p_error(true_generative_model, self.generative_model),
            "q_error_to_true":util.get_q_error(true_generative_model, self.inference_network),
            "q_error_to_model":util.get_q_error(self.generative_model, self.inference_network)
        }
        return metrics
    def get_sleep_phi_loss(self, data):
        """Returns:
            loss: scalar that we call .backward() on and step the optimizer.
        """
        # Sleep phase: train q on (tree, obs) pairs dreamed from p.
        log_q_sum = 0
        for _ in range(self.args.S):
            tree, obs = self.generative_model.sample_tree_and_obs()
            log_q = self.inference_network.get_tree_log_prob(tree, obs=obs)
            log_q_sum = log_q_sum + log_q
        return -log_q_sum / self.args.S
| [
"vadmas@cs.ubc.ca"
] | vadmas@cs.ubc.ca |
3c94abe2f34bdb6f6c5aca4fdebf3308176a9065 | 01e73c10d9be842e1cdebd012ed72e660f67d454 | /estate/estate/utils/regex_tool.py | f045787013219f6ac29b7e7af35897aaad909990 | [] | no_license | netquake/wonder | b759b7ce2972a12a05cbb4f7b773e9ace3e54f23 | ce895fc59c8aec261f09deee47b53a6dfee9c30c | refs/heads/master | 2022-08-07T20:55:44.125995 | 2020-08-23T14:59:48 | 2020-08-23T14:59:48 | 229,778,551 | 0 | 1 | null | 2022-07-29T22:39:36 | 2019-12-23T15:31:40 | Python | UTF-8 | Python | false | false | 268 | py | from re import compile
class RegexTool(object):
    """Regex helpers for extracting numeric values from free-form strings."""

    # Matches an optionally signed decimal number such as "12", "-3.5" or ".75".
    # The previous pattern ('[\x00-\xff]*') matched ANY run of latin-1
    # characters, so float() raised ValueError whenever the string led with
    # non-numeric text, and strings starting with a non-latin-1 character
    # silently matched the empty string and returned 0.
    FLOAT_LOADER = compile(r'[-+]?\d*\.?\d+')

    @classmethod
    def read_float_from_string(cls, string):
        """Return the first number found in *string*, or 0.0 if none exists."""
        match = cls.FLOAT_LOADER.search(string)
        return float(match.group()) if match else 0.
| [
"barry.paneer@qq.com"
] | barry.paneer@qq.com |
116f8e435fb5d8680cbfdfa1bcd1a10fcf0d6ffb | e5bfc56a518b55ca0cea703235802d69124b45a9 | /largest.py | 9d521e6c7082686bd96da95b8cb78e178be0dc77 | [] | no_license | Arunsinistersixsix/python-class-cek | 7e8835f212581f292344e267b093a3dbbb8f302f | 20eaa80464b7ed8e33e33b1003ee39cbe93c46ee | refs/heads/master | 2021-06-24T07:07:55.871841 | 2019-09-19T10:20:20 | 2019-09-19T10:20:20 | 209,255,513 | 0 | 0 | null | 2021-03-20T01:43:36 | 2019-09-18T08:17:38 | HTML | UTF-8 | Python | false | false | 77 | py | if(int(input("a:")))>(int(input("b:"))):
print("a")
else:
print("b") | [
"arunakar47@gmail.com"
] | arunakar47@gmail.com |
492325495aaa4defd61051a6238326be058f6737 | b956020dacc1fdd6c6dcaf5fc79a74ad12731b89 | /words_list.py | 3492d8ca1ae3da66740c34f61109d864b1e1c8db | [] | no_license | Nikhil0417/Text-document-manipulation | f72a35ce69a33f02bfaa468ae60a10d56cfb0618 | 6a53129f94746392a27b9fcde9c345344a88660f | refs/heads/master | 2021-09-01T15:24:22.096194 | 2017-12-27T17:35:41 | 2017-12-27T17:35:41 | 114,145,544 | 0 | 0 | null | 2017-12-27T17:37:30 | 2017-12-13T16:46:41 | Python | UTF-8 | Python | false | false | 202 | py | fname = input("Enter file name: ")
fh = open(fname)
lst = list()
for line in fh:
line.rstrip()
words = line.split()
for word in words:
if word not in lst:
lst.append(word)
lst.sort()
print(lst)
| [
"noreply@github.com"
] | noreply@github.com |
c0fda12954d82dd3a44313c715b0d476d2c87363 | e5eec1428da1d24d3e9b86f5723c51cd2ca636cd | /백준 삼성역량테스트기출/시험감독.py | 4c3226fd88492be95fe560c0c9ef3c4b27668a7e | [] | no_license | jamwomsoo/Algorithm_prac | 3c36c381f59277721517d331a8f1640399d80c1d | 8393f3cc2f950214c47f3cf0b2c1271791f115d0 | refs/heads/master | 2023-06-09T06:49:14.739255 | 2021-06-18T06:41:01 | 2021-06-18T06:41:01 | 325,227,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py |
n = int(input())
a_lst = list(map(int, input().split()))
b, c = map(int, input().split())
total = 0
for i in range(n):
total+=1
a_lst[i] -= b
if a_lst[i] > 0:
total += a_lst[i]//c
z = a_lst[i] % c
if z > 0:
total+=1
print(total) | [
"41579282+jamwomsoo@users.noreply.github.com"
] | 41579282+jamwomsoo@users.noreply.github.com |
f23936e9589cd748e56c95468b5690c8f6aa9a39 | b270488062050a0354f2ece1fd4ac2a47572403a | /Lab_5/NLP.py | 4936592e24f2dcf2821e6944c759fa0263e26bdf | [] | no_license | cfcastaneda98/CS2302 | 960f766778c4d7c610823fcec11714240a326548 | 9ef205b75522fb54cfbdafbcb4669a71999ca0c0 | refs/heads/master | 2020-04-21T20:13:35.492006 | 2019-05-12T22:11:07 | 2019-05-12T22:11:07 | 169,836,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,594 | py | """
Author: Carlos Fernando Castaneda
Class : CS 2302
Date Modified: April 1, 2019
Instructor: Olac Fuentes
Assingment: Lab 5 Natural Language Processing
TA: Anindita Nath & Maliheh Zaragan
Purpose: to implement hash tables in order to compare its running times and
to succesfully find the comparison of two given words.
"""
#Imports various tools to help us calculate the hash tables to be used in this lab
import math
import time
# This class in the program is used to create objects of BTrees, or binary trees
class BST(object):
    """Node of a binary search tree.

    item holds the payload (here a [[word], embedding] pair); left/right
    are the child subtrees, or None when absent.
    """
    #Creates the Constructor
    def __init__(self, item, left=None, right=None):
        self.item = item
        self.left = left
        self.right = right
# Builds a hash table of a certain size
class HashTableC(object):
    """Chained hash table: self.item[i] is the bucket (list) for slot i."""
    # Creates the Constructor
    def __init__(self,size,num_items=0):
        # List of buckets; each bucket holds [key, value] pairs.
        self.item = []
        # Number of entries stored so far (incremented by InsertHTC).
        self.num_items = num_items
        # NOTE(review): this grows the table only when num_items // size == 1,
        # i.e. size <= num_items < 2*size -- presumably a one-shot resize for
        # rebuilding a table near full load; confirm intent. With the default
        # num_items=0 this branch never fires.
        if num_items//size==1:
            size = (size*2)+1
        for i in range(size):
            self.item.append([])
# Computes the dot product of two equal-length numeric sequences.
def dotProduct(e0,e1):
    """Return sum(e0[i] * e1[i]) over all positions of e0.

    Raises IndexError if e1 is shorter than e0, matching the original's
    index-based access.
    """
    total = 0
    for idx, value in enumerate(e0):
        total += value * e1[idx]
    return total
# Euclidean magnitude (L2 norm) of a vector, used by the similarity methods.
def Magnitude(ex):
    """Return sqrt of the sum of squared components of ex."""
    return math.sqrt(sum(component * component for component in ex))
# Inserts a [[word], embedding] item into the BST keyed on the word string.
def InsertBST(T,newItem):
    """Insert newItem into the subtree rooted at T and return the new root.

    Items are [[word], embedding] pairs; ordering compares the word strings
    (item[0][0]). Equal keys go to the right subtree.
    """
    # Empty subtree: the new item becomes a leaf here.
    # (Uses `is None` -- identity check -- instead of the original `== None`.)
    if T is None:
        T = BST(newItem)
    # Current key greater than the new key: insert on the left.
    elif T.item[0][0] > newItem[0][0]:
        T.left = InsertBST(T.left,newItem)
    # Otherwise (less than or equal): insert on the right.
    else:
        T.right = InsertBST(T.right,newItem)
    return T
# Counts the nodes of a binary tree.
def CountNodesBST(T):
    """Return the number of nodes in the subtree rooted at T (0 for None)."""
    if T is None:
        return 0
    # One for this node, plus everything in the two subtrees.
    return 1 + CountNodesBST(T.right) + CountNodesBST(T.left)
# Computes the height (maximum depth) of a binary tree.
def maxDepthBST(T):
    """Return the number of levels in the subtree rooted at T (0 for None)."""
    if T is None:
        return 0
    # Height is one more than the taller of the two subtrees.
    return 1 + max(maxDepthBST(T.left), maxDepthBST(T.right))
# Looks up a word k in the BST and returns its stored embedding.
def FindBST(T,k):
    """Return the embedding stored with word k, or None if k is absent.

    Nodes hold [[word], embedding] items; comparison is on the word string.
    The original combined `T is None or T.item[0][0] == k` in one branch,
    which dereferenced T.item on an empty subtree and raised AttributeError
    instead of returning None as its docstring promised.
    """
    if T is None:
        # Word not present in this subtree.
        return None
    if T.item[0][0] == k:
        return T.item[1]
    # Descend right for larger keys, left otherwise.
    if T.item[0][0] < k:
        return FindBST(T.right,k)
    return FindBST(T.left,k)
# Cosine similarity between the embeddings of two words stored in the BST.
def simulationBST(w0,w1,T):
    """Return cos(e0, e1) = (e0 . e1) / (|e0| * |e1|) for words w0 and w1.

    NOTE(review): presumably both words are expected to be in the tree;
    a missing word makes FindBST fail rather than return a usable value.
    """
    # Embedding of the first word.
    e0 = FindBST(T,w0)
    # Embedding of the second word.
    e1 = FindBST(T,w1)
    # Dot product of the two embeddings.
    dot = dotProduct(e0,e1)
    # Euclidean magnitudes of the embeddings.
    Me0 = Magnitude(e0)
    Me1 = Magnitude(e1)
    # Cosine similarity.
    result = (dot)/(Me0*Me1)
    return result
# Inserts an item into the chained hash table.
def InsertHTC(H,k,l):
    """Append (word, embedding l) to the bucket for word k[0][0].

    NOTE(review): despite the original header comment, duplicates are NOT
    checked -- the pair is appended unconditionally.
    """
    # Bucket index for the word.
    a = hashTable(k[0][0],len(H.item))
    # Store the [word, embedding] pair in its bucket.
    H.item[a].append([k[0][0],l])
    # Track the total number of stored entries.
    H.num_items += 1
# Looks up word k in the chained hash table.
def FindHTC(H,k):
    """Return the embedding stored for word k.

    NOTE(review): the return contract is inconsistent -- on a hit this
    returns the stored embedding (item[1]), but on a miss it returns the
    tuple (bucket, -1, -1), which callers such as simulationHTC cannot use
    as an embedding. Confirm intended miss behavior.
    """
    # Bucket index for the word.
    a = hashTable(k,len(H.item))
    # Linear scan of the bucket's [word, embedding] pairs.
    for i in range(len(H.item[a])):
        if H.item[a][i][0] == k:
            return H.item[a][i][1]
    return a, -1, -1
# Maps a string to a bucket index in [0, n) for the chained hash table.
def hashTable(s,n):
    """Rolling polynomial hash of s reduced modulo the table size n.

    Uses base 31 for the rolling multiplier. The original multiplied by n,
    the table size itself, but (value * n) % n is always 0, so every string
    hashed to ord(last_char) % n -- effectively a one-character hash that
    clustered all words sharing a final letter into the same bucket.
    """
    value = 0
    for c in s:
        value = (value*31 + ord(c)) % n
    return value
# Computes the load factor (average entries per bucket) of the hash table.
def LoadFactorHTC(H):
    """Return total stored entries divided by the number of buckets."""
    total_entries = 0.0
    for bucket in H.item:
        total_entries += len(bucket)
    return total_entries / len(H.item)
# Computes how similar two words are, using embeddings stored in a chained
# hash table.
def simulationHTC(w0, w1, H):
    """Return the cosine similarity between the embeddings of words w0 and
    w1, both looked up in the hash table H."""
    # Look up the embedding vector of each word.
    vec0 = FindHTC(H, w0)
    vec1 = FindHTC(H, w1)
    # Cosine similarity: dot(v0, v1) / (|v0| * |v1|).
    return dotProduct(vec0, vec1) / (Magnitude(vec0) * Magnitude(vec1))
#Builds a binary search tree of GloVe embeddings and reports word-similarity queries against it.
def BSTComparison():
    """Build a BST from 'glove.6B.50d.txt', print tree stats, then print a
    fixed set of cosine-similarity queries.

    Relies on module-level names InsertBST, CountNodesBST, maxDepthBST,
    simulationBST, time, and the global `start` timestamp set at script
    launch -- so each "running time" below is time elapsed since program
    start, not the duration of this phase alone.
    """
    #Creates an empty BST called T
    T = None
    #Opens the text file 'glove.6B.50d.txt', which will be used to determine if the words being used are similar or not.
    f = open('glove.6B.50d.txt',encoding='utf-8')
    #Goes through the entire text file and inserts the items inside of a BST
    for line in f:
        lines = line.split()
        name = [lines[0]]
        nums = []
        for i in range(len(lines)-1):
            nums.append(float(lines[i+1]))
        #Each record is [[word], embedding_vector]
        p = [name,nums]
        T = InsertBST(T,p)
    print()
    #Prints the Binary Search Tree stats, including the number of nodes, the height, and the running time it took to create the binary search tree
    print("Binary Search Tree stats:")
    #Prints the number of nodes
    print("Number of nodes: ",CountNodesBST(T))
    #Prints the height of the Binary Search Tree
    print("Height: ",maxDepthBST(T))
    #Elapsed seconds since program launch, reported as construction time
    elapsed_time_CONSTRUCTION_BST = time.time()-start
    #Reports the time it took to create the BST
    print("Running time for binary search tree construction:", round(elapsed_time_CONSTRUCTION_BST),"seconds")
    print()
    #Alerts the user that it is detecting the similarities between the words
    print("Reading word file to determine similarities")
    print()
    #Prints the words being compared, and the value of its comparison
    print("Word similarities found:")
    print("Similarity [bear,bear] = ",round(simulationBST('bear','bear',T),4))
    print("Similarity [barley,shrimp] = ",round(simulationBST('barley','shrimp',T),4))
    print("Similarity [barley,oat] = ",round(simulationBST('barley','oat',T),4))
    print("Similarity [federer,baseball] = ",round(simulationBST('federer','baseball',T),4))
    print("Similarity [federer,tennis] = ",round(simulationBST('federer','tennis',T),4))
    print("Similarity [harvard,stanford] = ",round(simulationBST('harvard','stanford',T),4))
    print("Similarity [harvard,utep] = ",round(simulationBST('harvard','utep',T),4))
    print("Similarity [harvard,ant] = ",round(simulationBST('harvard','ant',T),4))
    print("Similarity [raven,crow] = ",round(simulationBST('raven','crow',T),4))
    print("Similarity [raven,whale] = ",round(simulationBST('raven','whale',T),4))
    print("Similarity [spain,france] = ",round(simulationBST('spain','france',T),4))
    print("Similarity [spain,mexico] = ",round(simulationBST('spain','mexico',T),4))
    print("Similarity [mexico,france] = ",round(simulationBST('mexico','france',T),4))
    print("Similarity [mexico,guatemala] = ",round(simulationBST('mexico','guatemala',T),4))
    print("Similarity [computer,platypus] = ",round(simulationBST('computer','platypus',T),4))
    print()
    #Elapsed seconds since program launch, reported as query-processing time
    elapsed_time_QUERY_BST = time.time()-start
    #Reports the timer for the query processing of the BST
    print("Running time for binary search tree query processing:", round(elapsed_time_QUERY_BST),"seconds")
#Builds a chained hash table of GloVe embeddings and reports word-similarity queries against it.
def HTCComparison():
    """Build a chained hash table from 'glove.6B.50d.txt', print table
    stats, then print a fixed set of cosine-similarity queries.

    Relies on module-level names HashTableC, InsertHTC, LoadFactorHTC,
    simulationHTC, time, and the global `start` timestamp set at script
    launch -- so each "running time" is time elapsed since program start.

    NOTE(review): the "Final table size", "Percentage of empty lists" and
    "Standard deviation" lines print their labels with no value -- those
    statistics were never implemented.
    """
    #Creates an empty Hash Table with 12 items
    IV = 12
    H = HashTableC(IV)
    #Opens the text file 'glove.6B.50d.txt', which will be used to determine if the words being used are similar or not.
    f = open('glove.6B.50d.txt',encoding='utf-8')
    #Goes through the entire text file and inserts the items inside of a HTC
    for line in f:
        lines = line.split()
        name = [lines[0]]
        nums = []
        for i in range(len(lines)-1):
            nums.append(float(lines[i+1]))
        #Each record is [[word], embedding_vector]
        p = [name,nums]
        InsertHTC(H,p,p[1])
    print()
    #Prints the Hash Table stats, including the initial table size, the final table size, the load factor, percentage of empty lists, and standard deviation of the lengths of the lists
    print("Hash table stats:")
    print("Initial table size:",IV)
    print("Final table size:")
    print("Load factor:",LoadFactorHTC(H))
    print("Percentage of empty lists:")
    print("Standard deviation of the lengths of the lists:")
    print()
    #Elapsed seconds since program launch, reported as construction time
    elapsed_time_CONSTRUCTION_HTC = time.time()-start
    #Reports the time it took to create the HTC
    print("Running time for Hash Table construction:", round(elapsed_time_CONSTRUCTION_HTC),"seconds")
    print()
    #Alerts the user that it is detecting the similarities between the words
    print("Reading word file to determine similarities")
    print()
    #Prints the words being compared, and the value of its comparison
    print("Word similarities found:")
    print("Similarity [bear,bear] = ",round(simulationHTC('bear','bear',H),4))
    print("Similarity [barley,shrimp] = ",round(simulationHTC('barley','shrimp',H),4))
    print("Similarity [barley,oat] = ",round(simulationHTC('barley','oat',H),4))
    print("Similarity [federer,baseball] = ",round(simulationHTC('federer','baseball',H),4))
    print("Similarity [federer,tennis] = ",round(simulationHTC('federer','tennis',H),4))
    print("Similarity [harvard,stanford] = ",round(simulationHTC('harvard','stanford',H),4))
    print("Similarity [harvard,utep] = ",round(simulationHTC('harvard','utep',H),4))
    print("Similarity [harvard,ant] = ",round(simulationHTC('harvard','ant',H),4))
    print("Similarity [raven,crow] = ",round(simulationHTC('raven','crow',H),4))
    print("Similarity [raven,whale] = ",round(simulationHTC('raven','whale',H),4))
    print("Similarity [spain,france] = ",round(simulationHTC('spain','france',H),4))
    print("Similarity [spain,mexico] = ",round(simulationHTC('spain','mexico',H),4))
    print("Similarity [mexico,france] = ",round(simulationHTC('mexico','france',H),4))
    print("Similarity [mexico,guatemala] = ",round(simulationHTC('mexico','guatemala',H),4))
    print("Similarity [computer,platypus] = ",round(simulationHTC('computer','platypus',H),4))
    print()
    #Elapsed seconds since program launch, reported as query-processing time
    elapsed_time_END_HTC = time.time()-start
    #Reports the timer for the query processing of the HTC
    print("Running time for hash table query processing:", round(elapsed_time_END_HTC),"seconds")
# --- Entry point: time the run and let the user pick a data structure. ---
#Starts the clock; the global `start` is read by BSTComparison/HTCComparison
start = time.time()
#Asks the user to choose a binary search tree or hash table with chaining
print('Choose table implementation')
print('Type 1 for binary search tree or 2 for hash table with chaining')
#Reads the user's menu choice as an integer
x = int(input('Choice: '))
print()
#If the user selects 1, it will find the words using a binary search tree
if x == 1:
    print('Building binary search tree')
    BSTComparison()
#If the user selects 2, it will find the words using a hash table
elif x == 2:
    print('Building hash table with chaining')
    HTCComparison()
#If anything else is entered, the program ends with an error message.
else:
    print('Incorrect input! Try again.')
"noreply@github.com"
] | noreply@github.com |
2f746d428074ccf9e4a6d0998467d09acddf52c4 | 012c8089c575d47f3b47ee97a1c58a1c8a34e940 | /Snakefile | 215f574b04cc173e4458db8119bc031ada80989d | [] | no_license | paraslonic/peptideSearch | 35523eaa4f2816235a1b4a2db9f395925172e5c7 | 5adcf4372150bb6e2772b5cbac56991c69fbc0a6 | refs/heads/master | 2022-12-12T14:43:38.498440 | 2020-09-18T10:40:37 | 2020-09-18T10:40:37 | 296,578,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | pepbatches,=glob_wildcards("peptides/{batch}.fasta")
iwant = []
iwant.append(expand("blast/{batch}.out", batch = pepbatches))
rule all:
input: iwant
rule blast:
input: "peptides/{batch}.fasta"
output: "blast/{batch}.out"
shell:
"blastp -db uniprot_human -query {input} -outfmt '6 std nident salltitles' -evalue 100 -word_size 2 -out {output}"
| [
"paraslonic@gmail.com"
] | paraslonic@gmail.com | |
2cbd45af7d26fd7efc079cde6e33ae3cf3e2f982 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_143/ch118_2020_03_29_04_35_07_099201.py | d8962c300590a574cb248be19c49e2d9ef558047 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | import math
def snell_descartes(n1, n2, o):
    """Return the refraction angle in degrees for light hitting an
    interface at incidence angle o (degrees), going from a medium of
    refractive index n1 into one of index n2 (Snell's law:
    n1*sin(o) = n2*sin(o2))."""
    # Sine of the refracted angle, from Snell's law.
    sin_refracted = math.sin(math.radians(o)) * (n1 / n2)
    # Convert the refracted angle back from radians to degrees.
    return math.degrees(math.asin(sin_refracted))
def reflexao_total_interna(n1, n2, o2):
    """Return True when (n2 * o2) / n1 equals o2, else False.

    NOTE(review): this uses exact float equality on angles, which only
    holds in degenerate cases (n1 == n2 or o2 == 0); confirm this matches
    the exercise's definition of total internal reflection.
    """
    return (n2 * o2) / n1 == o2
"you@example.com"
] | you@example.com |
fe84d2018b258a3987ff9a8286c8b1bfd285391b | 14a93a8d9a75bb5a9d115e193931d6291910f054 | /mdl.py | c84fc27cb49798aa9314ff720ef556c92ea4ecfa | [] | no_license | Sujay2611/Coding | 94a4f24fa5665d807fb2a95f8aa67c7fa8e9e518 | 67d95d4183592cdb6f92e81f38a88664273e117a | refs/heads/master | 2023-01-11T03:02:04.439617 | 2022-12-30T10:13:47 | 2022-12-30T10:13:47 | 244,128,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | t=int(input())
# For each of t test cases (t is read on the line above): read the declared
# length n, read the list, and print its max and min in the order in which
# they first appear in the list.
for _ in range(t):
    n=int(input())
    a=[int(x) for x in input().split()]
    x=max(a)
    y=min(a)
    # If the minimum appears before the maximum, print it first.
    if(a.index(x)>a.index(y)):
        print(y,x)
    else:
        print(x,y)
"noreply@github.com"
] | noreply@github.com |
a6681169fe270861ab20c12bb9dd080537671d0c | 80ae9b5cfb45b6e9cf7873ef7c46e17e117e4019 | /data/HackerRank-ProblemSolving/Is This a Binary Search Tree.py | 731ae39593eed79cc53c99eb8fef64bfffb5dc12 | [] | no_license | Ritvik19/CodeBook | ef7764d89b790e902ede5802f36d5ca910d8a50e | 2b4ed7938bbf156553d6ba5cba6216449528f0fc | refs/heads/master | 2021-07-04T08:25:52.478719 | 2020-08-08T06:54:14 | 2020-08-08T06:54:14 | 138,744,302 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | """ Node is defined as
class node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
"""
def check_binary_search_tree_(root):
    """Return True when the tree rooted at `root` is a strict BST whose
    node data all lie in the problem's range 0..10000."""
    return check_node(root, -1, 10001)
def check_node(node, Min, Max):
    """Recursively verify that every node's data is strictly between the
    (Min, Max) bounds inherited from its ancestors."""
    if not node:
        # An empty subtree is trivially a valid BST.
        return True
    if not (Min < node.data < Max):
        # Current value violates the ancestor-imposed bounds (duplicates
        # fail too, since the comparison is strict).
        return False
    # Left values must stay below node.data, right values above it.
    return check_node(node.left, Min, node.data) and check_node(node.right, node.data, Max)
| [
"rastogiritvik99@gmail.com"
] | rastogiritvik99@gmail.com |
54b259ab6197ca64e49fda03669d339629eb117b | 78032757dac6a3052354bc5c9675cbfa385de2d2 | /graph/Minimum Passes in A matrix/Solution.py | 2e2b86ee0e06859c4679d3a8cd0614ffb1b3948b | [
"MIT"
] | permissive | joydeepnandi/Algo | 77ce7c8362c37bb183751f32423d1805c4385617 | 1c5c49cb172c47cad7df81dc9b66fedbacbb5b55 | refs/heads/main | 2023-07-11T14:27:00.612183 | 2021-08-15T13:52:02 | 2021-08-15T13:52:02 | 383,427,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | def minimumPassesOfMatrix(matrix):
# Write your code here.
passes= convertNegatives(matrix)
return passes-1 if not containsNegative(matrix) else -1
#Breadth-first conversion: flips negatives adjacent to positives, one wave
#per pass, mutating `matrix` in place.
def convertNegatives(matrix):
    """Flip negative entries of `matrix` to positive in BFS waves that
    spread out from the initially positive cells, and return the number of
    passes.  The count includes a final pass that converts nothing, which
    is why the caller (minimumPassesOfMatrix) subtracts 1.
    Uses the sibling helpers getPositivePos and FindAdj.
    """
    q=getPositivePos(matrix)
    passes=0
    while q:
        #Process exactly the cells that were queued at the start of this
        #pass (one BFS level).
        size=len(q)
        while size>0:
            currentRow,currentCol=q.pop(0)
            adjecent=FindAdj(currentRow,currentCol,matrix)
            for x in adjecent:
                row,col=x
                val=matrix[row][col]
                if val<0:
                    #Flip the negative neighbour and queue it for the next
                    #wave.
                    matrix[row][col]*=-1
                    q.append([row,col])
            size-=1
        passes+=1
    return passes
def getPositivePos(matrix):
    """Return the [row, col] positions of every strictly positive entry of
    `matrix`, scanning row by row."""
    return [
        [r, c]
        for r in range(len(matrix))
        for c in range(len(matrix[0]))
        if matrix[r][c] > 0
    ]
def FindAdj(row, col, matrix):
    """Return the in-bounds orthogonal neighbours of (row, col) as
    [row, col] pairs, in the order: up, down, left, right."""
    neighbours = []
    last_row = len(matrix) - 1
    last_col = len(matrix[0]) - 1
    if row > 0:
        neighbours.append([row - 1, col])
    if row < last_row:
        neighbours.append([row + 1, col])
    if col > 0:
        neighbours.append([row, col - 1])
    if col < last_col:
        neighbours.append([row, col + 1])
    return neighbours
def containsNegative(matrix):
    """Return True when any entry of `matrix` is strictly negative."""
    return any(value < 0 for row in matrix for value in row)
"noreply@github.com"
] | noreply@github.com |
8d80eb9f24d23ee3639b061d0166638fbdc587f2 | 9587a5f81827714982e8671021ca9dece642788c | /monotonic.py | 767a44fe80b6d3ac265e5259b9fc8a840a076faa | [] | no_license | ConstantDelta/proganone | 02966164635fad6712ed439abf9f99038b8206dc | a05367aeffd42d2f42a0b635111d1587efeba855 | refs/heads/main | 2023-08-23T00:50:35.333145 | 2021-11-05T21:11:31 | 2021-11-05T21:11:31 | 417,240,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import random
from random import randint
def is_monotonic(nums):
    """Return True when `nums` is monotonic, i.e. entirely non-decreasing
    or entirely non-increasing (empty and single-element lists count)."""
    rises = False
    falls = False
    for prev, curr in zip(nums, nums[1:]):
        if curr > prev:
            rises = True
        elif curr < prev:
            falls = True
    # Monotonic exactly when the sequence moves in at most one direction.
    return not (rises and falls)
# Entry point: read n, build a random list of n values drawn from [1, n],
# and report whether that list happens to be monotonic.
n = int(input())
nums = [randint(1, n) for i in range(n)]
#print(nums)
print(is_monotonic(nums))
"noreply@github.com"
] | noreply@github.com |
f471c218aea9af6624f66a10317c7ddfcc4a7c1b | 3e74b2d423d7b4d472ffce4ead1605621fb2d401 | /doubletdetec/script.py | f25271fc20f2cb73a4092cfc37cf2636d0f22b16 | [] | no_license | jamesjcai/My_Code_Collection | 954988ee24c7bd34139d35c880a2093b01cef8d1 | 99905cc5d063918cbe6c4126b5d7708a4ddffc90 | refs/heads/master | 2023-07-06T07:43:00.956813 | 2023-07-03T22:17:32 | 2023-07-03T22:17:32 | 79,670,576 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | import os
import pandas as pd
import numpy as np
import doubletdetection as dd
# Doublet-detection run on a counts matrix, without phenograph clustering.
# NOTE(review): hard-coded Windows path -- only runs on the author's machine.
os.chdir("U:\\GitHub\\My_Code_Collection\\doubletdetec")
# Counts matrix loaded as genes x cells (transposed before fitting below).
counts=pd.read_csv("Xsmall.csv").values
#clf = dd.BoostClassifier()
#labels = clf.fit(raw_counts).predict()
#scores = clf.doublet_score()
#counts = np.random.poisson(size=(500, 100))
# no phenograph
clf = dd.BoostClassifier(n_iters=2, use_phenograph=False, standard_scaling=True)
labels=clf.fit(counts.T).predict(p_thresh=1e-16, voter_thresh=0.5)
# Write the per-cell doublet mask to CSV (no header row).
pd.DataFrame(clf.doublet_score().mask).to_csv('output.csv',index=False,header=False)
# pd.DataFrame(clf.doublet_score().mask).to_csv('output.csv',index=False,header=['isdoublet'])
# with phenograph
# clf = doubletdetection.BoostClassifier(n_iters=2, use_phenograph=True, standard_scaling=True)
# clf.fit(counts).predict(p_thresh=1e-16, voter_thresh=0.5)
# clf.doublet_score()
# doubletdetection.plot.convergence(clf, show=False, p_thresh=1e-16, voter_thresh=0.5)
"jamescai@genomezoo.net"
] | jamescai@genomezoo.net |
cce36302df9fb42d1bdc253baf59ec4f7fa2ab77 | 0c1f83fc30a6fcb8ceb3740da77b23bec7151652 | /quis/quiz_tut.py | 68cefc3838f72f7727f359c8c03590897525ac03 | [] | no_license | smer44/python_evo | ab09c2dbbc3d415e312672977db873a91e508d69 | a43fe15113e0f51a10c6097a1c1e988a16cddc44 | refs/heads/master | 2023-03-28T14:11:08.335598 | 2021-04-03T21:38:11 | 2021-04-03T21:38:11 | 320,589,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,137 | py | import tkinter as tk
import tkinter.font as font
class Quis:
    """Tkinter quiz window.

    `data` is a list of [question, answer_options, correct_option_index]
    entries; the window shows one question at a time, counts correct
    answers, and shows a summary screen when the questions run out.
    """
    def __init__(self, data):
        # Root window plus the widgets reused for every question.
        self.window = tk.Tk()
        self.data= data
        self.correct_answers = 0
        self.myFont = font.Font(family='Helvetica', size = 20)
        self.question = tk.Label(text="--- questions loading ---" , font = self.myFont)
        self.question.pack()
        self.answers = tk.Listbox( font = self.myFont)
        self.answers.pack()
        self.confirm_button = tk.Button(text = 'Select', command = self.select_variant, font = self.myFont)
        self.confirm_button.pack()
        self.counter_lbl = tk.Label(text="Correct answers : 0 " , font = self.myFont)
        self.counter_lbl.pack()
    def finish_and_show_results(self):
        """Clear the quiz widgets and show the final score screen."""
        for widget in self.window.winfo_children():
            widget.destroy()
        self.result_label = tk.Label(text=
f"""Congratulations !!!
question solved : {self.correct_answers}
corret answers = {int(self.correct_answers / len(self.data) * 100)}%"""
                                     , font = self.myFont)
        self.result_label.pack()
    def select_variant_show(self):
        """Debug helper: print the currently selected answer (unused by the UI)."""
        sel = self.answers.curselection()
        print(f'sel = {sel}')
        if sel:
            print(f'answer: { self.answers.get(sel)}')
    def select_variant(self):
        """Button callback: score the selected answer and advance the quiz."""
        #Listbox.curselection =(0)
        sel = self.answers.curselection()
        # Ignore clicks when nothing is selected in the listbox.
        if sel:
            if sel[0] == self.correct:
                self.correct_answers+=1
                self.counter_lbl.config(text = "Correct answers : " + str(self.correct_answers))
            self.current_question+=1
            if (self.current_question >= len(self.data)):
                self.finish_and_show_results()
            else:
                self.update_question()
    def start(self):
        """Show the first question and enter the Tk main loop (blocks)."""
        self.current_question = 0
        self.update_question()
        self.window.mainloop()
    def update_question(self):
        """Load the current question's text and answer options into the UI."""
        question, answers , self.correct = self.data[self.current_question ]
        self.question.config(text=str(question))
        #self.answers.destroy() # deletes all elements
        self.answers.delete(0,tk.END)
        for answ in answers:
            self.answers.insert(tk.END,answ)
# Quiz definition: each entry is [question, list of answer options,
# index of the correct option].
data = [ ['what is your cat name?', ['alice' , ' bob' , 'i have no cat' , 'charlie'], 2] ,
         ['what programming language do you prefer?', ['Python' , ' Jacascript' , 'C++'],0] ,
    ['what mashine learning method i use to train the snake?', ['neural network' , 'ecolutionary algo', ' markov chain' ], 1] ,
    ['on the chess turnier with n participants, each player plays 3 games with each other player. How many games will be played?',
      ['3/2n!' , '3*n!/(n+1)/2', ' 3*n(n-1)/2' ], 2] ,
       ]
# Launch the quiz window (blocks in the Tk main loop until closed).
q = Quis(data)
q.start()
| [
"peter4i4ik@gmail.com"
] | peter4i4ik@gmail.com |
b1cbf86996f5ea37516b9cff3485b1b37a7431e2 | ab45f59323869b6192f9b1093823685171ea6d8d | /MachineHypermediaToolkit/schema/Schema.py | 517f27ea46fce16a96f4e172dd3f57f8afcdbb02 | [
"Apache-2.0"
] | permissive | connectIOT/MachineHypermediaToolkit | 860f50103a66609e0d5c62b80cda26a9334f9a89 | ac1a5b247a41063cfb64fbb68a76f807fe62d84f | refs/heads/master | 2021-01-10T13:14:51.494412 | 2017-02-27T15:49:16 | 2017-02-27T15:49:16 | 50,071,213 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,503 | py | """
schema for abstract resource models
"""
import MachineHypermediaToolkit.terms as v
import json
class Schema:
    """Container for a list of schema nodes parsed from a JSON-LD-style map.

    NOTE(review): `_elementArray` is only assigned when a `schema` argument
    is supplied; a Schema() built without one lacks that attribute.
    """
    def __init__(self, schema=None):
        self._schema = {}
        self._nodeArray = []
        if schema:
            self._schema = schema
            # The resource key (from the terms module `v`) holds the list
            # of node maps to wrap.
            self._elementArray = schema[v._resource]
            if [] != self._elementArray:
                for nodeMap in self._elementArray :
                    self.addNode( SchemaNode(nodeMap) )
    def addNode(self, node):
        """Append a SchemaNode to this schema."""
        self._nodeArray.append(node)
    def serialize(self):
        """Return the nodes' raw maps as a pretty-printed JSON string."""
        objectArray = []
        for node in self._nodeArray:
            objectArray.append(node._nodeMap)
        return json.dumps(objectArray, sort_keys=True, indent=4, separators=(',', ': '))
class SchemaNode:
    """Wraps one schema node map and exposes its well-known fields
    (class, subClassOf, mayHave, usedBy, description) as attributes."""
    def __init__(self, nodeMap):
        self._nodeMap = {}
        # configure() returns self, so _node aliases this instance when a
        # map is given, and is {} otherwise.
        self._node = self.configure(nodeMap) if nodeMap else {}
    def configure(self, nodeMap):
        """Populate the field attributes from nodeMap; returns self."""
        self._nodeMap = nodeMap
        self._class = nodeMap[v._class]
        self._subClassOf = nodeMap[v._subClassOf]
        self._mayHave = nodeMap[v._mayHave]
        self._usedBy = nodeMap[v._usedBy]
        self._description = nodeMap[v._description]
        return self
    def serialize(self):
        """Return this node's raw map as a pretty-printed JSON string."""
        return json.dumps(self._nodeMap, sort_keys=True, indent=4, separators=(',', ': '))
def selfTest():
    """Smoke test: build a Schema from the bundled WoT schema and print it."""
    from WoTschema import WoTschema
    # Parenthesized print works under both Python 2 and Python 3; the
    # original bare "print expr" statement is a syntax error on Python 3.
    print(Schema(WoTschema).serialize())
if __name__ == "__main__" :
    selfTest()
| [
"michaeljohnkoster@gmail.com"
] | michaeljohnkoster@gmail.com |
16ac513c45dd82a92c2c6c59f11827ee675e6ced | 559455504629bd5d578745c0c9e73c7802ceb2df | /code/main.py | 51bb028bae060c3f429a77ff5ce4727fd6f88c5a | [] | no_license | chanmi168/CompressedSensingJHU | 6833a42ddc19bbeae13b7dd9f723606adde99d99 | 2c2dffd76e1cff28ee339ebc85dbfe7b6e3dbcfb | refs/heads/master | 2021-01-24T16:20:24.692734 | 2018-05-17T03:09:34 | 2018-05-17T03:09:34 | 123,187,274 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,047 | py | # Michae's tryout on torchvision ResNet
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets
import torchvision.models as models
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torch.autograd import Variable
from torch.nn.parameter import Parameter
# os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import argparse
import logging
import numpy as np
import pickle
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
# TODO get these scripts imported right
from modules import *
# Training settings
parser = argparse.ArgumentParser(description='ResNet Tryout -- ')
parser.add_argument('--batch-size', type=int, default=256, metavar='B',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='TB',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='E',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--optimizer', type=str, default='sgd', metavar='O',
help='Optimizer options are sgd, p3sgd, adam, rms_prop')
parser.add_argument('--momentum', type=float, default=0.5, metavar='MO',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, metavar='I',
help="""how many batches to wait before logging detailed
training status, 0 means never log """)
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='D',
help='Options are mnist, fashion_mnist and fashion_mnist_handbag')
parser.add_argument('--data_dir', type=str, default='../../data/', metavar='F',
help='Where to put data')
parser.add_argument('--name', type=str, default='', metavar='N',
help="""A name for this training run, this
affects the directory so use underscores and not spaces.""")
parser.add_argument('--model', type=str, default='default', metavar='M',
help="""Options are default, P2Q7DefaultChannelsNet,
P2Q7HalfChannelsNet, P2Q7DoubleChannelsNet,
P2Q8BatchNormNet, P2Q9DropoutNet, P2Q10DropoutBatchnormNet,
P2Q11ExtraConvNet, P2Q12RemoveLayerNet, and P2Q13UltimateNet.""")
parser.add_argument('--print_log', action='store_true', default=False,
help='prints the csv log when training is complete')
parser.add_argument('--depthLayers', action='store_true', default=False,
help='modify layers for depth completion')
args = parser.parse_args()
if args.depthLayers:
print('ResNet18 modified for depth completion')
else:
print('ResNet18')
def saveResults(train_losses, train_accs, val_losses, val_acc, best_val_acc):
    """Persist the training/validation curves and final accuracies.
    Not yet implemented -- called at the end of run_experiment."""
    # TODO implement me
    raise NotImplementedError
def train(args, model, optimizer, train_loader, epoch):
    """Run one training epoch and return (accuracy, train_loss).

    Uses the legacy torch.autograd.Variable API (pre-0.4 style).
    NOTE(review): the returned `accuracy` is the accuracy of the LAST
    minibatch only, while the printed figure is the whole-epoch accuracy.
    """
    # Training
    model.train()
    correct_count = np.array(0)
    train_loss = 0
    progress_bar = tqdm(train_loader, desc='Training')
    for batch_idx, (data, target) in enumerate(progress_bar):
        if args.cuda:
            data, target = data.cuda(0), target.cuda(0)
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        # Forward prediction step
        output = model(data)
        loss = F.cross_entropy(output, target)
        train_loss += loss
        # Backpropagation step
        loss.backward()
        optimizer.step()
        # The batch has ended, determine the
        # accuracy of the predicted outputs
        _, argmax = torch.max(output, 1)
        # target labels and predictions are
        # categorical values
        accuracy = (target == argmax.squeeze()).float().mean()
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
        correct_count += pred.eq(target.data.view_as(pred)).cpu().sum()
    # Average the summed per-batch losses over the dataset size.
    train_size = Variable(torch.Tensor([len(train_loader.dataset)]).double())
    train_loss /= train_size
    progress_bar.write(
        'Epoch: {} - train results - Average train_loss: {:.4f}, train_acc: {}/{} ({:.2f}%)'.format(
            epoch, np.squeeze(train_loss.data.numpy()), correct_count, len(train_loader.dataset),
            100. * correct_count / len(train_loader.dataset)))
    return accuracy, train_loss
def test(args, model, test_loader, epoch):
    """Evaluate the model on the validation set; return (acc, test_loss).

    Uses the legacy Variable(volatile=True) API to disable gradients.
    NOTE(review): the CUDA device name is printed once per batch inside
    the loop -- probably leftover debugging output.
    """
    # Validation Testing
    model.eval()
    test_loss = 0
    correct = 0
    progress_bar = tqdm(test_loader, desc='Validation')
    for data, target in progress_bar:
        if args.cuda:
            data, target = data.cuda(0), target.cuda(0)
            print(torch.cuda.get_device_name(0))
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.cross_entropy(output, target) # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    # Average the summed per-batch losses over the dataset size.
    test_size = Variable(torch.Tensor([len(test_loader.dataset)]).double())
    test_loss /= test_size
    acc = np.array(correct, np.float32) / test_size.data.numpy()
    progress_bar.write(
        'Epoch: {} - validation test results - Average val_loss: {:.4f}, val_acc: {}/{} ({:.2f}%)'.format(
            epoch, np.squeeze(test_loss.data.numpy()), correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    return acc, test_loss
def run_experiment(args, train_loader, test_loader):
    """Train ResNet18 for args.epochs epochs, validating each epoch and
    checkpointing the best model by validation accuracy.

    NOTE(review): the checkpoint path '../../data/.pt' has an empty file
    stem -- likely a missing name; confirm intended filename.  Ends by
    calling saveResults, which currently raises NotImplementedError.
    """
    total_minibatch_count = 0
    # Seed CPU (and GPU, when used) RNGs for reproducibility.
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
    kwargs = {'num_classes': 10}
    # ResNet18 comes from the project's `modules` star-import.
    model = ResNet18(**kwargs)
    # model = depthnet(**kwargs)
    if args.cuda:
        model.cuda()
    # model = models.resnet18(pretrained=False, **kwargs)
    epochs_to_run = args.epochs
    optimizer = optim.Adam(model.parameters())
    # Run the primary training loop, starting with validation accuracy of 0
    val_acc = 0
    best_val_acc = 0
    train_losses = []
    train_accs = []
    val_losses = []
    val_accs = []
    for epoch in range(1, epochs_to_run + 1):
        # train for 1 epoch (forward+backward)
        (train_acc, train_loss) = train(args, model, optimizer, train_loader, epoch)
        train_losses.append(np.squeeze(train_loss.data.numpy()))
        train_accs.append(train_acc.data.numpy())
        # validate progress on test dataset
        (val_acc, val_loss) = test(args, model, test_loader, epoch)
        val_losses.append(np.squeeze(val_loss.data.numpy()))
        val_accs.append(val_acc)
        if val_acc >= best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), '../../data/.pt')
    saveResults(train_losses, train_accs, val_losses, val_acc, best_val_acc)
def resnetMain():
    """Entry point: decide CPU/GPU, build data loaders, run the experiment.

    Mutates the module-level `args` namespace (sets args.cuda).
    prepareDataset comes from the project's `modules` star-import.
    """
    global args
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    # Prepare Dataset
    # train_dataset = ImageDataset(data = train_data, labels = train_label)
    # train_dataloader = ImageDataset(train_dataset, batch_size=args.batch_size,
    #                                  shuffle=True)
    print('==> Preparing data..')
    (train_loader, test_loader) = prepareDataset(args)
    # Start training, evaluate loss and acc
    run_experiment(args, train_loader, test_loader)
    # TODO implement me
    # raise NotImplementedError
# Script entry point.
if __name__ == '__main__':
    resnetMain()
"mchan32@jhu.edu"
] | mchan32@jhu.edu |
3fb332f428c5254810061e3dd8eb5293bcfdcc6d | a235448520bb719e3e88a50acbf835749093e9df | /tKGR/database_op.py | 6727d690a192fcb8e0a14a132d46207bf3fde3d4 | [] | no_license | moguizhizi/xERTE | aeceaf443b6afc5f0d5e5e19a18b4bdc0c78016d | d7a55d1c692cc97cdff8dde131093217dc6ea082 | refs/heads/main | 2023-04-05T17:49:53.393523 | 2021-04-13T13:39:51 | 2021-04-13T13:39:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,444 | py | import os
import sys
import time
from copy import deepcopy
from bson.objectid import ObjectId
from typing import List
import sqlite3
from sqlite3 import Error
import pymongo
PackageDir = os.path.dirname(__file__)
sys.path.insert(1, PackageDir)
class DBDriver:
def __init__(self, useMongo: bool = False, useSqlite: bool = False, MongoServerIP=None, sqlite_dir=None, DATABASE='tKGR'):
self.mongodb = None
self.sqlite_conn = None
if useMongo:
self.client = DBDriver.create_mongo_connection(MongoServerIP, DATABASE=DATABASE)
self.mongodb = getattr(self.client, DATABASE)
if useSqlite:
self.sqlite_conn = DBDriver.create_connection(sqlite_dir)
self.sql_task_schema = ('dataset', 'emb_dim', 'emb_dim_sm', 'lr', 'batch_size', 'sampling', 'DP_steps',
'DP_num_neighbors', 'max_attended_edges', 'add_reverse',
'node_score_aggregation', 'diac_embed', 'simpl_att', 'emb_static_ratio', 'loss_fn')
DBDriver.create_logging_table(self.sqlite_conn)
if self.sqlite_conn is None:
print("Error! cannot create the database connection.")
    def log_task(self, args, checkpoint_dir, git_hash=None, git_comment=None, device=None):
        """Record a training task's hyperparameters in whichever backends
        were enabled at construction time.

        :param args: argparse Namespace of hyperparameters
        :param checkpoint_dir: identifies the task (primary key in SQLite)
        :param git_hash: commit hash of the code being run
        """
        if self.mongodb:
            task_id = DBDriver.insert_a_task_mongo(self.mongodb, args, checkpoint_dir, git_hash, git_comment, device)
            print("save task information in mongoDB under id: ", task_id)
        if self.sqlite_conn:
            # Ensure the tasks table exists (and has columns for every
            # schema hyperparameter) before inserting.
            DBDriver.create_task_table(self.sqlite_conn, self.sql_task_schema, args)
            task_id = DBDriver.insert_into_task_table(self.sqlite_conn, self.sql_task_schema, args, checkpoint_dir,
                                                      git_hash)
            print("save task information in sqlite3 under id: ", task_id)
    def log_evaluation(self, checkpoint_dir, epoch, performance_dict):
        """Record validation metrics for one epoch in the enabled backends.

        :param checkpoint_dir: identifies the training run being logged
        :param epoch: epoch number the metrics belong to
        :param performance_dict: dictionary, key is the name of the metric, value is the quantity
        :return: None
        """
        if self.sqlite_conn:
            DBDriver.insert_into_logging_table(self.sqlite_conn, checkpoint_dir, epoch, performance_dict)
        if self.mongodb:
            DBDriver.insert_a_evaluation_mongo(self.mongodb, checkpoint_dir, epoch, performance_dict)
    def test_evaluation(self, checkpoint_dir, epoch, performance_dict):
        """Like log_evaluation, but writes to the 'Test_Evaluation'
        table/collection instead of the default validation log."""
        if self.sqlite_conn:
            DBDriver.insert_into_logging_table(self.sqlite_conn, checkpoint_dir, epoch, performance_dict, table_name='Test_Evaluation')
        if self.mongodb:
            DBDriver.insert_a_evaluation_mongo(self.mongodb, checkpoint_dir, epoch, performance_dict, collection='Test_Evaluation')
    def close(self):
        """Close whichever database connections were opened."""
        if self.sqlite_conn:
            self.sqlite_conn.close()
        if self.mongodb:
            self.client.close()
    @staticmethod
    def create_mongo_connection(IP_ADDRESS, DATABASE='tKGR', USER='peng', PASSWORD='siemens'):
        """Connect to a MongoDB server and return the client.

        NOTE(review/security): default credentials are hard-coded here and
        will end up in version control -- move them to configuration or
        environment variables.
        """
        client = pymongo.MongoClient("mongodb://{}:{}@{}/{}".format(USER, PASSWORD, IP_ADDRESS, DATABASE),
                                     socketTimeoutMS=20000)
        print("Connection to {}/{} established".format(IP_ADDRESS, DATABASE))
        return client
@staticmethod
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except Error as e:
print(e)
return conn
    @staticmethod
    def insert_a_task_mongo(db, args, checkpoint_dir, git_hash, git_comment, device):
        """Insert one task document into db['tasks'] and return its _id.

        Works on a deepcopy of vars(args), so the caller's Namespace is
        not mutated by the extra bookkeeping fields added below.
        """
        task = deepcopy(vars(args))
        task['git_hash'] = git_hash
        task['git_comment'] = git_comment
        task['checkpoint_dir'] = checkpoint_dir
        task['aws_device'] = device
        return db['tasks'].insert_one(task).inserted_id
    def register_query_mongo(self, collection, src_idx_l: List[int], rel_idx_l: List[int], cut_time_l: List[int],
                             target_idx_l: List[int], experiment_info: dict, id2entity, id2relation) -> List[int]:
        """Insert one document per (subject, relation, timestamp, object)
        quadruple into the given collection, storing both the integer ids
        and their human-readable names, and return the inserted _ids.

        :param id2entity: mapping from entity id to entity name
        :param id2relation: mapping from relation id to relation name
        """
        mongo_id = []
        for src, rel, ts, target in zip(src_idx_l, rel_idx_l, cut_time_l, target_idx_l):
            query = {'subject': int(src),
                     'subject(semantic)': id2entity[src],
                     'relation': int(rel),
                     'relation(semantic)': id2relation[rel],
                     'timestamp': int(ts),
                     'object': int(target),
                     'object(semantic)': id2entity[target],
                     'experiment_info': experiment_info}
            mongo_id.append(self.mongodb[collection].insert_one(query).inserted_id)
        return mongo_id
    @staticmethod
    def insert_a_evaluation_mongo(db, checkpoint_dir, epoch, performance_dict, collection='logging'):
        """Upsert epoch metrics for a checkpoint into the given collection.

        Uses the dotted field path "epoch.<n>" so each epoch's metrics
        become one sub-document under the checkpoint's 'epoch' field.
        """
        checkpoint = db[collection].find_one({'checkpoint_dir': checkpoint_dir})
        if checkpoint:
            # Existing document: add/overwrite just this epoch's metrics.
            db[collection].update_one({"_id": checkpoint['_id']}, {"$set": {"epoch."+str(epoch): performance_dict}})
        else:
            # First metrics for this checkpoint: create the document.
            log = {'checkpoint_dir': checkpoint_dir, 'epoch': {str(epoch): performance_dict}}
            db[collection].insert_one(log)
@staticmethod
def create_table(conn, create_table_sql):
""" create a table from the create_table_sql statement
:param conn: Connection object
:param create_table_sql: a CREATE TABLE statement
:return:
"""
try:
c = conn.cursor()
c.execute(create_table_sql)
except Error as e:
print(e)
    @staticmethod
    def create_task_table(conn, hyperparameters: List[str], args, table_name="tasks"):
        """Create (or evolve) the tasks table so it has a column for every
        hyperparameter plus checkpoint_dir (primary key) and git_hash.

        If the table already exists, missing columns are added via ALTER
        TABLE; otherwise the full CREATE TABLE statement is executed.

        NOTE(review): because bool is a subclass of int in Python, the
        isinstance(arg, int) branch also captures booleans, so the
        dedicated bool branch below is unreachable (both map to INTEGER,
        so the result is the same).  The bare `except:` also masks the
        real type error raised for unsupported types.

        :param conn: open sqlite3 Connection
        :param hyperparameters: hyparameters to be stored in database, except checkpoint_dir, which is primary key
        :param args: argparse Namespace holding the hyperparameter values
        :return: None
        """
        with conn:
            cur = conn.cursor()
            table_exists = False
            # get the count of tables with the name
            cur.execute(
                ''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{}' '''.format(table_name))
            # if the count is 1, then table exists
            if cur.fetchone()[0] == 1:
                print("table exists")
                table_exists = True
            if table_exists:
                # Existing column names, used to detect which ones to add.
                columns = [i[1] for i in cur.execute('PRAGMA table_info({})'.format(table_name))]
            create_table_sql = "CREATE TABLE IF NOT EXISTS {} (checkpoint_dir TEXT PRIMARY KEY, ".format(table_name)
            for hp in hyperparameters:
                try:
                    arg = getattr(args, hp)
                    # Map the Python value's type to a SQLite column type.
                    if isinstance(arg, int):
                        arg_type = "INTEGER"
                    elif isinstance(arg, float):
                        arg_type = "REAL"
                    elif isinstance(arg, bool):
                        arg_type = "INTEGER"
                    elif isinstance(arg, list):
                        arg_type = 'TEXT'
                    elif isinstance(arg, str):
                        arg_type = "TEXT"
                    else:
                        raise AttributeError("Doesn't support this data type in create_task_table, database_op.py")
                except:
                    raise AttributeError("'Namespace' object has no attribute " + hp)
                if table_exists:
                    if hp not in columns:
                        # Schema evolution: add the new hyperparameter column.
                        cur.execute('ALTER TABLE {} ADD COLUMN {} {}'.format(table_name, hp, arg_type))
                create_table_sql += " ".join([hp, arg_type]) + ", "
            if table_exists:
                if "git_hash" not in columns:
                    cur.execute('ALTER TABLE {} ADD COLUMN git_hash TEXT'.format(table_name))
            create_table_sql += "git_hash TEXT NOT NULL);"
            if not table_exists:
                DBDriver.create_table(conn, create_table_sql)
@staticmethod
def insert_into_task_table(conn, hyperparameters, args, checkpoint_dir, git_hash, table_name='tasks'):
with conn:
cur = conn.cursor()
# get the count of tables with the name
cur.execute(
''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{}' '''.format(table_name))
# if the count is 1, then table exists
if cur.fetchone()[0] != 1:
raise Error("table doesn't exist")
placeholders = ', '.join('?' * (len(hyperparameters) + 2))
sql_hp = 'INSERT OR IGNORE INTO {}({}) VALUES ({})'.format(table_name,
'checkpoint_dir, git_hash, ' + ', '.join(
hyperparameters), placeholders)
sql_hp_val = [checkpoint_dir, git_hash]
for hp in hyperparameters:
try:
arg = getattr(args, hp)
if isinstance(arg, bool):
arg = int(arg)
elif isinstance(arg, list):
arg = ','.join([str(_) for _ in arg])
sql_hp_val.append(arg)
except:
raise AttributeError("'Namespace' object has no attribute " + hp)
cur.execute(sql_hp, sql_hp_val)
task_id = cur.lastrowid
return task_id
@staticmethod
def create_logging_table(conn, table_name="logging"):
"""
:param conn:
:param hyperparameters: hyparameters to be stored in database, except checkpoint_dir, which is primary key
:param args:
:return:
"""
with conn:
logging_col = (
'checkpoint_dir', 'epoch', 'training_loss', 'validation_loss', 'HITS_1_raw', 'HITS_3_raw',
'HITS_10_raw',
'HITS_INF', 'MRR_raw', 'HITS_1_fil', 'HITS_3_fil', 'HITS_10_fil', 'MRR_fil')
sql_create_loggings_table = """ CREATE TABLE IF NOT EXISTS logging (
checkpoint_dir text NOT NULL,
epoch integer NOT NULL,
training_loss real,
validation_loss real,
HITS_1_raw real,
HITS_3_raw real,
HITS_10_raw real,
HITS_INF real,
MRR_raw real,
HITS_1_fil real,
HITS_3_fil real,
HITS_10_fil real,
MRR_fil real,
PRIMARY KEY (checkpoint_dir, epoch),
FOREIGN KEY (checkpoint_dir) REFERENCES tasks (checkpoint_dir)
);"""
DBDriver.create_table(conn, sql_create_loggings_table)
@staticmethod
def insert_into_logging_table(conn, checkpoint_dir, epoch, performance_dict, table_name='logging'):
"""
sqlite is not vertical scalable, make sure the schema is identical with the existing table
:param conn:
:param checkpoint_dir:
:param epoch:
:param performance_dict:
:param table_name:
:return:
"""
with conn:
cur = conn.cursor()
# get the count of tables with the name
cur.execute(
''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{}' '''.format(table_name))
# if the count is 1, then table exists
if cur.fetchone()[0] != 1:
raise Error("table doesn't exist")
# logging_col = (
# 'checkpoint_dir', 'epoch', 'training_loss', 'validation_loss', 'HITS_1_raw', 'HITS_3_raw',
# 'HITS_10_raw',
# 'HITS_INF', 'MRR_raw', 'HITS_1_fil', 'HITS_3_fil', 'HITS_10_fil', 'MRR_fil')
logging_col = list(performance_dict.keys())
placeholders = ', '.join('?' * len(logging_col))
sql_logging = 'INSERT OR IGNORE INTO {}({}) VALUES ({})'.format(table_name, ', '.join(logging_col),
placeholders)
sql_logging_val = [checkpoint_dir, epoch] + performance_dict.values()
cur.execute(sql_logging, sql_logging_val)
| [
"ubuntu@ip-172-31-40-172.eu-central-1.compute.internal"
] | ubuntu@ip-172-31-40-172.eu-central-1.compute.internal |
b550022c8996e1254ad04bbc6e68d43f9a20036d | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python3/745.Find Smallest Letter Greater Than Target(寻找比目标字母大的最小字母).py | a515ae5b4ea2c96f75d6260137b0d993b0a8432c | [
"MIT"
] | permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,576 | py | """
<p>
Given a list of sorted characters <code>letters</code> containing only lowercase letters, and given a target letter <code>target</code>, find the smallest element in the list that is larger than the given target.
</p><p>
Letters also wrap around. For example, if the target is <code>target = 'z'</code> and <code>letters = ['a', 'b']</code>, the answer is <code>'a'</code>.
</p>
<p><b>Examples:</b><br />
<pre>
<b>Input:</b>
letters = ["c", "f", "j"]
target = "a"
<b>Output:</b> "c"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "c"
<b>Output:</b> "f"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "d"
<b>Output:</b> "f"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "g"
<b>Output:</b> "j"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "j"
<b>Output:</b> "c"
<b>Input:</b>
letters = ["c", "f", "j"]
target = "k"
<b>Output:</b> "c"
</pre>
</p>
<p><b>Note:</b><br>
<ol>
<li><code>letters</code> has a length in range <code>[2, 10000]</code>.</li>
<li><code>letters</code> consists of lowercase letters, and contains at least 2 unique letters.</li>
<li><code>target</code> is a lowercase letter.</li>
</ol>
</p><p>给定一个只包含小写字母的有序数组<code>letters</code> 和一个目标字母 <code>target</code>,寻找有序数组里面比目标字母大的最小字母。</p>
<p>数组里字母的顺序是循环的。举个例子,如果目标字母<code>target = 'z'</code> 并且有序数组为 <code>letters = ['a', 'b']</code>,则答案返回 <code>'a'</code>。</p>
<p><strong>示例:</strong></p>
<pre>
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "a"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "c"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "d"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "g"
<strong>输出:</strong> "j"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "j"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "k"
<strong>输出:</strong> "c"
</pre>
<p><strong>注:</strong></p>
<ol>
<li><code>letters</code>长度范围在<code>[2, 10000]</code>区间内。</li>
<li><code>letters</code> 仅由小写字母组成,最少包含两个不同的字母。</li>
<li>目标字母<code>target</code> 是一个小写字母。</li>
</ol>
<p>给定一个只包含小写字母的有序数组<code>letters</code> 和一个目标字母 <code>target</code>,寻找有序数组里面比目标字母大的最小字母。</p>
<p>数组里字母的顺序是循环的。举个例子,如果目标字母<code>target = 'z'</code> 并且有序数组为 <code>letters = ['a', 'b']</code>,则答案返回 <code>'a'</code>。</p>
<p><strong>示例:</strong></p>
<pre>
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "a"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "c"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "d"
<strong>输出:</strong> "f"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "g"
<strong>输出:</strong> "j"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "j"
<strong>输出:</strong> "c"
<strong>输入:</strong>
letters = ["c", "f", "j"]
target = "k"
<strong>输出:</strong> "c"
</pre>
<p><strong>注:</strong></p>
<ol>
<li><code>letters</code>长度范围在<code>[2, 10000]</code>区间内。</li>
<li><code>letters</code> 仅由小写字母组成,最少包含两个不同的字母。</li>
<li>目标字母<code>target</code> 是一个小写字母。</li>
</ol>
"""
class Solution:
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
| [
"lishulong@wecash.net"
] | lishulong@wecash.net |
1094a919ee00b4136d877401a04011ef4e3c2f08 | 7b358c3af9b1d10ace466d492909c90b8937bb38 | /models/utils.py | 865c6c5c51e79993de84c555ce0805b820531d9a | [
"Apache-2.0"
] | permissive | shibaji7/model.CODE_BASE | b3090b0aa88c62e0fe62cb1b6c8bbca196b9e674 | 1ef8cffbbde1dbb05c405aedd1c0cac612ac6330 | refs/heads/master | 2023-04-11T13:07:52.722081 | 2021-09-23T02:47:25 | 2021-09-23T02:47:25 | 276,458,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,464 | py | #!/usr/bin/env python
"""utils.py: utils is dedicated to utility functions."""
__author__ = "Chakraborty, S."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import os
import sys
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from scipy import array
import datetime as dt
from netCDF4 import Dataset, num2date
import scipy.integrate as intg
from pysolar.solar import get_altitude
import calendar
import copy
import verify
import xarray
from timezonefinder import TimezoneFinder
from dateutil import tz
import aacgmv2
if sys.version_info.major > 2:
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
from collision import *
from absorption import *
from constant import *
def extrap1d(x,y,kind="linear"):
""" This method is used to extrapolate 1D paramteres """
interpolator = interp1d(x,y,kind=kind)
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]: return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]: return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else: return interpolator(x)
def ufunclike(xs):
return array(list(map(pointwise, array(xs))))
return ufunclike
def download_goes_data(dn, sat=15, v=False):
""" Download GOES data """
def _get_month_bounds_(start_time):
""" This method is used to get the first and last date of the month """
month_start = start_time.replace(day = 1).strftime("%Y%m%d")
_, month_end = calendar.monthrange(start_time.year, start_time.month)
month_end = (start_time.replace(day = 1) + dt.timedelta(days=month_end-1)).strftime("%Y%m%d")
return month_start, month_end
fname = "data/tElec/{dnx}/goes/goes.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
if not os.path.exists(fname+".gz"):
month_start, month_end = _get_month_bounds_(dn)
url = "https://satdat.ngdc.noaa.gov/sem/goes/data/avg/{year}/{month}/goes{sat}/netcdf/"\
"g{sat}_xrs_1m_{mstart}_{mend}.nc".format(year=dn.year, month="%02d"%dn.month, sat=sat,
mstart=month_start, mend=month_end)
if v: print("\n Download file -from- " + url)
tag_vars = ["A_AVG","B_AVG"]
fn = fname.replace(".csv",".nc")
os.system("wget -O {fn} {url}".format(fn=fn, url=url))
if os.path.exists(fn):
nc = Dataset(fn)
tt = nc.variables["time_tag"]
jd = np.array(num2date(tt[:],tt.units))
data = {}
for var in tag_vars: data[var] = nc.variables[var][:]
data["date"] = jd
data_dict = pd.DataFrame(data)
data_dict.to_csv(fname, index=False, header=True)
os.system("gzip {fname}".format(fname=fname))
if v: print("\n File saved -to- " + fname)
os.remove(fn)
else: print("\n Unable to download file.")
return
def download_riometer(dn, stn, v=False):
"""
This method is used to download riometer absorption data from UCalgary ftp server.
It stores the dataset into the local drive for future run. It only downloads 1m resolution dataset.
URL - http://data.phys.ucalgary.ca/sort_by_instrument/riometer/GO-Canada_Rio/txt/
"""
fname = "data/tElec/{dnx}/rio/{stn}.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
if stn != "ott" and not os.path.exists(fname+".gz"):
f_name = "norstar_k2_rio-%s_%s_v01.txt" % (stn, dn.strftime("%Y%m%d"))
base_url = "http://data.phys.ucalgary.ca/sort_by_instrument/riometer/GO-Canada_Rio/txt"\
"/{year}/{month}/{day}/".format(year=dn.year, month="%02d"%dn.month, day="%02d"%dn.day)
uri = base_url + f_name
tag_vars = ["date","hf_abs"]
os.system("wget -O {fn} {url}".format(fn=f_name, url=uri))
if os.path.exists(f_name):
if v: print("\n Download file -from- " + uri)
with open(f_name) as c: lines = c.read().split("\n")
data = []
for line in lines[13:-2]:
x = np.nan
line = list(filter(None,line.replace("\n","").split(" ")))
try:
x = float(line[2])
data.append([dt.datetime.strptime(line[0]+" "+line[1],"%d/%m/%y %H:%M:%S"),x])
except: continue
if len(data) > 0:
data_dict = pd.DataFrame(data,columns=tag_vars)
data_dict.to_csv(fname, index=False, header=True)
os.system("gzip {fname}".format(fname=fname))
if v: print("\n File saved -to- " + fname)
os.remove(f_name)
else: print("\n Unable to download file.")
elif stn == "ott" and not os.path.exists(fname+".gz"):
f_name = "/home/shibaji/model_run/riometers/ott_{year}-{month}-{day}.csv".format(year=dn.year,
month="%02d"%dn.month, day="%02d"%dn.day)
if os.path.exists(f_name):
data_dict = pd.read_csv(f_name, index_col=0)
data_dict = (data_dict[["DATE","_ABS"]]).rename(columns={"DATE":"date", "_ABS":"hf_abs"})
data_dict.to_csv(fname, index=False, header=True)
os.system("gzip {fname}".format(fname=fname))
if v: print("\n File saved -to- " + fname)
else:
if v: print("\n File not exists.")
return
def get_riom_loc(stn):
""" This method is to get the location of the riometer """
_o = pd.read_csv("config/riometers.csv")
_o = _o[_o.rio==stn]
lat, lon = _o["lat"].tolist()[0], np.mod( (_o["lon"].tolist()[0] + 180), 360 ) - 180
return lat, lon
def read_goes(dn, arc=False):
""" This method is used to fetch GOES x-ray data for a given day """
gzfname = "data/tElec/{dnx}/goes/goes.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
fname = "data/tElec/{dnx}/goes/goes.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
if arc:
gzfname = "data/tElec/archive/{dnx}/goes/goes.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
fname = "data/tElec/archive/{dnx}/goes/goes.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"))
os.system("gzip -d " + gzfname)
_o = pd.read_csv(fname,parse_dates=["date"])
os.system("gzip {fname}".format(fname=fname))
return _o
def read_riometer(dn, stn, arc=False):
""" This method is used to fetch riometer absorption data for a given day and station """
gzfname = "data/tElec/{dnx}/rio/{stn}.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
fname = "data/tElec/{dnx}/rio/{stn}.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
if arc:
gzfname = "data/tElec/archive/{dnx}/rio/{stn}.csv.gz".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
fname = "data/tElec/archive/{dnx}/rio/{stn}.csv".format(dnx=dn.strftime("%Y.%m.%d.%H.%M"), stn=stn)
if os.path.exists(gzfname):
os.system("gzip -d " + gzfname)
_o = pd.read_csv(fname,parse_dates=["date"])
os.system("gzip {fname}".format(fname=fname))
else: _o = pd.DataFrame()
return _o
def get_height_integrated_absorption(beta, height):
""" This method is used to calculate height integrated absorption """
beta[np.isnan(beta)] = 0.
beta[beta < 0.] = 0.
beta_L = intg.trapz(beta)
return beta_L
def calculate_sza(dates, lat, lon, alts):
"""
This method is used to estimate the solar zenith angle for a specific date and
sepcific location in space. Note that this method uses skyfield api to estimate
solar zenith angle. This has been validated against NOAA website values.
"""
sza = np.zeros((len(dates), len(alts)))
for i, d in enumerate(dates):
for j, a in enumerate(alts):
d = d.replace(tzinfo=dt.timezone.utc)
sza[i,j] = 90. - get_altitude(lat, lon, d)
return sza
class PointGrid(object):
"""
This class initializes all the parameters for a lat, lon and 0,500 km altitiudes profiles. This is a 2D
grid for one latitude an longitude X axis time with 1m resolution Y axis altitude 1km resolution.
"""
def __init__(self, rio, ev, stime, etime, bins = 37, freq=30, v=False, fname="data/sim/{dn}/"):
self.rio = rio
self.alts = model["alts"]
self.start_time = stime
self.end_time = etime
self.ev = ev
self.lat, self.lon = get_riom_loc(rio)
self.bins = bins
self.freq = freq
d = int((etime-stime).total_seconds()/60.)
self.dn = [stime + dt.timedelta(seconds = i*60) for i in range(d)]
fname = (fname + "bgc.{stn}.nc.gz").format(dn=self.ev.strftime("%Y.%m.%d.%H.%M"), stn=self.rio)
os.system("gzip -d "+fname)
self._nc = Dataset(fname.replace(".gz", ""))
os.system("gzip "+fname.replace(".gz", ""))
self.igrf = {
"Bx":self._nc.variables["igrf.bx"][:],
"By":self._nc.variables["igrf.by"][:],
"Bz":self._nc.variables["igrf.bz"][:],
"B":self._nc.variables["igrf.b"][:]
}
self.iri = {
"Ne":self._nc.variables["iri.ne"][:],
"Ni":self._nc.variables["iri.ni"][:],
"Te":self._nc.variables["iri.te"][:],
"Ti":self._nc.variables["iri.ti"][:],
"ions":{
"NO+":self._nc.variables["iri.ions.no+"][:],
"O+":self._nc.variables["iri.ions.o+"][:],
"O2+":self._nc.variables["iri.ions.o2+"][:]
}
}
self.msis = {
"Tn":self._nc.variables["msis.tn"][:],
"rho":self._nc.variables["msis.rho"][:],
"AR":self._nc.variables["msis.ar"][:],
"H":self._nc.variables["msis.h"][:],
"HE":self._nc.variables["msis.he"][:],
"N2":self._nc.variables["msis.n2"][:],
"O":self._nc.variables["msis.o"][:],
"O2":self._nc.variables["msis.o2"][:],
"O_anomalous":self._nc.variables["msis.o_a"][:],
"nn":self._nc.variables["msis.nn"][:],
"NO":self._nc.variables["msis.no"][:],
"CO":self._nc.variables["msis.co"][:],
"H2O":self._nc.variables["msis.h2o"][:],
"CO2":self._nc.variables["msis.co2"][:],
}
self.Ne = np.zeros((len(self.dn),len(self.alts)))
self.chi = self._nc.variables["chi"][:]
self._col_ = Collision.load(self._nc)
self._abs_ = Absorption.load(self._nc)
if v: print("\n Grid point %.2f,%.2f is loaded." % (self.lat,self.lon))
return
def update_grid(self, cm, _ix_="all"):
self.ne = cm.Ne[::60, :]
self.ni = cm.Np[::60, :]
self.ni_e = cm.Nm[::60, :]
self.ni_x = cm.Nxp[::60, :]
self._abs_ = Absorption(self.igrf["B"], self._col_, self.ne, fo=self.freq*1e6)
self.drap = Absorption._drap_(self.ev, self.dn, self.rio, self.freq)
self.sato = Absorption._sato_(self.ev, self.dn, self.rio, self.freq)
return
def add_chi(ev, rio, start, end):
""" Add SZA to the Bgc file """
lat, lon = get_riom_loc(rio)
d = int((end-start).total_seconds()/60.)
dn = [start + dt.timedelta(seconds = i*60) for i in range(d)]
fname = "data/tElec/{dn}/bgc.{stn}.nc.gz".format(dn=ev.strftime("%Y.%m.%d.%H.%M"), stn=rio)
os.system("gzip -d "+fname)
rootgrp = Dataset(fname.replace(".gz",""), "a")
chi = rootgrp.createVariable("chi", "f8", ("ntimes","nalts"))
chi[:] = calculate_sza(dn, lat, lon, model["alts"])
chi.description = "Solar Zenith Angle"
chi.uints = "Deg(o)"
print(rootgrp.variables.keys())
rootgrp.close()
os.system("gzip "+fname.replace(".gz",""))
return
def extp(x, y, xlim, kind="slinear", scale="log"):
""" Extrapolate NaN values for smooth outputs. """
if scale == "log":
fn = extrap1d(x[x>xlim], np.log10(y[x>xlim]), kind=kind)
ynew = np.concatenate((10**fn(x[x<=xlim]), y[x>xlim]))
else:
fn = extrap1d(x[x>xlim], y[x>xlim], kind=kind)
ynew = np.concatenate((fn(x[x<=xlim]), y[x>xlim]))
return ynew
def int_absorption(_a, _h, extpoint=68, llim = 60, ulim = 150, method="trapz"):
""" Height integrate HF absorption """
_o = []
def line_integration(y, x, method="trapz"):
from scipy.integrate import simps, trapz
if method == "simps": z = simps(y, x)
elif method == "trapz": z = trapz(y, x)
else: z = None
return z
for _ix in range(_a.shape[0]):
_u = pd.DataFrame()
_u["h"], _u["a"] = _h, extp(_h, _a[_ix,:], xlim=extpoint)
_u = _u[(_u.h>=llim) & (_u.h<=ulim)]
_o.append(line_integration(_u["a"], _u["h"], method=method))
return np.array(_o)
def smooth(x,window_len=51,window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
if not window in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
def estimate_error(m, d, kind="rmse"):
""" Estimate error between model and data """
xsec = [(x-m.date.tolist()[0]).total_seconds() for x in m.date]
xnsec = [(x-m.date.tolist()[0]).total_seconds() for x in d.date]
dx = interp1d(xsec, m.hf_abs)(xnsec)
e = np.sqrt(np.mean((dx-np.array(d.hf_abs.tolist()))**2))
return e
def store_cmd(args):
""" Store the commands """
return
class Performance(object):
""" Class to estimate Skillset """
def __init__(self, stn, ev, times, model, start, end, bar=4., alt=None):
""" Initialize the parameters """
self.stn = stn
self.ev = ev
self.times = times
self.model = model
self.start = start
self.end = end
self.bar = bar
self.alt = alt
self._read_data_()
return
def _read_data_(self):
""" Read data from GOES and Riometer """
gos = read_goes(self.ev, False)
rio = read_riometer(self.ev, self.stn, False)
self.gos = gos[(gos.date>=self.start) & (gos.date<self.end)]
if len(rio) > 0:
rio = rio[(rio.date>=self.start) & (rio.date<=self.end)]
if not np.isnan(self.bar): self.rio = rio[rio.hf_abs <= self.bar]
else: self.rio = rio
elif np.isnan(self.alt) and not np.isnan(self.bar): self.alt = self.bar
y = np.array(self.gos.B_AVG.tolist())
yn = (y - np.min(y)) / (np.max(y) - np.min(y))
if np.isnan(self.alt): self.mx = np.max(self.rio.hf_abs.tolist())
else: self.mx = self.alt
self.yx = self.mx * yn
return
def _skill_(self):
""" Estimate skills """
self.acc, self.attrs = {}, {}
dic = {"MSE":"MSE_{r}", "RMSE":"RMSE_{r}", "MAE":"MAE_{r}", "MdAE":"MdAE_{r}",
"nRMSE":"nRMSE_{r}", "MASE":"MASE_{r}", "MAPE":"MAPE_{r}", "MdAPE":"MdAPE_{r}",
"MdSymAcc":"MdSymAcc_{r}"}
self.acc.update({"t": {"dims": ("t"), "data":self.gos.date.tolist()}})
for k in self.model.keys():
d = pd.DataFrame()
d["date"], d["hf_abs"] = self.times, self.model[k]
d = d[(d.date>=self.start) & (d.date<self.end)]
self.attrs.update(dict((dic[m].format(r=k), v) for (m,v) in verify.accuracy(np.array(d.hf_abs), self.yx).items()))
self.attrs.update(dict((dic[m].format(r=k), v) for (m,v) in verify.scaledAccuracy(np.array(d.hf_abs), self.yx).items()))
self.attrs.update({"mRMSE_" + k: np.sqrt(np.abs(np.max(d.hf_abs)-self.mx))})
self.attrs.update({"mPeak_" + k: np.max(d.hf_abs)})
self.acc.update({"e_" + k: {"dims": ("t"), "data": self.yx - np.array(d.hf_abs)}})
self.acc.update({"m_" + k: {"dims": ("t"), "data": np.array(d.hf_abs)}})
self.acc.update({"dat": {"dims": ("t"), "data": self.yx}})
self.attrs.update({"dPeak": self.mx})
return self
def _to_mag_(self, times, lat, lon):
mlats, mlons, mlts = [], [], []
for t in times:
mlat, mlon, mlt = aacgmv2.get_aacgm_coord(lat, lon, 100, t, method="TRACE")
mlats.append(mlat)
mlons.append(mlon)
mlts.append(mlt)
return mlats, mlons, mlts
def _params_(self):
""" Extract parameters """
times = self.gos.date.tolist()
lat, lon = get_riom_loc(self.stn)
self.attrs.update({"lat":lat, "lon":lon, "stn": self.stn, "event": self.ev.strftime("%Y.%m.%d.%H.%M")})
self.acc.update({"sza": {"dims": ("t"),
"data": calculate_sza(times, lat, lon, np.array([100])).ravel()}})
tf = TimezoneFinder()
from_zone = tz.tzutc()
to_zone = tz.gettz(tf.timezone_at(lng=lon, lat=lat))
LT = [t.replace(tzinfo=from_zone).astimezone(to_zone).to_pydatetime() for t in times]
now = self.start.replace(tzinfo=from_zone).astimezone(to_zone).to_pydatetime().replace(hour=0,minute=0,second=0)
LT = [(x - now).total_seconds()/3600. for x in LT]
self.acc.update({"local_time": {"dims": ("t"), "data": LT}})
mlats, mlons, mlts = self._to_mag_(times, lat, lon)
self.acc.update({"mlt": {"dims": ("t"), "data": mlts}})
self.attrs.update({"mlat": np.mean(mlats)})
self.attrs.update({"mlon": np.mean(mlons)})
return self
def _to_netcdf_(self, fname):
""" Save to netCDF4 (.nc) file """
ds = xarray.Dataset.from_dict(self.acc)
ds.attrs = self.attrs
print("---------------------Skills----------------------")
print(ds)
print("-------------------------------------------------")
ds.to_netcdf(fname,mode="w")
return
| [
"shibaji7@vt.edu"
] | shibaji7@vt.edu |
10ba96abd7fbec0f39742d29991a6863ac7d558b | 17c14b758959cdceec0dce8f783346fdeee8e111 | /chap05_nlp/sequence_labeling/eng_model/main.py | 9bc95c095d4eb0780ca8db2ad4280e23fd2c0801 | [] | no_license | yurimkoo/tensormsa_jupyter | b0a340119339936d347d12fbd88fb017599a0029 | 0e75784114ec6dc8ee7eff8094aef9cf37131a5c | refs/heads/master | 2021-07-18T12:22:31.396433 | 2017-10-25T01:42:24 | 2017-10-25T01:42:24 | 109,469,220 | 1 | 0 | null | 2017-11-04T05:20:15 | 2017-11-04T05:20:15 | null | UTF-8 | Python | false | false | 1,871 | py | import os
from eng_model.data_utils import get_trimmed_glove_vectors, load_vocab, \
get_processing_word, CoNLLDataset
from eng_model.general_utils import get_logger
from eng_model.model import NERModel
from eng_model.config import config
try :
# directory for training outputs
if not os.path.exists(config.output_path):
os.makedirs(config.output_path)
# load vocabs
vocab_words = load_vocab(config.words_filename)
vocab_tags = load_vocab(config.tags_filename)
vocab_chars = load_vocab(config.chars_filename)
# get processing functions
processing_word = get_processing_word(vocab_words, vocab_chars,
lowercase=config.lowercase, chars=config.chars)
processing_tag = get_processing_word(vocab_tags, lowercase=False)
# get pre trained embeddings
embeddings = get_trimmed_glove_vectors(config.trimmed_filename)
# create dataset
dev = CoNLLDataset(config.dev_filename, processing_word,
processing_tag, config.max_iter)
test = CoNLLDataset(config.test_filename, processing_word,
processing_tag, config.max_iter)
train = CoNLLDataset(config.train_filename, processing_word,
processing_tag, config.max_iter)
# get logger
logger = get_logger(config.log_path)
# build model
model = NERModel(config, embeddings, ntags=len(vocab_tags),
nchars=len(vocab_chars), logger=logger)
model.build()
# train, evaluate and interact
model.train(train, dev, vocab_tags)
model.evaluate(test, vocab_tags)
model.predict(vocab_tags, processing_word, "Germany 's representative")
model.predict(vocab_tags, processing_word, "Germany")
model.predict(vocab_tags, processing_word, "Hello Germany 's representative")
except Exception as e :
raise Exception (e) | [
"tmddno1@naver.com"
] | tmddno1@naver.com |
5663d5a42857c62bef05718c08bf277bbac81e9d | d1e2558cd0c4468d8f85c9198d2f03070ec227a3 | /test_600510591.py | ba9fe1e83f51c49beebe2f874e66ad4c4ddbc197 | [] | no_license | amontheera-gy/git-workshop-2017 | 8e89554dd48773d285f02c939748e9c0116f6356 | b165c2cf9968d9f4049821735fc9096dbcc73b70 | refs/heads/master | 2021-04-09T11:58:34.832336 | 2018-03-11T08:04:57 | 2018-03-11T08:04:57 | 124,730,080 | 0 | 0 | null | 2018-03-11T06:32:26 | 2018-03-11T06:32:25 | null | UTF-8 | Python | false | false | 127 | py | def main():
a = int(input())
b = int(input())
x = a*(b**2);
x = x/3
if __name__ == "__main__":
main()
| [
"sun-sun_shawol@hotmail.com"
] | sun-sun_shawol@hotmail.com |
02071689f5953babf59c3f59f50627be0762c4ef | fc0fd6c61376ae5211d7f873374b095cfd44b9f3 | /api/api/crud.py | e903bacbd156540b836d60ac9e52b360da43c75d | [] | no_license | AthulMuralidhar/rss-scraper | a575de951c99bbf034f781a9117f0f16f6b90ab3 | c02b34a1909d1ea1e14000f980dbbc2323d62567 | refs/heads/main | 2023-06-18T22:17:54.314552 | 2021-07-20T07:43:11 | 2021-07-20T07:43:11 | 383,718,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | import ipdb
from sqlalchemy.orm import Session
from .models import User, FeedItems
from .schemas import UserCreate, FeedItemCreate
def get_user(db: Session, user_id: int):
return db.query(User).filter(User.id == user_id).first()
def get_user_by_email(db: Session, email: str):
return db.query(User).filter(User.email == email).first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
return db.query(User).offset(skip).limit(limit).all()
def create_user_db(db: Session, user: UserCreate):
fake_hashed_password = user.password + "notreallyhashed"
db_user = User(email=user.email, hashed_password=fake_hashed_password)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def get_feeds(db: Session, skip: int = 0, limit: int = 100):
return db.query(FeedItems).offset(skip).limit(limit).all()
def create_user_feed(db: Session, feed: FeedItemCreate, user_id: int):
db_feed = FeedItems(**feed, user_id=user_id)
db.add(db_feed)
db.commit()
db.refresh(db_feed)
return db_feed
| [
"athul.m68@gmail.com"
] | athul.m68@gmail.com |
7e3d11d2439dcaeaa75efe1e10f4e9c63e3bf655 | a07ff16550397b3433a185da34cc19379da31c26 | /day07/movie_choice_01.py | 9e1b4e7ccf67e4df11a2f10265e3fc89c568ce8b | [] | no_license | gangys0411/python | c349dc5be41488e4632bd775925bc503b61067be | dd6a2c5cd4e25cfe0442fe8d3f97ca0444e789b7 | refs/heads/master | 2020-06-08T06:37:47.671495 | 2019-07-11T13:55:23 | 2019-07-11T13:55:23 | 193,178,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # -*- coding: utf-8 -*-
# UTF-8 encoding when using korean
movies = ["Batman", "Harry Porter", "Scream", "Happy Dog"]
#for 문을 이용하여 movies 안에 있는 영화 제목을 모두 출력합니다.
print "영화 목록 : "
for movie in movies:
print("%s " % movie)
# 사용자에게 어떤 영화를 볼지 질문을 출력합니다.
print "관람할 영화를 선택해 주세요"
choice = raw_input()
#while 문을 이용하여 선택한 영화가 목록에 없을 시에는 영화 제목을 다시 입력받습니다.
while choice not in movies:
print "다시 선택해 주세요."
choice = raw_input()
| [
"gangys0411@gmail.com"
] | gangys0411@gmail.com |
9ceabedb19f991cf57f0828813b8c488db7de6e2 | 204217d7aa03e7f7567d74cf6bc41ad39a82c629 | /video_input.py | 9bb2bf891ee32761bef0299c06369c35a76a4e8b | [] | no_license | XYudong/BuzzardDetector | e894de7dc9f720f2c1cbaf3141b5333e80551985 | 1d072c9d1d8ff5f017e6ea85cad2bfb125beae47 | refs/heads/master | 2020-04-08T22:08:38.148956 | 2018-12-11T04:11:45 | 2018-12-11T04:11:45 | 159,773,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | import numpy as np
import cv2
"""extract and images from a video"""
video_idx = 9
path_to_video = "data/video/new/video2_" + str(video_idx) + ".mp4"
path_to_image = "data/video/image2/"
cap = cv2.VideoCapture(path_to_video)
i = 0
while True:
# Capture frame-by-frame
ret, frame = cap.read()
if frame is None:
print("video finished")
break
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# print(gray.shape)
# pick some frames and save them to image files
num = 10
remainder = 1
if i % num == remainder:
cv2.imwrite(path_to_image + "im_video2_" + str(video_idx) + "_" + str(int(i/(num+remainder))) + ".jpg", gray)
i += 1
# Display the resulting frame
# cv2.imshow('frame', gray)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
print(i)
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"leon.yuyudong@hotmail.com"
] | leon.yuyudong@hotmail.com |
7d82921518464ce7f2a717c5b1a78f77df5a4884 | 0334280e95b0239e36f286013744401084ecc737 | /Ej7.py | 2db50cdfdb44a6cbe1e8c8af78c12391cf6ae1d2 | [] | no_license | SamantaGomez/tkinter | 304f9cdb4de4b5912750f7e60dee805488bec2f6 | 76ce8e57d7684ba4868563005dfe23de5a9c7abb | refs/heads/master | 2020-05-24T09:50:00.210846 | 2019-05-17T14:17:30 | 2019-05-17T14:17:30 | 187,215,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from tkinter import *
master = Tk()
var1 = IntVar()
Checkbutton(master, text="Hombre", variable=var1).grid(row=0, sticky=W)
var2 = IntVar()
Checkbutton(master, text="Mujer", variable=var2).grid(row=1, sticky=W)
mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
cede73216293a8ce2fb462daf6702e71a3c0f983 | 51885da54b320351bfea42c7dd629f41985454cd | /abc023/d.py | 44e56efe280652514dbc388ca3b19c414d04f3e6 | [] | no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | #
# abc023 d
#
import sys
from io import StringIO
import unittest
import bisect
class TestClass(unittest.TestCase):
    """Official-sample tests that run resolve() with redirected stdin/stdout."""
    def assertIO(self, input, output):
        # Swap the std streams for StringIO buffers, run the solver,
        # then restore the real streams before asserting.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    def test_入力例1(self):
        # Official sample input/output #1
        input = """4
5 6
12 4
14 7
21 2"""
        output = """23"""
        self.assertIO(input, output)
    def test_入力例2(self):
        # Official sample input/output #2
        input = """6
100 1
100 1
100 1
100 1
100 1
1 30"""
        output = """105"""
        self.assertIO(input, output)
def resolve():
    """Solve AtCoder ABC023 D by binary-searching the minimal feasible answer.

    Reads N balloons (initial height h, ascent speed s) from stdin and prints
    the smallest maximum height at which all balloons can be popped, popping
    one balloon per unit time starting at t=0.
    """
    N = int(input())
    ok = 0
    global H, S  # shared with isOK(): initial heights and ascent speeds
    H = []
    S = []
    for _ in range(N):
        h, s = map(int, input().split())
        H.append(h)
        S.append(s)
        # h + s*(N-1) is this balloon's height if popped last — a feasible upper bound
        ok = max(ok, h+s*(N-1))
    # NOTE(review): decrementing the known-feasible upper bound looks
    # suspicious (it may make 'ok' infeasible in edge cases) — confirm.
    ok -= 1
    ng = max(H)-1  # anything below the tallest initial height is infeasible
    # Binary search invariant: 'ok' is feasible, 'ng' is infeasible.
    while abs(ok-ng) > 1:
        mid = (ok+ng)//2
        if isOK(mid):
            ok = mid
        else:
            ng = mid
    print(ok)
def isOK(x):
    """Return True if every balloon can be popped before exceeding height x.

    Balloon j reaches height x at time (x - H[j]) / S[j]; popping one balloon
    per unit time (starting at t=0) is feasible iff, after sorting those
    deadlines ascending, the i-th earliest deadline is at least i.
    """
    deadlines = sorted((x - h) / s for h, s in zip(H, S))
    return all(i <= t for i, t in enumerate(deadlines))
if __name__ == "__main__":
    # unittest.main()
    # Entry point: run the solver on real stdin.
    # Switch to unittest.main() above to run the bundled sample tests instead.
    resolve()
| [
"mskt4440@gmail.com"
] | mskt4440@gmail.com |
14b48bbbf62470ff68ffb9122f28308444f5f2f1 | 25873da962b0acdcf2c46b60695866d29008c11d | /src/programr/clients/events/console/config.py | 16a2c9b254edf08455d0a327b7f522385af6cbbc | [] | no_license | LombeC/program-r | 79f81fa82a617f053ccde1115af3344369b1cfa5 | a7eb6820696a2e5314d29f8d82aaad45a0dc0362 | refs/heads/master | 2022-12-01T14:40:40.208360 | 2020-08-10T21:10:30 | 2020-08-10T21:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,186 | py | from programr.config.client.config import ClientConfigurationData
class ConsoleConfiguration(ClientConfigurationData):
    """Client configuration for the interactive console front end.

    Extends the shared client configuration with two console-specific
    settings: the user id attributed to console sessions and the string
    printed as the input prompt.
    """

    def __init__(self):
        super().__init__("console")
        # Built-in fallbacks, used until load_configuration() overrides them.
        self._default_userid = "console"
        self._prompt = ">>>"

    @property
    def default_userid(self):
        """User id under which console conversations are recorded."""
        return self._default_userid

    @property
    def prompt(self):
        """Prompt string shown before each console input."""
        return self._prompt

    def load_configuration(self, configuration_file, bot_root):
        """Populate this object from the [console] section of the config file."""
        section = configuration_file.get_section(self.section_name)
        if section is not None:
            # NOTE(review): the fallback here is "Console" (capitalised) while
            # __init__ and to_yaml use "console" — preserved as-is.
            self._default_userid = configuration_file.get_option(section, "default_userid", missing_value="Console")
            self._prompt = configuration_file.get_option(section, "prompt", missing_value=">>>")
        super().load_configuration(configuration_file, section, bot_root)

    def to_yaml(self, data, defaults=True):
        """Write the console settings into the *data* mapping for YAML export."""
        if defaults is True:
            data['default_userid'] = "console"
            data['prompt'] = ">>>"
        else:
            data['default_userid'] = self._default_userid
            data['prompt'] = self._prompt
super(ConsoleConfiguration, self).to_yaml(data, defaults) | [
"hilbert.cantor@gmail.com"
] | hilbert.cantor@gmail.com |
178910e4f15626f235806824e33a9222ee63e9b0 | 308953409e1a3b828ac49b7301c1e751cbf762cf | /suite_EETc 12/tst_Open_Import_Export/test.py | 4453463efcc939e846f44d4a6859e0aa61a262cf | [] | no_license | asthagaur1/danfoss-automation | 4dcc7d8f000917b67e4d6f46ff862a525ddcbc5e | 213a99d3375889cd0e0c801421a50e9fe6085879 | refs/heads/main | 2023-03-31T23:26:56.956107 | 2021-04-01T08:52:37 | 2021-04-01T08:52:37 | 353,627,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | def main():
excel = r"C:\gitworkspace\KoolProg-TestAutomation\Master_Functions\Test_Automation\SourceCode\suite_EETc 12\shared\testdata\Open_Import_Export.xls";
#Mapping with Global scripts for Function library and key action.
source(findFile("scripts", "Functions.py"))
source(findFile("scripts", "Actions.py"))
#source(findFile("scripts", "object_id.py"))
keyAction(excel)
| [
"asthagaur@danfoss.com"
] | asthagaur@danfoss.com |
c432fe4da6ceb9bab34a11508cde42551b6e2fc8 | 9e1216e21c2955bfe916a765645724b2f548d88d | /crushsim.py | e24b663986859877832e4fa0909e7c049ec33042 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | xdongp/CRUSHsim | aad2986eb052b5093fb3accaa364ee69e8bfb660 | dbebf15a596ef1a506de8b538b9f00547c8b7d26 | refs/heads/master | 2020-12-11T08:15:01.559979 | 2015-06-19T19:44:40 | 2015-06-19T19:44:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,965 | py |
# ====================================================================
# CRUSHSim - CRUSH Simulation web app for Ceph admins
# ---------------------------------------------------
#
# By Xavier Villaneau, 2015
# xavier.villaneau@fr.clara.net or xvillaneau@gmail.com
# Claranet SAS, Rennes, France
# ====================================================================
# crushsim.py - Core Python script for the server
# - Handles everything server-side
# - All pages and valid URLs are defined here
# - Manages the stored files and how they are accessed
# - Calls crushtool to run the actual simulation
#
# Changelog:
# ----------
# May 4th 2015 - Initial release
# June 15th 2015 - Now handles CRUSH map metadata and added GET/crushdata
# Imports and initialization
# --------------------------
# Flask functions we'll need
from flask import Flask, url_for, render_template, flash, request, send_from_directory, make_response, redirect, abort
# Flask-Uploads for easier uploads
from flask.ext import uploads
# Other used libraries
from subprocess import call, Popen, PIPE
import uuid
import json
import re
import os
from sys import exit
from datetime import datetime
app = Flask(__name__)

# Configuration management
# ------------------------
# NOTE: this module is Python 2 (print statement below).

# Get configuration from file
app.config.from_pyfile('crushsim.cfg', silent= True)

# Require the SECRET_KEY to be set
if not app.config['SECRET_KEY']:
    print "Please set the SECRET_KEY in crushsim.cfg"
    exit(1)

# Default custom configuration (those are not defined in Flask/Werkzeug)
defaultconf = {
    'SERVER_ADDR': '127.0.0.1',
    'SERVER_PORT': 7180,
    'CRUSHTOOL_PATH': '/usr/bin/crushtool',
    'FILES_DIR': 'tmp'
}

# Apply default configuration if not defined in the configuration file
for c in defaultconf.keys():
    if not c in app.config.keys():
        app.config[c] = defaultconf[c]

# Create the directory for temporary files if it doesn't exist
if not os.path.exists(app.config['FILES_DIR']):
    os.makedirs(app.config['FILES_DIR'])

# Create the subdirectories and store their paths for easier access.
# Every filedir[...] value keeps a trailing '/', so callers concatenate
# filenames directly onto it.
filedir = {}
for d in ['txt_maps','json_maps','compiled_maps','test_results', 'simulate']:
    filedir[d] = app.config['FILES_DIR'] + '/' + d + '/'
    if not os.path.exists(filedir[d]):
        os.makedirs(filedir[d])

# FlaskUpload configuration
app.config['UPLOADED_CRUSHUPLOAD_DEST'] = filedir['txt_maps']
crushupload = uploads.UploadSet('crushupload', uploads.TEXT)
uploads.configure_uploads(app, (crushupload))
# strftime filter for Jinja, for easier time handling
@app.template_filter('strftime')
def _jinja2_filter_datetime(timestamp, fmt=None):
    """Jinja filter: format a Unix timestamp as a human-readable date.

    fmt is an optional strftime() format string. It was previously accepted
    but silently ignored (the format was always '%c'); it is now honoured,
    with '%c' (locale's full date/time) remaining the default.
    """
    d = datetime.fromtimestamp(timestamp)
    tfmt = fmt if fmt else '%c'
    return d.strftime(tfmt)
# Flask Routing
# -------------
@app.route('/')
def page_home():
    """Render the landing page."""
    return render_template('home.html')
@app.route('/editor', methods=['GET','POST'])
def page_editor_list():
    """List the stored CRUSH maps so one can be picked for editing.

    NOTE(review): the route accepts POST but the handler does not
    distinguish it from GET — confirm whether POST is actually needed.
    """
    return render_template('editor-list.html', crushmaps= get_saved_maps())
@app.route('/editor/<crushid>')
def page_editor(crushid):
    """Open the editor on the given CRUSH map and remember it as the
    initial map (via the 'id_ini' cookie) for the Analyze page."""
    # The Input CRUSH map is automatically set to the one used before editing
    # Maybe this behaviour isn't practical... Can't tell for now
    flash("Input CRUSH map set to " + crushid, category='info')
    resp = make_response(render_template('editor.html', crushid= crushid))
    resp.set_cookie('id_ini', value=crushid)
    return resp
@app.route('/analyze', methods=['GET', 'POST'])
def page_analyze():
    """GET: render the Analyze page.

    POST: read the initial/final simulation parameters from cookies
    (id/rule/size/minsize, suffixed _ini and _fin), validate them, run
    crushtool on both maps and store the result ids in cookies.
    """
    if request.method == 'GET':
        # Displays the Analyze page
        return render_template('analyze.html', crushmaps= get_saved_maps())
    if request.method == 'POST':
        # Will get all simulation parameters in the cookies and do
        # many server-side checks before launching the simulation.
        params_ini = {}
        params_fin = {}
        for prop in ['id', 'rule', 'size', 'minsize']:
            params_ini[prop] = request.cookies.get(prop +'_ini')
            params_fin[prop] = request.cookies.get(prop +'_fin')
            if not params_ini[prop]:
                # BUG FIX: this line used the undefined name 'prod', so a
                # missing parameter raised NameError (HTTP 500) instead of
                # returning the intended 400 response.
                return "The parameter '"+prop+"' for the initial map is missing !", 400
            if not params_fin[prop]:
                # Missing final-map values default to the initial-map values.
                params_fin[prop] = params_ini[prop]

        def check_params(params):
            # Validate one parameter set; returns (message, http_status).
            # Check if maps exist
            if not (crush_exists(params['id'])):
                return "The given CRUSH map ("+params['id']+") does not exist!", 404
            # Check if rule exists
            data = crush_read_json(params['id'])
            if not params['rule'] in data['rules'].keys():
                return "There is no ruleset " + params['rule'] + " in the CRUSH map!", 400
            # Check if the given sizes are valid integers and have coherent values
            try:
                params['size'] = int(params['size'])
                params['minsize'] = int(params['minsize'])
            except ValueError:
                return "The nominal and minimal sizes should be integers!", 400
            if params['size'] < params['minsize'] or params['minsize'] < 1:
                return "The nominal and minimal sizes are invalid!", 400
            return "It went well !", 200

        res = check_params(params_ini)
        if res[1] != 200:
            return res
        res = check_params(params_fin)
        if res[1] != 200:
            return res

        # Everything was successful! Now the simulation can actually be launched
        def make_simulation(params):
            # Compile the map if needed, run crushtool --test, and return the
            # id under which the statistics file was stored.
            fileid = params['id']
            textpath = filedir['txt_maps'] + fileid + '.txt'
            comppath = filedir['compiled_maps'] + fileid
            # Compile the CRUSH map
            if not os.path.isfile(comppath):
                call([app.config['CRUSHTOOL_PATH'],'-c',textpath,'-o',comppath])
            # Check for options
            options = ''
            if 'rule' in params.keys():
                fileid += '_r' + params['rule']
                options += ' --rule ' + params['rule']
            if 'size' in params.keys():
                fileid += '_n' + str(params['size'])
                options += ' --num-rep ' + str(params['size'])
            statpath = filedir['test_results'] + fileid + '.txt'
            with open(str(statpath), 'w') as statfile:
                Popen(app.config['CRUSHTOOL_PATH'] + " --test --show-statistics -i " + comppath + options, shell=True, stdout=statfile).wait()
            return fileid

        stats_ini = make_simulation(params_ini)
        stats_fin = make_simulation(params_fin)
        resp = make_response("Success!")
        resp.set_cookie('stats_ini', value= stats_ini)
        resp.set_cookie('stats_fin', value= stats_fin)
        return resp
@app.route('/results')
def page_results():
    """Render the simulation results page."""
    return render_template('results.html')
@app.route('/simulation/<sim_id>')
def page_simulation(sim_id):
    """Serve the raw crushtool statistics stored for the given simulation id."""
    return send_from_directory(filedir['test_results'], sim_id + '.txt')
@app.route('/api/simulate', methods=['PUT'])
def api_simulate():
    """
    Will run a simulation on the sent crushmap.
    So we're writing a file on the server then using an executable on it.
    Do I *really* have to explain why it can be dangerous ?
    But for now there's no way around it.

    Expects a text/plain body (decompiled CRUSH map) plus numeric 'rule'
    and 'size' query arguments; returns crushtool's raw statistics output.
    """
    # Test the request and its payload
    # Is it text? Can it be read? Is it empty?
    if request.mimetype != "text/plain":
        return "Bad request, expecting CRUSH map", 400
    try:
        crushmap = request.get_data()
    except:  # NOTE(review): bare except hides real errors; consider narrowing
        return "Bad request, expecting CRUSH map", 400
    if (crushmap == ""):
        return "Bad request, expecting CRUSH map", 400
    # Now try to get the arguments
    try:
        args = request.args
    except:  # NOTE(review): request.args is a property and should not raise
        return "URL argument parsing has failed for some reason", 500
    # Test if rule and size are given. Otherwise, refuse to process
    if not ('rule' in args and args['rule'].isdigit()):
        return "Please specify a valid rule number to apply", 400
    if not ('size' in args and args['size'].isdigit()):
        return "Please specify a valid size to apply", 400
    # Assign a random uuid for the operation, build two filenames from it
    tid = str(uuid.uuid4())
    fntxtcrush = filedir['simulate'] + tid + '.txt'
    fnbincrush = filedir['simulate'] + tid + '.bin'
    # Now write the input we were given in the file
    with open(fntxtcrush, 'w') as ftxtcrush:
        ftxtcrush.write(crushmap)
    # Make it into a binary CRUSH map.
    # TODO: catch compilation error
    # NOTE(review): shell=True with string concatenation — only safe because
    # both paths derive from a server-generated uuid; an argument list
    # (shell=False) would be safer.
    simcompstr = app.config['CRUSHTOOL_PATH'] + ' -c ' + fntxtcrush + ' -o ' + fnbincrush
    Popen(simcompstr, shell=True).wait()
    os.remove(fntxtcrush)
    # Build options for the simulation
    options = ''
    options += ' --rule ' + args['rule']
    options += ' --num-rep ' + args['size']
    # If a certain number of PGs is asked, include it
    if 'pgs' in args and args['pgs'].isdigit():
        options += ' --min-x 0'
        options += ' --max-x ' + str(int(args['pgs']) - 1)
    # Execute the simulation itself
    # TODO: catch simulation error
    simexecstr = app.config['CRUSHTOOL_PATH'] + " --test --show-statistics -i " + fnbincrush + options
    simproc = Popen(simexecstr, shell=True, stdout=PIPE)
    output = simproc.stdout.read()
    os.remove(fnbincrush)
    # Everything went well (I hope), let's send the results!
    return output
@app.route('/crushdata', methods=['GET','POST'])
def page_crushdata_noid():
    """GET: list all stored maps (JSON). POST: store a new CRUSH map, either
    as an uploaded text file (form field 'crushTextFile') or as a JSON body."""
    if request.method == 'GET':
        # Return JSON list of all maps and their metadata
        resp = make_response(json.dumps(get_saved_maps()))
        resp.mimetype = "application/json"
        return resp
    if request.method == 'POST':
        if 'crushTextFile' in request.files:
            # The request we're getting is for a brand new CRUSH map
            fileid = str(uuid.uuid4())
            # Upload text file to tmp/crushtxtfiles
            # The '.' at the end tells FlaskUpload to append file extension
            crushupload.save(request.files['crushTextFile'],name= fileid + '.')
            # Metadata handling: only the optional display name for now
            metadata = {}
            if 'crushTextName' in request.form:
                metadata['name'] = request.form['crushTextName']
            if len(metadata) > 0:
                with open(filedir['txt_maps'] + fileid + '.metadata.json','w') as mdf:
                    mdf.write(json.dumps(metadata))
            # Generate JSON data in tmp/crushjsondata
            with open(filedir['txt_maps'] + fileid + '.txt') as crushfile:
                with open(filedir['json_maps'] + fileid + '.json','w') as jsonfile:
                    crush_unwrap(crushfile, jsonfile)
            flash('CRUSH map uploaded with ID ' + fileid, category='success')
            return redirect('/')
        else:
            # JSON upload branch (typically from the editor)
            try:
                crushdata = json.loads(request.data)
            except TypeError:
                flash("Upload failed, data was not valid JSON", category='error')
                abort(415)
            fileid = str(uuid.uuid4())
            with open(filedir['json_maps'] + fileid + '.json','w') as crushjsonfile:
                crushjsonfile.write(request.data)
            # NOTE(review): fileid is a freshly generated uuid, so this
            # isfile() check should always be False — confirm intent.
            if not os.path.isfile(filedir['txt_maps'] + fileid + '.txt'):
                # The raw CRUSH file doesn't exist, so we'll create it
                crush_wrap(request.data, filedir['txt_maps'] + fileid + '.txt')
            flash("New CRUSH map successfully uploaded with ID" + fileid)
            resp = make_response("It worked!") # TODO : Redirect to analyze page?
            resp.set_cookie('id_fin', value=fileid)
            return resp
@app.route('/crushdata/<crush_id>', methods=['GET','PUT'])
def crushdata_withid(crush_id):
    """GET: serve a stored map (JSON if the id ends in '.json', text
    otherwise). PUT: merge the JSON body into the map's metadata file."""
    if request.method == "GET":
        if crush_id.endswith('.json'):
            return send_from_directory(filedir['json_maps'], crush_id)
        else:
            return send_from_directory(filedir['txt_maps'], crush_id + '.txt')
    if request.method == "PUT":
        try:
            inputdata = request.get_json()
        except:  # NOTE(review): bare except; also get_json() may return None
            return "The given request is not valid JSON", 400
        # Load any existing metadata so the update merges instead of replacing
        if os.path.isfile(filedir['txt_maps'] + crush_id + ".metadata.json"):
            with open(filedir['txt_maps'] + crush_id + ".metadata.json") as mdfr:
                prevdata = json.loads(mdfr.read())
        else:
            prevdata = {}
        # Only persist when a 'name' is supplied
        # NOTE(review): 'in inputdata' raises TypeError if inputdata is None — confirm.
        if "name" in inputdata:
            with open(filedir['txt_maps'] + crush_id + ".metadata.json", 'w') as mdfw:
                prevdata.update(inputdata)
                mdfw.write(json.dumps(prevdata))
        resp = make_response("It worked!")
        return resp
@app.route('/crushtree/<crush_id>')
def crushtree(crush_id):
    """Return the D3-friendly JSON tree for a stored CRUSH map."""
    if crush_id.endswith('.json'):
        # The output is going to be JSON anyway...
        crush_id = crush_id[:-5]
    if not crush_exists(crush_id):
        abort(404)
    with open(filedir['json_maps'] + crush_id + '.json') as crushfile:
        crushdata = json.loads(crushfile.read())
    return json.dumps(crush_makejsontree(crushdata['buckets']))
# Useful functions
# ----------------
def crush_exists(crushid):
    """Return True if a raw (text) CRUSH map is stored under this id."""
    txt_path = filedir['txt_maps'] + crushid + '.txt'
    return os.path.isfile(txt_path)
def crush_read_json(crushid):
    """Load the JSON form of a stored CRUSH map; False if the id is unknown."""
    if crush_exists(crushid):
        with open(filedir['json_maps'] + crushid + '.json') as json_file:
            return json.load(json_file)
    return False
def get_saved_maps():
    """
    Returns a list of all stored CRUSH maps as dictionnaries.
    Each entry has at least 'id' and 'modtime'; if a metadata file exists
    its keys are merged in. The list is sorted by modification time.
    """
    crushmaps = []
    for filename in os.listdir(filedir['txt_maps']):
        # Only the raw map files; metadata lives in *.metadata.json
        if not filename.endswith('.txt'):
            continue
        map_id = filename[:-4]
        entry = {
            'id': map_id,
            # Creation/modification time of the raw map file
            'modtime': int(os.path.getmtime(filedir['txt_maps'] + filename)),
        }
        # Merge in any stored metadata (e.g. the display name)
        metadata_path = filedir['txt_maps'] + map_id + ".metadata.json"
        if os.path.isfile(metadata_path):
            with open(metadata_path) as md_file:
                entry.update(json.loads(md_file.read()))
        crushmaps.append(entry)
    # Oldest first
    return sorted(crushmaps, key=lambda entry: entry['modtime'])
# Parse functions
# ---------------
def crush_unwrap(crushfile, jsonfile):
    """
    Converts a human-readable CRUSH map file into a more computer-friendly dictionnary.

    crushfile: iterable of text lines (a decompiled CRUSH map).
    jsonfile: writable file object; receives the JSON-encoded result.
    Returns None — the output is what gets written to jsonfile.
    """
    # Empty data declaration
    crushtunables = {}
    crushtypes = {}
    crushdevices = []
    crushbuckets = {}
    crushrules = {}
    # Variables for rule/bucket mode: hold the name of the rule/bucket
    # currently being parsed, or '' when outside any block.
    inrule = ''
    inbucket = ''
    for line in crushfile:
        # Keep only the interesting part of lines (strip leading whitespace
        # and anything past the allowed characters, e.g. comments)
        m = re.search('^\s*([\w\-{}\. ]*)', line)
        line = m.group(1)
        tmp = line.split(' ')
        if line == '':
            # Skip whole process if there is no useful information
            continue
        elif line == '}':
            # Get out of rule/bucket mode; a finished rule is stored by ruleset number
            if inrule:
                crushrules[int(rule['ruleset'])] = rule
                inrule = ''
            inbucket = ''
        elif inrule:
            # Rule mode: 'step' lines accumulate (order matters), other
            # keywords become plain key/value pairs
            if tmp[0] == 'step':
                rule['step'].append(' '.join(tmp[1:]))
            else:
                rule[tmp[0]] = ' '.join(tmp[1:])
        elif inbucket:
            # Bucket mode
            if tmp[0] == 'item':
                # 'item <name> weight <w>' — tmp[3] is the weight
                item = {}
                item['name'] = tmp[1]
                item['weight'] = float(tmp[3])
                crushbuckets[inbucket]['item'].append(item)
            elif tmp[0] == 'id':
                crushbuckets[inbucket]['id'] = int(tmp[1])
            else:
                # e.g. alg, hash
                crushbuckets[inbucket][tmp[0]] = tmp[1]
        elif line.startswith('tunable '):
            # Tunable declaration
            crushtunables[tmp[1]] = tmp[2]
        elif line.startswith('type' ):
            # Type declaration
            crushtypes[int(tmp[1])] = tmp[2]
        elif line.startswith('device '):
            # OSD declaration
            crushdevices.append(int(tmp[1]))
        elif line.startswith('rule '):
            # Rule start
            inrule = tmp[1]
            rule = {}
            rule['name'] = inrule
            rule['step'] = [] # Must be an array to stay ORDERED
        else:
            # It should be a bucket... I hope
            inbucket = tmp[1]
            crushbuckets[inbucket] = {}
            crushbuckets[inbucket]['type'] = tmp[0]
            crushbuckets[inbucket]['item'] = []
    # Assemble the final structure and serialise it
    crushdata = {}
    crushdata['tunables'] = crushtunables
    crushdata['devices'] = crushdevices
    crushdata['types'] = crushtypes
    crushdata['buckets'] = crushbuckets
    crushdata['rules'] = crushrules
    jsonfile.write(json.dumps(crushdata))
def crush_wrap (crushdata, crushfilename):
    """Converts a JSON-ified CRUSH map into text that can be compiled by crushtool.
    Parameters: The JSON data (a string) and the destination file path."""
    # TODO : it might be cleaner to ask for a previously opened file object
    data = json.loads(crushdata)
    def recursive_bucket_write(bucketname, crushdata, crushfile):
        """Internal recursive function used to write the buckets in a specific order.
        This is necessary because a bucket must be declared before used as item of
        another hierarchically higher bucket."""
        b = crushdata[str(bucketname)]
        if ('item' in b.keys() and len(b['item']) > 0):
            # If the bucket has items
            for item in b['item']:
                if not item['name'].startswith('osd.'):
                    # If it's not as OSD, go deeper in recursion to write it first
                    recursive_bucket_write(item['name'], crushdata, crushfile)
        # All 'children' buckets have been taken care of, now the bucket itself can be written
        crushfile.write(b['type'] + ' ' + bucketname + ' {\n')
        crushfile.write('\tid ' + str(b['id']) + '\t\t# do not change unnecessarily\n')
        crushfile.write('\talg ' + b['alg'] + '\n')
        crushfile.write('\thash ' + b['hash'] + '\t# rjenkins1\n')
        for i in b['item']:
            if i['name'].startswith('osd.'):
                crushfile.write('\titem ' + i['name'] + ' weight ' + str(i['weight']) + '\n')
            else:
                crushfile.write('\titem ' + i['name'] + '\n') # We'll leave the weight out for now
        crushfile.write('}\n')
    with open(crushfilename,'w') as f:
        f.write('# begin crush map\n')
        f.write('# This file was generated automatically by CRUSHsim\n')
        f.write('\n# tunables\n')
        for t in data['tunables'].keys():
            f.write('tunable ' + t + ' ' + data['tunables'][t] + '\n')
        f.write('\n# devices\n')
        for d in data['devices']:
            f.write('device ' + str(d) + ' osd.' + str(d) + '\n')
        f.write('\n# types\n')
        # Type ids must come out in numeric order
        typeids = [int(it) for it in data['types'].keys()]
        typeids.sort()
        for t in typeids:
            f.write('type ' + str(t) + ' ' + data['types'][str(t)] + '\n')
        f.write('\n# buckets\n')
        for bn in data['buckets'].keys():
            # Let's look for roots in order to start the recursive search
            # This assumes "root" is the highest hierarchy level... I could allow
            # more sophisticated architectures, but it's annoying so I'll do that later.
            b = data['buckets'][bn]
            if b['type'] == 'root':
                recursive_bucket_write(bn, data['buckets'], f)
        f.write('\n# rules\n')
        # It turns out crushtool is sensitive to the order of the rules,
        # and ignores the ruleset number ! They have to be written in te right order
        rulesets = [int(i) for i in data['rules'].keys()]
        rulesets.sort()
        for ri in rulesets:
            r = data['rules'][str(ri)]
            f.write('rule ' + r['name'] + ' {\n')
            f.write('\truleset ' + str(ri) + '\n')
            f.write('\ttype ' + r['type'] + '\n')
            f.write('\tmin_size ' + r['min_size'] + '\n')
            f.write('\tmax_size ' + r['max_size'] + '\n')
            for s in r['step']:
                f.write('\tstep ' + s + '\n')
            f.write('}\n')
        f.write('\n# end crush map\n')
f.write('\n# end crush map\n')
def crush_makejsontree(crushbuckets):
    """
    Generates a tree of the cluster map from "raw" CRUSH buckets data.
    Required for display of the map with D3.

    A synthetic 'cluster' node is used as the root of the tree; its children
    are the CRUSH 'root' buckets. OSD leaves carry id/name/type/size (weight
    scaled by 1000); inner nodes carry id/name/type/children.
    The input dictionnary is not modified.
    """
    # Work on a shallow copy so the caller's data is left untouched
    buckets = dict(crushbuckets)
    # Synthetic entry point whose items are the CRUSH roots
    buckets['cluster'] = {
        'item': [{'name': name}
                 for name in crushbuckets
                 if crushbuckets[name]['type'] == 'root']
    }

    def _build(node_name):
        # Build the subtree rooted at node_name
        node = {'name': node_name, 'children': []}
        bucket = buckets[node_name]
        if node_name != 'cluster':
            # The synthetic 'cluster' node has no real CRUSH data to copy
            node['id'] = bucket['id']
            node['type'] = bucket['type']
        for child in bucket['item']:
            child_name = child['name']
            if child_name.startswith('osd.'):
                # OSDs are leaves: build their entry directly
                node['children'].append({
                    'id': int(child_name[4:]),
                    'name': child_name,
                    'type': 'osd',
                    'size': int(child['weight'] * 1000),
                })
            else:
                # Inner bucket: recurse
                node['children'].append(_build(child_name))
        return node

    return _build('cluster')
# Flask application launch
# ------------------------
if __name__ == '__main__':
    # Run Flask's built-in development server on the configured
    # address/port (not suitable for production use).
    app.run(host= app.config['SERVER_ADDR'], port= app.config['SERVER_PORT'])
# vim: set ts=4 sw=4 autoindent:
| [
"xvillaneau@gmail.com"
] | xvillaneau@gmail.com |
55fc6d2b39fe89bf2236b8eb70c0424438a1186b | ffe9c83622edcb896fc39345dad790c16f78804c | /algorithm/5_1_二叉树的最大深度.py | a8ee79f2c11c2d87d3ce3d9cf408979a3e30e812 | [] | no_license | guanzizai1006/dataWhaleWithBruce | 56fa34823de533d66c8c833ce4d4ce8e38225794 | 6ec6a9a648a8be208be0d5e71c5a157602869197 | refs/heads/master | 2020-06-29T14:49:27.141000 | 2019-08-26T12:17:11 | 2019-08-26T12:17:11 | 200,563,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | int minDepth(TreeNode * root)
{
    // Despite the file name ("maximum depth"), this computes the MINIMUM
    // depth: the number of nodes on the shortest root-to-leaf path.
    if(root == NULL)
        return 0;
    if(root->left == NULL && root->right == NULL)
        return 1; // a leaf contributes exactly one node
    int left = minDepth(root->left) + 1;
    int right = minDepth(root->right) + 1;
    if(left == 1) // == 1 means there is no left subtree (only a right one); use a huge value so it cannot win the min below
        left = INT_MAX;
    if(right == 1) // == 1 means there is no right subtree (only a left one); likewise neutralise it
        right = INT_MAX;
    return left > right ? right : left; // return the smaller of the two depths
}
| [
"noreply@github.com"
] | noreply@github.com |
56c90b4716f1cc14341f23413d49aaa8b0682632 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/bulk_email/tests/test_views.py | d2ec21c3ba6ac57f01f91d77bfab7dc4daf89163 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 3,247 | py | """
Test the bulk email opt out view.
"""
import ddt
import pytest
from django.http import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.urls import reverse
from common.djangoapps.student.tests.factories import UserFactory
from lms.djangoapps.bulk_email.models import Optout
from lms.djangoapps.bulk_email.views import opt_out_email_updates
from lms.djangoapps.discussion.notification_prefs.views import UsernameCipher
from openedx.core.lib.tests import attr
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
@ddt.ddt
@override_settings(SECRET_KEY="test secret key")
class OptOutEmailUpdatesViewTest(ModuleStoreTestCase):
    """
    Check the opt out email functionality.
    """
    def setUp(self):
        super().setUp()
        # One user and one course; the opt-out URL embeds an encrypted
        # username token plus the course id.
        self.user = UserFactory.create(username="testuser1", email='test@example.com')
        self.course = CourseFactory.create(run='testcourse1', display_name='Test Course Title')
        self.token = UsernameCipher.encrypt('testuser1')
        self.request_factory = RequestFactory()
        self.url = reverse('bulk_email_opt_out', args=[self.token, str(self.course.id)])
        # Ensure we start with no opt-out records
        assert Optout.objects.count() == 0
    def test_opt_out_email_confirm(self):
        """
        Ensure that the default GET view asks for confirmation.
        """
        response = self.client.get(self.url)
        self.assertContains(response, "confirm unsubscribe from")
        assert Optout.objects.count() == 0
    def test_opt_out_email_unsubscribe(self):
        """
        Ensure that the POSTing "confirm" creates the opt-out record.
        """
        response = self.client.post(self.url, {'unsubscribe': True})
        self.assertContains(response, "You have successfully unsubscribed from")
        assert Optout.objects.count() == 1
    def test_opt_out_email_cancel(self):
        """
        Ensure that the POSTing "cancel" does not create the opt-out record
        """
        response = self.client.post(self.url)
        self.assertContains(response, "You have not been unsubscribed from")
        assert Optout.objects.count() == 0
    # Each tuple is (token, expected error fragment, course id or False);
    # every token is malformed at a different decryption stage.
    @ddt.data(
        ("ZOMG INVALID BASE64 CHARS!!!", "base64url", False),
        ("Non-ASCII\xff".encode(), "base64url", False),
        ("D6L8Q01ztywqnr3coMOlq0C3DG05686lXX_1ArEd0ok", "base64url", False),
        ("AAAAAAAAAAA=", "initialization_vector", False),
        ("nMXVK7PdSlKPOovci-M7iqS09Ux8VoCNDJixLBmj", "aes", False),
        ("AAAAAAAAAAAAAAAAAAAAAMoazRI7ePLjEWXN1N7keLw=", "padding", False),
        ("AAAAAAAAAAAAAAAAAAAAACpyUxTGIrUjnpuUsNi7mAY=", "username", False),
        ("_KHGdCAUIToc4iaRGy7K57mNZiiXxO61qfKT08ExlY8=", "course", 'course-v1:testcourse'),
    )
    @ddt.unpack
    def test_unsubscribe_invalid_token(self, token, message, course):
        """
        Make sure that view returns 404 in case token is not valid
        """
        request = self.request_factory.get("dummy")
        with pytest.raises(Http404) as err:
            opt_out_email_updates(request, token, course)
        # NOTE(review): 'in err' matches against the ExceptionInfo, not the
        # exception message string directly — confirm this checks what's intended.
        assert message in err
| [
"rafael.luque@osoco.es"
] | rafael.luque@osoco.es |
3bd23eba3e700806f3122e17d9527ed471f46cde | a609f05150d73cae683ccffd9f892366277891f8 | /orcid-crawler/database_pull_requests.py | 3b54bcf0a2e8e2b3c8b2cfe0191f4529f4a63285 | [] | no_license | bt3101-project-1/bt3101-project | f7e4ed7eafc6023ea0887cff3311ffffdc2f9377 | 2260bc7b1520bac5c0adabab35968ab3fa97f08b | refs/heads/master | 2021-01-21T03:20:46.668560 | 2017-11-03T08:24:09 | 2017-11-03T08:24:09 | 101,890,028 | 0 | 1 | null | 2017-10-29T05:09:32 | 2017-08-30T14:15:50 | JavaScript | UTF-8 | Python | false | false | 783 | py | import csv
from database_handler import *
# Pull pending crawl requests from the database and, for each request that
# yields crawler inputs, write them to a CSV consumed by orcid_crawler.js.
db_handler = DatabaseHandler()
user_requests = db_handler.get_crawl_requests()
crawler_inputs = []
for user_request in user_requests:
    request_id = user_request['_id']
    university_id = user_request['universityId']
    professor_ids = user_request['professorIds']
    original_status = user_request['status']
    crawler_inputs = db_handler.get_input(
        request_id, original_status, university_id, professor_ids)
    # NOTE(review): original indentation was lost in extraction; this places
    # the write inside the loop, so each request OVERWRITES the same CSV —
    # confirm whether it should instead run once after the loop.
    if len(crawler_inputs) != 0:
        # write a csv file for orcid_crawler.js to crawl
        with open("crawler_inputs.csv", "w") as f:
            writer = csv.writer(f)
            # writer.writerow(["professor_name", "university_name", "professor_id", "request_id", "original_status"])
            writer.writerows(crawler_inputs)
| [
"moyujiao0515@gmail.com"
] | moyujiao0515@gmail.com |
ddac1d2cfd519bf7e80784a018e0d3d053fdaec4 | 0bd597e1902332858a944bfcfe27670f0d9334e3 | /Contoh 11_3 Penggunaan Tipe Data String.py | 0bd47856293402975a4c8663b4af93231c37b1ee | [] | no_license | nikyayulestari/phyton-candy | 2cd9d064204e49abe745b525bff403b627a1091d | 7609a69be4acae57da4724b158030c2672253fb2 | refs/heads/master | 2020-08-23T13:58:17.719873 | 2019-10-21T18:01:23 | 2019-10-21T18:01:23 | 216,631,817 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | Python 2.7.3 (default, Apr 10 2012, 23:31:26) [MSC v.1500 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> kalimat = 'Saya berteriak, "Awas ada Phyton !!!"'
>>> print kalimat
Saya berteriak, "Awas ada Phyton !!!"
>>> kalimat_panjang = 'Baris ini akan diatur menjadi \n 2 baris yang sama panjang'
>>> print kalimat_panjang
Baris ini akan diatur menjadi
2 baris yang sama panjang
>>>
| [
"noreply@github.com"
] | noreply@github.com |
7a7a11851b97214a13194eae2a9c435772e0952f | a831dc2adadb0d091a4f7915411e58550397084a | /serviceAnnouncer.py | da0332eea9916e3d9d4ec6b4a5edab840db8037f | [] | no_license | mehmetsenavci/Peer-to-Peer-Chat-Application | c80ed60690d7fd8568d48818fdbb96a9d2bdb0ee | c0a860236787bc870a279d755635e873d6321a50 | refs/heads/master | 2022-02-21T22:12:06.310327 | 2019-06-09T12:04:31 | 2019-06-09T12:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import socket
import json
import time
# UDP socket used purely for broadcasting: SO_BROADCAST is required to send
# to the <broadcast> address, SO_REUSEADDR lets several processes share the port.
serviceAnn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
serviceAnn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serviceAnn.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# serviceAnn.settimeout(0.2)
# serviceAnn.bind(("", 5000))
username = input("Please enter your username: ")
# Announcement payload: who we are and the local address peers can reach us at.
message = {"username": username, "ip_address": socket.gethostbyname(socket.gethostname())}
jsonMessage = json.dumps(message).encode()
# Announce our presence on UDP port 5000 every 20 seconds, forever.
while True:
    print("Broadcasting the message.")
    serviceAnn.sendto(jsonMessage, ("<broadcast>", 5000))
    time.sleep(20)
| [
"mehmetsnvc@users.noreply.github.com"
] | mehmetsnvc@users.noreply.github.com |
5d19eefec7fc353b8906d6017ba512d60c8c1bcd | 76841b589b237fdc7876d03bed80dbb47b221968 | /mysite/illusion/migrations/0011_filetest.py | 5b4c6ea92dff74a6731a39a47bc823ef195fb32f | [] | no_license | developerbch/Style_Transfer-EagleAI | f706b2e1e62781622d47630f073507063d5789a3 | 1ba0f5b2f6ddd6387412ab1818e28e0fe2336fd6 | refs/heads/master | 2023-04-06T02:07:04.247152 | 2019-11-20T05:00:51 | 2019-11-20T05:00:51 | 220,609,276 | 1 | 0 | null | 2023-03-24T23:18:13 | 2019-11-09T07:49:07 | CSS | UTF-8 | Python | false | false | 552 | py | # Generated by Django 2.2.6 on 2019-10-28 13:11
from django.db import migrations, models
import illusion.file_upload
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.2.6) introducing the FileTest model.
    # Generated code: keep edits limited to comments.

    # Must be applied after migration 0010_delete_contentimage.
    dependencies = [
        ('illusion', '0010_delete_contentimage'),
    ]

    operations = [
        # Creates the FileTest table with an explicit integer primary key and
        # an optional file field whose upload path is computed by
        # illusion.file_upload.file_upload_path.
        migrations.CreateModel(
            name='FileTest',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('path', models.FileField(null=True, upload_to=illusion.file_upload.file_upload_path)),
            ],
        ),
    ]
| [
"51702197+developerbch@users.noreply.github.com"
] | 51702197+developerbch@users.noreply.github.com |
c9d6d29933fe6c85a7be665a5c63fc684fca925c | 98e29cc86d200daaf31959d57c6251409b41810f | /lucid/misc/batching.py | 292270453b7b6ddc0445d79f0fdbedd20c3a4fda | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | serviolimareina/lucid | e8e0c224de72e021be331f428daff36e14aa3896 | c095b851da3ed4ff2b9c1992ee87d3e9be85c70a | refs/heads/master | 2020-04-10T15:15:30.479655 | 2018-11-29T04:44:15 | 2018-11-29T04:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | # Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for breaking lists into chunks."""
def batch(list, batch_size=None, num_batches=None):
    """Break `list` into chunks.

    Exactly one of the two keyword arguments must be supplied.

    Args:
        list: sequence to split; must support len() and slicing.
        batch_size: if given, produce consecutive chunks of at most this many
            elements.
        num_batches: if given, split into this many chunks of roughly equal
            length.

    Returns:
        A generator of sub-sequences of `list`.

    Raises:
        ValueError: if both batch_size and num_batches are specified.
        RuntimeError: if neither is specified.
    """
    # Explicit check instead of `assert`: asserts are stripped under
    # `python -O`, and we want the same `is not None` test for both
    # arguments (the old assert treated num_batches=0 as "not given").
    if batch_size is not None and num_batches is not None:
        raise ValueError("Only one of batch_size and num_batches may be specified!")
    if batch_size is not None:
        return _batches_of_max_size(list, batch_size)
    elif num_batches is not None:
        return _n_batches(list, num_batches)
    else:
        raise RuntimeError("Either batch size or num_batches needs to be specified!")
def _batches_of_max_size(list, size):
for i in range(0, len(list), size):
yield list[i : i + size]
def _n_batches(list, size):
raise NotImplementedError
| [
"schubert@openai.com"
] | schubert@openai.com |
153ee4cb687d579804101061864e1282c71f7112 | 25375837ad0de8bd1265054ebb4cac49ce2e7404 | /problem009.py | f9c8659cc467ce0c851a58945baef3954a0d480b | [] | no_license | lorenswenson/project-euler | 2908bb1c0c77885a87fdd843d2c3898d7a3bdd9a | db389e41f23042bbf437279a908e051c89f01dae | refs/heads/master | 2021-09-06T19:10:43.192825 | 2018-02-10T05:44:31 | 2018-02-10T05:44:31 | 105,838,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | """
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a**2 + b**2 = c**2
For example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
def problem():
    """Find the unique Pythagorean triplet (a, b, c) with a + b + c = 1000.

    Uses exact integer arithmetic (c = 1000 - a - b, test a^2 + b^2 == c^2)
    instead of comparing a floating-point square root with `==`, which is
    fragile. Starting `a` at 1 also excludes the degenerate a = 0, b = c = 500
    "solution" the original float test matched, and the bounds enforce
    a < b < c as the problem requires.

    Returns:
        The product a * b * c for the triplet, or None if none exists.
    """
    for a in range(1, 1000):
        # Require b < c, i.e. b < (1000 - a) / 2.
        for b in range(a + 1, (1000 - a + 1) // 2):
            c = 1000 - a - b
            if a * a + b * b == c * c:
                print("{}, {}, abc: {}".format(a, b, a * b * c))
                return a * b * c
    return None


if __name__ == "__main__":
    problem()
| [
"27909197+lorenswenson@users.noreply.github.com"
] | 27909197+lorenswenson@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.