blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d2390f10db9d9c5d0511908207ac27b12a272d7c | dae3ceb4affd5b77649d66a979ccd7a4dfc98008 | /weekTwelve/Module12/pageRankPointDisFinal.py | bb538866a2b7b542d0d869c2ac05a7697e01fafa | [] | no_license | SubalakshmiShanthosi/JOC-Python_NPTEL | 84a47919b74c4a18e253b9d8fe94dfab4838b13f | 744df05836417be2e8a4a2f4237a18d7262b90eb | refs/heads/master | 2020-07-08T15:45:31.925353 | 2019-11-16T10:42:27 | 2019-11-16T10:42:27 | 203,716,559 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | # @Author: subalakshmi
# @Date: 2019-11-16T15:59:29+05:30
# @Last modified by: subalakshmi
# @Last modified time: 2019-11-16T16:09:50+05:30
import networkx as nx
import random
import matplotlib.pyplot as plt
def add_edges(aGraph):
    """Randomly wire up a directed graph in place.

    For every ordered pair of distinct nodes, one coin flip decides
    whether to add that edge (probability 0.5).  The mutated graph is
    also returned for convenience.
    """
    node_list = list(aGraph.nodes())
    for src in node_list:
        for dst in node_list:
            if src == dst:
                continue  # never add self-loops
            if random.random() <= 0.5:
                aGraph.add_edge(src, dst)
    return aGraph
# Build a 10-node directed graph (nodes labelled 0..9) and wire it up
# randomly: each ordered pair of distinct nodes gets an edge with
# probability 0.5 (see add_edges above).
aGraph=nx.DiGraph()
aGraph.add_nodes_from([i for i in range(10)])
aGraph=add_edges(aGraph)
def assign_points(aGraph):
    """Give every node an initial score of 100 points.

    Args:
        aGraph: a graph exposing ``nodes()``.

    Returns:
        list of int: one entry of 100 per node, in ``nodes()`` order.
    """
    # Comprehension replaces the original manual append loop (same result).
    return [100 for _ in aGraph.nodes()]
def distribute_points(aGraph, points):
    """Run one round of the point-passing PageRank approximation.

    Every node splits its current points evenly among its out-neighbours;
    a node with no outgoing edges simply keeps its own points.

    NOTE(review): node labels are used directly as list indices, so this
    assumes nodes are exactly 0..n-1 — confirm at call sites.
    """
    node_list = list(aGraph.nodes())
    updated = [0] * len(node_list)
    for node in node_list:
        outgoing = list(aGraph.out_edges(node))
        if outgoing:
            portion = points[node] / len(outgoing)
            for _, neighbour in outgoing:
                updated[neighbour] += portion
        else:
            # Dangling node: retains all of its points.
            updated[node] += points[node]
    return updated
def share_points(points, aGraph):
    """Interactively iterate the point distribution until the user stops.

    Repeatedly applies ``distribute_points`` and prints each new point
    vector, then waits for keyboard input; entering ``#`` ends the loop.

    Args:
        points: current per-node point list (indexed by node id).
        aGraph: directed graph whose edges route the points.

    Returns:
        The point list produced by the final round.
    """
    # Fixes vs. original: dropped the unused `nodes` local and replaced
    # `while(1)` with the idiomatic `while True`.
    while True:
        new_points = distribute_points(aGraph, points)
        print(new_points, end='\n')
        points = new_points
        stop = input("Press # to stop or any key to continue")
        if stop == '#':
            break
    return new_points
def rank_by_points(converged_points):
    """Print and return node ids ranked by their converged points.

    Args:
        converged_points: list of point totals, indexed by node id.

    Returns:
        list of (node_id, points) tuples sorted by points ascending.
        (Returning the ranking is new; the original only printed it, so
        existing callers that ignore the result are unaffected.)
    """
    # dict(enumerate(...)) replaces the manual index-by-index dict build.
    ranking = sorted(dict(enumerate(converged_points)).items(),
                     key=lambda item: item[1])
    print(ranking)
    return ranking
# Visualise the random graph and save the layout image to disk.
nx.draw(aGraph,with_labels=True)
plt.savefig('pointDisMetPageRank.png')
# Assign initial score for all points -- all nodes start with 100 points.
points=assign_points(aGraph)
# Interactively share points to out-neighbours until the user enters '#'.
converged_points=share_points(points,aGraph)
print(converged_points)
# Rank nodes by their converged point totals (ascending).
rank_by_points(converged_points)
# For comparison: networkx's built-in PageRank, also printed ascending.
pageRankOutput=nx.pagerank(aGraph)
print(sorted(pageRankOutput.items(),key=lambda f:f[1]))
| [
"=subalakshmicv@gmail.com"
] | =subalakshmicv@gmail.com |
e985e7b3fffd47e584ad67ba886f81c66f322d7a | 0fbca6a8b6458b78bc3521679797fc1b160e3c82 | /furnace_pb2_grpc.py | 499829c0cad359595fafc4ab957822b2c7e8931d | [] | no_license | go-furnace/python-plugin | 6fbbe469ffca12c491885b1245524b0e430cf5e0 | c3eaf254b06d5aae45c931bb5df31b004b6bc6b4 | refs/heads/master | 2020-03-28T19:59:30.669048 | 2018-10-16T20:12:48 | 2018-10-16T20:12:48 | 149,028,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,140 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import furnace_pb2 as furnace__pb2
class PreCreateStub(object):
  """Client-side stub for the ``proto.PreCreate`` gRPC service.

  Generated by the gRPC Python protocol compiler; the source .proto file
  carried no documentation comment for this service.
  """
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: sends a Stack message, receives a Proceed reply.
    self.Execute = channel.unary_unary(
        '/proto.PreCreate/Execute',
        request_serializer=furnace__pb2.Stack.SerializeToString,
        response_deserializer=furnace__pb2.Proceed.FromString,
        )
class PreCreateServicer(object):
  """Server-side base class for the ``proto.PreCreate`` service.

  Subclasses override ``Execute``; generated by the gRPC Python protocol
  compiler (no documentation comment in the .proto file).
  """
  pass

  def Execute(self, request, context):
    """Handle the Execute RPC; implementations must override this."""
    pass
    # Default behaviour: report UNIMPLEMENTED to the client, then raise.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_PreCreateServicer_to_server(servicer, server):
  """Register a PreCreate servicer's RPC handlers with a grpc server."""
  rpc_method_handlers = {
      'Execute': grpc.unary_unary_rpc_method_handler(
          servicer.Execute,
          request_deserializer=furnace__pb2.Stack.FromString,
          response_serializer=furnace__pb2.Proceed.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.PreCreate', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class PostCreateStub(object):
  """Client-side stub for the ``proto.PostCreate`` gRPC service.

  Generated by the gRPC Python protocol compiler; the source .proto file
  carried no documentation comment for this service.
  """
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: sends a Stack message, receives an Empty reply.
    self.Execute = channel.unary_unary(
        '/proto.PostCreate/Execute',
        request_serializer=furnace__pb2.Stack.SerializeToString,
        response_deserializer=furnace__pb2.Empty.FromString,
        )
class PostCreateServicer(object):
  """Server-side base class for the ``proto.PostCreate`` service.

  Subclasses override ``Execute``; generated by the gRPC Python protocol
  compiler (no documentation comment in the .proto file).
  """
  pass

  def Execute(self, request, context):
    """Handle the Execute RPC; implementations must override this."""
    pass
    # Default behaviour: report UNIMPLEMENTED to the client, then raise.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_PostCreateServicer_to_server(servicer, server):
  """Register a PostCreate servicer's RPC handlers with a grpc server."""
  rpc_method_handlers = {
      'Execute': grpc.unary_unary_rpc_method_handler(
          servicer.Execute,
          request_deserializer=furnace__pb2.Stack.FromString,
          response_serializer=furnace__pb2.Empty.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.PostCreate', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class PreDeleteStub(object):
  """Client-side stub for the ``proto.PreDelete`` gRPC service.

  Generated by the gRPC Python protocol compiler; the source .proto file
  carried no documentation comment for this service.
  """
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: sends a Stack message, receives a Proceed reply.
    self.Execute = channel.unary_unary(
        '/proto.PreDelete/Execute',
        request_serializer=furnace__pb2.Stack.SerializeToString,
        response_deserializer=furnace__pb2.Proceed.FromString,
        )
class PreDeleteServicer(object):
  """Server-side base class for the ``proto.PreDelete`` service.

  Subclasses override ``Execute``; generated by the gRPC Python protocol
  compiler (no documentation comment in the .proto file).
  """
  pass

  def Execute(self, request, context):
    """Handle the Execute RPC; implementations must override this."""
    pass
    # Default behaviour: report UNIMPLEMENTED to the client, then raise.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_PreDeleteServicer_to_server(servicer, server):
  """Register a PreDelete servicer's RPC handlers with a grpc server."""
  rpc_method_handlers = {
      'Execute': grpc.unary_unary_rpc_method_handler(
          servicer.Execute,
          request_deserializer=furnace__pb2.Stack.FromString,
          response_serializer=furnace__pb2.Proceed.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.PreDelete', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
class PostDeleteStub(object):
  """Client-side stub for the ``proto.PostDelete`` gRPC service.

  Generated by the gRPC Python protocol compiler; the source .proto file
  carried no documentation comment for this service.
  """
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Unary-unary RPC: sends a Stack message, receives an Empty reply.
    self.Execute = channel.unary_unary(
        '/proto.PostDelete/Execute',
        request_serializer=furnace__pb2.Stack.SerializeToString,
        response_deserializer=furnace__pb2.Empty.FromString,
        )
class PostDeleteServicer(object):
  """Server-side base class for the ``proto.PostDelete`` service.

  Subclasses override ``Execute``; generated by the gRPC Python protocol
  compiler (no documentation comment in the .proto file).
  """
  pass

  def Execute(self, request, context):
    """Handle the Execute RPC; implementations must override this."""
    pass
    # Default behaviour: report UNIMPLEMENTED to the client, then raise.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_PostDeleteServicer_to_server(servicer, server):
  """Register a PostDelete servicer's RPC handlers with a grpc server."""
  rpc_method_handlers = {
      'Execute': grpc.unary_unary_rpc_method_handler(
          servicer.Execute,
          request_deserializer=furnace__pb2.Stack.FromString,
          response_serializer=furnace__pb2.Empty.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'proto.PostDelete', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| [
"skarlso777@gmail.com"
] | skarlso777@gmail.com |
7d76fb490c6658151c003119e291d6865091f624 | 1e3b7d8f62099cfc412e80672f95ea809a4adf04 | /Taurus/script/velocityfield0.py | b6b6be141eec2940b8390088a03e2033e9f16d7a | [] | no_license | qianlivan/Catalog | 945e86799a37159c88c5dc6325c365ed5e27d064 | 1f2f152c19a77b223d5aac351de4e34f9636d34d | refs/heads/master | 2021-01-11T20:44:15.500483 | 2017-01-20T04:50:25 | 2017-01-20T04:50:25 | 79,174,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | import pyfits
from pylab import *
import math
import os,sys
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import mpl_toolkits
from matplotlib.patches import Ellipse
# NOTE(review): this script uses Python 2 syntax (print statement below).
# --- Read the data cube and its WCS header keywords -------------------
# Axis 1 = RA (x), axis 2 = Dec (y), axis 3 = velocity (z).
hdulist = pyfits.open('t13_new.fits')
image = hdulist[0].data  # NOTE(review): appears unused below -- confirm
nx = hdulist[0].header['naxis1']
ny = hdulist[0].header['naxis2']
nz = hdulist[0].header['naxis3']
# Reference value / pixel increment / reference pixel for each axis.
crvalx = hdulist[0].header['crval1']
cdeltax = hdulist[0].header['cdelt1']
crpixx = hdulist[0].header['crpix1']
crvaly = hdulist[0].header['crval2']
cdeltay = hdulist[0].header['cdelt2']
crpixy = hdulist[0].header['crpix2']
crvalz = hdulist[0].header['crval3']
cdeltaz = hdulist[0].header['cdelt3']
crpixz = hdulist[0].header['crpix3']
# World coordinates along RA and Dec axes.
# NOTE(review): x and y are not used later in this script.
x = np.arange(-crpixx*cdeltax+crvalx,(nx-1-crpixx)*cdeltax+crvalx,cdeltax)
y = np.arange(-crpixy*cdeltay+crvaly,(ny-1-crpixy)*cdeltay+crvaly,cdeltay)
# --- Assemble the velocity field from partial results -----------------
# Each .npy file covers the pixel range named in the file; summing them
# reconstructs the full ny-by-nx map.  NOTE(review): assumes each partial
# array is full-size (ny, nx) with zeros outside its range -- confirm
# against the script that produced these files.
vfield=np.zeros([ny,nx])
vfieldtemp=np.load('velocity0_0_499.npy')
vfield=vfield+vfieldtemp
vfieldtemp=np.load('velocity0_500_999.npy')
vfield=vfield+vfieldtemp
vfieldtemp=np.load('velocity0_1000_1499.npy')
vfield=vfield+vfieldtemp
vfieldtemp=np.load('velocity0_1500_2068.npy')
vfield=vfield+vfieldtemp
# Report the last chunk's dimensions as a sanity check (Python 2 print).
print size(vfieldtemp[0,:]),size(vfieldtemp[:,0])
vfield[vfield<0]=0  # clip negative values to zero
#vfield[vfield>12.0]=0
# --- Write the assembled field out as a FITS image --------------------
# Remove any previous output first; writeto fails if the file exists.
os.system('rm -f velocity0.fits')
hduout=pyfits.PrimaryHDU(vfield)
hdulistout=pyfits.HDUList([hduout])
hdulistout.writeto('velocity0.fits')
# --- Plot the velocity field and save EPS/PNG copies ------------------
ax = plt.subplot(111)
#im = plt.imshow(vfield, cmap=cm.gist_heat
#im = plt.imshow(vfield, cmap=cm.rainbow
im = plt.imshow(vfield, cmap=cm.spectral
                ,origin='lower', aspect='equal'
                ,interpolation='none')
xlabel('RA')
ylabel('Dec')
plt.colorbar(im,orientation='vertical')
savefig('velocityfield0.eps')
savefig('velocityfield0.png')
plt.show()
"lqian@nao.cas.cn"
] | lqian@nao.cas.cn |
b675902d1cbee77e803006d28e864e9893d6c010 | 46666c91b55311dad15b59e805cf42ea6a15d970 | /demo.py | 382feb4475d0ac133fab511c97e8ec2851109765 | [] | no_license | zyjImmortal/LeetCode | a411756481c4d7c3d08a475cc32cde637ae8c504 | 33b1c6384f67527bba5499c013ccddfed92809eb | refs/heads/master | 2020-03-22T19:30:24.388741 | 2018-12-01T13:07:48 | 2018-12-01T13:07:48 | 140,533,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # -*- coding: utf-8 -*-
# @Time : 2018/7/24 上午10:20
# @Author : zhouyajun
import os
path = '/Users/mac/PycharmProjects/LeetCode'
def print_directory_contents(path):
for child in os.listdir(path):
print("===="+child)
child_path = os.path.join(path, child)
if os.path.isdir(child_path):
print_directory_contents(child_path)
else:
print(child_path)
if __name__ == '__main__':
print_directory_contents(path) | [
"zhouyajun@didapinche.com"
] | zhouyajun@didapinche.com |
6511592e6810655b1bf0ef09338b91728067e6fe | 31e113e0baa03ccc7b58ecef8a1116ad6501e33a | /tensorflow_probability/python/experimental/mcmc/preconditioned_hmc_test.py | d13b3d5d494cf099bbc7be9fd385bbe34bc6cde2 | [
"Apache-2.0"
] | permissive | ksachdeva/probability | 9dbb771ec4da8094dea1c31d6cd5d514c2fe2c6f | dd24b7a6495e8801b7e7852aab16d6704993147c | refs/heads/master | 2021-07-19T12:40:09.133886 | 2021-02-09T16:29:17 | 2021-02-09T16:31:18 | 241,638,637 | 2 | 0 | Apache-2.0 | 2020-02-19T14:12:55 | 2020-02-19T14:12:54 | null | UTF-8 | Python | false | false | 28,713 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for preconditioned_hmc."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.internal import unnest
tfb = tfp.bijectors
tfd = tfp.distributions
tfde = tfp.experimental.distributions
# Allowed type of preconditioning schemes to use.
# See code for details.
PRECONDITION_SCHEMES = {
'direct', 'precision_factor', 'sqrtm', 'scale',
# `None` ==> No preconditioner. This is different than a "bad"
# preconditioner. We will be able to check asymptotics with "None".
'no_preconditioner',
}
RunHMCResults = collections.namedtuple('RunHMCResults', [
'draws',
'step_size',
'final_step_size',
'asymptotic_step_size',
'accept_prob',
'mean_accept_prob',
'min_ess',
'sample_mean',
'sample_cov',
'sample_var',
'mean_atol',
'cov_atol',
'var_rtol',
])
def _make_composite_tensor(dist):
"""Wrapper to make distributions of linear operators composite."""
if dist is None:
return dist
composite_dist = tfp.experimental.auto_composite_tensor(dist.__class__,
omit_kwargs='name')
p = dist.parameters
for k in p:
if isinstance(p[k], tfp.distributions.Distribution):
p[k] = _make_composite_tensor(p[k])
elif isinstance(p[k], tf.linalg.LinearOperator):
composite_linop = tfp.experimental.auto_composite_tensor(p[k].__class__)
p[k] = composite_linop(**p[k].parameters)
ac_dist = composite_dist(**p)
return ac_dist
@test_util.test_graph_and_eager_modes
class PreconditionedHMCCorrectnessTest(test_util.TestCase):
"""More careful tests that sampling/preconditioning is actually working."""
def _calculate_asymptotic_step_size(self, scales, prob_accept):
"""Calculate the (asymptotic) expected step size for given scales/P[accept].
The distribution should be a multivariate Gaussian, and the approximation is
appropriate in high dimensions when the spectrum is polynomially decreasing.
For details, see [1], equations (3.1, 3.2).
Args:
scales: Tensor with the square roots of the eigenvalues of the
covariance matrix.
prob_accept: Average acceptance probability.
Returns:
step_size: Float of approximate step size to achieve the target acceptance
rate.
#### References
[1]: Langmore, Ian, Michael Dikovsky, Scott Geraedts, Peter Norgaard, and
Rob Von Behren. 2019. “A Condition Number for Hamiltonian Monte Carlo."
http://arxiv.org/abs/1905.09813.
"""
inv_nu = tf.reduce_sum((1. / scales) ** 4, axis=-1) ** -0.25
step_size = (inv_nu *
(2**1.75) *
tf.sqrt(tfd.Normal(0., 1.).quantile(1 - prob_accept / 2.)))
return step_size
def _run_hmc_with_step_size(
self,
target_mvn,
precondition_scheme,
target_accept=0.75,
num_results=2000,
num_adaptation_steps=20,
):
"""Run HMC with step_size adaptation, and return RunHMCResults."""
assert precondition_scheme in PRECONDITION_SCHEMES
dims = target_mvn.event_shape[0]
target_cov = target_mvn.covariance()
cov_linop = tf.linalg.LinearOperatorFullMatrix(
target_cov,
is_self_adjoint=True,
is_positive_definite=True)
if precondition_scheme == 'no_preconditioner':
momentum_distribution = None
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.sqrt(tf.linalg.eigvalsh(target_cov))
elif precondition_scheme == 'direct':
momentum_distribution = tfd.MultivariateNormalLinearOperator(
# The covariance of momentum is inv(covariance of position), and we
# parameterize distributions by a square root of the covariance.
scale=cov_linop.inverse().cholesky(),
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
elif precondition_scheme == 'precision_factor':
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# The precision of momentum is the covariance of position.
# The "factor" is the cholesky factor.
precision_factor=cov_linop.cholesky(),
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
elif precondition_scheme == 'sqrtm':
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# The symmetric square root is a perfectly valid "factor".
precision_factor=tf.linalg.LinearOperatorFullMatrix(
tf.linalg.sqrtm(target_cov)),
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
elif precondition_scheme == 'scale':
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# Nothing wrong with using "scale", since the scale should be the
# same as cov_linop.cholesky().
precision_factor=target_mvn.scale,
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
else:
raise RuntimeError(
'Unhandled precondition_scheme: {}'.format(precondition_scheme))
momentum_distribution = _make_composite_tensor(momentum_distribution)
# Asyptotic step size, assuming P[accept] = target_accept.
expected_step = self._calculate_asymptotic_step_size(
scales=internal_scales,
prob_accept=target_accept,
)
# Initialize step size to something close to the expected required step
# size. This helps reduce the need for a long burn-in. Don't use the
# expected step size exactly, since that would be cheating.
initial_step_size = expected_step / 2.345
# Set num_leapfrog_steps so that we get decent ESS.
max_internal_scale = tf.reduce_max(internal_scales)
num_leapfrog_steps = tf.minimum(
tf.cast(
tf.math.ceil(1.5 * max_internal_scale / expected_step),
dtype=tf.int32), 30)
hmc_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=target_mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=initial_step_size,
num_leapfrog_steps=num_leapfrog_steps),
num_adaptation_steps=num_adaptation_steps,
target_accept_prob=target_accept)
def trace_fn(_, pkr):
results = pkr.inner_results
return {
'accept_prob':
tf.exp(tf.minimum(0., results.log_accept_ratio)),
'step_size':
results.accepted_results.step_size,
}
@tf.function
def do_run_run_run():
"""Do a run, return RunHMCResults."""
states, trace = tfp.mcmc.sample_chain(
num_results,
current_state=tf.identity(target_mvn.sample(seed=0)),
kernel=hmc_kernel,
num_burnin_steps=num_adaptation_steps,
seed=test_util.test_seed(),
trace_fn=trace_fn)
# If we had some number of chain dimensions, we would change sample_axis.
sample_axis = 0
sample_cov = tfp.stats.covariance(states, sample_axis=sample_axis)
max_variance = tf.reduce_max(tf.linalg.diag_part(sample_cov))
max_stddev = tf.sqrt(max_variance)
min_ess = tf.reduce_min(tfp.mcmc.effective_sample_size(states))
mean_accept_prob = tf.reduce_mean(trace['accept_prob'])
# Asymptotic step size given that P[accept] = mean_accept_prob.
asymptotic_step_size = self._calculate_asymptotic_step_size(
scales=internal_scales,
prob_accept=mean_accept_prob,
)
return RunHMCResults(
draws=states,
step_size=trace['step_size'],
final_step_size=trace['step_size'][-1],
asymptotic_step_size=asymptotic_step_size,
accept_prob=trace['accept_prob'],
mean_accept_prob=mean_accept_prob,
min_ess=tf.reduce_min(tfp.mcmc.effective_sample_size(states)),
sample_mean=tf.reduce_mean(states, axis=sample_axis),
sample_cov=sample_cov,
sample_var=tf.linalg.diag_part(sample_cov),
# Standard error in variance estimation is related to standard
# deviation of variance estimates. For a Normal, this is just Sqrt(2)
# times variance divided by sqrt sample size (or so my old notes say).
# So a relative tolerance is useful.
# Add in a factor of 5 as a buffer.
var_rtol=5 * tf.sqrt(2.) / tf.sqrt(min_ess),
# For covariance matrix estimates, there can be terms that have
# expectation = 0 (e.g. off diagonal entries). So the above doesn't
# hold. So use an atol.
cov_atol=5 * max_variance / tf.sqrt(min_ess),
# Standard error in mean estimation is stddev divided by sqrt
# sample size. This is an absolute tolerance.
# Add in a factor of 5 as a buffer.
mean_atol=5 * max_stddev / tf.sqrt(min_ess),
)
# Evaluate now, to ensure that states/accept_prob/etc... all match up with
# the same graph evaluation. This is a gotcha about TFP MCMC in graph mode.
return self.evaluate(do_run_run_run())
def _check_correctness_of_moments_and_preconditioning(
self,
target_mvn,
num_results,
precondition_scheme,
check_step_size_asymptotics=True,
asymptotic_step_size_rtol=0.2,
):
"""Test that step size adaptation finds the theoretical optimal step size.
See _caclulate_expected_step_size for formula details, but roughly, for a
high dimensional Gaussian posterior, we can calculate the approximate step
size to achieve a given target accept rate. For such a posterior,
`PreconditionedHMC` mimics the dynamics of sampling from an isotropic
standard normal distribution, and so should adapt to the step size where
the scales are all ones.
In the example below, `expected_step` is around 0.00002, so there is
significantly different behavior when conditioning.
Args:
target_mvn: Multivariate normal instance to sample from.
num_results: Number of samples to collect (post burn-in).
precondition_scheme: String telling how to do preconditioning.
Should be in PRECONDITION_SCHEMES.
check_step_size_asymptotics: Boolean telling whether to check that the
step size and P[accept] match up with expected values. This checks
that the "internal/implicit" sampling distribution is as expected. E.g.
when preconditioning, we expect the internal distribution to be a
standard Normal. When not preconditioning we expect it to be the target.
asymptotic_step_size_rtol: rtol for the asymptotic step size test.
The "nastier" spectra (with a small number of tiny eigenvalues) often
require larger tolerance. About 10% rtol is what we can expect.
20% is the default for safety. When a "bad preconditioner" is used,
these two are off by 100% or more (but no guarantee, since luck may
prevail).
Returns:
RunHMCResults
"""
results = self._run_hmc_with_step_size(
target_mvn, precondition_scheme=precondition_scheme)
if check_step_size_asymptotics:
self.assertAllClose(
results.final_step_size,
results.asymptotic_step_size,
rtol=asymptotic_step_size_rtol)
self.assertAllClose(
results.sample_mean, target_mvn.mean(), atol=results.mean_atol)
self.assertAllClose(
results.sample_var, target_mvn.variance(), rtol=results.var_rtol)
self.assertAllClose(
results.sample_cov, target_mvn.covariance(), atol=results.cov_atol)
return results
@parameterized.named_parameters(
dict(testcase_name='_' + str(scheme), precondition_scheme=scheme)
for scheme in PRECONDITION_SCHEMES)
def test_correctness_with_2d_mvn_tril(self, precondition_scheme):
# Low dimensional test to help people who want to step through and debug.
target_mvn = tfd.MultivariateNormalTriL(
loc=tf.constant([0., 0.]),
scale_tril=[[1., 0.], [0.5, 2.]],
)
self._check_correctness_of_moments_and_preconditioning(
target_mvn,
# Lots of results, to test tight tolerance.
# We're using a small dims here, so this isn't a big deal.
num_results=5000,
precondition_scheme=precondition_scheme,
# We're in such low dimensions that we don't expect asymptotics to work.
check_step_size_asymptotics=False)
@parameterized.named_parameters(
dict(testcase_name='_' + str(scheme), precondition_scheme=scheme)
for scheme in PRECONDITION_SCHEMES)
def test_correctness_with_200d_mvn_tril(self, precondition_scheme):
# This is an almost complete check of the Gaussian case.
dims = 200
scale_wishart = tfd.WishartLinearOperator(
# Important that df is just slightly bigger than dims. This makes the
# scale_wishart ill condtioned. The result is that tests fail if we do
# not handle transposes correctly.
df=1.1 * dims,
scale=tf.linalg.LinearOperatorIdentity(dims),
input_output_cholesky=True,
name='wishart_for_samples',
)
# evaluate right here to avoid working with a random target_mvn in graph
# mode....that would cause issues, since we read off expected statistics
# from looking at the mvn properties, so it would be bad if these properties
# changed with every graph eval.
scale_tril = self.evaluate(scale_wishart.sample(seed=test_util.test_seed()))
target_mvn = tfd.MultivariateNormalTriL(
# Non-trivial "loc" ensures we do not rely on being centered at 0.
loc=tf.range(0., dims),
scale_tril=scale_tril,
)
self._check_correctness_of_moments_and_preconditioning(
target_mvn,
# Lots of results, to test tight tolerance.
num_results=3000,
precondition_scheme=precondition_scheme,
asymptotic_step_size_rtol=(
0.5 if precondition_scheme == 'no_preconditioner' else 0.25),
)
def test_sets_kinetic_energy(self):
dist = tfd.MultivariateNormalDiag(scale_diag=tf.constant([0.1, 10.]))
step_size = 0.1
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=dist.log_prob,
step_size=step_size,
num_leapfrog_steps=1,
store_parameters_in_results=True)
init_state = tf.constant([0.1, 0.1])
kr = kernel.bootstrap_results(init_state)
# Manually set the momentum distribution.
kr = unnest.replace_innermost(kr, momentum_distribution=dist)
# Take one leapfrog step using the kernel.
_, nkr = kernel.one_step(init_state, kr, seed=test_util.test_seed())
# Need to evaluate here for consistency in graph mode.
(momentum_parts,
target_grad_parts,
proposed_state,
final_momentum,
target_log_prob,
grads_target_log_prob) = self.evaluate([
nkr.proposed_results.initial_momentum,
nkr.accepted_results.grads_target_log_prob,
nkr.proposed_state,
nkr.proposed_results.final_momentum,
nkr.proposed_results.target_log_prob,
nkr.proposed_results.grads_target_log_prob])
# Take one leapfrog step manually.
leapfrog = tfp.mcmc.internal.leapfrog_integrator.SimpleLeapfrogIntegrator(
target_fn=dist.log_prob,
step_sizes=[step_size],
num_steps=1)
# Again, need to evaluate here for graph mode consistency.
(next_momentum,
next_state,
next_target_log_prob,
grads_next_target_log_prob) = self.evaluate(leapfrog(
momentum_parts=momentum_parts,
state_parts=[init_state],
target=dist.log_prob(init_state),
target_grad_parts=target_grad_parts,
kinetic_energy_fn=lambda x: -dist.log_prob(x)))
# Verify resulting states are the same
self.assertAllClose(proposed_state,
next_state[0])
self.assertAllClose(final_momentum,
next_momentum)
self.assertAllClose(target_log_prob,
next_target_log_prob)
self.assertAllClose(grads_target_log_prob,
grads_next_target_log_prob)
@test_util.test_all_tf_execution_regimes
@parameterized.named_parameters(
dict(testcase_name='_default', use_default=True),
dict(testcase_name='_explicit', use_default=False))
class PreconditionedHMCTest(test_util.TestCase):
def test_f64(self, use_default):
if use_default:
momentum_distribution = None
else:
momentum_distribution = tfp.experimental.as_composite(
tfd.Normal(0., tf.constant(.5, dtype=tf.float64)))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
lambda x: -x**2, step_size=.5, num_leapfrog_steps=2,
momentum_distribution=momentum_distribution)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
self.evaluate(tfp.mcmc.sample_chain(
1, kernel=kernel, current_state=tf.ones([], tf.float64),
num_burnin_steps=5, trace_fn=None))
# TODO(b/175787154): Enable this test
def DISABLED_test_f64_multichain(self, use_default):
if use_default:
momentum_distribution = None
else:
momentum_distribution = tfp.experimental.as_composite(
tfd.Normal(0., tf.constant(.5, dtype=tf.float64)))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
lambda x: -x**2, step_size=.5, num_leapfrog_steps=2,
momentum_distribution=momentum_distribution)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
nchains = 7
self.evaluate(tfp.mcmc.sample_chain(
1, kernel=kernel, current_state=tf.ones([nchains], tf.float64),
num_burnin_steps=5, trace_fn=None))
def test_diag(self, use_default):
"""Test that a diagonal multivariate normal can be effectively sampled from.
Note that the effective sample size is expected to be exactly 100: this is
because the step size is tuned well enough that a single HMC step takes
a point to nearly the antipodal point, which causes a negative lag 1
autocorrelation, and the effective sample size calculation cuts off when
the autocorrelation drops below zero.
Args:
use_default: bool, whether to use a custom momentum distribution, or
the default.
"""
mvn = tfd.MultivariateNormalDiag(
loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])
if use_default:
momentum_distribution = None
step_size = 0.1
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=mvn.scale,
)
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
110,
tf.zeros(3),
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[-100:],
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not use_default:
self.assertAllClose(ess, tf.fill([3], 100.))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
def test_tril(self, use_default):
if tf.executing_eagerly():
self.skipTest('b/169882656 Too many warnings are issued in eager logs')
cov = 0.9 * tf.ones([3, 3]) + 0.1 * tf.eye(3)
scale = tf.linalg.cholesky(cov)
mv_tril = tfd.MultivariateNormalTriL(loc=[1., 2., 3.],
scale_tril=scale)
if use_default:
momentum_distribution = None
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# TODO(b/170015229) Don't use the covariance as inverse scale,
# it is the wrong preconditioner.
precision_factor=tf.linalg.LinearOperatorFullMatrix(cov),
)
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mv_tril.log_prob,
momentum_distribution=momentum_distribution,
step_size=0.2,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
120,
tf.zeros(3),
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[-100:],
filter_threshold=0,
filter_beyond_positive_pairs=False)
# TODO(b/170015229): These and other tests like it, which assert ess is
# greater than some number, were all passing, even though the preconditioner
# was the wrong one. Why is that? A guess is that since there are *many*
# ways to have larger ess, these tests don't really test correctness.
# Perhaps remove all tests like these.
if not use_default:
self.assertAllClose(ess, tf.fill([3], 100.))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
def test_transform(self, use_default):
mvn = tfd.MultivariateNormalDiag(loc=[1., 2., 3.], scale_diag=[1., 1., 1.])
diag_variance = tf.constant([0.1, 1., 10.])
if use_default:
momentum_distribution = None
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.math.sqrt(diag_variance)))
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=0.3,
num_leapfrog_steps=10)
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(
hmc_kernel, bijector=tfb.Scale(tf.math.rsqrt(diag_variance)))
draws = tfp.mcmc.sample_chain(
110,
tf.zeros(3),
kernel=transformed_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[-100:],
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not use_default:
self.assertAllClose(ess, tf.fill([3], 100.))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
def test_multi_state_part(self, use_default):
mvn = tfd.JointDistributionSequential([
tfd.Normal(1., 0.1),
tfd.Normal(2., 1.),
tfd.Independent(tfd.Normal(3 * tf.ones([2, 3, 4]), 10.), 3)
])
if use_default:
momentum_distribution = None
step_size = 0.1
else:
reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
reshape_to_234 = tfp.bijectors.Reshape(event_shape_out=[2, 3, 4])
momentum_distribution = tfd.JointDistributionSequential([
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag([0.1]))),
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag([1.]))),
reshape_to_234(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([24], 10.))))
])
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
100, [0., 0., tf.zeros((2, 3, 4))],
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws,
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not use_default:
self.assertAllClose(
self.evaluate(ess),
[tf.constant(100.),
tf.constant(100.), 100. * tf.ones((2, 3, 4))])
else:
self.assertLess(
self.evaluate(
tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
50.)
def test_batched_state(self, use_default):
mvn = tfd.MultivariateNormalDiag(
loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])
batch_shape = [2, 4]
if use_default:
momentum_distribution = None
step_size = 0.1
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
tf.zeros((2, 4, 3)), precision_factor=mvn.scale)
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
110,
tf.zeros(batch_shape + [3]),
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[10:], cross_chain_dims=[1, 2],
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not use_default:
self.assertAllClose(self.evaluate(ess), 100 * 2. * 4. * tf.ones(3))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
def test_batches(self, use_default):
mvn = tfd.JointDistributionSequential(
[tfd.Normal(1., 0.1),
tfd.Normal(2., 1.),
tfd.Normal(3., 10.)])
n_chains = 10
if use_default:
momentum_distribution = None
step_size = 0.1
else:
reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
momentum_distribution = tfd.JointDistributionSequential([
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([n_chains, 1], 0.1)))),
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([n_chains, 1], 1.)))),
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([n_chains, 1], 10.)))),
])
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
100, [tf.zeros([n_chains]) for _ in range(3)],
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(
draws, cross_chain_dims=[1 for _ in draws],
filter_threshold=0, filter_beyond_positive_pairs=False)
if not use_default:
self.assertAllClose(self.evaluate(ess), 100 * n_chains * tf.ones(3))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
# Standard TensorFlow test entry point: discovers and runs the test cases.
if __name__ == '__main__':
  tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
65d30851b539ee325140e006a3d8b45ecb45b9d1 | 3dee458122b8abcf09e361f41cb5b40978bcda63 | /leetcode/python/Pascals_Triangle.py | 48adbc67cc6d3da7c554d3cdc5cb7f7e36755975 | [] | no_license | xuedagong/hello | ac39021e5c618a839e4068650a395373aec34dbc | 2e73b1a381f58e4e4f8d274ec5247ff20424dc24 | refs/heads/master | 2016-09-06T00:51:48.544894 | 2015-12-11T15:52:06 | 2015-12-11T15:52:06 | 41,428,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | #coding=utf-8
'''
Given numRows, generate the first numRows of Pascal's triangle.
For example, given numRows = 5,
Return
[
[1],
[1,1],
[1,2,1],
[1,3,3,1],
[1,4,6,4,1]
]
'''
class Solution(object):
    def generate(self, numRows):
        """Return the first numRows rows of Pascal's triangle.

        :type numRows: int
        :rtype: List[List[int]]
        """
        triangle = []
        prev_row = []
        # `range` instead of `xrange`: xrange is Python-2 only and raises
        # NameError on Python 3; range behaves identically here.
        for _ in range(numRows):
            row = self.get_one_list(prev_row)
            triangle.append(row)
            prev_row = row
        return triangle

    def get_one_list(self, last_lst):
        """Build the next row of the triangle from the previous row.

        An empty previous row yields the apex row [1].
        """
        if not last_lst:
            return [1]
        # Interior entries are sums of horizontally adjacent pairs; the
        # two edge entries are always 1.
        return [1] + [a + b for a, b in zip(last_lst, last_lst[1:])] + [1]
if __name__ == '__main__':
    # print() works on both Python 2 and 3; the bare `print` statement was
    # a SyntaxError under Python 3.
    print(Solution().generate(0))
| [
"xue_dagong@sina.com"
] | xue_dagong@sina.com |
e1059ae6e9b86f602d1bc6205a6ed704ffdc4962 | 5845ee6d82d9f691e846360fa267b9cca6829d99 | /supervised_learning/0x0F-word_embeddings/0-bag_of_words.py | 637623c05195091bb4a31ba366e5d15fe022ab76 | [] | no_license | jlassi1/holbertonschool-machine_learning | 6e8c11ebaf2fd57e101bd0b20b7d83358cc15374 | d45e18bcbe1898a1585e4b7b61f3a7af9f00e787 | refs/heads/main | 2023-07-02T20:25:52.216926 | 2021-08-11T14:19:49 | 2021-08-11T14:19:49 | 317,224,593 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!/usr/bin/env python3
""" 0. Bag Of Words """
from sklearn.feature_extraction.text import CountVectorizer
def bag_of_words(sentences, vocab=None):
    """Create a bag-of-words embedding matrix for the given sentences.

    Returns a (dense count matrix, feature name list) pair.
    """
    vec = CountVectorizer(vocabulary=vocab)
    counts = vec.fit_transform(sentences)
    return counts.toarray(), vec.get_feature_names()
| [
"khawlajlassi1990@gmail.com"
] | khawlajlassi1990@gmail.com |
b198a7d3151111774b4e4c73c66f16b200b02e69 | cad54a387f0fbeadbb195b186fa2fdf0816eafcd | /src/lars.py | 77c7d47c09e5fbc9fb29ba7e09dd728d7f17c984 | [] | no_license | diegoirra/San-Francisco-Biking-Machine-Learning | 0583675da61441f1df7ea62d856c0efe609cda2a | 074804eb3880aac4e0cbcdc623e4e9f29c8d7491 | refs/heads/master | 2020-05-31T15:15:35.353236 | 2017-06-22T20:04:56 | 2017-06-22T20:04:56 | 94,035,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | from sklearn.linear_model import Lars
from my_machine_learning import train_model, make_prediction
import os
# Work from the repository root so the shared helpers can resolve relative
# data paths (presumably this script lives one level down — TODO confirm).
os.chdir('..')
# Least Angle Regression, keyed by name for the shared train/predict helpers.
model = Lars()
model_name = 'lars'
# NOTE: Python 2 script (print statements, raw_input below).
print "EXECUTING: "+ model_name
# train_model returns the fitted model plus a held-out test split.
# NOTE(review): X_test/y_test are never used afterwards.
model, X_test, y_test = train_model(model, model_name, filtered=False)
if raw_input('Training done. Make prediction? [y/n]: ') == 'y':
    make_prediction(model, model_name)
    print 'Output generated.'
else:
    print 'No output generated'
"dgirra@hotmail.com"
] | dgirra@hotmail.com |
24f3226e98104542eb3275543b7427e3cf3958dc | bfe4a60a111409a2db4ff7ba2f21fff7c94d920e | /alien_invasion/settings.py | da96f9ec8b5daa9a4a7ea1a7c80083b9367d68f2 | [] | no_license | bindas1/alien_invasion | 086ab3bc9d9cee820fc6bae71c6c0b2fdd931c63 | b7e3173c36db277381daf6027d02e25306e25573 | refs/heads/master | 2022-04-07T03:20:01.399915 | 2020-02-27T11:17:45 | 2020-02-27T11:17:45 | 155,073,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | class Settings():
def __init__(self):
"""Initialize the game's settings."""
# Screen settings
self.screen_width = 1024
self.screen_height = 640
self.bg_color = (230, 230, 230)
# Ship settings
self.ship_speed = 5.0
self.ship_limit = 3
# Bullet settings
self.bullet_speed_factor = 8
self.bullet_width = 5
self.bullet_height = 15
self.bullet_color = (218, 165, 32)
self.bullets_allowed = 5
# Alien settings
self.alien_speed_factor = 2
self.fleet_drop_speed = 10
# fleet_direction of 1 represents right; -1 represents left.
self.fleet_direction = 1
| [
"noreply@github.com"
] | noreply@github.com |
2198faf9b1e2103c7296e6c8f8e6872747b322eb | 19580fbbbe58e4290e2a0d1792515abe3417f336 | /2021/aoc/day08/part1.py | 3aadac8a389a49f14894caf25f04653586aeed90 | [
"MIT"
] | permissive | GarmOfGnipahellir/advent-of-code | f95b17a9778faf8a3467408fdc2f88275d133dec | f6d0efca4c2f2b2820ac38a9be6a91875ef3930d | refs/heads/master | 2023-01-12T10:34:35.170598 | 2022-12-25T11:30:22 | 2022-12-25T11:30:22 | 229,446,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | # Advent of Code - Day 8 - Part One
def result(input):
input = [(*[spl.split() for spl in ln.split("|")],) for ln in input]
count = 0
for entry in input:
for output in entry[1]:
loutput = len(output)
if loutput == 2 or loutput == 4 or loutput == 3 or loutput == 7:
count += 1
return count
| [
"melsom.henrik@gmail.com"
] | melsom.henrik@gmail.com |
81e799321bbe281000076b4713feae9f8f8cea2c | a751df876004c6ea0b1d35222f25128ee55dba6e | /commands/circle.py | d1afc737cf44691e3428d26bdcfdc50f32057bd2 | [] | no_license | brennand97/DependencyManager | 78ad0bcb222f3566bcc4ba49462872753e179879 | 95fdacc60beac8ebe8d867deb0c9010976cd87cf | refs/heads/master | 2020-12-02T16:42:23.967021 | 2017-08-23T19:57:44 | 2017-08-23T19:57:44 | 96,572,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py |
__help__ = " Name: Circluar\n" \
" Syntax: circle [-df | --delete-forward] [-db | --delete-backward]\n" \
" Description: Displays/Deletes existing circler references"
__arg_list__ = {
"-df" : 0,
"--delete-forward" : 0,
"-db" : 0,
"--delete-backward" : 0
}
def cmd(data, arg_lst):
    """Print circular dependency chains, or delete them when a flag is given."""
    if not arg_lst:
        # No flags: report every circular reference path.
        for chain in data.get_circular_dependencies():
            pretty = " <- ".join(chain)
            print("Circular path found: {}".format(pretty))
        return
    flag = arg_lst[0][0]
    if flag in ("-df", "--delete-forward"):
        data.remove_circular_dependencies(True)
    elif flag in ("-db", "--delete-backward"):
        data.remove_circular_dependencies(False)
| [
"brennand97@gmail.com"
] | brennand97@gmail.com |
19306f52a11478a632131ee63f1ab9e692f9c075 | 544c4d9822ca42764a60d55b804e8eaabc345cab | /account/roleop.py | 677d752b3e92cca441daf074bcf692079a62b00b | [] | no_license | lxguidu/parkhero | 24a3cf28ed3f9ed594137080c36bc317453f66ba | b5f5e2d13ac46812666c0e9d20bfd35b335a4994 | refs/heads/master | 2021-01-12T14:49:29.404775 | 2016-10-27T10:57:45 | 2016-10-27T10:57:45 | 72,099,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,623 | py | #-*- coding: utf-8 -*-
import logging
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User, Group, Permission
from rest_framework.authentication import (
SessionAuthentication, BasicAuthentication,
get_authorization_header
)
from rest_framework.decorators import (
authentication_classes, permission_classes
)
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from parkhero.status_code import STATUS_CODE
logger = logging.getLogger(__name__)
# pre-defined roles
ROLES = ['operator_parkinglot', 'operator_group_user', 'operator_bill',
'operator_end_user', 'operator_app', 'group_user', 'default']
# 角色的增删改查,改主要是针对权限,并且是模块级的权限,而不是对象级别的,
# 因为对象级别的需求(group_user)在operator里面已经进行了处理,其它需求的
# 级别都是模块级,所以此处只进行模块级的处理
# Role create/delete/update/query. "Update" only manages module-level
# permissions: object-level permission needs (group_user) are already
# handled in the operator module, so only module-level permissions are
# processed here.
class Role_Op(APIView):

    def auth_check(self, request):
        """Verify the caller is logged in as the 'sysadmin' account.

        Returns (0, None) on success, (1, detail) when not logged in,
        or (2, detail) when logged in but not the administrator.
        """
        if not request.user.is_authenticated():
            detail = {'detail': 'Please login.'}
            detail['status'] = STATUS_CODE['need_login']
            return 1, detail
        if str(request.user) != 'sysadmin':
            detail = {'detail': 'Please login as administrator'}
            detail['status'] = STATUS_CODE['non_administrator']
            return 2, detail
        return 0, None

    # remove a role
    @permission_classes((IsAuthenticated,))
    def delete(self, request, format=None):
        retval, ret_detail = self.auth_check(request)
        if retval != 0:
            return Response(ret_detail)
        groupId = request.data.get('groupId')
        if not groupId:
            detail = {'detail': 'Please provide the group id.'}
            detail['status'] = STATUS_CODE['lostparam']
            logger.warning(detail)
            return Response(detail)
        try:
            # Only the pre-defined roles may be deleted.
            specgroup = Group.objects.get(pk=groupId, name__in=ROLES)
            # Detach permissions before removing the group itself.
            specgroup.permissions.clear()
            specgroup.delete()
            detail = {'detail': 'successfully deleted group[%s]' % specgroup.name}
            detail['status'] = STATUS_CODE['success']
            return Response(detail)
        except Group.DoesNotExist:
            detail = {'detail': 'Please provide a valid group id[%s].' % groupId}
            detail['status'] = STATUS_CODE['non_such_role']
            logger.warning(detail)
            return Response(detail)
        except Exception as ex:
            detail = {'detail': 'Database error occur: %s.' % ex}
            detail['status'] = STATUS_CODE["database_err"]
            logger.warning(detail)
            return Response(detail)

    # query one role, or list all pre-defined roles when no id is given
    @permission_classes((IsAuthenticated,))
    def get(self, request, format=None):
        retval, ret_detail = self.auth_check(request)
        if retval != 0:
            return Response(ret_detail)
        groupId = request.data.get('groupId')
        if not groupId:
            try:
                groupinfo = [
                    {"groupId": item.id, "groupname": item.name}
                    for item in Group.objects.all() if item.name in ROLES
                ]
                detail = {'detail': 'successfully get all group info'}
                detail['status'] = STATUS_CODE['success']
                detail['groupinfo'] = groupinfo
                return Response(detail)
            except Exception as ex:
                detail = {'detail': 'Database error occur: %s.' % ex}
                detail['status'] = STATUS_CODE["database_err"]
                logger.warning(detail)
                return Response(detail)
        try:
            specgroup = Group.objects.get(pk=groupId)
            # BUGFIX: iterate Permission instances via .all(); the previous
            # .values() call yields dicts, which broke the attribute access
            # below, and the success message referenced an undefined name
            # (`groupid` instead of `groupId`).
            perminfos = [{
                'permid': permitem.id,
                'permname': permitem.codename,
                'permdesc': permitem.name,
            } for permitem in specgroup.permissions.all()]
            detail = {'detail': "successfully get group[%s]' info" % groupId}
            detail['perminfo'] = perminfos
            detail['status'] = STATUS_CODE['success']
            return Response(detail)
        except Group.DoesNotExist as ex:
            detail = {'detail': 'No Such group: %s.' % ex}
            detail['status'] = STATUS_CODE["non_such_role"]
            logger.warning(detail)
            return Response(detail)
        except Exception as ex:
            detail = {'detail': 'Database error occur: %s.' % ex}
            detail['status'] = STATUS_CODE["database_err"]
            logger.warning(detail)
            return Response(detail)

    # add a role
    @permission_classes((IsAuthenticated,))
    def post(self, request, format=None):
        retval, ret_detail = self.auth_check(request)
        if retval != 0:
            return Response(ret_detail)
        groupname = request.data.get('groupname')
        # Only the pre-defined role names may be created.
        if not groupname or groupname not in ROLES:
            detail = {'detail': 'Please provide a valid group name.'}
            detail['status'] = STATUS_CODE['lostparam']
            logger.warning(detail)
            return Response(detail)
        # Reject duplicates; Group.DoesNotExist means the name is free.
        try:
            Group.objects.get(name=groupname)
            detail = {'detail': 'Group name already existed.'}
            detail['status'] = STATUS_CODE['groupname_exists']
            return Response(detail)
        except Group.DoesNotExist:
            pass
        except Exception as ex:
            detail = {'detail': 'Database error occur: %s.' % ex}
            detail['status'] = STATUS_CODE["database_err"]
            return Response(detail)
        try:
            newgroup = Group()
            newgroup.name = groupname
            newgroup.save()
        except Exception as ex:
            logger.error(ex)
            detail = {'detail': '%s' % ex}
            detail['status'] = STATUS_CODE['database_err']
            return Response(detail)
        detail = {'detail': 'successfully added group[%s]' % groupname}
        detail['status'] = STATUS_CODE['success']
        return Response(detail)

    # update a role's (module-level) permissions
    @permission_classes((IsAuthenticated,))
    def put(self, request, format=None):
        # CONSISTENCY FIX: every other verb requires the administrator;
        # PUT previously skipped this check entirely.
        retval, ret_detail = self.auth_check(request)
        if retval != 0:
            return Response(ret_detail)
        data = request.data
        groupId = data.get('groupId')
        permIds = data.get('perms')
        # Validate before touching permIds[0]: the old order crashed with a
        # TypeError when 'perms' was missing.
        if not groupId or not permIds:
            detail = {'detail': 'Please provide a valid param.'}
            detail['status'] = STATUS_CODE['lostparam']
            logger.warning(detail)
            return Response(detail)
        # 'perms' arrives as "[1,2,3]" or "1,2,3"; normalize to an id list.
        # NOTE(review): assumes a string payload — confirm callers never
        # send a real JSON list here.
        if permIds[0] == '[':
            permIds = permIds[1:len(permIds) - 1]
        permIds = permIds.split(',')
        # Debug prints replaced with the module logger.
        logger.debug("put role: groupId=%s permIds=%s", groupId, permIds)
        try:
            specgroup = Group.objects.get(pk=groupId)
            for permitem in permIds:
                specgroup.permissions.add(permitem)
            detail = {'detail': 'successfully change group[%s]' % groupId}
            detail['status'] = STATUS_CODE['success']
            return Response(detail)
        except Group.DoesNotExist as ex:
            detail = {'detail': 'No Such group: %s.' % ex}
            detail['status'] = STATUS_CODE["non_such_role"]
            logger.warning(detail)
            return Response(detail)
        except Exception as ex:
            detail = {'detail': 'Database error occur: %s.' % ex}
            detail['status'] = STATUS_CODE["database_err"]
            logger.warning(detail)
            return Response(detail)
"root@work.linxg.com"
] | root@work.linxg.com |
e0496f50c98467811842743bdcac4c7f1dc14c9e | c424ffe3c31422e72810b4865f482d505d145e87 | /fliermailses/models.py | 7eaea73f99fb1b029fe3303c6f16d0ab41e0e949 | [
"BSD-2-Clause"
] | permissive | hdknr/fliermail-ses | d49724b7f1eb648a806e4301738db96a50e098ca | 91366535b1a0890b4766c09d70aee1ec5387f7f0 | refs/heads/master | 2020-06-19T04:57:02.261919 | 2018-03-15T05:18:16 | 2018-03-15T05:18:16 | 94,177,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import defs, methods, querysets
class Service(defs.Service, methods.Service):
    """An SES service record; fields/behavior come from defs/methods.Service."""
    class Meta:
        verbose_name = _('SES Service')
        verbose_name_plural = _('SES Service')
    def __str__(self):
        # `name` is defined on the defs.Service base.
        return self.name
class Source(defs.Source, methods.Source):
    """An SES source; `address` and other fields come from defs.Source."""
    # Owning service; nullable so a Source survives Service deletion
    # (SET_NULL) instead of cascading.
    service = models.ForeignKey(
        Service, verbose_name=_('Service'), help_text=_('Service Help'),
        on_delete=models.SET_NULL,
        null=True, blank=True, default=None, )
    class Meta:
        verbose_name = _('SES Source')
        verbose_name_plural = _('SES Source')
    def __str__(self):
        return "ses:{0}".format(self.address)
class Topic(defs.Topic):
    """An SNS topic associated with a Source; `topic` (a choices field,
    per get_topic_display below) comes from defs.Topic."""
    source = models.ForeignKey(
        Source, null=True, blank=True, default=None,
        on_delete=models.SET_NULL, )
    class Meta:
        verbose_name = _('SNS Topic')
        verbose_name_plural = _('SNS Topic')
        # At most one row per (source, topic) pair.
        unique_together = (('source', 'topic', ), )
    def __str__(self):
        # e.g. "ses:<address> <topic display label>"
        return u"{0} {1}".format(
            self.source.__str__(),
            self.get_topic_display())
class Notification(defs.Notification, methods.Notification):
    """A notification record linked to a Topic (nullable, SET_NULL)."""
    topic = models.ForeignKey(
        Topic, null=True, blank=True, default=None,
        on_delete=models.SET_NULL, )
    class Meta:
        verbose_name = _('Notification')
        verbose_name_plural = _('Notification')
    # Custom manager built from querysets.NotificationQuerySet.
    objects = querysets.NotificationQuerySet.as_manager()
class Certificate(defs.Certificate, methods.Certificate):
    """A certificate owned by a Service; deleted with it (CASCADE)."""
    service = models.ForeignKey(
        Service, on_delete=models.CASCADE, )
    class Meta:
        verbose_name = _('SES Certificate')
        verbose_name_plural = _('SES Certificate')
| [
"gmail@hdknr.com"
] | gmail@hdknr.com |
7749a20ff656dbbda22f40e4a465a5a5fa242f1f | d3e5c67e0fe89d25175f82c9eb2402f606411d25 | /mariadb_connection.py | 4998a2d489ec0e5354d196320a85a8b32390a492 | [] | no_license | hcorrea4/pythoncodes | eff42b82f38ad8e99cd8daa26c594cfeb22796d0 | 3f9b114f15a1f452858d8450e176621a6834c8ef | refs/heads/main | 2023-04-01T05:17:18.180147 | 2021-04-06T00:32:51 | 2021-04-06T00:32:51 | 352,216,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | #importar modulos sql
import mariadb
import sys

# Connect to MariaDB.
# SECURITY NOTE(review): credentials and host are hard-coded; load them from
# environment variables or a config file instead of committing them.
try:
    conexion = mariadb.connect(
        user="hefesto",
        password="hefesto",
        host="192.168.1.69",
        port=3306,
        database="decode_encode_db"
    )
except mariadb.Error as error:
    print(f"Error de Conexion con MariaDB: {error}")
    sys.exit(1)

# Cursor for executing statements.
cur = conexion.cursor()

# Insert one row into decoded_table (parameterized statement).
sql_insertar = "INSERT INTO decoded_table (decoded_passwd,original_passwd) VALUES (%s,%s)"
sql_datos = ("PE#$)#879", "Casa_123")
try:
    cur.execute(sql_insertar, sql_datos)
    # Persist the change.
    conexion.commit()
except mariadb.Error:
    # Roll back on database errors only; the previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and hid unrelated bugs.
    conexion.rollback()
else:
    # Only report success when the insert actually committed; previously
    # this message was printed even after a rollback.
    print("Datos ingresados correctamente")

# Show the rows currently stored in decoded_table.
cur.execute("SELECT id_decoded_passwd,decoded_passwd FROM decoded_table")
for (id_decoded_passwd, decoded_passwd) in cur:
    print(f"Id Constraseña desencriptada: {id_decoded_passwd}, Contraseña desencriptada: {decoded_passwd}")

# Close the connection.
conexion.close()
| [
"noreply@github.com"
] | noreply@github.com |
e872d8089a62b5d92696f6668390f4ab68945df9 | 6547d657706c041f2a87b0680936dd3d473ad328 | /httprunner/cli.py | f60004271687446d2bcfb3af3c86d5de03b91a41 | [
"Apache-2.0"
] | permissive | lixiaofeng1993/httprunner | 62c01f6b5adb8e3eded564947ac196938e3c88fb | 15c5d89605dc2d54fc624c3468be85eebcc8446e | refs/heads/master | 2020-07-26T09:18:35.310008 | 2019-10-21T16:03:50 | 2019-10-21T16:03:50 | 208,601,514 | 1 | 0 | Apache-2.0 | 2019-09-15T13:54:13 | 2019-09-15T13:54:13 | null | UTF-8 | Python | false | false | 6,813 | py | # encoding: utf-8
def main_hrun():
    """ API test: parse command line options and run commands.

    Entry point of the ``hrun`` console command: builds the CLI parser,
    services the informational/utility flags (each of which exits), then
    runs every given testcase path with HttpRunner and exits 0/1 for
    overall pass/fail.
    """
    # NOTE(review): imports are function-local, presumably to keep a plain
    # `import httprunner` cheap — confirm before hoisting to module level.
    import sys
    import argparse
    from httprunner.logger import color_print
    from httprunner import __description__, __version__
    from httprunner.api import HttpRunner
    from httprunner.compat import is_py2
    from httprunner.validator import validate_json_file
    from httprunner.utils import (create_scaffold, get_python2_retire_msg,
                                  prettify_json_file)
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testcase_paths', nargs='*',
        help="testcase file path")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument(
        '--report-template',
        help="specify report template path.")
    parser.add_argument(
        '--report-dir',
        help="specify report save directory.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--save-tests', action='store_true', default=False,
        help="Save loaded tests and parsed tests to JSON file.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testcase format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testcase format.")
    args = parser.parse_args()
    # Warn Python 2 users that support is being retired.
    if is_py2:
        color_print(get_python2_retire_msg(), "YELLOW")
    # Utility flags: each performs its task and exits without running tests.
    if args.version:
        color_print("{}".format(__version__), "GREEN")
        exit(0)
    if args.validate:
        validate_json_file(args.validate)
        exit(0)
    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)
    project_name = args.startproject
    if project_name:
        # Scaffold a new project skeleton, then exit.
        create_scaffold(project_name)
        exit(0)
    # Build the runner from CLI options and execute every testcase path.
    runner = HttpRunner(
        failfast=args.failfast,
        save_tests=args.save_tests,
        report_template=args.report_template,
        report_dir=args.report_dir,
        log_level=args.log_level,
        log_file=args.log_file
    )
    try:
        for path in args.testcase_paths:
            runner.run(path, dot_env_path=args.dot_env_path)
    except Exception:
        # Surface which stage failed before re-raising the original error.
        color_print("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(runner.exception_stage), "YELLOW")
        raise
    # Process exit code mirrors the overall test result.
    if runner.summary and runner.summary["success"]:
        sys.exit(0)
    else:
        sys.exit(1)
def main_locust():
    """ Performance test with locust: parse command line options and run commands.

    Wraps the ``locust`` CLI: rewrites sys.argv in place (locustfile path,
    optional --processes fan-out) and then delegates to locust's own main.
    """
    try:
        # monkey patch ssl at beginning to avoid RecursionError when running locust.
        from gevent import monkey; monkey.patch_ssl()
        import multiprocessing
        import sys
        from httprunner import logger
        from httprunner import locusts
    except ImportError:
        # locust (and gevent) are optional dependencies.
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        print(msg)
        exit(1)
    # Masquerade as the real locust executable for downstream parsing.
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        # No arguments at all: fall through to locust's help text.
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.start_locust_main()
        sys.exit(0)
    # set logging level (defaults to WARNING when -L/--loglevel is absent)
    if "-L" in sys.argv:
        loglevel_index = sys.argv.index('-L') + 1
    elif "--loglevel" in sys.argv:
        loglevel_index = sys.argv.index('--loglevel') + 1
    else:
        loglevel_index = None
    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
    else:
        # default
        loglevel = "WARNING"
    logger.setup_logger(loglevel)
    # get testcase file path (-f/--locustfile is mandatory)
    try:
        if "-f" in sys.argv:
            testcase_index = sys.argv.index('-f') + 1
        elif "--locustfile" in sys.argv:
            testcase_index = sys.argv.index('--locustfile') + 1
        else:
            testcase_index = None
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)
    # Swap the testcase path for a locustfile generated from it (presumably
    # parse_locustfile converts YAML/JSON testcases — see locusts module).
    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4
        """
        if "--no-web" in sys.argv:
            # --processes is rejected together with --no-web (see message).
            logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)
        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
            locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                processes_count = multiprocessing.cpu_count()
                logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
        # Drop the flag itself; locust proper does not understand it.
        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.start_locust_main()
if __name__ == "__main__":
    """ debugging mode
    """
    import sys
    import os
    # BUGFIX: need at least one extra argument (the sub-command) because it
    # is removed with pop(1) below. The old `len(sys.argv) == 0` test could
    # never be true, so a bare invocation crashed with IndexError.
    if len(sys.argv) < 2:
        exit(0)
    sys.path.insert(0, os.getcwd())
    cmd = sys.argv.pop(1)
    # Dispatch to the matching entry point; remaining argv is consumed there.
    if cmd in ["hrun", "httprunner", "ate"]:
        main_hrun()
    elif cmd in ["locust", "locusts"]:
        main_locust()
    else:
        from httprunner.logger import color_print
        color_print("Miss debugging type.", "RED")
        example = "\n".join([
            "e.g.",
            "python -m httprunner.cli hrun /path/to/testcase_file",
            "python -m httprunner.cli locusts -f /path/to/testcase_file"
        ])
        color_print(example, "yellow")
| [
"mail@debugtalk.com"
] | mail@debugtalk.com |
f8889bbc3c0079be40ed9a3f6a488ec763b9bdc7 | 82c73b70c2002f647bdc254125f0bdb18f0b79d2 | /hav-gclient-3.2_newton/LoginFrame.py | 4c2962aa59dbe33fe4c098fceb1ef2d4cb084ea8 | [
"Apache-2.0"
] | permissive | xuweiliang/Codelibrary | cfb5755ced54c65cacdb3e35ab2b98385f8d5f8e | 54e45b2daa205132c05b0ff5a2c3db7fca2853a7 | refs/heads/master | 2021-05-04T00:31:42.025238 | 2018-03-20T07:05:20 | 2018-03-20T07:05:20 | 71,852,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,772 | py | #!/usr/bin/env python
# coding=utf8
'''
Created on Jun 6, 2012
@author: gf
'''
import wx
import os
import threading
from time import sleep
import Setting
import SettingDialog
import Resource
import Session
import MainFrame
import Logger
import ProgressDialog
import ShutdownDialog
import Util
from Setting import FirstUser
from SendRequests import RestartDeviceRequests
CA_DOWNLOAD_CACHE=[]
# Module-level password cache (0 until assigned).
PASSWORD = 0
def PassWord():
    # Accessor for the module-level PASSWORD value.
    return PASSWORD
class LoginThread(threading.Thread):
    """Worker thread that performs the server login off the UI thread.
    Progress and completion are reported back to the owning progress
    dialog via wx.CallAfter (the thread-safe way to touch wx widgets).
    """
    def __init__(self, window, url, username, password):
        threading.Thread.__init__(self)
        # Endpoint URL; the caller builds it as 'http://<server>:5000/v2.0'.
        self.url = url
        self.username = username
        self.password = password
        # Progress dialog receiving Update/WorkFinished/Finish callbacks.
        self.window = window
        # Cooperative cancel flag; only honored before run() starts working.
        self.cancel = False
        # (ret, reason, detail) triple from Session.login, or None.
        self.ret = None
    def stop(self):
        # Request cancellation (best-effort; see self.cancel above).
        self.cancel = True
    def run(self):
        if self.cancel:
            return
        #wx.CallAfter(self.window.WorkFinished, u'下载根证书... 成功')
        wx.CallAfter(self.window.Update, 1, u'正在连接服务器 ...')
        self.ret = Session.login(self.url, self.username, self.password)
        # NOTE(review): the "success" message is posted unconditionally;
        # callers must inspect getReturnValue() for the real outcome.
        wx.CallAfter(self.window.WorkFinished, u'认证成功')
        wx.CallAfter(self.window.Finish)
    def getReturnValue(self):
        # (ret, reason, detail) from Session.login; None if run() never ran.
        return self.ret
class BackgroundPanel(wx.Panel):
    def __init__(self, parent, imagename):
        """Set up the login background panel and its child controls."""
        # NOTE(review): `imagename` is unused; the bitmap always comes
        # from Resource.ui_login.
        wx.Panel.__init__(self, parent, -1)
        self.width, self.height = wx.ScreenDC().GetSize()
        ##Resource.load(self.width, self.height)
        # Prefer the current display's geometry over the raw screen DC size.
        area = wx.Display().GetGeometry()
        self.width = area.GetWidth()
        self.height = area.GetHeight()
        self.bmp = Resource.ui_login
        # Custom background style so OnEraseBackground can paint the bitmap.
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
        self.Bind(wx.EVT_KEY_UP, self.onKeyup)
        self.InitControls()
        # Modi by wdr 20150601 start
        '''
        if Setting.getAuto().lower() != 'true' :
            print 'not auto'
            pass
        else :
            print 'auto'
            evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self..GetId())
            #self.autoLogin()
        #self.InitControls()
        #self.password.SetValue('');
        #self.password.SetFocus();
        '''
        # Modi by wdr 20150601 end
    def InitControls(self):
        """Create and position the login widgets.
        Coordinates were designed on a 1440x900 layout and are scaled to
        the actual display size via xradio/yradio.
        """
        xradio = self.width / 1440.0
        yradio = self.height / 900.0
        # User-name field, pre-filled with the last successful login.
        username = wx.TextCtrl(self, -1,
                               Setting.getLastLogin(),
                               style = wx.BORDER_NONE)
        username.SetPosition((int(xradio * 776),
                              int(yradio * 404)))
        username.SetSize((int(xradio * 176),
                          int(yradio * 28)))
        # Masked password field.
        password = wx.TextCtrl(self, -1, '', style = wx.BORDER_NONE|wx.PASSWORD)
        #if Setting.getSign().lower() == 'true' :
        #    password.SetValue(Setting.getPasswd())
        #password = wx.TextCtrl(self, -1, , style = wx.BORDER_NONE|wx.PASSWORD)
        password.SetPosition((int(xradio * 776),
                              int(yradio * 451)))
        password.SetSize((int(xradio * 178),
                          int(yradio * 28)))
        # "auto login" / "remember password" checkboxes, restored from Setting.
        self.auto = wx.CheckBox(self, -1, u'自动登录')
        self.auto.SetValue(Setting.getAuto().lower() == 'true')
        self.sign = wx.CheckBox(self, -1, u'保存密码')
        self.sign.SetValue(Setting.getSign().lower() == 'true')
        self.sign.SetPosition((int(xradio * 731),
                               int(yradio * 500)))
        self.auto.SetPosition((int(xradio * 879),
                               int(yradio * 500)))
        #self.auto.Enable(False)
        self.Bind(wx.EVT_CHECKBOX, self.OnSign, self.sign)
        self.Bind(wx.EVT_CHECKBOX, self.OnAuto, self.auto)
        self.sign.SetValue(Setting.getSign().lower() == 'true')
        # Login button; SetDefault makes Enter trigger it.
        btn_login = wx.BitmapButton(self, -1, Resource.btn_login,None)
        btn_login.SetPosition((int(xradio * 880),
                               int(yradio * 530)))
        btn_login.SetDefault()
        self.Bind(wx.EVT_BUTTON, self.OnLogin, btn_login)
        # btn_shutdown = wx.BitmapButton(self, -1, Resource.btn_shutdown,None)
        # btn_shutdown.SetPosition((int(xradio * 1405),
        #                           int(yradio * 865)))
        # btn_shutdown.SetSize((int(xradio * 36), int(yradio * 36)))
        # self.Bind(wx.EVT_BUTTON, self.OnShutdown, btn_shutdown)
        # Text shutdown button in the bottom-right corner.
        btn_shutdown = wx.Button(self, -1, u"关机", style=wx.NO_BORDER)
        btn_shutdown.SetPosition((int(xradio * 1385),
                                  int(yradio * 865)))
        btn_shutdown.SetSize((int(xradio * 60), int(yradio * 40)))
        self.Bind(wx.EVT_BUTTON, self.OnShutdown, btn_shutdown)
        # Focus whichever field still needs input.
        if username.GetValue() == '':
            username.SetFocus()
        else:
            password.SetFocus()
        self.username = username
        self.password = password
        # Restore the saved password and auto-login preference.
        if Setting.getSign().lower() == 'true':
            password.SetValue(Setting.getPasswd())
            self.auto.SetValue(Setting.getAuto().lower() == 'true')
        else:
            self.auto.SetValue(False)
        # Add by wdr 20150601 start
        # Auto-login: simulate a click on the login button via a posted event.
        if Setting.getAuto().lower() != 'true' :
            #print 'not auto'
            pass
        else :
            #print 'auto'
            evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, btn_login.GetId())
            wx.PostEvent(self, evt)
            #self.autoLogin()
        # Add by wdr 20150601 end
def OnSign(self, evt):
Setting.setSign("%s" % self.sign.GetValue())
if self.sign.GetValue() != True:
self.auto.SetValue(False)
Setting.setAuto("%s" % self.auto.GetValue())
Setting.setPasswd(self.password.GetValue())
Setting.save()
def OnAuto(self, evt):
#if self.sign.GetValue() == 'True':
# self.auto.Enable(True)
#else :
# self.auto.Enable(False)
if self.sign.GetValue() != True:
self.sign.SetValue(True)
Setting.setAuto("%s" % self.auto.GetValue())
Setting.setSign("%s" % self.sign.GetValue())
Setting.setPasswd(self.password.GetValue())
Setting.save()
#else:
# self.auto.SetValue( self.sign.GetValue() == True )
def OnEraseBackground(self, evt):
"""
Add a picture to the background
"""
dc = evt.GetDC()
if not dc:
dc = wx.ClientDC(self)
rect = self.GetUpdateRegion().GetBox()
dc.SetClippingRect(rect)
dc.Clear()
dc.DrawBitmap(self.bmp, 0, 0)
    def autoLogin(self):
        """Attempt an unattended login using the saved credentials.

        Only runs when both "save password" and "auto login" are enabled
        in Setting. Shows a modal progress dialog while a LoginThread
        talks to the Keystone-style endpoint; on success switches to the
        main frame, on failure shows the error and logs the session out.
        """
        # Bail out unless both "save password" and "auto login" are enabled.
        if Setting.getSign().lower() == 'false':
            return False
        if Setting.getAuto().lower() == 'true' :
            pass
        else :
            return False
        username = Setting.getLastLogin()
        passwd = Setting.getPasswd()
        if username == '' or passwd == '' :
            Util.MessageBox(self, u'缺少用户名或密码!', u'错误', wx.OK | wx.ICON_ERROR)
            return
        dlg = ProgressDialog.ProgressDialog(
            self, u'连接服务器...')
        dlg.CenterOnScreen()
        # Identity endpoint built from the configured server address.
        url = 'http://%s:5000/v2.0' % (Setting.getServer())
        RestartDeviceRequests()
        # The login runs on a worker thread so the progress dialog stays live.
        loginthread = LoginThread(dlg, url,
                username, passwd)
        loginthread.start()
        #dlg.SetPosition((100,100))
        #dlg.Move((Resource.screenX-dlg.))
        #dlg.CenterOnScreen()
        #ret = dlg.ShowModal()
        #dlg.Destroy()
        # The dialog blocks until the thread finishes or the user cancels.
        if dlg.ShowModal() == wx.ID_CANCEL:
            loginthread.stop()
            return
        if loginthread:
            loginthread.stop()
        dlg.Destroy()
        Logger.info("Connect to %s", url)
        Logger.info("UserId: %s, Password: ******", username)
        ret, reason, detail = loginthread.getReturnValue()
        Logger.info("Result: %s, reason: %s, detail: %s", ret, reason, detail)
        if not ret:
            # Login failed: report the error and drop the session.
            Util.MessageBox(self, detail, reason, wx.OK | wx.ICON_ERROR)
            self.ShowFullScreen(True)
            Session.logout()
        else:
            # Login succeeded: open the main frame full screen and hide login.
            f = MainFrame.MainFrame(self.GetParent(), wx.ScreenDC().GetSize())
            f.ShowFullScreen(True)
            self.GetParent().Hide()
            #f.autOn()
def OnShutdown(self, event):
dlg = ShutdownDialog.ShutdownDialog(None, u'系统将在5秒钟后关机...')
dlg.CenterOnScreen()
dlg.Update(0, u"系统将在5秒钟后关机...")
ret = dlg.ShowModal()
dlg.Destroy()
#os.system("init 0")
    def OnLogin(self, event):
        """Validate the entered credentials, log in on a worker thread behind
        a modal progress dialog, and on success switch to the main frame.
        """
        global PASSWORD
        # Keep the raw password in a module-level global; presumably read
        # elsewhere after login -- TODO confirm the consumer of PASSWORD.
        PASSWORD = self.password.GetValue()
        # Valid Check
        if self.username.GetValue() == '' or self.password.GetValue() == '' :
            Util.MessageBox(self, u'缺少用户名或密码!', u'错误', wx.OK | wx.ICON_ERROR)
            return
        dlg = ProgressDialog.ProgressDialog(
            self, u'连接服务器...')
        # Identity endpoint built from the configured server address.
        url = 'http://%s:5000/v2.0' % (Setting.getServer())
        RestartDeviceRequests()
        # The login runs on a worker thread so the progress dialog stays live.
        loginthread = LoginThread(dlg, url,
                self.username.GetValue(), self.password.GetValue())
        loginthread.start()
        #ret = dlg.ShowModal()
        #dlg.Destroy()
        # The dialog blocks until the thread finishes or the user cancels.
        if dlg.ShowModal() == wx.ID_CANCEL:
            loginthread.stop()
            return
        if loginthread:
            loginthread.stop()
        dlg.Destroy()
        Logger.info("Connect to %s", url)
        Logger.info("UserId: %s, Password: ******", self.username.GetValue())
        ret, reason, detail = loginthread.getReturnValue()
        Logger.info("Result: %s, reason: %s, detail: %s", ret, reason, detail)
        # If the password is not being remembered, wipe the field right away.
        if Setting.getSign().lower() == 'false':
            self.password.SetValue('')
            self.password.SetFocus()
        if not ret:
            Util.MessageBox(self, detail, reason, wx.OK | wx.ICON_ERROR)
            Session.logout()
        else:
            Setting.setLastLogin(FirstUser['firstuser'].username)
            if self.sign.GetValue() == True:
                Setting.setPasswd(self.password.GetValue())
            else:
                # Placeholder stored instead of the real password when the
                # user chose not to save it.
                Setting.setPasswd('1df#$!cd123~')
            Setting.save()
            # Open the main frame sized to the current display.
            area = wx.Display().GetGeometry()
            width = area.GetWidth()
            height = area.GetHeight()
            f = MainFrame.MainFrame(self.GetParent(), (width,height))
            f.ShowFullScreen(True)
            self.GetParent().Hide()
def OnSetting(self, event):
dlg = SettingDialog.SettingDialog(self)
dlg.CenterOnScreen()
ret = dlg.ShowModal()
if ret == wx.ID_OK:
dlg.SaveSetting()
dlg.Destroy()
def onKeyup(self,event):
if event.GetKeyCode() == wx.WXK_F4 :
dlg = SettingDialog.SettingDialog(self)
#dlg.CenterOnScreen()
ret = dlg.ShowModal()
if ret == wx.ID_OK:
dlg.SaveSetting()
dlg.Destroy()
class LoginFrame(wx.Frame):
    """Top-level frame that hosts the login background panel."""

    def __init__(self, parent):
        wx.Frame.__init__(self, None, -1, 'LoginBackgroundFrame')
        # The panel draws the login artwork and owns all login widgets.
        self.backPanel = BackgroundPanel(self, 'images/gf_login_ui.png')

    def autoLogin(self):
        """Delegate automatic login to the embedded background panel."""
        self.backPanel.autoLogin()
if __name__ == '__main__':
    app = wx.PySimpleApp()
    # Resource.load(1600, 900): prepare shared UI resources; the arguments
    # are presumably the design resolution -- TODO confirm.
    Resource.load(1600, 900)
    frame = LoginFrame(None)
    frame.Show(True)
    #frame.autoLogin()
    app.MainLoop()
| [
"xu.weiliang@junesh.com"
] | xu.weiliang@junesh.com |
1de851ae7e9d05355b3575b0e672336f59399b10 | 760fbcc2258ca5318410b0e18324bd1fa6d7068d | /deep/din_estimator/train.py | e6a17dd2d967816818660321f9432e0e52154588 | [] | no_license | liukanglucky/deep_ctr_practice | cbf954f77d91f0959671d781fe508bf259fedb92 | e305cf97e002c46c5e17903f50798fb7a8d6496a | refs/heads/master | 2020-09-11T21:40:14.627823 | 2020-03-27T09:46:23 | 2020-03-27T09:46:23 | 222,198,614 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: liukang
@file: train.py
@time: 2019/12/10 上午10:53
@desc:
"""
import os
import sys
import random
# Make the project root importable when this script is run directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/')
import tensorflow as tf
# Cap GPU 0 to a 6 GB virtual device so other jobs can share the card.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_virtual_device_configuration(
    gpus[0],
    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024 * 6)])
from deep.din_estimator.deep_interest_network import *
from deep.din_estimator.input_fn import *
from deep.utils import *
# --- training configuration (paths are filled in per deployment) ---
model_dir = ""          # checkpoint / export directory
output_model = ""       # final model path (not used in this script)
train_data = ""         # HDFS directory of training TFRecords
eval_data = ""          # HDFS directory of evaluation TFRecords
train_steps = 1         # steps per epoch
eval_steps = 1          # steps per evaluation
batch_size = 1024
shuffle_buffer_size = 10000
learning_rate = 0.0003
hidden_units = [128, 80, 40]            # DNN tower layer sizes
attention_hidden_units = [32, 16]       # attention MLP layer sizes
dropout_rate = 0.25
num_parallel_readers = 10
save_checkpoints_steps = 5000
use_batch_norm = True
num_epochs = 3
def main():
    """Train, evaluate and export the DIN estimator for `num_epochs` epochs.

    Each epoch shuffles the training file list, trains with an
    early-stopping hook on the eval loss, evaluates, and exports a
    SavedModel under `model_dir`.
    """
    # File lists are resolved on HDFS; the scheme prefix is prepended here.
    train_files = ["hdfs://xxxx" + p for p in get_file_list(root_path=train_data)]
    eval_files = ["hdfs://xxxx" + p for p in get_file_list(root_path=eval_data)]
    # Print one sample batch as a sanity check of the input pipeline.
    for d in input_fn(train_files, batch_size).take(1):
        print(d)
    print("train_data:", train_files)
    print("eval_data:", eval_files)
    print("train steps:", train_steps, "batch_size:", batch_size)
    print("shuffle_buffer_size:", shuffle_buffer_size)
    wide_columns, deep_columns = create_feature_columns()
    model = DIN(
        params={
            'wide_features': wide_columns,
            'deep_features': deep_columns,
            'hidden_units': hidden_units,
            'learning_rate': learning_rate,
            'attention_hidden_units': attention_hidden_units,
            'vocab_size': item_ids_features_vocab_size,
            'embedding_size': item_ids_features_emb_size,
            'dropout_rate': dropout_rate
        },
        optimizer='Adam',
        config=tf.estimator.RunConfig(model_dir=model_dir,
                                      save_checkpoints_steps=save_checkpoints_steps)
    )
    for i in range(num_epochs):
        print('[INFO: train_and_evalute begin to TRAIN, epoch = ' + str(i) + ']')
        # Re-shuffle the shard order each epoch.
        random.shuffle(train_files)
        # early stop: abort training when the eval loss stops decreasing.
        early_stop_hook = tf.estimator.experimental.stop_if_no_decrease_hook(
            model,
            eval_dir=model.eval_dir(),
            metric_name='loss',
            max_steps_without_decrease=1000,
            min_steps=100)
        model.train(
            input_fn=
            lambda: input_fn(train_files, batch_size),
            steps=train_steps,
            hooks=[early_stop_hook]
        )
        print('[INFO] train_and_evalute begin to EVALUATE...')
        notice_results = model.evaluate(
            input_fn=lambda: input_fn(eval_files, batch_size),
            steps=eval_steps)
        for key in sorted(notice_results):
            print("[INFO] train_and_evalute == EVALUATE RESULTS == %s: %s" % (
                key, notice_results[key]))
        # Export a serving SavedModel after every epoch.
        feature_spec = get_feature_description()
        serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
            feature_spec)
        model.export_saved_model(model_dir + "/saved_model_{0}/".format(i),
                                 serving_input_receiver_fn)
if __name__ == "__main__":
    # Mirror the model across all visible GPUs for synchronous training.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        main()
| [
"luckyliukang@didiglobal.com"
] | luckyliukang@didiglobal.com |
a1eb0a446c826ab2870899d66a5832e63332eec9 | 50d1a8c332df2f40c37c2d0d778a783fdedbc01a | /arena_navigation/arena_local_planner/model_based/sensor_simulator/scripts/scenario_police.py | acc5d2ea7e1401ebb982d73a0c7ec18c0caff851 | [] | no_license | ignc-research/arena-fsm-ego-planner | b801433bb9f4223346ef63e0fc76859dc78fdf81 | c6ee3616b814d213c1359a41c56afe6cf7aa49d2 | refs/heads/main | 2023-08-25T03:42:22.519463 | 2021-10-12T07:33:57 | 2021-10-12T07:33:57 | 406,738,681 | 34 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,909 | py | #!/usr/bin/env python
import numpy as np
import math
import rospy
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Int16
from visualization_msgs.msg import Marker
from nav_msgs.msg import Path, Odometry
from ford_msgs.msg import Clusters
from geometry_msgs.msg import PoseStamped
#
class police():
    """ROS monitoring node ("scenario police") for navigation evaluation.

    Subscribes to the robot's laser scan, odometry, subgoals and global
    path; counts collisions (laser reading at or below 0.35 m) and
    republishes the monitored state on `police/*` topics every 0.5 s.
    """

    def __init__(self):
        # Collision counter and (currently unused) replan counters.
        self.n_col = 0
        self.n_replan_pm = 0
        self.n_replan_mb = 0
        # True while the robot is inside a collision; prevents double counting.
        self.collision_flag = False
        # Latest messages received from the monitored topics.
        self.odom = Odometry()
        self.cluster = Clusters()
        self.subgoal = PoseStamped()
        self.subgoal_wgp = PoseStamped()
        self.global_path = Path()
        # Flags marking which messages arrived since the last publish cycle.
        self.gp_received = False
        self.sg_received = False
        self.sg_wpg_received = False
        self.update_cluster = True
        # The global path is republished only once.
        self.gp_published = False
        # sub
        self.scan = rospy.Subscriber('/scan',LaserScan, self.cbScan)
        # rospy.Subscriber('/planning_vis/goal',Marker, self.get_pm_path)
        # rospy.Subscriber('/move_base/DWAPlannerROS/global_plan',Path, self.get_mb_path)
        # rospy.Subscriber('/move_base/TebLocalPlannerROS/global_plan',Path, self.get_mb_path)
        rospy.Subscriber('/odom',Odometry, self.cb_odom)
        rospy.Subscriber('/subgoal',PoseStamped, self.cb_subgoal)
        rospy.Subscriber('/subgoal_wpg',PoseStamped, self.cb_subgoal_wpg)
        rospy.Subscriber('/vis_global_path',Path, self.cb_global_path)
        # rospy.Subscriber('/obst_odom',Clusters, self.cb_cluster)
        # pub
        self.pub_col = rospy.Publisher('police/collision', Int16, queue_size=10)
        # self.pub_mb_replan = rospy.Publisher('police/mb_replanned', Int16, queue_size=10)
        # self.pub_pb_replan = rospy.Publisher('police/pm_replanned', Int16, queue_size=10)
        self.pub_odom = rospy.Publisher('police/odom', Odometry, queue_size=10)
        self.pub_subg = rospy.Publisher('police/subgoal', PoseStamped, queue_size=10)
        self.pub_subg_wpg = rospy.Publisher('police/subgoal_wpg', PoseStamped, queue_size=10)
        self.pub_subgp = rospy.Publisher('police/gplan', Path, queue_size=10)
        # self.pub_obst_odom = rospy.Publisher('police/obst_odom',Clusters,queue_size=1)
        # Publish the collected state at 2 Hz.
        rospy.Timer(rospy.Duration(0.5),self.publish_state)

    def cb_cluster(self,msg):
        """Copy obstacle clusters, skipping labels < 24 once 24+ clusters exist.

        NOTE(review): the filtering intent (why label 24 is the cut-off) is
        not evident from this file -- confirm against the cluster publisher.
        """
        if self.update_cluster:
            self.cluster = Clusters()
            num_clusters = len(msg.mean_points)
            # print(num_clusters)
            for i in range(num_clusters):
                if num_clusters < 24:
                    self.cluster.mean_points.append(msg.mean_points[i])
                    self.cluster.velocities.append(msg.velocities[i])
                    self.cluster.labels.append(msg.labels[i])
                elif msg.labels[i] >= 24:
                    self.cluster.mean_points.append(msg.mean_points[i])
                    self.cluster.velocities.append(msg.velocities[i])
                    self.cluster.labels.append(msg.labels[i])
            #self.cluster = msg

    def cb_global_path(self, msg):
        """Store the latest global path and flag it for (one-time) publishing."""
        self.global_path = msg
        self.gp_received = True

    def cb_odom(self, msg):
        """Store the latest odometry message."""
        self.odom = msg

    def cb_subgoal(self, msg):
        """Store the latest subgoal and flag it for publishing."""
        self.subgoal = msg
        self.sg_received = True

    def cb_subgoal_wpg(self, msg):
        """Store the latest waypoint-generator subgoal and flag it."""
        self.subgoal_wgp = msg
        self.sg_wpg_received = True

    def get_pm_path(self,msg):
        # Counts plan-manager replan events (subscriber currently disabled).
        self.n_replan_pm += 1

    def get_mb_path(self,msg):
        # Counts move_base replan events (subscriber currently disabled).
        self.n_replan_mb += 1

    def publish_state(self, event):
        """Timer callback (2 Hz): republish odometry and any new goal/path."""
        # print(self.odom)
        # self.update_cluster = False
        # self.pub_obst_odom.publish(self.cluster)
        # self.update_cluster = True
        self.pub_odom.publish(self.odom)
        if self.sg_received:
            self.pub_subg.publish(self.subgoal)
            self.sg_received = False
        if self.sg_wpg_received:
            self.pub_subg_wpg.publish(self.subgoal_wgp)
            self.sg_wpg_received = False
        if self.gp_received and not self.gp_published:
            self.pub_subgp.publish(self.global_path)
            self.gp_received = False
            self.gp_published = True
        # print(self.subgoal)
        # self.pub_mb_replan.publish(self.n_replan_mb)
        # self.pub_pb_replan.publish(self.n_replan_pm)

    def cbScan(self,msg):
        """Laser callback: count a collision when the closest return <= 0.35 m.

        Hysteresis: the collision flag is only released once the closest
        reading rises above 0.5 m, so one contact is counted once.
        """
        scan_array = np.asarray(msg.ranges)
        d_min = np.nanmin(scan_array)
        # All-NaN scan: treat as "nothing in range".
        if np.isnan(d_min):
            d_min = 3.5
        if d_min > 0.5:
            self.collision_flag = False
        if d_min <= 0.35 and not self.collision_flag:
            self.collision_flag = True
            self.n_col += 1
            self.pub_col.publish(self.n_col)
            print(self.n_col)
def run():
    """Start the `scenario_police` ROS node and block until shutdown."""
    rospy.init_node('scenario_police', anonymous=False)
    print("watching scene")
    watcher = police()  # the constructor wires up all subscribers/publishers
    rospy.spin()
# Entry point: start the monitor when executed as a script.
if __name__=="__main__":
    run()
| [
"duc.pichel@gmail.com"
] | duc.pichel@gmail.com |
29af7ecd0175995ce0c75d9ff4594e44b5ba71ad | c0b3fe8f61d968be0018fe011775c1863a91c91b | /5/5_1d.py | e7c2a302bc11d6bfe5b1aa2417277efd1a8b4149 | [] | no_license | Alval2001/Valiavskiy_191_352_web | c697c04b187bcd7201e0aad1861504fd6426adc9 | e78a019aca10a2773140f8d035f7782b339d523c | refs/heads/master | 2023-04-23T00:25:24.846703 | 2021-05-11T20:40:58 | 2021-05-11T20:40:58 | 353,090,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | """
Task 5.1d

Rework the script from task 5.1c so that, when the parameter is requested,
the user can type the parameter name in any letter case.

Example run (prompts are printed in Russian by the program):

$ python task_5_1d.py
Введите имя устройства: r1
Введите имя параметра (ios, model, vendor, location, ip): IOS
15.4

Restriction: the london_co dictionary must not be modified.
All tasks must be solved using only the topics covered so far,
i.e. this task can be solved without using an if statement.
"""
london_co = {
    "r1": {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "4451",
        "ios": "15.4",
        "ip": "10.255.0.1",
    },
    "r2": {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "4451",
        "ios": "15.4",
        "ip": "10.255.0.2",
    },
    "sw1": {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "3850",
        "ios": "3.6.XE",
        "ip": "10.255.0.101",
        "vlans": "10,20,30",
        "routing": True,
    },
}
# Device name is used as-is as a key into london_co.
name = input("Введите имя устройства:")
# The prompt lists the available parameter names for the chosen device.
p = input(f"Введите имя параметра({','.join((list(london_co[name])))}):")
# .lower() makes the lookup case-insensitive; .get() avoids a KeyError.
print(london_co[name].get(p.lower(), 'Такого параметра нет'))
"75988110+Alval2001@users.noreply.github.com"
] | 75988110+Alval2001@users.noreply.github.com |
b2f932585355ebed8f89d4fa71b69a7c268a89a8 | 2bb40bd455bbfb1d7b128b6962828db66b696862 | /main.py | 84d9337a47582326d7b5e3f790234993c6fa023a | [
"MIT"
] | permissive | kimmypracha/CCG-Fighting-Simulator | 9805661bdea58bb13ca94ee63c0ac9508a31aafc | 6bfd06810afed221bcc9d0dca57dedcf63e74cf7 | refs/heads/main | 2023-05-28T01:57:10.384613 | 2021-06-17T20:42:43 | 2021-06-17T20:42:43 | 377,942,479 | 2 | 0 | MIT | 2021-06-17T20:42:44 | 2021-06-17T19:37:13 | Python | UTF-8 | Python | false | false | 1,145 | py | import random
from Player import Player
from Game import CodeConquerorGame as ccg
from config import game_conf


def _report_times(label, samples):
    """Print a separator plus min/max/average of `samples` as 'Xm Ys' lines."""
    print("============================================")
    lo, hi = min(samples), max(samples)
    mean = sum(samples) // len(samples)
    print(f"Minimum {label} : {lo//60}m {lo%60}s")
    print(f"Maximum {label} : {hi//60}m {hi%60}s")
    print(f"Average {label} : {mean//60}m {mean%60}s")


# One hundred players named A0..A99, all using the configured display mode.
userList = [Player(name="A" + str(i), display_mode=game_conf.display_mode)
            for i in range(100)]
game = ccg(userList)

# Run the simulation and collect the per-game silent-time samples.
silent_table = []
for _ in range(10000):
    game.play()
    silent_table.extend(game.compute_silent())

_report_times("Silent Time", silent_table)
_report_times("End Time", game.end_time)
print("============================================")
print(f"Give up Match (No move left) : {game.nomove_cnt}")
print(f"Time up Match : {game.timeup_cnt}")
print("============================================")
"pracha.promtaow@gmail.com"
] | pracha.promtaow@gmail.com |
373a89176f87953ea1fc5560dbe263b64ad63fd3 | 0d0a895744f4d2681f93a99dd3d607e92d4707f1 | /stacks/stack.py | 8705decf7a21c0b97730c94c90d4a3aa356c6e38 | [] | no_license | JinaZhu/Code-Challenges | 41cb88915fe297ac3aaa47600f15a83746e6777e | f3c73e6afef7fc0785822ddaed27ed79581c85fb | refs/heads/master | 2021-03-16T13:53:37.998939 | 2021-03-05T01:12:08 | 2021-03-05T01:12:08 | 246,913,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,547 | py | # We are given an array asteroids of integers representing asteroids in a row.
# For each asteroid, the absolute value represents its size, and the sign represents its direction (positive meaning right, negative meaning left). Each asteroid moves at the same speed.
# Find out the state of the asteroids after all collisions. If two asteroids meet, the smaller one will explode. If both are the same size, both will explode. Two asteroids moving in the same direction will never meet.
def asteroidCollision(asteroids):
stack = []
i = 0
while i < len(asteroids):
current_asteroid = asteroids[i]
# if stack is empty
# or last item in stack is negative
# or last item in stack is positive and current item is positive
if len(stack) == 0 or stack[-1] < 0 or (stack[-1] >= 0 and current_asteroids >= 0):
# if so, add the item to the stack
stack.append(current_asteroid)
else:
# if current is equal to last item in stack
if abs(current_asteroid) == abs(stack[-1]):
# remove last item from stack
stack.pop()
# if current is greater than last item
elif abs(current_asteroid) > abs(stack[-1]):
# also remove and decrease i
# i is decrease because we want the current asteroid to still current for the next check
stack.pop()
i -= 1
i += 1
return stack
# Demo: -5 collides with 5 and explodes, leaving [5, 10].
print('asteroidCollision', asteroidCollision([5, 10, -5]))
| [
"jinazhu87@gmail.com"
] | jinazhu87@gmail.com |
a598ad149855a6d0542afc2bad8bc7c4733e4330 | 3554e9e4ac99a24e01d99bc0a86a9b6b0023f9bd | /cycleGAN/attention/atten_Unet_5.py | 087178b65f2ac1b79f5e94daadf0b0a3757a5be8 | [] | no_license | stevebong31/endoscopy_stomach_recon | c6d7dbd8ee9cdab8b8b7640ba1b41559222d9bd8 | da596e82c120e159cfb363ed59e975e14fe57f21 | refs/heads/master | 2023-06-17T14:35:54.003221 | 2021-07-13T05:58:53 | 2021-07-13T05:58:53 | 380,897,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,272 | py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization, Reshape, LeakyReLU, Activation, Input, add, multiply
from tensorflow.keras.layers import concatenate, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.layers import Lambda
import tensorflow.keras.backend as K
import instancenormalization
def up_and_concate(down_layer, layer, data_format='channels_last'):
    """Upsample `down_layer` 2x and concatenate it with the skip `layer`.

    Plain (non-attention) U-Net decoder step.

    Args:
        down_layer: decoder feature map to be upsampled.
        layer: encoder skip-connection feature map at the target resolution.
        data_format: 'channels_first' or 'channels_last'; selects the
            channel axis used for concatenation.

    Returns:
        The concatenation [upsampled, skip] along the channel axis.

    The original computed the input channel count on both branches but
    never used it; that dead code is removed.
    """
    # up = Conv2DTranspose(out_channel, [2, 2], strides=[2, 2])(down_layer)
    up = UpSampling2D(size=(2, 2), data_format=data_format)(down_layer)
    # Channel axis: 1 for NCHW, 3 for NHWC.
    channel_axis = 1 if data_format == 'channels_first' else 3
    my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=channel_axis))
    return my_concat([up, layer])
def attention_up_and_concate(down_layer, layer, data_format='channels_last'):
    """Attention-U-Net decoder step: upsample, gate the skip, concatenate.

    The encoder skip `layer` is rescaled by an attention gate conditioned
    on the upsampled decoder features before concatenation (in the style
    of Attention U-Net).
    """
    # Channel count of the decoder features (axis 1 for NCHW, 3 for NHWC).
    if data_format == 'channels_first':
        in_channel = down_layer.get_shape().as_list()[1]
    else:
        in_channel = down_layer.get_shape().as_list()[3]
    # up = Conv2DTranspose(out_channel, [2, 2], strides=[2, 2])(down_layer)
    up = UpSampling2D(size=(2, 2), data_format=data_format)(down_layer)
    # Gate the skip connection; the attention bottleneck uses a quarter of
    # the decoder channel count.
    layer = attention_block_2d(x=layer, g=up, inter_channel=in_channel // 4, data_format=data_format)
    if data_format == 'channels_first':
        my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=1))
    else:
        my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=3))
    concate = my_concat([up, layer])
    return concate
def attention_block_2d(x, g, inter_channel, data_format='channels_last'):
    """Additive attention gate: rescale features `x` by a mask derived from `g`.

    Both inputs are projected to `inter_channel` channels with 1x1 convs,
    summed, passed through ReLU, then a 1x1 conv + sigmoid produces a
    single-channel attention map that multiplies `x` element-wise.
    """
    # theta_x(?,g_height,g_width,inter_channel)
    theta_x = Conv2D(inter_channel, [1, 1], strides=[1, 1], data_format=data_format)(x)
    # phi_g(?,g_height,g_width,inter_channel)
    phi_g = Conv2D(inter_channel, [1, 1], strides=[1, 1], data_format=data_format)(g)
    # f(?,g_height,g_width,inter_channel)
    f = Activation('relu')(add([theta_x, phi_g]))
    # psi_f(?,g_height,g_width,1)
    psi_f = Conv2D(1, [1, 1], strides=[1, 1], data_format=data_format)(f)
    rate = Activation('sigmoid')(psi_f)
    # rate(?,x_height,x_width)
    # att_x(?,x_height,x_width,x_channel)
    att_x = multiply([x, rate])
    return att_x
def att_unet(img_w, img_h, data_format='channels_last'):
    """Build an attention U-Net for 3-channel images with tanh output.

    Encoder: 4 levels of (5x5 conv + instance norm) x2 with feature widths
    32/64/128/256 and 2x max pooling; 512-wide bottleneck; the decoder
    mirrors the encoder with attention-gated skip connections. A final
    1x1 conv maps back to 3 channels squashed to [-1, 1] by tanh.

    NOTE(review): the encoder convs and pooling do not pass data_format,
    so they rely on the global Keras default -- pass it here too if
    channels_first is ever used.
    """
    inputs = Input((img_w, img_h, 3))
    x = inputs
    depth = 4
    features = 32
    skips = []
    # --- encoder: collect one skip tensor per level before downsampling ---
    for i in range(depth):
        x = Conv2D(features, 5, activation=LeakyReLU(), padding='same')(x)
        x = instancenormalization.InstanceNormalization()(x)
        x = Conv2D(features, 5, activation=LeakyReLU(), padding='same')(x)
        x = instancenormalization.InstanceNormalization()(x)
        skips.append(x)
        x = MaxPooling2D(2)(x)
        features = features * 2
    # --- bottleneck ---
    x = Conv2D(features, (5, 5), activation=LeakyReLU(), padding='same', data_format=data_format)(x)
    x = instancenormalization.InstanceNormalization()(x)
    x = Conv2D(features, (5, 5), activation=LeakyReLU(), padding='same', data_format=data_format)(x)
    x = instancenormalization.InstanceNormalization()(x)
    # --- decoder: upsample, gate + concatenate the matching skip, refine ---
    for i in reversed(range(depth)):
        features = features // 2
        x = attention_up_and_concate(x, skips[i], data_format=data_format)
        x = Conv2D(features, (5, 5), activation=LeakyReLU(), padding='same', data_format=data_format)(x)
        x = instancenormalization.InstanceNormalization()(x)
        x = Conv2D(features, (5, 5), activation=LeakyReLU(), padding='same', data_format=data_format)(x)
        x = instancenormalization.InstanceNormalization()(x)
    # Project back to 3 output channels in [-1, 1].
    conv6 = Conv2D(3, (1, 1), padding='same', data_format=data_format)(x)
    conv7 = Activation('tanh')(conv6)
    model = Model(inputs=inputs, outputs=conv7)
    #model.compile(optimizer=Adam(lr=1e-5), loss=[focal_loss()], metrics=['accuracy', dice_coef])
    return model
# %%
| [
"qhdgur3410@gmail.com"
] | qhdgur3410@gmail.com |
ee864bf4f45435d16fd37093d8533828dfc9fe61 | ad469d0ca144c485fc0cdcfb2ebfdd0bddf86271 | /src/models/base.py | 54694b4039a9f44b73fa58b3fa5fc83c93fa823d | [] | no_license | ngxbac/Kaggle-Google-Landmark-2019 | 3e8a29e83e835b29262df439b9af12ca27cee768 | 274864e2778acde9007c096607c113c268882343 | refs/heads/master | 2020-05-31T04:37:32.003023 | 2019-06-04T00:41:51 | 2019-06-04T00:41:51 | 190,102,248 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,154 | py | import torch
import torch.nn as nn
import torchvision.models as models
class Net(nn.Module):
    """Embedding classifier with optional L2 normalization and scaling.

    Pipeline: frozen CNN feature extractor -> linear embedding ->
    (optional) row-wise L2 normalization -> (optional) multiplication by
    the learnable scale `s` -> bias-free linear classifier.
    """

    def __init__(self, num_classes=100, norm=True, scale=True):
        super(Net, self).__init__()
        self.extractor = Extractor()
        self.embedding = Embedding()
        self.classifier = Classifier(num_classes)
        # Learnable scale applied after normalization (initialized to 10).
        self.s = nn.Parameter(torch.FloatTensor([10]))
        self.norm = norm
        self.scale = scale

    def forward(self, x):
        feat = self.embedding(self.extractor(x))
        if self.norm:
            feat = self.l2_norm(feat)
        if self.scale:
            feat = self.s * feat
        return self.classifier(feat)

    def extract(self, x):
        """Return the L2-normalized embedding (no scaling, no classifier)."""
        return self.l2_norm(self.embedding(self.extractor(x)))

    def l2_norm(self, input):
        """Row-wise L2 normalization with a small epsilon for stability."""
        original_size = input.size()
        squared = torch.pow(input, 2)
        norms = torch.sqrt(torch.sum(squared, 1).add_(1e-10))
        normalized = torch.div(input, norms.view(-1, 1).expand_as(input))
        return normalized.view(original_size)

    def weight_norm(self):
        """Normalize each classifier weight row to unit L2 norm, in place."""
        w = self.classifier.fc.weight.data
        row_norms = w.norm(p=2, dim=1, keepdim=True)
        self.classifier.fc.weight.data = w.div(row_norms.expand_as(w))
class Extractor(nn.Module):
    """Frozen ResNet-50 backbone (every layer except the final fc)."""

    def __init__(self):
        super(Extractor, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Keep everything up to (and including) the global average pool.
        self.extractor = nn.Sequential(*list(backbone.children())[:-1])
        # Freeze the backbone: only the embedding/classifier heads train.
        for param in self.extractor.parameters():
            param.requires_grad = False

    def forward(self, x):
        features = self.extractor(x)
        return features.view(features.size(0), -1)
class Embedding(nn.Module):
    """Single linear projection over the 2048-d backbone features."""

    def __init__(self):
        super(Embedding, self).__init__()
        self.fc = nn.Linear(2048, 2048)

    def forward(self, x):
        return self.fc(x)
class Classifier(nn.Module):
    """Bias-free linear classification layer over 2048-d embeddings."""

    def __init__(self, num_classes):
        super(Classifier, self).__init__()
        self.fc = nn.Linear(2048, num_classes, bias=False)

    def forward(self, x):
        return self.fc(x)
"ngxbac.dt@gmail.com"
] | ngxbac.dt@gmail.com |
36062c442c4ba39c3735cc883790c87297d14df7 | 4e71725de98b539bdf13ce61ce976be490595d86 | /Project 3 - Barcode Generator/Project 3.py | aba0a1420927c1633442ae31adddd3e903930cad | [] | no_license | ivanaairenee/Foundations-of-Programming-1 | d9ae9abc7538413db506991ab82baceff4ace03b | dbde3a403d751497aed687b4b33e5c7b9ba6f3f5 | refs/heads/master | 2021-01-19T03:34:35.177333 | 2017-04-08T02:09:47 | 2017-04-08T02:09:47 | 87,323,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,435 | py | from tkinter import * #import all tkinter modules
invalid_chars = "/?<>\:*|\"" #define invalid chars for further use in file name's character exception
#make a dictionary for the pattern of barcode, and the decoding of every digit
LcodeDict = {"0":"0001101", "1":"0011001", "2":"0010011", "3":"0111101", "4":"0100011", "5":"0110001", "6":"0101111", "7":"0111011", "8":"0110111", "9":"0001011"}
RcodeDict = {"0":"1110010", "1":"1100110", "2":"1101100", "3":"1000010", "4":"1011100", "5":"1001110", "6":"1010000", "7":"1000100", "8":"1001000", "9":"1110100"}
GcodeDict = {"0":"0100111", "1":"0110011", "2":"0011011", "3":"0100001", "4":"0011101", "5":"0111001", "6":"0000101", "7":"0010001", "8":"0001001", "9":"0010111"}
FirstsixDict = {"0":"LLLLLL", "1":"LLGLGG", "2":"LLGGLG", "3":"LLGGGL", "4":"LGLLGG", "5":"LGGLLG", "6":"LGGGLL", "7":"LGLGLG", "8":"LGLGGL", "9":"LGGLGL"}
def checkDigit(x):
    """Return the EAN-13 check digit for the 12-digit string `x`.

    Odd positions (1st, 3rd, ...) are weighted 1 and even positions
    weighted 3; the check digit brings the total to a multiple of 10.
    """
    digits = [int(ch) for ch in x]
    weighted_sum = sum(digits[0::2]) + 3 * sum(digits[1::2])
    return str(-weighted_sum % 10)
class processBarcode:
    """Encode a 12-digit EAN-13 prefix into its bar/space bit pattern.

    The methods are stateless helpers; they are now declared
    @staticmethod so they also work when called on an instance or a
    subclass instance (the original bare functions only worked when
    called via the class).
    """

    @staticmethod
    def process(inp):
        """Return the 84-bit L/G/R bit string for `inp` plus its check digit.

        The (implicit) first digit selects the L/G parity pattern for
        digits 2-7; digits 8-13 (including the check digit) always use
        R-codes. Guard bars are added by the caller when drawing.
        """
        string = inp + str(checkDigit(inp))
        first_digit_pattern = FirstsixDict[string[0]]
        remaining = string[1:]
        barcode = ""
        # Digits 2-7: L- or G-coded according to the parity pattern.
        for parity, digit in zip(first_digit_pattern, remaining[:6]):
            table = LcodeDict if parity == "L" else GcodeDict
            barcode = barcode + table[digit]
        # Digits 8-13 (incl. check digit): always R-coded.
        for digit in remaining[6:]:
            barcode = barcode + RcodeDict[digit]
        return barcode

    @staticmethod
    def check(inp):
        """Return `inp` with its EAN-13 check digit appended."""
        return inp + str(checkDigit(inp))
#define a class
class BarcodeWriter(processBarcode):
    """Tkinter GUI: enter a 12-digit code, render the EAN-13 barcode and
    export it to an Encapsulated PostScript (.eps) file.

    NOTE(review): `messagebox` is referenced below, but `from tkinter
    import *` does not import the `messagebox` submodule -- add
    `from tkinter import messagebox` at the top of the file, or the
    validation warnings raise NameError.
    """

    def __init__(self):
        """Build the window (file-name entry, code entry, canvas) and enter
        the Tk main loop; the constructor blocks until the window closes.
        """
        master = Tk() #root window
        master.title("EAN-13 by Ivana Irene Thomas") #window title
        master.resizable(width=False, height=False) #fixed-size window
        text1 = Label(text="Save barcode to PS file [eg: EAN13.eps]:",font=("Helvetica 12 bold")) #file-name prompt
        text1.pack() #stack the prompt at the top
        self.entry = StringVar() #holds the output file name
        self.enterFilename = Entry(master, textvariable=self.entry) #file-name input box
        self.enterFilename.bind("<Return>", self.enter) #Enter triggers rendering
        self.enterFilename.pack() #place below the prompt
        text2 = Label(text="Enter code (first decimal digits):",font=("Helvetica 12 bold")) #code prompt
        text2.pack() #stack the prompt
        self.barcode = StringVar() #holds the 12-digit code
        self.enterBarcode = Entry(master, textvariable=self.barcode) #code input box
        self.enterBarcode.bind("<Return>", self.enter) #Enter triggers rendering
        self.enterBarcode.pack() #place below the prompt
        self.canvas = Canvas(master, width=250, height=350, bg="white") #drawing surface for the barcode
        self.canvas.pack() #place below the entries
        master.mainloop()

    def enter(self, event):
        """<Return> handler: validate inputs, draw the barcode and export it.

        Validation: the file name must have no forbidden characters and
        end in ".eps"; the code must be exactly 12 decimal digits. Bars
        are drawn as 2 px vertical lines ("brown" = 1, "white" = 0);
        guard bars are taller (y 100-240) than data bars (y 100-230).
        """
        self.name = self.entry.get() #requested output file name
        inp = self.barcode.get() #12-digit code to encode
        for i in invalid_chars: #reject file names with characters invalid on Windows
            if i in self.name:
                messagebox.showwarning( #invalid file-name warning
                    "Invalid File Name",
                    "Please enter a valid file name")
                return
        if self.name[-4:] != ".eps": #the export only supports .eps
            messagebox.showwarning(
                "Invalid File Name",
                "Please enter a valid file name"
                )
        elif len(inp)!=12 or not inp.isdigit(): #EAN-13 input is exactly 12 decimal digits
            messagebox.showwarning(
                "Invalid Barcode",
                "Please enter a valid barcode number")
        else:
            self.canvas.delete("all") #clear any previous rendering
            barcode=processBarcode.process(inp)
            string=processBarcode.check(inp)
            #heading text
            title = self.canvas.create_text(29,50, anchor="nw",text="EAN-13 Barcode:", font=("Helvetica 19 bold"))
            #start guard bars (101)
            self.canvas.create_line(30, 100, 30, 240, fill = "brown", tags = "Line", width=2)
            self.canvas.create_line(32, 100, 32, 240, fill = "white", tags = "Line", width=2)
            self.canvas.create_line(34, 100, 34, 240, fill = "brown", tags = "Line", width=2)
            x = 36
            #left half: 42 modules for digits 2-7 (L/G coded)
            #dark line per 1-bit, white line per 0-bit
            for k in barcode[0:42]:
                if k =="1":
                    self.canvas.create_line(x, 100, x, 230, fill = "brown", tags = "Line", width=2)
                elif k == "0":
                    self.canvas.create_line(x, 100, x, 230, fill = "white", tags = "Line", width=2)
                x+=2
            #middle guard bars (01010)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "white", tags = "Line", width=2)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "brown", tags = "Line", width=2)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "white", tags = "Line", width=2)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "brown", tags = "Line", width=2)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "white", tags = "Line", width=2)
            x+=2
            #right half: 42 modules for digits 8-13 (R coded, incl. check digit)
            #dark line per 1-bit, white line per 0-bit
            for k in barcode[42:]:
                if k =="1":
                    self.canvas.create_line(x, 100, x, 230, fill = "brown", tags = "Line", width=2)
                elif k =="0":
                    self.canvas.create_line(x, 100, x, 230, fill = "white", tags = "Line", width=2)
                x+=2
            #end guard bars (101)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "brown", tags = "Line", width=2)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "white", tags = "Line", width=2)
            x+=2
            self.canvas.create_line(x, 100, x, 240, fill = "brown", tags = "Line", width=2)
            #human-readable digits (1-6-6 grouping) and the check digit
            tulisan = self.canvas.create_text(24, 245, anchor="nw",text="{} {} {}".format(string[0], string[1:7], string[7:]), font=("Helvetica 19 bold"), justify="center")
            check = self.canvas.create_text(55, 295, anchor="nw",text="Check Digit: {}".format(checkDigit(inp)), font=("Helvetica 14 bold"), fill="orange")
            self.canvas.postscript(file=self.name, colormode='color')
# Launch the GUI; the constructor runs the Tk main loop and blocks here.
BarcodeWriter()
| [
"zirenely@gmail.com"
] | zirenely@gmail.com |
df2ffa0accf83f4363cc11f2b219eb6f5a74b0c3 | dd834845a2ab346dafd04f3beb4ba0916b64dc51 | /test_case/task/test_200smart_sanity_clear_001.py | fc61417bcb137b08429c8f21631cfea146deaf4b | [] | no_license | Lewescaiyong/auto_test_framework | ae51726b705fbf125c30fce447c7c75510597047 | 2d3490393737b3e5f086cb6623369b988ffce67f | refs/heads/master | 2020-11-25T09:18:29.209261 | 2020-02-10T13:48:12 | 2020-02-10T13:48:12 | 228,590,729 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,443 | py | #!/usr/bin/env python
from lib.exceptions.check_exception import CheckException
from lib.base.script.integration_test.case_mw import CaseMW
class Test200SmartSanityClear001(CaseMW):
    """Clear OB
    No.: test_200smart_sanity_clear_001
    Preconditions:
    1. Open Micro/WINr;
    2. Set up connection with PLC;
    3. Download a project which has OB,DB,SDB;
    Step actions:
    1. Clear program block;
    2. Compare;
    Expected results:
    1. Clear successful;
    2. The OB is different;
    Priority: H
    Author: Cai, Yong
    ChangeInfo: Cai, Yong 2019-09-20 create
    """
    def prepare(self):
        """the preparation before executing the test steps
        Args:
        Example:
        Return:
        Author: Cai, Yong
        IsInterface: False
        ChangeInfo: Cai, Yong 2019-09-20 create
        """
        super(Test200SmartSanityClear001, self).prepare()
        # Log each precondition, then download the OB/DB/SDB project.
        self.logger.info('Preconditions:')
        for precondition in ('1. Open Micro/WINr; ',
                             '2. Set up connection with PLC;',
                             '3. Download a project which has OB,DB,SDB;'):
            self.logger.info(precondition)
        self.MicroWIN.test_prepare('ob_db_sdb_01.smart', False)
    def process(self):
        """execute the test steps
        Args:
        Example:
        Return:
        Author: Cai, Yong
        IsInterface: False
        ChangeInfo: Cai, Yong 2019-09-20 create
        """
        super(Test200SmartSanityClear001, self).process()
        self.logger.info('Step actions:')
        self.logger.info('1. Clear program block;')
        clear_result = self.PLC['1'].plc_clear('ob')
        self.logger.info('2. Compare;')
        compare_result = self.MicroWIN.compare_with_plc()
        self.logger.info('Expected results:')
        self.logger.info('1. Clear successful;')
        if clear_result['code'] != 0:
            raise CheckException('1. Clear OB failed;')
        self.logger.info('2. The OB is different;')
        # After clearing, OB must differ while DB and SDB still match.
        if compare_result['ob'] or not compare_result['db'] or not compare_result['sdb']:
            self.logger.info('Compare result: %s' % compare_result)
            raise CheckException('Compare failed;')
    def cleanup(self):
        """clean up after performing the test steps
        Args:
        Example:
        Return:
        Author: Cai, Yong
        IsInterface: False
        ChangeInfo: Cai, Yong 2019-09-20 create
        """
        super(Test200SmartSanityClear001, self).cleanup()
| [
"1351153527@qq.com"
] | 1351153527@qq.com |
0fff791c138c950914f0c75883bdd5e4cf0a35ee | 83b60dc1342577d84005864429306abd34f6546a | /course1/week3/week3.py | b5574893cd3c89c253b07441b9f6cc13a9de943c | [] | no_license | xuxinhang/Bobs_deeplearning.ai_course_practice | 733ac723ef750c92154230601a8830e01ee16b3e | 3b17e9ceb52beb8a47e4f50fee39f01af24ecbc0 | refs/heads/master | 2020-03-07T18:22:02.585659 | 2019-10-01T10:08:48 | 2019-10-01T10:08:48 | 127,636,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | import numpy as np
from testCases import *
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
def sigma(x):
    """Logistic sigmoid activation, 1 / (1 + e^(-x)), applied element-wise."""
    exp_term = np.exp(-x)
    return 1 / (1 + exp_term)
def tanh_d(x):
    """Derivative of tanh: 1 - tanh(x)^2, applied element-wise."""
    tanh_val = np.tanh(x)
    return 1 - tanh_val ** 2
# Import data: the planar "flower" dataset plus alternative toy datasets.
planar = load_planar_dataset()
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
X, Y = planar
# If you are not using the planar dataset, you need to add the line below
# X, Y = X.T, Y.reshape(1, Y.shape[0])
# Initialize the parameter matrices.
# Hidden layer: 4 units over 2 input features; output layer: 1 unit.
W1 = np.random.randn(4,2) * 0.01
b1 = np.zeros((4,1))
W2 = np.random.randn(1,4) * 0.01
b2 = np.zeros((1,1))
# Required parameters
m = Y.shape[1]
alaph = 1  # learning rate (NOTE(review): misspelling of "alpha"; kept as-is)
A0 = X
# Start iterating (batch gradient descent)
for i in range(10000):
    # Forward propagation
    Z1 = np.dot(W1, A0) + b1
    A1 = np.tanh(Z1)
    A2 = sigma(np.dot(W2, A1) + b2)
    # Backward propagation
    # J: cross-entropy cost averaged over the m training examples
    J = -1/m * np.sum( np.multiply(Y, np.log(A2)) + np.multiply((1-Y), np.log(1-A2)) )
    dZ2 = A2 - Y
    dW2 = 1/m * np.dot(dZ2, A1.T)
    db2 = 1/m * np.sum(dZ2, axis=1, keepdims=True)
    # (1 - A1^2) is tanh'(Z1); the tanh_d helper above computes the same thing
    dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))
    dW1 = 1/m * np.dot(dZ1, A0.T)
    db1 = 1/m * np.sum(dZ1, axis=1, keepdims=True)
    # Gradient descent parameter update
    W2 = W2 - alaph * dW2
    W1 = W1 - alaph * dW1
    b2 = b2 - alaph * db2
    b1 = b1 - alaph * db1
    # Progress output every 1000 iterations
    if i%1000 == 0:
        print("Loss:", J)
        print('W1', W1, '\nW2\n', W2)
        print('dW1', dW1, '\ndW2\n', dW2)
        print('b1', b1, 'b2', b2)
# # # # # #
# Evaluate the learned model on the training set #
# # # # # #
pred_A1 = np.tanh(np.dot(W1, X) + b1)
pred_A2 = sigma(np.dot(W2, pred_A1) + b2)
pred_val = np.around(pred_A2)
fail_rate = np.sum(np.absolute(pred_val-Y)) / m
print('For training set, the fail rate is ', fail_rate*100)
# Prediction function
def predictor(inp_X):
    """Forward-propagate inputs through the trained network and return
    the rounded 0/1 class predictions.

    inp_X: array with one example per column (2 rows, matching W1's
           (4, 2) shape -- confirm against the training data layout).
    Returns an array of shape (1, m) of 0.0/1.0 predictions.
    """
    # Hidden layer: tanh over the learned weights.
    pred_A1 = np.tanh(np.dot(W1, inp_X) + b1)
    # Output layer: sigmoid probability of class 1.
    pred_A2 = sigma(np.dot(W2, pred_A1) + b2)
    # Round probabilities to hard predictions. (The original also built an
    # unused `predictions` array from the training-time global A2 -- dead
    # code reading the wrong variable -- removed.)
    return np.around(pred_A2)
# Predict the class of a single point (result is discarded; demo only).
predictor([[4],[2]])
# Plot the decision boundary (intended for a Jupyter/IPython notebook).
# NOTE: '%matplotlib inline' is IPython magic, not valid Python syntax;
# leaving it active makes this .py file unimportable, so it is commented
# out -- run it manually inside a notebook if inline plotting is needed.
# %matplotlib inline
plot_decision_boundary(lambda x: predictor(x.T),X,Y)
| [
"xuxinhang4567@126.com"
] | xuxinhang4567@126.com |
e8b4ca2669f82d1b25da94d71c3df86149eb927e | 16232db3867ef7c87e3dad0ba9b328de3eee96d2 | /Linux/pyxhook.py | ff947ba71efbb092226e51579773619ab5ed3cfc | [] | no_license | ganeshkumartk/keylogger | f0f0af3033988159757709385a827a7797e5298e | 8c775408b8ffe71ffbcfc4b6855e8145f399087b | refs/heads/master | 2022-05-05T03:24:46.993957 | 2019-06-18T17:47:29 | 2019-06-18T17:47:29 | 191,026,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,309 | py | #!/usr/bin/env python
# modified version of pyxhook.py
# Reformatted/modified to work with Python 3+.
# pyxhook -- an extension to emulate some of the PyHook library on linux.
#
# Copyright (C) 2008 Tim Alexander <dragonfyre13@gmail.com>
#
# View the repo: https://github.com/JeffHoogland/pyxhook
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Thanks to Alex Badea <vamposdecampos@gmail.com> for writing the Record
# demo for the xlib libraries. It helped me immensely working with these
# in this library.
#
# Thanks to the python-xlib team. This wouldn't have been possible without
# your code.
#
# This requires:
# at least python-xlib 1.4
# xwindows must have the 'record' extension present, and active.
#
# This file has now been somewhat extensively modified by
# Ganesh Kumar <ganeshkumartk@outlook.com>
# So if there are any bugs, they are probably my fault. :)
from __future__ import print_function
import sys
import re
import time
import threading
from Xlib import X, XK, display, error # noqa
from Xlib.ext import record
from Xlib.protocol import rq
#######################################################################
# ######################START CLASS DEF################################
#######################################################################
def print_err(*args, **kwargs):
    """Forward to print(); defaults the output stream to sys.stderr."""
    target = kwargs.get('file')
    if target is None:
        # No stream supplied (or explicitly None): send to stderr.
        kwargs['file'] = sys.stderr
    print(*args, **kwargs)
class HookManager(threading.Thread):
    """ This is the main class. Instantiate it, and you can hand it KeyDown
    and KeyUp (functions in your own code) which execute to parse the
    PyxHookKeyEvent class that is returned.
    This simply takes these two values for now:
    KeyDown = The function to execute when a key is pressed, if it returns
              anything. It hands the function an argument that is the
              PyxHookKeyEvent class.
    KeyUp = The function to execute when a key is released, if it returns
            anything. It hands the function an argument that is the
            PyxHookKeyEvent class.
    """
    def __init__(self):
        """Set up default callbacks, keysym regexes and two X displays
        (one for the RECORD context, one for ordinary queries)."""
        threading.Thread.__init__(self)
        self.finished = threading.Event()
        # Give these some initial values
        self.mouse_position_x = 0
        self.mouse_position_y = 0
        # Modifier state. NOTE(review): 'shift' is later used as a counter
        # (False == 0) that is incremented/decremented per Shift press.
        self.ison = {'shift': False, 'caps': False}
        # Compile our regex statements.
        self.isshift = re.compile('^Shift')
        self.iscaps = re.compile('^Caps_Lock')
        # Keysym names whose character changes when Shift is held.
        self.shiftablechar = re.compile('|'.join((
            '^[a-z0-9]$',
            '^minus$',
            '^equal$',
            '^bracketleft$',
            '^bracketright$',
            '^semicolon$',
            '^backslash$',
            '^apostrophe$',
            '^comma$',
            '^period$',
            '^slash$',
            '^grave$'
        )))
        self.logrelease = re.compile('.*')
        self.isspace = re.compile('^space$')
        # Assign default function actions (do nothing).
        self.KeyDown = lambda x: True
        self.KeyUp = lambda x: True
        self.MouseAllButtonsDown = lambda x: True
        self.MouseAllButtonsUp = lambda x: True
        self.contextEventMask = [X.KeyPress, X.MotionNotify]
        # Hook to our display.
        self.local_dpy = display.Display()
        self.record_dpy = display.Display()
    def run(self):
        """Thread body: create an X RECORD context and pump events into
        processevents() until cancel() disables the context."""
        # Check if the extension is present
        if not self.record_dpy.has_extension('RECORD'):
            print_err('RECORD extension not found')
            sys.exit(1)
        r = self.record_dpy.record_get_version(0, 0)
        print_err('RECORD extension version {}.{}'.format(
            r.major_version,
            r.minor_version
        ))
        # Create a recording context; we only want key and mouse events
        self.ctx = self.record_dpy.record_create_context(
            0,
            [record.AllClients],
            [{
                'core_requests': (0, 0),
                'core_replies': (0, 0),
                'ext_requests': (0, 0, 0, 0),
                'ext_replies': (0, 0, 0, 0),
                'delivered_events': (0, 0),
                # (X.KeyPress, X.ButtonPress),
                'device_events': tuple(self.contextEventMask),
                'errors': (0, 0),
                'client_started': False,
                'client_died': False,
            }]
        )
        # Enable the context; this only returns after a call to record_disable
        # context, while calling the callback function in the meantime
        self.record_dpy.record_enable_context(self.ctx, self.processevents)
        # Finally free the context
        self.record_dpy.record_free_context(self.ctx)
    def cancel(self):
        """Stop the hook: disable the RECORD context so run() returns."""
        self.finished.set()
        self.local_dpy.record_disable_context(self.ctx)
        self.local_dpy.flush()
    def printevent(self, event):
        """Default debug callback: print the event object."""
        print(event)
    def HookKeyboard(self):
        """Kept for API compatibility; keyboard hooking is always on."""
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.KeyPress
        # self.contextEventMask[0] = X.KeyPress
        pass
    def HookMouse(self):
        """Kept for API compatibility; mouse hooking is always on."""
        # We don't need to do anything here anymore, since the default mask
        # is now set to contain X.MotionNotify
        # need mouse motion to track pointer position, since ButtonPress
        # events don't carry that info.
        # self.contextEventMask[1] = X.MotionNotify
        pass
    def processevents(self, reply):
        """RECORD callback: parse the raw reply buffer into X events and
        dispatch them to the user-assigned callbacks."""
        if reply.category != record.FromServer:
            return
        if reply.client_swapped:
            print_err('* received swapped protocol data, cowardly ignored')
            return
        try:
            # Python 2
            intval = ord(reply.data[0])
        except TypeError:
            # Python 3.
            intval = reply.data[0]
        if (not reply.data) or (intval < 2):
            # not an event
            return
        data = reply.data
        while len(data):
            event, data = rq.EventField(None).parse_binary_value(
                data,
                self.record_dpy.display,
                None,
                None
            )
            if event.type == X.KeyPress:
                hookevent = self.keypressevent(event)
                self.KeyDown(hookevent)
            elif event.type == X.KeyRelease:
                hookevent = self.keyreleaseevent(event)
                self.KeyUp(hookevent)
            elif event.type == X.ButtonPress:
                hookevent = self.buttonpressevent(event)
                self.MouseAllButtonsDown(hookevent)
            elif event.type == X.ButtonRelease:
                hookevent = self.buttonreleaseevent(event)
                self.MouseAllButtonsUp(hookevent)
            elif event.type == X.MotionNotify:
                # use mouse moves to record mouse position, since press and
                # release events
                # do not give mouse position info
                # (event.root_x and event.root_y have bogus info).
                self.mousemoveevent(event)
            # print('processing events...', event.type)
    def keypressevent(self, event):
        """Translate a raw KeyPress into a PyxHookKeyEvent, tracking
        Shift/Caps state for shiftable characters."""
        matchto = self.lookup_keysym(
            self.local_dpy.keycode_to_keysym(event.detail, 0)
        )
        if self.shiftablechar.match(
                self.lookup_keysym(
                    self.local_dpy.keycode_to_keysym(event.detail, 0))):
            # This is a character that can be typed.
            if not self.ison['shift']:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
                return self.makekeyhookevent(keysym, event)
            else:
                # Shift held: use the shifted (index 1) keysym.
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
                return self.makekeyhookevent(keysym, event)
        else:
            # Not a typable character.
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            if self.isshift.match(matchto):
                self.ison['shift'] = self.ison['shift'] + 1
            elif self.iscaps.match(matchto):
                # NOTE(review): the second `if` below immediately undoes the
                # first when caps was off (it was just set True); this looks
                # like it was meant to be if/else -- confirm intended
                # Caps_Lock behavior before changing.
                if not self.ison['caps']:
                    self.ison['shift'] = self.ison['shift'] + 1
                    self.ison['caps'] = True
                if self.ison['caps']:
                    self.ison['shift'] = self.ison['shift'] - 1
                    self.ison['caps'] = False
            return self.makekeyhookevent(keysym, event)
    def keyreleaseevent(self, event):
        """Translate a raw KeyRelease into a PyxHookKeyEvent and update
        the Shift counter on Shift release."""
        if self.shiftablechar.match(
                self.lookup_keysym(
                    self.local_dpy.keycode_to_keysym(event.detail, 0))):
            if not self.ison['shift']:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
            else:
                keysym = self.local_dpy.keycode_to_keysym(event.detail, 1)
        else:
            keysym = self.local_dpy.keycode_to_keysym(event.detail, 0)
        matchto = self.lookup_keysym(keysym)
        if self.isshift.match(matchto):
            self.ison['shift'] = self.ison['shift'] - 1
        return self.makekeyhookevent(keysym, event)
    def buttonpressevent(self, event):
        """Wrap a ButtonPress in a PyxHookMouseEvent."""
        return self.makemousehookevent(event)
    def buttonreleaseevent(self, event):
        """Wrap a ButtonRelease in a PyxHookMouseEvent."""
        return self.makemousehookevent(event)
    def mousemoveevent(self, event):
        """Record the latest pointer position from a MotionNotify event."""
        self.mouse_position_x = event.root_x
        self.mouse_position_y = event.root_y
    # need the following because XK.keysym_to_string() only does printable
    # chars rather than being the correct inverse of XK.string_to_keysym()
    def lookup_keysym(self, keysym):
        """Return the XK_ name for a keysym, or '[<number>]' if unknown.
        NOTE(review): lstrip('XK_') strips a *character set*, not a prefix,
        so names whose body starts with X, K or _ lose extra characters."""
        for name in dir(XK):
            if name.startswith('XK_') and getattr(XK, name) == keysym:
                return name.lstrip('XK_')
        return '[{}]'.format(keysym)
    def asciivalue(self, keysym):
        """Return the keysym's ascii code, or 0 when it has none (>255)."""
        asciinum = XK.string_to_keysym(self.lookup_keysym(keysym))
        if asciinum < 256:
            return asciinum
        else:
            return 0
    def makekeyhookevent(self, keysym, event):
        """Build a PyxHookKeyEvent from a keysym plus focus-window info."""
        storewm = self.xwindowinfo()
        if event.type == X.KeyPress:
            MessageName = 'key down'
        elif event.type == X.KeyRelease:
            MessageName = 'key up'
        return PyxHookKeyEvent(
            storewm['handle'],
            storewm['name'],
            storewm['class'],
            self.lookup_keysym(keysym),
            self.asciivalue(keysym),
            False,
            event.detail,
            MessageName
        )
    def makemousehookevent(self, event):
        """Build a PyxHookMouseEvent, naming the button from event.detail
        (1=left, 2=middle, 3=right, 4/5=wheel) and the press/release type."""
        storewm = self.xwindowinfo()
        if event.detail == 1:
            MessageName = 'mouse left '
        elif event.detail == 3:
            MessageName = 'mouse right '
        elif event.detail == 2:
            MessageName = 'mouse middle '
        elif event.detail == 5:
            MessageName = 'mouse wheel down '
        elif event.detail == 4:
            MessageName = 'mouse wheel up '
        else:
            MessageName = 'mouse {} '.format(event.detail)
        if event.type == X.ButtonPress:
            MessageName = '{}down'.format(MessageName)
        elif event.type == X.ButtonRelease:
            MessageName = '{}up'.format(MessageName)
        return PyxHookMouseEvent(
            storewm['handle'],
            storewm['name'],
            storewm['class'],
            (self.mouse_position_x, self.mouse_position_y),
            MessageName
        )
    def xwindowinfo(self):
        """Return {'name', 'class', 'handle'} for the focused window,
        falling back to its parent, or all-None when X queries fail.
        NOTE(review): the bare except clauses swallow every error by
        design ('keep things running smoothly')."""
        try:
            windowvar = self.local_dpy.get_input_focus().focus
            wmname = windowvar.get_wm_name()
            wmclass = windowvar.get_wm_class()
            wmhandle = str(windowvar)[20:30]
        except:
            # This is to keep things running smoothly.
            # It almost never happens, but still...
            return {'name': None, 'class': None, 'handle': None}
        if (wmname is None) and (wmclass is None):
            try:
                windowvar = windowvar.query_tree().parent
                wmname = windowvar.get_wm_name()
                wmclass = windowvar.get_wm_class()
                wmhandle = str(windowvar)[20:30]
            except:
                # This is to keep things running smoothly.
                # It almost never happens, but still...
                return {'name': None, 'class': None, 'handle': None}
        if wmclass is None:
            return {'name': wmname, 'class': wmclass, 'handle': wmhandle}
        else:
            return {'name': wmname, 'class': wmclass[0], 'handle': wmhandle}
class PyxHookKeyEvent(object):
    """Keyboard event handed to the KeyDown/KeyUp callbacks.

    Attributes:
        Window:         handle of the focused window.
        WindowName:     name of the focused window.
        WindowProcName: backend process for the window.
        Key:            key name, shifted to the correct caps value.
        Ascii:          ascii code of the key, or 0 when it has none.
        KeyID:          always False here (Windows-only virtual key code).
        ScanCode:       raw keycode; keyboard-specific, prefer Key.
        MessageName:    'key down' or 'key up'.
    """
    def __init__(
            self, Window, WindowName, WindowProcName, Key, Ascii, KeyID,
            ScanCode, MessageName):
        (self.Window, self.WindowName, self.WindowProcName, self.Key,
         self.Ascii, self.KeyID, self.ScanCode, self.MessageName) = (
            Window, WindowName, WindowProcName, Key, Ascii, KeyID,
            ScanCode, MessageName)
    def __str__(self):
        # One "Label: value" line per field, no trailing newline.
        return (
            'Window Handle: {0.Window}\n'
            'Window Name: {0.WindowName}\n'
            "Window's Process Name: {0.WindowProcName}\n"
            'Key Pressed: {0.Key}\n'
            'Ascii Value: {0.Ascii}\n'
            'KeyID: {0.KeyID}\n'
            'ScanCode: {0.ScanCode}\n'
            'MessageName: {0.MessageName}'
        ).format(self)
class PyxHookMouseEvent:
    """Mouse event handed to the MouseAllButtons{Down,Up} callbacks.

    Attributes:
        Window:         handle of the focused window.
        WindowName:     name of the focused window.
        WindowProcName: backend process for the window.
        Position:       2-tuple (x, y) coordinates of the mouse click.
        MessageName:    'mouse left|right|middle down' / '... up' etc.
    """
    def __init__(
            self, Window, WindowName, WindowProcName, Position, MessageName):
        self.Window = Window
        self.WindowName = WindowName
        self.WindowProcName = WindowProcName
        self.Position = Position
        self.MessageName = MessageName
    def __str__(self):
        # Include WindowName: it is documented above and PyxHookKeyEvent
        # prints it, but it was missing from the original string output.
        return '\n'.join((
            'Window Handle: {s.Window}',
            'Window Name: {s.WindowName}',
            'Window\'s Process Name: {s.WindowProcName}',
            'Position: {s.Position}',
            'MessageName: {s.MessageName}',
        )).format(s=self)
#######################################################################
# ########################END CLASS DEF################################
#######################################################################
if __name__ == '__main__':
    # Smoke test: hook keyboard and mouse, print every event for 10
    # seconds, then shut the RECORD context down cleanly.
    hm = HookManager()
    hm.HookKeyboard()
    hm.HookMouse()
    hm.KeyDown = hm.printevent
    hm.KeyUp = hm.printevent
    hm.MouseAllButtonsDown = hm.printevent
    hm.MouseAllButtonsUp = hm.printevent
    hm.start()
    time.sleep(10)
    hm.cancel()
| [
"noreply@github.com"
] | noreply@github.com |
96ed1d46dc681ab9450c3dc3b2f8bba55c34bf12 | 2c53e69fedd597914c745102e4e71f1a4657ac51 | /flask/image_classification/web/app.py | fc4f24ce6eaf303310abfe9a4b5715e8d72709f8 | [] | no_license | arjunrm/python | 1fa0d736c2341c82f25bbe56efde3ce6669d664c | 0b67822fa01fd2ef943e8fda26f27fef7857aa57 | refs/heads/master | 2020-04-05T15:35:45.248447 | 2019-01-08T17:23:38 | 2019-01-08T17:23:38 | 156,975,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,953 | py | """
Resources Address Protocol Params Return codes
Register /register POST username, pwd 200 OK, 301 Invalid username
Classify /classify POST username, pwd, url/*.jpeg 200 OK, 301 Invalid username, 302 Invalid pwd, 303 Out of tokens
Refill /refill POST username, pwd, admin_pwd, refill 200 OK, 301 Invalid username, 302 Invalid pwd, 304 Invalid admin credentials
"""
from flask import Flask, jsonify, request
from flask_restful import Api, Resource
from pymongo import MongoClient
from bson.json_util import dumps
import bcrypt
import requests
import subprocess
import json
app = Flask(__name__)
api = Api(app=app)
# MongoDB at host "db" (presumably a container/service name -- confirm
# against the deployment setup).
client = MongoClient("mongodb://db:27017")
db = client.ImageRecognition
# "users": registered API users and their token balances.
users = db["users"]
# "admin": the single admin credential document, recreated on every boot.
admin = db["admin"]
admin.delete_many({})
# insert admin credentials into admin collection
# SECURITY NOTE(review): the admin password is hard-coded in source;
# it should come from configuration/environment instead.
admin.insert_one({
    "username" : "admin",
    "password" : bcrypt.hashpw("abc123".encode('utf8'), bcrypt.gensalt())
})
def user_exists(username):
    """Return True when a user document with this username is stored."""
    return users.count_documents({"username": username}) > 0
def verify_pwd(username, password):
    """Check the supplied password against the user's stored bcrypt hash."""
    stored_hash = users.find({"username": username})[0]["password"]
    # Hashing with the stored hash as salt reproduces it on a match.
    return bcrypt.hashpw(password.encode('utf8'), stored_hash) == stored_hash
def verify_admin_credentials(username, password):
    """Return True only for the (single) stored admin username/password."""
    if admin.count_documents({"username": username}) != 1:
        return False
    stored_hash = admin.find_one({"username": username})["password"]
    return bcrypt.hashpw(password.encode('utf8'), stored_hash) == stored_hash
def get_tokens(username):
    """Return the number of API tokens the user currently holds."""
    return users.find_one({"username": username})["tokens"]
def ret_json(status, message):
    """Build the standard {status, message} JSON API response."""
    return jsonify({"status": status, "message": message})
@api.resource("/dispusers")
class DispUsers(Resource):
def get(self):
return dumps(users.find())
@api.resource("/dispadmin")
class DispAdmin(Resource):
def get(self):
return dumps(admin.find())
@api.resource("/dropusers")
class DropUsers(Resource):
def get(self):
result = users.delete_many({})
retJson = {
"status" : 200,
"message" : "Dropped documents in users collection",
"deleted count" : result.deleted_count()
}
return jsonify(retJson)
@api.resource("/register")
class Register(Resource):
def post(self):
# get the posted data
posted_data = request.get_json()
# get the data
username = posted_data["username"]
password = posted_data["password"]
# check if user exists
if user_exists(username):
return ret_json(301, "Invalid username")
# hash(password + salt)
hash_password = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
# store in db
users.insert_one({
"username" : username,
"password" : hash_password,
"tokens" : 2
})
# create return json message
return ret_json(200, "You have successfully signed up to the API")
@api.resource("/classify")
class Classify(Resource):
def post(self):
# get the posted data
posted_data = request.get_json()
# get the data
username = posted_data["username"]
password = posted_data["password"]
url = posted_data["url"]
# check if user exists
if not user_exists(username):
return ret_json(301, "Invalid username")
if not verify_pwd(username, password):
return ret_json(302, "Invalid password")
tokens = get_tokens(username)
if tokens <= 0:
return ret_json(303, "Out of tokens")
r = requests.get(url)
retJson = {}
with open("temp.jpg", "wb") as f:
f.write(r.content)
proc = subprocess.Popen('python classify_image.py --model_dir=. --image_file=temp.jpg', stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
proc.communicate()[0]
proc.wait()
with open("text.txt") as g:
retJson = json.load(g)
users.update({
"username" : username,
},
{
"$set" : {
"tokens" : tokens - 1
}
})
return retJson
@api.resource("/refill")
class Refill(Resource):
def post(self):
# get the posted data
posted_data = request.get_json()
# get the data
username = posted_data["username"]
password = posted_data["password"]
admin_username = posted_data["admin_username"]
admin_password = posted_data["admin_password"]
refill = posted_data["refill"]
# check if user exists
if not user_exists(username):
return ret_json(301, "Invalid username")
if not verify_pwd(username, password):
return ret_json(302, "Invalid password")
# verify admin credentials
if not verify_admin_credentials(admin_username, admin_password):
return ret_json(304, "Invalid admin credentials")
tokens = get_tokens(username)
users.update_one({
"username" : username
},
{
"$set" : {
"tokens" : tokens + refill
}
})
return ret_json(200, "Refilled successfully")
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| [
"arjun.r.m@gmail.com"
] | arjun.r.m@gmail.com |
244a28682c43ecd69013198d26c87517bb559d89 | ed7412b75887753c4b4a37b134e7b869d183514b | /taxProject/tax/resources.py | 606a5304df01ff9689e8866be5e2871f72f2e743 | [] | no_license | alison990222/pwc-tax-project | 4ad73dbcc3f2330bf6d4919ee515887b97fa3b2b | c065ad4d1468262ffbdbd2e959cbcf1231dc2a69 | refs/heads/master | 2023-02-24T23:36:07.836691 | 2021-01-17T15:34:58 | 2021-01-17T15:34:58 | 278,887,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from import_export import resources
from .models import TaxDatabase,itemDatabase
class taxResource(resources.ModelResource):
    """django-import-export resource mapping for the TaxDatabase model."""
    class Meta:
        model = TaxDatabase
class itemResource(resources.ModelResource):
    """django-import-export resource mapping for the itemDatabase model."""
    class Meta:
model = itemDatabase | [
"zhangxt0222@163.com"
] | zhangxt0222@163.com |
27226416850f2fe28cb2a86260e79f51dcd60fbb | 8e71da707818ae4845a612caa488abba7cd62c60 | /index.spec | ce553e2e559b9268534351242d4181f0c8a95d0c | [] | no_license | etheral12138/Auto-Vscode-Cpp | 8e94a38773c5beb2934b1bfddc3ec1bd51163d74 | 7ac6dbef330955ffa4efaf38db76f4237118fce6 | refs/heads/main | 2023-07-16T00:21:34.015366 | 2021-08-25T15:08:17 | 2021-08-25T15:08:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | spec | # -*- mode: python ; coding: utf-8 -*-
# No bytecode encryption (PyInstaller --key not used).
block_cipher = None
# Analysis: collect the entry script, its imports and data files.
# NOTE(review): the entry path 'app.pytic\\index.h' looks garbled -- an
# Analysis normally lists a .py entry script here; confirm the real path.
a = Analysis(['app.pytic\\index.h'],
             pathex=['D:\\Project\\auto-vscode-cpp'],
             binaries=[],
             datas=[('static', 'static')],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Bundle the pure-Python modules into a compressed archive.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Build an executable named 'index'; binaries/zipfiles/datas are passed
# straight into EXE (one-file layout), UPX-compressed, no console window.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='index',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=False )
| [
"318483724@qq.com"
] | 318483724@qq.com |
8dff938bdff7afd9c4ad0cc6ec46696b77ba446a | ea24a5114db3d40dfc290bcfbc30e5a9f24541c1 | /project/site02/forms.py | 77636ae21c580f9ef5d619889bd8b02180a6c1fa | [] | no_license | pawan-quovantis/Goals | 1c070015f871c1190c6b25b119e25e00c81c2abe | 317012c10e6eca875837f84bc574dded780c5433 | refs/heads/master | 2021-01-20T19:13:10.982332 | 2016-10-01T19:28:22 | 2016-10-01T19:28:22 | 65,194,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django import forms
# Optional leading "+", optional country code "1", then 8-23 characters
# drawn from digits, dashes and spaces.
PHONE_FIELD_REGEX = r'^\+?1?[\d\- ]{8,23}$'
class SignupForm(forms.Form):
    """Sign-up form collecting basic account details.

    Plain Form (not a ModelForm): the view is responsible for persisting
    the data. The phone number is validated against PHONE_FIELD_REGEX.
    """
    name = forms.CharField(label="Name")
    email = forms.EmailField(label = "E-Mail")
    phone = forms.RegexField(regex=PHONE_FIELD_REGEX, label="Phone Number")
    dob = forms.DateField(label = "Date Of Birth")
password = forms.CharField(widget=forms.PasswordInput, label="Password") | [
"pawan.uppal@quovantis.com"
] | pawan.uppal@quovantis.com |
44b75915d885184414c8aaa8c6bc76eebca0c7ea | 09c62e251c7ca035ce7bc61e0630271552082b32 | /torch_connectomics/utils/vis/visualize.py | f844c87f73c95730f4c26adabc95d33ac0afb40b | [
"MIT"
] | permissive | HoraceKem/pytorch_connectomics | 5a9994b40e49040826a4427a39af209cb6fcd696 | 2cd4e17b6fa83005a13c1347a01b8b6964e746c3 | refs/heads/master | 2020-06-12T09:40:38.341103 | 2019-06-14T01:29:08 | 2019-06-14T01:29:08 | 194,261,646 | 1 | 0 | MIT | 2019-06-28T11:21:51 | 2019-06-28T11:21:51 | null | UTF-8 | Python | false | false | 2,180 | py | import torch
import torchvision.utils as vutils
N = 8 # default maximum number of sections to show
def prepare_data(volume, label, output):
    """Clip the leading (section) dimension of the three tensors to N.

    4-D inputs (2D data) are sliced directly along dim 0.
    5-D inputs (3D data) first take sample 0 of the batch and move the
    depth axis to the front (z, c, y, x) before slicing.
    Tensors of any other rank fall through and return None.
    """
    rank = volume.dim()
    if rank == 5:
        # take the first sample and bring depth to the leading axis
        volume = volume[0].permute(1, 0, 2, 3)
        label = label[0].permute(1, 0, 2, 3)
        output = output[0].permute(1, 0, 2, 3)
    if rank in (4, 5):
        if volume.size(0) > N:
            return volume[:N], label[:N], output[:N]
        return volume, label, output
def visualize(volume, label, output, iteration, writer):
    """Write Input/Label/Output image grids to the tensorboard writer."""
    volume, label, output = prepare_data(volume, label, output)
    depth, _, height, width = volume.size()  # z, c, y, x
    full_shape = (depth, 3, height, width)
    grids = []
    for tensor in (volume, label, output):
        # detach from the graph, move to CPU, broadcast to 3 channels
        visual = tensor.detach().cpu().expand(*full_shape)
        grids.append(
            vutils.make_grid(visual, nrow=8, normalize=True, scale_each=True))
    volume_show, label_show, output_show = grids
    writer.add_image('Input', volume_show, iteration)
    writer.add_image('Label', label_show, iteration)
    writer.add_image('Output', output_show, iteration)
def visualize_aff(volume, label, output, iteration, writer):
    # Show the raw volume next to the three affinity channels of both the
    # prediction (output) and the ground truth (label) in one grid.
    volume, label, output = prepare_data(volume, label, output)
    sz = volume.size() # z,c,y,x
    canvas = []
    # raw input, broadcast from 1 to 3 channels for RGB display
    volume_visual = volume.detach().cpu().expand(sz[0],3,sz[2],sz[3])
    canvas.append(volume_visual)
    # one 3-channel panel per affinity channel (presumably x/y/z
    # affinities -- confirm channel order against the model definition)
    output_visual = [output[:,i].detach().cpu().unsqueeze(1).expand(sz[0],3,sz[2],sz[3]) for i in range(3)]
    label_visual = [label[:,i].detach().cpu().unsqueeze(1).expand(sz[0],3,sz[2],sz[3]) for i in range(3)]
    canvas = canvas + output_visual
    canvas = canvas + label_visual
    canvas_merge = torch.cat(canvas, 0)
    canvas_show = vutils.make_grid(canvas_merge, nrow=8, normalize=True, scale_each=True)
writer.add_image('Affinity', canvas_show, iteration) | [
"linzudi@g.harvard.edu"
] | linzudi@g.harvard.edu |
aee42854a395b4d61d8ce51c1e221373732792c1 | ab102e0a4849708cf7635cdf241755b933cb6f11 | /test_1/test/2.py | 5077d38571fbd82f8be1613075a5315fe1c6b81a | [] | no_license | onioned/python_program | 1e6a21992fbe549b0ce4f88694398d721fb23f13 | c8b7859d1a909fd9b26947ed8f09d10a265eb351 | refs/heads/master | 2022-11-15T18:01:41.347279 | 2018-02-07T16:07:25 | 2018-02-07T16:07:25 | 116,539,362 | 0 | 1 | null | 2022-10-23T02:21:46 | 2018-01-07T05:11:02 | Python | UTF-8 | Python | false | false | 86 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
a='222222'
print('这是模块test.2'); | [
"870666103@qq.com"
] | 870666103@qq.com |
1106bfe166779ee20fd33f2cb5b54e81739b2933 | 3ef333f6fd14aa7081883fa02459eb0e98b47274 | /cmssw_changed_files/DoublePhoton_closeECAL.py | 4268a98209ea1ef7c38ee6b9754e7b22c66ede86 | [] | no_license | pfclustering/ECALGen | 91c2121dd216691f5a059f346582fe494d3d8aba | 650f1d90a8c7631faab41e1ea0e05ef191e3ef40 | refs/heads/master | 2020-03-22T11:47:32.967328 | 2019-07-19T09:15:08 | 2019-07-19T09:15:08 | 139,996,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,916 | py | ##### TO DO: check the parameters
# CMSSW configuration fragment: replaces the event generator with a
# CloseByParticleGunProducer shooting two photons (PartID 22, 22) close
# together; psethack labels them "two particles close to EB" and
# Pointing=True aims the showers at the detector.
# NOTE(review): `process` is assumed to be defined by the enclosing
# cmsDriver configuration -- this fragment is not standalone Python.
import FWCore.ParameterSet.Config as cms
process.generator = cms.EDProducer("CloseByParticleGunProducer",
                                   PGunParameters = cms.PSet(PartID = cms.vint32(22, 22),
                                                             NParticles = cms.int32(2),
                                                             EnMin = cms.double(1.), # in GeV
                                                             EnMax = cms.double(100.),
                                                             RMin = cms.double(123.8), # in cm
                                                             RMax = cms.double(123.8),
                                                             ZMin = cms.double(-304.5), # in cm
                                                             ZMax = cms.double(304.5),
                                                             Delta = cms.double(300), # in cm -> phi1-phi2 = Delta/R
                                                             Pointing = cms.bool(True),# otherwise showers parallel/perpendicular to beam axis
                                                             Overlapping = cms.bool(False),
                                                             RandomShoot = cms.bool(False),
                                                             MaxPhi = cms.double(3.14159265359),
                                                             MinPhi = cms.double(-3.14159265359),
                                                             MaxEta = cms.double(0.), # dummy, it is not used
                                                             MinEta = cms.double(0.), # dummy, it is not used
                                                             ),
                                   Verbosity = cms.untracked.int32(1),
                                   psethack = cms.string('two particles close to EB'),
                                   AddAntiParticle = cms.bool(False),
                                   firstRun = cms.untracked.uint32(1)
                                   )
| [
"maria.giulia.ratti@cern.ch"
] | maria.giulia.ratti@cern.ch |
0614aa941c80c6a29b4d064d90bb192943fb19fa | 2d95bc422b3cbd01a5fafbb017996e9588d50fc3 | /utils/hash_utils.py | 2726a261f54e5168ca20671c1c968a27ecebdb1c | [] | no_license | nroshni/block-chain | 91f86d40bbfae011cba1995e9a4a9aa44bae01c5 | 0a435c6df3c094c8986e9046967c5c2dede83688 | refs/heads/master | 2021-04-24T07:20:47.081022 | 2020-04-09T22:01:51 | 2020-04-09T22:01:51 | 250,090,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import json
import logging
import hashlib
logger = logging.getLogger(__name__)
def hash_string_sha256(sstring):
    """Return the hex-encoded SHA-256 digest of the given byte string."""
    digest = hashlib.sha256(sstring)
    return digest.hexdigest()
def hash_block(block):
    """Return the SHA-256 hex hash of a block's JSON-serialized state."""
    logger.info("Computing hash of the block")
    # Work on a shallow copy so hashing never mutates the block itself.
    snapshot = block.__dict__.copy()
    # Transactions must be JSON-serializable: convert each transaction
    # object to its ordered-dict form first.
    snapshot['transactions'] = [tx.to_ordered_dict()
                                for tx in snapshot['transactions']]
    # sort_keys=True makes the serialization -- and thus the hash -- stable.
    return hash_string_sha256(
        json.dumps(snapshot, sort_keys=True).encode())
| [
"roshni.navinchandra@gmail.com"
] | roshni.navinchandra@gmail.com |
a89d9222bee0ded8bd36c1c69d2dacb9bfb28e01 | 7a6a2076cffbbd47316818b37ddf22a932002065 | /python/702 - Search in a Sorted Array of Unknown Size/main.py | f23ffb8bc239c9335e262a01b41c66efce7866a5 | [] | no_license | or0986113303/LeetCodeLearn | 6bd0aa16c8c80581e1c85032aca0f7a055f5e234 | 96fdc45d15b4150cefe12361b236de6aae3bdc6a | refs/heads/develop | 2023-06-14T01:30:41.103572 | 2021-07-01T08:59:08 | 2021-07-01T08:59:08 | 291,066,699 | 0 | 0 | null | 2020-08-31T02:44:26 | 2020-08-28T14:25:53 | Python | UTF-8 | Python | false | false | 1,577 | py | # """
# This is ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
#class ArrayReader(object):
# def get(self, index):
# """
# :type index: int
# :rtype int
# """
class Solution(object):
def fibosearch(self, source, target):
fibo1 = 1
fibo2 = 0
fibosum = fibo1 + fibo2
offset = -1
capacity = 0
resulttmp = float('-inf')
while resulttmp < target:
fibo2 = fibo1
fibo1 = fibosum
fibosum = fibo1 + fibo2
resulttmp = source.get(fibosum)
capacity = fibosum + 1
print(capacity)
while fibosum > 1:
operatorindex = min(fibo2 + offset, capacity - 1)
if source.get(operatorindex) == target:
return operatorindex
elif source.get(operatorindex) > target:
fibosum = fibo1
fibo1 = fibo2
fibo2 = fibosum - fibo1
else :
fibo2 = fibo1
fibo1 = fibosum
fibosum = fibo1 + fibo2
offset = operatorindex
return -1
def search(self, reader, target):
"""
:type reader: ArrayReader
:type target: int
:rtype: int
"""
if reader is None:
return -1
elif reader.get(0) == target:
return 0
result = self.fibosearch(reader, target)
print(result)
return result
| [
"or0986113303@gmail.com"
] | or0986113303@gmail.com |
d47c6227ad427320d5ded50f108c7fa022711e39 | 4e6e4e91dd104d7505dbbf50b5171f19a72c3b3d | /pix2pix.py | 8add4839b20b78d7f82f3bfa3c29014c2bc9074f | [] | no_license | nnUyi/pix2pix | ad0e5d0ee1e2868c420f3ab3459b584d4415152c | 9c51c4a7f6e09c10906692e155d09f4265ffef7b | refs/heads/master | 2021-05-06T16:30:26.287155 | 2017-12-10T14:59:23 | 2017-12-10T14:59:23 | 113,755,616 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,942 | py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
import time
from ops import *
from glob import glob
from utils import *
class pix2pix():
model_name = 'pix2pix'
def __init__(self, config, batch_size=1, input_height=256, input_width=256, input_channels=3, df_dim=64, gf_dim=64, sess=None):
self.batch_size = batch_size
self.gf_dim = gf_dim
self.df_dim = df_dim
self.input_height = input_height
self.input_width = input_width
self.input_channels = input_channels
self.config = config
self.sess = sess
def generator_unet(self, input_x, scope_name='generator', reuse=False):
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
with slim.arg_scope([slim.conv2d_transpose, slim.conv2d],
#weights_regularizer = slim.l2_regularizer(0.05),
weights_initializer = tf.truncated_normal_initializer(stddev=0.02),
activation_fn = None,
normalizer_fn = slim.batch_norm,
padding='SAME'):
conv1 = leaky_relu(slim.conv2d(input_x, self.gf_dim, [5,5], stride=2, normalizer_fn=None, scope='g_conv1'))
conv2 = leaky_relu(slim.conv2d(conv1, self.gf_dim*2, [5,5], stride=2, scope='g_conv2'))
conv3 = leaky_relu(slim.conv2d(conv2, self.gf_dim*4, [5,5], stride=2, scope='g_conv3'))
conv4 = leaky_relu(slim.conv2d(conv3, self.gf_dim*8, [5,5], stride=2, scope='g_conv4'))
conv5 = leaky_relu(slim.conv2d(conv4, self.gf_dim*8, [5,5], stride=2, scope='g_conv5'))
conv6 = leaky_relu(slim.conv2d(conv5, self.gf_dim*8, [5,5], stride=2, scope='g_conv6'))
conv7 = leaky_relu(slim.conv2d(conv6, self.gf_dim*8, [5,5], stride=2, scope='g_conv7'))
conv8 = slim.conv2d(conv7, self.gf_dim*8, [5,5], stride=2, activation_fn=None, scope='g_conv8')
dconv1 = slim.conv2d_transpose(tf.nn.relu(conv8), self.gf_dim*8, [5,5], stride=2, activation_fn=None, scope='g_dconv1')
dconv1 = tf.nn.dropout(dconv1, 0.5)
dconv1 = tf.concat([dconv1, conv7], 3)
dconv2 = slim.conv2d_transpose(tf.nn.relu(dconv1), self.gf_dim*8, [5,5], stride=2, activation_fn=None, scope='g_dconv2')
dconv2 = tf.nn.dropout(dconv2, 0.5)
dconv2 = tf.concat([dconv2, conv6], 3)
dconv3 = slim.conv2d_transpose(tf.nn.relu(dconv2), self.gf_dim*8, [5,5], stride=2, activation_fn=None, scope='g_dconv3')
dconv3 = tf.nn.dropout(dconv3, 0.5)
dconv3 = tf.concat([dconv3, conv5], 3)
dconv4 = slim.conv2d_transpose(tf.nn.relu(dconv3), self.gf_dim*8, [5,5], stride=2, activation_fn=None, scope='g_dconv4')
#dconv4 = tf.nn.dropout(dconv4, 0.5)
dconv4 = tf.concat([dconv4, conv4], 3)
dconv5 = slim.conv2d_transpose(tf.nn.relu(dconv4), self.gf_dim*4, [5,5], stride=2, activation_fn=None, scope='g_dconv5')
#dconv5 = tf.nn.dropout(dconv5, 0.5)
dconv5 = tf.concat([dconv5, conv3], 3)
dconv6 = slim.conv2d_transpose(tf.nn.relu(dconv5), self.gf_dim*2, [5,5], stride=2, activation_fn=None, scope='g_dconv6')
#dconv6 = tf.nn.dropout(dconv6, 0.5)
dconv6 = tf.concat([dconv6, conv2], 3)
# 128
dconv7 = slim.conv2d_transpose(tf.nn.relu(dconv6), self.gf_dim, [5,5], stride=2, activation_fn=None, scope='g_dconv7')
#dconv7 = tf.nn.dropout(dconv7, 0.5)
dconv7 = tf.concat([dconv7, conv1], 3)
# 256
out = slim.conv2d_transpose(tf.nn.relu(dconv7), self.input_channels, [5,5], stride=2, normalizer_fn=None, activation_fn=tf.nn.tanh, scope='g_out')
print(out)
return out
def discriminator(self, input_x, scope_name='discriminator', reuse=False):
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
with slim.arg_scope([slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
#weights_regularizer=slim.l2_regularizer(0.05),
activation_fn = None,
normalizer_fn = slim.batch_norm,
padding='SAME'):
# 256->128
conv1 = leaky_relu(slim.conv2d(input_x, self.df_dim, [5,5], stride=2, normalizer_fn=None, scope='d_conv1'))
print(conv1)
# 128->64
conv2 = leaky_relu(slim.conv2d(conv1, self.df_dim*2, [5,5], stride=2, scope='d_conv2'))
print(conv2)
# 64->32
conv3 = leaky_relu(slim.conv2d(conv2, self.df_dim*4, [5,5], stride=2, scope='d_conv3'))
print(conv3)
# 32->31
#conv3 = tf.pad(conv3, [[0,0],[1,1],[1,1],[0,0]], mode='CONSTANT')
conv4 = leaky_relu(slim.conv2d(conv3, self.df_dim*8, [5,5], stride=1, scope='d_conv4'))
print(conv4)
# 31->30
#conv4 = tf.pad(conv4, [[0,0],[1,1],[1,1],[0,0]], mode='CONSTANT')
#conv5 = slim.conv2d(conv4, 1, [4,4], stride=1, normalizer_fn=None, activation_fn=None, padding='VALID', scope='d_conv5')
conv4_flat = tf.reshape(conv4, [self.batch_size, -1])
fc1 = slim.fully_connected(conv4_flat, 1, normalizer_fn=None, activation_fn=None, scope='d_fc1')
print(fc1)
return(fc1)
#return conv5
def build_model(self):
self.input_A = tf.placeholder(tf.float32, [self.batch_size, self.input_height, self.input_width, self.input_channels], name='input_A')
self.input_B = tf.placeholder(tf.float32, [self.batch_size, self.input_height, self.input_width, self.input_channels], name='input_B')
self.input_AB = tf.concat([self.input_A, self.input_B], 3)
assert self.input_AB.get_shape().as_list() == [self.batch_size, self.input_height, self.input_width, self.input_channels*2]
self.D_real_logits = self.discriminator(self.input_AB, reuse=False)
self.fake_B = self.generator_unet(self.input_A, reuse=False)
self.fake_AB = tf.concat([self.input_A, self.fake_B], 3)
self.D_fake_logits = self.discriminator(self.fake_AB, reuse=True)
def sigmoid_cross_entropy_with_logits(x, y):
try:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y)
except:
return tf.nn.sigmoid_cross_entropy_with_logits(logits=x, targets=y)
self.D_real_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.D_real_logits, tf.ones_like(self.D_real_logits)))
self.D_fake_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.D_fake_logits, tf.zeros_like(self.D_fake_logits)))
self.d_loss = self.D_real_loss + self.D_fake_loss
self.l1_loss = tf.reduce_mean(tf.abs(self.fake_B-self.input_B))
self.G_adv_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.D_fake_logits, tf.ones_like(self.D_fake_logits)))
self.g_loss = self.config.lambd*self.l1_loss + self.G_adv_loss
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
self.d_optimization = tf.train.AdamOptimizer(learning_rate=self.config.lr, beta1=self.config.beta1, beta2=self.config.beta2).minimize(self.d_loss, var_list=d_vars)
self.g_optimization = tf.train.AdamOptimizer(learning_rate=self.config.lr, beta1=self.config.beta1, beta2=self.config.beta2).minimize(self.g_loss, var_list=g_vars)
self.l1_loss_summary = tf.summary.scalar('l1_loss', self.l1_loss)
self.d_loss_summary = tf.summary.scalar('d_loss', self.d_loss)
self.g_loss_summary = tf.summary.scalar('g_loss', self.g_loss)
self.summaries = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter('logs', self.sess.graph)
# save model
self.saver = tf.train.Saver()
def train(self):
try:
tf.global_variables_initializer().run()
except:
tf.initialize_all_variables().run()
data_list = glob(os.path.join(self.config.dataset_dir, self.config.dataset_name, self.config.phase, '*.*'))
batch_idxs = int(len(data_list)/self.batch_size)
counter = 0
check_bool, counter = self.load_model(self.config.checkpoint_dir)
if check_bool:
print('[!!!] load model successfully')
counter = counter+1
else:
print('[***] fail to load model')
counter = 1
start_time = time.time()
for epoch in range(self.config.epoches):
for idx in range(batch_idxs):
batch_files = data_list[idx*self.batch_size:(idx+1)*self.batch_size]
batch_x = [get_image(batch_file) for batch_file in batch_files]
batch_x = np.array(batch_x).astype(np.float32)
input_B = batch_x[:,:,:self.input_width,:]
input_A = batch_x[:,:,self.input_width:,:]
_, d_loss, summaries = self.sess.run([self.d_optimization, self.d_loss, self.summaries], feed_dict={self.input_A:input_A,
self.input_B:input_B})
_, g_loss, l1_loss, summaries = self.sess.run([self.g_optimization, self.g_loss, self.l1_loss, self.summaries], feed_dict={self.input_A:input_A,self.input_B:input_B})
#_, g_loss, l1_loss, summaries = self.sess.run([self.g_optimization, self.g_loss, self.l1_loss, self.summaries], feed_dict={self.input_A:input_A,self.input_B:input_B})
counter=counter+1
end_time = time.time()
total_time = end_time - start_time
print('epoch{}[{}/{}]:phase:{}, total_time:{:.4f}, d_loss:{:.4f}, g_loss:{:.4f}, l1_loss:{:.4f}'.format(epoch, idx, batch_idxs, self.config.phase, total_time, d_loss, g_loss, self.config.lambd*l1_loss))
self.summary_writer.add_summary(summaries, global_step=counter)
if np.mod(counter, 100)==0:
self.sample(self.config.sample_dir, epoch, idx)
if np.mod(counter, 500)==0:
self.save_model(self.config.checkpoint_dir, counter)
def sample(self, sample_dir, epoch, idx):
input_A, input_B = self.load_sample()
sample_B = self.sess.run(self.fake_B, feed_dict={self.input_A:input_A, self.input_B:input_B})
sample = np.concatenate([input_A, input_B, sample_B], 2)
save_images(sample, [1,1], '{}/{}_{}_{:04d}_{:04d}.png'.format(self.config.sample_dir,self.config.dataset_name, self.config.phase, epoch, idx))
def load_sample(self):
batch_files = np.random.choice(glob(os.path.join(self.config.dataset_dir, self.config.dataset_name, 'val', '*.*')), self.batch_size)
batch_data = [get_image(batch_file) for batch_file in batch_files]
batch_data = np.array(batch_data).astype(np.float32)
input_A = batch_data[:,:,self.input_width:,:]
input_B = batch_data[:,:,:self.input_width,:]
return input_A, input_B
def test(self):
data_list = glob(os.path.join(self.config.dataset_dir, self.config.dataset_name, self.config.phase, '*.*'))
batch_idxs = int(len(data_list)/self.batch_size)
print('test')
counter = 0
check_bool, counter = self.load_model(self.config.checkpoint_dir)
if check_bool:
print('[!!!] load model successfully')
else:
print('[***] fail to load model')
return
for idx in range(batch_idxs):
batch_files = data_list[idx*self.batch_size:(idx+1)*self.batch_size]
batch_x = [get_image(batch_file) for batch_file in batch_files]
batch_x = np.array(batch_x).astype(np.float32)
input_B = batch_x[:,:,:self.input_width,:]
input_A = batch_x[:,:,self.input_width:,:]
#input_B = np.random.normal(-1,1,[1,256,256,3])
#print(batch_files)
sample_B = self.sess.run(self.fake_B, feed_dict={self.input_A:input_A})
sample = np.concatenate([input_A, input_B, sample_B], 2)
save_images(sample, [1,1], '{}/{}_{}_{:04d}.png'.format(self.config.test_dir, self.config.dataset_name, self.config.phase, idx))
#save_images(batch_x, [1,1], '{}/{}_{}_{:04d}.png'.format(self.config.test_dir, self.config.dataset_name, 'real', idx))
print('testing:{}'.format(idx))
def valuate(self, sample_dir, epoch, idx):
pass
# save model
@property
def model_dir(self):
return "{}_{}_{}".format(
self.model_name, self.config.dataset_name,
self.batch_size)
def save_model(self, checkpoint_dir, step):
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name+'.model'), global_step=step)
def load_model(self, checkpoint_dir):
import re
print(" [*] Reading checkpoints...")
checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer("(\d+)(?!.*\d)",ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
if __name__=='__main__':
input_x = np.random.normal(-1,1, [64,256,256,3]).astype(np.float32)
gan = pix2pix(None)
gan.discriminator(input_x)
gan.generator_unet(input_x)
| [
"noreply@github.com"
] | noreply@github.com |
c0b10673a6a1226da1e5ccff546ad69cad5d823f | 62bd80c2a30d90dc491d90872de5addab8773ef8 | /insitu/analysis/generate_dax.py | 77db7d4c9535f8bb9585e9f2f0e3c89c371020ba | [] | no_license | tumaianhdo/LLNL-HPC-BigData | ad81fdab14747a66e3272096995122f22045f806 | 5c2f7096b9c5179c2aed52176d81e26fe14c5cbb | refs/heads/master | 2020-03-25T05:28:24.570543 | 2019-07-11T22:51:51 | 2019-07-11T22:51:51 | 143,448,526 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | #!/usr/bin/env python
import sys
import os
import json
# Import the Python DAX library
from Pegasus.DAX3 import *
# The name of the DAX file is the first argument
if len(sys.argv) != 4:
sys.stderr.write("Usage: %s DAXFILE SPARK_CYCLE_NUMBER CONFIG_FILE\n" % (sys.argv[0]))
sys.exit(1)
# Get input arguments
daxfile = sys.argv[1]
cycle_num = int(sys.argv[2])
configfile = sys.argv[3]
# Load event configuration file
data = None
with open(configfile) as data_file:
data = json.load(data_file)
# Get file name to handle in current cycle
name = data["event-dir"] + data["event-content"] + "_" + str(cycle_num * data["event-cycle"]) + ".npy"
if data["event-type"]=="hdfs-dir":
data_placement="nvm"
hdfs_path = "hdfs://"+ os.environ['HADOOP_NAMENODE']+ ":" + os.environ['HADOOP_NAMENODE_PORT']
file_name = hdfs_path + name
elif data["event-type"]=="file-dir":
data_placement="lustre"
lustre_path = "file://"
file_name = lustre_path + name
print name
print file_name
# Create a abstract dag
print "Creating ADAG..."
spark_tst_wf = ADAG("spark-test-workflow")
cur_dir = os.getcwd()
work_dir = os.environ['INST_WORK_HOME']
# spark_jar = File("analysis.py")
# spark_jar.addPFN(PFN("file://" + work_dir + "/analysis/input/analysis.py", "catalyst"))
spark_jar = File("MLTest_rdd.py")
spark_jar.addPFN(PFN("file://" + work_dir + "/analysis/input/MLTest_rdd.py", "catalyst"))
spark_tst_wf.addFile(spark_jar)
# Add spark test job
print "Adding Spark job..."
spark_tst_job = Job(namespace="pegasus",name="sprktest")
spark_tst_job.addArguments(spark_jar, file_name)
spark_tst_job.uses(spark_jar, link=Link.INPUT)
spark_tst_job.addProfile(Profile("pegasus", "runtime", "120"))
spark_tst_wf.addJob(spark_tst_job)
# Add clean up job
print "Adding clean up job..."
clean_up_job = Job(namespace="pegasus",name="cleanup")
clean_up_job.addArguments(data_placement, name)
spark_tst_wf.addJob(clean_up_job)
# Add dependency between jobs
spark_tst_wf.addDependency(Dependency(parent=spark_tst_job,child=clean_up_job))
# Write the DAX to stdout
print "Writing %s" % daxfile
f = open(daxfile, "w")
spark_tst_wf.writeXML(f)
f.close()
| [
"do7@catalyst159.llnl.gov"
] | do7@catalyst159.llnl.gov |
015a8e9ef9d42e0845eedd82384f1664674a5957 | 3be42b83a15d022f5863c96ec26e21bac0f7c27e | /tensorflow_probability/python/mcmc/legacy_random_walk_metropolis_test.py | cc0e6d73a93c859b63903599869a1b5536077d7b | [
"Apache-2.0"
] | permissive | ogrisel/probability | 846f5c13cddee5cf167b215e651b7479003f15d2 | 8f67456798615f9bf60ced2ce6db5d3dba3515fe | refs/heads/master | 2022-11-09T10:53:23.000918 | 2020-07-01T23:16:03 | 2020-07-01T23:17:25 | 276,580,359 | 2 | 1 | Apache-2.0 | 2020-07-02T07:37:58 | 2020-07-02T07:37:57 | null | UTF-8 | Python | false | false | 6,468 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for RandomWalkMetropolisNormal and RandomWalkMetropolisUniform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class RWMTest(test_util.TestCase):
def testRWM1DUniform(self):
"""Sampling from the Standard Normal Distribution."""
dtype = np.float32
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
samples, _ = tfp.mcmc.sample_chain(
num_results=2000,
current_state=dtype(1),
kernel=tfp.mcmc.RandomWalkMetropolis(
target.log_prob,
new_state_fn=tfp.mcmc.random_walk_uniform_fn(scale=dtype(2.)),
seed=test_util.test_seed()),
num_burnin_steps=500,
parallel_iterations=1) # For determinism.
sample_mean = tf.math.reduce_mean(samples, axis=0)
sample_std = tf.math.reduce_std(samples, axis=0)
[sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])
self.assertAllClose(0., sample_mean_, atol=0.17, rtol=0.)
self.assertAllClose(1., sample_std_, atol=0.2, rtol=0.)
def testRWM1DNormal(self):
"""Sampling from the Standard Normal Distribution with adaptation."""
dtype = np.float32
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
samples, _ = tfp.mcmc.sample_chain(
num_results=500,
current_state=dtype([1] * 8), # 8 parallel chains
kernel=tfp.mcmc.RandomWalkMetropolis(
target.log_prob,
seed=test_util.test_seed()),
num_burnin_steps=500,
parallel_iterations=1) # For determinism.
sample_mean = tf.math.reduce_mean(samples, axis=(0, 1))
sample_std = tf.math.reduce_std(samples, axis=(0, 1))
[sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])
self.assertAllClose(0., sample_mean_, atol=0.2, rtol=0.)
self.assertAllClose(1., sample_std_, atol=0.2, rtol=0.)
def testRWM1DCauchy(self):
"""Sampling from the Standard Normal Distribution using Cauchy proposal."""
dtype = np.float32
num_burnin_steps = 750
num_chain_results = 400
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
def cauchy_new_state_fn(scale, dtype):
cauchy = tfd.Cauchy(loc=dtype(0), scale=dtype(scale))
def _fn(state_parts, seed):
seed_stream = tfp.util.SeedStream(
seed, salt='RandomWalkCauchyIncrement')
next_state_parts = [
state + cauchy.sample(state.shape, seed=seed_stream())
for state in state_parts]
return next_state_parts
return _fn
samples, _ = tfp.mcmc.sample_chain(
num_results=num_chain_results,
num_burnin_steps=num_burnin_steps,
current_state=dtype([1] * 8), # 8 parallel chains
kernel=tfp.mcmc.RandomWalkMetropolis(
target.log_prob,
new_state_fn=cauchy_new_state_fn(scale=0.5, dtype=dtype),
seed=test_util.test_seed()),
parallel_iterations=1) # For determinism.
sample_mean = tf.math.reduce_mean(samples, axis=(0, 1))
sample_std = tf.math.reduce_std(samples, axis=(0, 1))
[sample_mean_, sample_std_] = self.evaluate([sample_mean, sample_std])
self.assertAllClose(0., sample_mean_, atol=0.2, rtol=0.)
self.assertAllClose(1., sample_std_, atol=0.2, rtol=0.)
def testRWM2DNormal(self):
"""Sampling from a 2-D Multivariate Normal distribution."""
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5], [0.5, 1]])
num_results = 500
num_chains = 100
# Target distribution is defined through the Cholesky decomposition
chol = tf.linalg.cholesky(true_cov)
target = tfd.MultivariateNormalTriL(loc=true_mean, scale_tril=chol)
# Assume that the state is passed as a list of 1-d tensors `x` and `y`.
# Then the target log-density is defined as follows:
def target_log_prob(x, y):
# Stack the input tensors together
z = tf.stack([x, y], axis=-1) - true_mean
return target.log_prob(tf.squeeze(z))
# Initial state of the chain
init_state = [np.ones([num_chains, 1], dtype=dtype),
np.ones([num_chains, 1], dtype=dtype)]
# Run Random Walk Metropolis with normal proposal for `num_results`
# iterations for `num_chains` independent chains:
states, _ = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=init_state,
kernel=tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=target_log_prob,
seed=test_util.test_seed()),
num_burnin_steps=200,
num_steps_between_results=1,
parallel_iterations=1)
states = tf.stack(states, axis=-1)
sample_mean = tf.math.reduce_mean(states, axis=[0, 1])
x = states - sample_mean
sample_cov = tf.math.reduce_mean(
tf.linalg.matmul(x, x, transpose_a=True), axis=[0, 1])
[sample_mean_, sample_cov_] = self.evaluate([
sample_mean, sample_cov])
self.assertAllClose(np.squeeze(sample_mean_), true_mean, atol=0.1, rtol=0.1)
self.assertAllClose(np.squeeze(sample_cov_), true_cov, atol=0.1, rtol=0.1)
def testRWMIsCalibrated(self):
rwm = tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=lambda x: -tf.square(x) / 2.,
)
self.assertTrue(rwm.is_calibrated)
def testUncalibratedRWIsNotCalibrated(self):
uncal_rw = tfp.mcmc.UncalibratedRandomWalk(
target_log_prob_fn=lambda x: -tf.square(x) / 2.,
)
self.assertFalse(uncal_rw.is_calibrated)
if __name__ == '__main__':
tf.test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
f812c4bd4a39c7b4c82a80a8072a3e21f1466daa | 9c9c79b2fcb993b96ba81a4c889515f474662ceb | /src/lang_models/cat_squad_files.py | a6db763ba397f6ea902879bfca7bf9762c1ed5db | [] | no_license | hhhhzy/AmazonQ-A | 6121aaf2391681ff2ef53dd4d6dfdc3b753b666d | 25c65bd8dc3675ca2fbf1e4eaf56ccd2cd34fdf3 | refs/heads/main | 2023-07-24T04:30:27.258291 | 2021-09-07T16:01:33 | 2021-09-07T16:01:33 | 404,034,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py |
import convert_squad
import config
import constants as C
import json
TEMPFILEPATH = './temp'
def cat_files(category, mode, max_review_len, max_num_spans, max_num_products, seed, num_processes):
paragraphs = []
for process_idx in range(num_processes):
filename = convert_squad.process_filepath(category, mode, max_review_len, max_num_spans, seed, process_idx)
with open(filename, 'r') as fp:
for line in fp:
paragraphs.append(json.loads(line.strip()))
data = [{
'title': 'AmazonDataset',
'paragraphs': paragraphs,
}]
out = {"data":data, "version":"1.0"}
outfile = 'Amazon-Squad_%s_%s_%d_%d_%d_%d.json' % (category, mode, max_review_len, max_num_spans, max_num_products, seed)
with open(outfile, 'w') as outfile:
json.dump(out, outfile)
def main():
main_params = convert_squad.get_main_params()
model_name = C.LM_QUESTION_ANSWERS_REVIEWS
params = config.get_model_params(model_name)
params[C.MODEL_NAME] = model_name
model_name = C.LM_QUESTION_ANSWERS_REVIEWS
params = config.get_model_params(model_name)
cat_files(
params[C.CATEGORY],
main_params.mode,
main_params.max_review_len,
main_params.max_num_spans,
main_params.max_num_products,
main_params.seed,
main_params.num_processes
)
if __name__ == '__main__':
main()
| [
"zheyuanh@uci.edu"
] | zheyuanh@uci.edu |
6c3ac720caf953775a53c2ce3a8b01e4afa2085d | c35d5157450c62f713e0521eb114c7d9c02463f9 | /r_debit/r_debit/asgi.py | 394bb7f9633ca56f2674d06cdbcad120815db361 | [
"MIT"
] | permissive | AshishMadhu/r-credit | e6acda68f7d210a9f6ce16a58f1360a916f5ac73 | 113c185bfc50192e6aade2387c6ba51ca4eeb03f | refs/heads/main | 2023-09-02T13:45:15.670284 | 2021-11-07T04:01:00 | 2021-11-07T04:01:00 | 339,771,012 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for r_debit project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'r_debit.settings')
application = get_asgi_application()
| [
"ashishmadhu26@gmail.com"
] | ashishmadhu26@gmail.com |
79f998c1ae08f5eac4dccac29ea00bf209c906d0 | 60044c76b631e622edb28f3a74971ce06211fac5 | /Python-for-Everybody/Python-Data-Structures/list.py | fa31bc357f500aa7cefac067eb8f807c1c0089d0 | [] | no_license | NestorMonroy/Courses-coursera | 8d45a858c79567d74f013ac27ac33d47e43abb96 | 98ac1aa5bb0cd9da5cea5be02995d5b65c779201 | refs/heads/master | 2023-08-14T13:36:07.348994 | 2021-09-22T06:13:57 | 2021-09-22T06:13:57 | 327,753,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | """
List are mutable
String are "inmutable" - we cannont change the contents of a string-
we must make a new string to make any change
List are "mutable" we can change an element of a list using the index
operator
"""
fruit = ['Banana']
fruit[0]= 'b' # error
x = fruit.lower()
print(x)
lotto = [2, 15, 26, 41, 63 ]
print(lotto)
lotto[2]= 28
print(lotto)
# How long is a list
greet = 'Hello Boke'
print(len(greet))
x = [1, 4, 'joe', 99]
print(len(x))
# using the range function
"""
The range function returns a list of numbers that range from
zero to one less than the parameter value
We can construct an index loop using for and integer iterator
"""
print(range(4))
friends = ['joel', 'david', 'jon']
print(len(friends))
print(range(len(friends)))
# A tale of two loops
friends = ['joel', 'david', 'jon']
for friend in friends:
print('Happy new year: ', friend)
for i in range(len(friends)):
friend = friends[i]
print('Happy new year: ', friend)
print(len(friends))
print(range(len(friends))) | [
"nestor.monroy.90@gmail.com"
] | nestor.monroy.90@gmail.com |
c2425a23eaa6ba2b413691f0ece1d0c6de00d2c8 | 6701fa31a19cf8e30a77ae3f2076dffbf2c0f697 | /cooperation_level/PGGw_epS01/sigmaRECIactGRIMw.py | 938bb5a556adeac7a41bc8e208012d6c4ad7030a | [] | no_license | l5d1l5/signalling-reciprocity | e94e5408d30ad6a4ae0ceb378f96d1618920e98d | f01a24e9971f20886f081a2bff8cf8d157ce564d | refs/heads/master | 2023-01-08T10:00:57.654948 | 2020-11-19T06:05:28 | 2020-11-19T06:05:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132,241 | py | # -*- coding: utf-8 -*-
def declareSTR(eps):
# Strategy: (sig_PGG, sig_0, A_sig0_T, A_sig0_noT, A_sig1_T, A_sig1_noT )
# if eps is scalar, it is just noise
# if eps is a list/array: eps[0]:noise, eps[1]:error in perception of the state of the nature
if isinstance(eps, (list, tuple, np.ndarray)):
noise=eps[0]
else:
noise=eps
nelem=6
STRm=np.zeros((2**nelem,nelem))
nST=0
i=np.zeros((nelem),dtype=int)
for i[0] in range (1,-1,-1):
for i[1] in range (1,-1,-1):
for i[2] in range (1,-1,-1):
for i[3] in range (1,-1,-1):
for i[4] in range (1,-1,-1):
for i[5] in range (1,-1,-1):
nST+=1
for j in range (0,nelem):
STRm[nST-1,j]=(1.-noise)*i[j]+noise*(1-i[j])
#if (j==0 or j==1): STRm[nST-1,j]=i[j] # signaling without error
if isinstance(eps, (list, tuple, np.ndarray)):
for iST in range (0,nST):
STRm[iST,0]=eps[1]*STRm[iST,1]+(1.-eps[1])*STRm[iST,0]
STRm[iST,1]=eps[1]*STRm[iST,0]+(1.-eps[1])*STRm[iST,1]
return STRm
def declareSTR_CD(epsv):
    """Return the two pure-action strategies (AllC and AllD) with noise.

    epsv[0] is the execution noise.  Neither strategy signals (first two
    entries are 0); row 0 acts with probability eps in every condition,
    row 1 with probability 1 - eps.
    """
    err = epsv[0]
    hit = 1. - err
    return np.array([[0., 0., err, err, err, err],
                     [0., 0., hit, hit, hit, hit]])
def declareSTR_REC(epsv):
    """Return the four noisy reciprocity strategies (4 x 6 array).

    Rows follow the original ordering comment (AllC, AllD, TFT, ATFT);
    no strategy signals.  epsv[0] is the execution noise.
    """
    e = epsv[0]
    e1 = 1. - e
    rows = [
        [0., 0., e,  e,  e,  e],
        [0., 0., e1, e,  e1, e],
        [0., 0., e,  e1, e,  e1],
        [0., 0., e1, e1, e1, e1],
    ]
    return np.array(rows)
def declareSTR_SIG(eps):
    """Enumerate the 16 pure signalling strategies under noise.

    A signalling strategy does not condition its action on whether the
    cooperation threshold was met last round, so columns 3 and 5 copy
    columns 2 and 4 respectively.

    Parameters
    ----------
    eps : float or sequence
        If scalar: execution noise only.
        If list/tuple/array: eps[0] is execution noise, eps[1] is the
        probability of misperceiving the state of nature (mixes the two
        signalling entries).

    Returns
    -------
    (16, 6) ndarray, rows in descending binary order over the four
    independent loci (sig_PGG, sig_0, A_sig0, A_sig1).
    """
    if isinstance(eps, (list, tuple, np.ndarray)):
        noise = eps[0]
    else:
        noise = eps
    keep = 1. - noise
    STRm = np.zeros((16, 6))
    for row in range(16):
        # Recover the four descending loop variables from the row bits.
        loci = [1 - ((row >> shift) & 1) for shift in (3, 2, 1, 0)]
        probs = [keep * b + noise * (1 - b) for b in loci]
        STRm[row, 0] = probs[0]
        STRm[row, 1] = probs[1]
        STRm[row, 2] = probs[2]
        STRm[row, 3] = probs[2]
        STRm[row, 4] = probs[3]
        STRm[row, 5] = probs[3]
    if isinstance(eps, (list, tuple, np.ndarray)):
        for row in range(16):
            # NOTE(review): sequential (not symmetric) mixing -- the second
            # line sees the already-updated column 0.  Preserved as-is.
            STRm[row, 0] = eps[1] * STRm[row, 1] + (1. - eps[1]) * STRm[row, 0]
            STRm[row, 1] = eps[1] * STRm[row, 0] + (1. - eps[1]) * STRm[row, 1]
    return STRm
def declareSTATE(N):
    """Enumerate, for every group composition k, the acting states.

    An acting state for composition k is a pair (i, j): i cooperators
    among the k first-strategy players and j cooperators among the
    remaining N-k players.

    Returns
    -------
    STATEmat : (N+1, int((N/2+1)**2)+1, 2) ndarray
        STATEmat[k] lists the (k+1)*(N-k+1) pairs in descending order
        (i from k down to 0, then j from N-k down to 0); unused slots
        are filled with -1.
    nSTATEv : (N+1,) int ndarray
        Number of valid states for each k.
    """
    n_slots = int((N / 2 + 1) ** 2) + 1  # enough for the widest k = N/2
    STATEmat = np.zeros((N + 1, n_slots, 2)) - 1
    nSTATEv = np.zeros((N + 1), dtype=int)
    for k in range(N + 1):
        slot = 0
        for coop_i in range(k, -1, -1):
            for coop_j in range(N - k, -1, -1):
                STATEmat[k, slot, :] = [coop_i, coop_j]
                slot += 1
        nSTATEv[k] = slot
    return STATEmat, nSTATEv
def calcERS(b,c,cs,lamb,N,Z,M,eps):
    """Return the indices of evolutionarily robust strategies (ERS).

    A resident strategy i is kept if no single mutant j would obtain a
    strictly higher payoff in an otherwise monomorphic i population of
    size Z playing the threshold game in groups of size N.

    b, c, cs : per-state benefit, cooperation-cost and signalling-cost
        pairs (index 0/1 = the two states of nature).
    lamb : probability weight of the first state of nature.
    N, Z, M : group size, population size, cooperation threshold.
    eps : noise parameter(s) forwarded to declareSTR.

    NOTE(review): the calls to calcBC2st below pass 8 positional
    arguments, but calcBC2st is defined with 10 parameters
    (..., M, Q, STATEm, w) -- the signalling threshold Q and the
    continuation probability w are missing, so calling this function
    as-is raises a TypeError.  This looks like stale code from an
    earlier calcBC2st signature; confirm before use.
    """
    H=calcH(N,Z)
    STRm=declareSTR(eps); nSTR=STRm.shape[0];
    STATEmat,nSTATEv=declareSTATE(N)
    # coef[row, :] = (benefit, -cost, -signal cost) weighted by the
    # probability of the corresponding state of nature.
    coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
    listERS=[]
    for i in range(0,nSTR): # resident
        isERS=1
        SIGi=STRm[i,0:2]; ACTi=STRm[i,2:6];
        for j in range(0,nSTR): #mutant
            if i!=j:
                SIGj=STRm[j,0:2]; ACTj=STRm[j,2:6];
                # Resident's payoff: group with N-1 residents + 1 mutant ...
                k=N-1; BCi,BCj=calcBC2st(SIGi,ACTi,SIGj,ACTj,k,N,M,STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]])
                # ... and all-resident group, weighted hypergeometrically.
                k=N; BCiR,ttt=calcBC2st(SIGi,ACTi,SIGj,ACTj,N,N,M,STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]])
                #k=0; ttt,BCjR=calcBC2st(SIGi,ACTi,SIGj,ACTj,k,N,M,STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]])
                PAYi=H[N-1,Z-2]*np.sum(coef*BCiR)+H[N-2,Z-2]*np.sum(coef*BCi)
                PAYj=np.sum(coef*BCj)
                #print([i,j,PAYi,PAYj]) #, H[N-1,Z-2], H[N-2,Z-2]])
                if PAYj>PAYi: isERS=0; break
        if isERS==1: listERS=np.append(listERS,i)
    listERS=listERS.astype(int)
    return listERS
def calcBC2st1nat(SIGi,ACTi,SIGj,ACTj,k,N,M,Q,STATEm,w): ### MODIFICAR
    """Long-run behaviour probabilities for ONE state of nature.

    The repeated group interaction is modelled as a Markov chain over
    acting states (i, j) = (cooperators among the k type-i players,
    cooperators among the N-k type-j players), enumerated in STATEm.

    SIGi, SIGj : scalar signalling probabilities of the two strategies
        in this state of nature.
    ACTi, ACTj : 4-vectors (A_sig0_T, A_sig0_noT, A_sig1_T, A_sig1_noT)
        giving the cooperation probability conditional on (signal
        quorum reached?, cooperation threshold M met last round?).
    k, N : number of type-i players / group size.
    M, Q : cooperation threshold and signalling quorum.
    w : continuation probability of the repeated game; w >= 1 selects
        the stationary distribution of the chain instead (requires the
        third-party `discreteMarkovChain` package).

    Returns (BCi, BCj), each a 3-vector:
        [0] probability of receiving the group benefit,
        [1] probability that this strategy cooperates,
        [2] probability that this strategy signals (= SIGi / SIGj);
    zeros for a strategy absent from the group (k == 0 or k == N).
    """
    # Calculate BC, where BC[0]*k*c-BC[1]*c-BC[2]*cs.
    # k individuls type i; N-k individuals type j
    import scipy.sparse.linalg as lin
    from scipy.stats import binom # import scipy.stats.binom as binom
    N2=1.*N/2.; Nk=N-k;
    nSTATE=(N-k+1)*(k+1); MAT=np.zeros((nSTATE,nSTATE))
    #STATEmOPk=k-STATEm; STATEmOPNk=Nk-STATEm;
    #STATEmk=np.ones((nSTATE,2)) if k==0 else STATEm/k
    #STATEmNk=np.ones((nSTATE,2)) if k==N else STATEm/Nk
    ##SIGTTi=SIGi[0]-SIGi[1]; SIGTTj=SIGj[0]-SIGj[1]
    ##TS=k*(ACTi[1]-ACTi[0])+Nk*(ACTj[1]-ACTj[0])
    ##TNS=k*ACTi[0]+Nk*ACTj[0]
    #print(SIGi)
    #print(SIGj)
    # Expected number of signallers; consS = 1 iff the quorum Q is met.
    ns=k*SIGi+Nk*SIGj
    consS=np.piecewise(ns,[ns<Q,ns>=Q],[0.,1.])
    #print([ns,Q,consS])
    # Per-state cooperator count; consA = 1 iff threshold M was met.
    nc= STATEm[:,0]+STATEm[:,1] # in the current state (that has passed)
    consA=np.piecewise(nc,[nc<M,nc>=M],[0.,1.])
    #print([k,consS,consA])
    # Cooperation probability of each strategy, conditional on the
    # current state (vector over states): picks the ACT entry matching
    # (quorum reached?, threshold met?).
    Pcoopi=consA*consS*ACTi[2]+consA*(1.-consS)*ACTi[0]+(1.-consA)*consS*ACTi[3]+(1.-consA)*(1.-consS)*ACTi[1]
    Pcoopj=consA*consS*ACTj[2]+consA*(1.-consS)*ACTj[0]+(1.-consA)*consS*ACTj[3]+(1.-consA)*(1.-consS)*ACTj[1]
    # MAT[s, j] = P(next state = j | current state = s): independent
    # binomial draws of cooperators within each strategy type.
    for j in range(0,nSTATE):
        #print([STATEmk[j,0],STATEmOPk[j,0],consA[1]])
        #MAT[:,j]= BINOm[STATEm[j,0]]*((consA*SIGTTi+SIGi[1])**STATEm[j,0])*((1.-(consA*SIGTTi+SIGi[1]))**STATEmOPk[j,0]) \
        #    *BINOm[STATEm[j,1]]*((consA*SIGTTj+SIGj[1])**STATEm[j,1])*((1.-(consA*SIGTTj+SIGj[1]))**STATEmOPNk[j,1])
        ##for i in range(0,nSTATE):
        ##    MAT[i,j]=binom.pmf(STATEm[j,0],k,consA[i]*SIGTTi+SIGi[1])*binom.pmf(STATEm[j,1],Nk,consA[i]*SIGTTj+SIGj[1])
        MAT[:,j]=binom.pmf(STATEm[j,0],k,Pcoopi)*binom.pmf(STATEm[j,1],Nk,Pcoopj)
        #print([SIGTTi,SIGi[1],SIGTTj,SIGj[1]])
        #print([STATEm[j,0],consA[0]*SIGTTi+SIGi[1],k,np.random.binomial(STATEm[j,0],consA[1]*SIGTTi+SIGi[1],k)])
        #print([i, consA[i], consA[i]*SIGTTi+SIGi[1],SIGTTi,SIGi[1] ])#binom.pmf(STATEm[j,0],k,consA[i]*SIGTTi+SIGi[1]), binom.pmf(STATEm[j,1],Nk,consA[i]*SIGTTj+SIGj[1])])
    #print(MAT)
    #print(STATEm[:,0]); print(STATEm[:,1])
    if w>=1.:
       # val,vect=lin.eigs(np.transpose(MAT),k=1,which='LR'); vect=np.real(vect/np.sum(vect))
        # Undiscounted game: use the stationary distribution of MAT.
        # NOTE(review): `discreteMarkovChain` is a third-party package;
        # vect is a column vector here but a flat row in the else branch.
        from discreteMarkovChain import markovChain
        mc=markovChain(MAT)
        mc.computePi('eigen') # We can use 'linear', 'power', 'krylov' or 'eigen'
        vect=(mc.pi).reshape(-1,1)
    else:
        # Discounted visit frequencies (1-w)(I - w*MAT)^-1, taking the
        # row of the last enumerated state, i.e. the all-defect start (0, 0).
        vect=(1-w)*np.linalg.inv((np.identity(nSTATE)-w*MAT))[nSTATE-1,:]
    #print(nc)
    #print(consS)
    #print(consA)
    #print(vect)
    #print(nc*consA/N)
    BCi=np.zeros((3)); BCj=np.zeros((3))
    # Benefit accrues only in states where the threshold M was met.
    benef=np.dot(nc*consA/N,vect)
    if (k!=0):
        BCi[0]=benef
        BCi[1]=np.dot(STATEm[:,0]/k,vect)
        BCi[2]=SIGi
    if(k!=N):
        BCj[0]=benef
        BCj[1]=np.dot(STATEm[:,1]/Nk,vect)
        BCj[2]=SIGj
    return BCi, BCj
def calcBC2st(SIGi,ACTi,SIGj,ACTj,k,N,M,Q,STATEm,w):
    """Stack the single-state results for both states of nature.

    Returns two (2, 3) arrays: rows are the two states of nature, columns the
    probabilities of (benefit, cooperating, signalling) for types i and j.
    """
    stacked_i = []
    stacked_j = []
    for nat in (0, 1):
        one_i, one_j = calcBC2st1nat(SIGi[nat], ACTi, SIGj[nat], ACTj,
                                     k, N, M, Q, STATEm, w)
        stacked_i.append(one_i)
        stacked_j.append(one_j)
    return np.stack(stacked_i), np.stack(stacked_j)
def calcH (N,Z):
    """Tabulate hypergeometric group-sampling probabilities.

    H[k, K] is the probability of drawing k individuals of a given type among
    the N-1 co-players, sampled from a population of Z-1 that contains K
    individuals of that type.  Column 0 stays zero except for the convention
    H[0, 0] = 1.
    """
    import numpy as np
    from scipy.stats import hypergeom
    H = np.zeros((N + 1, Z + 1))
    H[0, 0] = 1  # convention: H(0,:)=0, H(0,0)=1
    kv = np.arange(0, N + 1)
    for K in range(1, Z + 1):
        # hypergeom.pmf is vectorized over k, so fill a whole column at once.
        H[:, K] = hypergeom.pmf(kv, Z - 1, K, N - 1)
    return H
def calcFIX1vec(STi,STj,STRm,N,Z,M,Q,STATEmat,nSTATEv,H,w):
    """Cumulative payoff-difference vectors for mutual invasion of two strategies.

    For strategy STi invading STj (and the reverse), computes for every number
    K of invaders the payoff-component differences, averaged over group
    composition with the hypergeometric table H, and returns their cumulative
    sums over K -- the quantities later exponentiated in calcFIXM.

    Args:
        STi, STj: row indices into the strategy matrix STRm.
        STRm: strategy matrix; columns 0:2 are signalling, 2:6 the action rule.
        N, Z, M, Q, w: group size, population size, cooperation threshold,
            signalling threshold, continuation parameter (passed through).
        STATEmat, nSTATEv: per-k state tables (from declareSTATE).
        H: hypergeometric table from calcH.

    Returns:
        sumterm, sumtermI: (Z-1, 2, 3) cumulative difference vectors for the
        two invasion directions (j->i and i->j respectively).
    """
    # i invades j (j->i)
    # output: (Z-1,2,3)
    SIGi=STRm[STi,0:2]; ACTi=STRm[STi,2:6]; SIGj=STRm[STj,0:2]; ACTj=STRm[STj,2:6];
    #PAYi=np.zeros((N+1)); PAYj=np.zeros((N+1));
    # BCk*[k] holds the (2,3) benefit/cooperation/signalling probabilities
    # when the group contains k individuals of type i.
    BCki=np.zeros((N+1,2,3)); BCkj=np.zeros((N+1,2,3));
    for k in range(0,N+1):
        BCi,BCj=calcBC2st(SIGi,ACTi,SIGj,ACTj,k,N,M,Q,STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]],w)
        #print(k); print(BCi); print(BCj);
        BCki[k,...]=BCi; BCkj[k,...]=BCj;
        #print([k, BCki[k,...],BCkj[k,...]])
        #PAYi[k]=lamb*(BCi[0,0]*b[0]-BCi[0,1]*c[0]-BCi[0,2]*cs[0])+(1.-lamb)*(BCi[1,0]*b[1]-BCi[1,1]*c[1]-BCi[1,2]*cs[1])
        #PAYj[k]=lamb*(BCj[0,0]*b[0]-BCj[0,1]*c[0]-BCj[0,2]*cs[0])+(1.-lamb)*(BCj[1,0]*b[1]-BCj[1,1]*c[1]-BCj[1,2]*cs[1])
        #print([k, PAYi[k], PAYj[k] ])
    #PIKi=np.zeros((Z,2,3)); PIKj=np.zeros((Z,2,3));
    #PIKiI=np.zeros((Z,2,3)); PIKjI=np.zeros((Z,2,3));
    # Average over group compositions: PIK*[K] is the expected vector when the
    # population holds K invaders; the reversed slices handle the inverse
    # invasion direction (replacing the einsum'd loops kept below for reference).
    PIKi=np.einsum('kK,kij->Kij',H[0:N,0:Z-1],BCki[1:N+1,...])
    PIKj=np.einsum('kK,kij->Kij',H[0:N,1:Z],BCkj[0:N,...])
    PIKjI=np.einsum('kK,kij->Kij',H[0:N,0:Z-1],BCkj[::-1,...][1:N+1,...])
    PIKiI=np.einsum('kK,kij->Kij',H[0:N,1:Z],BCki[::-1,...][0:N,...])
    #for K in range(1,Z-1+1):
    #  PIKi[K,:,:]=np.sum( [H[0:N,K-1]*BCki[1:N+1,:,:]],axis=0 )
    #  PIKj[K,:,:]=np.sum( [H[0:N,K]*BCkj[0:N,:,:]],axis=0 )
    #PIi[K]=np.sum( [H[0:N,K-1]*PAYi[1:N+1]] )
    #PIj[K]=np.sum( [H[0:N,K]*PAYj[0:N]] )
    #print(H[0:N,K-1]); print(np.flipud(PAYj)[1:3]);print([PAYj[1], PAYj[0]])
    #PIjI[K]=np.sum( [H[0:N,K-1]*PAYj[N-1:0+1:-1]] )
    #PIiI[K]=np.sum( [H[0:N,K]*PAYi[N:1+1:-1]] )
    #  PIKjI[K,:,:]=np.sum( [H[0:N,K-1]*np.flip(BCkj,0)[1:N+1]], axis=0 )
    #  PIKiI[K,:,:]=np.sum( [H[0:N,K]*np.flip(BCki,0)[0:N]],axis=0 )
    #print([K, PIi[K], PIj[K] ])
    #EXPK=expb**(PIi-PIj)
    #EXPKI=expb**(PIjI-PIiI)
    DIFK=PIKi-PIKj
    DIFKI=PIKjI-PIKiI
    #print(DIFK); print(DIFKI)
    #suma=0.
    #sumaI=0.
    #term=1.
    #termI=1.
    #sumterm=np.zeros((2,3)); sumtermI=np.zeros((2,3))
    # Cumulative sums over K feed the product form of the fixation probability.
    sumterm=np.cumsum(DIFK,axis=0); sumtermI=np.cumsum(DIFKI,axis=0)
    #print('-----')
    #print(sumterm)
    #print('-----')
    #for m in range(0,Z-2+1): #range(1,Z-1+1):
    #  sumterm[m,:,:]=DIFK[m,:,:]
    #  sumtermI[m,:,:]=DIFKI[m,:,:]
    #term*=EXPK[m]
    #termI*=EXPKI[m]
    #suma+=term
    #sumaI+=termI
    return sumterm, sumtermI
def calcFIXMvec(N,Z,M,Q,STRm,STATEmat,nSTATEv,H,w):
    """Cumulative payoff-difference vectors for every ordered strategy pair.

    fixMvec[j, i] holds the vector for i invading j (i->j transitions use
    fixMvec[i, j]); diagonal entries stay zero.
    """
    nSTR = STRm.shape[0]
    fixMvec = np.zeros((nSTR, nSTR, Z - 1, 2, 3))
    for sA in range(nSTR):
        for sB in range(sA + 1, nSTR):
            print([sA, sB])  # progress indicator: this sweep is slow
            vec_direct, vec_inverse = calcFIX1vec(sA, sB, STRm, N, Z, M, Q,
                                                  STATEmat, nSTATEv, H, w)
            fixMvec[sB, sA, ...] = vec_direct
            fixMvec[sA, sB, ...] = vec_inverse
    return fixMvec
def calcFIXM(coef,expb,Z,fixMvec):
    """Assemble the transition matrix between homogeneous strategy states.

    Contracts the cumulative payoff-difference vectors with the payoff
    coefficients, turns them into fixation probabilities, scales by the
    (uniform) mutation probability 1/nSTR, and fills the diagonal so that
    every row sums to one.  (Z is kept for interface compatibility.)
    """
    # Exponents of the product form of the fixation probability; the sum runs
    # over the invader-count axis.
    exponents = np.einsum('ijmab,ab->ijm', fixMvec, coef)
    fixM = 1. / (1. + (expb ** exponents).sum(axis=2))
    np.fill_diagonal(fixM, 0.)
    n = len(fixM)
    fixM /= n
    # Diagonal = probability of staying put.
    diag = np.arange(n)
    fixM[diag, diag] = 1. - fixM.sum(axis=1)
    return fixM
def calcSD(fixM):
    """Stationary distribution of the strategy-transition Markov chain.

    Returns a column vector of shape (nSTR, 1).
    """
    # import scipy.sparse.linalg as lin
    # vals,vecs=lin.eigs(np.transpose(fixM),k=1,which='LR',tol=1e-12)
    # vecsabs=np.real(np.absolute(vecs))
    # SD=vecsabs/np.sum(vecsabs)
    from discreteMarkovChain import markovChain
    chain = markovChain(fixM)
    chain.computePi('linear')  # alternatives: 'power', 'krylov', 'eigen'
    return (chain.pi).reshape(-1, 1)
def calcHOMO(coef,lamb,eps,N,M,Q,STATEmat,nSTATEv,SD,w):
    """Payoffs and cooperation levels of homogeneous populations (full set).

    Returns:
        PAYhomo: (nSTR,) payoff of each homogeneous population.
        COOPhomo: (nSTR, 3): 0 overall, 1 state lambda=1, 2 state lambda=0.
        COOPtot: (3,) the same three levels weighted by the stationary SD.
    """
    nSTR = SD.shape[0]
    STRm = declareSTR(eps)
    PAYhomo = np.zeros((nSTR))
    COOPhomo = np.zeros((nSTR, 3))
    COOPtot = np.zeros((3))
    k = 1  # one resident suffices: everyone plays the same strategy
    for idx in range(nSTR):
        SIG = STRm[idx, 0:2]
        ACT = STRm[idx, 2:6]
        BCi, BCj = calcBC2st(SIG, ACT, SIG, ACT, k, N, M, Q,
                             STATEmat[k, 0:nSTATEv[k], 0:nSTATEv[k]], w)
        PAYhomo[idx] = np.sum(BCi * coef)
        COOPhomo[idx, 1] = BCi[0, 1]
        COOPhomo[idx, 2] = BCi[1, 1]
    COOPhomo[:, 0] = lamb * COOPhomo[:, 1] + (1 - lamb) * COOPhomo[:, 2]
    for col in range(3):
        COOPtot[col] = np.dot(COOPhomo[:, col], SD)
    return PAYhomo, COOPhomo, COOPtot
def calcHOMO_CD(coef,lamb,eps,N,M,Q,STATEmat,nSTATEv,SD,w):
    """As calcHOMO but restricted to the CD (unconditional C/D) strategy set.

    Returns PAYhomo (nSTR,), COOPhomo (nSTR, 3) and COOPtot (3,); see calcHOMO.
    """
    nSTR = SD.shape[0]
    STRm = declareSTR_CD(eps)
    PAYhomo = np.zeros((nSTR))
    COOPhomo = np.zeros((nSTR, 3))
    COOPtot = np.zeros((3))
    k = 1  # homogeneous population: a single resident represents everyone
    for idx in range(nSTR):
        SIG = STRm[idx, 0:2]
        ACT = STRm[idx, 2:6]
        BCi, BCj = calcBC2st(SIG, ACT, SIG, ACT, k, N, M, Q,
                             STATEmat[k, 0:nSTATEv[k], 0:nSTATEv[k]], w)
        PAYhomo[idx] = np.sum(BCi * coef)
        COOPhomo[idx, 1] = BCi[0, 1]
        COOPhomo[idx, 2] = BCi[1, 1]
    COOPhomo[:, 0] = lamb * COOPhomo[:, 1] + (1 - lamb) * COOPhomo[:, 2]
    for col in range(3):
        COOPtot[col] = np.dot(COOPhomo[:, col], SD)
    return PAYhomo, COOPhomo, COOPtot
def calcHOMO_REC(coef,lamb,eps,N,M,Q,STATEmat,nSTATEv,SD,w):
    """As calcHOMO but restricted to the REC (reciprocal) strategy set.

    Returns PAYhomo (nSTR,), COOPhomo (nSTR, 3) and COOPtot (3,); see calcHOMO.
    """
    nSTR = SD.shape[0]
    STRm = declareSTR_REC(eps)
    PAYhomo = np.zeros((nSTR))
    COOPhomo = np.zeros((nSTR, 3))
    COOPtot = np.zeros((3))
    k = 1  # homogeneous population: a single resident represents everyone
    for idx in range(nSTR):
        SIG = STRm[idx, 0:2]
        ACT = STRm[idx, 2:6]
        BCi, BCj = calcBC2st(SIG, ACT, SIG, ACT, k, N, M, Q,
                             STATEmat[k, 0:nSTATEv[k], 0:nSTATEv[k]], w)
        PAYhomo[idx] = np.sum(BCi * coef)
        COOPhomo[idx, 1] = BCi[0, 1]
        COOPhomo[idx, 2] = BCi[1, 1]
    COOPhomo[:, 0] = lamb * COOPhomo[:, 1] + (1 - lamb) * COOPhomo[:, 2]
    for col in range(3):
        COOPtot[col] = np.dot(COOPhomo[:, col], SD)
    return PAYhomo, COOPhomo, COOPtot
def calcHOMO_SIG(coef,lamb,eps,N,M,Q,STATEmat,nSTATEv,SD,w):
    """As calcHOMO but restricted to the SIG (signalling) strategy set.

    Returns PAYhomo (nSTR,), COOPhomo (nSTR, 3) and COOPtot (3,); see calcHOMO.
    """
    nSTR = SD.shape[0]
    STRm = declareSTR_SIG(eps)
    PAYhomo = np.zeros((nSTR))
    COOPhomo = np.zeros((nSTR, 3))
    COOPtot = np.zeros((3))
    k = 1  # homogeneous population: a single resident represents everyone
    for idx in range(nSTR):
        SIG = STRm[idx, 0:2]
        ACT = STRm[idx, 2:6]
        BCi, BCj = calcBC2st(SIG, ACT, SIG, ACT, k, N, M, Q,
                             STATEmat[k, 0:nSTATEv[k], 0:nSTATEv[k]], w)
        PAYhomo[idx] = np.sum(BCi * coef)
        COOPhomo[idx, 1] = BCi[0, 1]
        COOPhomo[idx, 2] = BCi[1, 1]
    COOPhomo[:, 0] = lamb * COOPhomo[:, 1] + (1 - lamb) * COOPhomo[:, 2]
    for col in range(3):
        COOPtot[col] = np.dot(COOPhomo[:, col], SD)
    return PAYhomo, COOPhomo, COOPtot
def writefixMvec(fixMvec,file):
    """Persist the fixation vectors to ``file`` in numpy .npy format."""
    np.save(file, fixMvec)
    return
def readfixMvec(file):
    """Load fixation vectors previously stored with ``writefixMvec``."""
    return np.load(file + '.npy')
def doINI(N,Z,M,Q,eps,w):
    """Compute (or load from cache) the fixation vectors for the full strategy set.

    The result is cached on disk under a filename encoding all parameters.

    Returns:
        fixMvec: the (nSTR, nSTR, Z-1, 2, 3) fixation-vector tensor.
    """
    from pathlib import Path
    labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)
    file = Path(labelfile+'.npy')
    if not file.is_file():
        print(file)
        H=calcH(N,Z)
        STRm=declareSTR(eps)
        STATEmat,nSTATEv=declareSTATE(N)
        fixMvec=calcFIXMvec(N,Z,M,Q,STRm,STATEmat,nSTATEv,H,w)
        writefixMvec(fixMvec,labelfile)
    else:
        # BUG FIX: previously `fixMvec` was never assigned on this path, so
        # `return fixMvec` raised UnboundLocalError whenever the cache file
        # already existed.  Load the cached result instead.
        fixMvec=readfixMvec(labelfile)
    return fixMvec
def doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w):
    """Stationary distribution for the given payoffs (full strategy set).

    Reads the cached fixation vectors, builds the transition matrix and
    returns its stationary distribution as an (nSTR, 1) column.
    """
    expb = np.exp(-beta)
    # Payoff coefficients: row 0 weighted by lambda, row 1 by (1 - lambda).
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    labelfile = 'GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)
    fixMvec = readfixMvec(labelfile)
    fixM = calcFIXM(coef, expb, Z, fixMvec)
    return calcSD(fixM)
def doINI_CD(N,Z,M,Q,eps,w):
    """Compute (or load from cache) the fixation vectors for the CD strategy set.

    Returns:
        fixMvec: the fixation-vector tensor for the CD-restricted strategies.
    """
    from pathlib import Path
    labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_CD'
    file = Path(labelfile+'.npy')
    if not file.is_file():
        print(file)
        H=calcH(N,Z)
        STRm=declareSTR_CD(eps)
        STATEmat,nSTATEv=declareSTATE(N)
        fixMvec=calcFIXMvec(N,Z,M,Q,STRm,STATEmat,nSTATEv,H,w)
        writefixMvec(fixMvec,labelfile)
    else:
        # BUG FIX: `fixMvec` used to be undefined on the cached path
        # (UnboundLocalError); load the stored result instead.
        fixMvec=readfixMvec(labelfile)
    return fixMvec
def doREST_CD(b,c,cs,lamb,beta,N,Z,M,Q,eps,w):
    """Stationary distribution for the given payoffs (CD strategy set)."""
    expb = np.exp(-beta)
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    labelfile = 'GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_CD'
    fixMvec = readfixMvec(labelfile)
    fixM = calcFIXM(coef, expb, Z, fixMvec)
    return calcSD(fixM)
def doINI_REC(N,Z,M,Q,eps,w):
    """Compute (or load from cache) the fixation vectors for the REC strategy set.

    Returns:
        fixMvec: the fixation-vector tensor for the REC-restricted strategies.
    """
    from pathlib import Path
    labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_REC'
    file = Path(labelfile+'.npy')
    if not file.is_file():
        print(file)
        H=calcH(N,Z)
        STRm=declareSTR_REC(eps)
        STATEmat,nSTATEv=declareSTATE(N)
        fixMvec=calcFIXMvec(N,Z,M,Q,STRm,STATEmat,nSTATEv,H,w)
        writefixMvec(fixMvec,labelfile)
    else:
        # BUG FIX: `fixMvec` used to be undefined on the cached path
        # (UnboundLocalError); load the stored result instead.
        fixMvec=readfixMvec(labelfile)
    return fixMvec
def doREST_REC(b,c,cs,lamb,beta,N,Z,M,Q,eps,w):
    """Stationary distribution for the given payoffs (REC strategy set)."""
    expb = np.exp(-beta)
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    labelfile = 'GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_REC'
    fixMvec = readfixMvec(labelfile)
    fixM = calcFIXM(coef, expb, Z, fixMvec)
    return calcSD(fixM)
def doINI_SIG(N,Z,M,Q,eps,w):
    """Compute (or load from cache) the fixation vectors for the SIG strategy set.

    Returns:
        fixMvec: the fixation-vector tensor for the SIG-restricted strategies.
    """
    from pathlib import Path
    labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_SIG'
    file = Path(labelfile+'.npy')
    if not file.is_file():
        print(file)
        H=calcH(N,Z)
        STRm=declareSTR_SIG(eps)
        STATEmat,nSTATEv=declareSTATE(N)
        fixMvec=calcFIXMvec(N,Z,M,Q,STRm,STATEmat,nSTATEv,H,w)
        writefixMvec(fixMvec,labelfile)
    else:
        # BUG FIX: `fixMvec` used to be undefined on the cached path
        # (UnboundLocalError); load the stored result instead.
        fixMvec=readfixMvec(labelfile)
    return fixMvec
def doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w):
    """Stationary distribution for the given payoffs (SIG strategy set)."""
    expb = np.exp(-beta)
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    labelfile = 'GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_SIG'
    fixMvec = readfixMvec(labelfile)
    fixM = calcFIXM(coef, expb, Z, fixMvec)
    return calcSD(fixM)
def doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w):
    """Wrapper: build payoff coefficients and state tables, then run calcHOMO."""
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    STATEmat, nSTATEv = declareSTATE(N)
    return calcHOMO(coef, lamb, eps, N, M, Q, STATEmat, nSTATEv, SD, w)
def doHOMO_CD(lamb,eps,N,M,Q,b,c,cs,SD,w):
    """Wrapper around calcHOMO_CD; see doHOMO."""
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    STATEmat, nSTATEv = declareSTATE(N)
    return calcHOMO_CD(coef, lamb, eps, N, M, Q, STATEmat, nSTATEv, SD, w)
def doHOMO_REC(lamb,eps,N,M,Q,b,c,cs,SD,w):
    """Wrapper around calcHOMO_REC; see doHOMO."""
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    STATEmat, nSTATEv = declareSTATE(N)
    return calcHOMO_REC(coef, lamb, eps, N, M, Q, STATEmat, nSTATEv, SD, w)
def doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w):
    """Wrapper around calcHOMO_SIG; see doHOMO."""
    weights = np.array([lamb, 1. - lamb])
    coef = np.array([[b[0], -c[0], -cs[0]],
                     [b[1], -c[1], -cs[1]]]) * weights[:, None]
    STATEmat, nSTATEv = declareSTATE(N)
    return calcHOMO_SIG(coef, lamb, eps, N, M, Q, STATEmat, nSTATEv, SD, w)
def doMATCOOP(csV,lambV,bV,MV,QV):
    """Sweep cooperation levels over the (cs, lamb, b, M, Q) grids.

    NOTE(review): this function looks stale/broken -- `w` is neither a
    parameter nor defined locally (it relies on a module-level `w`), and the
    doONEALL call below passes 6 arguments while doONEALL expects
    (beta, Z, N, c1, cs1, lamb, b1, M, Q, w, eps).  Prefer doMATSD; confirm
    before using.
    """
    # axes: cs, lamb, b, M, Q
    bigmatCOOP=np.zeros((len(csV),len(lambV),len(bV),len(MV),len(QV)))
    for ics in range(0,len(csV)):
        for ilamb in range(0,len(lambV)):
            for ib in range(0,len(bV)):
                for iM in range(0,len(MV)):
                    for iQ in range(0,len(QV)):
                        bigmatCOOP[ics,ilamb,ib,iM,iQ],SD=doONEALL(csV[ics],lambV[ilamb],bV[ib],MV[iM],QV[iQ],w)
    return bigmatCOOP
def doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps):
    """Fill cooperation and stationary-distribution tensors over the grid.

    Axes of the output tensors: (cs, lamb, b, M, Q), with an extra leading
    strategy axis for bigmatSD.
    """
    grid_shape = (len(csV), len(lambV), len(bV), len(MV), len(QV))
    bigmatSD = np.zeros((nSTR,) + grid_shape)
    bigmatCOOP = np.zeros(grid_shape)
    # np.ndindex walks the grid in the same row-major order as nested loops.
    for ics, ilamb, ib, iM, iQ in np.ndindex(*grid_shape):
        coop, sd = doONEALL(beta, Z, N, c1, csV[ics], lambV[ilamb],
                            bV[ib], MV[iM], QV[iQ], w, eps)
        bigmatCOOP[ics, ilamb, ib, iM, iQ] = coop
        bigmatSD[:, ics, ilamb, ib, iM, iQ] = sd
    return bigmatCOOP, bigmatSD
def doMATSD_SIG(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps):
    """Grid sweep (SIG strategy set); see doMATSD."""
    grid_shape = (len(csV), len(lambV), len(bV), len(MV), len(QV))
    bigmatSD = np.zeros((nSTR,) + grid_shape)
    bigmatCOOP = np.zeros(grid_shape)
    for ics, ilamb, ib, iM, iQ in np.ndindex(*grid_shape):
        coop, sd = doONEALL_SIG(beta, Z, N, c1, csV[ics], lambV[ilamb],
                                bV[ib], MV[iM], QV[iQ], w, eps)
        bigmatCOOP[ics, ilamb, ib, iM, iQ] = coop
        bigmatSD[:, ics, ilamb, ib, iM, iQ] = sd
    return bigmatCOOP, bigmatSD
def doMATSD_REC(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps):
    """Grid sweep (REC strategy set); see doMATSD."""
    grid_shape = (len(csV), len(lambV), len(bV), len(MV), len(QV))
    bigmatSD = np.zeros((nSTR,) + grid_shape)
    bigmatCOOP = np.zeros(grid_shape)
    for ics, ilamb, ib, iM, iQ in np.ndindex(*grid_shape):
        coop, sd = doONEALL_REC(beta, Z, N, c1, csV[ics], lambV[ilamb],
                                bV[ib], MV[iM], QV[iQ], w, eps)
        bigmatCOOP[ics, ilamb, ib, iM, iQ] = coop
        bigmatSD[:, ics, ilamb, ib, iM, iQ] = sd
    return bigmatCOOP, bigmatSD
def doMATSD_CD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps):
    """Grid sweep (CD strategy set); see doMATSD."""
    grid_shape = (len(csV), len(lambV), len(bV), len(MV), len(QV))
    bigmatSD = np.zeros((nSTR,) + grid_shape)
    bigmatCOOP = np.zeros(grid_shape)
    for ics, ilamb, ib, iM, iQ in np.ndindex(*grid_shape):
        coop, sd = doONEALL_CD(beta, Z, N, c1, csV[ics], lambV[ilamb],
                               bV[ib], MV[iM], QV[iQ], w, eps)
        bigmatCOOP[ics, ilamb, ib, iM, iQ] = coop
        bigmatSD[:, ics, ilamb, ib, iM, iQ] = sd
    return bigmatCOOP, bigmatSD
def calcBIGPAY(bigmatSD,csV,lambV,MV,QV,b,c,N,eps,w):
    """Average homogeneous-population payoff per environment.

    For every (cs, lamb, M, Q) grid point, computes the payoff of each
    homogeneous population and averages it with the stationary distribution
    stored in bigmatSD.

    # output: bigmatPAY has the average payoff of a specific environment
    # (weighted over all the strategies, taking into account the SD).
    # bigmatSD has to be coherent with the other inputs, including their
    # dimensions (except b).
    """
    bigmatPAY=np.zeros((len(csV),len(lambV),len(MV),len(QV)))
    STATEmat,nSTATEv=declareSTATE(N)
    nSTR=bigmatSD.shape[0]
    PAYhomo=np.zeros((nSTR))
    STRm=declareSTR(eps)
    for ics in range(0,len(csV)):
        cs=csV[ics]
        for ilamb in range(0,len(lambV)):
            lamb=lambV[ilamb]
            coef=np.array([[b*lamb, -c*lamb, -cs*lamb],[0*(1.-lamb), -c*(1.-lamb), -cs*(1.-lamb)]]) # assuming b=0 in nPGG
            print(['ics, ilamb: ',ics,ilamb])
            for iM in range(0,len(MV)):
                for iQ in range(0,len(QV)):
                    for i in range(0,nSTR):
                        k=1  # homogeneous population: one resident suffices
                        BCi,BCj=calcBC2st(STRm[i,0:2],STRm[i,2:6],STRm[i,0:2],STRm[i,2:6],k,N,MV[iM],QV[iQ],STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]],w)
                        PAYhomo[i]=np.sum(BCi*coef)
                    # b index 0 in bigmatSD: only one benefit value was swept.
                    bigmatPAY[ics,ilamb,iM,iQ]=np.dot(PAYhomo,bigmatSD[:,ics,ilamb,0,iM,iQ])
    return bigmatPAY
def doONEALL(beta,Z,N,c1,cs1,lamb,b1,M,Q,w,eps):
    """Full pipeline for a single parameter point (full strategy set).

    Returns the overall cooperation level (first component of COOPtot) and
    the stationary distribution as a flat array.
    """
    import numpy as np
    # Equal costs in both states of nature; the benefit exists only in state H.
    c = np.full(2, 1.) * c1
    cs = np.full(2, 1.) * cs1
    b = np.array([b1, 0.])
    doINI(N, Z, M, Q, eps, w)  # make sure the cached fixation vectors exist
    SD = doREST(b, c, cs, lamb, beta, N, Z, M, Q, eps, w)[:, 0]
    PAYhomo, COOPhomo, COOPtot = doHOMO(lamb, eps, N, M, Q, b, c, cs, SD, w)
    print([cs1, lamb, b1, M, Q, COOPtot])
    return COOPtot[0], SD  # careful: only the first component of COOPtot
def doONEALL_SIG(beta,Z,N,c1,cs1,lamb,b1,M,Q,w,eps):
    """Pipeline for one parameter point (SIG strategy set); see doONEALL."""
    import numpy as np
    c = np.full(2, 1.) * c1
    cs = np.full(2, 1.) * cs1
    b = np.array([b1, 0.])
    doINI_SIG(N, Z, M, Q, eps, w)
    SD = doREST_SIG(b, c, cs, lamb, beta, N, Z, M, Q, eps, w)[:, 0]
    PAYhomo, COOPhomo, COOPtot = doHOMO_SIG(lamb, eps, N, M, Q, b, c, cs, SD, w)
    print([cs1, lamb, b1, M, Q, COOPtot])
    return COOPtot[0], SD  # careful: only the first component of COOPtot
def doONEALL_REC(beta,Z,N,c1,cs1,lamb,b1,M,Q,w,eps):
    """Pipeline for one parameter point (REC strategy set); see doONEALL."""
    import numpy as np
    c = np.full(2, 1.) * c1
    cs = np.full(2, 1.) * cs1
    b = np.array([b1, 0.])
    doINI_REC(N, Z, M, Q, eps, w)
    SD = doREST_REC(b, c, cs, lamb, beta, N, Z, M, Q, eps, w)[:, 0]
    PAYhomo, COOPhomo, COOPtot = doHOMO_REC(lamb, eps, N, M, Q, b, c, cs, SD, w)
    print([cs1, lamb, b1, M, Q, COOPtot])
    return COOPtot[0], SD  # careful: only the first component of COOPtot
def doONEALL_CD(beta,Z,N,c1,cs1,lamb,b1,M,Q,w,eps):
    """Pipeline for one parameter point (CD strategy set); see doONEALL."""
    import numpy as np
    c = np.full(2, 1.) * c1
    cs = np.full(2, 1.) * cs1
    b = np.array([b1, 0.])
    doINI_CD(N, Z, M, Q, eps, w)
    SD = doREST_CD(b, c, cs, lamb, beta, N, Z, M, Q, eps, w)[:, 0]
    PAYhomo, COOPhomo, COOPtot = doHOMO_CD(lamb, eps, N, M, Q, b, c, cs, SD, w)
    print([cs1, lamb, b1, M, Q, COOPtot])
    return COOPtot[0], SD  # careful: only the first component of COOPtot
def plot_COOPcs(bigmatCOOP,csV):
    """Plot cooperation level vs signalling cost c_s, one curve per lambda.

    NOTE(review): `lambV` is neither a parameter nor defined locally, so this
    relies on a module-level/global `lambV` -- confirm, or pass it explicitly.
    """
    import matplotlib.pyplot as plt
    plt.figure(1)
    for ilamb in range(0,len(lambV)):
        plt.plot(csV,bigmatCOOP[:,ilamb,0,0,0])
    plt.ylabel('Cooperation level')
    plt.xlabel('c_s')
    return
def plot_COOPcslamb(bigmatCOOP,csV,lambV,bV,MV,QV):
    """Contour grid of cooperation level over (lambda, c_s), one panel per (M, Q).

    Saves the figure to 'cooperation.png'.
    NOTE(review): the y tick labels ("0".."0.3") do not match the tick
    positions (0..1.5); they appear to re-label a rescaled c_s axis --
    confirm the intended scaling.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    #plt.figure(1)
    nr=bigmatCOOP.shape[3]; nc=bigmatCOOP.shape[4]-1 # excluding last column
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all')
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    vmin=0;vmax=1;
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    for iM in range(nr-1,-1,-1):
        axs[iM,nc-1].text(1.1,0.4,"$M=%s$" % str(MV[iM]), size=10 )
        for iQ in range(nc-1,-1,-1):
            h=axs[iM,iQ].contourf(lambV,csV,bigmatCOOP[:,:,0,iM,iQ],vmin=vmin,vmax=vmax)
            axs[iM,iQ].set_xticks([0,0.5,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
            axs[iM,iQ].set_xticklabels(["0","0.5","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
            if iM==0:
                axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=10 )
    margleft=0.13; margright=0.75
    f.subplots_adjust(right=margright,top=0.87,bottom=0.15, left=margleft)
    cbar_ax = f.add_axes([0.85, 0.13, 0.05, 0.77])
    hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Cooperation level')
    #hb.set_ticks(np.linspace(0,1,11))
    # plt.show()
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=16)
    f.text(0.04, 0.5, '$c_s$', va='center', rotation='vertical',size=16)
    f.savefig('cooperation.png', dpi=300)
    f.clf()
    return
def classST():
    """Classify strategies by signal profile and by signal/memory use.

    Returns index arrays: signal profiles 00/11/10/01 (columns 0-1 of the
    strategy matrix), plus STsign (action depends on the signal consensus),
    STsignonly (signal but no memory), STmem (action depends on the previous
    benefit), STmemonly (memory but no signal) and STsignmem (both).
    """
    ST = declareSTR(0)
    STsign = STsignonly = STmem = STmemonly = STsignmem = np.array([], int)
    STs00 = STs11 = STs10 = STs01 = np.array([], int)
    for i in range(ST.shape[0]):
        # Columns 2-5 are the action rule; compare across the signal axis and
        # across the memory axis to see which inputs actually matter.
        uses_sign = (ST[i, 2] != ST[i, 4]) or (ST[i, 3] != ST[i, 5])
        uses_mem = (ST[i, 2] != ST[i, 3]) or (ST[i, 4] != ST[i, 5])
        if uses_sign:
            STsign = np.append(STsign, i)
            if not uses_mem:
                STsignonly = np.append(STsignonly, i)
            else:
                STsignmem = np.append(STsignmem, i)
        if uses_mem:
            STmem = np.append(STmem, i)
            if not uses_sign:
                STmemonly = np.append(STmemonly, i)
        profile = (ST[i, 0], ST[i, 1])
        if profile == (0, 0):
            STs00 = np.append(STs00, i)
        elif profile == (1, 1):
            STs11 = np.append(STs11, i)
        elif profile == (1, 0):
            STs10 = np.append(STs10, i)
        elif profile == (0, 1):
            STs01 = np.append(STs01, i)
    return STs00, STs11, STs10, STs01, STsign, STsignonly, STmem, STmemonly, STsignmem
def plot_SDcslamb(label,STv,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax):
    """Contour grid of the aggregated stationary probability of strategies STv.

    One panel per (M, Q); the figure is saved to '<label>.pdf'.
    # STv: array with the strategies to aggregate.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    #plt.figure(1)
    bigmatAGR=np.sum(bigmatSD[STv,...],axis=0)  # aggregate the selected strategies
    nr=bigmatAGR.shape[3]; nc=bigmatAGR.shape[4]-1 # excluding last column
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all')
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    for iM in range(nr-1,-1,-1):
        axs[iM,nc-1].text(1.1,0.4,"$M=%s$" % str(MV[iM]), size=10 )
        for iQ in range(nc-1,-1,-1):
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR[:,:,0,iM,iQ],vmin=vmin,vmax=vmax)
            axs[iM,iQ].set_xticks([0,0.5,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
            axs[iM,iQ].set_xticklabels(["0","0.5","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
            if iM==0:
                axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=10 )
    margleft=0.13; margright=0.75
    f.subplots_adjust(right=margright,top=0.87,bottom=0.15, left=margleft)
    cbar_ax = f.add_axes([0.85, 0.13, 0.05, 0.77])
    hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Probability')
    #hb.set_ticks(np.linspace(vmin,vmax,11))
    # plt.show()
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=16)
    f.text(0.04, 0.5, '$c_s$', va='center', rotation='vertical',size=16)
    f.text(0.5, 0.95, label, ha='center',size=16)
    f.savefig(label+'.pdf', dpi=300)
    #f.savefig(label+'.png', dpi=300)
    f.clf()
    return
def plot_SDcslambDIF(label,labup,labdown,STvpos,STvneg,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap,ext):
    """Contour grid of the DIFFERENCE in aggregated stationary probability.

    Plots sum(SD over STvpos) - sum(SD over STvneg) per (M, Q) panel and
    saves to '<label>.<ext>'.
    # STvpos/STvneg: arrays with the strategies to aggregate.
    # comap: e.g. 'RdBu_r' (blue to red).
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    #plt.figure(1)
    bigmatAGR=np.sum(bigmatSD[STvpos,...],axis=0)-np.sum(bigmatSD[STvneg,...],axis=0)
    nr=bigmatAGR.shape[3]; nc=bigmatAGR.shape[4] #-1 # excluding last column
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all')
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    for iM in range(nr-1,-1,-1):
        axs[iM,nc-1].text(1.1,0.4,"$M=%s$" % str(MV[iM]), size=10 )
        for iQ in range(nc-1,-1,-1):
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR[:,:,0,iM,iQ],vmin=vmin,vmax=vmax, cmap=comap)
            axs[iM,iQ].set_xticks([0,0.5,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
            axs[iM,iQ].set_xticklabels(["0","0.5","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
            if iM==0:
                axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=10 )
    margleft=0.13; margright=0.75
    f.subplots_adjust(right=margright,top=0.87,bottom=0.15, left=margleft)
    cbar_ax = f.add_axes([0.85, 0.13, 0.05, 0.77])
    hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Probability',cmap=comap)
    #hb.set_ticks(np.linspace(vmin,vmax,11))
    # plt.show()
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=16)
    f.text(0.04, 0.5, '$c_s$', va='center', rotation='vertical',size=16)
    f.text(0.874, 0.95, labup, va='center', ha='center',color='darkred',size=10)
    f.text(0.874, 0.08, labdown, va='center', ha='center',color='darkblue',size=10)
    #f.text(0.5, 0.95, label, ha='center',size=16)
    #f.savefig(label+'.png', dpi=300)
    f.savefig(label+'.'+ext, dpi=300)
    f.clf()
    return
def plot_SDcslambDIF_1(label,labup,labdown,STvpos,STvneg,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap):
    """Variant of plot_SDcslambDIF overlaying five hard-coded strategy groups.

    Saves to '<label>.tiff' and '<label>.pdf'.
    NOTE(review): `vmin` is overwritten to 0.45, the subplot grid is fixed at
    2x2 regardless of nr/nc, and the strategy-index groups below are
    hard-coded for the 64-strategy set -- confirm before reuse.
    # STvpos/STvneg: arrays with the strategies to aggregate.
    # comap: e.g. 'RdBu_r' (blue to red); unused here, each overlay has its own.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    vmin=0.45
    alp=1.
    #plt.figure(1)
    bigmatAGR=np.sum(bigmatSD[STvpos,...],axis=0)-np.sum(bigmatSD[STvneg,...],axis=0)
    bigmatAGR2=np.sum(bigmatSD[[52,53,54,55,60, 61, 62, 63],...],axis=0)-np.sum(bigmatSD[STvneg,...],axis=0)
    bigmatAGR3=np.sum(bigmatSD[[48,49,50,51],...],axis=0)-np.sum(bigmatSD[STvneg,...],axis=0)
    bigmatAGR4=np.sum(bigmatSD[[56,57,58,59],...],axis=0)-np.sum(bigmatSD[STvneg,...],axis=0)
    bigmatAGR5=np.sum(bigmatSD[[33,35],...],axis=0)-np.sum(bigmatSD[STvneg,...],axis=0)
    nr=bigmatAGR.shape[3]; nc=bigmatAGR.shape[4] #-1 # excluding last column
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=2, ncols=2, sharex='all', sharey='all')
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    for iM in range(nr-1,-1,-1):
        axs[iM,nc-1].text(1.1,0.4,"$M=%s$" % str(MV[iM]), size=10 )
        for iQ in range(nc-1,-1,-1):
            mins=vmin
            step=0.02
            # One colormap per strategy group, overlaid on the same axes.
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR[:,:,0,iM,iQ],np.arange(mins,1.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Reds')
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR2[:,:,0,iM,iQ],np.arange(mins,1.1,step), alpha=alp,vmin=vmin,vmax=vmax, cmap='Blues')
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR3[:,:,0,iM,iQ],np.arange(mins,1.1,step), alpha=alp,vmin=vmin,vmax=vmax, cmap='Purples')
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR4[:,:,0,iM,iQ],np.arange(mins,1.1,step), alpha=alp,vmin=vmin,vmax=vmax, cmap='Greens')
            h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR5[:,:,0,iM,iQ],np.arange(mins,1.1,step), alpha=alp,vmin=vmin,vmax=vmax, cmap='Greys')
            axs[iM,iQ].set_xticks([0,0.5,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
            axs[iM,iQ].set_xticklabels(["0","0.5","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
            if iM==0:
                axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=10 )
    margleft=0.13; margright=0.75
    f.subplots_adjust(right=margright,top=0.87,bottom=0.15, left=margleft)
    cbar_ax = f.add_axes([0.85, 0.13, 0.05, 0.77])
    hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Probability',cmap='Greys')
    #hb.set_ticks(np.linspace(vmin,vmax,11))
    # plt.show()
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=16)
    f.text(0.04, 0.5, '$c_s$', va='center', rotation='vertical',size=16)
    f.text(0.874, 0.95, labup, va='center', ha='center',color='darkred',size=10)
    f.text(0.874, 0.08, labdown, va='center', ha='center',color='darkblue',size=10)
    #f.text(0.5, 0.95, label, ha='center',size=16)
    #f.savefig(label+'.png', dpi=300)
    #f.savefig(label+'.svg', dpi=300)
    f.savefig(label+'.tiff', dpi=300)
    f.savefig(label+'.pdf', dpi=300)
    f.clf()
    return
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """Return a new colormap restricted to the [minval, maxval] slice of cmap."""
    import matplotlib.colors as colors
    sampled = cmap(np.linspace(minval, maxval, n))
    name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return colors.LinearSegmentedColormap.from_list(name, sampled)
def plot_SDcslambDIF_agre(label,groups,comapsV,nameg,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext):
    """Overlay the aggregated stationary probability of several strategy groups.

    Each group in `groups` (a list of strategy-index lists) is drawn with its
    own truncated colormap from `comapsV`; `nameg` gives per-group colorbar
    titles (or 0 for a single shared colorbar).  Saved to '<label>.<ext>'.
    NOTE(review): `comaps=comapsV` aliases the caller's list, which is then
    mutated in place -- confirm that callers do not reuse comapsV.
    # groups: list of arrays with the strategies to aggregate.
    # comapsV: e.g. 'RdBu_r' (blue to red), one colormap name per group.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    alp=1.
    # Replace the leading strategy axis by a per-group axis.
    lAGR=list(bigmatSD.shape); del lAGR[0]; lAGR.insert(0,len(groups)); bigmatAGR=np.empty(lAGR)
    for i in range(0,len(groups)):
        bigmatAGR[i,:]=np.sum(bigmatSD[groups[i],...],axis=0)
    nr=bigmatAGR.shape[4]; nc=bigmatAGR.shape[5]
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all' )
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    comaps=comapsV
    for i in range(len(groups)):
        comaps[i]=plt.get_cmap(comapsV[i])
        comaps[i]= truncate_colormap(comaps[i], 0.25, 1)
    for iM in range(nr-1,-1,-1):
        axs[iM,nc-1].text(1.1,0.48,"$M=%s$" % str(MV[iM]), size=9 ,va='center')
        for iQ in range(nc-1,-1,-1):
            step=0.02
            if MV[iM]>5: # to avoid problems with [0010**], which is two places for w=1
                rg=range(len(groups)-1,-1,-1)
            else:
                rg=range(0,len(groups))
            for i in rg:
                h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR[i,:,:,0,iM,iQ],np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap=comaps[i])
            axs[iM,iQ].set_xticks([0,0.5,1]); axs[iM,iQ].set_yticks([0,0.5,1])
            axs[iM,iQ].set_xticklabels(["0","0.5","1"]); axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
            axs[iM,iQ].grid(which='both', axis='both',ls='dashed')
            if iM==0:
                axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=9 )
    margbottom=0.15; margtop=0.87
    f.text(0.0, 0.5, '$c_s$', va='center', rotation='vertical',size=12)
    if nameg==0:
        # Single shared colorbar using the last group's colormap.
        margleft=0.1; margright=0.75;
        f.subplots_adjust(right=margright,top=margtop,bottom=margbottom, left=margleft)
        cbar_ax = f.add_axes([margright+0.1, margbottom, 1.-margleft-margright-0.12, margtop-margbottom])
        hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Probability',cmap=comaps[-1])
    else:
        # One small horizontal colorbar per group, stacked on the right.
        margleft=0.09; margright=0.66;
        f.subplots_adjust(right=margright,top=margtop,bottom=margbottom, left=margleft)
        for i in range(0,len(groups)):
            mr=0.06; hh=(margtop-margbottom)/len(groups); hib=hh-0.11; botb=margtop-hh*(i+1)+0.109-0.027*i;
            #botb=(margtop-margbottom)/2.+(i-np.floor(len(groups)/2.))*0.2 ; hib=0.03
            cbar_ax = f.add_axes([margright+0.11, botb, 1.-margleft-margright-0.06, hib])
            hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,cmap=comaps[i],orientation='horizontal')
            step=0.25; ti=np.arange(vmin,vmax+step,step); ti_s=["%.2f" % x for x in ti]; # ti_s[0]='<'+ti_s[0]
            hb.set_ticks(ti)
            hb.set_ticklabels(ti_s)
            cbar_ax.tick_params(labelsize=8)
            cbar_ax.set_title(nameg[i],size=8,color=mpl.cm.get_cmap(comaps[i])(1.))
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=12)
    #hb.set_ticks(np.linspace(vmin,vmax,11))
    # plt.show()
    #f.text(0.874, 0.95, labup, va='center', ha='center',color='darkred',size=10)
    #f.text(0.874, 0.08, labdown, va='center', ha='center',color='darkblue',size=10)
    #for i in range(0,len(ext)):
    f.savefig(label+'.'+ext, dpi=300)
    f.clf()
    return
def plot_SDspace_agre(label,groups,comapsV,nameg,bigmatSDlist,yV,xV,iM,iQ,M,labup,labright,vmin,vmax,ext):
    """Draw a grid of filled-contour panels of group-aggregated stationary
    distributions over the (xV, yV) parameter plane and save it as label.ext.

    One translucent contourf layer per strategy group is overlaid in each
    panel, each with its own (truncated) colormap; a stack of per-group
    colorbars (or one shared colorbar when nameg == 0) is added on the right.

    Parameters (as used below):
    - groups: list of lists of strategy indices to aggregate (parallel to
      comapsV and nameg)
    - comapsV: colormap names, one per group; NOTE: mutated in place — the
      names are replaced by truncated colormap objects
    - nameg: per-group colorbar titles, or 0 for a single shared colorbar
    - bigmatSDlist: nested list (rows x cols) of SD arrays, indexed here as
      [strategy, y, x, 0, iM, iQ] — assumed layout, TODO confirm with caller
    - yV, xV: coordinate vectors of the panel axes; iM, iQ: fixed indices
      into the two trailing axes; M: group-size parameter, only used to
      choose the layer stacking order
    - labup, labright: column titles and row labels; vmin, vmax: shared
      color scale; ext: file extension passed to savefig
    """
    # groups: list of list with the strategies to agregate (properties: compas,nameg)
    # each panel: list of list (horizontal and vertical distribution): bigmatSDlist, iQ,iM
    # careful, dimensions must be coherent
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    alp=1.
    nc=len(bigmatSDlist[0]); nr=len(bigmatSDlist)
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all' )
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    comaps=comapsV
    # replace each colormap name by a truncated colormap object
    # (truncate_colormap is a project helper defined elsewhere in this file)
    for i in range(len(groups)):
        comaps[i]=plt.get_cmap(comapsV[i])
        comaps[i]= truncate_colormap(comaps[i], 0.25, 1)
    for ir in range(nr-1,-1,-1):
        axs[ir,nc-1].text(1.1,0.48,labright[ir], size=9 ,va='center',ha='left')
        for ic in range(nc-1,-1,-1):
            bigmatSD=bigmatSDlist[ir][ic]
            # aggregate the per-strategy SD into one layer per group
            lAGR=list(bigmatSD.shape); del lAGR[0]; lAGR.insert(0,len(groups)); bigmatAGR=np.empty(lAGR)
            for i in range(0,len(groups)):
                bigmatAGR[i,:]=np.sum(bigmatSD[groups[i],...],axis=0)
            step=0.02
            if M>5: # to avoid problems with [0010**], which is two places for w=1
                rg=range(len(groups)-1,-1,-1)
            else:
                rg=range(0,len(groups))
            # overlay one contourf layer per group; later layers are drawn on top
            for i in rg:
                h=axs[ir,ic].contourf(xV,yV,bigmatAGR[i,:,:,0,iM,iQ],np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap=comaps[i])
            axs[ir,ic].set_xticks([0,0.5,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
            axs[ir,ic].set_xticklabels(["0","0.5","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            axs[ir,ic].set_yticks([0,0.5,1]);
            axs[ir,ic].set_yticklabels(["0","0.5","1"]);
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[ir,ic].tick_params(axis='both', which='major', labelsize=8)
            axs[ir,ic].grid(which='both', axis='both',ls='dashed')
            if ir==0:
                axs[ir,ic].set_title(labup[ic], size=9 )
    margbottomI=0.15; margtopI=0.87
    margbottom=0.15; margtop=0.87
    if nameg==0:
        # single shared vertical colorbar on the right
        margleft=0.1; margright=0.75;
        f.subplots_adjust(right=margright,top=margtop,bottom=margbottom, left=margleft)
        cbar_ax = f.add_axes([margright+0.1, margbottom, 1.-margleft-margright-0.12, margtop-margbottom])
        hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Probability',cmap=comaps[-1])
    else:
        # one small horizontal colorbar per group, stacked to the right
        #margleft=0.08; margright=0.66;
        margleft=0.15; margright=0.5;
        f.subplots_adjust(right=margright,top=margtopI,bottom=margbottomI, left=margleft)
        for i in range(0,len(groups)):
            mr=0.06; hh=(margtop-margbottom)/len(groups); hib=hh-0.11; botb=margtop-hh*(i+1)+0.109-0.027*i;
            #botb=(margtop-margbottom)/2.+(i-np.floor(len(groups)/2.))*0.2 ; hib=0.03
            cbar_ax = f.add_axes([margright+0.15, botb, 1.-margleft-margright-0.1, hib])
            hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,cmap=comaps[i],orientation='horizontal')
            step=0.25; ti=np.arange(vmin,vmax+step,step); ti_s=["%.2f" % x for x in ti]; # ti_s[0]='<'+ti_s[0]
            hb.set_ticks(ti)
            hb.set_ticklabels(ti_s)
            cbar_ax.tick_params(labelsize=8)
            cbar_ax.set_title(nameg[i],size=8,color=mpl.cm.get_cmap(comaps[i])(1.))
    # shared axis labels for the whole grid
    f.text(margleft-0.1, (margtopI-margbottomI)/2.+margbottomI, '$c_s$', va='center', rotation='vertical',size=12)
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=12)
    #hb.set_ticks(np.linspace(vmin,vmax,11))
    # plt.show()
    #f.text(0.874, 0.95, labup, va='center', ha='center',color='darkred',size=10)
    #f.text(0.874, 0.08, labdown, va='center', ha='center',color='darkblue',size=10)
    #for i in range(0,len(ext)):
    f.savefig(label+'.'+ext, dpi=300)
    f.clf()
    return
def plot_PAYcslamb(label,bigmatPAY,csV,lambV,bV,MV,QV,vmin,vmax):
    """Contour-plot the average payoff, normalised by b*lambda, over the
    (lambda, c_s) plane for every (M, Q) combination, saving label.eps.

    bigmatPAY is indexed [cs, lamb, M, Q]; csV/lambV are the axis vectors;
    bV[0] is the benefit used for normalisation; MV/QV label the rows and
    columns of the panel grid; vmin/vmax set the shared color scale.
    """
    # bigmatPAY[cs,lamb,M,Q] (no b)
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    #plt.figure(1)
    nr=bigmatPAY.shape[2]; nc=bigmatPAY.shape[3] # excluding last column
    # f=plt.figure(1,figsize=(20,20))
    f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all')
    f.subplots_adjust(hspace=0.2, wspace=0.2)
    #vmax=10
    #print(csV[15],lambV[15],bV[0]*lambV[15],bigmatPAY[15,15,3,0])
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    for iM in range(nr-1,-1,-1):
        axs[iM,nc-1].text(1.1,0.4,"$M=%s$" % str(MV[iM]), size=10 )
        for iQ in range(nc-1,-1,-1):
            step=0.02
            # normalise the payoff by b*lambda (broadcast over the cs axis)
            PAYplt=bigmatPAY[:,:,iM,iQ]/(bV[0]*lambV) #np.transpose(np.array(lambV)[np.newaxis])
            h=axs[iM,iQ].contourf(lambV,csV,PAYplt,vmin=vmin,vmax=vmax, cmap='Greens')
            axs[iM,iQ].set_xticks([0,0.5,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
            axs[iM,iQ].set_xticklabels(["0","0.5","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
            #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
            #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
            axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
            axs[iM,iQ].grid(which='both', axis='both',ls='dashed')
            if iM==0:
                axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=10 )
    margbottom=0.15; margtop=0.87
    f.text(0.0, 0.5, '$c_s$', va='center', rotation='vertical',size=16)
    margleft=0.1; margright=0.75;
    f.subplots_adjust(right=margright,top=margtop,bottom=margbottom, left=margleft)
    # shared vertical colorbar on the right of the grid
    cbar_ax = f.add_axes([margright+0.1, margbottom, 1.-margleft-margright-0.12, margtop-margbottom])
    hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label=r'$\overline{W}\ (\lambda rc)^{-1}$',cmap='Greens')
    f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=16)
    #hb.set_ticks(np.linspace(vmin,vmax,11))
    # plt.show()
    #f.text(0.5, 0.95, label, ha='center',size=16)
    #f.savefig(label+'.png', dpi=300)
    f.savefig(label+'.eps', dpi=300)
    f.clf()
    return
def plot_BAR(labup,STv,STvC,axs,beta,Z,N,M,Q,lamb,eps,w,c1,cs1,b1,vmax):
    """Draw, on the caller-supplied axes, a bar chart of the stationary
    distribution of the selected strategies for one parameter set.

    The SD is recomputed here via the project helpers doINI/doREST
    (defined elsewhere in this file). labup: x tick labels; STv: indices
    of the strategies to show; STvC: one color per bar (also used to
    color the tick labels); vmax: y-axis upper limit. Returns None —
    everything is drawn into `axs`.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    # cost / signalling-cost / benefit vectors for the two game layers,
    # scaled by the scalar inputs c1, cs1, b1
    c=np.array([1., 1.]) *c1 #*1.* 5. #*0.3 *0.8
    cs=np.array([1., 1.]) *cs1 #*0.1 *5. #*0.06 *c *0.8
    b=np.array([1., 0.]) *b1 #*20. *c #7*c
    STRmPUR=declareSTR(0); # nSTR=STRmPUR.shape[0];
    doINI(N,Z,M,Q,eps,w)
    SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
    #nr=1; nc=1
    #f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all')
    #f.subplots_adjust(hspace=0.2, wspace=0.2)
    ###f = plt.figure()
    ###axs = f.add_subplot(ncol,nrow,npl)
    bars=SD[STv]; n=len(STv)
    ind=np.arange(n)
    width=0.8
    h=axs.bar(ind,bars[:,0],width,align='center',color=STvC)
    axs.set_xlim(-1.,ind[-1]+1.)
    axs.set_ylim(0,vmax)
    axs.set_ylabel(' ')
    axs.set_yticks([0,0.2,0.4,0.6])
    axs.set_yticklabels([0,0.2,0.4,0.6],fontsize=6)
    axs.set_xticks(ind)
    axs.set_xticklabels(labup,rotation=90,fontsize=5.5,ha='center',va='top')
    # color each x tick label like its bar
    [t.set_color(i) for (i,t) in zip(STvC,axs.xaxis.get_ticklabels())]
    axs.yaxis.set_ticks_position('left')
    axs.xaxis.set_ticks_position('bottom')
    #title="$M=%s, Q=%s, \lambda=%s, c_s=%s, b=%s$" % (str(M),str(Q),str(lamb),str(cs1/c1),str(b1/c1))
    title="$M=%s, Q=%s, \lambda=%s, c_s=%s$" % (str(M),str(Q),str(lamb),str(cs1/c1))
    axs.text(ind[-1]/2, vmax-0.01, title, va='top', ha='center',size=6)
    return
def reduc(M,SD,th):
    """Reduce a transition matrix by lumping rare strategies into one node.

    Strategies whose stationary weight is below ``th`` are merged into a
    single trailing "others" state whose in/out transition probabilities
    are the SD-weighted averages of its members.

    Parameters
    ----------
    M : 2-D array, full strategy-to-strategy transition probabilities.
    SD : 2-D column array (n, 1), stationary distribution per strategy.
    th : float, threshold separating kept strategies from merged ones.

    Returns
    -------
    Mred : reduced matrix (kept strategies + "others"), zero diagonal.
    SDred : 1-D array of kept SD values with the merged mass appended.
    labre : kept strategy indices, with sentinel -999999 for "others".
    """
    keep = np.where(SD >= th)[0]
    rare = np.where(SD < th)[0]
    rareSD = SD[rare][:, 0]
    rareMass = np.sum(rareSD)
    n = len(keep) + 1
    Mred = np.zeros((n, n))
    # kept-to-kept transitions are copied verbatim
    Mred[:-1, :-1] = M[np.ix_(keep, keep)]
    # in/out links of the lumped node: SD-weighted means of its members
    # (NOTE: the /rareMass normalisation was flagged as dubious originally)
    Mred[-1, :-1] = rareSD @ M[np.ix_(rare, keep)] / rareMass
    Mred[:-1, -1] = M[np.ix_(keep, rare)] @ rareSD / rareMass
    np.fill_diagonal(Mred, 0.)
    labre = np.append(keep, -999999)
    SDred = np.append(SD[keep], rareMass)
    return Mred, SDred, labre
def groupM(M,SD,gST):
    """Aggregate a transition matrix and SD into groups of strategies.

    Input: transition probability matrix M, stationary distribution SD
    (2-D column array), and gST, a list of lists of strategy indices.
    Output: the group-level transition matrix and group SD, ordered as
    in gST. Outgoing links are averaged with the senders' SD weights.
    """
    sd = SD[:, 0]
    # probability of moving INTO each group, weighted by the sender's
    # stationary weight; built group-major then transposed to (sender, group)
    inbound = np.array([np.sum(M[:, members], axis=1) for members in gST]) * sd
    inbound = np.transpose(inbound)
    # total stationary mass per group
    mass = np.array([np.sum(SD[members]) for members in gST])
    # average outgoing probability over each sending group
    Mred = np.array([np.sum(inbound[members, :], axis=0) for members in gST]) / mass[:, np.newaxis]
    return Mred, mass
def plotNET(b,c,cs,lamb,beta,N,Z,M,Q,eps,w,SD):
    """Build the fixation-probability graph for one parameter set, plot it
    with networkx/matplotlib (net.png) and export it as a Pajek file
    (net2.net). Returns the full fixation matrix fixM.

    The fixation matrix is assembled from a precomputed file via the
    project helpers readfixMvec/calcFIXM, then reduced with reduc() so
    that strategies with SD below 1/n are lumped into an "others" node.

    NOTE(review): the nx.set_node_attributes calls below use the
    networkx 1.x argument order (G, name, values); they break on
    networkx >= 2.0, where the order is (G, values, name) — confirm the
    pinned networkx version.
    """
    import networkx as nx
    import matplotlib.pyplot as plt
    th=1./len(SD)
    STRmPUR=declareSTR(0)
    expb=np.exp(-beta)
    # payoff coefficients for the two game layers, split by lambda
    coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
    labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)
    fixMvec=readfixMvec(labelfile)
    #print(fixMvec)
    fixM=calcFIXM(coef,expb,Z,fixMvec)
    fixMred,SDred,labre=reduc(fixM,SD,th)
    #print(fixMred)
    #print(labre)
    #print(SDred)
    labred=np.array(["%i" % x for x in labre])
    labred[-1]="others"
    SDred[-1]=0.
    G=nx.from_numpy_matrix(fixMred,create_using=nx.DiGraph())
    #print([fixM[28,0],fixM[0,28]])
    #print([G[28][0],G[0][28]])
    # node area proportional to stationary weight
    sizenode=SDred*8000 #np.log(SDred*len(SD)*0.8)*500
    #nx.draw_networkx(G,pos=nx.spring_layout(G,scale=2),
    # node_size=sizenode,node_color=SD,
    # width=0.05,linewidth=100)
    plt.figure(1)
    #plt.subplot(211);
    plt.axis('off')
    #pos = nx.circular_layout(G)
    pos = nx.spring_layout(G, iterations=500)
    nx.draw_networkx_nodes(G, pos , node_size=sizenode,node_color=SDred)
    # per-node label / size / color attributes for the Pajek export
    labels={}; nodesize={}; nodecolor={};
    for inode in range(0,G.number_of_nodes()-1):
        #labels[inode]=labred[inode]
        labels[inode]=str(STRmPUR[labre[inode]].astype(int))
        nodesize[inode]=SDred[inode]*50
        # color classes by stationary weight
        if SDred[inode]>=0.1:
            nodecolor[inode]='Red'
        elif SDred[inode]>=0.05 and SDred[inode]<0.1:
            nodecolor[inode]='Blue'
        else:
            nodecolor[inode]='Green'
    nodesize[G.number_of_nodes()-1]=1; nodecolor[G.number_of_nodes()-1]='Gray50'; labels[G.number_of_nodes()-1]='others'
    #edgecolor={};
    #print(fixMvec)
    # NOTE(review): H=G here is an alias, not a copy — the loop below also
    # mutates G's edge weights (scaled x10 before drawing).
    H=G;
    for u,v,d in H.edges(data=True):
        #edgecolor[iedge]='Red' if d[iedge]>=0.01 else 'Gray';
        if d['weight']>0.011:
            d['c']='Gray80'
        elif d['weight']>0.009 and d['weight']<0.011:
            d['c']='RoyalBlue'
        else:
            d['c']='Gray05'
        d['weight']*=10
    nx.set_node_attributes(H, 'x_fact', nodesize); nx.set_node_attributes(H, 'y_fact', nodesize)
    nx.set_node_attributes(H, 'bc', 'Black'); nx.set_node_attributes(H, 'ic', nodecolor)
    H=nx.relabel_nodes(H,labels)
    nx.write_pajek(H, "net2.net")
    nx.draw_networkx_labels(G,pos,font_size=8,labels=labels)
    #print(fixMred)
    #print(labre)
    #print(SDred)
    edgewidth =np.array( [ d['weight'] for (u,v,d) in G.edges(data=True)] )
    nx.draw_networkx_edges(G, pos, width=edgewidth, edge_color=edgewidth,
    edge_vmin=0.0001,edge_vmax=0.001, arrows=True)
    plt.savefig('net.png', dpi=300)
    plt.clf()
    return fixM
def plotNETgroup(name,M,SD,labg,colg,nSTg,Z):
    """Export the grouped transition graph to a Pajek file ``name.net``.

    Edge weights in M (group-to-group fixation probabilities) are
    rescaled relative to the neutral-drift rate 1/(Z*64): edges clearly
    faster than drift are highlighted and log10-scaled, edges within 1%
    of drift are marked blue, and slower edges are de-emphasised with
    zero width. Node size follows the group stationary distribution SD,
    node fill color comes from colg, and nodes are labelled with labg.

    Parameters
    ----------
    name : output basename (".net" is appended).
    M : 2-D array of group-level transition probabilities.
    SD : 1-D array of group stationary weights.
    labg, colg : per-group labels and fill colors.
    nSTg : number of strategies per group (kept for interface
        compatibility; not used by the current implementation).
    Z : population size used in the neutral-drift rate.

    Returns
    -------
    The relabelled networkx DiGraph that was written out.
    (BUGFIX: this used to be ``return fixM``, a name never defined in
    this function, so every call ended in a NameError.)
    """
    import numpy as np
    import networkx as nx
    mu = 1./64        # mutation factor: assumes 64 strategies in total
    neudrift = 1./Z   # neutral-drift fixation probability 1/Z
    H = nx.from_numpy_matrix(M, create_using=nx.DiGraph())
    # node sizes scale with the groups' stationary mass (floor of 1)
    sizeg = SD*10.
    sizeg[sizeg < 1] = 1.
    for u, v, d in H.edges(data=True):
        if u == v:
            # self-loops carry no invasion information
            d['weight'] = 0.
        # express the weight relative to the neutral-drift rate
        d['weight'] /= (neudrift*mu)
        if d['weight'] >= (1.+0.01):
            # faster than neutral drift: highlight, log-scale the width
            d['c'] = 'Gray70'
            d['weight'] = np.log10(d['weight'])
        elif (1.-0.01) < d['weight'] < (1.+0.01):
            # within 1% of neutral drift
            d['c'] = 'RoyalBlue'
        else:
            # slower than drift: de-emphasise and hide
            d['c'] = 'Gray05'
            d['weight'] = 0.
    # Pajek node attributes (networkx >= 2.x argument order: values, name)
    nx.set_node_attributes(H, 'Black', 'bc')
    nx.set_node_attributes(H, dict(enumerate(colg)), 'ic')
    nx.set_node_attributes(H, dict(enumerate(sizeg.astype(str))), 'x_fact')
    nx.set_node_attributes(H, dict(enumerate(sizeg.astype(str))), 'y_fact')
    H = nx.relabel_nodes(H, dict(enumerate(labg)))
    nx.write_pajek(H, name+".net")
    return H
def findDRIFTgroup(fixM,Z):
    """Detect chains of strategies connected purely by neutral drift.

    Entries of ``fixM`` equal (within a small tolerance) to the neutral
    transition rate 1/(Z * n_strategies) mark drift links. The upper
    triangle is scanned row by row: each row with drift partners starts a
    chain [row, partner, partner, ...]; afterwards any later chain ending
    on the same strategy as an earlier one is discarded.
    Returns the list of surviving chains (lists of indices).
    """
    tol = 0.0000001
    th = 1./Z/len(fixM)
    probs = np.copy(fixM)
    # keep only entries compatible with pure neutral drift
    probs[probs < th - tol] = 0.
    probs[probs > th + tol] = 0.
    n = len(probs)
    chains = []
    for i in range(n):
        partners = [j for j in range(i + 1, n) if probs[i, j] != 0]
        if partners:
            chains.append([i] + partners)
    # keep only the first chain ending on any given strategy
    seen = set()
    groups = []
    for chain in chains:
        if chain[-1] not in seen:
            seen.add(chain[-1])
            groups.append(chain)
    return groups
def doONLYONE():
    """Run one full computation with hard-coded parameters: compute the
    stationary distribution, print a table of strategies sorted by SD,
    draw the invasion network via plotNET, and return (fixM, SD).

    All heavy lifting is done by project helpers defined elsewhere in
    this file (declareSTR, doINI, doREST, doHOMO, plotNET).
    """
    # model parameters (fixed for this single run)
    beta=1.
    Z=100
    N=9
    M=5 #5
    Q=4.5 #4.5
    lamb=.5 #0.5 #0.8
    eps=0.01
    w=1.
    #H, L
    # cost / signalling-cost / benefit vectors for the two game layers
    c1=0.5 #2.5
    c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
    cs=np.array([1., 1.]) *0.01 *c1 #*0.06 *c *0.8
    b=np.array([1., 0.]) *20. *c1 #7*c
    STRmPUR=declareSTR(0); # nSTR=STRmPUR.shape[0];
    doINI(N,Z,M,Q,eps,w)
    SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
    PAYhomo,COOPhomo,COOPtot=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w)
    # table: strategy bits, homogeneous payoff, cooperation, SD — sorted by SD
    SSD=np.concatenate((STRmPUR,np.transpose([PAYhomo]),np.transpose([COOPhomo[:,0]]),SD),axis=1)
    SSDsort=SSD[np.argsort(SSD[..., 8])]
    for i in range(0,len(SSDsort)):
        print('{0:3.0f} {1:5.0f} {2:3.0f} {3:5.0f} {4:3.0f} {5:3.0f} {6:3.0f} {7:12.2e} {8:6.2f} {9:8.2f}'.format(np.argsort(SSD[..., 8])[i],SSDsort[i,0],SSDsort[i,1],SSDsort[i,2],SSDsort[i,3],SSDsort[i,4],SSDsort[i,5],SSDsort[i,6],SSDsort[i,7],SSDsort[i,8])) #print(SSDsort[i,:])
    #print(COOPtot)
    fixM=plotNET(b,c,cs,lamb,beta,N,Z,M,Q,eps,w,SD)
    return fixM, SD
if __name__ == "__main__":
import numpy as np
import time; import timeit
#doONLYONE()
gSC=[20,28] # SC
gSCm=[22,30] # SC mut
gSD=[33,35] # SD
gSDm=[41,43] # SD mut
gSF=[48,49,50,51] # SF
gSFm=[56,57,58,59] # SF mut
gMBc=[52, 53, 54, 55] # MB C
gMBd=[60, 61, 62, 63] # MB D
gMB=gMBc+gMBd
gALL=list(range(0,64))
gG=gSC+gSCm+gSD+gSDm+gSF+gSFm+gMBc+gMBd
gNO = [x for x in gALL if x not in gG]
gST=[gSC,gSCm,gSD,gSDm,gSF,gSFm,gMBc,gMBd,gNO]
gS11=list(range(0,16))
gS10=list(range(16,32))
gS01=list(range(32,48))
gS00=list(range(48,64))
#print(gST)
gSCO=[22,30]
gSDO=[43,41]
gSC1=[29,28]
gSD1=[39,35]
gSCt=[20,29,28]
gSDt=[33,39,35]
gGt=gSCt+gSDt+gSF+gSFm+gMBc+gMBd
gNOt=[x for x in gALL if x not in gGt]
gNO2 = [x for x in gALL if x not in gSC+gSD+gSF+gSFm+gMBc+gMBd ]
gALL=list(range(0,64))
# beta=1.
# Z=100
# N=9
# M=5 #5
# Q=4.5 #4.5
# lamb=0.5 #0.5 #0.8
# eps=0.01
# w=1.
# lamb=1.
# c1=0.5
# c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
# b=np.array([1., 0.]) *20. *c1 #7*c
# cs=np.array([1., 1.]) *9999. *c1
# doINI_CD(N,Z,M,Q,eps,w)
# SD=doREST_CD(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
# coop=doHOMO_CD(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
# print(SD)
############## Cooperation level ##########################
beta=1.
Z=100
N=9
M=5
Q=4.5 #4.5
lamb=0.5 #0.5 #0.8
epsS=0.1
eps=[0.01, epsS]
w=1.
#H, L
c1=1. #2.5
c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
b=np.array([1., 0.]) *10. *c1 #7*c
#csVo= np.linspace(0,2,10)
lambV= np.linspace(0,1,50)
expb=np.exp(-beta)
coop=np.zeros((len(lambV),10,3))
coopPGG=np.zeros((len(lambV),10))
for i in range(0,len(lambV)):
lamb=lambV[i]
cs=np.array([1., 1.]) *0. *c1
coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
doINI(N,Z,M,Q,eps,w)
SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,0,:]=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
doINI_SIG(N,Z,M,Q,eps,w)
SD=doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,1,:]=doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
cs=np.array([1., 1.]) *0.1 *c1
coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
doINI(N,Z,M,Q,eps,w)
SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,2,:]=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
doINI_SIG(N,Z,M,Q,eps,w)
SD=doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,3,:]=doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
cs=np.array([1., 1.]) *0.3 *c1
coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
doINI(N,Z,M,Q,eps,w)
SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,4,:]=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
doINI_SIG(N,Z,M,Q,eps,w)
SD=doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,5,:]=doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
cs=np.array([1., 1.]) *0.5 *c1
coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
doINI(N,Z,M,Q,eps,w)
SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,6,:]=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
doINI_SIG(N,Z,M,Q,eps,w)
SD=doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,7,:]=doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
doINI_REC(N,Z,M,Q,eps,w)
SD=doREST_REC(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,8,:]=doHOMO_REC(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
doINI_CD(N,Z,M,Q,eps,w)
SD=doREST_CD(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
coop[i,9,:]=doHOMO_CD(lamb,eps,N,M,Q,b,c,cs,SD,w)[2]
# PAYhomo,COOPhomo,COOPtot=doHOMO_REC(lamb,eps,N,M,Q,b,c,cs,SD,w)
# print(COOPhomo[:,0])
# SSD=np.concatenate((declareSTR_REC(0),np.transpose([PAYhomo]),np.transpose([COOPhomo[:,0]]),SD),axis=1)
# SSDsort=SSD[np.argsort(SSD[..., 8])]
# for ii in range(0,len(SSDsort)):
# print('{0:3.0f} {1:5.0f} {2:3.0f} {3:5.0f} {4:3.0f} {5:3.0f} {6:3.0f} {7:12.2e} {8:6.2f} {9:8.2f}'.format(np.argsort(SSD[..., 8])[i],SSDsort[i,0],SSDsort[i,1],SSDsort[i,2],SSDsort[i,3],SSDsort[i,4],SSDsort[i,5],SSDsort[i,6],SSDsort[i,7],SSDsort[i,8])) #print(SSDsort[i,:])
# print(i,lambV[i],COOPtot)
print(i,lambV[i],coop[i,:,0])
np.save('coop_w1_beta1_r10_epsS01',coop)
#np.save('coop_w1_beta1_r10_M7_epsS01',coop)
#np.save('coop_w1_beta1_r10_M9_epsS01',coop)
#
# coop=np.load('coop_w1_beta1_r10'+'.npy')
# coop7=np.load('coop_w1_beta1_r10_M7'+'.npy')
# coop9=np.load('coop_w1_beta1_r10_M9'+'.npy')
#
# import matplotlib.pyplot as plt
# #lab=["S+R, $c_S=0$", "S, $c_S=0$", "S+R, $c_S=0.5$", "S, $c_S=0.5$", "S+R, $c_S=1$", "S, $c_S=1$","S+R, $c_S=1.5$", "S, $c_S=1.5$", "R", "C+D"]
# lab=["S+R", "S", "S+R", "S", "R", "C+D","S+R", "S","S+R", "S"] #["S+R", "S", "S+R", "S","S+R", "S","S+R", "S", "R", "C+D"]
# lin=['b-' , 'g-', 'b--', 'g--', 'b:', 'g:', 'b-.', 'g-.', 'r-', 'k-']
### f = plt.figure()
### for j in range(0,len(coop[0,:,0])):
### axs=f.add_subplot(111); plt.plot(lambV,coop[:,j,0],lin[j],label=lab[j]); axs.set_xlim(0., 1.); axs.set_ylim(0., 1.); axs.set_ylabel('Level of cooperation'); axs.set_xlabel('$\lambda$');
### #axs.set_xticks(range(1,N+1)); axs.tick_params(axis='major', which='major', labelsize=8); axs.grid(which='major', axis='both',ls='dashed')
### axs.legend(loc='best', shadow=False, fontsize=8)
### f.savefig('mechanisms_w09.eps', dpi=300)
### f.clf()
### f = plt.figure()
### for j in range(0,len(coop[0,:])):
### axs=f.add_subplot(111); plt.plot(lambV,coop[:,j,1],lin[j],label=lab[j]); axs.set_xlim(0., 1.); axs.set_ylim(0., 1.); axs.set_ylabel('Level of cooperation'); axs.set_xlabel('$\lambda$');
### #axs.set_xticks(range(1,N+1)); axs.tick_params(axis='major', which='major', labelsize=8); axs.grid(which='major', axis='both',ls='dashed')
### axs.legend(loc='best', shadow=False, fontsize=8)
### f.savefig('mechanisms_PGG_w09.eps', dpi=300)
### f.clf()
##
#
## f = plt.figure()
##
## ax=plt.subplot(121)
## for j in range(0,len(coop[0,:,0])):
## plt.plot(lambV,coop[:,j,0],lin[j],label=lab[j]); plt.xlim(0., 1.); plt.ylim(0., 1.); plt.ylabel('Level of cooperation'); plt.xlabel('$\lambda$');
## ax.set_xticks([0,0.25,0.5,0.75,1]); ax.set_xticklabels(["0","0.25","0.5","0.75","1"]); ax.tick_params(axis='major', which='major', labelsize=8); ax.grid(which='major', axis='both',ls='dashed')
## plt.title('$G$ + $\hat{G}$')
## h, l = ax.get_legend_handles_labels()
## ph = plt.plot([],marker="", ls="")[0]
## handles = [ph,h[0],h[1],ph,h[2],h[3],ph,h[4],h[5],ph,h[6],h[7],ph,h[8],h[9] ]
## labels = ["$c_S=0$",lab[0],lab[1],"$c_S=0.1$",lab[2],lab[3],"$c_S=0.3$",lab[4],lab[5],"$c_S=0.5$",lab[6],lab[7]," ",lab[8],lab[9] ]
## leg=plt.legend(handles, labels, bbox_to_anchor=(0., 1.15, 2.2, .102), loc=8,
## ncol=5, mode="expand", borderaxespad=0.,fontsize=8,edgecolor='black')
## for t in leg._legend_handle_box.get_children():
## for hpack in t.get_children()[0:1]:
## hpack.get_children()[0].set_width(0)
##
##
## for j in range(0,len(coop[0,:])):
## axs=f.add_subplot(122); plt.plot(lambV,coop[:,j,1],lin[j],label=lab[j]); axs.set_xlim(0., 1.); axs.set_ylim(0., 1.); axs.set_xlabel('$\lambda$'); #axs.set_ylabel('Level of cooperation');
## #axs.set_xticks(range(1,N+1)); axs.tick_params(axis='major', which='major', labelsize=8); axs.grid(which='major', axis='both',ls='dashed')
## axs.set_xticks([0,0.25,0.5,0.75,1]); axs.set_xticklabels(["0","0.25","0.5","0.75","1"]); axs.tick_params(axis='major', which='major', labelsize=8); axs.grid(which='major', axis='both',ls='dashed')
## plt.title('$G$')
## plt.subplots_adjust(top=0.7)
## #axs.legend(loc='best', shadow=False, fontsize=8)
## f.savefig('coop_mechanisms_w1_r10.eps', dpi=300)
## f.clf()
##
#
##
# f,axs=plt.subplots(nrows=2, ncols=2, sharex='all', sharey='all' )
#
# print(coop)
#
# selcoop=np.array([0,1,4,5,8,9])
#
# ax=ax00=axs[0,0]
## for j in range(0,len(coop[0,:,0])):
# for jj in range(0,len(selcoop)):
# j=selcoop[jj]
# ax.plot(lambV,coop[:,j,0],lin[j],label=lab[j]); ax.set_xlim(0., 1.); ax.set_ylim(0., 1.); #ax.ylabel('Level of cooperation'); ax.xlabel('$\lambda$');
# ax.set_xticks([0,0.25,0.5,0.75,1]); ax.set_xticklabels(["0","0.25","0.5","0.75","1"]); ax.tick_params(axis='major', which='major', labelsize=8); ax.grid(which='major', axis='both',ls='dashed')
# ax.set_title('$G$ + $\hat{G}$',size=10)
#
# ax=axs[0,1]
## for j in range(0,len(coop[0,:,0])):
# for jj in range(0,len(selcoop)):
# j=selcoop[jj]
# ax.plot(lambV,coop[:,j,1],lin[j],label=lab[j]); ax.set_xlim(0., 1.); ax.set_ylim(0., 1.) #; ax.set_xlabel('$\lambda$'); #axs.set_ylabel('Level of cooperation');
# ax.set_xticks([0,0.25,0.5,0.75,1]); ax.set_xticklabels(["0","0.25","0.5","0.75","1"]); ax.tick_params(axis='major', which='major', labelsize=8); ax.grid(which='major', axis='both',ls='dashed')
# ax.set_title('$G$',size=10)
# ax.text(1.1,0.48,"$M=5$", size=12 ,va='center')
#
# #lambV= np.linspace(0,1,20)
# ax=axs[1,0]
## for j in range(0,len(coop[0,:,0])):
# for jj in range(0,len(selcoop)):
# j=selcoop[jj]
# ax.plot(lambV,coop7[:,j,0],lin[j],label=lab[j]); ax.set_xlim(0., 1.); ax.set_ylim(0., 1.); ax.set_xlabel('$\lambda$',size=12); #axs.set_ylabel('Level of cooperation');
# ax.set_xticks([0,0.25,0.5,0.75,1]); ax.set_xticklabels(["0","0.25","0.5","0.75","1"]); ax.tick_params(axis='major', which='major', labelsize=8); ax.grid(which='major', axis='both',ls='dashed')
#
# ax=axs[1,1]
## for j in range(0,len(coop[0,:,0])):
# for jj in range(0,len(selcoop)):
# j=selcoop[jj]
# ax.plot(lambV,coop7[:,j,1],lin[j],label=lab[j]); ax.set_xlim(0., 1.); ax.set_ylim(0., 1.); ax.set_xlabel('$\lambda$',size=12); #axs.set_ylabel('Level of cooperation');
# ax.set_xticks([0,0.25,0.5,0.75,1]); ax.set_xticklabels(["0","0.25","0.5","0.75","1"]); ax.tick_params(axis='major', which='major', labelsize=8); ax.grid(which='major', axis='both',ls='dashed')
# ax.text(1.1,0.48,"$M=7$", size=12 ,va='center')
#
# margleft=0.2; margright=0.8; margtop=0.78; margbottom=0.12; wspace=hspace=0.15
# f.subplots_adjust(hspace=hspace, wspace=wspace, right=margright,top=margtop,bottom=margbottom, left=margleft)
## for i in range(0,len(groups)):
##
## mr=0.06; hh=(margtop-margbottom)/len(groups); hib=hh-0.11; botb=margtop-hh*(i+1)+0.11-0.015*i;
##
## #botb=(margtop-margbottom)/2.+(i-np.floor(len(groups)/2.))*0.2 ; hib=0.03
## cbar_ax = f.add_axes([margright+0.13, botb, 0.2, hib])
## hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,cmap=comaps[i],orientation='horizontal')
## step=0.2; ti=np.arange(vmin,vmax+step,step); ti_s=["%.1f" % x for x in ti]; # ti_s[0]='<'+ti_s[0]
## hb.set_ticks(ti)
## hb.set_ticklabels(ti_s)
## cbar_ax.tick_params(labelsize=7)
## cbar_ax.set_title(nameg[i],size=8,color=mpl.cm.get_cmap(comaps[i])(1.))
#
# f.text(margleft-0.13, (margtop-margbottom)/2.+margbottom, 'Cooperation level', va='center', rotation='vertical',size=12)
# #f.text((margright-margleft)/2+margleft, margbottom-0.1, '$\lambda$', ha='center',size=12)
#
#
# h, l = ax00.get_legend_handles_labels()
# ph = ax00.plot([],marker="", ls="")[0]
## handles = [ph,h[0],h[1],ph,h[2],h[3],ph,h[4],h[5],ph,h[6],h[7],ph,h[8],h[9] ]
## labels = ["$c_S=0$",lab[0],lab[1],"$c_S=0.1$",lab[2],lab[3],"$c_S=0.3$",lab[4],lab[5],"$c_S=0.5$",lab[6],lab[7]," ",lab[8],lab[9] ]
# handles = [ph,h[0],h[1],ph,ph,ph,ph,h[2],h[3],ph,ph,ph,ph,h[4],h[5] ]
# labels = ["$c_S=0$",lab[0],lab[1]," "," "," ","$c_S=0.3$",lab[2],lab[3]," "," "," "," ",lab[4],lab[5] ]
# leg=ax00.legend(handles, labels, bbox_to_anchor=(0., margtop+0.47, 2+wspace, .102), loc=8,
# ncol=5, mode="expand", borderaxespad=0.,fontsize=8,edgecolor='black')
# for t in leg._legend_handle_box.get_children():
# for hpack in t.get_children()[0:1]:
# hpack.get_children()[0].set_width(0)
#
# f.savefig('coop_mechanisms_w1_r10_reduc.eps', dpi=300)
# f.clf()
##
########################################################################
###### Extracting graph of invasions ##########################
#
#####----- separating strategies ------
## gST=[[28]]+[[20]]+[[35]]+[[33]]+[[i] for i in gSF]+[[i] for i in gSFm]+[[i] for i in gMBc] +[[i] for i in gMBd] +[gNO2]
## #gST=[[28]]+[[20]]+[[29]]+[[35]]+[[33]]+[[39]]+[[i] for i in gSF]+[[i] for i in gSFm]+[[i] for i in gMBc] +[[i] for i in gMBd] #+[gNOt]
## colg= ['Red']*2+['Grey30']*2+['Mulberry']*len(gSF)+['Green']*len(gSFm)+['Cyan']*len(gMBc)+['Blue']*len(gMBd) +['Gray05']
##
## labg=['']*len(gST)
## STRmPUR=declareSTR(0)
## for i in range(0,len(gST)):
## labg[i]=str(STRmPUR[gST[i][0]].astype(int))
## labg[-1]="others"
######-------------------------------
#
####----- groups of strategies ------
# gN = [x for x in gALL if x not in gSC+gSD+gSF+gSFm+gMBc+gMBd+gSCO+gSDO ]
# gST=[gSC,gSD,gSCO,gSDO,gSF,gSFm,gMB,gN]
# colg= ['Red']+['Red']+['Gray30']+['Gray30']+['Purple']+['Green']+['NavyBlue'] +['Gray05']
# labg= ['SC']+['SD']+['SC-O']+['SD-O']+['FR-C']+['FR-O']+['FR-D'] +['others']
## gN = [x for x in gALL if x not in gSC+gSD+gSF+gSFm+gMBc+gMBd]
## gST=[gSC,gSD,gSF,gSFm,gMB,gN]
## colg= ['Red']+['Red']+['Purple']+['Green']+['NavyBlue'] +['Gray05']
## labg= ['SC']+['SD']+['FR-C']+['FR-O']+['FR-D'] +['others']
#####-------------------------------
#
#####----- test ------
## gST=[[28],[35]]
## colg= ['Red']+['Gray30'] #+['Purple']+['Green']+['NavyBlue'] +['Gray05']
## labg= ['SC']+['SD'] #+['SF-C']+['SF-01']+['SF-D + SF-10'] +['others']
######-------------------------------
#
# beta=1.
# Z=100
# N=9
# M=5
# Q=4.5
# lamb=0.9 #0.5 #0.8
# cs0=0.5
# eps=0.01
# w=1.
# b0=10.
# #H, L
# c1=1. #2.5
# c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
# cs=np.array([1., 1.]) *cs0 *c1 #*0.06 *c *0.8
# b=np.array([1., 0.]) *b0 *c1 #7*c
#
# expb=np.exp(-beta)
# coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
# labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)
# fixMvec=readfixMvec(labelfile)
# fixM=calcFIXM(coef,expb,Z,fixMvec)
# doINI(N,Z,M,Q,eps,w)
# SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
#
# Mred,SDred=groupM(fixM,SD,gST)
# SDred[SDred<0]=0.; Mred[Mred<0]=0. # correct very small negative values
#
## STRmPUR=declareSTR(0)
## #print(STRmPUR[28],STRmPUR[35])
## #print(fixM[28,35]*64*100,fixM[35,28]*64*100)
## print(fixM[:,63]*64*100)
## print(fixM[:,28]*64*100)
## print((fixM[:,28]-fixM[:,63])*64*100)
## print(SD[28],SD[63])
## print(Mred[4,0]*64*100)
#
#
# nSTg=[len(g) for g in gST]
# name='NET_M_'+str(M)+'_Q_'+str(Q)+'_lamb_'+str(lamb)+'_cs_'+str(cs0)+'_b_'+str(b0)
# plotNETgroup(name,Mred,SDred,labg,colg,nSTg,Z)
#
#
#
#
################################################################
########### Limits drifting groups #########
# from scipy.stats import binom
# import matplotlib.pyplot as plt
# from decimal import Decimal
#
# Z=100
# N=9
# eps=0.01
# M=np.array(range(1,N))
#
# Pr_lessM=binom.cdf(M-1,N,eps)*(1.-eps**M)
# Rlim=np.log(1.-1./Z)/np.log(Pr_lessM) +1 # R < R_lim ---> xx10xx equivalent to xx00xx (assuming a tolerance of 1/Z). All start D, and they never enter in Nc>=M by mistake
# wlim=1.-1./Rlim
#
# print(Pr_lessM)
# print(Rlim)
# print(wlim)
#
## f = plt.figure()
## for i in range(0,len(cs1V)):
## nrow=len(cs1V)
## ncol=1
## npl=i+1
## axs = f.add_subplot(nrow,ncol,npl)
## if npl!=nrow:
## labx=[]
## else:
## labx=labup
## plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i],QV[i],lambV[i],epsV[i],w,c1,cs1V[i],bV[i],vmax)
## f.text(0.05, 0.5, 'Stationary Distribution', rotation=90, va='center', ha='center',size=8)
## f.savefig(label+'.eps', dpi=300)
## f.clf()
##
#
#
# f = plt.figure()
# for tol in [1e-2, 1e-3, 1e-6, 1e-9]:
# Rlim=np.log(1.-tol)/np.log(Pr_lessM) +1 # R < R_lim ---> xx10xx equivalent to xx00xx (assuming a tolerance of 1/Z). All start D, and they never enter in Nc>=M by mistake
# wlim=1.-1./Rlim
# #axs=plt.subplot(221); plt.semilogy(M,wlim); axs.set_xlim(0, N+1); axs.set_ylim(0.9, 1.01); axs.set_yticks([0.9,0.99,1]); axs.set_xticks(range(0,N+1))
# #axs=plt.subplot(222); plt.plot(M,wlim); axs.set_xlim(0, N+1); axs.set_ylim(0.9, 1.01); axs.set_yticks([0.9,0.99,1]); axs.set_xticks(range(0,N+1))
# #axs=plt.subplot(223); plt.semilogy(M,Rlim); axs.set_xlim(0, N+1); axs.set_ylim(1, 100000); axs.set_yticks([10,100]); axs.set_xticks(range(0,N+1))
# #axs=plt.subplot(224); plt.plot(M,Rlim); axs.set_xlim(0, N+1); axs.set_ylim(1, 100000); axs.set_yticks([10,100]); axs.set_xticks(range(0,N+1))
# axs=f.add_subplot(111); plt.semilogy(M,Rlim,label='%.0e' % Decimal(tol)); axs.set_xlim(0.5, N); axs.set_ylim(1, 100000); axs.set_ylabel('$R_{lim}=(1-\omega_{lim})^{-1}$'); axs.set_xlabel('M');
# axs.set_xticks(range(1,N+1)); axs.tick_params(axis='major', which='major', labelsize=8); axs.grid(which='major', axis='both',ls='dashed')
# axs.legend(loc='upper left', shadow=False, fontsize=10, title='Tolerance')
# f.savefig('equiv_00-10.eps', dpi=300)
# f.clf()
#
# 1.-Pr_lessM > 1.-1./Z ; Pr_lessM < 1./Z # ---> xx10xx equivalent to xx11xx. Start D and enter Nc>=M by mistakes
# print(1.-Pr_lessM) # never commit enough mitakes, unless w=1
#
#############################################
# beta=1.
# Z=100
# N=9 21 1 0 1 0 1 0 9.24e-01 0.50 0.00
# w=0.9
# eps=0.01
#
# c1=5.
# csV=c1*np.linspace(0,0.3,51)
# lambV=np.linspace(0,1,51)
# #bV=np.array([20.,10.,5.])
# bV=c1*np.array([20.])
# MV= np.array([5,6,7,8,9]) #np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
# QV= np.array([4.5,5.0,5.5,6.0,6.5,7.0,7.5,8.0,8.5,9.0]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
## beta=1.
## Z=100
## N=9
## w=1.
## eps=0.01
## c1=5.
## csV=c1*np.linspace(0,0.3,51)
## lambV=np.linspace(0,1,51)
## #bV=np.array([20.,10.,5.])
## bV=c1*np.array([20.])
## MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
## QV= np.array([1.0,3.0,4.5,5.0,7.0,9.0]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
##
# bigmatSD=np.load('file_SD_N9_beta5_b20_w1_X_testQM.npy')
# vmin=-0.
# vmax=1.
# groups=[20,28,33,35,48, 49, 50, 51,52, 53, 54, 55, 60, 61, 62, 63]
# nogroups= [x for x in range(0,64) if x not in groups]
# print(groups)
# print(nogroups)
# plot_SDcslambDIF('all--','others','groups',nogroups,groups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax)
# np.set_printoptions(threshold=np.inf)
# fixM,SD=doONLYONE()
# Mred,SDred=groupM(fixM,SD[:,0],gST)
# print(Mred); print(np.sum(SDred))
######################## find drift groups ###############################3
# beta=1.
# Z=100
# N=9
# M=3
# Q=6.5
# lamb=0.7 #0.5 #0.8
# eps=0.01
# w=1.
# #H, L
# c1=1. #2.5
# c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
# cs=np.array([1., 1.]) *0.2 *c1 #*0.06 *c *0.8
# b=np.array([1., 0.]) *10. *c1 #7*c
#
# STRmPUR=declareSTR(0)
# expb=np.exp(-beta)
# coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
# labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)
# fixMvec=readfixMvec(labelfile)
# fixM=calcFIXM(coef,expb,Z,fixMvec)
#
# doINI(N,Z,M,Q,eps,w)
# SD=doREST(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
# PAYhomo,COOPhomo,COOPtot=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD,w)
# SSD=np.concatenate((STRmPUR,np.transpose([PAYhomo]),np.transpose([COOPhomo[:,0]]),SD),axis=1)
# SSDsort=SSD[np.argsort(SSD[..., 8])]
# for i in range(0,len(SSDsort)):
# print('{0:3.0f} {1:5.0f} {2:3.0f} {3:5.0f} {4:3.0f} {5:3.0f} {6:3.0f} {7:12.2e} {8:6.2f} {9:8.2f}'.format(np.argsort(SSD[..., 8])[i],SSDsort[i,0],SSDsort[i,1],SSDsort[i,2],SSDsort[i,3],SSDsort[i,4],SSDsort[i,5],SSDsort[i,6],SSDsort[i,7],SSDsort[i,8])) #print(SSDsort[i,:])
#
# groups=findDRIFTgroup(fixM,100)
# print(groups)
#
## STRmPUR=declareSTR_SIG(0)
## expb=np.exp(-beta)
## coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
## labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_SIG'
## fixMvec=readfixMvec(labelfile)
## fixM=calcFIXM(coef,expb,Z,fixMvec)
##
## doINI_SIG(N,Z,M,Q,eps,w)
## SD=doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
## PAYhomo,COOPhomo,COOPtot=doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w)
## SSD=np.concatenate((STRmPUR,np.transpose([PAYhomo]),np.transpose([COOPhomo[:,0]]),SD),axis=1)
## SSDsort=SSD[np.argsort(SSD[..., 8])]
## for i in range(0,len(SSDsort)):
## print('{0:3.0f} {1:5.0f} {2:3.0f} {3:5.0f} {4:3.0f} {5:3.0f} {6:3.0f} {7:12.2e} {8:6.2f} {9:8.2f}'.format(np.argsort(SSD[..., 8])[i],SSDsort[i,0],SSDsort[i,1],SSDsort[i,2],SSDsort[i,3],SSDsort[i,4],SSDsort[i,5],SSDsort[i,6],SSDsort[i,7],SSDsort[i,8])) #print(SSDsort[i,:])
#
#
#
#################################################
########### CREATE BARS ############################################################################
# beta=1.
# Z=100
# N=9
# w=0.9
# eps=0.01
#
# c1=0.5
#
###------ vertical ---------------------
## csv=np.array([0, 0.2,0.4,0.6,0.8,1.,1.2,1.4,1.6])
## cs1V= c1*np.transpose(np.array([csv,csv]))
## lambV= np.transpose(np.array([[0.3]*len(cs1V),[0.7]*len(cs1V)]))
## bV=c1*np.transpose(np.array([[20]*len(lambV)]*2))
## epsV= np.transpose(np.array([[0.01]*len(lambV)]*2))
## QV= np.transpose( np.array([[4.5]*len(lambV)]*2))
## MV= np.transpose( np.array([[3]*len(lambV)]*2))
###--------------------------------------
##
######------ horizontal ---------------------
## lv=[0,0.2,0.4,0.5,0.6,0.8,1.]
## lambV= np.transpose(np.array([lv,lv]))
## cs1V= c1*np.transpose(np.array([[0.01]*len(lambV),[0.5]*len(lambV)]))
## bV=c1*np.transpose(np.array([[15]*len(lambV)]*2))
## epsV= np.transpose(np.array([[0.01]*len(lambV)]*2))
## QV= np.transpose( np.array([[4.5]*len(lambV)]*2))
## MV= np.transpose( np.array([[3]*len(lambV)]*2))
#####--------------------------------------
#
###------ horver ---------------------
# lv=[0,0.2,0.4,0.5,0.6,0.7,0.8,1.] # 2 horizontal
# csv=np.array([0, 0.2,0.4,0.6,0.8,1.,1.2,1.5]) # 1 vertical
# lambV= np.transpose(np.array([lv,lv,[0.5]*len(lv)]))
# cs1V= c1*np.transpose(np.array([[0.5]*len(lv),[1.5]*len(lv),csv]))
#
#
# bV=c1*np.transpose(np.array([[20]*len(lambV)]*3))
# epsV= np.transpose(np.array([[0.01]*len(lambV)]*3))
# QV= np.transpose( np.array([[4.5]*len(lambV)]*3))
# MV= np.transpose( np.array([[7]*len(lambV)]*3))
##--------------------------------------
#
##
### bV=np.array([20,20,20,20,20,20]) *c1
### epsV= np.array([0.01, 0.01, 0.01 , 0.01, 0.01,0.01])
### lambV= np.array([0.5, 0.5, 0.5 , 0.5, 0.5,0.5])
### cs1Vl= np.array([0.05, 0.25, 0.05, 0.25, 0.1,0.1])
### cs1V= cs1Vl*c1
### QV= np.array([4.5, 4.5, 7, 7, 4.5,4.5])
### MV= np.array([5, 5, 9, 9, 5, 7])
##
## #lambV= np.array([0.5, 0.5, 0.5 , 0.5, 0.5,0.5])
## #cs1Vl= np.array([0.05, 0.13, 0.135, 0.14, 0.145,0.15])
## #cs1V= cs1Vl*c1
## #QV= np.array([4.5, 4.5, 4.5, 4.5, 4.5,4.5])
## #MV= np.array([5, 5, 5, 5, 5, 5])
##
## #epsV= np.array([0.01, 0.1, 0.3 , 0.01, 0.1,0.3])
## #lambV= np.array([0.5, 0.5, 0.5 , 0.5, 0.5,0.5])
## #cs1Vl= np.array([0.05, 0.05, 0.05, 0.25, 0.25,0.25])
## #cs1V= cs1Vl*c1
## #QV= np.array([4.5, 4.5, 4.5, 4.5, 4.5,4.5])
## #MV= np.array([5, 5, 5, 5, 5, 5])
##
### lambV= np.array([0.55, 0.55, 0.65 , 0.6, 0.65,0.6])
### cs1Vl= np.array([0.1, 0.1, 0.12, 0.12, 0.15,0.25])
### cs1V= cs1Vl*c1
### QV= np.array([4.5, 4.5, 5., 5., 5.,5.])
### MV= np.array([7, 7, 7, 7, 7, 7])
#
## STv=np.array([20, 22, 28, 30, 33, 35, 43, 51, 53, 55, 57, 58, 59, 61, 63])
## STv=np.array(range(0,64))
## STv=np.array([20,28, # SC
## 22,30, # SC mut
## 33,35, # SD
## 41,43, # SD mut
## 48,49,50,51, # SF
## 56,57,58,59, # SF mut
## 52, 53, 54, 55, # MB C
## 60, 61, 62, 63, # MB D
## 0, 4, 8, 12,
## 1, 3, 5, 7, 9, 11, 13, 15,
## 2, 6, 10, 14,
## 17, 19,
## 21, 23, 29, 31,
## 25, 27,
## 36, 44,
## 37, 39, 45, 47,
## 38, 46,
## 16, 18, 24, 26, 32, 34, 40, 45 ]) # not in groups
## STvC=(['xkcd:dark red','xkcd:dark red', # SC
## 'xkcd:salmon','xkcd:salmon', # SC mut
## 'xkcd:dark grey','xkcd:dark grey', # SD
## 'xkcd:grey','xkcd:grey', # SD mut
## #'xkcd:sienna','xkcd:sienna', # SD
## #'xkcd:tan','xkcd:tan', # SD mut
## #'xkcd:dark green','xkcd:dark green','xkcd:dark green','xkcd:dark green', # SF
## 'xkcd:violet','xkcd:violet','xkcd:violet','xkcd:violet', # SF
## 'xkcd:green','xkcd:green','xkcd:green','xkcd:green', # SF mut
## 'xkcd:medium blue','xkcd:medium blue','xkcd:medium blue','xkcd:medium blue', # MB C
## 'xkcd:dark blue', 'xkcd:dark blue', 'xkcd:dark blue', 'xkcd:dark blue'] # MB D
## + ['xkcd:tan']*(64-24) ) # all the others
##
## STRp=declareSTR(0)
## labup=['']*len(STv)
## for i in range(0,len(STv)):
## sen=str(STRp[STv[i],:])
## labup[i]=sen.replace(". ","").replace("[","").replace(".]","").replace(" ","")
##
## label='BAR_w09_horit_Q4.5_M3_r15' #'BAR_ttt'
## vmax=0.4
## ncol=2
## import matplotlib.pyplot as plt
## f = plt.figure()
## j=0
## for i in range(0,len(cs1V)):
## nrow=len(cs1V)
## npl=ncol*i+j+1
## axs = f.add_subplot(nrow,ncol,npl)
## f.subplots_adjust(wspace=0.0,left=0.05,right=0.95,top=0.93,bottom=0.1)
## if i+1!=len(cs1V):
## labx=[]
## else:
## labx=labup
## plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i,j],QV[i,j],lambV[i,j],epsV[i,j],w,c1,cs1V[i,j],bV[i,j],vmax)
## j=1
## for i in range(0,len(cs1V)):
## nrow=len(cs1V)
## npl=ncol*i+j+1
## axs = f.add_subplot(nrow,ncol,npl)
## f.subplots_adjust(wspace=0.0,left=0.05,right=0.95,top=0.93,bottom=0.1)
## if i+1!=len(cs1V):
## labx=[]
## else:
## labx=labup
## plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i,j],QV[i,j],lambV[i,j],epsV[i,j],w,c1,cs1V[i,j],bV[i,j],vmax)
## axs.yaxis.set_major_locator(plt.NullLocator())
### j=2
### for i in range(0,len(cs1V)):
### nrow=len(cs1V)
### npl=ncol*i+j+1
### axs = f.add_subplot(nrow,ncol,npl)
### f.subplots_adjust(wspace=0.0,left=0.05,right=0.95,top=0.93,bottom=0.1)
### if i+1!=len(cs1V):
### labx=[]
### else:
### labx=labup
### plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i,j],QV[i,j],lambV[i,j],epsV[i,j],w,c1,cs1V[i,j],bV[i,j],vmax)
### axs.yaxis.tick_right()
## f.text(0.5, 0.96, 'Stationary Distribution', rotation=0, va='center', ha='center',size=8)
## #f.text(0.05, 0.5, 'Stationary Distribution', rotation=90, va='center', ha='center',size=8)
## f.savefig(label+'.pdf', dpi=300)
## f.clf()
#
#
##------- only chosen strategies -------------------------------------------------------
# STv=np.array([28,20,29, # SC
# #16,17,18,19,21,22,23,24,25,26,27,30,31, # 10 non SC
# 35, 33, 39, # SD
# #32,34,36,37,38,40,41,42,43,44,45,46,47, # 01 non SD
# 48,49,50,51, # SF-C
# 56,57,58,59, # SF-01
# 52, 53, 54, 55, # SF-10
# 60, 61, 62, 63, # SF-D
# ])
# STvC=(['xkcd:dark red']*3 # SC
# #+['xkcd:salmon']*(16-3) # 10 non SC
# +['xkcd:dark grey']*3 # SC
# #+['xkcd:grey']*(16-3) # 10 non SC
# +['xkcd:violet']*4 # SF-C
# +['xkcd:green']*4 # SF-01
# +['xkcd:medium blue']*4 # SF-10
# +['xkcd:dark blue']*4) # SF-D
#
#
# STRp=declareSTR(0)
# labup=['']*len(STv)
# for i in range(0,len(STv)):
# sen=str(STRp[STv[i],:])
# labup[i]=sen.replace(". ","").replace("[","").replace(".]","").replace(" ","")
#
# label='BAR_w09_horver_Q4.5_M7' #'BAR_ttt'
# vmax=0.5
# ncol=3
# import matplotlib.pyplot as plt
# f = plt.figure()
# j=0
# for i in range(0,len(cs1V)):
# nrow=len(cs1V)
# npl=ncol*i+j+1
# axs = f.add_subplot(nrow,ncol,npl)
# f.subplots_adjust(wspace=0.0,left=0.05,right=0.95,top=0.93,bottom=0.1)
# if i+1!=len(cs1V):
# labx=[]
# else:
# labx=labup
# plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i,j],QV[i,j],lambV[i,j],epsV[i,j],w,c1,cs1V[i,j],bV[i,j],vmax)
# j=1
# for i in range(0,len(cs1V)):
# nrow=len(cs1V)
# npl=ncol*i+j+1
# axs = f.add_subplot(nrow,ncol,npl)
# f.subplots_adjust(wspace=0.0,left=0.05,right=0.95,top=0.93,bottom=0.1)
# if i+1!=len(cs1V):
# labx=[]
# else:
# labx=labup
# plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i,j],QV[i,j],lambV[i,j],epsV[i,j],w,c1,cs1V[i,j],bV[i,j],vmax)
# axs.yaxis.set_major_locator(plt.NullLocator())
# j=2
# for i in range(0,len(cs1V)):
# nrow=len(cs1V)
# npl=ncol*i+j+1
# axs = f.add_subplot(nrow,ncol,npl)
# f.subplots_adjust(wspace=0.0,left=0.05,right=0.95,top=0.93,bottom=0.1)
# if i+1!=len(cs1V):
# labx=[]
# else:
# labx=labup
# plot_BAR(labx,STv,STvC,axs,beta,Z,N,MV[i,j],QV[i,j],lambV[i,j],epsV[i,j],w,c1,cs1V[i,j],bV[i,j],vmax)
# axs.yaxis.tick_right()
# f.text(0.5, 0.96, 'Stationary Distribution', rotation=0, va='center', ha='center',size=8)
# #f.text(0.05, 0.5, 'Stationary Distribution', rotation=90, va='center', ha='center',size=8)
# f.savefig(label+'.eps', dpi=300)
# f.clf()
##--------------------------------------------------
#
#################################################################################################################
# doINI(N,Z,M,Q,eps)
# M=4; N=5; eps=0.01; STATEmat,nSTATEv=declareSTATE(N);
# Z=100; H=calcH(N,Z)
# STRm=declareSTR(eps)
# nSTR=STRm.shape[0];
# for i in range(0,nSTR):
# for j in range(i+1,nSTR):
# SGi=STRm[i,0:2]; ACTi=STRm[i,2:6]
# SGj=STRm[j,0:2]; ACTj=STRm[j,2:6]
#for k in range(1,N):
# print([i, j, k])
# Q=2.5; BCiA,BCjA=calcBC2st(SGi,ACTi,SGj,ACTj,k,N,M,Q,STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]])
# Q=3.; BCiB,BCjB=calcBC2st(SGi,ACTi,SGj,ACTj,k,N,M,Q,STATEmat[k,0:nSTATEv[k],0:nSTATEv[k]])
# if not (np.all(np.abs(BCiA-BCiB)<0.000001) and np.all(np.abs(BCjA-BCjB)<0.000001)):
# print(BCiA)
# print(BCiB)
# print([i, j])
# Q=2.5; stermA,stermIA=calcFIX1vec(i,j,STRm,N,Z,M,Q,STATEmat,nSTATEv,H)
# Q=3.; stermB,stermIB=calcFIX1vec(i,j,STRm,N,Z,M,Q,STATEmat,nSTATEv,H)
# if not (np.all(np.abs(stermA-stermB)<0.000001) and np.all(np.abs(stermIA-stermIB)<0.000001)):
# print(stermA,stermIA)
# print(stermB,stermIB)
# fixM=doONLYONE()
#beta=1.
#Z=100
#N=5
#M=4
#Q=2.5
#lamb=0.8
#eps=0.01
#H=calcH(N,Z)
#STRm=declareSTR(eps); # nSTR=STRm.shape[0]; #print(STm[0],STm[63])
#STATEmat,nSTATEv=declareSTATE(N); #print(STATEmat); print(nSTATEv)
#fixMvec=calcFIXMvec(N,Z,M,Q,STRm,STATEmat,nSTATEv,H)
# STRmPURE=declareSTR(0); nSTR=STRmPURE.shape[0];
#
## csV=np.linspace(0,1,51)
## lambV=np.linspace(0,1,11)
## #bV=np.array([20.,10.,5.])
## bV=np.array([10.])
## MV=np.array([1,2,3,4,5])
## QV=np.array([1,2,2.5,3,4,5])
## #MV=np.array([1,3,5,7,9])
## #QV=np.array([1,3,4.5,5,7,9])
############# SAVE BIG MATRIX for Q-M plots ###################
# beta=1.
# Z=100
# N=18
# eps=0.01
#
# STRmPURE=declareSTR(0); nSTR=STRmPURE.shape[0];
#
# lambV=np.linspace(0,1,31)
#
# csVo= np.linspace(0,1,31) #np.linspace(0,0.3,51)
# MV= np.array([2,6,10,14,18]) #np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #np.array([1,3,5,7,9]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
# QV= np.array([2., 4., 9.5,13.5,17.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([2., 4., 9.5,13.5,17.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
#
# ##### test ####
## lambV=np.linspace(0,1,31)
## csVo= np.linspace(0,2,31) #np.linspace(0,0.3,51)
## bVo=np.array([20.])
## MV= np.array([5]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
## QV= np.array([4.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
## w=0.9
## c1=0.2
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## #bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## #np.save('file_SD_N9_beta02_b20_w09_NEWtest_4.5_5.npy',bigmatSD)
# ################
#
#
# STRmPURE=declareSTR(0); nSTR=STRmPURE.shape[0];
#
# bVo=np.array([20.])
## w=0.9
## c1=1. #0.5
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## np.save('file_SD_N9_beta5_b10_w09_NEW_1.npy',bigmatSD)
### np.save('file_COOP_N9_beta5_b10_w1_X.npy',bigmatCOOP)
## w=0.9
## c1=0.5
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## np.save('file_SD_N9_beta1_b20_w09_NEWtest.npy',bigmatSD)
#
## bVo=np.array([30.])
## w=0.9
## c1=0.5
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## np.save('file_SD_N18_beta05_b30_w09_NEW.npy',bigmatSD)
##
# w=1.
# c1=1.
# csV=c1* csVo #np.linspace(0,0.3,51)
# bV=c1* bVo
# bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
# np.save('file_SD_N18_beta1_b20_w1_NEW_1.npy',bigmatSD)
#
#
###################################################################
# STs00,STs11,STs10,STs01,STsign, STsignonly, STmem, STmemonly, STsignmem =classST()
# print(STsign)
# print(STsignonly)
# print(STmem)
# print(STmemonly)
# print(STsignmem)
# csV=np.linspace(0,1,51)
# lambV=np.linspace(0,1,11)
# bV=np.array([20.])
# MV=np.array([1,2,3,4,5])
# QV=np.array([1,2,2.5,3,4,5])
################### PLOTS Q-M ############################################################################
## bigmatCOOP=np.load('file_COOP_N9_beta5_b20_w1_X.npy')
## lab='SD_N9_beta05_b20_w09'
## bigmatSD=np.load('file_'+lab+'_NEW.npy')
## PAYhomo,COOPhomo,COOPtot=doHOMO(lamb,eps,N,M,Q,b,c,cs,SD)
#
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,2,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
# MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #n #np.array([5,6,7,8,9]) #np.array([3,9,15,21,27])
# QV= np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([2., 4., 9.5,13.5,17.5]) # #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=0.5
# csV= csVo
# bV=c1* bVo
#
# lab='SD_N9_beta05_b10_w09'
# bigmatSD=np.load('file_'+lab+'_NEW_1.npy')
# ext='eps'
## vmin=0.4; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys']
## groups=[gMB,gSF,gSFm,gSC,gSD]
## #ngroups=['Blues','Purples','Greens','Reds','Greys']
## ngroups=[r'[00 00$\ast$$\ast$] + [00 10$\ast$$\ast$]', r'[00 11$\ast$$\ast$]',r'[00 01$\ast$$\ast$]',r'[10 0011] + [10 1011]',r'[01 1100] + [01 1110]']
## plot_SDcslambDIF_agre('SD_agre_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
## vmin=0; vmax=1.
## plot_SDcslambDIF_agre('SD_gS00_'+lab,[gS00],['Blues'],0,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
## plot_SDcslambDIF_agre('SD_gS10_'+lab,[gS10],['Reds'],0,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
## plot_SDcslambDIF_agre('SD_gS01_'+lab,[gS01],['Greys'],0,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
## plot_SDcslambDIF_agre('SD_gS11_'+lab,[gS11],['Greys'],0,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
#
## #STRmPURE=declareSTR(0)
## STs00,STs11,STs10,STs01,STsign, STsignonly, STmem, STmemonly, STsignmem=classST()
## #np.set_printoptions(threshold=np.inf)
## #print(STRmPURE[STmemonly])
## vmin=0; vmax=0.5
## plot_SDcslambDIF_agre('REC_'+lab,[STmemonly],['Greys'],0,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
## plot_SDcslambDIF_agre('SIG_'+lab,[STsignonly],['Greys'],0,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
#
## lab='SD_N9_beta1_b10_w09'
## bigmatSD=np.load('file_'+lab+'_NEW_1.npy')
## bigmatSD_abun_09=np.load('file_SD_N9_beta05_b20_w09_bper_abun.npy')
## bigmatSD_abun_1=np.load('file_SD_N9_beta05_b20_w1_bper_abun.npy')
## ext='eps'
## vmin=0.4; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys']
## groups=[gMB,gSF,gSFm,gSC,gSD]
## #ngroups=['Blues','Purples','Greens','Reds','Greys']
## #ngroups=[r'[00 00$\ast$$\ast$] + [00 10$\ast$$\ast$]', r'[00 11$\ast$$\ast$] + [00 10$\ast$$\ast$]',r'[00 01$\ast$$\ast$]',r'[10 0011] + [10 1011] + [10 0010]',r'[01 1100] + [01 1110] + [01 1000]']
## ngroups=[r'FR-D + FR-10$^{M\geqslant5}$', r'FR-C',r'FR-01',r'SC + SC$_{C}$$^{M\geqslant5}$',r'SD + SD$_{C}$$^{M\geqslant5}$']
##
## plot_SDcslambDIF_agre('SD_agre_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
##
## lab='SD_N9_beta1_b10_w1'
## bigmatSD=np.load('file_'+lab+'_NEW_1.npy')
## ext='eps'
## vmin=0.4; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys']
## groups=[gMB,gSF+gMBc,gSFm,gSCt,gSDt]
## #ngroups=['Blues','Purples','Greens','Reds','Greys']
## #ngroups=[r'[00 00$\ast$$\ast$] + [00 10$\ast$$\ast$]', r'[00 11$\ast$$\ast$] + [00 10$\ast$$\ast$]',r'[00 01$\ast$$\ast$]',r'[10 0011] + [10 1011] + [10 0010]',r'[01 1100] + [01 1110] + [01 1000]']
## ngroups=[r'FR-D + FR-10$^{M>5}$', r'FR-C + FR-10$^{M<5}$',r'FR-01',r'SC + SC$_{C}$$^{M>5}$ + SC$_{D}$$^{M<5}$',r'SD + SD$_{C}$$^{M>5}$ + SD$_{D}$$^{M<5}$']
##
## plot_SDcslambDIF_agre('SD_agre_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
#
## lab='SD_N9_beta1_b10_w09'
## bigmatSD=np.load('file_'+lab+'_NEW_1.npy')
## ext='eps'
## vmin=0.5; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys','Oranges']
## groups=[gMB,gSF,gSFm,gSC,[22,30],gSD]
## #ngroups=['Blues','Purples','Greens','Reds','Greys']
## #ngroups=[r'[00 00$\ast$$\ast$] + [00 10$\ast$$\ast$]', r'[00 11$\ast$$\ast$] + [00 10$\ast$$\ast$]',r'[00 01$\ast$$\ast$]',r'[10 0011] + [10 1011] + [10 0010]',r'[01 1100] + [01 1110] + [01 1000]']
## ngroups=[r'FR-D + FR-10$^{M\geqslant5}$', r'FR-C',r'FR-01',r'SC + SC$_{C}$$^{M\geqslant5}$',r'[10 $\ast$001]',r'SD + SD$_{C}$$^{M\geqslant5}$']
##
## plot_SDcslambDIF_agre('SD_agre_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
#
# lab='SD_N9_beta1_b10_w1'
# bigmatSD=np.load('file_'+lab+'_NEW_1.npy')
# ext='eps'
# vmin=0.5; vmax=1.
# comaps=['Blues','Purples','Greens','Reds','Greys']
# groups=[gMB,gSF,gSFm,gSC+gSD,[22,30, 43,41]]
# #ngroups=['Blues','Purples','Greens','Reds','Greys']
# #ngroups=[r'[00 00$\ast$$\ast$] + [00 10$\ast$$\ast$]', r'[00 11$\ast$$\ast$] + [00 10$\ast$$\ast$]',r'[00 01$\ast$$\ast$]',r'[10 0011] + [10 1011] + [10 0010]',r'[01 1100] + [01 1110] + [01 1000]']
# #ngroups=[r'FR-D + FR-10$^{M>5}$', r'FR-C + FR-10$^{M<5}$',r'FR-01',r'SC + SC$_{C}$$^{M>5}$ + SC$_{D}$$^{M<5}$',r'[10 $\ast$001]',r'SD + SD$_{C}$$^{M>5}$ + SD$_{D}$$^{M<5}$']
# ngroups=[r'FR-D',r'FR-C',r'FR-O',r'SC + SD',r'SC-O + SD-O']
#
# plot_SDcslambDIF_agre('SD_agre_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
##
##
## lab='SD_N9_beta1_b10_w09'
## bigmatSD=np.load('file_'+lab+'_NEW_1.npy')
## ext='eps'
## vmin=0.5; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys']
## groups=[gMB,gSF,gSFm,gSC+gSD,[22,30, 43,41]]
## #ngroups=['Blues','Purples','Greens','Reds','Greys']
## #ngroups=[r'[00 00$\ast$$\ast$] + [00 10$\ast$$\ast$]', r'[00 11$\ast$$\ast$] + [00 10$\ast$$\ast$]',r'[00 01$\ast$$\ast$]',r'[10 0011] + [10 1011] + [10 0010]',r'[01 1100] + [01 1110] + [01 1000]']
## #ngroups=[r'FR-D + FR-10$^{M>5}$', r'FR-C + FR-10$^{M<5}$',r'FR-01',r'SC + SC$_{C}$$^{M>5}$ + SC$_{D}$$^{M<5}$',r'[10 $\ast$001]',r'SD + SD$_{C}$$^{M>5}$ + SD$_{D}$$^{M<5}$']
## ngroups=[r'FR-D',r'FR-C',r'FR-O',r'SC + SD',r'SC-O + SD-O']
##
## plot_SDcslambDIF_agre('SD_agre_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
##
##
##
##
##
##
## vmin=0.1
## vmax=1.
## comap='Reds'
## plot_SDcslambDIF_1('gSC','gSC',' ',gSC,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gSD','gSD',' ',gSD,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gSF','gSF',' ',gSF,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gSCm','gSCm',' ',gSCm,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gSDm','gSDm',' ',gSDm,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gSFm','gSFm',' ',gSFm,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gMB','gMB',' ',gMBc+gMBd,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('ALL-g','All-groups','groups',gALL,gMBc+gMBd+gSC+gSD+gSF+gSCm+gSDm+gSFm,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## #plot_SDcslambDIF_1('gMBc','gMBc',' ',gMBc,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## #plot_SDcslambDIF_1('gMBd','gMBd',' ',gMBd,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
##
## vmin=0
## vmax=1.
## comap='Reds'
## plot_SDcslambDIF_1('gS00','gS00',' ',gS00,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gS11','gS11',' ',gS11,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gS10','gS10',' ',gS10,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
## plot_SDcslambDIF_1('gS01','gS01',' ',gS01,[],bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,comap)
##
##################################################################################################################
################### PLOTS signal action panels ############################################################################
# import matplotlib.pyplot as plt
# import matplotlib as mpl
#
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
# M=5
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=1.
# csV= csVo
# bV=c1* bVo
#
# bigmatSD=np.load('file_SD_N9_beta1_b10_w1_NEW_1.npy')
# STs00,STs11,STs10,STs01,STsign, STsignonly, STmem, STmemonly, STsignmem=classST()
#
# alp=1.; step=0.02; iM=2; iQ=2
# f,axs=plt.subplots(nrows=2, ncols=3, sharex='none', sharey='all' )
# f.subplots_adjust(hspace=0.7, wspace=0.2)
#
# vmin=-1e-10; vmax=1.
# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# axs[0,0].contourf(lambV,csVo,np.sum(bigmatSD[gS00,:,:,0,iM,iQ],axis=0),np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Reds')
# axs[0,0].set_title(r'[00 $\ast$$\ast$$\ast$$\ast$]', size=8 )
# axs[0,1].contourf(lambV,csVo,np.sum(bigmatSD[gS01,:,:,0,iM,iQ],axis=0),np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Reds')
# axs[0,1].set_title(r'[01 $\ast$$\ast$$\ast$$\ast$]', size=8 )
# axs[0,2].contourf(lambV,csVo,np.sum(bigmatSD[gS10,:,:,0,iM,iQ],axis=0),np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Reds')
# axs[0,2].set_title(r'[10 $\ast$$\ast$$\ast$$\ast$]', size=8 )
# f.subplots_adjust(left=0.1,right=0.8)
# cbar_ax = f.add_axes([0.85, 0.6, 0.03, 0.28])
# hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Signal',cmap='Reds')
# cbar_ax.tick_params(labelsize=8)
#
# print(STsignonly)
# vmin=-1e-10; vmax=0.5
# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# axs[1,0].contourf(lambV,csVo,np.sum(bigmatSD[[0,63],:,:,0,iM,iQ],axis=0),np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Blues')
# axs[1,0].set_title(r'[$\ast$$\ast$ 0000]+[$\ast$$\ast$ 1111]', size=8)
# axs[1,1].contourf(lambV,csVo,np.sum(bigmatSD[STmemonly,:,:,0,iM,iQ],axis=0),np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Blues')
# axs[1,1].set_title(r'[$\ast$$\ast$ 1010]+[$\ast$$\ast$ 0101]', size=8)
# axs[1,2].contourf(lambV,csVo,np.sum(bigmatSD[STsignonly,:,:,0,iM,iQ],axis=0),np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap='Blues')
# axs[1,2].set_title(r'[$\ast$$\ast$ 1100]+[$\ast$$\ast$ 0011]', size=8)
# cbar_ax = f.add_axes([0.85, 0.123, 0.03, 0.28])
# hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Action',cmap='Blues')
# step=0.1; ti=np.arange(0.,vmax+step,step); ti_s=["%.1f" % x for x in ti]; ti_s[-1]='>'+ti_s[-1]
# hb.set_ticks(ti)
# hb.set_ticklabels(ti_s)
# cbar_ax.tick_params(labelsize=8)
#
# for i in range(0,len(axs)):
# axs[i,0].set_ylabel(r'$c_s$', size=12)
# for j in range(0,len(axs[0])):
# axs[i,j].set_xlabel(r'$\lambda$',size=12)
# axs[i,j].set_xticks([0,0.25,0.5,0.75,1]); #axs[iM,iQ].set_yticks([0,0.5,1])
# axs[i,j].set_xticklabels(["0","0.25","0.5","0.75","1"]); #axs[iM,iQ].set_yticklabels(["0","0.5","1"])
# axs[i,j].set_yticks([0,0.25,0.5,0.75,1]);
# axs[i,j].set_yticklabels(["0","0.25","0.5","0.75","1"]);
# axs[i,j].tick_params(axis='both', which='major', labelsize=8)
# axs[i,j].grid(which='both', axis='both',ls='dashed')
#
# f.savefig('signal-action_mechanism_w1_M5_1.eps', dpi=300)
# f.clf()
#
####################################################################################################################
################### PLOTS r-N ############################################################################
#
#
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
# M=5
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=1.
# csV= csVo
# bV=c1* bVo
#
# bigmatSD_5_9=np.load('file_SD_N9_beta1_b5_w1_NEW_1.npy')
# bigmatSD_10_9=np.load('file_SD_N9_beta1_b10_w1_NEW_1.npy')
# bigmatSD_20_9=np.load('file_SD_N9_beta1_b20_w1_NEW_1.npy')
# bigmatSD_30_9=np.load('file_SD_N9_beta1_b30_w1_NEW_1.npy')
# bigmatSD_5_18=np.load('file_SD_N18_beta1_b5_w1_NEW_1.npy')
# bigmatSD_10_18=np.load('file_SD_N18_beta1_b10_w1_NEW_1.npy')
# bigmatSD_20_18=np.load('file_SD_N18_beta1_b20_w1_NEW_1.npy')
# bigmatSD_30_18=np.load('file_SD_N18_beta1_b30_w1_NEW_1.npy')
# labright=['$r=$5','$r=$10','$r=$20','$r=$30']
# labup=['$N$=9','$N$=18']
# bigmatSDlist=[[bigmatSD_5_9,bigmatSD_10_9,bigmatSD_20_9,bigmatSD_30_9],[bigmatSD_5_18,bigmatSD_10_18,bigmatSD_20_18,bigmatSD_30_18]]
# bigmatSDlist=list(map(list, zip(*bigmatSDlist))) # transposing list
# #iQ=np.zeros((3,2),int); iM=np.zeros((3,2),int)
# iQ=2; iM=2
#
# ext='eps'
# vmin=0.5; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys']
## groups=[gMB,gSF,gSFm,gSC,gSD]
## ngroups=[r'FR-D + FR-10', r'FR-C',r'FR-01',r'SC + SC$_{C}$',r'SD + SD$_{C}$']
# comaps=['Blues','Purples','Greens','Reds','Greys']
# groups=[gMB,gSF,gSFm,gSC+gSD,[22,30, 43,41]]
# ngroups=[r'FR-D',r'FR-C',r'FR-O',r'SC + SD',r'SC-O + SD-O']
#
# plot_SDspace_agre('SD_agre_r-N_w1',groups,comaps,ngroups,bigmatSDlist,csV,lambV,iM,iQ,M,labup,labright,vmin,vmax,ext)
##
## bigmatSD_15_9=np.load('file_SD_N9_beta05_b15_w09_NEW.npy')
## bigmatSD_20_9=np.load('file_SD_N9_beta05_b20_w09_NEW.npy')
## bigmatSD_30_9=np.load('file_SD_N9_beta05_b30_w09_NEW.npy')
## bigmatSD_15_18=np.load('file_SD_N18_beta05_b15_w09_NEW.npy')
## bigmatSD_20_18=np.load('file_SD_N18_beta05_b20_w09_NEW.npy')
## bigmatSD_30_18=np.load('file_SD_N18_beta05_b30_w09_NEW.npy')
## labright=['$r=$15','$r=$20','$r=$30']
## labup=['$N$=9','$N$=18']
## bigmatSDlist=[[bigmatSD_15_9,bigmatSD_20_9,bigmatSD_30_9],[bigmatSD_15_18,bigmatSD_20_18,bigmatSD_30_18]]
## bigmatSDlist=list(map(list, zip(*bigmatSDlist))) # transposing list
## #iQ=np.zeros((3,2),int); iM=np.zeros((3,2),int)
## iQ=2; iM=2
##
## ext='eps'
## vmin=0.4; vmax=1.
## comaps=['Blues','Purples','Greens','Reds','Greys']
## groups=[gMB,gSF,gSFm,gSC,gSD]
## ngroups=[r'FR-D + FR-10', r'FR-C',r'FR-01',r'SC + SC$_{C}$',r'SD + SD$_{C}$']
##
## plot_SDspace_agre('SD_agre_r-N_w09',groups,comaps,ngroups,bigmatSDlist,csV,lambV,iM,iQ,M,labup,labright,vmin,vmax,ext)
#
#################################################################################################################
############ PAYOFSS ####################
# STRp=declareSTR(0)
# for i in range(0,STRp.shape[0]):
# print([i, STRp[i,:]])
#
# beta=1.
# Z=100
# N=9
# eps=0.01
# w=0.9
# c1=0.5
#
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,2,31) #np.linspace(0,0.3,51)
# bVo=np.array([20.])
# MV= np.array([1,3,5,7,9]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
# QV= np.array([1., 2.5, 4.5,6.5,8.5])
# csV= c1*csVo
# bV=c1* bVo
# bigmatSD=np.load('file_SD_N9_beta05_b20_w09_NEW.npy') # CAREFUL: IT HAS TO BE COHERENT WITH PARAMETERS DEFINED ABOVE
# bigmatPAY=calcBIGPAY(bigmatSD,csV,lambV,MV,QV,bV[0],c1,N,eps,w)
# np.save('file_PAY_N9_beta05_b20_w09_NEW.npy',bigmatPAY)
#
# bigmatPAY=np.load('file_PAY_N9_beta05_b20_w09_NEW.npy')
# #print(np.amin(bigmatPAY)/bV[0])
# vmin=0.
# vmax=1.
# plot_PAYcslamb('PAYavg_w09',bigmatPAY,csVo,lambV,bV,MV,QV,vmin,vmax)
#############################################
########## SAVE BIG MATRIX for Q-M plots - ONLY SIG ###################
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# STRmPURE=declareSTR(0); nSTR=STRmPURE.shape[0];
#
# lambV=np.linspace(0,1,31)
#
# csVo= np.linspace(0,1,31) #np.linspace(0,0.3,51)
# MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #np.array([1,3,5,7,9]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
# QV= np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([2., 4., 9.5,13.5,17.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
#
# ##### test ####
## lambV=np.linspace(0,1,31)
## csVo= np.linspace(0,2,31) #np.linspace(0,0.3,51)
## bVo=np.array([20.])
## MV= np.array([5]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
## QV= np.array([4.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
## w=0.9
## c1=0.2
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## #bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## #np.save('file_SD_N9_beta02_b20_w09_NEWtest_4.5_5.npy',bigmatSD)
# ################
#
#
# STRmPURE=declareSTR_SIG(0); nSTR=STRmPURE.shape[0];
#
# bVo=np.array([10.])
## w=0.9
## c1=1. #0.5
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## np.save('file_SD_N9_beta5_b10_w09_NEW_1_SIG.npy',bigmatSD)
#### np.save('file_COOP_N9_beta5_b10_w1_X.npy',bigmatCOOP)
### w=0.9
### c1=0.5
### csV=c1* csVo #np.linspace(0,0.3,51)
### bV=c1* bVo
### bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
### np.save('file_SD_N9_beta1_b20_w09_NEWtest.npy',bigmatSD)
##
### bVo=np.array([30.])
### w=0.9
### c1=0.5
### csV=c1* csVo #np.linspace(0,0.3,51)
### bV=c1* bVo
### bigmatCOOP,bigmatSD=doMATSD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
### np.save('file_SD_N18_beta05_b30_w09_NEW.npy',bigmatSD)
###
# w=1.
# c1=1.
# csV=c1* csVo #np.linspace(0,0.3,51)
# bV=c1* bVo
# bigmatCOOP,bigmatSD=doMATSD_SIG(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
# np.save('file_SD_N9_beta1_b10_w1_NEW_1_SIG.npy',bigmatSD)
#
###################
################### PLOTS Q-M - ONLY SIG ############################################################################
#
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,2,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
# MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #n #np.array([5,6,7,8,9]) #np.array([3,9,15,21,27])
# QV= np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([2., 4., 9.5,13.5,17.5]) # #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=0.5
# csV= csVo
# bV=c1* bVo
#
#
# lab='SD_N9_beta1_b10_w1'
# bigmatSD=np.load('file_'+lab+'_NEW_1_SIG.npy')
# ext='eps'
# vmin=0.5; vmax=1.
# comaps=['Blues','Purples','Reds']
# groups=[[14, 15],[12, 13],[6, 9]]
# ngroups=[r'FR-D',r'FR-C',r'SC + SD']
#
# plot_SDcslambDIF_agre('SD_agre_SIG_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
#
#####################################
######################### find drift groups - ONLY SIG ###############################3
# beta=1.
# Z=100
# N=9
# M=5
# Q=4.5
# lamb=0.5 #0.5 #0.8
# eps=0.01
# w=1.
# #H, L
# c1=1. #2.5
# c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
# cs=np.array([1., 1.]) *0.1 *c1 #*0.06 *c *0.8
# b=np.array([1., 0.]) *10. *c1 #7*c
#
# STRmPUR=declareSTR_SIG(0)
# expb=np.exp(-beta)
# coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
# labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_SIG'
# fixMvec=readfixMvec(labelfile)
# fixM=calcFIXM(coef,expb,Z,fixMvec)
#
# doINI_SIG(N,Z,M,Q,eps,w)
# SD=doREST_SIG(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
# PAYhomo,COOPhomo,COOPtot=doHOMO_SIG(lamb,eps,N,M,Q,b,c,cs,SD,w)
# SSD=np.concatenate((STRmPUR,np.transpose([PAYhomo]),np.transpose([COOPhomo[:,0]]),SD),axis=1)
# SSDsort=SSD[np.argsort(SSD[..., 8])]
# for i in range(0,len(SSDsort)):
# print('{0:3.0f} {1:5.0f} {2:3.0f} {3:5.0f} {4:3.0f} {5:3.0f} {6:3.0f} {7:12.2e} {8:6.2f} {9:8.2f}'.format(np.argsort(SSD[..., 8])[i],SSDsort[i,0],SSDsort[i,1],SSDsort[i,2],SSDsort[i,3],SSDsort[i,4],SSDsort[i,5],SSDsort[i,6],SSDsort[i,7],SSDsort[i,8])) #print(SSDsort[i,:])
#############
###### SAVE BIG MATRIX for Q-M plots - ONLY REC ###################
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# STRmPURE=declareSTR(0); nSTR=STRmPURE.shape[0];
#
# lambV=np.linspace(0,1,31)
#
# csVo= np.linspace(0,1,31) #np.linspace(0,0.3,51)
# MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #np.array([1,3,5,7,9]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
# QV= np.array([1.]) #np.array([2., 4., 9.5,13.5,17.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
#
#
# STRmPURE=declareSTR_REC(0); nSTR=STRmPURE.shape[0];
#
# bVo=np.array([10.])
#
# w=1.
# c1=1.
# csV=c1* csVo #np.linspace(0,0.3,51)
# bV=c1* bVo
# bigmatCOOP,bigmatSD=doMATSD_REC(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
# np.save('file_SD_N9_beta1_b10_w1_NEW_1_REC.npy',bigmatSD)
#
###################
################### PLOTS Q-M - ONLY REC ############################################################################
#
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,2,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
# MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #n #np.array([5,6,7,8,9]) #np.array([3,9,15,21,27])
# QV= np.array([1.]) #np.array([2., 4., 9.5,13.5,17.5]) # #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=0.5
# csV= csVo
# bV=c1* bVo
#
#
# lab='SD_N9_beta1_b10_w1'
# bigmatSD=np.load('file_'+lab+'_NEW_1_REC.npy')
# ext='eps'
# vmin=0.5; vmax=1.
# comaps=['Blues','Purples','Greens','Reds']
# groups=[[0],[3],[2], [1]]
# ngroups=[r'D',r'C',r'aTFT (O)', r'TFT']
#
# plot_SDcslambDIF_agre('SD_agre_REC_'+lab,groups,comaps,ngroups,bigmatSD,csV,lambV,bV,MV,QV,vmin,vmax,ext)
#
###########
######################### find drift groups - ONLY REC ###############################3
# beta=1.
# Z=100
# N=9
# M=7
# Q=4.5
# lamb=1. #0.5 #0.8
# eps=0.01
# w=1.
# #H, L
# c1=1. #2.5
# c=np.array([1., 1.]) *1. *c1 #*0.3 *0.8
# cs=np.array([1., 1.]) *0.1 *c1 #*0.06 *c *0.8
# b=np.array([1., 0.]) *10. *c1 #7*c
#
# STRmPUR=declareSTR_REC(0)
# expb=np.exp(-beta)
# coef=np.array([[b[0]*lamb, -c[0]*lamb, -cs[0]*lamb],[b[1]*(1.-lamb), -c[1]*(1.-lamb), -cs[1]*(1.-lamb)]])
# labelfile='GRIM_N_'+str(N)+'_M_'+str(M)+'_Q_'+str(Q)+'_eps_'+str(eps)+'_w_'+str(w)+'_REC'
# fixMvec=readfixMvec(labelfile)
# fixM=calcFIXM(coef,expb,Z,fixMvec)
#
# doINI_REC(N,Z,M,Q,eps,w)
# SD=doREST_REC(b,c,cs,lamb,beta,N,Z,M,Q,eps,w)
# PAYhomo,COOPhomo,COOPtot=doHOMO_REC(lamb,eps,N,M,Q,b,c,cs,SD,w)
# SSD=np.concatenate((STRmPUR,np.transpose([PAYhomo]),np.transpose([COOPhomo[:,0]]),SD),axis=1)
# SSDsort=SSD[np.argsort(SSD[..., 8])]
# for i in range(0,len(SSDsort)):
# print('{0:3.0f} {1:5.0f} {2:3.0f} {3:5.0f} {4:3.0f} {5:3.0f} {6:3.0f} {7:12.2e} {8:6.2f} {9:8.2f}'.format(np.argsort(SSD[..., 8])[i],SSDsort[i,0],SSDsort[i,1],SSDsort[i,2],SSDsort[i,3],SSDsort[i,4],SSDsort[i,5],SSDsort[i,6],SSDsort[i,7],SSDsort[i,8])) #print(SSDsort[i,:])
#############
#
######## SAVE BIG MATRIX for Q-M plots - ONLY C+D ###################
## beta=1.
## Z=100
## N=9
## eps=0.01
##
##
## lambV=np.linspace(0,1,31)
##
## csVo= np.linspace(0,1,1)
## MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #np.array([1,3,5,7,9]) #np.array([5,6,7,8,9]) #np.array([2,6,10,14,18]) #np.array([3,9,15,21,27])
## QV= np.array([1.]) #np.array([2., 4., 9.5,13.5,17.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
##
##
##
## STRmPURE=declareSTR_CD(0); nSTR=STRmPURE.shape[0];
##
## bVo=np.array([10.])
##
## w=1.
## c1=1.
## csV=c1* csVo #np.linspace(0,0.3,51)
## bV=c1* bVo
## bigmatCOOP,bigmatSD=doMATSD_CD(beta,Z,N,nSTR,c1,csV,lambV,bV,MV,QV,w,eps)
## np.save('file_SD_N9_beta1_b10_w1_NEW_1_C+D.npy',bigmatSD)
##
##########
#
#
################ Plot SIG only and REC, C+D
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,2,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
# MV= np.array([1,3,5,7,9]) #np.array([2,6,10,14,18]) #n #np.array([5,6,7,8,9]) #np.array([3,9,15,21,27])
# QV= np.array([1.,2.5, 4.5,6.5,8.5]) #np.array([2., 4., 9.5,13.5,17.5]) # #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([1., 2.5, 4.5,6.5,8.5]) #np.array([4.5,5.5,6.5,7.5,8.5]) #np.array([2.0,6.0,9.0,10.0,14.0,18.0]) #np.array([3.0,9.0,13.5,15.0,21.0,27.0])
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=0.5
# csV= csVo
# bV=c1* bVo
#
#
# label='SD_N9_beta1_b10_w1'
# bigmatSD=np.load('file_'+label+'_NEW_1_SIG.npy')
# ext='svg'
# vmin=0.5; vmax=1.
# comapsV=['Blues','Purples','Reds']
# groups=[[14, 15],[12, 13],[6, 9]]
# nameg=[r'FR-D',r'FR-C',r'SC + SD']
#
# bigmatSDREC=np.load('file_'+label+'_NEW_1_REC.npy')
# bigmatSDCD=np.load('file_'+label+'_NEW_1_C+D.npy')
#
# import matplotlib.pyplot as plt
# import matplotlib as mpl
# alp=1.
# lAGR=list(bigmatSD.shape); del lAGR[0]; lAGR.insert(0,len(groups)); bigmatAGR=np.empty(lAGR)
# for i in range(0,len(groups)):
# bigmatAGR[i,:]=np.sum(bigmatSD[groups[i],...],axis=0)
# nr=bigmatAGR.shape[4]; nc=bigmatAGR.shape[5]
## f=plt.figure(1,figsize=(20,20))
# f,axs=plt.subplots(nrows=nr, ncols=nc, sharex='all', sharey='all' )
# f.subplots_adjust(hspace=0.2, wspace=0.2)
#
# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# comaps=comapsV
# for i in range(len(groups)):
# comaps[i]=plt.get_cmap(comapsV[i])
# comaps[i]= truncate_colormap(comaps[i], 0.25, 1)
# for iM in range(nr-1,-1,-1):
# axs[iM,nc-1].text(1.1,0.48,"$M=%s$" % str(MV[iM]), size=9 ,va='center')
# for iQ in range(nc-1,0,-1):
# step=0.02
# if MV[iM]>5: # to avoid problems with [0010**], which is two places for w=1
# rg=range(len(groups)-1,-1,-1)
# else:
# rg=range(0,len(groups))
# for i in rg:
# h=axs[iM,iQ].contourf(lambV,csV,bigmatAGR[i,:,:,0,iM,iQ],np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap=comaps[i])
# axs[iM,iQ].set_xticks([0,0.5,1]); axs[iM,iQ].set_yticks([0,0.5,1])
# axs[iM,iQ].set_xticklabels(["0","0.5","1"]); axs[iM,iQ].set_yticklabels(["0","0.5","1"])
# #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
# #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
# #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
# #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
# axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
# axs[iM,iQ].grid(which='both', axis='both',ls='dashed')
# axs[iM,iQ].set_xlim([0, 1]); axs[iM,iQ].set_ylim([0, 1])
# if iM==0:
# axs[iM,iQ].set_title("$Q=%s$" % str(QV[iQ]), size=9 )
#
# margbottom=0.15; margtop=0.87
# f.text(0.0, 0.5, '$c_s$', va='center', rotation='vertical',size=12)
# if nameg==0:
# margleft=0.1; margright=0.75;
# f.subplots_adjust(right=margright,top=margtop,bottom=margbottom, left=margleft)
# cbar_ax = f.add_axes([margright+0.1, margbottom, 1.-margleft-margright-0.12, margtop-margbottom])
# hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,label='Probability',cmap=comaps[-1])
# else:
# margleft=0.09; margright=0.66;
# f.subplots_adjust(right=margright,top=margtop,bottom=margbottom, left=margleft)
# for i in range(0,len(groups)):
#
# mr=0.06; hh=(margtop-0.45)/len(groups); hib=hh-0.11; botb=margtop-hh*(i+1)+0.109-0.027*i;
#
# #botb=(margtop-margbottom)/2.+(i-np.floor(len(groups)/2.))*0.2 ; hib=0.03
# cbar_ax = f.add_axes([margright+0.11, botb, 1.-margleft-margright-0.06, hib])
# hb=mpl.colorbar.ColorbarBase(cbar_ax, norm=norm,cmap=comaps[i],orientation='horizontal')
# step=0.25; ti=np.arange(vmin,vmax+step,step); ti_s=["%.2f" % x for x in ti]; # ti_s[0]='<'+ti_s[0]
# hb.set_ticks(ti)
# hb.set_ticklabels(ti_s)
# cbar_ax.tick_params(labelsize=8)
# cbar_ax.set_title(nameg[i],size=8,color=mpl.cm.get_cmap(comaps[i])(1.))
#
# f.text((margright-margleft)/2+margleft, 0.04, '$\lambda$', ha='center',size=12)
#
# iQ=0
# for iM in range(nr-1,-1,-1):
# axs[iM,iQ].plot(lambV,bigmatSDCD[1,0,:,0,iM,iQ],linewidth=0.8,color='Black',label='C (B)')
# axs[iM,iQ].plot(lambV,bigmatSDREC[0,0,:,0,iM,iQ],color='Blue',label='D')
# axs[iM,iQ].plot(lambV,bigmatSDREC[3,0,:,0,iM,iQ],color='Purple',label='C')
# axs[iM,iQ].plot(lambV,bigmatSDREC[2,0,:,0,iM,iQ],'--',color='Green',label='O')
# axs[iM,iQ].plot(lambV,bigmatSDREC[1,0,:,0,iM,iQ],'--',color='Orange',label='F')
# axs[iM,iQ].set_xticks([0,0.5,1]); axs[iM,iQ].set_yticks([0,0.5,1])
# axs[iM,iQ].set_xticklabels(["0","0.5","1"]); axs[iM,iQ].set_yticklabels(["0","0.5","1"])
# #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
# #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
# #axs[iM,iQ].set_yticks([0,0.5,1.,1.5]);
# #axs[iM,iQ].set_yticklabels(["0","0.1","0.2","0.3"]);
# axs[iM,iQ].tick_params(axis='both', which='major', labelsize=8)
# axs[iM,iQ].grid(which='both', axis='both',ls='dashed')
# axs[iM,iQ].set_xlim([0, 1]); axs[iM,iQ].set_ylim([0, 1])
# axs[0,0].set_title("B & R", size=8 )
# axs[0,0].legend(loc=1,bbox_to_anchor=(8.5,-3), shadow=False, fontsize=8)
#
#
# #hb.set_ticks(np.linspace(vmin,vmax,11))
## plt.show()
# #f.text(0.874, 0.95, labup, va='center', ha='center',color='darkred',size=10)
# #f.text(0.874, 0.08, labdown, va='center', ha='center',color='darkblue',size=10)
#
# #for i in range(0,len(ext)):
# f.savefig(label+'_SIG_REC.'+'svg', dpi=600)
# f.clf()
#
#############
############ Plot SIG and SIG+REC
# lambV=np.linspace(0,1,31)
# csVo= np.linspace(0,1,31) #np.linspace(0,2,31) #np.linspace(0,0.3,51)
# bVo=np.array([10.])
#
# beta=1.
# Z=100
# N=9
# eps=0.01
#
# c1=0.5
# csV= csVo
# bV=c1* bVo
#
# ext='eps'
# vmin=0.5; vmax=1.
#
# import matplotlib.pyplot as plt
# import matplotlib as mpl
# alp=1.
# step=0.02
# iM=2; iQ=2
#
# f,axs=plt.subplots(nrows=2, ncols=2, sharex='all')#, sharey='all' )
# f.subplots_adjust(hspace=0.8, wspace=0.45)
#
#
#
# label='SD_N9_beta1_b10_w1'
# bigmatSD=np.load('file_'+label+'_NEW_1_SIG.npy')
# comapsV=['Blues','Purples','Reds']
# groups=[[14, 15],[12, 13],[6, 9]]
# nameg=[r'FR-D',r'FR-C',r'SC + SD']
#
# lAGR=list(bigmatSD.shape); del lAGR[0]; lAGR.insert(0,len(groups)); bigmatAGR=np.empty(lAGR)
# for i in range(0,len(groups)):
# bigmatAGR[i,:]=np.sum(bigmatSD[groups[i],...],axis=0)
# nr=bigmatAGR.shape[4]; nc=bigmatAGR.shape[5]
#
# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# comaps=comapsV
# for i in range(len(groups)):
# comaps[i]=plt.get_cmap(comapsV[i])
# comaps[i]= truncate_colormap(comaps[i], 0.25, 1)
#
# rg=range(0,len(groups))
# for i in rg:
# h=axs[1,0].contourf(lambV,csV,bigmatAGR[i,:,:,0,iM,iQ],np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap=comaps[i])
# axs[1,0].set_xticks([0,0.5,1]); axs[1,0].set_yticks([0,0.5,1])
# axs[1,0].set_xticklabels(["0","0.5","1"]); axs[1,0].set_yticklabels(["0","0.5","1"])
# axs[1,0].tick_params(axis='both', which='major', labelsize=8)
# axs[1,0].grid(which='both', axis='both',ls='dashed')
# axs[1,0].set_xlim([0, 1]); axs[1,0].set_ylim([0, 1])
# axs[1,0].set_xlabel('$\lambda$',size=18)
# axs[1,0].set_ylabel('$c_s$', rotation='vertical',size=18)
# axs[1,0].set_title('S',size=18,pad=10)
#
# label='SD_N9_beta1_b10_w1'
# bigmatSD=np.load('file_'+label+'_NEW_1.npy')
# comapsV=['Blues','Purples','Greens','Reds','Greys']
# groups=[gMB,gSF,gSFm,gSC+gSD,[22,30, 43,41]]
# nameg=[r'FR-D',r'FR-C',r'SC + SD']
#
# lAGR=list(bigmatSD.shape); del lAGR[0]; lAGR.insert(0,len(groups)); bigmatAGR=np.empty(lAGR)
# for i in range(0,len(groups)):
# bigmatAGR[i,:]=np.sum(bigmatSD[groups[i],...],axis=0)
# nr=bigmatAGR.shape[4]; nc=bigmatAGR.shape[5]
#
# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
# comaps=comapsV
# for i in range(len(groups)):
# comaps[i]=plt.get_cmap(comapsV[i])
# comaps[i]= truncate_colormap(comaps[i], 0.25, 1)
#
# rg=range(0,len(groups))
# for i in rg:
# h=axs[1,1].contourf(lambV,csV,bigmatAGR[i,:,:,0,iM,iQ],np.arange(vmin,vmax+0.1,step),alpha=alp,vmin=vmin,vmax=vmax, cmap=comaps[i])
# axs[1,1].set_xticks([0,0.5,1]); axs[1,1].set_yticks([0,0.5,1])
# axs[1,1].set_xticklabels(["0","0.5","1"]); axs[1,1].set_yticklabels(["0","0.5","1"])
# axs[1,1].tick_params(axis='both', which='major', labelsize=8)
# axs[1,1].grid(which='both', axis='both',ls='dashed')
# axs[1,1].set_xlim([0, 1]); axs[1,1].set_ylim([0, 1])
# axs[1,1].set_xlabel('$\lambda$',size=18)
# axs[1,1].set_ylabel('$c_s$', rotation='vertical',size=18)
# axs[1,1].set_title('S + R',size=18,pad=10)
#
# axs[1,0].tick_params(labelsize=14)
# axs[1,1].tick_params(labelsize=14)
# f.subplots_adjust(top=1.5, bottom=0.15, left=0.13, right=0.95)
#
# f.savefig(label+'_SIG_vs_REC_1panel.'+ext, bbox_inches=mpl.transforms.Bbox([[0,0], [6, 3]]), dpi=600)
# f.clf()
#
########
| [
"psiquefnx@yahoo.es"
] | psiquefnx@yahoo.es |
ae83c609dc422c29cbce9d62bb41510d5452457c | 67baa6d2d6db9dc4c1208223f4cc5e72554acc49 | /backend/apps/profiles/api/serializers.py | 260e4c67286292ce5d351092e0a60c17a0d1452f | [] | no_license | emanulz/iFact3 | 7d306cb4840c89e28be9f993316f3bb2cfe3bf36 | a5d3383e028295affb7a0fb2f5ced4b5c96c0e0e | refs/heads/master | 2021-05-09T15:17:26.380561 | 2018-04-18T04:06:45 | 2018-04-18T04:06:45 | 119,082,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rest_framework import serializers
from ..models import Profile
from django.contrib.auth.models import User, Permission
class ProfileSerializer(serializers.ModelSerializer):
    """Serialize a trainee Profile together with its linked auth user
    and identity fields (avatar, birth date, id number, pin)."""

    class Meta:
        model = Profile
        fields = (
            'id',
            'user',
            'avatar',
            'birth_date',
            'id_num',
            'pin',
        )
class UserSerializer(serializers.ModelSerializer):
    """Serialize Django auth User accounts, including group/permission
    links and the standard account status/timestamp fields."""

    class Meta:
        model = User
        fields = (
            'id',
            'username',
            'first_name',
            'last_name',
            'email',
            'groups',
            'user_permissions',
            'is_staff',
            'is_active',
            'is_superuser',
            'last_login',
            'date_joined',
        )
class PermissionSerializer(serializers.ModelSerializer):
    """Serialize django.contrib.auth Permission rows (name, content type,
    and codename)."""

    class Meta:
        model = Permission
        fields = (
            'id',
            'name',
            'content_type',
            'codename',
        )
| [
"emanuzuniga@gmail.com"
] | emanuzuniga@gmail.com |
7dc9d98ac0c20aba2f104d226f51b7223a263ef2 | a931da87e41aac7ceb1b388b7384a16c17b8099b | /part02-e02_file_listing/src/file_listing.py | fbaf0111d5c78bcf149f33e0be773af3a8ae5e56 | [] | no_license | Dolj0/Data-Analysis-with-Python-2021 | 8d8b2cfe3a7faca5c86dc1e1e8f21bae6f5419dd | 3a911055f59493dcd7e62013bc62e6189ef11490 | refs/heads/main | 2023-06-02T12:41:08.541182 | 2021-06-28T09:35:25 | 2021-06-28T09:35:25 | 380,969,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | #!/usr/bin/env python3
import re
def file_listing(filename="src/listing.txt"):
    """Parse an ``ls -l``-style listing file into structured tuples.

    Each matching line produces a tuple
    ``(size, month, day, hour, minute, name)`` with the numeric fields
    converted to int.

    Fixes over the original: lines that do not match the expected format
    (e.g. the "total ..." header) are skipped instead of raising
    AttributeError on a failed ``re.search``, the pattern is compiled once
    outside the loop, and the loop no longer rebinds the ``filename``
    parameter.
    """
    pattern = re.compile(r'(\d+)\s+(\w+)\s+(\d+)\s+(\d+):(\d+)\s(.+)')
    entries = []
    with open(filename, 'r') as f:
        for line in f:
            match = pattern.search(line)
            if match is None:
                continue  # header or malformed line: ignore it
            size, month, day, hour, minute, name = match.groups()
            entries.append((int(size), month, int(day), int(hour), int(minute), name))
    return entries
def main():
    # Placeholder CLI entry point; the parsing logic lives in file_listing().
    pass
if __name__ == "__main__":
    main()
| [
"74183085+Dolj0@users.noreply.github.com"
] | 74183085+Dolj0@users.noreply.github.com |
5237cad8961254c8b400da29a2b4de2d22e27c70 | 585d27de175381f291623a5a7c41a86ef54a8f75 | /day_16.py | 7352e4344412bb7ea64bbdda39b4dbc68d0d4a80 | [] | no_license | anvt/advent-of-code-2019 | 6e6dd7574693ac59072aa4227cea27fa9fe5b6e4 | 4396034588c6a6cd5aae468640143ab143e0c61e | refs/heads/master | 2021-01-04T02:09:39.161661 | 2019-12-26T09:49:08 | 2019-12-26T09:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | from itertools import cycle, accumulate
def read(file):
    """Return the first line of *file* (the puzzle input), newline included
    when the file has one."""
    with open(file, "r") as handle:
        return handle.readline()
def get_pattern(n):
    """Yield the FFT multiplier pattern for output position *n* (1-based).

    The base pattern (0, 1, 0, -1) is stretched so each value repeats *n*
    times, and the very first element is dropped, as the puzzle requires.
    """
    def stretched():
        while True:
            for base_value in (0, 1, 0, -1):
                for _ in range(n):
                    yield base_value
    stream = stretched()
    next(stream)  # discard the leading element per the puzzle rules
    yield from stream
def apply_pattern(signal, n):
    """Dot *signal* with the n-th repeating pattern and keep the ones digit."""
    total = 0
    for digit, factor in zip(signal, get_pattern(n)):
        total += digit * factor
    return abs(total) % 10
def phase(signal):
    """Run one complete FFT phase, recomputing every output position."""
    return [apply_pattern(signal, position + 1) for position in range(len(signal))]
def part_one():
    """After 100 FFT phases on the puzzle input, return the first 8 digits
    of the resulting signal as one integer."""
    digits = [int(ch) for ch in read("inputs/day_16.txt")]
    for _ in range(100):
        digits = phase(digits)
    return int("".join(str(d) for d in digits[:8]))
def part_two():
    """Decode the real signal: the input repeated 10000 times, read at the
    message offset encoded in its first seven digits.

    Past the halfway point each FFT output digit is simply a suffix sum
    mod 10, so the reversed tail can be updated with a cumulative sum.
    """
    raw = read("inputs/day_16.txt") * 10000
    offset = int(raw[:7])
    tail = [int(ch) for ch in raw[offset:]]
    tail.reverse()
    for _ in range(100):
        tail = list(accumulate(tail, lambda left, right: (left + right) % 10))
    tail.reverse()
    return int("".join(str(d) for d in tail[:8]))
| [
"kerwin.connolly@thehutgroup.com"
] | kerwin.connolly@thehutgroup.com |
954e1b053a8d302f64f191297523388708ecdaf0 | 3170fe0fe45571867c4cec61b9e1115f255c7dab | /S4/decimo_quinto_programa.py | 04e5090aa8ab4f045131920f1095ed68bb207635 | [] | no_license | marcelopontes1/Estudos-Python-GUPPE | de3bcb99c99fd3a10e22171857aaeaadac9f3dfe | c7c2a51bd703f436a4f210943fe041dbd50152f8 | refs/heads/master | 2023-08-16T14:25:00.070383 | 2021-10-07T08:36:10 | 2021-10-07T08:36:10 | 389,924,504 | 0 | 0 | null | 2021-09-10T10:33:11 | 2021-07-27T09:31:39 | Python | UTF-8 | Python | false | false | 220 | py | angulo_radianos = input('Digite aqui o ângulo em radianos: ')
# Convert the entered angle from radians to degrees.
# math.pi replaces the old hard-coded 3.14 approximation, which skewed
# every result (e.g. pi radians came out as slightly more than 180 deg).
import math

angulo_radianos_real = float(angulo_radianos)
angulo_graus = (angulo_radianos_real * 180) / math.pi
print(f'O valor do ângulo em graus é de {angulo_graus}')
| [
"marcelopontes.tele@gmail.com"
] | marcelopontes.tele@gmail.com |
d1bc37e3df64589881bf810be6fcb173012c508a | f8478f77d3bc81b8f7e7c0fa1a477f9c27005be1 | /1. Algorithmic ToolBox/week2_algorithmic_warmup/5_fibonacci_number_again/fibonacci_huge.py | 09d32a1825cafa5c62d4db1150e8d4c7e65b4135 | [] | no_license | princeofpython/UCSD-DSA-specialization | 258f2ba9ee26631d90895bd0bbc2d5e8472b21ab | ae0a4fef2fa1c1b4758973161c3b8cd34b9eb1cf | refs/heads/master | 2020-06-13T05:05:19.699080 | 2020-04-06T16:36:10 | 2020-04-06T16:36:10 | 194,545,239 | 5 | 5 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # Uses python3
import sys
def get_fibonacci_huge_naive(n, m):
    """Compute F(n) mod m by building the full Fibonacci number first
    (slow for large n; kept as the reference implementation)."""
    if n <= 1:
        return n
    older, newer = 0, 1
    for _ in range(n - 1):
        older, newer = newer, older + newer
    return newer % m
def get_fibonacci_mod_m(n, m):
    """Compute F(n) mod m, reducing modulo m at every step so the
    intermediate values stay small."""
    if n <= 1:
        return n
    older, newer = 0, 1
    for _ in range(n - 1):
        older, newer = newer, (older + newer) % m
    return newer
def pisano_period(m):
    """Return the Pisano period: the cycle length of the Fibonacci
    sequence taken modulo m.

    The sequence of consecutive pairs (F(i) mod m, F(i+1) mod m) always
    returns to the starting pair (0, 1); scanning the first m*m pairs is
    guaranteed to find it for m >= 2.

    Fix: for m == 1 every Fibonacci number is 0 mod 1, so the period is 1.
    The search loop can never observe the pair (0, 1) in that case and the
    original silently returned None, crashing the caller.
    """
    if m == 1:
        return 1
    previous = 0
    current = 1
    for i in range(m * m):
        previous, current = current, (previous + current) % m
        if previous == 0 and current == 1:
            return i + 1
def get_fibonacci_huge_fast(n, m):
    """Compute F(n) mod m for huge n by reducing n modulo the Pisano
    period of m first."""
    period = pisano_period(m)
    return get_fibonacci_mod_m(n % period, m)
if __name__ == '__main__':
    # Read "n m" from stdin.  The local is named 'data' so the built-in
    # input() is no longer shadowed (the original bound 'input' itself).
    data = sys.stdin.read()
    n, m = map(int, data.split())
    print(get_fibonacci_huge_fast(n, m))
| [
"47978078+princeofpython@users.noreply.github.com"
] | 47978078+princeofpython@users.noreply.github.com |
487a05afa21a97e164c32d773edcfbfc3a9c5770 | b761a9b2e92832d18127e3e23c728afe8c9be2c4 | /simple-talos/config.py | c6a92251b3d5816732bd7c9c13de82e86dcacc0c | [] | no_license | jhopkinsmoz/buildbot-configs | 2b88567df7a17f117cc639d5a84ca96ca34fc2de | 7755fa3b2efa2b9bd5e1055e58b51dd19d563756 | refs/heads/master | 2021-01-20T23:27:03.844637 | 2012-07-05T18:52:15 | 2012-07-05T18:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | platforms = {
    # Mac OS X test machines (Talos 'mac-perf' environment).
    'leopard-o': {
        'slaves': ['leopard-o1'],
        'env': 'mac-perf'
    },
    'leopard': {
        'slaves': ['leopard1'],
        'env': 'mac-perf'
    },
    'snowleopard': {
        'slaves': ['snowleopard1'],
        'env': 'mac-perf'
    },
    'snowleopard-r4': {
        'slaves': ['snowleopard-r4-1'],
        'env': 'mac-perf'
    },
    # Windows test machines (32- and 64-bit perf environments).
    'xp': {
        'slaves': ['xp1'],
        'env': 'win32-perf'
    },
    'win7': {
        'slaves': ['win7-1'],
        'env': 'win32-perf'
    },
    'w764': {
        'slaves': ['w764-1'],
        'env': 'win64-perf'
    },
    # Linux (Fedora) test machines.
    'fedora': {
        'slaves': ['fed1'],
        'env': 'linux-perf'
    },
    'fedora64': {
        'slaves': ['fed64-1'],
        'env': 'linux-perf'
    },
    }
# All builds and Talos runs are scheduled against the shared test branch.
buildBranch = 'MozillaTest'
talosBranch = 'MozillaTest'
branchName = 'MozillaTest'
| [
"bhearsum@mozilla.com"
] | bhearsum@mozilla.com |
f241451248b2068a4e530d4141bb6ad1821d7639 | 6b7c9f1909093c5be55572f1fa4bec60679c3443 | /optimization_Algorithm.py | 29d9af24d26a33fff9df20f0777d56a41f75b664 | [
"Apache-2.0"
] | permissive | blitzpaal/Lightweight_Construction_Seminar | f54ee63c7645930cbd1a6975f277118a34ed0be9 | f533623e9a3efe21f91333d1bf13f57efc3c5474 | refs/heads/main | 2023-03-02T17:57:04.269763 | 2021-02-08T16:13:56 | 2021-02-08T16:13:56 | 328,729,480 | 1 | 0 | null | 2021-02-08T16:13:57 | 2021-01-11T16:45:40 | Python | UTF-8 | Python | false | false | 1,817 | py | from scipy.optimize import NonlinearConstraint, Bounds, differential_evolution, shgo, dual_annealing, minimize
import numpy as np
from Shaft_dimensioning import calculate_shaft_strength, compose_stack
from CLT_calculation import calc_Q_0, CLT_ABD
# Material data: elastic properties of a single unidirectional CFRP ply.
t_ply = 0.125 # ply thickness in mm
E_11 = 126000 # Longitudinal tensile modulus in MPa
E_22 = 9000 # Transverse tensile modulus in MPa
G_12 = 4600 # Shear modulus in MPa
v_12 = 0.3 # Poisson’s ratio 1
# Stiffness matrix of UD-Layer
# (reduced stiffness Q for a 0-degree ply, used by the CLT routines below)
Q_0 = calc_Q_0(E_11, E_22, G_12, v_12)
def balanced(stack_angle):
    """Constraint function: the A16/A26 coupling terms of the laminate.

    A laminate is balanced when these extension-shear coupling entries of
    the ABD stiffness matrix vanish, so the optimizer constrains this
    return value to 0 (see ``balanced_laminate`` below).

    Reads the module-level configuration: ``symetric`` (mirror the stack
    about the mid-plane), ``t_ply`` and ``Q_0``.
    """
    if symetric:
        # Mirror the ply angles to enforce a symmetric layup.
        stack_angle = np.concatenate((stack_angle, np.flip(stack_angle)))
    # Fix: the stack was composed twice in a row; a single call suffices.
    stack = compose_stack(stack_angle, t_ply)
    ABD = CLT_ABD(stack, Q_0)
    return ABD[0:2, 2]
# Specify limits using a `Bounds` object
# (five ply angles, each free to rotate between -90 and +90 degrees)
bounds = Bounds([-90., -90., -90., -90., -90.], [90., 90., 90., 90., 90.])
# Constraints for balanced laminate (two options)
# NOTE: the constraint captures the balanced() *function* here, before the
# name is reused below.
balanced_laminate = NonlinearConstraint(balanced, 0.0, 0.0)
# From here on 'balanced' is a boolean flag passed to
# calculate_shaft_strength via args -- confusing name reuse, but the
# constraint object above already holds a reference to the function.
balanced = False
# Constraint for symetric laminate
symetric = True
"""
# Global optimization
glob_result = differential_evolution(calculate_shaft_strength, bounds=bounds, args=(balanced, symetric))
#glob_result = differential_evolution(calculate_shaft_strength, bounds=bounds, args=(balanced, symetric), constraints=(balanced_laminate))
print(glob_result.x, glob_result.fun)
"""
# Local optimization
# Starting point: ply angles in degrees (presumably taken from a previous
# global run -- TODO confirm provenance of these values).
x0 = np.array([81.98268888, -78.81994856, 47.28577036, 39.81491606, -9.35838018])
#loc_result = minimize(calculate_shaft_strength, x0, tol=1e-6, bounds=bounds, args=(balanced, symetric))
loc_result = minimize(calculate_shaft_strength, x0, tol=1e-6, bounds=bounds, args=(balanced, symetric), constraints=(balanced_laminate))
print(loc_result.x, loc_result.fun)
| [
"39123245+blitzpaal@users.noreply.github.com"
] | 39123245+blitzpaal@users.noreply.github.com |
81bf3c105d1a1393058d90b3633bcebdd5ae4fbf | 9b1446b26e81a79c303f9799fb6a91785c7adb03 | /.history/Code/histogram_20200120113537.py | 4f52929b9fac6bf129f57f7e695e94974d77475a | [] | no_license | SamirIngley/CS1.2-Tweet-Gen | 017ea15b1113881a156ff24682828bc654eb6c81 | bcd95fa63e05849cbf8e36230d8e31032b99daaa | refs/heads/master | 2020-12-14T20:19:57.733290 | 2020-08-04T23:19:23 | 2020-08-04T23:19:23 | 234,856,234 | 0 | 0 | null | 2020-06-05T21:13:04 | 2020-01-19T07:05:55 | Python | UTF-8 | Python | false | false | 696 | py |
def list_histo(source):
''' Takes text. Stores each item in text compares each item to the rest of the words in
text and keeps a running total. Used list account for no repeats.
'''
histo = []
used = []
text = source.split()
print(text)
for word in text:
counter = 0
if word in used:
continue
used.append(word)
for word2 in text:
if word == word2:
counter += 1
instance = [word, counter]
histo.append(instance)
print(histo)
return histo
if __name__ == '__main__':
source = 'one fish two fish red fish blue fish'
list_histo(source)
| [
"samir.ingle7@gmail.com"
] | samir.ingle7@gmail.com |
53ea762cb4cce7b48be34a7a9898a99384f751ed | 236c603ca9fb2eb008eb92c1cd37fc796d1fa50b | /ap/houses/admin.py | 86a752b9fea0fceb9e8350e7a5932f6313154adc | [] | no_license | lifedispenser/djattendance | 7c3366c9cd34b44c86931f584668fca591fad79e | e765d0f31c8cde8a4795d8037684dd8faa7d5145 | refs/heads/master | 2020-04-06T07:11:03.816217 | 2014-07-07T21:43:17 | 2014-07-07T21:43:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django import forms
from django.contrib import admin
from houses.models import House, Room, Bunk
from aputils.models import Address
class HouseAdminForm(forms.ModelForm):
    """Admin form for House that orders the address dropdown alphabetically
    by the first address line instead of by primary key."""
    address = forms.ModelChoiceField(queryset=Address.objects.order_by('address1'))

    class Meta:
        model = House
        # Explicit fields declaration: Django >= 1.8 raises
        # ImproperlyConfigured for a ModelForm Meta without fields/exclude.
        # '__all__' preserves the original include-every-field behaviour.
        fields = '__all__'
class HouseAdmin(admin.ModelAdmin):
    """Admin change list for houses: searchable by name, sorted by usage,
    gender, name and address, using the custom form above."""
    form = HouseAdminForm
    list_display = ('name', 'address', 'gender', 'used')
    ordering = ('used', 'gender', 'name', 'address',)
    search_fields = ['name']
# Register the housing models with the admin site; House gets the custom
# ModelAdmin above, Room and Bunk use the default admin.
admin.site.register(House, HouseAdmin)
admin.site.register(Room)
admin.site.register(Bunk)
"fermat200pg@gmail.com"
] | fermat200pg@gmail.com |
4d9b2f72046bbd70aa3f3e6a4be43d3cb9c3e5cb | cdb9b079d0d21ed70e8a2d49d5f98be5c40aaca0 | /Signature_Creator.py | dbe4ca0415dabcc66d34d06b3e8239b6e12f5aa6 | [] | no_license | devu-62442/AASC-Android-Application-Signature-Creation-Through-Graphs | 8df5e3bfcbbe16a7248f361c56402446bf99756d | 5a29b0da7e5070ca5934fd91b0106641f33f00dd | refs/heads/master | 2022-05-13T18:06:41.521095 | 2022-03-07T12:22:09 | 2022-03-07T12:22:09 | 216,142,171 | 2 | 0 | null | 2022-03-07T12:24:05 | 2019-10-19T03:15:50 | Python | UTF-8 | Python | false | false | 3,931 | py | #Android Application Graph Signature
# © Created By - Devyani Vij
#Header Files
import optparse
import networkx as nx
import re
import matplotlib.pyplot as plt
import pylab
import warnings
import os
import glob
import fnmatch
import pyfiglet
import warnings
# Silence library warnings so the console report stays readable.
warnings.filterwarnings("ignore")
#Android Application Graph Signature ASCII Banner
ascii_banner = pyfiglet.figlet_format("Android \t Application \t Graph \t Signature",width=1000)
print(ascii_banner)
#Reading the Callgraphs created using androguard tool
G2 = nx.read_gml('callgraph.gml',label='label')
#List containing the names of all the sensitive API without their methods.
# Android framework class names whose presence in the call graph is
# treated as security-sensitive (telephony, SMS, location, crypto, ...).
sensitive_api=['TelephonyManager','SmsManager','LocationManager','AudioManager','HttpURLConnection','ConnectivityManager','BroadcastReceiver','Cipher','AccessibleObject','PackageManager']
# Accumulators filled by the scan below: matched node labels and a count.
sensitive_api_malware=[]
count_api_in_malware = 0
# Scan every node label in the call graph for sensitive framework classes
# and count the total number of sensitive API nodes in the application.
# A label is split on ';' and then on '/' so the individual class-name
# tokens can be compared against sensitive_api.
# Fix: the original wrapped this scan in an outer loop over sensitive_api
# whose loop variable was never used, re-scanning the whole graph once per
# entry; the duplicate check made the result identical, so a single pass
# produces the same list and count.
for i in G2.nodes():
    data = re.split('[;]',i)
    data1 = re.split('/',data[0])
    for k in data1:
        if k in sensitive_api:
            if i not in sensitive_api_malware:
                sensitive_api_malware.append(i)
                count_api_in_malware = count_api_in_malware + 1
print('\033[93m'+"Total Sensitive API Calls found in the MALWARE: "+str(count_api_in_malware))
#Reading the graph of the Application
# Re-read the same call graph, this time keyed by integer node ids so edge
# endpoints can be mapped back to labels through G.nodes[id]['label'].
G = nx.read_gml('callgraph.gml',label='id')
# data: node ids whose label matched one of the sensitive labels above.
data=[]
b=nx.get_node_attributes(G,'label')
for keys,values in b.items():
    # Compare only the part of the label before any '[' suffix.
    splitting = re.split('[[]',values)
    if splitting[0] in sensitive_api_malware:
        data.append(keys)
#Getting the CALLER and CALLEE relationship between the Sensitive API's fetched above.
# listing collects labels of sensitive nodes that appear on an edge
# touching another sensitive node; U becomes the output signature graph.
listing=[]
U = nx.DiGraph()
counter_in_degree=0
for i in data:
    a=G.in_edges(i)  # edges (caller, i) arriving at this sensitive node
    for j in a:
        b=list(j)  # (source_id, target_id) of the edge as a list
        for k in b:
            if k in data:  # endpoint is itself a sensitive node
                if G.nodes[k]['label'] not in listing:
                    listing.append(G.nodes[k]['label'])
                    counter_in_degree=counter_in_degree+G.in_degree(i)
                else:
                    continue
            else:
                continue
# Sort the discovered sensitive-API labels ascending so the reports below
# are emitted in a deterministic order.  sorted() already returns a new
# ascending list, replacing the original element-by-element copy loop.
sensitive_api_in_malware_name = sorted(listing)
#Printing the Sensitive API calls found and Creating the DiGraph for sensitive API calls with the API's calling them
print("\n\nSensitive API calls are:")
for i in range(0,len(sensitive_api_in_malware_name)):
    for j in data:
        if G.nodes[j]['label']==sensitive_api_in_malware_name[i]:
            # Print the matching node in cyan, then capture its in-edges
            # as a small DiGraph to merge into the signature graph U.
            print('\033[96m'+sensitive_api_in_malware_name[i])
            a=G.in_edges(j)
            H=nx.DiGraph(a)
            break
        else:
            continue
    # NOTE(review): if no node matches this name, H is unbound on the
    # first pass (NameError) or stale from the previous iteration --
    # confirm every entry in sensitive_api_in_malware_name has a match.
    U = nx.disjoint_union(U, H)
b=[]
print("\n\n\n\033[93mCaller - Callee Relationship:")
for i in range(0,len(sensitive_api_in_malware_name)):
    for j in data:
        if G.nodes[j]['label']==sensitive_api_in_malware_name[i]:
            # Yellow "CALLEE" header, then the sensitive node in cyan.
            print('\n\033[93m'+'CALLEE -'+'\n\033[96m'+sensitive_api_in_malware_name[i])
            a=G.in_edges(j)
            if(len(a)==0):
                continue
            else:
                print('\033[93m'+'CALLER -')
                # NOTE(review): this inner loop reuses the name 'j',
                # shadowing the node id of the enclosing loop. It works
                # because the outer for reassigns j, but it is fragile.
                for j in a:
                    b=list(j)
                    for k in b:
                        if k not in data:
                            # Only report callers that are not themselves
                            # sensitive nodes.
                            print('\033[96m'+G.nodes[k]['label'])
                        else:
                            continue
# Persist the assembled signature graph, then render it on screen.
nx.write_gml(U, "Signature.gml")
#Plotting the Graph
nx.draw(U,arrows=True,with_labels=True,edge_color = 'b')
plt.show()
print("\n")
| [
"noreply@github.com"
] | noreply@github.com |
3b97d98cc204f758f37eb76c8521f749d6b32beb | c5ab3e122bc014427ddf7198ee6648b5660a26d5 | /install/lib/python2.7/dist-packages/opencv_apps/cfg/SmoothingConfig.py | f8d04bf5fdcd665792d194783be4c97d97042580 | [] | no_license | edwardwterry/lfm_project | 3c631747135d3a347bbe4b08e2e5f96df93e4e2a | 2b208cda6de47e66b0c42e627d40a0bb08a84dda | refs/heads/master | 2020-04-06T07:57:54.977826 | 2018-11-24T00:49:22 | 2018-11-24T00:49:22 | 157,290,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,471 | py | ## *********************************************************
##
## File autogenerated for the opencv_apps package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Indicates that the camera_info topic should be subscribed to to get the default input_frame_id. Otherwise the frame from the image message will be used.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'use_camera_info', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Smoothing Filter Methods', 'max': 3, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'filter_type', 'edit_method': "{'enum_description': 'An enum for Smoothing Filter Mehtods', 'enum': [{'srcline': 10, 'description': 'Homogeneous blur', 'srcfile': '/home/et/Documents/lfm_ws/src/opencv_apps/cfg/Smoothing.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Homogeneous_Blur'}, {'srcline': 11, 'description': 'Gaussian blur', 'srcfile': '/home/et/Documents/lfm_ws/src/opencv_apps/cfg/Smoothing.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Gaussian_Blur'}, {'srcline': 12, 'description': 'Median blur', 'srcfile': '/home/et/Documents/lfm_ws/src/opencv_apps/cfg/Smoothing.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'Median_Blur'}, {'srcline': 13, 'description': 'Bilateral Filter', 'srcfile': '/home/et/Documents/lfm_ws/src/opencv_apps/cfg/Smoothing.cfg', 'cconsttype': 'const int', 'value': 3, 'ctype': 'int', 
'type': 'int', 'name': 'Bilateral_Filter'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Size of the kernel (only one because we use a square window). Must be odd.', 'max': 31, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'kernel_size', 'edit_method': '', 'default': 7, 'level': 0, 'min': 1, 'type': 'int'}], 'type': '', 'id': 0}
# Per-parameter lookup tables keyed by parameter name. NOTE: `min`, `max`
# and `type` intentionally shadow Python builtins -- this module is
# autogenerated by dynamic_reconfigure and must not be hand-edited.
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
#    params = []
#    params.extend(config['parameters'])
#    for group in config['groups']:
#        params.extend(extract_params(group))
#    return params
# Flatten the (possibly grouped) parameter tree and index each parameter's
# metadata by name; OR all change levels together into a combined mask.
for param in extract_params(config_description):
    min[param['name']] = param['min']
    max[param['name']] = param['max']
    defaults[param['name']] = param['default']
    level[param['name']] = param['level']
    type[param['name']] = param['type']
    all_level = all_level | param['level']
# Enum values for the `filter_type` parameter (see edit_method in
# config_description above).
Smoothing_Homogeneous_Blur = 0
Smoothing_Gaussian_Blur = 1
Smoothing_Median_Blur = 2
Smoothing_Bilateral_Filter = 3
"edward.william.terry@gmail.com"
] | edward.william.terry@gmail.com |
ccaea6d0f8d0575b21950f01102024d99e163720 | aec4ec0d25dc087087ee468cc066a46cc027314c | /[Black Watch 入群题]PWN.py | 8a5b8d006cab8186106bb9c88597cef0abea55c0 | [] | no_license | ShawRo0t/buuctf_pwn | 0305cad3d43998b695b19401cc9aaa5520c14f6b | 6019a384d8e8dda6080c7cff7101883a29ace012 | refs/heads/main | 2023-08-10T22:01:48.417090 | 2021-09-18T07:53:39 | 2021-09-18T07:53:39 | 405,001,504 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | from pwn import *
elf = ELF('./spwn')
local = 0
if local == 1:
io = process('./spwn')
#gdb.attach(io,'b * 0x08048511')
libc = ELF('/lib/i386-linux-gnu/libc.so.6')
else:
io = remote('node4.buuoj.cn',26203)
bss = 0x804a300
leave_addr = 0x08048511
write_plt = elf.plt['write']
write_got = elf.got['write']
main_addr = elf.symbols['main']
io.recvuntil("name?")
shellcode = p32(0xdeadbeef)+p32(write_plt)+p32(main_addr)+p32(1)+p32(write_got)+p32(4)
io.sendline(shellcode)
io.recvuntil("say?")
payload = 'a'*0x18+p32(bss)+p32(leave_addr)
io.send(payload)
write_addr = u32(io.recv(4))
print(hex(write_addr))
libcbase = write_addr - 0x0d43c0
system_addr = libcbase + 0x3a940
binsh = libcbase + 0x15902b
io.recvuntil("name?")
io.sendline(p32(0xdeadbeef)+p32(system_addr)+p32(0)+p32(binsh)+p32(0))
io.recvuntil("say?")
payload = 'a'*0x18+p32(bss)+p32(leave_addr)
io.send(payload)
io.interactive()
| [
"noreply@github.com"
] | noreply@github.com |
2b8777bfaeb28b13636578e33ff279714e3a5800 | 32542f04e8f20d1dde7577b57fe5967ea5850a0c | /app/src/handlers/Managment.py | 4382d6a50af523125b1ca2de6c4fe66bb1f3ad77 | [] | no_license | miner34006/recruiter_bot | 15a3422f1a95825ac96f76fd04a1f58df1cd449e | aa7f2c08a8378f58d656e771393a7a8cc15590c8 | refs/heads/master | 2022-02-25T22:43:30.349239 | 2019-10-05T05:52:09 | 2019-10-05T05:52:09 | 207,090,965 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,013 | py | # -*- coding: utf-8 -*-
from functools import partial, wraps
from datetime import datetime, timedelta
import logging
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import CommandHandler, CallbackQueryHandler, \
    ConversationHandler, MessageHandler, Filters
from app import db_session
from app.models import Channel, Inviter, ChannelInviter, Referral
from app.src.bot_constants import *
from app.src.utils import *
logger = logging.getLogger()
def payment_required(finish_conversation=False):
    """Decorator factory that gates a handler behind channel payment status.

    The wrapped handler must be triggered by a callback query whose data is
    formatted as '<action>:<channel_id>:<channel_name>'. Before the handler
    runs, the channel is loaded and checked with need_payment(); when payment
    is due the user is shown the payment keyboard instead and the handler is
    never invoked.

    :param finish_conversation: when True, return ConversationHandler.END on
        the payment-required path so an active conversation is closed
    :type finish_conversation: bool
    :return: decorator to apply to a (bot, update, ...) handler
    """
    def decorator(function):
        # wraps() preserves the handler's __name__/__doc__ for logging and
        # debugging (the original wrapper discarded them).
        @wraps(function)
        def wrapper(bot, update, *args, **kwargs):
            _, channel_id, channel_name = update.callback_query.data.split(':')
            channel = Channel.query.get(channel_id)
            if need_payment(channel):
                keyboard = get_need_payment_keyboard(
                    channel_id, channel_name)
                send_response(bot, update,
                              Messages.NEED_PAYMENT,
                              keyboard)
                if finish_conversation:
                    return ConversationHandler.END
                return None
            return function(bot, update, *args, **kwargs)
        return wrapper
    return decorator
class Managment(object):
    """Telegram handlers for channel management.

    Groups every GUI flow an admin uses to run a channel's referral program:
    listing channels, start/stop of the program, editing the recruiters'
    referral message, and composing/publishing posts into the channel.
    All methods are static; state shared across a conversation lives in the
    `user_data` dict supplied by python-telegram-bot.
    """
    @staticmethod
    def add_handlers(dispatcher):
        """Adding managment handlers to dispatcher
        :param dispatcher: dispatcher object
        :type dispatcher: telegram.dispatcher
        """
        # Publishing post into the channel (conversation: text and/or photo
        # messages until cancel/publish).
        dispatcher.add_handler(ConversationHandler(
            entry_points=[
                CallbackQueryHandler(
                    Managment.create_post,
                    pattern='{0}:.+'.format(Actions.CREATE_POST),
                    pass_user_data=True)],
            states={
                States.GET_POST_DATA: [
                    MessageHandler(
                        (Filters.text & ~ Filters.command),
                        Managment.receive_post_text,
                        pass_user_data=True),
                    MessageHandler(
                        Filters.photo,
                        Managment.receive_post_photo,
                        pass_user_data=True)
                ],
            },
            fallbacks=[CommandHandler(
                Commands.CANCEL,
                Managment.cancel_post,
                pass_user_data=True)]
        ), group=0)
        # Changing referral message (conversation: text only).
        dispatcher.add_handler(ConversationHandler(
            entry_points=[
                CallbackQueryHandler(
                    Managment.create_message,
                    pattern='{0}:.+'.format(Actions.CREATE_MESSAGE),
                    pass_user_data=True)],
            states={
                States.GET_MESSAGE: [
                    MessageHandler(
                        (Filters.text & ~ Filters.command),
                        Managment.receive_message,
                        pass_user_data=True)],
            },
            fallbacks=[CommandHandler(
                Commands.CANCEL,
                Managment.cancel_message,
                pass_user_data=True)]
        ), group=1)
        # Plain command / callback-query handlers for the management menus.
        dispatcher.add_handler(
            CommandHandler(
                Commands.MANAGMENT,
                Managment.list_managment))
        dispatcher.add_handler(
            CallbackQueryHandler(
                Managment.dummy_function,
                pattern=Actions.DUMMY))
        dispatcher.add_handler(
            CallbackQueryHandler(
                Managment.list_managment,
                pattern=Actions.MANAGMENT_LIST))
        dispatcher.add_handler(
            CallbackQueryHandler(
                Managment.channel_managment,
                pattern='{0}:.+'.format(Commands.MANAGMENT)))
        dispatcher.add_handler(
            CallbackQueryHandler(
                Managment.managment_help,
                pattern='{0}:.+'.format(Actions.MANAGEMENT_HELP)))
        # One shared handler toggles the referral flag; partial() pre-binds
        # the desired state.
        dispatcher.add_handler(
            CallbackQueryHandler(
                partial(Managment.change_referral_state, is_running=True),
                pattern='{0}:.+'.format(Actions.START_REFERRAL)))
        dispatcher.add_handler(
            CallbackQueryHandler(
                partial(Managment.change_referral_state, is_running=False),
                pattern='{0}:.+'.format(Actions.STOP_REFERRAL)))
    @staticmethod
    def list_managment(bot, update):
        """ Send or edit last message with channels list for managment purpose
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        """
        user_id = update.effective_user.id
        channels = db_session.query(Channel).filter_by(admin_id=user_id)
        if not db_session.query(channels.exists()).scalar():
            logger.info('User <{0}> has no channels for managment'
                        .format(user_id))
            return send_response(bot, update, Messages.NO_REFERRAL_CHANNELS)
        # One button per administered channel.
        buttons = [
            Buttons.get_button(Commands.MANAGMENT,
                               label=channel.username,
                               channel_id=channel.channel_id,
                               channel_name=channel.username)
            for channel in channels
        ]
        keyboard = create_inline_keyboard(buttons, width=3)
        return send_response(bot,
                             update,
                             Messages.SELECT_CHANNEL_TO_MANAGE,
                             keyboard)
    @staticmethod
    @payment_required()
    @admin_required
    def channel_managment(bot, update):
        """ Show user all available managment actions with current channel settings:
        1. Start referral program;
        2. Stop referral program;
        4. Setting the referral message;
        5. Publish post in channel;
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        """
        _, channel_id, channel_name = update.callback_query.data.split(':')
        channel = Channel.query.get(channel_id)
        keyboard = get_managment_keyboard(channel)
        text = get_managment_statistics(channel)
        return send_response(bot, update, text, keyboard)
    @staticmethod
    def managment_help(bot, update):
        """ Show user managment options help information (description)
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        """
        _, channel_id, channel_name = update.callback_query.data.split(':')
        channel = Channel.query.get(channel_id)
        # Keep the same keyboard the user came from: payment keyboard if the
        # channel is unpaid, otherwise the full management keyboard.
        if need_payment(channel):
            keyboard = get_need_payment_keyboard(channel_id,
                                                 channel_name)
        else:
            keyboard = get_managment_keyboard(channel)
        return send_response(bot, update, Messages.MANAGMENT_HELP, keyboard)
    @staticmethod
    @payment_required()
    @admin_required
    def change_referral_state(bot, update, is_running):
        """ Stopping referral program by stop managment button
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        """
        _, channel_id, channel_name = update.callback_query.data.split(':')
        channel = Channel.query.get(channel_id)
        channel.is_running = is_running
        db_session.add(channel)
        db_session.commit()
        # Re-render the management screen so the new state is visible.
        return Managment.channel_managment(bot, update)
    @staticmethod
    @payment_required(finish_conversation=True)
    @admin_required
    def create_message(bot, update, user_data):
        """ Handler to start message creation procedure with user
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        _, channel_id, channel_name = update.callback_query.data.split(':')
        channel = Channel.query.get(channel_id)
        update.callback_query.answer()
        text = Messages.MESSAGE_ADD.format(channel_name)
        keyboard = [[ButtonsLabels.PREREVIEW],
                    [ButtonsLabels.CANCEL, ButtonsLabels.SAVE]]
        reply_markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=text,
                         reply_markup=reply_markup)
        # Pre-load the current message so preview/save work before any edit.
        user_data['message'] = channel.message
        user_data['channel'] = channel
        return States.GET_MESSAGE
    @staticmethod
    def save_message(bot, update, user_data):
        """ Save "hi message" according to user input
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        channel = user_data['channel']
        channel.message = user_data['message']
        db_session.add(channel)
        db_session.commit()
        send_response(bot, update, Messages.SAVE_MESSAGE,
                      ReplyKeyboardRemove())
        keyboard = get_managment_keyboard(channel)
        text = get_managment_statistics(channel)
        send_response(bot, update, text, keyboard)
        return ConversationHandler.END
    @staticmethod
    def cancel_message(bot, update, user_data):
        """ Cancel message creation and go to managment screen
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=Messages.CANCEL_MESSAGE,
                         reply_markup=ReplyKeyboardRemove())
        channel = user_data['channel']
        keyboard = get_managment_keyboard(channel)
        text = get_managment_statistics(channel)
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=text,
                         reply_markup=keyboard)
        return ConversationHandler.END
    @staticmethod
    def preview_message(bot, update, user_data):
        """ Preview "hi message" according to previous user input
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        new_message = user_data['message']
        keyboard = InlineKeyboardMarkup([
            [InlineKeyboardButton(ButtonsLabels.SHOW_LINK,
                                  callback_data=Actions.DUMMY)]
        ])
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=new_message + Messages.INLINE_GUIDE,
                         reply_markup=keyboard)
        return States.GET_MESSAGE
    @staticmethod
    def receive_message(bot, update, user_data):
        """ Function handles message creation dialog with user
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        user_message = update.effective_message.text
        # Reply-keyboard buttons arrive as plain text; dispatch on them first.
        if user_message == ButtonsLabels.PREREVIEW:
            return Managment.preview_message(bot, update, user_data)
        if user_message == ButtonsLabels.CANCEL:
            return Managment.cancel_message(bot, update, user_data)
        if user_message == ButtonsLabels.SAVE:
            return Managment.save_message(bot, update, user_data)
        if len(update.effective_message.text) > MAXIMUM_INLINE_LENGTH:
            bot.send_message(chat_id=update.effective_chat.id,
                             parse_mode=ParseMode.HTML,
                             text=Messages.MORE_THAN_MAXIMUM_LENGTH)
            return States.GET_MESSAGE
        user_data['message'] = update.effective_message.text
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=Messages.RECEIVED)
        return States.GET_MESSAGE
    @staticmethod
    def dummy_function(bot, update):
        """ Dummy function (do nothing)
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        """
        # Answer the callback so the client stops showing a spinner.
        update.callback_query.answer()
    @staticmethod
    @payment_required(finish_conversation=True)
    @admin_required
    def create_post(bot, update, user_data):
        """ Creating post message for user
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        _, channel_id, channel_name = update.callback_query.data.split(':')
        channel = Channel.query.get(channel_id)
        update.callback_query.answer()
        reply_markup = ReplyKeyboardMarkup(
            [[ButtonsLabels.PREREVIEW], [ButtonsLabels.CANCEL,
                                         ButtonsLabels.PUBLISH]],
            resize_keyboard=True)
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=Messages.POST_CREATION.format(channel_name),
                         reply_markup=reply_markup)
        # Start from a clean slate: drop any text/image from a prior draft.
        user_data.clear()
        user_data['channel'] = channel
        return States.GET_POST_DATA
    @staticmethod
    def preview_post(bot, update, user_data):
        """ Send post preview to user
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        post_text = user_data.get('text')
        post_image = user_data.get('image')
        if not post_text and not post_image:
            bot.send_message(chat_id=update.effective_chat.id,
                             parse_mode=ParseMode.HTML,
                             text=Messages.NOTHING_TO_PREVIEW)
            return States.GET_POST_DATA
        text, reply_markup = get_post(post_text, user_data['channel'].name, post_image)
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=text,
                         reply_markup=reply_markup,
                         disable_web_page_preview=False)
        return States.GET_POST_DATA
    @staticmethod
    def publish_post(bot, update, user_data):
        """ Publishing post in channel and close the conversation
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        post_text = user_data.get('text')
        post_image = user_data.get('image')
        if not post_text and not post_image:
            bot.send_message(chat_id=update.effective_chat.id,
                             parse_mode=ParseMode.HTML,
                             text=Messages.NOTHING_TO_PUBLISH)
            return States.GET_POST_DATA
        # Publish information post in channel
        channel = user_data['channel']
        callback_data = '{0}:{1}:{2}'\
            .format(Actions.JOIN_PROGRAM,
                    channel.channel_id,
                    channel.username)
        text, reply_markup = get_post(post_text, channel.name, post_image, callback_data)
        bot.send_message(chat_id=channel.channel_id,
                         parse_mode=ParseMode.HTML,
                         text=text,
                         reply_markup=reply_markup,
                         disable_web_page_preview=False)
        # Notify user that information post was published
        send_response(bot, update, Messages.PUBLISH_POST,
                      ReplyKeyboardRemove())
        # Send user managment message with channel actions
        keyboard = get_managment_keyboard(channel)
        text = get_managment_statistics(channel)
        send_response(bot, update, text, keyboard)
        return ConversationHandler.END
    @staticmethod
    def receive_post_text(bot, update, user_data):
        """ Handler for text messages in post creation dialog
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        user_message = update.effective_message.text
        # Reply-keyboard buttons arrive as plain text; dispatch on them first.
        if user_message == ButtonsLabels.PREREVIEW:
            return Managment.preview_post(bot, update, user_data)
        if user_message == ButtonsLabels.CANCEL:
            return Managment.cancel_post(bot, update, user_data)
        if user_message == ButtonsLabels.PUBLISH:
            return Managment.publish_post(bot, update, user_data)
        user_data['text'] = user_message
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=Messages.RECEIVED)
        return States.GET_POST_DATA
    @staticmethod
    def receive_post_photo(bot, update, user_data):
        """ Handler when post image received
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        # NOTE(review): `channel` is fetched but not used in this method.
        channel = user_data['channel']
        # Telegram sends several sizes; photo[-1] is the largest one.
        file_id = update.message.photo[-1].file_id
        file_url = bot.get_file(file_id).file_path
        image_url = download_image(file_url, file_id, 'post')
        if image_url:
            user_data['image'] = image_url
            text=Messages.RECEIVED
        else:
            text=Messages.POST_IMAGE_ERROR
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=text)
        return States.GET_POST_DATA
    @staticmethod
    def cancel_post(bot, update, user_data):
        """ Cancel message creation and go to managment screen
        :param bot: bot
        :type bot: telegram.Bot
        :param update: update event
        :type update: telegram.Update
        :param user_data: user data from conversation
        :type user_data: dict
        """
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=Messages.CANCEL_POST_CREATION,
                         reply_markup=ReplyKeyboardRemove())
        channel = user_data['channel']
        keyboard = get_managment_keyboard(channel)
        text = get_managment_statistics(channel)
        bot.send_message(chat_id=update.effective_chat.id,
                         parse_mode=ParseMode.HTML,
                         text=text,
                         reply_markup=keyboard)
        return ConversationHandler.END
# !UTILS FUNCTIONS
def get_managment_statistics(channel):
    """Build the statistics text shown in a channel's management message.

    :param channel: channel object
    :type channel: models.Channel
    :return: HTML-formatted message text for the user
    :rtype: str
    """
    if channel.has_infinite_subsribe:
        payment_status = ''
    else:
        due = channel.due_date.strftime("%d-%m-%Y %H:%M")
        payment_status = '▶️ Оплачено до <b>{0}</b>\n\n'.format(due)
    if channel.is_running:
        status = 'ЗАПУЩЕНА ✅'
    else:
        status = 'ОСТАНОВЛЕНА ⛔️'
    header = Messages.HEADER.format(channel.username)
    template = ('{0}{1}'
                '▶️ Реферальная программа <b>{2}</b>\n\n'
                '️🔽 <b>Сообщение ваших рекрутеров</b> 🔽\n{3}')
    return template.format(header, payment_status, status, channel.message)
def need_payment(channel):
    """Return True when the channel's paid period has expired.

    Channels with an infinite subscription never require payment.

    :param channel: channel to check
    :type channel: Channel
    :return: whether payment is currently due
    :rtype: bool
    """
    if not channel.has_infinite_subsribe and channel.due_date < datetime.now():
        return True
    return False
def get_post(message, channel_name, img_url, inline_button_callback=None):
    """Assemble the text and inline keyboard for a channel post.

    :param message: message to publish
    :type message: basestring
    :param channel_name: channel name used inside the post guide text
    :type channel_name: basestring
    :param img_url: image url embedded via a hidden <a> tag
    :type img_url: basestring
    :param inline_button_callback: callback for the button, defaults to None
    :type inline_button_callback: basestring, optional
    :return: (text, keyboard) pair
    :rtype: tuple
    """
    button = InlineKeyboardButton(
        ButtonsLabels.JOIN_PROGRAM,
        callback_data=(inline_button_callback or Actions.DUMMY))
    keyboard = InlineKeyboardMarkup([[button]])
    guide = Messages.POST_GUIDE.format(channel_name)
    if message:
        body = message + '\n\n' + guide
    else:
        body = guide
    text = '{0}<a href="{1}">‍</a>'.format(body, img_url)
    return text, keyboard
def get_managment_keyboard(channel):
    """Compose the inline keyboard with every management action for a channel.

    The payment row is shown only when the channel lacks an infinite
    subscription.

    :param channel: channel object
    :type channel: models.Channel
    :return: InlineKeyboardMarkup
    :rtype: telegram.InlineKeyboardMarkup
    """
    cid = channel.channel_id
    cname = channel.username

    def btn(action, label):
        # Shorthand: every button carries the same channel id/name payload.
        return Buttons.get_button(action, label, cid, cname)

    rows = []
    if not channel.has_infinite_subsribe:
        rows.append([btn(Actions.START_PAYMENT, ButtonsLabels.START_PAYMENT)])
    rows.append([btn(Actions.START_REFERRAL, ButtonsLabels.START_REFERRAL),
                 btn(Actions.STOP_REFERRAL, ButtonsLabels.STOP_REFERRAL)])
    rows.append([btn(Actions.CREATE_MESSAGE, ButtonsLabels.CREATE_MESSAGE),
                 btn(Actions.CREATE_POST, ButtonsLabels.CREATE_POST)])
    rows.append([Buttons.BACK(Actions.MANAGMENT_LIST),
                 btn(Actions.MANAGEMENT_HELP, ButtonsLabels.HELP)])
    return InlineKeyboardMarkup(rows)
def get_need_payment_keyboard(channel_id, channel_name):
    """Build the keyboard shown when a channel requires payment.

    :param channel_id: channel id
    :type channel_id: basestring
    :param channel_name: channel name
    :type channel_name: basestring
    :return: InlineKeyboardMarkup
    :rtype: telegram.InlineKeyboardMarkup
    """
    pay_row = [Buttons.get_button(
        Actions.START_PAYMENT, ButtonsLabels.START_PAYMENT,
        channel_id, channel_name)]
    nav_row = [Buttons.BACK(Actions.MANAGMENT_LIST),
               Buttons.get_button(
                   Actions.MANAGEMENT_HELP,
                   ButtonsLabels.HELP,
                   channel_id, channel_name)]
    return InlineKeyboardMarkup([pay_row, nav_row])
| [
"miner34006@gmail.com"
] | miner34006@gmail.com |
ab449a29ef494fce19d11effb4c553b6278f4b3c | c72a5db9d6059b62ca258655f719ff5f1e14e58a | /mustache/stack.py | 3b5da799ca4fa4d73ad4223e5168959b95a97134 | [
"MIT"
] | permissive | ymloac/python-mustache | 0671d476dd868f397eb39f5fa8f8853e1a1367bc | ea3753696ea9886b6eb39cc5de27db7054adc069 | refs/heads/master | 2020-06-18T05:52:43.942232 | 2015-06-03T15:05:11 | 2015-06-03T15:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | # coding: utf-8
from copy import copy
class Nothing(object):
    """Sentinel type used to detect 'no value supplied' (distinct from None)."""
    pass
class Stack(list):
    """A simple wrapper for lists, providing nicer semantics for accessing the
    last element and alternate syntax for appending items.

    Calling the stack returns its top element (optionally a shallow copy);
    push() is an alias for append(). Extra positional/keyword arguments are
    forwarded to list.__init__.
    """

    def __init__(self, obj=Nothing, *args, **kwargs):
        # Bug fix: the original called super(list, self).__init__, which
        # resolves *past* `list` in the MRO (to object) and so silently
        # skipped list initialisation of any forwarded arguments.
        # super(Stack, self) initialises the underlying list correctly.
        super(Stack, self).__init__(*args, **kwargs)
        if obj is not Nothing:
            self.append(obj)

    def __call__(self, _copy=False):
        """Return the top element; a shallow copy of it when _copy is true."""
        if not _copy:
            return self[-1]
        return copy(self[-1])

    def push(self, *args, **kwargs):
        """Alias for list.append."""
        return self.append(*args, **kwargs)
| [
"peterldowns@gmail.com"
] | peterldowns@gmail.com |
681efd02151a09f2dd8421793f8f2066192e2527 | beb37be1e0712a5401fcaca4749a85b7870ea2d0 | /parablew.pyw | cc82baa449833304a6d664af1f05498ca0636e9f | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | designscience/parable_trinity | d3e27b668a8c68de123986c68617eb6d72bbf6ec | 75f5339aab47d72e0cefb73cb710c4b59dc110af | refs/heads/master | 2021-01-01T06:56:30.820919 | 2018-06-21T23:53:32 | 2018-06-21T23:53:32 | 97,554,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,469 | pyw | #!/usr/bin/python
from PythonCard import model, timer, dialog
import parclasses
import parthreads
import sequenceimport
import random
import time
import threading
import Queue
import wx
from os import path
import winsound
import pygame
import vlc
# import logging # save to a log
"""
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2016/06/22 $"
"""
""" ******************************************************************
This is the main file for the ParableW (Parable running on Windows)
program. Parable is the sequencing program developed for the Shiva Vista
project but can be applied to other sequencing applications as well.
The parablew class encapsulates the ParableW program... its objects, its
GUI controls and the event handlers for them, the program initialization
and the
1.03 - corrected mapping inconsistencies between GUI display and electrical
channels. Added updated BTIC class that ignores first few taps to reduce
human error.
1.04 - Moved lights to 2011 configuration. Included straight bank mapping.
Added joystick support for foot switch (needs testing).
1.10 - Replaced BTIC class with Beatnik class
1.11 - Wrapped BeatRecorder() (tap handler) in exception block. Force loaded
sequences to stop().
1.12a - Adds improved accuracy of sequence playback. Ensures sequence are scaled
to whole number of beats. Also, tap time is sent with tap beat for better accuracy
when program is busy. Still uses picle for saving sequence objects, but with greatly
improved ControlList.useCurrent() implementation that forces scale against ref_times
insteade of comprimising reference times.5/26/2012
1.14 - Sequences now saved as .seqx files (XML instead of pickle)
1.15 - Perpetual sync added. Sequences re-sync with beat at the start of each
sequence loop.
1.15a - bug in win2k requires str() to detect file path.
1.16 - adding tempo preload
1.2 - adding ethernet control for Raspberry Pi switch box, randomizing feature, channel bank features
1.3 - adding back ValvePort_Object (from 2010 Ein Hammer code) to record for later playback
***** Issue Notes *****
* kill (abort button) checks main thread and restarts if dead. A hack.
* consider sending time in Align, as is done with tap command
* still some small error in playback versus tap period, but better
* all sequences must be recreated due to use of pickle and changes in parclasses
* program hangs occasionally with doing graphic import of images
* auto pilot doesn't respect looping sequences (they keep looping)
****************************************************************** """
__version__ = "1.16"
class parablew(model.Background):
def __init__(self, aParent, aBgRsrc):
model.Background.__init__(self, aParent, aBgRsrc)
# This doesn't seem to bind the event
# panel = wx.Panel(self)
# panel.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.title = "Parable 2016 - v" + __version__
# self.components.chkBeeps.visible = True
# Initialize the sound mixer
pygame.mixer.init()
# foot switch current state
self.foot = False # set to True when switch is closed
# button maintenance
self.num_buttons = 35 # number of possibel sequence buttons
self.top_button = 0 # 0-based button (name) index "SEQ0"
self.auto_pilot = False # run in auto pilot mode
self.auto_pilot_rate = 50 # how frequently to play sequences 1-100
self.auto_pilot_triggered = False # currently playing a sequence
self.auto_pilot_next = time.time() # next time to trigger
# sequence maintenance
self.sequences = [""] * self.num_buttons # Sequence name
self.trigger_times = [0.0] * self.num_buttons # Sequence name
self.seq_directory = "C:\\sequences\\"
self.compose_file = 'Compose\\WORKING.seqx'
self.music_directory = "C:\\Users\\Stu\\Documents\\Burning Man\\Shiva Vista\\2016\\"
self.featured_music = "Keith_Emerson_Tribute_rev1.ogg"
# Media object for music playback, depends on VLC player
self.media = vlc.MediaPlayer(self.music_directory + self.featured_music)
self.media_time = 0
# Threading queues
self.out_queue = Queue.Queue() # send commands to main thread
self.in_queue = Queue.Queue() # get responses from main thread
self.ev_queue = Queue.Queue() # get event records from main thread
self.temp_out_queue = Queue.Queue() # send commands to temp seq thread
self.temp_ev_queue = Queue.Queue() # get event records from main thread
# create a channel map for the actual cannons
self.effect_map = parclasses.ChannelMap(24) # for effects channels
self.gui_map = parclasses.ChannelMap(24) # for GUI 'lights'
self.straight_map = parclasses.ChannelMap(24) # for straight import mapping
self.import_map = parclasses.ChannelMap(24) # for alternative graphic import mapping (else use straight map)
self.controlbox_map = parclasses.ChannelMap(18) # modified effects map
self.effect_map.addMapping(1, 2)
self.effect_map.addMapping(2, 5)
self.effect_map.addMapping(3, 8)
self.effect_map.addMapping(4, 14) # was 11
self.effect_map.addMapping(5, 17) # was 14
self.effect_map.addMapping(6, 20) # was 17
self.effect_map.addMapping(7, 1)
self.effect_map.addMapping(8, 4)
self.effect_map.addMapping(9, 7)
self.effect_map.addMapping(10, 13) # was 10
self.effect_map.addMapping(11, 16) # was 13
self.effect_map.addMapping(12, 19) # was 16
self.effect_map.addMapping(13, 3)
self.effect_map.addMapping(14, 6)
self.effect_map.addMapping(15, 9)
self.effect_map.addMapping(16, 15) # was 12
self.effect_map.addMapping(17, 18) # was 15
self.effect_map.addMapping(18, 21) # was 18
self.effect_map.addMapping(19, 10) # was 19
self.effect_map.addMapping(20, 11) # was 20
self.effect_map.addMapping(21, 23) # was 21
self.effect_map.addMapping(22, 22) # was 22
self.effect_map.addMapping(23, 0)
self.effect_map.addMapping(24, 0)
# map that works for GUI display "lights"
self.gui_map.addMapping(1, 2)
self.gui_map.addMapping(2, 5)
self.gui_map.addMapping(3, 8)
self.gui_map.addMapping(4, 11)
self.gui_map.addMapping(5, 14)
self.gui_map.addMapping(6, 17)
self.gui_map.addMapping(7, 1)
self.gui_map.addMapping(8, 4)
self.gui_map.addMapping(9, 7)
self.gui_map.addMapping(10, 10)
self.gui_map.addMapping(11, 13)
self.gui_map.addMapping(12, 16)
self.gui_map.addMapping(13, 3)
self.gui_map.addMapping(14, 6)
self.gui_map.addMapping(15, 9)
self.gui_map.addMapping(16, 12)
self.gui_map.addMapping(17, 15)
self.gui_map.addMapping(18, 18)
self.gui_map.addMapping(19, 19)
self.gui_map.addMapping(20, 20)
self.gui_map.addMapping(21, 21)
self.gui_map.addMapping(22, 22)
self.gui_map.addMapping(23, 0)
self.gui_map.addMapping(24, 0)
# map for importing direct to channels
self.straight_map.addMapping(1, 1)
self.straight_map.addMapping(2, 2)
self.straight_map.addMapping(3, 3)
self.straight_map.addMapping(4, 4)
self.straight_map.addMapping(5, 5)
self.straight_map.addMapping(6, 6)
self.straight_map.addMapping(7, 7)
self.straight_map.addMapping(8, 8)
self.straight_map.addMapping(9, 9)
self.straight_map.addMapping(10, 10)
self.straight_map.addMapping(11, 11)
self.straight_map.addMapping(12, 12)
self.straight_map.addMapping(13, 13)
self.straight_map.addMapping(14, 14)
self.straight_map.addMapping(15, 15)
self.straight_map.addMapping(16, 16)
self.straight_map.addMapping(17, 17)
self.straight_map.addMapping(18, 18)
self.straight_map.addMapping(19, 19)
self.straight_map.addMapping(20, 20)
self.straight_map.addMapping(21, 21)
self.straight_map.addMapping(22, 22)
self.straight_map.addMapping(23, 0)
self.straight_map.addMapping(24, 0)
# map for importing direct to channels
self.controlbox_map.addMapping(1, 1)
self.controlbox_map.addMapping(2, 2)
self.controlbox_map.addMapping(3, 5)
self.controlbox_map.addMapping(4, 8)
self.controlbox_map.addMapping(5, 1)
self.controlbox_map.addMapping(6, 14)
self.controlbox_map.addMapping(7, 17)
self.controlbox_map.addMapping(8, 11)
self.controlbox_map.addMapping(9, 7)
self.controlbox_map.addMapping(10, 4)
self.controlbox_map.addMapping(11, 10)
self.controlbox_map.addMapping(12, 13)
self.controlbox_map.addMapping(13, 16)
self.controlbox_map.addMapping(14, 3)
self.controlbox_map.addMapping(15, 6)
self.controlbox_map.addMapping(16, 9)
self.controlbox_map.addMapping(17, 12)
self.controlbox_map.addMapping(18, 15)
self.controlbox_map.addMapping(19, 19)
self.controlbox_map.addMapping(20, 20)
self.controlbox_map.addMapping(21, 21)
self.controlbox_map.addMapping(22, 22)
self.controlbox_map.addMapping(23, 0)
self.controlbox_map.addMapping(24, 0)
# map for alternative import mapping
# ironically, this provides straight column-to-channel mapping
# using straight_map (or no map) you get 6 centers, 6 lefts, 6 rights, 4 talons
# interesting note: this is the inverse of gui_map
self.import_map.addMapping(2, 1)
self.import_map.addMapping(5, 2)
self.import_map.addMapping(8, 3)
self.import_map.addMapping(11, 4)
self.import_map.addMapping(14, 5)
self.import_map.addMapping(17, 6)
self.import_map.addMapping(1, 7)
self.import_map.addMapping(4, 8)
self.import_map.addMapping(7, 9)
self.import_map.addMapping(10, 10)
self.import_map.addMapping(13, 11)
self.import_map.addMapping(16, 12)
self.import_map.addMapping(3, 13)
self.import_map.addMapping(6, 14)
self.import_map.addMapping(9, 15)
self.import_map.addMapping(12, 16)
self.import_map.addMapping(15, 17)
self.import_map.addMapping(18, 18)
self.import_map.addMapping(19, 19)
self.import_map.addMapping(20, 20)
self.import_map.addMapping(21, 21)
self.import_map.addMapping(22, 22)
self.import_map.addMapping(23, 0)
self.import_map.addMapping(24, 0)
# create the temp sequence object - used to try out sequences
self.seq = parclasses.ControlList()
self.seq.name = "Temp Sequence"
# create valveport (output) objects
self.vp1 = parclasses.ValvePort_GUI(22, 6, self.components.OutputCanvas1)
self.vp1.setMap(self.gui_map)
# position sequencing "lights" on the screen
for i in range(0, 6):
ch = (i*3) # 0-based channel index
"""
self.vp1.set_light(ch+1, (100 * i + 50, 20))
self.vp1.set_light(ch+2, (100 * i + 25, 20))
self.vp1.set_light(ch+3, (100 * i , 20))
self.vp1.set_light(ch+1, (100 * i + 25, 50))
self.vp1.set_light(ch+2, (100 * i + 25, 25))
self.vp1.set_light(ch+3, (100 * i + 25, 0))
"""
self.vp1.set_light(ch+1, ((100 * i), 25))
self.vp1.set_light(ch+2, ((100 * i) + 20, 50))
self.vp1.set_light(ch+3, ((100 * i) + 40, 25))
self.vp1.set_light(19, (630, 35))
self.vp1.set_light(20, (630, 8))
self.vp1.set_light(21, (665, 8))
self.vp1.set_light(22, (665, 35))
# create screen buttons
for i in range(self.num_buttons):
self.components['SEQ' + str(i)] = {'type':'Button', 'name':'SEQ' + str(i), 'id':i, 'position':(20 +(152 * (i%5)), 150 + (40 * int(i/5))), 'size':(120, 30), 'label':'Sequence ' + str(i+1), 'command':'seqBtn' + str(i+1), 'visible':False}
# Other output objects
self.vp2 = parclasses.ValvePort_Parallel(24, 6)
self.vp2.setMap(self.effect_map)
# self.vp2 = parclasses.ValvePort_Ethernet(18, 6)
# self.vp2.setMap(self.straight_map)
# self.vp2.setMap(self.controlbox_map)
# TODO: testing only!
# for j in range(1,5):
# for i in range(1, 18):
# self.vp2.oneChannelExec(i)
# sleep(0.2)
self.vp3 = parclasses.ValvePort_Beep() # not very good
self.vp3.setMap(self.effect_map)
self.vp3.mute = True
# ValvePort_Object records performance to a file
self.vp4 = parclasses.ValvePort_Object(24, 6, self.seq) # capture to file
self.vp4.setMap(self.straight_map)
# temp sequence rate scaling factor
# self.scaleFactor = 1.0
self.scaleFactor = 1.10
# add output objects to an output bank
self.vpb = parclasses.ValvePortBank()
self.vpb.addPort(self.vp1)
self.vpb.addPort(self.vp2)
self.vpb.addPort(self.vp3)
self.vpb.addPort(self.vp4)
self.vpb.execute() # show the lights
# Create initial temp sequence
for i in range(12):
ch = i + 1
# li = parclasses.spiral(10, 22, 3, 12, 3)
# li = parclasses.beep(ch, 2, 2, 0, 0)
li = parclasses.randy(64, 22, 1, 2)
self.seq.append(li)
self.seq.sortEvents()
# Create the threaded sequence handler (ControlBank)
self.cb = parthreads.ControlBank("C:\\sequences\\")
# Create thread objects
self.ttemp = threading.Thread(target=self.seq, args=(self.temp_ev_queue,self.temp_out_queue))
self.tmain = threading.Thread(target=self.cb, args=(self.ev_queue,self.out_queue,self.in_queue))
# Load bank folder list
self.bank_index = 0
self.banks = None
if path.exists('banks.txt'):
# Load bank paths from the file
self.banks = []
with open('banks.txt', 'r') as f:
for eachLine in f:
if len(eachLine) > 2:
self.banks.append(eachLine.splitlines()[0])
# check banks paths
# TODO: check this above so partial banks can be loaded
for bank in self.banks:
if path.exists(path.join(self.seq_directory, bank)) is False:
print "Bank at path {0} not found. Banks not loaded.".format(path.join(self.seq_directory, bank))
self.banks = None
if self.banks:
print "{0} banks loaded from file".format(len(self.banks))
else:
print "banks.txt not found"
"""
# create joystick object
pygame.joystick.init()
if pygame.joystick.get_count() > 0:
self.stick = pygame.joystick.Joystick(0)
self.stick.init()
print "Joystick detected with " + str(self.stick.get_numbuttons()) + " buttons"
else:
self.stick = None
print "No joystick"
"""
# When exiting the program, do some cleanup
    def __exit__(self, exc_type, exc_value, traceback):
        """Stop music playback and shut down the pygame sound mixer.

        NOTE(review): __exit__ only runs when the instance is used as a
        context manager; confirm the GUI framework actually invokes this
        on program exit.
        """
        self.media.stop()
        pygame.mixer.quit()
# key press handler (for when I figure out how to bind it
def on_key_down(self, event):
keycode = event.GetKeyCode()
print "Key pressed " + str(keycode)
if keycode == wx.WXK_F1:
self.on_pbTap_mouseDown(event)
event.Skip()
    def on_initialize(self, event):
        """Initialize the UI components, start the GUI refresh timer and
        the main sequencer thread, and load the default sequence bank."""
        self.components.slSeqRate.setRange(1, 100)
        self.components.slSeqRate.value = 50
        self.components.chkLoop.checked = False
        self.myTimer = timer.Timer(self.components.OutputCanvas1, -1) # create a timer
        self.myTimer.Start(5)  # 5 ms tick drives on_OutputCanvas1_timer
        self.components.btnHello.start_time = time.time() # to establish the variable
        # bind key down event to handler DOES NOT WORK!!
        # self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
        # start and initialize main thread
        self.tmain.start()
        # self.out_queue.put("loadbank|drumming up the heat")
        # empty bank name: load the default/top-level bank
        self.out_queue.put("loadbank|")
        # the triple-quoted block below is a no-op string expression kept
        # as disabled debug code (lists all GUI components)
        """
        complist = self.findAllComponents()
        for comp in complist:
            print comp
            print " "
        self.components.__setitem__("parable01", "wxCheckBox")
        """
def on_idle(self, event):
while self.ev_queue.empty() is False:
ev = self.ev_queue.get()
self.vpb.setEventExec(ev)
while self.temp_ev_queue.empty() is False:
ev = self.temp_ev_queue.get()
self.vpb.setEventExec(ev)
while self.in_queue.empty() is False:
self.processCommand(self.in_queue.get())
    def processCommand(self, cmdstr):
        """Dispatch a pipe-delimited command string from the main thread.

        Protocol (cmd[0] is the verb, cmd[1] an optional argument):
          kill            - reset all valve outputs
          started|NAME    - color NAME's button red (running)
          stopped|NAME    - restore NAME's button colors (stopped)
          clearbank       - hide all sequence buttons
          newseq|NAME     - assign NAME to the next free button
          beat / beaton / beatoff - drive the beat indicator light
          exception|MSG / message|MSG - show MSG in the window title
        """
        # print ">>> " + cmdstr
        cmd = cmdstr.split("|")
        # kill - kill the cannons
        if cmd[0] == "kill":
            self.vpb.reset()
        # started - color button to indicate running status
        elif cmd[0] == "started":
            for i in range(self.num_buttons):
                if self.sequences[i] == cmd[1]:
                    btn = "SEQ" + str(i)
                    self.components[btn].backgroundColor = (255,0,0,255)
                    self.components[btn].foregroundColor = (255,255,255,255)
                    break
            if (self.auto_pilot == True):
                self.auto_pilot_triggered = True # don't play another seq until done
        # stopped - color button to indicate stopped status
        elif cmd[0] == "stopped":
            for i in range(self.num_buttons):
                if self.sequences[i] == cmd[1]:
                    btn = "SEQ" + str(i)
                    self.components[btn].backgroundColor = (236, 233, 216, 255)
                    self.components[btn].foregroundColor = (0, 0, 0, 0)
                    break
            if (self.auto_pilot is True):
                self.arm_auto_pilot(); # sequence done, start another one
        # clearbank - hide sequence buttons
        elif cmd[0] == "clearbank":
            for i in range(self.num_buttons):
                btn = "SEQ" + str(i)
                self.components[btn].visible = False
            self.top_button = 0
        # newseq - add a new sequence to the next free button slot
        elif cmd[0] == "newseq":
            if self.top_button < self.num_buttons:
                btn = "SEQ" + str(self.top_button)
                self.sequences[self.top_button] = cmd[1]
                self.components[btn].label = cmd[1]
                self.components[btn].visible = True
                self.top_button += 1
        # beat - toggle beat light (stays dark unless Use Beat is checked)
        elif cmd[0] == "beat": # toggle the state of the beat light
            self.components.ImageButton1.visible = \
                not self.components.ImageButton1.visible and \
                self.components.chkUseBeat.checked
        # beaton - turn on beat light (only if Use Beat is checked)
        elif cmd[0] == "beaton":
            if self.components.chkUseBeat.checked:
                self.components.ImageButton1.visible = True
            else:
                self.components.ImageButton1.visible = False # always off
        # beatoff - turn off beat light
        elif cmd[0] == "beatoff":
            self.components.ImageButton1.visible = False
        # exception - report exception in the window title
        elif cmd[0] == "exception":
            self.title = "Exception: " + cmd[1]
        # message - display message in the window title
        elif cmd[0] == "message":
            self.title = str(cmd[1])
def on_pbTap_mouseDown(self, event):
""" process a tap beat to keep time with music """
self.out_queue.put("tap|" + str(time.time())) # new 7/2012 - sending tap time
if self.components.chkUseBeat.checked is False:
self.components.chkUseBeat.checked = True
self.out_queue.put("usebeat|yes")
def on_align_mouseDown(self, event):
""" realign the start_time for the tap beat """
self.out_queue.put("align|" + str(time.time()))
def on_OutputCanvas1_timer(self, event):
if self.auto_pilot == True and self.auto_pilot_triggered == False:
if time.time() > self.auto_pilot_next:
self.auto_pilot_trigger() # start another sequence
    def on_chkLoop_mouseClick(self, event):
        # mirror the Loop checkbox state onto the temp sequence's looping flag
        self.seq.looping = self.components.chkLoop.checked
# def on_chkBeeps_mouseClick(self, event):
# self.vp3.mute = not self.components.chkBeeps.checked
def on_chkUseBeat_mouseClick(self, event):
if self.components.chkUseBeat.checked is True:
self.out_queue.put("usebeat|yes")
else:
self.out_queue.put("usebeat|no")
# def on_btnAutoPilot_mouseClick(self, event):
# # self.components.AutoPilotBox.visible = self.components.btnAutoPilot.checked
# self.auto_pilot = self.components.btnAutoPilot.checked
# self.out_queue.put("stop|") # stop all current activity
# if (self.auto_pilot is True):
# self.components.btnAutoPilot.backgroundColor = (255,0,0,255)
# self.components.btnAutoPilot.foregroundColor = (255,255,255,255)
# self.arm_auto_pilot() # set the next auto pilot fire time
# else:
# self.components.btnAutoPilot.backgroundColor = (255,255,255,255)
# self.components.btnAutoPilot.foregroundColor = (0,0,0,255)
    def on_btnKill_mouseClick(self, event):
        """Emergency stop: halt all sequences, disable beat sync and auto
        pilot, reset every valve output, and restart the main thread if it
        has died (a hack, per the module notes)."""
        self.out_queue.put("stop|")
        self.components.chkUseBeat.checked = False
        self.out_queue.put("usebeat|no")
        self.vpb.reset()
        self.auto_pilot = False
        # self.components.btnAutoPilot.checked = False
        # self.components.btnAutoPilot.backgroundColor = (255,255,255,255)
        # self.components.btnAutoPilot.foregroundColor = (0,0,0,255)
        if self.tmain.isAlive():
            self.title = "Thread is alive"
        else:
            # main thread died: recreate and restart it, then stop everything
            self.title = "Threads dead - attempting restart"
            self.tmain = threading.Thread(target=self.cb, args=(self.ev_queue,self.out_queue,self.in_queue))
            self.tmain.start()
            self.out_queue.put("stop|")
"""
def on_btnHello_mouseClick(self, event):
#def on_btnHello_mouseDown(self, event):
#print self.cl
if (self.seq.running() == True):
#if self.t1.isAlive() == True:
self.out_queue.put("stop")
self.seq.stop()
else: # sequence is not yet running
self.seq.scaleToBeat(parclasses.TimeCode(15))
self.seq.start()
# test threaded operation
#while self.out_queue.empty() == False:
# self.out_queue.get()
#self.components.btnHello.start_time = time.time()
#self.t1.start()
"""
    def on_btnHello_mouseClick(self, event):
        """Toggle playback of the temp sequence in its own thread.

        If the temp thread is alive, ask it to stop; otherwise drain its
        command queue and start a fresh thread (a finished Thread object
        cannot be restarted, so it is recreated each time).
        """
        if self.ttemp.isAlive() is True:
            self.temp_out_queue.put("stop")
        else:
            # self.seq.scaleToBeat(parclasses.TimeCode(15))
            # drain any stale commands before starting
            while self.temp_out_queue.empty() is False:
                self.temp_out_queue.get()
            self.components.btnHello.start_time = time.time()
            # destroy the temp thread and recreate
            # "you can't stop a thread object and restart it. Don't try"
            del self.ttemp
            self.ttemp = threading.Thread(target=self.seq, args=(self.temp_ev_queue, self.temp_out_queue))
            self.ttemp.start()
"""
def on_btnHello_mouseUp(self, event):
if (time.time() - self.components.btnHello.start_time) > 0.2:
if (self.seq.running() == True):
self.out_queue.put("stop")
#self.seq.stop()
event.skip()
"""
# ugly but functional - redirect sequence button mouse events to
# single handler functions. Future: find a better way to bind the
# events to the handler
def on_SEQ0_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ0_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ1_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ1_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ2_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ2_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ3_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ3_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ4_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ4_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ5_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ5_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ6_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ6_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ7_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ7_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ8_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ8_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ9_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ9_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ10_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ10_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ11_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ11_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ12_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ12_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ13_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ13_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ14_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ14_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ15_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ15_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ16_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ16_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ17_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ17_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ18_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ18_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ19_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ19_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ20_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ20_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ21_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ21_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ22_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ22_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ23_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ23_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ24_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ24_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ25_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ25_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ26_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ26_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ27_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ27_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ28_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ28_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ29_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ29_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ30_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ30_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ31_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ31_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ32_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ32_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ33_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ33_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ34_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ34_mouseUp(self, event): self.seqCmdUp(event)
def on_SEQ35_mouseDown(self, event): self.seqCmdDown(event)
def on_SEQ35_mouseUp(self, event): self.seqCmdUp(event)
def seqCmdDown(self, event):
""" This is called on the down click of all sequence buttons.
it toggles the sequence state (toggle handled in ControlBank) """
if (self.auto_pilot is False):
# print"toggle|" + self.sequences[event.target.id]
self.out_queue.put("toggle|" + self.sequences[event.target.id])
self.trigger_times[event.target.id] = time.time()
event.Skip()
def seqCmdUp(self, event):
""" Depending on how long the button was pressed, either stop
the sequence or does nothing """
if (self.auto_pilot is False):
if time.time() - self.trigger_times[event.target.id] > 0.2:
# print "stop|" + self.sequences[event.target.id]
self.out_queue.put("stop|" + self.sequences[event.target.id])
event.Skip()
def on_fileImport_command(self, event):
if (self.seq.running() is False):
# self.components.slSeqRate.value = 50
self.components.slSeqRate.value = 92
# self.scaleFactor = 1.0
self.scaleFactor = 0.25 # goo typical rate
aStyle = wx.OPEN | wx.FD_CHANGE_DIR
result = dialog.fileDialog(self, 'Import', self.seq_directory, '', "*.jpg", aStyle)
if result.accepted is True:
del self.seq
gi = sequenceimport.GraphicImport()
self.seq = gi.import_sequence(result.paths[0], 22, 10, 250, self.import_map)
# self.seq = gi.import_sequence(result.paths[0], 22, 10, 250, self.straight_map) # import from old gfx
self.seq.scaleOnNext(self.scaleFactor) # @@@ this is a hack to save time - pre-scale when loading in
else:
self.temp_out_queue.put("stop")
# self.seq.stop()
print "Sequence was running. lease try again"
def on_fileSave_command(self, event):
""" Saves sequence as XML .seqx file """
aStyle = wx.SAVE | wx.HIDE_READONLY | wx.OVERWRITE_PROMPT
result = dialog.fileDialog(self, 'Save Sequence', self.seq_directory, self.seq.name, "*.seqx", aStyle )
if result.accepted is True:
self.seq.saveXML(result.paths[0])
    def on_fileOpen_command(self, event):
        """Open an XML .seqx sequence file into the temp sequence.

        Only allowed while the temp sequence is stopped; otherwise the
        sequence is asked to stop and the user must retry.
        """
        if self.seq.running() is False:
            self.components.slSeqRate.value = 50
            dialog_style = wx.OPEN | wx.FD_CHANGE_DIR
            result = dialog.fileDialog(self, 'Open Sequence', self.seq_directory, '', "*.seqx", dialog_style)
            if result.accepted is True:
                del self.seq
                self.seq = parclasses.ControlList(result.paths[0])
        else:
            self.temp_out_queue.put("stop")
            # self.seq.stop()
            print "Sequence was running. Please try again"
    def on_fileOpenBank_command(self, event):
        """Load a sequence bank from a directory chosen by the user, then
        read the bank's tempo.txt and send the tempo to the main thread."""
        if self.seq.running() is False:
            self.components.slSeqRate.value = 50
            # self.scaleFactor = 1.0
            aStyle = wx.DD_MUST_EXIST | wx.RESIZE_BORDER # | wx.DD_CHANGE_DIR
            result = dialog.directoryDialog(self, 'Open Bank', self.seq_directory, aStyle)
            if result.accepted is True:
                self.out_queue.put("clearbank")
                # pass the bank path relative to the sequence directory
                self.out_queue.put("loadbank|" + result.path[len(self.seq_directory):])
                # read the tempo from a file
                tempofn = "" + result.path + "\\tempo.txt"
                # NOTE(review): the title is preset to "... not found" before
                # the open; it is overwritten on success.  Doubling the
                # backslashes via replace() looks suspect -- on Windows a
                # single backslash is already a valid separator; confirm.
                self.title = tempofn.replace("\\", "\\\\") + " not found"
                with open(tempofn.replace("\\", "\\\\"), "r") as tempofile:
                    if tempofile is not None:
                        tempo = tempofile.readline()
                        self.title = tempo
                        self.out_queue.put("settempo|" + tempo)
        else:
            self.temp_out_queue.put("stop")
            print "Sequence was running. Please try again"
def on_btnNextBank_mouseClick(self, event):
if self.banks:
if self.bank_index < len(self.banks) - 1:
self.bank_index += 1
self.open_bank(self.bank_index)
def on_btnPrevBank_mouseClick(self, event):
if self.banks:
if self.bank_index > 0:
self.bank_index -= 1
self.open_bank(self.bank_index)
def open_bank(self, bank_index):
self.title = ""
if self.banks is not None and 0 <= bank_index < self.banks.count:
if self.seq.running():
self.temp_out_queue.put("stop")
else:
self.components.slSeqRate.value = 50
self.out_queue.put("clearbank")
self.out_queue.put("loadbank|" + self.banks[bank_index])
self.title = "Bank \"{0}\" loaded ".format(self.banks[bank_index])
# read the tempo from a file
tempofn = path.join(self.seq_directory, self.banks[bank_index], "tempo.txt")
with open(tempofn, "r") as tempofile:
if tempofile is not None:
tempo = tempofile.readline()
self.title += tempo
self.out_queue.put("settempo|" + tempo)
    def on_slSeqRate_mouseUp(self, event):
        # slider 1..100 maps to scale factor 5.0 .. 0.05 (higher = faster);
        # applied to the temp sequence on its next pass
        self.scaleFactor = 0.05 * (101 - self.components.slSeqRate.value)
        self.seq.scaleOnNext(self.scaleFactor)
        # NOTE(review): lowercase skip() -- other handlers call event.Skip();
        # confirm which spelling the framework expects here
        event.skip()
def on_close(self, event):
# command threads to stop then wait
if self.tmain.isAlive():
self.out_queue.put("die")
self.tmain.join() # wait for thread to finish
if self.ttemp.isAlive():
self.temp_out_queue.put("die")
self.ttemp.join() # wait for thread to finish
print "Exiting program"
event.Skip()
def arm_auto_pilot(self):
# get auto pilot ready to arm
self.auto_pilot_triggered = False
nexttime = .05 * (101 - self.auto_pilot_rate) * random.randint(1, 10)
print "Next sequence in " + str(nexttime)
self.auto_pilot_next = time.time() + nexttime
# self.auto_pilot_next = time.time() + 1 # testing only
def auto_pilot_trigger(self):
""" run a random sequence now """
self.auto_pilot_triggered = True
nextseq = random.randint(0, self.top_button - 1)
btn = "SEQ" + str(nextseq)
print "Next " + btn + " " + self.components[btn].label
self.out_queue.put("loop|" + self.components[btn].label + "|off")
self.out_queue.put("start|" + self.components[btn].label)
    def on_btnStart_mouseClick(self, event):
        """Start a composition take: begin featured-music playback and
        start the ValvePort_Object recorder capturing live taps."""
        # give a heads-up beep sequence
        # i = 4
        # while i > 0:
        #     winsound.Beep(440, 4)
        #     time.sleep(1)
        #     i -= 1
        # winsound.Beep(880, 4)
        # self.seq = parclasses.ControlList()
        # self.vp4.cl = self.seq
        # load previous sequence from external file
        # try:
        #     self.vp4.cl.loadXML(self.seq_directory + self.compose_file)
        #     self.seq.loadXML(self.seq_directory + self.compose_file)
        # except IOError:
        #     print 'Unable to read working sequence file'
        self.media.play()
        self.vp4.start()
def on_btnStop_mouseClick(self, event):
""" open a sequence file if exists, start ValvePort_Object capture """
self.vp4.stop()
self.media_time = self.media.get_time()
self.media.stop()
print "Music stopped at: " + str(self.media_time)
del self.seq
self.seq = parclasses.ControlList(self.vp4.cl) # ??? doesn't seem to work
    def on_btnSave_mouseClick(self, event):
        """Stop the take and save the captured sequence: write it to the
        working compose file plus a timestamped session file, then reload
        the working file into the temp sequence."""
        self.vp4.stop()
        self.media_time = self.media.get_time()
        print "Music stopped at: " + str(self.media_time)
        self.media.stop()
        self.vp4.cl.reconcile() # TODO: use reconcile instead?
        self.vp4.cl.saveXML(self.seq_directory + self.compose_file)
        self.vp4.cl.saveXML(self.seq_directory + 'Compose/SESSION.' + str(time.time()) + '.seqx') # session file
        # load previous sequence from external file
        try:
            self.seq.loadXML(self.seq_directory + self.compose_file)
        except IOError:
            print 'Unable to read working sequence file'
    def on_btnLoad_mouseClick(self, event):
        """Load the working compose sequence into the temp sequence and the
        ValvePort_Object recorder (both point at the same ControlList)."""
        # TODO: this is messed up. Revisit this. Do we need to del self.seq? Just reload it? start from scratch
        self.seq = parclasses.ControlList()
        self.vp4.cl = self.seq # TODO: override eq operator to make the same, not to point to the same object
        # load previous sequence from external file
        self.seq.loadXML(self.seq_directory + self.compose_file)
        self.seq.sortEvents()
        print self.seq
def on_btnNew_mouseClick(self, event):
""" Overwrites the compose file with a new, blank file """
self.vp4.cl.clear()
self.vp4.cl.saveXML(self.seq_directory + self.compose_file)
self.seq.clear()
if __name__ == '__main__':
    # launch the PythonCard application with parablew as the main window
    app = model.Application(parablew)
    app.MainLoop()
| [
"studa@design-sci.com"
] | studa@design-sci.com |
a4627d0ae60aa09a0862fc95b1b1f89531f6b959 | 65ad9ec657c86528e0c8b2910fd2924eb02ba51e | /tests.py | 8e9c6121ecc43784938a0f2710f1eb3862e35325 | [
"MIT"
] | permissive | duchri66/openshift-sandbox | c81a8bc41b103634601d6a4bf9cfc4c14a4e9a04 | a70d90870f325e1ec10fe94db6e75fa018b2b155 | refs/heads/master | 2020-11-25T03:11:05.106281 | 2018-07-25T08:24:08 | 2018-07-25T08:24:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | # An example test script.
# Run this with
# pytest tests.py
from wsgi import hello
def test_default_route_method_says_hello():
    """The default route must greet with the canonical message."""
    expected = "Hello World!"
    assert hello() == expected
| [
"noreply@github.com"
] | noreply@github.com |
ded3dd38c264f610a411c333630b35c531493ddd | 0fc48ca162a66163e26e0d0c7949fc07323d41a6 | /checkbox451_bot/handlers/auth.py | a1ec3c2a133ad28a20bec98b4c66fc5a4849f50c | [
"MIT"
] | permissive | wingsergey/checkbox451_bot | 0a471ec75fc13c120016d3e9c31d3ea512406e80 | bba01df0243947f29623601308fc778b47881a21 | refs/heads/master | 2023-04-03T07:05:16.541605 | 2021-03-28T17:02:26 | 2021-03-28T17:02:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | from aiogram.types import Message
from checkbox451_bot import auth, bot, kbd
from checkbox451_bot.handlers import helpers
def init(dispatcher):
    """Register the contact-based sign-in handler on *dispatcher*."""
    @dispatcher.message_handler(content_types=["contact"])
    @helpers.error_handler
    async def contact(message: Message):
        """Sign the sender in via the shared contact.

        If the user already has roles, reply with them; otherwise tell the
        user an administrator must confirm and broadcast the new-user
        notice to all admins.  (Replies are in Ukrainian: "Roles: ..." and
        "An administrator must confirm".)
        """
        if message.contact is not None:
            if user := auth.sign_in(message.contact):
                if user.roles:
                    # already authorized: show roles and drop the keyboard
                    return await message.answer(
                        f"Ролі: {user.roles}",
                        reply_markup=kbd.remove,
                    )
                await message.answer(
                    "Адміністратор має підтвердити",
                    reply_markup=kbd.remove,
                )
                # notify every admin about the pending user
                await helpers.broadcast(
                    message.chat.id,
                    auth.ADMIN,
                    bot.obj.send_message,
                    f"new user: {user}",
                )
| [
"mm@m10e.net"
] | mm@m10e.net |
226191874c0df731d564c4a262b4ade01dcfb8b8 | 4d44d6b3918f8a731566551a9524294f02aeb1de | /Projects/solution/code/rv2coe.py | e5907dba3ef3f4a7185e4dcf9b19ab9e93c12a1f | [] | no_license | skulumani/MAE3145 | 6347a196f3db23c7d5a7d4e2ec02d7ec8f6c6b3c | a0f4eaf51e8ececbd0500279df31f5952651eecc | refs/heads/master | 2021-10-11T00:27:31.535750 | 2019-01-19T19:50:32 | 2019-01-19T19:50:32 | 93,906,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | """This script will input RV1.txt and print to another text file the output
that the students should generate
"""
from astro import kepler, constants, tle, time
import numpy as np
import pdb
def solution(infile='./data/RV1.txt', outfile='./data/RV1_solution.txt'):
    """Generate the solution output the students should produce.

    Each line of *infile* holds six floats: a position vector followed by a
    velocity vector (presumably km and km/s in ECI -- confirm with the
    assignment).  Every state vector is converted to classical orbital
    elements and the formatted orbit properties are written to *outfile*.

    NOTE(review): the loop stops at the first blank line, since
    readline().split() returns an empty list both at EOF and on a blank
    line -- confirm input files contain no interior blank lines.
    """
    mu = constants.earth.mu
    output_string = ''
    with open(infile, 'r') as f:
        line = f.readline().split()
        while line:
            r_in = np.array([float(i) for i in line[0:3]])
            v_in = np.array([float(i) for i in line[3:6]])
            # convert to coes
            p, a, ecc, inc, raan, arg_p, nu, _, _, _, _ = kepler.rv2coe(r_in, v_in, mu)
            # compute orbit properties
            prop_string = kepler.orbit_el(p, ecc, inc, raan, arg_p, nu, mu)
            # accumulate output for the text file
            output_string += prop_string
            # read the next line
            line = f.readline().split()
    with open(outfile, 'w') as f:
        f.write(output_string)
def generate_data(tle_file='./data/RV2COE_tle.txt', outfile='./data/RV2COE_tle_rv.txt'):
"""Generate test inputs and outputs for the students
Uses a saved TLE file - can get more using tle.get_tle_spacetrack(outfile, 'rv2coe')
"""
jd_start, _ = time.date2jd(2018, 10, 6, 0, 0, 0) # time in UTC
jd_end, _ = time.date2jd(2018, 10, 13, 0, 0, 0)
jd_step = 10 / (24 * 60)
jd_span = np.arange(jd_start, jd_end, jd_step)
# read some TLEs and get the state vector and write to a file
sats = tle.get_tle(tle_file)
# get the orbital elements for each satellite in TLE list
with open(outfile, 'w') as f:
for sat in sats:
# propogate for several time periods and get the r, v vectors
sat.tle_update(jd_span)
r_arr = sat.r_eci
v_arr = sat.v_eci
# format and write to a text file
for r, v in zip(r_arr[0::10], v_arr[0::10]):
f.write('{:16.6f} {:16.6f} {:16.6f} {:16.6f} {:16.6f} {:16.6f}\n'.format(r[0], r[1], r[2], v[0], v[1], v[2]))
| [
"shanks.k@gmail.com"
] | shanks.k@gmail.com |
123b813e7965accab206ba33a2504c62cda076c9 | 251116375baffdf0a60ab469d6a0a06b739044fc | /src/Display/ConsoleInputs.py | 09cb38c555b03405cc3c87d9abd6ebcf2a9d6b16 | [] | no_license | rosspow49/GroupSixProject | 24939dc1f48cc08c8f37e5f43cab583f6717c495 | cf5479592bcc56a1fcc00e85e0595239ef6be388 | refs/heads/main | 2023-03-30T14:00:27.872928 | 2021-03-31T14:54:23 | 2021-03-31T14:54:23 | 346,412,515 | 0 | 0 | null | 2021-03-31T14:54:24 | 2021-03-10T16:04:00 | Python | UTF-8 | Python | false | false | 521 | py | def getFileToPlay(fileList, logger):
validFileIdentifier = False
while not validFileIdentifier:
fileIdentifier = logger.takeInput("Please enter the track number:")
try:
fileIdentifier = int(fileIdentifier)
if fileIdentifier not in range(len(fileList)):
raise ValueError
fileName = fileList[fileIdentifier]
validFileIdentifier = True
except:
logger.showOutput("That is an invalid track number")
return fileName | [
"2538781I@student.gla.ac.uk"
] | 2538781I@student.gla.ac.uk |
7c3c8a7192a94802f247915a4206093631f27e17 | dd1ab4751e34f200c1b6928d310ade448c73d7e4 | /tools/training.py | ad7e81681d12f59e7d3f86fabf92317ea01fda30 | [
"MIT"
] | permissive | zfang92/transfer-learning | 8a92699cdcde56eba222acf3378652e7be31a195 | 0efcf447449995e8a589c0b6e6bfccd2a7791193 | refs/heads/master | 2021-06-11T19:23:20.834990 | 2017-02-19T22:08:01 | 2017-02-19T22:08:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,891 | py | import os
import h5py
from tools import build_transfer_net
from tools.datasets.urban_tribes import load_data
class Session(object):
"""Training session.
This is a helper class to bundle model and history together and make it
easy to resume training and recording history.
"""
def __init__(self, model, history=None):
self.model = model
self.history = history
def dump(self, path):
W, b = self.model.layers[-1].get_weights()
with h5py.File(path, 'w') as f:
f.create_dataset('W', data=W)
f.create_dataset('b', data=b)
group = f.create_group('history')
for k, v in self.history.items():
group.create_dataset(k, data=v)
@classmethod
def load(cls, model, path):
W, b = cls.load_weights(path)
history = cls.load_history(path)
model.layers[-1].set_weights([W, b])
return cls(model, history=history)
def train(self, *args, **kwargs):
new_history = self.model.fit(*args, **kwargs)
self._record(new_history.history)
def _record(self, new_history):
if self.history is None:
self.history = new_history
else:
for key in self.history.keys():
self.history[key].extend(new_history[key])
@staticmethod
def load_history(path):
with h5py.File(path, 'r') as f:
history = {}
for k, v in f['history'].items():
history[k] = list(v)
return history
@staticmethod
def load_weights(path):
with h5py.File(path, 'r') as f:
W = f['W'][:]
b = f['b'][:]
return W, b
def transfer_learn(layer_name, nb_sample, nb_epoch, output_file):
"""Transfer learning for image classification.
Args:
layer_name: Transfer layer name.
nb_sample: Number of samples per categories.
nb_epoch: Number of epochs to train in total.
output_file: Name of the output file to pick history to.
"""
# Build model
model = build_transfer_net(output_dim=11,
transfer_layer_name=layer_name)
# Prepare data
(x_train, y_train), (x_val, y_val), (x_test, y_test) = \
load_data(images_per_category=nb_sample)
# Train
model.compile(optimizer='adadelta', loss='categorical_crossentropy',
metrics=['accuracy'])
if os.path.exists(output_file):
print('Resuming')
session = Session.load(model, output_file)
nb_epoch -= len(session.history['loss'])
if nb_epoch <= 0:
return session
else:
print('Starting')
session = Session(model)
session.train(x_train, y_train, batch_size=nb_sample, nb_epoch=nb_epoch,
validation_data=(x_val, y_val))
session.dump(output_file)
return session
| [
"qobilidop@gmail.com"
] | qobilidop@gmail.com |
031f86b63261ef4c68f89508bf5043cbc1b24e8b | df433b748bf16cce7453b1975dfd0ca0cae50bdb | /homeworks/kate.chepurna_niampire/Homework-5/Apple.py | da649b0b679269d8eab7411f555f3bac983fe464 | [] | no_license | MastersAcademy/Programming-Basics | cce3ce7cba05ee68b40da8b2087557dd13997860 | d8d7f83e586c8b55fdb584c06f478cf83d9d8098 | refs/heads/master | 2021-01-13T08:58:57.838431 | 2017-01-19T21:09:00 | 2017-01-19T21:09:00 | 71,988,316 | 62 | 376 | null | 2017-02-09T15:27:47 | 2016-10-26T09:35:12 | Python | UTF-8 | Python | false | false | 170 | py | from Product import Product
class Apple(Product):
def __init__(self, name, weight, last_day, price):
Product.__init__(self, name, weight, last_day, price)
| [
"niampiriatko@gmail.com"
] | niampiriatko@gmail.com |
1ecb996f4097f56f0ce63ab0d6dedf6b7f3b0ff8 | 80a3d98eae1d755d6914b5cbde63fd10f5cc2046 | /autox/autox_video/mmaction2/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x4x1_64e_ucf101_rgb.py | 48df87cc320b51fd2cd980cd78eade24f3d1d968 | [
"Apache-2.0"
] | permissive | 4paradigm/AutoX | efda57b51b586209e1d58e1dab7d0797083aadc5 | 7eab9f4744329a225ff01bb5ec360c4662e1e52e | refs/heads/master | 2023-05-24T00:53:37.109036 | 2023-02-14T14:21:50 | 2023-02-14T14:21:50 | 388,068,949 | 752 | 162 | Apache-2.0 | 2022-07-12T08:28:09 | 2021-07-21T09:45:41 | Jupyter Notebook | UTF-8 | Python | false | false | 3,034 | py | _base_ = [
'../../_base_/models/slowonly_r50.py',
'../../_base_/schedules/sgd_150e_warmup.py',
'../../_base_/default_runtime.py'
]
# model settings
model = dict(cls_head=dict(num_classes=101))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ucf101/rawframes/'
data_root_val = 'data/ucf101/rawframes/'
split = 1 # official train/test splits. valid numbers: 1, 2, 3
ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt'
ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=8, frame_interval=4, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=4,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=8,
frame_interval=4,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline))
evaluation = dict(
interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
optimizer = dict(lr=0.1) # this lr is used for 8 gpus
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
total_epochs = 64
# runtime settings
work_dir = './work_dirs/slowonly_r50_8x4x1_64e_ucf101_rgb'
| [
"caixiaochen@4ParadigmdeMacBook-Pro.local"
] | caixiaochen@4ParadigmdeMacBook-Pro.local |
be16954d0a750987370b7f380fc7e6db042b31ae | 20428cab3a57ddabbb94910b437e6666fcaa75f2 | /Chapter9_PriorityQueue/SortedPriorityQueue.py | 300c078787caf4eaaeadefc5ae815a90b6b948a2 | [] | no_license | RuichengGeng/PythonDataStructure | 5a0a48b1a91506f911613c8cc7128c77fd7b9972 | cf1449cd4bcad1edfa8eac1ab122b4398496a0b7 | refs/heads/main | 2023-09-06T00:07:55.757923 | 2021-11-07T08:44:26 | 2021-11-07T08:44:26 | 418,759,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,966 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 1 09:00:09 2021
@author: Ruich
"""
'''sorted priority queue implement by double linked list'''
from PriorityQueueBase import PriorityQueueBase
from PriorityQueueBase import _Item
import random
class Empty(Exception):
pass
class _DNode:
'''double linked node'''
def __init__(self,_element,_prev,_next):
self._element = _element
self._prev = _prev
self._next = _next
def is_na(self):
return self._element is None
def prev_node(self):
return self._prev
def next_node(self):
return self._next
class _DoubleLinkedBase:
'''base class for double linked list related data type'''
def __init__(self):
self._head = _DNode(None,None,None)
self._trailer = _DNode(None,None,None)
self._head._next = self._trailer
self._trailer._prev = self._head
self._size = 0
def is_empty(self):
return self._size == 0
def __len__(self):
return self._size
def _insert_between(self,element,_prev,_next):
new = _DNode(element,_prev,_next)
_prev._next = new
_next._prev = new
self._size += 1
return new
def _delete_between(self,node):
_prev = node._prev
_next = node._next
_prev._next = _next
_next._prev = _prev
self._size -= 1
element = node._element
node._prev,node._element,node._next = None,None,None # deprecate the node
return element
class DoubleLinkedList(_DoubleLinkedBase):
def __init__(self):
super().__init__()
def first(self):
if self.is_empty():
raise Empty("Empty list")
return self._head._next
def last(self):
if self.is_empty():
raise Empty("Empty list")
return self._trailer._prev
def insert_first(self,element):
self._insert_between(element,self._head,self._head._next)
def insert_last(self,element):
self._insert_between(element,self._trailer._prev,self._trailer)
def delete_first(self):
if self.is_empty():
raise Empty("Empty list")
return self._delete_between(self._head._next)
def delete_last(self):
if self.is_empty():
raise Empty("Empty list")
return self._delete_between(self._trailer._prev)
def __iter__(self):
if self.is_empty():
raise Empty("Empty double linked list")
thisNode = self._head._next ## notice the head of the list is None
while not thisNode.is_na():
yield thisNode
thisNode = thisNode._next
class SortedPriorityQueue(PriorityQueueBase):
'''head to tail,small to big'''
def __init__(self):
self._data = DoubleLinkedList()
def __len__(self):
return len(self._data)
def add(self,key,value):
item = _Item(key,value)
if self.is_empty():
self._data.insert_first(item)
else:
insert = 0
for node in self._data:
if (node._element < item) and (insert == 0):
self._data._insert_between(item,node._prev,node)
insert = 1
if insert == 0:
self._data.insert_last(item)
def get_min(self):
if self.is_empty():
assert Empty("Empty queue")
p = self._data.first()
return (p.key,p.value)
def remove_min(self):
if self.is_empty():
assert Empty("Empty queue")
p = self._data.delete_first()
return (p.key,p.value)
def test_SortedPriorityQueue():
q = SortedPriorityQueue()
for _ in range(15):
q.add(random.randint(a= 0,b = 10),0)
while not q.is_empty():
print(q.remove_min())
if __name__ == '__main__':
test_SortedPriorityQueue()
| [
"ruicheng.geng@hotmail.com"
] | ruicheng.geng@hotmail.com |
5b18fbd4b0a8183ff967c046a05f8f8ac468e3eb | 2711e7408e590648ac6a51725c2177a56c566403 | /smilebuddies/urls.py | ea9397e69f37780d921d593336f630dad2ff758f | [] | no_license | SeedyROM/smilebuddies | 457415c1c843b495d92bdb925b0597411f1222c2 | 6ba4827205ce48c1b19786c9e32b9993cf8b43aa | refs/heads/master | 2020-03-21T15:29:13.592031 | 2018-06-26T10:38:38 | 2018-06-26T10:38:38 | 138,715,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | """smilebuddies URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('', TemplateView.as_view(template_name='landing.html'), name='landing')
]
| [
"rallokkcaz@gmail.com"
] | rallokkcaz@gmail.com |
ecf0282f4a1d470d9299507c2e8d1820382891e7 | fd7e8aab67366f7087265279d7f41038cdeceeaf | /Python_Crawler/thread_demo/demo5.py | 07632a7bd3229ec34e0a1e52f5ec7822a5e17dec | [] | no_license | pzhren/Python | e9aef0ad983c81249c3aafec22fe9375fd1dbe70 | 6b0c1ed68984889395c4270213afa20dac497f27 | refs/heads/master | 2021-05-24T08:47:30.834486 | 2020-06-05T10:39:57 | 2020-06-05T10:39:57 | 253,477,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | #encoding: utf-8
import threading
import random
import time
gMoney = 1000
gCondition = threading.Condition()
gTotalTimes = 10
gTimes = 0
class Producer(threading.Thread):
def run(self):
global gMoney
global gTimes
while True:
money = random.randint(100,1000)
gCondition.acquire()
if gTimes >= gTotalTimes:
gCondition.release()
break
gMoney += money
print('%s生产了%d元钱,剩余%d元钱'%(threading.current_thread(),money,gMoney))
gTimes += 1
gCondition.notify_all()
gCondition.release()
time.sleep(0.5)
class Consumer(threading.Thread):
def run(self):
global gMoney
while True:
money = random.randint(100,1000)
gCondition.acquire()
while gMoney < money:
if gTimes >= gTotalTimes:
gCondition.release()
return
print('%s准备消费%d元钱,剩余%d元钱,不足!' % (threading.current_thread(),money,gMoney))
gCondition.wait()
gMoney -= money
print('%s消费了%d元钱,剩余%d元钱' % (threading.current_thread(),money,gMoney))
gCondition.release()
time.sleep(0.5)
def main():
for x in range(3):
t = Consumer(name='消费者线程%d'%x)
t.start()
for x in range(5):
t = Producer(name="生产者线程%d"%x)
t.start()
if __name__ == '__main__':
main() | [
"34993251+bensange123@users.noreply.github.com"
] | 34993251+bensange123@users.noreply.github.com |
0a41f9fba4940a599b729686b089dc887ef437ee | 57ff13f8da5b581547d51fb829154d9153aaf53c | /src/custom_exceptions.py | b7f3824af16fe7aca45ccafcebc86f39ae5c94b9 | [] | no_license | blawney/mycalc | e777aecb97131b4596b8d0cbab3207e3aebf98a3 | 4a625b0b00040a06f2b52ac74d60c636012f1dd4 | refs/heads/master | 2021-01-19T17:16:42.216373 | 2017-11-27T18:06:17 | 2017-11-27T18:06:17 | 82,431,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | __author__ = 'brian'
class FileSourceNotFound(Exception):
pass
class NoReactionParser(Exception):
pass
class MissingRateConstantException(Exception):
pass
class RateConstantFormatException(Exception):
pass
class ExtraRateConstantException(Exception):
pass
class MalformattedReactionDirectionSymbolException(Exception):
pass
class MalformattedReactionException(Exception):
pass
class InvalidSymbolName(Exception):
pass
class MalformattedReactionFileException(Exception):
pass
class MissingInitialConditionsException(Exception):
pass
class MissingRequiredInitialConditionsException(Exception):
pass
class InvalidSimulationTimeException(Exception):
pass
class InitialConditionGivenForMissingElement(Exception):
pass
class InvalidInitialConditionException(Exception):
pass
class RequiredSpeciesException(Exception):
pass
class ReactionErrorWithTrackerException(Exception):
def __init__(self, error_index, detailed_message):
self.error_index = error_index
self.detailed_message = detailed_message
| [
"blawney@jimmy.harvard.edu"
] | blawney@jimmy.harvard.edu |
51b10688eabd91f7155fc07e13ea362d19c0cc8d | 0389e0bf1e2942089fa84ce8ab79ef859f5d8215 | /parents/migrations/0001_initial.py | ce9cbb250fd38c26df3efa680e2059468590c7b8 | [] | no_license | jitin2707/SchoolManagement | 89f78e35b2b1f387083115064b0a54423de09cc7 | 7024d84dc0dfed4864a0ff9c58d045a1453bdb06 | refs/heads/master | 2020-08-05T11:39:56.958757 | 2019-11-23T03:57:01 | 2019-11-23T03:57:01 | 212,488,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # Generated by Django 2.0.6 on 2019-11-12 06:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('myUser', '0006_loginrecords'),
]
operations = [
migrations.CreateModel(
name='Parents',
fields=[
('name', models.CharField(default='', max_length=255, null=True)),
('email', models.EmailField(default='', max_length=255, primary_key=True, serialize=False)),
('password', models.CharField(default='', max_length=255, null=True)),
('address', models.CharField(default='', max_length=255, null=True)),
('mobile', models.CharField(default='', max_length=255, null=True)),
('is_active', models.NullBooleanField(default=True)),
('image', models.CharField(default='', max_length=255, null=True)),
('last_login_time', models.CharField(default='', max_length=255, null=True)),
('last_login_date', models.CharField(default='', max_length=255, null=True)),
('last_logout', models.CharField(default='', max_length=255, null=True)),
('role', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='myUser.UserRole')),
],
),
]
| [
"truepx247@gmail.com"
] | truepx247@gmail.com |
0eaa98f37052fd6356edfee0721ca64abf89bf44 | 7657b23db44741ffecd39795aaafcca51b78570f | /bootstrapproject/questions/urls.py | a71c7330b90581cdd954088ece8e1271e25f0662 | [] | no_license | Sky-Akash001/Learning-Portal | c18bf7393cb27bc0593cb1a1d027f6486116f0f1 | 250bf36de376dcc60c71d273ebaa656855213e42 | refs/heads/main | 2023-05-21T20:59:56.988106 | 2021-06-13T12:38:35 | 2021-06-13T12:38:35 | 376,540,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from django.contrib import admin
from django.urls import path
from .views import *
urlpatterns = [
path('quizhome/' , quizhome , name="quizhome"),
path('view_score/' , view_score , name="view_score"),
path('api/check_score/' , check_score , name="check_score"),
path('<id>/' , take_quiz , name="take_quiz"),
path('api/<id>/' , api_question , name="api_question"),
]
| [
"akashvardhan108@gmail.com"
] | akashvardhan108@gmail.com |
69e17f4c855e3719a67fb44ed072035427f7e853 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /glue_read_2/workflow-run_get.py | eb26a1136104d518e28d211b93a913de8e86b4f2 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import execute_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/get-workflow-run.html
if __name__ == '__main__':
"""
get-workflow-runs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/get-workflow-runs.html
resume-workflow-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/resume-workflow-run.html
start-workflow-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/start-workflow-run.html
stop-workflow-run : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/stop-workflow-run.html
"""
parameter_display_string = """
# name : Name of the workflow being run.
# run-id : The ID of the workflow run.
"""
execute_two_parameter("glue", "get-workflow-run", "name", "run-id", parameter_display_string) | [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
e5850cab963a2bed4094268fcad193eda0cd489c | 717171ed7a14ad60dd42d62fe0dd217a0c0c50fd | /19年7月/7.18/url编码和解码.py | 44e1a5f421f2f103c0c08b57f4de71423a436f54 | [] | no_license | friedlich/python | 6e9513193227e4e9ee3e30429f173b55b9cdb85d | 1654ef4f616fe7cb9fffe79d1e6e7d7721c861ac | refs/heads/master | 2020-09-04T14:34:48.237404 | 2019-11-18T14:54:44 | 2019-11-18T14:54:44 | 219,756,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | # Python进行URL解码
# 所用模块:urllib
# 所用函数:urllib.unquote()
from urllib.request import quote, unquote
# import urllib # 这样不行
rawurl = "%E6%B2%B3%E6%BA%90"
url = unquote(rawurl)
print(url)
print(quote("河源"))
print(type(quote('河源')))
# URL为何要编码、解码?
# 通常如果一样东西需要编码,说明这样东西并不适合传输。原因多种多样,如Size过大,包含隐私数据。对于Url来说,之所以要进行编码,
# 是因为Url中有些字符会引起歧义。
# 例如,Url参数字符串中使用key=value键值对这样的形式来传参,键值对之间以&符号分隔,如/s?q=abc&ie=utf-8。如果你的value字符串中
# 包含了=或者&,那么势必会造成接收Url的服务器解析错误,因此必须将引起歧义的&和=符号进行转义,也就是对其进行编码。
# 又如,Url的编码格式采用的是ASCII码,而不是Unicode,这也就是说你不能在Url中包含任何非ASCII字符,例如中文。否则如果客户端浏览器
# 和服务端浏览器支持的字符集不同的情况下,中文可能会造成问题。
# -*- coding: utf-8 -*-
# @File : urldecode_demo.py
# @Date : 2018-05-11
from urllib.request import quote, unquote
# 编码
url1 = "https://www.baidu.com/s?wd=中国"
# utf8编码,指定安全字符
ret1 = quote(url1, safe=";/?:@&=+$,", encoding="utf-8")
print(ret1)
print(type(ret1))
# https://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD
# gbk编码
ret2 = quote(url1, encoding="gbk")
print(ret2)
print(type(ret2))
# https%3A//www.baidu.com/s%3Fwd%3D%D6%D0%B9%FA
# 解码
url3 = "https://www.baidu.com/s?wd=%E4%B8%AD%E5%9B%BD"
print(unquote(url3))
url4 = 'https%3A//www.baidu.com/s%3Fwd%3D%D6%D0%B9%FA'
print(unquote(url4, encoding='gbk'))
| [
"1164166295@qq.com"
] | 1164166295@qq.com |
ba82e0e343037ba03d836effb34bfca835a40faa | a8dc8df49b76bde4bb88de0556a606938f7b764a | /staramr/blast/results/BlastHitPartitions.py | ce3dee86aa3b02d8d1050bce98c4a3bb3f636627 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | mjram0s/staramr | e06c3b98f81b2a66599134ee3c2d08511d35b6a3 | d8bc1f71dfb9534b2c559f3e39635f462cc5107a | refs/heads/master | 2020-04-06T16:05:37.652033 | 2018-11-14T19:15:28 | 2018-11-14T19:15:45 | 157,605,130 | 0 | 0 | null | 2018-11-14T20:12:35 | 2018-11-14T20:12:34 | null | UTF-8 | Python | false | false | 3,802 | py | import logging
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from collections import OrderedDict
logger = logging.getLogger('BlastHits')
from staramr.blast.results.AMRHitHSP import AMRHitHSP
from staramr.exceptions.InvalidPositionException import InvalidPositionException
"""
Class for partitioning up blast hits into non-overlapping regions.
"""
class BlastHitPartitions:
def __init__(self):
"""
Creates a new object to store BLAST hit partitions.
"""
self._partitions = OrderedDict()
def append(self, hit: AMRHitHSP) -> None:
"""
Adds a new blast hit to the set of partitions.
:param hit: The hit to add.
:return: None
"""
if hit.get_genome_contig_start() > hit.get_genome_contig_end() and hit.get_genome_contig_strand() == 'plus':
raise InvalidPositionException(
"Unsupported condition: strand=plus and contig start > contig end for hit (contig=" + hit.get_genome_contig_id() + ", start=" +
str(hit.get_genome_contig_start()) + ", end=" + str(hit.get_genome_contig_end()) + ")")
partition = self._get_existing_partition(hit)
if (partition is None):
self._create_new_parition(hit)
else:
self._add_hit_partition(hit, partition)
def _add_hit_partition(self, hit: AMRHitHSP, partition: Dict[str, Union[int, List[AMRHitHSP]]]) -> None:
start, end = self._stranded_ends(hit)
if start < partition['start']:
partition['start'] = start
if end > partition['end']:
partition['end'] = end
partition['hits'].append(hit)
def _get_existing_partition(self, hit: AMRHitHSP) -> Optional[Dict[str, Union[int, List[AMRHitHSP]]]]:
partition_name = hit.get_genome_contig_id()
if partition_name in self._partitions:
contig_partitions_list = self._partitions[partition_name]
for partition in contig_partitions_list:
if self._hit_in_parition(hit, partition):
return partition
return None
def _hit_in_parition(self, hit: AMRHitHSP, partition: Dict[str, Union[int, List[AMRHitHSP]]]) -> bool:
pstart, pend = partition['start'], partition['end']
start, end = self._stranded_ends(hit)
return (pstart < start < pend) or (pstart < end < pend) or (start <= pstart and end >= pend)
def _create_new_parition(self, hit: AMRHitHSP) -> None:
start, end = self._stranded_ends(hit)
contig_name = hit.get_genome_contig_id()
partition = {
'start': start,
'end': end,
'hits': [hit]
}
if contig_name in self._partitions:
self._partitions[contig_name].append(partition)
else:
self._partitions[contig_name] = [partition]
def get_hits_nonoverlapping_regions(self) -> List[List[AMRHitHSP]]:
"""
Gets BLAST hits divided up into separate lists for non-overlapping regions..
:return: A list of BLAST hits divided up into non-overlapping regions.
"""
return [p['hits'] for name in self._partitions for p in self._partitions[name]]
def _stranded_ends(self, hit: AMRHitHSP) -> Tuple[int, int]:
"""
Gets the start/end coordinates, taking into account the strand.
:param hit: The hit.
:return: The (start,end) as a tuple.
"""
start = hit.get_genome_contig_start() if hit.get_genome_contig_strand() == 'plus' else hit.get_genome_contig_end()
end = hit.get_genome_contig_end() if hit.get_genome_contig_strand() == 'plus' else hit.get_genome_contig_start()
return start, end
| [
"aaron.petkau@canada.ca"
] | aaron.petkau@canada.ca |
5a34493171c954272acc41e2ff53aee86c0742c4 | 702ec4ccc0d809fe3469ac262be159eabe6e356f | /DQN/CirTurtleBot/dqn_cirturtlebot2.py | 36d113427513948a07c588a3f0b0285cbb468afd | [] | no_license | porterpan/CSN-RL | fad3588120f67af3a4e07126fe73d817d39d44ed | e027629acefa66ac39ec65e027e7cd9f635f4c9c | refs/heads/master | 2020-04-27T16:54:33.091694 | 2018-08-03T13:01:30 | 2018-08-03T13:01:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,438 | py | #!/usr/bin/env python
import time
import numpy as np
import gym
import json
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Input
from keras.optimizers import Adam, RMSprop
# from rl.agents.dqn import DQNAgent
from DQN.dqn import DQNAgent
from common.policy import BoltzmannQPolicy, EpsGreedyQPolicy, EpsDisGreedyQPolicy
# from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
# from rl.memory import SequentialMemory
from common.memory import SequentialMemory
from matplotlib import pyplot
from keras.models import model_from_json
# from rl.callbacks import TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer, CallbackList, FileLogger
from common.callbacks import TestLogger, TrainEpisodeLogger, TrainIntervalLogger, Visualizer, CallbackList, FileLogger
import environments
from datetime import datetime
timenow = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# import gym_gazebo
if __name__ == '__main__':
ENV_NAME = 'GazeboCircuit2TurtlebotLidar-v1'
# ENV_NAME = 'GazeboCircuit2TurtlebotLidarNn-v1'
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(1234)
env.seed(1234)
nb_actions = env.action_space.n
print('action numbers:{}'.format(nb_actions))
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
# model.add(Flatten(input_shape=(1, 5)))
# model.add(Dense(16, input_dim=5, activation='relu'))
model.add(Dense(24))
model.add(Activation('relu'))
model.add(Dense(24))
model.add(Activation('relu'))
model.add(Dense(24))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# serialize model to JSON
model_save = model.to_json()
with open("save/NNmodel1.json", "w") as json_file:
json_file.write(model_save)
print("Saved model to disk!")
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
# policy1 = BoltzmannQPolicy()
policy1 = EpsDisGreedyQPolicy(eps=0.05, eps_decay=0.999)
policy2 = BoltzmannQPolicy(tau=0.8)
callback1 = FileLogger(filepath='save/nhistory1_{}'.format(timenow), interval=1)
callback2 = FileLogger(filepath='save/nhistory2_{}'.format(timenow), interval=1)
callback3 = FileLogger(filepath='save/nhistory3_{}'.format(timenow), interval=1)
callback4 = FileLogger(filepath='save/nhistory4_{}'.format(timenow), interval=1)
callback5 = FileLogger(filepath='save/nhistory5_{}'.format(timenow), interval=1)
# dqn1 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
# target_model_update=1e-2, policy=policy1)
# dqn1.compile(Adam(lr=1e-3), metrics=['mae'])
# history1 = dqn1.fit(env, nb_epsteps=5000, visualize=False, callbacks=[callback1], verbose=2)
dqn2 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, batch_size=32, nb_steps_warmup=1000,
target_model_update=1e-2, policy=policy2)
dqn2.compile(Adam(lr=0.01), metrics=['mse'])
# dqn2.save_weights('save/dqn_blotzmann0.8_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
history2 = dqn2.fit(env, nb_steps=200000, visualize=False, callbacks=[callback1], verbose=2)
time.sleep(3600)
# dqn3 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
# target_model_update=1e-2, policy=policy1, enable_double_dqn=False)
# dqn3.compile(Adam(lr=1e-3), metrics=['mae'])
# history3 = dqn3.fit(env, nb_epsteps=100, visualize=False, callbacks=[callback3], verbose=2)
# dqn4 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
# target_model_update=1e-2, policy=policy2, enable_double_dqn=False)
# dqn4.compile(Adam(lr=1e-3), metrics=['mae'])
# history4 = dqn4.fit(env, nb_epsteps=100, visualize=False, callbacks=[callback4], verbose=2)
# print(history1.history.keys())
# print(len(history1.history['policy_config']))
# print(history1.history['policy_config']['config'])
# pyplot.plot(history1.history['policy_config']['eps'])
# pyplot.show()
# pyplot.subplot(2, 1, 1)
# pyplot.plot(history.history['nb_episode_steps'], history.history['episode_reward'])
'''
pyplot.figure()
pyplot.subplot(2, 1, 1)
pyplot.plot(history1.history['episode_reward'], 'r--',history3.history['episode_reward'], 'b--')
pyplot.subplot(2, 1, 2)
#pyplot.plot(history1.history['nb_steps'], history1.history['episode_reward'], 'r', history2.history['nb_steps'], history2.history['episode_reward'], 'g')
pyplot.plot(history2.history['episode_reward'], 'r', history4.history['episode_reward'], 'b')
pyplot.show()
#pyplot.savefig('save/BoltzmannQPolicy')
'''
# After training is done, we save the final weights.
# dqn1.save_weights('save/dqn1_{}_weights_test.h5f'.format(ENV_NAME), overwrite=True)
#dqn2.save_weights('save/dqn5_{}.h5f'.format(timenow), overwrite=True)
# dqn3.save_weights('save/dqn3_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# dqn4.save_weights('save/dqn4_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
#print('Weights saved!')
| [
"shengnan0509.chen@gmail.com"
] | shengnan0509.chen@gmail.com |
369ed9d347ffcacbe951e57fc6c86852bb45626d | 83402cc9327dd7899d91889bb814ecddfbbcdee3 | /scraping.py | e68a0fd3a0dbdabdb9a0ad25ecb521696002f285 | [] | no_license | PaigeSpiller/Mission_to_Mars | 5ff8e60949eaf9bb0a55b9a0d15d387c30b73224 | ca0b54a767ceb87bf7d256e7443ed7939d9bf02b | refs/heads/main | 2023-04-22T04:56:43.286901 | 2021-05-06T23:19:10 | 2021-05-06T23:19:10 | 354,395,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,650 | py | # Import Splinter and BeautifulSoup
from splinter import Browser
from bs4 import BeautifulSoup as soup
from webdriver_manager.chrome import ChromeDriverManager
import pandas as pd
import datetime as dt
def scrape_all():
    """Launch a headless Chrome session, run every scraper, and return the combined results."""
    browser = Browser(
        'chrome',
        executable_path=ChromeDriverManager().install(),
        headless=True,
    )
    # The news scraper returns a (title, paragraph) pair.
    news_title, news_paragraph = mars_news(browser)
    # Bundle every scraping result, stamped with the run time.
    results = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "featured_image": featured_image(browser),
        "facts": mars_facts(),
        "last_modified": dt.datetime.now(),
        "hemispheres": mars_image(browser),
    }
    browser.quit()
    return results
def mars_news(browser):
    """Scrape the latest article title and teaser from redplanetscience.com.

    Returns a (title, paragraph) tuple, or (None, None) when the expected
    elements are missing from the page.
    """
    # Visit the mars nasa news site
    url = 'https://redplanetscience.com'
    browser.visit(url)
    # Optional delay for loading the page
    browser.is_element_present_by_css('div.list_text', wait_time=1)
    # convert the browser html to a soup object
    html = browser.html
    news_soup = soup(html, 'html.parser')
    try:
        slide_elem = news_soup.select_one('div.list_text')
        #slide_elem.find('div', class_='content_title')
        # Use the parent element to find the first `a` tag and save it as `news_title`
        news_title = slide_elem.find('div', class_='content_title').get_text()
        # Use the parent element to find the paragraph text
        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
    except AttributeError:
        # select_one returned None (page layout changed), so .find raised.
        return None, None
    return news_title, news_p
# ### Featured Images
def featured_image(browser):
    """Click through to the full-size featured image and return its absolute URL.

    Returns None when the expected <img class="fancybox-image"> is absent.
    """
    # Visit URL
    url = 'https://spaceimages-mars.com'
    browser.visit(url)
    # Find and click the full image button
    full_image_elem = browser.find_by_tag('button')[1]
    full_image_elem.click()
    # Parse the resulting html with soup
    html = browser.html
    img_soup = soup(html, 'html.parser')
    try:
        # Find the relative image url
        img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
    except AttributeError:
        # find() returned None, so .get raised — no featured image available.
        return None
    # Use the base URL to create an absolute URL
    img_url = f'https://spaceimages-mars.com/{img_url_rel}'
    return img_url
# Mars Facts
def mars_facts():
    """Scrape the Mars/Earth comparison table and return it as Bootstrap-styled HTML.

    Returns None when the table cannot be fetched or parsed.
    """
    try:
        facts_df = pd.read_html('https://galaxyfacts-mars.com')[0]
    except BaseException:
        # Best-effort: any fetch/parse failure means "no facts table".
        return None
    facts_df.columns = ['description', 'Mars', 'Earth']
    facts_df.set_index('description', inplace=True)
    # Convert dataframe into HTML format, add bootstrap
    return facts_df.to_html(classes="table table-striped")
def mars_image(browser):
    """Collect full-resolution hemisphere image URLs and titles.

    Returns a list of {'img_url': ..., 'title': ...} dicts, or None when
    any expected element is missing (AttributeError).
    """
    try:
        url = 'https://marshemispheres.com/'
        browser.visit(url)
        hemisphere_image_urls = []
        links = browser.find_by_css('a.product-item img')
        html = browser.html
        img_soup = soup(html, 'html.parser')
        for i in range(len(links)):
            hemisphere = {}
            # Re-query each iteration: browser.back() invalidates old handles.
            browser.find_by_css('a.product-item img')[i].click()
            mars_img = browser.links.find_by_text('Sample').first
            hemisphere['img_url'] = mars_img['href']
            hemisphere['title'] = browser.find_by_css('h2.title').text
            hemisphere_image_urls.append(hemisphere)
            browser.back()
        return hemisphere_image_urls
    except AttributeError:
        return None
if __name__ == "__main__":
# If running as script, print scraped data
print(scrape_all()) | [
"paige.spiller2@gmail.com"
] | paige.spiller2@gmail.com |
994cf62781e7659e046c4469ef816947c9e4ce38 | b4254d6e1704750a9698e912b3ceaf8feb905361 | /tests/test_transaction_util.py | 154f8c2f40bbc91c106cc5a2027694e4694d6e15 | [
"Apache-2.0"
] | permissive | pacoxu/etcd3-py | d71ccc15321eacc1cec67e7a498d05d0288c15d0 | 0c12f315d4e4f1b780df23ad2dd7ab02ef422e44 | refs/heads/master | 2020-05-03T09:35:42.256366 | 2019-03-28T14:59:36 | 2019-03-28T14:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | import pytest
from etcd3 import Client
from tests.docker_cli import docker_run_etcd_main
from .envs import protocol, host
from .etcd_go_cli import etcdctl, NO_ETCD_SERVICE
@pytest.fixture(scope='module')
def client():
    """
    init Etcd3Client, close its connection-pool when teardown
    """
    # docker_run_etcd_main() starts/locates the etcd container; the second
    # tuple item is the mapped port.
    _, p, _ = docker_run_etcd_main()
    c = Client(host, p, protocol)
    yield c
    # Teardown: release the client's connection pool.
    c.close()
@pytest.mark.timeout(60)
@pytest.mark.skipif(NO_ETCD_SERVICE, reason="no etcd service available")
def test_transaction(client):
    """End-to-end checks of the Txn builder against a live etcd server."""
    # Successful compare-and-swap via compare()/success().
    etcdctl('put foo bar')
    txn = client.Txn()
    txn.compare(txn.key('foo').value == 'bar')
    txn.success(txn.put('foo', 'bra'))
    r = txn.commit()
    assert r.succeeded
    assert client.range('foo').kvs[0].value == b'bra'
    # If/Then/Else aliases: comparison fails ('bra' != 'bar') -> Else branch.
    txn = client.Txn()
    txn.If(txn.key('foo').value == 'bar')
    txn.Then(txn.put('foo', 'bra'))
    txn.Else(txn.put('foo', 'bar'))
    txn.commit()
    assert client.range('foo').kvs[0].value == b'bar'
    # Multiple If() conditions are AND-ed together.
    etcdctl('put foo 2')
    txn = client.Txn()
    txn.If(txn.key('foo').value > b'1')
    txn.If(txn.key('foo').value < b'3')
    txn.If(txn.key('foo').value != b'0')
    txn.Then(txn.put('foo', 'bra'))
    r = txn.commit()
    assert r.succeeded
    assert client.range('foo').kvs[0].value == b'bra'
    # A transaction can mix range and delete operations.
    etcdctl('put foo bar')
    etcdctl('put fizz buzz')
    txn = client.Txn()
    txn.success(txn.range('foo'))
    txn.success(txn.delete('fizz'))
    r = txn.commit()
    assert r.succeeded
    for i in r.responses:
        if 'response_range' in i:
            assert i.response_range.kvs[0].value == b'bar'
        else: # delete
            assert i.response_delete_range.deleted == 1
    assert not client.range('fizz').kvs
    # Unsupported / ill-typed comparisons must raise.
    with pytest.raises(NotImplementedError):
        txn.If(txn.key('foo').value >= b'1')
    with pytest.raises(NotImplementedError):
        txn.If(txn.key('foo').value <= b'1')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').value < 1)
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').version < 'a')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').create < 'a')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').mod < 'a')
    with pytest.raises(TypeError):
        txn.If(txn.key('foo').mod.value < 1)
    with pytest.raises(TypeError):
        client.Txn().key(123)
| [
"revol.cai@daocloud.io"
] | revol.cai@daocloud.io |
b4a9c9a8fdf8976a5bb862a97961be30bd9f8263 | 23d512bc45f45f259168f47a5be47f36771580d4 | /userbot/plugins/hack.py | 94bb0191d23aa71825c254add30f32b71ad89c3f | [
"MIT"
] | permissive | ashan890/X-tra-Telegram | e939261893276317334911bae5712a9f49d255d3 | 1842234d8c1e5a180660df2699a6674fcc80c7dd | refs/heads/master | 2020-12-03T19:21:46.373652 | 2020-01-13T12:51:35 | 2020-01-13T12:51:35 | 231,449,450 | 0 | 0 | MIT | 2020-01-02T19:52:22 | 2020-01-02T19:52:21 | null | UTF-8 | Python | false | false | 1,755 | py | """Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
    # Userbot handler: fires on any outgoing message starting with '.'.
    if event.fwd_from:
        # Ignore forwarded messages.
        return
    # Seconds between animation frames, and the indices of the 11 frames.
    animation_interval = 2
    animation_ttl = range(0, 11)
    # Text captured after the leading dot, e.g. 'hack' for '.hack'.
    input_str = event.pattern_match.group(1)
    if input_str == "hack":
        await event.edit(input_str)
animation_chars = [
"`Connecting To Hacked Private Server...`",
"`Target Selected.`",
"`Hacking... 0%\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Hacking... 4%\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Hacking... 8%\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Hacking... 20%\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Hacking... 36%\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Hacking... 52%\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `",
"`Hacking... 84%\n█████████████████████▒▒▒▒ `",
"`Hacking... 100%\n█████████HACKED███████████ `",
"`Targeted Account Hacked...\n\nPay 69$ To` @Nub_xD `Or send nudes of female Homo Sapiens To Remove This Hack`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 11])
| [
"noreply@github.com"
] | noreply@github.com |
b2d0262bbe3abdb54226b297d4b7d8985cbe3cbc | baa2a9eb29b373a9c630d9123a65e1be7852b9b1 | /janaganana/templatetags/india_format.py | a63fb64d5b0376e82eab378b74a26d073b2d93c4 | [
"MIT"
] | permissive | factly/janaganana | 1570f41db82b74063003886beb7b34b6a7029aaa | e9e285d7642a23c50c04cb64ec9f66b6db8244e6 | refs/heads/master | 2022-08-07T22:57:12.698781 | 2021-04-09T13:56:08 | 2021-04-09T13:56:08 | 82,102,427 | 14 | 15 | NOASSERTION | 2022-07-01T22:16:21 | 2017-02-15T20:17:28 | JavaScript | UTF-8 | Python | false | false | 1,018 | py | from django import template
import locale
import decimal
register = template.Library()
# {% load insint %} load in template
# @register.filter(name='insint')
# def insint(value): # Only one argument.
# """Formats a number into Indian Numeric System"""
# locale.setlocale(locale.LC_NUMERIC, "en_IN")
# return locale.format("%d", value, grouping=True)
@register.filter(name='india_format')
def india_format(value):
    """Format a number with Indian digit grouping (e.g. 12,34,567).

    Builds the grouped string right-to-left: the first integer group has
    3 digits, every following group has 2. The trailing `return res[:-3]`
    drops the last three characters — the '.XX' decimals in the common
    two-decimal case.
    NOTE(review): for inputs with more than two decimal places the [:-3]
    still removes only three characters — verify that is intended.
    """
    d = decimal.Decimal(str(value))
    # >2 decimal places: keep the value's own string form; otherwise
    # normalise to exactly two decimals.
    if d.as_tuple().exponent < -2:
        s = str(value)
    else:
        s = '{0:.2f}'.format(value)
    l = len(s)
    i = l-1;
    res = ''
    # flag 0: copying the fractional part (right of '.');
    # flag 1: first (3-digit) integer group; flag 2: later 2-digit groups.
    flag = 0
    k = 0
    while i>=0:
        if flag==0:
            res = res + s[i]
            if s[i]=='.':
                flag = 1
        elif flag==1:
            k = k + 1
            res = res + s[i]
            if k==3 and i-1>=0:
                res = res + ','
                flag = 2
                k = 0
        else:
            k = k + 1
            res = res + s[i]
            if k==2 and i-1>=0:
                res = res + ','
                flag = 2
                k = 0
        i = i - 1
    # Accumulated in reverse; flip back to normal order.
    res = res[::-1]
return res[:-3] | [
"mahesh.thipparthi@gmail.com"
] | mahesh.thipparthi@gmail.com |
70d3484be49be888d5d7d90e2b95c5ebb9a3eb03 | fb255218941b3173eed5792b91203edb162c4303 | /app/celery_extention.py | 2f4276ccd0475d69f11b7f42d8fc72ccf9d4a6e0 | [] | no_license | Colaplusice/hello_flask | 8291b5ce4ea25513cf04756ad6454f76fda2cf12 | 4d67924eed921f660ae455e2db5d03cfd9a91ca0 | refs/heads/master | 2020-03-17T16:17:52.203938 | 2019-02-22T15:35:21 | 2019-02-22T15:35:21 | 133,743,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | import celery
class Celery(celery.Celery):
    """Flask-style Celery subclass that pulls CELERY_* settings from an app config."""
    def init_app(self, app):
        # get_namespace strips the 'CELERY_' prefix and lower-cases the keys.
        celery_settings = app.config.get_namespace("CELERY_")
        self.config_from_object(celery_settings)
#
# def buses_route(name, args, kwargs, options, task):
# if name.startswith('buses.'):
# app = get_current_app()
# conf = app.config.get_namespace('CELERY_')
# return {
# 'queue': conf['task_buses_queue'],
# 'exchange': conf['task_buses_exchange'],
# 'exchange_type': conf['task_buses_exchange_type'],
# 'routing_key': name
# }
# return None
| [
"jialiang.fan@shanbay.com"
] | jialiang.fan@shanbay.com |
cec5aa3ccde2e96a09802b9fd5c0f9627c82ab72 | eb6147c14dc11557cd0f2bc4407925d0c2c6d2e1 | /problems/stackOfPlates.py | 91cf8f85062247efce29c431d9c4b5fae7e52938 | [] | no_license | Lobarr/interview-practice | cb58341a0ac16b48e2289eaddab0807fa78648b8 | 4cb02a9f89ecd66721034566fff29e53d954826d | refs/heads/master | 2023-03-15T00:17:56.011495 | 2021-03-17T12:27:18 | 2021-03-17T12:27:18 | 228,081,991 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | class StackOfPlates:
def __init__(self, stackLimit: int):
self.stacks = []
self.stackLimit = stackLimit
def _isFull(self, stack: list):
return len(stack) >= self.stackLimit
def push(self, data):
if not self.stacks or self._isFull(self.stacks[-1]):
print('creating new stack')
newStack = []
newStack.append(data)
self.stacks.append(newStack)
else:
print('adding to last stack')
self.stacks[-1].append(data)
def isEmpty(self):
return False if self.stacks else True
def pop(self):
print('removing last element from tail stack')
if self.stacks:
tailStack = self.stacks[-1]
lastElement = tailStack.pop(-1)
if not tailStack:
print('removing empty stack')
self.stacks.pop(-1)
return lastElement
return None
def popAt(self, index):
if not (0 <= index < len(self.stacks)):
raise Exception('invalid index provided')
selectedStack = self.stacks[index]
lastElement = selectedStack.pop(-1)
if not selectedStack:
self.stacks.pop(index)
return lastElement
if __name__ == '__main__':
    # Demo: fill three stacks of three, pop from the first, then drain.
    stackofPlates = StackOfPlates(3)
    for i in range(9):
        stackofPlates.push(i)
        print(f'added element {i}')
    print('removing element from first stack', stackofPlates.popAt(0))
    while not stackofPlates.isEmpty():
        print(stackofPlates.pop())
| [
"jesulobaegunjobi@hotmail.com"
] | jesulobaegunjobi@hotmail.com |
00034604309fa890c7df981fc0b4ec9a5184f3c4 | 59d85c2eb13d80d26eeb966ae8d3e400cb75c7f3 | /jvd/feeds/benign.py | 3403bd5ba9f0d67bcdbd5da5505c72c6f2a604d5 | [
"Apache-2.0"
] | permissive | jon1scr/JARV1S-Disassembler | 34e5257a734dda3c2f075d6a2d6b1fdb78fcc615 | 36121628525f9cbc704e0a8d0603e4b065b0b50c | refs/heads/master | 2023-02-02T18:02:03.662737 | 2020-12-20T08:12:36 | 2020-12-20T08:12:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py | from pathlib import Path
import os
import requests
from lxml import html
from tqdm import tqdm
import urllib.request as ur
from zipfile import ZipFile
from jvd.ida.ida import IDA
import hashlib
import sys
def sha256sum(filename):
    """Return the hex SHA-256 digest of a file path (str) or of a bytes-like object."""
    if not isinstance(filename, str):
        # Already an in-memory buffer: hash it directly.
        return hashlib.sha256(filename).hexdigest()
    digest = hashlib.sha256()
    with open(filename, 'rb', buffering=0) as stream:
        # Stream the file in 128 KiB chunks to keep memory flat.
        while True:
            chunk = stream.read(128 * 1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def _ren_dis_entry(ext):
    """Normalize every file in directory `ext` to `<stem>.bin`.

    When the .bin target already exists, the duplicate source file is
    deleted instead of renamed.
    """
    for entry in os.listdir(ext):
        stem = os.path.splitext(entry)[0]
        source = os.path.join(ext, entry)
        target = os.path.join(ext, stem + '.bin')
        if os.path.exists(target):
            os.remove(source)
        else:
            os.rename(source, target)
def _disassemble_all(path):
    """Disassemble everything under `path` with IDA, writing gzipped output and no CFG."""
    IDA().disassemble_all(path, cfg=False, as_gzip=True)
def _cleanup_all(folder):
    """Delete IDA intermediate artifacts left in `folder`.

    Removes files whose extension is .i64/.id0/.id1/.id2/.til/.nam/.json,
    plus extensionless files. Fixes two bugs in the original:
    * the loop body reassigned the `ext` directory parameter to a file
      suffix, so every path after the first iteration was joined against
      a suffix string instead of the directory;
    * Path.suffix includes the leading dot ('.i64'), but the old list held
      bare extensions ('i64'), so only extensionless files ever matched.
    """
    junk_suffixes = {'.i64', '.id0', '.id1', '.id2', '.til', '.nam', '.json', ''}
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        if Path(path).suffix in junk_suffixes:
            os.remove(path)
if __name__ == '__main__':
    # Rename every benign sample to <sha256>.bin, write a manifest CSV,
    # then batch-disassemble each directory.
    base = 'I:/benign'
    lines = []
    ds = []
    for d in os.listdir(base):
        d = os.path.join(base, d)
        for f in os.listdir(d):
            f = os.path.join(d, f)
            sha256 = sha256sum(f)
            if not os.path.exists(os.path.join(d, sha256+'.bin')):
                os.rename(f, os.path.join(d, sha256+'.bin'))
            # Manifest row: hash, group (directory name), original file name.
            lines.append(','.join([
                sha256,
                os.path.basename(d).replace(',', '_'),
                os.path.basename(f).replace(',', '_'),
            ]))
            # NOTE(review): appended once per *file*, so ds holds duplicates
            # and each directory is disassembled multiple times below.
            ds.append(d)
    with open(os.path.join(base, 'full.csv'), 'w') as wf:
        for l in lines:
            wf.write(l+'\n')
    for d in ds:
        _disassemble_all(d)
_cleanup_all(d) | [
"steven.h.ding@mail.mcgill.ca"
] | steven.h.ding@mail.mcgill.ca |
bf7a2ac208c91d74eabad8f86baad8cc5bea1afc | e1e7db1eb2f07dc5421f98bb1ce941aab0165b39 | /ParseDAD/trade_day.py | dc3e9f74315d3352028104b464d584e614142405 | [] | no_license | muqingliu/trade_tool | baae112c780e941e100e677dfe2e6367f3378383 | d295f1c484b5eb5832850b35ba4b6912ec11f0f8 | refs/heads/master | 2022-12-02T07:15:52.625980 | 2020-08-17T22:04:47 | 2020-08-17T22:04:47 | 288,291,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | def make_trade_days():
    # Read one trading day per line from days.txt; each line parses as int.
    days = []
    f = open("days.txt", "rt")
    l = f.readline()
    while l:
        days.append(int(l))
        l = f.readline()
    f.close()
    return days
def get_days(start):
    """Return the list of trading days from `start` (inclusive) onward.

    Returns an empty list when `start` is not a known trading day.
    Fix: the original iterated with Python-2-only `xrange`, which raises
    NameError on Python 3; list.index performs the same linear search.
    """
    days = make_trade_days()
    try:
        return days[days.index(start):]
    except ValueError:
        return []
| [
"tidusfantasy2008@sina.com"
] | tidusfantasy2008@sina.com |
16661322b8d82abf0d33cbcad7bf66415a5a8f7b | 413b75a60553d1067cb10970a66175bfd8eaad47 | /from_introduction_to_practice/my_car.py | ebc594c9a6cc8f9482e6c933b7e7ac5e8fa2a762 | [] | no_license | Ygritte0/blog | 45d5f50b2b27599cc7a102e9ccb4a6cc8c2e0c9d | d09d965eaa7ef44545acdaaa9a76b7db58537107 | refs/heads/master | 2021-04-26T23:51:30.372063 | 2019-10-13T09:16:28 | 2019-10-13T09:16:28 | 123,868,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #-*-coding:utf-8-*-
from car import Car
# Exercise script driving the Car class defined in car.py.
my_new_car = Car('audi','a4','2016')
print(my_new_car.get_descriptive_name())
# Attributes can be set directly on the instance.
my_new_car.odometer_reading = 23
my_new_car.read_odometer() | [
"1624849952@qq.com"
] | 1624849952@qq.com |
daac0b6f67d632b4fc2f3411cbad6e534cc6ba0c | c53789912c13e9671b1e30dfba2829834da0b0b7 | /backend/test_set_order_status.py | f0b70ff52f50490b3e20fc6fbfab1b36f9351b08 | [] | no_license | metallicOxide/BurgerWebsite | 94ee09771fb689dfe051df162669298d8ff7d5c5 | ed20c140f4f585d6bcf825e869d6ebf531f477b6 | refs/heads/master | 2021-06-16T08:56:57.465899 | 2019-07-12T10:49:44 | 2019-07-12T10:49:44 | 195,658,700 | 1 | 0 | null | 2021-04-20T18:20:37 | 2019-07-07T14:07:59 | Python | UTF-8 | Python | false | false | 3,176 | py | from backend.Ordering_system import OrderingSystem
from backend.Ingredient import Bun, Patty, MainsIngredient, Side, Drink
from backend.Order import Order
from backend.Inventory import Inventory
from backend.order_interface import DrinkOrder, SideOrder, MainOrder, Burger, Wrap
from backend.errors import StatusError
import pytest
def test_set_order_status(order_fixture, gourmet_fixture):
    """Setting a current order to 'Collected' moves it into the completed list."""
    print("=== Test set current order status ===\n")
    system = order_fixture
    inventory = gourmet_fixture
    print ("\nList of Current Orders\n")
    system.staff_view_orders(inventory)
    # Fixture starts with five current orders.
    assert len(system.curr_orders) == 5
    # set status of order as completed
    system.set_order_status("Collected", "Current", 1)
    print ("\nList of Current Orders after setting order as Collected\n")
    system.staff_view_orders(inventory)
    assert len(system.curr_orders) == 4
    print("\nList of Completed Orders after setting order as Collected\n")
    system.staff_view_completed_orders(inventory)
    assert len(system.completed_orders) == 1
    assert system.completed_orders[0].status == "Collected"
def test_set_completed_order_status(order_fixture, gourmet_fixture):
    """Reverting a completed order back to 'Preparing' returns it to the current list."""
    print("\n=== Test set completed order status ===")
    system = order_fixture
    inventory = gourmet_fixture
    # Complete order 1 first so it can be reverted below.
    system.set_order_status("Collected", "Current", 1)
    print ("\nList of Current Orders\n")
    assert len(system.curr_orders) == 4
    system.staff_view_orders(inventory)
    print("\nList of Completed Orders\n")
    system.staff_view_completed_orders(inventory)
    assert len(system.completed_orders) == 1
    # set status of order as completed
    system.set_order_status("Preparing", "Completed", 1)
    assert len(system.completed_orders) == 0
    assert len(system.curr_orders) == 5
    assert order_fixture.get_curr_order_by_ID(1).status == "Preparing"
    print("\nList of Current Orders after reverting order in Completed List\n")
    system.staff_view_orders(inventory)
def test_set_current_order_status_empty_exception(order_fixture, gourmet_fixture):
    """An empty status string raises StatusError and leaves the order lists untouched."""
    print("\n=== Test set order status empty status exception===")
    system = order_fixture
    with pytest.raises(StatusError) as e:
        system.set_order_status("", "Current", 1)
    assert str(e.value) == "Please provide an Order Status from the drop down list.\n"
    assert len(system.curr_orders) == 5
def test_set_current_order_status_incorrect_exception(order_fixture, gourmet_fixture):
    """A status outside the drop-down list raises StatusError; no orders change."""
    print("\n=== Test set order status incorrect status exception===")
    system = order_fixture
    with pytest.raises(StatusError) as e:
        system.set_order_status("HELLO", "Current", 1)
    assert str(e.value) == "Please provide an Order Status from the drop down list.\n"
    assert len(system.curr_orders) == 5
def test_set_current_order_list_incorrect_exception(order_fixture, gourmet_fixture):
    """An empty list selector (current/completed) raises StatusError; no orders change."""
    print("\n=== Test set order status empty list exception===")
    system = order_fixture
    with pytest.raises(StatusError) as e:
        system.set_order_status("Collected", "", 1)
    assert str(e.value) == "Please specify if the order is current or completed.\n"
    assert len(system.curr_orders) == 5
| [
"jerrylu1987@hotmail.com"
] | jerrylu1987@hotmail.com |
f8ad4a989e47ceddb51c6c45cee9f9a6fa711be2 | dd5b38d23d71ca9f95f53bb00ec9049cdce892e4 | /corpus/retrieval/webdriverwrapper/page.py | ca0addf89e2904b74736900304da3ad9bc1b54cb | [] | no_license | lmiguelmh/selenium-web-mining | 27d7428c04c8ae2582e984042db0f88fff02e698 | d64ebf3011fcc762a77aea31a76891d698c49319 | refs/heads/master | 2021-01-19T00:36:06.701132 | 2016-12-06T20:45:20 | 2016-12-06T20:45:20 | 73,046,587 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | # based on https://github.com/dakotasmith/page-object-examples
import time
from selenium.common.exceptions import NoAlertPresentException, NoSuchElementException, StaleElementReferenceException
from selenium.webdriver.support.wait import WebDriverWait
from .errors import WaitForElementError
class Page(object):
    """Base page object wrapping a WebDriver-like driver.

    The wait_for_* helpers poll up to `timeout_tries` times, sleeping
    `sleep_interval` seconds between attempts; the for/else idiom raises
    once every attempt is exhausted.
    """
    def __init__(self, driver):
        self.driver = driver
    @property
    def referrer(self):
        # document.referrer of the page currently loaded in the driver.
        return self.driver.execute_script('return document.referrer')
    def sleep(self, seconds=0.25):
        time.sleep(seconds)
    def find_element_by_locator(self, locator):
        return self.driver.find_element_by_locator(locator)
    def find_elements_by_locator(self, locator):
        return self.driver.find_elements_by_locator(locator)
    def wait_for_available(self, locator, timeout_tries=80, sleep_interval=.25):
        # Poll until the element is available (~20s with the defaults).
        for i in range(timeout_tries):
            if self.driver.is_element_available(locator):
                break
            self.sleep(sleep_interval)
        else:
            raise WaitForElementError('Wait for available timed out')
        return True
    def wait_for_visible(self, locator, timeout_tries=80, sleep_interval=.25):
        for i in range(timeout_tries):
            if self.driver.is_visible(locator):
                break
            self.sleep(sleep_interval)
        else:
            raise WaitForElementError('Wait for visible timed out')
        return True
    def wait_for_change(self, locator, text, timeout_tries=80, sleep_interval=.25):
        # Wait until the element's text differs from the supplied `text`.
        for i in range(timeout_tries):
            try:
                e = self.driver.find_element_by_locator(locator)
                if e is not None and text != e.text:
                    break
                self.sleep(sleep_interval)
            # except NoSuchElementException as e,StaleElementReferenceException as e:
            # NOTE(review): the bare except below swallows *every* error (the
            # commented line above shows the exceptions originally intended);
            # the timeout message says 'visible' — looks copy-pasted from
            # wait_for_visible.
            except:
                pass
        else:
            raise WaitForElementError('Wait for visible timed out')
        return True
    def wait_for_hidden(self, locator, timeout_tries=80, sleep_interval=.25):
        for i in range(timeout_tries):
            if self.driver.is_visible(locator):
                self.sleep(sleep_interval)
            else:
                break
        else:
            raise WaitForElementError('Wait for hidden timed out')
        return True
    def wait_for_alert(self, timeout_tries=80, sleep_interval=.25):
        for i in range(timeout_tries):
            try:
                alert = self.driver.switch_to_alert()
                if alert.text:
                    break
            except NoAlertPresentException as nape:
                pass
            self.sleep(sleep_interval)
        else:
            raise NoAlertPresentException(msg='Wait for alert timed out')
        return True
    def _dispatch(self, l_call, l_args, d_call, d_args):
        # Placeholder — not implemented.
        pass
    def open_and_wait_for_ready_state(self, page_url, timeout=10, sleep_interval=0.5):
        self.driver.get(page_url)
        self.wait_for_load(timeout=timeout, sleep_interval=sleep_interval)
    def wait_for_load(self, timeout=10, sleep_interval=0.25, ready_state='interactive'):
        """
        :param timeout:
        :param sleep_interval:
        :param ready_state: interactive, complete https://developer.mozilla.org/en-US/docs/Web/API/Document/readyState
        :return:
        """
        # NOTE(review): the ready_state parameter is never used — the condition
        # below always accepts either 'complete' or 'interactive'.
        WebDriverWait(self.driver, timeout, sleep_interval) \
            .until(lambda d: d.execute_script('return document.readyState') == 'complete' or
                   d.execute_script('return document.readyState') == 'interactive')
| [
"lmiguelmh@gmail.com"
] | lmiguelmh@gmail.com |
a387e40ab96f93e4f85d141be0fc88027481c749 | 66fbe675f9bf45387513e49893a0c91a64755c8b | /routes/main.py | da1fb671376c291f8e0e3e3a6802418e4d3e745b | [] | no_license | Maxximl/cyber-pets | f9a1b76284fd3e21abaad54732b410f14c920943 | cb710318de8b14175ea4678ff0c55bf894ec9224 | refs/heads/master | 2023-05-31T04:29:58.162170 | 2020-11-01T08:58:15 | 2020-11-01T08:58:15 | 379,866,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | from docxtpl import DocxTemplate
from docxtpl import InlineImage
from docx.shared import Mm
import sys
import json
from datetime import date
# CLI usage: the first argument is a JSON payload whose fields fill the
# DOCX template below.
print(sys.argv[1])
data = json.loads(sys.argv[1])
print(data)
today = date.today()
print("Today's date:", today.day,today.month,today.year)
# Template containing {{ placeholders }} matching the context keys.
doc = DocxTemplate("./files/cardTemplate.docx")
context = { 'cardId': data['cardId'],
'dd' : today.day,
'month' : today.month,
'shortName' : data['shortName'],
'operatingOrganizationId' : data['operatingOrganizationId'],
'aviary' : data['aviary'],
'image' : InlineImage(doc,'./files/2.jpg',height=Mm(70)),
'age' : data['age'],
'weight' : data['weight'],
'nickName' : data['nickName'],
'breed' : data['breed'],
'sex': data['sex'],
'size' : data['size'],
'tail' : data['tail'],
'ears' : data['ears'],
'identificationMark': data['identificationMark'],
'sterilizationDate' : data['sterilizationDate'],
'veterinarian' : data['veterinarian'],
'socialized' : data['socialized'],
'workOrder' : data['workOrder'],
'captureAct' : data['captureAct'],
'catchingAddress' : data['catchingAddress'],
'workOrderDate' : data['workOrderDate'],
'receiptDate' : data['receiptDate']
}
doc.render(context)
doc.save("./files/generated.docx")
# NOTE(review): the lines below re-parse argv and expect 'name'/'age' keys
# that the card payload above never uses — looks like leftover debug code.
print(sys.argv[1])
data = json.loads(sys.argv[1])
print(data['name'])
print(data['age'])
print('Ok')
| [
"fearmax3d@gmail.com"
] | fearmax3d@gmail.com |
846caeb31f010fb1433c159129af174cdbd52294 | ffbe3405c34fd63d176c9fe21d3815be2aa0c91f | /Sunshine-2019/Entry-Exam/run.py | 158cae4cae374ea8519b651f60a9f4f4ec4e855e | [] | no_license | D1r3Wolf/CTF-writeups | 33464d871dda24d3ccda36da8c02208f83d3ac2f | ab8e0c3f38411241b03b230fa88604905dd1ac3d | refs/heads/master | 2020-04-19T15:49:01.500010 | 2019-10-17T05:46:48 | 2019-10-17T05:46:48 | 168,284,838 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | from PIL import Image
from requests import session
from bs4 import BeautifulSoup
from time import sleep
Coords = {
1 : (330,430),
2 : (330,520),
3 : (330,610),
4 : (330,700),
5 : (330,791),
6 : (330,880),
7 : (330,974),
8 : (330,1064),
9 : (330,1154),
10: (330,1244),
11: (820,430),
12: (820,520),
13: (820,610),
14: (820,700),
15: (820,791),
16: (820,881),
17: (820,974),
18: (820,1064),
19: (820,1154),
20: (820,1244)
}
def Edit(I,O,C,Img):
    """Stamp the answer image img/<O>.png onto scantron Img at coordinate C (I is unused)."""
    E = Image.open('img/'+O+'.png')
    Pix = E.load()
    Ans_P = Img.load() ; w = C[0] ; h = C[1]
    # Copy a fixed 340x60 pixel region of the answer image onto the sheet.
    for i in range(340):
        for j in range(60):
            Ans_P[w+i,h+j] = Pix[i,j]
def Answer_Sheet(D):
    """Render answers D (question number -> letter) onto the scantron template; save img/Ans.png."""
    Ans = Image.open("img/scantron.png")
    for i in Coords:
        Edit(i,D[i],Coords[i],Ans)
    Ans.save("img/Ans.png")
def End(S):
    """Fetch the final page with session S and print the flag text."""
    flag = S.get(url).text
    print("[+] Flag is :: %s"%flag)
def Post(S):
    """Upload the rendered answer sheet; print whether the server advanced to the next section."""
    File = { "file" : open("img/Ans.png",'rb').read()}
    Data = { "submit" : "value" }
    A = S.post(url,data=Data,files=File)
    if '<h1>Exam Section' not in A.text:
        print("[-] Error :: %s"%A.text)
    else:
        print("[+] Wow Move On :: %s"%(A.text.split('\n')[0]))
def Get_Answers(html_doc):
    """Parse 20 questions (5 <li> each: expression + 4 options) into {q#: 'A'..'D'}.

    Returns the sentinel 1 when the page does not contain exactly 100 <li>
    items (i.e. there is no exam left to solve).
    """
    soup = BeautifulSoup(html_doc, 'html.parser')
    Elem = [x.get_text() for x in soup.find_all("li")]
    if len(Elem) != 100 : return 1
    D = {}
    for i in range(20):
        Q = Elem[i*5] ; A = [int(x) for x in Elem[i*5+1:i*5+5]]
        # SECURITY: eval() executes arbitrary server-supplied text; acceptable
        # for a CTF solver, never for untrusted production input.
        D[i+1] = chr(65+A.index(int(eval(Q))))
    return D
def Exam(S,i):
    """Fetch exam round i, solve it, submit, and recurse until the flag page appears."""
    A = S.get(url).text
    Ans = Get_Answers(A)
    if Ans == 1:
        # No question list any more: the flag page has been reached.
        End(S) ; return 0
    print("[{0}] Grabbed Answers :: {1}".format(i,str(Ans.values())))
    Answer_Sheet(Ans)
    Post(S)
    Exam(S,i+1)
def main():
    """Open a requests session, warm it up with a GET, and start the solve loop."""
    Ss = session()
    Ss.get(url)
    Exam(Ss,0)
url = "http://ee.sunshinectf.org/exam"
main() | [
"d1r3wolf.aj@gmail.com"
] | d1r3wolf.aj@gmail.com |
1cdef2efca5b6c72f28e2bd56aee45c125d3e2e9 | 21cfc943bf4989362fe4b1063ee9451a44175098 | /kitsune/kbadge/__init__.py | 24d06aa6bd2f1657367f5d85448c307b3bbb3212 | [] | permissive | feer56/Kitsune1 | 6230a8f01b554c3bb6b4a7016edf198f7b2d74dd | 0b39cbc41cb7a067699ce8401d80205dd7c5138d | refs/heads/master | 2023-01-07T14:34:24.046353 | 2014-11-23T04:38:04 | 2014-11-23T04:38:04 | 27,058,591 | 1 | 0 | BSD-3-Clause | 2022-12-27T14:53:52 | 2014-11-24T03:14:18 | Python | UTF-8 | Python | false | false | 28 | py | BADGER_BADGE_PAGE_SIZE = 12
| [
"rehandalal@gmail.com"
] | rehandalal@gmail.com |
086cf4701861fad4be6c1fddbec68a164965c6b3 | 77db2d2381cd5d09ba97e710a1b42fee4fd2546c | /1-array-and-strings/1_isUnique.py | cb8625dc8b2bbc8c804e36d2e0a7712a082d32bd | [] | no_license | jinshunlee/ctci | 08a1ca38b26a1309e81ab51a69fa10bb9009e557 | e2a56c7cd57f1a9eb8a423a7b0daa6f9f6f73ea1 | refs/heads/master | 2020-03-27T07:00:31.934649 | 2018-10-12T15:47:34 | 2018-10-12T15:47:34 | 146,155,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | # Is Unique: Implement an algorithm to determine if a string has all unique characters. What if you
# cannot use additional data structures?
# Questions to ask interviewer:
# Is the String ASCII or Unicode
# - ASCII has 128 characters
# - Unicode has 2^16 65536 characters
# we can do a check if len(str) > no. of possible characters in either ascii or unicode
# return false
# Assuming we cannot use additional data structure:
# Time O(n^2)
# Space O(1)
def isUnique(str):
    """True when no character occurs twice; O(n^2) time, O(1) extra space."""
    last = len(str)
    for left in range(last - 1):
        for right in range(left + 1, last):
            if str[left] == str[right]:
                return False
    return True
# With auxilary data structure allowed:
def isUniqueWithSet(str):
    """True when no character repeats; tracks seen characters in a set."""
    seen = set()
    for character in str:
        if character in seen:
            return False
        seen.add(character)
    return True
# If we are allowed to modify the input string, we may consider sorting the string
# then compare adjacent characters of the string for duplicates in linear time
# Time O(n log n)
# Space O(1) -> Using Heap Sort -> no auxilary space used
if __name__ == "__main__":
print(isUnique("abcd"))
print(isUnique("abcda"))
print(isUniqueWithSet("abcd"))
print(isUniqueWithSet("abcda"))
| [
"ljsrockz@gmail.com"
] | ljsrockz@gmail.com |
dc4ab926f4640d2dca2e0f151e6964d71b572b33 | 975b2d421d3661e6770b601929d5f11d981d8985 | /msgraph/generated/groups/item/sites/item/term_store/sets/item/parent_group/sets/item/children/item/children/count/count_request_builder.py | 6ef96315893f33a545c0a998df7bfd82d1d74ac8 | [
"MIT"
] | permissive | microsoftgraph/msgraph-sdk-python | a7c551b85daadeebf76ec4ae12668664ea639b42 | 27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949 | refs/heads/main | 2023-09-03T21:45:27.989672 | 2023-08-31T06:22:18 | 2023-08-31T06:22:18 | 534,665,999 | 135 | 18 | MIT | 2023-09-14T11:04:11 | 2022-09-09T14:00:17 | Python | UTF-8 | Python | false | false | 4,755 | py | from __future__ import annotations
from dataclasses import dataclass, field
from kiota_abstractions.base_request_builder import BaseRequestBuilder
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from ...............models.o_data_errors.o_data_error import ODataError
class CountRequestBuilder(BaseRequestBuilder):
    """
    Provides operations to count the resources in the collection.
    """
    def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
        """
        Instantiates a new CountRequestBuilder and sets the default values.
        Args:
            path_parameters: The raw url or the Url template parameters for the request.
            request_adapter: The request adapter to use to execute the requests.
        """
        # URL template targets the $count endpoint of a term's children collection;
        # %24search / %24filter are the URL-encoded $search / $filter OData options.
        super().__init__(request_adapter, "{+baseurl}/groups/{group%2Did}/sites/{site%2Did}/termStore/sets/{set%2Did}/parentGroup/sets/{set%2Did1}/children/{term%2Did}/children/$count{?%24search,%24filter}", path_parameters)
    async def get(self,request_configuration: Optional[CountRequestBuilderGetRequestConfiguration] = None) -> Optional[int]:
        """
        Get the number of the resource
        Args:
            request_configuration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: Optional[int]
        """
        request_info = self.to_get_request_information(
            request_configuration
        )
        # Deferred import avoids a circular dependency with the models package.
        from ...............models.o_data_errors.o_data_error import ODataError
        # All 4XX/5XX responses are deserialized into ODataError exceptions.
        error_mapping: Dict[str, ParsableFactory] = {
            "4XX": ODataError,
            "5XX": ODataError,
        }
        if not self.request_adapter:
            raise Exception("Http core is null")
        # $count returns a bare integer (text/plain), hence send_primitive_async.
        return await self.request_adapter.send_primitive_async(request_info, "int", error_mapping)
    def to_get_request_information(self,request_configuration: Optional[CountRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
        """
        Get the number of the resource
        Args:
            request_configuration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: RequestInformation
        """
        request_info = RequestInformation()
        request_info.url_template = self.url_template
        request_info.path_parameters = self.path_parameters
        request_info.http_method = Method.GET
        # The count endpoint responds with plain text, not JSON.
        request_info.headers["Accept"] = ["text/plain"]
        if request_configuration:
            request_info.add_request_headers(request_configuration.headers)
            request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
            request_info.add_request_options(request_configuration.options)
        return request_info
@dataclass
class CountRequestBuilderGetQueryParameters():
    """
    Query parameters accepted by the $count request (OData $filter / $search).
    """
    def get_query_parameter(self,original_name: Optional[str] = None) -> str:
        """
        Maps the query parameters names to their encoded names for the URI template parsing.
        Args:
            original_name: The original query parameter name in the class.
        Returns: str
        """
        if not original_name:
            raise TypeError("original_name cannot be null.")
        # Table of attribute name -> URL-encoded OData option name.
        encoded_names = {
            "filter": "%24filter",
            "search": "%24search",
        }
        return encoded_names.get(original_name, original_name)

    # Filter items by property values
    filter: Optional[str] = None
    # Search items by search phrases
    search: Optional[str] = None
from kiota_abstractions.base_request_configuration import BaseRequestConfiguration
@dataclass
class CountRequestBuilderGetRequestConfiguration(BaseRequestConfiguration):
    # NOTE(review): the duplicate class-level import below is a kiota
    # code-generation artifact; it is redundant but harmless at runtime.
    from kiota_abstractions.base_request_configuration import BaseRequestConfiguration
    """
    Configuration for the request such as headers, query parameters, and middleware options.
    """
    # Request query parameters
    # NOTE(review): the annotation refers to CountRequestBuilderGetQueryParameters
    # as a nested class of CountRequestBuilder, but in this file it is defined at
    # module level — confirm against the generator output.
    query_parameters: Optional[CountRequestBuilder.CountRequestBuilderGetQueryParameters] = None
| [
"GraphTooling@service.microsoft.com"
] | GraphTooling@service.microsoft.com |
f7fd0940f186d71b367064d0d5edb6e5e5126639 | c228f73222f0a29b06210bddf6ed1364353d93aa | /LeetCode/p0143/III/reorder-list.py | fd6d2498624ba493b9f9427708d6f7e67296af06 | [] | no_license | Ynjxsjmh/PracticeMakesPerfect | 40e2071e7f34ea7ae02a11f93af21e89947001c6 | 860590239da0618c52967a55eda8d6bbe00bfa96 | refs/heads/master | 2023-04-30T00:35:14.530113 | 2023-04-14T15:06:41 | 2023-04-14T15:06:41 | 167,309,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | # -*- coding: utf-8 -*-
# ********************************************************************************
# Copyright © 2023 Ynjxsjmh
# File Name: reorder-list.py
# Author: Ynjxsjmh
# Email: ynjxsjmh@gmail.com
# Created: 2023-04-14 15:20:00
# Last Updated:
# By: Ynjxsjmh
# Description:
# ********************************************************************************
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    def reorderList(self, head):
        """Reorder L0->L1->...->Ln-1->Ln into L0->Ln->L1->Ln-1->... in place.

        :type head: ListNode
        :rtype: None Do not return anything, modify head in-place instead.
        """
        # Phase 1: advance a fast runner twice as quickly so that ``mid``
        # lands on the middle node when ``runner`` falls off the end.
        mid = runner = head
        while runner and runner.next:
            mid = mid.next
            runner = runner.next.next
        # Phase 2: detach everything after the middle and reverse it.
        rev, node = None, mid.next
        mid.next = None
        while node:
            node.next, rev, node = rev, node, node.next
        # Phase 3: weave the reversed back half into the front half.
        first, second = head, rev
        while first and second:
            first.next, second.next, first, second = (
                second, first.next, first.next, second.next)
| [
"ynjxsjmh@gmail.com"
] | ynjxsjmh@gmail.com |
c6074cb5f36958ef66d7d67d94fb9fec5cc148c8 | 3db21b1fc8998ef51918c7ae76961c6decb26853 | /app/settings.py | 9c3612ed292f8b032b36cbbb08d17554ac563c4d | [
"MIT"
] | permissive | devdazed/django-docker-template | c59f0e4d561fa8cc889f7c29682b169a77fd4e4f | 520968ac5cd54070885de41f0e725c310f1cf380 | refs/heads/main | 2023-01-28T01:18:06.724011 | 2020-12-03T23:27:48 | 2020-12-03T23:27:48 | 318,345,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,137 | py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
import sys
import logging
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the hard-coded fallback key must never reach production;
# always supply DJANGO_SECRET_KEY in the environment there.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '4p93o1^6%hugp(0)g(t))6t_#6b69#xu4@@ft99+5cxyq+y6+z')
# SECURITY WARNING: don't run with debug turned on in production!
# Environment variables arrive as strings, so the previous
# ``os.environ.get('DEBUG', True)`` made DEBUG=False evaluate truthy.
# Parse the common boolean spellings explicitly; the default stays True.
DEBUG = str(os.environ.get('DEBUG', True)).strip().lower() in ('1', 'true', 'yes', 'on')
ENVIRONMENT = os.environ.get('ENVIRONMENT', 'development')
# Log level tracks DEBUG: verbose locally, INFO otherwise.
LEVEL = 'INFO'
if DEBUG:
    LEVEL = 'DEBUG'
# True when invoked as ``manage.py test``.
TESTING = len(sys.argv) > 1 and sys.argv[1] == 'test'
if TESTING:
    # Silence all logging while the test suite runs.
    logging.disable(logging.CRITICAL)
# Console-only logging; handler and root level follow DEBUG via LEVEL above.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'format': {
            'format': '%(levelname)s %(asctime)-15s %(module)s %(message)s',
        }
    },
    'handlers': {
        'console': {
            'level': LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'format',
        }
    },
    # First config for root logger: console1 -> fmt1
    'root': {
        'handlers': ['console'],
        'level': LEVEL,
        'propagate': True,
    }
}
ALLOWED_HOSTS = ['localhost',
                 '127.0.0.1',
                 'app']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    # Auth
    'rest_framework.authtoken',
    # REST
    'rest_framework',
    # Celery
    'django_celery_results',
    # Local Apps
    'app.health.apps.HealthConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Postgres connection; every value can be overridden through the environment
# (defaults match the docker-compose service names).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get('POSTGRES_DB', 'postgres'),
        'USER': os.environ.get('POSTGRES_USER', 'postgres'),
        'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'postgres'),
        'HOST': os.environ.get('POSTGRES_HOST', 'db'),
        'PORT': os.environ.get('POSTGRES_PORT', 5432),
    }
}
# Redis-backed cache (django-redis); also used by Celery below.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": os.environ.get('REDIS_LOCATION', 'redis://redis:6379/1'),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient"
        },
        "KEY_PREFIX": "django-"
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "app/dist/static")
# Arguments passed to ``manage.py shell_plus --notebook`` (django-extensions).
NOTEBOOK_ARGUMENTS = [
    # exposes IP
    '--ip=0.0.0.0',
    # disables the browser
    '--no-browser',
    '--allow-root',
    "--NotebookApp.password='argon2:$argon2id$v=19$m=10240,t=10,p=8$HohumliP/VlLzFyc8lWIhw$vrw/hYgJXbcLc7ZKqFnWLA'" # nativo is the password
]
# Celery Settings
# Task results go to the database (django-celery-results); cache via Django.
CELERY_RESULT_BACKEND = 'django-db'
CELERY_CACHE_BACKEND = 'django-cache'
| [
"devdazed@users.noreply.github.com"
] | devdazed@users.noreply.github.com |
ef1f17171644bd371ed1ca300b535c4846c8caf0 | 85d992386122ac1d7fe6f1a45c6ac0bb27951fae | /SynGuar/helper_eval/eval_invoke.py | b3256ed66d650388006ff70e87551614177e24e8 | [
"MIT"
] | permissive | HALOCORE/SynGuar | 44765206fbba5bea255db1c59ba23b50c950bd77 | 8f7f9ba52e83091ad3def501169fd60d20b28321 | refs/heads/master | 2023-07-16T02:05:23.131685 | 2021-08-24T06:53:03 | 2021-08-24T06:53:03 | 347,832,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,282 | py | import os
import requests
import random
from .eval_consts import *
def _shuffled_example_files(example_dir, suffix):
    """Return example file names in *example_dir* ending with *suffix*, shuffled."""
    example_files = [name for name in os.listdir(example_dir) if name.endswith(suffix)]
    random.shuffle(example_files)
    return example_files


def _post_request(endpoint, request_data):
    """POST *request_data* as JSON to *endpoint*; fail loudly on a non-200 reply.

    Raises:
        RuntimeError: if the server answers with any status other than 200.
            (Previously ``assert(False)``, which is stripped under ``python -O``.)
    """
    print("# [eval] POST ", request_data)
    resp = requests.post(endpoint, json=request_data)
    if resp.status_code != 200:
        print("# [eval] ERROR: status_code", resp.status_code)
        raise RuntimeError("unexpected status code: %d" % resp.status_code)


def invoke_strprose_evaluation_requests():
    """Submit StrPROSE sample-bound evaluation jobs for several epsilon targets."""
    strprose_example_files = _shuffled_example_files(EXAMPLE_STRPROSE_FULLDIR, ".csv")
    print("#strprose_example_files", len(strprose_example_files))
    for example_file in strprose_example_files:
        for epsilon in [0.02, 0.05, 0.1]:
            _post_request(SYNGUAR_API_ENDPOINT, {
                "synthesizer": "StrPROSE",
                "example_file": example_file,
                "epsilon": epsilon,
                "delta": 0.02,
                "k": 1,
            })


def invoke_strprose_spacedrop_requests():
    """Submit StrPROSE space-drop synthesis jobs at the configured sample size."""
    strprose_example_files = _shuffled_example_files(EXAMPLE_STRPROSE_FULLDIR, ".csv")
    print("#strprose_example_files", len(strprose_example_files))
    for example_file in strprose_example_files:
        _post_request(SYNTH_API_ENDPOINT, {
            "synthesizer": "StrPROSE",
            "example_file": example_file,
            "example_size": STRPROSE_SPACEDROP_SAMPLE_SIZE,
            "no_counting": False,
            "cache_only": False,
            "keepalive": 0,
        })


def invoke_strstun_evaluation_requests():
    """Submit StrSTUN sample-bound evaluation jobs (epsilon 0.05, k = 20)."""
    strstun_example_files = _shuffled_example_files(EXAMPLE_STRSTUN_FULLDIR, ".eg.txt")
    print("#strstun_example_files", len(strstun_example_files))
    for example_file in strstun_example_files:
        _post_request(SYNGUAR_API_ENDPOINT, {
            "synthesizer": "StrSTUN",
            "example_file": example_file,
            "epsilon": 0.05,
            "delta": 0.02,
            "k": 20,
        })


def invoke_strstun_4examples_requests():
    """Submit StrSTUN synthesis jobs with a fixed example size of 4."""
    strstun_example_files = _shuffled_example_files(EXAMPLE_STRSTUN_FULLDIR, ".eg.txt")
    print("#strstun_example_files", len(strstun_example_files))
    for example_file in strstun_example_files:
        _post_request(SYNTH_API_ENDPOINT, {
            "synthesizer": "StrSTUN",
            "example_file": example_file,
            "example_size": 4,
            "no_counting": False,
            "cache_only": False,
            "keepalive": 0,
        })
"wbprosci@outlook.com"
] | wbprosci@outlook.com |
336758b25cd6431a76d65e10428c7ffdd76061a4 | 956270ab378baf0386015f9b1aae6f3702ebfe01 | /lection4/code/ui/fixtures.py | bfcf340b6d2653e89b1f2437679eefd643db44ec | [] | no_license | nekitvand/qa-python | 4bf5daac8d2f08373f8f7efd767a2b9d8c4489eb | 7956d172a8b30ab5dfd658765531446b441d9700 | refs/heads/master | 2022-06-23T04:26:33.252883 | 2020-05-14T11:40:09 | 2020-05-14T11:40:09 | 282,399,084 | 0 | 2 | null | 2020-07-25T08:01:41 | 2020-07-25T08:01:40 | null | UTF-8 | Python | false | false | 2,771 | py | import pytest
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from ui.pages.base import BasePage
from ui.pages.main import MainPage
from ui.pages.python_events import PythonEventsPage
from ui.pages.bad_ssl import BadSSLPage
from ui.pages.download import DownloadPage
from ui.pages.python_382 import PythonPage382
# NOTE(review): name is missing the "n" of "Unsupported"; renaming would break
# importers, so the typo is documented rather than fixed.
class UsupportedBrowserException(Exception):
    """Raised when the configured browser name is not supported by the fixtures."""
    pass
# Page-object fixtures: each wraps the shared ``driver`` in one page class.
@pytest.fixture(scope='function')
def base_page(driver):
    return BasePage(driver)
@pytest.fixture(scope='function')
def main_page(driver):
    return MainPage(driver)
@pytest.fixture(scope='function')
def bad_ssl_page(driver):
    return BadSSLPage(driver)
@pytest.fixture(scope='function')
def download_page(driver):
    return DownloadPage(driver)
@pytest.fixture(scope='function')
def python382_page(driver):
    return PythonPage382(driver)
@pytest.fixture(scope='function')
def driver(config):
    """Yield a WebDriver built from ``config`` (browser/version/url/download_dir) and close it after the test."""
    browser = config['browser']
    version = config['version']
    url = config['url']
    download_dir = config['download_dir']
    if browser == 'chrome':
        options = ChromeOptions()
        options.add_argument("--window-size=800,600")
        # Route file downloads into the test-controlled directory.
        prefs = {"download.default_directory": download_dir}
        options.add_experimental_option('prefs', prefs)
        capabilities = {'acceptInsecureCerts': True,
                        'browserName': 'chrome',
                        'version': version,
                        }
        # Chrome runs against a Selenium server/grid on localhost:4444.
        driver = webdriver.Remote(command_executor='http://127.0.0.1:4444/wd/hub/',
                                  options=options,
                                  desired_capabilities=capabilities
                                  )
    elif browser == 'firefox':
        # Firefox runs locally via geckodriver fetched by webdriver-manager.
        manager = GeckoDriverManager(version=version)
        driver = webdriver.Firefox(executable_path=manager.install())
    else:
        raise UsupportedBrowserException(f'Usupported browser: "{browser}"')
    driver.get(url)
    driver.maximize_window()
    yield driver
    driver.close()
driver.close()
@pytest.fixture(scope='function', params=['chrome', 'firefox'])
def all_drivers(config, request):
    """Parametrized fixture: runs the test once per browser, using local latest-version drivers."""
    browser = request.param
    url = config['url']
    if browser == 'chrome':
        manager = ChromeDriverManager(version='latest')
        driver = webdriver.Chrome(executable_path=manager.install())
    elif browser == 'firefox':
        manager = GeckoDriverManager(version='latest')
        driver = webdriver.Firefox(executable_path=manager.install())
    else:
        raise UsupportedBrowserException(f'Usupported browser: "{browser}"')
    driver.maximize_window()
    driver.get(url)
    yield driver
    driver.close()
| [
"cherednichenko.ya@gmail.com"
] | cherednichenko.ya@gmail.com |
1c4a287283f0584e7a1b97de9d088876b897084d | e280eb99dcc23a512c7c1963c489a74dd2a52220 | /tests/__init__.py | 12f42d1035f7d7a3da724bc3f5d7f1260260c577 | [
"MIT"
] | permissive | Zwork101/Clamor | 63669967759624570779c581812686646004c846 | 13222b90532938e6ebdbe8aea0430512e7d22817 | refs/heads/master | 2020-06-23T03:02:02.803982 | 2019-07-21T08:37:14 | 2019-07-21T08:37:14 | 198,487,734 | 0 | 0 | MIT | 2019-07-23T18:32:35 | 2019-07-23T18:32:35 | null | UTF-8 | Python | false | false | 381 | py | # -*- coding: utf-8 -*-
import os
import sys
import unittest.runner
_dir = os.path.dirname(__file__)
def suite():
    """Discover and return the package's test suite (test_*.py under this directory)."""
    return unittest.TestLoader().discover(_dir, 'test_*.py')
if __name__ == '__main__':
    # Run the discovered suite directly; exit code 0 on success, 1 on failure.
    runner = unittest.TextTestRunner()
    result = runner.run(suite())
    sys.exit(not result.wasSuccessful())
| [
"valentin.be@protonmail.com"
] | valentin.be@protonmail.com |
aaad7a9ee22293811ac818e5834ec90629ab5074 | 6e9d9b9cf4726acf87974897d84ef6f64b651493 | /api_gitlab/gitlab.py | c0c6c762d871927ef91ac0267df6a3a498fa43d4 | [] | no_license | alalek/common-pullrequest-plugin | 072a6e1fe1f8f3390a2d67804c2b6aa8c44aaab5 | 04dba1c5dbbf4d69c2d0dda9d5aad4eaed7beed3 | refs/heads/master | 2021-01-22T21:13:23.727184 | 2015-10-01T22:06:58 | 2015-10-01T22:45:54 | 28,002,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,539 | py | #!/usr/bin/env python
'''
Client for GitLab API v3
'''
import json, urllib, urllib2
from twisted.web.client import Agent, readBody
from twisted.internet import defer, reactor
from twisted.web.http_headers import Headers
from twisted.web.iweb import IBodyProducer
from zope.interface.declarations import implements
TIMEOUT = 60
# Exception base class
class Error(Exception):
    """Base API error; carries the request/response pair that produced it and stringifies to the URL."""
    def __init__(self, url, request, response):
        Exception.__init__(self, url)
        self.request = request
        self.response = response
# 404 Exception: raised when the server answers "not found" for the URL.
class ErrorNotFound(Error):
    pass
# NOTE(review): this module targets Python 2 (urllib2, dict.iteritems,
# ``except ..., e`` syntax); the ``async`` parameter name below is a reserved
# keyword from Python 3.7 on, so the file cannot run unmodified on Python 3.
class GitLab(object):
    """Thin client for GitLab API v3 with optional Twisted-based async mode.

    Attribute access builds URL paths dynamically, e.g.
    ``gl.projects(42).merge_requests.get(state='opened')``.
    """
    # HTTP status of the most recent request.
    status = 0
    x_ratelimit_remaining = -1
    x_ratelimit_limit = -1
    def __init__(self, apiURL, userAgent, private_token, async=False):
        self._apiUrl = apiURL;
        self.userAgent = userAgent
        self._private_token = private_token
        self._async = async
    def _process(self, method, path, **kw):
        """Execute one API call; sync via urllib2 or async via Twisted depending on ``self._async``."""
        # prepare HTTP request input parameters
        url_params = None
        http_body = None
        if method == 'GET' and kw:
            args = []
            for key, value in kw.iteritems():
                args.append('%s=%s' % (key, urllib.quote(str(value))))
            url_params = '&'.join(args)
        if method in ['POST', 'PATCH', 'PUT']:
            # Body is JSON-encoded keyword arguments.
            http_body = json.dumps(kw)
        url = '%s%s%s' % (self._apiUrl, path, '' if url_params is None else '?' + url_params)
        # NOTE(review): ``_parse_headers`` is a local function here, yet it is
        # invoked as ``self._parse_headers(...)`` below — confirm the original
        # indentation upstream; as shown this scoping looks inconsistent.
        def _parse_headers(self, headers):
            # Record the response status and report whether the body is JSON.
            isValid = False
            for k in headers:
                h = k.lower()
                if h == 'status':
                    self.status = int(headers[k].split(' ')[0])
                elif h == 'content-type':
                    isValid = headers[k].startswith('application/json')
            return isValid
        if not self._async:
            # process synchronous call
            request = urllib2.Request(url, data=http_body)
            # Force the desired HTTP verb (urllib2 only knows GET/POST natively).
            request.get_method = lambda: method
            request.add_header('User-Agent', self.userAgent)
            request.add_header('PRIVATE-TOKEN', self._private_token)
            if method in ['POST', 'PATCH', 'PUT']:
                request.add_header('Content-Type', 'application/x-www-form-urlencoded')
            try:
                response = urllib2.build_opener(urllib2.HTTPHandler, urllib2.HTTPSHandler).open(request, timeout=TIMEOUT)
                isValid = self._parse_headers(response.headers)
                if isValid:
                    return json.loads(response.read())
            except urllib2.HTTPError, e:
                # HTTP errors still carry a JSON body; convert to typed exceptions.
                isValid = self._parse_headers(e.headers)
                if isValid:
                    json_data = json.loads(e.read())
                    req = dict(method=method, url=url)
                    resp = dict(code=e.code, json=json_data)
                    if resp['code'] == 404:
                        raise ErrorNotFound(url, req, resp)
                    raise Error(url, req, resp)
        else:
            # process asynchronous calls (Twisted)
            if method in ['GET', 'DELETE']:
                @defer.inlineCallbacks
                def asyncGet():
                    agent = Agent(reactor)
                    headers = {'User-Agent':[self.userAgent],
                        'PRIVATE-TOKEN':[self._private_token]}
                    response = yield agent.request(method, url, headers=Headers(headers))
                    self.status = response.code
                    # Flatten Twisted's multi-valued headers to single values.
                    resp_headers = {}
                    for k in response.headers._rawHeaders:
                        resp_headers[k] = response.headers._rawHeaders[k][0];
                    isValid = self._parse_headers(resp_headers)
                    if isValid:
                        body = yield readBody(response)
                        defer.returnValue(json.loads(body))
                    defer.returnValue(None)
                return asyncGet()
            if method in ['POST', 'PATCH', 'PUT']:
                @defer.inlineCallbacks
                def asyncPost():
                    agent = Agent(reactor)
                    headers = {'User-Agent':[self.userAgent],
                        'PRIVATE-TOKEN':[self._private_token]}
                    # Minimal IBodyProducer that writes the JSON body in one shot.
                    class StringProducer(object):
                        implements(IBodyProducer)
                        def __init__(self):
                            self.length = len(http_body)
                        def startProducing(self, consumer):
                            consumer.write(http_body)
                            return defer.succeed(None)
                        def stopProducing(self):
                            pass
                        def pauseProducing(self):
                            pass
                        def resumeProducing(self):
                            pass
                    response = yield agent.request(method, url, headers=Headers(headers), bodyProducer=StringProducer() if http_body else None)
                    resp_headers = {}
                    for k in response.headers._rawHeaders:
                        resp_headers[k] = response.headers._rawHeaders[k][0];
                    isValid = self._parse_headers(resp_headers)
                    if isValid:
                        body = yield readBody(response)
                        defer.returnValue(json.loads(body))
                    defer.returnValue(None)
                return asyncPost()
    '''
    Helper classes for smart path processing
    '''
    def __getattr__(self, attr):
        # gl.projects -> _Entry('/projects'); chained attributes extend the path.
        return self._Entry(self, '/%s' % attr)
    class _EndPoint(object):
        # Terminal of a path chain: calling it issues the HTTP request.
        def __init__(self, client, path, method):
            self._client = client
            self._path = path
            self._method = method
        def __call__(self, **kw):
            return self._client._process(self._method, self._path, **kw)
    class _Entry(object):
        # Intermediate path segment; grows via attribute access or call args.
        def __init__(self, client, path):
            self._client = client
            self._path = path
        def __getattr__(self, attr):
            # HTTP verb names terminate the chain in an _EndPoint.
            if attr in ['get', 'put', 'post', 'patch', 'delete']:
                return self._client._EndPoint(self._client, self._path, attr.upper())
            name = '%s/%s' % (self._path, attr)
            return self._client._Entry(self._client, name)
        def __call__(self, *args):
            # Positional args become additional path components (e.g. IDs).
            if len(args) == 0:
                return self
            name = '%s/%s' % (self._path, '/'.join([str(arg) for arg in args]))
            return self._client._Entry(self._client, name)
| [
"alexander.alekhin@itseez.com"
] | alexander.alekhin@itseez.com |
678e6f4f6eccac9411530829287f031a5e9e7553 | 712eba52393391a408e816dab577ea898fac9033 | /fixkori_api/apps.py | bd678f6601f41033797e64c2f74e52db717fd535 | [
"MIT"
] | permissive | ShovanSarker/fixkori | 9076942423b36cdda1d600f58839741a6491ccee | 3b4415de28e774729dd84c16bc12385a1c9393e4 | refs/heads/master | 2020-05-07T17:56:32.473041 | 2019-08-01T15:30:40 | 2019-08-01T15:30:40 | 180,747,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class FixkoriApiConfig(AppConfig):
    """Django AppConfig for the fixkori_api application."""
    name = 'fixkori_api'
| [
"exorcist.shovan@gmail.com"
] | exorcist.shovan@gmail.com |
045b679f8ed8927925361cefafd32726512871b8 | 1387aeea3b4e224bb906348f6ec3ec1f2111e80e | /resources/2019-06-10/train.py | 3a8205d3c48a0cde0f3955632d75adc445d8ae93 | [] | no_license | openscoring/openscoring.github.io | 2594cbd68dbd7871e3df4e9b055242eb04b6d538 | 33b2b6269c1102b6dd9a21f7e2d4634c579346b7 | refs/heads/master | 2023-07-20T06:06:14.742950 | 2023-07-17T18:06:25 | 2023-07-17T18:06:25 | 17,081,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | from sklearn_pandas import DataFrameMapper
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer
from sklearn2pmml.decoration import Alias, CategoricalDomain, ContinuousDomain
from sklearn2pmml.preprocessing import ExpressionTransformer
import pandas
df = pandas.read_csv("audit.csv")
cat_columns = ["Education", "Employment", "Marital", "Occupation"]
cont_columns = ["Age", "Hours", "Income"]
X = df[cat_columns + cont_columns]
y = df["Adjusted"]
mapper = DataFrameMapper(
[([cat_column], [CategoricalDomain(), LabelBinarizer()]) for cat_column in cat_columns] +
[(cont_columns, ContinuousDomain())] +
[(["Income", "Hours"], Alias(ExpressionTransformer("X[0] / (X[1] * 52.0)"), "Hourly_Income", prefit = True))]
)
feature_eng_pipeline = Pipeline([
("mapper", mapper)
])
Xt = feature_eng_pipeline.fit_transform(X)
Xt = Xt.astype(float)
from sklearn2pmml.tpot import make_pmml_config
from tpot.config import classifier_config_dict
# Classes supported by TPOT
tpot_config = classifier_config_dict
# Union between classes supported by TPOT and SkLearn2PMML
tpot_pmml_config = make_pmml_config(tpot_config)
# Exclude ensemble model types
tpot_pmml_config = { key: value for key, value in tpot_pmml_config.items() if not (key.startswith("sklearn.ensemble.") or key.startswith("xgboost.")) }
# Exclude some more undesirable elementary model types
del tpot_pmml_config["sklearn.neighbors.KNeighborsClassifier"]
from tpot import TPOTClassifier
classifier = TPOTClassifier(generations = 7, population_size = 11, scoring = "roc_auc", config_dict = tpot_pmml_config, random_state = 13, verbosity = 2)
classifier.fit(Xt, y)
tpot_pipeline = classifier.fitted_pipeline_
from sklearn2pmml import make_pmml_pipeline, sklearn2pmml
# Combine fitted sub-pipelines to a fitted pipeline
pipeline = Pipeline(feature_eng_pipeline.steps + tpot_pipeline.steps)
pmml_pipeline = make_pmml_pipeline(pipeline, active_fields = X.columns.values, target_fields = [y.name])
#pmml_pipeline.verify(X.sample(50, random_state = 13, replace = False), precision = 1e-11, zeroThreshold = 1e-11)
sklearn2pmml(pmml_pipeline, "TPOTAudit.pmml", with_repr = True) | [
"villu.ruusmann@gmail.com"
] | villu.ruusmann@gmail.com |
4d7fbb683f749be440f1e3f86814a797b247768e | 47fc606bcdfe5b563409386c94f745f920408851 | /src/python/twitter/common/python/marshaller.py | b5c29a06a99c6afbea083559b3636740c63a4085 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | ewhauser/commons | 2ef443c4f0be2fbbf1ff3226ed35058a7cc8254a | 0777b346cf1b32722b7b5f6ae9e6593fe185de22 | refs/heads/master | 2021-01-18T06:00:06.901691 | 2013-06-11T22:14:55 | 2013-06-11T22:14:55 | 1,741,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | from imp import get_magic
import marshal
import struct
import time
from twitter.common.lang import Compatibility
class CodeTimestamp(object):
  """Timestamp field of a .pyc header (bytes 4-8, packed as unsigned int)."""

  TIMESTAMP_RANGE = (4, 8)

  @classmethod
  def from_timestamp(cls, timestamp):
    """Build from an integer epoch timestamp.

    Bug fix: the classmethod previously omitted ``cls``, so any call with an
    argument raised TypeError.
    """
    return cls(timestamp)

  @classmethod
  def from_object(cls, pyc_object):
    """Extract the timestamp from raw .pyc bytes.

    Stores the raw integer seconds; the old code stored a ``time.struct_time``,
    which ``to_object`` could not re-pack with ``struct.pack('I', ...)``.
    """
    stamp = struct.unpack('I', pyc_object[slice(*cls.TIMESTAMP_RANGE)])[0]
    return cls(stamp)

  def __init__(self, stamp=None):
    # Default is evaluated per call; the old ``stamp=time.time()`` default was
    # frozen once at import time.
    self._stamp = int(time.time()) if stamp is None else stamp

  def to_object(self):
    """Return the 4-byte packed timestamp."""
    return struct.pack('I', int(self._stamp))
class CodeMarshaller(object):
  """(De)serialize Python code objects to/from .pyc-format byte strings.

  Layout: 4-byte magic, 4-byte integer timestamp, marshalled code object.
  """

  class InvalidCode(Exception):
    """Raised for a bad magic number, wrong input type, or bad marshal data."""

  MAGIC = struct.unpack('I', get_magic())[0]
  MAGIC_RANGE = (0, 4)
  TIMESTAMP_RANGE = (4, 8)

  @staticmethod
  def from_pyc(pyc):
    """Parse a .pyc blob (bytes or a file-like object) into a CodeMarshaller.

    Raises:
        CodeMarshaller.InvalidCode: on a wrong input type, bad magic number,
            or unmarshalable code section.
    """
    if not isinstance(pyc, Compatibility.bytes) and not hasattr(pyc, 'read'):
      raise CodeMarshaller.InvalidCode(
          "CodeMarshaller.from_pyc expects a code or file-like object!")
    if not isinstance(pyc, Compatibility.bytes):
      pyc = pyc.read()
    pyc_magic = struct.unpack('I', pyc[slice(*CodeMarshaller.MAGIC_RANGE)])[0]
    if pyc_magic != CodeMarshaller.MAGIC:
      raise CodeMarshaller.InvalidCode("Bad magic number! Got 0x%X" % pyc_magic)
    # Bug fix: keep the raw integer timestamp. The old code stored
    # ``time.localtime(...)`` (a struct_time), which to_pyc() could not
    # re-pack with struct.pack('I', ...).
    stamp = struct.unpack('I', pyc[slice(*CodeMarshaller.TIMESTAMP_RANGE)])[0]
    try:
      code = marshal.loads(pyc[8:])
    except ValueError as e:
      raise CodeMarshaller.InvalidCode("Unmarshaling error! %s" % e)
    return CodeMarshaller(code, stamp)

  @staticmethod
  def from_py(py, filename):
    """Compile Python source text and wrap it with the current timestamp."""
    stamp = int(time.time())
    code = compile(py, filename, 'exec')
    return CodeMarshaller(code, stamp)

  def __init__(self, code, stamp):
    self._code = code
    self._stamp = stamp

  @property
  def code(self):
    """The wrapped code object."""
    return self._code

  def to_pyc(self):
    """Serialize back to .pyc bytes: magic, timestamp, marshalled code."""
    sio = Compatibility.BytesIO()
    sio.write(struct.pack('I', CodeMarshaller.MAGIC))
    sio.write(struct.pack('I', self._stamp))
    sio.write(marshal.dumps(self._code))
    return sio.getvalue()
| [
"jsirois@twitter.com"
] | jsirois@twitter.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.