blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c6f8e2d69abda1d81242ce8afaa0e6977564abe | 32c062ee5c40182cfd1e9e8e72ff656646c1f07c | /relationships/models.py | 12ff96e1ee902e64da0b36cfbd46f140dd3bcdc3 | [
"MIT"
] | permissive | walison17/pulso-api | c2c312621822eab2decbc3e503f1105cf877c035 | b9edfc3f6042676dbdb50d7efcdb461a19ea90ed | refs/heads/master | 2021-09-15T13:43:21.059131 | 2018-06-03T01:45:33 | 2018-06-03T01:45:33 | 122,399,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | from django.db import models
from django.conf import settings
from django.utils import timezone
from django.dispatch import Signal
from django.core.exceptions import ValidationError
class Follow(models.Model):
    """Directed "follows" relationship: ``from_user`` follows ``to_user``.

    The (from_user, to_user) pair is unique, so a given user can follow
    another user at most once.
    """
    from_user = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='%(class)s_followers',
    )
    to_user = models.ForeignKey(
        to=settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='%(class)s_following',
    )
    # Creation timestamp; a callable default (unlike auto_now_add) can still
    # be overridden explicitly when constructing instances.
    created_at = models.DateTimeField(default=timezone.now)

    class Meta:
        unique_together = ('from_user', 'to_user')

    def save(self, *args, **kwargs):
        """Persist the relationship, rejecting self-follows.

        The check lives in ``save()`` (not just form validation) so it also
        applies to programmatic saves.
        """
        if self.from_user != self.to_user:
            return super(Follow, self).save(*args, **kwargs)
        raise ValidationError('O usuário não pode seguir ele mesmo.')
# Module-level signal fired when a user gains a new follower.
# NOTE(review): ``providing_args`` is purely documentational and was removed
# in Django 3.0 — confirm the installed Django version before upgrading.
new_follower = Signal(providing_args=['follower', 'followee'])
| [
"walisonfilipe@hotmail.com"
] | walisonfilipe@hotmail.com |
227366a6f8bf3422f20ad0997d757866923e26e8 | 1e999187c24790c15b4ff1b79c4ebe5a58318e97 | /CompStrDefns/Pymol/strands_3FYX.py | d540ca50285a7eb55c52844fce57fa3e55f1ac76 | [] | no_license | mwfranklin/OMBBNetwork | 34b542dbc3dc8a08073993053afb63156fde8a87 | ac4b5f175cbd9b37adf90761cc2754657a523cec | refs/heads/master | 2021-07-06T12:12:15.537319 | 2018-12-12T21:48:31 | 2018-12-12T21:48:31 | 132,608,629 | 0 | 1 | null | 2018-05-17T21:31:16 | 2018-05-08T12:52:52 | Makefile | UTF-8 | Python | false | false | 1,612 | py | from pymol import cmd, stored
# Load the 3FYX structure and colour each beta-strand of the barrel.
cmd.load("/Users/meghan/Documents/PhD/GitProjects/v6_2018_Network/CompStrDefns/CompPDBs/3FYX.pdb")
cmd.hide("everything", "all")
cmd.color("wheat", "all")

# (residue range, colour) for selections Astrand0 .. Astrand15, in order.
_strands = [
    ("10-22", "white"),
    ("36-51", "red"),
    ("55-65", "orange"),
    ("80-90", "purple"),
    ("94-100", "yellow"),
    ("135-143", "green"),
    ("148-165", "cyan"),
    ("169-181", "blue"),
    ("184-195", "purple"),
    ("211-221", "red"),
    ("226-240", "orange"),
    ("249-263", "yellow"),
    ("270-281", "green"),
    ("289-303", "cyan"),
    ("306-318", "blue"),
    ("331-339", "purple"),
]
for _idx, (_resi, _colour) in enumerate(_strands):
    _sel = "Astrand%d" % _idx
    cmd.select(_sel, "resi %s & chain A " % _resi)
    cmd.color(_colour, _sel)

# Group all strands, render the barrel as cartoon and centre the view on it.
cmd.select("barrel", "Astrand*")
cmd.show("cartoon", "barrel")
cmd.zoom("barrel")
| [
"mwfranklin@ku.edu"
] | mwfranklin@ku.edu |
92bf31f8af37bbdde36487465fdf2b7a6d04d5da | 55a4e29bffbe2534f0f56eb0a3b36e03417b1081 | /dlx/callbacks.py | 4fa7571f29f3d42e71f394c16abf8c2989273d0b | [] | no_license | lchmo444/dlx | 0c6285e6f84ba7fca96a2cabb7558838fc7a053a | d481be879d879f131a90fed6337a2f22608bcbef | refs/heads/master | 2020-03-19T06:29:18.978876 | 2016-04-03T16:49:42 | 2016-04-03T16:49:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,802 | py | '''
Almost same as keras.callbacks
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import time
import json
import warnings
from collections import deque
from dlx.util.generic_utils import Progbar
from dlx.util import keras_utils as K
class CallbackList(object):
    """Fans every callback hook out to a list of callbacks.

    Also times the batch-level hooks relative to the batch itself and warns
    when the callbacks start to dominate the training loop.
    """

    def __init__(self, callbacks=[], queue_length=10):
        # Copy so later appends to the caller's list don't leak in.
        self.callbacks = list(callbacks)
        self.queue_length = queue_length

    def append(self, callback):
        self.callbacks.append(callback)

    def _set_params(self, params):
        for cb in self.callbacks:
            cb._set_params(params)

    def _set_model(self, model):
        for cb in self.callbacks:
            cb._set_model(model)

    def on_epoch_begin(self, epoch, logs={}):
        for cb in self.callbacks:
            cb.on_epoch_begin(epoch, logs)
        # Reset per-epoch timing statistics (bounded sliding windows).
        self._delta_t_batch = 0.
        self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
        self._delta_ts_batch_end = deque([], maxlen=self.queue_length)

    def on_epoch_end(self, epoch, logs={}):
        for cb in self.callbacks:
            cb.on_epoch_end(epoch, logs)

    def on_batch_begin(self, batch, logs={}):
        started = time.time()
        for cb in self.callbacks:
            cb.on_batch_begin(batch, logs)
        self._delta_ts_batch_begin.append(time.time() - started)
        median_delta = np.median(self._delta_ts_batch_begin)
        # Warn if the hooks take almost as long as the batch itself.
        if (self._delta_t_batch > 0. and
                median_delta > 0.95 * self._delta_t_batch and
                median_delta > 0.1):
            warnings.warn('Method on_batch_begin() is slow compared '
                          'to the batch update (%f). Check your callbacks.'
                          % median_delta)
        self._t_enter_batch = time.time()

    def on_batch_end(self, batch, logs={}):
        # on_batch_begin() may not have run yet; fall back to "now".
        if not hasattr(self, '_t_enter_batch'):
            self._t_enter_batch = time.time()
        self._delta_t_batch = time.time() - self._t_enter_batch
        started = time.time()
        for cb in self.callbacks:
            cb.on_batch_end(batch, logs)
        self._delta_ts_batch_end.append(time.time() - started)
        median_delta = np.median(self._delta_ts_batch_end)
        if (self._delta_t_batch > 0. and
                median_delta > 0.95 * self._delta_t_batch and
                median_delta > 0.1):
            warnings.warn('Method on_batch_end() is slow compared '
                          'to the batch update (%f). Check your callbacks.'
                          % median_delta)

    def on_train_begin(self, logs={}):
        for cb in self.callbacks:
            cb.on_train_begin(logs)

    def on_train_end(self, logs={}):
        for cb in self.callbacks:
            cb.on_train_end(logs)
class Callback(object):
    '''Abstract base class used to build new callbacks.

    # Properties
        params: dict. Training parameters
            (eg. verbosity, batch size, number of epochs...).
        model: instance of `keras.models.Model`.
            Reference of the model being trained.

    The `logs` dictionary that callback methods
    take as argument will contain keys for quantities relevant to
    the current batch or epoch.

    Currently, the `.fit()` method of the `Sequential` model class
    will include the following quantities in the `logs` that
    it passes to its callbacks:

        on_epoch_end: logs optionally include `val_loss`
            (if validation is enabled in `fit`), and `val_acc`
            (if validation and accuracy monitoring are enabled).
        on_batch_begin: logs include `size`,
            the number of samples in the current batch.
        on_batch_end: logs include `loss`, and optionally `acc`
            (if accuracy monitoring is enabled).
    '''

    def __init__(self):
        pass

    def _set_params(self, params):
        # Training parameters dict, supplied by the training loop.
        self.params = params

    def _set_model(self, model):
        # Reference to the model being trained.
        self.model = model

    def on_epoch_begin(self, epoch, logs={}):
        # Called at the start of every epoch; no-op by default.
        pass

    def on_epoch_end(self, epoch, logs={}):
        # Called at the end of every epoch; no-op by default.
        pass

    def on_batch_begin(self, batch, logs={}):
        # Called before every batch; no-op by default.
        pass

    def on_batch_end(self, batch, logs={}):
        # Called after every batch; no-op by default.
        pass

    def on_train_begin(self, logs={}):
        # Called once before training starts; no-op by default.
        pass

    def on_train_end(self, logs={}):
        # Called once after training finishes; no-op by default.
        pass
class BaseLogger(Callback):
    '''Callback that prints events to the standard output.

    This callback is automatically applied to
    every Keras model (it is the basis of the verbosity modes
    in models).
    '''

    def on_train_begin(self, logs={}):
        self.verbose = self.params['verbose']
        self.nb_epoch = self.params['nb_epoch']

    def on_epoch_begin(self, epoch, logs={}):
        if self.verbose:
            print('Epoch %d/%d' % (epoch + 1, self.nb_epoch))
        # Fresh progress bar and accumulators for this epoch.
        self.progbar = Progbar(target=self.params['nb_sample'],
                               verbose=self.verbose)
        self.seen = 0
        self.totals = {}

    def on_batch_begin(self, batch, logs={}):
        if self.seen < self.params['nb_sample']:
            self.log_values = []

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Sample-weighted running sums, averaged at epoch end.
        for key, value in logs.items():
            self.totals[key] = self.totals.get(key, 0) + value * batch_size
        for key in self.params['metrics']:
            if key in logs:
                self.log_values.append((key, logs[key]))
        # Skip the progbar update for the final batch of the epoch; it is
        # handled by on_epoch_end.
        if self.verbose and self.seen < self.params['nb_sample']:
            self.progbar.update(self.seen, self.log_values)

    def on_epoch_end(self, epoch, logs={}):
        for key in self.params['metrics']:
            if key in self.totals:
                # Average the accumulated training quantity.
                self.log_values.append((key, self.totals[key] / self.seen))
            if key in logs:
                # Validation quantities arrive directly in ``logs``.
                self.log_values.append((key, logs[key]))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values)
class History(Callback):
    '''Records per-epoch averages of batch metrics into ``self.history``.

    This callback is automatically applied to every Keras model; the
    resulting `History` object is what the `fit` method returns.
    '''

    def on_train_begin(self, logs={}):
        self.epoch = []
        self.history = {}

    def on_epoch_begin(self, epoch, logs={}):
        # Fresh accumulators for this epoch's sample-weighted sums.
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        for key, value in logs.items():
            self.totals[key] = self.totals.get(key, 0) + value * batch_size

    def on_epoch_end(self, epoch, logs={}):
        self.epoch.append(epoch)
        # Sample-weighted averages of the accumulated batch quantities.
        for key, total in self.totals.items():
            self.history.setdefault(key, []).append(total / self.seen)
        # Quantities computed once per epoch (e.g. validation metrics).
        for key, value in logs.items():
            self.history.setdefault(key, []).append(value)
class ModelCheckpoint(Callback):
    '''Save the model after every epoch.

    `filepath` can contain named formatting options,
    which will be filled the value of `epoch` and
    keys in `logs` (passed in `on_epoch_end`).

    For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
    then multiple files will be save with the epoch number and
    the validation loss.

    # Arguments
        filepath: string, path to save the model file.
        monitor: quantity to monitor.
        verbose: verbosity mode, 0 or 1.
        save_best_only: if `save_best_only=True`,
            the latest best model according to
            the validation loss will not be overwritten.
        mode: one of {auto, min, max}.
            If `save_best_only=True`, the decision
            to overwrite the current save file is made
            based on either the maximization or the
            minization of the monitored. For `val_acc`,
            this should be `max`, for `val_loss` this should
            be `min`, etc. In `auto` mode, the direction is
            automatically inferred from the name of the monitored quantity.
    '''
    def __init__(self, filepath, monitor='val_loss', verbose=0,
                 save_best_only=False, mode='auto'):
        # BUG FIX: was ``super(Callback, self).__init__()``, which starts the
        # MRO lookup *above* Callback; start it at this class instead.
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only

        if mode not in ['auto', 'min', 'max']:
            # BUG FIX: the message formatted ``self.mode``, an attribute that
            # is never assigned, so reaching this branch raised AttributeError.
            warnings.warn('ModelCheckpoint mode %s is unknown, '
                          'fallback to auto mode.' % (mode),
                          RuntimeWarning)
            mode = 'auto'

        # np.inf (lowercase): np.Inf was removed in NumPy 2.0.
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # 'auto': maximize accuracy-like metrics, minimize everything else.
            if 'acc' in self.monitor:
                self.monitor_op = np.greater
                self.best = -np.inf
            else:
                self.monitor_op = np.less
                self.best = np.inf

    def on_epoch_end(self, epoch, logs={}):
        """Save weights, optionally only when the monitored metric improves."""
        filepath = self.filepath.format(epoch=epoch, **logs)
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                warnings.warn('Can save best model only with %s available, '
                              'skipping.' % (self.monitor), RuntimeWarning)
            else:
                if self.monitor_op(current, self.best):
                    if self.verbose > 0:
                        print('Epoch %05d: %s improved from %0.5f to %0.5f,'
                              ' saving model to %s'
                              % (epoch, self.monitor, self.best,
                                 current, filepath))
                    self.best = current
                    self.model.save_weights(filepath, overwrite=True)
                else:
                    if self.verbose > 0:
                        print('Epoch %05d: %s did not improve' %
                              (epoch, self.monitor))
        else:
            if self.verbose > 0:
                print('Epoch %05d: saving model to %s' % (epoch, filepath))
            self.model.save_weights(filepath, overwrite=True)
class EarlyStopping(Callback):
    '''Stop training when a monitored quantity has stopped improving.

    # Arguments
        monitor: quantity to be monitored.
        patience: number of epochs with no improvement
            after which training will be stopped.
        verbose: verbosity mode.
        mode: one of {auto, min, max}. In 'min' mode,
            training will stop when the quantity
            monitored has stopped decreasing; in 'max'
            mode it will stop when the quantity
            monitored has stopped increasing.
    '''
    def __init__(self, monitor='val_loss', patience=0, verbose=0, mode='auto'):
        # BUG FIX: was ``super(Callback, self).__init__()``, which starts the
        # MRO lookup *above* Callback; start it at this class instead.
        super(EarlyStopping, self).__init__()

        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        self.wait = 0

        if mode not in ['auto', 'min', 'max']:
            # BUG FIX: the message formatted ``self.mode``, an attribute that
            # is never assigned, so reaching this branch raised AttributeError.
            warnings.warn('EarlyStopping mode %s is unknown, '
                          'fallback to auto mode.' % (mode), RuntimeWarning)
            mode = 'auto'

        # np.inf (lowercase): np.Inf was removed in NumPy 2.0.
        if mode == 'min':
            self.monitor_op = np.less
            self.best = np.inf
        elif mode == 'max':
            self.monitor_op = np.greater
            self.best = -np.inf
        else:
            # 'auto': maximize accuracy-like metrics, minimize everything else.
            if 'acc' in self.monitor:
                self.monitor_op = np.greater
                self.best = -np.inf
            else:
                self.monitor_op = np.less
                self.best = np.inf

    def on_epoch_end(self, epoch, logs={}):
        """Track the monitored metric and stop training after `patience`
        epochs without improvement."""
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn('Early stopping requires %s available!' %
                          (self.monitor), RuntimeWarning)
            # BUG FIX: previously fell through and compared None against
            # ``self.best``, raising TypeError on Python 3.
            return

        if self.monitor_op(current, self.best):
            self.best = current
            self.wait = 0
        else:
            if self.wait >= self.patience:
                if self.verbose > 0:
                    print('Epoch %05d: early stopping' % (epoch))
                self.model.stop_training = True
            self.wait += 1
class RemoteMonitor(Callback):
    '''Callback used to stream events to a server.

    Requires the `requests` library.

    # Arguments
        root: root url to which the events will be sent (at the end
            of every epoch). Events are sent to
            `root + '/publish/epoch/end/'`. Calls are HTTP POST,
            with a `data` argument which is a JSON-encoded dictionary
            of event data.
    '''
    def __init__(self, root='http://localhost:9000'):
        self.root = root

    def on_epoch_begin(self, epoch, logs={}):
        # Reset per-epoch accumulators.
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs={}):
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Sample-weighted running sums, averaged at epoch end.
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size

    def on_epoch_end(self, epoch, logs={}):
        # Imported lazily so `requests` is only required when monitoring.
        import requests
        send = {}
        send['epoch'] = epoch
        for k, v in self.totals.items():
            send[k] = v / self.seen
        for k, v in logs.items():
            send[k] = v
        try:
            requests.post(self.root + '/publish/epoch/end/',
                          {'data': json.dumps(send)})
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only network-level failures should be tolerated here.
        except requests.exceptions.RequestException:
            print('Warning: could not reach RemoteMonitor '
                  'root server at ' + str(self.root))
class LearningRateScheduler(Callback):
    '''Learning rate scheduler.

    # Arguments
        schedule: a function that takes an epoch index as input
            (integer, indexed from 0) and returns a new
            learning rate as output (float).
    '''
    def __init__(self, schedule):
        super(LearningRateScheduler, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch, logs={}):
        """Set the optimizer's learning rate from the schedule.

        Raises:
            ValueError: if the optimizer has no ``lr`` attribute or the
                schedule does not return a float.
        """
        # BUG FIX: these checks used ``assert``, which is stripped under
        # ``python -O``; raise real exceptions instead.
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        lr = self.schedule(epoch)
        # Accept numpy floating scalars as well as the builtin float
        # (``type(lr) == float`` rejected e.g. np.float64).
        if not isinstance(lr, (float, np.floating)):
            raise ValueError('The output of the "schedule" function should be float.')
        K.set_value(self.model.optimizer.lr, float(lr))
| [
"lxastro0@163.com"
] | lxastro0@163.com |
d4d26bed4686fa46e885b091bb925475e2c91f1a | 4050c190f4495a81d5b93a16c45804986fc90b98 | /plugins/sdm/test/atest-sdm-euler-shocktube-2d.py | ac3101b598b3bf317d9f6639a12b01f139de643c | [] | no_license | vivianbolsee/coolfluid3 | 5490ce042c9441691b36c5e358cd6a0b557e8672 | 6ce39dee83861f0508622b003394324205dc51f4 | refs/heads/master | 2020-12-24T17:35:55.337089 | 2012-05-24T07:35:51 | 2012-05-24T07:35:51 | 3,425,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,760 | py | import sys
import coolfluid
# The cf root component
# 2-D shock-tube problem solved with the Spectral Difference Method (sdm)
# plugin of coolfluid3: build the unsteady model, generate a square mesh,
# set a two-state Riemann initial condition, run, and write Tecplot/Gmsh
# output files.
root = coolfluid.Core.root()
env = coolfluid.Core.environment()

### Logging configuration
env.options().configure_option('assertion_backtrace', True)
env.options().configure_option('exception_backtrace', True)
env.options().configure_option('regist_signal_handlers', True)
env.options().configure_option('exception_log_level', 10)
env.options().configure_option('log_level', 3)
env.options().configure_option('exception_outputs', True)

############################
# Create simulation
############################
model = root.create_component('shocktube_2d','cf3.solver.ModelUnsteady')
time = model.create_time()
solver = model.create_solver('cf3.sdm.SDSolver')
physics = model.create_physics('cf3.physics.NavierStokes.NavierStokes2D')
domain  = model.create_domain()

###### Following generates a square mesh: 20x20 cells on [-5,5] x [-5,5]
mesh = domain.create_component('mesh','cf3.mesh.Mesh')
mesh_generator = domain.create_component("mesh_generator","cf3.mesh.SimpleMeshGenerator")
mesh_generator.options().configure_option("mesh",mesh.uri())
mesh_generator.options().configure_option("nb_cells",[20,20])
mesh_generator.options().configure_option("lengths",[10,10])
mesh_generator.options().configure_option("offsets",[-5,-5])
mesh_generator.execute()
load_balance = mesh_generator.create_component("load_balancer","cf3.mesh.actions.LoadBalance")
load_balance.options().configure_option("mesh",mesh)
load_balance.execute()
#####

### Configure physics (ideal gas: ratio of specific heats, gas constant)
physics.options().configure_option('gamma',1.4)
physics.options().configure_option('R',287.05)

### Configure solver
solver.options().configure_option('time',time)
solver.options().configure_option('mesh',mesh)
solver.options().configure_option('solution_vars','cf3.physics.NavierStokes.Cons2D')
solver.options().configure_option('solution_order',3)
solver.options().configure_option('iterative_solver','cf3.sdm.RungeKuttaLowStorage2')

### Configure timestepping
time.options().configure_option('time_step',1.)
time.options().configure_option('end_time',0.008)
solver.access_component('TimeStepping').options().configure_option('cfl','0.2')
solver.access_component('TimeStepping/IterativeSolver').options().configure_option('nb_stages',3)

### Prepare the mesh for Spectral Difference (build faces and fields etc...)
solver.get_child('PrepareMesh').execute()

### Set the initial condition
solver.get_child('InitialConditions').create_initial_condition( name = 'shocktube')
# Two-state Riemann data: high-pressure/high-density quadrant (x<=0 & y<=0)
# versus the rest of the domain. Conserved variables: [rho, rho*u, rho*v, rho*E].
functions = [
 'r_L:=4.696; r_R:=1.408; u_L:=0; u_R:=0; v_L:=0; v_R:=0; p_L:=404400; p_R:=101100; g:=1.4; if(x<=0 & y<=0,r_L,r_R)',
 'r_L:=4.696; r_R:=1.408; u_L:=0; u_R:=0; v_L:=0; v_R:=0; p_L:=404400; p_R:=101100; g:=1.4; if(x<=0 & y<=0,r_L*u_L,r_R*u_R)',
 'r_L:=4.696; r_R:=1.408; u_L:=0; u_R:=0; v_L:=0; v_R:=0; p_L:=404400; p_R:=101100; g:=1.4; if(x<=0 & y<=0,r_L*v_L,r_R*v_R)',
 # BUG FIX: the right-state kinetic energy term read u_R*u_R*v_R*v_R
 # (product) instead of u_R*u_R+v_R*v_R (sum). Numerically harmless here
 # because u_R = v_R = 0, but wrong for any moving right state.
 'r_L:=4.696; r_R:=1.408; u_L:=0; u_R:=0; v_L:=0; v_R:=0; p_L:=404400; p_R:=101100; g:=1.4; if(x<=0 & y<=0,p_L/(g-1)+0.5*r_L*(u_L*u_L+v_L*v_L),p_R/(g-1)+0.5*r_R*(u_R*u_R+v_R*v_R))'
]
solver.get_child('InitialConditions').get_child('shocktube').options().configure_option("functions",functions)
solver.get_child('InitialConditions').execute()

### Create convection term
convection = solver.get_child('DomainDiscretization').create_term(name = 'convection', type = 'cf3.sdm.navierstokes.Convection2D')

# No-op boundary condition on all four outer walls.
nullbc = solver.get_child('BoundaryConditions').create_boundary_condition(name= 'nullbc', type = 'cf3.sdm.BCNull',
regions=[
mesh.access_component('topology/left').uri(),
mesh.access_component('topology/right').uri(),
mesh.access_component('topology/top').uri(),
mesh.access_component('topology/bottom').uri()
])

#######################################
# SIMULATE
#######################################
model.simulate()

########################
# OUTPUT
########################
fields = [
mesh.access_component("solution_space/solution").uri(),
mesh.access_component("solution_space/wave_speed").uri(),
mesh.access_component("solution_space/residual").uri()
]

# tecplot
#########
tec_writer = model.get_child('tools').create_component("writer","cf3.mesh.tecplot.Writer")
tec_writer.options().configure_option("mesh",mesh)
tec_writer.options().configure_option("fields",fields)
tec_writer.options().configure_option("cell_centred",False)
tec_writer.options().configure_option("file",coolfluid.URI("file:sdm_output.plt"))
tec_writer.execute()

# gmsh
######
gmsh_writer = model.create_component("writer","cf3.mesh.gmsh.Writer")
gmsh_writer.options().configure_option("mesh",mesh)
gmsh_writer.options().configure_option("fields",fields)
gmsh_writer.options().configure_option("file",coolfluid.URI("file:sdm_output.msh"))
gmsh_writer.options().configure_option("enable_surfaces",False)
gmsh_writer.execute()
| [
"wdeconinck@me.com"
] | wdeconinck@me.com |
fc6eb7af5e22855c1fe1d3122f3587bfa6402bdf | 30c9a7247c3633c4b025c7370bbc30cf2a55ee5c | /API/parsers/publications.py | 9e745f01519d950484888b7baee9ddcec0ed39e3 | [] | no_license | DaryaRu/Publication_flask | 765b2089de8bbf04b22ff8bffc1940849108558e | 7dc24a8a16d68c0e0724d31ca62e063be6e9633c | refs/heads/main | 2023-08-10T17:12:58.987750 | 2021-09-12T09:14:38 | 2021-09-12T09:14:38 | 400,288,858 | 2 | 0 | null | 2021-09-12T09:14:39 | 2021-08-26T19:51:13 | Python | UTF-8 | Python | false | false | 852 | py | from flask_restful import reqparse
# Request parsers for the publication endpoints.

# Creation parser: title, content and rubric_id are all mandatory.
publication_create_parser = reqparse.RequestParser()
for _field, _message in (
        ("title", "This field cannot be left blank"),
        ("content", "This field cannot be left blank"),
):
    publication_create_parser.add_argument(_field, type=str, required=True,
                                           help=_message)
publication_create_parser.add_argument("rubric_id", type=int, required=True,
                                       help="Every item needs a rubric id")

# Listing/filter parser: every argument is optional.
publication_list_parser = reqparse.RequestParser()
for _field, _type in (
        ("page", int),
        ("page_size", int),
        ("rubric_id", int),
        ("title", str),
        ("content", str),
):
    publication_list_parser.add_argument(_field, type=_type)
| [
"89119484+DaryaRu@users.noreply.github.com"
] | 89119484+DaryaRu@users.noreply.github.com |
c9515e9fd438a2014aabbf49a0b6da9b8cecba33 | 66bdb73805475a522d09c5c19bb437e7064bee00 | /gambling/test.py | 7b42b2680e811fcbb056c74b3fa5bbdb1ef58d6b | [] | no_license | redrockhorse/python | 199799d6e0d2ebcf285e19338668ad57fc238641 | 202e9a0234808d099824f6fbb3689e1bf09d467f | refs/heads/master | 2021-06-02T21:33:30.852520 | 2021-04-22T02:06:35 | 2021-04-22T02:06:35 | 112,588,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | __author__ = 'mahone'
import numpy as np
def getNameByOneHotCode(oneHot, dictArray):
    """Decode a one-hot vector: return the label at the hot (argmax) index.

    oneHot: sequence of numbers with (ideally) a single 1.
    dictArray: sequence of labels, indexed by position.
    """
    hot_index = int(np.argmax(np.asarray(oneHot)))
    return dictArray[hot_index]
if __name__ == '__main__':
    # Demo: decode a one-hot vector into its class label and print it.
    oneHot = [0, 0, 1, 0, 0, 0, 0, 0, 0]
    # NOTE: the original also built an unused letter->index mapping here in a
    # variable named ``dict``, shadowing the builtin; removed as dead code.
    dictArray = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'f']
    name = getNameByOneHotCode(oneHot, dictArray)
print(name) | [
"noreply@github.com"
] | redrockhorse.noreply@github.com |
923357819defeee2cd4a820e2e5270c48bfef22f | 73b5701ff20719a70f62be81ff73df4c03ca86a7 | /src/models/inception_resnet_v1_expression.py | 2b90cff1ce8e059207576d2457531601e0be55f8 | [
"Apache-2.0",
"MIT"
] | permissive | hengxyz/FaceLiveNet | 32ef05419c68f98b7413afbfbc2146c00929eb4f | 74e249cd2217155b1c5524fd0e55573ae4327f4c | refs/heads/master | 2020-09-15T07:39:02.659209 | 2020-03-18T10:35:26 | 2020-03-18T10:35:26 | 240,273,629 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,090 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception Resnet V1 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Inception-Resnet-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block (Inception-Resnet-A)."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        # Branch 0: plain 1x1 convolution.
        with tf.variable_scope('Branch_0'):
            branch0 = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        # Branch 1: 1x1 bottleneck followed by a 3x3.
        with tf.variable_scope('Branch_1'):
            branch1 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            branch1 = slim.conv2d(branch1, 32, 3, scope='Conv2d_0b_3x3')
        # Branch 2: 1x1 bottleneck followed by two stacked 3x3 convolutions.
        with tf.variable_scope('Branch_2'):
            branch2 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            branch2 = slim.conv2d(branch2, 32, 3, scope='Conv2d_0b_3x3')
            branch2 = slim.conv2d(branch2, 32, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat([branch0, branch1, branch2], 3)
        # Project back to the input depth; no batch-norm / activation on the
        # residual projection.
        residual = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                               activation_fn=None, scope='Conv2d_1x1')
        net = net + scale * residual
        if activation_fn:
            net = activation_fn(net)
    return net
# Inception-Resnet-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block (Inception-Resnet-B)."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            branch0 = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
        # Factorized 7x7 convolution: 1x7 followed by 7x1.
        with tf.variable_scope('Branch_1'):
            branch1 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            branch1 = slim.conv2d(branch1, 128, [1, 7],
                                  scope='Conv2d_0b_1x7')
            branch1 = slim.conv2d(branch1, 128, [7, 1],
                                  scope='Conv2d_0c_7x1')
        mixed = tf.concat([branch0, branch1], 3)
        # Project back to the input depth; no batch-norm / activation here.
        residual = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                               activation_fn=None, scope='Conv2d_1x1')
        net = net + scale * residual
        if activation_fn:
            net = activation_fn(net)
    return net
# Inception-Resnet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block (Inception-Resnet-C)."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            branch0 = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        # Factorized 3x3 convolution: 1x3 followed by 3x1.
        with tf.variable_scope('Branch_1'):
            branch1 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            branch1 = slim.conv2d(branch1, 192, [1, 3],
                                  scope='Conv2d_0b_1x3')
            branch1 = slim.conv2d(branch1, 192, [3, 1],
                                  scope='Conv2d_0c_3x1')
        mixed = tf.concat([branch0, branch1], 3)
        # Project back to the input depth; no batch-norm / activation here.
        residual = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                               activation_fn=None, scope='Conv2d_1x1')
        net = net + scale * residual
        if activation_fn:
            net = activation_fn(net)
    return net
def reduction_a(net, k, l, m, n):
    """Reduction-A block: stride-2 downsampling with three parallel branches.

    k, l, m, n are the filter counts of the four convolutions, following the
    notation of the Inception-v4 / Inception-ResNet paper.
    """
    with tf.variable_scope('Branch_0'):
        branch0 = slim.conv2d(net, n, 3, stride=2, padding='VALID',
                              scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        branch1 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')
        branch1 = slim.conv2d(branch1, l, 3, scope='Conv2d_0b_3x3')
        branch1 = slim.conv2d(branch1, m, 3, stride=2, padding='VALID',
                              scope='Conv2d_1a_3x3')
    # Parallel max-pool keeps the raw features at the reduced resolution.
    with tf.variable_scope('Branch_2'):
        branch2 = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                  scope='MaxPool_1a_3x3')
    return tf.concat([branch0, branch1, branch2], 3)
def reduction_b(net):
    """Reduction-B block: stride-2 downsampling via three conv branches plus
    a parallel max-pool branch."""
    with tf.variable_scope('Branch_0'):
        branch0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        branch0 = slim.conv2d(branch0, 384, 3, stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        branch1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        branch1 = slim.conv2d(branch1, 256, 3, stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_2'):
        branch2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        branch2 = slim.conv2d(branch2, 256, 3,
                              scope='Conv2d_0b_3x3')
        branch2 = slim.conv2d(branch2, 256, 3, stride=2,
                              padding='VALID', scope='Conv2d_1a_3x3')
    # Parallel max-pool keeps the raw features at the reduced resolution.
    with tf.variable_scope('Branch_3'):
        branch3 = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                  scope='MaxPool_1a_3x3')
    return tf.concat([branch0, branch1, branch2, branch3], 3)
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=512, weight_decay=0.0, reuse=None):
    """Build the Inception-ResNet-v1 embedding network with shared
    conv / fully-connected defaults (Xavier init, L2 regularization,
    batch norm).

    Args:
        images: 4-D image batch tensor fed to the first convolution.
        keep_probability: dropout keep probability before the bottleneck.
        phase_train: True during training (drives batch norm / dropout).
        bottleneck_layer_size: dimensionality of the output embedding.
        weight_decay: L2 regularization strength for conv/FC weights.
        reuse: whether to reuse existing variables in the scope.

    Returns:
        (net, end_points) from ``inception_resnet_v1``.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        # NOTE(review): putting the BN moving mean/variance into
        # TRAINABLE_VARIABLES is unusual — presumably so they are captured
        # when saving/restoring trainable variables; confirm intent.
        'variables_collections': [ tf.GraphKeys.TRAINABLE_VARIABLES ],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.initializers.xavier_initializer(),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
              dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def inception_resnet_v1(inputs, is_training=True,
                        dropout_keep_prob=0.8,
                        bottleneck_layer_size=512,
                        reuse=None,
                        scope='InceptionResnetV1'):
    """Creates the Inception Resnet V1 model.

    Args:
      inputs: a 4-D tensor of size [batch_size, height, width, 3].
      num_classes: number of predicted classes.
      is_training: whether is training or not.
      dropout_keep_prob: float, the fraction to keep before final layer.
      reuse: whether or not the network and its variables should be reused. To be
        able to reuse 'scope' must be given.
      scope: Optional variable_scope.

    Returns:
      logits: the logits outputs of the model.
      end_points: the set of end_points from the inception model.
    """
    end_points = {}

    with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):

                # Stem: successive convolutions/pooling down to 35x35 features.
                # (Spatial size comments assume 299x299 inputs.)
                # 149 x 149 x 32
                net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
                                  scope='Conv2d_1a_3x3')
                end_points['Conv2d_1a_3x3'] = net
                # 147 x 147 x 32
                net = slim.conv2d(net, 32, 3, padding='VALID',
                                  scope='Conv2d_2a_3x3')
                end_points['Conv2d_2a_3x3'] = net
                # 147 x 147 x 64
                net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
                end_points['Conv2d_2b_3x3'] = net
                # 73 x 73 x 64
                net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                      scope='MaxPool_3a_3x3')
                end_points['MaxPool_3a_3x3'] = net
                # 73 x 73 x 80
                net = slim.conv2d(net, 80, 1, padding='VALID',
                                  scope='Conv2d_3b_1x1')
                end_points['Conv2d_3b_1x1'] = net
                # 71 x 71 x 192
                net = slim.conv2d(net, 192, 3, padding='VALID',
                                  scope='Conv2d_4a_3x3')
                end_points['Conv2d_4a_3x3'] = net
                # 35 x 35 x 256
                net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
                                  scope='Conv2d_4b_3x3')
                end_points['Conv2d_4b_3x3'] = net

                # 5 x Inception-resnet-A
                net = slim.repeat(net, 5, block35, scale=0.17)
                end_points['Mixed_5a'] = net

                # Reduction-A
                with tf.variable_scope('Mixed_6a'):
                    net = reduction_a(net, 192, 192, 256, 384)
                end_points['Mixed_6a'] = net

                # 10 x Inception-Resnet-B
                net = slim.repeat(net, 10, block17, scale=0.10)
                end_points['Mixed_6b'] = net

                # Reduction-B
                with tf.variable_scope('Mixed_7a'):
                    net = reduction_b(net)
                end_points['Mixed_7a'] = net

                # 5 x Inception-Resnet-C
                net = slim.repeat(net, 5, block8, scale=0.20)
                end_points['Mixed_8a'] = net

                # Final residual block without activation before pooling.
                net = block8(net, activation_fn=None)
                end_points['Mixed_8b'] = net

                with tf.variable_scope('Logits'):
                    end_points['PrePool'] = net
                    #pylint: disable=no-member
                    # Global average pooling over the remaining spatial dims.
                    net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
                                          scope='AvgPool_1a_8x8')
                    net = slim.flatten(net)

                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                                       scope='Dropout')

                    end_points['PreLogitsFlatten'] = net

                # Embedding ("bottleneck") layer: linear projection, no activation.
                # NOTE(review): ``reuse=False`` here is explicit even when the
                # outer scope reuses variables — confirm that is intended.
                net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
                                           scope='Bottleneck', reuse=False)

    return net, end_points
def inference_expression(net, keep_probability, phase_train=True, weight_decay=0.0, reuse=None):
    """Build the expression-recognition head on top of a shared feature map.

    Wraps `inception_resnet_v1_expression_plus` in a slim arg_scope that applies
    Xavier-initialized, L2-regularized conv weights and batch normalization.

    Args:
      net: input feature tensor (output of the shared trunk).
      keep_probability: dropout keep probability for the final layers.
      phase_train: whether the graph is built for training (batch-norm/dropout mode).
      weight_decay: L2 regularization strength for conv weights.
      reuse: whether to reuse existing variables in the scope.

    Returns:
      (net, end_points) as produced by inception_resnet_v1_expression_plus.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
    }
    with slim.arg_scope([slim.conv2d],
                        #weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_initializer=tf.contrib.layers.xavier_initializer(),##mzh Xavier initialization
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        # return inception_resnet_v1_expression(images, is_training=phase_train,
        #       dropout_keep_prob=keep_probability, reuse=reuse)
        return inception_resnet_v1_expression_plus(net, is_training=phase_train,
              dropout_keep_prob=keep_probability, reuse=reuse)
def inception_resnet_v1_expression_plus(net, is_training=True,
                        dropout_keep_prob=1.0,
                        reuse=None,
                        scope='InceptionResnetV1_expression'):
    """Expression head applied to a pre-computed Inception-ResNet feature map.

    Unlike the full `inception_resnet_v1`, this takes an intermediate feature
    tensor (not raw images) and applies only one Inception-ResNet-C (block8)
    repeat plus a final linear block8, followed by average pooling, flattening
    and dropout.  (The previous docstring incorrectly documented `inputs` and
    `num_classes` parameters inherited from the full-model builder; the dead
    commented-out trunk code has been removed.)

    Args:
      net: a 4-D feature tensor produced by the shared trunk.
      is_training: whether the graph is in training mode (batch-norm/dropout).
      dropout_keep_prob: float, the fraction to keep before the final layer.
      reuse: whether or not the network and its variables should be reused. To
        be able to reuse 'scope' must be given.
      scope: Optional variable_scope.
    Returns:
      net: the flattened pre-logits output tensor.
      end_points: dict of named intermediate activations.
    """
    end_points = {}

    with tf.variable_scope(scope, 'InceptionResnetV1_expression', [net], reuse=reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):

                # 1 x Inception-Resnet-C
                net = slim.repeat(net, 1, block8, scale=0.20)

                # Final block8 without activation (linear residual branch).
                net = block8(net, activation_fn=None)

                with tf.variable_scope('Logits'):
                    end_points['PrePool'] = net
                    # pylint: disable=no-member
                    net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
                                          scope='AvgPool_1a_8x8')
                    net = slim.flatten(net)

                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                                       scope='Dropout')

                    end_points['PreLogitsFlatten'] = net

    return net, end_points
"mingzuheng@hotmail.com"
] | mingzuheng@hotmail.com |
2836f7639ad2855cedc48cac08000e57bbc2334d | 633eda99e8a4dd00a5549b6a63f5880dabaa0957 | /math/372_OO_Super_Pow.py | 431cdc3fb52d39f05b8bf2fe9764c2e73bd6bcea | [] | no_license | jamiezeminzhang/Leetcode_Python | 1256e9097a482ab4a80363ddef8828986bbf9bde | 56383c2b07448a9017a7a707afb66e08b403ee76 | refs/heads/master | 2020-04-05T14:08:30.336545 | 2016-09-06T01:07:34 | 2016-09-06T01:07:34 | 53,762,214 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | # -*- coding: utf-8 -*-
"""
Created on Thursday July 14 07:07:11 2016
372. Super Pow
Total Accepted: 2171
Total Submissions: 7216
Difficulty: Medium
Your task is to calculate a^b mod 1337 where a is a positive integer and b is an extremely large positive integer given in the form of an array.
Example1:
a = 2
b = [3]
Result: 8
Example2:
a = 2
b = [1,0]
Result: 1024
@author: zzhang
"""
class Solution(object):
    def superPow(self, a, b):
        """
        :type a: int
        :type b: List[int]
        :rtype: int

        Processes b digit by digit: a^(10*q + d) mod m
        == (a^q mod m)^10 * a^d mod m, using the built-in 3-arg pow.
        """
        result = 1
        for digit in b:
            result = (pow(result, 10, 1337) * pow(a, digit, 1337)) % 1337
        return result
| [
"zeminzhang@Zemins-MacBook-Pro.local"
] | zeminzhang@Zemins-MacBook-Pro.local |
2d6e891ed412613ebeb598d8acfe68f6ad0f5eb8 | 15bac409f66ff1800140aa11eafee7c3686a67b4 | /core/migrations/0004_auto_20151011_0300.py | 8fac34dc903ac54d80ffac25be2919e62868a112 | [] | no_license | oasisvali/hwcentral | 4a0297d6e623523dd7a651413556ca44a24ed7ea | b09a3f7a587d072d0f1b06e852f3d8f3929ddc66 | refs/heads/master | 2021-01-18T01:48:06.073429 | 2017-01-26T06:10:43 | 2017-01-26T06:10:43 | 36,690,510 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Add Announcement.announcer FK and retarget Question.school help text."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0003_auto_20150922_0003'),
    ]

    operations = [
        migrations.AddField(
            model_name='announcement',
            name='announcer',
            # default=1 back-fills existing rows with user pk 1; the default is
            # not kept on the model (preserve_default=False).
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL,
                                    help_text=b'The user who made this announcement'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='question',
            name='school',
            field=models.ForeignKey(
                help_text=b'The school question bank that this question belongs to. Use 1 if it belongs to the hwcentral question bank.',
                to='core.School'),
        ),
    ]
| [
"oasis.vali@gmail.com"
] | oasis.vali@gmail.com |
dc94995061a88c795f93deb5719820a9c7d233f6 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901020002/1001S02E05_string.py | 2ef785d1bf8d2423628b42ff569e92038180dac4 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 1,395 | py | sample_text = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
#1.2 better替换worse
test = sample_text.replace('better','worse')
print('better全部替换成worse',test)
#1.3 剔除包含ea的单词
words = test.split()
filtered = []
for word in words:
if word.find('ea') < 0:
filtered.append(word)
print('剔除包含ea的单词',filtered)
#1.4 大小写翻转
swapcased = [i.swapcase() for i in filtered]
print('大小写翻转',swapcased)
#1.5 升序排列
print('升序排列',sorted(swapcased))
print('降序',sorted(swapcased,reverse=True)) | [
"40155646+seven-tears@users.noreply.github.com"
] | 40155646+seven-tears@users.noreply.github.com |
eb2c25dd135444e820ad0a35b754ba6743b5fe49 | 4fab1eb1f623785efd492805f7e12b60bc3cec5e | /venv/bin/lsm2bin | 5496b0787574dfbc77875cdfd9aaf86334091e40 | [] | no_license | royrajchamp/AttendanceProject | c1bb0840326b60ff922eb58be0ef5cfa469f21db | ea0458072fc87e355b42b966534c61d1974a419e | refs/heads/master | 2023-02-11T14:28:34.132463 | 2021-01-04T06:00:09 | 2021-01-04T06:00:09 | 326,584,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/royrajchamp/Desktop/Python/FaceRecognition/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tifffile.lsm2bin import main
if __name__ == '__main__':
    # Strip the setuptools script suffix so the tool reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"royrajchamp@gmail.com"
] | royrajchamp@gmail.com | |
945755e73c4c8fe1438bc352cd5a0861918ad25a | c14d9512c62fc479ba05ea5ed256828e8e1038c5 | /stripe/models/account.py | eaecab1ba97d6ff6408961f48b09a5193aa3c01d | [
"MIT"
] | permissive | jayvdb/saaskit-stripe | c44e6e387d4dd27f564f6959c134ec6aaff8f3c5 | bd292182b0bed47dff86a627231bdabafb99bf71 | refs/heads/master | 2021-09-07T17:25:14.710472 | 2018-01-24T15:17:41 | 2018-02-26T21:10:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,082 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import json
from ..utils import UnixDateTimeField
from .charge import CURRENCY_CHOICES
ACCOUNT_TYPES = (
('custom', _('Custom')),
('standard', _('Standard')),
)
class Account(models.Model):
    """Stripe Account object.

    This is an object representing your Stripe account. You can retrieve it to
    see properties on the account like its current e-mail address or if the
    account is enabled yet to make live charges.

    Some properties, marked as 'managed accounts only', are only available to
    platforms who want to create and manage Stripe accounts.
    """

    # Stripe's own account id (e.g. "acct_...") is used as the primary key.
    id = models.CharField(max_length=255, primary_key=True)
    charges_enabled = models.BooleanField(
        help_text=_(
            'Whether or not the account can create live charges',
        ),
    )
    country = models.CharField(  # todo: add CHOICES
        max_length=255,
        help_text=_('The country of the account')
    )
    currencies_supports = json.JSONField(
        help_text=_(
            'The currencies this account can submit when creating charges',
        ),
    )
    default_currency = models.CharField(
        max_length=255, help_text=_(
            'The currency this account has chosen to use as the default'),
        choices=CURRENCY_CHOICES)
    details_submitted = models.BooleanField(
        help_text=_(
            'Whether or not account details have been submitted yet. '
            'Standalone accounts cannot receive transfers before this is true.',
        ),
    )
    transfers_enabled = models.BooleanField(
        help_text=_(
            'Whether or not Stripe will send automatic transfers for this '
            'account. This is only false when Stripe is waiting for '
            'additional information from the account holder.',
        ),
        default=True,
    )
    display_name = models.CharField(
        max_length=255,
        help_text=_(
            'The display name for this account. This is used on the Stripe '
            'dashboard to help you differentiate between accounts.',
        ),
    )
    email = models.EmailField(help_text=_('The primary user’s email address'))
    statement_descriptor = models.TextField(
        help_text=_(
            'The text that will appear on credit card statements',
        ),
    )
    timezone = models.CharField(
        max_length=255,
        help_text=_(
            'The timezone used in the Stripe dashboard for this account. A '
            'list of possible timezone values is maintained at the IANA '
            'Timezone Database.',
        ),
    )
    business_name = models.CharField(
        max_length=255,
        help_text=_(
            'The publicly visible name of the business',
        ),
    )
    business_logo = models.CharField(max_length=255, null=True)
    business_url = models.URLField(
        help_text=_('The publicly visible website of the business'),
        null=True,
    )
    # Stripe sends creation time as a unix timestamp.
    created = UnixDateTimeField()
    metadata = json.JSONField(
        help_text=_(
            'A set of key/value pairs that you can attach to a charge object. '
            'it can be useful for storing additional information about the '
            'charge in a structured format.',
        ),
    )
    support_email = models.EmailField(null=True)
    support_phone = models.CharField(
        max_length=255,
        help_text=_(
            'The publicly visible support phone number for the business',
        ),
        null=True,
    )
    payout_schedule = json.JSONField(null=True)
    payout_statement_descriptor = models.CharField(max_length=255, null=True)
    payouts_enabled = models.BooleanField()

    # --- Managed-accounts-only fields below ---
    bank_accounts = json.JSONField(
        help_text=_(
            '(Managed Accounts Only) '
            'Bank accounts currently attached to this account.',
        ),
    )
    debit_negative_balances = models.BooleanField(
        help_text=_(
            '(Managed Accounts Only) '
            'Whether or not Stripe will attempt to reclaim negative account '
            'balances from this account’s bank account.',
        ),
    )
    decline_charge_on = json.JSONField(
        help_text=_(
            '(Managed Accounts Only) '
            'Account-level settings to automatically decline certain types of '
            'charges regardless of the bank’s decision.',
        ),
    )
    legal_entity = json.JSONField(
        help_text=_(
            '(Managed Accounts Only) '
            'Information regarding the owner of this account, including '
            'verification status.',
        ),
    )
    product_description = models.TextField(
        help_text=_(
            '(Managed Accounts Only) '
            'An internal-only description of the product or service provided. '
            'This is used by Stripe in the event the account gets flagged for '
            'potential fraud.',
        ),
        null=True,
    )
    tos_acceptance = json.JSONField(
        help_text=_(
            '(Managed Accounts Only) '
            'Who accepted the Stripe terms of service, and when they accepted '
            'it.',
        ),
    )
    transfer_schedule = json.JSONField(
        help_text=_(
            '(Managed Accounts Only) '
            'When payments collected will be automatically paid out to the '
            'account holder’s bank account',
        ),
    )
    type = models.CharField(max_length=255, choices=ACCOUNT_TYPES)
    verification = json.JSONField(
        help_text=_(
            '(Managed Accounts Only) '
            'That state of the account’s information requests, including what '
            'information is needed and by when it must be provided.',
        ),
    )

    @classmethod
    def from_stripe_object(cls, stripe_object):
        """Create and save an Account row from a stripe.Account API object.

        NOTE(review): 'external_accounts' is dropped on the floor (see todo)
        and the instance is saved immediately as a side effect. Assumes the
        remaining dict keys match this model's field names — TODO confirm
        against the Stripe API version in use.
        """
        _dict = stripe_object.to_dict()
        _dict.pop('object')
        _dict.pop('external_accounts')  # todo: handle this
        a = cls(**_dict)
        a.save()

        return a
| [
"tony@git-pull.com"
] | tony@git-pull.com |
4421c4879fc4ff83bf45f2fad89ca621e1a7a27e | 117dfcb837a6627f9d92a5cdb710ff4157091d3e | /clouds/utils.py | a0ee843d9b060a61e613bdcf279181dc4af00228 | [] | no_license | martinperezmaneiro/clouds | 945d360c17b09b2952b1951a47d88d93b585a21a | 740cafc903eda66149bc730df28b4d8fb4412bac | refs/heads/main | 2023-06-29T05:08:50.801204 | 2021-07-30T15:33:03 | 2021-07-30T15:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 17 11:24:54 2021
@author: hernando
"""
import numpy as np
#
#--- Utilities
#
def ut_fscale(values, a = 0, b = 1):
    """Return a function mapping values' range [min, max] linearly onto [a, b].

    Fix: the *a* and *b* parameters were previously accepted but ignored
    (the closure always scaled onto [0, 1]); they are now honoured. The
    defaults (0, 1) preserve the original behaviour.

    Parameters
    ----------
    values : np.ndarray
        Data whose min/max define the source interval (bool arrays are cast
        to int first, as before).
    a, b : float
        Target interval endpoints.

    Returns
    -------
    callable: val -> a + (b - a) * (val - min) / (max - min).
    """
    values = values.astype(int) if values.dtype == bool else values
    xmin, xmax = np.min(values), np.max(values)
    def _fun(val):
        # NOTE: degenerate input (max == min) still divides by zero, as before.
        return a + (b - a) * (val - xmin) / (xmax - xmin)
    return _fun
def ut_scale(values, a = 0, b = 1):
    """Linearly rescale *values* from [min, max] onto [a, b].

    Fix: *a* and *b* were previously accepted but ignored (the result was
    always scaled onto [0, 1]); they are now honoured. The defaults (0, 1)
    preserve the original behaviour.
    """
    values = values.astype(int) if values.dtype == bool else values
    xmin, xmax = np.min(values), np.max(values)
    scale = a + (b - a) * (values - xmin) / (xmax - xmin)
    return scale
def ut_steps(bins):
    """Return the step size (gap between the first two edges) of each binning."""
    gaps = [edges[1] - edges[0] for edges in bins]
    return np.array(gaps)
def ut_centers(xs : np.array) -> np.array:
    """Midpoints of consecutive entries of *xs* (bin edges -> bin centers)."""
    return (xs[:-1] + xs[1:]) * 0.5
def ut_mesh(bins):
    """Meshgrid ('ij' indexing) built from the bin centers of each binning.

    'ij' indexing guarantees element access via coordinates x[i, j, k].
    """
    # Bin centers, computed inline: midpoints of consecutive edges.
    centers = [(edges[1:] + edges[:-1]) * 0.5 for edges in bins]
    return np.meshgrid(*centers, indexing = 'ij')
def arstep(x, step, delta = False):
    """Arange covering [min(x), max(x)+step], optionally padded by half a step."""
    pad = 0.5 * step if delta else 0.0
    lo = np.min(x) - pad
    hi = np.max(x) + step + pad
    return np.arange(lo, hi, step)
def to_coors(vs):
    """Transpose a sequence of points into one coordinate array per dimension."""
    ndim = len(vs[0])
    return [np.array([point[k] for point in vs]) for k in range(ndim)]
def ut_sort(values, ids, reverse = True):
    """Sort *values* (descending by default) and reorder *ids* in lockstep."""
    pairs = sorted(zip(values, ids), reverse = reverse)
    sorted_values = np.array([value for value, _ in pairs])
    sorted_ids = np.array([ident for _, ident in pairs])
    return sorted_values, sorted_ids
"hernando.joseangel@gmail.com"
] | hernando.joseangel@gmail.com |
58a513a543317eae70adb8d81445905b24667182 | feccf7588777becba68921c0bfade3e21f5210ce | /tests/www/views/test_views_base.py | 10eb3d5ea4b1ed3d2c841e64340bc37eb739d51a | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | francescomucio/airflow | f17ed9abd8b41d8a2227deca052508edf12f1cbf | c199b1a10563a11cf24436e38cb167ae82c01601 | refs/heads/master | 2023-04-14T17:44:53.438246 | 2023-04-06T06:44:23 | 2023-04-06T06:44:23 | 217,327,641 | 0 | 0 | Apache-2.0 | 2020-09-09T13:26:47 | 2019-10-24T15:06:52 | Python | UTF-8 | Python | false | false | 14,809 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import json
import pytest
from airflow.jobs.base_job import BaseJob
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.www import app as application
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.config import conf_vars
from tests.test_utils.www import check_content_in_response, check_content_not_in_response
def test_index_redirect(admin_client):
    """The root URL redirects (302) to the /home DAG listing."""
    resp = admin_client.get("/")
    assert resp.status_code == 302
    assert "/home" in resp.headers.get("Location")

    resp = admin_client.get("/", follow_redirects=True)
    check_content_in_response("DAGs", resp)


def test_homepage_query_count(admin_client):
    """Guard against SQL query-count regressions when rendering the homepage."""
    # 17 is the currently expected number of queries; bump only deliberately.
    with assert_queries_count(17):
        resp = admin_client.get("/home")
    check_content_in_response("DAGs", resp)


def test_doc_urls(admin_client, monkeypatch):
    """The docs link rendered on the homepage comes from get_docs_url."""
    # Mocking this way is tying the test closer to the implementation much more than I'd like. :shrug:
    from airflow.www.views import AirflowBaseView

    monkeypatch.setitem(AirflowBaseView.extra_args, "get_docs_url", lambda _: "!!DOCS_URL!!")

    resp = admin_client.get("/", follow_redirects=True)

    check_content_in_response("!!DOCS_URL!!", resp)
    check_content_in_response("/api/v1/ui", resp)
@pytest.fixture()
def heartbeat_healthy():
    """Yield ('healthy', heartbeat_iso) with a fresh running SchedulerJob row."""
    # case-1: healthy scheduler status
    last_heartbeat = timezone.utcnow()
    job = BaseJob(
        job_type="SchedulerJob",
        state="running",
        latest_heartbeat=last_heartbeat,
    )
    with create_session() as session:
        session.add(job)
    yield "healthy", last_heartbeat.isoformat()
    # Teardown: remove exactly the row this fixture created.
    with create_session() as session:
        session.query(BaseJob).filter(
            BaseJob.job_type == "SchedulerJob",
            BaseJob.state == "running",
            BaseJob.latest_heartbeat == last_heartbeat,
        ).delete()


@pytest.fixture()
def heartbeat_too_slow():
    """Yield ('unhealthy', heartbeat_iso) with a SchedulerJob heartbeat 1 min stale."""
    # case-2: unhealthy scheduler status - scenario 1 (SchedulerJob is running too slowly)
    last_heartbeat = timezone.utcnow() - datetime.timedelta(minutes=1)
    job = BaseJob(
        job_type="SchedulerJob",
        state="running",
        latest_heartbeat=last_heartbeat,
    )
    with create_session() as session:
        # Push any pre-existing scheduler rows even further into the past so
        # the row added below is the most recent heartbeat.
        session.query(BaseJob).filter(
            BaseJob.job_type == "SchedulerJob",
        ).update({"latest_heartbeat": last_heartbeat - datetime.timedelta(seconds=1)})
        session.add(job)
    yield "unhealthy", last_heartbeat.isoformat()
    with create_session() as session:
        session.query(BaseJob).filter(
            BaseJob.job_type == "SchedulerJob",
            BaseJob.state == "running",
            BaseJob.latest_heartbeat == last_heartbeat,
        ).delete()


@pytest.fixture()
def heartbeat_not_running():
    """Yield ('unhealthy', None) after deleting all running SchedulerJob rows."""
    # case-3: unhealthy scheduler status - scenario 2 (no running SchedulerJob)
    with create_session() as session:
        session.query(BaseJob).filter(
            BaseJob.job_type == "SchedulerJob",
            BaseJob.state == "running",
        ).delete()
    yield "unhealthy", None
@pytest.mark.parametrize(
    "heartbeat",
    ["heartbeat_healthy", "heartbeat_too_slow", "heartbeat_not_running"],
)
def test_health(request, admin_client, heartbeat):
    """/health reports scheduler status matching the active heartbeat fixture."""
    # Load the corresponding fixture by name.
    scheduler_status, last_scheduler_heartbeat = request.getfixturevalue(heartbeat)
    resp = admin_client.get("health", follow_redirects=True)
    resp_json = json.loads(resp.data.decode("utf-8"))
    assert "healthy" == resp_json["metadatabase"]["status"]
    assert scheduler_status == resp_json["scheduler"]["status"]
    assert last_scheduler_heartbeat == resp_json["scheduler"]["latest_scheduler_heartbeat"]


def test_users_list(admin_client):
    """Admin can view the user list."""
    resp = admin_client.get("users/list", follow_redirects=True)
    check_content_in_response("List Users", resp)


@pytest.mark.parametrize(
    "path, body_content",
    [("roles/list", "List Roles"), ("roles/show/1", "Show Role")],
)
def test_roles_read(admin_client, path, body_content):
    """Admin can list and show roles."""
    resp = admin_client.get(path, follow_redirects=True)
    check_content_in_response(body_content, resp)


def test_roles_read_unauthorized(viewer_client):
    """Viewer role is denied access to the role list."""
    resp = viewer_client.get("roles/list", follow_redirects=True)
    check_content_in_response("Access is Denied", resp)
@pytest.fixture(scope="module")
def delete_role_if_exists(app):
def func(role_name):
if app.appbuilder.sm.find_role(role_name):
app.appbuilder.sm.delete_role(role_name)
return func
@pytest.fixture()
def non_exist_role_name(delete_role_if_exists):
role_name = "test_roles_create_role"
delete_role_if_exists(role_name)
yield role_name
delete_role_if_exists(role_name)
@pytest.fixture()
def exist_role_name(app, delete_role_if_exists):
role_name = "test_roles_create_role_new"
app.appbuilder.sm.add_role(role_name)
yield role_name
delete_role_if_exists(role_name)
@pytest.fixture()
def exist_role(app, exist_role_name):
return app.appbuilder.sm.find_role(exist_role_name)
def test_roles_create(app, admin_client, non_exist_role_name):
admin_client.post("roles/add", data={"name": non_exist_role_name}, follow_redirects=True)
assert app.appbuilder.sm.find_role(non_exist_role_name) is not None
def test_roles_create_unauthorized(app, viewer_client, non_exist_role_name):
resp = viewer_client.post("roles/add", data={"name": non_exist_role_name}, follow_redirects=True)
check_content_in_response("Access is Denied", resp)
assert app.appbuilder.sm.find_role(non_exist_role_name) is None
def test_roles_edit(app, admin_client, non_exist_role_name, exist_role):
admin_client.post(
f"roles/edit/{exist_role.id}", data={"name": non_exist_role_name}, follow_redirects=True
)
updated_role = app.appbuilder.sm.find_role(non_exist_role_name)
assert exist_role.id == updated_role.id
def test_roles_edit_unauthorized(app, viewer_client, non_exist_role_name, exist_role_name, exist_role):
resp = viewer_client.post(
f"roles/edit/{exist_role.id}", data={"name": non_exist_role_name}, follow_redirects=True
)
check_content_in_response("Access is Denied", resp)
assert app.appbuilder.sm.find_role(exist_role_name)
assert app.appbuilder.sm.find_role(non_exist_role_name) is None
def test_roles_delete(app, admin_client, exist_role_name, exist_role):
admin_client.post(f"roles/delete/{exist_role.id}", follow_redirects=True)
assert app.appbuilder.sm.find_role(exist_role_name) is None
def test_roles_delete_unauthorized(app, viewer_client, exist_role, exist_role_name):
resp = viewer_client.post(f"roles/delete/{exist_role.id}", follow_redirects=True)
check_content_in_response("Access is Denied", resp)
assert app.appbuilder.sm.find_role(exist_role_name)
@pytest.mark.parametrize(
    "url, client, content",
    [
        ("userstatschartview/chart/", "admin_client", "User Statistics"),
        ("userstatschartview/chart/", "viewer_client", "Access is Denied"),
        ("actions/list", "admin_client", "List Actions"),
        ("actions/list", "viewer_client", "Access is Denied"),
        ("resources/list/", "admin_client", "List Resources"),
        ("resources/list/", "viewer_client", "Access is Denied"),
        ("permissions/list/", "admin_client", "List Permissions"),
        ("permissions/list/", "viewer_client", "Access is Denied"),
        ("resetpassword/form?pk=1", "admin_client", "Reset Password Form"),
        ("resetpassword/form?pk=1", "viewer_client", "Access is Denied"),
        ("users/list", "admin_client", "List Users"),
        ("users/list", "viewer_client", "Access is Denied"),
    ],
    ids=[
        "userstatschertview-admin",
        "userstatschertview-viewer",
        "actions-admin",
        "actions-viewer",
        "resources-admin",
        "resources-viewer",
        "permissions-admin",
        "permissions-viewer",
        "resetpassword-admin",
        "resetpassword-viewer",
        "users-admin",
        "users-viewer",
    ],
)
def test_views_get(request, url, client, content):
    """Admin sees each security view; viewer is denied on all of them."""
    resp = request.getfixturevalue(client).get(url, follow_redirects=True)
    check_content_in_response(content, resp)


def _check_task_stats_json(resp):
    """Each task_stats entry must expose exactly 'state' and 'count' keys."""
    return set(list(resp.json.items())[0][1][0].keys()) == {"state", "count"}


@pytest.mark.parametrize(
    "url, check_response",
    [
        ("blocked", None),
        ("dag_stats", None),
        ("task_stats", _check_task_stats_json),
    ],
)
def test_views_post(admin_client, url, check_response):
    """Stats endpoints accept POST and (optionally) return the expected shape."""
    resp = admin_client.post(url, follow_redirects=True)
    assert resp.status_code == 200
    if check_response:
        assert check_response(resp)
@pytest.mark.parametrize(
    "url, client, content, username",
    [
        ("resetmypassword/form", "viewer_client", "Password Changed", "test_viewer"),
        ("resetpassword/form?pk={}", "admin_client", "Password Changed", "test_admin"),
        ("resetpassword/form?pk={}", "viewer_client", "Access is Denied", "test_viewer"),
    ],
    ids=["my-viewer", "pk-admin", "pk-viewer"],
)
def test_resetmypasswordview_edit(app, request, url, client, content, username):
    """Any user can reset their own password; only admin can reset by pk."""
    user = app.appbuilder.sm.find_user(username)
    resp = request.getfixturevalue(client).post(
        url.format(user.id), data={"password": "blah", "conf_password": "blah"}, follow_redirects=True
    )
    check_content_in_response(content, resp)


def test_resetmypasswordview_read(viewer_client):
    """Every role can open its own reset-password form."""
    # Tests with viewer as all roles should have access.
    resp = viewer_client.get("resetmypassword/form", follow_redirects=True)
    check_content_in_response("Reset Password Form", resp)


def test_get_myuserinfo(admin_client):
    """A user can view their own user-info page."""
    resp = admin_client.get("users/userinfo/", follow_redirects=True)
    check_content_in_response("Your user information", resp)


def test_edit_myuserinfo(admin_client):
    """A user can edit their own first/last name."""
    resp = admin_client.post(
        "userinfoeditview/form",
        data={"first_name": "new_first_name", "last_name": "new_last_name"},
        follow_redirects=True,
    )
    check_content_in_response("User information changed", resp)


@pytest.mark.parametrize(
    "url",
    ["users/add", "users/edit/1", "users/delete/1"],
    ids=["add-user", "edit-user", "delete-user"],
)
def test_views_post_access_denied(viewer_client, url):
    """Viewer is denied access to user add/edit/delete views."""
    resp = viewer_client.get(url, follow_redirects=True)
    check_content_in_response("Access is Denied", resp)
@pytest.fixture()
def non_exist_username(app):
    """Yield a username guaranteed not to exist before and after the test."""
    username = "fake_username"
    user = app.appbuilder.sm.find_user(username)
    if user is not None:
        app.appbuilder.sm.del_register_user(user)
    yield username
    user = app.appbuilder.sm.find_user(username)
    if user is not None:
        app.appbuilder.sm.del_register_user(user)


def test_create_user(app, admin_client, non_exist_username):
    """Admin can create a user through the users/add form."""
    resp = admin_client.post(
        "users/add",
        data={
            "first_name": "fake_first_name",
            "last_name": "fake_last_name",
            "username": non_exist_username,
            "email": "fake_email@email.com",
            "roles": [1],
            "password": "test",
            "conf_password": "test",
        },
        follow_redirects=True,
    )

    check_content_in_response("Added Row", resp)
    assert app.appbuilder.sm.find_user(non_exist_username)


@pytest.fixture()
def exist_username(app, exist_role):
    """Yield the username of a user created for the test and removed afterwards."""
    username = "test_edit_user_user"
    app.appbuilder.sm.add_user(
        username,
        "first_name",
        "last_name",
        "email@email.com",
        exist_role,
        password="password",
    )
    yield username
    if app.appbuilder.sm.find_user(username):
        app.appbuilder.sm.del_register_user(username)


def test_edit_user(app, admin_client, exist_username):
    """Admin can edit an existing user's first name."""
    user = app.appbuilder.sm.find_user(exist_username)
    resp = admin_client.post(
        f"users/edit/{user.id}",
        data={"first_name": "new_first_name"},
        follow_redirects=True,
    )
    check_content_in_response("new_first_name", resp)


def test_delete_user(app, admin_client, exist_username):
    """Admin can delete an existing user."""
    user = app.appbuilder.sm.find_user(exist_username)
    resp = admin_client.post(
        f"users/delete/{user.id}",
        follow_redirects=True,
    )
    check_content_in_response("Deleted Row", resp)
@conf_vars({("webserver", "show_recent_stats_for_completed_runs"): "False"})
def test_task_stats_only_noncompleted(admin_client):
resp = admin_client.post("task_stats", follow_redirects=True)
assert resp.status_code == 200
@conf_vars({("webserver", "instance_name"): "Site Title Test"})
def test_page_instance_name(admin_client):
resp = admin_client.get("home", follow_redirects=True)
check_content_in_response("Site Title Test", resp)
def test_page_instance_name_xss_prevention(admin_client):
xss_string = "<script>alert('Give me your credit card number')</script>"
with conf_vars({("webserver", "instance_name"): xss_string}):
resp = admin_client.get("home", follow_redirects=True)
escaped_xss_string = "<script>alert('Give me your credit card number')</script>"
check_content_in_response(escaped_xss_string, resp)
check_content_not_in_response(xss_string, resp)
instance_name_with_markup_conf = {
("webserver", "instance_name"): "<b>Bold Site Title Test</b>",
("webserver", "instance_name_has_markup"): "True",
}
@conf_vars(instance_name_with_markup_conf)
def test_page_instance_name_with_markup(admin_client):
resp = admin_client.get("home", follow_redirects=True)
check_content_in_response("<b>Bold Site Title Test</b>", resp)
check_content_not_in_response("<b>Bold Site Title Test</b>", resp)
@conf_vars(instance_name_with_markup_conf)
def test_page_instance_name_with_markup_title():
appbuilder = application.create_app(testing=True).appbuilder
assert appbuilder.app_name == "Bold Site Title Test"
| [
"noreply@github.com"
] | francescomucio.noreply@github.com |
6b0d3aaa5d1324146b6e7560b6d9b74264c597cb | b41f2312465eaeb8a1cc408236ff39e7e653e674 | /Python/ParaPro/ParaInt.py | 164c5e78a6d67478ae189dbc395da5d7e6c18e1a | [] | no_license | tlfhmy/Homework | 6ae06c2b5ae9deba5172b6080724dc515b04b42a | 3a4edd0fcc2d57e9c14da1a1f6931a32547073c2 | refs/heads/master | 2020-03-31T14:52:27.515375 | 2019-06-06T16:36:33 | 2019-06-06T16:36:33 | 152,313,042 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,722 | py | from threading import Thread
from time import sleep
from math import sin,cos,pi,exp,sqrt,log
# Integration limits along the x axis.
xmin = -5
xmax = 5
def function(x):
    """Integrand: exp(-x^2/2 + 4x)."""
    exponent = -x ** 2 / 2 + 4 * x
    return exp(exponent)
def find_range():
    """Largest absolute value of `function` sampled at 5000 points on [xmin, xmax)."""
    step = (xmax - xmin) / 5000
    samples = [function(xmin + k * step) for k in range(5000)]
    lowest, highest = min(samples), max(samples)
    return max(abs(lowest), abs(highest))
# Symmetric y-range derived from the largest |f(x)| on the sampled interval.
ymax = find_range()
ymin = -ymax
def trancor(a, b, c, d):
    """Solve the linear map m*a + n = c, m*b + n = d; return (m, n)."""
    denom = a - b
    slope = (c - d) / denom
    offset = (a * d - b * c) / denom
    return (slope, offset)
points = []
N = 1000
wait_time = 0.01
def dwait(wt):
    """Sleep for *wt* seconds, treating anything below 1e-8 as zero."""
    if wt >= 1e-8:
        sleep(wt)
# Pre-sample the integrand at N evenly spaced points on [xmin, xmax).
for i in range(0,N):
    xi = xmin+(xmax - xmin)/N*i
    yi = function(xi)
    points.append((xi, yi))

# Shared accumulator for the Riemann sum, updated by the part1..part4 threads.
integral = 0.0

# Coordinate grid spacing (20 divisions per axis).
x_coordgap = (xmax - xmin) / 20.
y_coordgap = (ymax - ymin) / 20.
def part1():
    """Accumulate the Riemann sum over the first quarter of the samples.

    NOTE(review): all four part* workers do `integral += ...` on a shared
    module-level accumulator from separate threads; `+=` is not atomic, so
    this relies on CPython's GIL and is a latent race condition.
    """
    for i in range(0,N//4):
        if i < N-2:
            global integral
            integral += (xmax-xmin)/N*points[i][1]
    print("Part1")

def part2():
    """Accumulate the Riemann sum over the second quarter of the samples."""
    for i in range(N//4,N//2):
        if i < N-2:
            global integral
            integral += (xmax-xmin)/N*points[i][1]
    print("Part2")

def part3():
    """Accumulate the Riemann sum over the third quarter of the samples."""
    for i in range(N//2,N*3//4):
        if i < N-2:
            global integral
            integral += (xmax-xmin)/N*points[i][1]
    print("Part3")

def part4():
    """Accumulate the Riemann sum over the last quarter of the samples.

    The `i < N-2` guard drops the last two strips here (mirrors the other parts).
    """
    for i in range(N*3//4,N):
        if i < N-2:
            global integral
            integral += (xmax-xmin)/N*points[i][1]
    print("Part4")
t1 = Thread(target=part1)
t2 = Thread(target=part2)
t3 = Thread(target=part3)
t4 = Thread(target=part4)
# Start all workers, then wait for them to finish before reading the
# shared accumulator.  The original printed `integral` immediately
# after start(), i.e. while the worker threads were still running, so
# the printed value was a partial (often zero) sum.
for t in (t1, t2, t3, t4):
    t.start()
for t in (t1, t2, t3, t4):
    t.join()
print(integral)
"41378536+tlfhmy@users.noreply.github.com"
] | 41378536+tlfhmy@users.noreply.github.com |
bb6c101d284be09a0ff2ae31f695ab7b0d58c063 | eb45e482180bcfd56a4288f5d8c2d087a2c799ee | /Prueba Menu/Nave3.py | d9a43ad29bc30cf47f6f5bdc5f59bde2fa92dfc6 | [] | no_license | rodriepsilon/ProyectoIE0117 | 40ed0383b8dafa5b69f0e7163171d47a7cff298f | 0a211057cc8137dac1061660ae1fb74afecf28a8 | refs/heads/master | 2023-01-23T06:46:29.203313 | 2020-12-12T16:24:38 | 2020-12-12T16:24:38 | 320,868,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | import pygame
import Proyectil
class naveEspacial(pygame.sprite.Sprite):
    """Player spaceship sprite: movement, shooting and destruction."""
    def __init__(self, ancho, alto):
        # ancho/alto: screen width/height; the ship starts horizontally
        # centred, 30 px above the bottom edge.
        pygame.sprite.Sprite.__init__(self)
        self.ImagenNave = pygame.image.load("Imagenes/nave3.png")
        self.ImagenExplosion = pygame.image.load("Imagenes/explosion.jpg")
        self.rect = self.ImagenNave.get_rect()
        self.rect.centerx = ancho/2
        self.rect.centery = alto - 30
        self.listaDisparo = []
        self.Vida = True
        self.velocidad = 20
        self.sonidoDisparo = pygame.mixer.Sound("Sonidos/Disparo1.wav") # sound file path; must be a .wav
        self.sonidoExplosion = pygame.mixer.Sound("Sonidos/Disparo2.wav")
    def movimientoDerecha(self):
        # Move right by one speed step, then clamp to the playfield.
        self.rect.right += self.velocidad
        self.__movimiento()
    def movimientoIzquierda(self):
        # Move left by one speed step, then clamp to the playfield.
        self.rect.left -= self.velocidad
        self.__movimiento()
    def __movimiento(self):
        # Keep the ship inside the playfield while it is alive.
        if self.Vida == True:
            if self.rect.left <= 0:
                self.rect.left = 0
            elif self.rect.right > 870:
                # NOTE(review): tests against 870 but clamps to 840 —
                # confirm the asymmetric margin is intended.
                self.rect.right = 840
    def disparar(self,x,y):
        # Spawn a friendly projectile at (x, y) and play the shot sound.
        miProyectil = Proyectil.Proyectil(x,y, "Imagenes/disparoa.jpg", True)
        self.listaDisparo.append(miProyectil)
        self.sonidoDisparo.play()
    def destruccion(self):
        # Swap in the explosion image, stop movement, mark as dead.
        self.sonidoExplosion.play()
        self.Vida = False
        self.velocidad = 0
        self.ImagenNave = self.ImagenExplosion
    def dibujar(self, superficie):
        # Blit the current ship (or explosion) image onto `superficie`.
        superficie.blit(self.ImagenNave,self.rect)
| [
"rodriepsilon@gmail.com"
] | rodriepsilon@gmail.com |
af4c1960bda9e1828b335560c1264d8c34a317b7 | e09cf4c8b8e52b4bdd9cd502b4ae4851b7834425 | /VayaSiteTool/SJPControl.py | a00990bed650a120b5159e9c47c7276028f38b69 | [] | no_license | simonhdl/PythonProject | 4bf999adee9f9c0c913d34a3449df8658f10e2cc | 64017dbc1f050a46e58d0800fae458fe3e4802c9 | refs/heads/master | 2016-09-06T06:55:47.526588 | 2015-06-02T02:09:10 | 2015-06-02T02:09:10 | 27,421,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,154 | py | import os
import serial
import time
import PyQt4, PyQt4.QtGui, PyQt4.QtCore, sys
from serial.tools import list_ports
from PyQt4 import QtCore, QtGui
# Define Global Variable
SJP_SerialNumber = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00] # Store the connected Smart Jack Pro Serial number
FixtureDiscovered = [] # Store the Fixture SN which have been discover
FixtureSN = []
SearchRange = [[0x000000000000, 0xFFFFFFFFFFFF]] # Initial Search Range
global ser
global SJP_SN
global ComPort
def serial_ports(self=None):
    """
    Yield the device names of all serial ports that can be opened.

    The parameter is unused: this is a module-level function, but it was
    declared with a mandatory ``self`` while the call site in this file
    invokes it with no arguments (``list(serial_ports())``), which raised
    a TypeError.  The parameter is kept with a default so any external
    caller that passed an argument keeps working.
    """
    if os.name == 'nt':
        # windows: probe COM1..COM256 by trying to open each port
        for i in range(256):
            try:
                s = serial.Serial(i)
                s.close()
                yield 'COM' + str(i + 1)
            except serial.SerialException:
                pass
    else:
        # unix: pyserial enumerates ports directly
        for port in list_ports.comports():
            yield port[0]
def Send_Get_SerialNumber() :
    """
    Send Command to SJP to get the SJP serial number
    """
    # Packet type 0x0A with an empty payload requests the serial number.
    Payload = []
    SJPSendPacket(0x0A, Payload )
def Receive_Get_SerialNumber():
    """
    Read the serial-number reply from the SJP and decode it.

    Fills the module globals SJP_SerialNumber (6-byte UID list: two 0x00
    bytes prepended to the 4 bytes the device returns) and SJP_SN (hex
    string built from reply bytes 7..4).  Returns SJP_SN, or 0 on
    short/invalid data.
    """
    global SJP_SerialNumber
    global SJP_SN
    try:
        s=ser.read(50)
    except serial.SerialException:
        pass
    # NOTE(review): if ser.read raises, `s` is unbound and the next line
    # raises NameError — confirm whether the except should `return 0`.
    if s=="" or len(list(s))<6: #invalid data
        return 0
    S = []
    SJP_SerialNumber = [0x00, 0x00] # SJP return 4bytes, but UID contain 6 bytes, so we add 2 bytes ahead
    SJP_SN = ""
    for c in s:
        S.append(ord(c))
    for i in range(7,3,-1):
        # Pad every byte to two hex digits.  The original tested
        # S[i] < 10, so values 0x0A-0x0F produced a single digit and
        # broke the fixed-width serial string; < 16 covers them too.
        if S[i]<16:
            A = str(hex(S[i]))
            SJP_SN += ('0'+A[2:3])
        else:
            A = str(hex(S[i]))
            SJP_SN += A[2:4]
        SJP_SerialNumber.append(S[i])
    return SJP_SN
def Open_SJP ():
    """
    Search each available com port for SJP, if SN read back positive, then connect to this com port as SJP.

    On success the globals `ser` (open port) and `ComPort` are set and 1
    is returned; otherwise 0.  NOTE(review): the unix branch only prints
    the available ports and never attempts a connection — confirm.
    """
    global ser
    global SJP_SN
    global ComPort
    if os.name == 'nt':
        # windows
        for i in range(256):
            try:
                ser = serial.Serial(i, timeout = 0.05)
                ComPort = i
                Send_Get_SerialNumber()
                if(Receive_Get_SerialNumber()):
                    return 1
                ser.close()
                #print 'COM' + str(i + 1)
            except serial.SerialException:
                pass
    else:
        # unix
        for port in list_ports.comports():
            print port[0]
    return 0
def Close_SJP():
"""
Close com port which attached to SJP
"""
global ser
ser.close()
def FastConnect():
"""
Debug use. For un-handle connection exception
"""
global ComPort
global ser
try:
ser.close()
time.sleep(0.5)
ser = serial.Serial(ComPort, timeout = 0.05)
except:
pass
def Send_DMX_Packet (payload):
"""
Send normal DMX512 packet to SJP
"""
DMXPacket = [0x00]
DMXPacket.extend(payload)
SJPSendPacket(0x06, DMXPacket)
def SJPSendPacket (type, payload):
    """
    Construct a Smart Jack pro packet and send out.

    Frame layout: 0x7E header, 1-byte type, 16-bit little-endian payload
    length (LSB then MSB), payload bytes, 0xE7 trailer.
    NOTE: the parameter shadows the builtin `type`.
    """
    SJPPacket = []
    global ser
    SJPPacketHeader = 0x7E
    SJPPacketType = type
    SJPPacketLenLSB = (len(payload))&0xFF
    SJPPacketLenMSB = (len(payload)>>8)&0xFF
    SJPPacketEnder = 0xE7
    SJPPacket.append(SJPPacketHeader)
    SJPPacket.append(SJPPacketType)
    SJPPacket.append(SJPPacketLenLSB)
    SJPPacket.append(SJPPacketLenMSB)
    if(len(payload)>0):
        SJPPacket += payload
    SJPPacket.append(SJPPacketEnder)
    #UART Send out the packet
    time.sleep(0.02) # need delay!! dont know why
    try:
        ser.write(SJPPacket)
    except NameError:
        pass
    #print SJPPacket
def Send_Unmute_Command ():
"""
This is a BROADCAST command that every fixture on the bus will act. but not reply.
"""
Payload = []
CheckSum = 0
RDMStartCode = [0xCC] # RDM Start Code 0xCC
RDMMessage1 = [0x01, 0x18] # RDM packet length 24
RDMDestination_ID = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF] # Destination UID, it is a broadcast command
RDMSource_ID = SJP_SerialNumber # Source UID
RDMMessage2 = [0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x03] # RDM message
RDMMessage3 = [0x00]
RDMChecksum = []
RDMPacket = RDMStartCode + RDMMessage1 + RDMDestination_ID + RDMSource_ID + RDMMessage2 + RDMMessage3
RDMChecksum = RDMCheckSumCalculate(RDMPacket)
Payload = RDMPacket + RDMChecksum
SJPSendPacket(0x07, Payload )
def Send_Mute_Command (TargetUID):
"""
Input parameter: Tartget UID that will be muted.
After mute, the fixture will not respond to any RDM command until reset.
"""
Payload = []
CheckSum = 0
RDMStartCode = [0xCC] # RDM Start Code 0xCC
RDMMessage1 = [0x01, 0x18] # RDM packet length 24
RDMDestination_ID = TargetUID
RDMSource_ID = SJP_SerialNumber # Source UID
RDMMessage2 = [0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x02] # RDM message
RDMMessage3 = [0x00]
RDMChecksum = []
RDMPacket = RDMStartCode + RDMMessage1 + RDMDestination_ID + RDMSource_ID + RDMMessage2 + RDMMessage3
RDMChecksum = RDMCheckSumCalculate(RDMPacket)
Payload = RDMPacket + RDMChecksum
SJPSendPacket(0x07, Payload )
def Send_Discovery_Command (LowBond, UpBond):
"""
Send Discovery Command between the given address range.
The fixtures between this range will respond.
"""
Payload = []
CheckSum = 0
RDMStartCode = [0xCC] # RDM Start Code 0xCC
RDMMessage1 = [0x01, 0x24] # RDM packet length 24
RDMDestination_ID = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
RDMSource_ID = SJP_SerialNumber # Source UID
RDMMessage2 = [0x01, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01]
LBHex = [(LowBond>>40&0xFF), (LowBond>>32&0xFF), (LowBond>>24&0xFF), (LowBond>>16&0xFF), (LowBond>>8&0xFF), (LowBond&0xFF)]
UBHex = [(UpBond>>40&0xFF), (UpBond>>32&0xFF), (UpBond>>24&0xFF), (UpBond>>16&0xFF), (UpBond>>8&0xFF), (UpBond&0xFF)]
RDMMessage3 = [0x0C] + LBHex + UBHex
RDMChecksum = []
RDMPacket = RDMStartCode + RDMMessage1 + RDMDestination_ID + RDMSource_ID + RDMMessage2 + RDMMessage3
RDMChecksum = RDMCheckSumCalculate(RDMPacket)
Payload = RDMPacket + RDMChecksum
SJPSendPacket(0x0B, Payload )
def Handle_DiscoverPacket():
    """
    Analyze the receive packet to check if it is a valid discover respond packet.
    If valid, then add the fixture SN into discover fixture list.

    Returns 0 when nothing (or no fixture) responded, 1 when a single
    fixture's UID was decoded (appended to FixtureDiscovered), 2 when
    the checksum fails — interpreted as several fixtures answering at
    once (a collision).  NOTE(review): falls through returning None when
    the outer frame check fails — confirm callers treat that as "retry".
    """
    global FixtureDiscovered
    global ser
    try:
        s=ser.read(50)
    except NameError:
        return 0
    if s=='':
        return 0
    if (ord(s[0])==0x7E and ord(s[1])==0x0C) :
        return 0
    S = []
    DiscRespond = []
    DiscoverdSN = []
    Checksum = 0
    for c in s:
        S.append(ord(c))
    if (S[0]==126) and (S[2] > 24): # valid RDM Packet
        DiscRespond = S[5:29] # vaild Respcond packet
        #print DiscRespond
        # Checksum covers the 12 encoded UID bytes (offsets 8..19).
        for i in range(8,20):
            Checksum += DiscRespond[i]
        #valid singel respond packet (Check sum OK)
        if ((Checksum>>8 | 0xAA)==DiscRespond[20])and((Checksum>>8 | 0x55)==DiscRespond[21])and((Checksum &0xFF | 0xAA)==DiscRespond[22])and((Checksum&0xFF | 0x55)==DiscRespond[23]) :
            # Each UID byte is sent twice (OR'd with 0xAA and 0x55);
            # AND-ing the pair recovers the original byte.
            for i in range(6):
                DiscoverdSN.append((DiscRespond[2*i + 8] & DiscRespond[2*i+1 + 8]))
            FixtureDiscovered.append(DiscoverdSN)
            return 1 # Single Fixture
        else:
            return 2 # Multiple Fixture
def Process_Discovery ():
    """
    Algorithm for discovery process.

    Binary-split discovery: repeatedly probe UID ranges; a collision
    (Result == 2) splits the current range in half and restarts the
    scan; a single hit mutes that fixture so the rest can answer; a
    silent range is blacklisted in NoRespondRange.
    """
    global SearchRange
    global FixtureDiscovered
    SearchRange = [[0x000000000000, 0xFFFFFFFFFFFF]] # Original Search Range
    FixtureDiscovered = [] # Fixture that discovered should be empty at the beginning
    NoRespondRange = [] # Also record down the no fixture range.
    MultipleFlag = 0
    CompleteFlag = 0
    Send_Unmute_Command ()
    while CompleteFlag==0:
        # Remove the NoRespondRange in SearchRange, the process will not send discover packet in this range.
        for item in NoRespondRange:
            if(item in SearchRange):
                SearchRange.remove(item)
        NoRespondRange = [] # Empty the NoRespondRange
        for item in SearchRange:
            #print "Current Search Range is : ", item
            for i in range(32): # ??? One range search 32 times???
                time.sleep(0.05)
                Send_Discovery_Command(item[0],item[1]) # Select one range and send the discover command, then wait for feedback
                Result = Handle_DiscoverPacket()
                if( Result == 0 ):
                    #print "No Fixture in this range"
                    NoRespondRange.append(item)
                    CompleteFlag = 1
                    break
                elif ( Result == 1 ):
                    #print "One Fixture Found : ", FixtureDiscovered[-1]
                    Send_Mute_Command (FixtureDiscovered[-1])
                    CompleteFlag = 0 # not exit the loop because sometimes the response delay for different fixture is different. You need to search more than one time.
                elif ( Result == 2 ):
                    #print "Multiple Fixtures in this range"
                    MultipleFlag = 1
                    CompleteFlag = 0
                    break
            if MultipleFlag == 1:
                # Divide the Search Range and Update Search Range
                tempitem = [0,0]
                tempitem[0]=item[0]
                tempitem[1]=(item[0]+item[1])/2
                newitem = [ (item[0]+item[1])/2 +1, item[1] ]
                SearchRange.insert( SearchRange.index(item)+1, newitem)
                SearchRange[SearchRange.index(item)] = tempitem
                MultipleFlag = 0
                break # since we change the Search Range, we re-start the process.
def RDMCheckSumCalculate (payload):
    """
    Compute the 16-bit additive RDM checksum of `payload` and return it
    as a two-byte list [MSB, LSB].
    """
    total = sum(payload)
    return [(total >> 8) & 0xFF, total & 0xFF]
def RDMCheckSumCheck (payload):
    """
    Verify the trailing two checksum bytes of a received RDM packet.
    Returns 1 when they match the additive checksum of the preceding
    bytes, otherwise 0.
    """
    total = sum(payload[:len(payload) - 2])
    expected = [(total >> 8) & 0xFF, total & 0xFF]
    if payload[len(payload) - 2: len(payload)] == expected:
        return 1
    return 0
"""
PARAMETER ID LIST
0001 DISC_UNIQUE_BRANCH
0002 DISC_MUTE
0003 DISC_UNMUTE
0050 SUPPORTED_PARAMETERS
0051 PARAMETERS_DESCRIPTION
0060 DEVICE_INFO
0080 DEVICE_MODEL_DESCRIPTION
0081 MFG_LABEL
00C0 SOFTWARE_VERSION_LABEL
00F0 DMX_START_ADDRESS
1000 IDENTIFY_DEVICE
"""
def Send_RDM_GetCommand (TartgetUID, para, payload):
"""
Get Command is to get the parameters from the fixture.
"""
RDMPacket = []
RDMStartCode = [0xCC]
RDMSubStartCode = [0x01]
RDMMsgLength = [0x00]
RDMDestinationUID = TartgetUID
RDMSourceUID = SJP_SerialNumber
RDMTransNum = [0x00]
RDMPortID = [0x00]
RDMMsgCount = [0x01] # Must start from 0x01
RDMSubDevice = [0x00, 0x00]
RDMCmdClass = [0x20] # Get Command 0x20, GetCommand Respond 0x21
RDMParaID = [0x00,0x00]
RDMParaLength = [0x00]
RDMParaData = payload
RDMChecksum = [0x00, 0x00]
RDMParaID = para
RDMPacket = RDMStartCode + RDMSubStartCode + RDMMsgLength + RDMDestinationUID + RDMSourceUID \
+ RDMTransNum + RDMPortID + RDMMsgCount + RDMSubDevice + RDMCmdClass + RDMParaID + \
RDMParaLength + RDMParaData
RDMPacket[2] = len(RDMPacket)
RDMChecksum = RDMCheckSumCalculate(RDMPacket)
Payload = RDMPacket + RDMChecksum
SJPSendPacket(0x07, Payload )
def Send_RDM_SetCommand (TartgetUID, para, payload):
"""
Set Command is the command to set parameter to the fixture.
"""
RDMPacket = []
RDMStartCode = [0xCC]
RDMSubStartCode = [0x01]
RDMMsgLength = [0x00]
RDMDestinationUID = TartgetUID
RDMSourceUID = SJP_SerialNumber
RDMTransNum = [0x00]
RDMPortID = [0x00]
RDMMsgCount = [0x00]
RDMSubDevice = [0x00, 0x00]
RDMCmdClass = [0x30] # Set Command 0x30, GetCommand Respond 0x31
RDMParaID = [0x00,0x00]
RDMParaLength = [0x00]
RDMParaData = payload
RDMChecksum = [0x00, 0x00]
RDMParaID = para
RDMPacket = RDMStartCode + RDMSubStartCode + RDMMsgLength + RDMDestinationUID + RDMSourceUID \
+ RDMTransNum + RDMPortID + RDMMsgCount + RDMSubDevice + RDMCmdClass + RDMParaID + \
RDMParaLength + RDMParaData
RDMPacket[2] = len(RDMPacket)
RDMChecksum = RDMCheckSumCalculate(RDMPacket)
Payload = RDMPacket + RDMChecksum
SJPSendPacket(0x07, Payload )
def Handle_RDM_Command ():
"""
Process the RDM received packet
"""
try:
s=ser.read(500)
except :
return 0
# Is valid SJP Packet?
if s=='' :
return 0
if (ord(s[0])==0x7E and ord(s[1])==0x0C) or (ord(s[1])!= 0x05) or (ord(s[0])!=0x7E and ord(s[-1])!=0xE7) or s=='':
return 0
if s==0:
return 0
S = []
RDMPacket = []
Checksum = 0
RDMFlag = 0
RDMCount = 0
DataRecv = ''
DatainStr = ''
for c in s:
S.append(ord(c))
for item in S:
if(item==0xCC):
RDMFlag = 1
if(RDMFlag==1):
RDMPacket.append(item)
RDMCount +=1
if(len(RDMPacket)>2):
if((RDMCount-2)==RDMPacket[2]):
RDMFlag=0
RDMCount=0
break
if RDMCheckSumCheck(RDMPacket):
#print RDMPacket
s = ""
#print "Smart Jack Pro ID : ", RDMPacket[3:9]
#print "Vaya Fixture ID : ", RDMPacket[9:15]
#print RDMPacket
if RDMPacket[20]==0x21 : # GET COMMAND
#print "GET CMD : "
#print "PARA ID : ", RDMPacket[21:23]
#print "DATA : ", RDMPacket[24:24+RDMPacket[23]]
DataRecv = RDMPacket[24:24+RDMPacket[23]]
#for item in RDMPacket[24:24+RDMPacket[23]]:
# s +=chr(item)
#print "DATA in STRING : ", s
elif RDMPacket[20]==0x31 : # SET COMMAND
#print "SET CMD : "
#print "PARA ID : ", RDMPacket[21:23]
#print "DATA : ", RDMPacket[24:24+RDMPacket[23]]
DataRecv = RDMPacket[24:24+RDMPacket[23]]
#for item in RDMPacket[24:24+RDMPacket[23]]:
# s +=chr(item)
#print "DATA in STRING : ", s
return DataRecv
else:
print "PACKET RECEIVE NG"
###################################################################
def RDM_Get_Supported_Para (TargetUID):
"""
RDM command to get the fixture support parameter list.
"""
para = [0x00, 0x50]
payload = []
Send_RDM_GetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Get_Para_Description (TargetUID):
"""
RDM command to get the fixture parameters description.
"""
para = [0x00, 0x51]
payload = []
Send_RDM_GetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Get_Device_Info (TargetUID):
"""
RDM command to get the fixture device information.
"""
para = [0x00, 0x60]
payload = []
Send_RDM_GetCommand(TargetUID, para, payload)
return Handle_RDM_Command()
###################################################################
def _RDM_Get_String(TargetUID, para):
    """Send an RDM GET for parameter `para` and decode the reply as text.

    Returns '' when Handle_RDM_Command() reports no/invalid data (it
    returns 0 in that case).  Shared helper for the three string-valued
    getters below, which previously repeated this body verbatim.
    """
    Send_RDM_GetCommand(TargetUID, para, [])
    DataRev = Handle_RDM_Command()
    if DataRev == 0:
        return ''
    s = ""
    for item in DataRev:
        s += chr(item)
    return s
###################################################################
def RDM_Get_Device_Model_Description (TargetUID):
    """
    RDM command to get the fixture model description.
    """
    return _RDM_Get_String(TargetUID, [0x00, 0x80])
###################################################################
def RDM_Get_Mfg_Label (TargetUID):
    """
    RDM command to get the fixture manufacturing label (Not use).
    """
    return _RDM_Get_String(TargetUID, [0x00, 0x81])
###################################################################
def RDM_Get_Device_Label (TargetUID):
    """
    RDM command to get the fixture device label.
    """
    return _RDM_Get_String(TargetUID, [0x00, 0x82])
###################################################################
def RDM_Get_Software_Version (TargetUID):
    """
    RDM command to get the fixture software version (PID 0x00C0).

    Returns the version as a string, or '' when no valid reply was
    received — Handle_RDM_Command() returns the int 0 in that case; the
    sibling string getters already guard against it, but this function
    crashed with a TypeError iterating over 0.
    """
    para = [0x00, 0xC0]
    payload = []
    s = ""
    Send_RDM_GetCommand(TargetUID, para, payload)
    DataRev = Handle_RDM_Command()
    if DataRev == 0:
        return ''
    for item in DataRev:
        s += chr(item)
    return s
###################################################################
def RDM_Get_DMX_Start_Address (TargetUID):
    """
    RDM command to get the fixture DMX start address (PID 0x00F0).

    Returns the address as a decimal string, or '' when no valid reply
    was received (the original indexed into the int 0 returned by
    Handle_RDM_Command() on failure and raised a TypeError).
    """
    para = [0x00, 0xF0]
    payload = []
    Send_RDM_GetCommand(TargetUID, para, payload)
    DataRev = Handle_RDM_Command()
    if DataRev == 0 or len(DataRev) < 2:
        return ''
    # Address is transmitted big-endian in the first two payload bytes.
    FixtureAddr = DataRev[0]<<8 | DataRev[1]
    return str(FixtureAddr)
###################################################################
def RDM_Get_UID (TargetUID):
"""
RDM command to get the fixture UID (Serial Number).
"""
DataRev = ''
FixtureAddr = 0
para = [0xA0, 0x01]
payload = []
Send_RDM_GetCommand(TargetUID, para, payload)
DataRev = Handle_RDM_Command()
#print DataRev
return DataRev
###################################################################
def RDM_Get_Identify_Device (TargetUID):
"""
RDM command to Get the fixture identify status.
"""
para = [0x10, 0x00]
payload = []
Send_RDM_GetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Set_Identify_Device (TargetUID, OnOff):
"""
RDM command to tell fixture to identify itself.
"""
para = [0x10, 0x00]
payload = OnOff
Send_RDM_SetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Set_DMX_Start_Address (TargetUID, DMXStartAddress):
"""
RDM command to set the fixture DMX start address.
"""
para = [0x00, 0xF0]
payload = DMXStartAddress
Send_RDM_SetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Set_NewUID (TargetUID, NewUID):
"""
RDM command to set the fixture UID (Serial Number).
"""
para = [0xA0, 0x00]
payload = []
Send_RDM_SetCommand(TargetUID, para, payload)
time.sleep(0.05)
para = [0xA0, 0x01]
payload = NewUID
Send_RDM_SetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Set_Model_Description (TargetUID, NewModel):
"""
RDM command to set the fixture model description.
"""
para = [0x00, 0x80]
payload = []
payloadTemp = list(NewModel)
for item in payloadTemp:
#print item
#QString -> QByteArray -> QChar -> int -> char -> hex ...... stupid!!
DataTemp = ord(chr(PyQt4.QtCore.QChar(item.toAscii()).unicode()))
#print DataTemp
#payload.append((item.toAscii()).toHex())
payload.append(DataTemp)
#payload.append(ord(item))
#print payload
Send_RDM_SetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def RDM_Set_Device_Label (TargetUID, NewMfgLabel):
"""
RDM command to set the fixture device label.
"""
para = [0x00, 0x82]
payload = []
payloadTemp = list(NewMfgLabel)
for item in payloadTemp:
#print item
#QString -> QByteArray -> QChar -> int -> char -> hex ...... stupid!!
DataTemp = ord(chr(PyQt4.QtCore.QChar(item.toAscii()).unicode()))
#print DataTemp
#payload.append((item.toAscii()).toHex())
payload.append(DataTemp)
#payload.append(ord(item))
#print payload
Send_RDM_SetCommand(TargetUID, para, payload)
Handle_RDM_Command()
####################################################################
def RDM_Set_NumOfOutput (TargetUID, Number):
"""
RDM command to set the fixture UID (Serial Number).
"""
para = [0xA0, 0x00]
payload = []
Send_RDM_SetCommand(TargetUID, para, payload)
time.sleep(0.05)
para = [0xA0, 0x0B]
payload = [Number]
Send_RDM_SetCommand(TargetUID, para, payload)
Handle_RDM_Command()
###################################################################
def ConvertSN2ASCII (SN):
    """
    Convert a list of fixture serial numbers (each a list of 6 ints)
    into uppercase hex strings of the form 'XXXX:XXXXXXXX' and store
    them in the module global FixtureSN.

    Also returns the resulting list so callers no longer need to reach
    for the global (which is kept for backward compatibility).
    """
    global FixtureSN
    FixtureSN = []
    for item in SN:
        tempSN = ''
        for i, item2 in enumerate(item, 1):
            # hex() of a value < 0x10 is 3 chars ('0x5'); pad to 2 digits.
            temp = hex(item2)
            if len(temp) == 3:
                tempSN += '0' + temp[2:]
            else:
                tempSN += temp[2:]
            if i == 2:
                # Separator after the first two bytes (presumably the
                # 2-byte manufacturer-ID prefix of the 6-byte UID).
                tempSN += ':'
        FixtureSN.append(tempSN.upper())
    return FixtureSN
def ConvertRDMVersion2ASCII (S):
    """
    Render an RDM version byte sequence as a dotted hex string:
    a '.' follows the first byte, e.g. [0x01, 0x02] -> '01.02'.
    """
    parts = []
    for value in S:
        digits = hex(value)[2:]
        if len(digits) == 1:
            digits = '0' + digits
        parts.append(digits)
    if not parts:
        return ''
    return parts[0] + '.' + ''.join(parts[1:])
def ConvertFootprint2ASCII (S):
    """
    Render the fixture footprint value as (at least) two lowercase hex
    digits, zero-padding single-digit values.
    """
    digits = hex(S)[2:]
    if len(digits) == 1:
        digits = '0' + digits
    return digits
if __name__ == '__main__':
print "Available Serial Port: ", list(serial_ports())
ser = serial.Serial(4, timeout = 0.05)
print "Smart Jack Pro Serial Port: ", ser.name
Send_Get_SerialNumber()
print "Smart Jack Pro Serial Number: ", Receive_Get_SerialNumber()
print SJP_SerialNumber
raw_input("Press Enter to Start Discover!")
print "Discovery starts at ", time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
Status = Process_Discovery()
for item in FixtureDiscovered:
print FixtureDiscovered.index(item), ":" , item
print "Discovery ends at ", time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
"""
raw_input("Press Enter to Start RDM!")
TargetFixture = [0x50, 0x68, 0x15, 0x39, 0x89, 0x5A]
print "\nREAD DEVICE INFO"
RDM_Get_Device_Info(TargetFixture)
print "\nREAD DEVICE MODEL"
RDM_Get_Device_Model_Description(TargetFixture)
print "\nREAD SOFTWARE VERSION"
RDM_Get_Software_Version(TargetFixture)
print "\nREAD SUPPORT PARA"
RDM_Get_Supported_Para(TargetFixture)
print "\nREAD PARA DESCRIPTION"
RDM_Get_Para_Description(TargetFixture)
print "\nREAD MANUFACTURING LABEL"
RDM_Get_Mfg_Label(TargetFixture)
print "\nREAD DMX START ADDRESS"
RDM_Get_DMX_Start_Address(TargetFixture)
#print "\nSET NEW UID"
#RDM_Set_NewUID(TargetFixture, [0x50, 0x68, 0x15, 0x39, 0x89, 0x5A])
print "\nSET NEW MODEL NAME"
RDM_Set_Model_Description (TargetFixture, '0027_PHILIPS_BCP_SZ_RGB ')
"""
#RDM_Get_UID([0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF])
ser.close()
| [
"simon.dl.he@philips.com"
] | simon.dl.he@philips.com |
acbe30a76358b81d1b34fb6b8c750f09a386d5f3 | 725ab3b4bf58de327c1a77ad5a56a646e6757161 | /Training/vmodel.py | 405606426b39921ca6ea1ee3f0e71b15e6a4c7dd | [] | no_license | AhmedWael205/MI-Go-Game | e12b1959363b49b1c43dc5a3b54b60e03c79f2a8 | d18a191da8e131ce8f5de5c3365b5b0d01e6e3b4 | refs/heads/master | 2022-06-30T21:25:15.103786 | 2020-05-10T21:56:13 | 2020-05-10T21:56:13 | 252,157,974 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,564 | py | from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten # create model
from keras.layers import Input
from keras.models import Sequential, load_model, Model
import numpy as np
import pickle as pk
from keras import regularizers
def conv_layer(x, filters, kernel_size, act):
    """Apply a same-padded, bias-free 2D convolution (channels-first) to x.

    The kernel is L2-regularized (1e-4); `act` names the activation.
    """
    x = Conv2D(
        filters=filters
        , kernel_size=kernel_size
        , data_format="channels_first"
        , padding='same'
        , kernel_regularizer=regularizers.l2(0.0001)
        , use_bias=False
        , activation=act
    )(x)
    return (x)
def softmax(x, name):
    """Flatten x and project it to a single bias-free linear unit.

    NOTE(review): despite the name there is no softmax here — the output
    is a plain Dense(1) with no activation; confirm the name is historical.
    """
    x = Flatten()(x)
    x = Dense(
        1
        , use_bias=False
        , kernel_regularizer=regularizers.l2(0.0001)
        , name=name
    )(x)
    return (x)
def build_model():
    """Build and compile the value network.

    Input: 24 feature planes of 19x19 (channels first).  One 5x5 linear
    conv, three 3x3 ReLU convs, a 1-filter conv, then a single scalar
    output "v".  Compiled with MSE loss.
    NOTE(review): 'categorical_accuracy' as the metric for a scalar
    regression head looks unintended — confirm.
    """
    main_input = Input(shape=(24, 19, 19), name='main_input')
    x = conv_layer(main_input, 92, 5, 'linear')
    for h in range(3):
        x = conv_layer(x, 384, 3, 'relu')
    V = conv_layer(x, 1, 3, 'linear')
    V = softmax(V, "v")
    model = Model(inputs=[main_input], outputs=[V])
    model.compile(loss='mean_squared_error', optimizer='adam', metrics=['categorical_accuracy'])
    return model
def readData(modelInput):
    """Split training records into (states, value targets) as numpy arrays.

    Each record is indexed as record[0] -> input state planes and
    record[2] -> value-head target; record[1] is ignored here.
    """
    states = [record[0] for record in modelInput]
    targets = [record[2] for record in modelInput]
    return np.asarray(states), np.asarray(targets)
model = build_model()
#model = load_model('ValueNoKoModel1.h5')
modelInput1 = pk.load(open("VtrainigData/NoKoTrainingData_1200.pkl", 'rb'))
modelInput2 = pk.load(open("VtrainigData/NoKoTrainingData_1500.pkl", 'rb'))
modelInput3 = pk.load(open("VtrainigData/NoKoTrainingData_1800.pkl", 'rb'))
modelInput4 = pk.load(open("VtrainigData/NoKoTrainingData_2100.pkl", 'rb'))
modelInput5 = pk.load(open("VtrainigData/NoKoTrainingData_2400.pkl.", 'rb'))
modelInput6 = pk.load(open("VtrainigData/NoKoTrainingData_2700.pkl", 'rb'))
#modelInput7 = pk.load(open("VtrainigData/NoKoTrainingData_3000.pkl", 'rb'))
"""""
modelInput8 = pk.load(open("VtrainigData/NoKoTrainingData_3088.pkl", 'rb'))
modelInput9 = pk.load(open("VtrainigData/NoKoTrainingData_3300.pkl", 'rb'))
modelInput10 = pk.load(open("VtrainigData/NoKoTrainingData_3600.pkl", 'rb'))
modelInput11 = pk.load(open("VtrainigData/NoKoTrainingData_3900.pkl", 'rb'))
modelInput12 = pk.load(open("VtrainigData/NoKoTrainingData_1650.pkl", 'rb'))
modelInput13 = pk.load(open("VtrainigData/NoKoTrainingData_1700.pkl", 'rb'))
modelInput14 = pk.load(open("VtrainigData/NoKoTrainingData_1150.pkl", 'rb'))
modelInput15 = pk.load(open("TrainingData_680.pkl", 'rb'))
modelInput16 = (pk.load(open("TrainingData_700.pkl", 'rb')))
modelInput17 = pk.load(open("TrainingData_731.pkl", 'rb'))
modelInput18 = (pk.load(open("TrainingData_720.pkl", 'rb')))
"""
modelInput = np.concatenate((modelInput1, modelInput2, modelInput3, modelInput4, modelInput5, modelInput6
), axis=0)
del modelInput1
del modelInput2
del modelInput3
del modelInput4
del modelInput5
del modelInput6
#MI=[modelInput1, modelInput2,modelInput3]
TrainingStatesT, TruthT = readData(modelInput)
model.fit(TrainingStatesT[3:], TruthT[3:], epochs=40, batch_size=256, shuffle=True, verbose=2, validation_split=.01)
model.save("ValueNoKoModel12.h5")
pred = model.predict(np.asarray([TrainingStatesT[0]]))
pred2 = model.predict(np.asarray([TrainingStatesT[2]]))
pred3 = model.predict(np.asarray([TrainingStatesT[1]]))
print(pred)
print(pred2)
print(pred3)
| [
"ahmed.saeed98@eng-st.cu.edu.eg"
] | ahmed.saeed98@eng-st.cu.edu.eg |
17bc4a4da4d2080745b884d4594038a40cbaccad | 878eb2075afe9b33d5a4f647db0010f7491acf55 | /accounts/urls.py | a6e1811a7aca1c72331a26e6eac22c7f143cd189 | [] | no_license | ringomag/crm1Project | 4bae1c35ef77f297f79524dc2dc2bb9e2e72771f | 8a28d1008f576b20e0cfb758e38cd78e152bd540 | refs/heads/master | 2023-03-25T05:53:49.754333 | 2021-03-09T16:57:22 | 2021-03-09T16:57:22 | 346,078,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py |
from django.urls import path
from . import views
# URL routes for the accounts app.  NOTE(review): the parameterized
# routes omit the trailing slash while the others include it — confirm
# this mix is intentional.
urlpatterns = [
    path('', views.home, name='home'),
    path('products/', views.products, name='products'),
    # Per-customer detail page, keyed by primary key.
    path('customer/<str:pk>', views.customer, name='customer'),
    # Order CRUD: create, update, delete.
    path('order_form/', views.createOrder, name="create_order" ),
    path('update_order/<str:pk>', views.updateOrder, name="update_order" ),
    path('delete_order/<str:pk>', views.deleteOrder, name="delete_order" ),
]
| [
"optimuskrajm@gmail.com"
] | optimuskrajm@gmail.com |
94da97e60f624aff0471d53b253d4836561aeacf | 00f56dd38a0fc7d63b5fcea50963db07ab412e1b | /api/test_models.py | cae75a3c6a220ccee74cd8a8cadc4ac40c928edb | [] | no_license | james604s/pokedex_api | ec225225af49b081b511eec54ca15e537f02e67a | 1a11d4d6ea84667dcf7b1155fa25fdb7a1cbd8ed | refs/heads/main | 2023-04-01T06:48:01.149531 | 2021-04-12T09:09:30 | 2021-04-12T09:09:30 | 356,895,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | from django.test import TestCase
from api.models import *
# Create your tests here.
class PokemonModelTest(TestCase):
    """Model-layer tests for PokeInfo and its related types/evolutions."""

    def _create_pokemon(self, number, name, types):
        # Create one PokeInfo row plus a related type row per entry in
        # `types`.  Extracted because setUp repeated this block verbatim
        # for each of the three pokemon.
        poke_info = PokeInfo.objects.create(
            poke_number=number,
            poke_name=name,
        )
        for poke_type in types:
            poke_info.types.create(poke_type=poke_type)
        return poke_info

    def setUp(self):
        # Bulbasaur evolution line: 001 -> 002 -> 003, all Grass/Poison.
        for number, name in (("001", "Bulbasaur"), ("002", "Ivysaur"), ("003", "Venusaur")):
            self._create_pokemon(number, name, ["Grass", "Poison"])
        # 001 evolves into 002 and 003; 002 evolves into 003.
        poke_evo = PokeInfo.objects.get(poke_number="001")
        poke_evo.evos.create(poke_evo="002")
        poke_evo.evos.create(poke_evo="003")
        poke_evo = PokeInfo.objects.get(poke_number="002")
        poke_evo.evos.create(poke_evo="003")

    def test_pokemon_data(self):
        # Sanity check: setUp created at least one PokeInfo row.
        data = PokeInfo.objects.all()
        self.assertTrue(len(data))
"47516926+james604s@users.noreply.github.com"
] | 47516926+james604s@users.noreply.github.com |
2abc3a0f1c6f650f67886a302f7a8a47167f2736 | 93a5508c28a4b25c949ac3b15fed592a4cb1e60a | /cwiczenia10/udp-echo-client.py | ad2ad604882b9a3cbc8c6f9d311f34d3561d2551 | [] | no_license | jakubpodolski/sk-2019 | 9f1c2ab819f4f5a4cb100ac0766f09a1050b9935 | 303c3a6f7795710a62d40aaf7ed7e77f863a5dbd | refs/heads/master | 2020-04-23T15:04:34.961232 | 2019-06-18T19:45:47 | 2019-06-18T19:45:47 | 171,252,764 | 0 | 3 | null | 2019-05-14T03:17:43 | 2019-02-18T09:21:50 | Python | UTF-8 | Python | false | false | 252 | py | #!/usr/bin/env python3
import socket
import sys
# Simple UDP echo client: sends the first command-line argument to the
# server at HOST:PORT and prints the echoed reply.
HOST = '127.0.0.1'
PORT = 65433
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
    # UDP (SOCK_DGRAM): one datagram out, one reply (up to 1024 bytes) back.
    s.sendto(str.encode(sys.argv[1]), (HOST, PORT))
    data = s.recv(1024)
    print('Received', repr(data))
"noreply@github.com"
] | jakubpodolski.noreply@github.com |
034d42940af343c1638afe358b2506823e840bf4 | 1be4f95b722397f255e58b21a182171eb24b6fe5 | /datalad_neuroimaging/extractors/tests/test_dicom.py | 338f2fa4c994f2dd11ced3bf44f4f0f768516770 | [
"MIT"
] | permissive | yarikoptic/datalad-neuroimaging | 5f9a7b0993ac56bbeaba95427541b2c75ed711ea | 7ee146d6c7c864aafc8b540d0ccd9b3a1b5b7210 | refs/heads/master | 2022-11-11T02:57:46.228562 | 2018-04-10T14:05:21 | 2018-04-10T14:05:21 | 128,942,708 | 0 | 0 | null | 2018-04-10T14:04:46 | 2018-04-10T14:04:46 | null | UTF-8 | Python | false | false | 3,032 | py | # emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test audio extractor"""
from datalad.tests.utils import SkipTest
try:
from datalad_neuroimaging.extractors.dicom import MetadataExtractor as DicomExtractor
except ImportError:
raise SkipTest
from shutil import copy
from os.path import dirname
from os.path import join as opj
from datalad.api import Dataset
from datalad.tests.utils import with_tempfile
from datalad.tests.utils import ok_clean_git
from datalad.tests.utils import assert_status
from datalad.tests.utils import assert_result_count
from datalad.tests.utils import eq_
from datalad.tests.utils import assert_dict_equal
from datalad.tests.utils import assert_in
from datalad.tests.utils import assert_not_in
@with_tempfile(mkdir=True)
def test_dicom(path):
    """End-to-end check of the DICOM metadata extractor.

    Creates a dataset containing one DICOM file, aggregates metadata,
    then verifies: file-level metadata, dataset-level Series
    aggregation, the auto-generated unique-content properties, and the
    effect of disabling file-based metadata storage.
    """
    ds = Dataset(path).create()
    ds.config.add('datalad.metadata.nativetype', 'dicom', where='dataset')
    copy(
        opj(dirname(dirname(dirname(__file__))), 'tests', 'data', 'dicom.dcm'),
        path)
    ds.add('.')
    ok_clean_git(ds.path)
    res = ds.aggregate_metadata()
    assert_status('ok', res)
    # query for the file metadata
    res = ds.metadata('dicom.dcm')
    assert_result_count(res, 1)
    # from this extractor
    meta = res[0]['metadata']['dicom']
    assert_in('@context', meta)
    # no point in testing ALL keys, but we got plenty
    assert(len(meta.keys()) > 70)
    eq_(meta['SeriesDate'], '20070205')
    # now ask for the dataset metadata, which should have both the unique props
    # and a list of imageseries (one in this case, but a list)
    res = ds.metadata(reporton='datasets')
    assert_result_count(res, 1)
    dsmeta = res[0]['metadata']['dicom']
    # same context
    assert_dict_equal(meta['@context'], dsmeta['@context'])
    meta.pop('@context')
    eq_(dsmeta['Series'], [meta])
    # for this artificial case pretty much the same info also comes out as
    # unique props, but wrapped in lists
    ucp = res[0]['metadata']["datalad_unique_content_properties"]['dicom']
    assert_dict_equal(
        {k: [v]
         for k, v in dsmeta['Series'][0].items()
         if k not in DicomExtractor._unique_exclude and k in ucp},
        {k: v
         for k, v in ucp.items()
         if k not in DicomExtractor._unique_exclude})
    # buuuut, if we switch of file-based metadata storage
    ds.config.add('datalad.metadata.aggregate-content-dicom', 'false', where='dataset')
    ds.aggregate_metadata()
    res = ds.metadata(reporton='datasets')
    # the auto-uniquified bits are gone but the Series description stays
    assert_not_in("datalad_unique_content_properties", res[0]['metadata'])
    eq_(dsmeta['Series'], [meta])
| [
"michael.hanke@gmail.com"
] | michael.hanke@gmail.com |
a2eb7128900a56f43e0ece19dedc06e35f192da8 | c2d3b7855b055cb8b0563a3812fb0dbfc670bc09 | /lessons_src/03_CFL_Condition.py | 7ecf7fe0fd959b7819bcdc7829ed929d41253a87 | [
"CC-BY-3.0",
"BSD-3-Clause",
"CC-BY-4.0"
] | permissive | tnakaicode/python-cfd | 85fab343c4c99f32777e45163b89f4d952d83e96 | 174176bdcb1c31e021fefd8fd54e2b3dd898dc62 | refs/heads/master | 2023-08-08T16:53:34.455088 | 2020-05-07T17:14:54 | 2020-05-07T17:14:54 | 261,902,096 | 0 | 0 | NOASSERTION | 2023-07-06T21:27:39 | 2020-05-06T23:30:09 | Jupyter Notebook | UTF-8 | Python | false | false | 5,978 | py | #!/usr/bin/env python
# coding: utf-8
# Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) Lorena A. Barba, Gilbert F. Forsyth 2017. Thanks to NSF for support via CAREER award #1149784.
# [@LorenaABarba](https://twitter.com/LorenaABarba)
# 12 steps to Navier–Stokes
# =====
# ***
# Did you experiment in Steps [1](./01_Step_1.ipynb) and [2](./02_Step_2.ipynb) using different parameter choices? If you did, you probably ran into some unexpected behavior. Did your solution ever blow up? (In my experience, CFD students *love* to make things blow up.)
#
# You are probably wondering why changing the discretization parameters affects your solution in such a drastic way. This notebook complements our [interactive CFD lessons](https://github.com/barbagroup/CFDPython) by discussing the CFL condition. And learn more by watching Prof. Barba's YouTube lectures (links below).
# Convergence and the CFL Condition
# ----
# ***
# For the first few steps, we've been using the same general initial and boundary conditions. With the parameters we initially suggested, the grid has 41 points and the timestep is 0.25 seconds. Now, we're going to experiment with increasing the size of our grid. The code below is identical to the code we used in [Step 1](./01_Step_1.ipynb), but here it has been bundled up in a function so that we can easily examine what happens as we adjust just one variable: **the grid size**.
# In[1]:
import numpy # numpy is a library for array operations akin to MATLAB
from matplotlib import pyplot # matplotlib is 2D plotting library
# get_ipython().run_line_magic('matplotlib', 'inline')
def linearconv(nx):
dx = 2 / (nx - 1)
nt = 20 # nt is the number of timesteps we want to calculate
dt = .025 # dt is the amount of time each timestep covers (delta t)
c = 1
# defining a numpy array which is nx elements long with every value equal to 1.
u = numpy.ones(nx)
# setting u = 2 between 0.5 and 1 as per our I.C.s
u[int(.5 / dx):int(1 / dx + 1)] = 2
# initializing our placeholder array, un, to hold the values we calculate for the n+1 timestep
un = numpy.ones(nx)
for n in range(nt): # iterate through time
un = u.copy() # copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1])
pyplot.plot(numpy.linspace(0, 2, nx), u)
pyplot.show()
# Now let's examine the results of our linear convection problem with an increasingly fine mesh.
# In[2]:
linearconv(41) # convection using 41 grid points
# This is the same result as our Step 1 calculation, reproduced here for reference.
# In[3]:
linearconv(61)
# Here, there is still numerical diffusion present, but it is less severe.
# In[4]:
linearconv(71)
# Here the same pattern is present -- the wave is more square than in the previous runs.
# In[5]:
linearconv(85)
# This doesn't look anything like our original hat function.
# ### What happened?
# To answer that question, we have to think a little bit about what we're actually implementing in code.
#
# In each iteration of our time loop, we use the existing data about our wave to estimate the speed of the wave in the subsequent time step. Initially, the increase in the number of grid points returned more accurate answers. There was less numerical diffusion and the square wave looked much more like a square wave than it did in our first example.
#
# Each iteration of our time loop covers a time-step of length $\Delta t$, which we have been defining as 0.025
#
# During this iteration, we evaluate the speed of the wave at each of the $x$ points we've created. In the last plot, something has clearly gone wrong.
#
# What has happened is that over the time period $\Delta t$, the wave is travelling a distance which is greater than `dx`. The length `dx` of each grid box is related to the number of total points `nx`, so stability can be enforced if the $\Delta t$ step size is calculated with respect to the size of `dx`.
#
# $$\sigma = \frac{u \Delta t}{\Delta x} \leq \sigma_{\max}$$
#
# where $u$ is the speed of the wave; $\sigma$ is called the **Courant number** and the value of $\sigma_{\max}$ that will ensure stability depends on the discretization used.
#
# In a new version of our code, we'll use the CFL number to calculate the appropriate time-step `dt` depending on the size of `dx`.
#
#
# In[6]:
import numpy
from matplotlib import pyplot
def linearconv(nx):
dx = 2 / (nx - 1)
nt = 20 # nt is the number of timesteps we want to calculate
c = 1
sigma = .5
dt = sigma * dx
u = numpy.ones(nx)
u[int(.5 / dx):int(1 / dx + 1)] = 2
un = numpy.ones(nx)
for n in range(nt): # iterate through time
un = u.copy() # copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i - 1])
pyplot.plot(numpy.linspace(0, 2, nx), u)
# In[7]:
linearconv(41)
# In[8]:
linearconv(61)
# In[9]:
linearconv(81)
# In[10]:
linearconv(101)
# In[11]:
linearconv(121)
# Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance. The number of time iterations we have advanced the solution at is held constant at `nt = 20`, but depending on the value of `nx` and the corresponding values of `dx` and `dt`, a shorter time window is being examined overall.
# Learn More
# -----
# ***
# It's possible to do rigurous analysis of the stability of numerical schemes, in some cases. Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on You Tube.
# In[12]:
from IPython.display import YouTubeVideo
YouTubeVideo('Yw1YPBupZxU')
# In[13]:
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
| [
"tnakaicode@gmail.com"
] | tnakaicode@gmail.com |
55d827d485fafcf2175493438fddf702bd07fca4 | b503b00b34d4581959ed7534aea4ea196f99d7f7 | /coresize_vs_t.py | 3dd5e6a6665220b198f7184379214073bd44ce86 | [] | no_license | afitts/analysis_scripts | 1f76feff7213a26a010e136dea9c5375f447f513 | e02678a2ecbb9628937cceb0a958e19b5b540597 | refs/heads/master | 2021-01-22T20:07:43.033514 | 2017-08-03T13:54:06 | 2017-08-03T13:54:06 | 49,749,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,118 | py | import numpy as np
import sys
import glob
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as co
import pylab
import pygadgetreader as pg
import scipy.integrate
import time as dtime
import scipy.stats as stats
import pandas as pd
import scipy.optimize as opt
from profiles import *
from mikecm import mikecm
from matplotlib import rcParams
import matplotlib.animation as animation
from matplotlib.ticker import FixedLocator
from matplotlib.ticker import FixedFormatter
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
rcParams['lines.linewidth'] = 4
rcParams['axes.linewidth'] = 2
rcParams['xtick.major.size'] = 10
rcParams['xtick.minor.size'] = 5
rcParams['xtick.major.width'] = 2
rcParams['xtick.minor.width'] = 2
rcParams['ytick.major.size'] = 10
rcParams['ytick.minor.size'] = 5
rcParams['ytick.major.width'] = 2
rcParams['ytick.minor.width'] = 2
rcParams['font.size'] = 20
rcParams['xtick.labelsize']= '16'
rcParams['ytick.labelsize']= '16'
rcParams['savefig.bbox'] = 'tight'
def fit_profile(hnum,res,ver,dmo,snum):
grain = 60
if dmo == 0: ###For hydro runs ###
### Load in dataframe ###
pathname = '/nobackup/afitts/Gadget-2.0.7/production/mfm%s%s_giz%s_raw_output/'%(hnum,res,ver)
hdf = pd.HDFStore('%sanalysis/dataframes/halo%s%s_giz%s_snap%03d.h5'%(pathname,hnum,res,ver,snum))
red = np.float(hdf['props']['redshift'])
time = np.float(hdf['props']['time'])
rhalf = np.float(hdf['props']['rhalf'])*1000/(red+1)
rvir = np.float(hdf['props']['rvir'])*1000/(red+1)
dm = hdf['particles/dm']['mass'].as_matrix()
gm = hdf['particles/gas']['mass'].as_matrix()
sm = hdf['particles/star']['mass'].as_matrix()
### Recentering procedure ###
dx = hdf['particles/dm']['x'].as_matrix()
dy = hdf['particles/dm']['y'].as_matrix()
dz = hdf['particles/dm']['z'].as_matrix()
gx = hdf['particles/gas']['x'].as_matrix()
gy = hdf['particles/gas']['y'].as_matrix()
gz = hdf['particles/gas']['z'].as_matrix()
sx = hdf['particles/star']['x'].as_matrix()
sy = hdf['particles/star']['y'].as_matrix()
sz = hdf['particles/star']['z'].as_matrix()
dp = np.column_stack((dx,dy,dz))
sp = np.column_stack((sx,sy,sz))
dsp = np.vstack((dp,sp))
dsm = np.append(dm,sm)
dsc = mikecm(fname = dsp,nofile=True,pmass=dsm)
dpos = np.sqrt((dp[:,0]-dsc[0])**2+(dp[:,1]-dsc[1])**2+(dp[:,2]-dsc[2])**2)/.71*1000/(red+1)
gpos = np.sqrt((gp[:,0]-dsc[0])**2+(gp[:,1]-dsc[1])**2+(gp[:,2]-dsc[2])**2)/.71*1000/(red+1)
spos = np.sqrt((sp[:,0]-dsc[0])**2+(sp[:,1]-dsc[1])**2+(sp[:,2]-dsc[2])**2)/.71*1000/(red+1)
### Binning procedure ###
binz = np.logspace(np.log(.0014),np.log(rvir),grain,base=np.e)
x = np.e**(np.log(binz[1:])-np.log(binz[1]/binz[0])/2)
dlogr = np.log(binz[1]/binz[0])
massall, bin_edge = np.histogram(dpos,bins=binz, weights =dm)
gmassall, bin_edge = np.histogram( gpos,bins=binz, weights =gm)
smassall, bin_edge = np.histogram( spos,bins=binz, weights =sm)
totden = (massall+gmassall+smassall)/(4*3.14159*x**3)/dlogr
### Chi-squared minimization fitting routine ###
chimin =10000
sig = 0.1 * totden
cenden = totden[np.sqrt((x-.3)**2).argmin()]
for p in np.logspace(-1,np.log10(10),100):
(fit, cmatrix)= opt.curve_fit(psuedoiso,x,totden,p0=(cenden,p),sigma=sig)
chisq = sum((totden-psuedoiso(x,*fit))**2/(sig)**2)
if chisq < chimin:
chimin = chisq
bestfit = fit
else: ### For DMO runs ###
### Load in dataframe ###
pathname = '/nobackup/afitts/Gadget-2.0.7/production/gizdmsi%s%s_raw_output/'%(hnum,res)
hdf = pd.HDFStore('%sanalysis/dataframes/halo%s%s_giz%s_snap%03d.h5'%(pathname,hnum,res,ver,snum))
red = np.float(hdf['props']['redshift'])
time = np.float(hdf['props']['time'])
c = np.float(hdf['props']['cNFW'])
rhalf = np.float(hdf['props']['rhalf'])*1000/(red+1)
rvir = np.float(hdf['props']['rvir'])*1000/(red+1)
dm = hdf['particles/dm']['mass'].as_matrix()
mvir = sum(dm)
### Recentering procedure ###
dx = hdf['particles/dm']['x'].as_matrix()
dy = hdf['particles/dm']['y'].as_matrix()
dz = hdf['particles/dm']['z'].as_matrix()
dp = np.column_stack((dx,dy,dz))
dsc = mikecm(fname = dp,nofile=True,pmass=dm)
dpos = np.sqrt((dp[:,0]-dsc[0])**2+(dp[:,1]-dsc[1])**2+(dp[:,2]-dsc[2])**2)/.71*1000/(red+1)
### Binning procedure ###
binz = np.logspace(np.log(.0014),np.log(rvir),grain,base=np.e)
x = np.e**(np.log(binz[1:])-np.log(binz[1]/binz[0])/2)
dlogr = np.log(binz[1]/binz[0])
massall, bin_edge = np.histogram(dpos,bins=binz, weights =dm)
totden = (massall)/(4*3.14159*x**3)/dlogr
### Chi-squared minimization fitting routine ###
chimin =10000
sig = 0.1 * totden
prad = .3 #power radius (approximation for now)
cenden0 = totden[np.sqrt((x-prad)**2).argmin()]
param_bounds=([cenden0*1e-1,.499],[cenden0*1e1,10])
outxlim = 50
fit_lims = (x<outxlim) & (x>prad)
### Burkert fit ###
for p in np.logspace(np.log10(.5),np.log10(2),20):
for cenden in np.logspace(np.log10(cenden0*.5),np.log10(cenden0*1.5),20):
(fit, cmatrix)= opt.curve_fit(burkert,x[fit_lims],totden[fit_lims],p0=(cenden,p),bounds=param_bounds)
chisq = sum((totden[fit_lims]-burkert(x[fit_lims],*fit))**2/(sig[fit_lims])**2)
if chisq < chimin:
chimin = chisq
bestfit = fit
### Psuedo iso fit ###
chimin =10000
for p in np.logspace(np.log10(.5),np.log10(2),20):
for cenden in np.logspace(np.log10(cenden0*.5),np.log10(cenden0*1.5),20):
(fit, cmatrix)= opt.curve_fit(psuedoiso,x[fit_lims],totden[fit_lims],p0=(cenden,p),bounds=param_bounds)
chisq = sum((totden[fit_lims]-psuedoiso(x[fit_lims],*fit))**2/(sig[fit_lims])**2)
if chisq < chimin:
chimin = chisq
bestfit1 = fit
### Cored-NFW fit ###
chimin =10000
def CNFW(mvir,c): return lambda r,n,rc : corenfw(r,mvir,c,n,rc)
nfwcored = CNFW(mvir,c)
param_bounds=([0.1,0.1],[1.1,5])
for rc in np.logspace(np.log10(0.1),np.log10(5),20):
for n in np.logspace(np.log10(0.1),np.log10(1.1),20):
(bestfit2, cmatrix)= opt.curve_fit(nfwcored,x[fit_lims],np.log(totden[fit_lims]),p0 = (n,rc),bounds=param_bounds)
chisq = sum((totden[fit_lims]-psuedoiso(x[fit_lims],*bestfit2))**2/(sig[fit_lims])**2)
if chisq < chimin:
chimin = chisq
bestfit2 = fit
test_plot = radpro()
test_plot.add_dmoline(x,totden,'SIDM')
test_plot.add_fit(x,burkert(x,*bestfit),'bur')
test_plot.add_fit(x,psuedoiso(x,*bestfit1),'piso')
test_plot.add_fit(x,np.exp(corenfw(x,mvir,c,*bestfit2)),'cnfw')
date = dtime.strftime("%m_%d_%Y")
test_plot.save(date,hnum)
return time, bestfit
hnum = '11707'
res = '_13'
ver = '5_12_16'
dmo = 1
time, coresize = fit_profile(hnum,res,ver,dmo,89)
time = comm.gather(time, root=0)
coresize = comm.gather(coresize[1], root=0)
if rank == 0:
print 'HI',coresize
a = plt.plot(time,coresize)
plt.savefig('coresize_test.pdf',transparent=True)
| [
"fitts.alex@gmail.com"
] | fitts.alex@gmail.com |
bfac8c377efa67c14110dbc257e8e53e609f0744 | 381f9865a134f74385d6daaebe49af18534a06ac | /sdapis/views/post_views.py | bab89f1ab23537c17ea1358e57451869a4f279c7 | [
"MIT"
] | permissive | mmtahir-dev/cmput404-socialdistribution | fc2d8fc350751639da78453a83fa0b6d01a617ca | a5a4749c8d9c27ccd062e33be5a4fb2f76697394 | refs/heads/main | 2023-08-26T02:45:13.949291 | 2021-10-29T19:49:42 | 2021-10-29T19:49:42 | 421,166,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,960 | py | from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import get_object_or_404
from django.conf import settings
from sdapis.pagination import PostPagination
from sdapis.serializers import PostSerializer
from sdapis.models import Post, Author
from .node_helper import is_valid_node
HOST_NAME = settings.HOST_NAME
@api_view(['GET'])
def all_post_view(request):
'''
get all posts, ordered by published time
'''
valid = is_valid_node(request)
if not valid:
return Response({"message":"not a valid node"}, status=status.HTTP_403_FORBIDDEN)
if request.method == "GET":
# get recent posts of author (paginated)
paginator = PostPagination()
posts = Post.objects.all().order_by('-published')
paginated = paginator.paginate_queryset(posts, request)
serializer = PostSerializer(paginated, many=True)
return paginator.get_paginated_response(serializer.data)
@api_view(['GET', 'POST'])
def post_view(request, author_id):
'''
get an author's all posts,
create a post
'''
valid = is_valid_node(request)
if not valid:
return Response({"message":"not a valid node"}, status=status.HTTP_403_FORBIDDEN)
if request.method == "GET":
# get recent posts of author (paginated)
paginator = PostPagination()
posts = Post.objects.filter(author_id=author_id, unlisted=False).order_by('-published')
paginated = paginator.paginate_queryset(posts, request)
serializer = PostSerializer(paginated, many=True)
return paginator.get_paginated_response(serializer.data)
elif request.method == "POST":
# create a new post
data = request.data
data['author_id'] = author_id
serializer = PostSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response({'message':serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET','DELETE', 'PUT', 'POST'])
def post_detail_view(request, author_id, post_id):
'''
view a post's detail, delete a post(authenticated), forward a post, update a post(authenticated)
'''
valid = is_valid_node(request)
if not valid:
return Response({"message":"not a valid node"}, status=status.HTTP_403_FORBIDDEN)
if request.method == "GET":
# get post data
post = get_object_or_404(Post, id=post_id)
serializer = PostSerializer(post)
return Response(serializer.data)
elif request.method == "DELETE":
try:
del_post = get_object_or_404(Post, id=post_id)
except Post.DoesNotExist:
return Response(status = status.HTTP_404_NOT_FOUND)
del_post.delete()
return Response({'message': "delete successful!"}, status=status.HTTP_200_OK)
elif request.method == "PUT":
# create a new post with the given id
try:
author = Author.objects.get(author_id=author_id)
po = Post.objects.get(id=post_id)
except Author.DoesNotExist:
return Response({"message":"author id not found"}, status=status.HTTP_404_NOT_FOUND)
except Post.DoesNotExist:
return Response({"message":"post id not found"}, status=status.HTTP_404_NOT_FOUND)
data = request.data
data['id'] = post_id
data['source'] = request.build_absolute_uri()
data['origin'] = HOST_NAME
foward_author = data['author_id']
try:
get_object_or_404(Author, author_id=foward_author)
except Author.DoesNotExist:
return Response({"message": "foward author does not exist"}, status=status.HTTP_404_NOT_FOUND)
serializer = PostSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response({'message':serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "POST":
# update the post
new_data = request.data
new_data['author_id'] = author_id
new_data['post_id'] = id
try:
this_post = get_object_or_404(Post, id=post_id)
except Post.DoesNotExist:
return Response({"message":"post not found"}, status = status.HTTP_404_NOT_FOUND)
if this_post.author_id != author_id:
return Response({"message":"author not found"}, status = status.HTTP_401_UNAUTHORIZED)
serializer = PostSerializer(this_post, data=new_data)
if serializer.is_valid():
post = serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response({'message':serializer.errors}, status=status.HTTP_400_BAD_REQUEST) | [
"jxiang2@ualberta.ca"
] | jxiang2@ualberta.ca |
16d01ee4642643a3fa9a06a6f2fb3e7d14bc6433 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/jupyter_client/jsonutil.py | 9903f70ecee4d8e753d94367e32ed64f5e0d57aa | [
"MIT"
] | permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 5,944 | py | """Utilities to manipulate JSON objects."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import math
import numbers
import re
import types
import warnings
from binascii import b2a_base64
from collections.abc import Iterable
from datetime import datetime
from typing import Optional
from typing import Union
from dateutil.parser import parse as _dateutil_parse # type: ignore
from dateutil.tz import tzlocal # type: ignore
next_attr_name = "__next__" # Not sure what downstream library uses this, but left it to be safe
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# timestamp formats
ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
ISO8601_PAT = re.compile(
r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?(Z|([\+\-]\d{2}:?\d{2}))?$"
)
# holy crap, strptime is not threadsafe.
# Calling it once at import seems to help.
datetime.strptime("1", "%d")
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
def _ensure_tzinfo(dt: datetime) -> datetime:
"""Ensure a datetime object has tzinfo
If no tzinfo is present, add tzlocal
"""
if not dt.tzinfo:
# No more naïve datetime objects!
warnings.warn(
"Interpreting naive datetime as local %s. Please add timezone info to timestamps." % dt,
DeprecationWarning,
stacklevel=4,
)
dt = dt.replace(tzinfo=tzlocal())
return dt
def parse_date(s: Optional[str]) -> Optional[Union[str, datetime]]:
"""parse an ISO8601 date string
If it is None or not a valid ISO8601 timestamp,
it will be returned unmodified.
Otherwise, it will return a datetime object.
"""
if s is None:
return s
m = ISO8601_PAT.match(s)
if m:
dt = _dateutil_parse(s)
return _ensure_tzinfo(dt)
return s
def extract_dates(obj):
"""extract ISO8601 dates from unpacked JSON"""
if isinstance(obj, dict):
new_obj = {} # don't clobber
for k, v in obj.items():
new_obj[k] = extract_dates(v)
obj = new_obj
elif isinstance(obj, (list, tuple)):
obj = [extract_dates(o) for o in obj]
elif isinstance(obj, str):
obj = parse_date(obj)
return obj
def squash_dates(obj):
"""squash datetime objects into ISO8601 strings"""
if isinstance(obj, dict):
obj = dict(obj) # don't clobber
for k, v in obj.items():
obj[k] = squash_dates(v)
elif isinstance(obj, (list, tuple)):
obj = [squash_dates(o) for o in obj]
elif isinstance(obj, datetime):
obj = obj.isoformat()
return obj
def date_default(obj):
"""DEPRECATED: Use jupyter_client.jsonutil.json_default"""
warnings.warn(
"date_default is deprecated since jupyter_client 7.0.0."
" Use jupyter_client.jsonutil.json_default.",
stacklevel=2,
)
return json_default(obj)
def json_default(obj):
"""default function for packing objects in JSON."""
if isinstance(obj, datetime):
obj = _ensure_tzinfo(obj)
return obj.isoformat().replace('+00:00', 'Z')
if isinstance(obj, bytes):
return b2a_base64(obj).decode('ascii')
if isinstance(obj, Iterable):
return list(obj)
if isinstance(obj, numbers.Integral):
return int(obj)
if isinstance(obj, numbers.Real):
return float(obj)
raise TypeError("%r is not JSON serializable" % obj)
# Copy of the old ipykernel's json_clean
# This is temporary, it should be removed when we deprecate support for
# non-valid JSON messages
def json_clean(obj):
# types that are 'atomic' and ok in json as-is.
atomic_ok = (str, type(None))
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
# Since bools are a subtype of Integrals, which are a subtype of Reals,
# we have to check them in that order.
if isinstance(obj, bool):
return obj
if isinstance(obj, numbers.Integral):
# cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
return int(obj)
if isinstance(obj, numbers.Real):
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
return float(obj)
if isinstance(obj, atomic_ok):
return obj
if isinstance(obj, bytes):
# unanmbiguous binary data is base64-encoded
# (this probably should have happened upstream)
return b2a_base64(obj).decode('ascii')
if isinstance(obj, container_to_list) or (
hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)
):
obj = list(obj)
if isinstance(obj, list):
return [json_clean(x) for x in obj]
if isinstance(obj, dict):
# First, validate that the dict won't lose data in conversion due to
# key collisions after stringification. This can happen with keys like
# True and 'true' or 1 and '1', which collide in JSON.
nkeys = len(obj)
nkeys_collapsed = len(set(map(str, obj)))
if nkeys != nkeys_collapsed:
raise ValueError(
'dict cannot be safely converted to JSON: '
'key collision would lead to dropped values'
)
# If all OK, proceed by making the new dict that will be json-safe
out = {}
for k, v in obj.items():
out[str(k)] = json_clean(v)
return out
if isinstance(obj, datetime):
return obj.strftime(ISO8601)
# we don't understand it, it's probably an unserializable object
raise ValueError("Can't clean for JSON: %r" % obj)
| [
"joao.a.severgnini@gmail.com"
] | joao.a.severgnini@gmail.com |
93248952101638dd63e2d980f3ce2641a5a9dad7 | ebb1e564c8a11e5af453f3749dcba1b66e2f3931 | /test/quantization/fx/test_model_report_fx.py | d123a8752ca72a05c6e3064859c2dee6efe72fd5 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | jjsjann123/pytorch | 06c3aee8dd3565664ac2e2fda0306432cf62dd7c | de9b3fb3e5eb54660190cbd20b6592fc5cbda547 | refs/heads/master | 2023-08-25T00:22:02.568347 | 2022-07-27T18:50:20 | 2022-07-27T22:38:28 | 152,169,545 | 0 | 1 | NOASSERTION | 2022-08-11T07:55:44 | 2018-10-09T01:33:17 | C++ | UTF-8 | Python | false | false | 79,542 | py | # -*- coding: utf-8 -*-
# Owner(s): ["oncall: quantization"]
import torch
import torch.nn as nn
import torch.ao.quantization.quantize_fx as quantize_fx
import torch.nn.functional as F
from torch.ao.quantization import QConfig, QConfigMapping
from torch.ao.quantization.fx._model_report.detector import (
DynamicStaticDetector,
InputWeightEqualizationDetector,
PerChannelDetector,
OutlierDetector,
)
from torch.ao.quantization.fx._model_report.model_report_observer import ModelReportObserver
from torch.ao.quantization.fx._model_report.model_report_visualizer import ModelReportVisualizer
from torch.ao.quantization.fx._model_report.model_report import ModelReport
from torch.ao.quantization.observer import HistogramObserver, default_per_channel_weight_observer
from torch.nn.intrinsic.modules.fused import ConvReLU2d, LinearReLU
from torch.testing._internal.common_quantization import (
ConvModel,
QuantizationTestCase,
SingleLayerLinearModel,
TwoLayerLinearModel,
skipIfNoFBGEMM,
skipIfNoQNNPACK,
override_quantized_engine,
)
"""
Partition of input domain:
Model contains: conv or linear, both conv and linear
Model contains: ConvTransposeNd (not supported for per_channel)
Model is: post training quantization model, quantization aware training model
Model is: composed with nn.Sequential, composed in class structure
QConfig utilizes per_channel weight observer, backend uses non per_channel weight observer
QConfig_dict uses only one default qconfig, Qconfig dict uses > 1 unique qconfigs
Partition on output domain:
There are possible changes / suggestions, there are no changes / suggestions
"""
# Default output for string if no optimizations are possible
DEFAULT_NO_OPTIMS_ANSWER_STRING = (
"Further Optimizations for backend {}: \nNo further per_channel optimizations possible."
)
# Example Sequential Model with multiple Conv and Linear with nesting involved
NESTED_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
torch.nn.Conv2d(3, 3, 2, 1),
torch.nn.Sequential(torch.nn.Linear(9, 27), torch.nn.ReLU()),
torch.nn.Linear(27, 27),
torch.nn.ReLU(),
torch.nn.Conv2d(3, 3, 2, 1),
)
# Example Sequential Model with Conv sub-class example
LAZY_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
torch.nn.LazyConv2d(3, 3, 2, 1),
torch.nn.Sequential(torch.nn.Linear(5, 27), torch.nn.ReLU()),
torch.nn.ReLU(),
torch.nn.Linear(27, 27),
torch.nn.ReLU(),
torch.nn.LazyConv2d(3, 3, 2, 1),
)
# Example Sequential Model with Fusion directly built into model
FUSION_CONV_LINEAR_EXAMPLE = torch.nn.Sequential(
ConvReLU2d(torch.nn.Conv2d(3, 3, 2, 1), torch.nn.ReLU()),
torch.nn.Sequential(LinearReLU(torch.nn.Linear(9, 27), torch.nn.ReLU())),
LinearReLU(torch.nn.Linear(27, 27), torch.nn.ReLU()),
torch.nn.Conv2d(3, 3, 2, 1),
)
# Test class
# example model to use for tests
class ThreeOps(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 3)
self.bn = nn.BatchNorm2d(3)
self.relu = nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.bn(x)
x = self.relu(x)
return x
def get_example_inputs(self):
return (torch.randn(1, 3, 3, 3),)
class TwoThreeOps(nn.Module):
def __init__(self):
super().__init__()
self.block1 = ThreeOps()
self.block2 = ThreeOps()
def forward(self, x):
x = self.block1(x)
y = self.block2(x)
z = x + y
z = F.relu(z)
return z
def get_example_inputs(self):
return (torch.randn(1, 3, 3, 3),)
class TestFxModelReportDetector(QuantizationTestCase):
    """Tests for PerChannelDetector report generation on fx-prepared models.

    Each test prepares a model with prepare_fx, calibrates it with a single
    forward pass, runs PerChannelDetector.generate_detector_report, and checks
    both the human-readable suggestion string and the per-module info dict.
    """

    def _prepare_model_and_run_input(self, model, q_config_mapping, input):
        """Prepare ``model`` with the given QConfigMapping and calibrate it with one pass of ``input``."""
        model_prep = torch.ao.quantization.quantize_fx.prepare_fx(model, q_config_mapping, input)  # prep model
        model_prep(input).sum()  # calibrate the model with a single forward pass
        return model_prep

    """Case includes:
        one conv or linear
        post training quantiztion
        composed as module
        qconfig uses per_channel weight observer
        Only 1 qconfig in qconfig dict
        Output has no changes / suggestions
    """

    @skipIfNoFBGEMM
    def test_simple_conv(self):
        with override_quantized_engine('fbgemm'):
            torch.backends.quantized.engine = "fbgemm"

            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            input = torch.randn(1, 3, 10, 10)
            prepared_model = self._prepare_model_and_run_input(ConvModel(), q_config_mapping, input)

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)

            # no optims possible and there should be nothing in per_channel_status
            # (fbgemm default qconfig already uses a per-channel weight observer)
            self.assertEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # there should only be one conv there in this model
            self.assertEqual(per_channel_info["conv"]["backend"], torch.backends.quantized.engine)
            self.assertEqual(len(per_channel_info), 1)
            self.assertEqual(list(per_channel_info)[0], "conv")
            self.assertEqual(
                per_channel_info["conv"]["per_channel_quantization_supported"],
                True,
            )
            self.assertEqual(per_channel_info["conv"]["per_channel_quantization_used"], True)

    """Case includes:
        Multiple conv or linear
        post training quantization
        composed as module
        qconfig doesn't use per_channel weight observer
        Only 1 qconfig in qconfig dict
        Output has possible changes / suggestions
    """

    @skipIfNoQNNPACK
    def test_multi_linear_model_without_per_channel(self):
        with override_quantized_engine('qnnpack'):
            torch.backends.quantized.engine = "qnnpack"

            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            prepared_model = self._prepare_model_and_run_input(
                TwoLayerLinearModel(),
                q_config_mapping,
                TwoLayerLinearModel().get_example_inputs()[0],
            )

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)

            # there should be optims possible (qnnpack default is per-tensor)
            self.assertNotEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # pick a random key to look at
            rand_key: str = list(per_channel_info.keys())[0]
            self.assertEqual(per_channel_info[rand_key]["backend"], torch.backends.quantized.engine)
            self.assertEqual(len(per_channel_info), 2)

            # for each linear layer, should be supported but not used
            for linear_key in per_channel_info.keys():
                module_entry = per_channel_info[linear_key]

                self.assertEqual(module_entry["per_channel_quantization_supported"], True)
                self.assertEqual(module_entry["per_channel_quantization_used"], False)

    """Case includes:
        Multiple conv or linear
        post training quantization
        composed as Module
        qconfig doesn't use per_channel weight observer
        More than 1 qconfig in qconfig dict
        Output has possible changes / suggestions
    """

    @skipIfNoQNNPACK
    def test_multiple_q_config_options(self):
        with override_quantized_engine('qnnpack'):
            torch.backends.quantized.engine = "qnnpack"

            # qconfig with support for per_channel quantization
            per_channel_qconfig = QConfig(
                activation=HistogramObserver.with_args(reduce_range=True),
                weight=default_per_channel_weight_observer,
            )

            # we need to design the model
            class ConvLinearModel(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.conv1 = torch.nn.Conv2d(3, 3, 2, 1)
                    self.fc1 = torch.nn.Linear(9, 27)
                    self.relu = torch.nn.ReLU()
                    self.fc2 = torch.nn.Linear(27, 27)
                    self.conv2 = torch.nn.Conv2d(3, 3, 2, 1)

                def forward(self, x):
                    x = self.conv1(x)
                    x = self.fc1(x)
                    x = self.relu(x)
                    x = self.fc2(x)
                    x = self.conv2(x)
                    return x

            q_config_mapping = QConfigMapping()
            # global default is per-tensor; conv layers get the per-channel qconfig
            q_config_mapping.set_global(
                torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine)
            ).set_object_type(torch.nn.Conv2d, per_channel_qconfig)

            prepared_model = self._prepare_model_and_run_input(
                ConvLinearModel(),
                q_config_mapping,
                torch.randn(1, 3, 10, 10),
            )

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)

            # the only suggestions should be to linear layers

            # there should be optims possible
            self.assertNotEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # to ensure it got into the nested layer
            self.assertEqual(len(per_channel_info), 4)

            # for each layer, should be supported but not used
            for key in per_channel_info.keys():
                module_entry = per_channel_info[key]
                self.assertEqual(module_entry["per_channel_quantization_supported"], True)

                # if linear False, if conv2d True because it uses a different config
                if "fc" in key:
                    self.assertEqual(module_entry["per_channel_quantization_used"], False)
                elif "conv" in key:
                    self.assertEqual(module_entry["per_channel_quantization_used"], True)
                else:
                    raise ValueError("Should only contain conv and linear layers as key values")

    """Case includes:
        Multiple conv or linear
        post training quantization
        composed as sequential
        qconfig doesn't use per_channel weight observer
        Only 1 qconfig in qconfig dict
        Output has possible changes / suggestions
    """

    @skipIfNoQNNPACK
    def test_sequential_model_format(self):
        with override_quantized_engine('qnnpack'):
            torch.backends.quantized.engine = "qnnpack"

            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            prepared_model = self._prepare_model_and_run_input(
                NESTED_CONV_LINEAR_EXAMPLE,
                q_config_mapping,
                torch.randn(1, 3, 10, 10),
            )

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)

            # there should be optims possible
            self.assertNotEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # to ensure it got into the nested layer
            self.assertEqual(len(per_channel_info), 4)

            # for each layer, should be supported but not used
            for key in per_channel_info.keys():
                module_entry = per_channel_info[key]
                self.assertEqual(module_entry["per_channel_quantization_supported"], True)
                self.assertEqual(module_entry["per_channel_quantization_used"], False)

    """Case includes:
        Multiple conv or linear
        post training quantization
        composed as sequential
        qconfig doesn't use per_channel weight observer
        Only 1 qconfig in qconfig dict
        Output has possible changes / suggestions
    """

    @skipIfNoQNNPACK
    def test_conv_sub_class_considered(self):
        with override_quantized_engine('qnnpack'):
            torch.backends.quantized.engine = "qnnpack"

            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            prepared_model = self._prepare_model_and_run_input(
                LAZY_CONV_LINEAR_EXAMPLE,
                q_config_mapping,
                torch.randn(1, 3, 10, 10),
            )

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)

            # there should be optims possible
            self.assertNotEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # to ensure it got into the nested layer and it considered the lazyConv2d
            self.assertEqual(len(per_channel_info), 4)

            # for each layer, should be supported but not used
            for key in per_channel_info.keys():
                module_entry = per_channel_info[key]
                self.assertEqual(module_entry["per_channel_quantization_supported"], True)
                self.assertEqual(module_entry["per_channel_quantization_used"], False)

    """Case includes:
        Multiple conv or linear
        post training quantization
        composed as sequential
        qconfig uses per_channel weight observer
        Only 1 qconfig in qconfig dict
        Output has no possible changes / suggestions
    """

    @skipIfNoFBGEMM
    def test_fusion_layer_in_sequential(self):
        with override_quantized_engine('fbgemm'):
            torch.backends.quantized.engine = "fbgemm"

            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            prepared_model = self._prepare_model_and_run_input(
                FUSION_CONV_LINEAR_EXAMPLE,
                q_config_mapping,
                torch.randn(1, 3, 10, 10),
            )

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(prepared_model)

            # no optims possible and there should be nothing in per_channel_status
            self.assertEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # to ensure it got into the nested layer and it considered all the nested fusion components
            self.assertEqual(len(per_channel_info), 4)

            # for each layer, should be supported but not used
            for key in per_channel_info.keys():
                module_entry = per_channel_info[key]
                self.assertEqual(module_entry["per_channel_quantization_supported"], True)
                self.assertEqual(module_entry["per_channel_quantization_used"], True)

    """Case includes:
        Multiple conv or linear
        quantitative aware training
        composed as model
        qconfig does not use per_channel weight observer
        Only 1 qconfig in qconfig dict
        Output has possible changes / suggestions
    """

    @skipIfNoQNNPACK
    def test_qat_aware_model_example(self):
        # first we want a QAT model
        class QATConvLinearReluModel(torch.nn.Module):
            def __init__(self):
                super(QATConvLinearReluModel, self).__init__()
                # QuantStub converts tensors from floating point to quantized
                self.quant = torch.quantization.QuantStub()
                self.conv = torch.nn.Conv2d(1, 1, 1)
                self.bn = torch.nn.BatchNorm2d(1)
                self.relu = torch.nn.ReLU()
                # DeQuantStub converts tensors from quantized to floating point
                self.dequant = torch.quantization.DeQuantStub()

            def forward(self, x):
                x = self.quant(x)
                x = self.conv(x)
                x = self.bn(x)
                x = self.relu(x)
                x = self.dequant(x)
                return x

        with override_quantized_engine('qnnpack'):
            # create a model instance
            model_fp32 = QATConvLinearReluModel()

            model_fp32.qconfig = torch.quantization.get_default_qat_qconfig("qnnpack")

            # model must be in eval mode for fusion
            model_fp32.eval()
            model_fp32_fused = torch.quantization.fuse_modules(model_fp32, [["conv", "bn", "relu"]])

            # model must be set to train mode for QAT logic to work
            model_fp32_fused.train()

            # prepare the model for QAT, different than for post training quantization
            model_fp32_prepared = torch.quantization.prepare_qat(model_fp32_fused)

            # run the detector
            per_channel_detector = PerChannelDetector(torch.backends.quantized.engine)
            optims_str, per_channel_info = per_channel_detector.generate_detector_report(model_fp32_prepared)

            # there should be optims possible
            self.assertNotEqual(
                optims_str,
                DEFAULT_NO_OPTIMS_ANSWER_STRING.format(torch.backends.quantized.engine),
            )

            # make sure it was able to find the single conv in the fused model
            self.assertEqual(len(per_channel_info), 1)

            # for the one conv, it should still give advice to use different qconfig
            for key in per_channel_info.keys():
                module_entry = per_channel_info[key]
                self.assertEqual(module_entry["per_channel_quantization_supported"], True)
                self.assertEqual(module_entry["per_channel_quantization_used"], False)
"""
Partition on Domain / Things to Test
- All zero tensor
- Multiple tensor dimensions
- All of the outward facing functions
- Epoch min max are correctly updating
- Batch range is correctly averaging as expected
- Reset for each epoch is correctly resetting the values
Partition on Output
- the calcuation of the ratio is occurring correctly
"""
class TestFxModelReportObserver(QuantizationTestCase):
    """Tests for ModelReportObserver statistics tracking.

    Verifies epoch min/max, per-batch average activation range, the
    batch-to-epoch ratio, and that reset_batch_and_epoch_values restores
    the initial state between epochs.
    """

    class NestedModifiedSingleLayerLinear(torch.nn.Module):
        # Small model with a ModelReportObserver on the raw input (obs1) and
        # another after the nested linear model (obs2).
        def __init__(self):
            super().__init__()
            self.obs1 = ModelReportObserver()
            self.mod1 = SingleLayerLinearModel()
            self.obs2 = ModelReportObserver()
            self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            x = self.obs1(x)
            x = self.mod1(x)
            x = self.obs2(x)
            x = self.fc1(x)
            x = self.relu(x)
            return x

    def run_model_and_common_checks(self, model, ex_input, num_epochs, batch_size):
        """Feed ``ex_input`` through ``model`` for ``num_epochs`` epochs and
        check obs1's statistics after every batch.

        NOTE(review): the loop iterates over batches of size ``batch_size`` but
        each iteration runs the full ``ex_input`` through the model — the
        per-batch expectations below are computed from ``batch`` only; confirm
        this is intentional against ModelReportObserver's semantics.
        """
        # split up data into batches
        split_up_data = torch.split(ex_input, batch_size)
        for epoch in range(num_epochs):
            # reset all model report obs
            model.apply(
                lambda module: module.reset_batch_and_epoch_values()
                if isinstance(module, ModelReportObserver)
                else None
            )

            # quick check that a reset occurred
            self.assertEqual(
                getattr(model, "obs1").average_batch_activation_range,
                torch.tensor(float(0)),
            )
            self.assertEqual(getattr(model, "obs1").epoch_activation_min, torch.tensor(float("inf")))
            self.assertEqual(getattr(model, "obs1").epoch_activation_max, torch.tensor(float("-inf")))

            # loop through the batches and run through
            for index, batch in enumerate(split_up_data):

                num_tracked_so_far = getattr(model, "obs1").num_batches_tracked
                self.assertEqual(num_tracked_so_far, index)

                # get general info about the batch and the model to use later
                batch_min, batch_max = torch.aminmax(batch)
                current_average_range = getattr(model, "obs1").average_batch_activation_range
                current_epoch_min = getattr(model, "obs1").epoch_activation_min
                current_epoch_max = getattr(model, "obs1").epoch_activation_max

                # run input through
                model(ex_input)

                # check that average batch activation range updated correctly
                # (running average: old_avg * n + new_range, divided by n + 1)
                correct_updated_value = (current_average_range * num_tracked_so_far + (batch_max - batch_min)) / (
                    num_tracked_so_far + 1
                )
                self.assertEqual(
                    getattr(model, "obs1").average_batch_activation_range,
                    correct_updated_value,
                )

                # ratio is only defined when the epoch has non-zero spread
                if current_epoch_max - current_epoch_min > 0:
                    self.assertEqual(
                        getattr(model, "obs1").get_batch_to_epoch_ratio(),
                        correct_updated_value / (current_epoch_max - current_epoch_min),
                    )

    """Case includes:
        all zero tensor
        dim size = 2
        run for 1 epoch
        run for 10 batch
        tests input data observer
    """

    def test_zero_tensor_errors(self):
        # initialize the model
        model = self.NestedModifiedSingleLayerLinear()

        # generate the desired input
        ex_input = torch.zeros((10, 1, 5))

        # run it through the model and do general tests
        self.run_model_and_common_checks(model, ex_input, 1, 1)

        # make sure final values are all 0
        self.assertEqual(getattr(model, "obs1").epoch_activation_min, 0)
        self.assertEqual(getattr(model, "obs1").epoch_activation_max, 0)
        self.assertEqual(getattr(model, "obs1").average_batch_activation_range, 0)

        # we should get an error if we try to calculate the ratio
        # (zero epoch spread makes the ratio undefined)
        with self.assertRaises(ValueError):
            ratio_val = getattr(model, "obs1").get_batch_to_epoch_ratio()

    """Case includes:
        non-zero tensor
        dim size = 2
        run for 1 epoch
        run for 1 batch
        tests input data observer
    """

    def test_single_batch_of_ones(self):
        # initialize the model
        model = self.NestedModifiedSingleLayerLinear()

        # generate the desired input
        ex_input = torch.ones((1, 1, 5))

        # run it through the model and do general tests
        self.run_model_and_common_checks(model, ex_input, 1, 1)

        # make sure final values are all 0 except for range
        self.assertEqual(getattr(model, "obs1").epoch_activation_min, 1)
        self.assertEqual(getattr(model, "obs1").epoch_activation_max, 1)
        self.assertEqual(getattr(model, "obs1").average_batch_activation_range, 0)

        # we should get an error if we try to calculate the ratio
        with self.assertRaises(ValueError):
            ratio_val = getattr(model, "obs1").get_batch_to_epoch_ratio()

    """Case includes:
        non-zero tensor
        dim size = 2
        run for 10 epoch
        run for 15 batch
        tests non input data observer
    """

    def test_observer_after_relu(self):

        # model specific to this test
        class NestedModifiedObserverAfterRelu(torch.nn.Module):
            # obs2 sits after the ReLU rather than right after the input,
            # exercising a non-input-data observer
            def __init__(self):
                super().__init__()
                self.obs1 = ModelReportObserver()
                self.mod1 = SingleLayerLinearModel()
                self.obs2 = ModelReportObserver()
                self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x = self.obs1(x)
                x = self.mod1(x)
                x = self.fc1(x)
                x = self.relu(x)
                x = self.obs2(x)
                return x

        # initialize the model
        model = NestedModifiedObserverAfterRelu()

        # generate the desired input
        ex_input = torch.randn((15, 1, 5))

        # run it through the model and do general tests
        self.run_model_and_common_checks(model, ex_input, 10, 15)

    """Case includes:
        non-zero tensor
        dim size = 2
        run for multiple epoch
        run for multiple batch
        tests input data observer
    """

    def test_random_epochs_and_batches(self):

        # set up a basic model
        class TinyNestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.obs1 = ModelReportObserver()
                self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float)
                self.relu = torch.nn.ReLU()
                self.obs2 = ModelReportObserver()

            def forward(self, x):
                x = self.obs1(x)
                x = self.fc1(x)
                x = self.relu(x)
                x = self.obs2(x)
                return x

        class LargerIncludeNestModel(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.obs1 = ModelReportObserver()
                self.nested = TinyNestModule()
                self.fc1 = SingleLayerLinearModel()
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x = self.obs1(x)
                x = self.nested(x)
                x = self.fc1(x)
                x = self.relu(x)
                return x

        class ModifiedThreeOps(torch.nn.Module):
            # linear followed by a batch norm whose dimension is configurable
            def __init__(self, batch_norm_dim):
                super(ModifiedThreeOps, self).__init__()
                self.obs1 = ModelReportObserver()
                self.linear = torch.nn.Linear(7, 3, 2)
                self.obs2 = ModelReportObserver()

                if batch_norm_dim == 2:
                    self.bn = torch.nn.BatchNorm2d(2)
                elif batch_norm_dim == 3:
                    self.bn = torch.nn.BatchNorm3d(4)
                else:
                    raise ValueError("Dim should only be 2 or 3")

                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x = self.obs1(x)
                x = self.linear(x)
                x = self.obs2(x)
                x = self.bn(x)
                x = self.relu(x)
                return x

        class HighDimensionNet(torch.nn.Module):
            def __init__(self):
                super(HighDimensionNet, self).__init__()
                self.obs1 = ModelReportObserver()
                self.fc1 = torch.nn.Linear(3, 7)
                self.block1 = ModifiedThreeOps(3)
                self.fc2 = torch.nn.Linear(3, 7)
                self.block2 = ModifiedThreeOps(3)
                self.fc3 = torch.nn.Linear(3, 7)

            def forward(self, x):
                x = self.obs1(x)
                x = self.fc1(x)
                x = self.block1(x)
                x = self.fc2(x)
                y = self.block2(x)
                y = self.fc3(y)
                z = x + y
                z = F.relu(z)
                return z

        # the purpose of this test is to give the observers a variety of data examples
        # initialize the model
        models = [
            self.NestedModifiedSingleLayerLinear(),
            LargerIncludeNestModel(),
            ModifiedThreeOps(2),
            HighDimensionNet(),
        ]

        # get some number of epochs and batches
        num_epochs = 10
        num_batches = 15

        input_shapes = [(1, 5), (1, 5), (2, 3, 7), (4, 1, 8, 3)]

        # generate the desired inputs
        inputs = []
        for shape in input_shapes:
            ex_input = torch.randn((num_batches, *shape))
            inputs.append(ex_input)

        # run it through the model and do general tests
        for index, model in enumerate(models):
            self.run_model_and_common_checks(model, inputs[index], num_epochs, num_batches)
"""
Partition on domain / things to test
There is only a single test case for now.
This will be more thoroughly tested with the implementation of the full end to end tool coming soon.
"""
class TestFxModelReportDetectDynamicStatic(QuantizationTestCase):
    """Tests for DynamicStaticDetector on an fx-prepared model with manually
    inserted ModelReportObservers."""

    @skipIfNoFBGEMM
    def test_nested_detection_case(self):
        # Manually inserts a pre- and post-observer around a nested linear and
        # feeds alternating data distributions so the input looks stationary but
        # the output does not, which should make dynamic quantization recommended.
        class SingleLinear(torch.nn.Module):
            def __init__(self):
                super(SingleLinear, self).__init__()
                self.linear = torch.nn.Linear(3, 3)

            def forward(self, x):
                x = self.linear(x)
                return x

        class TwoBlockNet(torch.nn.Module):
            def __init__(self):
                super(TwoBlockNet, self).__init__()
                self.block1 = SingleLinear()
                self.block2 = SingleLinear()

            def forward(self, x):
                x = self.block1(x)
                y = self.block2(x)
                z = x + y
                z = F.relu(z)
                return z

        with override_quantized_engine('fbgemm'):
            # create model, example input, and qconfig mapping
            torch.backends.quantized.engine = "fbgemm"
            model = TwoBlockNet()
            example_input = torch.randint(-10, 0, (1, 3, 3, 3))
            example_input = example_input.to(torch.float)

            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig("fbgemm"))

            # prep model and select observer
            model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)
            obs_ctr = ModelReportObserver

            # find layer to attach to and store
            linear_fqn = "block2.linear"  # fqn of target linear

            target_linear = None
            for node in model_prep.graph.nodes:
                if node.target == linear_fqn:
                    target_linear = node
                    break

            # insert into both module and graph pre and post

            # set up to insert before target_linear (pre_observer)
            with model_prep.graph.inserting_before(target_linear):
                obs_to_insert = obs_ctr()
                pre_obs_fqn = linear_fqn + ".model_report_pre_observer"
                model_prep.add_submodule(pre_obs_fqn, obs_to_insert)
                # pre-observer takes the same args as the linear it precedes
                model_prep.graph.create_node(op="call_module", target=pre_obs_fqn, args=target_linear.args)

            # set up and insert after the target_linear (post_observer)
            with model_prep.graph.inserting_after(target_linear):
                obs_to_insert = obs_ctr()
                post_obs_fqn = linear_fqn + ".model_report_post_observer"
                model_prep.add_submodule(post_obs_fqn, obs_to_insert)
                # post-observer consumes the linear's output
                model_prep.graph.create_node(op="call_module", target=post_obs_fqn, args=(target_linear,))

            # need to recompile module after submodule added and pass input through
            model_prep.recompile()

            num_iterations = 10
            for i in range(num_iterations):
                # alternate between negative-only and positive-only data
                if i % 2 == 0:
                    example_input = torch.randint(-10, 0, (1, 3, 3, 3)).to(torch.float)
                else:
                    example_input = torch.randint(0, 10, (1, 3, 3, 3)).to(torch.float)
                model_prep(example_input)

            # run it through the dynamic vs static detector
            dynamic_vs_static_detector = DynamicStaticDetector()
            dynam_vs_stat_str, dynam_vs_stat_dict = dynamic_vs_static_detector.generate_detector_report(model_prep)

            # one of the stats should be stationary, and the other non-stationary
            # as a result, dynamic should be recommended
            data_dist_info = [
                dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.PRE_OBS_DATA_DIST_KEY],
                dynam_vs_stat_dict[linear_fqn][DynamicStaticDetector.POST_OBS_DATA_DIST_KEY],
            ]

            self.assertTrue("stationary" in data_dist_info)
            self.assertTrue("non-stationary" in data_dist_info)
            self.assertTrue(dynam_vs_stat_dict[linear_fqn]["dynamic_recommended"])
class TestFxModelReportClass(QuantizationTestCase):
    """Tests for the ModelReport class: construction, calibration preparation,
    report generation, and visualizer creation."""

    @skipIfNoFBGEMM
    def test_constructor(self):
        """
        Tests the constructor of the ModelReport class.
        Specifically looks at:
        - The desired reports
        - Ensures that the observers of interest are properly initialized
        """

        with override_quantized_engine('fbgemm'):
            # set the backend for this test
            torch.backends.quantized.engine = "fbgemm"
            backend = torch.backends.quantized.engine

            # create a model
            model = ThreeOps()
            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))
            model_prep = quantize_fx.prepare_fx(model, q_config_mapping, model.get_example_inputs()[0])

            # make an example set of detectors
            test_detector_set = set([DynamicStaticDetector(), PerChannelDetector(backend)])
            # initialize with an empty detector
            model_report = ModelReport(model_prep, test_detector_set)

            # make sure internal valid reports matches
            detector_name_set = set([detector.get_detector_name() for detector in test_detector_set])
            self.assertEqual(model_report.get_desired_reports_names(), detector_name_set)

            # now attempt with no valid reports, should raise error
            with self.assertRaises(ValueError):
                model_report = ModelReport(model, set([]))

            # number of expected obs of interest entries
            num_expected_entries = len(test_detector_set)
            self.assertEqual(len(model_report.get_observers_of_interest()), num_expected_entries)

            # no observers attached yet, so every entry should be empty
            for value in model_report.get_observers_of_interest().values():
                self.assertEqual(len(value), 0)

    @skipIfNoFBGEMM
    def test_prepare_model_callibration(self):
        """
        Tests model_report.prepare_detailed_calibration that prepares the model for calibration
        Specifically looks at:
        - Whether observers are properly inserted into regular nn.Module
        - Whether the target and the arguments of the observers are proper
        - Whether the internal representation of observers of interest is updated
        """

        with override_quantized_engine('fbgemm'):
            # create model report object

            # create model
            model = TwoThreeOps()

            # make an example set of detectors
            torch.backends.quantized.engine = "fbgemm"
            backend = torch.backends.quantized.engine
            test_detector_set = set([DynamicStaticDetector(), PerChannelDetector(backend)])
            # initialize with an empty detector

            # prepare the model
            example_input = model.get_example_inputs()[0]
            current_backend = torch.backends.quantized.engine
            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            model_prep = quantize_fx.prepare_fx(model, q_config_mapping, example_input)

            model_report = ModelReport(model_prep, test_detector_set)

            # prepare the model for calibration
            prepared_for_callibrate_model = model_report.prepare_detailed_calibration()

            # see whether observers properly in regular nn.Module
            # there should be 4 observers present in this case
            modules_observer_cnt = 0
            for fqn, module in prepared_for_callibrate_model.named_modules():
                if isinstance(module, ModelReportObserver):
                    modules_observer_cnt += 1
            self.assertEqual(modules_observer_cnt, 4)

            model_report_str_check = "model_report"
            # also make sure arguments for observers in the graph are proper
            for node in prepared_for_callibrate_model.graph.nodes:
                # not all node targets are strings, so check
                if isinstance(node.target, str) and model_report_str_check in node.target:
                    # if pre-observer has same args as the linear (next node)
                    if "pre_observer" in node.target:
                        self.assertEqual(node.args, node.next.args)
                    # if post-observer, args are the target linear (previous node)
                    if "post_observer" in node.target:
                        self.assertEqual(node.args, (node.prev,))

            # ensure model_report observers of interest updated
            # there should be two entries
            self.assertEqual(len(model_report.get_observers_of_interest()), 2)
            for detector in test_detector_set:
                self.assertTrue(detector.get_detector_name() in model_report.get_observers_of_interest().keys())

                # get number of entries for this detector
                detector_obs_of_interest_fqns = model_report.get_observers_of_interest()[detector.get_detector_name()]

                # assert that the per channel detector has 0 and the dynamic static has 4
                if isinstance(detector, PerChannelDetector):
                    self.assertEqual(len(detector_obs_of_interest_fqns), 0)
                elif isinstance(detector, DynamicStaticDetector):
                    self.assertEqual(len(detector_obs_of_interest_fqns), 4)

            # ensure that we can prepare for calibration only once
            with self.assertRaises(ValueError):
                prepared_for_callibrate_model = model_report.prepare_detailed_calibration()

    def get_module_and_graph_cnts(self, callibrated_fx_module):
        r"""
        Calculates number of ModelReportObserver modules in the model as well as the graph structure.
        Returns a tuple of two elements:
        int: The number of ModelReportObservers found in the model
        int: The number of model_report nodes found in the graph
        """
        # get the number of observers stored as modules
        modules_observer_cnt = 0
        for fqn, module in callibrated_fx_module.named_modules():
            if isinstance(module, ModelReportObserver):
                modules_observer_cnt += 1

        # get number of observers in the graph
        model_report_str_check = "model_report"
        graph_observer_cnt = 0
        # count graph nodes whose target mentions model_report
        for node in callibrated_fx_module.graph.nodes:
            # not all node targets are strings, so check
            if isinstance(node.target, str) and model_report_str_check in node.target:
                # increment if we found a graph observer
                graph_observer_cnt += 1

        return (modules_observer_cnt, graph_observer_cnt)

    @skipIfNoFBGEMM
    def test_generate_report(self):
        """
        Tests model_report.generate_model_report to ensure report generation
        Specifically looks at:
        - Whether correct number of reports are being generated
        - Whether observers are being properly removed if specified
        - Whether correct blocking from generating report twice if obs removed
        """

        with override_quantized_engine('fbgemm'):
            # set the backend for this test
            torch.backends.quantized.engine = "fbgemm"

            # check whether the correct number of reports are being generated
            filled_detector_set = set([DynamicStaticDetector(), PerChannelDetector(torch.backends.quantized.engine)])
            single_detector_set = set([DynamicStaticDetector()])

            # create our models
            model_full = TwoThreeOps()
            model_single = TwoThreeOps()

            # prepare and calibrate two different instances of same model
            # prepare the model
            example_input = model_full.get_example_inputs()[0]
            current_backend = torch.backends.quantized.engine
            q_config_mapping = QConfigMapping()
            q_config_mapping.set_global(torch.ao.quantization.get_default_qconfig(torch.backends.quantized.engine))

            model_prep_full = quantize_fx.prepare_fx(model_full, q_config_mapping, example_input)
            model_prep_single = quantize_fx.prepare_fx(model_single, q_config_mapping, example_input)

            # initialize one with filled detector
            model_report_full = ModelReport(model_prep_full, filled_detector_set)
            # initialize another with a single detector set
            model_report_single = ModelReport(model_prep_single, single_detector_set)

            # prepare the models for calibration
            prepared_for_callibrate_model_full = model_report_full.prepare_detailed_calibration()
            prepared_for_callibrate_model_single = model_report_single.prepare_detailed_calibration()

            # now calibrate the two models
            num_iterations = 10
            for i in range(num_iterations):
                # NOTE(review): torch.tensor() on an existing tensor copies and warns;
                # torch.randint(..., dtype=...) or .to(torch.float) would be cleaner
                example_input = torch.tensor(torch.randint(100, (1, 3, 3, 3)), dtype=torch.float)
                prepared_for_callibrate_model_full(example_input)
                prepared_for_callibrate_model_single(example_input)

            # now generate the reports
            model_full_report = model_report_full.generate_model_report(True)
            model_single_report = model_report_single.generate_model_report(False)

            # check that sizes are appropriate
            self.assertEqual(len(model_full_report), len(filled_detector_set))
            self.assertEqual(len(model_single_report), len(single_detector_set))

            # make sure observers are being properly removed for full report since we put flag in
            modules_observer_cnt, graph_observer_cnt = self.get_module_and_graph_cnts(prepared_for_callibrate_model_full)
            self.assertEqual(modules_observer_cnt, 0)  # assert no more observer modules
            self.assertEqual(graph_observer_cnt, 0)  # assert no more observer nodes in graph

            # make sure observers aren't being removed for single report since not specified
            modules_observer_cnt, graph_observer_cnt = self.get_module_and_graph_cnts(prepared_for_callibrate_model_single)
            self.assertNotEqual(modules_observer_cnt, 0)
            self.assertNotEqual(graph_observer_cnt, 0)

            # make sure error when try to rerun report generation for full report but not single report
            with self.assertRaises(Exception):
                model_full_report = model_report_full.generate_model_report(
                    prepared_for_callibrate_model_full, False
                )

            # make sure we don't run into error for single report
            model_single_report = model_report_single.generate_model_report(False)

    @skipIfNoFBGEMM
    def test_generate_visualizer(self):
        """
        Tests that the ModelReport class can properly create the ModelReportVisualizer instance
        Checks that:
        - Correct number of modules are represented
        - Modules are sorted
        - Correct number of features for each module
        """
        with override_quantized_engine('fbgemm'):
            # set the backend for this test
            torch.backends.quantized.engine = "fbgemm"
            # test with multiple detectors
            detector_set = set()
            detector_set.add(OutlierDetector(reference_percentile=0.95))
            detector_set.add(InputWeightEqualizationDetector(0.5))

            model = TwoThreeOps()

            # get test model and calibrate
            prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
                model, detector_set, model.get_example_inputs()[0]
            )

            # now we actually calibrate the model
            example_input = model.get_example_inputs()[0]
            example_input = example_input.to(torch.float)

            prepared_for_callibrate_model(example_input)

            # try to visualize without generating report, should throw error
            with self.assertRaises(Exception):
                mod_rep_visualizaiton = mod_report.generate_visualizer()

            # now get the report by running it through ModelReport instance
            generated_report = mod_report.generate_model_report(remove_inserted_observers=False)

            # now we get the visualizer should not error
            mod_rep_visualizer: ModelReportVisualizer = mod_report.generate_visualizer()

            # since we tested with outlier detector, which looks at every base level module
            # should be six entries in the ordered dict
            mod_fqns_to_features = mod_rep_visualizer.generated_reports

            self.assertEqual(len(mod_fqns_to_features), 6)

            # outlier detector has 9 feature per module
            # input-weight has 12 features per module
            # there are 1 common data point, so should be 12 + 9 - 1 = 20 unique features per common modules
            # all linears will be common
            for module_fqn in mod_fqns_to_features:
                if ".linear" in module_fqn:
                    linear_info = mod_fqns_to_features[module_fqn]
                    self.assertEqual(len(linear_info), 20)
class TestFxDetectInputWeightEqualization(QuantizationTestCase):
class SimpleConv(torch.nn.Module):
    """Minimal conv -> relu block.

    ``con_dims`` is an (in_channels, out_channels) pair for the single
    3x3 stride-2 convolution (no bias).
    """

    def __init__(self, con_dims):
        super().__init__()
        self.relu = torch.nn.ReLU()
        self.conv = torch.nn.Conv2d(
            con_dims[0],
            con_dims[1],
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=(1, 1),
            bias=False,
        )

    def forward(self, x):
        # conv followed by relu, fused into one expression
        return self.relu(self.conv(x))
class TwoBlockComplexNet(torch.nn.Module):
    """Two SimpleConv blocks whose outputs are combined by addition, then
    flattened into a linear + relu head."""

    def __init__(self):
        super().__init__()
        self.block1 = TestFxDetectInputWeightEqualization.SimpleConv((3, 32))
        self.block2 = TestFxDetectInputWeightEqualization.SimpleConv((3, 3))
        self.conv = torch.nn.Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1), bias=False)
        self.linear = torch.nn.Linear(768, 10)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        # left path: block1 then 1x1 conv
        left = self.conv(self.block1(x))
        # right path: block2 on the left result, tiled 2x2 spatially to match
        right = self.block2(left).repeat(1, 1, 2, 2)
        # combine, flatten everything but the batch dim, and classify
        combined = (left + right).flatten(start_dim=1)
        return self.relu(self.linear(combined))

    def get_fusion_modules(self):
        # conv+relu pair eligible for fusion
        return [['conv', 'relu']]

    def get_example_inputs(self):
        return (torch.randn((1, 3, 28, 28)),)
class ReluOnly(torch.nn.Module):
    """Model containing nothing but a ReLU — no conv/linear layers at all."""

    def __init__(self):
        super().__init__()
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        # single activation, nothing else
        return self.relu(x)

    def get_example_inputs(self):
        # deterministic 1x3x3x3 input of consecutive integers
        return (torch.arange(27).reshape((1, 3, 3, 3)),)
def _get_prepped_for_calibration_model(self, model, detector_set, fused=False):
r"""Returns a model that has been prepared for callibration and corresponding model_report"""
# pass in necessary inputs to helper
example_input = model.get_example_inputs()[0]
return _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused)
@skipIfNoFBGEMM
def test_input_weight_equalization_determine_points(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
detector_set = set([InputWeightEqualizationDetector(0.5)])
# get tst model and callibrate
non_fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set)
fused = self._get_prepped_for_calibration_model(self.TwoBlockComplexNet(), detector_set, fused=True)
# reporter should still give same counts even for fused model
for prepared_for_callibrate_model, mod_report in [non_fused, fused]:
# supported modules to check
mods_to_check = set([nn.Linear, nn.Conv2d])
# get the set of all nodes in the graph their fqns
node_fqns = set([node.target for node in prepared_for_callibrate_model.graph.nodes])
# there should be 4 node fqns that have the observer inserted
correct_number_of_obs_inserted = 4
number_of_obs_found = 0
obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME
for node in prepared_for_callibrate_model.graph.nodes:
# if the obs name is inside the target, we found an observer
if obs_name_to_find in str(node.target):
number_of_obs_found += 1
self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)
# assert that each of the desired modules have the observers inserted
for fqn, module in prepared_for_callibrate_model.named_modules():
# check if module is a supported module
is_in_include_list = sum(list(map(lambda x: isinstance(module, x), mods_to_check))) > 0
if is_in_include_list:
# make sure it has the observer attribute
self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
else:
# if it's not a supported type, it shouldn't have observer attached
self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen(self):
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = set([test_input_weight_detector])
model = self.TwoBlockComplexNet()
# prepare the model for callibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(
model, detector_set
)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 5 layers looked at since 4 conv / linear layers
self.assertEqual(len(input_weight_dict), 4)
# we can validate that the max and min values of the detector were recorded properly for the first one
# this is because no data has been processed yet, so it should be values from original input
example_input = example_input.reshape((3, 28, 28)) # reshape input
for module_fqn in input_weight_dict:
# look for the first linear
if "block1.linear" in module_fqn:
block_1_lin_recs = input_weight_dict[module_fqn]
# get input range info and the channel axis
ch_axis = block_1_lin_recs[InputWeightEqualizationDetector.CHANNEL_KEY]
# ensure that the min and max values extracted match properly
example_min, example_max = torch.aminmax(example_input, dim=ch_axis)
dimension_min = torch.amin(example_min, dim=ch_axis)
dimension_max = torch.amax(example_max, dim=ch_axis)
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
self.assertEqual(per_channel_min, dimension_min)
self.assertEqual(per_channel_max, dimension_max)
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.ACTIVATION_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
# make sure the global min and max were correctly recorded and presented
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
self.assertEqual(global_min, min(dimension_min))
self.assertEqual(global_max, max(dimension_max))
input_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# ensure comparision stat passed back is sqrt of range ratios
# need to get the weight ratios first
# make sure per channel min and max are as expected
min_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MIN_KEY
max_per_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_per_key += InputWeightEqualizationDetector.PER_CHANNEL_MAX_KEY
# get weight per channel and global info
per_channel_min = block_1_lin_recs[min_per_key]
per_channel_max = block_1_lin_recs[max_per_key]
# make sure per channel min and max are as expected
min_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
min_key += InputWeightEqualizationDetector.GLOBAL_MIN_KEY
max_key = InputWeightEqualizationDetector.WEIGHT_PREFIX
max_key += InputWeightEqualizationDetector.GLOBAL_MAX_KEY
global_min = block_1_lin_recs[min_key]
global_max = block_1_lin_recs[max_key]
weight_ratio = torch.sqrt((per_channel_max - per_channel_min) / (global_max - global_min))
# also get comp stat for this specific layer
comp_stat = block_1_lin_recs[InputWeightEqualizationDetector.COMP_METRIC_KEY]
weight_to_input_ratio = weight_ratio / input_ratio
self.assertEqual(comp_stat, weight_to_input_ratio)
# only looking at the first example so can break
break
@skipIfNoFBGEMM
def test_input_weight_equalization_report_gen_empty(self):
# tests report gen on a model that doesn't have any layers
# use fbgemm and create our model instance
# then create model report instance with detector
with override_quantized_engine('fbgemm'):
test_input_weight_detector = InputWeightEqualizationDetector(0.4)
detector_set = set([test_input_weight_detector])
model = self.ReluOnly()
# prepare the model for callibration
prepared_for_callibrate_model, model_report = self._get_prepped_for_calibration_model(model, detector_set)
# now we actually callibrate the model
example_input = model.get_example_inputs()[0]
example_input = example_input.to(torch.float)
prepared_for_callibrate_model(example_input)
# now get the report by running it through ModelReport instance
generated_report = model_report.generate_model_report(True)
# check that sizes are appropriate only 1 detector
self.assertEqual(len(generated_report), 1)
# get the specific report for input weight equalization
input_weight_str, input_weight_dict = generated_report[test_input_weight_detector.get_detector_name()]
# we should have 0 layers since there is only a Relu
self.assertEqual(len(input_weight_dict), 0)
# make sure that the string only has two lines, as should be if no suggestions
self.assertEqual(input_weight_str.count("\n"), 2)
class TestFxDetectOutliers(QuantizationTestCase):
    """Tests for OutlierDetector: observer insertion and outlier-report content
    under normal, degenerate-threshold, and deliberately spiked input data."""

    class LargeBatchModel(torch.nn.Module):
        """linear -> relu -> conv -> relu model whose every dimension is param_size."""
        def __init__(self, param_size):
            super().__init__()
            self.param_size = param_size
            self.linear = torch.nn.Linear(param_size, param_size)
            self.relu_1 = torch.nn.ReLU()
            self.conv = torch.nn.Conv2d(param_size, param_size, 1)
            self.relu_2 = torch.nn.ReLU()

        def forward(self, x):
            x = self.linear(x)
            x = self.relu_1(x)
            x = self.conv(x)
            x = self.relu_2(x)
            return x

        def get_example_inputs(self):
            # plain normally-distributed input, no outliers expected
            param_size = self.param_size
            return (torch.randn((1, param_size, param_size, param_size)),)

        def get_outlier_inputs(self):
            # normal input with a huge spike planted in every other channel
            param_size = self.param_size
            random_vals = torch.randn((1, param_size, param_size, param_size))
            # change one in some of them to be a massive value
            random_vals[:, 0:param_size:2, 0, 3] = torch.tensor([3.28e8])
            return (random_vals,)

    def _get_prepped_for_calibration_model(self, model, detector_set, use_outlier_data=False):
        r"""Returns a model that has been prepared for callibration and corresponding model_report"""
        # call the general helper function to callibrate
        example_input = model.get_example_inputs()[0]

        # if we specifically want to test data with outliers replace input
        if use_outlier_data:
            example_input = model.get_outlier_inputs()[0]

        return _get_prepped_for_calibration_model_helper(model, detector_set, example_input)

    @skipIfNoFBGEMM
    def test_outlier_detection_determine_points(self):
        """Pre-observers should be inserted on linear/conv/relu modules only."""
        # use fbgemm and create our model instance
        # then create model report instance with detector
        # similar to test for InputWeightEqualization but key differences that made refactoring not viable
        # not explicitly testing fusion because fx workflow automatically
        with override_quantized_engine('fbgemm'):
            detector_set = set([OutlierDetector(reference_percentile=0.95)])

            # get tst model and callibrate
            prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
                self.LargeBatchModel(param_size=128), detector_set
            )

            # supported modules to check
            mods_to_check = set([nn.Linear, nn.Conv2d, nn.ReLU])

            # there should be 4 node fqns that have the observer inserted
            correct_number_of_obs_inserted = 4
            number_of_obs_found = 0
            # NOTE: both detectors share the same pre-observer attribute name
            obs_name_to_find = InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME

            number_of_obs_found = sum(
                [1 if obs_name_to_find in str(node.target) else 0 for node in prepared_for_callibrate_model.graph.nodes]
            )
            self.assertEqual(number_of_obs_found, correct_number_of_obs_inserted)

            # assert that each of the desired modules have the observers inserted
            for fqn, module in prepared_for_callibrate_model.named_modules():
                # check if module is a supported module
                is_in_include_list = isinstance(module, tuple(mods_to_check))

                if is_in_include_list:
                    # make sure it has the observer attribute
                    self.assertTrue(hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))
                else:
                    # if it's not a supported type, it shouldn't have observer attached
                    self.assertTrue(not hasattr(module, InputWeightEqualizationDetector.DEFAULT_PRE_OBSERVER_NAME))

    @skipIfNoFBGEMM
    def test_no_outlier_report_gen(self):
        """With normally distributed inputs no channel should be flagged as an outlier."""
        # use fbgemm and create our model instance
        # then create model report instance with detector
        with override_quantized_engine('fbgemm'):
            # test with multiple detectors
            outlier_detector = OutlierDetector(reference_percentile=0.95)
            dynamic_static_detector = DynamicStaticDetector(tolerance=0.5)

            param_size: int = 4
            detector_set = set([outlier_detector, dynamic_static_detector])
            model = self.LargeBatchModel(param_size=param_size)

            # get tst model and callibrate
            prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
                model, detector_set
            )

            # now we actually callibrate the model
            example_input = model.get_example_inputs()[0]
            example_input = example_input.to(torch.float)

            prepared_for_callibrate_model(example_input)

            # now get the report by running it through ModelReport instance
            generated_report = mod_report.generate_model_report(True)

            # check that sizes are appropriate only 2 detectors
            self.assertEqual(len(generated_report), 2)

            # get the specific report for outlier detection
            outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]

            # we should have 4 layers looked at: linear + conv + the two relus
            self.assertEqual(len(outlier_dict), 4)

            # assert the following are true for all the modules
            for module_fqn in outlier_dict:
                # get the info for the specific module
                module_dict = outlier_dict[module_fqn]

                # there really should not be any outliers since we used a normal distribution to perform this calculation
                outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
                self.assertEqual(sum(outlier_info), 0)

                # ensure that the number of ratios and batches counted is the same as the number of params
                self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
                self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)

    @skipIfNoFBGEMM
    def test_all_outlier_report_gen(self):
        """With ratio_threshold=1 and reference_percentile=0 most channels should
        be flagged as outliers."""
        # make the percentile 0 and the ratio 1, and then see that everything is outlier according to it
        # use fbgemm and create our model instance
        # then create model report instance with detector
        with override_quantized_engine('fbgemm'):
            # create detector of interest
            outlier_detector = OutlierDetector(ratio_threshold=1, reference_percentile=0)

            param_size: int = 16
            detector_set = set([outlier_detector])
            model = self.LargeBatchModel(param_size=param_size)

            # get tst model and callibrate
            prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
                model, detector_set
            )

            # now we actually callibrate the model
            example_input = model.get_example_inputs()[0]
            example_input = example_input.to(torch.float)

            prepared_for_callibrate_model(example_input)

            # now get the report by running it through ModelReport instance
            generated_report = mod_report.generate_model_report(True)

            # check that sizes are appropriate only 1 detector
            self.assertEqual(len(generated_report), 1)

            # get the specific report for outlier detection
            outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]

            # we should have 4 layers looked at: linear + conv + the two relus
            self.assertEqual(len(outlier_dict), 4)

            # assert the following are true for all the modules
            for module_fqn in outlier_dict:
                # get the info for the specific module
                module_dict = outlier_dict[module_fqn]

                # everything should be an outlier because we said that the max should be equal to the min for all of them
                # however we will just test and say most should be in case we have several 0 channel values
                outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
                assert sum(outlier_info) >= len(outlier_info) / 2

                # ensure that the number of ratios and batches counted is the same as the number of params
                self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
                self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)

    @skipIfNoFBGEMM
    def test_multiple_run_consistent_spike_outlier_report_gen(self):
        """Repeated calibration on spiked inputs should flag the spiked channels
        as outliers and record the planted max values."""
        # specifically make a row really high consistently in the number of batches that you are testing and try that
        # generate report after just 1 run, and after many runs (30) and make sure above minimum threshold is there
        with override_quantized_engine('fbgemm'):
            # detector of interest
            outlier_detector = OutlierDetector(reference_percentile=0.95)

            param_size: int = 8
            detector_set = set([outlier_detector])
            model = self.LargeBatchModel(param_size=param_size)

            # get tst model and callibrate
            prepared_for_callibrate_model, mod_report = self._get_prepped_for_calibration_model(
                model, detector_set, use_outlier_data=True
            )

            # now we actually callibrate the model
            example_input = model.get_outlier_inputs()[0]
            example_input = example_input.to(torch.float)

            # now callibrate minimum 30 times to make it above minimum threshold
            for i in range(30):
                example_input = model.get_outlier_inputs()[0]
                example_input = example_input.to(torch.float)

                # make 2 of the batches to have zero channel
                if i % 14 == 0:
                    # make one channel constant
                    example_input[0][1] = torch.zeros_like(example_input[0][1])

                prepared_for_callibrate_model(example_input)

            # now get the report by running it through ModelReport instance
            generated_report = mod_report.generate_model_report(True)

            # check that sizes are appropriate only 1 detector
            self.assertEqual(len(generated_report), 1)

            # get the specific report for outlier detection
            outlier_str, outlier_dict = generated_report[outlier_detector.get_detector_name()]

            # we should have 4 layers looked at: linear + conv + the two relus
            self.assertEqual(len(outlier_dict), 4)

            # assert the following are true for all the modules
            for module_fqn in outlier_dict:
                # get the info for the specific module
                module_dict = outlier_dict[module_fqn]

                # because we ran 30 times, we should have at least a couple be significant
                # could be less because some channels could possibly be all 0
                sufficient_batches_info = module_dict[OutlierDetector.IS_SUFFICIENT_BATCHES_KEY]
                assert sum(sufficient_batches_info) >= len(sufficient_batches_info) / 2

                # half of them should be outliers, because we set a really high value every 2 channels
                outlier_info = module_dict[OutlierDetector.OUTLIER_KEY]
                self.assertEqual(sum(outlier_info), len(outlier_info) / 2)

                # ensure that the number of ratios and batches counted is the same as the number of params
                self.assertEqual(len(module_dict[OutlierDetector.COMP_METRIC_KEY]), param_size)
                self.assertEqual(len(module_dict[OutlierDetector.NUM_BATCHES_KEY]), param_size)

                # for the first one ensure the per channel max values are what we set
                if module_fqn == "linear.0":
                    # check that the non-zero channel count, at least 2 should be there
                    # for the first module
                    counts_info = module_dict[OutlierDetector.CONSTANT_COUNTS_KEY]
                    assert sum(counts_info) >= 2

                    # half of the recorded max values should be what we set
                    matched_max = sum([val == 3.28e8 for val in module_dict[OutlierDetector.MAX_VALS_KEY]])
                    self.assertEqual(matched_max, param_size / 2)
class TestFxModelReportVisualizer(QuantizationTestCase):
    """Tests for ModelReportVisualizer: unique module/feature queries and
    filtered table generation."""

    def _callibrate_and_generate_visualizer(self, model, prepared_for_callibrate_model, mod_report):
        r"""
        Callibrates the passed in model, generates report, and returns the visualizer
        """
        # now we actually callibrate the model
        example_input = model.get_example_inputs()[0]
        example_input = example_input.to(torch.float)

        prepared_for_callibrate_model(example_input)

        # now get the report by running it through ModelReport instance
        # keep the observers so the visualizer still has data to work with
        generated_report = mod_report.generate_model_report(remove_inserted_observers=False)

        # now we get the visualizer should not error
        mod_rep_visualizer: ModelReportVisualizer = mod_report.generate_visualizer()

        return mod_rep_visualizer

    @skipIfNoFBGEMM
    def test_get_modules_and_features(self):
        """
        Tests the get_all_unique_module_fqns and get_all_unique_feature_names methods of
        ModelReportVisualizer

        Checks whether returned sets are of proper size and filtered properly
        """
        with override_quantized_engine('fbgemm'):
            # set the backend for this test
            torch.backends.quantized.engine = "fbgemm"

            # test with multiple detectors
            detector_set = set()
            detector_set.add(OutlierDetector(reference_percentile=0.95))
            detector_set.add(InputWeightEqualizationDetector(0.5))

            model = TwoThreeOps()

            # get tst model and callibrate
            prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
                model, detector_set, model.get_example_inputs()[0]
            )

            mod_rep_visualizer: ModelReportVisualizer = self._callibrate_and_generate_visualizer(
                model, prepared_for_callibrate_model, mod_report
            )

            # ensure the module fqns match the ones given by the get_all_unique_feature_names method
            actual_model_fqns = set(mod_rep_visualizer.generated_reports.keys())
            returned_model_fqns = mod_rep_visualizer.get_all_unique_module_fqns()
            self.assertEqual(returned_model_fqns, actual_model_fqns)

            # now ensure that features are all properly returned
            # all the linears have all the features for two detectors
            # can use those as check that method is working reliably
            b_1_linear_features = mod_rep_visualizer.generated_reports["block1.linear"]

            # first test all features
            returned_all_feats = mod_rep_visualizer.get_all_unique_feature_names(False)
            self.assertEqual(returned_all_feats, set(b_1_linear_features.keys()))

            # now test plottable features
            # plottable features are the ones stored as tensors
            plottable_set = set()

            for feature_name in b_1_linear_features:
                if type(b_1_linear_features[feature_name]) == torch.Tensor:
                    plottable_set.add(feature_name)

            returned_plottable_feats = mod_rep_visualizer.get_all_unique_feature_names()
            self.assertEqual(returned_plottable_feats, plottable_set)

    def _prep_visualizer_helper(self):
        r"""
        Returns a mod rep visualizer that we test in various ways
        """
        # set backend for test
        torch.backends.quantized.engine = "fbgemm"

        # test with multiple detectors
        detector_set = set()
        detector_set.add(OutlierDetector(reference_percentile=0.95))
        detector_set.add(InputWeightEqualizationDetector(0.5))

        model = TwoThreeOps()

        # get tst model and callibrate
        prepared_for_callibrate_model, mod_report = _get_prepped_for_calibration_model_helper(
            model, detector_set, model.get_example_inputs()[0]
        )

        mod_rep_visualizer: ModelReportVisualizer = self._callibrate_and_generate_visualizer(
            model, prepared_for_callibrate_model, mod_report
        )

        return mod_rep_visualizer

    @skipIfNoFBGEMM
    def test_generate_tables_match_with_report(self):
        """
        Tests the generate_table_view()
        ModelReportVisualizer

        Checks whether the generated dict has proper information
        Visual check that the tables look correct performed during testing
        """
        with override_quantized_engine('fbgemm'):
            # get the visualizer
            mod_rep_visualizer = self._prep_visualizer_helper()

            table_dict = mod_rep_visualizer.generate_filtered_tables()

            # test primarily the dict since it has same info as str
            tensor_headers, tensor_table = table_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
            channel_headers, channel_table = table_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]

            # these two together should be the same as the generated report info in terms of keys
            # row[1] holds the module fqn in both tables
            tensor_info_modules = set(row[1] for row in tensor_table)
            channel_info_modules = set(row[1] for row in channel_table)
            combined_modules: Set = tensor_info_modules.union(channel_info_modules)

            generated_report_keys: Set = set(mod_rep_visualizer.generated_reports.keys())
            self.assertEqual(combined_modules, generated_report_keys)

    @skipIfNoFBGEMM
    def test_generate_tables_no_match(self):
        """
        Tests the generate_table_view()
        ModelReportVisualizer

        Checks whether the generated dict has proper information
        Visual check that the tables look correct performed during testing
        """
        with override_quantized_engine('fbgemm'):
            # get the visualizer
            mod_rep_visualizer = self._prep_visualizer_helper()

            # try a random filter and make sure that there are no rows for either table
            empty_tables_dict = mod_rep_visualizer.generate_filtered_tables(module_fqn_filter="random not there module")

            # test primarily the dict since it has same info as str
            tensor_headers, tensor_table = empty_tables_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
            channel_headers, channel_table = empty_tables_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]

            tensor_info_modules = set(row[1] for row in tensor_table)
            channel_info_modules = set(row[1] for row in channel_table)
            combined_modules: Set = tensor_info_modules.union(channel_info_modules)
            self.assertEqual(len(combined_modules), 0)  # should be no matching modules

    @skipIfNoFBGEMM
    def test_generate_tables_single_feat_match(self):
        """
        Tests the generate_table_view()
        ModelReportVisualizer

        Checks whether the generated dict has proper information
        Visual check that the tables look correct performed during testing
        """
        with override_quantized_engine('fbgemm'):
            # get the visualizer
            mod_rep_visualizer = self._prep_visualizer_helper()

            # try a matching filter for feature and make sure only those features show up
            # if we filter to a very specific feature name, should only have 1 additional column in each table row
            single_feat_dict = mod_rep_visualizer.generate_filtered_tables(feature_filter=OutlierDetector.MAX_VALS_KEY)

            # test primarily the dict since it has same info as str
            tensor_headers, tensor_table = single_feat_dict[ModelReportVisualizer.TABLE_TENSOR_KEY]
            channel_headers, channel_table = single_feat_dict[ModelReportVisualizer.TABLE_CHANNEL_KEY]

            # get the number of features in each of these
            tensor_info_features = len(tensor_headers)
            channel_info_features = len(channel_headers) - ModelReportVisualizer.NUM_NON_FEATURE_CHANNEL_HEADERS

            # make sure that there are no tensor features, and that there is one channel level feature
            self.assertEqual(tensor_info_features, 0)
            self.assertEqual(channel_info_features, 1)
def _get_prepped_for_calibration_model_helper(model, detector_set, example_input, fused: bool = False):
    r"""Prepare ``model`` for detailed calibration with the given detectors.

    Sets the quantized engine to fbgemm, optionally fuses the model's declared
    fusion pairs, runs FX ``prepare_fx`` with the default qconfig mapping, wraps
    the prepared model in a ``ModelReport``, and inserts the detector observers.

    Returns a tuple ``(prepared_for_calibration_model, model_report)``.
    """
    # callers of this helper all assume the fbgemm backend
    torch.backends.quantized.engine = "fbgemm"

    # prepare_fx traces with a float example input
    float_example = example_input.to(torch.float)

    # exercise the fused path when requested, using the model's own fusion list
    if fused:
        model = torch.quantization.fuse_modules(model, model.get_fusion_modules())

    qconfig_map = torch.ao.quantization.get_default_qconfig_mapping()
    prepared = quantize_fx.prepare_fx(model, qconfig_map, float_example)

    # attach the detectors and insert their observers for calibration
    report = ModelReport(prepared, detector_set)
    calibration_ready = report.prepare_detailed_calibration()

    return (calibration_ready, report)
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
828a703ebae1eccef6e6dc8304b2d5396451533a | 8ac7946c16fcea34bc8680b78cba4a796834b533 | /serializers.py | 95bc8dbe1ee056a6c05bc334ee4ec64d3404c5a0 | [
"MIT"
] | permissive | anyric/Yummy_Recipe_Api | bc4937505fa11e397cfb1dd10f3336c391f1c317 | 1d0639a1244988892d398cbee4ba479bdcb370f0 | refs/heads/master | 2021-05-14T08:01:19.035452 | 2017-12-20T09:02:10 | 2017-12-20T09:02:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # coding=utf-8
import datetime
from marshmallow import Schema, fields, pre_load
from marshmallow import validate
from flask_marshmallow import Marshmallow
ma = Marshmallow()
class UserSchema(ma.Schema):
    """
    Marshmallow schema for the User model: serializes id, username,
    hashed_password and a hyperlink to the user's own resource.
    """
    # primary key; dump_only means it is never accepted on load
    id = fields.Integer(dump_only=True)
    # required on load, minimum length 3 characters
    username = fields.String(required=True, validate=validate.Length(3))
    # NOTE(review): as written the password hash is included in dumped output;
    # consider load_only=True so the hash is never exposed — confirm intent.
    hashed_password = fields.String()
    # absolute URL pointing back at this user's API resource
    url = ma.URLFor('api.userresource', id='<id>', _external=True)
| [
"samoeikev@gmail.com"
] | samoeikev@gmail.com |
24c1026d70712dc58f96d6e0a9023fab0f1cdfd6 | 5c531de5e4759c904e608b4fc653b2b041f79a0e | /Snap_monte_carlo_simulation.py | 7cb310205cf32290a10076f203756fb68c14d270 | [] | no_license | jianhui-ben/leetcode_python | 133c7e6e5c7316d00607ba2e327239e002de28b2 | fcc16124cc24a5993e27f5d97e78d8f290e68230 | refs/heads/master | 2022-06-05T22:32:18.034581 | 2022-05-17T02:27:11 | 2022-05-17T02:27:11 | 250,683,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 965 | py | #N个 Turkers标数据,数据很模糊基本靠瞎猜,有M个选项可选。
# How likely is it that the annotators reach a majority consensus, i.e. that
# more than half of them pick the same option?
# First derive the analytical solution, then approximate it in code.
# The code itself is simple: run a Monte Carlo simulation enough times and use
# the observed frequency to approximate the probability.
## p= (1/M)**(N//2)
# Scratch/REPL-style exploration of the building blocks used by prob() below;
# all results except the print are discarded at import time.
print(12//2)  # floor division check -> prints 6
import random
random.randint(1, 2)  # API check: uniform pick from {1, 2}; result discarded
import collections
collections.Counter([1,1,1,2, 3,3,3,3]).most_common(1)[0][1]  # count of the most common element (4); discarded
def prob(M, N, trials=100000):
    """Monte Carlo estimate of the probability (in percent) that N annotators,
    each guessing uniformly at random among M options, reach a strict majority
    (i.e. some option is chosen by more than N // 2 of them).

    Args:
        M: number of options (M >= 1).
        N: number of annotators (N >= 1).
        trials: number of simulated rounds; more trials give a tighter
            estimate (default 100000, matching the original hard-coded value).

    Returns:
        Estimated probability as a percentage in [0.0, 100.0].
    """
    import collections
    import random

    majority_hits = 0
    for _ in range(trials):
        # one simulated round: every annotator picks an option uniformly
        choices = [random.randint(1, M) for _ in range(N)]
        # occurrence count of the single most popular option this round
        top_count = collections.Counter(choices).most_common(1)[0][1]
        if top_count > N // 2:
            majority_hits += 1
    return majority_hits / trials * 100.0
def verify(M, N):
    """Analytic sanity value: (1/M) ** (N // 2), expressed as a percentage."""
    half_rounds = N // 2
    per_round = 1.0 / M
    return (per_round ** half_rounds) * 100.0
# Scratch comparison of the analytic value (~14.29) against the simulation;
# both return values are discarded when the module is imported.
verify(7, 3)
prob(7, 3)
"jianhui.ben@gmail.com"
] | jianhui.ben@gmail.com |
3b14b0eb38ceefa834a3a25a79f46f208f9026cc | ddeee1af632e7366c1ee069076fb157c9264e53e | /membership/test_utils.py | 9e77f13a83fb5984a41eae6bc53d8350ba96f439 | [] | no_license | Wraithh/sikteeri | b4a6a86f9ca1f451d3fdb867eb684ca1bd7c7d6b | 97ab5de5bb64355bf802925b72c0ac869cc96434 | refs/heads/master | 2021-01-17T14:41:50.968825 | 2011-10-04T17:01:29 | 2011-10-04T17:01:29 | 1,303,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,219 | py | # -*- coding: utf-8 -*-
from random import choice
from models import *
# Finnish population register center's most popular first names for year 2009
first_names = [
u"Maria", u"Juhani", u"Aino", u"Veeti", u"Emilia", u"Johannes", u"Venla",
u"Eetu", u"Sofia", u"Mikael", u"Emma", u"Onni", u"Olivia", u"Matias",
u"Ella", u"Aleksi", u"Aino", u"Olavi", u"Sofia", u"Leevi", u"Amanda",
u"Onni", u"Aada", u"Elias", u"Matilda", u"Ilmari", u"Sara", u"Lauri",
u"Helmi", u"Oskari", u"Iida", u"Joona", u"Aurora", u"Elias", u"Anni",
u"Matias", u"Ilona", u"Oliver", u"Helmi", u"Leo", u"Iida", u"Eemeli",
u"Emilia", u"Niilo", u"Eveliina", u"Valtteri", u"Siiri", u"Rasmus", u"Katariina",
u"Aleksi", u"Veera", u"Oliver", u"Ella", u"Antero", u"Sanni", u"Miro",
u"Aada", u"Viljami", u"Vilma", u"Jimi", u"Kristiina", u"Kristian", u"Nea",
u"Aatu", u"Anni", u"Tapani", u"Milla", u"Daniel", u"Johanna", u"Samuel",
u"Pinja", u"Juho", u"Emma", u"Lauri", u"Lotta", u"Aapo", u"Sara",
u"Tapio", u"Olivia", u"Eemeli", u"Linnea", u"Veeti", u"Elli", u"Jesse",
u"Anna", u"Eetu", u"Emmi", u"Arttu", u"Elina", u"Emil", u"Ronja",
u"Lenni", u"Venla", u"Petteri", u"Elsa", u"Valtteri", u"Julia", u"Daniel",
u"Nella", u"Otto", u"Aleksandra", u"Eemil", u"Kerttu", u"Aaro", u"Helena",
u"Juho", u"Oona", u"Joel", u"Siiri", u"Leevi", u"Viivi", u"Niklas",
u"Karoliina", u"Joona", u"Julia", u"Ville", u"Inkeri", u"Julius", u"Pihla",
u"Roope", u"Alexandra", u"Elmeri", u"Peppi", u"Konsta", u"Alisa", u"Leo",
u"Nelli", u"Juuso", u"Susanna", u"Otto", u"Neea", u"Luka", u"Josefiina",
u"Aleksanteri", u"Jenna", u"Mikael", u"Kaarina", u"Akseli", u"Laura", u"Samuel",
u"Lotta", u"Sakari", u"Anna", u"Oskari", u"Alina", u"Anton", u"Milja",
u"Julius", u"Ellen", u"Veikko", u"Enni", u"Luukas", u"Veera", u"Toivo",
u"Alisa", u"Jere", u"Sanni", u"Eino", u"Ilona", u"Niko", u"Kerttu",
u"Niilo", u"Inka", u"Eelis", u"Elsa", u"Jaakko", u"Amanda", u"Eeli",
u"Elli", u"Rasmus", u"Minea", u"Anton", u"Vilma", u"Antti", u"Matilda",
u"Eino", u"Vilhelmiina", u"Väinö", u"Iina", u"Emil", u"Nea", u"Henrik",
u"Eevi", u"Kasper", u"Anneli", u"Matti", u"Ellen", u"Tuomas", u"Maija",
u"Aatu", u"Saana", u"Eemil", u"Tuulia", u"Kalevi", u"Minttu", u"Akseli",
u"Anniina", u"Joonatan", u"Lilja", u"Viljami"]
# Kapsi members public unique last name listing as of today.
last_names = [
u"Aalto", u"Aaltonen", u"Addams-Moring", u"Aho", u"Ahola", u"Ahonen",
u"Aimonen", u"Al-Khanji", u"Ala-Kojola", u"Alakotila", u"Alanenpää", u"Alanko",
u"Alardt", u"Alaspää", u"Alatalo", u"Andelin", u"Annala", u"Antinkaapo",
u"Anttila", u"Anttonen", u"Arstila", u"Arvelin", u"Auvinen", u"Averio",
u"Bainton", u"Behm", u"Blomberg", u"Borén", u"Brander", u"Brockman",
u"Brunberg", u"Busk", u"Ceder", u"Corsini", u"Duldin", u"Eerikäinen",
u"Eerola", u"Ekblom", u"Ekman", u"Eloranta", u"Emas", u"Eriksson",
u"Ernsten", u"Erola", u"Eräluoto", u"Eskelinen", u"Eskola", u"Everilä",
u"Finnilä", u"Fjällström", u"Forslund", u"Grandell", u"Grenrus", u"Gröhn",
u"Grönlund", u"Haapajärvi", u"Haapala", u"Haasanen", u"Haatainen", u"Haataja",
u"Haavisto", u"Hagelberg", u"Hahtola", u"Haikonen", u"Haimi", u"Hakanen",
u"Hakkarainen", u"Halkosaari", u"Halla", u"Hallamaa", u"Hallikainen", u"Halme",
u"Halmu", u"Halonen", u"Hamara", u"Hanhijärvi", u"Hannola", u"Hannus",
u"Hansson", u"Harju", u"Harkila", u"Harma", u"Hasanen", u"Hassinen",
u"Hast", u"Hastrup", u"Hatanpää", u"Haverinen", u"Heikkerö", u"Heikkilä",
u"Heikkinen", u"Heikura", u"Heimonen", u"Heinikangas", u"Heinonen", u"Heinänen",
u"Heiramo", u"Heiskanen", u"Helander", u"Helenius", u"Herd", u"Herranen",
u"Herukka", u"Heusala", u"Hietala", u"Hietanen", u"Hietaranta", u"Hiilesrinne",
u"Hiljander", u"Hill", u"Hillervo", u"Hiltunen", u"Hinkula", u"Hintikka",
u"Hirvojärvi", u"Holopainen", u"Hongisto", u"Honkanen", u"Honkonen", u"Hopiavuori",
u"Hotti", u"Huhtala", u"Huhtinen", u"Hulkko", u"Huoman", u"Huotari",
u"Huovinen", u"Hurtta", u"Huttunen", u"Huuhtanen", u"Huuskonen", u"Hyttinen",
u"Hyvärinen", u"Häkkinen", u"Hämeenkorpi", u"Hämäläinen", u"Hänninen", u"Höglund",
u"Ihatsu", u"Ijäs", u"Ikonen", u"Ilmonen", u"Iltanen", u"Ingman",
u"Inha", u"Inkinen", u"Isaksson", u"Isomäki", u"Ituarte", u"Itäsalo",
u"Jaakkola", u"Jaatinen", u"Jakobsson", u"Jalonen", u"Jetsu", u"Johansson",
u"Jokela", u"Jokinen", u"Jokitalo", u"Jormanainen", u"Junni", u"Juopperi",
u"Juutinen", u"Juvankoski", u"Juvonen", u"Järvenpää", u"Järvensivu", u"Järvinen",
u"Jääskelä", u"Jääskeläinen", u"Kaarela", u"Kaartti", u"Kaija", u"Kaikkonen",
u"Kaila", u"Kainulainen", u"Kajan", u"Kakko", u"Kallio", u"Kanniainen",
u"Kanninen", u"Kare-Mäkiaho", u"Karhunen", u"Kari", u"Karimäki", u"Karisalmi",
u"Karjalainen", u"Karlsson", u"Karppi", u"Karttunen", u"Karvinen", u"Karvonen",
u"Kasari", u"Kataja", u"Katavisto", u"Kattelus", u"Kauppi", u"Kauppinen",
u"Keihänen", u"Keijonen", u"Kekki", u"Kekkonen", u"Kelanne", u"Kenttälä",
u"Keränen", u"Keskitalo", u"Kesti", u"Ketolainen", u"Ketonen", u"Kettinen",
u"Kianto", u"Kiiskilä", u"Kilpiäinen", u"Kinnula", u"Kinnunen", u"Kirkkopelto",
u"Kirves", u"Kittilä", u"Kiviharju", u"Kivikunnas", u"Kivilahti", u"Kiviluoto",
u"Kivimäki", u"Kivirinta", u"Knuutinen", u"Kohtamäki", u"Kois", u"Koivisto",
u"Koivu", u"Koivula", u"Koivulahti", u"Koivumaa", u"Koivunalho", u"Koivunen",
u"Koivuranta", u"Kokkonen", u"Kokkoniemi", u"Komulainen", u"Konsala", u"Konttila",
u"Konttinen", u"Koponen", u"Korhonen", u"Kortesalmi", u"Kortetmäki", u"Koskela",
u"Koskenniemi", u"Koski", u"Petteri", u"Koskinen", u"Kotanen", u"Koulu",
u"Kraft", u"Krohn", u"Krüger", u"Kudjoi", u"Kuhanen", u"Kuittinen",
u"Kuitunen", u"Kujala", u"Kujansuu", u"Kulju", u"Kurkimäki", u"Kuukasjärvi",
u"Kuusisto", u"Kuvaja", u"Kymäläinen", u"Kyntöaho", u"Kähkönen", u"Käki",
u"Kärkkäinen", u"Kärnä", u"Laaksonen", u"Laalo", u"Laapotti", u"Lagren",
u"Lagus", u"Lahdenmäki", u"Lahdenperä", u"Lahikainen", u"Lahtela", u"Laine",
u"Lainiola", u"Laitila", u"Laitinen", u"Untamo", u"Lakhan", u"Lamminen",
u"Lammio", u"Lampela", u"Lampén", u"Lampi", u"Lampinen", u"Lankila",
u"Lapinniemi", u"Lappalainen", u"Larivaara", u"Larja", u"Latvatalo", u"Laurila",
u"Laxström", u"Lehmuskenttä", u"Lehtinen", u"Lehtola", u"Lehtonen", u"Leikkari",
u"Leiviskä", u"Leivo", u"Lempinen", u"Lepistö", u"Leppänen", u"Levonen",
u"Lievemaa", u"Liimatta", u"Likitalo", u"Liljeqvist", u"Lindeman", u"Lindén",
u"Lindfors", u"Lindström", u"Linkoaho", u"Linkola", u"Linnaluoto", u"Linnamäki",
u"Lintervo", u"Lintumäki", u"Lipsanen", u"Liukkonen", u"Loikkanen", u"Loponen",
u"Louhiranta", u"Lundan", u"Luosmaa", u"Luukko", u"Luukkonen", u"Lähdemäki",
u"Lähteenmäki", u"Löfgren", u"Löytty", u"Maaranen", u"Magga", u"Makkonen",
u"Maksimainen", u"Malinen", u"Malm", u"Malmivirta", u"Manner", u"Manninen",
u"Mansikkala", u"Marin", u"Marjamaa", u"Marjoneva", u"Markkanen", u"Martikainen",
u"Marttila", u"Matikainen", u"Matkaselkä", u"Mattila", u"Maukonen", u"Melama",
u"Melenius", u"Mellin", u"Merikivi", u"Meriläinen", u"Merisalo", u"Meskanen",
u"Miettunen", u"Miinin", u"Mikkonen", u"Moisala", u"Moisio", u"Mononen",
u"Montonen", u"Mustonen", u"Myllymäki", u"Myllyselkä", u"Myntti", u"Myyry",
u"Mähönen", u"Mäkelä", u"Mäkeläinen", u"Mäkinen", u"Mäkitalo", u"Mänki",
u"Mäntylä", u"Märsy", u"Mättö", u"Mäyränen", u"Määttä", u"Möller",
u"Nemeth", u"Niemelä", u"Niemenmaa", u"Niemi", u"Nieminen", u"Niiranen",
u"Nikander", u"Nikkonen", u"Nikula", u"Niskanen", u"Nisula", u"Nousiainen",
u"Nummiaho", u"Nurmi", u"Nurminen", u"Nygren", u"Nykänen", u"Nylund",
u"Nyrhilä", u"Näyhä", u"Ohtamaa", u"Ojala", u"Ollila", u"Olmari",
u"Oras", u"Paajanen", u"Paalanen", u"Paananen", u"Packalen", u"Pahalahti",
u"Paimen", u"Pakkanen", u"Palo", u"Palokangas", u"Palomäki", u"Palosaari",
u"Panula", u"Pappinen", u"Parkkinen", u"Partanen", u"Parviainen", u"Pasila",
u"Paul", u"Pekkanen", u"Peltola", u"Peltonen", u"Pennala", u"Pentikäinen",
u"Penttilä", u"Perttunen", u"Perälä", u"Pesonen", u"Peuhkuri", u"Peurakoski",
u"Piesala", u"Pietarinen", u"Pietikäinen", u"Pietilä", u"Pieviläinen", u"Pihkala",
u"Pihlaja", u"Pihlajaniemi", u"Piittinen", u"Pikkarainen", u"Pirinen", u"Pirttijärvi",
u"Pitkänen", u"Pohjalainen", u"Pohjanraito", u"Pohjola", u"Pokkinen", u"Polso",
u"Portaankorva", u"Portti", u"Posti", u"Prusi", u"Pulliainen", u"Puranen",
u"Pusa", u"Pussinen", u"Pyhäjärvi", u"Pylvänäinen", u"Pölönen", u"Pöykkö",
u"Raatikainen", u"Rahikainen", u"Rainela", u"Raitanen", u"Raitmaa", u"Raittila",
u"Rajala", u"Rajamäki", u"Ranki", u"Ranta", u"Rantala", u"Rantamäki",
u"Rapo", u"Rasilainen", u"Rauhala", u"Rautiainen", u"Rehu", u"Reijonen",
u"Reunanen", u"Riikonen", u"Rimpiläinen", u"Rissanen", u"Ristilä", u"Rokka",
u"Roponen", u"Ruhanen", u"Runonen", u"Rutanen", u"Ruuhonen", u"Ruusu",
u"Ryhänen", u"Rytkönen", u"Räsänen", u"Räty", u"Rönkkö", u"Rössi",
u"Saarenmäki", u"Saarijoki", u"Saarikoski", u"Saarinen", u"Saastamoinen", u"Saine",
u"Saksa", u"Salkia", u"Salmela", u"Salmi", u"Salminen", u"Salo",
u"Salokanto", u"Salomaa", u"Salomäki", u"Salonen", u"Sand", u"Sanisalo",
u"Santala", u"Savolainen", u"Schwartz", u"Selin", u"Seppä", u"Seppälä",
u"Seppänen", u"Setälä", u"Siekkinen", u"Sievänen", u"Sihvo", u"Siironen",
u"Siitonen", u"Silfver", u"Sillanpää", u"Siltala", u"Simola", u"Simon",
u"Siniluoto", u"Sinivaara", u"Sipilä", u"Sivula", u"Sjöberg", u"Soili",
u"Soini", u"Soininen", u"Solja", u"Solkio", u"Sonck", u"Sopanen",
u"Sotejeff", u"Staven", u"Strand", u"Suckman", u"Sunell", u"Suolahti",
u"Suominen", u"Suoniitty", u"Suonvieri", u"Suorsa", u"Suvanne", u"Syreeni",
u"Syrjä", u"Syrjälä", u"Syvänen", u"Särkkä", u"Säämäki", u"Sääskilahti",
u"Södervall", u"Tahvanainen", u"Taina", u"Taipale", u"Taivalsalmi", u"Tallqvist",
u"Tamminen", u"Tammisto", u"Tanhua", u"Tanner", u"Tanskanen", u"Tapper-Veirto",
u"Tarsa", u"Tarvainen", u"Tiainen", u"Tiira", u"Tikka", u"Tikkanen",
u"Toivanen", u"Toivonen", u"Tolvanen", u"Tulonen", u"Tunkkari", u"Tuohimaa",
u"Tuomela", u"Tuomi", u"Tuomimaa", u"Tuominen", u"Tuomivaara", u"Turanlahti",
u"Turpeinen", u"Turunen", u"Tuunainen", u"Tuusa", u"Tykkä", u"Tyrväinen",
u"Tähtinen", u"Töttö", u"Urhonen", u"Uuksulainen", u"Uusitalo", u"Vaarala",
u"Vaaramaa", u"Vainio", u"Vainionpää", u"Valkeinen", u"Valkonen", u"Valtonen",
u"Valve", u"Varanka", u"Varrio", u"Varsaluoma", u"Vartiainen", u"Veijalainen",
u"Veijola", u"Velhonoja", u"Venäläinen", u"Vesala", u"Vesiluoma", u"Vestu",
u"Vierimaa", u"Viippola", u"Viitala", u"Viitanen", u"Vilkki", u"Vilppunen",
u"Vire", u"Virta", u"Virtala", u"Virtanen", u"Vitikka", u"Voipio",
u"Vuokko", u"Vuola", u"Vuollet", u"Vuorela", u"Vuorinen", u"Vähäkylä",
u"Vähämäki", u"Vähänen", u"Väisänen", u"Välimaa", u"Väänänen", u"Wahalahti",
u"Wikman", u"Yli-Hukka", u"Ylimäinen", u"Ylinen", u"Ylönen", u"Yrttikoski",
u"Äijänen", u"Ärmänen"]
def random_first_name():
    """Return one uniformly random entry from the Finnish first-name list."""
    return choice(first_names)
def random_last_name():
    """Return one uniformly random entry from the Kapsi last-name list."""
    return choice(last_names)
class MockLoggingHandler(logging.Handler):
    """Logging handler that records emitted messages per level.

    Attach it to a logger in tests, then assert on ``self.messages``.
    Based on: http://stackoverflow.com/questions/899067/how-should-i-verify-a-log-message-when-testing-python-code-under-nose/1049375#1049375
    """
    def __init__(self, *args, **kwargs):
        # Initialise the capture dict before the base handler starts routing.
        self.reset()
        logging.Handler.__init__(self, *args, **kwargs)
    def emit(self, record):
        # File the formatted message under its lower-cased level name.
        level_name = record.levelname.lower()
        self.messages[level_name].append(record.getMessage())
    def reset(self):
        """Forget every captured message; one empty bucket per level."""
        self.messages = {name: [] for name in
                         ('debug', 'info', 'warning', 'error', 'critical')}
| [
"atte.hinkka@iki.fi"
] | atte.hinkka@iki.fi |
a11fe76b80dd8631a4026939bf97fec859db6074 | 6f5abc51e09a7913d1895f48dfe23738d8378d89 | /neural networks - Copy/visualiser.py | d37d50309a4665783e83e8389747d2f9f7e008ef | [] | no_license | SiamakMarandi/water_project | ea27326bdab0485cacc9ce2c042cc727463a5c72 | 43bae48e9c49d386ea28f2ff278ff251066f09bc | refs/heads/main | 2023-02-11T07:32:51.296607 | 2021-01-13T14:08:32 | 2021-01-13T14:08:32 | 320,759,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,150 | py | import pandas as pd
import numpy as np
from sklearn.semi_supervised import LabelPropagation
from sklearn.preprocessing import OneHotEncoder
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing, metrics
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score, max_error
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from gaussrank import *
import warnings
import seaborn as sns
sns.set()
warnings.filterwarnings('ignore')
def label_write(plt, x_axis, y_axis):
    """Annotate every (x, y) point with its y value (two decimals), 10pt above.

    *plt* is any object exposing matplotlib's ``annotate``; *x_axis* and
    *y_axis* are parallel sequences of coordinates.
    """
    for pos_x, pos_y in zip(x_axis, y_axis):
        text = "{:.2f}".format(pos_y)
        plt.annotate(text,
                     (pos_x, pos_y),            # the point being labelled
                     textcoords="offset points",
                     xytext=(0, 10),            # 10 points straight up
                     ha='center')               # centre text over the point
def plotter(model, x_train, y_train, x_true, y_true):
    """Scatter ground truth against sample index and overlay model predictions.

    Note: x_train / y_train are accepted for signature compatibility but are
    not used by the plot itself.
    """
    predictions = model.predict(x_true)
    sample_index = np.arange(0, len(y_true))
    plt.scatter(sample_index, y_true, color='black', label="original")
    plt.plot(sample_index, predictions, color='blue', linewidth=3, label="predicted")
    plt.xticks(())
    plt.yticks(())
    plt.xlabel('X-axis')
    plt.ylabel('Y-axis')
    plt.legend(loc='best', fancybox=True, shadow=True)
    plt.show()
def computation_range_plotter_mae(df, msg):
    """Per computation range, keep the row with minimal MAE and plot
    'What Hour' and 'Mean Absolout Error' against the range, labelling points.
    """
    best_rows = df.loc[df.groupby("Computation Range")["Mean Absolout Error"].idxmin()]
    ranges = best_rows['Computation Range'].tolist()
    hours = best_rows['What Hour'].tolist()
    errors = best_rows['Mean Absolout Error'].tolist()
    plt.plot(ranges, hours, label='What Hour', marker='o', linewidth=2)
    label_write(plt, ranges, hours)
    plt.plot(ranges, errors, label='Mean Absolout Error', marker='o', linewidth=2)
    label_write(plt, ranges, errors)
    plt.xlabel('Computation Range')
    plt.legend()
    plt.xticks(ranges)
    plt.title(msg)
    plt.show()
def computation_range_plotter_r2(df, msg):
    """Per computation range, keep the row with minimal r2_score and plot
    'What Hour' and 'r2_score' against the range, labelling points.
    """
    best_rows = df.loc[df.groupby("Computation Range")["r2_score"].idxmin()]
    ranges = best_rows['Computation Range'].tolist()
    hours = best_rows['What Hour'].tolist()
    scores = best_rows['r2_score'].tolist()
    plt.plot(ranges, hours, label='What Hour', marker='o', linewidth=2)
    label_write(plt, ranges, hours)
    plt.plot(ranges, scores, label='r2_score', marker='o', linewidth=2)
    label_write(plt, ranges, scores)
    plt.xlabel('Computation Range')
    plt.legend()
    plt.xticks(ranges)
    plt.title(msg)
    plt.show()
# gb_plotter(gk, 'What Hour', 'Mean Absolout Error', "Chart of sum of miminum MAE of all device ID")
def gb_plotter(pk, x_lable, y_label, title):
    """Group *pk* by column *x_lable*, sum each group, draw a labelled line
    plot of the sums and annotate each point with its y value.

    (Parameter name 'x_lable' is a historical typo kept for caller
    compatibility.)

    The original computed the same groupby().sum() three times (once into an
    unused shape, once re-grouped for the plot, once for the labels); here it
    is computed once and reused -- the plotted data is identical.
    """
    grouped = pk.groupby([f'{x_lable}'], axis=0).sum()
    grouped.plot(kind="line", linewidth='2',
                 label='MAE', marker="o",
                 markerfacecolor="red", markersize=10)
    # Flatten the group index back into a column for point labelling.
    flat = grouped.reset_index()
    label_write(plt, flat[f'{x_lable}'], flat[f'{y_label}'])
    plt.xlabel(x_lable)
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend(loc='best', fancybox=True, shadow=True)
    plt.show()
| [
"Siamak"
] | Siamak |
647779096fab20072f20915b024115a8adda90e7 | c53412d10a4c14e5a5dc1ca2081fc973ba32ee35 | /week2/Какое число больше.py | a9b0d430e649a0c1c7662c8a6cf2a468b5ca9193 | [] | no_license | mxmaria/coursera_python_course | 81f85d93b6a2d8379b87bc9c7c6e7113dc246f87 | 18c6c7c4eb21d1103dada64fbb7e4add3bbc72b6 | refs/heads/master | 2021-01-05T09:17:21.609173 | 2020-02-16T21:38:22 | 2020-02-16T21:38:22 | 240,967,667 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | # Даны два целых числа.
# The program must print "1" if the first number is greater than the second,
# "2" if the second is greater than the first, or "0" if they are equal.
x = int(input())
y = int(input())
if x > y:
    print(1)
elif x < y:
    print(2)
elif x == y:
    print(0)
| [
"masha.pecherizyna@yandex.ru"
] | masha.pecherizyna@yandex.ru |
6c7033ce7711e3008a5e3037ae5fccdfc7943568 | c66b39c8974081872f6fe38e1199eaa9235f06e6 | /code/standardmnist.py | 244f56f0b55599a0cf05bc6b319d21226d116a5d | [
"MIT"
] | permissive | travers-rhodes/ricnn | e01e041498ad79c7ba5202c169cbe48612cf26ef | c5a7a15bff78b8f1495223b2bd70c4a1326e469e | refs/heads/master | 2021-05-06T22:47:54.679822 | 2017-12-07T04:21:40 | 2017-12-07T04:21:40 | 112,868,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,360 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import cv2
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.learn.python.learn.datasets.mnist import DataSet
from tensorflow.python.framework import dtypes
import tensorflow as tf
FLAGS = None
def deepnn(x):
  """deepnn builds the graph for a deep net for classifying digits.
  Args:
    x: an input tensor with the dimensions (N_examples, 784), where 784 is the
    number of pixels in a standard MNIST image.
  Returns:
    A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
    equal to the logits of classifying the digit into one of 10 classes (the
    digits 0-9). keep_prob is a scalar placeholder for the probability of
    dropout.
  """
  # Reshape to use within a convolutional neural net.
  # Last dimension is for "features" - there is only one here, since images are
  # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
  with tf.name_scope('reshape'):
    x_image = tf.reshape(x, [-1, 28, 28, 1])
  # First convolutional layer - maps one grayscale image to 32 feature maps.
  with tf.name_scope('conv1'):
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
  # Pooling layer - downsamples by 2X.  28x28 -> 14x14.
  with tf.name_scope('pool1'):
    h_pool1 = max_pool_2x2(h_conv1)
  # Second convolutional layer -- maps 32 feature maps to 64.
  with tf.name_scope('conv2'):
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
  # Second pooling layer.  14x14 -> 7x7.
  with tf.name_scope('pool2'):
    h_pool2 = max_pool_2x2(h_conv2)
  # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
  # is down to 7x7x64 feature maps -- maps this to 1024 features.
  with tf.name_scope('fc1'):
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
  # Dropout - controls the complexity of the model, prevents co-adaptation of
  # features.  keep_prob is fed at run time (1.0 disables dropout at eval).
  with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
  # Map the 1024 features to 10 classes, one for each digit.
  # No softmax here: raw logits are returned (loss applies softmax itself).
  with tf.name_scope('fc2'):
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
  return y_conv, keep_prob
def conv2d(x, W):
  """conv2d returns a 2d convolution layer with full stride."""
  # Stride 1 in every dimension; 'SAME' padding preserves spatial size.
  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
  """max_pool_2x2 downsamples a feature map by 2X."""
  # 2x2 window, 2x2 stride: halves height and width.
  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
  """Create a trainable weight tensor of *shape*, truncated-normal init
  (stddev 0.1)."""
  return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
  """Create a trainable bias tensor of *shape*, every element 0.1."""
  return tf.Variable(tf.constant(0.1, shape=shape))
def main(_):
  """Train the tutorial CNN on rotation-augmented MNIST and report accuracy
  on both the plain and a randomly rotated test set."""
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  image_size = 28
  cols=image_size
  rows=image_size
  # Build a rotated copy of the test set: each image rotated by a random
  # angle in [1, 359] degrees about the pixel-grid centre (hence the -0.5).
  rawimgs = np.reshape(mnist.test.images,(-1,image_size,image_size))
  rottest = np.zeros(rawimgs.shape)
  for ind, rawimg in enumerate(rawimgs):
    ang = np.random.randint(1, 360)
    M = cv2.getRotationMatrix2D((cols/2 - 0.5,rows/2 - 0.5),ang,1)
    rottest[ind] = cv2.warpAffine(rawimg,M,(rows,cols))
  rottest = np.reshape(rottest, (-1,image_size**2))
  # Same augmentation for the training set.
  rawtrain = np.reshape(mnist.train.images,(-1,image_size,image_size))
  rottrain = np.zeros(rawtrain.shape)
  for ind, rawimg in enumerate(rawtrain):
    ang = np.random.randint(1, 360)
    M = cv2.getRotationMatrix2D((cols/2 - 0.5,rows/2 - 0.5),ang,1)
    rottrain[ind] = cv2.warpAffine(rawimg,M,(rows,cols))
  # NOTE(review): train data is reshaped to (-1, 784, 1, 1) while test data is
  # (-1, 784); presumably DataSet(reshape=True) flattens it again -- confirm.
  rottrain = np.reshape(rottrain, (-1,image_size**2,1,1))
  dtype = dtypes.float32
  reshape=True
  seed=None
  options = dict(dtype=dtype, reshape=reshape, seed=seed)
  # Wrap the rotated training images (original labels) for next_batch().
  trainMnist = DataSet(rottrain, mnist.train.labels, **options);
  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])
  # Build the graph for the deep net
  y_conv, keep_prob = deepnn(x)
  with tf.name_scope('loss'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)
  with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
  with tf.name_scope('accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
  # Dump the graph for TensorBoard inspection.
  graph_location = tempfile.mkdtemp()
  print('Saving graph to: %s' % graph_location)
  train_writer = tf.summary.FileWriter(graph_location)
  train_writer.add_graph(tf.get_default_graph())
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # 20000 steps of 50-image minibatches; dropout keep_prob 0.5 in training.
    for i in range(20000):
      batch = trainMnist.next_batch(50)
      if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print('step %d, training accuracy %g' % (i, train_accuracy))
      train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    # Evaluate on the untouched and the rotated test sets (dropout off).
    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
    print('rotated test accuracy %g' % accuracy.eval(feed_dict={
        x: rottest, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == '__main__':
  # Parse only --data_dir; remaining argv is forwarded to tf.app.run.
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"traversr@andrew.cmu.edu"
] | traversr@andrew.cmu.edu |
3f163594b3b9b11894e0d6ab61bdba16ad9b3b76 | 22f7c253deabd23e36a854be17f0996ae7949507 | /categories/serializers.py | e44f026ff5ada458e81b34ed284aa0273fcbd123 | [] | no_license | den503/categories-api | e739c7bc73ea4bde77ae5a83c1fba8364f57ca5a | dd6c1a7a0dcba79c020525ae89ff83b60a3e36da | refs/heads/master | 2021-09-25T18:22:03.278965 | 2020-06-08T06:31:38 | 2020-06-08T06:31:38 | 251,291,176 | 0 | 0 | null | 2021-09-22T18:48:51 | 2020-03-30T11:58:32 | Python | UTF-8 | Python | false | false | 2,043 | py | from rest_framework import serializers
from .models import Category
class CategorySerializerGet(serializers.Serializer):
    """Read serializer: a category plus its ancestors, children and siblings.

    Changes vs. the original: the dead ``parent = {}`` initialiser is gone,
    the repeated ``{'id': ..., 'name': ...}`` construction is factored into a
    single helper, and the traversals use straightforward loops /
    comprehensions. The serialized output is unchanged.
    """
    id = serializers.PrimaryKeyRelatedField(read_only=True)
    name = serializers.CharField(max_length=255)
    parents = serializers.SerializerMethodField('get_parents')
    children = serializers.SerializerMethodField('get_children')
    siblings = serializers.SerializerMethodField('get_siblings')

    @staticmethod
    def _as_dict(category):
        """Minimal {'id', 'name'} representation shared by all relative lists."""
        return {'id': category.id, 'name': category.name}

    @staticmethod
    def get_parents(category):
        """Ancestors ordered from immediate parent up to the root."""
        chain = []
        node = category.parent
        while node is not None:
            chain.append(CategorySerializerGet._as_dict(node))
            node = node.parent
        return chain

    @staticmethod
    def get_children(category):
        """Direct children of *category*."""
        return [CategorySerializerGet._as_dict(child)
                for child in Category.objects.filter(parent=category)]

    @staticmethod
    def get_siblings(category):
        """Other categories sharing the same parent; empty for root nodes."""
        parent = category.parent
        if parent is None:
            return []
        return [CategorySerializerGet._as_dict(child)
                for child in Category.objects.filter(parent=parent)
                if child != category]
class CategorySerializerPost(serializers.ModelSerializer):
    """Write serializer: accepts 'name' and optional 'parent' to create a
    Category."""
    class Meta:
        model = Category
        fields = ['name', 'parent']
    def create(self, validated_data):
        # Explicit create; mirrors ModelSerializer's default behaviour.
        return Category.objects.create(**validated_data)
| [
"zaychuk.dd@gmail.com"
] | zaychuk.dd@gmail.com |
588cb0f08c24dabc182e357f3b5efba012b7b98e | bcf42041a64fdefcaec54843900c3d8f833f2215 | /emc/kb/browser/dataout.py | cf3d77f951b9bb4e11b4e53d61cd34ea9ffe24d5 | [] | no_license | adam139/emc.kb | 487650837207e0f773c077310f001a524965ee4f | ff21383762dad96ac09d414e7d1e8104c51b91f9 | refs/heads/master | 2022-01-14T09:42:49.790659 | 2020-09-22T13:16:27 | 2020-09-22T13:16:27 | 49,318,509 | 0 | 3 | null | 2016-12-26T17:37:20 | 2016-01-09T09:34:12 | Python | UTF-8 | Python | false | false | 7,602 | py | #-*- coding: UTF-8 -*-
import csv
from cStringIO import StringIO
from zope import event
from zope.component import getMultiAdapter
from five import grok
from zope.interface import implements
from zope.interface import Interface
from Products.Five.browser import BrowserView
from Products.CMFCore.utils import getToolByName
from Products.statusmessages.interfaces import IStatusMessage
import datetime
from plone import api
from emc.policy.events import AddloginEvent,NormalUserloginEvent
from emc.policy import get_ip,fmt,list2str,getfullname_orid
from emc.kb import _
# TODO: revisit the encoding (cp932 note from the original author).
# Header cells must be byte strings: the Python 2 csv module writes bytes.
# Admin-log CSV header (Chinese UI labels): subject, object, time, ip,
# level, description, result.
data_VALUES = [
    u"主体".encode('utf-8'),
    u"客体".encode('utf-8'),
    u"时间".encode('utf-8'),
    u"ip".encode('utf-8'),
    u"级别".encode('utf-8'),
    u"描述".encode('utf-8'),
    u"结果".encode('utf-8')
    ]
# User-log CSV header: same as above minus the "object" column.
userlog_header = [
    u"用户".encode('utf-8'),
    u"时间".encode('utf-8'),
    u"ip".encode('utf-8'),
    u"级别".encode('utf-8'),
    u"描述".encode('utf-8'),
    u"结果".encode('utf-8')
    ]
class AdminLogDataOut (grok.View):
    """AdminLog Data export as CSV files.

    POST-only grok view: re-runs the current log search without paging,
    writes an audit event about the export, then streams every matching
    row as a CSV attachment. (Python 2 code: relies on eager map(), byte
    StringIO and bytes CSV headers.)
    """
    grok.context(Interface)
    grok.name('export_csv')
    grok.require('zope2.View')
    def searchview(self,viewname="admin_logs"):
        # Look up the multi-adapter view that knows how to query the log store.
        searchview = getMultiAdapter((self.context, self.request),name=viewname)
        return searchview
    def render(self):
        # Only a POSTed export form is honoured; everything else redirects back.
        method = self.request.get('REQUEST_METHOD', 'GET')
        if (method != 'POST'):
            return self.request.response.redirect(self.context.absolute_url())
        if self.request.form.get('form.button.Cancel'):
            return self.request.response.redirect(self.context.absolute_url())
        searchview = self.searchview()
        # datadic receives the front-end ajax post data
        datadic = self.request.form
        start = int(datadic['start']) # batch search start position
        size = int(datadic['size']) # batch search size
        sortcolumn = datadic['sortcolumn']
        sortdirection = datadic['sortdirection']
        keyword = (datadic['searchabletext']).strip()
        origquery = {}
        # default reverse, i.e. desc
        origquery['sort_on'] = sortcolumn
        # sql db sort_order: asc, desc
        origquery['sort_order'] = sortdirection
        # fuzzy (SQL LIKE) search when a keyword was supplied
        if keyword != "":
            origquery['SearchableText'] = '%'+keyword+'%'
        else:
            origquery['SearchableText'] = ""
        # origquery provides batch (paged) search parameters
        origquery['size'] = size
        origquery['start'] = start
        # totalquery counts all matches
        totalquery = origquery.copy()
        totalquery['size'] = 0
        # with size == 0 the search returns the number of records
        totalnum = searchview.search_multicondition(totalquery)
        # re-query with size == totalnum so the export contains every match
        origquery.update({"size":totalnum})
        resultDicLists = searchview.search_multicondition(origquery)
        del origquery
        del totalquery
        if totalnum == 0: return
        # fire an audit log event about this export
        user = api.user.get_current()
        ip = get_ip(self.request)
        if user is None:
            return
        # Audit message (Chinese, UI language): "exported N rows from the
        # user log table".
        des = "从用户日志表导出了%s条日志" % totalnum
        loginEvent = NormalUserloginEvent(userid = getfullname_orid(user),
                                   datetime = datetime.datetime.now().strftime(fmt),
                                   ip = ip,
                                   type = 0,
                                   description = des,
                                   result = 1)
        if loginEvent.available():
            if loginEvent.is_normal_user():
                event.notify(loginEvent)
        else:
            # NOTE(review): non-"available" users are audited as admins
            # ("exported N rows from the admin log table") -- confirm intent.
            des = "从管理员日志表导出了%s条日志" % totalnum
            loginEvent = AddloginEvent(adminid = getfullname_orid(user),
                                   userid = "",
                                   datetime = datetime.datetime.now().strftime(fmt),
                                   ip = ip,
                                   type = 0,
                                   description = des,
                                   result = 1)
            event.notify(loginEvent)
        return self.exportData(resultDicLists)
    def exportData(self,recorders):
        """Export Data within CSV file."""
        datafile = self._createCSV(self._getDataInfos(recorders))
        return self._createRequest(datafile.getvalue(), "admin_log_export.log")
    def _getDataInfos(self,recorders):
        """Generator filled with the recorders.

        Maps the numeric kind/level/result codes of each row to their
        human-readable labels (columns 4, 5 and 7 of an admin-log row).
        """
        from emc.kb.utils import kind
        from emc.kb.utils import level as log_level
        from emc.kb.utils import result as log_result
        for i in recorders:
            i = list(i)
            i[4] = kind[i[4]]
            i[5] = log_level[i[5]]
            i[7] = log_result[i[7]]
            yield i
    def _createCSV(self, lines):
        """Write header and lines within the CSV file."""
        datafile = StringIO()
        # UTF-8 BOM so spreadsheet apps detect the encoding.
        datafile.write(u'\ufeff'.encode('utf-8'))
        writor = csv.writer(datafile)
        writor.writerow(data_VALUES)
        # Python 2: map() is eager, so this writes every row.
        map(writor.writerow, lines)
        return datafile
    def _createRequest(self, data, filename):
        """Create the request to be returned.
        Add the right header and the CSV file.
        """
        self.request.response.addHeader('Content-Disposition', "attachment; filename=%s" % filename)
        self.request.response.addHeader('Content-Type', "text/csv;charset=utf-8")
        self.request.response.addHeader("Content-Transfer-Encoding", "8bit")
        self.request.response.addHeader('Content-Length', "%d" % len(data))
        # Disable caching so every export reflects the live data.
        self.request.response.addHeader('Pragma', "no-cache")
        self.request.response.addHeader('Cache-Control', "must-revalidate, post-check=0, pre-check=0, public")
        self.request.response.addHeader('Expires', "0")
        return data
class UserLogDataOut (AdminLogDataOut):
"""UserLog Data export as CSV files.
"""
# grok.context(Interface)
grok.name('userlog_export_csv')
# grok.require('zope2.View')
    def searchview(self,viewname="user_logs"):
        # Same lookup as the admin export, but against the user-log view.
        searchview = getMultiAdapter((self.context, self.request),name=viewname)
        return searchview
    def _createCSV(self, lines):
        """Write header and lines within the CSV file.

        NOTE(review): unlike the admin export, no UTF-8 BOM is written here
        -- confirm whether that difference is intentional.
        """
        datafile = StringIO()
        writor = csv.writer(datafile)
        writor.writerow(userlog_header)
        # Python 2: map() is eager, so this writes every row.
        map(writor.writerow, lines)
        return datafile
    def exportData(self,recorders):
        """Export Data within CSV file (user-log variant of the attachment)."""
        datafile = self._createCSV(self._getDataInfos(recorders))
        return self._createRequest(datafile.getvalue(), "user_log_export.log")
def _getDataInfos(self,recorders):
"""Generator filled with the recorders."""
from emc.kb.utils import kind
from emc.kb.utils import level as log_level
from emc.kb.utils import result as log_result
for i in recorders:
i = list(i)
i[3] = kind[i[3]]
i[4] = log_level[i[4]]
i[6] = log_result[i[6]]
yield i | [
"yuejun.tang@gmail.com"
] | yuejun.tang@gmail.com |
984b0b09cf6085cacdc48e34443a94b00d6542f2 | 6c4e274f9c10ca2579329b236ff1cca27c87d673 | /cpsc4770/02-palmetto/.ipynb_checkpoints/gethostname-checkpoint.py | 113a918dfc205486691abc04d782c5b426a2395f | [] | no_license | xueyun-ys/gpgpu | ebcfbb80645a58391607d38532ac518f26b8ae7c | 32672e791cecb253e53a336eae6ce82aecc9a1e7 | refs/heads/master | 2023-06-11T12:14:36.774123 | 2021-07-01T16:46:17 | 2021-07-01T16:46:17 | 382,035,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | #gethostname.py
import socket

# Announce which machine this process is running on.
host_name = socket.gethostname()
print("hello world from host %s" % host_name)
| [
"xueyun.ys@gmail.com"
] | xueyun.ys@gmail.com |
7d8d2a7d613ecbd9087ac3588eca08034858f9f9 | 1ee90596d52554cb4ef51883c79093897f5279a0 | /Sisteme/[C++]System Pet OFFICIAL/uipetsystem.py | 4772d43117166548d16ed66d4b2f02322ab6c6fd | [] | no_license | Reizonr1/metin2-adv | bf7ecb26352b13641cd69b982a48a6b20061979a | 5c2c096015ef3971a2f1121b54e33358d973c694 | refs/heads/master | 2022-04-05T20:50:38.176241 | 2020-03-03T18:20:58 | 2020-03-03T18:20:58 | 233,462,795 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 25,267 | py | import os
import ui
import player
import mouseModule
import net
import app
import snd
import item
import player
import chat
import grp
import uiScriptLocale
import localeInfo
import constInfo
import ime
import wndMgr
import petskill
import uipetfeed
import uiToolTip
import uipetsystem
import interfaceModule
# Maps item APPLY_* bonus identifiers to the locale formatting callable that
# renders the bonus line shown in tooltips (used by GetAffectString below).
AFFECT_DICT = {
    item.APPLY_MAX_HP : localeInfo.TOOLTIP_MAX_HP,
    item.APPLY_MAX_SP : localeInfo.TOOLTIP_MAX_SP,
    item.APPLY_CON : localeInfo.TOOLTIP_CON,
    item.APPLY_INT : localeInfo.TOOLTIP_INT,
    item.APPLY_STR : localeInfo.TOOLTIP_STR,
    item.APPLY_DEX : localeInfo.TOOLTIP_DEX,
    item.APPLY_ATT_SPEED : localeInfo.TOOLTIP_ATT_SPEED,
    item.APPLY_MOV_SPEED : localeInfo.TOOLTIP_MOV_SPEED,
    item.APPLY_CAST_SPEED : localeInfo.TOOLTIP_CAST_SPEED,
    item.APPLY_HP_REGEN : localeInfo.TOOLTIP_HP_REGEN,
    item.APPLY_SP_REGEN : localeInfo.TOOLTIP_SP_REGEN,
    item.APPLY_POISON_PCT : localeInfo.TOOLTIP_APPLY_POISON_PCT,
    item.APPLY_STUN_PCT : localeInfo.TOOLTIP_APPLY_STUN_PCT,
    item.APPLY_SLOW_PCT : localeInfo.TOOLTIP_APPLY_SLOW_PCT,
    item.APPLY_CRITICAL_PCT : localeInfo.TOOLTIP_APPLY_CRITICAL_PCT,
    item.APPLY_PENETRATE_PCT : localeInfo.TOOLTIP_APPLY_PENETRATE_PCT,
    item.APPLY_ATTBONUS_WARRIOR : localeInfo.TOOLTIP_APPLY_ATTBONUS_WARRIOR,
    item.APPLY_ATTBONUS_ASSASSIN : localeInfo.TOOLTIP_APPLY_ATTBONUS_ASSASSIN,
    item.APPLY_ATTBONUS_SURA : localeInfo.TOOLTIP_APPLY_ATTBONUS_SURA,
    item.APPLY_ATTBONUS_SHAMAN : localeInfo.TOOLTIP_APPLY_ATTBONUS_SHAMAN,
    item.APPLY_ATTBONUS_MONSTER : localeInfo.TOOLTIP_APPLY_ATTBONUS_MONSTER,
    item.APPLY_ATTBONUS_HUMAN : localeInfo.TOOLTIP_APPLY_ATTBONUS_HUMAN,
    item.APPLY_ATTBONUS_ANIMAL : localeInfo.TOOLTIP_APPLY_ATTBONUS_ANIMAL,
    item.APPLY_ATTBONUS_ORC : localeInfo.TOOLTIP_APPLY_ATTBONUS_ORC,
    item.APPLY_ATTBONUS_MILGYO : localeInfo.TOOLTIP_APPLY_ATTBONUS_MILGYO,
    item.APPLY_ATTBONUS_UNDEAD : localeInfo.TOOLTIP_APPLY_ATTBONUS_UNDEAD,
    item.APPLY_ATTBONUS_DEVIL : localeInfo.TOOLTIP_APPLY_ATTBONUS_DEVIL,
    item.APPLY_STEAL_HP : localeInfo.TOOLTIP_APPLY_STEAL_HP,
    item.APPLY_STEAL_SP : localeInfo.TOOLTIP_APPLY_STEAL_SP,
    item.APPLY_MANA_BURN_PCT : localeInfo.TOOLTIP_APPLY_MANA_BURN_PCT,
    item.APPLY_DAMAGE_SP_RECOVER : localeInfo.TOOLTIP_APPLY_DAMAGE_SP_RECOVER,
    item.APPLY_BLOCK : localeInfo.TOOLTIP_APPLY_BLOCK,
    item.APPLY_DODGE : localeInfo.TOOLTIP_APPLY_DODGE,
    item.APPLY_RESIST_SWORD : localeInfo.TOOLTIP_APPLY_RESIST_SWORD,
    item.APPLY_RESIST_TWOHAND : localeInfo.TOOLTIP_APPLY_RESIST_TWOHAND,
    item.APPLY_RESIST_DAGGER : localeInfo.TOOLTIP_APPLY_RESIST_DAGGER,
    item.APPLY_RESIST_BELL : localeInfo.TOOLTIP_APPLY_RESIST_BELL,
    item.APPLY_RESIST_FAN : localeInfo.TOOLTIP_APPLY_RESIST_FAN,
    item.APPLY_RESIST_BOW : localeInfo.TOOLTIP_RESIST_BOW,
    item.APPLY_RESIST_FIRE : localeInfo.TOOLTIP_RESIST_FIRE,
    item.APPLY_RESIST_ELEC : localeInfo.TOOLTIP_RESIST_ELEC,
    item.APPLY_RESIST_MAGIC : localeInfo.TOOLTIP_RESIST_MAGIC,
    item.APPLY_RESIST_WIND : localeInfo.TOOLTIP_APPLY_RESIST_WIND,
    item.APPLY_REFLECT_MELEE : localeInfo.TOOLTIP_APPLY_REFLECT_MELEE,
    item.APPLY_REFLECT_CURSE : localeInfo.TOOLTIP_APPLY_REFLECT_CURSE,
    item.APPLY_POISON_REDUCE : localeInfo.TOOLTIP_APPLY_POISON_REDUCE,
    item.APPLY_KILL_SP_RECOVER : localeInfo.TOOLTIP_APPLY_KILL_SP_RECOVER,
    item.APPLY_EXP_DOUBLE_BONUS : localeInfo.TOOLTIP_APPLY_EXP_DOUBLE_BONUS,
    item.APPLY_GOLD_DOUBLE_BONUS : localeInfo.TOOLTIP_APPLY_GOLD_DOUBLE_BONUS,
    item.APPLY_ITEM_DROP_BONUS : localeInfo.TOOLTIP_APPLY_ITEM_DROP_BONUS,
    item.APPLY_POTION_BONUS : localeInfo.TOOLTIP_APPLY_POTION_BONUS,
    item.APPLY_KILL_HP_RECOVER : localeInfo.TOOLTIP_APPLY_KILL_HP_RECOVER,
    item.APPLY_IMMUNE_STUN : localeInfo.TOOLTIP_APPLY_IMMUNE_STUN,
    item.APPLY_IMMUNE_SLOW : localeInfo.TOOLTIP_APPLY_IMMUNE_SLOW,
    item.APPLY_IMMUNE_FALL : localeInfo.TOOLTIP_APPLY_IMMUNE_FALL,
    item.APPLY_BOW_DISTANCE : localeInfo.TOOLTIP_BOW_DISTANCE,
    item.APPLY_DEF_GRADE_BONUS : localeInfo.TOOLTIP_DEF_GRADE,
    item.APPLY_ATT_GRADE_BONUS : localeInfo.TOOLTIP_ATT_GRADE,
    item.APPLY_MAGIC_ATT_GRADE : localeInfo.TOOLTIP_MAGIC_ATT_GRADE,
    item.APPLY_MAGIC_DEF_GRADE : localeInfo.TOOLTIP_MAGIC_DEF_GRADE,
    item.APPLY_MAX_STAMINA : localeInfo.TOOLTIP_MAX_STAMINA,
    item.APPLY_MALL_ATTBONUS : localeInfo.TOOLTIP_MALL_ATTBONUS,
    item.APPLY_MALL_DEFBONUS : localeInfo.TOOLTIP_MALL_DEFBONUS,
    item.APPLY_MALL_EXPBONUS : localeInfo.TOOLTIP_MALL_EXPBONUS,
    item.APPLY_MALL_ITEMBONUS : localeInfo.TOOLTIP_MALL_ITEMBONUS,
    item.APPLY_MALL_GOLDBONUS : localeInfo.TOOLTIP_MALL_GOLDBONUS,
    item.APPLY_SKILL_DAMAGE_BONUS : localeInfo.TOOLTIP_SKILL_DAMAGE_BONUS,
    item.APPLY_NORMAL_HIT_DAMAGE_BONUS : localeInfo.TOOLTIP_NORMAL_HIT_DAMAGE_BONUS,
    item.APPLY_SKILL_DEFEND_BONUS : localeInfo.TOOLTIP_SKILL_DEFEND_BONUS,
    item.APPLY_NORMAL_HIT_DEFEND_BONUS : localeInfo.TOOLTIP_NORMAL_HIT_DEFEND_BONUS,
    item.APPLY_PC_BANG_EXP_BONUS : localeInfo.TOOLTIP_MALL_EXPBONUS_P_STATIC,
    item.APPLY_PC_BANG_DROP_BONUS : localeInfo.TOOLTIP_MALL_ITEMBONUS_P_STATIC,
    item.APPLY_RESIST_WARRIOR : localeInfo.TOOLTIP_APPLY_RESIST_WARRIOR,
    item.APPLY_RESIST_ASSASSIN : localeInfo.TOOLTIP_APPLY_RESIST_ASSASSIN,
    item.APPLY_RESIST_SURA : localeInfo.TOOLTIP_APPLY_RESIST_SURA,
    item.APPLY_RESIST_SHAMAN : localeInfo.TOOLTIP_APPLY_RESIST_SHAMAN,
    item.APPLY_MAX_HP_PCT : localeInfo.TOOLTIP_APPLY_MAX_HP_PCT,
    item.APPLY_MAX_SP_PCT : localeInfo.TOOLTIP_APPLY_MAX_SP_PCT,
    item.APPLY_ENERGY : localeInfo.TOOLTIP_ENERGY,
    item.APPLY_COSTUME_ATTR_BONUS : localeInfo.TOOLTIP_COSTUME_ATTR_BONUS,
    item.APPLY_MAGIC_ATTBONUS_PER : localeInfo.TOOLTIP_MAGIC_ATTBONUS_PER,
    item.APPLY_MELEE_MAGIC_ATTBONUS_PER : localeInfo.TOOLTIP_MELEE_MAGIC_ATTBONUS_PER,
    item.APPLY_RESIST_ICE : localeInfo.TOOLTIP_RESIST_ICE,
    item.APPLY_RESIST_EARTH : localeInfo.TOOLTIP_RESIST_EARTH,
    item.APPLY_RESIST_DARK : localeInfo.TOOLTIP_RESIST_DARK,
    item.APPLY_ANTI_CRITICAL_PCT : localeInfo.TOOLTIP_ANTI_CRITICAL_PCT,
    item.APPLY_ANTI_PENETRATE_PCT : localeInfo.TOOLTIP_ANTI_PENETRATE_PCT,
}
def checkdiv(n):
    """Divide *n* by ten and return the result as a string clipped to
    three characters (e.g. 15 -> "1.5", 123 -> "12.")."""
    text = str(n / 10.0)
    return text[:3] if len(text) > 3 else text
def pointop(n):
    """Insert a decimal point before the last digit of the numeric string
    *n* (e.g. "123" -> "12.3"); values below ten are rendered "0.x"."""
    if int(n) < 10:
        return "0." + n
    return n[:-1] + "." + n[-1:]
def GetAffectString(affectType, affectValue):
    """Render one bonus line via AFFECT_DICT; None for zero type/value,
    and an UNKNOWN_* marker string for unmapped or non-callable entries."""
    if affectType == 0 or affectValue == 0:
        return None
    try:
        formatter = AFFECT_DICT[affectType]
        return formatter(affectValue)
    except KeyError:
        return "UNKNOWN_TYPE[%s] %s" % (affectType, affectValue)
    except TypeError:
        return "UNKNOWN_VALUE[%s] %s" % (affectType, affectValue)
class PetSystemMain(ui.ScriptWindow):
    """Main pet information window: portrait slot, evolution/pet name,
    level, age and lifetime, HP/DEF/SP bonus percentages, the three pet
    skill slots, the segmented EXP gauge and the feed/evolve buttons."""

    class TextToolTip(ui.Window):
        """A single outlined, top-most text line that follows the mouse;
        *y* staggers several lines so they can be stacked vertically."""

        def __init__(self, y):
            ui.Window.__init__(self, "TOP_MOST")
            textLine = ui.TextLine()
            textLine.SetParent(self)
            textLine.SetHorizontalAlignLeft()
            textLine.SetOutline()
            textLine.Show()
            self.y = y
            self.textLine = textLine

        def __del__(self):
            ui.Window.__del__(self)

        def SetText(self, text):
            self.textLine.SetText(text)

        def OnRender(self):
            # Re-anchor to the mouse cursor every frame, 60px above it
            # minus this line's stacking offset.
            (mouseX, mouseY) = wndMgr.GetMousePosition()
            self.textLine.SetPosition(mouseX, mouseY - 60 + self.y)

    def __init__(self, vnum = 0):
        ui.ScriptWindow.__init__(self)
        self.vnum = vnum  # pet item vnum; 0 means "no pet"
        self.__LoadWindow()

    def __del__(self):
        ui.ScriptWindow.__del__(self)

    def Show(self):
        ui.ScriptWindow.Show(self)

    def Close(self):
        """Hide the window, clear the global open-flag and close the feed window."""
        self.Hide()
        constInfo.PET_MAIN = 0
        self.feedwind.Close()

    def __LoadWindow(self):
        """Load the layout script, look up child widgets and wire events."""
        try:
            pyScrLoader = ui.PythonScriptLoader()
            pyScrLoader.LoadScriptFile(self, "uiscript/PetInformationWindow.py")
        except:
            import exception
            exception.Abort("PetInformationWindow.LoadWindow.LoadObject")
        try:
            self.feedwind = uipetfeed.PetFeedWindow()
            self.board = self.GetChild("board")
            self.boardclose = self.GetChild("CloseButton")
            self.slotimgpet = self.GetChild("UpBringing_Pet_Slot")
            self.evolname = self.GetChild("EvolName")
            self.petname = self.GetChild("PetName")
            self.expwind = self.GetChild("UpBringing_Pet_EXP_Gauge_Board")
            # Four stacked mouse tooltips describing the EXP gauge.
            self.tooltipexp = []
            for i in range(0,4):
                self.tooltipexp.append(self.TextToolTip(15*i))
                self.tooltipexp[i].Hide()
            self.petlifeg = self.GetChild("LifeGauge")
            self.petlevel = self.GetChild("LevelValue")
            self.petexpa = self.GetChild("UpBringing_Pet_EXPGauge_01")
            self.petexpb = self.GetChild("UpBringing_Pet_EXPGauge_02")
            self.petexpc = self.GetChild("UpBringing_Pet_EXPGauge_03")
            self.petexpd = self.GetChild("UpBringing_Pet_EXPGauge_04")
            self.petexpe = self.GetChild("UpBringing_Pet_EXPGauge_05")
            # Segments 0-3 show main EXP quarters; segment 4 is the
            # separate "I" (item) EXP bar (see SetExperience).
            self.petexppages = []
            self.petexppages.append(self.petexpa)
            self.petexppages.append(self.petexpb)
            self.petexppages.append(self.petexpc)
            self.petexppages.append(self.petexpd)
            self.petexppages.append(self.petexpe)
            for exp in self.petexppages:
                exp.SetSize(0, 0)
                #exp.Hide()
            self.petages = self.GetChild("AgeValue")
            self.petdur = self.GetChild("LifeTextValue")
            #gaugehp
            self.nutribtn = self.GetChild("FeedLifeTimeButton")
            self.sviluppobtn = self.GetChild("FeedEvolButton")
            self.itemexp = self.GetChild("FeedExpButton")
            self.pethp = self.GetChild("HpValue")
            self.petdef = self.GetChild("DefValue")
            self.petsp = self.GetChild("SpValue")
            self.petskill0 = self.GetChild("PetSkillSlot0")
            #self.petskill0.SetPetSkillSlot(0, 2, 10)
            #self.petskill0.SetPetSkillSlot(1, 11, 10)
            #self.petskill0.SetPetSkillSlot(2, 5, 10)
            self.petskill0.SetSlot(0, 2, 32, 32, petskill.GetEmptySkill())
            self.petskill0.SetSlot(1, 2, 32, 32, petskill.GetEmptySkill())
            self.petskill0.SetSlot(2, 2, 32, 32, petskill.GetEmptySkill())
            #self.petskill0.SetCoverButton(0)
            #self.petskill0.SetCoverButton(1)
            #self.petskill0.SetCoverButton(2)
            #self.petskill0.SetAlwaysRenderCoverButton(0, TRUE)
            #self.petskill0.SetAlwaysRenderCoverButton(1, TRUE)
            #self.petskill0.SetAlwaysRenderCoverButton(2, TRUE)
            self.petskill0.SetSelectItemSlotEvent(ui.__mem_func__(self.UseSkill))
            self.petskill0.SetUseSlotEvent(ui.__mem_func__(self.UseSkill))
            self.petskill0.SetOverInItemEvent(ui.__mem_func__(self.PetSkillTooltipShow))
            self.petskill0.SetOverOutItemEvent(ui.__mem_func__(self.PetSkillTooltipHide))
            self.SetDefaultInfo()
            # Per skill slot: [skill index, skill level]; -1/-1 = empty.
            self.arrytooltip = [ [-1,-1], [-1,-1], [-1,-1]]
            # Skill names/descriptions and per-level bonus tables are read
            # from the client locale pack.
            PET_FILE_NAME = "%s/pet_skill.txt" % app.GetLocalePath()
            PET_FILE_SKILL = "%s/pet_skill_bonus.txt" % app.GetLocalePath()
            self.linespet = pack_open(PET_FILE_NAME, "r").readlines()
            self.linespetskill = pack_open(PET_FILE_SKILL, "r").readlines()
            self.SkillTooltip = uiToolTip.ToolTip(180)
            #Event
            self.boardclose.SetEvent(ui.__mem_func__(self.Close,))
            # Toggle-down opens the matching feed box (btn 1 = lifetime,
            # btn 3 = exp food); toggle-up closes it again.
            self.nutribtn.SetToggleDownEvent(lambda arg=0,arg1=1: self.OpenFeedBox(arg,arg1))
            self.nutribtn.SetToggleUpEvent(lambda arg=1,arg1=0: self.OpenFeedBox(arg,arg1))
            self.itemexp.SetToggleDownEvent(lambda arg=0,arg1=3: self.OpenFeedBox(arg,arg1))
            self.itemexp.SetToggleUpEvent(lambda arg=1,arg1=0: self.OpenFeedBox(arg,arg1))
            self.sviluppobtn.SetToggleDownEvent(lambda arg=0: self.evolution(arg))
            self.sviluppobtn.SetToggleUpEvent(lambda arg=1: self.evolution(arg))
        except:
            import exception
            exception.Abort("PetInformationWindow.LoadWindow.BindObject")

    def PetSkillTooltipShow(self, slot):
        """Build and show the tooltip for the hovered skill *slot*."""
        if self.arrytooltip[slot][0] > 0:
            # Both files are indexed 1-based by skill id; strip the
            # trailing newline and split the tab-separated columns.
            tokens = self.linespet[self.arrytooltip[slot][0]-1][:-1].split("\t")
            tokens2 = self.linespetskill[self.arrytooltip[slot][0]-1][:-1].split("\t")
            self.SkillTooltip.ClearToolTip()
            self.SkillTooltip.AutoAppendTextLine(tokens[1], grp.GenerateColor(0.9490, 0.9058, 0.7568, 1.0))
            self.SkillTooltip.AppendDescription(tokens[4], 26)
            self.SkillTooltip.AppendSpace(5)
            # Skills 10 (HP restore) and 17 (immortality) use special
            # wording; 18 gets no bonus line; others render a standard
            # bonus line via GetAffectString.
            if self.arrytooltip[slot][0] != 10 and self.arrytooltip[slot][0] != 17 and self.arrytooltip[slot][0] != 18:
                self.SkillTooltip.AutoAppendTextLine(GetAffectString(int(tokens2[1]), int(tokens2[self.arrytooltip[slot][1]+1])))
            elif self.arrytooltip[slot][0] == 10:
                self.SkillTooltip.AutoAppendTextLine("Hp Restored:" + str(tokens2[self.arrytooltip[slot][1]+1]))
            elif self.arrytooltip[slot][0] == 17:
                self.SkillTooltip.AutoAppendTextLine("Immortality Time:" + checkdiv(int(tokens2[self.arrytooltip[slot][1]+1])) + "s")
            self.SkillTooltip.AutoAppendTextLine("Cooldown: "+tokens[5]+"s", grp.GenerateColor(1.0, 0.7843, 0.0, 1.0))
            self.SkillTooltip.AlignHorizonalCenter()
            self.SkillTooltip.ShowToolTip()

    def PetSkillTooltipHide(self):
        self.SkillTooltip.HideToolTip()

    def evolution(self, mode):
        """Evolve-button toggle handler; mode 0 = pressed (sends the
        evolution chat command to the server)."""
        if mode == 0:
            net.SendChatPacket("/petvoincrease")
        self.sviluppobtn.Enable()
        #self.SkillTooltip.HideToolTip()

    def SetDefaultInfo(self):
        """Reset every widget to the empty "no pet" state and disable buttons."""
        self.evolname.SetText("")
        self.petname.SetText("")
        self.petlevel.SetText("")
        self.petages.SetText("")
        self.petdur.SetText("")
        self.pethp.SetText("")
        self.petdef.SetText("")
        self.petsp.SetText("")
        self.SetDuration("0", "0")
        self.slotimgpet.ClearSlot(0)
        self.petskill0.ClearSlot(0)
        self.petskill0.ClearSlot(1)
        self.petskill0.ClearSlot(2)
        self.petskill0.SetSlot(0, 2, 32, 32, petskill.GetEmptySkill())
        self.petskill0.SetSlot(1, 2, 32, 32, petskill.GetEmptySkill())
        self.petskill0.SetSlot(2, 2, 32, 32, petskill.GetEmptySkill())
        self.SetExperience(0,0,0)
        self.arrytooltip = [ [-1,-1], [-1,-1], [-1,-1]]
        self.nutribtn.Disable()
        self.sviluppobtn.Disable()
        self.itemexp.Disable()

    def OpenFeedBox(self, mode, btn):
        """Show/hide the feed window. mode: 0 = button down, 1 = button up;
        btn identifies the feed category (1 = lifetime, 3 = exp, 0 = none)."""
        if constInfo.FEEDWIND == btn or constInfo.FEEDWIND == 0:
            if mode == 0:
                self.feedwind.Show()
                constInfo.FEEDWIND = btn
            else:
                self.feedwind.Close()
                constInfo.FEEDWIND = 0
        else:
            # A different feed category is open: re-enable the buttons
            # and close it.
            self.nutribtn.Enable()
            self.sviluppobtn.Enable()
            self.itemexp.Enable()
            self.feedwind.Close()
            constInfo.FEEDWIND = 0

    def SetImageSlot(self, vnum):
        """Show the pet item icon for *vnum* in the portrait slot."""
        self.slotimgpet.SetItemSlot(0, int(vnum), 0)
        self.slotimgpet.SetAlwaysRenderCoverButton(0, TRUE)

    def SetEvolveName(self, name):
        self.evolname.SetText(name)

    def SetName(self, name):
        """Set the pet name; an empty name means "no pet" and disables
        the feed/evolve buttons."""
        if name != "":
            self.nutribtn.Enable()
            self.sviluppobtn.Enable()
            self.itemexp.Enable()
            #pet.SetTop()
        else:
            self.nutribtn.Disable()
            self.sviluppobtn.Disable()
            self.itemexp.Disable()
        self.petname.SetText(name)

    def SetLevel(self, level):
        """Update the level label and publish evolution-ready levels.

        NOTE(review): 40/60/80 look like the evolution thresholds — confirm
        against the server-side pet implementation.
        """
        if int(level) == 40 or int(level) == 60 or int(level) == 80:
            constInfo.EVOLUTION = int(level)
        else:
            constInfo.EVOLUTION = 0
        self.petlevel.SetText(level)

    def SetAges(self, ages):
        self.petages.SetText(ages)

    def SetDuration(self, dur, durt):
        """Update the lifetime display. *dur*/*durt* are numeric strings,
        presumably current/total lifetime in minutes (shown as hours, and
        the remaining span as whole days via SetAges) — TODO confirm units.
        """
        dur1 = int(dur)/60
        durt1 = int(durt)/60
        tmpage = int((int(durt)/60 -int(dur) /60)/24)
        if int(dur) > 0:
            self.petlifeg.SetPercentage(int(dur)*1.6, int(durt))
            self.petlifeg.Show()
        else:
            self.petlifeg.Hide()
        self.petdur.SetText(str(dur1)+"/"+str(durt1)+" Hours")
        self.SetAges(str(tmpage)+"Days")

    def SetHp(self, hp):
        # hp is a numeric string; pointop renders it with one decimal.
        self.pethp.SetText(pointop(hp)+"%")

    def SetDef(self, deff):
        self.petdef.SetText(pointop(deff)+"%")

    def SetSp(self, sp):
        self.petsp.SetText(pointop(sp)+"%")

    def SetSkill(self, slot, idx, lv):
        """Place skill *idx* at level *lv* into *slot*; idx -1 = leave empty."""
        if int(idx) != -1:
            self.petskill0.ClearSlot(int(slot))
            self.petskill0.SetPetSkillSlot(int(slot), int(idx), int(lv))
            self.petskill0.SetCoverButton(int(slot))
            self.petskill0.SetAlwaysRenderCoverButton(int(slot), TRUE)
            self.arrytooltip[int(slot)][0] = int(idx)
            self.arrytooltip[int(slot)][1] = int(lv)
        #chat.AppendChat(chat.CHAT_TYPE_INFO, "Slot:"+str(slot)+" idx: "+str(idx)+" Lv:"+str(lv))

    def SetExperience(self, expm, expi, exptot):
        """Render the EXP gauge. *exptot* is split 90% main EXP (four
        quarter segments) / 10% item EXP (the fifth segment); the tooltip
        lines show the raw numbers and percentages."""
        expm = int(expm)
        expi = int(expi)
        exptot = int(exptot)
        if exptot > 0:
            totalexp = exptot
            totexpm = int( float(totalexp) / 100 * 90 )
            totexpi = totalexp - totexpm
            expi = min(expi, totexpi)
            expmp = float(expm) / totexpm * 100
            expip = float(expi) / totexpi * 100
        else:
            totalexp = 0
            totexpm = 0
            totexpi = 0
            expmp = 0
            expip = 0
        # NOTE(review): the second assignment overrides the first, so only
        # the lower clamp (>= 0) actually takes effect here.
        curPoint = int(min(expm, totexpm))
        curPoint = int(max(expm, 0))
        maxPoint = int(max(totexpm, 0))
        curPointi = int(min(expi, totexpi))
        curPointi = int(max(expi, 0))
        maxPointi = int(max(totexpi, 0))
        quarterPoint = maxPoint / 4
        quarterPointi = maxPointi
        FullCount = 0
        FullCounti = 0
        if 0 != quarterPoint:
            FullCount = min(4, curPoint / quarterPoint)
        if 0 != quarterPointi:
            FullCounti = min(1, curPointi / quarterPointi)
        for i in xrange(4):
            self.petexppages[i].Hide()
        self.petexppages[4].Hide()
        # Completely filled segments are shown with a full rendering rect.
        for i in xrange(FullCount):
            self.petexppages[i].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
            self.petexppages[i].Show()
        for i in xrange(FullCounti):
            self.petexppages[4].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
            self.petexppages[4].Show()
        # The partially filled segment is clipped proportionally.
        if 0 != quarterPoint:
            if FullCount < 4:
                Percentage = float(curPoint % quarterPoint) / quarterPoint - 1.0
                self.petexppages[FullCount].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
                self.petexppages[FullCount].Show()
        if 0 != quarterPointi:
            if FullCounti < 1:
                Percentage = float(curPointi % quarterPointi) / quarterPointi - 1.0
                self.petexppages[4].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
                self.petexppages[4].Show()
        #chat.AppendChat(chat.CHAT_TYPE_INFO, str(curPoint)+"-"+str(maxPoint)+"-"+str(FullCount)+"--"+str(quarterPoint))
        #####
        self.tooltipexp[0].SetText("Experience : %d of %d" % (expm, totexpm))
        self.tooltipexp[1].SetText("Experience : %.2f%%" % expmp)
        self.tooltipexp[2].SetText("ExperienceI : %d of %d" % (expi, totexpi))
        self.tooltipexp[3].SetText("ExperienceI : %.2f%%" % expip)

    def UseSkill(self, slot):
        """Ask the server to cast the pet skill sitting in *slot*."""
        #chat.AppendChat(chat.CHAT_TYPE_INFO, "+ --> "+str(slot))
        #chat.AppendChat(chat.CHAT_TYPE_INFO, "Skill: "+ str(petskill.GetSkillbySlot(slot)))
        net.SendChatPacket("/petskills "+str(slot))

    def OnUpdate(self):
        # Re-enable the feed buttons whenever no feed box is open.
        if constInfo.FEEDWIND == 0:
            self.nutribtn.Enable()
            #self.sviluppobtn.Enable()
            self.itemexp.Enable()
        # Show the EXP tooltips only while hovering the gauge board.
        if TRUE == self.expwind.IsIn():
            for i in range(0,4):
                self.tooltipexp[i].Show()
        else:
            for i in range(0,4):
                self.tooltipexp[i].Hide()
class PetSystemMini(ui.ScriptWindow):
    """Compact pet HUD: portrait slot (click opens the full window via
    /gift), lifetime gauge, mini EXP gauge and the three skill slots at
    half scale, plus an animation shown when evolution is available."""

    class TextToolTip(ui.Window):
        """A single outlined, top-most text line that follows the mouse;
        *y* staggers several lines so they can be stacked vertically."""

        def __init__(self, y):
            ui.Window.__init__(self, "TOP_MOST")
            textLine = ui.TextLine()
            textLine.SetParent(self)
            textLine.SetHorizontalAlignLeft()
            textLine.SetOutline()
            textLine.Show()
            self.y = y
            self.textLine = textLine

        def __del__(self):
            ui.Window.__del__(self)

        def SetText(self, text):
            self.textLine.SetText(text)

        def OnRender(self):
            # Re-anchor to the mouse cursor every frame.
            (mouseX, mouseY) = wndMgr.GetMousePosition()
            self.textLine.SetPosition(mouseX, mouseY - 60 + self.y)

    def __init__(self, vnum = 0):
        ui.ScriptWindow.__init__(self)
        self.vnum = vnum  # pet item vnum; 0 means "no pet"
        self.__LoadWindow()

    def __del__(self):
        ui.ScriptWindow.__del__(self)

    def Show(self):
        ui.ScriptWindow.Show(self)

    def Close(self):
        self.Hide()

    def __LoadWindow(self):
        """Load the layout script, look up child widgets and wire events."""
        try:
            pyScrLoader = ui.PythonScriptLoader()
            pyScrLoader.LoadScriptFile(self, "uiscript/PetMiniInformationWindow.py")
        except:
            import exception
            exception.Abort("PetMiniInformationWindow.LoadWindow.LoadObject")
        try:
            self.expwind = self.GetChild("pet_mini_info_exp_gauge_board")
            self.expwind1 = self.GetChild("pet_mini_info_exp_gauge_board1")
            self.mainbg = self.GetChild("main_bg")
            self.mainicon = self.GetChild("main_slot_img")
            self.main_slot_img = self.GetChild("pet_icon_slot")
            # Four stacked mouse tooltips describing the EXP gauge.
            self.tooltipexp = []
            for i in range(0,4):
                self.tooltipexp.append(self.TextToolTip(15*i))
                self.tooltipexp[i].Hide()
            # Blinking overlay shown when the pet can evolve (see OnUpdate).
            self.pet_icon_slot_ani_img = self.GetChild("pet_icon_slot_ani_img")
            self.pet_mini_exp_01 = self.GetChild("pet_mini_EXPGauge_01")
            self.pet_mini_exp_02 = self.GetChild("pet_mini_EXPGauge_02")
            self.pet_mini_exp_03 = self.GetChild("pet_mini_EXPGauge_03")
            self.pet_mini_exp_04 = self.GetChild("pet_mini_EXPGauge_04")
            self.pet_mini_exp_05 = self.GetChild("pet_mini_EXPGauge_05")
            # Segments 0-3 show main EXP quarters; segment 4 is the item EXP.
            self.petmini_exp = []
            self.petmini_exp.append(self.pet_mini_exp_01)
            self.petmini_exp.append(self.pet_mini_exp_02)
            self.petmini_exp.append(self.pet_mini_exp_03)
            self.petmini_exp.append(self.pet_mini_exp_04)
            self.petmini_exp.append(self.pet_mini_exp_05)
            self.petlifeg = self.GetChild("LifeGauge")
            self.pet_icon_slot_ani_img.Hide()
            self.skillslot = self.GetChild("mini_skill_slot0")
            #self.skillslot.SetSlotScale(0, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
            #self.skillslot.SetSlotScale(1, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
            #self.skillslot.SetSlotScale(2, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
            self.skillslot.SetSelectItemSlotEvent(ui.__mem_func__(self.UseSkill))
            self.skillslot.SetUseSlotEvent(ui.__mem_func__(self.UseSkill))
            # Clicking the portrait opens the full pet window.
            self.main_slot_img.SetUseSlotEvent(ui.__mem_func__(self.OpenPet))
            self.main_slot_img.SetSelectItemSlotEvent(ui.__mem_func__(self.OpenPet))
            self.SetDefaultInfo()
            #self.mainbg.Show()
        except:
            import exception
            exception.Abort("PetMiniInformationWindow.LoadWindow.BindObject")

    def SetDefaultInfo(self):
        """Reset gauges and skill slots to the empty "no pet" state."""
        self.SetDuration("0", "0")
        self.main_slot_img.ClearSlot(0)
        self.skillslot.ClearSlot(0)
        self.skillslot.ClearSlot(1)
        self.skillslot.ClearSlot(2)
        self.skillslot.SetSlotScale(0, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
        self.skillslot.SetSlotScale(1, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
        self.skillslot.SetSlotScale(2, 2, 16, 16, petskill.GetEmptySkill(), 0.5, 0.5)
        self.SetExperience(0,0,0)

    def OpenPet(self):
        """Request the server to open the full pet window."""
        net.SendChatPacket("/gift")

    def SetImageSlot(self, vnum):
        """Show the pet item icon for *vnum* in the portrait slot."""
        self.main_slot_img.SetItemSlot(0, int(vnum), 0)
        self.main_slot_img.SetAlwaysRenderCoverButton(0, TRUE)

    def SetDuration(self, dur, durt):
        """Update the lifetime gauge from current/total strings (minutes,
        presumably — TODO confirm units against the caller)."""
        tmpage = int((int(durt)/60 -int(dur) /60)/24)
        if int(dur) > 0:
            self.petlifeg.SetPercentage(int(dur), int(durt))
            self.petlifeg.Show()
        else:
            self.petlifeg.Hide()

    def SetSkill(self, slot, idx, lv):
        """Place skill *idx* at level *lv* into *slot*; idx -1 = leave empty."""
        if int(idx) != -1:
            self.skillslot.ClearSlot(int(slot))
            self.skillslot.SetPetSkillSlot(int(slot), int(idx), int(lv), 0.5, 0.5)
            self.skillslot.SetCoverButton(int(slot), "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub", "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub", "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub" , "d:/ymir work/ui/pet/mini_window/pet_slot_corvermini.sub")
            self.skillslot.SetAlwaysRenderCoverButton(int(slot), TRUE)

    def SetExperience(self, expm, expi, exptot):
        """Render the mini EXP gauge; same 90%/10% main/item split and
        quarter-segment logic as PetSystemMain.SetExperience."""
        expm = int(expm)
        expi = int(expi)
        exptot = int(exptot)
        if exptot > 0:
            totalexp = exptot
            totexpm = int( float(totalexp) / 100 * 90 )
            totexpi = totalexp - totexpm
            expi = min(expi, totexpi)
            expmp = float(expm) / totexpm * 100
            expip = float(expi) / totexpi * 100
        else:
            totalexp = 0
            totexpm = 0
            totexpi = 0
            expmp = 0
            expip = 0
        # NOTE(review): the second assignment overrides the first, so only
        # the lower clamp (>= 0) actually takes effect here.
        curPoint = int(min(expm, totexpm))
        curPoint = int(max(expm, 0))
        maxPoint = int(max(totexpm, 0))
        curPointi = int(min(expi, totexpi))
        curPointi = int(max(expi, 0))
        maxPointi = int(max(totexpi, 0))
        quarterPoint = maxPoint / 4
        quarterPointi = maxPointi
        FullCount = 0
        FullCounti = 0
        if 0 != quarterPoint:
            FullCount = min(4, curPoint / quarterPoint)
        if 0 != quarterPointi:
            FullCounti = min(1, curPointi / quarterPointi)
        for i in xrange(4):
            self.petmini_exp[i].Hide()
        self.petmini_exp[4].Hide()
        for i in xrange(FullCount):
            self.petmini_exp[i].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
            self.petmini_exp[i].Show()
        for i in xrange(FullCounti):
            self.petmini_exp[4].SetRenderingRect(0.0, 0.0, 0.0, 0.0)
            self.petmini_exp[4].Show()
        # The partially filled segment is clipped proportionally.
        if 0 != quarterPoint:
            if FullCount < 4:
                Percentage = float(curPoint % quarterPoint) / quarterPoint - 1.0
                self.petmini_exp[FullCount].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
                self.petmini_exp[FullCount].Show()
        if 0 != quarterPointi:
            if FullCounti < 1:
                Percentage = float(curPointi % quarterPointi) / quarterPointi - 1.0
                self.petmini_exp[4].SetRenderingRect(0.0, Percentage, 0.0, 0.0)
                self.petmini_exp[4].Show()
        #####
        self.tooltipexp[0].SetText("Experience : %d of %d" % (expm, totexpm))
        self.tooltipexp[1].SetText("Experience : %.2f%%" % expmp)
        self.tooltipexp[2].SetText("ExperienceI : %d of %d" % (expi, totexpi))
        self.tooltipexp[3].SetText("ExperienceI : %.2f%%" % expip)

    def UseSkill(self, slot):
        """Ask the server to cast the pet skill sitting in *slot*."""
        chat.AppendChat(chat.CHAT_TYPE_INFO, "+ --> "+str(slot))
        #chat.AppendChat(chat.CHAT_TYPE_INFO, "Skill: "+ str(petskill.GetSkillbySlot(slot)))
        net.SendChatPacket("/petskills "+str(slot))

    def OnUpdate(self):
        # Blink the portrait overlay when the pet has reached an
        # evolution-ready level/stage combination.
        if constInfo.PET_LEVEL == 40 and constInfo.PET_EVOLUTION == 0:
            self.pet_icon_slot_ani_img.Show()
        elif constInfo.PET_LEVEL == 81 and constInfo.PET_EVOLUTION == 1:
            self.pet_icon_slot_ani_img.Show()
        elif constInfo.PET_LEVEL == 81 and constInfo.PET_EVOLUTION == 2:
            self.pet_icon_slot_ani_img.Show()
        else:
            self.pet_icon_slot_ani_img.Hide()
        # Show the EXP tooltips only while hovering the gauge board.
        if TRUE == self.expwind1.IsIn():
            for i in range(0,4):
                self.tooltipexp[i].Show()
        else:
            for i in range(0,4):
                self.tooltipexp[i].Hide()
| [
"59807064+Reizonr1@users.noreply.github.com"
] | 59807064+Reizonr1@users.noreply.github.com |
a78b7c2ff7997da72cd3f9e2e6838ea42da7d1ec | bb16dba71461c17d520725022ffff6b578040a5e | /tools/releasetools/test_merge_target_files.py | bb9ce8e276333dbdd42a26bde30ed3ffe9385b3f | [
"Apache-2.0"
] | permissive | omnirom/android_build | c060115c1fc3adcaaceea40b43ed460387899a48 | 6111dcfb1048d972458aca792aed3dca043b34f6 | refs/heads/android-10 | 2023-08-26T01:40:43.568277 | 2020-08-13T18:36:05 | 2020-08-13T18:36:05 | 13,535,804 | 38 | 295 | null | 2020-07-31T01:49:04 | 2013-10-13T07:53:39 | Makefile | UTF-8 | Python | false | false | 3,045 | py | #
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import common
import test_utils
from merge_target_files import (
read_config_list, validate_config_lists, default_system_item_list,
default_other_item_list, default_system_misc_info_keys)
class MergeTargetFilesTest(test_utils.ReleaseToolsTestCase):
  """Unit tests for merge_target_files config-list parsing/validation."""

  def setUp(self):
    self.testdata_dir = test_utils.get_testdata_dir()

  def test_read_config_list(self):
    """read_config_list yields one item per non-empty line of the file."""
    config_path = os.path.join(self.testdata_dir,
                               'merge_config_system_item_list')
    parsed_items = read_config_list(config_path)
    expected_items = [
        'META/apkcerts.txt',
        'META/filesystem_config.txt',
        'META/root_filesystem_config.txt',
        'META/system_manifest.xml',
        'META/system_matrix.xml',
        'META/update_engine_config.txt',
        'PRODUCT/*',
        'ROOT/*',
        'SYSTEM/*',
    ]
    self.assertItemsEqual(parsed_items, expected_items)

  def test_validate_config_lists_ReturnsFalseIfMissingDefaultItem(self):
    """Dropping a default item without relocating it must fail validation."""
    system_items = list(default_system_item_list)
    system_items.remove('SYSTEM/*')
    self.assertFalse(
        validate_config_lists(system_items, default_system_misc_info_keys,
                              default_other_item_list))

  def test_validate_config_lists_ReturnsTrueIfDefaultItemInDifferentList(self):
    """A default item moved to the other-item list still validates."""
    system_items = list(default_system_item_list)
    system_items.remove('ROOT/*')
    other_items = list(default_other_item_list)
    other_items.append('ROOT/*')
    self.assertTrue(
        validate_config_lists(system_items, default_system_misc_info_keys,
                              other_items))

  def test_validate_config_lists_ReturnsTrueIfExtraItem(self):
    """Extra, non-default partitions are allowed."""
    system_items = list(default_system_item_list)
    system_items.append('MY_NEW_PARTITION/*')
    self.assertTrue(
        validate_config_lists(system_items, default_system_misc_info_keys,
                              default_other_item_list))

  def test_validate_config_lists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
    """Dynamic-partition keys may not appear in the system misc_info keys."""
    for forbidden_key in ('dynamic_partition_list', 'super_partition_groups'):
      misc_info_keys = list(default_system_misc_info_keys)
      misc_info_keys.append(forbidden_key)
      self.assertFalse(
          validate_config_lists(default_system_item_list, misc_info_keys,
                                default_other_item_list))
| [
"danielnorman@google.com"
] | danielnorman@google.com |
952ee192cb9e0908a088003c0d9ba094564081bc | 62312aac9650a1cb3f346020f6603644bdd742e8 | /test/handlers_test.py | c8d7d1f916a4068f2100ac0fb8be06a6d4fbe169 | [] | no_license | saymonp/backend-estagio | 427204758d7c60a75ce79a9a1d42ba12b15768a5 | 00b5148776d81a88c8e1f8bb5b2ca71fdec18e8b | refs/heads/main | 2023-04-29T06:21:03.729296 | 2021-05-18T17:21:30 | 2021-05-18T17:21:30 | 321,748,227 | 0 | 0 | null | 2021-05-18T03:04:51 | 2020-12-15T18:01:05 | Python | UTF-8 | Python | false | false | 8,262 | py | import json
import pytest
import random
import string
import json
import requests
from backend.handlers.contact_email import send_contact_email
from backend.handlers.users import login, delete, register, request_password_reset, password_reset, email_confirmation, list_users, update_permissions
from backend.handlers.s3 import upload_presigned_url
from backend.user.user import User
from backend.services.mongo import db
def test_contact_email():
    """send_contact_email returns an ok body for a well-formed payload.

    The handler expects an API-Gateway style event whose "body" is a JSON
    string; this sends a real email (live side effect).
    """
    event = {"body": "{\"clientFirstName\": \"Client Name\", \"clientLastName\": \"Client Last Name\", \"clientEmail\": \"example@teste.com\", \"subject\": \"Teste Handler Serverless\", \"message\": \"Teste Serverless...\"}"}
    response = send_contact_email(event, None)
    body = json.loads(response["body"])
    assert body == {"ok": "Email sent"}
def test_user_delete():
    """Create a throwaway user via the User model, then delete it through
    the handler using a static JWT carrying the delete:user permission."""
    user = User()
    email = "toDelete@delete"
    user_to_del = user.register("ToDelete", email, "banana123")
    user_id = user_to_del["_id"]
    event = {"headers": {"Authorization": "bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJwZXJtaXNzaW9ucyI6ImRlbGV0ZTp1c2VyIn0.SeTu_ZfAORdpmtpiX9YTZ0p97pxGfxGEu3qwjQT07O4",
                         "Content-Type": "application/json"}, "body": {"id": user_id, "email": email}}
    response = delete(event, None)
    print(response)
    # NOTE(review): delete appears to return the payload dict directly (no
    # serialized "body" envelope), unlike send_contact_email — confirm intended.
    body = response
    assert body == {"deleted user": user_id}
def test_user_register():
    """Drive the register handler with an authorized static JWT; the
    handler is expected to send a verification email (live side effect)."""
    name = "Saymon Treviso1"
    email = "porolac214@bulkbye.com"
    password = "banana123"
    permissions = ["create:product", "delete:product", "update:product"]
    event = {"headers": {"Authorization": "bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJwb3JvbGFjMjE0QGJ1bGtieWUuY29tIiwiZXhwIjoxNjE5Nzk2NDE2LCJpZCI6IjYwM2FiOGZiNGRkMDZkNDM0MDQxYWI1NyIsIm5hbWUiOiJTYXltb24gVHJldmlzbzEiLCJwZXJtaXNzaW9ucyI6WyJURVNUOlNlcnZlcmxlc3MiXX0.GazFGRoEPU-Ck56Wjs7CqIPo1Kh3OdonXCzzAiQmK_A", "Content-Type": "application/json"},
             "body": {"name": name, "email": email, "password": password, "permissions": permissions}}
    res = register(event, None)
    assert res["msg"] == "Verification email sent"
def test_user_list_users():
    """Smoke-test list_users; only prints the result (no assertion)."""
    event = {"body": "{\"email\": \"porolac214@bulkbye.com\"}"}
    res = list_users(event, None)
    print(res)
def test_user_login():
    """Log in with a known account and assert the response body carries a
    "token" field (the JWT)."""
    event = {
        "body": "{\"email\": \"nhs40e+vra5gv6hlusc@sharklasers.com\", \"password\": \"banana123\"}"}
    res = login(event, None)
    have_token = "token" in res["body"]
    assert True == have_token
def test_user_update_permissions():
    """End-to-end flow: register -> confirm email -> login -> update the
    user's permissions with the freshly issued access token.

    Uses the randstr helper (presumably defined elsewhere in this module —
    TODO confirm) and reads verification tokens straight from Mongo.
    """
    # register
    name = randstr(4)
    email = f"{randstr(4)}@{randstr(3)}.com"
    password = "banana123"
    permissions = ["update:user"]
    event = {"headers": {"Authorization": "bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJwZXJtaXNzaW9ucyI6WyJkZWxldGU6dXNlciIsImNyZWF0ZTp1c2VyIl19.-lF5dmarBO2aQLdY9AgW4mtB8_3c_hMplSUfowhTmMU", "Content-Type": "application/json"},
             "body": {"name": name, "email": email, "password": password, "permissions": permissions}}
    res = register(event, None)
    assert res["msg"] == "Verification email sent"
    # Get Token — fetch the email-confirmation token from the database.
    user = db.users.find_one({"email": email})
    userid = user["_id"]
    secret_token = db.secretToken.find_one({"_userId": userid})
    token = secret_token["token"]
    event = {"body": f"{{\"confirmation_token\": \"{token}\"}}"}
    response = email_confirmation(event, None)
    assert json.loads(response["body"]) == {"msg": "User verified"}
    # login
    event = {"body": f"{{\"email\": \"{email}\", \"password\": \"banana123\"}}"}
    res = login(event, None)
    have_token = "token" in res["body"]
    access_token = json.loads(res["body"])["token"]
    # Use the fresh access token (carries update:user) to grant more permissions.
    event = {"headers":
             {"Authorization": f"bearer {access_token}",
              "Content-Type": "application/json"},
             "body": {"id": str(userid), "permissions": ["update:user", "create:product", "delete:product", "update:product"]}}
    res = update_permissions(event, None)
    assert res == {"user permissions updated": str(userid)}
def test_user_password_reset():
    """Register + confirm a user, then exercise the password-reset flow twice.

    The second round checks that a reset token can be re-issued and accepted
    again after a successful reset.
    """
    # register a fresh user with random credentials
    name = randstr(4)
    email = f"{randstr(4)}@{randstr(3)}.com"
    password = "banana123"
    permissions = ["create:product", "delete:product", "update:product"]
    # NOTE(review): fixed test JWT with "delete:user"/"create:user" permissions.
    event = {"headers": {"Authorization": "bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJwZXJtaXNzaW9ucyI6WyJkZWxldGU6dXNlciIsImNyZWF0ZTp1c2VyIl19.-lF5dmarBO2aQLdY9AgW4mtB8_3c_hMplSUfowhTmMU", "Content-Type": "application/json"},
             "body": {"name": name, "email": email, "password": password, "permissions": permissions}}
    res = register(event, None)
    assert res["msg"] == "Verification email sent"
    # read the confirmation token straight from the database and verify the user
    user = db.users.find_one({"email": email})
    userId = user["_id"]
    secret_token = db.secretToken.find_one({"_userId": userId})
    token = secret_token["token"]
    event = {"body": f"{{\"confirmation_token\": \"{token}\"}}"}
    response = email_confirmation(event, None)
    assert json.loads(response["body"]) == {"msg": "User verified"}
    # first password-reset round: request a token, then consume it
    event = {"body": f"{{\"email\": \"{email}\"}}"}
    response = request_password_reset(event, None)
    print("reset", response)
    assert json.loads(response["body"]) == {"msg": "Password request sent"}
    user = db.users.find_one({"email": email})
    password_reset_token = user["passwordResetToken"]
    event = {"body": f"{{\"newPassword\": \"321ananab\", \"passwordResetToken\": \"{password_reset_token}\"}}"}
    reset_response = password_reset(event, None)
    print("reset", reset_response)
    assert json.loads(reset_response["body"]) == {"msg": "Password updated"}
    # second round: a new token must be issued and accepted again
    event = {"body": f"{{\"email\": \"{email}\"}}"}
    response = request_password_reset(event, None)
    print("reset", response)
    assert json.loads(response["body"]) == {"msg": "Password request sent"}
    user = db.users.find_one({"email": email})
    password_reset_token = user["passwordResetToken"]
    event = {"body": f"{{\"newPassword\": \"321ananab\", \"passwordResetToken\": \"{password_reset_token}\"}}"}
    reset_response = password_reset(event, None)
    print("reset", reset_response)
    assert json.loads(reset_response["body"]) == {"msg": "Password updated"}
def test_user_email_confirmation():
    """Register a user and confirm the account via the stored e-mail token."""
    # register a fresh user with random credentials
    name = randstr(4)
    email = f"{randstr(4)}@{randstr(3)}.com"
    password = "banana123"
    permissions = ["create:product", "delete:product", "update:product"]
    # NOTE(review): fixed test JWT with "delete:user"/"create:user" permissions.
    event = {"headers": {"Authorization": "bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyLCJwZXJtaXNzaW9ucyI6WyJkZWxldGU6dXNlciIsImNyZWF0ZTp1c2VyIl19.-lF5dmarBO2aQLdY9AgW4mtB8_3c_hMplSUfowhTmMU", "Content-Type": "application/json"},
             "body": {"name": name, "email": email, "password": password, "permissions": permissions}}
    res = register(event, None)
    assert res["msg"] == "Verification email sent"
    # read the confirmation token straight from the database (tests have no mailbox)
    user = db.users.find_one({"email": email})
    userId = user["_id"]
    secret_token = db.secretToken.find_one({"_userId": userId})
    token = secret_token["token"]
    event = {"body": f"{{\"confirmation_token\": \"{token}\"}}"}
    response = email_confirmation(event, None)
    print(response)
    assert json.loads(response["body"]) == {"msg": "User verified"}
def test_presigned_url():
    """Request an S3 presigned POST URL and upload a local fixture through it."""
    import os  # local import: keeps the fix self-contained

    event = {"pathParameters": {"path": "teste%2F", "fileName": "My Test(34).jpg"}}
    response = upload_presigned_url(event, None)
    response = response["body"]
    print(response)
    # Fix: the fixture path was hard-coded to one developer's home directory
    # (/home/trevisan/...). Resolve it relative to this test file instead so
    # the test runs on any machine / CI worker.
    object_name = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "fixtures", "My Test(34).jpg")
    with open(object_name, "rb") as f:
        files = {"file": (response["fields"]["key"], f)}
        http_response = requests.post(response['url'], data=response["fields"], files=files)
    print("response", http_response)
def randstr(length):
    """Return a random string of ``length`` lowercase ASCII letters."""
    letters = [random.choice(string.ascii_lowercase) for _ in range(length)]
    return "".join(letters)
| [
"saymon.t@outlook.com"
] | saymon.t@outlook.com |
75b2778bff31d7bc5c84c4abcbaab82ed40bcef0 | 1cfcf45bbbd7320575d4316744c09826dac407af | /src/tp1_9.py | f84d9137e9a0887a53544da12bfa872c8576234f | [] | no_license | KasuraS/TP1-IoT | 95676d836c007701de17880c6c428d3562e5df6b | 28a7dae47dcd5743eba065417f10b4f16cf10772 | refs/heads/main | 2023-01-18T23:47:54.037618 | 2020-12-04T14:19:04 | 2020-12-04T14:19:04 | 318,537,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import RPi.GPIO as GPIO
import time

BUZZ = 17  # BCM pin driving the buzzer
BTN = 18   # BCM pin reading the push button (pulled down; high when pressed)

GPIO.setmode(GPIO.BCM)
GPIO.setup(BUZZ, GPIO.OUT)
GPIO.output(BUZZ, GPIO.LOW)
GPIO.setup(BTN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

try:
    while True:
        if GPIO.input(BTN):
            print("btn pressed...")
            # Beep once: 0.5 s on, 0.5 s off.
            GPIO.output(BUZZ, GPIO.HIGH)
            time.sleep(0.5)
            GPIO.output(BUZZ, GPIO.LOW)
            time.sleep(0.5)
        else:
            # Fix: the original busy-polled at 100% CPU while idle; a short
            # sleep keeps the loop responsive without pegging the core.
            time.sleep(0.01)
except KeyboardInterrupt:
    pass
finally:
    # Fix: cleanup used to run only on Ctrl-C; any other exception left the
    # pins configured, causing "channel already in use" warnings on rerun.
    GPIO.cleanup()
"guo.yefan@hotmail.fr"
] | guo.yefan@hotmail.fr |
2112bbc0bb40eb05b9d150ae386c7817e5840775 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /unprocessing/estimator.py | def1f4464ffc7f3d2afab9ff75d80d3992b58c68 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 5,225 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unprocessing model function and train and eval specs for Estimator.
Unprocessing Images for Learned Raw Denoising
http://timothybrooks.com/tech/unprocessing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from unprocessing import process
from tensorflow.contrib import layers as contrib_layers
def psnr(labels, predictions):
  """Computes average peak signal-to-noise ratio of `predictions`.

  Here PSNR is defined with respect to the maximum value of 1. All image tensors
  must be within the range [0, 1].

  Args:
    labels: Tensor of shape [B, H, W, N].
    predictions: Tensor of shape [B, H, W, N].

  Returns:
    Tuple of (psnr, update_op) as returned by tf.metrics.
  """
  predictions.shape.assert_is_compatible_with(labels.shape)
  # Graph-mode range check: the assert ops run before the PSNR computation
  # because they are control dependencies of it.
  with tf.control_dependencies([tf.assert_greater_equal(labels, 0.0),
                                tf.assert_less_equal(labels, 1.0)]):
    psnrs = tf.image.psnr(labels, predictions, max_val=1.0)
  # Identical image pairs produce infinite PSNR; drop them so the streaming
  # mean metric stays finite.
  psnrs = tf.boolean_mask(psnrs, tf.logical_not(tf.is_inf(psnrs)))
  return tf.metrics.mean(psnrs, name='psnr')
def create_model_fn(inference_fn, hparams):
  """Creates a model function for Estimator.

  Args:
    inference_fn: Model inference function with specification:
      Args -
        noisy_img - Tensor of shape [B, H, W, 4].
        variance - Tensor of shape [B, H, W, 4].
      Returns -
        Tensor of shape [B, H, W, 4].
    hparams: Hyperparameters for model as a tf.contrib.training.HParams object.

  Returns:
    `_model_fn`.
  """

  def _model_fn(features, labels, mode, params):
    """Constructs the model function.

    Args:
      features: Dictionary of input features.
      labels: Tensor of labels if mode is `TRAIN` or `EVAL`, otherwise `None`.
      mode: ModeKey object (`TRAIN` or `EVAL`).
      params: Parameter dictionary passed from the Estimator object.

    Returns:
      An EstimatorSpec object that encapsulates the model and its serving
      configurations.
    """
    del params  # Unused.

    def process_images(images):
      """Closure for processing images with fixed metadata."""
      # Applies the raw-to-RGB pipeline using the per-example white-balance
      # gains and color matrix carried in `features`.
      return process.process(images, features['red_gain'],
                             features['blue_gain'], features['cam2rgb'])

    denoised_img = inference_fn(features['noisy_img'], features['variance'])

    # Loss and metrics are computed in processed (RGB) space, not raw space.
    noisy_img = process_images(features['noisy_img'])
    denoised_img = process_images(denoised_img)
    truth_img = process_images(labels)

    if mode in [tf_estimator.ModeKeys.TRAIN, tf_estimator.ModeKeys.EVAL]:
      # L1 loss between processed ground truth and processed denoised output.
      loss = tf.losses.absolute_difference(truth_img, denoised_img)
    else:
      loss = None

    if mode == tf_estimator.ModeKeys.TRAIN:
      optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
      train_op = contrib_layers.optimize_loss(
          loss=loss,
          global_step=tf.train.get_global_step(),
          learning_rate=None,
          optimizer=optimizer,
          name='')  # Prevents scope prefix.
    else:
      train_op = None

    if mode == tf_estimator.ModeKeys.EVAL:
      eval_metric_ops = {'PSNR': psnr(truth_img, denoised_img)}

      def summary(images, name):
        """As a hack, saves image summaries by adding to `eval_metric_ops`."""
        # Scale [0, 1] floats into [0, 255] uint8; the +0.5 rounds to nearest.
        images = tf.saturate_cast(images * 255 + 0.5, tf.uint8)
        eval_metric_ops[name] = (tf.summary.image(name, images, max_outputs=2),
                                 tf.no_op())

      summary(noisy_img, 'Noisy')
      summary(denoised_img, 'Denoised')
      summary(truth_img, 'Truth')

      # Signed error remapped from [-1, 1] into [0, 1] for visualization.
      diffs = (denoised_img - truth_img + 1.0) / 2.0
      summary(diffs, 'Diffs')
    else:
      eval_metric_ops = None

    return tf_estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)

  return _model_fn
def create_train_and_eval_specs(train_dataset_fn,
                                eval_dataset_fn,
                                eval_steps=250):
  """Builds the (TrainSpec, EvalSpec) pair used by Estimator training.

  Args:
    train_dataset_fn: Function returning a Dataset of training data.
    eval_dataset_fn: Function returning a Dataset of evaluation data.
    eval_steps: Number of steps for evaluating model.

  Returns:
    Tuple of (TrainSpec, EvalSpec).
  """
  # Train indefinitely (max_steps=None); evaluate for a fixed step budget.
  return (
      tf_estimator.TrainSpec(input_fn=train_dataset_fn, max_steps=None),
      tf_estimator.EvalSpec(input_fn=eval_dataset_fn, steps=eval_steps,
                            name=''))
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
b8b98ecbf8792e2155bd25ee50e31531bb90a722 | 36f0e7ea8aa8171795dce26e1f81ba865e2041c0 | /resite/views.py | c42fb9abfaf39940e9148fb412d1d20e56f597a7 | [] | no_license | pletain/week4 | 13e0f619cb86af4d26ec4f3d0d60f4ca5d1cebff | 5b36ae823c7af7446b387b384aadcd0b99a63cff | refs/heads/master | 2022-11-27T00:01:24.278574 | 2020-08-03T11:30:10 | 2020-08-03T11:30:10 | 283,144,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from django.shortcuts import render
# Create your views here.
def resite(request):
    """Render the static `resite` landing page."""
    template_name = 'resite/resite.html'
    return render(request, template_name)
| [
"pletain@gimtaehyeong-ui-MacBookPro.local"
] | pletain@gimtaehyeong-ui-MacBookPro.local |
99fe63386a8be48d378cb82477fb48e3093c659b | 1cdff6768a529d91e8588a12333f906f5ddcf8c0 | /main.py | 995abd425a6a4227a0e402d0e3cfd22ec9c66b91 | [
"MIT"
] | permissive | tachytelicdetonation/Plagiarism_Checker | 2481c9b8ac3f18d277ae4d0dd7d76589cd368cfb | 72f3f8a16f5f51e4385de1a4b35a5432d22c7d11 | refs/heads/main | 2023-05-14T15:11:50.164651 | 2021-06-10T16:21:57 | 2021-06-10T16:21:57 | 375,756,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | # Importing Libraries
import os
from offline_utilities import preprocessLines, plagarismCheck, checkPlagarismOffline, showPlagarismOffline
from online_utilities import searchGoogle, getSearchLines, showPlagarismOnline
# ---- Offline plagiarism check ----
# Paths: data/ holds the raw document; data/reference texts/ holds the
# originals it is compared against.
path_to_data = os.path.join(os.getcwd(),'data/')
path_to_originals = os.path.join(path_to_data,'reference texts/') # Folder of reference texts
reference_path = path_to_data + 'huntrprint.txt' # Raw file to be checked for plagiarism
reference = preprocessLines(reference_path) # Pre-process lines from the .txt file
plagarised_object = plagarismCheck(reference, path_to_originals) # Plagiarised lines and their similarity ratios
display_object_offline = checkPlagarismOffline(reference, plagarised_object, isjson = False) # Display object with colour-coded lines
showPlagarismOffline(display_object_offline) # Render the offline plagiarism report

# ---- Online plagiarism check ----
search_lines = getSearchLines(reference) # Pre-process lines for the web search
display_object_online = searchGoogle(search_lines) # Search each line on Google
showPlagarismOnline(search_lines,display_object_online) # Render the online plagiarism report
"deshmukhtanmay017@gmail.com"
] | deshmukhtanmay017@gmail.com |
0fb5e7964be470bc671a7d6c2fc74cb80dd76bf7 | 07c6d3055eda7b1ddb16ce9444166ed311ce3219 | /modules/topics.py | 0821e4b960e83e661ea7519105c0d6cf7682fd6f | [] | no_license | IISH/dpe | 4df9b0576b5419e543c61ce9ef14380ddc4b5c03 | 6509b06aa03242f450766d4cb5d8984f14146b11 | refs/heads/master | 2021-01-10T17:52:54.775316 | 2016-05-04T09:50:46 | 2016-05-04T09:50:46 | 42,994,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,867 | py | import urllib2
import simplejson
import json
import sys
import pandas as pd
import random
import vincent
from vincent import Axis, AxisProperties, PropertySet, ValueRef
from pandas.io.json import json_normalize
from config import configuration, dataverse2indicators, load_dataverse, findpid, load_metadata
import re
def loadjson(apiurl):
    """Fetch `apiurl` over HTTP and return the decoded JSON payload.

    Args:
        apiurl: URL of a JSON endpoint.

    Returns:
        The parsed JSON structure (dict or list) served by the endpoint.
    """
    req = urllib2.Request(apiurl)
    response = urllib2.build_opener().open(req)
    try:
        return simplejson.load(response)
    finally:
        # Fix: the original never closed the response, leaking the socket
        # on every call.
        response.close()
def topics_parser(alltopics):
    """Split a flat topic list into top-level topics and their indicators.

    Args:
        alltopics: iterable of dicts with keys 'Name', 'ID' and 'parent ID'.
            Entries with a falsy 'parent ID' are top-level topics; all
            others are indicators attached to their parent topic.

    Returns:
        Tuple (topics, indicators, topic2inds) where
        - topics maps topic name -> topic id (int),
        - indicators maps indicator id (int) -> indicator name,
        - topic2inds maps topic id (int) -> list of indicator ids.
    """
    topics = {}
    indicators = {}
    topic2inds = {}
    for item in alltopics:
        name = item['Name']
        thisid = int(item['ID'])
        pcode = item['parent ID']
        if not pcode:
            topics[name] = thisid
        else:
            indicators[thisid] = name
            # Fix: normalize the parent id to int for BOTH the lookup and the
            # store. The original looked up topic2inds[pcode] (raw key, a
            # string when IDs arrive as strings — note int(item['ID']) above)
            # but stored under int(pcode), so the lookup always missed and
            # each topic retained only its last indicator.
            parent = int(pcode)
            topic2inds.setdefault(parent, []).append(thisid)
    return (topics, indicators, topic2inds)
def load_alltopics(api, branch):
    """Build the <optgroup>/<option> HTML for every topic and its indicators.

    Args:
        api: URL of the topics JSON endpoint (consumed by `loadjson`).
        branch: dataverse branch name passed to `dataverse2indicators`.

    Returns:
        HTML string of <optgroup> elements, one per topic, each containing
        one <option> per indicator.
    """
    result = loadjson(api)
    (topics, indicators, topic2inds) = topics_parser(result)
    datasets = dataverse2indicators(branch)
    # Fix: collect fragments and join once instead of quadratic `html = html + ...`.
    parts = []
    for topic in sorted(topics):
        topic_id = topics[topic]
        parts.append("<optgroup label=\"" + str(topic) + "\">\n")
        for ind in topic2inds[topic_id]:
            indicator = indicators[ind]
            # Prefer the dataset handle mapped to this indicator; fall back
            # to the raw indicator id when no mapping exists.
            # Fix: bare `except:` narrowed to KeyError so real errors surface.
            try:
                showind = datasets[indicator]
            except KeyError:
                showind = ind
            parts.append("\t<option value=\"" + str(showind) + "\">" + indicator + "</option>" + "\n")
        parts.append("</optgroup>\n")
    return ''.join(parts)
| [
"4tikhonov@gmail.com"
] | 4tikhonov@gmail.com |
15d9b29ddeef1b379e388b0cbb36ebe97afa4cdd | 30a2a924eb32e7297b5a99785950467f25ea785d | /tfgen.py | 074dd41e2fc6687d9c11c44a8d2d2c8c9a1784f5 | [] | no_license | zshwuhan/Reinforcement-Learning-of-Spatio-Temporal-Point-Processes | 1a794e83491b52dea5db3926de91779a9e661a17 | a3f98e77b56c03839dcdb545b17b3675e7c43878 | refs/heads/master | 2020-07-22T16:18:10.020860 | 2019-07-02T18:49:02 | 2019-07-02T18:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,218 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Imitation Learning for Point Process
A LSTM based model for generating marked spatial-temporal points.
References:
- https://arxiv.org/abs/1811.05016
Dependencies:
- Python 3.6.7
- tensorflow==1.5.0
"""
import sys
import arrow
import utils
import numpy as np
import tensorflow as tf
from stppg import GaussianMixtureDiffusionKernel, HawkesLam, SpatialTemporalPointProcess
class SpatialTemporalHawkes(object):
"""
Customized Spatial Temporal Hawkes
A Hawkes model parametrized by multi-layers neural networks, which provides flexible self-exciting
points pattern.
"""
def __init__(self, T, S, layers=[20, 20], n_comp=5, C=1., maximum=1e+3, verbose=False):
"""
"""
# constant hyper parameters
self.INIT_PARAM = .01
self.SIGMA_SHIFT = .05
self.SIGMA_SCALE = .2
self.MU_SCALE = .01
# configurations
self.C = C # constant
self.T = T # time space
self.S = S # location space
self.maximum = maximum # upper bound of conditional intensity
self.verbose = verbose
# model parameters
self.mu = tf.get_variable(name="mu", initializer=tf.constant(0.1), dtype=tf.float32)
self.beta = tf.get_variable(name="beta", initializer=tf.constant(1.), dtype=tf.float32)
self.Wss = []
self.bss = []
self.Wphis = []
# construct multi-layers neural networks
# - define the layers where 2 is for the input layer (x and y);
# And 5 is for the output layer (mu_x, mu_y, sigma_x, sigma_y, rho)
self.layers = [2] + layers + [5]
# - define the number of the components in Gaussian mixture diffusion kernel
self.n_comp = n_comp
# - construct component weighting vectors
for k in range(self.n_comp):
Wphi = tf.get_variable(name="Wphi%d" % k,
initializer=self.INIT_PARAM * tf.random.normal(shape=[2, 1]),
dtype=tf.float32)
self.Wphis.append(Wphi)
# - construct weight & bias matrix layer by layer for each of Gaussian components
Ws = []
bs = []
for i in range(len(self.layers)-1):
# random initialization
W = tf.get_variable(name="W%d%d" % (k, i),
initializer=self.INIT_PARAM * tf.random.normal(shape=[self.layers[i], self.layers[i+1]]),
dtype=tf.float32)
b = tf.get_variable(name="b%d%d" % (k, i),
initializer=self.INIT_PARAM * tf.random.normal(shape=[self.layers[i+1]]),
dtype=tf.float32)
Ws.append(W)
bs.append(b)
self.Wss.append(Ws)
self.bss.append(bs)
def sampling(self, sess, batch_size):
"""fetch model parameters, and generate samples accordingly."""
# get current model parameters
mu, beta = sess.run([self.mu, self.beta])
Wss = sess.run(self.Wss)
bss = sess.run(self.bss)
Wphis = sess.run(self.Wphis)
# construct kernel function and conditional intensity lambda
kernel = GaussianMixtureDiffusionKernel(
self.n_comp, layers=self.layers[1:-1], beta=beta, C=self.C,
SIGMA_SHIFT=self.SIGMA_SHIFT, SIGMA_SCALE=self.SIGMA_SCALE, MU_SCALE=self.MU_SCALE,
Wss=Wss, bss=bss, Wphis=Wphis)
lam = HawkesLam(mu, kernel, maximum=self.maximum)
# sampling points given model parameters
pp = SpatialTemporalPointProcess(lam)
seqs, sizes = pp.generate(T=self.T, S=self.S, batch_size=batch_size, verbose=self.verbose)
return seqs
def _nonlinear_mapping(self, k, s):
"""nonlinear mapping from location space to parameters space"""
# construct multi-layers neural networks
output = s # [n_his, 2]
for i in range(len(self.layers)-1):
output = tf.nn.sigmoid(tf.nn.xw_plus_b(output, self.Wss[k][i], self.bss[k][i])) # [n_his, n_b]
# project to parameters space
mu_x = (output[:, 0] - 0.5) * 2 * self.MU_SCALE # [n_his]: mu_x spans (-MU_SCALE, MU_SCALE)
mu_y = (output[:, 1] - 0.5) * 2 * self.MU_SCALE # [n_his]: mu_y spans (-MU_SCALE, MU_SCALE)
sigma_x = output[:, 2] * self.SIGMA_SCALE + self.SIGMA_SHIFT # [n_his]: sigma_x spans (SIGMA_SHIFT, SIGMA_SHIFT + SIGMA_SCALE)
sigma_y = output[:, 3] * self.SIGMA_SCALE + self.SIGMA_SHIFT # [n_his]: sigma_y spans (SIGMA_SHIFT, SIGMA_SHIFT + SIGMA_SCALE)
rho = output[:, 4] * 1.5 - .75 # [n_his]: rho spans (-.75, .75)
return mu_x, mu_y, sigma_x, sigma_y, rho
def _gaussian_kernel(self, k, t, s, his_t, his_s):
"""
A Gaussian diffusion kernel function based on the standard kernel function proposed
by Musmeci and Vere-Jones (1992). The angle and shape of diffusion ellipse is able
to vary according to the location.
k indicates the k-th gaussian component that is used to compute the nonlinear mappings.
"""
eps = 1e-8 # IMPORTANT: Avoid delta_t be zero
delta_t = t - his_t + eps # [n_his]
delta_s = s - his_s # [n_his, 2]
delta_x = delta_s[:, 0] # [n_his]
delta_y = delta_s[:, 1] # [n_his]
mu_x, mu_y, sigma_x, sigma_y, rho = self._nonlinear_mapping(k, his_s)
return tf.exp(- self.beta * delta_t) * \
(self.C / (2 * np.pi * sigma_x * sigma_y * delta_t * tf.sqrt(1 - tf.square(rho)))) * \
tf.exp((- 1. / (2 * delta_t * (1 - tf.square(rho)))) * \
((tf.square(delta_x - mu_x) / tf.square(sigma_x)) + \
(tf.square(delta_y - mu_y) / tf.square(sigma_y)) - \
(2 * rho * (delta_x - mu_x) * (delta_y - mu_y) / (sigma_x * sigma_y))))
def _softmax(self, s, k):
"""
Gaussian mixture components are weighted by phi^k, which are computed by a softmax function, i.e.,
phi^k(x, y) = e^{[x y]^T w^k} / \sum_{i=1}^K e^{[x y]^T w^i}
"""
# s: [n_his, 2]
# Wphis[k]: [2, 1]
numerator = tf.exp(tf.matmul(s, self.Wphis[k])) # [n_his, 1]
denominator = tf.concat([
tf.exp(tf.matmul(s, self.Wphis[i]))
for i in range(self.n_comp) ], axis=1) # [n_his, K=n_comp]
phis = tf.squeeze(numerator) / tf.reduce_sum(denominator, axis=1) # [n_his]
return phis
def _gaussian_mixture_kernel(self, t, s, his_t, his_s):
"""
A Gaussian mixture diffusion kernel function is superposed by multiple Gaussian diffusion
kernel function. The number of the Gaussian components is specified by n_comp.
"""
nus = []
for k in range(self.n_comp):
phi = self._softmax(his_s, k) # [n_his]
nu = phi * self._gaussian_kernel(k, t, s, his_t, his_s) # [n_his]
nu = tf.expand_dims(nu, -1) # [n_his, 1]
nus.append(nu) # K * [n_his, 1]
nus = tf.concat(nus, axis=1) # [n_his, K]
return tf.reduce_sum(nus, axis=1) # [n_his]
def _lambda(self, t, s, his_t, his_s):
"""lambda function for the Hawkes process."""
lam = self.mu + tf.reduce_sum(self._gaussian_mixture_kernel(t, s, his_t, his_s))
return lam
def log_conditional_pdf(self, points, keep_latest_k=None):
"""log pdf conditional on history."""
if keep_latest_k is not None:
points = points[-keep_latest_k:, :]
# number of the points
len_points = tf.shape(points)[0]
# variables for calculating triggering probability
s, t = points[-1, 1:], points[-1, 0]
his_s, his_t = points[:-1, 1:], points[:-1, 0]
def pdf_no_history():
return tf.log(tf.clip_by_value(self._lambda(t, s, his_t, his_s), 1e-8, 1e+10))
def pdf_with_history():
# triggering probability
log_trig_prob = tf.log(tf.clip_by_value(self._lambda(t, s, his_t, his_s), 1e-8, 1e+10))
# variables for calculating tail probability
tn, ti = points[-2, 0], points[:-1, 0]
t_ti, tn_ti = t - ti, tn - ti
# tail probability
# TODO: change to gaussian mixture (add phi)
log_tail_prob = - \
self.mu * (t - tn) * utils.lebesgue_measure(self.S) - \
tf.reduce_sum(tf.scan(
lambda a, i: self.C * (tf.exp(- self.beta * tn_ti[i]) - tf.exp(- self.beta * t_ti[i])) / \
tf.clip_by_value(self.beta, 1e-8, 1e+10),
tf.range(tf.shape(t_ti)[0]),
initializer=np.array(0., dtype=np.float32)))
return log_trig_prob + log_tail_prob
# TODO: Unsolved issue:
# pdf_with_history will still be called even if the condition is true, which leads to exception
# "ValueError: slice index -1 of dimension 0 out of bounds." due to that points is empty but we
# try to index a nonexisted element.
# However, when points is indexed in a scan loop, this works fine and the numerical result is
# also correct. which is very confused to me. Therefore, I leave this problem here temporarily.
log_cond_pdf = tf.cond(tf.less(len_points, 2),
pdf_no_history, # if there is only one point in the sequence
pdf_with_history) # if there is more than one point in the sequence
return log_cond_pdf
def log_likelihood(self, points):
"""log likelihood of given points"""
loglikli = 0. # loglikelihood initialization
mask_t = tf.cast(points[:, 0] > 0, tf.float32) # time mask
trunc_seq = tf.boolean_mask(points, mask_t) # truncate the sequence and get the valid part
seq_len = tf.shape(trunc_seq)[0] # length of the sequence
# term 1: product of lambda
loglikli += tf.reduce_sum(tf.scan(
lambda a, i: tf.log(self._lambda(trunc_seq[i, 0], trunc_seq[i, 1:], trunc_seq[:i, 0], trunc_seq[:i, 1:])),
tf.range(seq_len),
initializer=np.array(0., dtype=np.float32)))
# term 2: 1 - F^*(T)
ti = points[:, 0]
zero_ti = 0 - ti
T_ti = self.T[1] - ti
loglikli -= tf.reduce_sum(tf.scan(
lambda a, i: self.C * (tf.exp(- self.beta * zero_ti[i]) - tf.exp(- self.beta * T_ti[i])) / \
tf.clip_by_value(self.beta, 1e-8, 1e+10),
tf.range(tf.shape(ti)[0]),
initializer=np.array(0., dtype=np.float32)))
return loglikli
def save_params_npy(self, sess, path):
"""save parameters into numpy file."""
Wss = sess.run(self.Wss)
bss = sess.run(self.bss)
Wphis = sess.run(self.Wphis)
mu, beta = sess.run([self.mu, self.beta])
print(Wss)
print(Wphis)
np.savez(path, Wss=Wss, bss=bss, Wphis=Wphis, mu=mu, beta=beta)
if __name__ == "__main__":
# Unittest example
np.random.seed(1)
tf.set_random_seed(1)
with tf.Session() as sess:
hawkes = SpatialTemporalHawkes(
T=[0., 10.], S=[[-1., 1.], [-1., 1.]],
layers=[5], n_comp=3, C=1., maximum=1e+3, verbose=True)
points = tf.constant([
[ 1.16898147e-02, 1.45831794e-01, -3.05314839e-01],
[ 4.81481478e-02, -1.25229925e-01, 8.72766301e-02],
[ 1.13194443e-01, -3.87020826e-01, 2.80696362e-01],
[ 1.60300925e-01, -2.42807735e-02, -5.64230382e-01],
[ 1.64004624e-01, 7.10764453e-02, -1.77927762e-01],
[ 1.64236113e-01, 6.51166216e-02, -6.82414293e-01],
[ 2.05671296e-01, -4.48017061e-01, 5.36620915e-01],
[ 2.12152779e-01, -3.20064761e-02, -2.08911732e-01]], dtype=tf.float32)
init_op = tf.global_variables_initializer()
sess.run(init_op)
# t = points[-1, 0]
# s = points[-1, 1:]
# his_t = points[:-1, 0]
# his_s = points[:-1, 1:]
# res = sess.run(hawkes.log_conditional_pdf(points))
# res = sess.run(hawkes._lambda(t, s, his_t, his_s))
# res = sess.run(hawkes._softmax(his_s, 0))
# res = sess.run(hawkes._gaussian_kernel(0, t, s, his_t, his_s))
# seq_len = tf.shape(points)[0]
# r = tf.scan(
# lambda a, i: hawkes._lambda(points[i, 0], points[i, 1:], points[:i, 0], points[:i, 1:]),
# tf.range(seq_len), # from the first point to the last point
# initializer=np.array(0., dtype=np.float32))
r = hawkes.log_likelihood(points)
print(sess.run(r))
# # test sampling
# seqs = hawkes.sampling(sess, batch_size=10)
# print(seqs) | [
"woodielove@163.com"
] | woodielove@163.com |
a2e37e0d4b119607f5714d60955c059bfeb459ae | 96c1f13473cf224113185902edd4c9c01091e106 | /theseus/optimizer/nonlinear/dcem.py | e8187726c0cc032faab0f714e60c34a80840bb1b | [
"MIT"
] | permissive | facebookresearch/theseus | f1e488eb5a25f5ba74a6995911bee958b5da4cf3 | 240e1206329d42fedd40399684d6e17e455c6645 | refs/heads/main | 2023-08-11T07:33:12.328520 | 2023-08-02T12:58:01 | 2023-08-02T12:58:01 | 429,570,359 | 1,410 | 105 | MIT | 2023-08-01T14:30:01 | 2021-11-18T20:28:27 | Python | UTF-8 | Python | false | false | 8,421 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from torch.distributions import Normal
from theseus.core.objective import Objective
from theseus.optimizer import OptimizerInfo
from theseus.optimizer.variable_ordering import VariableOrdering
from theseus.third_party.lml import LML
from .nonlinear_optimizer import (
BackwardMode,
EndIterCallbackType,
NonlinearOptimizer,
NonlinearOptimizerInfo,
NonlinearOptimizerStatus,
)
class DCEM(NonlinearOptimizer):
"""
DCEM optimizer for nonlinear optimization using sampling based techniques.
The optimizer can be really sensitive to hypermeter tuning. Here are few tuning
hints:
1. If have to lower the max_iterations, then increase the n_sample.
2. The higher the n_sample, the slowly with variance of samples will decrease.
3. The higher the n_sample, more the chances of optimum being in the elite set.
4. The higher the n_elite, the slower is convergence, but more accurate it might
be, but would need more iterations. n_elite= 5 is good enough for most cases.
"""
def __init__(
self,
objective: Objective,
vectorize: bool = False,
max_iterations: int = 50,
n_sample: int = 100,
n_elite: int = 5,
temp: float = 1.0,
init_sigma: Union[float, torch.Tensor] = 1.0,
lb: float = None,
ub: float = None,
lml_verbose: bool = False,
lml_eps: float = 1e-3,
normalize: bool = True,
abs_err_tolerance: float = 1e-6,
rel_err_tolerance: float = 1e-4,
**kwargs,
) -> None:
super().__init__(
objective,
vectorize=vectorize,
abs_err_tolerance=abs_err_tolerance,
rel_err_tolerance=rel_err_tolerance,
max_iterations=max_iterations,
**kwargs,
)
self.objective = objective
self.ordering = VariableOrdering(objective)
self.n_samples = n_sample
self.n_elite = n_elite
self.lb = lb
self.ub = ub
self.temp = temp
self.normalize = normalize
self._tot_dof = sum([x.dof() for x in self.ordering])
self.lml_eps = lml_eps
self.lml_verbose = lml_verbose
self.init_sigma = init_sigma
def _mu_vec_to_dict(self, mu: torch.Tensor) -> Dict[str, torch.Tensor]:
idx = 0
mu_dic = {}
for var in self.ordering:
mu_dic[var.name] = mu[:, slice(idx, idx + var.dof())]
idx += var.dof()
return mu_dic
def reset_sigma(self, init_sigma: Union[float, torch.Tensor]) -> None:
self.sigma = (
torch.ones(
(self.objective.batch_size, self._tot_dof), device=self.objective.device
)
* init_sigma
)
def _CEM_step(self):
"""
Performs one iteration of CEM.
Updates the self.sigma and return the new mu.
"""
device = self.objective.device
n_batch = self.ordering[0].shape[0]
mu = torch.cat([var.tensor for var in self.ordering], dim=-1)
X = Normal(mu, self.sigma).rsample((self.n_samples,))
X_samples: List[Dict[str, torch.Tensor]] = []
for sample in X:
X_samples.append(self._mu_vec_to_dict(sample))
fX = torch.stack(
[self.objective.error_metric(X_samples[i]) for i in range(self.n_samples)],
dim=1,
)
assert fX.shape == (n_batch, self.n_samples)
if self.temp is not None and self.temp < np.infty:
if self.normalize:
fX_mu = fX.mean(dim=1).unsqueeze(1)
fX_sigma = fX.std(dim=1).unsqueeze(1)
_fX = (fX - fX_mu) / (fX_sigma + 1e-6)
else:
_fX = fX
if self.n_elite == 1:
# indexes = LML(N=n_elite, verbose=lml_verbose, eps=lml_eps)(-_fX*temp)
indexes = torch.softmax(-_fX * self.temp, dim=1)
else:
indexes = LML(
N=self.n_elite, verbose=self.lml_verbose, eps=self.lml_eps
)(-_fX * self.temp)
indexes = indexes.unsqueeze(2)
eps = 0
else:
indexes_vals = fX.argsort(dim=1)[:, : self.n_elite]
# Scatter 1.0 to the indexes using indexes_vals
indexes = torch.zeros(n_batch, self.n_samples, device=device).scatter_(
1, indexes_vals, 1.0
)
indexes = indexes.unsqueeze(2)
eps = 1e-10
# indexes.shape should be (n_batch, n_sample, 1)
X = X.transpose(0, 1)
assert indexes.shape[:2] == X.shape[:2]
X_I = indexes * X
mu = torch.sum(X_I, dim=1) / self.n_elite
self.sigma = (
(indexes * (X - mu.unsqueeze(1)) ** 2).sum(dim=1) / self.n_elite
).sqrt() + eps # adding eps to avoid sigma=0, which is happening when temp=None
assert self.sigma.shape == (n_batch, self._tot_dof)
return self._mu_vec_to_dict(mu)
def _optimize_loop(
self,
num_iter: int,
info: NonlinearOptimizerInfo,
verbose: bool,
end_iter_callback: Optional[EndIterCallbackType] = None,
**kwargs,
) -> int:
converged_indices = torch.zeros_like(info.last_err).bool()
iters_done = 0
for it_ in range(num_iter):
iters_done += 1
try:
mu = self._CEM_step()
except RuntimeError as error:
raise RuntimeError(f"There is an error in update {error}.")
self.objective.update(mu)
# check for convergence
with torch.no_grad():
err = self.objective.error_metric()
self._update_info(info, it_, err, converged_indices)
if verbose:
print(
f"Nonlinear optimizer. Iteration: {it_+1}. "
f"Error: {err.mean().item()} "
)
converged_indices = self._check_convergence(err, info.last_err)
info.status[
np.array(converged_indices.cpu().numpy())
] = NonlinearOptimizerStatus.CONVERGED
if converged_indices.all():
break # nothing else will happen at this point
info.last_err = err
if end_iter_callback is not None:
end_iter_callback(self, info, mu, it_)
info.status[
info.status == NonlinearOptimizerStatus.START
] = NonlinearOptimizerStatus.MAX_ITERATIONS
return iters_done
def _optimize_impl(
self,
track_best_solution: bool = False,
track_err_history: bool = False,
track_state_history: bool = False,
verbose: bool = False,
backward_mode: Union[str, BackwardMode] = BackwardMode.UNROLL,
end_iter_callback: Optional[EndIterCallbackType] = None,
**kwargs,
) -> OptimizerInfo:
backward_mode = BackwardMode.resolve(backward_mode)
init_sigma = kwargs.get("init_sigma", self.init_sigma)
self.reset_sigma(init_sigma)
with torch.no_grad():
info = self._init_info(
track_best_solution, track_err_history, track_state_history
)
if verbose:
print(
f"DCEM optimizer. Iteration: 0. "
f"Error: {info.last_err.mean().item()}"
)
if backward_mode in [BackwardMode.UNROLL, BackwardMode.DLM]:
self._optimize_loop(
num_iter=self.params.max_iterations,
info=info,
verbose=verbose,
end_iter_callback=end_iter_callback,
**kwargs,
)
# If didn't coverge, remove misleading converged_iter value
info.converged_iter[
info.status == NonlinearOptimizerStatus.MAX_ITERATIONS
] = -1
return info
else:
raise NotImplementedError(
"DCEM currently only supports 'unroll' backward mode."
)
| [
"noreply@github.com"
] | facebookresearch.noreply@github.com |
55d63f63586b52b2902dd7b6d821282be331846b | 82f296a1531ffef601d86a36794dbcb0289995e5 | /django_paystack/django_paystack/settings.py | 878fbde0a787c1a41deccaa399087cce01a4582e | [
"MIT"
] | permissive | 204070/django-paystack | c9d3b67cbd28789d30ed84c3118e413dbab9f7a0 | 164163b70a07f38c50c26ba46b47a105322b357b | refs/heads/master | 2021-04-18T18:53:28.502251 | 2018-03-23T13:46:45 | 2018-03-23T13:46:45 | 126,270,266 | 1 | 0 | MIT | 2018-03-22T02:48:19 | 2018-03-22T02:48:19 | null | UTF-8 | Python | false | false | 3,393 | py | """
Django settings for django_paystack project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4=!bvm+%#ae^nr0u&wp(gd^aatu&-%vx_thcz0#w)8q(9mfca5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'paystack',
'django_paystack',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_paystack.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_paystack.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
PAYSTACK_PUBLIC_KEY = os.getenv('PAYSTACK_PUBLIC_KEY', '')
PAYSTACK_SECRET_KEY = os.getenv('PAYSTACK_SECRET_KEY', '')
# PAYSTACK_FAILED_URL = "/google/api"
# PAYSTACK_SUCCESS_URL = "/google/api"
| [
"noreply@github.com"
] | 204070.noreply@github.com |
5d4a4e15c598cf1d72b46ffeaf5cc44a01bc8e22 | e6ba92a23cbf92d8727e8a7a057e664d4873db40 | /extdirect/django/crud.py | a7d4d110fc08998339a19d7f2fbc65961cea46bd | [
"BSD-3-Clause"
] | permissive | bitkeeper/extdirect.cherrypy | 0e4cd4c9b5abf538bad407fded5cf265b020392f | e725ad8fe7b8f3fc144d0e78778a054fc6b293c6 | refs/heads/master | 2020-04-01T21:47:26.146205 | 2010-09-22T14:06:41 | 2010-09-22T14:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,413 | py | from extdirect.django.store import ExtDirectStore
from django.db import transaction
from django.core.serializers import serialize
from django.utils.encoding import force_unicode
from django.views.generic.create_update import get_model_and_form_class
def format_form_errors(errors):
    """Turn Django's ErrorDict/ErrorList into plain dict/list objects.

    The Django error containers are not JSON-serializable as-is, so every
    error message is forced to unicode and collected into ordinary lists.
    """
    return dict(
        (field, [force_unicode(message) for message in messages])
        for field, messages in errors.items()
    )
class BaseExtDirectCRUD(object):
    """
    Base class for Ext.Direct CRUD actions.

    Holds the Django model/form pair, the ExtDirectStore used for
    serialization, and all the hook methods that subclasses may override.
    """
    model = None
    form = None

    # Defaults
    actions = ('create', 'read', 'update', 'destroy', 'load')
    isForm = False                 # True when create/update are registered as form handlers
    parse_fk_fields = True         # rewrite 'xxx_id' keys before feeding the form (see _fk_fields_parser)
    show_form_validation = False   # return form validation errors instead of the generic failure message
    metadata = True

    # Messages
    create_success_msg = "Records created"
    create_failure_msg = "There was an error while trying to save some of the records"
    update_success_msg = "Records updated"
    update_failure_msg = "There was an error while trying to save some of the records"
    destroy_success_msg = "Objects deleted"

    # Seems that Ext.form.action.DirectLoad always look for this metadata.
    # If you find a way to change that on the client-side, please let me know.
    direct_load_metadata = {'root': 'data', 'total' : 'total', 'success': 'success'}

    def __init__(self):
        # Same model/form resolution logic as Django generic views.
        self.model, self.form = get_model_and_form_class(self.model, self.form)
        self.store = self.direct_store()

    def register_actions(self, provider, action, login_required, permission):
        # Register the CRUD actions.  Re-implement the reg_* methods in your
        # class definition in order to change the default registrations.
        if 'create' in self.actions:
            self.reg_create(provider, action, login_required, permission)
        if 'read' in self.actions:
            self.reg_read(provider, action, login_required, permission)
        if 'load' in self.actions:
            self.reg_load(provider, action, login_required, permission)
        if 'update' in self.actions:
            self.reg_update(provider, action, login_required, permission)
        if 'destroy' in self.actions:
            self.reg_destroy(provider, action, login_required, permission)

    def reg_create(self, provider, action, login_required, permission):
        provider.register(self.create, action, 'create', 1, self.isForm, login_required, permission)

    def reg_read(self, provider, action, login_required, permission):
        provider.register(self.read, action, 'read', 1, False, login_required, permission)

    def reg_load(self, provider, action, login_required, permission):
        provider.register(self.load, action, 'load', 1, False, login_required, permission)

    def reg_update(self, provider, action, login_required, permission):
        provider.register(self.update, action, 'update', 1, self.isForm, login_required, permission)

    def reg_destroy(self, provider, action, login_required, permission):
        provider.register(self.destroy, action, 'destroy', 1, False, login_required, permission)

    def direct_store(self):
        """Build the ExtDirectStore used to serialize querysets."""
        return ExtDirectStore(self.model, metadata=self.metadata)

    def query(self, **kw):
        # It must return `None` or a valid Django Queryset.
        return None

    # All the "extract_(action)_data" methods depend on how you registered each method.
    def extract_create_data(self, request, sid):
        # It must return a dict object or a list of dicts with the values
        # ready to create the new instance or instances.
        if self.isForm:
            return dict(request.extdirect_post_data.items())
        else:
            return request.extdirect_post_data[0][self.store.root]

    def extract_read_data(self, request):
        # It must return a dict object ready to be passed
        # to the query method of the ExtDirectStore class.
        return request.extdirect_post_data[0]

    def extract_load_data(self, request):
        # It must return a dict object ready to be passed
        # to the query method of the ExtDirectStore class.
        return request.extdirect_post_data[0]

    def extract_update_data(self, request, sid):
        # It must return a dict object or a list of dicts with the values
        # ready to update the instance or instances.
        if self.isForm:
            return dict(request.extdirect_post_data.items())
        else:
            return request.extdirect_post_data[0][self.store.root]

    def extract_destroy_data(self, request):
        # It must return the id or list of id's to be deleted.
        return request.extdirect_post_data[0][self.store.root]

    def _single_create(self, request, data):
        """Validate and save one new instance; return (pk, "") or (0, form.errors)."""
        # The client sends a placeholder id (e.g. 'ext-record-#'); drop it.
        data.pop("id", "")
        c = None
        if self.parse_fk_fields:
            data = self._fk_fields_parser(data)
        form = self.form(data, request.FILES)
        if form.is_valid():
            c = form.save()
            self.post_single_create(request, c)
            return c.id, ""
        else:
            return 0, form.errors

    def _single_update(self, request, data):
        """Validate and save one existing instance; return (pk, "") or (0, form.errors)."""
        id = data.pop("id")
        obj = self.model.objects.get(pk=id)
        if self.parse_fk_fields:
            data = self._fk_fields_parser(data)
        form = self.form(data, request.FILES, instance=obj)
        if form.is_valid():
            obj = form.save()
            self.post_single_update(request, obj)
            return obj.id, ""
        else:
            return 0, form.errors

    # Process the data in order to fix the foreign keys according to how
    # the `extdirect` serializer handles them:
    # {'fk_model': 'FKModel', 'fk_model_id': 1} --> {'fk_model': 1, 'fk_model_id': 1}
    # NOTE(review): mutating `data` while iterating data.keys() is only safe on
    # Python 2 (where keys() returns a list); on Python 3 it raises RuntimeError.
    def _fk_fields_parser(self, data):
        for field in data.keys():
            if field[-3:] == '_id': #and isinstance(data[field], int) and not isinstance(data[field[:-3]], int):
                data[field[:-3]] = data[field]
                data.pop(field)
        return data

    # Very simple hooks that you may want to use: pre_* hooks can veto an
    # action by returning (False, message); post_* hooks are notifications.
    def pre_create(self, data):
        return True, ""

    def post_create(self, ids):
        pass

    def post_single_create(self, request, obj):
        pass

    def pre_read(self, data):
        return True, ""

    def pre_load(self, data):
        return True, ""

    def pre_update(self, request, data):
        return True, ""

    def post_update(self, ids):
        pass

    def post_single_update(self, request, obj):
        pass

    def pre_destroy(self, data):
        return True, ""

    def post_destroy(self, id):
        pass

    def failure(self, msg):
        """Build the standard failure payload understood by the Ext store."""
        return {self.store.success: False, self.store.root: [], self.store.total: 0, self.store.message: msg}
class ExtDirectCRUD(BaseExtDirectCRUD):
    """
    ExtDirectCRUD main class.

    Implements the main CRUD actions.  You shouldn't re-implement these
    methods; see BaseExtDirectCRUD if you need custom behavior.
    """

    # CREATE
    @transaction.commit_manually
    def create(self, request):
        """Create one or many records inside a manually-managed transaction."""
        sid = transaction.savepoint()
        extdirect_data = self.extract_create_data(request, sid)
        ok, msg = self.pre_create(extdirect_data)
        if not ok:
            # NOTE(review): this early return leaves the commit_manually
            # transaction neither committed nor rolled back -- confirm the
            # transaction middleware copes with that.
            return self.failure(msg)
        ids = []
        success = True
        errors = {}
        if isinstance(extdirect_data, list):
            # Batch create: stop at the first invalid record.
            for data in extdirect_data:
                id, errors = self._single_create(request, data)
                if id:
                    ids.append(id)
                else:
                    success = False
                    break
        else:
            id, errors = self._single_create(request, extdirect_data)
            if id:
                ids.append(id)
            else:
                success = False
        if success:
            transaction.commit()
            self.post_create(ids)
            # Echo the created rows back so the client store can sync its ids.
            res = self.store.query(self.model.objects.filter(pk__in=ids), metadata=False)
            res[self.store.message] = self.create_success_msg
            return res
        else:
            transaction.savepoint_rollback(sid)
            if self.show_form_validation:
                err = format_form_errors(errors)
            else:
                err = self.create_failure_msg
            return self.failure(err)

    # READ
    def read(self, request):
        """Query records through the ExtDirectStore."""
        extdirect_data = self.extract_read_data(request)
        ok, msg = self.pre_read(extdirect_data)
        if ok:
            return self.store.query(qs=self.query(**extdirect_data), **extdirect_data)
        else:
            return self.failure(msg)

    # LOAD
    def load(self, request):
        # Almost the same as the 'read' action, but here we call the
        # serializer directly with a fixed metadata layout (different from
        # self.store).  Besides, we assume that the load action should
        # return a single record, so all the query options are not needed.
        meta = self.direct_load_metadata
        extdirect_data = self.extract_load_data(request)
        ok, msg = self.pre_load(extdirect_data)
        if ok:
            queryset = self.model.objects.filter(**extdirect_data)
            res = serialize('extdirect', queryset, meta=meta, single_cast=True)
            return res
        else:
            return self.failure(msg)

    # UPDATE
    @transaction.commit_manually
    def update(self, request):
        """Update one or many records inside a manually-managed transaction."""
        sid = transaction.savepoint()
        extdirect_data = self.extract_update_data(request, sid)
        ok, msg = self.pre_update(request, extdirect_data)
        if not ok:
            # NOTE(review): same dangling-transaction concern as in create().
            return self.failure(msg)
        ids = []
        success = True
        records = extdirect_data
        errors = {}
        if isinstance(records, list):
            # batch update
            for data in records:
                id, errors = self._single_update(request, data)
                if id:
                    ids.append(id)
                else:
                    success = False
                    break
        else:
            # single update
            id, errors = self._single_update(request, records)
            if id:
                ids.append(id)
            else:
                success = False
        if success:
            transaction.commit()
            self.post_update(ids)
            res = self.store.query(self.model.objects.filter(pk__in=ids), metadata=False)
            res[self.store.message] = self.update_success_msg
            return res
        else:
            transaction.savepoint_rollback(sid)
            if self.show_form_validation:
                err = format_form_errors(errors)
            else:
                err = self.update_failure_msg
            return self.failure(err)

    # DESTROY
    def destroy(self, request):
        """Delete the requested record(s) and report success to the store."""
        ids = self.extract_destroy_data(request)
        ok, msg = self.pre_destroy(ids)
        if not ok:
            return self.failure(msg)
        if isinstance(ids, list):
            cs = self.model.objects.filter(pk__in=ids)
        else:
            cs = [self.model.objects.get(pk=ids)]
        for c in cs:
            # Remember the pk before delete() clears it, for the post hook.
            i = c.id
            c.delete()
            self.post_destroy(i)
        return {self.store.success: True,
                self.store.message: self.destroy_success_msg,
                self.store.root: []}
| [
"santiago.videla@gmail.com"
] | santiago.videla@gmail.com |
2b5b65b94f2d7333456398ae7ef653016aedb184 | 26eb5f897974f2f4b7af5c6931b4974257e98917 | /model.py | 79cc5ec6e21ef4a4db28f95c3d12ebb149f67ff6 | [] | no_license | gaurav-singh1998/Cycle_GAN | 340168e5507fd9f415afaa79a068f3b8fd99383d | be373f229062faeb56ce840513ef991830093066 | refs/heads/master | 2022-06-14T22:06:50.806107 | 2020-05-07T07:52:05 | 2020-05-07T07:52:05 | 261,984,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,585 | py | from __future__ import print_function, unicode_literals, absolute_import, division
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow.keras as keras
def _get_norm_layer(norm):
if norm == 'none':
return lambda: lambda x: x
elif norm == 'batch_norm':
return keras.layers.BatchNormalization
elif norm == 'instance_norm':
return tfa.layers.InstanceNormalization
elif norm == 'layer_norm':
return tfa.layers.LayerNormalization
class Pad(keras.layers.Layer):
    """Keras layer wrapping ``tf.pad`` with a fixed padding specification.

    Used to get REFLECT padding, which plain Conv2D padding does not offer.
    """

    def __init__(self, paddings, mode='CONSTANT', constant_values=0, **kwargs):
        super(Pad, self).__init__(**kwargs)
        self.constant_values = constant_values
        self.mode = mode
        self.paddings = paddings

    def call(self, inputs):
        return tf.pad(
            inputs,
            self.paddings,
            mode=self.mode,
            constant_values=self.constant_values,
        )
def ResnetGenerator(input_shape=(256, 256, 3),
                    output_channels=3,
                    dim=64,
                    n_downsamplings=2,
                    n_blocks=9,
                    norm='instance_norm'):
    """Build the CycleGAN ResNet generator as a keras Model.

    Structure: 7x7 stem -> `n_downsamplings` stride-2 convs (doubling `dim`
    each time) -> `n_blocks` residual blocks -> matching transposed convs
    back up -> 7x7 conv to `output_channels` with tanh output.
    """
    Norm = _get_norm_layer(norm)

    def _residual_block(x):
        # `dim` here shadows the outer variable on purpose: the block keeps
        # the channel count of its own input.
        dim = x.shape[-1]
        h = x
        h = Pad([[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')(h)
        h = keras.layers.Conv2D(dim, 3, padding='valid', use_bias=False)(h)
        h = Norm()(h)
        h = keras.layers.ReLU()(h)
        h = Pad([[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT')(h)
        h = keras.layers.Conv2D(dim, 3, padding='valid', use_bias=False)(h)
        h = Norm()(h)
        return keras.layers.add([x, h])

    # Stem: reflect-pad so the 7x7 'valid' conv preserves spatial size.
    h = inputs = keras.Input(shape=input_shape)
    h = Pad([[0, 0], [3, 3], [3, 3], [0, 0]], mode='REFLECT')(h)
    h = keras.layers.Conv2D(dim, 7, padding='valid', use_bias=False)(h)
    h = Norm()(h)
    h = keras.layers.ReLU()(h)

    # Downsampling: halve resolution and double channels each step.
    for _ in range(n_downsamplings):
        dim *= 2
        h = keras.layers.Conv2D(dim, 3, strides=2, padding='same', use_bias=False)(h)
        h = Norm()(h)
        h = keras.layers.ReLU()(h)

    # Residual trunk at the bottleneck resolution.
    for _ in range(n_blocks):
        h = _residual_block(h)

    # Upsampling: mirror of the downsampling stage.
    for _ in range(n_downsamplings):
        dim //= 2
        h = keras.layers.Conv2DTranspose(dim, 3, strides=2, padding='same', use_bias=False)(h)
        h = Norm()(h)
        h = keras.layers.ReLU()(h)

    # Output head: map back to image channels in [-1, 1].
    h = Pad([[0, 0], [3, 3], [3, 3], [0, 0]], mode='REFLECT')(h)
    h = keras.layers.Conv2D(output_channels, 7, padding='valid')(h)
    h = keras.layers.Activation('tanh')(h)

    return keras.Model(inputs=inputs, outputs=h)
def ConvDiscriminator(input_shape=(256, 256, 3),
                      dim=64,
                      n_downsamplings=3,
                      norm='instance_norm'):
    """Build a PatchGAN-style convolutional discriminator.

    A stack of stride-2 4x4 convs (channels capped at ``dim * 8``) followed by
    a stride-1 conv and a final 1-channel conv, so the output is a spatial map
    of real/fake scores rather than a single scalar.
    """
    dim_ = dim
    Norm = _get_norm_layer(norm)

    # First layer: no normalization, just conv + leaky ReLU.
    h = inputs = keras.Input(shape=input_shape)
    h = keras.layers.Conv2D(dim, 4, strides=2, padding='same')(h)
    h = keras.layers.LeakyReLU(alpha=0.2)(h)

    for _ in range(n_downsamplings - 1):
        # Double the channels, but never beyond 8x the base width.
        dim = min(dim * 2, dim_ * 8)
        h = keras.layers.Conv2D(dim, 4, strides=2, padding='same', use_bias=False)(h)
        h = Norm()(h)
        h = keras.layers.LeakyReLU(alpha=0.2)(h)

    # One extra stride-1 conv before the score map.
    dim = min(dim * 2, dim_ * 8)
    h = keras.layers.Conv2D(dim, 4, strides=1, padding='same', use_bias=False)(h)
    h = Norm()(h)
    h = keras.layers.LeakyReLU(alpha=0.2)(h)

    # 1-channel output map (no activation; loss is applied on logits).
    h = keras.layers.Conv2D(1, 4, strides=1, padding='same')(h)

    return keras.Model(inputs=inputs, outputs=h)
class LinearDecay(keras.optimizers.schedules.LearningRateSchedule):
    """Constant learning rate until ``step_decay``, then linear decay to zero.

    if `step` < `step_decay`: use fixed learning rate
    else: linearly decay the learning rate to zero at ``total_steps``
    """

    def __init__(self, initial_learning_rate, total_steps, step_decay):
        super(LinearDecay, self).__init__()
        self._initial_learning_rate = initial_learning_rate
        self._steps = total_steps
        self._step_decay = step_decay
        # Kept in a non-trainable tf.Variable so the current value can be
        # read back (e.g. for logging) after each __call__.
        self.current_learning_rate = tf.Variable(initial_value=initial_learning_rate, trainable=False, dtype=tf.float32)

    def __call__(self, step):
        # Side effect: update the variable, then hand it to the optimizer.
        self.current_learning_rate.assign(tf.cond(
            step >= self._step_decay,
            true_fn=lambda: self._initial_learning_rate * (1 - 1 / (self._steps - self._step_decay) * (step - self._step_decay)),
            false_fn=lambda: self._initial_learning_rate
        ))
        return self.current_learning_rate
if __name__ == '__main__':
    # Smoke test: build both networks and print their layer summaries.
    gen = ResnetGenerator()
    disc = ConvDiscriminator()
    print(gen.summary())
    print(disc.summary())
| [
"noreply@github.com"
] | gaurav-singh1998.noreply@github.com |
cce4258214c9c76a0aa0de00685e225913846b9b | a7dc8f76293a2c60478c95c4720cf39b8556c9e8 | /tests/test_classify.py | 3dc694dcb8ce4841090ee4d127deb0f3d62de74f | [
"MIT"
] | permissive | FarDON/cherry | 8b67f6587a5c13603dfe5047edece218a382e904 | 28da9a05a0bf09f209829e81b8642e3fd76034e8 | refs/heads/master | 2022-11-02T13:13:12.366289 | 2020-06-22T13:56:45 | 2020-06-22T13:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,587 | py | import os
import unittest
import cherry
from unittest import mock
from cherry import classify
from sklearn.exceptions import NotFittedError
class ClassifyTest(unittest.TestCase):
    """Unit tests for cherry.classifyer.Classify.

    Note: stacked @mock.patch decorators are applied bottom-up, so the
    bottom-most patch binds to the first mock parameter.
    """

    def setUp(self):
        pass

    # __init__()
    @mock.patch('cherry.classifyer.Classify._classify')
    @mock.patch('cherry.classifyer.Classify._load_cache')
    def test_init(self, mock_load, mock_classify):
        """__init__ loads the cache for the model, then classifies the text."""
        mock_load.return_value = ('foo', 'bar')
        cherry.classifyer.Classify(model='random', text=['random text'])
        mock_load.assert_called_once_with('random')
        mock_classify.assert_called_once_with(['random text'])

    # _load_cache()
    @mock.patch('cherry.classifyer.Classify._classify')
    @mock.patch('cherry.classifyer.load_cache')
    def test_load_cache(self, mock_load, mock_classify):
        """The module-level load_cache helper is not hit directly by __init__."""
        res = cherry.classifyer.Classify(model='foo', text=['random text'])
        mock_load.assert_not_called()

    def test_classify_with_missing_token(self, mock_load, mock_trans):
        """Transform raising NotFittedError should surface a token error (currently disabled)."""
        mock_object = mock.Mock()
        mock_object.transform.side_effect = NotFittedError()
        mock_load.return_value = mock_object
        # NOTE(review): the assertions below are commented out, so this test
        # currently only exercises the mock wiring without verifying behavior.
        # with self.assertRaises(cherry.exceptions.TokenNotFoundError) as token_error:
        #     res = cherry.classifyer.Classify(model='harmful', text=['random text'])
        # self.assertEqual(
        #     str(token_error.exception),
        #     'Some of the tokens in text never appear in training data')
| [
"wiwindson@outlook.com"
] | wiwindson@outlook.com |
526b96f125d5bacda89a5d3e7492eb6a3bbbd07f | 5f106fc7ca105f6494ccec3f2da4570979c4f4cd | /Algorithms/Implementation/ACMICPCTeam.py | e7d30159cbe3164e5a0089d7c1e0b7a8b3350d36 | [] | no_license | reichunter1656/HackerRank | 1251017ce894a67f2920f1cfdfc35ca341d66d34 | ca1232645f410c41d68352b0b18d8948e729064f | refs/heads/master | 2023-01-07T10:13:12.314419 | 2017-08-08T05:37:54 | 2017-08-08T05:37:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #!/bin/python3
import sys
def findones(maxi):
    """Return the number of set bits (1s) in the binary representation of maxi.

    Equivalent to the original character-counting loop over ``bin(maxi)``
    (the '0b' prefix contains no '1', so counting '1' characters directly
    gives the popcount), but uses the built-in str.count instead of a manual
    loop.
    """
    return bin(maxi).count('1')
# Read the number of attendees (n) and the number of topics (m).
n,m = input().strip().split(' ')
n,m = [int(n),int(m)]
# One binary string per person; character i is '1' when that person knows topic i.
topics_perperson = []
for person in range(n):
    topics_known = str(input())
    topics_perperson.append(topics_known)
count = 0   # NOTE(review): immediately shadowed by the list below; dead assignment
maxi = 0    # best number of topics known by any two-person team so far
# count[x] = number of two-person teams that know exactly x topics.
count = [0 for i in range(0, m + 1)]
i = 0       # NOTE(review): unused
maxi2 = 0   # NOTE(review): unused
# Try every unordered pair: OR their topic masks and count the set bits.
for person in range(n):
    for person2 in range(person + 1, n):
        bitor = int(topics_perperson[person], 2) | int(topics_perperson[person2], 2)
        x = findones(bitor)
        if x > maxi:
            maxi = x
        count[x] = count[x] + 1
# Output: the maximum topic count, then how many teams achieve it.
print (maxi)
print (count[maxi])
| [
"jagdeeshxlnc@gmail.com"
] | jagdeeshxlnc@gmail.com |
0a27993a6e8351ecb41b9f6181bea19c78bf6000 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/lobby/store/StoreTableDataProvider.py | 047518eda41afe48b100901c3b0b9c35381c591b | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 969 | py | # 2017.02.03 21:50:30 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/store/StoreTableDataProvider.py
from gui.Scaleform.framework.entities.DAAPIDataProvider import DAAPIDataProvider
class StoreTableDataProvider(DAAPIDataProvider):
    """Data provider backing the store table; wraps a plain list of rows."""

    def __init__(self):
        super(StoreTableDataProvider, self).__init__()
        self.__list = []

    @property
    def collection(self):
        # Expose the backing list to the DAAPI layer.
        return self.__list

    def buildList(self, dpList):
        # Adopt the supplied list wholesale instead of copying it.
        self.__list = dpList

    def emptyItem(self):
        # No placeholder item is used for empty rows.
        return None

    def clearList(self):
        # Drain in place (any other holder of the list sees it emptied),
        # then drop the reference entirely.
        while self.__list:
            self.__list.pop()
        self.__list = None
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\lobby\store\StoreTableDataProvider.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:50:30 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
52bf11f8be269922508207b1a1e3c7cdd7224b51 | ab6cfc2aedad3de7a04efae4a6105dc893958b9e | /hivwholeseq/patients/get_allele_cocounts.py | 733f7a025ee4e21175e08d194a24584b733f1f04 | [
"MIT"
] | permissive | neherlab/hivwholeseq | 158c0ce590bc67d1d36042c71b8b0afa3e8d8abf | 978ce4060362e4973f92b122ed5340a5314d7844 | refs/heads/master | 2021-01-15T16:48:15.769316 | 2015-09-04T08:33:52 | 2015-09-04T08:33:52 | 49,801,765 | 4 | 3 | null | 2016-01-17T03:43:46 | 2016-01-17T03:43:44 | null | UTF-8 | Python | false | false | 1,994 | py | #!/usr/bin/env python
# vim: fdm=marker
'''
author: Fabio Zanini
date: 20/03/14
content: Get the joint counts at two sites for patient samples, after mapping.
'''
# Modules
import argparse
import numpy as np
import matplotlib.pyplot as plt
from hivwholeseq.patients.samples import load_samples_sequenced as lssp
from hivwholeseq.patients.samples import SamplePat
# Script
if __name__ == '__main__':

    # Parse input args (Python 2 script: note the print statements below).
    parser = argparse.ArgumentParser(description='Get allele cocounts',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    pats_or_samples = parser.add_mutually_exclusive_group(required=True)
    pats_or_samples.add_argument('--patients', nargs='+',
                                 help='Patient to analyze')
    pats_or_samples.add_argument('--samples', nargs='+',
                                 help='Samples to map')
    parser.add_argument('--regions', nargs='+', required=True,
                        help='Fragments to analyze (e.g. F1 F6)')
    parser.add_argument('--verbose', type=int, default=0,
                        help='Verbosity level [0-3]')
    parser.add_argument('--qualmin', type=int, default=30,
                        help='Minimal quality of base to call')

    args = parser.parse_args()
    pnames = args.patients
    samplenames = args.samples
    regions = args.regions
    VERBOSE = args.verbose
    qual_min = args.qualmin
    # NOTE(review): no --plot argument is defined above, so this line raises
    # AttributeError at runtime.
    use_plot = args.plot

    # Restrict the sample table to the requested patients or samples.
    samples = lssp()
    if pnames is not None:
        samples = samples.loc[samples.patient.isin(pnames)]
    elif samplenames is not None:
        samples = samples.loc[samples.index.isin(samplenames)]

    if VERBOSE >= 2:
        print 'samples', samples.index.tolist()

    for region in regions:
        for samplename, sample in samples.iterrows():
            sample = SamplePat(sample)
            if VERBOSE >= 1:
                print region, samplename
            # NOTE(review): fn_out is never defined anywhere in this file;
            # the cocount filename construction appears to be missing.
            cocount = np.load(fn_out)['cocounts']
| [
"fabio.zanini@tuebingen.mpg.de"
] | fabio.zanini@tuebingen.mpg.de |
4a88e8e64c8ec9ae2261ff38f90741b89754d523 | 4352fc601b01e6fc30bee3d227778ce53b51cd59 | /models/BHT_ARIMA/util/all_cfg.py | 90415c4b3fb12f544e38bb9f64a55bb36889d581 | [] | no_license | Master-PLC/Few-Shot-Time-Series-Prediction | 9c1f6c812e7bff7ca08818cb4d9339415f156dd9 | c8682d75e2898172110ce48fd787c61149661519 | refs/heads/master | 2023-08-27T13:32:32.108613 | 2021-11-11T02:17:20 | 2021-11-11T02:17:20 | 425,485,973 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
csv = 'perf_Us_3'
cfg_dict = {
'aux_no_0':
{
'dataset': 'aux_no_0',
'p': 3,
'd': 1,
'q': 1,
'taus': [1533, 8],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'aux_smooth':
{
'dataset': 'aux_smooth',
'p': 3,
'd': 1,
'q': 1,
'taus': [319, 8],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'aux_raw':
{
'dataset': 'aux_raw',
'p': 3,
'd': 1,
'q': 1,
'taus': [2246, 8],
'Rs': [5, 8],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'D1':
{
'dataset': 'D1_qty',
'p': 3,
'd': 1,
'q': 1,
'taus': [607, 10],
'Rs': [20, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'PC_W':
{
'dataset': 'PC_W',
'p': 3,
'd': 1,
'q': 1,
'taus': [9, 10],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'PC_M':
{
'dataset': 'PC_M',
'p': 3,
'd': 1,
'q': 1,
'taus': [9, 10],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'ele40':
{
'dataset': 'ele40',
'p': 3,
'd': 2,
'q': 1,
'taus': [321, 20],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'ele200':
{
'dataset': 'ele_small',
'p': 3,
'd': 2,
'q': 1,
'taus': [321, 20],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'ele_big':
{
'dataset': 'ele_big',
'p': 3,
'd': 2,
'q': 1,
'taus': [321, 20],
'Rs': [5, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 1,
'filename': csv
},
'traffic_40':
{
'dataset': 'traffic_40',
'p': 3,
'd': 2,
'q': 1,
'taus': [228, 5],
'Rs': [20, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'traffic_80':
{
'dataset': 'traffic_small',
'p': 3,
'd': 2,
'q': 1,
'taus': [228, 5],
'Rs': [20, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 3,
'filename': csv
},
'traffic_big':
{
'dataset': 'traffic_big',
'p': 3,
'd': 2,
'q': 1,
'taus': [862, 10],
'Rs': [20, 5],
'k': 15,
'tol': 0.001,
'testsize': 0.1,
'loop_time': 5,
'info': 'v2',
'Us_mode': 1,
'filename': csv
}
}
| [
"18358716036@163.com"
] | 18358716036@163.com |
ec461e4efcf3da5428bd688d03a049eaf481b553 | 60b8c5e048be54f49c28b2c224e86cf4d4629164 | /gluon/setup.py | ec8a8656318e076b7715cb3373652d0ac7778656 | [
"MIT"
] | permissive | kcieslik/imgclsmob | b333d2b0f8a04d15cc1c0b0d38845d1d2923ae26 | d15bc7d4ebc50a31b4ad01cb3ad0e73b8cddbc9a | refs/heads/master | 2020-06-13T06:21:01.744329 | 2019-06-28T16:05:11 | 2019-06-28T16:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gluoncv2',
version='0.0.47',
description='Image classification and segmentation models for Gluon',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/osmr/imgclsmob',
author='Oleg Sémery',
author_email='osemery@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Image Recognition',
],
keywords='machine-learning deep-learning neuralnetwork image-classification mxnet gluon imagenet cifar svhn vgg '
'resnet pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 xdensnet squeezenet '
'squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet pnasnet ror '
'proxylessnas dianet efficientnet image-segmentation voc ade20k cityscapes coco pspnet deeplabv3 fcn',
packages=find_packages(exclude=['others', '*.others', 'others.*', '*.others.*']),
include_package_data=True,
install_requires=['numpy'],
)
| [
"osemery@gmail.com"
] | osemery@gmail.com |
305a5a7277c7e434c04cf95617392b42795b7d06 | 03debe97691578baa8ff1f0cfe1672d55177cab8 | /aanet/nets/resnet.py | 7441801396305145fd82c3c9b16a122a054a7f9c | [] | no_license | Luis-Domenech/stereo-matching-framework | 5567b3bc75264a9fee9ec6d6bee613dedb2e0bbf | 37dd139ec55ee5632db978c7f710eb99fe54a545 | refs/heads/main | 2023-04-26T02:53:56.962893 | 2021-05-09T02:32:56 | 2021-05-09T02:32:56 | 355,167,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,482 | py | import torch.nn as nn
from aanet.nets.deform import DeformBottleneck
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution without bias; padding equals the dilation so the
    spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Pointwise (1x1) convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard ResNet basic block: 3x3 conv -> 3x3 conv with identity shortcut."""
    expansion = 1  # output channels = planes * expansion
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Project the shortcut when the main path changed shape/stride.
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4  # output channels = planes * expansion
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style).
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Project the shortcut when the main path changed shape/stride.
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class AANetFeature(nn.Module):
    """ResNet-style feature extractor for AANet returning a 3-scale pyramid.

    The stem downsamples by 3 (stride-3 7x7 conv), then three residual
    stages produce features at 1/3, 1/6 and 1/12 resolution.  The last
    stage optionally uses deformable bottlenecks (``DeformBottleneck``,
    defined elsewhere in this file) when ``feature_mdconv`` is True.
    """

    def __init__(self, in_channels=32,
                 zero_init_residual=True,
                 groups=1,
                 width_per_group=64,
                 feature_mdconv=True,
                 norm_layer=None):
        super(AANetFeature, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        layers = [3, 4, 6]  # ResNet-40
        # self.inplanes = 64
        self.inplanes = in_channels
        self.dilation = 1
        self.groups = groups
        self.base_width = width_per_group
        # Stride-3 stem instead of the usual stride-2 + maxpool.
        stride = 3
        self.conv1 = nn.Sequential(nn.Conv2d(3, self.inplanes, kernel_size=7, stride=stride,
                                             padding=3, bias=False),
                                   nn.BatchNorm2d(self.inplanes),
                                   nn.ReLU(inplace=True))  # H/3
        self.layer1 = self._make_layer(Bottleneck, in_channels, layers[0])  # H/3
        self.layer2 = self._make_layer(Bottleneck, in_channels * 2, layers[1], stride=2)  # H/6
        block = DeformBottleneck if feature_mdconv else Bottleneck
        self.layer3 = self._make_layer(block, in_channels * 4, layers[2], stride=2)  # H/12
        # Standard ResNet weight init.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of ``blocks`` blocks.

        NOTE: mutates ``self.inplanes`` (and possibly ``self.dilation``),
        so stages must be constructed in order.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Replace striding with dilation for this stage.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match shape of the residual branch.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the list of pyramid features [H/3, H/6, H/12]."""
        x = self.conv1(x)
        layer1 = self.layer1(x)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        return [layer1, layer2, layer3]
| [
"luisfabiandomenech@gmail.com"
] | luisfabiandomenech@gmail.com |
01e8d841776ae8c2ad5bab76dccfa18b88ddf5cd | da266e92895f4cc41a8531120c19a04268ee829f | /Stats_GUI.py | 6adb6fa770efd2c323546c8a643a48b0e979d3ae | [] | no_license | sportsanalytics-world/BueStats | 5b39cc580241e91a5cbe2732a7f0f2ca27112f44 | 82026c43915c79c001b9f96457432f7330ddea9a | refs/heads/master | 2022-04-12T09:36:19.415143 | 2020-03-22T20:39:49 | 2020-03-22T20:39:49 | 272,090,443 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,797 | py | # encoding: utf-8
from tkinter import *
from tkinter import ttk
import GetAllGamesCommon
import GetAllLeagueCommon
import GetAllLeagueBothPlata
import unicodedata
from importlib import reload
import os
import platform
system = platform.system()
class Application(Frame):
    """Main BueStats window: collects scrape parameters from tkinter widgets
    and dispatches to the GetAll* scraper modules.

    Fix over the original: the label texts written by ``change_language``
    now match the texts set by the ``create_*`` widget methods (missing
    colons on "7. Equipos Cola:", "9. Jugadores:" / "9. Players:", and
    "Min. Partidos:" vs "Minimo Partidos:"), so toggling the language twice
    no longer silently alters the UI labels.
    """

    def __init__(self, master):
        """Create all widget holders and build the form."""
        Frame.__init__(self, master)
        self.grid()
        self.TeamLabel = Label(self)
        self.text_team = ''
        self.SeasonLabel = Label(self)
        self.text_season = ''
        self.jFirstLabel = Label(self)
        self.text_jFirst = ''
        self.jLastLabel = Label(self)
        self.text_jLast = ''
        self.tTopLabel = Label(self)
        self.text_equiposTop = ''
        self.tBotLabel = Label(self)
        self.text_equiposBott = ''
        self.divisionLabel = Label(self)
        self.text_division = ''
        self.folderLabel = Label(self)
        self.text_folder = ''
        self.perLabel = Label(self)
        self.text_periodos = ''
        self.confLabel = Label(self)
        self.text_conf = ''
        self.text_chrome = ''
        self.jugadoresLabel = Label(self)
        self.text_jugadores = ''
        self.minPartidosLabel = Label(self)
        self.text_minPartidos = ''
        self.chEquipoLabel = Label(self)
        self.checkEquipo = IntVar(value=1)
        self.chAllLabel = Label(self)
        self.checkAll = IntVar(value=1)
        self.checkProj = IntVar(value=0)
        self.chRankLabel = Label(self)
        self.checkRank = IntVar(value=1)
        self.language = StringVar(value="Castellano")
        self.create_widgets()

    def create_widgets(self):
        """Build every form widget in layout order."""
        self.create_season_widget()
        self.create_rounds_widget()
        self.create_target_widget()
        self.create_against1_widget()
        self.create_against2_widget()
        self.create_division_widget()
        self.create_conf_button()
        self.create_export_widget()
        self.create_folder()
        self.create_periodos_widget()
        self.create_conf_widget()
        self.create_players_widget()
        self.create_boxTeam_widget()
        self.create_boxGames_widget()
        self.create_language_widget()
        self.create_rank_widget()

    def create_season_widget(self):
        """Season entry (e.g. 2018)."""
        self.SeasonLabel['text'] = '2. Temporada:'
        self.SeasonLabel.grid(row=2, column=0, sticky=W)
        self.text_season = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_season.grid(row=2, column=1, columnspan=1, sticky=W)
        self.text_season.insert(END, "2018")

    def create_rounds_widget(self):
        """First/last round entries."""
        self.jFirstLabel['text'] = "4. Primera Jornada:"
        self.jFirstLabel.grid(row=4, column=0, sticky=W)
        self.jLastLabel['text'] = "5. Ultima Jornada:"
        self.jLastLabel.grid(row=5, column=0, sticky=W)
        self.text_jFirst = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_jFirst.grid(row=4, column=1, columnspan=1, sticky=W)
        self.text_jFirst.insert(END, "1")
        self.text_jLast = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_jLast.grid(row=5, column=1, columnspan=1, sticky=W)
        self.text_jLast.insert(END, "7")

    def create_target_widget(self):
        """Target team entry (or 'Liga' for whole-league extraction)."""
        self.TeamLabel['text'] = '1. Equipo:'
        self.TeamLabel.grid(row=1, column=0, sticky=W)
        self.text_team = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_team.grid(row=1, column=1, columnspan=1, sticky=W)
        self.text_team.insert(END, "Araberri")

    def create_against1_widget(self):
        """Comma-separated list of top-table opponents."""
        self.tTopLabel['text'] = "6. Equipos Top:"
        self.tTopLabel.grid(row=6, column=0, sticky=W)
        self.text_equiposTop = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_equiposTop.grid(row=6, column=1, columnspan=1, sticky=W)
        self.text_equiposTop.insert(END, "BETIS,BILBAO,PALMA,MELILLA")

    def create_against2_widget(self):
        """Comma-separated list of bottom-table opponents."""
        self.tBotLabel['text'] = '7. Equipos Cola:'
        self.tBotLabel.grid(row=7, column=0, sticky=W)
        self.text_equiposBott = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_equiposBott.grid(row=7, column=1, columnspan=1, sticky=W)
        self.text_equiposBott.insert(END, "CACERES,CANOE,PRAT,ARABERRI,TAU")

    def create_division_widget(self):
        """Division/category entry (Oro, Plata, EBA, LF2...)."""
        self.divisionLabel['text'] = '3. Categoria:'
        self.divisionLabel.grid(row=3, column=0, sticky=W)
        self.text_division = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_division.grid(row=3, column=1, columnspan=1, sticky=W)
        self.text_division.insert(END, "Oro")

    def create_periodos_widget(self):
        """Interval size (in rounds) entry."""
        self.perLabel['text'] = "8. Intervalos:"
        self.perLabel.grid(row=8, column=0, sticky=W)
        self.text_periodos = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_periodos.grid(row=8, column=1, columnspan=1, sticky=W)
        self.text_periodos.insert(END, "5")

    def create_players_widget(self):
        """Optional player filter entry."""
        self.jugadoresLabel['text'] = "9. Jugadores:"
        self.jugadoresLabel.grid(row=9, column=0, sticky=W)
        self.text_jugadores = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_jugadores.grid(row=9, column=1, columnspan=1, sticky=W)
        self.text_jugadores.insert(END, "")

    def create_boxTeam_widget(self):
        """Checkbox: extract team statistics.  Placement is per-OS."""
        self.chEquipoLabel['text'] = "Extraer Estadisticas de Equipo:"
        self.chEquipoLabel.grid(row=12, column=0, sticky=W)
        self.checkButtonTeam = ttk.Checkbutton()
        self.checkButtonTeam.configure(width=2, var=self.checkEquipo)
        self.checkButtonTeam.grid(row=12, column=0, columnspan=1, sticky=W)
        if system == 'Windows':
            self.checkButtonTeam.place(x=290, y=220)
        else:
            self.checkButtonTeam.place(x=305, y=245)

    def create_boxGames_widget(self):
        """Checkbox: extract all rounds.  Placement is per-OS."""
        self.chAllLabel['text'] = "Extraer Todas las Jornadas:"
        self.chAllLabel.grid(row=11, column=0, sticky=W)
        self.checkButtonGames = ttk.Checkbutton()
        self.checkButtonGames.configure(width=2, var=self.checkAll)
        if system == 'Windows':
            self.checkButtonGames.place(x=290, y=200)
        else:
            self.checkButtonGames.place(x=305, y=225)

    def create_proj_widget(self):
        """Checkbox: extract projections (not wired into create_widgets)."""
        Label(self, text="Extraer Proyecciones:").grid(row=13, column=0, sticky=W)
        self.checkButtonProj = ttk.Checkbutton()
        self.checkButtonProj.configure(width=2, var=self.checkProj)
        self.checkButtonProj.grid(row=11, column=0, columnspan=1, sticky=W)
        self.checkButtonProj.place(x=305, y=265)

    def create_rank_widget(self):
        """Checkbox + minimum-games entry for ranking extraction (per-OS layout)."""
        self.chRankLabel['text'] = "Extraer Rankings:"
        self.chRankLabel.grid(row=13, column=0, sticky=W)
        self.checkButtonRank = ttk.Checkbutton()
        if system == 'Linux':
            self.checkButtonRank.configure(width=12, var=self.checkRank)
        else:
            self.checkButtonRank.configure(width=2, var=self.checkRank)
        self.checkButtonRank.grid(row=13, column=0, sticky=W)
        if system == 'Windows':
            self.checkButtonRank.place(x=290, y=240)
        else:
            self.checkButtonRank.place(x=305, y=265)
        if system == 'Linux':
            self.minPartidosLabel['text'] = "Minimo Partidos:"
            self.minPartidosLabel.place(x=350, y=267)
            self.text_minPartidos = Text(self, width=15, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
            self.text_minPartidos.place(x=460, y=265)
            self.text_minPartidos.insert(END, "")
        elif system == 'Windows':
            self.minPartidosLabel['text'] = "Minimo Partidos:"
            self.minPartidosLabel.place(x=300, y=235)
            self.text_minPartidos = Text(self, width=15, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
            self.text_minPartidos.place(x=410, y=235)
            self.text_minPartidos.insert(END, "")
        else:
            self.minPartidosLabel['text'] = "Minimo Partidos:"
            self.minPartidosLabel.place(x=300, y=265)
            self.text_minPartidos = Text(self, width=15, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
            self.text_minPartidos.place(x=410, y=265)
            self.text_minPartidos.insert(END, "")

    def create_export_widget(self):
        """Main action button: run the extraction."""
        self.button_compare = ttk.Button()
        self.button_compare.configure(text="Extraer Estadisticas")
        self.button_compare.grid(row=18, column=1, sticky=W)
        self.button_compare["command"] = self.save_stats

    def create_conf_button(self):
        """Button to load defaults from the configuration file."""
        self.button_conf = ttk.Button()
        self.button_conf.configure(text="Cargar Configuracion")
        self.button_conf.grid()
        if system == 'Linux':
            self.button_conf.place(x=80, y=405)
        else:
            self.button_conf.place(x=110, y=400)
        self.button_conf["command"] = self.load_conf

    def create_conf_widget(self):
        """Configuration-file path entry."""
        self.confLabel['text'] = "Carpeta Configuracion:"
        self.confLabel.grid(row=16, column=0, sticky=W)
        self.text_conf = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_conf.grid(row=16, column=1, columnspan=1, sticky=W)
        self.text_conf.insert(END, os.path.dirname(os.path.abspath(__file__)) + "/ValoresDefectoPLATA2.txt")

    def create_folder(self):
        """Output folder entry."""
        self.folderLabel['text'] = 'Carpeta Destino:'
        self.folderLabel.grid(row=18, column=0, sticky=W)
        self.text_folder = Text(self, width=53, height=1, wrap=WORD, relief=RIDGE, borderwidth=2)
        self.text_folder.grid(row=18, column=1, columnspan=1, sticky=W)
        self.text_folder.insert(END, os.path.dirname(os.path.abspath(__file__)) + "/Reports")

    def create_language_widget(self):
        """Button toggling the UI language."""
        self.changeLang = ttk.Button()
        self.changeLang.configure(text="Change Language")
        self.changeLang.grid()
        if system == 'Linux':
            self.changeLang.place(x=80, y=375)
        else:
            self.changeLang.place(x=112, y=365)
        self.changeLang["command"] = self.change_language

    def change_language(self):
        """Toggle between Castellano and English and relabel every widget."""
        if self.language.get() == "Castellano":
            self.language = StringVar(value="English")
        else:
            self.language = StringVar(value="Castellano")
        if self.language.get() == "Castellano":
            # Texts must match those set by the create_* methods exactly.
            self.TeamLabel['text'] = '1. Equipo:'
            self.SeasonLabel['text'] = '2. Temporada:'
            self.divisionLabel['text'] = '3. Categoria:'
            self.jFirstLabel['text'] = "4. Primera Jornada:"
            self.jLastLabel['text'] = "5. Ultima Jornada:"
            self.tTopLabel['text'] = "6. Equipos Top:"
            self.tBotLabel['text'] = '7. Equipos Cola:'
            self.perLabel['text'] = "8. Intervalos:"
            self.jugadoresLabel['text'] = "9. Jugadores:"
            self.chEquipoLabel['text'] = "Extraer Estadisticas de Equipo:"
            self.chAllLabel['text'] = "Extraer Todas las Jornadas:"
            self.chRankLabel['text'] = "Extraer Rankings:"
            self.minPartidosLabel['text'] = "Minimo Partidos:"
            self.confLabel['text'] = "Carpeta Configuracion:"
            self.folderLabel['text'] = 'Carpeta Destino:'
            self.button_conf.configure(text="Cargar Configuracion")
            self.button_compare.configure(text="Extraer Estadisticas")
            self.changeLang.configure(text="Change Language")
        else:
            self.TeamLabel['text'] = '1. Team:'
            self.SeasonLabel['text'] = '2. Season:'
            self.divisionLabel['text'] = '3. Division:'
            self.jFirstLabel['text'] = "4. First Round:"
            self.jLastLabel['text'] = "5. Last Round:"
            self.tTopLabel['text'] = "6. Top Teams:"
            self.tBotLabel['text'] = '7. Bottom Teams:'
            self.perLabel['text'] = "8. Intervals:"
            self.jugadoresLabel['text'] = "9. Players:"
            self.chEquipoLabel['text'] = "Extract Team Stats:"
            self.chAllLabel['text'] = "Extract Stats from All Games:"
            self.chRankLabel['text'] = "Extract Player Rankings:"
            self.minPartidosLabel['text'] = "Minimum Games:"
            self.confLabel['text'] = "Configuration Folder:"
            self.folderLabel['text'] = 'Output Folder:'
            self.button_conf.configure(text="Load Configuration")
            self.button_compare.configure(text="Extract Statistics")
            self.changeLang.configure(text="Cambiar Idioma")

    def load_conf(self):
        """Populate the form from the key=value configuration file.

        The str(bytes)[2:-3] slicing strips the "b'" prefix and "\\n'"
        suffix of the Python 3 bytes repr — fragile but preserved as-is.
        """
        f = open(str(unicodedata.normalize('NFKD', self.text_conf.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[2:-3], "r")
        text = f.read()
        parts = text.split('\n')[:-1]
        for part in range(0, len(parts)):
            if len(parts[part]) > 0:
                parts[part] = parts[part].split('=')[1]
        self.text_season.configure(state="normal")
        self.text_season.delete('1.0', END)
        self.text_season.insert(END, parts[1])
        self.text_periodos.configure(state="normal")
        self.text_periodos.delete('1.0', END)
        self.text_periodos.insert(END, parts[7])
        self.text_division.configure(state="normal")
        self.text_division.delete('1.0', END)
        self.text_division.insert(END, parts[2])
        self.text_equiposBott.configure(state="normal")
        self.text_equiposBott.delete('1.0', END)
        self.text_equiposBott.insert(END, parts[6])
        self.text_equiposTop.configure(state="normal")
        self.text_equiposTop.delete('1.0', END)
        self.text_equiposTop.insert(END, parts[5])
        self.text_team.configure(state="normal")
        self.text_team.delete('1.0', END)
        self.text_team.insert(END, parts[0])
        self.text_jFirst.configure(state="normal")
        self.text_jFirst.delete('1.0', END)
        self.text_jFirst.insert(END, parts[3])
        self.text_jLast.configure(state="normal")
        self.text_jLast.delete('1.0', END)
        self.text_jLast.insert(END, parts[4])
        self.text_jugadores.configure(state="normal")
        self.text_jugadores.delete('1.0', END)
        self.text_jugadores.insert(END, parts[8])
        self.button_compare.configure(state="normal")

    def save_stats(self):
        """Read every form field, map the division to its FEB group id and
        dispatch to the matching scraper module."""
        self.text_chrome = os.path.dirname(os.path.abspath(__file__)) + '/chromedriver'
        if self.checkAll.get() == 1:
            bAll = 1
        else:
            bAll = 0
        if self.checkEquipo.get() == 1:
            bTeam = 1
        else:
            bTeam = 0
        if self.checkProj.get() == 1:
            bProj = True
        else:
            bProj = False
        # Slice offsets stripping the bytes-repr wrapper (see load_conf).
        iBenIn = 2
        iEndIn = -3
        if iEndIn != 0:
            season = str(unicodedata.normalize('NFKD', self.text_season.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn]
            division = str(unicodedata.normalize('NFKD', self.text_division.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '').upper()[iBenIn:iEndIn]
        else:
            season = str(unicodedata.normalize('NFKD', self.text_season.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')
            division = str(unicodedata.normalize('NFKD', self.text_division.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '').upper()
        bUnaFase = False
        divSplit = division.split(',')[0]
        try:
            groupSplit = division.split(',')[1]
        except:
            pass
        # Map division/group to the FEB web competition group id.
        if division == 'ORO':
            groupFeb = '1'
        elif division == 'DIA':
            groupFeb = '4'
        elif divSplit == 'PLATA':
            bUnaFase = False
            if len(division.split(',')) == 3:
                if int(season) > 2017:
                    if division.split(',')[2] == 'A1':
                        groupFeb = '2'
                    else:
                        groupFeb = '18'
                else:
                    groupFeb = '2'
            else:
                bUnaFase = True
                if int(season) > 2017:
                    if division.split(',')[1] == 'ESTE':
                        groupFeb = '2'
                    else:
                        groupFeb = '18'
                else:
                    groupFeb = '2'
        elif divSplit == 'EBA':
            if groupSplit[0] == 'A':
                groupFeb = '3'
            elif groupSplit[0] == 'B':
                groupFeb = '5'
            elif groupSplit[0] == 'C':
                if int(season) > 2018:
                    if groupSplit[1] == 'A':
                        groupFeb = '6'
                    elif groupSplit[1] == 'B':
                        groupFeb = '46'
                else:
                    groupFeb = '6'
            elif groupSplit[0] == 'D':
                groupFeb = '7'
            elif groupSplit[0] == 'E':
                groupFeb = '8'
        elif divSplit == 'LF2':
            groupFeb = '9'
        sLang = self.language.get()
        html_doc = "http://competiciones.feb.es/Estadisticas/Calendario.aspx?g=" + groupFeb + "&t=" + season + "&"
        if iEndIn != 0:
            targetTeam = str(unicodedata.normalize('NFKD', self.text_team.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '').upper()[iBenIn:iEndIn]
            againstTeams1 = str(unicodedata.normalize('NFKD', self.text_equiposTop.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn].split(',')
            againstTeams2 = str(unicodedata.normalize('NFKD', self.text_equiposBott.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn].split(',')
            jorFirst = int(str(unicodedata.normalize('NFKD', self.text_jFirst.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn])
            jorLast = int(str(unicodedata.normalize('NFKD', self.text_jLast.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn])
            sDir = str(unicodedata.normalize('NFKD', self.text_folder.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn]
            sPeriodos = str(unicodedata.normalize('NFKD', self.text_periodos.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn]
            sPlayers = str(unicodedata.normalize('NFKD', self.text_jugadores.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn]
            sMinGames = str(unicodedata.normalize('NFKD', self.text_minPartidos.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')[iBenIn:iEndIn]
        else:
            targetTeam = str(unicodedata.normalize('NFKD', self.text_team.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '').upper()
            againstTeams1 = str(unicodedata.normalize('NFKD', self.text_equiposTop.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '').split(',')
            againstTeams2 = str(unicodedata.normalize('NFKD', self.text_equiposBott.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '').split(',')
            jorFirst = int(str(unicodedata.normalize('NFKD', self.text_jFirst.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', ''))
            jorLast = int(str(unicodedata.normalize('NFKD', self.text_jLast.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', ''))
            sDir = str(unicodedata.normalize('NFKD', self.text_folder.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')
            sPeriodos = str(unicodedata.normalize('NFKD', self.text_periodos.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')
            sPlayers = str(unicodedata.normalize('NFKD', self.text_jugadores.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')
            sMinGames = str(unicodedata.normalize('NFKD', self.text_minPartidos.get("1.0", END)).encode('ascii', 'ignore')).replace('\n', '')
        if targetTeam == 'Liga' or targetTeam == 'LIGA':
            # Whole-league extraction.
            if division == 'ORO' or division == 'DIA' or division == 'ENDESA':
                GetAllLeagueCommon.extractStatisticsAllLeague(html_doc, 'Liga'+division.replace(',','-'), season, jorFirst, jorLast, division, sDir, self.text_chrome, bTeam, sPlayers, bProj, division, '', sMinGames, sLang, False)
            elif divSplit == 'PLATA':
                if bUnaFase == False:
                    GetAllLeagueBothPlata.extractStatisticsPlataAll(html_doc, targetTeam, season, jorFirst, jorLast, division.split(',')[1], division.split(',')[2], sDir, self.text_chrome, bTeam, sPlayers, bProj, division, '', sMinGames, sLang, False)
                    reload(GetAllLeagueBothPlata)
                else:
                    GetAllLeagueCommon.extractStatisticsAllLeague(html_doc, 'Liga'+division.replace(',','-'), season, jorFirst, jorLast, division.split(',')[1], sDir, self.text_chrome, bTeam, sPlayers, bProj, '', 'Fase1', sMinGames, sLang, False)
            elif divSplit == 'EBA':
                GetAllLeagueCommon.extractStatisticsAllLeague(html_doc, 'Liga'+division.replace(',','-'), season, jorFirst, jorLast, division.split(',')[1], sDir, self.text_chrome, bTeam, sPlayers, bProj, division, '', sMinGames, sLang, False)
            elif divSplit == 'LF2':
                GetAllLeagueCommon.extractStatisticsAllLeague(html_doc, 'Liga'+division.replace(',','-'), season, jorFirst, jorLast, division.split(',')[1], sDir, self.text_chrome, bTeam, sPlayers, bProj, division, '', sMinGames, sLang, False)
            reload(GetAllLeagueCommon)
        else:
            # Per-team extraction; accepts a comma-separated list of teams.
            targetTeams = targetTeam.replace(' ', '').split(',')
            for k in range(0, len(targetTeams)):
                if sDir[-1] == '/':
                    sDir = sDir[:-1]
                if division == 'ORO' or division == 'DIA':
                    GetAllGamesCommon.extractStatistics(html_doc, targetTeams[k], againstTeams1, againstTeams2, season, jorFirst, jorLast, division, sDir, sPeriodos, self.text_chrome, bAll, bTeam, sPlayers, bProj, division, '', sMinGames, sLang)
                elif divSplit == 'PLATA':
                    if bUnaFase == False:
                        GetAllLeagueBothPlata.extractStatisticsPlata(html_doc, targetTeams[k], againstTeams1, againstTeams2, season, jorFirst, jorLast, division.split(',')[1], division.split(',')[2], sDir, sPeriodos, self.text_chrome, bAll, bTeam, sPlayers, bProj, division, '', sMinGames, sLang)
                        reload(GetAllLeagueBothPlata)
                    else:
                        GetAllGamesCommon.extractStatistics(html_doc, targetTeams[k], againstTeams1, againstTeams2, season, jorFirst, jorLast, division.split(',')[1], sDir, sPeriodos, self.text_chrome, bAll, bTeam, sPlayers, bProj, '', 'Fase1', sMinGames, sLang)
                elif divSplit == 'EBA':
                    GetAllGamesCommon.extractStatistics(html_doc, targetTeams[k], againstTeams1, againstTeams2, season, jorFirst, jorLast, division.split(',')[1], sDir, sPeriodos, self.text_chrome, bAll, bTeam, sPlayers, bProj, division, '', sMinGames, sLang)
                elif divSplit == 'LF2':
                    GetAllGamesCommon.extractStatistics(html_doc, targetTeams[k], againstTeams1, againstTeams2, season, jorFirst, jorLast, division.split(',')[1], sDir, sPeriodos, self.text_chrome, bAll, bTeam, sPlayers, bProj, division, '', sMinGames, sLang)
                reload(GetAllGamesCommon)
# Module-level GUI bootstrap: build the root window, mount the Application
# frame and start the Tk event loop (blocks until the window closes).
# NOTE(review): this runs on import; consider restoring the
# `if __name__ == '__main__':` guard that was commented out.
root = Tk()
root.title("BueStats (Adrià Arbués-Sangüesa, @arbues6)")
root.geometry("950x450")
root.columnconfigure(0, weight=1)
root.resizable(0, 0)  # fixed-size window
app = Application(root)
app.mainloop()
| [
"noreply@github.com"
] | sportsanalytics-world.noreply@github.com |
f39bd365db767a8011a2eb4aa13b47ed5c0ac42e | 923d035a4762a19b30d5900db91143a83837ae70 | /ichnaea/data/station.py | 8546d75f06412bf83af0c62c790ab8f2638f4774 | [
"Apache-2.0"
] | permissive | voolitels/ichnaea | d5d5da34cb30b3e0c85675e32dab3972cc31d7b0 | bd0350fcba9efb0bad3957309ed3a471ae07e41b | refs/heads/master | 2021-01-17T14:21:16.056481 | 2015-11-10T16:38:22 | 2015-11-10T16:57:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,009 | py | from collections import defaultdict
import numpy
from ichnaea.constants import (
PERMANENT_BLOCKLIST_THRESHOLD,
TEMPORARY_BLOCKLIST_DURATION,
)
from ichnaea.data.base import DataTask
from ichnaea.geocalc import (
centroid,
circle_radius,
distance,
)
from ichnaea.geocode import GEOCODER
from ichnaea.models import (
encode_cellarea,
Cell,
CellBlocklist,
StatCounter,
StatKey,
WifiShard,
)
from ichnaea.models.constants import (
CELL_MAX_RADIUS,
WIFI_MAX_RADIUS,
)
from ichnaea import util
class CellRemover(DataTask):
    """Delete cell stations and queue their containing areas for update."""

    def __init__(self, task, session, pipe):
        super(CellRemover, self).__init__(task, session)
        self.pipe = pipe
        self.area_queue = self.task.app.data_queues['update_cellarea']

    def __call__(self, cell_keys):
        """Delete the given cells; return the number of removed rows."""
        removed = 0
        touched_areas = set()
        for key in cell_keys:
            removed += Cell.querykey(self.session, key).delete()
            touched_areas.add(encode_cellarea(
                key.radio, key.mcc, key.mnc, key.lac))
        if touched_areas:
            # Re-estimate every area that lost a cell.
            self.area_queue.enqueue(list(touched_areas),
                                    pipe=self.pipe, json=False)
        return removed
class StationUpdater(DataTask):
    """Base class for station position updaters; subclasses set the
    station type and the maximum plausible station radius."""

    # Cap on the weight pre-existing observations carry in the
    # weighted centroid update.
    MAX_OLD_OBSERVATIONS = 1000

    # Configured by subclasses.
    max_dist_meters = None
    station_type = None

    def __init__(self, task, session, pipe):
        super(StationUpdater, self).__init__(task, session)
        self.pipe = pipe
        self.updated_areas = set()
        self.utcnow = util.utcnow()
        self.today = self.utcnow.date()

    def stat_count(self, action, count, reason=None):
        """Emit a tagged observation counter; no-op for non-positive counts."""
        if count <= 0:
            return
        tags = ['type:%s' % self.station_type]
        if reason:
            tags.append('reason:%s' % reason)
        self.stats_client.incr(
            'data.observation.%s' % action,
            count,
            tags=tags)

    def __call__(self, batch=10):
        raise NotImplementedError()
class CellUpdater(StationUpdater):
max_dist_meters = CELL_MAX_RADIUS
station_type = 'cell'
def __init__(self, task, session, pipe, remove_task=None):
super(CellUpdater, self).__init__(task, session, pipe)
self.remove_task = remove_task
self.data_queue = self.task.app.data_queues['update_cell']
def emit_statcounters(self, obs, stations):
day = self.today
StatCounter(StatKey.cell, day).incr(self.pipe, obs)
StatCounter(StatKey.unique_cell, day).incr(self.pipe, stations)
def emit_stats(self, added, dropped):
self.stat_count('insert', added)
for reason, count in dropped.items():
self.stat_count('drop', dropped[reason], reason=reason)
def add_area_update(self, key):
self.updated_areas.add(encode_cellarea(
key.radio, key.mcc, key.mnc, key.lac))
def queue_area_updates(self):
data_queue = self.task.app.data_queues['update_cellarea']
data_queue.enqueue(list(self.updated_areas),
pipe=self.pipe, json=False)
def blocklisted_station(self, block):
age = self.utcnow - block.time
temporary = age < TEMPORARY_BLOCKLIST_DURATION
permanent = block.count >= PERMANENT_BLOCKLIST_THRESHOLD
if temporary or permanent:
return (True, block.time, block)
return (False, block.time, block)
def blocklisted_stations(self, station_keys):
blocklist = {}
for block in CellBlocklist.iterkeys(
self.session, list(station_keys)):
blocklist[block.hashkey()] = self.blocklisted_station(block)
return blocklist
    def blocklist_stations(self, moving):
        """Blocklist the given moving stations and schedule their removal.

        ``moving`` is an iterable of (station_key, block) pairs where
        ``block`` is an existing CellBlocklist row or None.  Existing rows
        are bumped in place (time refreshed, count incremented); missing
        ones are batch-inserted; finally the stations themselves are
        deleted via the remove task.
        """
        moving_keys = []
        new_block_values = []
        for station_key, block in moving:
            moving_keys.append(station_key)
            if block:
                # Refresh an existing blocklist entry (row is attached to
                # the session, so mutation is enough).
                block.time = self.utcnow
                block.count += 1
            else:
                block_key = CellBlocklist.to_hashkey(station_key)
                new_block_values.append(dict(
                    time=self.utcnow,
                    count=1,
                    **block_key.__dict__
                ))
        if new_block_values:
            # do a batch insert of new blocks
            stmt = CellBlocklist.__table__.insert(
                mysql_on_duplicate='time = time'  # no-op
            )
            # but limit the batch depending on each model
            ins_batch = CellBlocklist._insert_batch
            for i in range(0, len(new_block_values), ins_batch):
                batch_values = new_block_values[i:i + ins_batch]
                self.session.execute(stmt.values(batch_values))
        if moving_keys:
            self.stats_client.incr(
                'data.station.blocklist',
                len(moving_keys),
                tags=['type:%s' % self.station_type,
                      'action:add',
                      'reason:moving'])
            # Delete the moving stations themselves asynchronously.
            self.remove_task.delay(moving_keys)
    def new_station_values(self, station, station_key,
                           first_blocked, observations):
        """Compute insert/update values for one station from new observations.

        Returns a 3-tuple ``(moving, insert_values, update_values)``:
        the first element is True if the station was found to be moving;
        the second is either None or a dict of values if the station is new
        and should result in a table insert; the third is either None or a
        dict of values if the station did exist and should be updated.
        """
        obs_length = len(observations)
        obs_positions = numpy.array(
            [(obs.lat, obs.lon) for obs in observations],
            dtype=numpy.double)
        obs_lat, obs_lon = centroid(obs_positions)
        values = {
            'modified': self.utcnow,
        }
        values.update(station_key.__dict__)
        if self.station_type == 'cell':
            # pass on extra psc column which is not actually part
            # of the stations hash key
            values['psc'] = observations[-1].psc
        created = self.utcnow
        if station is None:
            if first_blocked:
                # if the station did previously exist, retain at least the
                # time it was first put on a blocklist as the creation date
                created = first_blocked
            values.update({
                'created': created,
                'radius': 0,
                'samples': 0,
            })
        if (station is not None and
                station.lat is not None and station.lon is not None):
            # Fold the station's current estimate and bounding box into the
            # observation positions; NaNs mark unknown bounds and are
            # ignored by nanmax/nanmin below.
            obs_positions = numpy.append(obs_positions, [
                (station.lat, station.lon),
                (numpy.nan if station.max_lat is None else station.max_lat,
                 numpy.nan if station.max_lon is None else station.max_lon),
                (numpy.nan if station.min_lat is None else station.min_lat,
                 numpy.nan if station.min_lon is None else station.min_lon),
            ], axis=0)
            existing_station = True
        else:
            values['lat'] = obs_lat
            values['lon'] = obs_lon
            existing_station = False
        max_lat, max_lon = numpy.nanmax(obs_positions, axis=0)
        min_lat, min_lon = numpy.nanmin(obs_positions, axis=0)
        # calculate sphere-distance from opposite corners of
        # bounding box containing current location estimate
        # and new observations; if too big, station is moving
        box_dist = distance(min_lat, min_lon, max_lat, max_lon)
        # TODO: If we get a too large box_dist, we should not create
        # a new station record with the impossibly big distance,
        # so moving the box_dist > self.max_dist_meters here
        if existing_station:
            if box_dist > self.max_dist_meters:
                # Signal a moving station and return early without updating
                # the station since it will be deleted by caller momentarily
                return (True, None, None)
            # limit the maximum weight of the old station estimate
            old_weight = min(station.samples,
                             self.MAX_OLD_OBSERVATIONS)
            new_weight = old_weight + obs_length
            # Weighted average of old estimate and new observation centroid.
            values['lat'] = ((station.lat * old_weight) +
                             (obs_lat * obs_length)) / new_weight
            values['lon'] = ((station.lon * old_weight) +
                             (obs_lon * obs_length)) / new_weight
        # increase total counter
        if station is not None:
            values['samples'] = station.samples + obs_length
        else:
            values['samples'] = obs_length
        # update max/min lat/lon columns
        values['min_lat'] = float(min_lat)
        values['min_lon'] = float(min_lon)
        values['max_lat'] = float(max_lat)
        values['max_lon'] = float(max_lon)
        # give radius estimate between extreme values and centroid
        values['radius'] = circle_radius(
            values['lat'], values['lon'],
            max_lat, max_lon, min_lat, min_lon)
        if station is None:
            # return new values
            return (False, values, None)
        else:
            # return updated values, remove station from session
            self.session.expunge(station)
            return (False, None, values)
    def __call__(self, batch=10):
        """Dequeue up to ``batch`` cell observations and fold them into the
        ``Cell`` table.

        Returns a two-tuple ``(stations touched, moving stations)``.
        Moving stations (observations too far apart) are blocklisted
        instead of updated.
        """
        all_observations = self.data_queue.dequeue(batch=batch)
        drop_counter = defaultdict(int)
        added = 0
        new_stations = 0
        # Group the raw observations by their station hash key.
        station_obs = defaultdict(list)
        for obs in all_observations:
            station_obs[Cell.to_hashkey(obs)].append(obs)
        if not station_obs:
            return (0, 0)
        # Load the existing station rows for all observed keys in one query.
        stations = {}
        for station in Cell.iterkeys(self.session, list(station_obs.keys())):
            stations[station.hashkey()] = station
        blocklist = self.blocklisted_stations(station_obs.keys())
        new_station_values = []
        changed_station_values = []
        moving_stations = set()
        for station_key, observations in station_obs.items():
            blocked, first_blocked, block = blocklist.get(
                station_key, (False, None, None))
            if not any(observations):
                continue
            if blocked:
                # Drop observations for blocklisted stations.
                drop_counter['blocklisted'] += len(observations)
                continue
            station = stations.get(station_key, None)
            if station is None and not first_blocked:
                # We discovered an actual new never before seen station.
                new_stations += 1
            moving, new_values, changed_values = self.new_station_values(
                station, station_key, first_blocked, observations)
            if moving:
                moving_stations.add((station_key, block))
            else:
                added += len(observations)
                if new_values:
                    new_station_values.append(new_values)
                if changed_values:
                    changed_station_values.append(changed_values)
            # track potential updates to dependent areas
            self.add_area_update(station_key)
        if new_station_values:
            # do a batch insert of new stations
            stmt = Cell.__table__.insert(
                mysql_on_duplicate='psc = psc' # no-op
            )
            # but limit the batch depending on each model
            ins_batch = Cell._insert_batch
            for i in range(0, len(new_station_values), ins_batch):
                batch_values = new_station_values[i:i + ins_batch]
                self.session.execute(stmt.values(batch_values))
        if changed_station_values:
            # do a batch update of changed stations
            ins_batch = Cell._insert_batch
            for i in range(0, len(changed_station_values), ins_batch):
                batch_values = changed_station_values[i:i + ins_batch]
                self.session.bulk_update_mappings(Cell, batch_values)
        if self.updated_areas:
            self.queue_area_updates()
        if moving_stations:
            self.blocklist_stations(moving_stations)
        self.emit_stats(added, drop_counter)
        self.emit_statcounters(added, new_stations)
        # If the queue is still full, reschedule ourselves shortly.
        if self.data_queue.enough_data(batch=batch): # pragma: no cover
            self.task.apply_async(
                kwargs={'batch': batch},
                countdown=2,
                expires=10)
        return (len(stations) + len(new_station_values), len(moving_stations))
class WifiUpdater(StationUpdater):
    """Station updater for WiFi networks, operating on one table shard."""
    # Maximum bounding-box diagonal before a station counts as "moving".
    max_dist_meters = WIFI_MAX_RADIUS
    station_type = 'wifi'
    def __init__(self, task, session, pipe, shard_id=None):
        """``shard_id`` selects both the data queue and the table shard."""
        super(WifiUpdater, self).__init__(task, session, pipe)
        self.shard_id = shard_id
        # Each shard has its own queue, e.g. ``update_wifi_0``.
        queue_name = '%s_%s' % ('update_wifi', shard_id)
        self.data_queue = self.task.app.data_queues[queue_name]
def emit_stats(self, stats_counter, drop_counter):
day = self.today
StatCounter(StatKey.wifi, day).incr(
self.pipe, stats_counter['obs'])
StatCounter(StatKey.unique_wifi, day).incr(
self.pipe, stats_counter['new_station'])
self.stat_count('insert', stats_counter['obs'])
for reason, count in drop_counter.items():
self.stat_count('drop', drop_counter[reason], reason=reason)
if stats_counter['block']:
self.stats_client.incr(
'data.station.blocklist',
stats_counter['block'],
tags=['type:%s' % self.station_type,
'action:add',
'reason:moving'])
    def station_values(self, station_key, shard_station, observations):
        """
        Return two-tuple of status, value dict where status is one of:
        `new`, `new_moving`, `moving`, `changed`.

        ``shard_station`` is the existing DB row for ``station_key`` or
        ``None``. The value dict is suitable for a bulk insert/update of
        the shard table. A station is considered "moving" when the
        bounding box around all known positions exceeds
        ``self.max_dist_meters``.
        """
        # cases:
        # we always get a station key and observations
        # 0. observations disagree
        # 0.a. no shard station, return new_moving
        # 0.b. shard station, return moving
        # 1. no shard station
        # 1.a. obs agree -> return new
        # 2. shard station
        # 2.a. obs disagree -> return moving
        # 2.b. obs agree -> return changed
        created = self.utcnow
        values = {
            'mac': station_key,
            'modified': self.utcnow,
        }
        obs_length = len(observations)
        obs_positions = numpy.array(
            [(obs.lat, obs.lon) for obs in observations],
            dtype=numpy.double)
        obs_new_lat, obs_new_lon = centroid(obs_positions)
        obs_max_lat, obs_max_lon = numpy.nanmax(obs_positions, axis=0)
        obs_min_lat, obs_min_lon = numpy.nanmin(obs_positions, axis=0)
        obs_box_dist = distance(obs_min_lat, obs_min_lon,
                                obs_max_lat, obs_max_lon)
        if obs_box_dist > self.max_dist_meters:
            # the new observations are already too far apart
            if not shard_station:
                values.update({
                    'created': created,
                    'block_first': self.today,
                    'block_last': self.today,
                    'block_count': 1,
                })
                return ('new_moving', values)
            else:
                block_count = shard_station.block_count or 0
                # Wipe the position estimate and bump the block counters.
                values.update({
                    'lat': None,
                    'lon': None,
                    'max_lat': None,
                    'min_lat': None,
                    'max_lon': None,
                    'min_lon': None,
                    'radius': None,
                    'region': shard_station.region,
                    'samples': None,
                    'source': None,
                    'block_first': shard_station.block_first or self.today,
                    'block_last': self.today,
                    'block_count': block_count + 1,
                })
                return ('moving', values)
        if shard_station is None:
            # totally new station, only agreeing observations
            radius = circle_radius(
                obs_new_lat, obs_new_lon,
                obs_max_lat, obs_max_lon, obs_min_lat, obs_min_lon)
            values.update({
                'created': created,
                'lat': obs_new_lat,
                'lon': obs_new_lon,
                'max_lat': float(obs_max_lat),
                'min_lat': float(obs_min_lat),
                'max_lon': float(obs_max_lon),
                'min_lon': float(obs_min_lon),
                'radius': radius,
                'region': GEOCODER.region(obs_new_lat, obs_new_lon),
                'samples': obs_length,
                'source': None,
            })
            return ('new', values)
        else:
            # shard_station + new observations
            # Extend the observed positions with the station's stored
            # centroid and bounding-box corners (NaN where unknown, so
            # nanmax/nanmin simply ignore them).
            positions = numpy.append(obs_positions, [
                (numpy.nan if shard_station.lat is None
                 else shard_station.lat,
                 numpy.nan if shard_station.lon is None
                 else shard_station.lon),
                (numpy.nan if shard_station.max_lat is None
                 else shard_station.max_lat,
                 numpy.nan if shard_station.max_lon is None
                 else shard_station.max_lon),
                (numpy.nan if shard_station.min_lat is None
                 else shard_station.min_lat,
                 numpy.nan if shard_station.min_lon is None
                 else shard_station.min_lon),
            ], axis=0)
            max_lat, max_lon = numpy.nanmax(positions, axis=0)
            min_lat, min_lon = numpy.nanmin(positions, axis=0)
            box_dist = distance(min_lat, min_lon, max_lat, max_lon)
            if box_dist > self.max_dist_meters:
                # shard_station + disagreeing observations
                block_count = shard_station.block_count or 0
                values.update({
                    'lat': None,
                    'lon': None,
                    'max_lat': None,
                    'min_lat': None,
                    'max_lon': None,
                    'min_lon': None,
                    'radius': None,
                    'region': shard_station.region,
                    'samples': None,
                    'source': None,
                    'block_first': shard_station.block_first or self.today,
                    'block_last': self.today,
                    'block_count': block_count + 1,
                })
                return ('moving', values)
            else:
                # shard_station + agreeing observations
                # Weighted average of old centroid and new observations,
                # capping the old station's influence at
                # MAX_OLD_OBSERVATIONS samples.
                if shard_station.lat is None or shard_station.lon is None:
                    old_weight = 0
                else:
                    old_weight = min((shard_station.samples or 0),
                                     self.MAX_OLD_OBSERVATIONS)
                new_lat = ((obs_new_lat * obs_length +
                            (shard_station.lat or 0.0) * old_weight) /
                           (obs_length + old_weight))
                new_lon = ((obs_new_lon * obs_length +
                            (shard_station.lon or 0.0) * old_weight) /
                           (obs_length + old_weight))
                samples = (shard_station.samples or 0) + obs_length
                radius = circle_radius(
                    new_lat, new_lon, max_lat, max_lon, min_lat, min_lon)
                region = shard_station.region
                if (region and not GEOCODER.in_region(
                        new_lat, new_lon, region)):
                    # reset region if it no longer matches
                    region = None
                if not region:
                    region = GEOCODER.region(new_lat, new_lon)
                values.update({
                    'lat': new_lat,
                    'lon': new_lon,
                    'max_lat': float(max_lat),
                    'min_lat': float(min_lat),
                    'max_lon': float(max_lon),
                    'min_lon': float(min_lon),
                    'radius': radius,
                    'region': region,
                    'samples': samples,
                    'source': None,
                    # use the exact same keys as in the moving case
                    'block_first': shard_station.block_first,
                    'block_last': shard_station.block_last,
                    'block_count': shard_station.block_count,
                })
                return ('changed', values)
        return (None, None) # pragma: no cover
def _shard_observations(self, observations):
sharded_obs = {}
for obs in observations:
if obs is not None:
shard = WifiShard.shard_model(obs.mac)
if shard not in sharded_obs:
sharded_obs[shard] = defaultdict(list)
sharded_obs[shard][obs.mac].append(obs)
return sharded_obs
    def _query_stations(self, shard, shard_values):
        """Load existing rows for the given MACs from one shard table.

        Returns ``(blocklist, stations)`` where ``stations`` maps MAC to
        row and ``blocklist`` maps MAC to the row's blocked status as of
        today. MACs without a row appear in neither dict.
        """
        macs = list(shard_values.keys())
        rows = (self.session.query(shard)
                .filter(shard.mac.in_(macs))).all()
        blocklist = {}
        stations = {}
        for row in rows:
            stations[row.mac] = row
            blocklist[row.mac] = row.blocked(today=self.today)
        return (blocklist, stations)
    def _update_shard(self, shard, shard_values,
                      drop_counter, stats_counter):
        """Process all observations destined for one shard table.

        Computes per-station status via :meth:`station_values`, then
        performs batched inserts (new / new_moving) and bulk updates
        (changed / moving). ``drop_counter`` and ``stats_counter`` are
        mutated in place.
        """
        # Buckets of value dicts keyed by station status.
        new_data = defaultdict(list)
        blocklist, stations = self._query_stations(shard, shard_values)
        for station_key, observations in shard_values.items():
            if blocklist.get(station_key, False):
                # Drop observations for blocklisted stations.
                drop_counter['blocklisted'] += len(observations)
                continue
            shard_station = stations.get(station_key, None)
            if shard_station is None:
                # We discovered an actual new never before seen station.
                stats_counter['new_station'] += 1
            status, result = self.station_values(
                station_key, shard_station, observations)
            new_data[status].append(result)
            if status in ('moving', 'new_moving'):
                stats_counter['block'] += 1
            else:
                stats_counter['obs'] += len(observations)
        if new_data['new']:
            # do a batch insert of new stations
            stmt = shard.__table__.insert(
                mysql_on_duplicate='samples = samples' # no-op
            )
            self.session.execute(stmt.values(new_data['new']))
        if new_data['new_moving']:
            # do a batch insert of new moving stations
            stmt = shard.__table__.insert(
                mysql_on_duplicate='block_count = block_count' # no-op
            )
            self.session.execute(stmt.values(new_data['new_moving']))
        if new_data['moving'] or new_data['changed']:
            # do a batch update of changing and moving stations
            self.session.bulk_update_mappings(
                shard, new_data['changed'] + new_data['moving'])
    def __call__(self, batch=10):
        """Dequeue up to ``batch`` WiFi observations and apply them,
        shard by shard; reschedule the task if the queue stays full.
        """
        sharded_obs = self._shard_observations(
            self.data_queue.dequeue(batch=batch))
        if not sharded_obs:
            return
        drop_counter = defaultdict(int)
        stats_counter = defaultdict(int)
        for shard, shard_values in sharded_obs.items():
            self._update_shard(shard, shard_values,
                               drop_counter, stats_counter)
        self.emit_stats(stats_counter, drop_counter)
        # More data waiting: schedule a follow-up run for this shard.
        if self.data_queue.enough_data(batch=batch): # pragma: no cover
            self.task.apply_async(
                kwargs={'batch': batch, 'shard_id': self.shard_id},
                countdown=2,
                expires=10)
| [
"hanno@hannosch.eu"
] | hanno@hannosch.eu |
ce043bf80c658866371ed8409b88d5e6d18b4d21 | 9d4ff6b56b399b5c486944e195e6a1a0ebd489d1 | /SDA_python_basics/tasks/task_9.py | 9cea88c7d140099d4ef06ec0cebfba9f8824ef3f | [] | no_license | damianpud/sda_tasks | 92014faa1058920c9890d59e9e94fe6883667890 | 30f78b8b2faec306e6de5c817a4f2c8ef73cda84 | refs/heads/master | 2022-12-21T09:56:14.608238 | 2020-09-23T14:48:45 | 2020-09-23T14:48:45 | 289,966,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | def sum_up(number):
wyn = 0
for x in range(number + 1):
wyn += x
return wyn
if __name__ == '__main__':
number = int(input('Podaj liczbe: '))
result = sum_up(number)
print(result) | [
"damianpud@gmail.com"
] | damianpud@gmail.com |
9649c62f55bb141c3e815d7ae15718dd19ade3dc | f8be761637814550276932dd3170ab5deec3b129 | /views/basic.py | 2c34d10741507f58b418cdd83be06bc742b30a82 | [] | no_license | jixiaolong/django-cms-note | db83d965c7389d9652f0f5e98453f08d0d5ea1de | 7268dbe95fd8597816eae78ab868a82f4b08cd4c | refs/heads/master | 2021-01-11T18:19:38.576666 | 2013-10-17T10:14:42 | 2013-10-17T10:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.conf import settings
from django.shortcuts import render_to_response
from django.template import RequestContext
def home(request):
return render_to_response("home.html",context_instance=RequestContext(request)) | [
"jixiaolongcom@sina.cn"
] | jixiaolongcom@sina.cn |
92e8eeda1d3934e17697a68fe86657b5a410da66 | 20d59cfb6290809431ca919b1ce4afe15630aa0a | /BookProblems/Chapter3/Eulerian Path(BA3G)/BA3G.py | 838b37eee7bb8072626cfa72cba422d2005ad8ab | [] | no_license | NimaFathi/Bioinformatics-Rosalind-Textbook | ab5d0a62ac74d17a01d9c4d4aeebd0e4ccc96b60 | c6722f51739b54204b02ab2cbc214928369e1b57 | refs/heads/main | 2023-02-12T00:04:28.436607 | 2021-01-11T05:53:57 | 2021-01-11T05:53:57 | 325,874,914 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,088 | py | class Node:
    def __init__(self, value):
        # Count of incoming edges pointing at this node.
        self.inDegree = 0
        # Count of outgoing edges leaving this node.
        self.outDegree = 0
        # Vertex label carried by this node.
        self.val = value
class Graph:
    """Directed graph for reconstructing an Eulerian path (Rosalind BA3G).

    ``self.graph`` maps each Node to the list of its successor Nodes;
    parallel edges are repeated entries. Degree counters on the Node
    objects are kept in sync by add_edge/remove_edge.
    """
    def __init__(self, vertices):
        # Expected number of vertices (informational only).
        self.v = vertices
        # Adjacency list: Node -> list of destination Nodes.
        self.graph = {}
    def add_node(self, value):
        """Create a Node for ``value``, register it and return it."""
        node = Node(value)
        self.graph[node] = []
        return node
    def add_edge(self, source: "Node", destination: "Node"):
        """Add a directed edge and update both degree counters."""
        self.graph[source].append(destination)
        destination.inDegree += 1
        source.outDegree += 1
    def get_outDegree(self, source: "Node"):
        """Return the out-degree, refreshing the counter from the
        adjacency list as a side effect."""
        source.outDegree = len(self.graph[source])
        return len(self.graph[source])
    def remove_edge(self, source: "Node", destination: "Node"):
        """Remove one directed edge and update both degree counters.

        Bug fix: the original decremented ``source.inDegree`` instead of
        ``source.outDegree``, so removals did not mirror ``add_edge``.
        """
        self.graph[source].remove(destination)
        destination.inDegree -= 1
        source.outDegree -= 1
    def print_graph(self):
        """Print the adjacency list; vertices without successors are skipped."""
        for key in self.graph:
            if len(self.graph.get(key)) == 0:
                continue
            print(key.val, end=" -> ")
            for val_index in range(len(self.graph.get(key))):
                if val_index == 0:
                    print(self.graph.get(key)[val_index].val, end="")
                else:
                    print(",", self.graph.get(key)[val_index].val, end="")
            print(',,,,', key.inDegree)
            print()
    def eulerian_path(self):
        """Print an Eulerian path using Hierholzer's algorithm.

        Temporarily adds an edge from the sink (inDegree - outDegree == 1)
        to the source (outDegree - inDegree == 1) to close the circuit,
        walks it by consuming ``outDegree`` counters, then prints the
        circuit rotated to start at the source.
        NOTE(review): assumes exactly one sink/source pair exists;
        ``sink``/``source`` are unbound otherwise -- confirm inputs.
        """
        for key in self.graph:
            if key.inDegree - key.outDegree == 1:
                sink = key
            elif key.outDegree - key.inDegree == 1:
                source = key
        self.add_edge(sink, source)
        curr = []
        circuit = []
        curr.append(source)
        current_node = source
        while len(curr):
            if current_node.outDegree > 0:
                curr.append(current_node)
                next_node = self.graph[current_node][current_node.outDegree - 1]
                current_node.outDegree -= 1
                current_node = next_node
            else:
                circuit.append(current_node)
                current_node = curr[-1]
                curr.pop()
        # Remember where the source sits in the (reversed) circuit.
        for i in range(0, len(circuit)):
            if circuit[i] == source:
                index = i
                continue
        self.remove_edge(sink, source)
        for i in range(index, index - len(circuit) + 1, -1):
            print(circuit[i].val, end="")
            if i != index - len(circuit) + 2:
                print("->", end="")
if __name__ == '__main__':
    # Read the adjacency list from stdin until EOF.
    # Each line looks like: "<node> -> <dest1>,<dest2>,..."
    lines = []
    while True:
        try:
            x = input()
            lines.append(x)
        except:
            # NOTE(review): bare except treats EOF and any other error
            # the same -- `except EOFError` would be safer.
            break
    # Find the largest source-node id to size the vertex list.
    y = -1
    for line in lines:
        x = line.split()
        if y < int(x[0]):
            y = int(x[0])
    # Used y to find number of vertices
    graph = Graph(y + 1)
    nodes = [Node] * (y + 1)
    for i in range(0, y + 1):
        node = graph.add_node(i)
        nodes[i] = node
    # Second pass: wire up the edges listed after the "->" token.
    for line in lines:
        parts = line.split()
        destinations = parts[2].split(",")
        for dest in destinations:
            graph.add_edge(nodes[int((parts[0]))], nodes[int(dest)])
    graph.eulerian_path()
| [
"nimafathinimafathi@gmail.com"
] | nimafathinimafathi@gmail.com |
3185ab9b1cda0366a299487c74cbd02fbe6495de | c873aed518de878b9953ebd6fe0a455a789d221d | /examples/amalgamation/transformer_ka/light_detr/models/seq_dropout/__init__.py | c118da0d0eb0ec960dfe0790dfcd507f5fb42dc3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zju-vipa/KamalEngine | 6a9aa461054b9194b204ec41de5d35b211796a76 | 7b77676d22dc88ca262941788cb8bb1d973690c0 | refs/heads/master | 2023-08-17T05:56:52.210411 | 2023-08-03T07:41:55 | 2023-08-03T07:41:55 | 195,203,179 | 99 | 26 | Apache-2.0 | 2023-08-03T07:41:56 | 2019-07-04T08:41:43 | Python | UTF-8 | Python | false | false | 6,506 | py | import copy
from typing import Optional, Dict, Any
import torch
import torch.nn as nn
from . import seq_dropout_functional as F
class SeqDropoutBase(nn.Module):
    """Base class for sequence-dropout modules.

    Subclasses compute a permutation that selects/reorders sequence
    elements; this base applies it to ``src``, the padding mask and the
    positional encoding. Tensors are laid out sequence-first
    (``src``/``pos``: dim 0 is sequence, dim 1 is batch -- inferred from
    the ``perm_dim``/``batch_dim`` arguments below).
    """
    def __init__(self, **kwargs):
        super().__init__()
        # Last permutation applied, kept for later inspection.
        self.permute = None
    def _record_permute(self, permute: torch.Tensor):
        # Clone so later in-place changes by the caller don't alias it.
        self.permute = permute.clone()
    def set_info(self, info: Dict[str, Any]):
        # Side-channel info (teacher features, epoch counters, ...)
        # consumed by subclasses such as SeqSimDropout.
        self.info = info
    def forward(
        self,
        permute: torch.Tensor,
        src: torch.Tensor,
        src_mask: Optional[torch.Tensor] = None,
        src_key_padding_mask: Optional[torch.BoolTensor] = None,
        pos: Optional[torch.Tensor] = None
    ) -> Dict[str, Optional[torch.Tensor]]:
        # record permute tensor
        self._record_permute(permute)
        src = F.apply_batch_permute(src, permute, perm_dim=0, batch_dim=1)
        if src_mask is not None:
            raise NotImplementedError("`src_mask` is not supported")
        if src_key_padding_mask is not None:
            src_key_padding_mask = F.apply_batch_permute(src_key_padding_mask, permute, perm_dim=-1, batch_dim=0)
            src_len = src.shape[0]
            # A fully-masked row would produce NaNs in multihead attention.
            if torch.any(src_key_padding_mask.sum(dim=-1) == src_len):
                err = "`src_key_padding_mask` have row(s) with all `True`, which will lead to `Nan` in multihead attention"
                raise RuntimeError(err)
        if pos is not None:
            pos = F.apply_batch_permute(pos, permute, perm_dim=0, batch_dim=1)
        ret = {
            "src": src,
            "src_mask": src_mask,
            "src_key_padding_mask": src_key_padding_mask,
            "pos": pos
        }
        return ret
class SeqIdentity(SeqDropoutBase):
    """No-op sequence dropout: passes all inputs through unchanged,
    recording an identity permutation."""
    def __init__(self, **kwargs):
        super().__init__()
    def forward(
        self,
        src: torch.Tensor,
        src_mask: Optional[torch.Tensor] = None,
        src_key_padding_mask: Optional[torch.BoolTensor] = None,
        pos: Optional[torch.Tensor] = None
    ) -> Dict[str, Optional[torch.Tensor]]:
        # Record an identity permute so self.permute stays meaningful.
        self.permute = F.get_identity_permute(src.shape[0], device=src.device)
        ret = {
            "src": src,
            "src_mask": src_mask,
            "src_key_padding_mask": src_key_padding_mask,
            "pos": pos
        }
        return ret
class SeqEqualDropout(SeqDropoutBase):
    """
    Drop input sequence to 1 / n_parts, e.g.
    x1 x2 x3 x4 | y1 y2 y3 y4 ==> x1 x3 | y2 y4
    """
    def __init__(self, num_parts: int, **kwargs):
        super().__init__()
        # Keep every num_parts-th element (see class docstring example).
        self.num_parts = num_parts
    def forward(
        self,
        src: torch.Tensor,
        src_mask: Optional[torch.Tensor],
        src_key_padding_mask: Optional[torch.BoolTensor],
        pos: Optional[torch.Tensor]
    ) -> Dict[str, Optional[torch.Tensor]]:
        permute = F.get_equal_dropout_permute(src.shape[0], self.num_parts, device=src.device)
        return super().forward(permute=permute, src=src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
class SeqEqualDropout_v2(SeqDropoutBase):
    """
    Drop input sequence to arbitrary percent.
    """
    def __init__(self, keep_percent: float = 0.5, **kwargs):
        super().__init__()
        # Fraction of sequence elements to keep (0.0 - 1.0).
        self.keep_percent = keep_percent
    def forward(
        self,
        src: torch.Tensor,
        src_mask: Optional[torch.Tensor],
        src_key_padding_mask: Optional[torch.BoolTensor],
        pos: Optional[torch.Tensor]
    ) -> Dict[str, Optional[torch.Tensor]]:
        seq_len = src.shape[0]
        # Round to the nearest whole number of elements.
        num_keep = round(seq_len * self.keep_percent)
        permute = F.get_equal_dropout_permute_v2(seq_len, num_keep, device=src.device)
        return super().forward(permute=permute, src=src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
class SeqRandomDropout(SeqDropoutBase):
    """
    Random permute input sequence
    """
    def __init__(self, keep_percent: float = 0.5, **kwargs):
        super().__init__()
        # Fraction of sequence elements to keep (0.0 - 1.0).
        self.keep_percent = keep_percent
    def forward(
        self,
        src: torch.Tensor,
        src_mask: Optional[torch.Tensor],
        src_key_padding_mask: Optional[torch.BoolTensor],
        pos: Optional[torch.Tensor]
    ) -> Dict[str, Optional[torch.Tensor]]:
        seq_len = src.shape[0]
        num_keep = round(seq_len * self.keep_percent)
        # Per-sample random selection: the helper takes the batch size.
        permute = F.get_random_dropout_permute(
            seq_len,
            batch_size=src.shape[1],
            n_keep=num_keep,
            device=src.device
        )
        return super().forward(permute=permute, src=src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
class SeqSimDropout(SeqDropoutBase):
    """
    Similarity-based sequence dropout.

    The permutation is derived from a reference sequence: during
    training, the teacher features from ``self.info["feat_t"]``
    (optionally blended with the student's ``src`` as training
    progresses); at eval time, ``src`` itself. (The original docstring
    said "Random permute input sequence" -- a copy-paste from
    SeqRandomDropout.)
    """
    def __init__(
        self,
        num_parts: int,
        merge_by_epoch: bool = False,
        norm: bool = False,
        **kwargs
    ):
        super().__init__()
        self.num_parts = num_parts
        # When True, linearly shift from teacher to student features
        # over the course of training (see forward()).
        self.merge_by_epoch = merge_by_epoch
        # Passed through to the similarity-permute helper.
        self.norm = norm
    def forward(
        self,
        src: torch.Tensor,
        src_mask: Optional[torch.Tensor],
        src_key_padding_mask: Optional[torch.BoolTensor],
        pos: Optional[torch.Tensor],
    ) -> Dict[str, Optional[torch.Tensor]]:
        if self.training:
            # Teacher features supplied earlier via set_info().
            seq = self.info["feat_t"]
            seq = torch.cat(seq, dim=0)
            # merge teacher and student seq
            if self.merge_by_epoch:
                ep = self.info["epoch"]
                total_ep = self.info["total_epoch"]
                p = ep / total_ep
                seq = p * src + (1 - p) * seq
        else:
            seq = src
        permute = F.get_sim_dropout_permute(
            seq,
            self.num_parts,
            norm=self.norm
        )
        return super().forward(permute=permute, src=src, src_mask=src_mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
# Registry mapping config names to sequence-dropout implementations.
__REGISTERED_SEQ_DROPOUT__ = {
    "identity": SeqIdentity,
    "equal_drop": SeqEqualDropout,
    "equal_drop_v2": SeqEqualDropout_v2,
    "random_drop": SeqRandomDropout,
    "sim_drop": SeqSimDropout
}


def build_seq_dropout(model_cfg: Dict[str, Any]) -> Optional[SeqDropoutBase]:
    """Instantiate the sequence-dropout module named in ``model_cfg``.

    Reads ``model_cfg["seq_drop"]`` (returns None when absent), injects
    ``num_parts`` from the detr config, and dispatches on the ``name``
    key via the registry above.
    """
    if "seq_drop" not in model_cfg:
        return None
    # Deep-copy *before* touching the config: the original injected
    # "num_parts" into model_cfg["seq_drop"] first, mutating the
    # caller's configuration dict as a side effect.
    seq_dropout_cfg = copy.deepcopy(model_cfg["seq_drop"])
    seq_dropout_cfg["num_parts"] = model_cfg["detr"].get("num_proj", 1)
    name = seq_dropout_cfg.pop("name")
    return __REGISTERED_SEQ_DROPOUT__[name](**seq_dropout_cfg)
| [
"865490109@qq.com"
] | 865490109@qq.com |
571fa838209f48b5067bd64c92e95fc0f681e718 | 45fa095fd1e7b1fe4d53717d9890ff9dfdaebfec | /PayPalSandBox/app.py | 8659c206797328e10be2dbada6146d84777431af | [] | no_license | Lowens1996/PersonalProject | 859c1d290cf7509e28e350fedd92033d5660981e | 4d606f5af423b30c5f2cb6063ede8c4b1d2b96b1 | refs/heads/master | 2021-09-24T08:34:50.167625 | 2018-10-06T00:19:17 | 2018-10-06T00:19:17 | 117,971,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | from flask import Flask, render_template, jsonify, request
import os
import paypalrestsdk
# Configure the PayPal REST SDK for the sandbox environment.
# NOTE(review): credentials are hard-coded in source. These are sandbox
# keys, but they should still come from environment variables or a
# config file kept out of version control.
paypalrestsdk.configure({
    "mode": "sandbox", # sandbox or live
    "client_id": "ASNdXIYOglgkncwMqPFadYDjhKmlET-iFnb8JY_EuBol59DIZGcX0vYrRl3lRhw2nHKAXE5ACBMIGlP9", # merchant account
    "client_secret": "EJ6ZHrvWB5mU_Te6RSNwIaFra6PNUzk63OY-0jmaUazUKuLlZJu1IGu5dhinmbNdWXT4TKq4-4VdV1mj"})
app = Flask(__name__)
# Random per-process session key; sessions won't survive a restart.
app.secret_key = os.urandom(24)
@app.route('/')
def display_form() -> 'html':
    """Serve the checkout page containing the PayPal button."""
    return render_template('index.html')
@app.route('/payment', methods=['POST'])
def payment():
    """Create a fixed 100.00 GBP sandbox payment and return its id.

    The client-side PayPal script POSTs here to start the flow; the
    returned ``paymentID`` is later sent back to /executePayment.
    """
    payment = paypalrestsdk.Payment({
        "intent": "sale",
        "payer": {
            "payment_method": "paypal"},
        "redirect_urls": {
            "return_url": "http://localhost:3000/payment/execute",
            "cancel_url": "http://localhost:3000/"},
        "transactions": [{
            "item_list": {
                "items": [{
                    "name": "item",
                    "sku": "item",
                    "price": "100.00",
                    "currency": "GBP",
                    "quantity": 1}]},
            "amount": {
                "total": "100.00",
                "currency": "GBP"},
            "description": "This is the payment transaction description."}]})
    # payment.create() calls the PayPal API; errors are only printed.
    if payment.create():
        print('payment success')
    else:
        print(payment.error)
    return jsonify({'paymentID': payment.id})
@app.route('/executePayment', methods=['POST'])
def execute():
    """Execute a previously created payment with the payer's approval.

    Bug fix: the original returned None on every path, which makes
    Flask raise "view function did not return a valid response"
    (HTTP 500) even when the payment succeeded. Now returns a small
    JSON status object either way.
    """
    payment = paypalrestsdk.Payment.find(request.form['paymentID'])
    if payment.execute({'payer_id': request.form['payerID']}):
        print('payment executed')
        return jsonify({'success': True})
    else:
        print(payment.error)
        return jsonify({'success': False, 'error': payment.error})
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
"noreply@github.com"
] | Lowens1996.noreply@github.com |
490e7173f262ccd60dae3f62d13c8f4762f461ee | 80ba45bad62c77615d416bf6f6231ed41d7e010a | /onto.py | 660e736295324e10849b44d32231ec9ac900354b | [] | no_license | saitarunreddy/Ontology-based-Learning | b2237bed55632d4ec792415572fbdc056d708a21 | 4eff8422bdb67a40a9171843496e5bd1fe69e7b9 | refs/heads/master | 2020-12-02T18:11:56.690976 | 2017-07-28T04:37:40 | 2017-07-28T04:37:40 | 96,492,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,319 | py | import pronto
#import json
import MySQLdb
import re
# Python 2 script: loads an OWL ontology and inserts parent/child
# "is a" relations into the MySQL table SecOnto.Ontology.
ont = pronto.Ontology('pizza.owl')
# print(ont.obo)
# print(ont['Attribute'].children.children)
db = MySQLdb.connect(host="localhost", # your host, usually localhost
                     user="root", # your username
                     passwd="tarun", # your password
                     db="SecOnto") # name of the data base
# you must create a Cursor object. It will let you execute all the queries you need
cursor = db.cursor()
cursor.execute("truncate Ontology;")
listid = []
# NOTE(review): shadows the builtin `dict`; rename when touching logic.
dict = {}
Relation = 'is a'
# Collect the id part (before ":") of every term that has children.
for term in ont:
    if term.children:
        a = str(term).split(":")
        b = a[0]
        listid.append(b[1:])
for x in xrange(0,len(listid)):
    key = listid[x]
    # Split CamelCase ids into space-separated words.
    Concept1 = re.sub(r"\B([A-Z])", r" \1", key)
    #
    # NOTE(review): this branch looks broken -- `listid[x]` is a string,
    # so `listid[x].children` raises AttributeError, and `dict.get(key)`
    # holds a string (not a list) so `.append` would also fail. It is
    # only reachable if `listid` contains duplicates -- confirm intent.
    if dict.has_key(key):
        child = ont[listid[x].children].split(":")
        ch = child[0]
        dict.get(key).append(ch[1:])
    else:
        childs = ont[listid[x]].children
        all_childs = ""
        # Insert one (child, 'is a', parent) row per child term.
        for y in childs:
            z = str(y).split(":")
            f = z[0]
            Concept2 = re.sub(r"\B([A-Z])", r" \1", z[0])
            query = "insert into Ontology (Concept1, Relation, Concept2) values (%s,%s,%s)"
            cursor.execute(query, (Concept2.strip('<'),Relation,Concept1))
            db.commit()
            #
            all_childs += f[1:]+","
        dict[key] = all_childs
print dict
| [
"noreply@github.com"
] | saitarunreddy.noreply@github.com |
a1514ff0aae5fff6ba6124c662459a1592b7a132 | 55c8fd9ce0c5bb147cbdb69274873b93b35356fc | /pathGeneration-v2/code-v2/full_eval.py | ca61e1c985c92a33e67e67192299fb8498954df2 | [] | no_license | WOW5678/pathGeneration | b4143bbbc2be686ee011d24af46d57d2cee88f06 | 88f31b4f30750307fa7f5072e7faa2f959a6d0c0 | refs/heads/master | 2020-08-06T17:46:22.075128 | 2019-11-15T12:38:07 | 2019-11-15T12:38:07 | 213,097,008 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,528 | py | import numpy as np
from sklearn.metrics import precision_recall_fscore_support, average_precision_score, \
roc_auc_score, precision_score, recall_score
thres = 0.5
def f1_score(preds, labels, thres, average='micro'):
    '''Return (precision, recall, F1) for a batch of binary predictions.

    ``preds`` and ``labels`` are array-likes of 0/1 indicator values of
    the same shape. ``thres`` is unused here -- thresholding now happens
    in the caller -- but the parameter is kept so existing call sites
    keep working. ``average`` is passed through to sklearn
    (``'micro'``/``'macro'``/...). ``warn_for=()`` silences sklearn's
    undefined-metric warnings for empty classes.
    '''
    p, r, f, _ = precision_recall_fscore_support(labels, preds, average=average,
                                                 warn_for=())
    return p, r, f
def auc_pr(probs, labels, average='micro'):
    '''Precision integrated over all thresholds (area under the precision-recall curve)'''
    if average == 'macro' or average is None:
        # Drop label columns that are all-ones or all-zeros: the metric
        # is undefined for them and would poison the macro average.
        sums = labels.sum(0)
        nz_indices = np.logical_and(sums != labels.shape[0], sums != 0)
        probs = probs[:, nz_indices]
        labels = labels[:, nz_indices]
    return average_precision_score(labels, probs, average=average)
def auc_roc(probs, labels, average='micro'):
    '''Area under the ROC curve'''
    if average == 'macro' or average is None:
        # Drop degenerate (all-ones / all-zeros) label columns, for
        # which ROC AUC is undefined.
        sums = labels.sum(0)
        nz_indices = np.logical_and(sums != labels.shape[0], sums != 0)
        probs = probs[:, nz_indices]
        labels = labels[:, nz_indices]
    # print('labels:',labels)
    # print('probs:',probs)
    return roc_auc_score(labels, probs, average=average)
def precision_at_k(probs, labels, k, average='micro'):
    '''Precision over the top-``k`` highest-scoring labels per sample.'''
    # Column indices of the k largest probabilities in each row
    # (argpartition: unordered within the top-k, which is fine here).
    indices = np.argpartition(-probs, k-1, axis=1)[:, :k]
    # ``int`` instead of ``np.int``: the alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (AttributeError on modern NumPy).
    preds = np.zeros(probs.shape, dtype=int)
    preds[np.arange(preds.shape[0])[:, np.newaxis], indices] = 1
    return precision_score(labels, preds, average=average)
def recall_at_k(probs, labels, k, average='micro'):
    '''Recall over the top-``k`` highest-scoring labels per sample.'''
    # Column indices of the k largest probabilities in each row.
    indices = np.argpartition(-probs, k-1, axis=1)[:, :k]
    # ``int`` instead of ``np.int``: the alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (AttributeError on modern NumPy).
    preds = np.zeros(probs.shape, dtype=int)
    preds[np.arange(preds.shape[0])[:, np.newaxis], indices] = 1
    return recall_score(labels, preds, average=average)
def full_evaluate(pred,probs, gold, thres=0.5):
    """Compute the full metric suite for multi-label predictions.

    ``pred``: thresholded 0/1 predictions; ``probs``: raw scores used for
    the @k metrics; ``gold``: 0/1 ground-truth matrix. Returns a 12-tuple:
    (micro_p, macro_p, micro_r, macro_r, micro_f1, macro_f1,
     micro_auc_roc, macro_auc_roc, p@8, p@40, r@8, r@40).
    """
    # pred = np.array(pred)
    # gold = np.array(gold)
    #print(pred)
    micro_p, micro_r, micro_f1 = f1_score(pred, gold, thres, average='micro')
    macro_p,macro_r,macro_f1= f1_score(pred, gold, thres, average='macro')
    # micro_auc_pr= auc_pr(pred, gold, average='micro')
    # macro_auc_pr= auc_pr(pred, gold, average='macro')
    # NOTE(review): AUC-ROC is computed on thresholded ``pred`` rather
    # than ``probs`` -- confirm this is intentional.
    micro_auc_roc= auc_roc(pred, gold, average='micro')
    macro_auc_roc= auc_roc(pred, gold, average='macro')
    precision_8= precision_at_k(probs, gold, 8, average='micro')
    precision_40= precision_at_k(probs, gold, 40, average='micro')
    recall_8= recall_at_k(probs, gold, 8, average='micro')
    recall_40=recall_at_k(probs, gold, 40, average='micro')
    return micro_p,macro_p,micro_r,macro_r,micro_f1,macro_f1,micro_auc_roc,macro_auc_roc,precision_8,precision_40,recall_8,recall_40
def jaccrad(predList, referList):
    """Jaccard similarity between the unique elements of two sequences.

    Both inputs are deduplicated into sets; the result is
    ``|intersection| / |union|`` as a float in [0, 1]. Returns 0.0 when
    both inputs are empty (the original loop-based version raised
    ZeroDivisionError there). The misspelled name is kept because
    existing callers use it.
    """
    pred_set = set(predList)
    refer_set = set(referList)
    union_size = len(pred_set | refer_set)
    if union_size == 0:
        return 0.0
    return len(pred_set & refer_set) / union_size
"noreply@github.com"
] | WOW5678.noreply@github.com |
d656a1ac663178e18029435ffcf12e75e28bc44f | f40edc47f2bf93514bfd7cfe3c06174d4c22ae4a | /4_4_FFT_analysis/FFT.py | d75fc4240a5a5afe8a33d1556008724ab230bce2 | [] | no_license | Hexzel/mbed04 | c8c6b2567bd4b31b600b2f22b69b5edf6da055bf | 27ada40eb1cb603ef31e1777cf2e5fb5434c7326 | refs/heads/master | 2023-03-21T17:53:19.482916 | 2021-03-17T06:38:44 | 2021-03-17T06:38:44 | 346,040,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | import matplotlib.pyplot as plt
import numpy as np
import serial
import time
# Read Fs samples from a serial-connected board, then plot the signal
# and its one-sided FFT magnitude spectrum.
Fs = 128.0; # sampling rate
Ts = 1.0/Fs; # sampling interval
t = np.arange(0,1,Ts) # time vector; create Fs samples between 0 and 1.0 sec.
y = np.arange(0,1,Ts) # signal vector; create Fs samples
n = len(y) # length of the signal
k = np.arange(n)
T = n/Fs
frq = k/T # a vector of frequencies; two sides frequency range
frq = frq[range(int(n/2))] # one side frequency range
serdev = '/dev/ttyACM0'
s = serial.Serial(serdev)
# Overwrite the placeholder signal with one sample per serial line.
for x in range(0, int(Fs)):
    line=s.readline() # Read an echo string from B_L4S5I_IOT01A terminated with '\n'
    # print line
    y[x] = float(line)
Y = np.fft.fft(y)/n*2 # fft computing and normalization
Y = Y[range(int(n/2))] # remove the conjugate frequency parts
# Top subplot: time domain; bottom subplot: magnitude spectrum.
fig, ax = plt.subplots(2, 1)
ax[0].plot(t,y)
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Amplitude')
ax[1].plot(frq,abs(Y),'r') # plotting the spectrum
ax[1].set_xlabel('Freq (Hz)')
ax[1].set_ylabel('|Y(freq)|')
plt.show()
s.close()
| [
"a0972180845@gmail.com"
] | a0972180845@gmail.com |
1858a114a9d82906af66ed0196d28e122827ea7c | 6d09adf8d47e36fe985a07ebe5fe5c77df9b8d45 | /flask-sla/demo/modules/admin/views.py | 2d7f4ed73ed0e44dd03e1df50734efb79a004988 | [] | no_license | kingAlexanders/python_practice | 7abc4fe13eb5ede77cced4b59d55bd1116e89ff7 | 92fe7cb60f9e31b74a5ce89997e477ea761d5410 | refs/heads/master | 2021-09-14T18:17:49.604723 | 2018-05-17T03:36:58 | 2018-05-17T03:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | from flask_admin import BaseView, expose
from flask import Blueprint
adminRoute = Blueprint(
'admin',
__name__,
url_prefix='/admin',
template_folder='templates',
static_folder='static'
)
class SlaView(BaseView):
    """Flask-Admin view class rendering the custom SLA page.

    Both routes currently render the same template.
    """
    @expose('/')
    def index(self):
        return self.render('sla.html')
    @expose('/sla')
    def second_page(self):
        return self.render('sla.html')
"itimor@techdog.com"
] | itimor@techdog.com |
c89619a5118a3b9d949bc62c1c2cf72ec70512f3 | 218a0c6b4af878af47862822d3b5744ae0268d6e | /Day1/program1.py | 45c0cd79123893efb1855f8f6f2a67333ae61a06 | [] | no_license | gauravgupta99/tathastu_week_of_code | 20a7810aed6ea1f8ab62e9c250d8d1b7826c4a12 | 4146103420e3badd1b50bdc0daf2e8c6fee838eb | refs/heads/master | 2022-07-19T08:30:15.869438 | 2020-05-26T17:44:54 | 2020-05-26T17:44:54 | 266,273,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | name=input("Enter the name")
# Prompt for the remaining student details (``name`` is read just
# above) and echo everything back, one value per line.
branch=input("Enter the branch")
gender=input("Enter the gender")
college=input("Enter the college")
age=int(input("Enter the age")) # ValueError if the input is not numeric
print(name)
print(branch)
print(gender)
print(college)
print(age)
| [
"noreply@github.com"
] | gauravgupta99.noreply@github.com |
ceda8961e32720094a1947a9f7881b8781ad8f22 | 28bee4bae40460d08c84f903f1fff3f057d59805 | /todo/views.py | 895283def2ca7a7b8cb46f74e9feabbae1d79583 | [] | no_license | GaganShergill/todo | a8f1e4d49977abfd3ead9fd81811ef42968091e2 | 22ea8854d49c46f367faddc1f6285be28fae0461 | refs/heads/master | 2022-12-05T12:38:54.467044 | 2020-08-21T14:35:53 | 2020-08-21T14:35:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.views import generic
from . import models, forms
from django.utils import timezone
from django.urls import reverse_lazy
# Create your views here.
class TaskListView(generic.ListView):
    """List today's tasks, newest first, with a count of completed ones.

    The start/end-of-day computation was duplicated between
    ``get_context_data`` and ``get_queryset``; it now lives in one
    helper so the two queries cannot drift apart.
    """
    model = models.ToDo

    @staticmethod
    def _today_bounds():
        # (start, end) of the current day in the active timezone.
        # Microseconds are not zeroed, matching the original behaviour.
        start = timezone.now().replace(hour=0, minute=0, second=0)
        end = timezone.now().replace(hour=23, minute=59, second=59)
        return start, end

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        start, end = self._today_bounds()
        context['taskCompleted'] = models.ToDo.objects.filter(
            date_created__lte=end, date_created__gte=start,
            completed=True).count()
        return context

    def get_queryset(self):
        start, end = self._today_bounds()
        return models.ToDo.objects.filter(
            date_created__lte=end,
            date_created__gte=start).order_by('-date_created')
class TaskCreateView(generic.CreateView):
redirect_field_name = 'todo/todo_list.html'
form_class = forms.ToDoForm
model = models.ToDo
def form_valid(self, form):
todo = form.save()
return super(TaskCreateView, self).form_valid(form)
class TaskUpdateView(generic.UpdateView):
redirect_field_name = 'todo/todo_list.html'
form_class = forms.ToDoForm
model = models.ToDo
class TaskDeleteView(generic.DeleteView):
model = models.ToDo
success_url = reverse_lazy('todo:taskList')
def TaskCompleteView(request, pk):
task = get_object_or_404(models.ToDo, pk=pk)
task.complete()
return redirect('todo:taskList')
def TaskNotCompleteView(request, pk):
task = get_object_or_404(models.ToDo, pk=pk)
task.notComplete()
return redirect('todo:taskList')
| [
"shergill352@gmail.com"
] | shergill352@gmail.com |
691d85c56366185ed37ff4b0d26830bd496179bc | 0b1d991dbb803d73b60bf7337d629048457a4f03 | /Week_2/homework2.py | 3493fd4d0f8b69de00bf41c3b2fc22bc074fad57 | [] | no_license | BuiNgocHai/DataScience | 7dd59f06bfa2f877cb48d96419a4928c9b23d161 | 232f18dbda6fc450bbd5866b50e6200e96ee6b11 | refs/heads/master | 2020-03-27T00:09:00.741780 | 2018-09-08T01:21:53 | 2018-09-08T01:21:53 | 145,598,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | a = [[1, 4], [9, 16], [25, 36]]
print ([i[j] for i in a for j in range(2)])
| [
"haibuai3@gmail.com"
] | haibuai3@gmail.com |
153fbf7d83996dbe4b03cef164b1f86b688de49f | f63bb3418a4bcfca847afd65349f5d2e7c4a6863 | /tests/test_exceptions.py | 1989073263ffb73d901d830f061a3bc6e2f06958 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ndanielsen/py-votesmart | 8e5ee50a25d436b8926296d2f9fb33011f19b14e | bd69479ab06bf422cb965fcb6de4c38f227ea99c | refs/heads/master | 2023-05-27T05:50:21.533984 | 2019-11-17T19:22:12 | 2019-11-17T19:22:12 | 121,299,285 | 7 | 4 | NOASSERTION | 2023-05-22T21:34:24 | 2018-02-12T20:41:52 | Python | UTF-8 | Python | false | false | 215 | py | import pytest
from votesmart.exceptions import VotesmartApiError
def test_sanity():
assert 1 + 1 == 2
def test_VotesmartApiError():
with pytest.raises(VotesmartApiError):
raise VotesmartApiError()
| [
"noreply@github.com"
] | ndanielsen.noreply@github.com |
0ac07e96dc5cadeae1a88aa2810f899855728c95 | 3ab1b81f407e9a7af434b9ac3687ea52ff271292 | /Trees/binaryTree.py | 73ed41dd763b8815d8291e7e0f3c58967b3efea6 | [] | no_license | sravanthi-kethireddy/LeetCode | 418db700f3d95f12b3098cbc4e798bb7eb5e7914 | a0c34c7736ef0763fdbb52e2bde990e0b33e8ca6 | refs/heads/master | 2023-01-29T07:51:40.495479 | 2020-12-02T21:20:32 | 2020-12-02T21:20:32 | 317,992,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,940 | py | class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Function to insert nodes in level order
def insertLevelOrder(arr, root, i, n):
# Base case for recursion
if i < n:
temp = Node(arr[i])
root = temp
# insert left child
root.left = insertLevelOrder(arr, root.left, 2 * i + 1, n)
# insert right child
root.right = insertLevelOrder(arr, root.right, 2 * i + 2, n)
# print(root.data)
return root
# InOrder fashion
def inOrder(root):
if root != None:
inOrder(root.left)
print(root.data,end=" ")
inOrder(root.right)
def isValidBST(root):
return isValidBSTRecu(root, float("-inf"), float("inf"))
def isValidBSTRecu(root, low, high):
if root is None:
return True
return low < root.data and root.data < high \
and isValidBSTRecu(root.left, low, root.data) \
and isValidBSTRecu(root.right, root.data, high)
def isValidBST2(root):
prev, cur = None, root
while cur:
if cur.left is None:
if prev and prev.data >= cur.data:
return False
prev = cur
cur = cur.right
else:
node = cur.left
while node.right and node.right != cur:
node = node.right
if node.right is None:
node.right = cur
cur = cur.left
else:
if prev and prev.data >= cur.data:
return False
node.right = None
prev = cur
cur = cur.right
return True
# Driver Code
if __name__ == '__main__':
arr = [1, 2, 3, 4, 5, 6, 6, 6, 6]
arr = [1,2,3]
n = len(arr)
root = None
root = insertLevelOrder(arr, root, 0, n)
ans = isValidBST2(root)
print(ans)
# inOrder(root)
| [
"Sravanthi.Kethireddy@ey.com"
] | Sravanthi.Kethireddy@ey.com |
f9b7225639bb8e7345c3ae82acb0ee54276ceedb | fd67592b2338105e0cd0b3503552d188b814ad95 | /egoi_api/paths/campaign_groups/post.pyi | e20c009a7deaa011725e0a74459217db59959c7d | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 15,827 | pyi | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from egoi_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from egoi_api import schemas # noqa: F401
from egoi_api.model.campaign_group_post import CampaignGroupPost
from egoi_api.model.unauthorized import Unauthorized
from egoi_api.model.campaign_group import CampaignGroup
from egoi_api.model.service_unavailable import ServiceUnavailable
from egoi_api.model.conflict import Conflict
from egoi_api.model.bad_request import BadRequest
from egoi_api.model.unprocessable_entity import UnprocessableEntity
from egoi_api.model.internal_server_error import InternalServerError
from egoi_api.model.too_many_requests import TooManyRequests
from egoi_api.model.forbidden import Forbidden
# body param
SchemaForRequestBodyApplicationJson = CampaignGroupPost
request_body_campaign_group_post = api_client.RequestBody(
content={
'application/json': api_client.MediaType(
schema=SchemaForRequestBodyApplicationJson),
},
required=True,
)
SchemaFor201ResponseBodyApplicationJson = CampaignGroup
@dataclass
class ApiResponseFor201(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor201ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_201 = api_client.OpenApiResponse(
response_cls=ApiResponseFor201,
content={
'application/json': api_client.MediaType(
schema=SchemaFor201ResponseBodyApplicationJson),
},
)
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor400ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
content={
'application/json': api_client.MediaType(
schema=SchemaFor400ResponseBodyApplicationJson),
},
)
SchemaFor401ResponseBodyApplicationJson = Unauthorized
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor401ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
content={
'application/json': api_client.MediaType(
schema=SchemaFor401ResponseBodyApplicationJson),
},
)
SchemaFor403ResponseBodyApplicationJson = Forbidden
@dataclass
class ApiResponseFor403(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor403ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_403 = api_client.OpenApiResponse(
response_cls=ApiResponseFor403,
content={
'application/json': api_client.MediaType(
schema=SchemaFor403ResponseBodyApplicationJson),
},
)
SchemaFor409ResponseBodyApplicationJson = Conflict
@dataclass
class ApiResponseFor409(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor409ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_409 = api_client.OpenApiResponse(
response_cls=ApiResponseFor409,
content={
'application/json': api_client.MediaType(
schema=SchemaFor409ResponseBodyApplicationJson),
},
)
SchemaFor422ResponseBodyApplicationJson = UnprocessableEntity
@dataclass
class ApiResponseFor422(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor422ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_422 = api_client.OpenApiResponse(
response_cls=ApiResponseFor422,
content={
'application/json': api_client.MediaType(
schema=SchemaFor422ResponseBodyApplicationJson),
},
)
SchemaFor429ResponseBodyApplicationJson = TooManyRequests
@dataclass
class ApiResponseFor429(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor429ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_429 = api_client.OpenApiResponse(
response_cls=ApiResponseFor429,
content={
'application/json': api_client.MediaType(
schema=SchemaFor429ResponseBodyApplicationJson),
},
)
SchemaFor500ResponseBodyApplicationJson = InternalServerError
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor500ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
response_cls=ApiResponseFor500,
content={
'application/json': api_client.MediaType(
schema=SchemaFor500ResponseBodyApplicationJson),
},
)
SchemaFor503ResponseBodyApplicationJson = ServiceUnavailable
@dataclass
class ApiResponseFor503(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor503ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_503 = api_client.OpenApiResponse(
response_cls=ApiResponseFor503,
content={
'application/json': api_client.MediaType(
schema=SchemaFor503ResponseBodyApplicationJson),
},
)
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: typing_extensions.Literal["application/json"] = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor201,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _create_campaign_group_oapg(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = 'application/json',
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Create new campaign group
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
used_path = path.value
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
if body is schemas.unset:
raise exceptions.ApiValueError(
'The required body parameter has an invalid value of: unset. Set a valid value instead')
_fields = None
_body = None
serialized_data = request_body_campaign_group_post.serialize(body, content_type)
_headers.add('Content-Type', content_type)
if 'fields' in serialized_data:
_fields = serialized_data['fields']
elif 'body' in serialized_data:
_body = serialized_data['body']
response = self.api_client.call_api(
resource_path=used_path,
method='post'.upper(),
headers=_headers,
fields=_fields,
body=_body,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class CreateCampaignGroup(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: typing_extensions.Literal["application/json"] = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor201,
api_client.ApiResponseWithoutDeserialization,
]: ...
def create_campaign_group(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = 'application/json',
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._create_campaign_group_oapg(
body=body,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForpost(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: typing_extensions.Literal["application/json"] = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor201,
]: ...
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
skip_deserialization: typing_extensions.Literal[True],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = ...,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor201,
api_client.ApiResponseWithoutDeserialization,
]: ...
def post(
self,
body: typing.Union[SchemaForRequestBodyApplicationJson,],
content_type: str = 'application/json',
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._create_campaign_group_oapg(
body=body,
content_type=content_type,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
| [
"integrations@e-goi.com"
] | integrations@e-goi.com |
f2ad114df4ada5223c1c638da1322b6179e67576 | f60415469d5d28b2c9b7160d34d1aac979fddedc | /main.py | 88399ef33de463fb39326c630e3822ea11ce5457 | [] | no_license | michael-kwan/llanalysis | ffb30b469caef348d86e44e743e8d7dac6598a52 | b307f974f7255db8b7795644182c3312c2b5f726 | refs/heads/master | 2022-07-02T22:25:38.965433 | 2022-06-12T10:38:32 | 2022-06-12T10:38:32 | 176,899,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | import sys
sys.path.insert(0, './src')
import util
import player
from bs4 import BeautifulSoup
import requests
if __name__ == "__main__":
sesh = util.login()
#players = util.get_player_ids()
#print(player.get_player_info('kwanm', sesh))
#print(player.get_player_charts('kwanm', sesh, [5]))
print(player.get_player_branch('boreckim', sesh))
| [
"18michaelkwan@gmail.com"
] | 18michaelkwan@gmail.com |
4cc9a4def847a0054c5afcf86c3fd3aab5fecd98 | ee8f7957897df84744a36ef57dc000a81c5fa02b | /imageParse.py | ab0b180b745d40686b758ef9c6daf5ec5ddf13b7 | [] | no_license | mts2/Scribbler2 | 2611ba9273e98b406f5f053ccc5b69d122cbfe27 | fe41c12faf74d6303db1929432e8d0827c4f8375 | refs/heads/master | 2021-01-19T00:29:38.820668 | 2014-11-18T04:13:55 | 2014-11-18T04:13:55 | 25,258,029 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,022 | py | from myro import *
import fileParse
def compare(pixel, colour, tolerance):
comparedR = colour[0]
comparedG = colour[1]
comparedB = colour[2]
realColours = getRGB(pixel)
realR = realColours[0]
realG = realColours[1]
realB = realColours[2]
return (abs(realR - int(comparedR)) < tolerance and abs(realG - int(comparedG)) < tolerance and abs(realB - int(comparedB)) < tolerance)
def checkForShoe(picture, origX, origY, colour, tolerance):
notShoePixels = 0 #to add up number of non-matching pixels
badPixelTolerance = 100 #total non-matching pixels allowed
comparisonR = colour[0]
comparisonG = colour[1]
comparisonB = colour[2]
for x in range(origX, origX+100, 5):
for y in range(origY, origY+100, 5): #check 20x20 pixel square
pixel = getPixel(picture, x, y)
pictureColour = getRGB(pixel)
pictureR = pictureColour[0]
pictureG = pictureColour[1]
pictureB = pictureColour[2]
if(abs(int(comparisonR) - int(pictureR)) > tolerance or abs(int(comparisonG) - int(pictureG)) > tolerance or abs(int(comparisonB) - int(pictureB)) > tolerance):
#if pixel colour does not match shoe, increment the counter for non-matching pixels
notShoePixels+=1
if(notShoePixels > badPixelTolerance): #if tolerance exceeded, no shoe
return False
return True #if tolerance not exceeded by end, shoe exists
def parseImage(picture, colours, increment):
matchingColour = False #default matching colour to false
shoeFound = False #assume no shoe found
for x in range(500, 800, increment):
if(shoeFound == True):
break #break out of loop if shoe priorly found
for y in range(200, 400, increment):
pixel = getPixel(picture, x, y) #take pixel
for i in range (0, len(colours)): #compare pixel colours for each colour
colour = colours[i]
tolerance = 100
if(compare(pixel, colour, tolerance) == True):
tolerance = 50
if(checkForShoe(picture, x, y, colour, tolerance) == True):
shoeFound = True #allows for loop to be broken
matchingColour = colour #colour to return
if(shoeFound == True):
break #break out of loop if shoe has been found (and then break again to exit parent loop)
return matchingColour #returns colour match, or otherwise returns false | [
"michael.socha.99@hotmail.ca"
] | michael.socha.99@hotmail.ca |
3103020bb638bf038a828e22a9c2e1ad20c9edcb | 268d048cbfda3d7c12989e7f27fe1cdf390be5d9 | /ship.py | 635d339d63ea9d25522cfe73c1299175c0059dfd | [] | no_license | blahwhale/game | 98df73c35ce16dbe3c065de5ba4274169ff0650a | ce063055be3fa7a35804c2e1d28d18035a7c141c | refs/heads/master | 2020-05-02T13:22:11.023011 | 2019-03-28T06:46:41 | 2019-03-28T06:46:41 | 177,982,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | import pygame
class Ship():
def __init__(self,ai_settings,screen):
"""Initialize the ship and set its starting position."""
self.screen = screen
self.ai_settings=ai_settings
#Load the ship image and its rect.
self.image=pygame.image.load('images/ship.bmp')
self.rect=self.image.get_rect()
self.screen_rect=screen.get_rect()
# Start each new ship at the bottom center of the screen.
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
# Store a decimal value for the ship's center.
self.center=float(self.rect.centerx)
#Movement flag
self.moving_right = False
self.moving_left = False
def update(self):
"""Update the ship's position based on the movement flag."""
#Update the ship's center value, not the rect.
if self.moving_right and self.rect.right< self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left>0:
self.center = self.ai_settings.ship_speed_factor
#Update rect object from self.center.
self.rect.centerx = self.center
def blitme(self):
"""Draw the ship at its current location."""
self.screen.blit(self.image, self.rect) | [
"dgu@Dians-MacBook-Pro.local"
] | dgu@Dians-MacBook-Pro.local |
d99b94a8224ccb5c9aac7a269535457cfdfb8e9f | 2d71a4824e6a99f137c9e1ae7d0af9b661e38291 | /Random_Substitution_Cipher.py | 2e476b29dfff8d8abb1695e7ff3bef6ff72b5505 | [] | no_license | parinshah187/Python-Progrrams | 61d0dfee498639096a7260ec6628a529d8d474f4 | 3386b6bb2af50b8b5a2f167106d7ce129267446e | refs/heads/master | 2020-05-19T09:27:35.377042 | 2014-03-25T10:46:51 | 2014-03-25T10:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # URL -> http://www.codeskulptor.org/#user16_UjY3TZ9q0YH6c2e.py
import simplegui
import random
CIPHER = {}
LETTERS = "abcdefghijklmnopqrstuvwxyz"
msg = ""
emsg=""
en = True
def init():
letter_list = list(LETTERS)
random.shuffle(letter_list)
for ch in LETTERS :
CIPHER[ch] = letter_list.pop()
print CIPHER
def new_msg(m):
global msg
msg = m
label.set_text(msg)
def encode():
global msg,emsg,en
emsg = ""
for ch in msg :
emsg += CIPHER[ch]
msg = ""
print msg,emsg
en = True
def decode():
global msg, emsg,en
emsg = msg
msg = ""
for ch in emsg :
for key in CIPHER :
if CIPHER[key]==ch :
msg+=key
print emsg,"decoded as",msg
en = False
def draw(canvas):
if en :
canvas.draw_text(emsg,[10,20],20,"White")
else :
canvas.draw_text(msg,[10,20],20,"White")
f = simplegui.create_frame("Cipher",200,200)
f.add_input("New Message : ",new_msg,200)
label = f.add_label("",200)
f.add_button("Encode",encode,100)
f.add_button("Decode",decode,100)
f.set_draw_handler(draw)
init()
f.start()
| [
"parin.shah1807@gmail.com"
] | parin.shah1807@gmail.com |
b6888f2a0b0cebdf279fd788903a70b9e292e2f3 | eb1de99009c03e55ab02240d4a4f471e261300e3 | /WhosWatching.py | dbac1b261bdf33a7e4c49de4f2bd2328d9d24dec | [] | no_license | dmegahan/WhosWatchingTheStream | 987446e0a3412a748886763658127993ca7d041f | 6833eb53b1e6e9af57d59b3cd1b3230e50704afe | refs/heads/master | 2020-06-14T16:52:01.391226 | 2015-02-24T02:32:01 | 2015-02-24T02:32:01 | 31,239,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,482 | py | import requests
#trackedUsers are the people you're looking for
trackedUsers = ['Totalbiscuit', 'LIRIK', 'summit1g', 'kinetick42', 'AmazHS', 'Voket']
#stream is the stream chat to search in
stream = 'summit1g'
#will read the twitch users followed streamers and track them instead
def trackFollowed(twitchUsername):
#API only shows the first 25 followed streams by default
#defaults: offset = 0, limit = 25
#https://github.com/justintv/Twitch-API/blob/master/v3_resources/follows.md#get-usersuserfollowschannels
offset = "0"
limit = "100"
FOLLOWING_REQUEST = "https://api.twitch.tv/kraken/users/" + twitchUsername + "/follows/channels?&limit=" + limit + "&offset=" + offset
try:
responseFollowing = requests.get(FOLLOWING_REQUEST)
except(requests.exceptions.ConnectionError):
print "request failure: " + FOLLOWING_REQUEST
try:
followingObj = responseFollowing.json()
followedStreams = followingObj['follows']
except (TypeError, ValueError, KeyError):
print "Error occured."
followedStreams = []
trackedUsers = []
for stream in followedStreams:
trackedUsers = trackedUsers + [stream['channel']['name']]
return trackedUsers
def findUsersInChatters(chatters, users):
for chatter in chatters:
for user in users:
if chatter.lower() == user.lower():
#found the user, no need to keep searching
print "Found: " + user
continue
def main():
#comment out if you want to track a manually input list of users
trackedUsers = trackFollowed('kinetick42')
CHATTERS_REQUEST = "http://tmi.twitch.tv/group/user/" + stream + "/chatters"
#request chatters info
try:
responseChatters = requests.get(CHATTERS_REQUEST)
except(requests.exceptions.ConnectionError):
print "request failure: " + CHATTERS_REQUEST
#get list of people currently in chat
try:
chattersObj = responseChatters.json()
moderators = chattersObj['chatters']['moderators']
staff = chattersObj['chatters']['staff']
admins = chattersObj['chatters']['admins']
globalMods = chattersObj['chatters']['global_mods']
viewers = chattersObj['chatters']['viewers']
except (TypeError, ValueError, KeyError):
viewers = []
allChatters = moderators + staff + admins + globalMods + viewers
findUsersInChatters(allChatters, trackedUsers)
main()
| [
"dpm68@drexel.edu"
] | dpm68@drexel.edu |
a58740e2a6ef0f1c5c1c2d3373a3d57e3b7311d6 | e6904315fef720d562727c259fe55edcaaf2f84b | /src/orion/core/io/evc_builder.py | 01094146ed00e9b0623a8a0adf56c0ef4a18b01b | [
"BSD-3-Clause"
] | permissive | mnoukhov/orion | c93c4655f6b1b6358f8ead78a3adbe9d871785c7 | 7849d77344e84ec805207cf4148aecf6f7d6b3d7 | refs/heads/master | 2020-03-25T05:37:54.251082 | 2019-08-19T17:33:15 | 2019-08-19T17:33:15 | 143,457,714 | 0 | 0 | NOASSERTION | 2018-10-31T02:37:32 | 2018-08-03T17:55:57 | Python | UTF-8 | Python | false | false | 2,275 | py | # -*- coding: utf-8 -*-
# pylint:disable=protected-access
"""
:mod:`orion.core.io.evc_builder` -- Builder of experiment version control tree
==============================================================================
.. module:: experiment
:platform: Unix
:synopsis: Builder of the experiment version control tree
The EVCBuilder takes care of building a main experiment along with an EVC tree and connect them
together.
A user can define a root and some leafs that should be the extremums of the tree. Those can be
different than the actual root and leafs of the global EVC tree, making the trimmed version a small
subset of the global version.
"""
from orion.core.evc.experiment import ExperimentNode
from orion.core.io.experiment_builder import ExperimentBuilder
class EVCBuilder(object):
"""Builder of experiment version control trees using
:class:`orion.core.evc.experiment.ExperimentNode`
.. seealso::
`orion.core.io.experiment_builder` for more information on the process of building
experiments.
:class:`orion.core.evc.experiment`
:class:`orion.core.worker.experiment`
"""
# pylint:disable=no-self-use
def connect_to_version_control_tree(self, experiment):
"""Build the EVC and connect the experiment to it"""
experiment_node = ExperimentNode(experiment.name, experiment=experiment)
experiment.connect_to_version_control_tree(experiment_node)
def build_view_from(self, cmdargs):
"""Build an experiment view based on global config and connect it to the EVC"""
experiment_view = ExperimentBuilder().build_view_from(cmdargs)
self.connect_to_version_control_tree(experiment_view)
return experiment_view
def build_from(self, cmdargs):
"""Build an experiment based on config and connect it to the EVC"""
experiment = ExperimentBuilder().build_from(cmdargs)
self.connect_to_version_control_tree(experiment)
return experiment
def build_from_config(self, config):
"""Build an experiment based on given config and connect it to the EVC"""
experiment = ExperimentBuilder().build_from_config(config)
self.connect_to_version_control_tree(experiment)
return experiment
| [
"xavier.bouthillier@umontreal.ca"
] | xavier.bouthillier@umontreal.ca |
080eab695ae14e05fd3c8832d91b6e36843643b1 | fa3f5368cbba48de3b9c57c79785e51086afb04d | /Python/daheng-camera-hello/Galaxy_Linux_Python_2.0.2106.9041/api/gxipy/dxwrapper.py | c8164896a7110e9041a141dc8586018aa345d7e7 | [] | no_license | langenhagen/experiments-and-tutorials | 8f853675e0d8718581c33ff099fcb35c8958f315 | 9598af1b8be7ebe8462a0bbfc87a6edfa5063741 | refs/heads/master | 2023-08-03T15:07:38.757388 | 2023-07-31T16:15:34 | 2023-07-31T16:15:34 | 211,196,519 | 4 | 1 | null | 2022-03-27T10:02:49 | 2019-09-26T23:15:40 | HTML | UTF-8 | Python | false | false | 39,187 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# -*-mode:python ; tab-width:4 -*- ex:set tabstop=4 shiftwidth=4 expandtab: -*-
#
from ctypes import *
import sys
import os
# Select and load the native image-processing shared library for the current
# platform.  On failure only a message is printed (best effort), in which
# case the module-level name ``dll`` is left undefined.
if sys.platform == 'linux2' or sys.platform == 'linux':
    # Prefer the standalone image-processing library; fall back to the
    # combined Galaxy API library when it is not installed.
    filepath = ('/usr/lib/libdximageproc.so'
                if os.path.exists('/usr/lib/libdximageproc.so')
                else '/usr/lib/libgxiapi.so')
    try:
        dll = CDLL(filepath)
    except OSError:
        print('Cannot find libdximageproc.so or libgxiapi.so.')
else:
    try:
        # Python 3.8 tightened the Windows DLL search rules; winmode=0
        # restores the legacy PATH-based lookup used by older interpreters.
        if sys.version_info >= (3, 8):
            dll = WinDLL('DxImageProc.dll', winmode=0)
        else:
            dll = WinDLL('DxImageProc.dll')
    except OSError:
        print('Cannot find DxImageProc.dll.')
# status definition
class DxStatus:
OK = 0 # Operation is successful
PARAMETER_INVALID = -101 # Invalid input parameter
PARAMETER_OUT_OF_BOUND = -102 # The input parameter is out of bounds
NOT_ENOUGH_SYSTEM_MEMORY = -103 # System out of memory
NOT_FIND_DEVICE = -104 # not find device
STATUS_NOT_SUPPORTED = -105 # operation is not supported
CPU_NOT_SUPPORT_ACCELERATE = -106 # CPU does not support acceleration
def __init__(self):
pass
# Bayer layout
class DxPixelColorFilter:
    """Bayer layout of the sensor (which colors start the first row)."""

    NONE = 0  # not a bayer format
    RG = 1    # first row starts with RG
    GB = 2    # first row starts with GB
    GR = 3    # first row starts with GR
    BG = 4    # first row starts with BG

    def __init__(self):
        pass
# image actual bits
class DxActualBits:
    """Actual significant bit depth of an image buffer."""

    BITS_8 = 8    # 8-bit data
    BITS_10 = 10  # 10-bit data
    BITS_12 = 12  # 12-bit data
    BITS_14 = 14  # 14-bit data
    BITS_16 = 16  # 16-bit data

    def __init__(self):
        pass
# mono8 image process structure
class MonoImgProcess(Structure):
    """ctypes mirror of the C mono8 image-process parameter struct."""

    _fields_ = [
        ('defective_pixel_correct', c_bool),  # Pixel correct switch
        ('sharpness', c_bool),                # Sharpness switch
        ('accelerate', c_bool),               # Accelerate switch
        ('sharp_factor', c_float),            # Sharpen the intensity factor
        ('pro_lut', c_void_p),                # Lookup table
        ('lut_length', c_uint16),             # Lut Buffer length
        ('array_reserved', c_ubyte * 32),     # Reserved
    ]

    def __str__(self):
        # Bug fix: the original formatted the whole (name, ctype) tuple as the
        # label ("%s" % n); only the field name is wanted.
        return "MonoImgProcess\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
# Raw8 Image process structure
class ColorImgProcess(Structure):
    """ctypes mirror of the C raw8 (color) image-process parameter struct."""

    _fields_ = [
        ('defective_pixel_correct', c_bool),  # Pixel correct switch
        ('denoise', c_bool),                  # Noise reduction switch
        ('sharpness', c_bool),                # Sharpness switch
        ('accelerate', c_bool),               # Accelerate switch
        ('arr_cc', c_void_p),                 # Color processing parameters
        ('cc_buf_length', c_uint8),           # Color processing parameters length(sizeof(VxInt16)*9)
        ('sharp_factor', c_float),            # Sharpen the intensity factor
        ('pro_lut', c_void_p),                # Lookup table
        ('lut_length', c_uint16),             # The length of the lookup table
        ('cv_type', c_uint),                  # Interpolation algorithm
        ('layout', c_uint),                   # Bayer format
        ('flip', c_bool),                     # Image flip flag
        ('array_reserved', c_ubyte * 32),     # Reserved
    ]

    def __str__(self):
        # Bug fix: the original formatted the whole (name, ctype) tuple as the
        # label; only the field name is wanted.
        return "ColorImgProcess\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
# Field correction process structure
class FieldCorrectionProcess(Structure):
    """ctypes mirror of the C flat-field-correction parameter struct."""

    _fields_ = [
        ('bright_buf', c_void_p),  # Bright image buffer
        ('dark_buf', c_void_p),    # Dark image buffer
        ('width', c_uint32),       # image width
        ('height', c_uint32),      # image height
        ('actual_bits', c_uint),   # image actual bits
        ('bayer_type', c_uint),    # Bayer Type
    ]

    def __str__(self):
        # Bug fix: the original formatted the whole (name, ctype) tuple as the
        # label; only the field name is wanted.
        return "FieldCorrectionProcess\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
# color transform factor
class ColorTransformFactor(Structure):
    """ctypes mirror of the 3x3 color-transform gain matrix (row major)."""

    _fields_ = [
        ('fGain00', c_float),  # red contribution to the red pixel (multiplicative factor)
        ('fGain01', c_float),  # green contribution to the red pixel (multiplicative factor)
        ('fGain02', c_float),  # blue contribution to the red pixel (multiplicative factor)
        ('fGain10', c_float),  # red contribution to the green pixel (multiplicative factor)
        ('fGain11', c_float),  # green contribution to the green pixel (multiplicative factor)
        ('fGain12', c_float),  # blue contribution to the green pixel (multiplicative factor)
        ('fGain20', c_float),  # red contribution to the blue pixel (multiplicative factor)
        ('fGain21', c_float),  # green contribution to the blue pixel (multiplicative factor)
        ('fGain22', c_float),  # blue contribution to the blue pixel (multiplicative factor)
    ]

    def __str__(self):
        # Bug fix: the original formatted the whole (name, ctype) tuple as the
        # label; only the field name is wanted.
        return "ColorTransformFactor\n%s" % "\n".join(
            "%s:\t%s" % (name, getattr(self, name)) for name, _ in self._fields_)
if hasattr(dll, 'DxGetLut'):
    def dx_get_lut(contrast_param, gamma, lightness):
        """Compute an 8-bit lookup table for contrast/gamma/lightness.

        :param contrast_param: contrast, range -50..100
        :param gamma: gamma, range 0.1..10
        :param lightness: lightness, range -150..150
        :return: (status, lut, lut_length) -- status as in DxStatus, lut is a
                 ctypes uint8 array, lut_length is its size in bytes
        """
        contrast_c = c_int32(contrast_param)
        gamma_c = c_double(gamma)
        lightness_c = c_int32(lightness)
        length_c = c_uint16(0)
        # First call with a NULL buffer only reports the required length.
        dll.DxGetLut(contrast_c, gamma_c, lightness_c, None, byref(length_c))
        lut_buffer = (c_uint8 * length_c.value)()
        status = dll.DxGetLut(contrast_c, gamma_c, lightness_c,
                              byref(lut_buffer), byref(length_c))
        return status, lut_buffer, length_c.value
# Length in bytes of the color-correction array: sizeof(int16) * 9.
CC_PARAM_ARRAY_LEN = 18

if hasattr(dll, "DxCalcCCParam"):
    def dx_calc_cc_param(color_cc_param, saturation):
        """Compute the color-adjustment array for image processing.

        :param color_cc_param: color correction parameter read from the camera
        :param saturation: saturation factor, range 0..128
        :return: (status, cc_param) -- status as in DxStatus, cc_param is a
                 ctypes int16 array of 9 entries
        """
        buf_len_c = c_uint8(CC_PARAM_ARRAY_LEN)
        cc_buffer = (c_int16 * buf_len_c.value)()
        status = dll.DxCalcCCParam(c_int64(color_cc_param), c_int16(saturation),
                                   byref(cc_buffer), buf_len_c)
        return status, cc_buffer
if hasattr(dll, "DxCalcUserSetCCParam"):
    def dx_calc_user_set_cc_param(color_transform_factor, saturation):
        """Compute the color-adjustment array from a user supplied 3x3 matrix.

        :param color_transform_factor: list/tuple of 9 gains (row-major 3x3)
        :param saturation: saturation factor, range 0..128
        :return: (status, cc_param) -- status as in DxStatus, cc_param is a
                 ctypes int16 array of 9 entries
        """
        factor_c = ColorTransformFactor()
        (factor_c.fGain00, factor_c.fGain01, factor_c.fGain02,
         factor_c.fGain10, factor_c.fGain11, factor_c.fGain12,
         factor_c.fGain20, factor_c.fGain21, factor_c.fGain22) = color_transform_factor[:9]
        buf_len_c = c_uint8(CC_PARAM_ARRAY_LEN)  # sizeof(int16) * 9
        cc_buffer = (c_int16 * buf_len_c.value)()
        status = dll.DxCalcUserSetCCParam(byref(factor_c), c_int16(saturation),
                                          byref(cc_buffer), buf_len_c)
        return status, cc_buffer
if hasattr(dll, "DxGetGammatLut"):
    def dx_get_gamma_lut(gamma_param):
        """Compute the gamma lookup table for RGB24 images.

        :param gamma_param: gamma, range 0.1..10
        :return: (status, gamma_lut, lut_length) -- status as in DxStatus,
                 gamma_lut is a ctypes ubyte array, length in bytes
        """
        gamma_c = c_double(gamma_param)
        length_c = c_int()
        # First call sizes the buffer; second call fills it.
        status = dll.DxGetGammatLut(gamma_c, None, byref(length_c))
        lut_buffer = (c_ubyte * length_c.value)()
        status = dll.DxGetGammatLut(gamma_c, byref(lut_buffer), byref(length_c))
        return status, lut_buffer, length_c.value
if hasattr(dll, "DxGetContrastLut"):
    def dx_get_contrast_lut(contrast_param):
        """Compute the contrast lookup table for RGB24 images.

        :param contrast_param: contrast, range -50..100
        :return: (status, contrast_lut, lut_length) -- status as in DxStatus,
                 contrast_lut is a ctypes ubyte array, length in bytes
        """
        contrast_c = c_int(contrast_param)
        length_c = c_int()
        # First call sizes the buffer; second call fills it.
        status = dll.DxGetContrastLut(contrast_c, None, byref(length_c))
        lut_buffer = (c_ubyte * length_c.value)()
        status = dll.DxGetContrastLut(contrast_c, byref(lut_buffer), byref(length_c))
        return status, lut_buffer, length_c.value
if hasattr(dll, 'DxRaw8toRGB24'):
    def dx_raw8_to_rgb24(input_address, output_address, width, height, convert_type, bayer_type, flip):
        """Convert a Raw8 buffer to RGB24.

        :param input_address: raw input buffer address (width * height bytes)
        :param output_address: RGB output buffer address (width * height * 3 bytes)
        :param width: image width
        :param height: image height
        :param convert_type: bayer conversion algorithm (DxBayerConvertType)
        :param bayer_type: pixel color filter (DxPixelColorFilter)
        :param flip: True to flip the output image vertically
        :return: status as in DxStatus
        """
        return dll.DxRaw8toRGB24(c_void_p(input_address), c_void_p(output_address),
                                 c_uint32(width), c_uint32(height),
                                 c_uint(convert_type), c_uint(bayer_type), c_bool(flip))
if hasattr(dll, 'DxRaw8toRGB24Ex'):
    def dx_raw8_to_rgb24_ex(input_address, output_address, width, height, convert_type, bayer_type, flip, channel_order):
        """Convert a Raw8 buffer to RGB24 with a selectable channel order.

        :param input_address: raw input buffer address (width * height bytes)
        :param output_address: RGB output buffer address (width * height * 3 bytes)
        :param width: image width
        :param height: image height
        :param convert_type: bayer conversion algorithm (DxBayerConvertType)
        :param bayer_type: pixel color filter (DxPixelColorFilter)
        :param flip: True to flip the output image vertically
        :param channel_order: RGB channel order of the output image
        :return: status as in DxStatus
        """
        return dll.DxRaw8toRGB24Ex(c_void_p(input_address), c_void_p(output_address),
                                   c_uint32(width), c_uint32(height),
                                   c_uint(convert_type), c_uint(bayer_type),
                                   c_bool(flip), c_uint(channel_order))
if hasattr(dll, 'DxRaw16toRaw8'):
    def dx_raw16_to_raw8(input_address, out_address, width, height, valid_bits):
        """Convert a Raw16 buffer to Raw8.

        :param input_address: input buffer address (width * height * 2 bytes)
        :param out_address: output buffer address (width * height bytes)
        :param width: image width
        :param height: image height
        :param valid_bits: valid data bits (DxValidBit)
        :return: status as in DxStatus
        """
        return dll.DxRaw16toRaw8(c_void_p(input_address), c_void_p(out_address),
                                 c_uint32(width), c_uint32(height), c_uint(valid_bits))
if hasattr(dll, 'DxRotate90CW8B'):
    def dx_raw8_rotate_90_cw(input_address, out_address, width, height):
        """Rotate an 8-bit image 90 degrees clockwise.

        :param input_address: input buffer address (width * height bytes)
        :param out_address: output buffer address (width * height bytes)
        :param width: image width
        :param height: image height
        :return: status as in DxStatus
        """
        return dll.DxRotate90CW8B(c_void_p(input_address), c_void_p(out_address),
                                  c_uint32(width), c_uint32(height))
if hasattr(dll, 'DxRotate90CCW8B'):
    def dx_raw8_rotate_90_ccw(input_address, out_address, width, height):
        """Rotate an 8-bit image 90 degrees counter-clockwise.

        :param input_address: input buffer address (width * height bytes)
        :param out_address: output buffer address (width * height bytes)
        :param width: image width
        :param height: image height
        :return: status as in DxStatus
        """
        return dll.DxRotate90CCW8B(c_void_p(input_address), c_void_p(out_address),
                                   c_uint32(width), c_uint32(height))
if hasattr(dll, "DxImageImprovment"):
    def dx_image_improvement(input_address, output_address, width, height,
                             color_correction_param, contrast_lut, gamma_lut):
        """Run the RGB24 image-quality improvement pipeline.

        :param input_address: input buffer address (width * height * 3 bytes)
        :param output_address: output buffer address (width * height * 3 bytes)
        :param width: image width
        :param height: image height
        :param color_correction_param: color correction parameter (from camera)
        :param contrast_lut: contrast lookup table (ctypes array or None)
        :param gamma_lut: gamma lookup table (ctypes array or None)
        :return: status as in DxStatus
        """
        return dll.DxImageImprovment(c_void_p(input_address), c_void_p(output_address),
                                     c_uint32(width), c_uint32(height),
                                     c_int64(color_correction_param),
                                     contrast_lut, gamma_lut)
if hasattr(dll, "DxImageImprovmentEx"):
    def dx_image_improvement_ex(input_address, output_address, width, height,
                                color_correction_param, contrast_lut, gamma_lut, channel_order):
        """Run the RGB24 image-quality improvement pipeline with a selectable
        channel order.

        :param input_address: input buffer address (width * height * 3 bytes)
        :param output_address: output buffer address (width * height * 3 bytes)
        :param width: image width
        :param height: image height
        :param color_correction_param: color correction parameter (from camera)
        :param contrast_lut: contrast lookup table (ctypes array or None)
        :param gamma_lut: gamma lookup table (ctypes array or None)
        :param channel_order: RGB channel order of the output image
        :return: status as in DxStatus
        """
        return dll.DxImageImprovmentEx(c_void_p(input_address), c_void_p(output_address),
                                       c_uint32(width), c_uint32(height),
                                       c_int64(color_correction_param),
                                       contrast_lut, gamma_lut, c_uint(channel_order))
if hasattr(dll, "DxBrightness"):
    def dx_brightness(input_address, output_address, image_size, factor):
        """Adjust brightness of an RGB24 or mono8 buffer.

        :param input_address: input buffer address
        :param output_address: output buffer address
        :param image_size: number of pixels (width * height)
        :param factor: brightness factor, range -150..150
        :return: status as in DxStatus
        """
        return dll.DxBrightness(c_void_p(input_address), c_void_p(output_address),
                                c_uint32(image_size), c_int32(factor))
if hasattr(dll, "DxContrast"):
    def dx_contrast(input_address, output_address, image_size, factor):
        """Adjust contrast of an RGB24 or mono8 buffer.

        :param input_address: input buffer address
        :param output_address: output buffer address
        :param image_size: number of pixels (width * height)
        :param factor: contrast factor, range -50..100
        :return: status as in DxStatus
        """
        return dll.DxContrast(c_void_p(input_address), c_void_p(output_address),
                              c_uint32(image_size), c_int32(factor))
if hasattr(dll, "DxSaturation"):
    def dx_saturation(input_address, output_address, image_size, factor):
        """Adjust saturation of an RGB24 buffer.

        :param input_address: input buffer address (width * height * 3 bytes)
        :param output_address: output buffer address (width * height * 3 bytes)
        :param image_size: number of pixels (width * height)
        :param factor: saturation factor, range 0..128
        :return: status as in DxStatus
        """
        return dll.DxSaturation(c_void_p(input_address), c_void_p(output_address),
                                c_uint32(image_size), c_int32(factor))
if hasattr(dll, "DxAutoRawDefectivePixelCorrect"):
    def dx_auto_raw_defective_pixel_correct(inout_address, width, height, bit_num):
        """Automatically correct defective pixels in-place on a raw buffer.

        Supports Raw8..Raw16; ``bit_num`` is the actual bit depth (8..16).
        Packed formats must be converted to Raw16 first, and the call is
        intended to be applied to every frame.

        :param inout_address: input/output buffer address (corrected in place)
        :param width: image width
        :param height: image height
        :param bit_num: actual bit depth, e.g. 10 for 10-bit data
        :return: status as in DxStatus
        """
        return dll.DxAutoRawDefectivePixelCorrect(c_void_p(inout_address),
                                                  c_uint32(width), c_uint32(height),
                                                  c_int32(bit_num))
if hasattr(dll, "DxSharpen24B"):
    def dx_sharpen_24b(input_address, output_address, width, height, factor):
        """Sharpen an RGB24 buffer.

        :param input_address: input buffer address (width * height * 3 bytes)
        :param output_address: output buffer address (width * height * 3 bytes)
        :param width: image width
        :param height: image height
        :param factor: sharpen factor, range 0.1..5.0
        :return: status as in DxStatus
        """
        return dll.DxSharpen24B(c_void_p(input_address), c_void_p(output_address),
                                c_uint32(width), c_uint32(height), c_float(factor))
if hasattr(dll, "DxGetWhiteBalanceRatio"):
    def dx_get_white_balance_ratio(input_address, width, height):
        """Compute white-balance ratios from an RGB24 buffer.

        For accurate results the buffer should contain an objectively white
        area (e.g. the camera shooting a white target).

        :param input_address: input buffer address (width * height * 3 bytes)
        :param width: image width
        :param height: image height
        :return: (status, (r_ratio, g_ratio, b_ratio))
        """
        r_c = c_double(0)
        g_c = c_double(0)
        b_c = c_double(0)
        status = dll.DxGetWhiteBalanceRatio(c_void_p(input_address),
                                            c_uint32(width), c_uint32(height),
                                            byref(r_c), byref(g_c), byref(b_c))
        return status, (r_c.value, g_c.value, b_c.value)
if hasattr(dll, "DxImageMirror"):
    def dx_image_mirror(input_address, output_address, width, height, mirror_mode):
        """Mirror a raw8 image horizontally or vertically.

        :param input_address: input buffer address
        :param output_address: output buffer address
        :param width: image width
        :param height: image height
        :param mirror_mode: mirror mode selector
        :return: status as in DxStatus
        """
        return dll.DxImageMirror(c_void_p(input_address), c_void_p(output_address),
                                 c_uint32(width), c_uint32(height), c_uint(mirror_mode))
'''
if hasattr(dll, "DxRaw8ImgProcess"):
def dx_raw8_image_process(input_address, output_address, width, height, color_img_process_param):
"""
:brief Raw8 image process
:param input_address: input buffer address, buff size = width * height
:param output_address: output buffer address, buff size = width * height * 3
:param width: image width
:param height: image height
:param color_img_process_param: Raw8 image process param, refer to DxColorImgProcess
"""
input_address_p = c_void_p()
input_address_p.value = input_address
output_address_p = c_void_p()
output_address_p.value = output_address
width_c = c_uint32()
width_c.value = width
height_c = c_uint32()
height_c.value = height
color_img_process_param_c = ColorImgProcess()
color_img_process_param_c.defective_pixel_correct = color_img_process_param.defective_pixel_correct
color_img_process_param_c.denoise = color_img_process_param.denoise
color_img_process_param_c.sharpness = color_img_process_param.sharpness
color_img_process_param_c.accelerate = color_img_process_param.accelerate
if color_img_process_param.cc_param is None:
color_img_process_param_c.arr_cc = None
color_img_process_param_c.cc_buf_length = 0
else:
color_img_process_param_c.arr_cc = addressof(color_img_process_param.cc_param.get_ctype_array())
color_img_process_param_c.cc_buf_length = color_img_process_param.cc_param.get_length()
color_img_process_param_c.sharp_factor = color_img_process_param.sharp_factor
if color_img_process_param.pro_lut is None:
color_img_process_param_c.pro_lut = None
color_img_process_param_c.lut_length = 0
else:
color_img_process_param_c.pro_lut = addressof(color_img_process_param.pro_lut.get_ctype_array())
color_img_process_param_c.lut_length = color_img_process_param.pro_lut.get_length()
color_img_process_param_c.cv_type = color_img_process_param.convert_type
color_img_process_param_c.layout = color_img_process_param.color_filter_layout
color_img_process_param_c.flip = color_img_process_param.flip
status = dll.DxRaw8ImgProcess(input_address_p, output_address_p, width_c,
height_c, byref(color_img_process_param_c))
return status
if hasattr(dll, "DxMono8ImgProcess"):
def dx_mono8_image_process(input_address, output_address, width, height, mono_img_process_param):
"""
:brief mono8 image process
:param input_address: input buffer address, buff size = width * height
:param output_address: output buffer address, buff size = width * height
:param width: image width
:param height: image height
:param mono_img_process_param: mono8 image process param, refer to DxMonoImgProcess
"""
input_address_p = c_void_p()
input_address_p.value = input_address
output_address_p = c_void_p()
output_address_p.value = output_address
width_c = c_uint32()
width_c.value = width
height_c = c_uint32()
height_c.value = height
mono_img_process_param_c = MonoImgProcess()
mono_img_process_param_c.defective_pixel_correct = mono_img_process_param.defective_pixel_correct
mono_img_process_param_c.sharpness = mono_img_process_param.sharpness
mono_img_process_param_c.accelerate = mono_img_process_param.accelerate
mono_img_process_param_c.sharp_factor = mono_img_process_param.sharp_factor
if mono_img_process_param.pro_lut is None:
mono_img_process_param_c.pro_lut = None
mono_img_process_param_c.lut_length = 0
else:
mono_img_process_param_c.pro_lut = addressof(mono_img_process_param.pro_lut.get_ctype_array())
mono_img_process_param_c.lut_length = mono_img_process_param.pro_lut.get_length()
status = dll.DxMono8ImgProcess(input_address_p, output_address_p, width_c,
height_c, byref(mono_img_process_param_c))
return status
'''
if hasattr(dll, 'DxGetFFCCoefficients'):
    def dx_get_ffc_coefficients(bright_img, dark_img, actual_bits, bayer_type, width, height, target_value):
        """Compute flat-field-correction coefficients (raw8/raw10/raw12 only).

        :param bright_img: bright-field image buffer
        :param dark_img: dark-field image buffer
        :param actual_bits: actual bit depth (DxActualBits)
        :param bayer_type: bayer layout (DxPixelColorFilter)
        :param width: image width
        :param height: image height
        :param target_value: correction target value, or None for the default
        :return: (status, ffc_coefficients, ffc_coefficients_length)
        """
        correction_c = FieldCorrectionProcess()
        correction_c.bright_buf = bright_img
        correction_c.dark_buf = dark_img
        correction_c.width = width
        correction_c.height = height
        correction_c.actual_bits = actual_bits
        correction_c.bayer_type = bayer_type
        coeff_len_c = c_int(0)
        # byref() keeps the temporary c_int alive for the duration of the calls.
        target_c = None if target_value is None else byref(c_int(target_value))
        # First call sizes the coefficient buffer; second call fills it.
        dll.DxGetFFCCoefficients(correction_c, None, byref(coeff_len_c), target_c)
        coeff_buffer = (c_ubyte * coeff_len_c.value)()
        status = dll.DxGetFFCCoefficients(correction_c, byref(coeff_buffer),
                                          byref(coeff_len_c), target_c)
        return status, coeff_buffer, coeff_len_c.value
if hasattr(dll, "DxFlatFieldCorrection"):
    def dx_flat_field_correction(input_address, output_address, actual_bits, width, height, ffc_coefficients):
        """Apply flat-field correction to a raw image.

        :param input_address: input buffer address (width * height)
        :param output_address: output buffer address (width * height)
        :param actual_bits: actual bit depth (DxActualBits)
        :param width: image width
        :param height: image height
        :param ffc_coefficients: coefficient object exposing
                                 get_ctype_array()/get_length()
        :return: status as in DxStatus
        """
        coeff_len_c = c_int(ffc_coefficients.get_length())
        return dll.DxFlatFieldCorrection(c_void_p(input_address), c_void_p(output_address),
                                         c_uint(actual_bits), c_uint32(width), c_uint32(height),
                                         byref(ffc_coefficients.get_ctype_array()),
                                         byref(coeff_len_c))
| [
"andreas.langenhagen@micropsi-industries.com"
] | andreas.langenhagen@micropsi-industries.com |
32c495d86179c4401c8bf75df944c92c9b360c0c | c924556a5929b3fa7447bf6fe9b2559a2ea7a5d7 | /question/migrations/0006_auto_20170814_2126.py | 4d6e70ec9e9e31b9ced8cfd1e331d07e04d60282 | [] | no_license | mx11Code/zhihu | d29be1cb1a5bc455443d0c5010efefbca36bec65 | 09979b3391f95ec0528f8fda3fce4d3a1a694914 | refs/heads/master | 2021-01-01T06:06:31.224160 | 2017-08-18T06:38:57 | 2017-08-18T06:38:57 | 97,356,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-14 21:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the Answer model, adds
    creation_time to comment/question/tag, and widens
    question.viewed_numbers to a BigIntegerField."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('question', '0005_auto_20170814_2020'),
    ]

    operations = [
        # New Answer model owned by a user (rows cascade-delete with the user).
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_deleted', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('creation_time', models.DateTimeField(auto_now_add=True)),
                ('content', models.TextField()),
                ('like', models.IntegerField(default=0)),
                ('viewed_numbers', models.IntegerField(default=0)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # auto_now_add fields need a one-off default (timezone.now) so Django
        # can backfill existing rows; preserve_default=False drops it afterwards.
        migrations.AddField(
            model_name='comment',
            name='creation_time',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='question',
            name='creation_time',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tag',
            name='creation_time',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='question',
            name='viewed_numbers',
            field=models.BigIntegerField(default=0),
        ),
    ]
| [
"18672553257@163.com"
] | 18672553257@163.com |
a06e77569bb9fc552a12e6e6f5ee56d5c33ebea1 | 602bdbd1d8ef4d36ccfdcae5756bc8e448d30584 | /share/basiccms/web/checkout.py | 86bb792ceeb5be2a1dd97fafe87b116f9d8f365f | [] | no_license | timparkin/timparkingallery | 1136027bf9cfbad31319958f20771a6fdc9f5fc4 | 6e6c02684a701817a2efae27e21b77765daa2c33 | refs/heads/master | 2016-09-06T00:28:16.965416 | 2008-11-25T21:15:45 | 2008-11-25T21:15:45 | 12,716 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,952 | py | from pollen.mail import mailutil
from twisted.internet import defer
from twisted.python import log
from nevow import url, accessors, inevow, tags as T, rend
import formal
from crux import skin, icrux
from tub.public.web.common import getStoreSession
from ecommerce.salesorder.manager import SalesOrder, SalesOrderItem
from ecommerce.salesorder.util import createSalesOrderItem
from basiccms import basket as dw_basket
from basiccms.web import common
from basiccms.web.utils import RenderFragmentMixin, RenderInheritMixin
class DetailsPage(RenderInheritMixin, RenderFragmentMixin, common.Page):
docFactory = skin.loader('CheckoutDetailsPage.html')
def __init__(self, avatar):
    # Keep a reference to the logged-in avatar; it supplies the delivery
    # country options and the realm used for billing country options.
    super(DetailsPage, self).__init__()
    self.avatar = avatar
def getCountryOptions(self, storeSession):
    """Return a deferred that fires with a dict of country option lists:
    {'delivery': [...], 'billing': [...]}."""
    data = {}
    # Fetch delivery countries first, then billing countries, accumulating
    # both into 'data'; the final callback discards the last result and
    # yields the accumulated dict.
    d = self.avatar.getDeliveryCountries(storeSession)
    d.addCallback(lambda options: data.update({'delivery': options}))
    d.addCallback(lambda ignore: self.avatar.realm.getBillingCountryOptions())
    d.addCallback(lambda options: data.update({'billing': options}))
    d.addCallback(lambda options: data)
    return d
def form_details(self, ctx):
    # Formal form factory: build the checkout details form once the billing
    # and delivery country options have been fetched for this store session.
    storeSession = getStoreSession(ctx)
    d = self.getCountryOptions(storeSession)
    d.addCallback(lambda options: self._build_details_form(options['billing'], options['delivery']))
    return d
def _build_details_form(self, billingCountryOptions, deliveryCountryOptions):
form = formal.Form()
form.addField('firstName', formal.String(required=True, strip=True))
form.addField('lastName', formal.String(required=True, strip=True))
form.addField('phoneNumber', formal.String(required=True, strip=True))
form.addField('billingAddress1', formal.String(required=True, strip=True))
form.addField('billingAddress2', formal.String(strip=True))
form.addField('billingAddress3', formal.String(strip=True))
form.addField('billingCity', formal.String(required=True, strip=True))
form.addField('billingPostcode', formal.String(required=True, strip=True))
form.addField('billingCountry', formal.String(required=True, strip=True),
widgetFactory=formal.widgetFactory(formal.SelectChoice, options=billingCountryOptions) )
form.addField('cardType', formal.String(required=True),
formal.widgetFactory(formal.SelectChoice, CommonData.Cards))
form.addField('cardNumber', formal.String(required=True, strip=True))
form.addField('cvv2', formal.String(required=True, strip=True),
label='Card Security Code',description='last three numbers on signature strip')
form.addField('expiryDate', formal.Date(required=True),
formal.widgetFactory(formal.MMYYDatePartsInput), description='e.g. 12/05' )
form.addField('issueNumber', formal.String(strip=True),
description='for maestro and switch only')
form.addField('startDate', formal.Date(),
formal.widgetFactory(formal.MMYYDatePartsInput), description='for switch only' )
delivery = formal.Group('delivery', label='Delivery Address', description="Only enter details here if the delivery address is different from the billing address above.")
form.add( delivery )
delivery.add( formal.Field('name', formal.String(strip=True)) )
delivery.add( formal.Field('address1', formal.String(strip=True)))
delivery.add( formal.Field('address2', formal.String(strip=True)))
delivery.add( formal.Field('address3', formal.String(strip=True)))
delivery.add( formal.Field('city', formal.String(strip=True)))
delivery.add( formal.Field('postcode', formal.String(strip=True)) )
delivery.add( formal.Field('country', formal.String(strip=True),
widgetFactory=formal.widgetFactory(formal.SelectChoice, options=deliveryCountryOptions)) )
message = formal.Group('message', label='Gift Message', description="If you have chosen to use our gift wrapping service you can specify a message here")
form.add( message )
message.add( formal.Field('message', formal.String(strip=True), widgetFactory=formal.TextArea) )
form.addAction(self._confirm, label="Confirm Order")
if self.avatar.checkoutDetails:
form.data = self.avatar.checkoutDetails
elif self.avatar.customer:
form.data = {
'firstName': self.avatar.customer.first_name,
'lastName': self.avatar.customer.last_name,
'phoneNumber': self.avatar.customer.phoneNumber,
'billingAddress1': self.avatar.customer.billingAddress1,
'billingAddress2': self.avatar.customer.billingAddress2,
'billingAddress3': self.avatar.customer.billingAddress3,
'billingCity': self.avatar.customer.billingCity,
'billingPostcode': self.avatar.customer.billingPostcode,
'billingCountry': self.avatar.customer.billingCountry,
}
if self.avatar.realm.config['ecommerce']['paymentGateway'].get('use_test_data', False):
from datetime import date
from dateutil.relativedelta import relativedelta
form.data['cardType'] = 'VISA'
form.data['cardNumber'] = '4111111111111111'
form.data['cvv2'] = '432'
form.data['expiryDate'] = date.today()+relativedelta(months=6)
return form
def _confirm(self, ctx, form, data):
deliveryAddressSpecified = data['delivery.address1'] or data['delivery.address2'] or data['delivery.address3']
if data['delivery.name'] or deliveryAddressSpecified or data['delivery.city'] \
or data['delivery.postcode'] or data['delivery.country']:
if not data['delivery.name']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.name')
if not deliveryAddressSpecified:
raise formal.FieldError('All delivery details must be entered.', 'delivery.address1')
if not data['delivery.city']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.city')
if not data['delivery.postcode']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.postcode')
if not data['delivery.country']:
raise formal.FieldError('All delivery details must be entered.', 'delivery.country')
self.avatar.checkoutDetails = data
if data['delivery.country']:
if self.avatar.basket.deliveryOptions.getCurrentCountry() != data['delivery.country'].lower():
raise formal.FieldError('Delivery country does not match basket delivery option.', 'delivery.country')
else:
if self.avatar.basket.deliveryOptions.getCurrentCountry() != data['billingCountry'].lower():
raise formal.FieldError('Delivery country does not match basket delivery option.', 'billingCountry')
return url.URL.fromContext(ctx).sibling('confirm')
class ThankYouPage(common.Page):
docFactory = skin.loader('CheckoutThankYouPage.html')
def __init__(self, avatar):
super(ThankYouPage, self).__init__()
self.avatar = avatar
def render_order_num(self, ctx, data):
order_num = inevow.IRequest(ctx).args.get('order_num', [''])[0]
return order_num
def render_tracking(self, ctx, data):
order_num = inevow.IRequest(ctx).args.get('order_num', [''])[0]
basket_value = inevow.IRequest(ctx).args.get('basket_value', [''])[0]
ctx.tag.fillSlots('order_num', order_num)
ctx.tag.fillSlots('basket_value', basket_value)
return ctx.tag
def debug(r, mess):
print '>>DEBUG', mess, r
return r
| [
"info@timparkin.co.uk"
] | info@timparkin.co.uk |
d3dca8c7caa3d4cce19d81a2ae21e26977fd5021 | f6b95eb2e5c988b2f6d12a185996601898c06e4f | /E87A_E87G_count_water.py | b1044d83f49ef847a85a7bdfead308d375a2f1af | [] | no_license | A-Yarrow/PDB_tools | f4ca2de900f1c5da9b2a543dd23e4c5aa13e05dc | 89a66c0fbfd53e58d7b5463625d452148f91a3e6 | refs/heads/master | 2021-01-02T09:18:10.468628 | 2019-10-13T22:38:17 | 2019-10-13T22:38:17 | 99,187,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,570 | py | import os
import matplotlib.pyplot as plt
import seaborn as sns
def count_E87A_single_water():
os.chdir("/Users/yarrowmadrona/Laboratory/MD/NAMD/dosE87A/analysis/water_bridge/pdb")
filehandle = open('E87A_single_water.txt', 'wb')
E87A_water_list_single = []
E87A_single_frame_list = []
current_dir = os.getcwd()
for root, dirs, files in os.walk(current_dir):
for filename in files:
statinfo = os.stat(filename)
if statinfo.st_size > 0:
if filename.startswith('E87A_single'):
E87A_water_list_single.append(filename)
E87A_single_frame_list.append(int(filename[23:27])+1)
filehandle.write(filename+'\n')
value_frame_single_list = []
for water in E87A_water_list_single:
value_frame_single_list.append(int(1))
E87A_no_bridge_water = len(E87A_water_list_single)
E87A_single_perc_occ = float(E87A_no_bridge_water) / 4102 * 100
print "The E87A mutant has a Y171-wat-H89 water bridge %3d percent of the time" %(E87A_single_perc_occ)
filehandle.write("The E87A mutant has a Y171-wat-H89 water bridge %3d percent of the time" %(E87A_single_perc_occ))
#print "water list:%s"%(E87A_water_list_single)
filehandle.close()
return value_frame_single_list, E87A_single_frame_list
def count_E87A_double_water():
os.chdir("/Users/yarrowmadrona/Laboratory/MD/NAMD/dosE87A/analysis/water_bridge/pdb")
filehandle = open('E87A_double_water.txt', 'wb')
E87A_water_list_double = []
E87A_double_frame_list = []
current_dir = os.getcwd()
for root, dirs, files in os.walk(current_dir):
for filename in files:
statinfo = os.stat(filename)
if statinfo.st_size > 0:
if filename.startswith('E87A_double'):
E87A_water_list_double.append(filename)
E87A_double_frame_list.append(int(filename[23:27])+1)
filehandle.write(filename+'\n')
value_frame_double_list = []
for water in E87A_water_list_double:
value_frame_double_list.append(int(2))
E87A_no_bridge_water = len(E87A_water_list_double)
E87A_doub_perc_occ = float(E87A_no_bridge_water) / 4102 * 100
filehandle.write("The E87A mutant has a Y171-wat-wat-H89 water bridge %3d percent of the time" %(E87A_doub_perc_occ))
filehandle.close()
print "The E87A mutant has a Y171-wat-wat-H89 water bridge %3d percent of the time" %(E87A_doub_perc_occ)
#print "water list:%s"%(E87A_water_list_double)
return value_frame_double_list, E87A_double_frame_list
def count_E87G_single_water():
os.chdir("/Users/yarrowmadrona/Laboratory/MD/NAMD/dosE87G/analysis/water_bridge/pdb")
E87G_single_frame_list = []
current_dir = os.getcwd()
for root, dirs, files in os.walk(current_dir):
for filename in files:
statinfo = os.stat(filename)
if statinfo.st_size > 0:
if filename.startswith('E87G_single_water_0'):
E87G_single_frame_list.append(int(filename[23:27]))
return E87G_single_frame_list
def count_E87G_double_water():
os.chdir("/Users/yarrowmadrona/Laboratory/MD/NAMD/dosE87G/analysis/water_bridge/pdb")
E87G_double_frame_list = []
current_dir = os.getcwd()
for root, dirs, files in os.walk(current_dir):
for filename in files:
statinfo = os.stat(filename)
if statinfo.st_size > 0:
if filename.startswith('E87G_double_water_0'):
E87G_double_frame_list.append(int(filename[23:27]))
return E87G_double_frame_list
#Because there are so many cross bridges with extra water in this active site these overlap, so just look at uniques for total
#water bridges.
def E87G_water_bridge():
filehandle = open('E87G_water_bridge.txt', 'wb')
E87G_double_frame_list = count_E87G_double_water()
E87G_single_frame_list = count_E87G_single_water()
x = set()
y = set()
for frame in E87G_double_frame_list:
x.add(frame)
for frame in E87G_single_frame_list:
y.add(frame)
total_unique_bridges_E87G_set = set.intersection(x, y)
total_unique_bridges_E87G_list = list(total_unique_bridges_E87G_set)
total_unique_bridges_E87G_list.sort()
value_frame_unique_bridge = []
for frame in total_unique_bridges_E87G_list:
value_frame_unique_bridge.append(int(3))
E87G_no_bridge_water = len(total_unique_bridges_E87G_list)
E87G_bridge_perc_occ = float(E87G_no_bridge_water) / 609 * 100
filehandle.write("The E87G mutant has a Y171 water bridge H89 %3d percent of the time" %(E87G_bridge_perc_occ))
filehandle.close()
print "The E87G mutant has a Y171 H89 water bridge %3d percent of the time" %(E87G_bridge_perc_occ)
return value_frame_unique_bridge, total_unique_bridges_E87G_list
def plot_water_bridge():
y1, x1 = count_E87A_single_water()
y2, x2 = count_E87A_double_water()
y3, x3 = E87G_water_bridge()
plt.figure("WT water bridge", figsize = (8, 2.5))
plt.scatter(x1, y1, color = 'blue', s=450, lw = '0.03', facecolor='None')
plt.scatter(x2, y2, color = 'green', s=450, lw = '0.03', facecolor='None')
plt.scatter(x3, y3, color = 'yellow', s=450, lw = '0.03', facecolor='None')
plt.axis([-150, 4500, 0.5, 3.5])
#plt.xlabel("Nanoseconds")
plt.grid(False)
plt.savefig('E87A_E87G_water_plot')
plt.show()
plot_water_bridge()
| [
"yarrowmadrona@gmail.com"
] | yarrowmadrona@gmail.com |
871ee0aa11d9c9a5a5fd113b5d7702362217c2e3 | 8ec820513f17e34c64f8d66c57da40e02f1f1959 | /app/settings.py | b0a3617963e7083efa67871aa4d9f42ed02e381a | [] | no_license | fbiopereira/python-flask-api | 69174deb6f2211d1366fd4e767cb93c9d91d021d | c0ebf8623eee1eca844b69b26ae0d3d959abe71d | refs/heads/main | 2023-04-05T08:37:00.409107 | 2021-04-07T19:27:13 | 2021-04-07T19:27:13 | 348,356,103 | 3 | 0 | null | 2021-04-07T19:27:14 | 2021-03-16T13:22:38 | null | UTF-8 | Python | false | false | 2,760 | py | import os
from app.helpers.git_helpers import GitHelpers
from app.custom_log import CustomLog
from flask import Flask
from flask_restplus import Api
from flask_pymongo import PyMongo
from prometheus_flask_exporter import PrometheusMetrics
flask_app = Flask(__name__)
# Setting environment variables
environment = "development"
if os.environ.get('ENVIRONMENT') is not None:
environment = os.environ.get('ENVIRONMENT')
flask_debug = True
if os.environ.get('FLASK_DEBUG') is not None:
flask_debug = os.environ.get('FLASK_DEBUG')
if environment != "development":
flask_debug = False
if flask_debug is None:
flask_debug = True # Do not use debug mode in production
service_name = "NOME_DO_SERVICO_NAO_INFORMADO"
if os.environ.get('SERVICE_NAME') is not None:
service_name = os.environ.get('SERVICE_NAME')
genre_service_url = ""
if os.environ.get('GENRE_SERVICE_URL') is not None:
genre_service_url = os.environ.get('GENRE_SERVICE_URL')
movie_notify_service_url = ""
if os.environ.get('MOVIE_NOTIFY_SERVICE_URL') is not None:
movie_notify_service_url = os.environ.get('MOVIE_NOTIFY_SERVICE_URL')
mongo_uri = "mongodb://localhost:27017/movies"
if os.environ.get('MONGO_URI') is not None:
mongo_uri = os.environ.get('MONGO_URI')
log_path = None
if os.environ.get('LOG_PATH') is not None:
log_path = os.environ.get('LOG_PATH')
# Setting restplus variables
restplus_swagger_ui_doc_expansion = 'list'
restplus_validate = True
restplus_mask_swagger = False
restplus_error_404_help = False
git_helpers = GitHelpers(environment)
service_version = git_helpers.get_service_version()
# Setting flask config variables
flask_app.config.SWAGGER_SUPPORTED_SUBMIT_METHODS = ['get', 'post']
flask_app.config['SWAGGER_UI_DOC_EXPANSION'] = restplus_swagger_ui_doc_expansion
flask_app.config['RESTPLUS_VALIDATE'] = restplus_validate
flask_app.config['RESTPLUS_MASK_SWAGGER'] = restplus_mask_swagger
flask_app.config['ERROR_404_HELP'] = restplus_error_404_help
flask_app.config['ENVIRONMENT'] = environment
flask_app.config['SERVICE_NAME'] = service_name
flask_app.config['SERVICE_VERSION'] = service_version
flask_app.config['LOG_PATH'] = log_path
flask_app.config['MONGO_URI'] = mongo_uri
flask_app.config['GENRE_SERVICE_URL'] = genre_service_url
flask_app.config['MOVIE_NOTIFY_SERVICE_URL'] = movie_notify_service_url
log = CustomLog(service_name=service_name,
service_version=service_version,
environment=environment, log_path=log_path)
metrics = PrometheusMetrics(flask_app)
api = Api(title='Flask Restplus API', version=service_version,
description='A Flask RestPlus boilerplate to be used in my demos', doc="/docs", prefix="/api", validate=True)
mongodb = PyMongo(flask_app)
| [
"noreply@github.com"
] | fbiopereira.noreply@github.com |
890a0e4832d87c843d5509306210f0da7f740075 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/TSZLMM/YW_TSZLMM_SZXJ_085.py | aee9b54b61b3b19aec3adc52e31a8f6ab6a2da24 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,096 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_TSZLMM_SZXJ_085(xtp_test_case):
# YW_TSZLMM_SZXJ_085
def test_YW_TSZLMM_SZXJ_085(self):
title = '默认3:订单报价超过涨跌幅限制-深A限价卖><跌停价(跌停价-0.02)'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '废单',
'errorID': 11010122,
'errorMSG': queryOrderErrorMsg(11010122),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('003154', '2', '0', '10', '0', 'S', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': stkparm['跌停价']-0.02,
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
9c10b4ab8f6eefaa201f87e20b7c7240a8586b1d | 74988580506873d7047c2283690b0335ee1c595b | /backend/env/lib/python2.7/site-packages/django_cron/models.py | bd2db7041d640e08eb4897a9e053d7743b0012d1 | [] | no_license | morganpdx/CompostAnalyzer | 9be7f9b6379ca9b0aaed3ff5df7f1c047789b1d1 | 68d46127d9992499dc3fae36228f797254bab0bb | refs/heads/master | 2020-06-12T22:20:12.044913 | 2016-12-04T20:11:36 | 2016-12-04T20:11:36 | 75,499,675 | 1 | 0 | null | 2016-12-04T01:42:10 | 2016-12-03T20:51:41 | Python | UTF-8 | Python | false | false | 1,079 | py | from django.db import models
class CronJobLog(models.Model):
"""
Keeps track of the cron jobs that ran etc. and any error messages if they failed.
"""
code = models.CharField(max_length=64, db_index=True)
start_time = models.DateTimeField(db_index=True)
end_time = models.DateTimeField(db_index=True)
is_success = models.BooleanField(default=False)
message = models.TextField(default='', blank=True) # TODO: db_index=True
"""
This field is used to mark jobs executed in exact time.
Jobs that run every X minutes, have this field empty.
"""
ran_at_time = models.TimeField(null=True, blank=True, db_index=True, editable=False)
def __unicode__(self):
return '%s (%s)' % (self.code, 'Success' if self.is_success else 'Fail')
class Meta:
index_together = [
('code', 'is_success', 'ran_at_time'),
('code', 'start_time', 'ran_at_time'),
('code', 'start_time') # useful when finding latest run (order by start_time) of cron
]
app_label = 'django_cron'
| [
"morgan.senkal@metaltoad.com"
] | morgan.senkal@metaltoad.com |
77510b8aa173a5acc8b0b2369d6a2850ff5fb175 | 2a57cee9055825ce4f6f1477495c17acc0c2de80 | /classification/utils/plots.py | 5f9caddc7d2f6c5374a8ee79b73952f9940fc8b6 | [] | no_license | bayer-science-for-a-better-life/beehive | 60c72ad1d3098f0ea6daa582ee0a660dd6e4fa48 | 1b9a42833bd0dc1b95fe1c1c573d018659967308 | refs/heads/main | 2023-04-01T14:20:35.931453 | 2021-04-09T17:59:16 | 2021-04-09T17:59:16 | 356,354,493 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,782 | py | import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
def hist(vals, title=None, save=None, xlim=None):
"""vals: num list"""
ax = sns.distplot(vals, kde=False)
if xlim is not None:
ax.set(xlim=(0, 230))
if title is not None:
ax.set_title(title)
if save is not None:
ax.figure.savefig(save)
def boxplot(data, aes, title=None, save=None, figsize=(10, 4)):
"""
aes: dict(x='x-map', y='y-map', col='col-map')
"""
fig, ax = plt.subplots(1, 1, figsize=figsize)
sns.boxplot(data=data, x=aes["x"], y=aes["y"], hue=aes["col"], ax=ax)
if title is not None:
ax.set_title(title)
if save is not None:
fig.savefig(save)
def residuals(preds, truth, title=None, save=None):
"""
truth, preds, list of nums
"""
df = pd.DataFrame(
dict(truth=truth, preds=preds, resids=[t - p for t, p in zip(truth, preds)])
)
fig, ax = plt.subplots(1, 3, figsize=(14, 4))
g = sns.scatterplot(x="preds", y="truth", data=df, ax=ax[0])
g.plot(
[min(preds), max(preds)], [min(preds), max(preds)], color="gray", linestyle="--"
)
g.set_title("Prediction")
g = sns.scatterplot(x="preds", y="resids", data=df, ax=ax[1])
g.plot([min(preds), max(preds)], [0, 0], color="gray", linestyle="--")
g.set_title("Residual Plot")
g = sns.distplot(df["resids"], ax=ax[2])
g.set_title("Residual Distribution")
if title is not None:
plt.subplots_adjust(top=0.8)
fig.suptitle(title, fontsize=16)
if save is not None:
fig.savefig(save)
def barplot(data, aes, title=None, save=None, aspect=1.5, height=4, dodge=False):
"""
aes: dict(x='x-map', y='y-map', hue='col-map')
"""
g = sns.catplot(
**aes,
data=data,
dodge=dodge,
legend=False,
kind="bar",
aspect=aspect,
height=height
)
plt.legend(loc="lower left")
axs = g.axes.flatten()
if title is not None:
axs[0].set_title(title)
if save is not None:
axs[0].figure.savefig(save)
def train_progress(data, aes, title=None, save=None, aspect=3, height=4):
"""
aes: dict(x='x-map', loss=['Train-Loss', 'Val-Loss'], perf=['Train-Rsq', 'Val-Rsq'])
"""
df = data.melt(id_vars=aes["x"], value_vars=aes["loss"]).append(
data.melt(id_vars=aes["x"], value_vars=aes["perf"]), ignore_index=True
)
loss = aes["loss"][0].split("-")[1]
perf = aes["perf"][0].split("-")[1]
df["metric"] = [loss] * int(len(df) / 2) + [perf] * int(len(df) / 2)
df["part"] = df["variable"].apply(lambda d: d.split("-")[0])
g = sns.FacetGrid(
df, row="metric", aspect=3, height=4, sharey=False, legend_out=False
)
g = g.map_dataframe(sns.lineplot, x=aes["x"], y="value", hue="part").add_legend()
if title is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle(title)
if save is not None:
g.fig.savefig(save)
def scatterpair(data, aes, title=None, save=None, aspect=1, height=5):
"""
aes: dict(x='x-map', y='y-map', hue='color-map', col='column-map')
"""
df_map = {k: v for k, v in aes.items() if k != "col"}
g = (
sns.FacetGrid(
data=data,
col=aes["col"],
aspect=1,
height=5,
sharey=False,
sharex=False,
legend_out=False,
)
.map_dataframe(sns.scatterplot, **df_map)
.add_legend()
)
if title is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle(title)
if save is not None:
g.fig.savefig(save)
def distpair(data, aes, title=None, save=None, aspect=1, height=5):
"""
aes: dict(a='a-map', col='column-map')
"""
g = (
sns.FacetGrid(
data=data,
col=aes["col"],
aspect=1,
height=5,
sharey=False,
sharex=False,
legend_out=False,
)
.map(sns.distplot, aes["a"])
.add_legend()
)
if title is not None:
plt.subplots_adjust(top=0.9)
g.fig.suptitle(title)
if save is not None:
g.fig.savefig(save)
def heatmap(df, title=None, save=None, figsize=(9, 7), cmap="YlGnBu", annot=True):
"""
df of heatmap, indexes and columns are axis labels
"""
fig, ax = plt.subplots(figsize=figsize)
sns.heatmap(df, annot=annot, cmap=cmap, ax=ax)
if title is not None:
plt.subplots_adjust(top=0.95)
fig.suptitle(title, fontsize=16)
if save is not None:
fig.savefig(save)
def multiconf(m, labels, title=None, save=None):
"""
m: np.array confusion matrix
labels: ordered list of label names (axis labels)
"""
df = pd.DataFrame(m)
df.columns = labels
df.index = labels
heatmap(df, title=title, save=save)
def predictive_vals(metrics, title=None, save=None):
"""
metrics: list of dicts from `utils.evaluation.get_confusion_metrics`
"""
df = pd.DataFrame(metrics).melt(id_vars=["label"], value_vars=["PPV", "TPR"])
barplot(
df,
dict(x="label", y="value", hue="variable"),
dodge=True,
title=title,
save=save,
)
def lineplot(data, aes, title=None, save=None, line=None, figsize=(8, 8)):
"""
aes: dict(x='x-map', y='y-map', hue='hue-map')
line: dict(x=[x0, x1], y=[y0, y1]) for drawing straight line
"""
fig, ax = plt.subplots(figsize=figsize)
g = sns.lineplot(data=data, x=aes["x"], y=aes["y"], hue=aes["hue"], ax=ax)
if line is not None:
g.plot(line["x"], line["y"], color="gray", linestyle="--")
if title is not None:
g.set_title(title)
if save is not None:
fig.savefig(save)
| [
"schweringmarc01@gmail.com"
] | schweringmarc01@gmail.com |
ec05c908b2293a6b5a56297de2b08df851baacec | b33930a7d8e382985d7e8a8bc94fb5ccecbceefd | /leaf/dataSpec_P5B.py | 720a399e24574be44313d5a062302508a10372bf | [] | no_license | UCL-EO/leaf | d766f106917ee4200bfe9ed72e5a559a1a28b687 | b6751acee636ded88bbbaa2cd5cc39ae7a46e258 | refs/heads/master | 2022-05-17T14:38:12.890667 | 2022-04-11T16:13:47 | 2022-04-11T16:13:47 | 49,721,330 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 181,733 | py | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
dataspec_P5B='''! ***********************************************************************
! dataSpec_P5B.f90 - (october, 20th 2009)
! ***********************************************************************
! **********************************************************************
! these modules contain the spectral domain of PROSPECT & SAIL (lambda)
! (nw wavelengths with 1 nm spectral resolution),
! and the spectral coefficients for leaf, canopy, atmsphere and soil:
! [1] = refractive index of leaf material
! [2] = specific absorption coefficient of chlorophyll (a+b) (cm2.microg-1)
! [3] = specific absorption coefficient of carotenoids (cm2.microg-1)
! [4] = specific absorption coefficient of brown pigments (arbitrary units)
! [5] = specific absorption coefficient of water (cm-1)
! [6] = specific absorption coefficient of dry matter (cm2.g-1)
! [7] = fraction of direct light
! [8] = fraction of diffuse light
! [9] = reflectance of a dry soil
! [10] = reflectance of a wet soil
! Feret et al. (2008), PROSPECT-4 and 5: Advances in the Leaf Optical
! Properties Model Separating Photosynthetic Pigments, Remote Sensing of
! Environment
! The specific absorption coefficient corresponding to brown pigment is
! provided by Frederic Baret (EMMAH, INRA Avignon, baret@avignon.inra.fr)
! and used with his autorization.
! **********************************************************************
! **********************************************************************
MODULE MOD_dataSpec_P5B
IMPLICIT NONE
INTEGER,PARAMETER :: nw=2101
INTEGER*4 :: lambda(nw)
REAL*8 :: refractive(nw),k_Cab(nw),k_Car(nw),k_Brown(nw),k_Cw(nw),k_Cm(nw)
REAL*8 :: Es(nw),Ed(nw),Rsoil1(nw),Rsoil2(nw)
INTEGER*4 i
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! wavelength !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (lambda(i),i=1,100)/&
400.,401.,402.,403.,404.,405.,406.,407.,408.,409.,&
410.,411.,412.,413.,414.,415.,416.,417.,418.,419.,&
420.,421.,422.,423.,424.,425.,426.,427.,428.,429.,&
430.,431.,432.,433.,434.,435.,436.,437.,438.,439.,&
440.,441.,442.,443.,444.,445.,446.,447.,448.,449.,&
450.,451.,452.,453.,454.,455.,456.,457.,458.,459.,&
460.,461.,462.,463.,464.,465.,466.,467.,468.,469.,&
470.,471.,472.,473.,474.,475.,476.,477.,478.,479.,&
480.,481.,482.,483.,484.,485.,486.,487.,488.,489.,&
490.,491.,492.,493.,494.,495.,496.,497.,498.,499/
DATA (lambda(i),i=101,200)/&
500.,501.,502.,503.,504.,505.,506.,507.,508.,509.,&
510.,511.,512.,513.,514.,515.,516.,517.,518.,519.,&
520.,521.,522.,523.,524.,525.,526.,527.,528.,529.,&
530.,531.,532.,533.,534.,535.,536.,537.,538.,539.,&
540.,541.,542.,543.,544.,545.,546.,547.,548.,549.,&
550.,551.,552.,553.,554.,555.,556.,557.,558.,559.,&
560.,561.,562.,563.,564.,565.,566.,567.,568.,569.,&
570.,571.,572.,573.,574.,575.,576.,577.,578.,579.,&
580.,581.,582.,583.,584.,585.,586.,587.,588.,589.,&
590.,591.,592.,593.,594.,595.,596.,597.,598.,599/
DATA (lambda(i),i=201,300)/&
600.,601.,602.,603.,604.,605.,606.,607.,608.,609.,&
610.,611.,612.,613.,614.,615.,616.,617.,618.,619.,&
620.,621.,622.,623.,624.,625.,626.,627.,628.,629.,&
630.,631.,632.,633.,634.,635.,636.,637.,638.,639.,&
640.,641.,642.,643.,644.,645.,646.,647.,648.,649.,&
650.,651.,652.,653.,654.,655.,656.,657.,658.,659.,&
660.,661.,662.,663.,664.,665.,666.,667.,668.,669.,&
670.,671.,672.,673.,674.,675.,676.,677.,678.,679.,&
680.,681.,682.,683.,684.,685.,686.,687.,688.,689.,&
690.,691.,692.,693.,694.,695.,696.,697.,698.,699/
DATA (lambda(i),i=301,400)/&
700.,701.,702.,703.,704.,705.,706.,707.,708.,709.,&
710.,711.,712.,713.,714.,715.,716.,717.,718.,719.,&
720.,721.,722.,723.,724.,725.,726.,727.,728.,729.,&
730.,731.,732.,733.,734.,735.,736.,737.,738.,739.,&
740.,741.,742.,743.,744.,745.,746.,747.,748.,749.,&
750.,751.,752.,753.,754.,755.,756.,757.,758.,759.,&
760.,761.,762.,763.,764.,765.,766.,767.,768.,769.,&
770.,771.,772.,773.,774.,775.,776.,777.,778.,779.,&
780.,781.,782.,783.,784.,785.,786.,787.,788.,789.,&
790.,791.,792.,793.,794.,795.,796.,797.,798.,799/
DATA (lambda(i),i=401,500)/&
800.,801.,802.,803.,804.,805.,806.,807.,808.,809.,&
810.,811.,812.,813.,814.,815.,816.,817.,818.,819.,&
820.,821.,822.,823.,824.,825.,826.,827.,828.,829.,&
830.,831.,832.,833.,834.,835.,836.,837.,838.,839.,&
840.,841.,842.,843.,844.,845.,846.,847.,848.,849.,&
850.,851.,852.,853.,854.,855.,856.,857.,858.,859.,&
860.,861.,862.,863.,864.,865.,866.,867.,868.,869.,&
870.,871.,872.,873.,874.,875.,876.,877.,878.,879.,&
880.,881.,882.,883.,884.,885.,886.,887.,888.,889.,&
890.,891.,892.,893.,894.,895.,896.,897.,898.,899/
DATA (lambda(i),i=501,600)/&
900.,901.,902.,903.,904.,905.,906.,907.,908.,909.,&
910.,911.,912.,913.,914.,915.,916.,917.,918.,919.,&
920.,921.,922.,923.,924.,925.,926.,927.,928.,929.,&
930.,931.,932.,933.,934.,935.,936.,937.,938.,939.,&
940.,941.,942.,943.,944.,945.,946.,947.,948.,949.,&
950.,951.,952.,953.,954.,955.,956.,957.,958.,959.,&
960.,961.,962.,963.,964.,965.,966.,967.,968.,969.,&
970.,971.,972.,973.,974.,975.,976.,977.,978.,979.,&
980.,981.,982.,983.,984.,985.,986.,987.,988.,989.,&
990.,991.,992.,993.,994.,995.,996.,997.,998.,999/
DATA (lambda(i),i=601,700)/&
1000.,1001.,1002.,1003.,1004.,1005.,1006.,1007.,1008.,1009.,&
1010.,1011.,1012.,1013.,1014.,1015.,1016.,1017.,1018.,1019.,&
1020.,1021.,1022.,1023.,1024.,1025.,1026.,1027.,1028.,1029.,&
1030.,1031.,1032.,1033.,1034.,1035.,1036.,1037.,1038.,1039.,&
1040.,1041.,1042.,1043.,1044.,1045.,1046.,1047.,1048.,1049.,&
1050.,1051.,1052.,1053.,1054.,1055.,1056.,1057.,1058.,1059.,&
1060.,1061.,1062.,1063.,1064.,1065.,1066.,1067.,1068.,1069.,&
1070.,1071.,1072.,1073.,1074.,1075.,1076.,1077.,1078.,1079.,&
1080.,1081.,1082.,1083.,1084.,1085.,1086.,1087.,1088.,1089.,&
1090.,1091.,1092.,1093.,1094.,1095.,1096.,1097.,1098.,1099/
DATA (lambda(i),i=701,800)/&
1100.,1101.,1102.,1103.,1104.,1105.,1106.,1107.,1108.,1109.,&
1110.,1111.,1112.,1113.,1114.,1115.,1116.,1117.,1118.,1119.,&
1120.,1121.,1122.,1123.,1124.,1125.,1126.,1127.,1128.,1129.,&
1130.,1131.,1132.,1133.,1134.,1135.,1136.,1137.,1138.,1139.,&
1140.,1141.,1142.,1143.,1144.,1145.,1146.,1147.,1148.,1149.,&
1150.,1151.,1152.,1153.,1154.,1155.,1156.,1157.,1158.,1159.,&
1160.,1161.,1162.,1163.,1164.,1165.,1166.,1167.,1168.,1169.,&
1170.,1171.,1172.,1173.,1174.,1175.,1176.,1177.,1178.,1179.,&
1180.,1181.,1182.,1183.,1184.,1185.,1186.,1187.,1188.,1189.,&
1190.,1191.,1192.,1193.,1194.,1195.,1196.,1197.,1198.,1199/
DATA (lambda(i),i=801,900)/&
1200.,1201.,1202.,1203.,1204.,1205.,1206.,1207.,1208.,1209.,&
1210.,1211.,1212.,1213.,1214.,1215.,1216.,1217.,1218.,1219.,&
1220.,1221.,1222.,1223.,1224.,1225.,1226.,1227.,1228.,1229.,&
1230.,1231.,1232.,1233.,1234.,1235.,1236.,1237.,1238.,1239.,&
1240.,1241.,1242.,1243.,1244.,1245.,1246.,1247.,1248.,1249.,&
1250.,1251.,1252.,1253.,1254.,1255.,1256.,1257.,1258.,1259.,&
1260.,1261.,1262.,1263.,1264.,1265.,1266.,1267.,1268.,1269.,&
1270.,1271.,1272.,1273.,1274.,1275.,1276.,1277.,1278.,1279.,&
1280.,1281.,1282.,1283.,1284.,1285.,1286.,1287.,1288.,1289.,&
1290.,1291.,1292.,1293.,1294.,1295.,1296.,1297.,1298.,1299/
DATA (lambda(i),i=901,1000)/&
1300.,1301.,1302.,1303.,1304.,1305.,1306.,1307.,1308.,1309.,&
1310.,1311.,1312.,1313.,1314.,1315.,1316.,1317.,1318.,1319.,&
1320.,1321.,1322.,1323.,1324.,1325.,1326.,1327.,1328.,1329.,&
1330.,1331.,1332.,1333.,1334.,1335.,1336.,1337.,1338.,1339.,&
1340.,1341.,1342.,1343.,1344.,1345.,1346.,1347.,1348.,1349.,&
1350.,1351.,1352.,1353.,1354.,1355.,1356.,1357.,1358.,1359.,&
1360.,1361.,1362.,1363.,1364.,1365.,1366.,1367.,1368.,1369.,&
1370.,1371.,1372.,1373.,1374.,1375.,1376.,1377.,1378.,1379.,&
1380.,1381.,1382.,1383.,1384.,1385.,1386.,1387.,1388.,1389.,&
1390.,1391.,1392.,1393.,1394.,1395.,1396.,1397.,1398.,1399/
DATA (lambda(i),i=1001,1100)/&
1400.,1401.,1402.,1403.,1404.,1405.,1406.,1407.,1408.,1409.,&
1410.,1411.,1412.,1413.,1414.,1415.,1416.,1417.,1418.,1419.,&
1420.,1421.,1422.,1423.,1424.,1425.,1426.,1427.,1428.,1429.,&
1430.,1431.,1432.,1433.,1434.,1435.,1436.,1437.,1438.,1439.,&
1440.,1441.,1442.,1443.,1444.,1445.,1446.,1447.,1448.,1449.,&
1450.,1451.,1452.,1453.,1454.,1455.,1456.,1457.,1458.,1459.,&
1460.,1461.,1462.,1463.,1464.,1465.,1466.,1467.,1468.,1469.,&
1470.,1471.,1472.,1473.,1474.,1475.,1476.,1477.,1478.,1479.,&
1480.,1481.,1482.,1483.,1484.,1485.,1486.,1487.,1488.,1489.,&
1490.,1491.,1492.,1493.,1494.,1495.,1496.,1497.,1498.,1499/
DATA (lambda(i),i=1101,1200)/&
1500.,1501.,1502.,1503.,1504.,1505.,1506.,1507.,1508.,1509.,&
1510.,1511.,1512.,1513.,1514.,1515.,1516.,1517.,1518.,1519.,&
1520.,1521.,1522.,1523.,1524.,1525.,1526.,1527.,1528.,1529.,&
1530.,1531.,1532.,1533.,1534.,1535.,1536.,1537.,1538.,1539.,&
1540.,1541.,1542.,1543.,1544.,1545.,1546.,1547.,1548.,1549.,&
1550.,1551.,1552.,1553.,1554.,1555.,1556.,1557.,1558.,1559.,&
1560.,1561.,1562.,1563.,1564.,1565.,1566.,1567.,1568.,1569.,&
1570.,1571.,1572.,1573.,1574.,1575.,1576.,1577.,1578.,1579.,&
1580.,1581.,1582.,1583.,1584.,1585.,1586.,1587.,1588.,1589.,&
1590.,1591.,1592.,1593.,1594.,1595.,1596.,1597.,1598.,1599/
DATA (lambda(i),i=1201,1300)/&
1600.,1601.,1602.,1603.,1604.,1605.,1606.,1607.,1608.,1609.,&
1610.,1611.,1612.,1613.,1614.,1615.,1616.,1617.,1618.,1619.,&
1620.,1621.,1622.,1623.,1624.,1625.,1626.,1627.,1628.,1629.,&
1630.,1631.,1632.,1633.,1634.,1635.,1636.,1637.,1638.,1639.,&
1640.,1641.,1642.,1643.,1644.,1645.,1646.,1647.,1648.,1649.,&
1650.,1651.,1652.,1653.,1654.,1655.,1656.,1657.,1658.,1659.,&
1660.,1661.,1662.,1663.,1664.,1665.,1666.,1667.,1668.,1669.,&
1670.,1671.,1672.,1673.,1674.,1675.,1676.,1677.,1678.,1679.,&
1680.,1681.,1682.,1683.,1684.,1685.,1686.,1687.,1688.,1689.,&
1690.,1691.,1692.,1693.,1694.,1695.,1696.,1697.,1698.,1699/
DATA (lambda(i),i=1301,1400)/&
1700.,1701.,1702.,1703.,1704.,1705.,1706.,1707.,1708.,1709.,&
1710.,1711.,1712.,1713.,1714.,1715.,1716.,1717.,1718.,1719.,&
1720.,1721.,1722.,1723.,1724.,1725.,1726.,1727.,1728.,1729.,&
1730.,1731.,1732.,1733.,1734.,1735.,1736.,1737.,1738.,1739.,&
1740.,1741.,1742.,1743.,1744.,1745.,1746.,1747.,1748.,1749.,&
1750.,1751.,1752.,1753.,1754.,1755.,1756.,1757.,1758.,1759.,&
1760.,1761.,1762.,1763.,1764.,1765.,1766.,1767.,1768.,1769.,&
1770.,1771.,1772.,1773.,1774.,1775.,1776.,1777.,1778.,1779.,&
1780.,1781.,1782.,1783.,1784.,1785.,1786.,1787.,1788.,1789.,&
1790.,1791.,1792.,1793.,1794.,1795.,1796.,1797.,1798.,1799/
DATA (lambda(i),i=1401,1500)/&
1800.,1801.,1802.,1803.,1804.,1805.,1806.,1807.,1808.,1809.,&
1810.,1811.,1812.,1813.,1814.,1815.,1816.,1817.,1818.,1819.,&
1820.,1821.,1822.,1823.,1824.,1825.,1826.,1827.,1828.,1829.,&
1830.,1831.,1832.,1833.,1834.,1835.,1836.,1837.,1838.,1839.,&
1840.,1841.,1842.,1843.,1844.,1845.,1846.,1847.,1848.,1849.,&
1850.,1851.,1852.,1853.,1854.,1855.,1856.,1857.,1858.,1859.,&
1860.,1861.,1862.,1863.,1864.,1865.,1866.,1867.,1868.,1869.,&
1870.,1871.,1872.,1873.,1874.,1875.,1876.,1877.,1878.,1879.,&
1880.,1881.,1882.,1883.,1884.,1885.,1886.,1887.,1888.,1889.,&
1890.,1891.,1892.,1893.,1894.,1895.,1896.,1897.,1898.,1899/
DATA (lambda(i),i=1501,1600)/&
1900.,1901.,1902.,1903.,1904.,1905.,1906.,1907.,1908.,1909.,&
1910.,1911.,1912.,1913.,1914.,1915.,1916.,1917.,1918.,1919.,&
1920.,1921.,1922.,1923.,1924.,1925.,1926.,1927.,1928.,1929.,&
1930.,1931.,1932.,1933.,1934.,1935.,1936.,1937.,1938.,1939.,&
1940.,1941.,1942.,1943.,1944.,1945.,1946.,1947.,1948.,1949.,&
1950.,1951.,1952.,1953.,1954.,1955.,1956.,1957.,1958.,1959.,&
1960.,1961.,1962.,1963.,1964.,1965.,1966.,1967.,1968.,1969.,&
1970.,1971.,1972.,1973.,1974.,1975.,1976.,1977.,1978.,1979.,&
1980.,1981.,1982.,1983.,1984.,1985.,1986.,1987.,1988.,1989.,&
1990.,1991.,1992.,1993.,1994.,1995.,1996.,1997.,1998.,1999/
DATA (lambda(i),i=1601,1700)/&
2000.,2001.,2002.,2003.,2004.,2005.,2006.,2007.,2008.,2009.,&
2010.,2011.,2012.,2013.,2014.,2015.,2016.,2017.,2018.,2019.,&
2020.,2021.,2022.,2023.,2024.,2025.,2026.,2027.,2028.,2029.,&
2030.,2031.,2032.,2033.,2034.,2035.,2036.,2037.,2038.,2039.,&
2040.,2041.,2042.,2043.,2044.,2045.,2046.,2047.,2048.,2049.,&
2050.,2051.,2052.,2053.,2054.,2055.,2056.,2057.,2058.,2059.,&
2060.,2061.,2062.,2063.,2064.,2065.,2066.,2067.,2068.,2069.,&
2070.,2071.,2072.,2073.,2074.,2075.,2076.,2077.,2078.,2079.,&
2080.,2081.,2082.,2083.,2084.,2085.,2086.,2087.,2088.,2089.,&
2090.,2091.,2092.,2093.,2094.,2095.,2096.,2097.,2098.,2099/
DATA (lambda(i),i=1701,1800)/&
2100.,2101.,2102.,2103.,2104.,2105.,2106.,2107.,2108.,2109.,&
2110.,2111.,2112.,2113.,2114.,2115.,2116.,2117.,2118.,2119.,&
2120.,2121.,2122.,2123.,2124.,2125.,2126.,2127.,2128.,2129.,&
2130.,2131.,2132.,2133.,2134.,2135.,2136.,2137.,2138.,2139.,&
2140.,2141.,2142.,2143.,2144.,2145.,2146.,2147.,2148.,2149.,&
2150.,2151.,2152.,2153.,2154.,2155.,2156.,2157.,2158.,2159.,&
2160.,2161.,2162.,2163.,2164.,2165.,2166.,2167.,2168.,2169.,&
2170.,2171.,2172.,2173.,2174.,2175.,2176.,2177.,2178.,2179.,&
2180.,2181.,2182.,2183.,2184.,2185.,2186.,2187.,2188.,2189.,&
2190.,2191.,2192.,2193.,2194.,2195.,2196.,2197.,2198.,2199/
DATA (lambda(i),i=1801,1900)/&
2200.,2201.,2202.,2203.,2204.,2205.,2206.,2207.,2208.,2209.,&
2210.,2211.,2212.,2213.,2214.,2215.,2216.,2217.,2218.,2219.,&
2220.,2221.,2222.,2223.,2224.,2225.,2226.,2227.,2228.,2229.,&
2230.,2231.,2232.,2233.,2234.,2235.,2236.,2237.,2238.,2239.,&
2240.,2241.,2242.,2243.,2244.,2245.,2246.,2247.,2248.,2249.,&
2250.,2251.,2252.,2253.,2254.,2255.,2256.,2257.,2258.,2259.,&
2260.,2261.,2262.,2263.,2264.,2265.,2266.,2267.,2268.,2269.,&
2270.,2271.,2272.,2273.,2274.,2275.,2276.,2277.,2278.,2279.,&
2280.,2281.,2282.,2283.,2284.,2285.,2286.,2287.,2288.,2289.,&
2290.,2291.,2292.,2293.,2294.,2295.,2296.,2297.,2298.,2299/
DATA (lambda(i),i=1901,2000)/&
2300.,2301.,2302.,2303.,2304.,2305.,2306.,2307.,2308.,2309.,&
2310.,2311.,2312.,2313.,2314.,2315.,2316.,2317.,2318.,2319.,&
2320.,2321.,2322.,2323.,2324.,2325.,2326.,2327.,2328.,2329.,&
2330.,2331.,2332.,2333.,2334.,2335.,2336.,2337.,2338.,2339.,&
2340.,2341.,2342.,2343.,2344.,2345.,2346.,2347.,2348.,2349.,&
2350.,2351.,2352.,2353.,2354.,2355.,2356.,2357.,2358.,2359.,&
2360.,2361.,2362.,2363.,2364.,2365.,2366.,2367.,2368.,2369.,&
2370.,2371.,2372.,2373.,2374.,2375.,2376.,2377.,2378.,2379.,&
2380.,2381.,2382.,2383.,2384.,2385.,2386.,2387.,2388.,2389.,&
2390.,2391.,2392.,2393.,2394.,2395.,2396.,2397.,2398.,2399/
DATA (lambda(i),i=2001,2101)/&
2400.,2401.,2402.,2403.,2404.,2405.,2406.,2407.,2408.,2409.,&
2410.,2411.,2412.,2413.,2414.,2415.,2416.,2417.,2418.,2419.,&
2420.,2421.,2422.,2423.,2424.,2425.,2426.,2427.,2428.,2429.,&
2430.,2431.,2432.,2433.,2434.,2435.,2436.,2437.,2438.,2439.,&
2440.,2441.,2442.,2443.,2444.,2445.,2446.,2447.,2448.,2449.,&
2450.,2451.,2452.,2453.,2454.,2455.,2456.,2457.,2458.,2459.,&
2460.,2461.,2462.,2463.,2464.,2465.,2466.,2467.,2468.,2469.,&
2470.,2471.,2472.,2473.,2474.,2475.,2476.,2477.,2478.,2479.,&
2480.,2481.,2482.,2483.,2484.,2485.,2486.,2487.,2488.,2489.,&
2490.,2491.,2492.,2493.,2494.,2495.,2496.,2497.,2498.,2499.,&
2500./
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! Refractive index !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (refractive(i),i=1,100)/&
1.4955,1.4958,1.4960,1.4964,1.4971,1.4978,1.4986,1.4995,1.5006,1.5016,&
1.5024,1.5031,1.5041,1.5052,1.5065,1.5076,1.5088,1.5099,1.5110,1.5122,&
1.5134,1.5144,1.5156,1.5168,1.5181,1.5195,1.5206,1.5212,1.5219,1.5228,&
1.5235,1.5241,1.5249,1.5256,1.5262,1.5267,1.5272,1.5276,1.5279,1.5282,&
1.5286,1.5290,1.5292,1.5294,1.5294,1.5295,1.5293,1.5291,1.5289,1.5286,&
1.5282,1.5278,1.5273,1.5268,1.5262,1.5256,1.5249,1.5243,1.5237,1.5230,&
1.5224,1.5217,1.5210,1.5203,1.5196,1.5189,1.5184,1.5178,1.5173,1.5170,&
1.5166,1.5163,1.5159,1.5156,1.5153,1.5150,1.5148,1.5145,1.5142,1.5139,&
1.5135,1.5131,1.5125,1.5119,1.5111,1.5103,1.5092,1.5080,1.5067,1.5051,&
1.5034,1.5015,1.4994,1.4971,1.4947,1.4921,1.4894,1.4866,1.4837,1.4807/
DATA (refractive(i),i=101,200)/&
1.4776,1.4744,1.4712,1.4680,1.4647,1.4615,1.4583,1.4553,1.4523,1.4495,&
1.4468,1.4443,1.4419,1.4397,1.4377,1.4360,1.4344,1.4331,1.4319,1.4310,&
1.4302,1.4296,1.4291,1.4288,1.4286,1.4286,1.4286,1.4287,1.4288,1.4290,&
1.4292,1.4294,1.4296,1.4298,1.4300,1.4302,1.4303,1.4305,1.4306,1.4307,&
1.4308,1.4309,1.4309,1.4310,1.4310,1.4310,1.4309,1.4308,1.4307,1.4305,&
1.4303,1.4301,1.4298,1.4294,1.4290,1.4285,1.4280,1.4274,1.4268,1.4262,&
1.4255,1.4248,1.4241,1.4234,1.4227,1.4220,1.4213,1.4206,1.4199,1.4192,&
1.4185,1.4179,1.4173,1.4167,1.4161,1.4156,1.4151,1.4146,1.4141,1.4137,&
1.4133,1.4129,1.4126,1.4123,1.4120,1.4117,1.4114,1.4112,1.4109,1.4107,&
1.4105,1.4103,1.4101,1.4099,1.4097,1.4095,1.4093,1.4091,1.4089,1.4087/
DATA (refractive(i),i=201,300)/&
1.4086,1.4084,1.4082,1.4081,1.4079,1.4078,1.4077,1.4076,1.4075,1.4075,&
1.4075,1.4075,1.4075,1.4076,1.4077,1.4077,1.4078,1.4079,1.4080,1.4081,&
1.4082,1.4083,1.4083,1.4083,1.4083,1.4082,1.4082,1.4081,1.4079,1.4078,&
1.4077,1.4076,1.4076,1.4076,1.4078,1.4080,1.4083,1.4087,1.4093,1.4100,&
1.4108,1.4118,1.4128,1.4140,1.4153,1.4166,1.4179,1.4193,1.4206,1.4218,&
1.4229,1.4240,1.4250,1.4261,1.4272,1.4284,1.4298,1.4315,1.4335,1.4357,&
1.4383,1.4411,1.4442,1.4475,1.4509,1.4544,1.4579,1.4614,1.4649,1.4682,&
1.4714,1.4744,1.4773,1.4802,1.4830,1.4857,1.4884,1.4909,1.4932,1.4951,&
1.4963,1.4967,1.4958,1.4936,1.4899,1.4847,1.4780,1.4702,1.4618,1.4530,&
1.4445,1.4366,1.4296,1.4237,1.4190,1.4154,1.4129,1.4112,1.4104,1.4102/
DATA (refractive(i),i=301,400)/&
1.4106,1.4113,1.4124,1.4137,1.4152,1.4168,1.4186,1.4204,1.4222,1.4242,&
1.4261,1.4281,1.4301,1.4321,1.4341,1.4361,1.4382,1.4402,1.4422,1.4442,&
1.4462,1.4481,1.4500,1.4518,1.4536,1.4554,1.4571,1.4587,1.4603,1.4618,&
1.4632,1.4646,1.4659,1.4671,1.4683,1.4694,1.4704,1.4713,1.4721,1.4729,&
1.4736,1.4742,1.4747,1.4752,1.4756,1.4759,1.4762,1.4764,1.4765,1.4766,&
1.4766,1.4767,1.4767,1.4767,1.4767,1.4766,1.4766,1.4765,1.4764,1.4763,&
1.4762,1.4761,1.4759,1.4758,1.4756,1.4754,1.4752,1.4751,1.4748,1.4746,&
1.4744,1.4742,1.4739,1.4737,1.4734,1.4732,1.4729,1.4727,1.4725,1.4722,&
1.4720,1.4718,1.4716,1.4714,1.4712,1.4710,1.4708,1.4706,1.4703,1.4701,&
1.4700,1.4698,1.4696,1.4694,1.4692,1.4690,1.4688,1.4686,1.4684,1.4682/
DATA (refractive(i),i=401,500)/&
1.4681,1.4679,1.4677,1.4675,1.4674,1.4672,1.4670,1.4668,1.4666,1.4665,&
1.4663,1.4662,1.4660,1.4659,1.4657,1.4655,1.4654,1.4652,1.4651,1.4649,&
1.4648,1.4646,1.4645,1.4643,1.4641,1.4639,1.4638,1.4636,1.4635,1.4633,&
1.4631,1.4630,1.4628,1.4627,1.4626,1.4625,1.4623,1.4622,1.4620,1.4619,&
1.4617,1.4616,1.4614,1.4613,1.4611,1.4610,1.4608,1.4607,1.4605,1.4604,&
1.4602,1.4601,1.4600,1.4599,1.4597,1.4596,1.4595,1.4594,1.4592,1.4591,&
1.4589,1.4587,1.4586,1.4584,1.4583,1.4582,1.4582,1.4581,1.4580,1.4579,&
1.4577,1.4576,1.4574,1.4572,1.4571,1.4569,1.4568,1.4566,1.4565,1.4563,&
1.4562,1.4560,1.4559,1.4558,1.4556,1.4554,1.4553,1.4551,1.4550,1.4548,&
1.4547,1.4545,1.4544,1.4542,1.4541,1.4539,1.4538,1.4536,1.4535,1.4533/
DATA (refractive(i),i=501,600)/&
1.4532,1.4531,1.4529,1.4528,1.4526,1.4525,1.4524,1.4522,1.4521,1.4519,&
1.4518,1.4516,1.4515,1.4513,1.4512,1.4511,1.4509,1.4508,1.4506,1.4505,&
1.4503,1.4502,1.4500,1.4498,1.4497,1.4495,1.4493,1.4492,1.4491,1.4489,&
1.4488,1.4487,1.4485,1.4483,1.4481,1.4479,1.4478,1.4477,1.4476,1.4474,&
1.4472,1.4469,1.4466,1.4465,1.4463,1.4462,1.4461,1.4461,1.4461,1.4460,&
1.4457,1.4455,1.4453,1.4451,1.4450,1.4449,1.4448,1.4447,1.4445,1.4445,&
1.4445,1.4442,1.4440,1.4438,1.4437,1.4436,1.4436,1.4435,1.4434,1.4434,&
1.4434,1.4434,1.4432,1.4430,1.4428,1.4427,1.4426,1.4425,1.4423,1.4421,&
1.4420,1.4418,1.4416,1.4415,1.4414,1.4414,1.4414,1.4415,1.4416,1.4416,&
1.4415,1.4414,1.4413,1.4411,1.4409,1.4407,1.4405,1.4403,1.4401,1.4400/
DATA (refractive(i),i=601,700)/&
1.4399,1.4397,1.4395,1.4394,1.4393,1.4392,1.4392,1.4391,1.4391,1.4390,&
1.4389,1.4387,1.4386,1.4385,1.4384,1.4384,1.4383,1.4381,1.4379,1.4378,&
1.4376,1.4374,1.4372,1.4371,1.4370,1.4369,1.4368,1.4367,1.4365,1.4365,&
1.4364,1.4364,1.4363,1.4363,1.4363,1.4363,1.4363,1.4362,1.4361,1.4360,&
1.4359,1.4357,1.4356,1.4354,1.4352,1.4351,1.4351,1.4350,1.4349,1.4349,&
1.4348,1.4347,1.4345,1.4343,1.4342,1.4342,1.4342,1.4341,1.4340,1.4339,&
1.4338,1.4337,1.4335,1.4334,1.4333,1.4333,1.4334,1.4333,1.4333,1.4332,&
1.4330,1.4329,1.4328,1.4327,1.4325,1.4324,1.4322,1.4321,1.4319,1.4318,&
1.4317,1.4316,1.4315,1.4314,1.4314,1.4313,1.4312,1.4310,1.4309,1.4307,&
1.4305,1.4304,1.4303,1.4302,1.4302,1.4301,1.4300,1.4297,1.4295,1.4294/
DATA (refractive(i),i=701,800)/&
1.4293,1.4292,1.4291,1.4290,1.4289,1.4288,1.4286,1.4284,1.4283,1.4283,&
1.4284,1.4284,1.4284,1.4284,1.4283,1.4282,1.4282,1.4280,1.4278,1.4276,&
1.4275,1.4274,1.4272,1.4270,1.4268,1.4267,1.4267,1.4266,1.4265,1.4264,&
1.4263,1.4262,1.4260,1.4259,1.4258,1.4257,1.4256,1.4256,1.4255,1.4254,&
1.4252,1.4250,1.4249,1.4248,1.4247,1.4244,1.4241,1.4239,1.4237,1.4235,&
1.4234,1.4233,1.4231,1.4230,1.4228,1.4226,1.4224,1.4224,1.4223,1.4223,&
1.4222,1.4222,1.4221,1.4221,1.4221,1.4219,1.4217,1.4216,1.4215,1.4215,&
1.4214,1.4212,1.4210,1.4208,1.4207,1.4206,1.4206,1.4206,1.4206,1.4206,&
1.4205,1.4203,1.4200,1.4199,1.4198,1.4199,1.4201,1.4204,1.4205,1.4205,&
1.4203,1.4200,1.4196,1.4192,1.4189,1.4187,1.4187,1.4188,1.4188,1.4187/
DATA (refractive(i),i=801,900)/&
1.4186,1.4185,1.4182,1.4181,1.4179,1.4178,1.4177,1.4177,1.4176,1.4175,&
1.4173,1.4173,1.4172,1.4171,1.4170,1.4168,1.4167,1.4165,1.4164,1.4163,&
1.4161,1.4158,1.4156,1.4155,1.4154,1.4153,1.4151,1.4151,1.4151,1.4150,&
1.4148,1.4147,1.4146,1.4145,1.4144,1.4143,1.4141,1.4139,1.4136,1.4134,&
1.4133,1.4130,1.4128,1.4128,1.4128,1.4127,1.4126,1.4124,1.4122,1.4122,&
1.4121,1.4121,1.4121,1.4119,1.4118,1.4116,1.4115,1.4113,1.4112,1.4112,&
1.4111,1.4111,1.4110,1.4108,1.4106,1.4104,1.4102,1.4102,1.4102,1.4103,&
1.4103,1.4101,1.4099,1.4097,1.4095,1.4093,1.4091,1.4091,1.4091,1.4090,&
1.4089,1.4088,1.4085,1.4083,1.4081,1.4080,1.4079,1.4078,1.4077,1.4078,&
1.4078,1.4078,1.4077,1.4076,1.4075,1.4073,1.4070,1.4068,1.4066,1.4064/
DATA (refractive(i),i=901,1000)/&
1.4064,1.4062,1.4061,1.4059,1.4056,1.4055,1.4055,1.4055,1.4055,1.4056,&
1.4057,1.4058,1.4058,1.4056,1.4055,1.4053,1.4051,1.4050,1.4049,1.4048,&
1.4046,1.4045,1.4044,1.4044,1.4044,1.4045,1.4045,1.4043,1.4042,1.4040,&
1.4040,1.4041,1.4041,1.4040,1.4036,1.4034,1.4032,1.4031,1.4030,1.4031,&
1.4033,1.4033,1.4033,1.4031,1.4028,1.4023,1.4017,1.4013,1.4010,1.4010,&
1.4008,1.4007,1.4004,1.4002,1.4000,1.4002,1.4004,1.4009,1.4011,1.4015,&
1.4016,1.4015,1.4010,1.4002,1.3996,1.3992,1.3989,1.3992,1.3994,1.3997,&
1.4000,1.4000,1.4000,1.4003,1.4002,1.3999,1.4002,1.4004,1.4008,1.4009,&
1.4019,1.4015,1.4010,1.4000,1.4001,1.3988,1.3978,1.3971,1.3960,1.3967,&
1.3958,1.3955,1.3952,1.3953,1.3945,1.3960,1.3961,1.3950,1.3946,1.3942/
DATA (refractive(i),i=1001,1100)/&
1.3938,1.3935,1.3926,1.3926,1.3930,1.3943,1.3943,1.3944,1.3944,1.3937,&
1.3933,1.3934,1.3934,1.3935,1.3933,1.3931,1.3928,1.3925,1.3920,1.3918,&
1.3915,1.3910,1.3904,1.3898,1.3892,1.3880,1.3872,1.3865,1.3858,1.3852,&
1.3843,1.3838,1.3829,1.3823,1.3810,1.3801,1.3794,1.3785,1.3776,1.3771,&
1.3765,1.3758,1.3751,1.3743,1.3733,1.3725,1.3717,1.3711,1.3707,1.3703,&
1.3700,1.3700,1.3697,1.3692,1.3685,1.3680,1.3675,1.3670,1.3667,1.3662,&
1.3657,1.3659,1.3658,1.3656,1.3656,1.3654,1.3650,1.3653,1.3651,1.3649,&
1.3650,1.3646,1.3641,1.3644,1.3640,1.3644,1.3643,1.3644,1.3647,1.3646,&
1.3649,1.3644,1.3641,1.3635,1.3635,1.3633,1.3631,1.3635,1.3635,1.3643,&
1.3642,1.3646,1.3643,1.3642,1.3637,1.3640,1.3638,1.3640,1.3640,1.3640/
DATA (refractive(i),i=1101,1200)/&
1.3646,1.3645,1.3650,1.3650,1.3652,1.3657,1.3656,1.3660,1.3657,1.3661,&
1.3665,1.3667,1.3670,1.3666,1.3669,1.3673,1.3673,1.3680,1.3686,1.3686,&
1.3689,1.3693,1.3690,1.3694,1.3697,1.3695,1.3697,1.3698,1.3696,1.3698,&
1.3700,1.3706,1.3705,1.3709,1.3712,1.3717,1.3718,1.3721,1.3721,1.3724,&
1.3723,1.3721,1.3723,1.3723,1.3725,1.3723,1.3723,1.3727,1.3729,1.3730,&
1.3730,1.3730,1.3731,1.3734,1.3734,1.3736,1.3737,1.3738,1.3735,1.3735,&
1.3734,1.3735,1.3736,1.3738,1.3737,1.3738,1.3736,1.3737,1.3735,1.3735,&
1.3735,1.3734,1.3734,1.3734,1.3735,1.3732,1.3733,1.3732,1.3732,1.3735,&
1.3736,1.3739,1.3742,1.3741,1.3738,1.3737,1.3737,1.3736,1.3735,1.3737,&
1.3738,1.3741,1.3743,1.3744,1.3744,1.3743,1.3740,1.3740,1.3741,1.3741/
DATA (refractive(i),i=1201,1300)/&
1.3742,1.3742,1.3741,1.3743,1.3743,1.3743,1.3742,1.3741,1.3740,1.3740,&
1.3738,1.3736,1.3734,1.3733,1.3731,1.3730,1.3730,1.3731,1.3730,1.3732,&
1.3731,1.3730,1.3729,1.3728,1.3727,1.3726,1.3725,1.3726,1.3725,1.3726,&
1.3726,1.3724,1.3719,1.3718,1.3716,1.3715,1.3712,1.3713,1.3711,1.3710,&
1.3707,1.3708,1.3707,1.3708,1.3708,1.3710,1.3712,1.3715,1.3715,1.3716,&
1.3714,1.3714,1.3712,1.3713,1.3709,1.3708,1.3704,1.3704,1.3700,1.3698,&
1.3695,1.3694,1.3692,1.3690,1.3687,1.3687,1.3684,1.3685,1.3684,1.3683,&
1.3683,1.3687,1.3681,1.3678,1.3674,1.3670,1.3665,1.3665,1.3664,1.3663,&
1.3659,1.3659,1.3656,1.3655,1.3654,1.3655,1.3654,1.3654,1.3651,1.3651,&
1.3648,1.3648,1.3647,1.3648,1.3647,1.3647,1.3646,1.3647,1.3646,1.3644/
DATA (refractive(i),i=1301,1400)/&
1.3640,1.3638,1.3635,1.3634,1.3632,1.3631,1.3628,1.3629,1.3627,1.3629,&
1.3629,1.3627,1.3625,1.3625,1.3623,1.3622,1.3619,1.3620,1.3619,1.3619,&
1.3616,1.3615,1.3612,1.3611,1.3611,1.3612,1.3613,1.3617,1.3617,1.3618,&
1.3618,1.3617,1.3615,1.3616,1.3615,1.3616,1.3618,1.3617,1.3616,1.3616,&
1.3613,1.3612,1.3612,1.3616,1.3616,1.3617,1.3618,1.3621,1.3623,1.3625,&
1.3626,1.3626,1.3625,1.3627,1.3633,1.3637,1.3637,1.3636,1.3635,1.3633,&
1.3630,1.3629,1.3632,1.3636,1.3641,1.3646,1.3651,1.3653,1.3648,1.3640,&
1.3630,1.3620,1.3615,1.3608,1.3608,1.3611,1.3617,1.3628,1.3638,1.3645,&
1.3649,1.3646,1.3637,1.3625,1.3613,1.3601,1.3595,1.3593,1.3599,1.3607,&
1.3614,1.3623,1.3627,1.3627,1.3624,1.3622,1.3619,1.3616,1.3616,1.3617/
DATA (refractive(i),i=1401,1500)/&
1.3619,1.3621,1.3622,1.3626,1.3630,1.3635,1.3637,1.3639,1.3636,1.3632,&
1.3629,1.3625,1.3623,1.3625,1.3628,1.3634,1.3638,1.3645,1.3649,1.3652,&
1.3652,1.3650,1.3645,1.3642,1.3639,1.3638,1.3636,1.3634,1.3631,1.3632,&
1.3631,1.3630,1.3624,1.3623,1.3623,1.3623,1.3626,1.3629,1.3629,1.3630,&
1.3628,1.3622,1.3615,1.3607,1.3601,1.3597,1.3597,1.3599,1.3597,1.3597,&
1.3595,1.3599,1.3593,1.3597,1.3602,1.3613,1.3625,1.3633,1.3638,1.3645,&
1.3643,1.3648,1.3629,1.3632,1.3635,1.3645,1.3647,1.3662,1.3668,1.3684,&
1.3653,1.3658,1.3663,1.3663,1.3671,1.3668,1.3680,1.3626,1.3620,1.3624,&
1.3627,1.3617,1.3628,1.3562,1.3557,1.3558,1.3559,1.3555,1.3508,1.3504,&
1.3511,1.3520,1.3525,1.3493,1.3503,1.3524,1.3546,1.3578,1.3556,1.3583/
DATA (refractive(i),i=1501,1600)/&
1.3601,1.3610,1.3595,1.3599,1.3606,1.3611,1.3598,1.3600,1.3601,1.3606,&
1.3599,1.3600,1.3603,1.3608,1.3606,1.3613,1.3615,1.3614,1.3606,1.3600,&
1.3591,1.3582,1.3573,1.3564,1.3556,1.3547,1.3538,1.3530,1.3522,1.3513,&
1.3505,1.3497,1.3489,1.3481,1.3473,1.3465,1.3457,1.3450,1.3442,1.3434,&
1.3427,1.3420,1.3412,1.3405,1.3398,1.3391,1.3384,1.3377,1.3371,1.3364,&
1.3357,1.3351,1.3344,1.3338,1.3332,1.3326,1.3320,1.3314,1.3308,1.3302,&
1.3297,1.3291,1.3286,1.3280,1.3275,1.3270,1.3265,1.3260,1.3255,1.3251,&
1.3246,1.3242,1.3237,1.3233,1.3229,1.3225,1.3221,1.3217,1.3213,1.3209,&
1.3206,1.3203,1.3199,1.3196,1.3193,1.3190,1.3187,1.3185,1.3182,1.3180,&
1.3177,1.3175,1.3173,1.3171,1.3169,1.3168,1.3166,1.3165,1.3163,1.3162/
DATA (refractive(i),i=1601,1700)/&
1.3161,1.3160,1.3159,1.3159,1.3158,1.3157,1.3156,1.3155,1.3154,1.3153,&
1.3152,1.3151,1.3150,1.3150,1.3149,1.3148,1.3147,1.3146,1.3146,1.3145,&
1.3144,1.3143,1.3142,1.3141,1.3140,1.3139,1.3138,1.3137,1.3136,1.3135,&
1.3134,1.3133,1.3132,1.3131,1.3130,1.3129,1.3128,1.3127,1.3126,1.3125,&
1.3124,1.3123,1.3122,1.3121,1.3120,1.3119,1.3118,1.3117,1.3116,1.3115,&
1.3114,1.3113,1.3112,1.3110,1.3109,1.3108,1.3107,1.3106,1.3105,1.3104,&
1.3103,1.3102,1.3101,1.3099,1.3098,1.3097,1.3096,1.3095,1.3094,1.3093,&
1.3092,1.3091,1.3090,1.3088,1.3087,1.3086,1.3085,1.3084,1.3082,1.3081,&
1.3080,1.3079,1.3078,1.3076,1.3075,1.3074,1.3073,1.3072,1.3070,1.3069,&
1.3068,1.3067,1.3066,1.3064,1.3063,1.3062,1.3061,1.3060,1.3059,1.3058/
DATA (refractive(i),i=1701,1800)/&
1.3057,1.3056,1.3055,1.3053,1.3052,1.3051,1.3050,1.3049,1.3047,1.3046,&
1.3045,1.3044,1.3043,1.3041,1.3040,1.3039,1.3038,1.3037,1.3035,1.3034,&
1.3033,1.3032,1.3031,1.3029,1.3028,1.3027,1.3026,1.3025,1.3023,1.3022,&
1.3021,1.3020,1.3019,1.3017,1.3016,1.3015,1.3014,1.3013,1.3011,1.3010,&
1.3009,1.3008,1.3007,1.3005,1.3004,1.3003,1.3002,1.3001,1.2999,1.2998,&
1.2997,1.2996,1.2995,1.2993,1.2992,1.2991,1.2990,1.2989,1.2987,1.2986,&
1.2985,1.2984,1.2983,1.2981,1.2980,1.2979,1.2978,1.2977,1.2975,1.2974,&
1.2973,1.2972,1.2970,1.2969,1.2967,1.2966,1.2965,1.2964,1.2962,1.2961,&
1.2960,1.2959,1.2958,1.2957,1.2956,1.2955,1.2954,1.2953,1.2951,1.2950,&
1.2949,1.2948,1.2947,1.2945,1.2944,1.2943,1.2942,1.2941,1.2939,1.2938/
DATA (refractive(i),i=1801,1900)/&
1.2937,1.2936,1.2935,1.2933,1.2932,1.2931,1.2930,1.2928,1.2927,1.2925,&
1.2924,1.2923,1.2922,1.2920,1.2919,1.2918,1.2917,1.2916,1.2914,1.2913,&
1.2912,1.2911,1.2910,1.2908,1.2907,1.2906,1.2905,1.2904,1.2902,1.2901,&
1.2900,1.2899,1.2898,1.2896,1.2895,1.2894,1.2893,1.2892,1.2890,1.2889,&
1.2888,1.2887,1.2886,1.2884,1.2883,1.2882,1.2881,1.2880,1.2878,1.2877,&
1.2876,1.2875,1.2874,1.2873,1.2872,1.2871,1.2870,1.2869,1.2867,1.2866,&
1.2865,1.2864,1.2863,1.2862,1.2861,1.2860,1.2859,1.2858,1.2856,1.2855,&
1.2854,1.2853,1.2852,1.2850,1.2849,1.2848,1.2847,1.2846,1.2845,1.2844,&
1.2843,1.2842,1.2841,1.2840,1.2839,1.2838,1.2837,1.2836,1.2834,1.2833,&
1.2832,1.2831,1.2830,1.2829,1.2828,1.2827,1.2826,1.2825,1.2824,1.2823/
DATA (refractive(i),i=1901,2000)/&
1.2822,1.2821,1.2820,1.2818,1.2817,1.2816,1.2815,1.2814,1.2813,1.2812,&
1.2811,1.2810,1.2809,1.2808,1.2807,1.2806,1.2805,1.2804,1.2803,1.2802,&
1.2801,1.2800,1.2799,1.2799,1.2798,1.2797,1.2796,1.2795,1.2795,1.2794,&
1.2793,1.2792,1.2791,1.2791,1.2790,1.2789,1.2788,1.2788,1.2787,1.2787,&
1.2786,1.2785,1.2785,1.2784,1.2784,1.2783,1.2782,1.2781,1.2780,1.2779,&
1.2778,1.2777,1.2776,1.2776,1.2775,1.2774,1.2773,1.2772,1.2771,1.2770,&
1.2769,1.2768,1.2767,1.2766,1.2765,1.2764,1.2763,1.2762,1.2761,1.2760,&
1.2759,1.2758,1.2757,1.2757,1.2756,1.2755,1.2754,1.2753,1.2753,1.2752,&
1.2751,1.2750,1.2749,1.2749,1.2748,1.2747,1.2746,1.2745,1.2745,1.2744,&
1.2743,1.2742,1.2742,1.2741,1.2741,1.2740,1.2739,1.2739,1.2738,1.2738/
DATA (refractive(i),i=2001,2101)/&
1.2737,1.2736,1.2736,1.2735,1.2735,1.2734,1.2733,1.2733,1.2732,1.2732,&
1.2731,1.2730,1.2730,1.2729,1.2729,1.2728,1.2727,1.2727,1.2726,1.2726,&
1.2725,1.2725,1.2724,1.2724,1.2723,1.2723,1.2722,1.2722,1.2721,1.2721,&
1.2720,1.2720,1.2719,1.2719,1.2718,1.2718,1.2717,1.2717,1.2716,1.2716,&
1.2715,1.2715,1.2714,1.2714,1.2713,1.2713,1.2713,1.2713,1.2712,1.2712,&
1.2712,1.2712,1.2711,1.2711,1.2710,1.2710,1.2710,1.2710,1.2709,1.2709,&
1.2709,1.2709,1.2709,1.2708,1.2708,1.2708,1.2708,1.2708,1.2708,1.2708,&
1.2708,1.2708,1.2708,1.2708,1.2708,1.2708,1.2708,1.2709,1.2709,1.2710,&
1.2710,1.2711,1.2712,1.2712,1.2713,1.2714,1.2715,1.2716,1.2717,1.2718,&
1.2719,1.2720,1.2722,1.2723,1.2725,1.2726,1.2728,1.2730,1.2732,1.2734,&
1.2736/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! specific absorption of chlorophyll !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (k_Cab(i),i=1,100)/&
2.676E-02,3.113E-02,3.561E-02,3.972E-02,4.321E-02,4.606E-02,4.815E-02,5.007E-02,5.189E-02,5.346E-02,&
5.526E-02,5.721E-02,5.891E-02,6.004E-02,6.100E-02,6.189E-02,6.259E-02,6.318E-02,6.367E-02,6.416E-02,&
6.475E-02,6.555E-02,6.628E-02,6.690E-02,6.718E-02,6.747E-02,6.795E-02,6.855E-02,6.897E-02,6.931E-02,&
6.979E-02,7.037E-02,7.082E-02,7.114E-02,7.141E-02,7.160E-02,7.174E-02,7.177E-02,7.171E-02,7.156E-02,&
7.120E-02,7.048E-02,6.963E-02,6.883E-02,6.789E-02,6.685E-02,6.583E-02,6.480E-02,6.358E-02,6.225E-02,&
6.090E-02,5.956E-02,5.813E-02,5.677E-02,5.553E-02,5.439E-02,5.330E-02,5.224E-02,5.135E-02,5.056E-02,&
4.990E-02,4.934E-02,4.891E-02,4.852E-02,4.812E-02,4.767E-02,4.729E-02,4.698E-02,4.673E-02,4.647E-02,&
4.622E-02,4.606E-02,4.589E-02,4.570E-02,4.552E-02,4.535E-02,4.519E-02,4.492E-02,4.466E-02,4.442E-02,&
4.413E-02,4.373E-02,4.325E-02,4.275E-02,4.216E-02,4.149E-02,4.073E-02,3.989E-02,3.895E-02,3.791E-02,&
3.683E-02,3.571E-02,3.455E-02,3.335E-02,3.215E-02,3.094E-02,2.972E-02,2.850E-02,2.730E-02,2.615E-02/
DATA (k_Cab(i),i=101,200)/&
2.502e-02,2.392e-02,2.284e-02,2.179e-02,2.077e-02,1.979e-02,1.883e-02,1.790e-02,1.702e-02,1.617e-02,&
1.536e-02,1.458e-02,1.383e-02,1.312e-02,1.246e-02,1.183e-02,1.126e-02,1.074e-02,1.027e-02,9.855e-03,&
9.490e-03,9.176e-03,8.910e-03,8.698e-03,8.542e-03,8.437e-03,8.381e-03,8.373e-03,8.411e-03,8.486e-03,&
8.595e-03,8.735e-03,8.905e-03,9.098e-03,9.313e-03,9.544e-03,9.789e-03,1.004e-02,1.030e-02,1.056e-02,&
1.083e-02,1.108e-02,1.133e-02,1.157e-02,1.180e-02,1.201e-02,1.221e-02,1.239e-02,1.256e-02,1.271e-02,&
1.286e-02,1.300e-02,1.314e-02,1.327e-02,1.340e-02,1.353e-02,1.367e-02,1.380e-02,1.394e-02,1.408e-02,&
1.421e-02,1.435e-02,1.451e-02,1.469e-02,1.489e-02,1.511e-02,1.535e-02,1.561e-02,1.589e-02,1.619e-02,&
1.650e-02,1.683e-02,1.716e-02,1.749e-02,1.782e-02,1.814e-02,1.846e-02,1.876e-02,1.905e-02,1.933e-02,&
1.960e-02,1.986e-02,2.011e-02,2.035e-02,2.058e-02,2.079e-02,2.100e-02,2.120e-02,2.138e-02,2.155e-02,&
2.171e-02,2.185e-02,2.199e-02,2.211e-02,2.222e-02,2.231e-02,2.241e-02,2.250e-02,2.259e-02,2.269e-02/
DATA (k_Cab(i),i=201,300)/&
2.279e-02,2.291e-02,2.304e-02,2.319e-02,2.337e-02,2.356e-02,2.378e-02,2.403e-02,2.430e-02,2.459e-02,&
2.489e-02,2.520e-02,2.552e-02,2.586e-02,2.620e-02,2.654e-02,2.687e-02,2.719e-02,2.750e-02,2.778e-02,&
2.803e-02,2.826e-02,2.845e-02,2.862e-02,2.877e-02,2.888e-02,2.898e-02,2.905e-02,2.910e-02,2.916e-02,&
2.921e-02,2.928e-02,2.938e-02,2.951e-02,2.969e-02,2.993e-02,3.022e-02,3.059e-02,3.102e-02,3.152e-02,&
3.208e-02,3.269e-02,3.335e-02,3.405e-02,3.478e-02,3.552e-02,3.626e-02,3.698e-02,3.767e-02,3.831e-02,&
3.890e-02,3.943e-02,3.993e-02,4.043e-02,4.095e-02,4.152e-02,4.217e-02,4.293e-02,4.380e-02,4.478e-02,&
4.588e-02,4.709e-02,4.837e-02,4.970e-02,5.106e-02,5.239e-02,5.368e-02,5.488e-02,5.595e-02,5.688e-02,&
5.765e-02,5.826e-02,5.871e-02,5.900e-02,5.914e-02,5.914e-02,5.899e-02,5.868e-02,5.819e-02,5.748e-02,&
5.650e-02,5.522e-02,5.361e-02,5.168e-02,4.941e-02,4.689e-02,4.416e-02,4.128e-02,3.834e-02,3.541e-02,&
3.254e-02,2.978e-02,2.717e-02,2.473e-02,2.247e-02,2.039e-02,1.850e-02,1.678e-02,1.523e-02,1.384e-02/
DATA (k_Cab(i),i=301,400)/&
1.259e-02,1.147e-02,1.048e-02,9.594e-03,8.804e-03,8.099e-03,7.465e-03,6.892e-03,6.371e-03,5.897e-03,&
5.461e-03,5.061e-03,4.691e-03,4.348e-03,4.029e-03,3.733e-03,3.456e-03,3.199e-03,2.960e-03,2.736e-03,&
2.528e-03,2.334e-03,2.153e-03,1.985e-03,1.829e-03,1.683e-03,1.549e-03,1.424e-03,1.309e-03,1.202e-03,&
1.103e-03,1.012e-03,9.269e-04,8.485e-04,7.768e-04,7.110e-04,6.505e-04,5.948e-04,5.437e-04,4.967e-04,&
4.539e-04,4.150e-04,3.794e-04,3.471e-04,3.177e-04,2.909e-04,2.666e-04,2.450e-04,2.255e-04,2.079e-04,&
1.924e-04,1.773e-04,1.629e-04,1.493e-04,1.364e-04,1.243e-04,1.128e-04,1.020e-04,9.188e-05,8.240e-05,&
7.354e-05,6.529e-05,5.763e-05,5.054e-05,4.400e-05,3.800e-05,3.251e-05,2.753e-05,2.302e-05,1.898e-05,&
1.539e-05,1.222e-05,9.466e-06,7.101e-06,5.109e-06,3.473e-06,2.175e-06,1.196e-06,5.200e-07,1.270e-07,&
0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,&
0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00/
DATA (k_Cab(i),i=401,2101)/1701*0./
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! specific absorption of carotenoids !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (k_Car(i),i=1,100)/&
2.895e-01,2.796e-01,2.693e-01,2.601e-01,2.525e-01,2.456e-01,2.402e-01,2.352e-01,2.303e-01,2.255e-01,&
2.203e-01,2.151e-01,2.105e-01,2.069e-01,2.035e-01,2.002e-01,1.971e-01,1.943e-01,1.914e-01,1.889e-01,&
1.866e-01,1.844e-01,1.823e-01,1.805e-01,1.792e-01,1.777e-01,1.762e-01,1.747e-01,1.735e-01,1.724e-01,&
1.715e-01,1.707e-01,1.701e-01,1.694e-01,1.688e-01,1.682e-01,1.678e-01,1.674e-01,1.670e-01,1.667e-01,&
1.664e-01,1.659e-01,1.653e-01,1.647e-01,1.641e-01,1.632e-01,1.624e-01,1.615e-01,1.605e-01,1.592e-01,&
1.577e-01,1.561e-01,1.544e-01,1.528e-01,1.513e-01,1.497e-01,1.483e-01,1.468e-01,1.454e-01,1.440e-01,&
1.425e-01,1.411e-01,1.397e-01,1.384e-01,1.371e-01,1.358e-01,1.347e-01,1.335e-01,1.325e-01,1.316e-01,&
1.308e-01,1.302e-01,1.296e-01,1.292e-01,1.288e-01,1.285e-01,1.282e-01,1.280e-01,1.277e-01,1.274e-01,&
1.270e-01,1.265e-01,1.260e-01,1.253e-01,1.244e-01,1.234e-01,1.223e-01,1.211e-01,1.197e-01,1.182e-01,&
1.166e-01,1.150e-01,1.132e-01,1.114e-01,1.096e-01,1.076e-01,1.056e-01,1.035e-01,1.013e-01,9.901e-02/
DATA (k_Car(i),i=101,200)/&
9.660e-02,9.420e-02,9.179e-02,8.939e-02,8.699e-02,8.460e-02,8.221e-02,7.983e-02,7.746e-02,7.510e-02,&
7.275e-02,7.041e-02,6.809e-02,6.578e-02,6.349e-02,6.122e-02,5.897e-02,5.673e-02,5.453e-02,5.234e-02,&
5.018e-02,4.805e-02,4.594e-02,4.387e-02,4.182e-02,3.981e-02,3.783e-02,3.588e-02,3.397e-02,3.210e-02,&
3.026e-02,2.847e-02,2.672e-02,2.501e-02,2.334e-02,2.172e-02,2.015e-02,1.863e-02,1.715e-02,1.573e-02,&
1.436e-02,1.304e-02,1.178e-02,1.057e-02,9.420e-03,8.330e-03,7.301e-03,6.334e-03,5.429e-03,4.589e-03,&
3.816e-03,3.109e-03,2.471e-03,1.903e-03,1.406e-03,9.822e-04,6.322e-04,3.577e-04,1.599e-04,4.019e-05,&
0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,&
0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,&
0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,&
0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00,0.000e+00/
DATA (k_Car(i),i=201,2101)/1901*0./
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! specific absorption of Brown pigments !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (k_Brown(i),i=1,100)/&
5.272e-001,5.262e-001,5.252e-001,5.242e-001,5.232e-001,5.222e-001,5.212e-001,5.202e-001,5.192e-001,5.182e-001,&
5.172e-001,5.162e-001,5.152e-001,5.142e-001,5.132e-001,5.122e-001,5.112e-001,5.102e-001,5.092e-001,5.082e-001,&
5.072e-001,5.062e-001,5.052e-001,5.042e-001,5.032e-001,5.022e-001,5.012e-001,5.002e-001,4.992e-001,4.982e-001,&
4.972e-001,4.960e-001,4.948e-001,4.936e-001,4.924e-001,4.912e-001,4.900e-001,4.888e-001,4.876e-001,4.864e-001,&
4.852e-001,4.840e-001,4.829e-001,4.817e-001,4.805e-001,4.793e-001,4.781e-001,4.769e-001,4.757e-001,4.745e-001,&
4.733e-001,4.720e-001,4.708e-001,4.695e-001,4.683e-001,4.670e-001,4.658e-001,4.645e-001,4.633e-001,4.620e-001,&
4.608e-001,4.597e-001,4.587e-001,4.577e-001,4.566e-001,4.556e-001,4.546e-001,4.535e-001,4.525e-001,4.515e-001,&
4.504e-001,4.494e-001,4.484e-001,4.473e-001,4.463e-001,4.453e-001,4.442e-001,4.432e-001,4.422e-001,4.411e-001,&
4.401e-001,4.388e-001,4.375e-001,4.362e-001,4.350e-001,4.337e-001,4.324e-001,4.311e-001,4.298e-001,4.285e-001,&
4.272e-001,4.260e-001,4.247e-001,4.234e-001,4.221e-001,4.208e-001,4.195e-001,4.183e-001,4.170e-001,4.157e-001/
DATA (k_Brown(i),i=101,200)/&
4.144e-001,4.135e-001,4.127e-001,4.118e-001,4.109e-001,4.100e-001,4.092e-001,4.083e-001,4.074e-001,4.065e-001,&
4.057e-001,4.047e-001,4.038e-001,4.028e-001,4.019e-001,4.009e-001,4.000e-001,3.990e-001,3.981e-001,3.971e-001,&
3.962e-001,3.952e-001,3.943e-001,3.933e-001,3.924e-001,3.914e-001,3.905e-001,3.895e-001,3.886e-001,3.876e-001,&
3.867e-001,3.846e-001,3.824e-001,3.803e-001,3.782e-001,3.760e-001,3.739e-001,3.718e-001,3.696e-001,3.675e-001,&
3.654e-001,3.639e-001,3.625e-001,3.611e-001,3.597e-001,3.582e-001,3.568e-001,3.554e-001,3.540e-001,3.525e-001,&
3.511e-001,3.500e-001,3.489e-001,3.478e-001,3.467e-001,3.456e-001,3.445e-001,3.434e-001,3.423e-001,3.412e-001,&
3.401e-001,3.383e-001,3.366e-001,3.348e-001,3.330e-001,3.312e-001,3.294e-001,3.276e-001,3.258e-001,3.241e-001,&
3.223e-001,3.202e-001,3.182e-001,3.162e-001,3.141e-001,3.121e-001,3.100e-001,3.080e-001,3.059e-001,3.039e-001,&
3.019e-001,2.999e-001,2.979e-001,2.959e-001,2.940e-001,2.920e-001,2.900e-001,2.881e-001,2.861e-001,2.841e-001,&
2.821e-001,2.803e-001,2.784e-001,2.766e-001,2.747e-001,2.728e-001,2.710e-001,2.691e-001,2.673e-001,2.654e-001/
DATA (k_Brown(i),i=201,300)/&
2.636e-001,2.618e-001,2.601e-001,2.584e-001,2.566e-001,2.549e-001,2.532e-001,2.515e-001,2.497e-001,2.480e-001,&
2.463e-001,2.447e-001,2.431e-001,2.414e-001,2.398e-001,2.382e-001,2.366e-001,2.350e-001,2.334e-001,2.318e-001,&
2.302e-001,2.288e-001,2.273e-001,2.258e-001,2.244e-001,2.229e-001,2.215e-001,2.200e-001,2.185e-001,2.171e-001,&
2.156e-001,2.143e-001,2.129e-001,2.115e-001,2.102e-001,2.088e-001,2.074e-001,2.061e-001,2.047e-001,2.033e-001,&
2.020e-001,2.007e-001,1.994e-001,1.981e-001,1.968e-001,1.955e-001,1.942e-001,1.929e-001,1.916e-001,1.903e-001,&
1.890e-001,1.878e-001,1.865e-001,1.853e-001,1.841e-001,1.829e-001,1.816e-001,1.804e-001,1.792e-001,1.780e-001,&
1.768e-001,1.755e-001,1.742e-001,1.729e-001,1.717e-001,1.704e-001,1.691e-001,1.679e-001,1.666e-001,1.653e-001,&
1.641e-001,1.627e-001,1.613e-001,1.600e-001,1.586e-001,1.572e-001,1.559e-001,1.545e-001,1.532e-001,1.518e-001,&
1.504e-001,1.491e-001,1.478e-001,1.464e-001,1.451e-001,1.437e-001,1.424e-001,1.411e-001,1.397e-001,1.384e-001,&
1.370e-001,1.358e-001,1.345e-001,1.333e-001,1.320e-001,1.308e-001,1.295e-001,1.283e-001,1.270e-001,1.258e-001/
DATA (k_Brown(i),i=301,400)/&
1.245e-001,1.234e-001,1.223e-001,1.212e-001,1.200e-001,1.189e-001,1.178e-001,1.167e-001,1.156e-001,1.144e-001,&
1.133e-001,1.122e-001,1.111e-001,1.100e-001,1.089e-001,1.078e-001,1.067e-001,1.056e-001,1.046e-001,1.035e-001,&
1.024e-001,1.013e-001,1.003e-001,9.931e-002,9.829e-002,9.727e-002,9.625e-002,9.524e-002,9.422e-002,9.320e-002,&
9.218e-002,9.120e-002,9.022e-002,8.924e-002,8.827e-002,8.729e-002,8.631e-002,8.533e-002,8.435e-002,8.337e-002,&
8.239e-002,8.155e-002,8.070e-002,7.985e-002,7.901e-002,7.816e-002,7.732e-002,7.647e-002,7.562e-002,7.478e-002,&
7.393e-002,7.319e-002,7.245e-002,7.171e-002,7.097e-002,7.023e-002,6.949e-002,6.875e-002,6.801e-002,6.727e-002,&
6.653e-002,6.586e-002,6.519e-002,6.452e-002,6.385e-002,6.318e-002,6.251e-002,6.184e-002,6.117e-002,6.050e-002,&
5.983e-002,5.913e-002,5.843e-002,5.773e-002,5.704e-002,5.634e-002,5.564e-002,5.494e-002,5.424e-002,5.354e-002,&
5.284e-002,5.226e-002,5.167e-002,5.109e-002,5.050e-002,4.992e-002,4.933e-002,4.874e-002,4.816e-002,4.757e-002,&
4.699e-002,4.646e-002,4.594e-002,4.542e-002,4.490e-002,4.437e-002,4.385e-002,4.333e-002,4.281e-002,4.228e-002/
DATA (k_Brown(i),i=401,500)/&
4.176e-002,4.128e-002,4.081e-002,4.033e-002,3.985e-002,3.937e-002,3.889e-002,3.841e-002,3.793e-002,3.746e-002,&
3.698e-002,3.657e-002,3.615e-002,3.574e-002,3.533e-002,3.492e-002,3.451e-002,3.409e-002,3.368e-002,3.327e-002,&
3.286e-002,3.249e-002,3.212e-002,3.175e-002,3.139e-002,3.102e-002,3.065e-002,3.028e-002,2.991e-002,2.954e-002,&
2.918e-002,2.885e-002,2.852e-002,2.819e-002,2.786e-002,2.753e-002,2.720e-002,2.688e-002,2.655e-002,2.622e-002,&
2.589e-002,2.559e-002,2.529e-002,2.499e-002,2.469e-002,2.440e-002,2.410e-002,2.380e-002,2.350e-002,2.320e-002,&
2.290e-002,2.264e-002,2.238e-002,2.212e-002,2.186e-002,2.159e-002,2.133e-002,2.107e-002,2.081e-002,2.055e-002,&
2.029e-002,2.006e-002,1.983e-002,1.961e-002,1.938e-002,1.915e-002,1.893e-002,1.870e-002,1.847e-002,1.825e-002,&
1.802e-002,1.782e-002,1.762e-002,1.742e-002,1.723e-002,1.703e-002,1.683e-002,1.663e-002,1.643e-002,1.623e-002,&
1.604e-002,1.586e-002,1.568e-002,1.551e-002,1.533e-002,1.516e-002,1.498e-002,1.481e-002,1.463e-002,1.446e-002,&
1.428e-002,1.414e-002,1.400e-002,1.385e-002,1.371e-002,1.357e-002,1.343e-002,1.328e-002,1.314e-002,1.300e-002/
DATA (k_Brown(i),i=501,600)/&
1.286e-002,1.275e-002,1.265e-002,1.255e-002,1.245e-002,1.235e-002,1.225e-002,1.215e-002,1.205e-002,1.195e-002,&
1.185e-002,1.175e-002,1.165e-002,1.155e-002,1.145e-002,1.135e-002,1.125e-002,1.115e-002,1.105e-002,1.096e-002,&
1.086e-002,1.076e-002,1.066e-002,1.056e-002,1.047e-002,1.037e-002,1.027e-002,1.017e-002,1.008e-002,9.980e-003,&
9.884e-003,9.787e-003,9.691e-003,9.595e-003,9.500e-003,9.404e-003,9.309e-003,9.214e-003,9.120e-003,9.025e-003,&
8.931e-003,8.837e-003,8.743e-003,8.650e-003,8.557e-003,8.464e-003,8.371e-003,8.279e-003,8.187e-003,8.095e-003,&
8.004e-003,7.913e-003,7.822e-003,7.732e-003,7.641e-003,7.552e-003,7.462e-003,7.373e-003,7.284e-003,7.195e-003,&
7.107e-003,7.019e-003,6.932e-003,6.844e-003,6.758e-003,6.671e-003,6.585e-003,6.499e-003,6.414e-003,6.329e-003,&
6.244e-003,6.160e-003,6.076e-003,5.993e-003,5.910e-003,5.827e-003,5.745e-003,5.663e-003,5.581e-003,5.500e-003,&
5.419e-003,5.339e-003,5.259e-003,5.180e-003,5.101e-003,5.023e-003,4.945e-003,4.867e-003,4.790e-003,4.713e-003,&
4.637e-003,4.561e-003,4.486e-003,4.411e-003,4.337e-003,4.263e-003,4.190e-003,4.117e-003,4.044e-003,3.972e-003/
DATA (k_Brown(i),i=601,700)/&
3.901e-003,3.830e-003,3.760e-003,3.690e-003,3.620e-003,3.552e-003,3.483e-003,3.416e-003,3.348e-003,3.282e-003,&
3.215e-003,3.150e-003,3.085e-003,3.020e-003,2.956e-003,2.893e-003,2.830e-003,2.768e-003,2.706e-003,2.645e-003,&
2.585e-003,2.525e-003,2.465e-003,2.407e-003,2.348e-003,2.291e-003,2.234e-003,2.178e-003,2.122e-003,2.067e-003,&
2.012e-003,1.959e-003,1.905e-003,1.853e-003,1.801e-003,1.750e-003,1.699e-003,1.649e-003,1.600e-003,1.551e-003,&
1.503e-003,1.456e-003,1.409e-003,1.363e-003,1.318e-003,1.274e-003,1.230e-003,1.187e-003,1.144e-003,1.102e-003,&
1.061e-003,1.021e-003,9.811e-004,9.422e-004,9.040e-004,8.665e-004,8.297e-004,7.937e-004,7.584e-004,7.239e-004,&
6.901e-004,6.571e-004,6.248e-004,5.933e-004,5.626e-004,5.326e-004,5.034e-004,4.750e-004,4.473e-004,4.205e-004,&
3.944e-004,3.691e-004,3.446e-004,3.210e-004,2.981e-004,2.760e-004,2.548e-004,2.344e-004,2.148e-004,1.960e-004,&
1.780e-004,1.609e-004,1.446e-004,1.292e-004,1.146e-004,1.009e-004,8.804e-005,7.603e-005,6.488e-005,5.460e-005,&
4.519e-005,3.666e-005,2.901e-005,2.225e-005,1.637e-005,1.138e-005,7.297e-006,4.111e-006,1.830e-006,4.581e-007/
DATA (k_Brown(i),i=701,2101)/1401*0./
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! specific absorption of water !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (k_Cw(i),i=1,100)/&
5.800E-05,5.852E-05,5.900E-05,5.989E-05,6.100E-05,6.203E-05,6.300E-05,6.399E-05,6.500E-05,6.603E-05,&
6.700E-05,6.790E-05,6.900E-05,7.050E-05,7.200E-05,7.312E-05,7.400E-05,7.490E-05,7.600E-05,7.740E-05,&
7.900E-05,8.063E-05,8.200E-05,8.297E-05,8.400E-05,8.551E-05,8.700E-05,8.800E-05,8.900E-05,9.050E-05,&
9.200E-05,9.300E-05,9.400E-05,9.550E-05,9.700E-05,9.801E-05,9.900E-05,1.005E-04,1.020E-04,1.031E-04,&
1.040E-04,1.050E-04,1.060E-04,1.070E-04,1.080E-04,1.090E-04,1.100E-04,1.110E-04,1.120E-04,1.130E-04,&
1.140E-04,1.150E-04,1.160E-04,1.170E-04,1.180E-04,1.190E-04,1.200E-04,1.210E-04,1.220E-04,1.230E-04,&
1.240E-04,1.250E-04,1.260E-04,1.270E-04,1.280E-04,1.289E-04,1.300E-04,1.315E-04,1.330E-04,1.340E-04,&
1.350E-04,1.364E-04,1.380E-04,1.396E-04,1.410E-04,1.424E-04,1.440E-04,1.459E-04,1.480E-04,1.499E-04,&
1.520E-04,1.544E-04,1.570E-04,1.596E-04,1.620E-04,1.643E-04,1.670E-04,1.704E-04,1.740E-04,1.775E-04,&
1.810E-04,1.849E-04,1.890E-04,1.934E-04,1.980E-04,2.031E-04,2.090E-04,2.158E-04,2.230E-04,2.303E-04/
DATA (k_Cw(i),i=101,200)/&
2.380E-04,2.463E-04,2.550E-04,2.640E-04,2.730E-04,2.819E-04,2.910E-04,3.004E-04,3.100E-04,3.194E-04,&
3.290E-04,3.390E-04,3.490E-04,3.588E-04,3.680E-04,3.767E-04,3.860E-04,3.962E-04,4.040E-04,4.069E-04,&
4.090E-04,4.138E-04,4.160E-04,4.112E-04,4.090E-04,4.176E-04,4.270E-04,4.268E-04,4.230E-04,4.237E-04,&
4.290E-04,4.371E-04,4.450E-04,4.506E-04,4.560E-04,4.631E-04,4.700E-04,4.748E-04,4.800E-04,4.879E-04,&
4.950E-04,4.983E-04,5.030E-04,5.141E-04,5.270E-04,5.363E-04,5.440E-04,5.532E-04,5.640E-04,5.759E-04,&
5.880E-04,5.998E-04,6.110E-04,6.215E-04,6.310E-04,6.391E-04,6.460E-04,6.520E-04,6.580E-04,6.647E-04,&
6.720E-04,6.793E-04,6.860E-04,6.920E-04,6.990E-04,7.084E-04,7.180E-04,7.257E-04,7.340E-04,7.455E-04,&
7.590E-04,7.729E-04,7.870E-04,8.020E-04,8.190E-04,8.386E-04,8.580E-04,8.754E-04,8.960E-04,9.238E-04,&
9.520E-04,9.745E-04,1.000E-03,1.037E-03,1.079E-03,1.119E-03,1.159E-03,1.204E-03,1.253E-03,1.304E-03,&
1.356E-03,1.408E-03,1.459E-03,1.510E-03,1.567E-03,1.635E-03,1.700E-03,1.758E-03,1.860E-03,2.042E-03/
DATA (k_Cw(i),i=201,300)/&
2.224E-03,2.323E-03,2.366E-03,2.400E-03,2.448E-03,2.519E-03,2.587E-03,2.629E-03,2.653E-03,2.674E-03,&
2.691E-03,2.704E-03,2.715E-03,2.727E-03,2.740E-03,2.753E-03,2.764E-03,2.775E-03,2.785E-03,2.797E-03,&
2.810E-03,2.824E-03,2.839E-03,2.854E-03,2.868E-03,2.881E-03,2.893E-03,2.907E-03,2.922E-03,2.938E-03,&
2.955E-03,2.972E-03,2.988E-03,3.000E-03,3.011E-03,3.023E-03,3.038E-03,3.057E-03,3.076E-03,3.094E-03,&
3.111E-03,3.127E-03,3.144E-03,3.162E-03,3.181E-03,3.202E-03,3.223E-03,3.242E-03,3.263E-03,3.289E-03,&
3.315E-03,3.338E-03,3.362E-03,3.390E-03,3.423E-03,3.461E-03,3.508E-03,3.567E-03,3.636E-03,3.712E-03,&
3.791E-03,3.866E-03,3.931E-03,3.981E-03,4.019E-03,4.049E-03,4.072E-03,4.087E-03,4.098E-03,4.109E-03,&
4.122E-03,4.137E-03,4.150E-03,4.160E-03,4.173E-03,4.196E-03,4.223E-03,4.248E-03,4.270E-03,4.293E-03,&
4.318E-03,4.347E-03,4.381E-03,4.418E-03,4.458E-03,4.500E-03,4.545E-03,4.594E-03,4.646E-03,4.701E-03,&
4.760E-03,4.827E-03,4.903E-03,4.986E-03,5.071E-03,5.154E-03,5.244E-03,5.351E-03,5.470E-03,5.594E-03/
DATA (k_Cw(i),i=301,400)/&
5.722E-03,5.855E-03,5.995E-03,6.146E-03,6.303E-03,6.463E-03,6.628E-03,6.804E-03,6.993E-03,7.197E-03,&
7.415E-03,7.647E-03,7.893E-03,8.157E-03,8.445E-03,8.763E-03,9.109E-03,9.479E-03,9.871E-03,1.029E-02,&
1.072E-02,1.119E-02,1.168E-02,1.218E-02,1.268E-02,1.319E-02,1.372E-02,1.428E-02,1.487E-02,1.551E-02,&
1.621E-02,1.699E-02,1.787E-02,1.886E-02,1.992E-02,2.101E-02,2.207E-02,2.306E-02,2.394E-02,2.469E-02,&
2.532E-02,2.583E-02,2.623E-02,2.652E-02,2.672E-02,2.689E-02,2.702E-02,2.713E-02,2.722E-02,2.728E-02,&
2.733E-02,2.738E-02,2.741E-02,2.745E-02,2.748E-02,2.751E-02,2.754E-02,2.758E-02,2.763E-02,2.767E-02,&
2.771E-02,2.773E-02,2.773E-02,2.774E-02,2.774E-02,2.773E-02,2.770E-02,2.766E-02,2.761E-02,2.757E-02,&
2.754E-02,2.752E-02,2.748E-02,2.741E-02,2.731E-02,2.720E-02,2.710E-02,2.701E-02,2.690E-02,2.675E-02,&
2.659E-02,2.645E-02,2.633E-02,2.624E-02,2.613E-02,2.593E-02,2.558E-02,2.523E-02,2.513E-02,2.501E-02,&
2.466E-02,2.447E-02,2.412E-02,2.389E-02,2.374E-02,2.355E-02,2.337E-02,2.318E-02,2.304E-02,2.281E-02/
DATA (k_Cw(i),i=401,500)/&
2.246E-02,2.243E-02,2.238E-02,2.222E-02,2.204E-02,2.201E-02,2.204E-02,2.196E-02,2.177E-02,2.190E-02,&
2.188E-02,2.188E-02,2.198E-02,2.210E-02,2.223E-02,2.233E-02,2.248E-02,2.276E-02,2.304E-02,2.311E-02,&
2.329E-02,2.388E-02,2.446E-02,2.475E-02,2.516E-02,2.620E-02,2.769E-02,2.830E-02,2.914E-02,3.108E-02,&
3.214E-02,3.297E-02,3.459E-02,3.606E-02,3.662E-02,3.702E-02,3.788E-02,3.829E-02,3.854E-02,3.909E-02,&
3.949E-02,3.972E-02,4.000E-02,4.040E-02,4.057E-02,4.075E-02,4.115E-02,4.127E-02,4.149E-02,4.204E-02,&
4.199E-02,4.223E-02,4.254E-02,4.272E-02,4.280E-02,4.306E-02,4.360E-02,4.369E-02,4.379E-02,4.433E-02,&
4.454E-02,4.466E-02,4.505E-02,4.527E-02,4.552E-02,4.605E-02,4.658E-02,4.691E-02,4.705E-02,4.713E-02,&
4.752E-02,4.833E-02,4.867E-02,4.894E-02,4.960E-02,5.006E-02,5.050E-02,5.115E-02,5.153E-02,5.204E-02,&
5.298E-02,5.346E-02,5.386E-02,5.465E-02,5.528E-02,5.566E-02,5.596E-02,5.653E-02,5.745E-02,5.789E-02,&
5.831E-02,5.924E-02,5.982E-02,6.009E-02,6.035E-02,6.094E-02,6.185E-02,6.226E-02,6.269E-02,6.360E-02/
DATA (k_Cw(i),i=501,600)/&
6.407E-02,6.458E-02,6.562E-02,6.636E-02,6.672E-02,6.699E-02,6.769E-02,6.900E-02,6.989E-02,7.037E-02,&
7.085E-02,7.187E-02,7.358E-02,7.486E-02,7.562E-02,7.630E-02,7.792E-02,8.085E-02,8.292E-02,8.410E-02,&
8.528E-02,8.801E-02,9.268E-02,9.584E-02,9.819E-02,1.012E-01,1.042E-01,1.066E-01,1.113E-01,1.194E-01,&
1.246E-01,1.281E-01,1.327E-01,1.374E-01,1.410E-01,1.465E-01,1.557E-01,1.635E-01,1.688E-01,1.732E-01,&
1.818E-01,1.963E-01,2.050E-01,2.106E-01,2.187E-01,2.287E-01,2.386E-01,2.468E-01,2.542E-01,2.701E-01,&
2.976E-01,3.153E-01,3.274E-01,3.438E-01,3.622E-01,3.785E-01,3.930E-01,4.068E-01,4.184E-01,4.273E-01,&
4.385E-01,4.538E-01,4.611E-01,4.633E-01,4.663E-01,4.701E-01,4.733E-01,4.756E-01,4.772E-01,4.785E-01,&
4.800E-01,4.814E-01,4.827E-01,4.843E-01,4.864E-01,4.870E-01,4.867E-01,4.864E-01,4.857E-01,4.841E-01,&
4.821E-01,4.804E-01,4.786E-01,4.764E-01,4.738E-01,4.710E-01,4.677E-01,4.641E-01,4.604E-01,4.570E-01,&
4.532E-01,4.482E-01,4.434E-01,4.397E-01,4.362E-01,4.316E-01,4.265E-01,4.215E-01,4.168E-01,4.121E-01/
DATA (k_Cw(i),i=601,700)/&
4.072E-01,4.017E-01,3.963E-01,3.915E-01,3.868E-01,3.816E-01,3.760E-01,3.701E-01,3.640E-01,3.581E-01,&
3.521E-01,3.461E-01,3.402E-01,3.349E-01,3.297E-01,3.243E-01,3.191E-01,3.141E-01,3.086E-01,3.022E-01,&
2.957E-01,2.897E-01,2.840E-01,2.782E-01,2.724E-01,2.672E-01,2.621E-01,2.566E-01,2.506E-01,2.444E-01,&
2.391E-01,2.357E-01,2.331E-01,2.299E-01,2.251E-01,2.198E-01,2.151E-01,2.109E-01,2.064E-01,2.020E-01,&
1.981E-01,1.944E-01,1.904E-01,1.868E-01,1.841E-01,1.818E-01,1.790E-01,1.752E-01,1.715E-01,1.687E-01,&
1.664E-01,1.639E-01,1.613E-01,1.586E-01,1.562E-01,1.545E-01,1.532E-01,1.522E-01,1.510E-01,1.495E-01,&
1.475E-01,1.457E-01,1.447E-01,1.442E-01,1.438E-01,1.433E-01,1.426E-01,1.418E-01,1.412E-01,1.410E-01,&
1.409E-01,1.408E-01,1.406E-01,1.405E-01,1.408E-01,1.414E-01,1.426E-01,1.435E-01,1.438E-01,1.439E-01,&
1.443E-01,1.456E-01,1.475E-01,1.498E-01,1.519E-01,1.534E-01,1.547E-01,1.561E-01,1.580E-01,1.604E-01,&
1.632E-01,1.659E-01,1.677E-01,1.693E-01,1.712E-01,1.739E-01,1.777E-01,1.824E-01,1.866E-01,1.890E-01/
DATA (k_Cw(i),i=701,800)/&
1.906E-01,1.929E-01,1.967E-01,2.005E-01,2.031E-01,2.051E-01,2.079E-01,2.123E-01,2.166E-01,2.196E-01,&
2.219E-01,2.251E-01,2.298E-01,2.337E-01,2.346E-01,2.342E-01,2.353E-01,2.397E-01,2.450E-01,2.491E-01,&
2.528E-01,2.578E-01,2.650E-01,2.719E-01,2.765E-01,2.811E-01,2.891E-01,3.023E-01,3.164E-01,3.271E-01,&
3.378E-01,3.533E-01,3.770E-01,4.037E-01,4.281E-01,4.502E-01,4.712E-01,4.932E-01,5.202E-01,5.572E-01,&
6.052E-01,6.520E-01,6.863E-01,7.159E-01,7.535E-01,8.064E-01,8.597E-01,8.981E-01,9.253E-01,9.493E-01,&
9.769E-01,1.008E+00,1.041E+00,1.073E+00,1.100E+00,1.119E+00,1.131E+00,1.140E+00,1.150E+00,1.160E+00,&
1.170E+00,1.181E+00,1.190E+00,1.194E+00,1.196E+00,1.197E+00,1.200E+00,1.203E+00,1.205E+00,1.206E+00,&
1.207E+00,1.213E+00,1.223E+00,1.232E+00,1.234E+00,1.232E+00,1.229E+00,1.230E+00,1.233E+00,1.236E+00,&
1.239E+00,1.241E+00,1.244E+00,1.248E+00,1.252E+00,1.256E+00,1.258E+00,1.260E+00,1.262E+00,1.265E+00,&
1.267E+00,1.270E+00,1.272E+00,1.275E+00,1.277E+00,1.280E+00,1.282E+00,1.283E+00,1.283E+00,1.279E+00/
DATA (k_Cw(i),i=801,900)/&
1.272E+00,1.266E+00,1.267E+00,1.271E+00,1.273E+00,1.271E+00,1.265E+00,1.260E+00,1.258E+00,1.258E+00,&
1.257E+00,1.252E+00,1.247E+00,1.243E+00,1.243E+00,1.243E+00,1.240E+00,1.233E+00,1.224E+00,1.216E+00,&
1.214E+00,1.214E+00,1.213E+00,1.210E+00,1.205E+00,1.200E+00,1.199E+00,1.198E+00,1.197E+00,1.194E+00,&
1.189E+00,1.184E+00,1.180E+00,1.176E+00,1.171E+00,1.166E+00,1.161E+00,1.158E+00,1.157E+00,1.157E+00,&
1.155E+00,1.152E+00,1.148E+00,1.142E+00,1.138E+00,1.133E+00,1.130E+00,1.126E+00,1.123E+00,1.120E+00,&
1.116E+00,1.111E+00,1.107E+00,1.103E+00,1.101E+00,1.101E+00,1.101E+00,1.101E+00,1.100E+00,1.098E+00,&
1.094E+00,1.089E+00,1.085E+00,1.084E+00,1.083E+00,1.083E+00,1.082E+00,1.081E+00,1.080E+00,1.079E+00,&
1.080E+00,1.083E+00,1.087E+00,1.093E+00,1.099E+00,1.104E+00,1.107E+00,1.109E+00,1.111E+00,1.115E+00,&
1.121E+00,1.129E+00,1.137E+00,1.147E+00,1.156E+00,1.164E+00,1.170E+00,1.175E+00,1.181E+00,1.188E+00,&
1.196E+00,1.206E+00,1.216E+00,1.227E+00,1.239E+00,1.252E+00,1.267E+00,1.283E+00,1.297E+00,1.310E+00/
DATA (k_Cw(i),i=901,1000)/&
1.323E+00,1.336E+00,1.351E+00,1.370E+00,1.392E+00,1.416E+00,1.440E+00,1.465E+00,1.489E+00,1.511E+00,&
1.532E+00,1.555E+00,1.580E+00,1.610E+00,1.642E+00,1.672E+00,1.701E+00,1.728E+00,1.758E+00,1.791E+00,&
1.831E+00,1.872E+00,1.911E+00,1.943E+00,1.974E+00,2.007E+00,2.047E+00,2.098E+00,2.153E+00,2.203E+00,&
2.243E+00,2.277E+00,2.312E+00,2.357E+00,2.415E+00,2.479E+00,2.540E+00,2.590E+00,2.631E+00,2.666E+00,&
2.701E+00,2.738E+00,2.780E+00,2.829E+00,2.889E+00,2.960E+00,3.033E+00,3.097E+00,3.146E+00,3.181E+00,&
3.226E+00,3.267E+00,3.319E+00,3.363E+00,3.412E+00,3.449E+00,3.504E+00,3.544E+00,3.600E+00,3.648E+00,&
3.701E+00,3.752E+00,3.802E+00,3.871E+00,3.927E+00,3.985E+00,4.064E+00,4.125E+00,4.216E+00,4.302E+00,&
4.389E+00,4.504E+00,4.630E+00,4.737E+00,4.904E+00,5.092E+00,5.260E+00,5.479E+00,5.720E+00,6.006E+00,&
6.242E+00,6.580E+00,6.927E+00,7.313E+00,7.633E+00,8.089E+00,8.545E+00,9.030E+00,9.591E+00,1.002E+01,&
1.063E+01,1.122E+01,1.184E+01,1.245E+01,1.316E+01,1.369E+01,1.434E+01,1.509E+01,1.578E+01,1.646E+01/
DATA (k_Cw(i),i=1001,1100)/&
1.714E+01,1.781E+01,1.854E+01,1.919E+01,1.980E+01,2.029E+01,2.089E+01,2.146E+01,2.202E+01,2.260E+01,&
2.313E+01,2.360E+01,2.407E+01,2.450E+01,2.493E+01,2.533E+01,2.571E+01,2.606E+01,2.641E+01,2.673E+01,&
2.701E+01,2.729E+01,2.756E+01,2.782E+01,2.806E+01,2.835E+01,2.856E+01,2.875E+01,2.892E+01,2.908E+01,&
2.926E+01,2.940E+01,2.956E+01,2.966E+01,2.982E+01,2.993E+01,3.003E+01,3.014E+01,3.023E+01,3.029E+01,&
3.036E+01,3.042E+01,3.046E+01,3.049E+01,3.052E+01,3.053E+01,3.055E+01,3.056E+01,3.056E+01,3.055E+01,&
3.054E+01,3.051E+01,3.049E+01,3.045E+01,3.041E+01,3.035E+01,3.029E+01,3.023E+01,3.014E+01,3.006E+01,&
2.998E+01,2.983E+01,2.971E+01,2.957E+01,2.936E+01,2.917E+01,2.899E+01,2.872E+01,2.851E+01,2.829E+01,&
2.800E+01,2.777E+01,2.754E+01,2.722E+01,2.699E+01,2.664E+01,2.638E+01,2.611E+01,2.581E+01,2.555E+01,&
2.522E+01,2.497E+01,2.468E+01,2.443E+01,2.413E+01,2.388E+01,2.364E+01,2.332E+01,2.307E+01,2.274E+01,&
2.250E+01,2.218E+01,2.193E+01,2.163E+01,2.139E+01,2.107E+01,2.082E+01,2.052E+01,2.025E+01,2.001E+01/
DATA (k_Cw(i),i=1101,1200)/&
1.972E+01,1.951E+01,1.924E+01,1.900E+01,1.874E+01,1.847E+01,1.827E+01,1.802E+01,1.784E+01,1.758E+01,&
1.734E+01,1.712E+01,1.688E+01,1.671E+01,1.647E+01,1.623E+01,1.606E+01,1.583E+01,1.562E+01,1.545E+01,&
1.525E+01,1.504E+01,1.489E+01,1.468E+01,1.447E+01,1.432E+01,1.413E+01,1.395E+01,1.381E+01,1.364E+01,&
1.348E+01,1.329E+01,1.316E+01,1.298E+01,1.282E+01,1.265E+01,1.254E+01,1.238E+01,1.223E+01,1.206E+01,&
1.193E+01,1.181E+01,1.166E+01,1.152E+01,1.137E+01,1.126E+01,1.114E+01,1.100E+01,1.088E+01,1.075E+01,&
1.064E+01,1.054E+01,1.044E+01,1.032E+01,1.022E+01,1.011E+01,1.001E+01,9.912E+00,9.839E+00,9.754E+00,&
9.660E+00,9.563E+00,9.477E+00,9.383E+00,9.305E+00,9.202E+00,9.133E+00,9.047E+00,8.977E+00,8.898E+00,&
8.820E+00,8.742E+00,8.665E+00,8.588E+00,8.509E+00,8.448E+00,8.364E+00,8.295E+00,8.234E+00,8.157E+00,&
8.104E+00,8.036E+00,7.959E+00,7.890E+00,7.834E+00,7.773E+00,7.712E+00,7.654E+00,7.609E+00,7.548E+00,&
7.495E+00,7.432E+00,7.374E+00,7.315E+00,7.252E+00,7.203E+00,7.164E+00,7.124E+00,7.084E+00,7.041E+00/
DATA (k_Cw(i),i=1201,1300)/&
6.987E+00,6.943E+00,6.910E+00,6.865E+00,6.828E+00,6.776E+00,6.742E+00,6.714E+00,6.695E+00,6.654E+00,&
6.630E+00,6.599E+00,6.567E+00,6.526E+00,6.501E+00,6.474E+00,6.449E+00,6.420E+00,6.401E+00,6.363E+00,&
6.345E+00,6.309E+00,6.282E+00,6.250E+00,6.214E+00,6.186E+00,6.163E+00,6.130E+00,6.121E+00,6.091E+00,&
6.076E+00,6.053E+00,6.048E+00,6.016E+00,6.005E+00,5.982E+00,5.973E+00,5.947E+00,5.940E+00,5.919E+00,&
5.911E+00,5.887E+00,5.875E+00,5.846E+00,5.826E+00,5.798E+00,5.787E+00,5.751E+00,5.746E+00,5.718E+00,&
5.705E+00,5.685E+00,5.684E+00,5.657E+00,5.658E+00,5.644E+00,5.648E+00,5.626E+00,5.626E+00,5.619E+00,&
5.618E+00,5.603E+00,5.614E+00,5.597E+00,5.603E+00,5.582E+00,5.584E+00,5.564E+00,5.563E+00,5.547E+00,&
5.545E+00,5.536E+00,5.542E+00,5.529E+00,5.532E+00,5.525E+00,5.533E+00,5.528E+00,5.529E+00,5.516E+00,&
5.524E+00,5.516E+00,5.526E+00,5.520E+00,5.520E+00,5.516E+00,5.522E+00,5.511E+00,5.527E+00,5.511E+00,&
5.519E+00,5.515E+00,5.520E+00,5.510E+00,5.518E+00,5.523E+00,5.538E+00,5.535E+00,5.544E+00,5.557E+00/
DATA (k_Cw(i),i=1301,1400)/&
5.571E+00,5.583E+00,5.606E+00,5.607E+00,5.629E+00,5.636E+00,5.664E+00,5.670E+00,5.693E+00,5.702E+00,&
5.733E+00,5.752E+00,5.766E+00,5.776E+00,5.797E+00,5.811E+00,5.829E+00,5.842E+00,5.877E+00,5.891E+00,&
5.930E+00,5.945E+00,5.972E+00,5.999E+00,6.025E+00,6.051E+00,6.087E+00,6.096E+00,6.136E+00,6.166E+00,&
6.198E+00,6.219E+00,6.256E+00,6.284E+00,6.335E+00,6.369E+00,6.392E+00,6.445E+00,6.493E+00,6.517E+00,&
6.571E+00,6.617E+00,6.658E+00,6.689E+00,6.748E+00,6.796E+00,6.842E+00,6.897E+00,6.955E+00,7.003E+00,&
7.054E+00,7.111E+00,7.179E+00,7.235E+00,7.274E+00,7.339E+00,7.414E+00,7.481E+00,7.536E+00,7.594E+00,&
7.669E+00,7.734E+00,7.776E+00,7.833E+00,7.893E+00,7.952E+00,8.000E+00,8.045E+00,8.103E+00,8.155E+00,&
8.205E+00,8.241E+00,8.264E+00,8.321E+00,8.352E+00,8.394E+00,8.430E+00,8.448E+00,8.477E+00,8.512E+00,&
8.535E+00,8.562E+00,8.593E+00,8.618E+00,8.640E+00,8.670E+00,8.689E+00,8.720E+00,8.738E+00,8.755E+00,&
8.777E+00,8.778E+00,8.778E+00,8.794E+00,8.805E+00,8.807E+00,8.809E+00,8.811E+00,8.799E+00,8.795E+00/
DATA (k_Cw(i),i=1401,1500)/&
8.789E+00,8.779E+00,8.767E+00,8.754E+00,8.750E+00,8.738E+00,8.739E+00,8.735E+00,8.744E+00,8.753E+00,&
8.755E+00,8.780E+00,8.787E+00,8.790E+00,8.798E+00,8.794E+00,8.811E+00,8.820E+00,8.836E+00,8.845E+00,&
8.854E+00,8.858E+00,8.868E+00,8.869E+00,8.884E+00,8.888E+00,8.900E+00,8.922E+00,8.951E+00,8.973E+00,&
9.010E+00,9.034E+00,9.110E+00,9.146E+00,9.195E+00,9.259E+00,9.315E+00,9.380E+00,9.457E+00,9.535E+00,&
9.633E+00,9.723E+00,9.824E+00,9.935E+00,1.005E+01,1.018E+01,1.031E+01,1.042E+01,1.059E+01,1.075E+01,&
1.094E+01,1.110E+01,1.139E+01,1.160E+01,1.184E+01,1.208E+01,1.235E+01,1.266E+01,1.301E+01,1.334E+01,&
1.375E+01,1.412E+01,1.478E+01,1.529E+01,1.586E+01,1.641E+01,1.709E+01,1.774E+01,1.853E+01,1.925E+01,&
2.051E+01,2.148E+01,2.250E+01,2.367E+01,2.483E+01,2.622E+01,2.751E+01,2.972E+01,3.144E+01,3.317E+01,&
3.504E+01,3.725E+01,3.927E+01,4.269E+01,4.530E+01,4.789E+01,5.060E+01,5.360E+01,5.761E+01,6.088E+01,&
6.401E+01,6.720E+01,7.059E+01,7.497E+01,7.841E+01,8.157E+01,8.469E+01,8.752E+01,9.189E+01,9.456E+01/
DATA (k_Cw(i),i=1501,1600)/&
9.722E+01,9.995E+01,1.033E+02,1.057E+02,1.078E+02,1.097E+02,1.123E+02,1.140E+02,1.157E+02,1.171E+02,&
1.190E+02,1.203E+02,1.215E+02,1.226E+02,1.240E+02,1.248E+02,1.257E+02,1.265E+02,1.275E+02,1.281E+02,&
1.286E+02,1.292E+02,1.296E+02,1.299E+02,1.303E+02,1.304E+02,1.306E+02,1.306E+02,1.306E+02,1.305E+02,&
1.304E+02,1.301E+02,1.299E+02,1.296E+02,1.291E+02,1.287E+02,1.282E+02,1.275E+02,1.270E+02,1.264E+02,&
1.256E+02,1.249E+02,1.242E+02,1.232E+02,1.224E+02,1.216E+02,1.206E+02,1.197E+02,1.190E+02,1.178E+02,&
1.170E+02,1.157E+02,1.149E+02,1.140E+02,1.128E+02,1.118E+02,1.110E+02,1.097E+02,1.088E+02,1.076E+02,&
1.066E+02,1.058E+02,1.044E+02,1.036E+02,1.023E+02,1.014E+02,1.005E+02,9.928E+01,9.831E+01,9.711E+01,&
9.631E+01,9.488E+01,9.412E+01,9.341E+01,9.206E+01,9.121E+01,9.009E+01,8.929E+01,8.804E+01,8.725E+01,&
8.611E+01,8.532E+01,8.460E+01,8.336E+01,8.262E+01,8.151E+01,8.076E+01,7.973E+01,7.904E+01,7.800E+01,&
7.723E+01,7.628E+01,7.557E+01,7.463E+01,7.392E+01,7.298E+01,7.234E+01,7.141E+01,7.082E+01,6.986E+01/
DATA (k_Cw(i),i=1601,1700)/&
6.924E+01,6.865E+01,6.779E+01,6.688E+01,6.634E+01,6.548E+01,6.490E+01,6.412E+01,6.358E+01,6.281E+01,&
6.232E+01,6.156E+01,6.105E+01,6.029E+01,5.980E+01,5.907E+01,5.857E+01,5.788E+01,5.746E+01,5.680E+01,&
5.632E+01,5.566E+01,5.503E+01,5.463E+01,5.398E+01,5.356E+01,5.294E+01,5.256E+01,5.197E+01,5.138E+01,&
5.098E+01,5.047E+01,5.006E+01,4.951E+01,4.915E+01,4.860E+01,4.806E+01,4.773E+01,4.718E+01,4.685E+01,&
4.635E+01,4.584E+01,4.553E+01,4.503E+01,4.473E+01,4.426E+01,4.379E+01,4.348E+01,4.302E+01,4.273E+01,&
4.228E+01,4.186E+01,4.155E+01,4.115E+01,4.072E+01,4.046E+01,4.002E+01,3.975E+01,3.934E+01,3.894E+01,&
3.868E+01,3.827E+01,3.790E+01,3.762E+01,3.726E+01,3.687E+01,3.664E+01,3.628E+01,3.592E+01,3.568E+01,&
3.536E+01,3.499E+01,3.479E+01,3.447E+01,3.413E+01,3.392E+01,3.363E+01,3.329E+01,3.309E+01,3.280E+01,&
3.251E+01,3.231E+01,3.200E+01,3.172E+01,3.154E+01,3.127E+01,3.097E+01,3.071E+01,3.052E+01,3.025E+01,&
2.999E+01,2.980E+01,2.958E+01,2.929E+01,2.906E+01,2.888E+01,2.865E+01,2.840E+01,2.822E+01,2.799E+01/
DATA (k_Cw(i),i=1701,1800)/&
2.776E+01,2.751E+01,2.737E+01,2.713E+01,2.692E+01,2.671E+01,2.657E+01,2.637E+01,2.617E+01,2.597E+01,&
2.584E+01,2.565E+01,2.547E+01,2.528E+01,2.518E+01,2.501E+01,2.485E+01,2.466E+01,2.455E+01,2.438E+01,&
2.420E+01,2.401E+01,2.386E+01,2.374E+01,2.358E+01,2.342E+01,2.328E+01,2.318E+01,2.304E+01,2.289E+01,&
2.275E+01,2.262E+01,2.252E+01,2.239E+01,2.227E+01,2.214E+01,2.202E+01,2.194E+01,2.183E+01,2.172E+01,&
2.162E+01,2.150E+01,2.140E+01,2.133E+01,2.123E+01,2.112E+01,2.102E+01,2.091E+01,2.085E+01,2.077E+01,&
2.067E+01,2.058E+01,2.049E+01,2.040E+01,2.033E+01,2.027E+01,2.018E+01,2.010E+01,2.002E+01,1.995E+01,&
1.987E+01,1.983E+01,1.976E+01,1.969E+01,1.963E+01,1.958E+01,1.952E+01,1.946E+01,1.942E+01,1.938E+01,&
1.933E+01,1.928E+01,1.923E+01,1.918E+01,1.913E+01,1.909E+01,1.904E+01,1.900E+01,1.896E+01,1.889E+01,&
1.884E+01,1.880E+01,1.874E+01,1.870E+01,1.866E+01,1.860E+01,1.856E+01,1.852E+01,1.849E+01,1.845E+01,&
1.843E+01,1.839E+01,1.837E+01,1.837E+01,1.838E+01,1.837E+01,1.836E+01,1.835E+01,1.835E+01,1.835E+01/
DATA (k_Cw(i),i=1801,1900)/&
1.834E+01,1.833E+01,1.833E+01,1.833E+01,1.834E+01,1.833E+01,1.834E+01,1.834E+01,1.832E+01,1.831E+01,&
1.829E+01,1.828E+01,1.828E+01,1.829E+01,1.829E+01,1.830E+01,1.832E+01,1.835E+01,1.836E+01,1.837E+01,&
1.837E+01,1.838E+01,1.837E+01,1.840E+01,1.842E+01,1.843E+01,1.846E+01,1.849E+01,1.851E+01,1.854E+01,&
1.857E+01,1.858E+01,1.860E+01,1.864E+01,1.865E+01,1.867E+01,1.870E+01,1.875E+01,1.879E+01,1.883E+01,&
1.888E+01,1.893E+01,1.897E+01,1.903E+01,1.908E+01,1.913E+01,1.919E+01,1.924E+01,1.929E+01,1.934E+01,&
1.940E+01,1.946E+01,1.951E+01,1.961E+01,1.966E+01,1.973E+01,1.980E+01,1.985E+01,1.991E+01,1.997E+01,&
2.004E+01,2.013E+01,2.020E+01,2.027E+01,2.038E+01,2.046E+01,2.054E+01,2.062E+01,2.071E+01,2.079E+01,&
2.087E+01,2.094E+01,2.103E+01,2.111E+01,2.126E+01,2.135E+01,2.145E+01,2.153E+01,2.164E+01,2.174E+01,&
2.183E+01,2.192E+01,2.202E+01,2.212E+01,2.223E+01,2.234E+01,2.245E+01,2.264E+01,2.276E+01,2.287E+01,&
2.301E+01,2.313E+01,2.327E+01,2.338E+01,2.351E+01,2.363E+01,2.377E+01,2.385E+01,2.409E+01,2.418E+01/
DATA (k_Cw(i),i=1901,2000)/&
2.433E+01,2.444E+01,2.459E+01,2.470E+01,2.485E+01,2.495E+01,2.523E+01,2.534E+01,2.549E+01,2.564E+01,&
2.579E+01,2.592E+01,2.611E+01,2.623E+01,2.653E+01,2.664E+01,2.681E+01,2.695E+01,2.712E+01,2.727E+01,&
2.744E+01,2.756E+01,2.789E+01,2.802E+01,2.819E+01,2.838E+01,2.855E+01,2.869E+01,2.903E+01,2.916E+01,&
2.934E+01,2.951E+01,2.969E+01,2.988E+01,3.002E+01,3.038E+01,3.054E+01,3.073E+01,3.092E+01,3.107E+01,&
3.145E+01,3.160E+01,3.180E+01,3.199E+01,3.219E+01,3.238E+01,3.272E+01,3.290E+01,3.311E+01,3.331E+01,&
3.348E+01,3.388E+01,3.404E+01,3.426E+01,3.447E+01,3.468E+01,3.486E+01,3.528E+01,3.547E+01,3.570E+01,&
3.592E+01,3.610E+01,3.656E+01,3.675E+01,3.697E+01,3.716E+01,3.761E+01,3.777E+01,3.801E+01,3.827E+01,&
3.848E+01,3.892E+01,3.913E+01,3.938E+01,3.957E+01,4.007E+01,4.024E+01,4.050E+01,4.078E+01,4.096E+01,&
4.147E+01,4.167E+01,4.196E+01,4.220E+01,4.268E+01,4.293E+01,4.319E+01,4.342E+01,4.388E+01,4.413E+01,&
4.439E+01,4.464E+01,4.513E+01,4.538E+01,4.565E+01,4.590E+01,4.638E+01,4.663E+01,4.690E+01,4.714E+01/
DATA (k_Cw(i),i=2001,2101)/&
4.764E+01,4.787E+01,4.817E+01,4.836E+01,4.894E+01,4.917E+01,4.940E+01,4.997E+01,5.021E+01,5.050E+01,&
5.078E+01,5.131E+01,5.159E+01,5.191E+01,5.246E+01,5.275E+01,5.310E+01,5.339E+01,5.401E+01,5.429E+01,&
5.462E+01,5.523E+01,5.555E+01,5.588E+01,5.618E+01,5.686E+01,5.713E+01,5.746E+01,5.808E+01,5.841E+01,&
5.878E+01,5.946E+01,5.976E+01,6.009E+01,6.039E+01,6.107E+01,6.146E+01,6.180E+01,6.250E+01,6.286E+01,&
6.316E+01,6.392E+01,6.427E+01,6.462E+01,6.539E+01,6.574E+01,6.609E+01,6.685E+01,6.725E+01,6.759E+01,&
6.842E+01,6.881E+01,6.918E+01,7.009E+01,7.046E+01,7.084E+01,7.174E+01,7.212E+01,7.249E+01,7.340E+01,&
7.381E+01,7.423E+01,7.514E+01,7.554E+01,7.591E+01,7.675E+01,7.718E+01,7.801E+01,7.839E+01,7.880E+01,&
7.970E+01,8.005E+01,8.046E+01,8.136E+01,8.173E+01,8.215E+01,8.295E+01,8.338E+01,8.423E+01,8.474E+01,&
8.510E+01,8.584E+01,8.617E+01,8.637E+01,8.708E+01,8.762E+01,8.884E+01,8.904E+01,8.945E+01,9.055E+01,&
9.089E+01,9.134E+01,9.204E+01,9.244E+01,9.308E+01,9.333E+01,9.358E+01,9.416E+01,9.448E+01,9.499E+01,&
9.530E+01/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! specific absorption of dry matter !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (k_Cm(i),i=1,100)/&
1.097E+02,1.037E+02,9.798E+01,9.244E+01,8.713E+01,8.231E+01,7.806E+01,7.404E+01,7.013E+01,6.654E+01,&
6.300E+01,5.954E+01,5.616E+01,5.301E+01,5.001E+01,4.723E+01,4.463E+01,4.220E+01,3.996E+01,3.780E+01,&
3.567E+01,3.362E+01,3.170E+01,2.993E+01,2.832E+01,2.679E+01,2.535E+01,2.402E+01,2.276E+01,2.150E+01,&
2.024E+01,1.901E+01,1.785E+01,1.676E+01,1.575E+01,1.481E+01,1.392E+01,1.312E+01,1.233E+01,1.161E+01,&
1.096E+01,1.041E+01,9.924E+00,9.410E+00,8.947E+00,8.508E+00,8.087E+00,7.640E+00,7.269E+00,6.939E+00,&
6.660E+00,6.422E+00,6.222E+00,6.010E+00,5.782E+00,5.573E+00,5.370E+00,5.173E+00,4.946E+00,4.761E+00,&
4.575E+00,4.419E+00,4.259E+00,4.117E+00,4.006E+00,3.945E+00,3.853E+00,3.784E+00,3.671E+00,3.554E+00,&
3.462E+00,3.364E+00,3.282E+00,3.184E+00,3.102E+00,3.051E+00,2.983E+00,2.947E+00,2.913E+00,2.869E+00,&
2.803E+00,2.777E+00,2.751E+00,2.726E+00,2.702E+00,2.679E+00,2.656E+00,2.634E+00,2.613E+00,2.593E+00,&
2.573E+00,2.554E+00,2.536E+00,2.519E+00,2.502E+00,2.486E+00,2.471E+00,2.457E+00,2.443E+00,2.430E+00/
DATA (k_Cm(i),i=101,200)/&
2.417E+00,2.405E+00,2.394E+00,2.384E+00,2.374E+00,2.365E+00,2.356E+00,2.348E+00,2.341E+00,2.334E+00,&
2.328E+00,2.323E+00,2.318E+00,2.314E+00,2.310E+00,2.307E+00,2.304E+00,2.303E+00,2.301E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=201,300)/&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=301,400)/&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=401,500)/&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=501,600)/&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=601,700)/&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=701,800)/&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,&
2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00,2.300E+00/
DATA (k_Cm(i),i=801,900)/&
2.300E+00,2.300E+00,2.301E+00,2.301E+00,2.302E+00,2.303E+00,2.305E+00,2.306E+00,2.308E+00,2.310E+00,&
2.312E+00,2.315E+00,2.318E+00,2.320E+00,2.323E+00,2.327E+00,2.330E+00,2.334E+00,2.338E+00,2.342E+00,&
2.346E+00,2.350E+00,2.354E+00,2.359E+00,2.364E+00,2.369E+00,2.374E+00,2.379E+00,2.384E+00,2.389E+00,&
2.395E+00,2.400E+00,2.406E+00,2.412E+00,2.418E+00,2.424E+00,2.430E+00,2.436E+00,2.442E+00,2.448E+00,&
2.454E+00,2.461E+00,2.467E+00,2.473E+00,2.480E+00,2.486E+00,2.493E+00,2.499E+00,2.506E+00,2.512E+00,&
2.519E+00,2.525E+00,2.532E+00,2.538E+00,2.545E+00,2.551E+00,2.558E+00,2.564E+00,2.570E+00,2.577E+00,&
2.583E+00,2.589E+00,2.595E+00,2.601E+00,2.607E+00,2.613E+00,2.619E+00,2.625E+00,2.631E+00,2.636E+00,&
2.642E+00,2.647E+00,2.652E+00,2.657E+00,2.662E+00,2.667E+00,2.672E+00,2.677E+00,2.681E+00,2.685E+00,&
2.689E+00,2.693E+00,2.697E+00,2.701E+00,2.704E+00,2.708E+00,2.711E+00,2.714E+00,2.716E+00,2.719E+00,&
2.721E+00,2.723E+00,2.725E+00,2.726E+00,2.728E+00,2.729E+00,2.730E+00,2.730E+00,2.731E+00,2.731E+00/
DATA (k_Cm(i),i=901,1000)/&
2.721E+00,2.736E+00,2.728E+00,2.719E+00,2.712E+00,2.698E+00,2.702E+00,2.691E+00,2.688E+00,2.686E+00,&
2.682E+00,2.676E+00,2.675E+00,2.677E+00,2.670E+00,2.671E+00,2.668E+00,2.670E+00,2.674E+00,2.674E+00,&
2.683E+00,2.674E+00,2.675E+00,2.682E+00,2.683E+00,2.665E+00,2.661E+00,2.668E+00,2.670E+00,2.674E+00,&
2.664E+00,2.661E+00,2.665E+00,2.668E+00,2.681E+00,2.686E+00,2.684E+00,2.697E+00,2.712E+00,2.720E+00,&
2.717E+00,2.726E+00,2.744E+00,2.743E+00,2.751E+00,2.763E+00,2.778E+00,2.793E+00,2.818E+00,2.835E+00,&
2.865E+00,2.879E+00,2.899E+00,2.918E+00,2.936E+00,2.953E+00,2.966E+00,2.977E+00,2.981E+00,2.942E+00,&
2.888E+00,2.864E+00,2.877E+00,2.886E+00,2.888E+00,2.891E+00,2.899E+00,2.887E+00,2.884E+00,2.900E+00,&
2.929E+00,2.969E+00,3.014E+00,3.053E+00,3.075E+00,3.111E+00,3.128E+00,3.130E+00,3.103E+00,3.051E+00,&
2.980E+00,2.941E+00,2.920E+00,2.931E+00,2.950E+00,2.979E+00,3.025E+00,3.048E+00,3.066E+00,3.087E+00,&
3.099E+00,3.090E+00,3.088E+00,3.081E+00,3.086E+00,3.071E+00,3.065E+00,3.069E+00,3.067E+00,3.085E+00/
DATA (k_Cm(i),i=1001,1100)/&
3.094E+00,3.110E+00,3.136E+00,3.149E+00,3.158E+00,3.191E+00,3.230E+00,3.266E+00,3.298E+00,3.356E+00,&
3.419E+00,3.476E+00,3.534E+00,3.584E+00,3.632E+00,3.708E+00,3.775E+00,3.847E+00,3.931E+00,3.987E+00,&
4.071E+00,4.156E+00,4.242E+00,4.320E+00,4.395E+00,4.480E+00,4.561E+00,4.638E+00,4.708E+00,4.782E+00,&
4.846E+00,4.906E+00,4.974E+00,5.026E+00,5.071E+00,5.131E+00,5.179E+00,5.220E+00,5.271E+00,5.322E+00,&
5.358E+00,5.403E+00,5.441E+00,5.460E+00,5.481E+00,5.500E+00,5.523E+00,5.548E+00,5.560E+00,5.575E+00,&
5.582E+00,5.597E+00,5.611E+00,5.639E+00,5.653E+00,5.675E+00,5.682E+00,5.685E+00,5.680E+00,5.689E+00,&
5.711E+00,5.723E+00,5.715E+00,5.716E+00,5.732E+00,5.741E+00,5.743E+00,5.752E+00,5.745E+00,5.744E+00,&
5.757E+00,5.766E+00,5.781E+00,5.787E+00,5.798E+00,5.810E+00,5.808E+00,5.815E+00,5.825E+00,5.824E+00,&
5.827E+00,5.854E+00,5.878E+00,5.900E+00,5.908E+00,5.922E+00,5.940E+00,5.962E+00,5.963E+00,5.966E+00,&
5.982E+00,5.990E+00,5.994E+00,6.016E+00,6.014E+00,6.025E+00,6.008E+00,6.022E+00,6.021E+00,6.027E+00/
DATA (k_Cm(i),i=1101,1200)/&
6.027E+00,6.035E+00,6.025E+00,6.009E+00,5.990E+00,5.987E+00,5.984E+00,5.971E+00,5.971E+00,5.973E+00,&
5.951E+00,5.952E+00,5.939E+00,5.933E+00,5.931E+00,5.925E+00,5.909E+00,5.897E+00,5.884E+00,5.881E+00,&
5.876E+00,5.856E+00,5.843E+00,5.830E+00,5.818E+00,5.807E+00,5.799E+00,5.792E+00,5.776E+00,5.783E+00,&
5.776E+00,5.767E+00,5.762E+00,5.769E+00,5.756E+00,5.762E+00,5.737E+00,5.740E+00,5.757E+00,5.756E+00,&
5.751E+00,5.754E+00,5.751E+00,5.750E+00,5.744E+00,5.744E+00,5.754E+00,5.744E+00,5.735E+00,5.740E+00,&
5.732E+00,5.728E+00,5.731E+00,5.724E+00,5.724E+00,5.715E+00,5.697E+00,5.693E+00,5.703E+00,5.700E+00,&
5.713E+00,5.728E+00,5.731E+00,5.735E+00,5.743E+00,5.754E+00,5.753E+00,5.744E+00,5.746E+00,5.746E+00,&
5.746E+00,5.750E+00,5.749E+00,5.743E+00,5.740E+00,5.747E+00,5.739E+00,5.755E+00,5.749E+00,5.753E+00,&
5.745E+00,5.732E+00,5.735E+00,5.724E+00,5.725E+00,5.712E+00,5.702E+00,5.700E+00,5.700E+00,5.686E+00,&
5.685E+00,5.672E+00,5.659E+00,5.627E+00,5.613E+00,5.590E+00,5.591E+00,5.563E+00,5.552E+00,5.525E+00/
DATA (k_Cm(i),i=1201,1300)/&
5.517E+00,5.506E+00,5.494E+00,5.459E+00,5.450E+00,5.438E+00,5.428E+00,5.407E+00,5.391E+00,5.382E+00,&
5.376E+00,5.358E+00,5.347E+00,5.333E+00,5.319E+00,5.301E+00,5.292E+00,5.284E+00,5.274E+00,5.258E+00,&
5.253E+00,5.249E+00,5.233E+00,5.222E+00,5.211E+00,5.204E+00,5.210E+00,5.200E+00,5.193E+00,5.186E+00,&
5.177E+00,5.175E+00,5.178E+00,5.173E+00,5.152E+00,5.135E+00,5.139E+00,5.128E+00,5.121E+00,5.114E+00,&
5.126E+00,5.107E+00,5.104E+00,5.100E+00,5.109E+00,5.114E+00,5.112E+00,5.128E+00,5.137E+00,5.131E+00,&
5.152E+00,5.175E+00,5.194E+00,5.200E+00,5.244E+00,5.257E+00,5.273E+00,5.289E+00,5.335E+00,5.366E+00,&
5.389E+00,5.427E+00,5.453E+00,5.490E+00,5.522E+00,5.562E+00,5.605E+00,5.652E+00,5.698E+00,5.744E+00,&
5.743E+00,5.642E+00,5.689E+00,5.722E+00,5.755E+00,5.798E+00,5.848E+00,5.875E+00,5.918E+00,5.971E+00,&
6.022E+00,6.061E+00,6.116E+00,6.173E+00,6.214E+00,6.266E+00,6.319E+00,6.382E+00,6.426E+00,6.486E+00,&
6.542E+00,6.579E+00,6.616E+00,6.666E+00,6.728E+00,6.771E+00,6.807E+00,6.858E+00,6.908E+00,6.959E+00/
DATA (k_Cm(i),i=1301,1400)/&
7.006E+00,7.052E+00,7.093E+00,7.136E+00,7.164E+00,7.199E+00,7.232E+00,7.266E+00,7.315E+00,7.340E+00,&
7.361E+00,7.399E+00,7.440E+00,7.473E+00,7.505E+00,7.525E+00,7.557E+00,7.579E+00,7.604E+00,7.633E+00,&
7.653E+00,7.674E+00,7.691E+00,7.699E+00,7.708E+00,7.721E+00,7.730E+00,7.729E+00,7.712E+00,7.702E+00,&
7.695E+00,7.670E+00,7.644E+00,7.618E+00,7.585E+00,7.555E+00,7.517E+00,7.479E+00,7.451E+00,7.435E+00,&
7.408E+00,7.379E+00,7.363E+00,7.347E+00,7.332E+00,7.332E+00,7.332E+00,7.304E+00,7.295E+00,7.296E+00,&
7.291E+00,7.292E+00,7.292E+00,7.281E+00,7.283E+00,7.264E+00,7.250E+00,7.240E+00,7.228E+00,7.210E+00,&
7.186E+00,7.164E+00,7.143E+00,7.114E+00,7.101E+00,7.069E+00,7.038E+00,7.003E+00,6.974E+00,6.928E+00,&
6.889E+00,6.839E+00,6.793E+00,6.764E+00,6.729E+00,6.694E+00,6.662E+00,6.613E+00,6.572E+00,6.546E+00,&
6.522E+00,6.507E+00,6.482E+00,6.484E+00,6.479E+00,6.494E+00,6.496E+00,6.491E+00,6.461E+00,6.440E+00,&
6.430E+00,6.413E+00,6.421E+00,6.399E+00,6.379E+00,6.365E+00,6.372E+00,6.346E+00,6.321E+00,6.310E+00/
DATA (k_Cm(i),i=1401,1500)/&
6.314E+00,6.282E+00,6.277E+00,6.270E+00,6.258E+00,6.242E+00,6.234E+00,6.221E+00,6.231E+00,6.221E+00,&
6.205E+00,6.193E+00,6.192E+00,6.179E+00,6.159E+00,6.143E+00,6.143E+00,6.120E+00,6.098E+00,6.087E+00,&
6.063E+00,6.056E+00,6.053E+00,6.040E+00,6.044E+00,6.007E+00,5.996E+00,5.994E+00,5.997E+00,5.975E+00,&
5.954E+00,5.946E+00,5.927E+00,5.914E+00,5.890E+00,5.873E+00,5.832E+00,5.794E+00,5.768E+00,5.728E+00,&
5.681E+00,5.680E+00,5.655E+00,5.648E+00,5.620E+00,5.594E+00,5.567E+00,5.557E+00,5.552E+00,5.553E+00,&
5.539E+00,5.524E+00,5.507E+00,5.505E+00,5.487E+00,5.474E+00,5.462E+00,5.450E+00,5.448E+00,5.441E+00,&
5.440E+00,5.442E+00,5.450E+00,5.466E+00,5.461E+00,5.452E+00,5.445E+00,5.412E+00,5.379E+00,5.280E+00,&
5.228E+00,5.199E+00,5.171E+00,5.139E+00,5.124E+00,5.112E+00,5.129E+00,5.145E+00,5.173E+00,5.176E+00,&
5.185E+00,5.183E+00,5.200E+00,5.202E+00,5.204E+00,5.224E+00,5.244E+00,5.297E+00,5.318E+00,5.376E+00,&
5.441E+00,5.491E+00,5.561E+00,5.629E+00,5.687E+00,5.757E+00,5.830E+00,5.911E+00,5.998E+00,6.090E+00/
DATA (k_Cm(i),i=1501,1600)/&
6.193E+00,6.330E+00,6.449E+00,6.581E+00,6.710E+00,6.838E+00,6.970E+00,7.107E+00,7.238E+00,7.388E+00,&
7.506E+00,7.635E+00,7.767E+00,7.874E+00,7.977E+00,8.071E+00,8.150E+00,8.220E+00,8.292E+00,8.350E+00,&
8.449E+00,8.521E+00,8.583E+00,8.666E+00,8.723E+00,8.759E+00,8.821E+00,8.864E+00,8.909E+00,8.941E+00,&
8.949E+00,8.955E+00,8.983E+00,9.022E+00,9.043E+00,9.044E+00,9.028E+00,9.034E+00,9.052E+00,9.048E+00,&
9.041E+00,9.037E+00,9.036E+00,9.035E+00,9.021E+00,9.016E+00,9.008E+00,8.970E+00,8.974E+00,8.953E+00,&
8.957E+00,8.937E+00,8.923E+00,8.912E+00,8.895E+00,8.891E+00,8.880E+00,8.867E+00,8.855E+00,8.852E+00,&
8.861E+00,8.864E+00,8.876E+00,8.869E+00,8.873E+00,8.855E+00,8.828E+00,8.839E+00,8.855E+00,8.856E+00,&
8.833E+00,8.842E+00,8.844E+00,8.830E+00,8.808E+00,8.818E+00,8.807E+00,8.797E+00,8.794E+00,8.791E+00,&
8.795E+00,8.772E+00,8.754E+00,8.759E+00,8.760E+00,8.746E+00,8.762E+00,8.778E+00,8.790E+00,8.795E+00,&
8.811E+00,8.848E+00,8.874E+00,8.885E+00,8.913E+00,8.944E+00,8.981E+00,8.988E+00,9.001E+00,9.034E+00/
DATA (k_Cm(i),i=1601,1700)/&
9.076E+00,9.111E+00,9.141E+00,9.171E+00,9.214E+00,9.255E+00,9.304E+00,9.356E+00,9.406E+00,9.448E+00,&
9.516E+00,9.578E+00,9.638E+00,9.692E+00,9.763E+00,9.845E+00,9.953E+00,1.004E+01,1.015E+01,1.027E+01,&
1.039E+01,1.052E+01,1.063E+01,1.077E+01,1.091E+01,1.103E+01,1.119E+01,1.135E+01,1.150E+01,1.166E+01,&
1.181E+01,1.201E+01,1.217E+01,1.235E+01,1.251E+01,1.269E+01,1.287E+01,1.307E+01,1.325E+01,1.346E+01,&
1.364E+01,1.384E+01,1.404E+01,1.423E+01,1.441E+01,1.461E+01,1.481E+01,1.499E+01,1.518E+01,1.534E+01,&
1.554E+01,1.571E+01,1.591E+01,1.607E+01,1.622E+01,1.637E+01,1.653E+01,1.667E+01,1.678E+01,1.690E+01,&
1.698E+01,1.709E+01,1.718E+01,1.725E+01,1.734E+01,1.739E+01,1.748E+01,1.755E+01,1.761E+01,1.767E+01,&
1.771E+01,1.777E+01,1.783E+01,1.787E+01,1.794E+01,1.795E+01,1.799E+01,1.805E+01,1.809E+01,1.813E+01,&
1.820E+01,1.827E+01,1.830E+01,1.835E+01,1.841E+01,1.846E+01,1.852E+01,1.856E+01,1.861E+01,1.866E+01,&
1.871E+01,1.876E+01,1.881E+01,1.885E+01,1.890E+01,1.896E+01,1.903E+01,1.907E+01,1.911E+01,1.916E+01/
DATA (k_Cm(i),i=1701,1800)/&
1.921E+01,1.927E+01,1.929E+01,1.932E+01,1.935E+01,1.936E+01,1.940E+01,1.943E+01,1.947E+01,1.951E+01,&
1.953E+01,1.956E+01,1.960E+01,1.961E+01,1.962E+01,1.966E+01,1.966E+01,1.969E+01,1.970E+01,1.972E+01,&
1.974E+01,1.974E+01,1.976E+01,1.978E+01,1.979E+01,1.982E+01,1.982E+01,1.983E+01,1.986E+01,1.988E+01,&
1.989E+01,1.989E+01,1.993E+01,1.993E+01,1.997E+01,2.001E+01,2.001E+01,2.004E+01,2.007E+01,2.011E+01,&
2.014E+01,2.015E+01,2.017E+01,2.019E+01,2.022E+01,2.023E+01,2.023E+01,2.026E+01,2.027E+01,2.029E+01,&
2.028E+01,2.027E+01,2.029E+01,2.029E+01,2.031E+01,2.029E+01,2.026E+01,2.030E+01,2.031E+01,2.030E+01,&
2.030E+01,2.031E+01,2.029E+01,2.029E+01,2.026E+01,2.025E+01,2.023E+01,2.020E+01,2.016E+01,2.015E+01,&
2.012E+01,2.009E+01,2.007E+01,2.003E+01,1.998E+01,1.996E+01,1.991E+01,1.986E+01,1.980E+01,1.975E+01,&
1.969E+01,1.964E+01,1.959E+01,1.953E+01,1.947E+01,1.941E+01,1.936E+01,1.931E+01,1.922E+01,1.918E+01,&
1.912E+01,1.906E+01,1.899E+01,1.890E+01,1.885E+01,1.881E+01,1.875E+01,1.867E+01,1.861E+01,1.858E+01/
DATA (k_Cm(i),i=1801,1900)/&
1.852E+01,1.846E+01,1.840E+01,1.834E+01,1.829E+01,1.824E+01,1.819E+01,1.813E+01,1.807E+01,1.803E+01,&
1.798E+01,1.792E+01,1.788E+01,1.782E+01,1.780E+01,1.775E+01,1.773E+01,1.768E+01,1.768E+01,1.766E+01,&
1.763E+01,1.762E+01,1.763E+01,1.764E+01,1.764E+01,1.766E+01,1.770E+01,1.774E+01,1.779E+01,1.786E+01,&
1.795E+01,1.803E+01,1.814E+01,1.825E+01,1.836E+01,1.851E+01,1.866E+01,1.881E+01,1.895E+01,1.913E+01,&
1.932E+01,1.951E+01,1.972E+01,1.994E+01,2.017E+01,2.040E+01,2.065E+01,2.089E+01,2.114E+01,2.140E+01,&
2.163E+01,2.186E+01,2.210E+01,2.237E+01,2.262E+01,2.290E+01,2.313E+01,2.339E+01,2.361E+01,2.387E+01,&
2.410E+01,2.435E+01,2.455E+01,2.479E+01,2.499E+01,2.521E+01,2.541E+01,2.562E+01,2.583E+01,2.605E+01,&
2.626E+01,2.643E+01,2.657E+01,2.674E+01,2.689E+01,2.701E+01,2.718E+01,2.732E+01,2.742E+01,2.754E+01,&
2.763E+01,2.777E+01,2.794E+01,2.804E+01,2.821E+01,2.836E+01,2.850E+01,2.863E+01,2.878E+01,2.896E+01,&
2.913E+01,2.922E+01,2.937E+01,2.947E+01,2.960E+01,2.970E+01,2.982E+01,2.997E+01,3.007E+01,3.018E+01/
DATA (k_Cm(i),i=1901,2000)/&
3.028E+01,3.040E+01,3.053E+01,3.060E+01,3.066E+01,3.070E+01,3.076E+01,3.078E+01,3.075E+01,3.074E+01,&
3.072E+01,3.065E+01,3.058E+01,3.051E+01,3.045E+01,3.034E+01,3.029E+01,3.023E+01,3.015E+01,3.004E+01,&
3.000E+01,2.998E+01,2.991E+01,2.986E+01,2.984E+01,2.981E+01,2.976E+01,2.973E+01,2.976E+01,2.977E+01,&
2.979E+01,2.981E+01,2.985E+01,2.989E+01,2.999E+01,3.000E+01,3.005E+01,3.007E+01,3.011E+01,3.017E+01,&
3.022E+01,3.023E+01,3.029E+01,3.028E+01,3.029E+01,3.027E+01,3.027E+01,3.024E+01,3.019E+01,3.010E+01,&
3.010E+01,3.003E+01,2.993E+01,2.983E+01,2.984E+01,2.975E+01,2.966E+01,2.958E+01,2.948E+01,2.930E+01,&
2.926E+01,2.920E+01,2.913E+01,2.902E+01,2.890E+01,2.882E+01,2.873E+01,2.873E+01,2.870E+01,2.865E+01,&
2.858E+01,2.854E+01,2.851E+01,2.846E+01,2.838E+01,2.834E+01,2.823E+01,2.820E+01,2.817E+01,2.809E+01,&
2.803E+01,2.803E+01,2.801E+01,2.794E+01,2.791E+01,2.790E+01,2.784E+01,2.779E+01,2.781E+01,2.782E+01,&
2.781E+01,2.781E+01,2.783E+01,2.785E+01,2.785E+01,2.782E+01,2.782E+01,2.780E+01,2.780E+01,2.778E+01/
DATA (k_Cm(i),i=2001,2101)/&
2.783E+01,2.784E+01,2.788E+01,2.791E+01,2.799E+01,2.804E+01,2.812E+01,2.811E+01,2.819E+01,2.819E+01,&
2.817E+01,2.817E+01,2.831E+01,2.837E+01,2.848E+01,2.853E+01,2.859E+01,2.870E+01,2.874E+01,2.887E+01,&
2.898E+01,2.910E+01,2.923E+01,2.934E+01,2.944E+01,2.959E+01,2.973E+01,2.987E+01,3.002E+01,3.016E+01,&
3.035E+01,3.043E+01,3.064E+01,3.084E+01,3.098E+01,3.122E+01,3.132E+01,3.152E+01,3.165E+01,3.184E+01,&
3.204E+01,3.221E+01,3.233E+01,3.255E+01,3.282E+01,3.315E+01,3.339E+01,3.360E+01,3.384E+01,3.410E+01,&
3.426E+01,3.452E+01,3.473E+01,3.497E+01,3.519E+01,3.539E+01,3.561E+01,3.579E+01,3.604E+01,3.618E+01,&
3.636E+01,3.660E+01,3.675E+01,3.682E+01,3.699E+01,3.711E+01,3.735E+01,3.758E+01,3.792E+01,3.796E+01,&
3.812E+01,3.822E+01,3.833E+01,3.856E+01,3.874E+01,3.877E+01,3.884E+01,3.883E+01,3.879E+01,3.886E+01,&
3.900E+01,3.900E+01,3.906E+01,3.905E+01,3.916E+01,3.928E+01,3.948E+01,3.943E+01,3.951E+01,3.964E+01,&
3.953E+01,3.960E+01,3.958E+01,3.954E+01,3.940E+01,3.936E+01,3.917E+01,3.926E+01,3.893E+01,3.921E+01,&
3.871E+01/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! direct energy from the sum !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (Es(i),i=1,100)/&
5.910E-01,5.980E-01,6.050E-01,6.120E-01,6.190E-01,6.260E-01,6.330E-01,6.400E-01,6.470E-01,6.540E-01,&
6.610E-01,6.680E-01,6.750E-01,6.820E-01,6.890E-01,6.960E-01,7.030E-01,7.100E-01,7.170E-01,7.240E-01,&
7.310E-01,7.376E-01,7.442E-01,7.508E-01,7.574E-01,7.640E-01,7.706E-01,7.772E-01,7.838E-01,7.904E-01,&
7.970E-01,8.036E-01,8.102E-01,8.168E-01,8.234E-01,8.300E-01,8.366E-01,8.432E-01,8.498E-01,8.564E-01,&
8.630E-01,8.743E-01,8.856E-01,8.969E-01,9.082E-01,9.195E-01,9.308E-01,9.421E-01,9.534E-01,9.647E-01,&
9.760E-01,9.873E-01,9.986E-01,1.010E+00,1.021E+00,1.033E+00,1.044E+00,1.055E+00,1.066E+00,1.078E+00,&
1.089E+00,1.094E+00,1.100E+00,1.105E+00,1.110E+00,1.116E+00,1.121E+00,1.126E+00,1.131E+00,1.137E+00,&
1.142E+00,1.147E+00,1.153E+00,1.158E+00,1.163E+00,1.169E+00,1.174E+00,1.179E+00,1.184E+00,1.190E+00,&
1.195E+00,1.194E+00,1.194E+00,1.193E+00,1.192E+00,1.191E+00,1.191E+00,1.190E+00,1.189E+00,1.188E+00,&
1.188E+00,1.187E+00,1.186E+00,1.185E+00,1.185E+00,1.184E+00,1.183E+00,1.182E+00,1.182E+00,1.181E+00/
DATA (Es(i),i=101,200)/&
1.180E+00,1.180E+00,1.181E+00,1.181E+00,1.181E+00,1.182E+00,1.182E+00,1.182E+00,1.182E+00,1.183E+00,&
1.183E+00,1.183E+00,1.184E+00,1.184E+00,1.184E+00,1.185E+00,1.185E+00,1.185E+00,1.185E+00,1.186E+00,&
1.186E+00,1.191E+00,1.195E+00,1.200E+00,1.204E+00,1.209E+00,1.213E+00,1.218E+00,1.222E+00,1.227E+00,&
1.231E+00,1.236E+00,1.240E+00,1.245E+00,1.249E+00,1.254E+00,1.258E+00,1.263E+00,1.267E+00,1.272E+00,&
1.276E+00,1.278E+00,1.279E+00,1.281E+00,1.282E+00,1.284E+00,1.285E+00,1.287E+00,1.288E+00,1.290E+00,&
1.291E+00,1.293E+00,1.294E+00,1.296E+00,1.297E+00,1.299E+00,1.300E+00,1.302E+00,1.303E+00,1.305E+00,&
1.306E+00,1.309E+00,1.312E+00,1.314E+00,1.317E+00,1.320E+00,1.323E+00,1.325E+00,1.328E+00,1.331E+00,&
1.334E+00,1.336E+00,1.339E+00,1.342E+00,1.345E+00,1.347E+00,1.350E+00,1.353E+00,1.356E+00,1.358E+00,&
1.361E+00,1.361E+00,1.360E+00,1.360E+00,1.360E+00,1.359E+00,1.359E+00,1.359E+00,1.358E+00,1.358E+00,&
1.358E+00,1.357E+00,1.357E+00,1.356E+00,1.356E+00,1.356E+00,1.355E+00,1.355E+00,1.355E+00,1.354E+00/
DATA (Es(i),i=201,300)/&
1.354E+00,1.356E+00,1.357E+00,1.359E+00,1.360E+00,1.362E+00,1.363E+00,1.365E+00,1.366E+00,1.368E+00,&
1.370E+00,1.371E+00,1.373E+00,1.374E+00,1.376E+00,1.377E+00,1.379E+00,1.380E+00,1.382E+00,1.383E+00,&
1.385E+00,1.385E+00,1.385E+00,1.385E+00,1.386E+00,1.386E+00,1.386E+00,1.386E+00,1.386E+00,1.386E+00,&
1.387E+00,1.387E+00,1.387E+00,1.387E+00,1.387E+00,1.387E+00,1.387E+00,1.388E+00,1.388E+00,1.388E+00,&
1.388E+00,1.387E+00,1.386E+00,1.384E+00,1.383E+00,1.382E+00,1.381E+00,1.379E+00,1.378E+00,1.377E+00,&
1.376E+00,1.374E+00,1.373E+00,1.372E+00,1.371E+00,1.369E+00,1.368E+00,1.367E+00,1.366E+00,1.364E+00,&
1.363E+00,1.364E+00,1.365E+00,1.366E+00,1.367E+00,1.368E+00,1.368E+00,1.369E+00,1.370E+00,1.371E+00,&
1.372E+00,1.373E+00,1.374E+00,1.375E+00,1.376E+00,1.377E+00,1.377E+00,1.378E+00,1.379E+00,1.380E+00,&
1.381E+00,1.375E+00,1.368E+00,1.362E+00,1.355E+00,1.349E+00,1.343E+00,1.336E+00,1.330E+00,1.323E+00,&
1.317E+00,1.311E+00,1.304E+00,1.298E+00,1.291E+00,1.285E+00,1.279E+00,1.272E+00,1.266E+00,1.259E+00/
DATA (Es(i),i=301,400)/&
1.253E+00,1.236E+00,1.219E+00,1.201E+00,1.184E+00,1.167E+00,1.150E+00,1.133E+00,1.115E+00,1.098E+00,&
1.081E+00,1.064E+00,1.047E+00,1.029E+00,1.012E+00,9.950E-01,9.778E-01,9.606E-01,9.434E-01,9.262E-01,&
9.090E-01,9.248E-01,9.405E-01,9.563E-01,9.720E-01,9.878E-01,1.004E+00,1.019E+00,1.035E+00,1.051E+00,&
1.067E+00,1.082E+00,1.098E+00,1.114E+00,1.130E+00,1.145E+00,1.161E+00,1.177E+00,1.193E+00,1.208E+00,&
1.224E+00,1.182E+00,1.139E+00,1.097E+00,1.054E+00,1.012E+00,9.693E-01,9.269E-01,8.844E-01,8.420E-01,&
7.995E-01,7.571E-01,7.146E-01,6.722E-01,6.297E-01,5.873E-01,5.448E-01,5.024E-01,4.599E-01,4.175E-01,&
3.750E-01,4.185E-01,4.619E-01,5.054E-01,5.488E-01,5.923E-01,6.357E-01,6.792E-01,7.226E-01,7.661E-01,&
8.095E-01,8.530E-01,8.964E-01,9.399E-01,9.833E-01,1.027E+00,1.070E+00,1.114E+00,1.157E+00,1.201E+00,&
1.244E+00,1.242E+00,1.240E+00,1.238E+00,1.235E+00,1.233E+00,1.231E+00,1.229E+00,1.227E+00,1.225E+00,&
1.223E+00,1.220E+00,1.218E+00,1.216E+00,1.214E+00,1.212E+00,1.210E+00,1.207E+00,1.205E+00,1.203E+00/
DATA (Es(i),i=401,500)/&
1.201E+00,1.188E+00,1.174E+00,1.161E+00,1.147E+00,1.134E+00,1.121E+00,1.107E+00,1.094E+00,1.080E+00,&
1.067E+00,1.054E+00,1.040E+00,1.027E+00,1.013E+00,1.000E+00,9.866E-01,9.732E-01,9.598E-01,9.464E-01,&
9.330E-01,9.416E-01,9.502E-01,9.588E-01,9.674E-01,9.760E-01,9.846E-01,9.932E-01,1.002E+00,1.010E+00,&
1.019E+00,1.028E+00,1.036E+00,1.045E+00,1.053E+00,1.062E+00,1.071E+00,1.079E+00,1.088E+00,1.096E+00,&
1.105E+00,1.105E+00,1.106E+00,1.106E+00,1.107E+00,1.107E+00,1.108E+00,1.108E+00,1.109E+00,1.109E+00,&
1.110E+00,1.110E+00,1.110E+00,1.111E+00,1.111E+00,1.112E+00,1.112E+00,1.113E+00,1.113E+00,1.114E+00,&
1.114E+00,1.114E+00,1.113E+00,1.113E+00,1.112E+00,1.112E+00,1.111E+00,1.111E+00,1.110E+00,1.110E+00,&
1.109E+00,1.109E+00,1.108E+00,1.108E+00,1.107E+00,1.107E+00,1.106E+00,1.106E+00,1.105E+00,1.105E+00,&
1.104E+00,1.087E+00,1.070E+00,1.053E+00,1.036E+00,1.019E+00,1.002E+00,9.850E-01,9.680E-01,9.510E-01,&
9.340E-01,9.170E-01,9.000E-01,8.830E-01,8.660E-01,8.490E-01,8.320E-01,8.150E-01,7.980E-01,7.810E-01/
DATA (Es(i),i=501,600)/&
7.640E-01,7.758E-01,7.876E-01,7.994E-01,8.112E-01,8.230E-01,8.348E-01,8.466E-01,8.584E-01,8.702E-01,&
8.820E-01,8.938E-01,9.056E-01,9.174E-01,9.292E-01,9.410E-01,9.528E-01,9.646E-01,9.764E-01,9.882E-01,&
1.000E+00,9.771E-01,9.542E-01,9.313E-01,9.084E-01,8.855E-01,8.626E-01,8.397E-01,8.168E-01,7.939E-01,&
7.710E-01,7.481E-01,7.252E-01,7.023E-01,6.794E-01,6.565E-01,6.336E-01,6.107E-01,5.878E-01,5.649E-01,&
5.420E-01,5.412E-01,5.403E-01,5.395E-01,5.386E-01,5.378E-01,5.369E-01,5.361E-01,5.352E-01,5.344E-01,&
5.335E-01,5.327E-01,5.318E-01,5.310E-01,5.301E-01,5.293E-01,5.284E-01,5.276E-01,5.267E-01,5.259E-01,&
5.250E-01,5.362E-01,5.474E-01,5.586E-01,5.698E-01,5.810E-01,5.922E-01,6.034E-01,6.146E-01,6.258E-01,&
6.370E-01,6.482E-01,6.594E-01,6.706E-01,6.818E-01,6.930E-01,7.042E-01,7.154E-01,7.266E-01,7.378E-01,&
7.490E-01,7.564E-01,7.638E-01,7.712E-01,7.786E-01,7.860E-01,7.934E-01,8.008E-01,8.082E-01,8.156E-01,&
8.230E-01,8.304E-01,8.378E-01,8.452E-01,8.526E-01,8.600E-01,8.674E-01,8.748E-01,8.822E-01,8.896E-01/
DATA (Es(i),i=601,700)/&
8.970E-01,8.952E-01,8.933E-01,8.915E-01,8.896E-01,8.878E-01,8.859E-01,8.841E-01,8.822E-01,8.804E-01,&
8.785E-01,8.767E-01,8.748E-01,8.730E-01,8.711E-01,8.693E-01,8.674E-01,8.656E-01,8.637E-01,8.619E-01,&
8.600E-01,8.588E-01,8.575E-01,8.563E-01,8.550E-01,8.538E-01,8.525E-01,8.513E-01,8.500E-01,8.488E-01,&
8.475E-01,8.463E-01,8.450E-01,8.438E-01,8.425E-01,8.413E-01,8.400E-01,8.388E-01,8.375E-01,8.363E-01,&
8.350E-01,8.333E-01,8.316E-01,8.299E-01,8.282E-01,8.265E-01,8.248E-01,8.231E-01,8.214E-01,8.197E-01,&
8.180E-01,8.163E-01,8.146E-01,8.129E-01,8.112E-01,8.095E-01,8.078E-01,8.061E-01,8.044E-01,8.027E-01,&
8.010E-01,7.977E-01,7.944E-01,7.911E-01,7.878E-01,7.845E-01,7.812E-01,7.779E-01,7.746E-01,7.713E-01,&
7.680E-01,7.647E-01,7.614E-01,7.581E-01,7.548E-01,7.515E-01,7.482E-01,7.449E-01,7.416E-01,7.383E-01,&
7.350E-01,7.326E-01,7.302E-01,7.278E-01,7.254E-01,7.230E-01,7.206E-01,7.182E-01,7.158E-01,7.134E-01,&
7.110E-01,7.086E-01,7.062E-01,7.038E-01,7.014E-01,6.990E-01,6.966E-01,6.942E-01,6.918E-01,6.894E-01/
DATA (Es(i),i=701,800)/&
6.870E-01,6.634E-01,6.397E-01,6.161E-01,5.924E-01,5.688E-01,5.451E-01,5.215E-01,4.978E-01,4.742E-01,&
4.505E-01,4.269E-01,4.032E-01,3.796E-01,3.559E-01,3.323E-01,3.086E-01,2.850E-01,2.613E-01,2.377E-01,&
2.140E-01,2.231E-01,2.321E-01,2.412E-01,2.502E-01,2.593E-01,2.683E-01,2.774E-01,2.864E-01,2.955E-01,&
3.045E-01,3.136E-01,3.226E-01,3.317E-01,3.407E-01,3.498E-01,3.588E-01,3.679E-01,3.769E-01,3.860E-01,&
3.950E-01,3.950E-01,3.949E-01,3.949E-01,3.948E-01,3.948E-01,3.947E-01,3.947E-01,3.946E-01,3.946E-01,&
3.945E-01,3.945E-01,3.944E-01,3.944E-01,3.943E-01,3.943E-01,3.942E-01,3.942E-01,3.941E-01,3.941E-01,&
3.940E-01,4.020E-01,4.099E-01,4.179E-01,4.258E-01,4.338E-01,4.417E-01,4.497E-01,4.576E-01,4.656E-01,&
4.735E-01,4.815E-01,4.894E-01,4.974E-01,5.053E-01,5.133E-01,5.212E-01,5.292E-01,5.371E-01,5.451E-01,&
5.530E-01,5.526E-01,5.522E-01,5.518E-01,5.514E-01,5.510E-01,5.506E-01,5.502E-01,5.498E-01,5.494E-01,&
5.490E-01,5.486E-01,5.482E-01,5.478E-01,5.474E-01,5.470E-01,5.466E-01,5.462E-01,5.458E-01,5.454E-01/
DATA (Es(i),i=801,900)/&
5.450E-01,5.477E-01,5.504E-01,5.531E-01,5.558E-01,5.585E-01,5.612E-01,5.639E-01,5.666E-01,5.693E-01,&
5.720E-01,5.747E-01,5.774E-01,5.801E-01,5.828E-01,5.855E-01,5.882E-01,5.909E-01,5.936E-01,5.963E-01,&
5.990E-01,5.984E-01,5.978E-01,5.972E-01,5.966E-01,5.960E-01,5.954E-01,5.948E-01,5.942E-01,5.936E-01,&
5.930E-01,5.924E-01,5.918E-01,5.912E-01,5.906E-01,5.900E-01,5.894E-01,5.888E-01,5.882E-01,5.876E-01,&
5.870E-01,5.848E-01,5.825E-01,5.803E-01,5.780E-01,5.758E-01,5.735E-01,5.713E-01,5.690E-01,5.668E-01,&
5.645E-01,5.623E-01,5.600E-01,5.578E-01,5.555E-01,5.533E-01,5.510E-01,5.488E-01,5.465E-01,5.443E-01,&
5.420E-01,5.406E-01,5.392E-01,5.378E-01,5.364E-01,5.350E-01,5.336E-01,5.322E-01,5.308E-01,5.294E-01,&
5.280E-01,5.266E-01,5.252E-01,5.238E-01,5.224E-01,5.210E-01,5.196E-01,5.182E-01,5.168E-01,5.154E-01,&
5.140E-01,5.114E-01,5.088E-01,5.062E-01,5.036E-01,5.010E-01,4.984E-01,4.958E-01,4.932E-01,4.906E-01,&
4.880E-01,4.854E-01,4.828E-01,4.802E-01,4.776E-01,4.750E-01,4.724E-01,4.698E-01,4.672E-01,4.646E-01/
DATA (Es(i),i=901,1000)/&
4.620E-01,4.553E-01,4.485E-01,4.418E-01,4.350E-01,4.283E-01,4.215E-01,4.148E-01,4.080E-01,4.013E-01,&
3.945E-01,3.878E-01,3.810E-01,3.743E-01,3.675E-01,3.608E-01,3.540E-01,3.473E-01,3.405E-01,3.338E-01,&
3.270E-01,3.239E-01,3.207E-01,3.176E-01,3.144E-01,3.113E-01,3.081E-01,3.050E-01,3.018E-01,2.987E-01,&
2.955E-01,2.924E-01,2.892E-01,2.861E-01,2.829E-01,2.798E-01,2.766E-01,2.735E-01,2.703E-01,2.672E-01,&
2.640E-01,2.512E-01,2.383E-01,2.255E-01,2.126E-01,1.998E-01,1.869E-01,1.741E-01,1.612E-01,1.484E-01,&
1.355E-01,1.227E-01,1.098E-01,9.695E-02,8.410E-02,7.125E-02,5.840E-02,4.555E-02,3.270E-02,1.985E-02,&
7.000E-03,6.800E-03,6.600E-03,6.400E-03,6.200E-03,6.000E-03,5.800E-03,5.600E-03,5.400E-03,5.200E-03,&
5.000E-03,4.800E-03,4.600E-03,4.400E-03,4.200E-03,4.000E-03,3.800E-03,3.600E-03,3.400E-03,3.200E-03,&
3.000E-03,3.900E-03,4.800E-03,5.700E-03,6.600E-03,7.500E-03,8.400E-03,9.300E-03,1.020E-02,1.110E-02,&
1.200E-02,1.290E-02,1.380E-02,1.470E-02,1.560E-02,1.650E-02,1.740E-02,1.830E-02,1.920E-02,2.010E-02/
DATA (Es(i),i=1001,1100)/&
2.100E-02,2.095E-02,2.090E-02,2.085E-02,2.080E-02,2.075E-02,2.070E-02,2.065E-02,2.060E-02,2.055E-02,&
2.050E-02,2.045E-02,2.040E-02,2.035E-02,2.030E-02,2.025E-02,2.020E-02,2.015E-02,2.010E-02,2.005E-02,&
2.000E-02,2.170E-02,2.340E-02,2.510E-02,2.680E-02,2.850E-02,3.020E-02,3.190E-02,3.360E-02,3.530E-02,&
3.700E-02,3.870E-02,4.040E-02,4.210E-02,4.380E-02,4.550E-02,4.720E-02,4.890E-02,5.060E-02,5.230E-02,&
5.400E-02,5.795E-02,6.190E-02,6.585E-02,6.980E-02,7.375E-02,7.770E-02,8.165E-02,8.560E-02,8.955E-02,&
9.350E-02,9.745E-02,1.014E-01,1.054E-01,1.093E-01,1.133E-01,1.172E-01,1.212E-01,1.251E-01,1.291E-01,&
1.330E-01,1.327E-01,1.324E-01,1.321E-01,1.318E-01,1.315E-01,1.312E-01,1.309E-01,1.306E-01,1.303E-01,&
1.300E-01,1.297E-01,1.294E-01,1.291E-01,1.288E-01,1.285E-01,1.282E-01,1.279E-01,1.276E-01,1.273E-01,&
1.270E-01,1.384E-01,1.497E-01,1.611E-01,1.724E-01,1.838E-01,1.951E-01,2.065E-01,2.178E-01,2.292E-01,&
2.405E-01,2.519E-01,2.632E-01,2.746E-01,2.859E-01,2.973E-01,3.086E-01,3.200E-01,3.313E-01,3.427E-01/
DATA (Es(i),i=1101,1200)/&
3.540E-01,3.541E-01,3.541E-01,3.542E-01,3.542E-01,3.543E-01,3.543E-01,3.544E-01,3.544E-01,3.545E-01,&
3.545E-01,3.546E-01,3.546E-01,3.547E-01,3.547E-01,3.548E-01,3.548E-01,3.549E-01,3.549E-01,3.550E-01,&
3.550E-01,3.556E-01,3.561E-01,3.567E-01,3.572E-01,3.578E-01,3.583E-01,3.589E-01,3.594E-01,3.600E-01,&
3.605E-01,3.611E-01,3.616E-01,3.622E-01,3.627E-01,3.633E-01,3.638E-01,3.644E-01,3.649E-01,3.655E-01,&
3.660E-01,3.659E-01,3.658E-01,3.657E-01,3.656E-01,3.655E-01,3.654E-01,3.653E-01,3.652E-01,3.651E-01,&
3.650E-01,3.649E-01,3.648E-01,3.647E-01,3.646E-01,3.645E-01,3.644E-01,3.643E-01,3.642E-01,3.641E-01,&
3.640E-01,3.626E-01,3.611E-01,3.597E-01,3.582E-01,3.568E-01,3.553E-01,3.539E-01,3.524E-01,3.510E-01,&
3.495E-01,3.481E-01,3.466E-01,3.452E-01,3.437E-01,3.423E-01,3.408E-01,3.394E-01,3.379E-01,3.365E-01,&
3.350E-01,3.350E-01,3.349E-01,3.349E-01,3.348E-01,3.348E-01,3.347E-01,3.347E-01,3.346E-01,3.346E-01,&
3.345E-01,3.345E-01,3.344E-01,3.344E-01,3.343E-01,3.343E-01,3.342E-01,3.342E-01,3.341E-01,3.341E-01/
DATA (Es(i),i=1201,1300)/&
3.340E-01,3.338E-01,3.335E-01,3.333E-01,3.330E-01,3.328E-01,3.325E-01,3.323E-01,3.320E-01,3.318E-01,&
3.315E-01,3.313E-01,3.310E-01,3.308E-01,3.305E-01,3.303E-01,3.300E-01,3.298E-01,3.295E-01,3.293E-01,&
3.290E-01,3.280E-01,3.270E-01,3.260E-01,3.250E-01,3.240E-01,3.230E-01,3.220E-01,3.210E-01,3.200E-01,&
3.190E-01,3.180E-01,3.170E-01,3.160E-01,3.150E-01,3.140E-01,3.130E-01,3.120E-01,3.110E-01,3.100E-01,&
3.090E-01,3.089E-01,3.087E-01,3.086E-01,3.084E-01,3.083E-01,3.081E-01,3.080E-01,3.078E-01,3.077E-01,&
3.075E-01,3.074E-01,3.072E-01,3.071E-01,3.069E-01,3.068E-01,3.066E-01,3.065E-01,3.063E-01,3.062E-01,&
3.060E-01,3.054E-01,3.047E-01,3.041E-01,3.034E-01,3.028E-01,3.021E-01,3.015E-01,3.008E-01,3.002E-01,&
2.995E-01,2.989E-01,2.982E-01,2.976E-01,2.969E-01,2.963E-01,2.956E-01,2.950E-01,2.943E-01,2.937E-01,&
2.930E-01,2.923E-01,2.915E-01,2.908E-01,2.900E-01,2.893E-01,2.885E-01,2.878E-01,2.870E-01,2.863E-01,&
2.855E-01,2.848E-01,2.840E-01,2.833E-01,2.825E-01,2.818E-01,2.810E-01,2.803E-01,2.795E-01,2.788E-01/
DATA (Es(i),i=1301,1400)/&
2.780E-01,2.777E-01,2.774E-01,2.771E-01,2.768E-01,2.765E-01,2.762E-01,2.759E-01,2.756E-01,2.753E-01,&
2.750E-01,2.747E-01,2.744E-01,2.741E-01,2.738E-01,2.735E-01,2.732E-01,2.729E-01,2.726E-01,2.723E-01,&
2.720E-01,2.689E-01,2.658E-01,2.627E-01,2.596E-01,2.565E-01,2.534E-01,2.503E-01,2.472E-01,2.441E-01,&
2.410E-01,2.379E-01,2.348E-01,2.317E-01,2.286E-01,2.255E-01,2.224E-01,2.193E-01,2.162E-01,2.131E-01,&
2.100E-01,2.119E-01,2.138E-01,2.157E-01,2.176E-01,2.195E-01,2.214E-01,2.233E-01,2.252E-01,2.271E-01,&
2.290E-01,2.309E-01,2.328E-01,2.347E-01,2.366E-01,2.385E-01,2.404E-01,2.423E-01,2.442E-01,2.461E-01,&
2.480E-01,2.427E-01,2.373E-01,2.320E-01,2.266E-01,2.213E-01,2.159E-01,2.106E-01,2.052E-01,1.999E-01,&
1.945E-01,1.892E-01,1.838E-01,1.785E-01,1.731E-01,1.678E-01,1.624E-01,1.571E-01,1.517E-01,1.464E-01,&
1.410E-01,1.356E-01,1.302E-01,1.248E-01,1.194E-01,1.140E-01,1.086E-01,1.032E-01,9.780E-02,9.240E-02,&
8.700E-02,8.160E-02,7.620E-02,7.080E-02,6.540E-02,6.000E-02,5.460E-02,4.920E-02,4.380E-02,3.840E-02/
DATA (Es(i),i=1401,1500)/&
3.300E-02,3.160E-02,3.020E-02,2.880E-02,2.740E-02,2.600E-02,2.460E-02,2.320E-02,2.180E-02,2.040E-02,&
1.900E-02,1.760E-02,1.620E-02,1.480E-02,1.340E-02,1.200E-02,1.060E-02,9.200E-03,7.800E-03,6.400E-03,&
5.000E-03,4.900E-03,4.800E-03,4.700E-03,4.600E-03,4.500E-03,4.400E-03,4.300E-03,4.200E-03,4.100E-03,&
4.000E-03,3.900E-03,3.800E-03,3.700E-03,3.600E-03,3.500E-03,3.400E-03,3.300E-03,3.200E-03,3.100E-03,&
3.000E-03,2.900E-03,2.800E-03,2.700E-03,2.600E-03,2.500E-03,2.400E-03,2.300E-03,2.200E-03,2.100E-03,&
2.000E-03,1.900E-03,1.800E-03,1.700E-03,1.600E-03,1.500E-03,1.400E-03,1.300E-03,1.200E-03,1.100E-03,&
1.000E-03,1.900E-03,2.800E-03,3.700E-03,4.600E-03,5.500E-03,6.400E-03,7.300E-03,8.200E-03,9.100E-03,&
1.000E-02,1.090E-02,1.180E-02,1.270E-02,1.360E-02,1.450E-02,1.540E-02,1.630E-02,1.720E-02,1.810E-02,&
1.900E-02,1.810E-02,1.720E-02,1.630E-02,1.540E-02,1.450E-02,1.360E-02,1.270E-02,1.180E-02,1.090E-02,&
1.000E-02,9.100E-03,8.200E-03,7.300E-03,6.400E-03,5.500E-03,4.600E-03,3.700E-03,2.800E-03,1.900E-03/
DATA (Es(i),i=1501,1600)/&
1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,&
1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,&
1.000E-03,1.250E-03,1.500E-03,1.750E-03,2.000E-03,2.250E-03,2.500E-03,2.750E-03,3.000E-03,3.250E-03,&
3.500E-03,3.750E-03,4.000E-03,4.250E-03,4.500E-03,4.750E-03,5.000E-03,5.250E-03,5.500E-03,5.750E-03,&
6.000E-03,7.100E-03,8.200E-03,9.300E-03,1.040E-02,1.150E-02,1.260E-02,1.370E-02,1.480E-02,1.590E-02,&
1.700E-02,1.810E-02,1.920E-02,2.030E-02,2.140E-02,2.250E-02,2.360E-02,2.470E-02,2.580E-02,2.690E-02,&
2.800E-02,3.150E-02,3.500E-02,3.850E-02,4.200E-02,4.550E-02,4.900E-02,5.250E-02,5.600E-02,5.950E-02,&
6.300E-02,6.650E-02,7.000E-02,7.350E-02,7.700E-02,8.050E-02,8.400E-02,8.750E-02,9.100E-02,9.450E-02,&
9.800E-02,9.720E-02,9.640E-02,9.560E-02,9.480E-02,9.400E-02,9.320E-02,9.240E-02,9.160E-02,9.080E-02,&
9.000E-02,8.920E-02,8.840E-02,8.760E-02,8.680E-02,8.600E-02,8.520E-02,8.440E-02,8.360E-02,8.280E-02/
DATA (Es(i),i=1601,1700)/&
8.200E-02,8.015E-02,7.830E-02,7.645E-02,7.460E-02,7.275E-02,7.090E-02,6.905E-02,6.720E-02,6.535E-02,&
6.350E-02,6.165E-02,5.980E-02,5.795E-02,5.610E-02,5.425E-02,5.240E-02,5.055E-02,4.870E-02,4.685E-02,&
4.500E-02,4.965E-02,5.430E-02,5.895E-02,6.360E-02,6.825E-02,7.290E-02,7.755E-02,8.220E-02,8.685E-02,&
9.150E-02,9.615E-02,1.008E-01,1.055E-01,1.101E-01,1.148E-01,1.194E-01,1.241E-01,1.287E-01,1.334E-01,&
1.380E-01,1.363E-01,1.346E-01,1.329E-01,1.312E-01,1.295E-01,1.278E-01,1.261E-01,1.244E-01,1.227E-01,&
1.210E-01,1.193E-01,1.176E-01,1.159E-01,1.142E-01,1.125E-01,1.108E-01,1.091E-01,1.074E-01,1.057E-01,&
1.040E-01,1.054E-01,1.068E-01,1.082E-01,1.096E-01,1.110E-01,1.124E-01,1.138E-01,1.152E-01,1.166E-01,&
1.180E-01,1.194E-01,1.208E-01,1.222E-01,1.236E-01,1.250E-01,1.264E-01,1.278E-01,1.292E-01,1.306E-01,&
1.320E-01,1.317E-01,1.314E-01,1.311E-01,1.308E-01,1.305E-01,1.302E-01,1.299E-01,1.296E-01,1.293E-01,&
1.290E-01,1.287E-01,1.284E-01,1.281E-01,1.278E-01,1.275E-01,1.272E-01,1.269E-01,1.266E-01,1.263E-01/
DATA (Es(i),i=1701,1800)/&
1.260E-01,1.262E-01,1.264E-01,1.266E-01,1.268E-01,1.270E-01,1.272E-01,1.274E-01,1.276E-01,1.278E-01,&
1.280E-01,1.282E-01,1.284E-01,1.286E-01,1.288E-01,1.290E-01,1.292E-01,1.294E-01,1.296E-01,1.298E-01,&
1.300E-01,1.301E-01,1.301E-01,1.302E-01,1.302E-01,1.303E-01,1.303E-01,1.304E-01,1.304E-01,1.305E-01,&
1.305E-01,1.306E-01,1.306E-01,1.307E-01,1.307E-01,1.308E-01,1.308E-01,1.309E-01,1.309E-01,1.310E-01,&
1.310E-01,1.301E-01,1.292E-01,1.283E-01,1.274E-01,1.265E-01,1.256E-01,1.247E-01,1.238E-01,1.229E-01,&
1.220E-01,1.211E-01,1.202E-01,1.193E-01,1.184E-01,1.175E-01,1.166E-01,1.157E-01,1.148E-01,1.139E-01,&
1.130E-01,1.129E-01,1.127E-01,1.126E-01,1.124E-01,1.123E-01,1.121E-01,1.120E-01,1.118E-01,1.117E-01,&
1.115E-01,1.114E-01,1.112E-01,1.111E-01,1.109E-01,1.108E-01,1.106E-01,1.105E-01,1.103E-01,1.102E-01,&
1.100E-01,1.094E-01,1.087E-01,1.081E-01,1.074E-01,1.068E-01,1.061E-01,1.055E-01,1.048E-01,1.042E-01,&
1.035E-01,1.029E-01,1.022E-01,1.016E-01,1.009E-01,1.003E-01,9.960E-02,9.895E-02,9.830E-02,9.765E-02/
DATA (Es(i),i=1801,1900)/&
9.700E-02,9.775E-02,9.850E-02,9.925E-02,1.000E-01,1.008E-01,1.015E-01,1.023E-01,1.030E-01,1.038E-01,&
1.045E-01,1.053E-01,1.060E-01,1.068E-01,1.075E-01,1.083E-01,1.090E-01,1.098E-01,1.105E-01,1.113E-01,&
1.120E-01,1.118E-01,1.115E-01,1.113E-01,1.110E-01,1.108E-01,1.105E-01,1.103E-01,1.100E-01,1.098E-01,&
1.095E-01,1.093E-01,1.090E-01,1.088E-01,1.085E-01,1.083E-01,1.080E-01,1.078E-01,1.075E-01,1.073E-01,&
1.070E-01,1.065E-01,1.059E-01,1.054E-01,1.048E-01,1.043E-01,1.037E-01,1.032E-01,1.026E-01,1.021E-01,&
1.015E-01,1.010E-01,1.004E-01,9.985E-02,9.930E-02,9.875E-02,9.820E-02,9.765E-02,9.710E-02,9.655E-02,&
9.600E-02,9.590E-02,9.580E-02,9.570E-02,9.560E-02,9.550E-02,9.540E-02,9.530E-02,9.520E-02,9.510E-02,&
9.500E-02,9.490E-02,9.480E-02,9.470E-02,9.460E-02,9.450E-02,9.440E-02,9.430E-02,9.420E-02,9.410E-02,&
9.400E-02,9.355E-02,9.310E-02,9.265E-02,9.220E-02,9.175E-02,9.130E-02,9.085E-02,9.040E-02,8.995E-02,&
8.950E-02,8.905E-02,8.860E-02,8.815E-02,8.770E-02,8.725E-02,8.680E-02,8.635E-02,8.590E-02,8.545E-02/
DATA (Es(i),i=1901,2000)/&
8.500E-02,8.430E-02,8.360E-02,8.290E-02,8.220E-02,8.150E-02,8.080E-02,8.010E-02,7.940E-02,7.870E-02,&
7.800E-02,7.730E-02,7.660E-02,7.590E-02,7.520E-02,7.450E-02,7.380E-02,7.310E-02,7.240E-02,7.170E-02,&
7.100E-02,7.105E-02,7.110E-02,7.115E-02,7.120E-02,7.125E-02,7.130E-02,7.135E-02,7.140E-02,7.145E-02,&
7.150E-02,7.155E-02,7.160E-02,7.165E-02,7.170E-02,7.175E-02,7.180E-02,7.185E-02,7.190E-02,7.195E-02,&
7.200E-02,7.195E-02,7.190E-02,7.185E-02,7.180E-02,7.175E-02,7.170E-02,7.165E-02,7.160E-02,7.155E-02,&
7.150E-02,7.145E-02,7.140E-02,7.135E-02,7.130E-02,7.125E-02,7.120E-02,7.115E-02,7.110E-02,7.105E-02,&
7.100E-02,7.025E-02,6.950E-02,6.875E-02,6.800E-02,6.725E-02,6.650E-02,6.575E-02,6.500E-02,6.425E-02,&
6.350E-02,6.275E-02,6.200E-02,6.125E-02,6.050E-02,5.975E-02,5.900E-02,5.825E-02,5.750E-02,5.675E-02,&
5.600E-02,5.685E-02,5.770E-02,5.855E-02,5.940E-02,6.025E-02,6.110E-02,6.195E-02,6.280E-02,6.365E-02,&
6.450E-02,6.535E-02,6.620E-02,6.705E-02,6.790E-02,6.875E-02,6.960E-02,7.045E-02,7.130E-02,7.215E-02/
DATA (Es(i),i=2001,2101)/&
7.300E-02,7.085E-02,6.870E-02,6.655E-02,6.440E-02,6.225E-02,6.010E-02,5.795E-02,5.580E-02,5.365E-02,&
5.150E-02,4.935E-02,4.720E-02,4.505E-02,4.290E-02,4.075E-02,3.860E-02,3.645E-02,3.430E-02,3.215E-02,&
3.000E-02,3.195E-02,3.390E-02,3.585E-02,3.780E-02,3.975E-02,4.170E-02,4.365E-02,4.560E-02,4.755E-02,&
4.950E-02,5.145E-02,5.340E-02,5.535E-02,5.730E-02,5.925E-02,6.120E-02,6.315E-02,6.510E-02,6.705E-02,&
6.900E-02,6.755E-02,6.610E-02,6.465E-02,6.320E-02,6.175E-02,6.030E-02,5.885E-02,5.740E-02,5.595E-02,&
5.450E-02,5.305E-02,5.160E-02,5.015E-02,4.870E-02,4.725E-02,4.580E-02,4.435E-02,4.290E-02,4.145E-02,&
4.000E-02,3.960E-02,3.920E-02,3.880E-02,3.840E-02,3.800E-02,3.760E-02,3.720E-02,3.680E-02,3.640E-02,&
3.600E-02,3.560E-02,3.520E-02,3.480E-02,3.440E-02,3.400E-02,3.360E-02,3.320E-02,3.280E-02,3.240E-02,&
3.200E-02,3.110E-02,3.020E-02,2.930E-02,2.840E-02,2.750E-02,2.660E-02,2.570E-02,2.480E-02,2.390E-02,&
2.300E-02,2.210E-02,2.120E-02,2.030E-02,1.940E-02,1.850E-02,1.760E-02,1.670E-02,1.580E-02,1.490E-02,&
1.400E-02/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! diffuse energy from the sum !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (Ed(i),i=1,100)/&
2.413E+00,2.413E+00,2.414E+00,2.414E+00,2.414E+00,2.415E+00,2.415E+00,2.415E+00,2.415E+00,2.416E+00,&
2.416E+00,2.416E+00,2.417E+00,2.417E+00,2.417E+00,2.418E+00,2.418E+00,2.418E+00,2.418E+00,2.419E+00,&
2.419E+00,2.418E+00,2.417E+00,2.416E+00,2.415E+00,2.414E+00,2.413E+00,2.412E+00,2.411E+00,2.410E+00,&
2.410E+00,2.409E+00,2.408E+00,2.407E+00,2.406E+00,2.405E+00,2.404E+00,2.403E+00,2.402E+00,2.401E+00,&
2.400E+00,2.412E+00,2.424E+00,2.435E+00,2.447E+00,2.459E+00,2.471E+00,2.483E+00,2.494E+00,2.506E+00,&
2.518E+00,2.530E+00,2.542E+00,2.553E+00,2.565E+00,2.577E+00,2.589E+00,2.601E+00,2.612E+00,2.624E+00,&
2.636E+00,2.634E+00,2.632E+00,2.630E+00,2.628E+00,2.626E+00,2.623E+00,2.621E+00,2.619E+00,2.617E+00,&
2.615E+00,2.613E+00,2.611E+00,2.609E+00,2.607E+00,2.605E+00,2.602E+00,2.600E+00,2.598E+00,2.596E+00,&
2.594E+00,2.579E+00,2.565E+00,2.550E+00,2.535E+00,2.521E+00,2.506E+00,2.491E+00,2.477E+00,2.462E+00,&
2.448E+00,2.433E+00,2.418E+00,2.404E+00,2.389E+00,2.374E+00,2.360E+00,2.345E+00,2.330E+00,2.316E+00/
DATA (Ed(i),i=101,200)/&
2.301E+00,2.290E+00,2.279E+00,2.268E+00,2.257E+00,2.246E+00,2.234E+00,2.223E+00,2.212E+00,2.201E+00,&
2.190E+00,2.179E+00,2.168E+00,2.157E+00,2.146E+00,2.135E+00,2.123E+00,2.112E+00,2.101E+00,2.090E+00,&
2.079E+00,2.076E+00,2.073E+00,2.070E+00,2.067E+00,2.064E+00,2.061E+00,2.058E+00,2.055E+00,2.052E+00,&
2.049E+00,2.045E+00,2.042E+00,2.039E+00,2.036E+00,2.033E+00,2.030E+00,2.027E+00,2.024E+00,2.021E+00,&
2.018E+00,2.011E+00,2.004E+00,1.997E+00,1.990E+00,1.983E+00,1.975E+00,1.968E+00,1.961E+00,1.954E+00,&
1.947E+00,1.940E+00,1.933E+00,1.926E+00,1.919E+00,1.912E+00,1.904E+00,1.897E+00,1.890E+00,1.883E+00,&
1.876E+00,1.871E+00,1.867E+00,1.862E+00,1.857E+00,1.852E+00,1.848E+00,1.843E+00,1.838E+00,1.833E+00,&
1.829E+00,1.824E+00,1.819E+00,1.814E+00,1.810E+00,1.805E+00,1.800E+00,1.795E+00,1.791E+00,1.786E+00,&
1.781E+00,1.774E+00,1.767E+00,1.760E+00,1.752E+00,1.745E+00,1.738E+00,1.731E+00,1.724E+00,1.717E+00,&
1.710E+00,1.702E+00,1.695E+00,1.688E+00,1.681E+00,1.674E+00,1.667E+00,1.659E+00,1.652E+00,1.645E+00/
DATA (Ed(i),i=201,300)/&
1.638E+00,1.635E+00,1.631E+00,1.628E+00,1.624E+00,1.621E+00,1.617E+00,1.614E+00,1.610E+00,1.607E+00,&
1.604E+00,1.600E+00,1.597E+00,1.593E+00,1.590E+00,1.586E+00,1.583E+00,1.579E+00,1.576E+00,1.572E+00,&
1.569E+00,1.564E+00,1.560E+00,1.555E+00,1.550E+00,1.546E+00,1.541E+00,1.536E+00,1.532E+00,1.527E+00,&
1.523E+00,1.518E+00,1.513E+00,1.509E+00,1.504E+00,1.499E+00,1.495E+00,1.490E+00,1.485E+00,1.481E+00,&
1.476E+00,1.470E+00,1.464E+00,1.458E+00,1.451E+00,1.445E+00,1.439E+00,1.433E+00,1.427E+00,1.421E+00,&
1.415E+00,1.408E+00,1.402E+00,1.396E+00,1.390E+00,1.384E+00,1.378E+00,1.371E+00,1.365E+00,1.359E+00,&
1.353E+00,1.350E+00,1.347E+00,1.344E+00,1.340E+00,1.337E+00,1.334E+00,1.331E+00,1.328E+00,1.325E+00,&
1.322E+00,1.318E+00,1.315E+00,1.312E+00,1.309E+00,1.306E+00,1.303E+00,1.299E+00,1.296E+00,1.293E+00,&
1.290E+00,1.281E+00,1.271E+00,1.262E+00,1.253E+00,1.243E+00,1.234E+00,1.225E+00,1.215E+00,1.206E+00,&
1.197E+00,1.187E+00,1.178E+00,1.168E+00,1.159E+00,1.150E+00,1.140E+00,1.131E+00,1.122E+00,1.112E+00/
DATA (Ed(i),i=301,400)/&
1.103E+00,1.085E+00,1.068E+00,1.050E+00,1.032E+00,1.014E+00,9.965E-01,9.788E-01,9.610E-01,9.433E-01,&
9.255E-01,9.078E-01,8.900E-01,8.723E-01,8.545E-01,8.368E-01,8.190E-01,8.013E-01,7.835E-01,7.658E-01,&
7.480E-01,7.579E-01,7.677E-01,7.776E-01,7.874E-01,7.973E-01,8.071E-01,8.170E-01,8.268E-01,8.367E-01,&
8.465E-01,8.564E-01,8.662E-01,8.761E-01,8.859E-01,8.958E-01,9.056E-01,9.155E-01,9.253E-01,9.352E-01,&
9.450E-01,9.114E-01,8.777E-01,8.441E-01,8.104E-01,7.768E-01,7.431E-01,7.095E-01,6.758E-01,6.422E-01,&
6.085E-01,5.749E-01,5.412E-01,5.076E-01,4.739E-01,4.403E-01,4.066E-01,3.730E-01,3.393E-01,3.057E-01,&
2.720E-01,3.015E-01,3.310E-01,3.605E-01,3.900E-01,4.195E-01,4.490E-01,4.785E-01,5.080E-01,5.375E-01,&
5.670E-01,5.965E-01,6.260E-01,6.555E-01,6.850E-01,7.145E-01,7.440E-01,7.735E-01,8.030E-01,8.325E-01,&
8.620E-01,8.586E-01,8.552E-01,8.518E-01,8.484E-01,8.450E-01,8.416E-01,8.382E-01,8.348E-01,8.314E-01,&
8.280E-01,8.246E-01,8.212E-01,8.178E-01,8.144E-01,8.110E-01,8.076E-01,8.042E-01,8.008E-01,7.974E-01/
DATA (Ed(i),i=401,500)/&
7.940E-01,7.840E-01,7.739E-01,7.639E-01,7.538E-01,7.438E-01,7.337E-01,7.237E-01,7.136E-01,7.036E-01,&
6.935E-01,6.835E-01,6.734E-01,6.634E-01,6.533E-01,6.433E-01,6.332E-01,6.232E-01,6.131E-01,6.031E-01,&
5.930E-01,5.972E-01,6.013E-01,6.055E-01,6.096E-01,6.138E-01,6.179E-01,6.221E-01,6.262E-01,6.304E-01,&
6.345E-01,6.387E-01,6.428E-01,6.470E-01,6.511E-01,6.553E-01,6.594E-01,6.636E-01,6.677E-01,6.719E-01,&
6.760E-01,6.752E-01,6.744E-01,6.736E-01,6.728E-01,6.720E-01,6.712E-01,6.704E-01,6.696E-01,6.688E-01,&
6.680E-01,6.672E-01,6.664E-01,6.656E-01,6.648E-01,6.640E-01,6.632E-01,6.624E-01,6.616E-01,6.608E-01,&
6.600E-01,6.580E-01,6.560E-01,6.540E-01,6.520E-01,6.500E-01,6.480E-01,6.460E-01,6.440E-01,6.420E-01,&
6.400E-01,6.380E-01,6.360E-01,6.340E-01,6.320E-01,6.300E-01,6.280E-01,6.260E-01,6.240E-01,6.220E-01,&
6.200E-01,6.094E-01,5.988E-01,5.882E-01,5.776E-01,5.670E-01,5.564E-01,5.458E-01,5.352E-01,5.246E-01,&
5.140E-01,5.034E-01,4.928E-01,4.822E-01,4.716E-01,4.610E-01,4.504E-01,4.398E-01,4.292E-01,4.186E-01/
DATA (Ed(i),i=501,600)/&
4.080E-01,4.130E-01,4.179E-01,4.229E-01,4.278E-01,4.328E-01,4.377E-01,4.427E-01,4.476E-01,4.526E-01,&
4.575E-01,4.625E-01,4.674E-01,4.724E-01,4.773E-01,4.823E-01,4.872E-01,4.922E-01,4.971E-01,5.021E-01,&
5.070E-01,4.948E-01,4.826E-01,4.704E-01,4.582E-01,4.460E-01,4.338E-01,4.216E-01,4.094E-01,3.972E-01,&
3.850E-01,3.728E-01,3.606E-01,3.484E-01,3.362E-01,3.240E-01,3.118E-01,2.996E-01,2.874E-01,2.752E-01,&
2.630E-01,2.621E-01,2.611E-01,2.602E-01,2.592E-01,2.583E-01,2.573E-01,2.564E-01,2.554E-01,2.545E-01,&
2.535E-01,2.526E-01,2.516E-01,2.507E-01,2.497E-01,2.488E-01,2.478E-01,2.469E-01,2.459E-01,2.450E-01,&
2.440E-01,2.484E-01,2.528E-01,2.572E-01,2.616E-01,2.660E-01,2.704E-01,2.748E-01,2.792E-01,2.836E-01,&
2.880E-01,2.924E-01,2.968E-01,3.012E-01,3.056E-01,3.100E-01,3.144E-01,3.188E-01,3.232E-01,3.276E-01,&
3.320E-01,3.344E-01,3.368E-01,3.392E-01,3.416E-01,3.440E-01,3.464E-01,3.488E-01,3.512E-01,3.536E-01,&
3.560E-01,3.584E-01,3.608E-01,3.632E-01,3.656E-01,3.680E-01,3.704E-01,3.728E-01,3.752E-01,3.776E-01/
DATA (Ed(i),i=601,700)/&
3.800E-01,3.786E-01,3.771E-01,3.757E-01,3.742E-01,3.728E-01,3.713E-01,3.699E-01,3.684E-01,3.670E-01,&
3.655E-01,3.641E-01,3.626E-01,3.612E-01,3.597E-01,3.583E-01,3.568E-01,3.554E-01,3.539E-01,3.525E-01,&
3.510E-01,3.499E-01,3.487E-01,3.476E-01,3.464E-01,3.453E-01,3.441E-01,3.430E-01,3.418E-01,3.407E-01,&
3.395E-01,3.384E-01,3.372E-01,3.361E-01,3.349E-01,3.338E-01,3.326E-01,3.315E-01,3.303E-01,3.292E-01,&
3.280E-01,3.268E-01,3.256E-01,3.244E-01,3.232E-01,3.220E-01,3.208E-01,3.196E-01,3.184E-01,3.172E-01,&
3.160E-01,3.148E-01,3.136E-01,3.124E-01,3.112E-01,3.100E-01,3.088E-01,3.076E-01,3.064E-01,3.052E-01,&
3.040E-01,3.023E-01,3.006E-01,2.989E-01,2.972E-01,2.955E-01,2.938E-01,2.921E-01,2.904E-01,2.887E-01,&
2.870E-01,2.853E-01,2.836E-01,2.819E-01,2.802E-01,2.785E-01,2.768E-01,2.751E-01,2.734E-01,2.717E-01,&
2.700E-01,2.687E-01,2.674E-01,2.661E-01,2.648E-01,2.635E-01,2.622E-01,2.609E-01,2.596E-01,2.583E-01,&
2.570E-01,2.557E-01,2.544E-01,2.531E-01,2.518E-01,2.505E-01,2.492E-01,2.479E-01,2.466E-01,2.453E-01/
DATA (Ed(i),i=701,800)/&
2.440E-01,2.356E-01,2.271E-01,2.187E-01,2.102E-01,2.018E-01,1.933E-01,1.849E-01,1.764E-01,1.680E-01,&
1.595E-01,1.511E-01,1.426E-01,1.342E-01,1.257E-01,1.173E-01,1.088E-01,1.004E-01,9.190E-02,8.345E-02,&
7.500E-02,7.790E-02,8.080E-02,8.370E-02,8.660E-02,8.950E-02,9.240E-02,9.530E-02,9.820E-02,1.011E-01,&
1.040E-01,1.069E-01,1.098E-01,1.127E-01,1.156E-01,1.185E-01,1.214E-01,1.243E-01,1.272E-01,1.301E-01,&
1.330E-01,1.328E-01,1.326E-01,1.324E-01,1.322E-01,1.320E-01,1.318E-01,1.316E-01,1.314E-01,1.312E-01,&
1.310E-01,1.308E-01,1.306E-01,1.304E-01,1.302E-01,1.300E-01,1.298E-01,1.296E-01,1.294E-01,1.292E-01,&
1.290E-01,1.314E-01,1.337E-01,1.361E-01,1.384E-01,1.408E-01,1.431E-01,1.455E-01,1.478E-01,1.502E-01,&
1.525E-01,1.549E-01,1.572E-01,1.596E-01,1.619E-01,1.643E-01,1.666E-01,1.690E-01,1.713E-01,1.737E-01,&
1.760E-01,1.757E-01,1.754E-01,1.751E-01,1.748E-01,1.745E-01,1.742E-01,1.739E-01,1.736E-01,1.733E-01,&
1.730E-01,1.727E-01,1.724E-01,1.721E-01,1.718E-01,1.715E-01,1.712E-01,1.709E-01,1.706E-01,1.703E-01/
DATA (Ed(i),i=801,900)/&
1.700E-01,1.707E-01,1.713E-01,1.720E-01,1.726E-01,1.733E-01,1.739E-01,1.746E-01,1.752E-01,1.759E-01,&
1.765E-01,1.772E-01,1.778E-01,1.785E-01,1.791E-01,1.798E-01,1.804E-01,1.811E-01,1.817E-01,1.824E-01,&
1.830E-01,1.826E-01,1.822E-01,1.818E-01,1.814E-01,1.810E-01,1.806E-01,1.802E-01,1.798E-01,1.794E-01,&
1.790E-01,1.786E-01,1.782E-01,1.778E-01,1.774E-01,1.770E-01,1.766E-01,1.762E-01,1.758E-01,1.754E-01,&
1.750E-01,1.742E-01,1.734E-01,1.726E-01,1.718E-01,1.710E-01,1.702E-01,1.694E-01,1.686E-01,1.678E-01,&
1.670E-01,1.662E-01,1.654E-01,1.646E-01,1.638E-01,1.630E-01,1.622E-01,1.614E-01,1.606E-01,1.598E-01,&
1.590E-01,1.585E-01,1.579E-01,1.574E-01,1.568E-01,1.563E-01,1.557E-01,1.552E-01,1.546E-01,1.541E-01,&
1.535E-01,1.530E-01,1.524E-01,1.519E-01,1.513E-01,1.508E-01,1.502E-01,1.497E-01,1.491E-01,1.486E-01,&
1.480E-01,1.472E-01,1.463E-01,1.455E-01,1.446E-01,1.438E-01,1.429E-01,1.421E-01,1.412E-01,1.404E-01,&
1.395E-01,1.387E-01,1.378E-01,1.370E-01,1.361E-01,1.353E-01,1.344E-01,1.336E-01,1.327E-01,1.319E-01/
DATA (Ed(i),i=901,1000)/&
1.310E-01,1.291E-01,1.271E-01,1.252E-01,1.232E-01,1.213E-01,1.193E-01,1.174E-01,1.154E-01,1.135E-01,&
1.115E-01,1.096E-01,1.076E-01,1.057E-01,1.037E-01,1.018E-01,9.980E-02,9.785E-02,9.590E-02,9.395E-02,&
9.200E-02,9.110E-02,9.020E-02,8.930E-02,8.840E-02,8.750E-02,8.660E-02,8.570E-02,8.480E-02,8.390E-02,&
8.300E-02,8.210E-02,8.120E-02,8.030E-02,7.940E-02,7.850E-02,7.760E-02,7.670E-02,7.580E-02,7.490E-02,&
7.400E-02,7.040E-02,6.680E-02,6.320E-02,5.960E-02,5.600E-02,5.240E-02,4.880E-02,4.520E-02,4.160E-02,&
3.800E-02,3.440E-02,3.080E-02,2.720E-02,2.360E-02,2.000E-02,1.640E-02,1.280E-02,9.200E-03,5.600E-03,&
2.000E-03,1.950E-03,1.900E-03,1.850E-03,1.800E-03,1.750E-03,1.700E-03,1.650E-03,1.600E-03,1.550E-03,&
1.500E-03,1.450E-03,1.400E-03,1.350E-03,1.300E-03,1.250E-03,1.200E-03,1.150E-03,1.100E-03,1.050E-03,&
1.000E-03,1.250E-03,1.500E-03,1.750E-03,2.000E-03,2.250E-03,2.500E-03,2.750E-03,3.000E-03,3.250E-03,&
3.500E-03,3.750E-03,4.000E-03,4.250E-03,4.500E-03,4.750E-03,5.000E-03,5.250E-03,5.500E-03,5.750E-03/
DATA (Ed(i),i=1001,1100)/&
6.000E-03,5.950E-03,5.900E-03,5.850E-03,5.800E-03,5.750E-03,5.700E-03,5.650E-03,5.600E-03,5.550E-03,&
5.500E-03,5.450E-03,5.400E-03,5.350E-03,5.300E-03,5.250E-03,5.200E-03,5.150E-03,5.100E-03,5.050E-03,&
5.000E-03,5.450E-03,5.900E-03,6.350E-03,6.800E-03,7.250E-03,7.700E-03,8.150E-03,8.600E-03,9.050E-03,&
9.500E-03,9.950E-03,1.040E-02,1.085E-02,1.130E-02,1.175E-02,1.220E-02,1.265E-02,1.310E-02,1.355E-02,&
1.400E-02,1.505E-02,1.610E-02,1.715E-02,1.820E-02,1.925E-02,2.030E-02,2.135E-02,2.240E-02,2.345E-02,&
2.450E-02,2.555E-02,2.660E-02,2.765E-02,2.870E-02,2.975E-02,3.080E-02,3.185E-02,3.290E-02,3.395E-02,&
3.500E-02,3.495E-02,3.490E-02,3.485E-02,3.480E-02,3.475E-02,3.470E-02,3.465E-02,3.460E-02,3.455E-02,&
3.450E-02,3.445E-02,3.440E-02,3.435E-02,3.430E-02,3.425E-02,3.420E-02,3.415E-02,3.410E-02,3.405E-02,&
3.400E-02,3.690E-02,3.980E-02,4.270E-02,4.560E-02,4.850E-02,5.140E-02,5.430E-02,5.720E-02,6.010E-02,&
6.300E-02,6.590E-02,6.880E-02,7.170E-02,7.460E-02,7.750E-02,8.040E-02,8.330E-02,8.620E-02,8.910E-02/
DATA (Ed(i),i=1101,1200)/&
9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,&
9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,9.200E-02,&
9.200E-02,9.210E-02,9.220E-02,9.230E-02,9.240E-02,9.250E-02,9.260E-02,9.270E-02,9.280E-02,9.290E-02,&
9.300E-02,9.310E-02,9.320E-02,9.330E-02,9.340E-02,9.350E-02,9.360E-02,9.370E-02,9.380E-02,9.390E-02,&
9.400E-02,9.390E-02,9.380E-02,9.370E-02,9.360E-02,9.350E-02,9.340E-02,9.330E-02,9.320E-02,9.310E-02,&
9.300E-02,9.290E-02,9.280E-02,9.270E-02,9.260E-02,9.250E-02,9.240E-02,9.230E-02,9.220E-02,9.210E-02,&
9.200E-02,9.155E-02,9.110E-02,9.065E-02,9.020E-02,8.975E-02,8.930E-02,8.885E-02,8.840E-02,8.795E-02,&
8.750E-02,8.705E-02,8.660E-02,8.615E-02,8.570E-02,8.525E-02,8.480E-02,8.435E-02,8.390E-02,8.345E-02,&
8.300E-02,8.290E-02,8.280E-02,8.270E-02,8.260E-02,8.250E-02,8.240E-02,8.230E-02,8.220E-02,8.210E-02,&
8.200E-02,8.190E-02,8.180E-02,8.170E-02,8.160E-02,8.150E-02,8.140E-02,8.130E-02,8.120E-02,8.110E-02/
DATA (Ed(i),i=1201,1300)/&
8.100E-02,8.085E-02,8.070E-02,8.055E-02,8.040E-02,8.025E-02,8.010E-02,7.995E-02,7.980E-02,7.965E-02,&
7.950E-02,7.935E-02,7.920E-02,7.905E-02,7.890E-02,7.875E-02,7.860E-02,7.845E-02,7.830E-02,7.815E-02,&
7.800E-02,7.770E-02,7.740E-02,7.710E-02,7.680E-02,7.650E-02,7.620E-02,7.590E-02,7.560E-02,7.530E-02,&
7.500E-02,7.470E-02,7.440E-02,7.410E-02,7.380E-02,7.350E-02,7.320E-02,7.290E-02,7.260E-02,7.230E-02,&
7.200E-02,7.190E-02,7.180E-02,7.170E-02,7.160E-02,7.150E-02,7.140E-02,7.130E-02,7.120E-02,7.110E-02,&
7.100E-02,7.090E-02,7.080E-02,7.070E-02,7.060E-02,7.050E-02,7.040E-02,7.030E-02,7.020E-02,7.010E-02,&
7.000E-02,6.975E-02,6.950E-02,6.925E-02,6.900E-02,6.875E-02,6.850E-02,6.825E-02,6.800E-02,6.775E-02,&
6.750E-02,6.725E-02,6.700E-02,6.675E-02,6.650E-02,6.625E-02,6.600E-02,6.575E-02,6.550E-02,6.525E-02,&
6.500E-02,6.480E-02,6.460E-02,6.440E-02,6.420E-02,6.400E-02,6.380E-02,6.360E-02,6.340E-02,6.320E-02,&
6.300E-02,6.280E-02,6.260E-02,6.240E-02,6.220E-02,6.200E-02,6.180E-02,6.160E-02,6.140E-02,6.120E-02/
DATA (Ed(i),i=1301,1400)/&
6.100E-02,6.090E-02,6.080E-02,6.070E-02,6.060E-02,6.050E-02,6.040E-02,6.030E-02,6.020E-02,6.010E-02,&
6.000E-02,5.990E-02,5.980E-02,5.970E-02,5.960E-02,5.950E-02,5.940E-02,5.930E-02,5.920E-02,5.910E-02,&
5.900E-02,5.830E-02,5.760E-02,5.690E-02,5.620E-02,5.550E-02,5.480E-02,5.410E-02,5.340E-02,5.270E-02,&
5.200E-02,5.130E-02,5.060E-02,4.990E-02,4.920E-02,4.850E-02,4.780E-02,4.710E-02,4.640E-02,4.570E-02,&
4.500E-02,4.535E-02,4.570E-02,4.605E-02,4.640E-02,4.675E-02,4.710E-02,4.745E-02,4.780E-02,4.815E-02,&
4.850E-02,4.885E-02,4.920E-02,4.955E-02,4.990E-02,5.025E-02,5.060E-02,5.095E-02,5.130E-02,5.165E-02,&
5.200E-02,5.085E-02,4.970E-02,4.855E-02,4.740E-02,4.625E-02,4.510E-02,4.395E-02,4.280E-02,4.165E-02,&
4.050E-02,3.935E-02,3.820E-02,3.705E-02,3.590E-02,3.475E-02,3.360E-02,3.245E-02,3.130E-02,3.015E-02,&
2.900E-02,2.790E-02,2.680E-02,2.570E-02,2.460E-02,2.350E-02,2.240E-02,2.130E-02,2.020E-02,1.910E-02,&
1.800E-02,1.690E-02,1.580E-02,1.470E-02,1.360E-02,1.250E-02,1.140E-02,1.030E-02,9.200E-03,8.100E-03/
DATA (Ed(i),i=1401,1500)/&
7.000E-03,6.700E-03,6.400E-03,6.100E-03,5.800E-03,5.500E-03,5.200E-03,4.900E-03,4.600E-03,4.300E-03,&
4.000E-03,3.700E-03,3.400E-03,3.100E-03,2.800E-03,2.500E-03,2.200E-03,1.900E-03,1.600E-03,1.300E-03,&
1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,&
1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,1.000E-03,&
1.000E-03,9.500E-04,9.000E-04,8.500E-04,8.000E-04,7.500E-04,7.000E-04,6.500E-04,6.000E-04,5.500E-04,&
5.000E-04,4.500E-04,4.000E-04,3.500E-04,3.000E-04,2.500E-04,2.000E-04,1.500E-04,1.000E-04,5.000E-05,&
0.335E-17,2.000E-04,4.000E-04,6.000E-04,8.000E-04,1.000E-03,1.200E-03,1.400E-03,1.600E-03,1.800E-03,&
2.000E-03,2.200E-03,2.400E-03,2.600E-03,2.800E-03,3.000E-03,3.200E-03,3.400E-03,3.600E-03,3.800E-03,&
4.000E-03,3.800E-03,3.600E-03,3.400E-03,3.200E-03,3.000E-03,2.800E-03,2.600E-03,2.400E-03,2.200E-03,&
2.000E-03,1.800E-03,1.600E-03,1.400E-03,1.200E-03,1.000E-03,8.000E-04,6.000E-04,4.000E-04,2.000E-04/
DATA (Ed(i),i=1501,1600)/&
0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,&
0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,0.000E+00,&
0.000E+00,5.000E-05,1.000E-04,1.500E-04,2.000E-04,2.500E-04,3.000E-04,3.500E-04,4.000E-04,4.500E-04,&
5.000E-04,5.500E-04,6.000E-04,6.500E-04,7.000E-04,7.500E-04,8.000E-04,8.500E-04,9.000E-04,9.500E-04,&
1.000E-03,1.200E-03,1.400E-03,1.600E-03,1.800E-03,2.000E-03,2.200E-03,2.400E-03,2.600E-03,2.800E-03,&
3.000E-03,3.200E-03,3.400E-03,3.600E-03,3.800E-03,4.000E-03,4.200E-03,4.400E-03,4.600E-03,4.800E-03,&
5.000E-03,5.650E-03,6.300E-03,6.950E-03,7.600E-03,8.250E-03,8.900E-03,9.550E-03,1.020E-02,1.085E-02,&
1.150E-02,1.215E-02,1.280E-02,1.345E-02,1.410E-02,1.475E-02,1.540E-02,1.605E-02,1.670E-02,1.735E-02,&
1.800E-02,1.785E-02,1.770E-02,1.755E-02,1.740E-02,1.725E-02,1.710E-02,1.695E-02,1.680E-02,1.665E-02,&
1.650E-02,1.635E-02,1.620E-02,1.605E-02,1.590E-02,1.575E-02,1.560E-02,1.545E-02,1.530E-02,1.515E-02/
DATA (Ed(i),i=1601,1700)/&
1.500E-02,1.465E-02,1.430E-02,1.395E-02,1.360E-02,1.325E-02,1.290E-02,1.255E-02,1.220E-02,1.185E-02,&
1.150E-02,1.115E-02,1.080E-02,1.045E-02,1.010E-02,9.750E-03,9.400E-03,9.050E-03,8.700E-03,8.350E-03,&
8.000E-03,8.800E-03,9.600E-03,1.040E-02,1.120E-02,1.200E-02,1.280E-02,1.360E-02,1.440E-02,1.520E-02,&
1.600E-02,1.680E-02,1.760E-02,1.840E-02,1.920E-02,2.000E-02,2.080E-02,2.160E-02,2.240E-02,2.320E-02,&
2.400E-02,2.370E-02,2.340E-02,2.310E-02,2.280E-02,2.250E-02,2.220E-02,2.190E-02,2.160E-02,2.130E-02,&
2.100E-02,2.070E-02,2.040E-02,2.010E-02,1.980E-02,1.950E-02,1.920E-02,1.890E-02,1.860E-02,1.830E-02,&
1.800E-02,1.825E-02,1.850E-02,1.875E-02,1.900E-02,1.925E-02,1.950E-02,1.975E-02,2.000E-02,2.025E-02,&
2.050E-02,2.075E-02,2.100E-02,2.125E-02,2.150E-02,2.175E-02,2.200E-02,2.225E-02,2.250E-02,2.275E-02,&
2.300E-02,2.290E-02,2.280E-02,2.270E-02,2.260E-02,2.250E-02,2.240E-02,2.230E-02,2.220E-02,2.210E-02,&
2.200E-02,2.190E-02,2.180E-02,2.170E-02,2.160E-02,2.150E-02,2.140E-02,2.130E-02,2.120E-02,2.110E-02/
DATA (Ed(i),i=1701,1800)/&
2.100E-02,2.105E-02,2.110E-02,2.115E-02,2.120E-02,2.125E-02,2.130E-02,2.135E-02,2.140E-02,2.145E-02,&
2.150E-02,2.155E-02,2.160E-02,2.165E-02,2.170E-02,2.175E-02,2.180E-02,2.185E-02,2.190E-02,2.195E-02,&
2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,&
2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,2.200E-02,&
2.200E-02,2.185E-02,2.170E-02,2.155E-02,2.140E-02,2.125E-02,2.110E-02,2.095E-02,2.080E-02,2.065E-02,&
2.050E-02,2.035E-02,2.020E-02,2.005E-02,1.990E-02,1.975E-02,1.960E-02,1.945E-02,1.930E-02,1.915E-02,&
1.900E-02,1.895E-02,1.890E-02,1.885E-02,1.880E-02,1.875E-02,1.870E-02,1.865E-02,1.860E-02,1.855E-02,&
1.850E-02,1.845E-02,1.840E-02,1.835E-02,1.830E-02,1.825E-02,1.820E-02,1.815E-02,1.810E-02,1.805E-02,&
1.800E-02,1.790E-02,1.780E-02,1.770E-02,1.760E-02,1.750E-02,1.740E-02,1.730E-02,1.720E-02,1.710E-02,&
1.700E-02,1.690E-02,1.680E-02,1.670E-02,1.660E-02,1.650E-02,1.640E-02,1.630E-02,1.620E-02,1.610E-02/
DATA (Ed(i),i=1801,1900)/&
1.600E-02,1.610E-02,1.620E-02,1.630E-02,1.640E-02,1.650E-02,1.660E-02,1.670E-02,1.680E-02,1.690E-02,&
1.700E-02,1.710E-02,1.720E-02,1.730E-02,1.740E-02,1.750E-02,1.760E-02,1.770E-02,1.780E-02,1.790E-02,&
1.800E-02,1.795E-02,1.790E-02,1.785E-02,1.780E-02,1.775E-02,1.770E-02,1.765E-02,1.760E-02,1.755E-02,&
1.750E-02,1.745E-02,1.740E-02,1.735E-02,1.730E-02,1.725E-02,1.720E-02,1.715E-02,1.710E-02,1.705E-02,&
1.700E-02,1.695E-02,1.690E-02,1.685E-02,1.680E-02,1.675E-02,1.670E-02,1.665E-02,1.660E-02,1.655E-02,&
1.650E-02,1.645E-02,1.640E-02,1.635E-02,1.630E-02,1.625E-02,1.620E-02,1.615E-02,1.610E-02,1.605E-02,&
1.600E-02,1.595E-02,1.590E-02,1.585E-02,1.580E-02,1.575E-02,1.570E-02,1.565E-02,1.560E-02,1.555E-02,&
1.550E-02,1.545E-02,1.540E-02,1.535E-02,1.530E-02,1.525E-02,1.520E-02,1.515E-02,1.510E-02,1.505E-02,&
1.500E-02,1.495E-02,1.490E-02,1.485E-02,1.480E-02,1.475E-02,1.470E-02,1.465E-02,1.460E-02,1.455E-02,&
1.450E-02,1.445E-02,1.440E-02,1.435E-02,1.430E-02,1.425E-02,1.420E-02,1.415E-02,1.410E-02,1.405E-02/
DATA (Ed(i),i=1901,2000)/&
1.400E-02,1.385E-02,1.370E-02,1.355E-02,1.340E-02,1.325E-02,1.310E-02,1.295E-02,1.280E-02,1.265E-02,&
1.250E-02,1.235E-02,1.220E-02,1.205E-02,1.190E-02,1.175E-02,1.160E-02,1.145E-02,1.130E-02,1.115E-02,&
1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,&
1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,&
1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,&
1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,1.100E-02,&
1.100E-02,1.090E-02,1.080E-02,1.070E-02,1.060E-02,1.050E-02,1.040E-02,1.030E-02,1.020E-02,1.010E-02,&
1.000E-02,9.900E-03,9.800E-03,9.700E-03,9.600E-03,9.500E-03,9.400E-03,9.300E-03,9.200E-03,9.100E-03,&
9.000E-03,9.100E-03,9.200E-03,9.300E-03,9.400E-03,9.500E-03,9.600E-03,9.700E-03,9.800E-03,9.900E-03,&
1.000E-02,1.010E-02,1.020E-02,1.030E-02,1.040E-02,1.050E-02,1.060E-02,1.070E-02,1.080E-02,1.090E-02/
DATA (Ed(i),i=2001,2101)/&
1.100E-02,1.070E-02,1.040E-02,1.010E-02,9.800E-03,9.500E-03,9.200E-03,8.900E-03,8.600E-03,8.300E-03,&
8.000E-03,7.700E-03,7.400E-03,7.100E-03,6.800E-03,6.500E-03,6.200E-03,5.900E-03,5.600E-03,5.300E-03,&
5.000E-03,5.250E-03,5.500E-03,5.750E-03,6.000E-03,6.250E-03,6.500E-03,6.750E-03,7.000E-03,7.250E-03,&
7.500E-03,7.750E-03,8.000E-03,8.250E-03,8.500E-03,8.750E-03,9.000E-03,9.250E-03,9.500E-03,9.750E-03,&
1.000E-02,9.800E-03,9.600E-03,9.400E-03,9.200E-03,9.000E-03,8.800E-03,8.600E-03,8.400E-03,8.200E-03,&
8.000E-03,7.800E-03,7.600E-03,7.400E-03,7.200E-03,7.000E-03,6.800E-03,6.600E-03,6.400E-03,6.200E-03,&
6.000E-03,5.950E-03,5.900E-03,5.850E-03,5.800E-03,5.750E-03,5.700E-03,5.650E-03,5.600E-03,5.550E-03,&
5.500E-03,5.450E-03,5.400E-03,5.350E-03,5.300E-03,5.250E-03,5.200E-03,5.150E-03,5.100E-03,5.050E-03,&
5.000E-03,4.850E-03,4.700E-03,4.550E-03,4.400E-03,4.250E-03,4.100E-03,3.950E-03,3.800E-03,3.650E-03,&
3.500E-03,3.350E-03,3.200E-03,3.050E-03,2.900E-03,2.750E-03,2.600E-03,2.450E-03,2.300E-03,2.150E-03,&
2.000E-03/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! soil 1 reflectance !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (Rsoil1(i),i=1,100)/&
2.377E-01,2.373E-01,2.369E-01,2.365E-01,2.360E-01,2.356E-01,2.352E-01,2.348E-01,2.344E-01,2.340E-01,&
2.336E-01,2.332E-01,2.328E-01,2.324E-01,2.320E-01,2.316E-01,2.312E-01,2.308E-01,2.304E-01,2.299E-01,&
2.295E-01,2.290E-01,2.284E-01,2.278E-01,2.272E-01,2.267E-01,2.263E-01,2.259E-01,2.254E-01,2.250E-01,&
2.246E-01,2.243E-01,2.240E-01,2.237E-01,2.234E-01,2.231E-01,2.228E-01,2.224E-01,2.221E-01,2.218E-01,&
2.215E-01,2.215E-01,2.215E-01,2.215E-01,2.215E-01,2.214E-01,2.215E-01,2.215E-01,2.216E-01,2.217E-01,&
2.217E-01,2.218E-01,2.219E-01,2.219E-01,2.220E-01,2.221E-01,2.222E-01,2.224E-01,2.225E-01,2.227E-01,&
2.228E-01,2.230E-01,2.231E-01,2.233E-01,2.234E-01,2.236E-01,2.238E-01,2.240E-01,2.241E-01,2.243E-01,&
2.245E-01,2.247E-01,2.249E-01,2.251E-01,2.253E-01,2.256E-01,2.258E-01,2.260E-01,2.262E-01,2.264E-01,&
2.266E-01,2.269E-01,2.271E-01,2.273E-01,2.276E-01,2.278E-01,2.280E-01,2.283E-01,2.285E-01,2.287E-01,&
2.289E-01,2.293E-01,2.297E-01,2.302E-01,2.306E-01,2.310E-01,2.314E-01,2.319E-01,2.324E-01,2.329E-01/
DATA (Rsoil1(i),i=101,200)/&
2.333E-01,2.340E-01,2.346E-01,2.353E-01,2.359E-01,2.366E-01,2.371E-01,2.376E-01,2.381E-01,2.387E-01,&
2.392E-01,2.396E-01,2.400E-01,2.405E-01,2.409E-01,2.413E-01,2.418E-01,2.422E-01,2.427E-01,2.431E-01,&
2.436E-01,2.441E-01,2.446E-01,2.452E-01,2.457E-01,2.462E-01,2.467E-01,2.472E-01,2.477E-01,2.482E-01,&
2.487E-01,2.492E-01,2.497E-01,2.502E-01,2.507E-01,2.512E-01,2.517E-01,2.522E-01,2.527E-01,2.532E-01,&
2.537E-01,2.542E-01,2.546E-01,2.551E-01,2.556E-01,2.561E-01,2.566E-01,2.571E-01,2.576E-01,2.582E-01,&
2.587E-01,2.592E-01,2.598E-01,2.603E-01,2.609E-01,2.614E-01,2.620E-01,2.626E-01,2.631E-01,2.637E-01,&
2.642E-01,2.647E-01,2.651E-01,2.655E-01,2.659E-01,2.664E-01,2.667E-01,2.671E-01,2.675E-01,2.679E-01,&
2.683E-01,2.686E-01,2.690E-01,2.694E-01,2.698E-01,2.702E-01,2.707E-01,2.712E-01,2.717E-01,2.722E-01,&
2.727E-01,2.732E-01,2.737E-01,2.742E-01,2.747E-01,2.752E-01,2.757E-01,2.762E-01,2.767E-01,2.772E-01,&
2.777E-01,2.782E-01,2.787E-01,2.792E-01,2.797E-01,2.802E-01,2.807E-01,2.812E-01,2.818E-01,2.823E-01/
DATA (Rsoil1(i),i=201,300)/&
2.828E-01,2.833E-01,2.838E-01,2.844E-01,2.849E-01,2.855E-01,2.861E-01,2.866E-01,2.872E-01,2.878E-01,&
2.884E-01,2.890E-01,2.896E-01,2.902E-01,2.908E-01,2.913E-01,2.919E-01,2.924E-01,2.929E-01,2.934E-01,&
2.939E-01,2.944E-01,2.950E-01,2.955E-01,2.960E-01,2.965E-01,2.970E-01,2.975E-01,2.980E-01,2.985E-01,&
2.990E-01,2.995E-01,2.999E-01,3.004E-01,3.008E-01,3.013E-01,3.017E-01,3.022E-01,3.026E-01,3.030E-01,&
3.035E-01,3.039E-01,3.044E-01,3.048E-01,3.053E-01,3.057E-01,3.062E-01,3.066E-01,3.071E-01,3.075E-01,&
3.080E-01,3.086E-01,3.092E-01,3.097E-01,3.103E-01,3.109E-01,3.117E-01,3.125E-01,3.133E-01,3.141E-01,&
3.149E-01,3.156E-01,3.162E-01,3.169E-01,3.176E-01,3.182E-01,3.188E-01,3.193E-01,3.199E-01,3.204E-01,&
3.210E-01,3.216E-01,3.221E-01,3.227E-01,3.232E-01,3.238E-01,3.244E-01,3.250E-01,3.257E-01,3.263E-01,&
3.269E-01,3.275E-01,3.281E-01,3.288E-01,3.294E-01,3.300E-01,3.304E-01,3.309E-01,3.313E-01,3.317E-01,&
3.322E-01,3.325E-01,3.328E-01,3.332E-01,3.335E-01,3.338E-01,3.341E-01,3.345E-01,3.348E-01,3.351E-01/
DATA (Rsoil1(i),i=301,400)/&
3.355E-01,3.361E-01,3.367E-01,3.373E-01,3.379E-01,3.385E-01,3.392E-01,3.398E-01,3.405E-01,3.412E-01,&
3.418E-01,3.424E-01,3.429E-01,3.434E-01,3.439E-01,3.444E-01,3.449E-01,3.454E-01,3.459E-01,3.464E-01,&
3.469E-01,3.476E-01,3.483E-01,3.490E-01,3.497E-01,3.504E-01,3.512E-01,3.520E-01,3.529E-01,3.537E-01,&
3.545E-01,3.549E-01,3.553E-01,3.557E-01,3.560E-01,3.564E-01,3.568E-01,3.572E-01,3.575E-01,3.579E-01,&
3.583E-01,3.587E-01,3.591E-01,3.595E-01,3.600E-01,3.604E-01,3.609E-01,3.615E-01,3.621E-01,3.626E-01,&
3.632E-01,3.637E-01,3.643E-01,3.649E-01,3.654E-01,3.660E-01,3.667E-01,3.674E-01,3.681E-01,3.688E-01,&
3.695E-01,3.700E-01,3.705E-01,3.710E-01,3.715E-01,3.719E-01,3.722E-01,3.725E-01,3.728E-01,3.731E-01,&
3.733E-01,3.736E-01,3.739E-01,3.742E-01,3.745E-01,3.747E-01,3.752E-01,3.758E-01,3.763E-01,3.768E-01,&
3.773E-01,3.778E-01,3.783E-01,3.789E-01,3.794E-01,3.799E-01,3.804E-01,3.808E-01,3.813E-01,3.817E-01,&
3.822E-01,3.825E-01,3.829E-01,3.832E-01,3.836E-01,3.839E-01,3.843E-01,3.846E-01,3.850E-01,3.853E-01/
DATA (Rsoil1(i),i=401,500)/&
3.857E-01,3.862E-01,3.866E-01,3.871E-01,3.876E-01,3.881E-01,3.886E-01,3.891E-01,3.896E-01,3.901E-01,&
3.906E-01,3.911E-01,3.917E-01,3.922E-01,3.928E-01,3.933E-01,3.939E-01,3.944E-01,3.950E-01,3.955E-01,&
3.961E-01,3.966E-01,3.971E-01,3.976E-01,3.981E-01,3.987E-01,3.991E-01,3.995E-01,3.999E-01,4.003E-01,&
4.007E-01,4.010E-01,4.013E-01,4.015E-01,4.018E-01,4.021E-01,4.023E-01,4.026E-01,4.028E-01,4.031E-01,&
4.034E-01,4.038E-01,4.043E-01,4.047E-01,4.052E-01,4.056E-01,4.061E-01,4.066E-01,4.070E-01,4.075E-01,&
4.079E-01,4.082E-01,4.085E-01,4.088E-01,4.091E-01,4.094E-01,4.097E-01,4.099E-01,4.102E-01,4.104E-01,&
4.107E-01,4.110E-01,4.113E-01,4.116E-01,4.119E-01,4.122E-01,4.128E-01,4.134E-01,4.139E-01,4.145E-01,&
4.150E-01,4.155E-01,4.160E-01,4.165E-01,4.170E-01,4.175E-01,4.178E-01,4.182E-01,4.185E-01,4.189E-01,&
4.193E-01,4.196E-01,4.200E-01,4.204E-01,4.207E-01,4.211E-01,4.214E-01,4.217E-01,4.221E-01,4.224E-01,&
4.227E-01,4.230E-01,4.232E-01,4.235E-01,4.237E-01,4.240E-01,4.242E-01,4.245E-01,4.247E-01,4.250E-01/
DATA (Rsoil1(i),i=501,600)/&
4.252E-01,4.255E-01,4.258E-01,4.261E-01,4.264E-01,4.267E-01,4.270E-01,4.274E-01,4.277E-01,4.281E-01,&
4.284E-01,4.288E-01,4.291E-01,4.295E-01,4.298E-01,4.302E-01,4.306E-01,4.311E-01,4.315E-01,4.319E-01,&
4.324E-01,4.328E-01,4.333E-01,4.337E-01,4.342E-01,4.346E-01,4.351E-01,4.355E-01,4.359E-01,4.364E-01,&
4.368E-01,4.370E-01,4.373E-01,4.376E-01,4.378E-01,4.381E-01,4.384E-01,4.386E-01,4.389E-01,4.392E-01,&
4.394E-01,4.397E-01,4.400E-01,4.403E-01,4.406E-01,4.409E-01,4.412E-01,4.415E-01,4.418E-01,4.421E-01,&
4.424E-01,4.427E-01,4.430E-01,4.433E-01,4.436E-01,4.438E-01,4.439E-01,4.440E-01,4.441E-01,4.441E-01,&
4.442E-01,4.443E-01,4.444E-01,4.445E-01,4.446E-01,4.447E-01,4.450E-01,4.453E-01,4.456E-01,4.459E-01,&
4.462E-01,4.465E-01,4.468E-01,4.471E-01,4.474E-01,4.477E-01,4.479E-01,4.482E-01,4.485E-01,4.487E-01,&
4.490E-01,4.492E-01,4.495E-01,4.497E-01,4.500E-01,4.502E-01,4.506E-01,4.509E-01,4.513E-01,4.516E-01,&
4.520E-01,4.525E-01,4.530E-01,4.535E-01,4.540E-01,4.545E-01,4.549E-01,4.553E-01,4.557E-01,4.561E-01/
DATA (Rsoil1(i),i=601,700)/&
4.565E-01,4.568E-01,4.571E-01,4.573E-01,4.576E-01,4.579E-01,4.579E-01,4.580E-01,4.580E-01,4.580E-01,&
4.581E-01,4.582E-01,4.583E-01,4.584E-01,4.585E-01,4.586E-01,4.589E-01,4.591E-01,4.594E-01,4.597E-01,&
4.599E-01,4.602E-01,4.604E-01,4.606E-01,4.608E-01,4.611E-01,4.613E-01,4.614E-01,4.616E-01,4.618E-01,&
4.620E-01,4.622E-01,4.623E-01,4.625E-01,4.627E-01,4.628E-01,4.628E-01,4.627E-01,4.627E-01,4.627E-01,&
4.626E-01,4.626E-01,4.626E-01,4.625E-01,4.625E-01,4.625E-01,4.625E-01,4.626E-01,4.626E-01,4.627E-01,&
4.628E-01,4.628E-01,4.629E-01,4.630E-01,4.631E-01,4.631E-01,4.632E-01,4.633E-01,4.634E-01,4.635E-01,&
4.636E-01,4.640E-01,4.645E-01,4.649E-01,4.653E-01,4.658E-01,4.661E-01,4.664E-01,4.667E-01,4.670E-01,&
4.673E-01,4.673E-01,4.674E-01,4.675E-01,4.676E-01,4.677E-01,4.677E-01,4.678E-01,4.679E-01,4.680E-01,&
4.681E-01,4.682E-01,4.684E-01,4.685E-01,4.687E-01,4.688E-01,4.690E-01,4.692E-01,4.694E-01,4.696E-01,&
4.698E-01,4.701E-01,4.704E-01,4.708E-01,4.711E-01,4.714E-01,4.715E-01,4.716E-01,4.716E-01,4.717E-01/
DATA (Rsoil1(i),i=701,800)/&
4.718E-01,4.717E-01,4.717E-01,4.716E-01,4.716E-01,4.715E-01,4.718E-01,4.720E-01,4.723E-01,4.725E-01,&
4.728E-01,4.731E-01,4.733E-01,4.736E-01,4.739E-01,4.742E-01,4.743E-01,4.745E-01,4.746E-01,4.748E-01,&
4.749E-01,4.749E-01,4.749E-01,4.749E-01,4.749E-01,4.749E-01,4.749E-01,4.749E-01,4.749E-01,4.749E-01,&
4.749E-01,4.751E-01,4.753E-01,4.755E-01,4.757E-01,4.759E-01,4.761E-01,4.764E-01,4.766E-01,4.768E-01,&
4.770E-01,4.773E-01,4.775E-01,4.777E-01,4.779E-01,4.782E-01,4.784E-01,4.786E-01,4.788E-01,4.791E-01,&
4.793E-01,4.793E-01,4.794E-01,4.794E-01,4.795E-01,4.796E-01,4.795E-01,4.795E-01,4.795E-01,4.795E-01,&
4.795E-01,4.799E-01,4.802E-01,4.805E-01,4.808E-01,4.811E-01,4.811E-01,4.811E-01,4.811E-01,4.811E-01,&
4.811E-01,4.813E-01,4.815E-01,4.816E-01,4.818E-01,4.820E-01,4.825E-01,4.829E-01,4.833E-01,4.838E-01,&
4.842E-01,4.845E-01,4.847E-01,4.850E-01,4.852E-01,4.855E-01,4.857E-01,4.858E-01,4.860E-01,4.862E-01,&
4.864E-01,4.863E-01,4.863E-01,4.863E-01,4.863E-01,4.863E-01,4.863E-01,4.864E-01,4.864E-01,4.864E-01/
DATA (Rsoil1(i),i=801,900)/&
4.864E-01,4.866E-01,4.868E-01,4.870E-01,4.872E-01,4.874E-01,4.875E-01,4.875E-01,4.876E-01,4.877E-01,&
4.877E-01,4.878E-01,4.878E-01,4.878E-01,4.878E-01,4.879E-01,4.881E-01,4.884E-01,4.887E-01,4.889E-01,&
4.892E-01,4.893E-01,4.893E-01,4.894E-01,4.895E-01,4.896E-01,4.897E-01,4.897E-01,4.898E-01,4.899E-01,&
4.900E-01,4.901E-01,4.902E-01,4.903E-01,4.904E-01,4.905E-01,4.907E-01,4.909E-01,4.912E-01,4.914E-01,&
4.916E-01,4.919E-01,4.921E-01,4.924E-01,4.926E-01,4.929E-01,4.929E-01,4.929E-01,4.930E-01,4.930E-01,&
4.930E-01,4.929E-01,4.928E-01,4.927E-01,4.926E-01,4.924E-01,4.926E-01,4.928E-01,4.930E-01,4.932E-01,&
4.934E-01,4.935E-01,4.936E-01,4.937E-01,4.938E-01,4.939E-01,4.939E-01,4.940E-01,4.940E-01,4.941E-01,&
4.941E-01,4.942E-01,4.943E-01,4.945E-01,4.946E-01,4.947E-01,4.948E-01,4.950E-01,4.951E-01,4.953E-01,&
4.954E-01,4.957E-01,4.959E-01,4.962E-01,4.965E-01,4.967E-01,4.969E-01,4.972E-01,4.974E-01,4.976E-01,&
4.979E-01,4.980E-01,4.982E-01,4.984E-01,4.986E-01,4.988E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01/
DATA (Rsoil1(i),i=901,1000)/&
4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,&
4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,&
4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.986E-01,4.986E-01,4.986E-01,4.986E-01,&
4.986E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.987E-01,4.989E-01,4.990E-01,4.991E-01,4.993E-01,&
4.994E-01,4.996E-01,4.997E-01,4.999E-01,5.000E-01,5.002E-01,5.002E-01,5.003E-01,5.003E-01,5.003E-01,&
5.004E-01,5.004E-01,5.004E-01,5.004E-01,5.004E-01,5.004E-01,5.002E-01,5.000E-01,4.998E-01,4.996E-01,&
4.995E-01,4.993E-01,4.992E-01,4.991E-01,4.990E-01,4.989E-01,4.989E-01,4.989E-01,4.989E-01,4.989E-01,&
4.989E-01,4.989E-01,4.989E-01,4.989E-01,4.989E-01,4.989E-01,4.988E-01,4.988E-01,4.988E-01,4.988E-01,&
4.988E-01,4.979E-01,4.970E-01,4.962E-01,4.953E-01,4.944E-01,4.940E-01,4.937E-01,4.933E-01,4.930E-01,&
4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01/
DATA (Rsoil1(i),i=1001,1100)/&
4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,4.926E-01,&
4.925E-01,4.933E-01,4.940E-01,4.947E-01,4.955E-01,4.962E-01,4.968E-01,4.974E-01,4.980E-01,4.986E-01,&
4.992E-01,4.994E-01,4.997E-01,4.999E-01,5.001E-01,5.003E-01,5.004E-01,5.005E-01,5.006E-01,5.006E-01,&
5.007E-01,5.007E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,&
5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.008E-01,5.007E-01,5.006E-01,5.006E-01,5.005E-01,&
5.004E-01,5.004E-01,5.005E-01,5.005E-01,5.006E-01,5.006E-01,5.008E-01,5.011E-01,5.014E-01,5.016E-01,&
5.019E-01,5.020E-01,5.022E-01,5.023E-01,5.025E-01,5.026E-01,5.027E-01,5.028E-01,5.029E-01,5.031E-01,&
5.032E-01,5.032E-01,5.033E-01,5.033E-01,5.034E-01,5.034E-01,5.032E-01,5.030E-01,5.028E-01,5.026E-01,&
5.024E-01,5.022E-01,5.020E-01,5.019E-01,5.017E-01,5.015E-01,5.017E-01,5.019E-01,5.021E-01,5.023E-01,&
5.025E-01,5.027E-01,5.029E-01,5.031E-01,5.033E-01,5.035E-01,5.036E-01,5.037E-01,5.038E-01,5.040E-01/
DATA (Rsoil1(i),i=1101,1200)/&
5.041E-01,5.039E-01,5.037E-01,5.036E-01,5.034E-01,5.033E-01,5.035E-01,5.036E-01,5.038E-01,5.040E-01,&
5.042E-01,5.044E-01,5.047E-01,5.049E-01,5.051E-01,5.054E-01,5.057E-01,5.061E-01,5.065E-01,5.068E-01,&
5.072E-01,5.072E-01,5.073E-01,5.073E-01,5.073E-01,5.074E-01,5.072E-01,5.071E-01,5.069E-01,5.068E-01,&
5.067E-01,5.067E-01,5.067E-01,5.067E-01,5.067E-01,5.067E-01,5.068E-01,5.070E-01,5.071E-01,5.072E-01,&
5.074E-01,5.075E-01,5.076E-01,5.078E-01,5.079E-01,5.080E-01,5.080E-01,5.079E-01,5.078E-01,5.077E-01,&
5.077E-01,5.075E-01,5.074E-01,5.073E-01,5.072E-01,5.071E-01,5.072E-01,5.074E-01,5.075E-01,5.077E-01,&
5.078E-01,5.079E-01,5.080E-01,5.082E-01,5.083E-01,5.084E-01,5.084E-01,5.084E-01,5.084E-01,5.084E-01,&
5.084E-01,5.084E-01,5.084E-01,5.084E-01,5.084E-01,5.084E-01,5.084E-01,5.084E-01,5.085E-01,5.085E-01,&
5.085E-01,5.087E-01,5.089E-01,5.090E-01,5.092E-01,5.094E-01,5.096E-01,5.097E-01,5.099E-01,5.101E-01,&
5.103E-01,5.103E-01,5.104E-01,5.104E-01,5.105E-01,5.106E-01,5.103E-01,5.101E-01,5.099E-01,5.097E-01/
DATA (Rsoil1(i),i=1201,1300)/&
5.095E-01,5.093E-01,5.091E-01,5.089E-01,5.087E-01,5.085E-01,5.086E-01,5.087E-01,5.088E-01,5.089E-01,&
5.090E-01,5.091E-01,5.092E-01,5.093E-01,5.095E-01,5.096E-01,5.095E-01,5.093E-01,5.092E-01,5.091E-01,&
5.090E-01,5.089E-01,5.087E-01,5.086E-01,5.085E-01,5.084E-01,5.083E-01,5.083E-01,5.083E-01,5.082E-01,&
5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,&
5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.082E-01,5.085E-01,5.089E-01,5.092E-01,5.096E-01,&
5.099E-01,5.101E-01,5.104E-01,5.106E-01,5.109E-01,5.111E-01,5.109E-01,5.107E-01,5.106E-01,5.104E-01,&
5.102E-01,5.100E-01,5.098E-01,5.097E-01,5.095E-01,5.093E-01,5.091E-01,5.090E-01,5.088E-01,5.086E-01,&
5.084E-01,5.083E-01,5.083E-01,5.082E-01,5.081E-01,5.081E-01,5.081E-01,5.081E-01,5.080E-01,5.080E-01,&
5.080E-01,5.080E-01,5.080E-01,5.080E-01,5.080E-01,5.080E-01,5.080E-01,5.080E-01,5.080E-01,5.080E-01,&
5.080E-01,5.082E-01,5.084E-01,5.086E-01,5.088E-01,5.090E-01,5.093E-01,5.095E-01,5.098E-01,5.101E-01/
DATA (Rsoil1(i),i=1301,1400)/&
5.103E-01,5.106E-01,5.108E-01,5.111E-01,5.114E-01,5.116E-01,5.117E-01,5.118E-01,5.119E-01,5.120E-01,&
5.121E-01,5.122E-01,5.122E-01,5.122E-01,5.123E-01,5.123E-01,5.120E-01,5.116E-01,5.113E-01,5.110E-01,&
5.107E-01,5.104E-01,5.102E-01,5.100E-01,5.098E-01,5.095E-01,5.096E-01,5.096E-01,5.097E-01,5.097E-01,&
5.098E-01,5.095E-01,5.092E-01,5.089E-01,5.086E-01,5.083E-01,5.085E-01,5.086E-01,5.088E-01,5.090E-01,&
5.091E-01,5.092E-01,5.093E-01,5.094E-01,5.095E-01,5.096E-01,5.096E-01,5.096E-01,5.096E-01,5.096E-01,&
5.096E-01,5.097E-01,5.098E-01,5.099E-01,5.101E-01,5.102E-01,5.102E-01,5.103E-01,5.103E-01,5.104E-01,&
5.105E-01,5.102E-01,5.100E-01,5.098E-01,5.096E-01,5.094E-01,5.092E-01,5.090E-01,5.088E-01,5.086E-01,&
5.084E-01,5.082E-01,5.081E-01,5.079E-01,5.078E-01,5.076E-01,5.078E-01,5.079E-01,5.081E-01,5.082E-01,&
5.084E-01,5.086E-01,5.088E-01,5.091E-01,5.093E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,&
5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01/
DATA (Rsoil1(i),i=1401,1500)/&
5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.095E-01,5.094E-01,5.094E-01,5.094E-01,5.094E-01,5.094E-01,&
5.094E-01,5.090E-01,5.086E-01,5.082E-01,5.078E-01,5.074E-01,5.073E-01,5.072E-01,5.071E-01,5.070E-01,&
5.069E-01,5.071E-01,5.074E-01,5.076E-01,5.079E-01,5.082E-01,5.081E-01,5.081E-01,5.080E-01,5.080E-01,&
5.079E-01,5.076E-01,5.073E-01,5.070E-01,5.067E-01,5.064E-01,5.066E-01,5.069E-01,5.071E-01,5.073E-01,&
5.076E-01,5.079E-01,5.083E-01,5.087E-01,5.091E-01,5.095E-01,5.097E-01,5.100E-01,5.103E-01,5.105E-01,&
5.108E-01,5.110E-01,5.112E-01,5.115E-01,5.117E-01,5.119E-01,5.122E-01,5.125E-01,5.128E-01,5.131E-01,&
5.134E-01,5.138E-01,5.142E-01,5.147E-01,5.151E-01,5.155E-01,5.153E-01,5.152E-01,5.150E-01,5.148E-01,&
5.146E-01,5.142E-01,5.137E-01,5.133E-01,5.128E-01,5.124E-01,5.102E-01,5.081E-01,5.059E-01,5.037E-01,&
5.016E-01,5.016E-01,5.016E-01,5.015E-01,5.015E-01,5.015E-01,5.019E-01,5.023E-01,5.026E-01,5.030E-01,&
5.034E-01,5.038E-01,5.042E-01,5.046E-01,5.050E-01,5.055E-01,5.061E-01,5.067E-01,5.073E-01,5.080E-01/
DATA (Rsoil1(i),i=1501,1600)/&
5.086E-01,5.086E-01,5.086E-01,5.085E-01,5.085E-01,5.085E-01,5.081E-01,5.076E-01,5.072E-01,5.068E-01,&
5.064E-01,5.059E-01,5.055E-01,5.051E-01,5.047E-01,5.042E-01,5.038E-01,5.034E-01,5.030E-01,5.027E-01,&
5.023E-01,5.019E-01,5.015E-01,5.011E-01,5.007E-01,5.003E-01,5.003E-01,5.003E-01,5.003E-01,5.003E-01,&
5.003E-01,4.992E-01,4.980E-01,4.969E-01,4.958E-01,4.946E-01,4.943E-01,4.939E-01,4.935E-01,4.931E-01,&
4.927E-01,4.924E-01,4.920E-01,4.916E-01,4.912E-01,4.908E-01,4.907E-01,4.905E-01,4.904E-01,4.902E-01,&
4.901E-01,4.902E-01,4.903E-01,4.904E-01,4.905E-01,4.906E-01,4.905E-01,4.904E-01,4.902E-01,4.901E-01,&
4.900E-01,4.900E-01,4.900E-01,4.900E-01,4.900E-01,4.900E-01,4.902E-01,4.904E-01,4.906E-01,4.907E-01,&
4.909E-01,4.911E-01,4.913E-01,4.914E-01,4.916E-01,4.918E-01,4.930E-01,4.941E-01,4.953E-01,4.964E-01,&
4.976E-01,4.976E-01,4.976E-01,4.976E-01,4.976E-01,4.977E-01,4.977E-01,4.977E-01,4.977E-01,4.977E-01,&
4.977E-01,4.977E-01,4.978E-01,4.978E-01,4.978E-01,4.978E-01,4.978E-01,4.978E-01,4.978E-01,4.978E-01/
DATA (Rsoil1(i),i=1601,1700)/&
4.979E-01,4.981E-01,4.983E-01,4.985E-01,4.986E-01,4.988E-01,4.991E-01,4.994E-01,4.996E-01,4.999E-01,&
5.002E-01,5.004E-01,5.006E-01,5.009E-01,5.011E-01,5.013E-01,5.016E-01,5.018E-01,5.021E-01,5.023E-01,&
5.025E-01,5.028E-01,5.030E-01,5.033E-01,5.035E-01,5.037E-01,5.038E-01,5.038E-01,5.039E-01,5.039E-01,&
5.040E-01,5.040E-01,5.040E-01,5.040E-01,5.040E-01,5.040E-01,5.044E-01,5.049E-01,5.053E-01,5.058E-01,&
5.062E-01,5.065E-01,5.068E-01,5.070E-01,5.073E-01,5.076E-01,5.076E-01,5.076E-01,5.076E-01,5.076E-01,&
5.076E-01,5.077E-01,5.078E-01,5.079E-01,5.080E-01,5.081E-01,5.085E-01,5.089E-01,5.093E-01,5.097E-01,&
5.101E-01,5.100E-01,5.100E-01,5.099E-01,5.098E-01,5.098E-01,5.095E-01,5.092E-01,5.089E-01,5.086E-01,&
5.083E-01,5.080E-01,5.078E-01,5.076E-01,5.073E-01,5.071E-01,5.071E-01,5.071E-01,5.071E-01,5.071E-01,&
5.071E-01,5.071E-01,5.071E-01,5.070E-01,5.070E-01,5.070E-01,5.070E-01,5.069E-01,5.069E-01,5.068E-01,&
5.068E-01,5.067E-01,5.067E-01,5.066E-01,5.066E-01,5.065E-01,5.064E-01,5.062E-01,5.061E-01,5.059E-01/
DATA (Rsoil1(i),i=1701,1800)/&
5.058E-01,5.054E-01,5.050E-01,5.047E-01,5.043E-01,5.039E-01,5.043E-01,5.047E-01,5.050E-01,5.054E-01,&
5.058E-01,5.056E-01,5.055E-01,5.054E-01,5.052E-01,5.051E-01,5.050E-01,5.048E-01,5.047E-01,5.046E-01,&
5.044E-01,5.047E-01,5.051E-01,5.054E-01,5.057E-01,5.060E-01,5.055E-01,5.050E-01,5.045E-01,5.040E-01,&
5.035E-01,5.039E-01,5.043E-01,5.047E-01,5.051E-01,5.055E-01,5.053E-01,5.051E-01,5.048E-01,5.046E-01,&
5.044E-01,5.041E-01,5.037E-01,5.034E-01,5.031E-01,5.028E-01,5.024E-01,5.021E-01,5.018E-01,5.015E-01,&
5.011E-01,5.009E-01,5.007E-01,5.005E-01,5.003E-01,5.000E-01,4.998E-01,4.996E-01,4.994E-01,4.992E-01,&
4.990E-01,4.988E-01,4.986E-01,4.984E-01,4.981E-01,4.979E-01,4.978E-01,4.976E-01,4.974E-01,4.972E-01,&
4.970E-01,4.972E-01,4.974E-01,4.975E-01,4.977E-01,4.979E-01,4.979E-01,4.979E-01,4.979E-01,4.979E-01,&
4.979E-01,4.963E-01,4.946E-01,4.929E-01,4.912E-01,4.895E-01,4.889E-01,4.883E-01,4.877E-01,4.871E-01,&
4.865E-01,4.859E-01,4.854E-01,4.848E-01,4.842E-01,4.836E-01,4.833E-01,4.830E-01,4.827E-01,4.824E-01/
DATA (Rsoil1(i),i=1801,1900)/&
4.821E-01,4.820E-01,4.819E-01,4.818E-01,4.817E-01,4.816E-01,4.816E-01,4.815E-01,4.815E-01,4.815E-01,&
4.815E-01,4.817E-01,4.818E-01,4.820E-01,4.821E-01,4.823E-01,4.827E-01,4.831E-01,4.836E-01,4.840E-01,&
4.844E-01,4.849E-01,4.853E-01,4.858E-01,4.862E-01,4.867E-01,4.873E-01,4.879E-01,4.884E-01,4.890E-01,&
4.896E-01,4.902E-01,4.908E-01,4.914E-01,4.920E-01,4.926E-01,4.924E-01,4.922E-01,4.920E-01,4.919E-01,&
4.917E-01,4.914E-01,4.911E-01,4.908E-01,4.905E-01,4.902E-01,4.902E-01,4.902E-01,4.902E-01,4.901E-01,&
4.901E-01,4.901E-01,4.900E-01,4.899E-01,4.898E-01,4.897E-01,4.895E-01,4.893E-01,4.891E-01,4.889E-01,&
4.887E-01,4.889E-01,4.892E-01,4.894E-01,4.896E-01,4.898E-01,4.900E-01,4.902E-01,4.904E-01,4.906E-01,&
4.908E-01,4.910E-01,4.912E-01,4.914E-01,4.916E-01,4.918E-01,4.920E-01,4.923E-01,4.925E-01,4.927E-01,&
4.929E-01,4.926E-01,4.924E-01,4.921E-01,4.919E-01,4.916E-01,4.910E-01,4.904E-01,4.898E-01,4.892E-01,&
4.886E-01,4.885E-01,4.884E-01,4.883E-01,4.882E-01,4.882E-01,4.879E-01,4.875E-01,4.872E-01,4.869E-01/
DATA (Rsoil1(i),i=1901,2000)/&
4.866E-01,4.863E-01,4.860E-01,4.857E-01,4.853E-01,4.850E-01,4.847E-01,4.844E-01,4.841E-01,4.838E-01,&
4.835E-01,4.832E-01,4.830E-01,4.828E-01,4.825E-01,4.823E-01,4.823E-01,4.823E-01,4.823E-01,4.823E-01,&
4.823E-01,4.823E-01,4.823E-01,4.823E-01,4.823E-01,4.823E-01,4.822E-01,4.822E-01,4.822E-01,4.822E-01,&
4.822E-01,4.817E-01,4.813E-01,4.809E-01,4.805E-01,4.801E-01,4.797E-01,4.793E-01,4.789E-01,4.785E-01,&
4.781E-01,4.778E-01,4.776E-01,4.773E-01,4.771E-01,4.768E-01,4.769E-01,4.770E-01,4.770E-01,4.771E-01,&
4.772E-01,4.773E-01,4.773E-01,4.774E-01,4.775E-01,4.775E-01,4.776E-01,4.777E-01,4.778E-01,4.779E-01,&
4.780E-01,4.778E-01,4.777E-01,4.776E-01,4.774E-01,4.773E-01,4.770E-01,4.767E-01,4.765E-01,4.762E-01,&
4.759E-01,4.756E-01,4.754E-01,4.751E-01,4.748E-01,4.745E-01,4.747E-01,4.748E-01,4.749E-01,4.750E-01,&
4.752E-01,4.752E-01,4.753E-01,4.754E-01,4.755E-01,4.755E-01,4.752E-01,4.748E-01,4.744E-01,4.740E-01,&
4.736E-01,4.736E-01,4.735E-01,4.734E-01,4.734E-01,4.733E-01,4.731E-01,4.729E-01,4.727E-01,4.725E-01/
DATA (Rsoil1(i),i=2001,2101)/&
4.723E-01,4.718E-01,4.714E-01,4.709E-01,4.704E-01,4.699E-01,4.695E-01,4.691E-01,4.686E-01,4.682E-01,&
4.678E-01,4.678E-01,4.678E-01,4.678E-01,4.678E-01,4.678E-01,4.678E-01,4.677E-01,4.677E-01,4.677E-01,&
4.677E-01,4.675E-01,4.673E-01,4.671E-01,4.668E-01,4.666E-01,4.662E-01,4.658E-01,4.653E-01,4.649E-01,&
4.645E-01,4.640E-01,4.636E-01,4.632E-01,4.627E-01,4.623E-01,4.620E-01,4.616E-01,4.613E-01,4.609E-01,&
4.606E-01,4.602E-01,4.599E-01,4.595E-01,4.592E-01,4.588E-01,4.585E-01,4.581E-01,4.578E-01,4.574E-01,&
4.571E-01,4.567E-01,4.564E-01,4.561E-01,4.557E-01,4.554E-01,4.551E-01,4.548E-01,4.545E-01,4.541E-01,&
4.538E-01,4.535E-01,4.532E-01,4.529E-01,4.526E-01,4.523E-01,4.522E-01,4.521E-01,4.521E-01,4.520E-01,&
4.519E-01,4.518E-01,4.518E-01,4.517E-01,4.516E-01,4.516E-01,4.513E-01,4.510E-01,4.507E-01,4.505E-01,&
4.502E-01,4.499E-01,4.497E-01,4.494E-01,4.491E-01,4.488E-01,4.487E-01,4.486E-01,4.485E-01,4.483E-01,&
4.482E-01,4.482E-01,4.482E-01,4.482E-01,4.482E-01,4.482E-01,4.478E-01,4.475E-01,4.471E-01,4.467E-01,&
4.464E-01/
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!!!! soil 2 reflectance !!!!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
DATA (Rsoil2(i),i=1,100)/&
3.208E-02,3.195E-02,3.181E-02,3.168E-02,3.154E-02,3.141E-02,3.125E-02,3.108E-02,3.092E-02,3.076E-02,&
3.059E-02,3.044E-02,3.028E-02,3.012E-02,2.996E-02,2.980E-02,2.969E-02,2.957E-02,2.946E-02,2.934E-02,&
2.923E-02,2.911E-02,2.900E-02,2.888E-02,2.877E-02,2.865E-02,2.854E-02,2.842E-02,2.831E-02,2.819E-02,&
2.808E-02,2.793E-02,2.779E-02,2.764E-02,2.750E-02,2.735E-02,2.720E-02,2.705E-02,2.690E-02,2.675E-02,&
2.660E-02,2.646E-02,2.632E-02,2.618E-02,2.604E-02,2.590E-02,2.578E-02,2.566E-02,2.553E-02,2.541E-02,&
2.529E-02,2.523E-02,2.518E-02,2.513E-02,2.508E-02,2.502E-02,2.502E-02,2.502E-02,2.502E-02,2.501E-02,&
2.501E-02,2.501E-02,2.501E-02,2.500E-02,2.500E-02,2.500E-02,2.500E-02,2.499E-02,2.499E-02,2.499E-02,&
2.499E-02,2.498E-02,2.498E-02,2.498E-02,2.498E-02,2.497E-02,2.483E-02,2.468E-02,2.453E-02,2.438E-02,&
2.423E-02,2.423E-02,2.423E-02,2.423E-02,2.422E-02,2.422E-02,2.422E-02,2.422E-02,2.421E-02,2.421E-02,&
2.421E-02,2.450E-02,2.479E-02,2.508E-02,2.537E-02,2.566E-02,2.565E-02,2.565E-02,2.565E-02,2.565E-02/
DATA (Rsoil2(i),i=101,200)/&
2.564E-02,2.564E-02,2.564E-02,2.564E-02,2.563E-02,2.563E-02,2.570E-02,2.577E-02,2.584E-02,2.591E-02,&
2.598E-02,2.598E-02,2.598E-02,2.598E-02,2.597E-02,2.597E-02,2.597E-02,2.597E-02,2.596E-02,2.596E-02,&
2.596E-02,2.596E-02,2.595E-02,2.595E-02,2.595E-02,2.595E-02,2.594E-02,2.594E-02,2.594E-02,2.594E-02,&
2.593E-02,2.593E-02,2.593E-02,2.593E-02,2.592E-02,2.592E-02,2.614E-02,2.635E-02,2.657E-02,2.679E-02,&
2.700E-02,2.700E-02,2.700E-02,2.700E-02,2.699E-02,2.699E-02,2.735E-02,2.772E-02,2.808E-02,2.844E-02,&
2.880E-02,2.880E-02,2.880E-02,2.879E-02,2.879E-02,2.879E-02,2.879E-02,2.878E-02,2.878E-02,2.878E-02,&
2.878E-02,2.877E-02,2.877E-02,2.877E-02,2.877E-02,2.877E-02,2.884E-02,2.891E-02,2.898E-02,2.905E-02,&
2.912E-02,2.912E-02,2.911E-02,2.911E-02,2.911E-02,2.911E-02,2.918E-02,2.926E-02,2.934E-02,2.942E-02,&
2.950E-02,2.978E-02,3.006E-02,3.034E-02,3.062E-02,3.090E-02,3.090E-02,3.090E-02,3.090E-02,3.089E-02,&
3.089E-02,3.089E-02,3.089E-02,3.088E-02,3.088E-02,3.088E-02,3.088E-02,3.087E-02,3.087E-02,3.087E-02/
DATA (Rsoil2(i),i=201,300)/&
3.087E-02,3.098E-02,3.110E-02,3.122E-02,3.133E-02,3.145E-02,3.147E-02,3.150E-02,3.152E-02,3.155E-02,&
3.157E-02,3.157E-02,3.157E-02,3.156E-02,3.156E-02,3.156E-02,3.163E-02,3.170E-02,3.177E-02,3.184E-02,&
3.191E-02,3.198E-02,3.205E-02,3.212E-02,3.219E-02,3.227E-02,3.263E-02,3.299E-02,3.335E-02,3.371E-02,&
3.408E-02,3.407E-02,3.407E-02,3.407E-02,3.407E-02,3.406E-02,3.425E-02,3.443E-02,3.461E-02,3.479E-02,&
3.498E-02,3.515E-02,3.533E-02,3.551E-02,3.568E-02,3.586E-02,3.593E-02,3.600E-02,3.607E-02,3.614E-02,&
3.621E-02,3.636E-02,3.650E-02,3.664E-02,3.679E-02,3.693E-02,3.710E-02,3.727E-02,3.743E-02,3.760E-02,&
3.777E-02,3.782E-02,3.786E-02,3.791E-02,3.795E-02,3.800E-02,3.829E-02,3.858E-02,3.887E-02,3.916E-02,&
3.945E-02,3.944E-02,3.944E-02,3.944E-02,3.944E-02,3.943E-02,3.980E-02,4.016E-02,4.052E-02,4.088E-02,&
4.124E-02,4.131E-02,4.138E-02,4.146E-02,4.153E-02,4.160E-02,4.167E-02,4.174E-02,4.181E-02,4.188E-02,&
4.195E-02,4.195E-02,4.194E-02,4.194E-02,4.194E-02,4.194E-02,4.193E-02,4.193E-02,4.193E-02,4.193E-02/
DATA (Rsoil2(i),i=301,400)/&
4.193E-02,4.214E-02,4.236E-02,4.257E-02,4.279E-02,4.301E-02,4.332E-02,4.363E-02,4.394E-02,4.426E-02,&
4.457E-02,4.462E-02,4.466E-02,4.471E-02,4.476E-02,4.480E-02,4.495E-02,4.509E-02,4.523E-02,4.538E-02,&
4.552E-02,4.566E-02,4.581E-02,4.595E-02,4.609E-02,4.624E-02,4.653E-02,4.682E-02,4.711E-02,4.739E-02,&
4.768E-02,4.805E-02,4.841E-02,4.877E-02,4.913E-02,4.949E-02,4.971E-02,4.993E-02,5.014E-02,5.036E-02,&
5.058E-02,5.073E-02,5.089E-02,5.105E-02,5.121E-02,5.136E-02,5.156E-02,5.175E-02,5.195E-02,5.214E-02,&
5.233E-02,5.253E-02,5.272E-02,5.292E-02,5.311E-02,5.330E-02,5.355E-02,5.380E-02,5.404E-02,5.429E-02,&
5.454E-02,5.461E-02,5.468E-02,5.475E-02,5.482E-02,5.489E-02,5.489E-02,5.488E-02,5.488E-02,5.488E-02,&
5.488E-02,5.490E-02,5.493E-02,5.496E-02,5.498E-02,5.501E-02,5.549E-02,5.597E-02,5.645E-02,5.692E-02,&
5.740E-02,5.753E-02,5.766E-02,5.778E-02,5.791E-02,5.804E-02,5.813E-02,5.821E-02,5.830E-02,5.839E-02,&
5.847E-02,5.884E-02,5.920E-02,5.956E-02,5.992E-02,6.028E-02,6.028E-02,6.028E-02,6.028E-02,6.027E-02/
DATA (Rsoil2(i),i=401,500)/&
6.027E-02,6.063E-02,6.100E-02,6.136E-02,6.172E-02,6.208E-02,6.223E-02,6.237E-02,6.251E-02,6.266E-02,&
6.280E-02,6.280E-02,6.279E-02,6.279E-02,6.279E-02,6.279E-02,6.300E-02,6.322E-02,6.344E-02,6.365E-02,&
6.387E-02,6.433E-02,6.479E-02,6.525E-02,6.571E-02,6.616E-02,6.643E-02,6.669E-02,6.696E-02,6.722E-02,&
6.749E-02,6.749E-02,6.748E-02,6.748E-02,6.748E-02,6.748E-02,6.747E-02,6.747E-02,6.747E-02,6.747E-02,&
6.747E-02,6.776E-02,6.806E-02,6.836E-02,6.866E-02,6.896E-02,6.938E-02,6.981E-02,7.023E-02,7.066E-02,&
7.109E-02,7.108E-02,7.108E-02,7.108E-02,7.108E-02,7.107E-02,7.107E-02,7.107E-02,7.107E-02,7.106E-02,&
7.106E-02,7.113E-02,7.119E-02,7.126E-02,7.132E-02,7.139E-02,7.197E-02,7.256E-02,7.315E-02,7.373E-02,&
7.432E-02,7.432E-02,7.431E-02,7.431E-02,7.431E-02,7.431E-02,7.445E-02,7.459E-02,7.474E-02,7.488E-02,&
7.502E-02,7.521E-02,7.539E-02,7.558E-02,7.576E-02,7.595E-02,7.614E-02,7.632E-02,7.651E-02,7.670E-02,&
7.689E-02,7.717E-02,7.745E-02,7.772E-02,7.800E-02,7.828E-02,7.848E-02,7.867E-02,7.887E-02,7.907E-02/
DATA (Rsoil2(i),i=501,600)/&
7.927E-02,7.943E-02,7.959E-02,7.975E-02,7.991E-02,8.007E-02,8.006E-02,8.006E-02,8.006E-02,8.006E-02,&
8.005E-02,8.022E-02,8.039E-02,8.056E-02,8.073E-02,8.090E-02,8.120E-02,8.151E-02,8.181E-02,8.212E-02,&
8.243E-02,8.272E-02,8.301E-02,8.330E-02,8.359E-02,8.389E-02,8.404E-02,8.419E-02,8.434E-02,8.449E-02,&
8.464E-02,8.495E-02,8.526E-02,8.557E-02,8.588E-02,8.619E-02,8.636E-02,8.653E-02,8.670E-02,8.686E-02,&
8.703E-02,8.737E-02,8.771E-02,8.804E-02,8.838E-02,8.872E-02,8.882E-02,8.893E-02,8.904E-02,8.914E-02,&
8.925E-02,8.947E-02,8.969E-02,8.991E-02,9.013E-02,9.035E-02,9.058E-02,9.081E-02,9.104E-02,9.127E-02,&
9.151E-02,9.189E-02,9.227E-02,9.265E-02,9.303E-02,9.341E-02,9.355E-02,9.369E-02,9.384E-02,9.398E-02,&
9.412E-02,9.437E-02,9.461E-02,9.485E-02,9.509E-02,9.534E-02,9.540E-02,9.546E-02,9.552E-02,9.558E-02,&
9.564E-02,9.569E-02,9.575E-02,9.580E-02,9.586E-02,9.591E-02,9.617E-02,9.643E-02,9.669E-02,9.695E-02,&
9.722E-02,9.797E-02,9.872E-02,9.947E-02,1.002E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01/
DATA (Rsoil2(i),i=601,700)/&
1.010E-01,1.010E-01,1.011E-01,1.011E-01,1.012E-01,1.012E-01,1.015E-01,1.017E-01,1.019E-01,1.022E-01,&
1.024E-01,1.026E-01,1.028E-01,1.031E-01,1.033E-01,1.035E-01,1.035E-01,1.035E-01,1.035E-01,1.035E-01,&
1.035E-01,1.035E-01,1.035E-01,1.035E-01,1.035E-01,1.035E-01,1.042E-01,1.049E-01,1.056E-01,1.063E-01,&
1.070E-01,1.075E-01,1.080E-01,1.086E-01,1.091E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,&
1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,1.096E-01,&
1.096E-01,1.097E-01,1.099E-01,1.100E-01,1.102E-01,1.103E-01,1.105E-01,1.108E-01,1.110E-01,1.112E-01,&
1.114E-01,1.118E-01,1.121E-01,1.125E-01,1.129E-01,1.132E-01,1.132E-01,1.132E-01,1.132E-01,1.132E-01,&
1.132E-01,1.135E-01,1.138E-01,1.141E-01,1.144E-01,1.146E-01,1.148E-01,1.150E-01,1.152E-01,1.154E-01,&
1.156E-01,1.158E-01,1.160E-01,1.162E-01,1.163E-01,1.165E-01,1.168E-01,1.171E-01,1.174E-01,1.176E-01,&
1.179E-01,1.182E-01,1.184E-01,1.186E-01,1.188E-01,1.191E-01,1.191E-01,1.192E-01,1.193E-01,1.194E-01/
DATA (Rsoil2(i),i=701,800)/&
1.195E-01,1.196E-01,1.197E-01,1.199E-01,1.200E-01,1.202E-01,1.204E-01,1.206E-01,1.209E-01,1.211E-01,&
1.213E-01,1.215E-01,1.217E-01,1.218E-01,1.220E-01,1.221E-01,1.223E-01,1.225E-01,1.226E-01,1.228E-01,&
1.229E-01,1.231E-01,1.233E-01,1.234E-01,1.236E-01,1.238E-01,1.240E-01,1.241E-01,1.243E-01,1.245E-01,&
1.246E-01,1.248E-01,1.250E-01,1.252E-01,1.253E-01,1.255E-01,1.257E-01,1.259E-01,1.261E-01,1.263E-01,&
1.265E-01,1.267E-01,1.268E-01,1.269E-01,1.271E-01,1.272E-01,1.274E-01,1.275E-01,1.277E-01,1.278E-01,&
1.279E-01,1.281E-01,1.282E-01,1.284E-01,1.285E-01,1.287E-01,1.288E-01,1.290E-01,1.291E-01,1.293E-01,&
1.294E-01,1.296E-01,1.297E-01,1.299E-01,1.300E-01,1.302E-01,1.303E-01,1.304E-01,1.306E-01,1.307E-01,&
1.309E-01,1.310E-01,1.312E-01,1.314E-01,1.316E-01,1.318E-01,1.320E-01,1.322E-01,1.323E-01,1.325E-01,&
1.327E-01,1.329E-01,1.331E-01,1.334E-01,1.336E-01,1.338E-01,1.340E-01,1.343E-01,1.346E-01,1.348E-01,&
1.351E-01,1.352E-01,1.353E-01,1.353E-01,1.354E-01,1.355E-01,1.358E-01,1.361E-01,1.364E-01,1.367E-01/
DATA (Rsoil2(i),i=801,900)/&
1.369E-01,1.371E-01,1.373E-01,1.374E-01,1.376E-01,1.377E-01,1.383E-01,1.388E-01,1.394E-01,1.399E-01,&
1.405E-01,1.407E-01,1.409E-01,1.411E-01,1.412E-01,1.414E-01,1.415E-01,1.415E-01,1.415E-01,1.416E-01,&
1.416E-01,1.420E-01,1.423E-01,1.427E-01,1.431E-01,1.434E-01,1.438E-01,1.442E-01,1.445E-01,1.449E-01,&
1.453E-01,1.453E-01,1.452E-01,1.452E-01,1.452E-01,1.452E-01,1.453E-01,1.454E-01,1.455E-01,1.455E-01,&
1.456E-01,1.456E-01,1.456E-01,1.456E-01,1.456E-01,1.456E-01,1.458E-01,1.460E-01,1.462E-01,1.464E-01,&
1.467E-01,1.469E-01,1.471E-01,1.474E-01,1.476E-01,1.478E-01,1.479E-01,1.479E-01,1.480E-01,1.480E-01,&
1.481E-01,1.481E-01,1.481E-01,1.481E-01,1.481E-01,1.481E-01,1.481E-01,1.481E-01,1.481E-01,1.481E-01,&
1.481E-01,1.483E-01,1.485E-01,1.487E-01,1.489E-01,1.491E-01,1.492E-01,1.493E-01,1.494E-01,1.494E-01,&
1.495E-01,1.499E-01,1.502E-01,1.506E-01,1.510E-01,1.513E-01,1.513E-01,1.513E-01,1.513E-01,1.513E-01,&
1.513E-01,1.513E-01,1.513E-01,1.513E-01,1.513E-01,1.513E-01,1.517E-01,1.521E-01,1.525E-01,1.529E-01/
DATA (Rsoil2(i),i=901,1000)/&
1.533E-01,1.533E-01,1.533E-01,1.534E-01,1.534E-01,1.535E-01,1.535E-01,1.535E-01,1.534E-01,1.534E-01,&
1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,&
1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,&
1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.534E-01,1.532E-01,1.531E-01,1.529E-01,1.527E-01,&
1.526E-01,1.523E-01,1.520E-01,1.517E-01,1.515E-01,1.512E-01,1.510E-01,1.509E-01,1.507E-01,1.506E-01,&
1.504E-01,1.504E-01,1.504E-01,1.504E-01,1.504E-01,1.504E-01,1.500E-01,1.497E-01,1.493E-01,1.489E-01,&
1.486E-01,1.481E-01,1.476E-01,1.471E-01,1.466E-01,1.461E-01,1.454E-01,1.447E-01,1.440E-01,1.433E-01,&
1.425E-01,1.414E-01,1.403E-01,1.392E-01,1.381E-01,1.370E-01,1.363E-01,1.355E-01,1.348E-01,1.340E-01,&
1.332E-01,1.321E-01,1.309E-01,1.298E-01,1.287E-01,1.275E-01,1.264E-01,1.252E-01,1.240E-01,1.229E-01,&
1.217E-01,1.209E-01,1.201E-01,1.193E-01,1.185E-01,1.178E-01,1.169E-01,1.161E-01,1.152E-01,1.143E-01/
DATA (Rsoil2(i),i=1001,1100)/&
1.135E-01,1.128E-01,1.122E-01,1.116E-01,1.109E-01,1.103E-01,1.098E-01,1.093E-01,1.088E-01,1.083E-01,&
1.078E-01,1.070E-01,1.062E-01,1.054E-01,1.046E-01,1.039E-01,1.037E-01,1.036E-01,1.035E-01,1.034E-01,&
1.032E-01,1.029E-01,1.026E-01,1.024E-01,1.021E-01,1.018E-01,1.016E-01,1.015E-01,1.013E-01,1.012E-01,&
1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,&
1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.010E-01,1.012E-01,1.014E-01,1.016E-01,1.019E-01,&
1.021E-01,1.021E-01,1.021E-01,1.021E-01,1.021E-01,1.021E-01,1.021E-01,1.021E-01,1.021E-01,1.020E-01,&
1.020E-01,1.023E-01,1.026E-01,1.029E-01,1.032E-01,1.035E-01,1.036E-01,1.037E-01,1.037E-01,1.038E-01,&
1.039E-01,1.049E-01,1.059E-01,1.069E-01,1.079E-01,1.088E-01,1.092E-01,1.096E-01,1.100E-01,1.104E-01,&
1.107E-01,1.108E-01,1.108E-01,1.108E-01,1.108E-01,1.108E-01,1.114E-01,1.120E-01,1.126E-01,1.133E-01,&
1.139E-01,1.144E-01,1.150E-01,1.156E-01,1.161E-01,1.167E-01,1.171E-01,1.175E-01,1.180E-01,1.184E-01/
DATA (Rsoil2(i),i=1101,1200)/&
1.188E-01,1.192E-01,1.196E-01,1.200E-01,1.204E-01,1.208E-01,1.212E-01,1.216E-01,1.220E-01,1.223E-01,&
1.227E-01,1.233E-01,1.239E-01,1.245E-01,1.251E-01,1.257E-01,1.262E-01,1.268E-01,1.274E-01,1.279E-01,&
1.285E-01,1.291E-01,1.298E-01,1.305E-01,1.311E-01,1.318E-01,1.321E-01,1.324E-01,1.327E-01,1.329E-01,&
1.332E-01,1.337E-01,1.342E-01,1.347E-01,1.352E-01,1.357E-01,1.360E-01,1.363E-01,1.366E-01,1.369E-01,&
1.372E-01,1.377E-01,1.381E-01,1.385E-01,1.390E-01,1.394E-01,1.396E-01,1.398E-01,1.400E-01,1.403E-01,&
1.405E-01,1.407E-01,1.410E-01,1.413E-01,1.415E-01,1.418E-01,1.424E-01,1.429E-01,1.435E-01,1.440E-01,&
1.446E-01,1.451E-01,1.455E-01,1.460E-01,1.465E-01,1.470E-01,1.475E-01,1.480E-01,1.485E-01,1.489E-01,&
1.494E-01,1.498E-01,1.501E-01,1.504E-01,1.508E-01,1.511E-01,1.512E-01,1.513E-01,1.515E-01,1.516E-01,&
1.517E-01,1.517E-01,1.517E-01,1.517E-01,1.517E-01,1.517E-01,1.517E-01,1.517E-01,1.517E-01,1.517E-01,&
1.517E-01,1.521E-01,1.525E-01,1.530E-01,1.534E-01,1.538E-01,1.541E-01,1.544E-01,1.547E-01,1.550E-01/
DATA (Rsoil2(i),i=1201,1300)/&
1.553E-01,1.555E-01,1.557E-01,1.558E-01,1.560E-01,1.562E-01,1.568E-01,1.573E-01,1.578E-01,1.584E-01,&
1.589E-01,1.590E-01,1.590E-01,1.591E-01,1.592E-01,1.592E-01,1.594E-01,1.595E-01,1.597E-01,1.598E-01,&
1.600E-01,1.600E-01,1.600E-01,1.600E-01,1.600E-01,1.600E-01,1.603E-01,1.606E-01,1.608E-01,1.611E-01,&
1.614E-01,1.618E-01,1.621E-01,1.625E-01,1.628E-01,1.631E-01,1.631E-01,1.632E-01,1.632E-01,1.632E-01,&
1.632E-01,1.632E-01,1.632E-01,1.632E-01,1.632E-01,1.632E-01,1.632E-01,1.632E-01,1.632E-01,1.632E-01,&
1.632E-01,1.633E-01,1.635E-01,1.636E-01,1.638E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,&
1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,&
1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.639E-01,1.638E-01,1.638E-01,&
1.638E-01,1.639E-01,1.640E-01,1.641E-01,1.641E-01,1.642E-01,1.642E-01,1.642E-01,1.642E-01,1.642E-01,&
1.642E-01,1.643E-01,1.643E-01,1.644E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01/
DATA (Rsoil2(i),i=1301,1400)/&
1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,&
1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.645E-01,1.642E-01,1.639E-01,1.636E-01,1.633E-01,&
1.630E-01,1.630E-01,1.630E-01,1.630E-01,1.630E-01,1.630E-01,1.629E-01,1.627E-01,1.626E-01,1.624E-01,&
1.623E-01,1.623E-01,1.623E-01,1.623E-01,1.623E-01,1.623E-01,1.617E-01,1.612E-01,1.607E-01,1.602E-01,&
1.597E-01,1.597E-01,1.597E-01,1.597E-01,1.597E-01,1.597E-01,1.597E-01,1.597E-01,1.597E-01,1.597E-01,&
1.597E-01,1.594E-01,1.592E-01,1.590E-01,1.588E-01,1.586E-01,1.585E-01,1.585E-01,1.584E-01,1.584E-01,&
1.584E-01,1.580E-01,1.576E-01,1.572E-01,1.568E-01,1.563E-01,1.559E-01,1.555E-01,1.550E-01,1.546E-01,&
1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,&
1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,&
1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01,1.541E-01/
DATA (Rsoil2(i),i=1401,1500)/&
1.541E-01,1.540E-01,1.539E-01,1.538E-01,1.538E-01,1.537E-01,1.537E-01,1.537E-01,1.537E-01,1.537E-01,&
1.537E-01,1.537E-01,1.537E-01,1.537E-01,1.537E-01,1.537E-01,1.536E-01,1.535E-01,1.534E-01,1.534E-01,&
1.533E-01,1.533E-01,1.533E-01,1.533E-01,1.533E-01,1.533E-01,1.531E-01,1.528E-01,1.526E-01,1.524E-01,&
1.522E-01,1.519E-01,1.517E-01,1.515E-01,1.512E-01,1.510E-01,1.504E-01,1.497E-01,1.491E-01,1.484E-01,&
1.478E-01,1.473E-01,1.468E-01,1.463E-01,1.458E-01,1.453E-01,1.441E-01,1.429E-01,1.417E-01,1.404E-01,&
1.392E-01,1.384E-01,1.375E-01,1.366E-01,1.358E-01,1.349E-01,1.341E-01,1.332E-01,1.323E-01,1.315E-01,&
1.306E-01,1.287E-01,1.269E-01,1.250E-01,1.231E-01,1.212E-01,1.192E-01,1.171E-01,1.151E-01,1.131E-01,&
1.111E-01,1.077E-01,1.044E-01,1.010E-01,9.765E-02,9.430E-02,9.322E-02,9.214E-02,9.106E-02,8.998E-02,&
8.890E-02,8.870E-02,8.850E-02,8.829E-02,8.809E-02,8.789E-02,8.823E-02,8.857E-02,8.891E-02,8.924E-02,&
8.958E-02,9.038E-02,9.118E-02,9.198E-02,9.278E-02,9.358E-02,9.411E-02,9.464E-02,9.518E-02,9.571E-02/
DATA (Rsoil2(i),i=1501,1600)/&
9.624E-02,9.694E-02,9.765E-02,9.835E-02,9.905E-02,9.975E-02,9.931E-02,9.886E-02,9.842E-02,9.797E-02,&
9.752E-02,9.511E-02,9.269E-02,9.027E-02,8.786E-02,8.544E-02,8.363E-02,8.182E-02,8.002E-02,7.821E-02,&
7.640E-02,7.433E-02,7.226E-02,7.020E-02,6.813E-02,6.606E-02,6.446E-02,6.285E-02,6.124E-02,5.963E-02,&
5.802E-02,5.701E-02,5.600E-02,5.499E-02,5.398E-02,5.297E-02,5.225E-02,5.154E-02,5.083E-02,5.011E-02,&
4.940E-02,4.883E-02,4.826E-02,4.769E-02,4.712E-02,4.655E-02,4.626E-02,4.597E-02,4.567E-02,4.538E-02,&
4.508E-02,4.508E-02,4.508E-02,4.508E-02,4.507E-02,4.507E-02,4.529E-02,4.550E-02,4.572E-02,4.594E-02,&
4.615E-02,4.615E-02,4.615E-02,4.615E-02,4.614E-02,4.614E-02,4.614E-02,4.614E-02,4.613E-02,4.613E-02,&
4.613E-02,4.613E-02,4.612E-02,4.612E-02,4.612E-02,4.612E-02,4.626E-02,4.640E-02,4.655E-02,4.669E-02,&
4.683E-02,4.731E-02,4.779E-02,4.827E-02,4.875E-02,4.922E-02,4.965E-02,5.009E-02,5.052E-02,5.095E-02,&
5.138E-02,5.192E-02,5.246E-02,5.299E-02,5.353E-02,5.407E-02,5.463E-02,5.518E-02,5.574E-02,5.630E-02/
DATA (Rsoil2(i),i=1601,1700)/&
5.685E-02,5.702E-02,5.720E-02,5.737E-02,5.754E-02,5.771E-02,5.807E-02,5.843E-02,5.879E-02,5.916E-02,&
5.952E-02,6.013E-02,6.073E-02,6.134E-02,6.194E-02,6.255E-02,6.301E-02,6.347E-02,6.393E-02,6.439E-02,&
6.486E-02,6.524E-02,6.563E-02,6.601E-02,6.640E-02,6.679E-02,6.720E-02,6.761E-02,6.802E-02,6.843E-02,&
6.885E-02,6.943E-02,7.002E-02,7.060E-02,7.119E-02,7.177E-02,7.267E-02,7.356E-02,7.445E-02,7.534E-02,&
7.623E-02,7.693E-02,7.764E-02,7.834E-02,7.904E-02,7.974E-02,8.027E-02,8.080E-02,8.133E-02,8.186E-02,&
8.239E-02,8.283E-02,8.327E-02,8.371E-02,8.415E-02,8.459E-02,8.514E-02,8.570E-02,8.626E-02,8.681E-02,&
8.737E-02,8.764E-02,8.791E-02,8.817E-02,8.844E-02,8.871E-02,8.911E-02,8.951E-02,8.992E-02,9.032E-02,&
9.072E-02,9.072E-02,9.072E-02,9.071E-02,9.071E-02,9.071E-02,9.089E-02,9.107E-02,9.125E-02,9.143E-02,&
9.162E-02,9.209E-02,9.256E-02,9.303E-02,9.350E-02,9.397E-02,9.447E-02,9.496E-02,9.546E-02,9.596E-02,&
9.646E-02,9.691E-02,9.737E-02,9.782E-02,9.827E-02,9.872E-02,9.886E-02,9.899E-02,9.913E-02,9.926E-02/
DATA (Rsoil2(i),i=1701,1800)/&
9.940E-02,9.976E-02,1.001E-01,1.005E-01,1.008E-01,1.012E-01,1.018E-01,1.025E-01,1.031E-01,1.037E-01,&
1.043E-01,1.045E-01,1.047E-01,1.049E-01,1.051E-01,1.053E-01,1.059E-01,1.064E-01,1.070E-01,1.075E-01,&
1.081E-01,1.081E-01,1.081E-01,1.081E-01,1.081E-01,1.081E-01,1.086E-01,1.092E-01,1.098E-01,1.103E-01,&
1.109E-01,1.111E-01,1.114E-01,1.116E-01,1.118E-01,1.121E-01,1.121E-01,1.122E-01,1.123E-01,1.124E-01,&
1.124E-01,1.128E-01,1.132E-01,1.136E-01,1.140E-01,1.144E-01,1.145E-01,1.146E-01,1.147E-01,1.148E-01,&
1.150E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,&
1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,&
1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,1.149E-01,&
1.149E-01,1.149E-01,1.150E-01,1.151E-01,1.152E-01,1.152E-01,1.155E-01,1.157E-01,1.160E-01,1.163E-01,&
1.165E-01,1.165E-01,1.166E-01,1.166E-01,1.166E-01,1.167E-01,1.174E-01,1.181E-01,1.188E-01,1.196E-01/
DATA (Rsoil2(i),i=1801,1900)/&
1.203E-01,1.201E-01,1.200E-01,1.199E-01,1.197E-01,1.196E-01,1.191E-01,1.186E-01,1.181E-01,1.176E-01,&
1.171E-01,1.167E-01,1.163E-01,1.159E-01,1.156E-01,1.152E-01,1.152E-01,1.152E-01,1.151E-01,1.151E-01,&
1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,&
1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,&
1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,1.151E-01,&
1.151E-01,1.150E-01,1.149E-01,1.148E-01,1.148E-01,1.147E-01,1.141E-01,1.134E-01,1.128E-01,1.121E-01,&
1.115E-01,1.115E-01,1.115E-01,1.114E-01,1.114E-01,1.114E-01,1.111E-01,1.108E-01,1.105E-01,1.102E-01,&
1.099E-01,1.097E-01,1.095E-01,1.093E-01,1.091E-01,1.089E-01,1.086E-01,1.084E-01,1.082E-01,1.079E-01,&
1.077E-01,1.074E-01,1.071E-01,1.068E-01,1.064E-01,1.061E-01,1.056E-01,1.051E-01,1.046E-01,1.041E-01,&
1.036E-01,1.029E-01,1.021E-01,1.013E-01,1.005E-01,9.971E-02,9.941E-02,9.910E-02,9.879E-02,9.849E-02/
DATA (Rsoil2(i),i=1901,2000)/&
9.818E-02,9.818E-02,9.818E-02,9.817E-02,9.817E-02,9.817E-02,9.817E-02,9.817E-02,9.816E-02,9.816E-02,&
9.816E-02,9.809E-02,9.802E-02,9.795E-02,9.788E-02,9.781E-02,9.741E-02,9.700E-02,9.660E-02,9.620E-02,&
9.580E-02,9.537E-02,9.495E-02,9.453E-02,9.411E-02,9.369E-02,9.319E-02,9.269E-02,9.219E-02,9.169E-02,&
9.118E-02,9.118E-02,9.118E-02,9.118E-02,9.117E-02,9.117E-02,9.078E-02,9.039E-02,9.000E-02,8.961E-02,&
8.922E-02,8.888E-02,8.853E-02,8.819E-02,8.785E-02,8.750E-02,8.779E-02,8.808E-02,8.837E-02,8.866E-02,&
8.895E-02,8.865E-02,8.836E-02,8.807E-02,8.777E-02,8.748E-02,8.680E-02,8.613E-02,8.546E-02,8.478E-02,&
8.411E-02,8.332E-02,8.254E-02,8.176E-02,8.097E-02,8.019E-02,8.011E-02,8.003E-02,7.995E-02,7.987E-02,&
7.979E-02,7.946E-02,7.914E-02,7.882E-02,7.849E-02,7.817E-02,7.754E-02,7.691E-02,7.628E-02,7.565E-02,&
7.502E-02,7.496E-02,7.489E-02,7.483E-02,7.476E-02,7.470E-02,7.394E-02,7.318E-02,7.242E-02,7.167E-02,&
7.091E-02,7.083E-02,7.074E-02,7.066E-02,7.058E-02,7.050E-02,7.015E-02,6.981E-02,6.946E-02,6.912E-02/
DATA (Rsoil2(i),i=2001,2101)/&
6.878E-02,6.878E-02,6.877E-02,6.877E-02,6.877E-02,6.877E-02,6.803E-02,6.730E-02,6.657E-02,6.584E-02,&
6.511E-02,6.445E-02,6.379E-02,6.313E-02,6.247E-02,6.182E-02,6.167E-02,6.153E-02,6.138E-02,6.124E-02,&
6.110E-02,6.018E-02,5.927E-02,5.835E-02,5.743E-02,5.652E-02,5.619E-02,5.585E-02,5.552E-02,5.519E-02,&
5.485E-02,5.485E-02,5.485E-02,5.485E-02,5.484E-02,5.484E-02,5.455E-02,5.425E-02,5.396E-02,5.366E-02,&
5.337E-02,5.337E-02,5.337E-02,5.336E-02,5.336E-02,5.336E-02,5.316E-02,5.297E-02,5.278E-02,5.258E-02,&
5.239E-02,5.192E-02,5.145E-02,5.099E-02,5.052E-02,5.005E-02,4.963E-02,4.921E-02,4.878E-02,4.836E-02,&
4.793E-02,4.777E-02,4.761E-02,4.744E-02,4.728E-02,4.711E-02,4.727E-02,4.743E-02,4.759E-02,4.774E-02,&
4.790E-02,4.796E-02,4.801E-02,4.807E-02,4.813E-02,4.818E-02,4.833E-02,4.847E-02,4.861E-02,4.876E-02,&
4.890E-02,4.904E-02,4.919E-02,4.933E-02,4.947E-02,4.962E-02,4.961E-02,4.961E-02,4.961E-02,4.961E-02,&
4.960E-02,4.960E-02,4.960E-02,4.960E-02,4.959E-02,4.959E-02,4.944E-02,4.930E-02,4.915E-02,4.900E-02,&
4.885E-02/
END'''
| [
"dr.p.e.lewis@gmail.com"
] | dr.p.e.lewis@gmail.com |
55756175ac08d7f85e1b57a91c4aeda567fb4e13 | e0fa727439ea733e88d30bfeab790064643a5003 | /freeze.py | c2777ce4191245e881f5fe18c4541f7589413c3f | [] | no_license | MichelleGlauser/michelleglauser.com | 6c0bcc79665afb251dfb6e2692adfca29b4d7a6f | fd705da75fe2cb9afc15f6714d5b1c2ddd2e7f8a | refs/heads/main | 2022-11-12T01:21:33.380982 | 2022-10-30T22:07:18 | 2022-10-30T22:07:18 | 26,449,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | from flask_frozen import Freezer
from views import app
# Frozen-Flask wraps the Flask app so every registered route can be
# rendered out to static files (Freezer is imported on the line above).
freezer = Freezer(app)
if __name__ == '__main__':
    # Build the static site: writes each route's response to disk.
    freezer.freeze()
"michelleglauser@gmail.com"
] | michelleglauser@gmail.com |
a3c134bcd54bd3305d135906d28c079de037afde | 63dd54e480af7e5f2a13cc89947287e1f0f715bc | /library/docker/htmDocker/htmHome/__init__.py | c8dda2a9ee659cbab7a87d6ba37159020ea19eb4 | [
"Apache-2.0"
] | permissive | wsgan001/unSAD | 7422b3164bcfede4c30627bd349522b483322bf4 | 9f1d0e680a0086d140bc8d1c55fe21dd7de87df5 | refs/heads/master | 2021-05-17T04:58:52.047427 | 2019-12-16T18:03:02 | 2019-12-16T18:03:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | # Created by Xinyu Zhu on 10/3/2019, 11:14 AM
| [
"xzhuah@connect.ust.hk"
] | xzhuah@connect.ust.hk |
669f9172e8d0d06c0d067148ceb6bf2b411402ab | dc898ce913709ad282a2148729ad5733f44c5aa9 | /code/pcode/tools/plot_graph.py | ff887f082497950fdb3e211a8979140411502425 | [] | no_license | ZexiLee/quasi-global-momentum | 609c0e8ccadd70c0f5f22eeca965cfd2e649cc23 | 6bad90a65a183dc698e1087dc2c01bb7f901f607 | refs/heads/master | 2023-06-09T07:19:19.955961 | 2021-06-16T15:05:47 | 2021-06-16T15:05:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,814 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 11:45:07 2020
@author: aransil
"""
import math
import plotly.graph_objects as go
import networkx as nx
# Start and end are lists defining start and end points
# Edge x and y are lists used to construct the graph
# arrowAngle and arrowLength define properties of the arrowhead
# arrowPos is None, 'middle' or 'end' based on where on the edge you want the arrow to appear
# arrowLength is the length of the arrowhead
# arrowAngle is the angle in degrees that the arrowhead makes with the edge
# dotSize is the plotly scatter dot size you are using (used to even out line spacing when you have a mix of edge lengths)
def add_edge(
    start,
    end,
    edge_x,
    edge_y,
    lengthFrac=1,
    arrowPos=None,
    arrowLength=0.025,
    arrowAngle=30,
    dotSize=20,
):
    """Append one edge (and an optional arrowhead) to the plotly line lists.

    start/end are (x, y) points; edge_x/edge_y accumulate the coordinates of
    a plotly "lines" trace, with None inserted after every segment so that
    consecutive segments are not joined.  lengthFrac is the fraction of the
    edge actually drawn, arrowPos is None, 'middle'/'mid' or 'end',
    arrowLength/arrowAngle describe the arrowhead, and dotSize is the node
    marker size (the line is shortened so it does not run under the dots).
    Returns (edge_x, edge_y).
    """
    def push(seg_x0, seg_x1, seg_y0, seg_y1):
        # One drawn segment: two endpoints plus None as a break marker.
        edge_x.extend((seg_x0, seg_x1, None))
        edge_y.extend((seg_y0, seg_y1, None))

    ax, ay = start
    bx, by = end

    # Reduce the drawn fraction by however much of the edge a dot covers.
    edge_len = math.sqrt((bx - ax) ** 2 + (by - ay) ** 2)
    dot_diameter = dotSize * (0.0565 / 20)  # length units per dot-size unit
    frac = lengthFrac - dot_diameter / edge_len

    # Trim both ends symmetrically by the skipped fraction.
    trim_x = (bx - ax) * (1 - frac)
    trim_y = (by - ay) * (1 - frac)
    ax = ax + trim_x / 2
    bx = bx - trim_x / 2
    ay = ay + trim_y / 2
    by = by - trim_y / 2

    # The visible line segment for this edge.
    push(ax, bx, ay, by)

    if arrowPos is not None:
        # The arrow tip sits at the (trimmed) end unless 'middle' is asked for.
        tip_x, tip_y = bx, by
        eta = math.degrees(math.atan((bx - ax) / (by - ay)))
        if arrowPos in ('middle', 'mid'):
            tip_x = ax + (bx - ax) / 2
            tip_y = ay + (by - ay) / 2

        # Direction signs of the edge (caller guarantees bx != ax, by != ay).
        sign_x = (bx - ax) / abs(bx - ax)
        sign_y = (by - ay) / abs(by - ay)

        # Two barbs, one on each side of the edge direction.
        for barb_angle in (eta + arrowAngle, eta - arrowAngle):
            dx = arrowLength * math.sin(math.radians(barb_angle))
            dy = arrowLength * math.cos(math.radians(barb_angle))
            push(tip_x, tip_x - sign_x ** 2 * sign_y * dx,
                 tip_y, tip_y - sign_x ** 2 * sign_y * dy)

    return edge_x, edge_y
def ploty_draw_graph(G):
    """Render a networkx graph G as an interactive plotly figure.

    Nodes are laid out with a spring layout, coloured by connection count,
    and edges (with end arrowheads) are drawn via add_edge().
    """
    # Style constants.  NOTE(review): nodeColor is never used below —
    # node colour comes from the RdBu colorscale instead.
    nodeColor = "Blue"
    nodeSize = 20
    lineWidth = 2
    lineColor = "#000000"
    # Compute positions once and cache them on the nodes.
    pos = nx.spring_layout(G, k=0.5, iterations=50)
    for node in G.nodes:
        G.nodes[node]["pos"] = list(pos[node])
    # Make list of nodes for plotly
    node_x = []
    node_y = []
    for node in G.nodes():
        x, y = G.nodes[node]["pos"]
        node_x.append(x)
        node_y.append(y)
    # Make a list of edges for plotly, including line segments that result in arrowheads
    edge_x = []
    edge_y = []
    for edge in G.edges():
        start = G.nodes[edge[0]]["pos"]
        end = G.nodes[edge[1]]["pos"]
        # add_edge divides by the coordinate deltas, so strictly vertical or
        # horizontal edges are skipped to avoid division by zero.
        if start[0] != end[0] and start[1] != end[1]:
            edge_x, edge_y = add_edge(
                start,
                end,
                edge_x,
                edge_y,
                lengthFrac=0.95,
                arrowPos="end",
                arrowLength=0.025,
                arrowAngle=30,
                dotSize=nodeSize,
            )
    # One trace for all edge segments (None entries break the line).
    edge_trace = go.Scatter(
        x=edge_x,
        y=edge_y,
        line=dict(width=lineWidth, color=lineColor),
        hoverinfo="none",
        mode="lines",
    )
    # Node trace starts empty; coordinates/colours are appended below.
    node_trace = go.Scatter(
        x=[],
        y=[],
        text=[],
        mode="markers",
        hoverinfo="text",
        marker=dict(
            showscale=True,
            colorscale="RdBu",
            reversescale=True,
            color=[],
            size=15,
            colorbar=dict(
                thickness=10,
                title="Node Connections",
                xanchor="left",
                titleside="right",
            ),
            line=dict(width=0),
        ),
    )
    for node in G.nodes():
        x, y = G.nodes[node]["pos"]
        node_trace["x"] += tuple([x])
        node_trace["y"] += tuple([y])
    # Colour each node by its degree and build its hover text.
    for node, adjacencies in enumerate(G.adjacency()):
        node_trace["marker"]["color"] += tuple([len(adjacencies[1])])
        node_info = (
            str(adjacencies[0]) + " # of connections: " + str(len(adjacencies[1]))
        )
        node_trace["text"] += tuple([node_info])
    fig = go.Figure(
        data=[edge_trace, node_trace],
        layout=go.Layout(
            title="",
            titlefont=dict(size=16),
            showlegend=False,
            hovermode="closest",
            margin=dict(b=20, l=5, r=5, t=40),
            annotations=[dict(text="", showarrow=False, xref="paper", yref="paper")],
            xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
            yaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
        ),
    )
    fig.show()
| [
"itamtao@gmail.com"
] | itamtao@gmail.com |
a03c09b8f1b77a4eccf94d0566153de13f80cfca | c5afac77666f68b19da78d133e8f9f3b54dcd12d | /snippets/python/generator/countdown.py | 0c054aa7dff3ee0976df58c8116e265675b78fa1 | [] | no_license | ko/random | f783bbd9a59ee38961a1a7f018e53cfef0d18511 | e16f41ffbdab9b44554f797c5fa5dcab6cefb859 | refs/heads/master | 2020-06-05T14:58:55.204318 | 2013-09-19T00:38:26 | 2013-09-19T00:38:26 | 2,125,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | #!/usr/bin/python2.7
def countdown(n):
    """Generate n, n-1, ... down to (but not including) values <= 0."""
    remaining = n
    while remaining > 0:
        yield remaining
        remaining = remaining - 1
# Demo: print 10 down to 1 (Python 2 print statement, matching the
# python2.7 shebang above).
for i in countdown(10):
    print i
| [
"ko@yaksok.net"
] | ko@yaksok.net |
d45d1a59c53baf62ba1ea99db08a5a63d8dfe1b9 | afb35f66fd3a6caffa33048959cc26cf916833bc | /facedataset.py | b597ff65837a2ae9ed5f5636588d5b613c0a45d7 | [] | no_license | ali-hamzaa/face-recognizationa | 63c6effb28b6bac45510018b788ac2d8a86da443 | f6ddd1878028b223ab6b4644f8ae43c8ac46c649 | refs/heads/master | 2020-07-16T05:11:41.553393 | 2019-09-01T20:08:24 | 2019-09-01T20:08:24 | 205,726,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | import cv2
import os
# Capture up to 30 grayscale face crops for one user id from the default
# webcam and save them as dataset/User.<id>.<n>.jpg for recognizer training.
cam = cv2.VideoCapture(0)
#cam.set(3, 640) # set video width
#cam.set(4, 480) # set video height
# Haar cascade for frontal-face detection; the XML must sit next to the script.
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# For each person, enter one numeric face id
face_id = input('\n enter user id end press <return> ==> ')
print("\n [INFO] Initializing face capture. Look the camera and wait ...")
# Initialize individual sampling face count
count = 0
while(True):
    ret, img = cam.read()
    #img = cv2.flip(img, -1) # flip video image vertically
    # Detection runs on grayscale frames.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        cv2.rectangle(img, (x,y), (x+w,y+h), (255,0,0), 2)
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h,x:x+w])
        cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff # Press 'ESC' for exiting video
    if k == 27:
        break
    elif count >= 30: # Take 30 face sample and stop video
        break
# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | ali-hamzaa.noreply@github.com |
b1c33417df30b02f42b8769064907be79d4d1b31 | 517873cab25009ae61e09feb44c5581dc5ee7a16 | /Lesson_1/part01_lesson01_task04.py | 02f7d7827b4ceb5a34628b7c7fda635bbd163017 | [] | no_license | I-bluebeard-I/Python-Study | a34ee02b6c6e5ad9336dfd1084824ef7794c612b | a40084b97d3df2c2c7d3c4ce0b2062d42f6c16b8 | refs/heads/main | 2023-04-11T08:46:22.856138 | 2021-03-19T21:24:22 | 2021-03-19T21:24:22 | 340,944,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | """
ЗАДАНИЕ 4
Склонение слова
Реализовать склонение слова «процент» во фразе «N процентов». Вывести эту фразу на экран
отдельной строкой для каждого из чисел в интервале от 1 до 100:
1 процент
2 процента
3 процента
4 процента
5 процентов
6 процентов
...
100 процентов
"""
number = 0
percent = {
'0': 'процентов',
'1': 'процент',
'2': 'процента',
'3': 'процента',
'5': 'процентов',
'4': 'процента',
'6': 'процентов',
'7': 'процентов',
'8': 'процентов',
'9': 'процентов',
}
while number < 100:
number += 1
if 14 >= number >= 10:
print(number, percent['0'])
else:
str_number = str(number)
print(number, percent[str_number[-1]])
| [
"_boroda_@mail.ru"
] | _boroda_@mail.ru |
15d4e730d6ba685398af844771351ed5063c928c | ab566f65adfc165b6e1dcc04a4524af5307fd473 | /HackerRank-python/build/lib/HackerRank/models/Languages.py | 8fb1ef81bc4a2561131b58bfc056186aff6b2792 | [] | no_license | Christopher312/Coding-Game | 7382edb32f3a89054c792d2fa90908d863997949 | 91d908c0ffa36e050fecd50ac3a70895a6bbb64b | refs/heads/master | 2022-10-05T16:19:48.914874 | 2016-02-15T15:57:04 | 2016-02-15T15:57:04 | 51,224,505 | 0 | 0 | null | 2022-09-16T17:44:49 | 2016-02-06T22:09:35 | HTML | UTF-8 | Python | false | false | 992 | py | #!/usr/bin/env python
"""
Copyright 2014 Wordnik, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Languages:
    """Swagger model for the languages resource.

    NOTE: originally produced by the swagger code generator; the attribute
    names and swaggerTypes mapping are part of the client's wire contract.
    """

    def __init__(self):
        # Attribute name -> swagger type name, consumed by the client's
        # (de)serialization machinery.
        self.swaggerTypes = {
            'codes': 'Codes',
            'names': 'Names',
        }
        # Payload attributes start unset; they are populated when an API
        # response is deserialized into this model.
        self.codes = None  # Codes
        self.names = None  # Names
| [
"proundahspam@gmail.com"
] | proundahspam@gmail.com |
a8e6de48cb4be2ef706bcc392a9a5138ca978413 | 306957f27846db6412374004931b2636ebdbf577 | /oldTests/openingTest.py | 10beea89294e4cc60dd13566a93bbf614409434e | [
"MIT"
] | permissive | madisonflaherty/Food | dc8de9382f68e6a29449de532b75592d9115fb4c | 2251b76ce084c2be1ebafe2916d391bd2d383dd6 | refs/heads/master | 2020-06-05T20:04:18.338830 | 2014-03-14T20:50:02 | 2014-03-14T20:50:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | """
Test to determine easiness of pulling HTML from a website with python
Author: Madison Flaherty
Date: January 14, 2014
"""
import urllib.request
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
    """
    Printer for menu section divs: emits each text node preceded by a
    newline (parcer() feeds it the non "-item" location-menu lines).
    """
    def handle_data(self, data):
        # Called by HTMLParser for every text node in the fed markup.
        print("\n", data)
class MyHTMLParser2(HTMLParser):
    """
    Printer for menu item divs: emits each text node indented under its
    section (parcer() feeds it the "location-menu-item" lines).
    """
    def handle_data(self, data):
        # Called by HTMLParser for every text node in the fed markup.
        print("    ", data)
def openURL(loc):
    """Open the RIT dining-services page for the given location key.

    The pages update daily.  Returns the response object from
    urllib.request.urlopen for a recognised key, or None for an unknown
    key (the caller treats None as "no such dining location").
    """
    # Table-driven dispatch replaces the original if-chain; behaviour is
    # identical, including None for unrecognised keys.
    urls = {
        "commons": "http://www.rit.edu/fa/diningservices/commons",
        "brick": "http://www.rit.edu/fa/diningservices/brickcity",
        "gv": "http://www.rit.edu/fa/diningservices/gvcantinagrille",
        "cross": "http://www.rit.edu/fa/diningservices/crossroads",
        "gracies": "http://www.rit.edu/fa/diningservices/content/gracies",
        "rSZ": "http://www.rit.edu/fa/diningservices/content/ritz-sports-zone",
    }
    target = urls.get(loc)
    if target is None:
        return None
    # Only the selected page is fetched (one network request per call).
    return urllib.request.urlopen(target)
def ask():
    """Prompt for a dining-location key, fetch its page and print the menu.

    Recurses until openURL() recognises the entered key; the response is
    then split into lines by lister() and the menu divs printed by parcer().
    """
    answer = input("Which dining location would you like to see? ")
    url = openURL(answer)
    if url == None:
        print(" No such dinning location exists...")
        # Unknown key: prompt again (unbounded recursion on repeated typos).
        ask()
    else:
        #print(url)
        #Print the day's menu'
        #url = str(url.read)
        #print(url)
        lst = lister(url)
        #print(lst)
        parcer(lst)
def parcer(lst):
    """Feed menu-related <div> lines from lst to the two printing parsers.

    Lines opening a 'location-menu' div go to the section printer; the
    'location-menu-item' variant goes to the indented item printer.  The
    first 25 characters of every line are echoed for debugging, matching
    the original behaviour.
    """
    section_printer = MyHTMLParser()
    item_printer = MyHTMLParser2()
    prefix = '<div class="location-menu'
    for entry in lst:
        print(entry[0:25])
        if entry[0:25] == prefix:
            if entry[25:30] == '-item':
                item_printer.feed(str(entry))
            else:
                section_printer.feed(str(entry))
def lister(url):
    """Return each line of the response as a stripped string.

    The first two characters of str(line) are dropped — presumably the
    "b'" prefix of a bytes repr from the HTTP response; confirm against
    the caller if that assumption changes.
    """
    return [str(raw)[2:].strip() for raw in url]
def main():
    # Entry point: kick off the interactive prompt loop.
    ask()
# Runs immediately on execution (this test script has no __main__ guard).
main()
| [
"mef4824@rit.edu"
] | mef4824@rit.edu |
31eaaced2612fb97f044dfd29a315b2cd8f47557 | 47551d14f566096841b298015da4ff12d405ca09 | /mendelsp-2.5-beta/scipaas/plots.py | d789b930345228bd17ba6eae7a89cd35ac2edb37 | [
"MIT"
] | permissive | BibleEtScienceDiffusion/Mendel-Windows | 9f3c202f0ee97b6d74756ef225cef594d6a624a8 | 8ce1f2dab04e5b05e04cf626457120cf3fca8360 | refs/heads/master | 2022-04-20T07:02:09.910640 | 2020-04-21T13:29:04 | 2020-04-21T13:29:04 | 257,597,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | #import json
import re, string
import config
class plot(object):
    """Readers that turn whitespace-separated numeric text files into the
    Javascript-style "[ [x, y], ... ]" strings consumed by the web UI.

    Conventions shared by the readers:
      * column and line numbers are 1-indexed;
      * a negative line1 tails the file (e.g. -100 keeps roughly the last
        100 lines);
      * any line containing '#' is treated as a comment and skipped;
      * on any error the methods return False instead of raising.
    """

    def get_data(self, fn, col1, col2, line1=1, line2=1e6):
        """Return columns col1 (x) and col2 (y) of fn as a string
        "[ [x1, y1], [x2, y2], ... ]", or False on error."""
        try:
            # Mode 'r': universal newlines are the default in Python 3
            # (the old 'rU' flag was removed in Python 3.11).
            with open(fn, 'r') as f:
                data = f.readlines()
            nlines = len(data)
            # Allow tailing a file by giving a negative range, e.g. -100:10000.
            if line1 < 0:
                line1 += nlines
            pairs = []
            for lineno, line in enumerate(data, start=1):
                if lineno < line1 or lineno > line2:
                    continue
                # Don't parse comments.
                if re.search(r'#', line):
                    continue
                x = line.split()
                pairs.append('[ ' + x[col1 - 1] + ', ' + x[col2 - 1] + '], ')
            return "[ %s ]" % ''.join(pairs)
        except Exception:
            # Historical contract: any failure (missing file, short line,
            # bad column index) yields False.
            return False

    def get_data_gantt(self, fn, col1, col2, col3, col4, line1=1, line2=1e6):
        """Like get_data() but emitting four columns per row.

        NOTE(review): columns col2..col4 are concatenated with no separator
        ("[ a, bcd], "), which looks like a missing ', ' — the historical
        output format is preserved here; confirm with the consumer before
        changing it.
        """
        try:
            with open(fn, 'r') as f:
                data = f.readlines()
            nlines = len(data)
            if line1 < 0:
                line1 += nlines
            entries = []
            for lineno, line in enumerate(data, start=1):
                if lineno < line1 or lineno > line2:
                    continue
                if re.search(r'#', line):
                    continue
                x = line.split()
                entries.append('[ ' + x[col1 - 1] + ', ' + x[col2 - 1]
                               + x[col3 - 1] + x[col4 - 1] + '], ')
            return "[ %s ]" % ''.join(entries)
        except Exception:
            return False

    def get_raw_data(self, fn, line1=1, line2=1e6):
        """Return lines [line1:line2] of fn as a list of raw strings.

        The bounds are coerced to int so the float default line2=1e6 is a
        usable slice index (a bare float slice raises TypeError).  Note the
        default line1=1 skips the first line — plain Python slice
        semantics, matching the 1-indexed convention of the other readers.
        """
        with open(fn, 'r') as f:
            data = f.readlines()
        return data[int(line1):int(line2)]

    def get_column_of_data(self, fn, col, line1=1, line2=1e6):
        """Return column col (as a list of strings) for lines line1..line2,
        or False on error."""
        try:
            with open(fn, 'r') as f:
                data = f.readlines()
            nlines = len(data)
            # Negative line1 tails the file, as in get_data().
            if line1 < 0:
                line1 += nlines
            column = []
            for lineno, line in enumerate(data, start=1):
                if lineno < line1 or lineno > line2:
                    continue
                if re.search(r'#', line):
                    continue
                x = line.split()
                column.append(x[col - 1])
            return column
        except Exception:
            return False

    def get_ticks(self, fn, col1, col2):
        """Return "[ [0, v0], [1, v1], ... ]" pairing a running index with
        column col1 of each data line, or False on error.

        col2 is accepted but unused (kept for interface compatibility).
        Lines containing two adjacent multi-letter words (header rows) emit
        no tick but still advance the index, as in the original.
        """
        try:
            ticks = []
            i = 0
            with open(fn, 'r') as f:
                for line in f:
                    # Don't parse comments (comment lines do not advance i).
                    if re.search(r'#', line):
                        continue
                    x = line.split()
                    if not re.search(r'[A-Za-z]{2,}\s+[A-Za-z]{2,}', line):
                        ticks.append('[ ' + str(i) + ', ' + x[col1 - 1] + '], ')
                    i += 1
            return "[ %s ]" % ''.join(ticks)
        except Exception:
            return False
| [
"eden@mifamofi.net"
] | eden@mifamofi.net |
6f47784a83d48d70c505c5245f66d76f5f608060 | 7de00051210b6b509e51159c32358871fea56c3c | /dataset/data_som_iris.py | 9935c8d17076597e5bf821e0360b6dfae4d2efd9 | [] | no_license | ziapple/fd-forecast | 36a465385dc189c541d79962b493c3e4bc00b396 | 93f0a0c03c0fb529407110cbed37349828e88adb | refs/heads/main | 2023-06-29T00:34:13.913478 | 2021-08-01T06:06:07 | 2021-08-01T06:06:07 | 382,564,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,576 | py | from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from minisom import MiniSom
from sklearn import datasets
import math
"""
附录:miniSomAPI参考
创建网络:创建时就会随机初始化网络权重
som = minisom.MiniSom(size,size,Input_size, sigma=sig,learning_rate=learning_rate, neighborhood_function='gaussian')
som.random_weights_init(X_train):随机选取样本进行初始化 som.pca_weights_init(X_train):PCA初始化
som.get_weights(): Returns the weights of the neural network
som.distance_map():Returns the distance map of the weights
som.activate(X): Returns the activation map to x 值越小的神经元,表示与输入样本 越匹配
som.quantization(X):Assigns a code book 给定一个 输入样本,找出该样本的优胜节点,然后返回该神经元的权值向量(每个元素对应一个输入单元)
som.winner(X): 给定一个 输入样本,找出该样本的优胜节点 格式:输出平面中的位置
som.win_map(X):将各个样本,映射到平面中对应的位置 返回一个dict { position: samples_list }
som.activation_response(X): 返回输出平面中,各个神经元成为 winner的次数 格式为 1个二维矩阵
quantization_error(量化误差): 输入样本 与 对应的winner神经元的weight 之间的 平方根
"""
# Number of training samples (N) and features per sample (M); both are
# filled in by load_data().
N = 0
M = 0
# Number of SOM training iterations.
max_iter = 200
# SOM output-grid edge length, computed in load_data() from N.
size = 0
def load_data():
    """Load the iris dataset and split it 70/30 into train and test sets.

    Side effects: sets the module globals N (training sample count),
    M (feature count) and size (SOM grid edge length, from the
    5*sqrt(N)-neurons rule of thumb).

    Returns:
        (x_train, x_test, y_train, y_test)
    """
    iris = datasets.load_iris()
    print('>> shape of data:',iris.data.shape)
    x = iris.data
    y = iris.target
    # 70/30 train/test split with a fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
    global N
    global M
    global size
    N = x_train.shape[0]
    M = x_train.shape[1]  # number of features per sample
    # Rule of thumb: the output grid should hold about 5*sqrt(N) neurons.
    size = math.ceil(np.sqrt(5 * np.sqrt(N)))
    print("训练样本个数:{} 测试样本个数:{}".format(N, x_test.shape[0]))
    print("输出网格最佳边长为:", size)
    # Bug fix: the original returned x_train twice, so callers never
    # received the held-out test features.
    return x_train, x_test, y_train, y_test
def train(x_train, y_train):
    """Fit a SOM on x_train and map each winning neuron to its labels.

    SOM training itself is unsupervised; y_train is only used afterwards,
    by labels_map, to associate each output neuron with the class labels
    of the samples that land on it.  The grid size (size x size) and input
    dimension M come from the module globals set by load_data().
    """
    # sigma/learning_rate follow the tutorial defaults; when tuning, the
    # neighborhood_function options 'gaussian', 'mexican_hat' and 'bubble'
    # are all worth trying.
    net = MiniSom(size, size, M, sigma=3, learning_rate=0.5,
                  neighborhood_function='bubble')
    net.pca_weights_init(x_train)
    net.train_batch(x_train, max_iter, verbose=False)
    winmap = net.labels_map(x_train, y_train)
    return net, winmap
def classify(som, data, winmap):
    """Label each sample in data via its winning neuron's majority class.

    winmap maps neuron positions to Counter-like label tallies.  Samples
    whose winner never appeared during training fall back to the most
    common label over the entire map.
    """
    # Summing every neuron's Counter gives the global label tally; its top
    # entry is the fallback class.
    default_class = np.sum(list(winmap.values())).most_common()[0][0]
    labels = []
    for sample in data:
        position = som.winner(sample)
        tally = winmap.get(position)
        if tally is not None:
            labels.append(tally.most_common()[0][0])
        else:
            labels.append(default_class)
    return labels
def predict(som, x_test, winmap, y_test):
    """Classify x_test via the SOM and print the per-class
    precision/recall report against y_test."""
    predicted = classify(som, x_test, winmap)
    print(classification_report(y_test, np.array(predicted)))
def show(som, x_train, y_train):
    """Plot the SOM U-Matrix with every training sample marked on its
    winning neuron, colour/shape-coded by class."""
    # Draw the U-Matrix (inter-neuron weight distances) as the background.
    heatmap = som.distance_map()
    plt.pcolor(heatmap, cmap='bone_r')  # plotting the distance map as background
    # Marker shape and colour per class label (iris species).
    markers = ['o', 's', 'D']
    colors = ['C0', 'C1', 'C2']
    category_color = {'setosa': 'C0', 'versicolor': 'C1', 'virginica': 'C2'}
    for cnt, xx in enumerate(x_train):
        w = som.winner(xx)  # getting the winner
        # Place the class marker at the centre of the winning grid cell.
        plt.plot(w[0] + .5, w[1] + .5, markers[y_train[cnt]], markerfacecolor='None',
                 markeredgecolor=colors[y_train[cnt]], markersize=12, markeredgewidth=2)
    plt.axis([0, size, 0, size])
    ax = plt.gca()
    ax.invert_yaxis()  # flip the y axis so the origin is at the top
    legend_elements = [Patch(facecolor=clr,
                             edgecolor='w',
                             label=l) for l, clr in category_color.items()]
    plt.legend(handles=legend_elements, loc='center left', bbox_to_anchor=(1, .95))
    plt.show()
def main():
    # NOTE(review): x_test/y_test are loaded but predict() is never called
    # here — only the training data is fitted and visualised.
    x_train, x_test, y_train, y_test = load_data()
    som, winmap = train(x_train, y_train)
    show(som, x_train, y_train)


if __name__ == "__main__":
    main()
| [
"ziapple@126.com"
] | ziapple@126.com |
48c38008dc8f830780911cc0ffbe98050fe9f2b8 | 337815ff32ebbf6e8dd2606f69d66e8efda4cd03 | /epi_judge_python_solutions/is_string_palindromic_punctuation.py | 8a74011a9f894f17696adcf9b67b7a1ac42109d9 | [] | no_license | federicociner/epi | b85eefbf5f5bad77e2e780ffbf4ac4f9ca0809a8 | 32f2a1056353bca55d0d5839be5e0b73809cb45d | refs/heads/master | 2020-12-19T09:22:43.430370 | 2020-02-04T02:34:53 | 2020-02-04T02:34:53 | 235,693,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from test_framework import generic_test
def is_palindrome(s: str) -> bool:
    """Return True if s is a palindrome when only alphanumeric characters
    are considered and case is ignored."""
    left, right = 0, len(s) - 1
    while left < right:
        # Advance both cursors past punctuation and whitespace.
        while left < right and not s[left].isalnum():
            left += 1
        while left < right and not s[right].isalnum():
            right -= 1
        if s[left].lower() != s[right].lower():
            return False
        left += 1
        right -= 1
    return True
def is_palindrome_pythonic(s):
    """Compact variant: normalise to lowercase alphanumerics, then compare
    the sequence with its reverse."""
    cleaned = [ch.lower() for ch in s if ch.isalnum()]
    return cleaned == cleaned[::-1]
if __name__ == "__main__":
    # EPI judge harness: replays the .tsv test vectors against
    # is_palindrome and exits with the harness's status code.
    exit(
        generic_test.generic_test_main(
            "is_string_palindromic_punctuation.py",
            "is_string_palindromic_punctuation.tsv",
            is_palindrome,
        )
    )
| [
"federico.ciner@gmail.com"
] | federico.ciner@gmail.com |
77ffa800cee616cbc92dbb8224e7af3e41aaee4c | 7f114a1fb511b816c116d5b9e67cb998e3e23956 | /PyplayS163.py | 8fb12da406b708d8118f33d4a51858ee26d8c0b8 | [] | no_license | Bharanij27/bharanirep | 90ac34eb28deaa7ec96d042de456de71b96866d7 | 982133a7939c889d433c178a601441fa087293d9 | refs/heads/master | 2021-08-07T20:22:36.244395 | 2020-06-05T04:58:10 | 2020-06-05T04:58:10 | 186,580,768 | 0 | 6 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | n,k=map(int,input().split())
# Read the n space-separated integers (n and the search key k were read
# from the first input line above).
l=list(map(int,input().split()))
# Linear membership test: report whether k occurs in the list.
if k in l: print("yes")
else: print("no")
| [
"noreply@github.com"
] | Bharanij27.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.