blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9001d2c6f019fe03b648b70ce2b79b123c5b6a56 | 29456f01a994427149bddfb3c83e054a08860953 | /L8T4-6.py | 8f8222baa2630a4b6253a9ef62bc4a921391d68c | [] | no_license | comradsnarky/PythonBasics | 6a25a79d6b36952f6f00bbc0cd78a7fdd5cd63cc | c0bdd9abf6784c349b98e8af6bc1cd4b12b3e8ad | refs/heads/master | 2022-12-03T14:54:43.860174 | 2020-08-16T18:12:40 | 2020-08-16T18:12:40 | 277,656,866 | 0 | 0 | null | 2020-08-16T18:00:29 | 2020-07-06T21:55:51 | null | UTF-8 | Python | false | false | 3,365 | py | class Stock:
class Stock:
    """Maintains a shared stock dictionary.

    The dictionary maps a product name (e.g. 'Laptop') to a list of
    attribute lists of the form [manufacturer, model, price, quantity, extra].
    """

    def __init__(self, *args):
        # args[0] is a consolidated product list:
        # [name, manufacturer, model, price, quantity, extra].
        self.args = args[0]

    def in_stock(self, stock_dict):
        """Register this product in *stock_dict* and return the dict.

        The dict is also remembered on the instance so out_stock() can
        operate on it later.
        """
        self.stock_dict = stock_dict
        name = self.args[0]
        attributes = self.args[1:6]
        # Idiomatic membership test (was `in self.stock_dict.keys()`), and a
        # plain item assignment (was `dict.update({...})`).
        if name in self.stock_dict:
            self.stock_dict[name].append(attributes)
        else:
            self.stock_dict[name] = [attributes]
        return self.stock_dict

    def out_stock(self, key, value, quantity):
        """Decrease the stored quantity of every entry matching *value*.

        Fixes vs. the original:
        * direct dict lookup instead of scanning every key;
        * no pointless ``self.stock_dict = self.__dict__.get('stock_dict')``
          self-assignment;
        * ``enumerate`` instead of ``list.index(el)``, which always returned
          the index of the FIRST equal entry, so duplicate entries were
          decremented twice on the first entry and never on the second.
        """
        self.key = key
        self.value = value
        self.quantity = quantity
        # Missing keys simply leave the dict unchanged, as before.
        for entry in self.stock_dict.get(key, []):
            if entry == value:
                # Quantity lives at position 3 of the attribute list.
                entry[3] = int(entry[3]) - quantity
        return self.stock_dict
class OfficeMachinery:
    """Base record describing a piece of office equipment."""

    def __init__(self, name, manufacturer, model, price, quantity):
        # Attributes are assigned in declaration order on purpose: the
        # subclasses serialize __dict__ positionally in consolidate().
        (self.name, self.mnf, self.model,
         self.price, self.quantity) = name, manufacturer, model, price, quantity
class Laptop(OfficeMachinery):
    """A laptop; adds a productivity-benchmark description."""

    def __init__(self, name, manufacturer, model, price, quantity, productivity_benchmark):
        super().__init__(name, manufacturer, model, price, quantity)
        self.p_b = productivity_benchmark

    def consolidate(self):
        """Return all attribute values as a list, in definition order.

        Replaces the original manual key loop with
        ``list(self.__dict__.values())``; dicts preserve insertion order
        (Python 3.7+), so the result is identical.
        """
        return list(self.__dict__.values())
class Copier(OfficeMachinery):
    """A copier; adds a copying-speed description."""

    def __init__(self, name, manufacturer, model, price, quantity, speed):
        super().__init__(name, manufacturer, model, price, quantity)
        self.speed = speed

    def consolidate(self):
        """Return all attribute values as a list, in definition order.

        Replaces the original manual key loop with
        ``list(self.__dict__.values())``; dicts preserve insertion order
        (Python 3.7+), so the result is identical.
        """
        return list(self.__dict__.values())
class Phone(OfficeMachinery):
    """A phone; adds a kind description (e.g. 'VoIP', 'Landline')."""

    def __init__(self, name, manufacturer, model, price, quantity, kind):
        super().__init__(name, manufacturer, model, price, quantity)
        self.kind = kind

    def consolidate(self):
        """Return all attribute values as a list, in definition order.

        Replaces the original manual key loop with
        ``list(self.__dict__.values())``; dicts preserve insertion order
        (Python 3.7+), so the result is identical.
        """
        return list(self.__dict__.values())
# Demo: fill a shared stock dictionary with six products, print it,
# remove some quantities, and print it again.
stock_dict = {}

catalogue = [
    Laptop('Laptop', 'Apple', 'MacBook Pro', 2400, 10, 'Geekbench score - 2997'),
    Copier('Copier', 'HPE', 'LJ-100', 1000, 5, '30 pages per minute'),
    Phone('Phone', 'Cisco', 'TR9875', 768, 7, 'VoIP'),
    Laptop('Laptop', 'Apple', 'MacBook Air', 1300, 6, 'Geekbench score - 2597'),
    Copier('Copier', 'Epson', 'LT450', 500, 3, '60 pages per minute'),
    Phone('Phone', 'Siemens', 'S98T', 200, 7, 'Landline'),
]

# One Stock wrapper per product; in_stock() mutates the shared dict.
stocks = [Stock(product.consolidate()) for product in catalogue]
for stock in stocks:
    stock.in_stock(stock_dict)

print(stock_dict)

# Any Stock that has seen the dict can take items out of it.
dispatcher = stocks[-1]
dispatcher.out_stock('Laptop', ['Apple', 'MacBook Air', 1300, 6, 'Geekbench score - 2597'], 2)
dispatcher.out_stock('Phone', ['Cisco', 'TR9875', 768, 7, 'VoIP'], 5)

print()
print(stock_dict)
| [
"nick440.np@gmail.com"
] | nick440.np@gmail.com |
aa787657413838040f7ea45bbca7038066f0600a | 009878462618c2df22bdf97fad9aefdb40873c91 | /Python-basic-programs/Arrays/Basics.py | e74310109cae1309b5f8fe8eb71f09ffeff12a31 | [] | no_license | amolsawant844/SEM-4 | 823330ffc8477b5fc713461ccadc74ef8c9bf14a | 57888e381c07553326a864df23afd16e52a213d5 | refs/heads/master | 2020-09-25T04:52:37.638443 | 2019-12-04T18:00:33 | 2019-12-04T18:00:33 | 225,921,232 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | import array
# Demonstrates the three import styles for the stdlib `array` module and
# basic one-dimensional typed arrays ('i' = signed int, 'u' = unicode char).
import array

# creation
a = array.array('i', [4, 6, 2, 9])
print(a)

import array as ar
b = ar.array('i', [4, 6, 2, 9])
print(b)

from array import *
c = array('i', [3, 45, 67, 89])
print(c)

# for character array
d = array('u', ['a', 'b', 'c', 'd'])
print("the array elements are:")
for i in d:
    print(i)

# single dimension
marks = array('i', [1, 2, 3, 4, 5])
# BUG FIX: array.array is strictly one-dimensional and its first argument
# must be a typecode, so the original `array([[1,2,3,4], ...])` raised
# TypeError. A "multi-dimensional" array is represented as a list of
# one-dimensional arrays instead.
marksmulti = [array('i', [1, 2, 3, 4]),
              array('i', [5, 6, 7, 8]),
              array('i', [9, 10, 11, 12])]
print("single dimesion", marks)
print("multi dimension", marksmulti)
"noreply@github.com"
] | amolsawant844.noreply@github.com |
aa718d4daf7a3e18af3a89fdaa2347cee9d3e036 | 7ba22c9826a1574777a08fb634ff15c56de6cb98 | /domain_adaptation/domain_separation/dsn_train.py | 2453d204f037ace6938252c8981854f5ef640dac | [] | no_license | dhanya1/full_cyclist | 02b85b8331f8ca9364169484ab97b32920cbbd14 | dd12c8d8a3deaaea15041e54f2e459a5041f11c2 | refs/heads/master | 2022-10-17T13:36:51.886476 | 2018-07-30T15:46:02 | 2018-07-30T15:46:02 | 142,896,293 | 0 | 1 | null | 2022-10-05T10:11:01 | 2018-07-30T15:46:15 | Python | UTF-8 | Python | false | false | 10,701 | py | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training for Domain Separation Networks (DSNs)."""
from __future__ import division
import tensorflow as tf
from domain_adaptation.datasets import dataset_factory
import dsn
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS

# ---- Input pipeline / dataset flags -----------------------------------------
tf.app.flags.DEFINE_integer('batch_size', 32,
                            'The number of images in each batch.')

tf.app.flags.DEFINE_string('source_dataset', 'pose_synthetic',
                           'Source dataset to train_bkp on.')

tf.app.flags.DEFINE_string('target_dataset', 'pose_real',
                           'Target dataset to train_bkp on.')

tf.app.flags.DEFINE_string('target_labeled_dataset', 'none',
                           'Target dataset to train_bkp on.')

tf.app.flags.DEFINE_string('dataset_dir', None,
                           'The directory where the dataset files are stored.')

tf.app.flags.DEFINE_string('master', '',
                           'BNS name of the TensorFlow master to use.')

tf.app.flags.DEFINE_string('train_log_dir', '/tmp/da/',
                           'Directory where to write event logs.')

# ---- Loss weights and regularization ----------------------------------------
tf.app.flags.DEFINE_string(
    'layers_to_regularize', 'fc3',
    'Comma-separated list of layer names to use MMD regularization on.')

tf.app.flags.DEFINE_float('learning_rate', .01, 'The learning rate')

tf.app.flags.DEFINE_float('alpha_weight', 1e-6,
                          'The coefficient for scaling the reconstruction '
                          'loss.')

tf.app.flags.DEFINE_float(
    'beta_weight', 1e-6,
    'The coefficient for scaling the private/shared difference loss.')

tf.app.flags.DEFINE_float(
    'gamma_weight', 1e-6,
    'The coefficient for scaling the shared encoding similarity loss.')

tf.app.flags.DEFINE_float('pose_weight', 0.125,
                          'The coefficient for scaling the pose loss.')

tf.app.flags.DEFINE_float(
    'weight_decay', 1e-6,
    'The coefficient for the L2 regularization applied for all weights.')

# ---- Checkpointing / training-schedule flags --------------------------------
tf.app.flags.DEFINE_integer(
    'save_summaries_secs', 60,
    'The frequency with which summaries are saved, in seconds.')

tf.app.flags.DEFINE_integer(
    'save_interval_secs', 60,
    'The frequency with which the model is saved, in seconds.')

tf.app.flags.DEFINE_integer(
    'max_number_of_steps', None,
    'The maximum number of gradient steps. Use None to train_bkp indefinitely.')

tf.app.flags.DEFINE_integer(
    'domain_separation_startpoint', 1,
    'The global step to add the domain separation losses.')

tf.app.flags.DEFINE_integer(
    'bipartite_assignment_top_k', 3,
    'The number of top-k matches to use in bipartite matching adaptation.')

tf.app.flags.DEFINE_float('decay_rate', 0.95, 'Learning rate decay factor.')

tf.app.flags.DEFINE_integer('decay_steps', 20000, 'Learning rate decay steps.')

tf.app.flags.DEFINE_float('momentum', 0.9, 'The momentum value.')

tf.app.flags.DEFINE_bool('use_separation', False,
                         'Use our domain separation model.')

tf.app.flags.DEFINE_bool('use_logging', False, 'Debugging messages.')

# ---- Distributed-training flags ---------------------------------------------
tf.app.flags.DEFINE_integer(
    'ps_tasks', 0,
    'The number of parameter servers. If the value is 0, then the parameters '
    'are handled locally by the worker.')

tf.app.flags.DEFINE_integer(
    'num_readers', 4,
    'The number of parallel readers that read data from the dataset.')

tf.app.flags.DEFINE_integer('num_preprocessing_threads', 4,
                            'The number of threads used to create the batches.')

tf.app.flags.DEFINE_integer(
    'task', 0,
    'The Task ID. This value is used when training with multiple workers to '
    'identify each worker.')

tf.app.flags.DEFINE_string('decoder_name', 'small_decoder',
                           'The decoder to use.')
tf.app.flags.DEFINE_string('encoder_name', 'default_encoder',
                           'The encoder to use.')

################################################################################
# Flags that control the architecture and losses
################################################################################
tf.app.flags.DEFINE_string(
    'similarity_loss', 'grl',
    'The method to use for encouraging the common encoder codes to be '
    'similar, one of "grl", "mmd", "corr".')

tf.app.flags.DEFINE_string('recon_loss_name', 'sum_of_pairwise_squares',
                           'The name of the reconstruction loss.')

tf.app.flags.DEFINE_string('basic_tower', 'pose_mini',
                           'The basic tower building block.')
def provide_batch_fn():
  """Return the callable used to provide dataset batches."""
  batch_provider = dataset_factory.provide_batch
  return batch_provider
def main(_):
  """Builds the DSN training graph from command-line flags and runs training."""
  # All model hyper-parameters are forwarded to dsn.create_model as one dict.
  model_params = {
      'use_separation': FLAGS.use_separation,
      'domain_separation_startpoint': FLAGS.domain_separation_startpoint,
      'layers_to_regularize': FLAGS.layers_to_regularize,
      'alpha_weight': FLAGS.alpha_weight,
      'beta_weight': FLAGS.beta_weight,
      'gamma_weight': FLAGS.gamma_weight,
      'pose_weight': FLAGS.pose_weight,
      'recon_loss_name': FLAGS.recon_loss_name,
      'decoder_name': FLAGS.decoder_name,
      'encoder_name': FLAGS.encoder_name,
      'weight_decay': FLAGS.weight_decay,
      'batch_size': FLAGS.batch_size,
      'use_logging': FLAGS.use_logging,
      'ps_tasks': FLAGS.ps_tasks,
      'task': FLAGS.task,
  }
  g = tf.Graph()
  with g.as_default():
    with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
      # Load the data.
      source_images, source_labels = provide_batch_fn()(
          FLAGS.source_dataset, 'train_bkp', FLAGS.dataset_dir, FLAGS.num_readers,
          FLAGS.batch_size, FLAGS.num_preprocessing_threads)
      target_images, target_labels = provide_batch_fn()(
          FLAGS.target_dataset, 'train_bkp', FLAGS.dataset_dir, FLAGS.num_readers,
          FLAGS.batch_size, FLAGS.num_preprocessing_threads)

      # In the unsupervised case all the samples in the labeled
      # domain are from the source domain.
      domain_selection_mask = tf.fill((source_images.get_shape().as_list()[0],),
                                      True)

      # When using the semisupervised model we include labeled target data in
      # the source labelled data.
      if FLAGS.target_labeled_dataset != 'none':
        # 1000 is the maximum number of labelled target samples that exists in
        # the datasets.
        # NOTE(review): this call passes only three arguments while the two
        # calls above pass six — confirm provide_batch accepts this signature.
        target_semi_images, target_semi_labels = provide_batch_fn()(
            FLAGS.target_labeled_dataset, 'train_bkp', FLAGS.batch_size)

        # Calculate the proportion of source domain samples in the semi-
        # supervised setting, so that the proportion is set accordingly in the
        # batches.
        proportion = float(source_labels['num_train_samples']) / (
            source_labels['num_train_samples'] +
            target_semi_labels['num_train_samples'])

        rnd_tensor = tf.random_uniform(
            (target_semi_images.get_shape().as_list()[0],))

        # Per-sample coin flip: True selects the source-domain sample.
        domain_selection_mask = rnd_tensor < proportion
        source_images = tf.where(domain_selection_mask, source_images,
                                 target_semi_images)
        source_class_labels = tf.where(domain_selection_mask,
                                       source_labels['classes'],
                                       target_semi_labels['classes'])

        if 'quaternions' in source_labels:
          source_pose_labels = tf.where(domain_selection_mask,
                                        source_labels['quaternions'],
                                        target_semi_labels['quaternions'])
          (source_images, source_class_labels, source_pose_labels,
           domain_selection_mask) = tf.train.shuffle_batch(
               [
                   source_images, source_class_labels, source_pose_labels,
                   domain_selection_mask
               ],
               FLAGS.batch_size,
               50000,
               5000,
               num_threads=1,
               enqueue_many=True)

        else:
          (source_images, source_class_labels,
           domain_selection_mask) = tf.train.shuffle_batch(
               [source_images, source_class_labels, domain_selection_mask],
               FLAGS.batch_size,
               50000,
               5000,
               num_threads=1,
               enqueue_many=True)
        source_labels = {}
        source_labels['classes'] = source_class_labels
        # NOTE(review): source_labels was just reset to contain only
        # 'classes', so this membership test is always False and the pose
        # labels are never re-attached — confirm whether the check was meant
        # to look at the provider's original labels dict.
        if 'quaternions' in source_labels:
          source_labels['quaternions'] = source_pose_labels

      slim.get_or_create_global_step()
      tf.summary.image('source_images', source_images, max_outputs=3)
      tf.summary.image('target_images', target_images, max_outputs=3)

      dsn.create_model(
          source_images,
          source_labels,
          domain_selection_mask,
          target_images,
          target_labels,
          FLAGS.similarity_loss,
          model_params,
          basic_tower_name=FLAGS.basic_tower)

      # Configure the optimization scheme:
      learning_rate = tf.train.exponential_decay(
          FLAGS.learning_rate,
          slim.get_or_create_global_step(),
          FLAGS.decay_steps,
          FLAGS.decay_rate,
          staircase=True,
          name='learning_rate')

      tf.summary.scalar('learning_rate', learning_rate)
      tf.summary.scalar('total_loss', tf.losses.get_total_loss())

      opt = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
      tf.logging.set_verbosity(tf.logging.INFO)
      # Run training.
      # NOTE(review): the summary above uses tf.losses.get_total_loss() while
      # the train op uses slim.losses.get_total_loss() — presumably aliases in
      # this TF version; confirm they collect the same loss collection.
      loss_tensor = slim.learning.create_train_op(
          slim.losses.get_total_loss(),
          opt,
          summarize_gradients=True,
          colocate_gradients_with_ops=True)
      slim.learning.train(
          train_op=loss_tensor,
          logdir=FLAGS.train_log_dir,
          master=FLAGS.master,
          is_chief=FLAGS.task == 0,
          number_of_steps=FLAGS.max_number_of_steps,
          save_summaries_secs=FLAGS.save_summaries_secs,
          save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
  tf.app.run()  # Parses the flags above, then invokes main(_).
| [
"dhanyasj01@gmail.com"
] | dhanyasj01@gmail.com |
cf59c1c639fbd40a712f18d3084e6714dd548efc | 1f84ec9e3022eaa92f18aab97d1791f225073b2d | /src/transformers/models/mobilevit/image_processing_mobilevit.py | 147050099a6fa7298368f2f9230bd7d8f2c46402 | [
"Apache-2.0"
] | permissive | fwtan/transformers | 1c874b7ee6cf9c92adf82060dd8425a6cb524521 | 55db70c63de2c07b6ffe36f24c0e7df8f967e935 | refs/heads/main | 2023-08-19T10:48:42.792181 | 2023-08-10T20:06:29 | 2023-08-10T20:06:29 | 666,594,807 | 0 | 0 | null | 2023-07-15T00:09:07 | 2023-07-15T00:09:06 | null | UTF-8 | Python | false | false | 14,175 | py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for MobileViT."""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
flip_channel_order,
get_resize_output_image_size,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a MobileViT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
            Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the
            `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
            in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the
            image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in
            the `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Desired output size `(size["height"], size["width"])` when applying center-cropping. Can be overridden by
            the `crop_size` parameter in the `preprocess` method.
        do_flip_channel_order (`bool`, *optional*, defaults to `True`):
            Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`
            parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Normalize the size arguments into their canonical dict forms.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    # Copied from transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.resize with PILImageResampling.BICUBIC->PILImageResampling.BILINEAR
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
        resized to keep the input aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """
        Flip the color channels from RGB to BGR or vice versa.

        Args:
            image (`np.ndarray`):
                The image, represented as a numpy array.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image by rescale factor.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop if `do_center_crop` is set to `True`.
            do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
                Whether to flip the channel order of the image.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        """
        # Fall back to the instance-level defaults for any unset argument.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Validate each enabled transformation has the parameters it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    # Copied from transformers.models.beit.image_processing_beit.BeitImageProcessor.post_process_semantic_segmentation with Beit->MobileViT
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """
        Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports
        PyTorch.

        Args:
            outputs ([`MobileViTForSemanticSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
        """
        # TODO: add support for other frameworks
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                # Upsample each logit map to its requested (height, width)
                # before taking the per-pixel argmax.
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| [
"noreply@github.com"
] | fwtan.noreply@github.com |
261109ed43010ad33b92caf43d1f8d6b0f968d45 | efb8f7f7ae8cf129104a45607f69b9bccd9b798c | /celery-monitor/backend/analyze_log.py | db6b1758b2a0d78b9161862321bf4697f19b603a | [] | no_license | jiangfeng051/gitskills | 7a7b406a9c33e5519dbda1978cae6443338f591b | 1cb3403cf5f8d37f675f8c7bb322078d2a02666c | refs/heads/master | 2020-03-24T20:37:03.576312 | 2018-07-31T09:00:01 | 2018-07-31T09:00:01 | 142,987,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | #date:2018/7/27
from dbutils.connectdb import DbConnect
from backend.sendmail import email
def analyze_log(rule, data_log, date_now):
    """Evaluate log hits against an alert rule, e-mail a warning if it fires,
    and persist the outcome into the rule_log_detail table.

    Args:
        rule: dict with 'rule_id', 'rule_name', 'rule_price' (threshold),
            'rule_condtion' ('lt' or 'gt') and 'rule_content'
            (comma-separated keywords).
        data_log: iterable of Elasticsearch-style hits, each shaped like
            {'_source': {'message': str}}.
        date_now: timestamp stored into the gmt_create column.
    """
    rule_condtion = rule['rule_condtion']
    rule_price = int(rule['rule_price'])
    rule_id = rule['rule_id']
    rule_name = rule['rule_name']
    keywords = rule['rule_content'].split(',')

    # Count the hits whose message contains every keyword of the rule.
    ret_count = sum(
        1 for hit in data_log
        if all(keyword in hit['_source']['message'] for keyword in keywords)
    )
    print(ret_count, rule_price, rule_condtion)

    # warning_flag: 1 = alert triggered, 2 = OK.
    # FIX: originally warning_flag was only assigned inside the 'lt'/'gt'
    # branches, so any other rule_condtion crashed with NameError at the
    # INSERT below. Default to "OK" and merge the duplicated branch bodies.
    warning_flag = 2
    triggered = ((rule_condtion == 'lt' and ret_count < rule_price) or
                 (rule_condtion == 'gt' and ret_count > rule_price))
    if triggered:
        message = '{date}触发了警告'.format(date=rule_name)
        email(message)
        warning_flag = 1

    # Store the result into the alert detail table.
    db_conn = DbConnect()
    cursor = db_conn.connect()
    sql = "insert into rule_log_detail (rule_id,result,warning,gmt_create) value (%s,%s,%s,%s)"
    cursor.execute(sql, [rule_id, ret_count, warning_flag, date_now])
    db_conn.close()
"jiangfeng@zentech-inc.com"
] | jiangfeng@zentech-inc.com |
b4a2db0fc6da43c2eb0ad5f2cd65f2c360d65ad7 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /eDQDChGrv6y4fd44j_20.py | 1a24d4d5555b5a19f0f2a0043b0899ec7134f363 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,156 | py | """
A billboard is an `m * n` list, where each list element consists of either one
letter or a blank space. You are given a phrase and the billboard dimensions.
Create a function that determines whether you can place the complete phrase on
the billboard.
There are two rules:
1. If there is a space between two words:
* If they are on the same row, you must put a space.
* If they are two different rows, the space is optional.
2. You can only put COMPLETE words on a row.
To illustrate, `can_put("GOOD MORN", [2, 4])` should yield `True`, since while
there is a space between "GOOD" and "MORN", it's not needed since both words
are on separate rows.
[
["G", "O", "O", "D"],
["M", "O", "R", "N"]
]
On the other hand `can_put("GOOD MORN", [1, 8])` should yield `False`. Since
both words reside in the first row, we require nine spots, and eight would
yield the incomplete phrase "GOOD MOR".
[
["G", "O", "O", "D", "_", "M", "O", "R"]
]
We would also return `False` if we could not fit a word on a row. So
`can_put("GOOD MORN", [3,3])` should yield `False`, since we can only fit
"GOO" on the first row.
[
["G", "O", "O"],
["D", "_", "M"],
["O", "R", "N"]
]
# No good!
### Examples
can_put("HEY JUDE", [2, 4]) ➞ True
can_put("HEY JUDE", [1, 8]) ➞ True
can_put("HEY JUDE", [1, 7]) ➞ False
can_put("HEY JUDE", [4, 3]) ➞ False
### Notes
It is okay to leave extra empty spaces on one line if you cannot fit two words
with a space. For example, in a 5 x 5 billboard, you can put "BE" on the first
row and "HAPPY" on the second row.
"""
def can_put(message, dimensions):
    """Decide whether every word of *message* fits on a billboard of
    ``dimensions`` = [rows, cols].

    Words are laid out left to right; two words sharing a row need one
    space between them, while a word starting a fresh row needs none.
    Words are never split across rows.  Returns True when the whole
    phrase fits, False otherwise.
    """
    rows, cols = dimensions
    row = 0
    col = 0  # next free column on the current row
    for word in message.split():
        if cols - col >= len(word):
            # Word (plus a trailing separator slot) continues this row.
            col += len(word) + 1
        elif row + 1 < rows and len(word) <= cols:
            # Start the word on a fresh row; no space is required.
            row += 1
            col = len(word) + 1
        else:
            # No room left on this row and no further row can hold it.
            return False
    return True
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
a8ebb603e7a36708b8d42f8f11cbdfc4941da201 | a58765023b251b6db02e8dc2fc801329c4f0bd51 | /backend/backend/settings.py | cabe6be1473d1c78e3915a7387cf7edefd2e8e11 | [] | no_license | MickiMouse/billing | 36a67f899f69da41eba56f609741448337da51ac | 8a06e8abc33545526ba95e9ac1c1104b8dc00bcd | refs/heads/master | 2022-12-13T14:54:02.484584 | 2019-11-20T14:31:37 | 2019-11-20T14:31:37 | 222,928,070 | 0 | 0 | null | 2022-05-25T04:30:27 | 2019-11-20T12:02:37 | Python | UTF-8 | Python | false | false | 3,294 | py | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(971f##xbymbs88^-l)%u4qil1y8i^z#vj4kzl=n+e9pif&7+3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
'django_crontab',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main.apps.MainConfig',
'rest_framework.authtoken',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.BrokenLinkEmailsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'billing_system',
'USER': 'djadmin',
'PASSWORD': 'qwerty123',
'HOST': '127.0.0.1',
'PORT': 5432,
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'main.Reseller'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
DATE_FORMAT = "%m/%d/%Y"
DATETIME_FORMAT = "%m/%d/%Y %H:%M:%S"
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DATETIME_FORMAT': "%m/%d/%Y %H:%M:%S",
"DATETIME_INPUT_FORMATS": ["%m/%d/%Y %H:%M:%S"],
}
MAX_BOUQUETS = 127
SERVER_LOGIN = 'admin'
SERVER_PSWD = ''
CRONJOBS = [
('*/1 * * * *', 'main.cron.my_scheduled_job'),
]
try:
from backend.email_config import *
except ImportError:
pass
| [
"495lolkek42@gmail.com"
] | 495lolkek42@gmail.com |
7daa4619bb498aecf21032bf14487ffe751a8ac3 | 7d76c2309c2fcd30cfccb71187761e9db48ae440 | /backend/Admin.py | bf0a0d7632c221dc47702bbf541ddfe1c9a5c96d | [] | no_license | veejayts/ooad-project | 7c833e25564a74f08f4c3c382081d17a8f5d4cd9 | 10d7dc31990c88ab23b08340bb6b59947872a41d | refs/heads/main | 2023-01-23T04:22:15.770420 | 2020-12-11T07:39:10 | 2020-12-11T07:39:10 | 314,194,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,243 | py | from backend.DatabaseHelper import DatabaseHelper
class Admin:
    """Administrator-side operations for the student-management database:
    creating student/staff records, viewing details, fetching marks, and
    posting notices.

    NOTE(review): ``db`` is a class attribute, so every Admin instance
    shares one DatabaseHelper; ``id`` and ``password`` are never assigned
    in this class -- presumably set by login code elsewhere (verify).
    """
    db = DatabaseHelper()
    id = 0
    password = ''
    def enterStudentDetails(self, regno, name, d_o_b, department, sem):
        """
        Enters new student details in the database
        :returns: True if insertion of student details was successful. False if the operation was not successful.
        """
        # try:
        student_data = {
            'regno': int(regno),
            'name': name,
            'password': f'student{regno}',  # default password derived from the register number
            'd_o_b': d_o_b,
            'department': department,
            'sem': sem
        }
        # Checking if student already exists
        rec = self.db.getAll('SELECT regno FROM student WHERE regno = :regno', {'regno': int(regno)})
        if len(rec) >= 1:
            return False
        self.db.execute('INSERT INTO student(regno, password, name, dob, department, sem) VALUES (:regno, :password, :name, :d_o_b, :department, :sem)', student_data)
        # Pre-create zeroed marks (SEM + CAT1..CAT3) and attendance rows for
        # every subject of this department across all 8 semesters.
        for i in range(1, 9):
            subcodes = self.db.getAll(f'SELECT subcode FROM subjects WHERE department = :department AND sem = {i}', {'department': department})
            subcodes = [sub[0] for sub in subcodes]
            for sub in subcodes:
                # NOTE(review): subcode/sem/exam are spliced in with f-strings
                # instead of bound parameters -- SQL-injection risk if subject
                # codes are ever untrusted; consider parameterizing.
                self.db.execute(f'INSERT INTO marks (regno, type, subcode, marks, department, sem) VALUES (:regno, "SEM", "{sub}", 0, :department, {i})', student_data)
                self.db.execute(f'INSERT INTO attendance (regno, subcode, attendance, sem) VALUES (:regno, "{sub}", 0, {i})', student_data)
                for exam in ['CAT1', 'CAT2', 'CAT3']:
                    self.db.execute(f'INSERT INTO marks (regno, type, subcode, marks, department, sem) VALUES (:regno, "{exam}", "{sub}", 0, :department, {i})', student_data)
        return True
        # except Exception:
        #     return False
    def enterStaffDetails(self, regno, name, department):
        """
        Enters new staff details in the database
        :returns: True if insertion of staff details was successful. False if the operation was not successful.
        """
        staffData = {
            'regno': regno,
            'password': f'staff{regno}',  # default password derived from the staff id
            'name': name,
            'department': department
        }
        try:
            # Checking if staff already exists
            rec = self.db.getAll('SELECT regno FROM staff WHERE regno = :regno', {'regno': int(regno)})
            if len(rec) >= 1:
                return False
            self.db.execute(f'INSERT INTO staff (regno, password, name, department) VALUES (:regno, :password, :name, :department)', staffData)
            return True
        except Exception:
            # Any failure (bad regno, DB error) is reported as False.
            return False
    def viewDetails(self, detailType, id):
        """
        :returns: A dictionary containing the information of the requested student or staff
        """
        # detailType selects the table: 'Student' -> student, anything else -> staff.
        # NOTE(review): id is interpolated directly into the SQL string
        # (injection risk), and the bare except also hides lookup misses
        # (record[0] IndexError) by returning False.
        try:
            if detailType == 'Student':
                record = self.db.getAll(f'SELECT * FROM student WHERE regno = "{id}"')
                record = record[0]
                # Column order assumed: regno, name, password, dob, department, sem
                # (index 2, the password, is deliberately not exposed).
                data = {
                    'regno': record[0],
                    'name': record[1],
                    'dob': record[3],
                    'department': record[4],
                    'sem': record[5]
                }
                return data
            else:
                record = self.db.getAll(f'SELECT * FROM staff WHERE regno = "{id}"')
                record = record[0]
                data = {
                    'name': record[1],
                    'id': record[0],
                    'department': record[3]
                }
                return data
        except:
            return False
    def getSemMarks(self, regno, department, sem):
        """
        :returns: A list of lists containing the marks of the particular sem of the requested student
        """
        # NOTE(review): the `department` parameter is accepted but unused;
        # the query filters on regno and sem only.
        record = self.db.getAll('SELECT * FROM marks WHERE regno = :regno AND sem = :sem', {
            'regno': regno,
            'sem': sem
        })
        return record
    def updateNotice(self, notice):
        """
        Enters new notice into the database
        """
        # NOTE(review): notice text is interpolated into the SQL string --
        # injection risk; a quote in the notice will also break the statement.
        try:
            self.db.execute(f'INSERT INTO notice (notice) VALUES ("{notice}")')
            return True
        except:
            return False
    def getNotices(self):
        """
        :returns: All notices present
        """
        return self.db.getAll('SELECT * FROM notice')
    def updateDetails(self, name, regno, dob, attendance, maths, english, computer, percentage):
        """
        Updates details of existing students
        :returns: True if insertion of student details was successful. False if the operation was not successful.
        """
        student_data = {
            'name': name,
            'regno': regno,
            'dob': dob,
            'attendance': attendance,
            'maths': maths,
            'english': english,
            'computer': computer,
            'percentage': percentage
        }
        # Updates the legacy flat-columns schema (maths_marks etc.), not the
        # normalized `marks` table used by enterStudentDetails -- verify which
        # schema is current before relying on this.
        try:
            self.db.execute('UPDATE student SET name = :name, dob = :dob, attendance = :attendance, maths_marks = :maths, english_marks = :english, computer_marks = :computer, percentage_marks = :percentage WHERE regno = :regno', student_data)
            return True
        except:
            return False
    def updateMarks(self, name, regno, dob, attendance, maths, english, computer, percentage):
        """
        Updates details of existing students
        :returns: True if insertion of student details was successful. False if the operation was not successful.
        """
        # NOTE(review): name, dob and attendance are accepted but unused here;
        # the signature mirrors updateDetails for caller convenience.
        student_data = {
            'regno': regno,
            'maths': maths,
            'english': english,
            'computer': computer,
            'percentage': percentage
        }
        try:
            self.db.execute('UPDATE student SET maths_marks = :maths, english_marks = :english, computer_marks = :computer, percentage_marks = :percentage WHERE regno = :regno', student_data)
            return True
        except:
            return False
    def logout(self):
        """
        Logs out the user
        """
        # No session state is cleared here; only a console confirmation.
        print('Logged out of Admin')
"vijayts2001@gmail.com"
] | vijayts2001@gmail.com |
f0c76d1fa08e79952459a729a781ae1b1a1a853d | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Fall17/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8_cff.py | c7058877ed083db8890af32476f19744018f6428 | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 3,838 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/00000/001CEA5F-72C9-E811-BA66-00259029E84C.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/00000/2CD7E036-72C9-E811-B8BC-B499BAAC0572.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/00000/4A60530E-72C9-E811-9AC1-1866DA879ED8.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/00000/5A6EF34A-72C9-E811-B5F2-001E67DDC051.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/00000/D8371BB4-7DC7-E811-B485-002590D9D8AE.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/1ED8C138-4BBB-E811-B449-246E96D10C24.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/2C39513A-4BBB-E811-A565-F02FA768CFE4.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/30571216-4BBB-E811-AD1A-0CC47A0AD476.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/42FFEC30-4BBB-E811-81B7-001E67397003.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/58987AEF-4ABB-E811-BFD6-E0071B6C9DF0.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/5E81430E-5CBA-E811-9AE8-002590D9D8AE.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/60152222-4BBB-E811-AD87-0CC47AD990C4.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/6A2711BC-4BBB-E811-88C7-0026B92786AC.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/90B7CF6A-4BBB-E811-A85C-1866DAEB1FC8.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/D6F1494D-4BBB-E811-8551-7CD30ACE1B58.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/270000/F48D6DF7-4ABB-E811-8D2F-0CC47A7C3424.root',
'/store/mc/RunIIFall17MiniAODv2/SMS-T2tt_mStop-1200_mLSP-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/60000/D0903CAD-F5C5-E811-80BC-002590D9D8C4.root',
] )
| [
"Alexx.Perloff@Colorado.edu"
] | Alexx.Perloff@Colorado.edu |
4f4b68ca0c6623d671747618cbe6275ec180ab9f | b22cbe574c6fd43fde3dc82441805917b5996bb2 | /test/util.py | 9a84f69774201372124c9d12aad475c699637b11 | [] | no_license | matthagy/hlab | 7a7b16526ee06f9b6211e387795e09c6438b536c | 1bea77cf6df460f1828f99f3a54251d20e2d0f3d | refs/heads/master | 2021-01-25T03:26:52.311278 | 2012-07-23T16:20:11 | 2012-07-23T16:20:11 | 2,352,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | '''Assorted unit tests utilities
'''
import unittest
from HH2.pathutils import FilePath,DirPath
def basepath():
    # Derive the project root: take this module's absolute path without
    # extension and strip its dotted import path (as "pkg/sub/mod") off
    # the end.  Both asserts are sanity checks on that assumption.
    filebase = FilePath(__file__).abspath().stripext()
    backpath = __name__.replace('.','/')
    assert filebase.endswith(backpath)
    path = DirPath(filebase[:-len(backpath)])
    assert path.isdir()
    return path
# Immediately replace the function with its result: from here on,
# `basepath` is a DirPath constant used by the loaders below.
basepath = basepath()
loader = unittest.TestLoader()
def load_file_tests(path):
    # Convert a file path under `basepath` into a dotted module name,
    # import that module, and collect its unittest test cases.
    path = path.stripext()
    assert path.startswith(basepath)
    name = path[len(basepath)+1::].replace('/','.')
    # fromlist makes __import__ return the leaf module, not the top package.
    mod = __import__(name, fromlist=name.rsplit('.',1)[-1:])
    return loader.loadTestsFromModule(mod)
def load_directory_tests(path, recursive=True):
    """Collect unittest suites from every test module under *path*.

    Recurses into subdirectories when *recursive* is true, loading tests
    from each ``.py`` file except hidden files, dunder modules
    (``__init__.py`` etc.) and the ``util`` helper module.
    """
    tests = []
    for p in DirPath(path):
        if p.isdir():
            if recursive:
                tests.extend(load_directory_tests(p, recursive=True))
        # Bug fix: the original called the non-existent `p.statswith('__')`
        # (typo for `startswith`), raising AttributeError on the first
        # candidate .py file; the intent was to skip dunder modules.
        elif (p.endswith('.py') and not p.basename().startswith('.') and
              not p.basename().startswith('__') and not p.basename() in ['util']):
            # NOTE(review): basename() likely includes the '.py' extension,
            # so the ['util'] exclusion may never match -- confirm against
            # HH2.pathutils and the intent of skipping this util module.
            tests.extend(load_file_tests(p))
    return tests
def test_directory(basefile):
    # Entry point for a test package's __main__.py: run every test found
    # in the directory that contains *basefile* with a verbose text runner.
    basefile = FilePath(basefile)
    # NOTE(review): startswith('__main__.py') also accepts e.g.
    # '__main__.pyc'; presumably equality was intended -- confirm.
    assert basefile.basename().startswith('__main__.py')
    tests = unittest.TestSuite(load_directory_tests(basefile.abspath().parent()))
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tests)
| [
"hagy@gatech.edu"
] | hagy@gatech.edu |
833f32ab83f0fd9dffb0e1fbd5d9fee7b5e60300 | a2cf0ce6233af9fbb259e4c7b9d7b8a5b758f5ad | /project_manager/client.py | 7440882b30684608451fc7ce4c2c8edf477a135b | [] | no_license | ZhekaHauska/gamma400_tracker | 31b943345a0128ebe233b823ff0d4ca0e43f62f6 | 2833a68429281f541193643eb03019fd9c86fc64 | refs/heads/master | 2020-12-27T03:38:51.884723 | 2020-03-30T16:29:07 | 2020-03-30T16:29:07 | 237,751,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,321 | py | # читаем исходную таблицу
import subprocess as sub
import os
import yaml
from math import cos, sin, pi
from itertools import product
import pandas as pd
import numpy as np
import tables as tb
import tables.exceptions as exceptions
from time import sleep, time
import smtplib
# TODO выводить объём папки database
class Client:
def __init__(self):
self.last_email_time = 0
self.total_time = time()
self.wait = False
self.tracker_dir = os.getenv("TRACKER_DIR")
self.tracker_mac_dir = os.getenv("TRACKER_MAC_DIR")
self.tracker_output_dir = os.getenv("TRACKER_OUTPUT_DIR")
self.scripts_dir = os.getenv("TRACKER_SCRIPTS_DIR")
self.database_dir = os.getenv("TRACKER_DATABASE_DIR")
try:
if (self.tracker_output_dir is None or
self.tracker_mac_dir is None or
self.tracker_dir is None or
self.database_dir is None or
self.scripts_dir is None):
raise Exception("""Should be specified all environment variables:
TRACKER_DIR={}
TRACKER_MAC_DIR={}
TRACKER_OUTPUT_DIR={}
TRACKER_SCRIPTS_DIR={}
TRACKER_DATABASE_DIR={}""".format(self.tracker_dir,
self.tracker_mac_dir,
self.tracker_output_dir,
self.scripts_dir,
self.database_dir))
except Exception as answer:
print(answer)
# настройки
with open('input.yaml') as file:
self.data = yaml.load(file, Loader=yaml.Loader)
try:
file = open('tmp/cache.yaml')
data = yaml.load(file, Loader=yaml.Loader)
self.queue_job_id = data['job_id']
self.queue_set_id = data['set_id']
self.total_time = data['time']
file.close()
if len(self.queue_job_id) != 0:
self.wait = True
except FileNotFoundError:
self.queue_job_id = list()
self.queue_set_id = list()
# создать таблицу, в которой будет храниться информация о
# сгенерированных событиях
# если таблица существует, то будут использованы старые наборы параметров для частиц
try:
self.info = pd.read_hdf("info.h5")
except FileNotFoundError:
data = self.data
combinations = list(product(data['particle'], data['direction']['azimuth'], data['direction']['zenith'],
data['energy'], data['polarization']['x'],
data['polarization']['y'], data['polarization']['z'],
data['position']['x'], data['position']['y'], data['position']['z']))
info = pd.DataFrame(combinations, columns=['particle', 'azimuth', 'zenith', 'energy',
'pol_x', 'pol_y', 'pol_z', 'pos_x', 'pos_y', 'pos_z'])
self.info = info.assign(registered=np.zeros(info.shape[0]))
self.info = self.info.assign(bad_events=np.zeros(info.shape[0]))
self.info = self.info.assign(good_events=np.zeros(info.shape[0]))
self.info = self.info.assign(generated=np.zeros(info.shape[0]))
self.info = self.info.assign(number_of_jobs=np.zeros(info.shape[0]))
self.info.to_hdf("info.h5", key='info')
self.index_set = set(self.info.index)
if self.data['verbose'] > 1:
# test
self.send_info()
def send_info(self):
# for sending emails
message_server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
message_server.ehlo()
message_server.login("gamma400messenger@gmail.com", "BIGblueelephant400")
# test
self.last_email_time = time()
message_server.sendmail("gamma400messenger", self.data['email'], self.__repr__())
message_server.close()
# делаем запрос в очередь
def start_query(self):
# ставим в очередь строку с самым маленьким good_events и
# которую ещё не поставили в очередь
diff = self.index_set - set(self.queue_set_id)
row = self.info.index[0]
for idx in self.info.index:
if (idx in diff) and (self.info.loc[idx, 'good_events'] < self.data['number_of_events']):
row = idx
break
self.info.loc[row, "number_of_jobs"] += 1
mac_name = self.tracker_mac_dir + os.sep + "{}.mac".format(row)
try:
mac_file = open(mac_name, 'r')
except FileNotFoundError:
# конвертируем сферические координаты направления импульса в декартовы
pars = self.info.loc[row, :]
zen = pars['zenith'] * pi / 180
az = pars['azimuth'] * pi / 180
z = sin(zen) * cos(az)
y = sin(zen) * sin(az)
x = cos(zen)
direction = (x, y, z)
mac_file = open(mac_name, 'w')
mac_file.write("""
/control/verbose 0
/run/verbose 0
/event/verbose 0
/tracking/verbose 0
/gun/particle {}
/gun/energy {} MeV
/gun/position {} {} {} cm
/gun/direction {} {} {}
/gun/polarization {} {} {}
/run/beamOn {}
""".format(pars['particle'],
pars['energy'],
pars['pos_x'],
pars['pos_y'],
pars['pos_z'],
*direction,
pars['pol_x'],
pars['pol_y'],
pars['pol_z'],
self.data['batch_size']
))
mac_file.close()
file = open("tmp/query.sh", 'w')
file.write("""#! /bin/bash
#PBS -q {queue}
#PBS -l ncpus={n}
#PBS -l walltime={walltime}:00:00
#PBS -N database_hauska
#PBS -e {scripts_dir}/log_err
#PBS -o {scripts_dir}/log_out
python {scripts_dir}/server.py {mac} {n} > {scripts_dir}/log_server.txt
""".format(scripts_dir=self.scripts_dir, mac="{}.mac".format(row), n=self.data['number_of_cores'],
queue=self.data['queue'], walltime=self.data['walltime']))
file.close()
process = sub.Popen(["qsub", "tmp/query.sh"], stdout=sub.PIPE, text=True)
job_id = process.communicate()[0]
job_id = job_id.split(sep='.')
job_id = job_id[0] + '.' + job_id[1]
self.queue_job_id.append(job_id)
self.queue_set_id.append(row)
def check_queue(self):
for i, job in enumerate(self.queue_job_id):
process = sub.Popen(["qstat", job], stdout=sub.PIPE, text=True)
answer = process.communicate()[0]
if len(answer) == 0:
self.queue_job_id.pop(i)
self.queue_set_id.pop(i)
return len(self.queue_job_id)
def update_info(self):
files = os.scandir(self.tracker_output_dir)
for file_name in files:
# странно, но это работало и без .path
try:
file = tb.open_file(file_name.path)
set_number = file.root.data._v_attrs.P_Set_number[0]
bad_events = file.root.data._v_attrs.N_Bad_events[0]
good_events = file.root.data._v_attrs.N_Good_events[0]
generated = file.root.data._v_attrs.N_Generated_events[0]
file.close()
self.info.loc[set_number, 'generated'] += generated
self.info.loc[set_number, 'bad_events'] += bad_events
self.info.loc[set_number, 'good_events'] += good_events
self.info.loc[set_number, 'registered'] += (good_events + bad_events)
sub.call(['mv', file_name.path, self.database_dir + os.sep])
except FileNotFoundError:
pass
except exceptions.NoSuchNodeError:
sub.call(['rm', file_name.path])
self.info.to_hdf("info.h5", key='info')
def check_iteration(self):
self.info = self.info.sort_values(by='good_events')
# если самая мальенькая good_events достигла необходимого значения
# то прекращаем цикл
if self.info.iloc[0]['good_events'] >= self.data['number_of_events']:
raise StopIteration
def __repr__(self):
answer = "Running {} hours\n".format(round(-(self.total_time - time()) / 3600, 1))
answer += self.info.head().__repr__()
answer += os.linesep
answer += "Now running: {} jobs".format(len(self.queue_job_id))
answer += os.linesep
total_events = self.info.loc[:, 'generated'].sum()
total_registered = self.info.loc[:, 'registered'].sum()
total_jobs = self.info.loc[:, 'number_of_jobs'].sum()
total_bad_events = self.info.loc[:, 'bad_events'].sum()
total_good_events = self.info.loc[:, 'good_events'].sum()
total_events_by_energy = self.info.groupby(by='energy').aggregate({'generated': np.sum,
'good_events': np.sum,
'bad_events': np.sum,
'registered': np.sum})
total_events_by_zenith = self.info.groupby(by='zenith').aggregate({'generated': np.sum,
'good_events': np.sum,
'bad_events': np.sum,
'registered': np.sum})
answer += """
Number of jobs {}
Number of generated events: {}
Number of registered events {}
Number of good events: {}
Number of bad events: {}\n
""".format(total_jobs, total_events, total_registered, total_good_events, total_bad_events)
answer += total_events_by_energy.__repr__()
answer += os.linesep
answer += total_events_by_zenith.__repr__()
answer += os.linesep
return answer
def __iter__(self):
return self
def __next__(self):
n = self.check_queue()
if n == 0:
self.update_info()
self.check_iteration()
self.start_query()
self.wait = False
elif n < self.data['query_size'] and not self.wait:
self.start_query()
elif n == self.data['query_size'] and not self.wait:
self.wait = True
# update number of jobs
self.info.to_hdf("info.h5", key='info')
else:
sleep(60 * self.data['update_duration'])
# update cache
with open("tmp/cache.yaml", 'w') as file:
yaml.dump({'job_id': self.queue_job_id,
'set_id': self.queue_set_id,
'time': self.total_time}, file)
return self
if __name__ == "__main__":
db = Client()
for x in db:
if db.data['verbose'] == 2 or db.data['verbose'] == 3:
if (time() - db.last_email_time)/3600 >= db.data["email_duration"]:
db.send_info()
if db.data['verbose'] == 1 or db.data['verbose'] == 3:
sub.call("clear")
print(x)
# by the end, send email also
if db.data['verbose'] == 2 or db.data['verbose'] == 3:
db.send_info()
| [
"noreply@github.com"
] | ZhekaHauska.noreply@github.com |
b7ebbd95fa77eea38c9f2222a210df2477884119 | fdc2b246f38f773e4dee8e9016e4fdbb3b1a4cc6 | /Python3/get_data.py | 1cae6d6a996ad9b7d292d24b90e17e2bcf2f6829 | [] | no_license | IrinaVladimirTkachenko/Python_IdeaProjects_Course_EDU | 2c50b16ea0b8cb226bf4a7dfb40f45c84103cfc1 | ba9554b0f4258c7d3f63f41a0ccd0dc00b241b05 | refs/heads/master | 2023-07-08T09:32:49.923155 | 2021-08-05T16:52:00 | 2021-08-05T16:52:00 | 393,114,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | from bs4 import BeautifulSoup
html_string = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Web Development Page</title>
<style type="text/css">
h1{
color: white;
background: red;
}
li{
color: red;
}
#css-li{
color: blue;
}
.green{
color: green;
}
</style>
</head>
<body>
<h1>Web Development</h1>
<h1 class="green">Web</h1>
<h3>Programming Laguages</h3>
<ol>
<li>HTML</li>
<li id="css-li">CSS</li>
<li class="green bold">JavaScript</li>
<li class="green" id='python-li'>Python</li>
</ol>
</body>
</html>
"""
parsed_html = BeautifulSoup(html_string, 'html.parser')
#print(parsed_html.body.ol.li)
#print(parsed_html.find('li'))
#print(type(parsed_html.find('li')))
#print(parsed_html.find_all('li'))
#print(type(parsed_html.find_all('li')))
#print(parsed_html.find(id="css-li"))
#print(parsed_html.select('#css-li')[0])
#print(parsed_html.find_all(class_="green"))
#print(parsed_html.select(".green")[1])
#print(parsed_html.select("li")[3])
#html_elem = parsed_html.select("li")[0]
#print(html_elem.get_text())
#html_elem_list= parsed_html.select("li")
#for html_elem in html_elem_list:
# print(html_elem.get_text())
#green_class_elem_list= parsed_html.select("li")
#for html_elem in green_class_elem_list:
# print(html_elem.get_text())
#for html_elem in green_class_elem_list:
# print(html_elem.attrs)
html_elem_list= parsed_html.select("li") [3]
#print(html_elem_list.attrs['id'])
print(html_elem_list['class'])
| [
"WildIrish@192.168.20.100"
] | WildIrish@192.168.20.100 |
79c947f1007d87255536a6a0f401e9d449851021 | 40fdb47497f0dafa83381f5f8fac410541c5c622 | /decode-cli | 93fe277cc22b6089cc2ae644e3c5e2f40cc11e56 | [] | no_license | evantey14/cipher | 16a55f96df1aa789b7da9efae1a960dfa3a2e651 | 41eef5f06056a2f5d6b5c5471a5b2bf2ae850283 | refs/heads/master | 2022-10-26T03:18:13.440675 | 2020-06-20T17:47:39 | 2020-06-20T17:47:39 | 262,177,278 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | #!/usr/bin/env python3
"""
This script will be used to evaluate your solution.
To use python2, change the top of this script to say python2.
The evaluator expects this script to:
1. Read in ciphertext as first command line argument.
2. Read in a has_breakpoint flag ("true" or "false") as the second command line
argument.
3. Print the best guess for the plaintext to stdout.
Example usage:
./decode-cli "$(cat data/test/ciphertext.txt)" false
./decode-cli "$(cat data/test/ciphertext_breakpoint.txt)" true
"""
import sys
from decode import decode
# Second CLI argument toggles breakpoint handling ("true"/"false", case-insensitive).
has_breakpoint = sys.argv[2].lower() == "true"
# Decode the ciphertext (first CLI argument) and print the plaintext guess to stdout.
print(decode(sys.argv[1], has_breakpoint))
| [
"evantey14@gmail.com"
] | evantey14@gmail.com | |
5371e1adc79386f50355efbbca03f914ea6d8acc | 6e34e838e836d49631af9ba27931996cf86b6564 | /ims/tests/admin_tests.py | 9318fa21b1d92a0ea0e2dd7c7d36736eb593b80c | [] | no_license | yxm0513/flask-ims | 1eaa6eee4242b1d17269d09e23d28dc4da6f697b | 23fe360316211080caab80632fa25ae6c145b3d4 | refs/heads/master | 2016-09-06T11:02:51.738756 | 2012-06-24T14:52:43 | 2012-06-24T14:52:43 | 1,461,977 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | from flaskext.testing import TestCase
class AdminTestCase(TestCase):
    # Placeholder test case for the admin views; no tests implemented yet.
    pass
| [
"simon.yang.sh@gmail.com"
] | simon.yang.sh@gmail.com |
aff4c221d9cb8a47902c0af65c5dbe60d0218bdb | 5ccae2a03907eb6a59ea571e5a6c68510c41152a | /simple_compact.py | 633f6d0bc2c28601fa83bd2b54714e414f1a6213 | [] | no_license | yurifialho/tsp_compare | 8ce47226b4c9ec35f94df2ef4a7fa020496b0141 | 11f8610c56acfb00e1181d47fcbee2cf77310e3a | refs/heads/master | 2023-01-12T14:57:19.039711 | 2020-11-12T03:25:15 | 2020-11-12T03:25:15 | 310,966,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | """Example that solves the Traveling Salesman Problem using the simple compact
formulation presented in Miller, C.E., Tucker, A.W and Zemlin, R.A. "Integer
Programming Formulation of Traveling Salesman Problems". Journal of the ACM
7(4). 1960."""
from itertools import product
from sys import stdout as out
from mip import Model, xsum, minimize, BINARY
from matriz import ExampleMatrix
# names of places to visit
places = [([i+1 for i in range(29)])]
# distances in an upper triangular matrix
e = ExampleMatrix()
dists = e.getTriangularMatrix()
# number of nodes and list of vertices
n, V = len(dists), set(range(len(dists)))
# distances matrix
c = [[0 if i == j
else dists[i][j-i-1] if j > i
else dists[j][i-j-1]
for j in V] for i in V]
model = Model()
# binary variables indicating if arc (i,j) is used on the route or not
x = [[model.add_var(var_type=BINARY) for j in V] for i in V]
# continuous variable to prevent subtours: each city will have a
# different sequential id in the planned route except the first one
y = [model.add_var() for i in V]
# objective function: minimize the distance
model.objective = minimize(xsum(c[i][j]*x[i][j] for i in V for j in V))
# constraint : leave each city only once
for i in V:
model += xsum(x[i][j] for j in V - {i}) == 1
# constraint : enter each city only once
for i in V:
model += xsum(x[j][i] for j in V - {i}) == 1
# subtour elimination
for (i, j) in product(V - {0}, V - {0}):
if i != j:
model += y[i] - (n+1)*x[i][j] >= y[j]-n
# optimizing
model.optimize()
# checking if a solution was found
if model.num_solutions:
out.write('route with total distance %g found: %s'
% (model.objective_value, places[0]))
# sanity tests
from mip import OptimizationStatus
assert model.status == OptimizationStatus.OPTIMAL
assert round(model.objective_value) == 547
model.check_optimization_results() | [
"yurirfialho@gmail.com"
] | yurirfialho@gmail.com |
e91fb3b0579a68d2e180e42add34ad6919708d82 | 3929d114c1bc6aef86402300a8d5b278849d41ae | /186. Reverse Words in a String II.py | 8cc8dc1f28c024f2e87d00719eb97c60a509c32c | [] | no_license | lxyshuai/leetcode | ee622235266017cf18da9b484f87c1cf9ceb91d0 | 5f98270fbcd2d28d0f2abd344c3348255a12882a | refs/heads/master | 2020-04-05T21:29:37.140525 | 2018-12-16T13:17:15 | 2018-12-16T13:17:15 | 157,222,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | """
Given an input string, reverse the string word by word. A word is defined as a sequence of non-space characters.
The input string does not contain leading or trailing spaces and the words are always separated by a single space.
For example,
Given s = "the sky is blue",
return "blue is sky the".
Could you do it in-place without allocating extra space?
"""
class Solution(object):
    def reverseWords(self, s):
        """
        Reverse the order of words in *s*, keeping each word intact.

        :type s: str
        :rtype: str

        In-place strategy on a character list: reverse the whole string,
        then reverse each individual word back so its letters read
        forwards again.
        """
        def reverse(string_list, left, right):
            # In-place reversal of string_list[left..right] (inclusive).
            while left < right:
                string_list[left], string_list[right] = string_list[right], string_list[left]
                left += 1
                right -= 1
        string_list = list(s)
        reverse(string_list, 0, len(string_list) - 1)
        left = 0
        right = 0
        while right < len(string_list):
            if string_list[right].isspace():
                reverse(string_list, left, right - 1)
                left = right + 1
            right += 1
        # Bug fix: the final word has no trailing space, so the loop above
        # never un-reversed it (e.g. "the sky is blue" -> "blue is sky eht").
        # Restore the last segment explicitly.
        reverse(string_list, left, len(string_list) - 1)
        return ''.join(string_list)
if __name__ == '__main__':
    # Bug fix: the original used the Python 2 print statement
    # (`print expr`), a SyntaxError under Python 3; the call form
    # below works on both interpreters.
    print(Solution().reverseWords('a b c d'))
| [
"442536013@qq.com"
] | 442536013@qq.com |
1790c864a83cfd173f39c272bb2987599b0d9d0a | 457db8efad2e641bb828d4caf270f891269c5eee | /Tema4/Tema4_1.py | 95b4f0594cc7735e2a9491f10eedca494c0198c3 | [] | no_license | GaaraOfSuna/Tecnicas-Experimentales-Programs | a3a2bb424093da7dd4dc91bee400da6db37d08af | e551ce47a73ec0bf18ad6f0b078748ad49d6183f | refs/heads/master | 2021-01-10T17:20:38.399634 | 2016-03-02T20:20:59 | 2016-03-02T20:20:59 | 52,617,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,071 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 04 23:58:48 2015
@author: GAARA
"""
from numpy import *
from matplotlib import pyplot as plt
from numpy.random import uniform, normal
from scipy.stats.stats import pearsonr
# Experiment parameters: two independent 1-D Gaussian samples with means
# mu_i and standard deviations sigma_i; theta is the rotation angle used
# in part d).
N = 100000.0
mu_1 = 3
mu_2 = 3
sigma_1 = 2
sigma_2 = 0.5
theta = -pi/6
# NOTE(review): N is a float; recent numpy versions require an integer
# sample size for random.normal -- confirm the targeted numpy version.
gauss_1 = random.normal(mu_1, sigma_1, N)
gauss_2 = random.normal(mu_2, sigma_2, N)
gauss_2d = [gauss_1, gauss_2]
"""a)#####################################################################"""
print "a)"
print "La covarianza es:", cov(gauss_1, gauss_2)
print "El coeficiente de correlacion es:", pearsonr(gauss_1, gauss_2)[0]
"""b)#####################################################################"""
def ellipse_up(x, a, b):
    """Upper branch of the ellipse centred at (3, 3) with semi-axes a, b.

    Bug fix: the scale factor was ``b**2``; the ellipse equation
    y = y0 + b*sqrt(1 - ((x - x0)/a)**2) requires ``b`` (this also makes
    it consistent with the membership test in ``count``).
    """
    return sqrt(1-((x-3)/a)**2)*b+3
def ellipse_down(x, a, b):
    """Lower branch of the ellipse centred at (3, 3) with semi-axes a, b.

    Bug fix: the scale factor was ``b**2``; the ellipse equation
    y = y0 - b*sqrt(1 - ((x - x0)/a)**2) requires ``b`` (consistent with
    ``ellipse_up`` and with the membership test in ``count``).
    """
    return -sqrt(1-((x-3)/a)**2)*b+3
def count(a, b, gau_1, gau_2):
    """Count the points (gau_1[i], gau_2[i]) lying inside the ellipse
    centred at (3, 3) with semi-axis ``a`` along x and ``b`` along y.
    """
    inside = 0
    for x_val, y_val in zip(gau_1, gau_2):
        # Skip points whose x-coordinate already falls outside the ellipse.
        if (x_val - 3) ** 2 / a ** 2 <= 1:
            if abs(y_val - 3) <= sqrt((1 - ((x_val - 3) / a) ** 2) * b ** 2):
                inside += 1
    return inside
# b) count the samples falling inside the k-sigma ellipses (k = 1..4)
# and compare each fraction against the theoretical 2-D Gaussian value
# printed alongside it.
numb = count(sigma_1, sigma_2, gauss_1, gauss_2)
numb2 = count(2*sigma_1, 2*sigma_2, gauss_1, gauss_2)
numb3 = count(3*sigma_1, 3*sigma_2, gauss_1, gauss_2)
numb4 = count(4*sigma_1, 4*sigma_2, gauss_1, gauss_2)
print "b)"
print "Numero de puntos en sigma ", numb
print "Porcentaje de numero total ", numb/N
print "Valor esperado de distribucion gaussiana 0.46607"
print "Numero de puntos en 2*sigma ", numb2
print "Porcentaje de numero total ", numb2/N
print "Valor esperado de distribucion gaussiana 0.91107"
print "Numero de puntos en 3*sigma ", numb3
print "Porcentaje de numero total ", numb3/N
print "Valor esperado de distribucion gaussiana 0.99461"
print "Numero de puntos en 4*sigma ", numb4
print "Porcentaje de numero total ", numb4/N
print "Valor esperado de distribucion gaussiana 0.99987"
"""c)####################################################################"""
print "c)"
def f(x, y):
    """Linear combination f(x, y) = 3*x + 5*y studied in parts c) and d)."""
    return 5 * y + 3 * x
def stan(sigma_1, sigma_2, gau_1, gau_2):
    """Variance of f(x, y) = 3*x + 5*y by Gaussian error propagation.

    Var(3x + 5y) = (3*sx)^2 + (5*sy)^2 + 2*3*5*rho*sx*sy.

    Bug fix: the correlation term previously carried a minus sign and was
    missing the sigma_1*sigma_2 factor (it went unnoticed because rho is
    close to zero for the independent samples of part c).
    """
    rho = pearsonr(gau_1, gau_2)[0]
    return (3*sigma_1)**2 + (5*sigma_2)**2 + 2*3*5*rho*sigma_1*sigma_2
print "La varianza de f(x,y) de los datos es:", var(f(gauss_1, gauss_2))
print "La varianza de f(x,y) de la propagacion de errores es:", stan(sigma_1, sigma_2, gauss_1, gauss_2)
"""d)####################################################################"""
print "d)"
print "Experimento igual con una rotacion de los datos de 30 grados"
### Rotation der Punktwolke
def cart2pol(x, y):
    """Convert Cartesian coordinates to polar (radius, angle)."""
    radius = sqrt(x ** 2 + y ** 2)
    angle = arctan2(y, x)
    return (radius, angle)
rho, phi = cart2pol(gauss_1-mu_1, gauss_2-mu_2)
def pol2cart(rho, phi):
    """Convert polar coordinates back to Cartesian, rotating by the
    module-level angle ``theta`` and re-centring on (mu_1, mu_2)."""
    angle = phi - theta
    x = rho * cos(angle) + mu_1
    y = rho * sin(angle) + mu_2
    return (x, y)
gauss_1rot, gauss_2rot = pol2cart(rho, phi)
covarianz = pearsonr(gauss_1rot, gauss_2rot)[0]
print "La covarianza es:", cov(gauss_1rot, gauss_2rot)
print "El coeficiente de correlacion es:", covarianz
numb_rot = count(sqrt(var(gauss_1rot)), sqrt(var(gauss_2rot)), gauss_1rot, gauss_2rot)
numb_rot2 = count(2*sqrt(var(gauss_1rot)), 2*sqrt(var(gauss_2rot)), gauss_1rot, gauss_2rot)
numb_rot3 = count(3*sqrt(var(gauss_1rot)), 3*sqrt(var(gauss_2rot)), gauss_1rot, gauss_2rot)
numb_rot4 = count(4*sqrt(var(gauss_1rot)), 4*sqrt(var(gauss_2rot)), gauss_1rot, gauss_2rot)
#code fuer wolfram aplha: int (1/(2*pi*sqrt(1-0.851²))*exp(-0.5/(1-0.851²)*(x²+y²-2*0.851*x*y))) dx dy, x=-4 to 4, y=-4 to 4
print "Numero de puntos en sigma ", numb_rot
print "Porcentaje de numero total ", numb_rot/N
print "Valor esperado de distribucion gaussiana 0.57734"
print "Numero de puntos en 2*sigma ", numb_rot2
print "Porcentaje de numero total ", numb_rot2/N
print "Valor esperado de distribucion gaussiana 0.93188"
print "Numero de puntos en 2*sigma ", numb_rot3
print "Porcentaje de numero total ", numb_rot3/N
print "Valor esperado de distribucion gaussiana 0.99555"
print "Numero de puntos en 2*sigma ", numb_rot4
print "Porcentaje de numero total ", numb_rot4/N
print "Valor esperado de distribucion gaussiana 0.99989"
sigma_u = (sigma_1**2*(cos(theta))**2+sigma_2**2*(sin(theta))**2)#/((cos(theta))**2-(sin(theta))**2)
print sigma_u
sigma_v = (sigma_2**2*(cos(theta))**2+sigma_1**2*(sin(theta))**2)#/((cos(theta))**2-(sin(theta))**2)
print sigma_v
u=(sqrt(var(gauss_1rot))**2*(cos(theta))**2+sqrt(var(gauss_2rot))**2*(sin(theta))**2)
print u
v=(sqrt(var(gauss_2rot))**2*(cos(theta))**2+sqrt(var(gauss_1rot))**2*(sin(theta))**2)
print v
print "La varianza de los datos es:", var(f(gauss_1rot, gauss_2rot))
print "La varianza de f(x,y) de la propagacion de errores es:", stan(u, v, gauss_1rot, gauss_2rot)
print "El valor sigma_u es:", var(gauss_1rot)
print "El valor sigma_v es:", var(gauss_2rot)
print "El angulo es:", 180/pi*arcsin(2*covarianz*sqrt(var(gauss_1rot))*sqrt(var(gauss_2rot))/(sigma_1**2-sigma_2**2))/2
"""
plt.figure(1)
plt.subplot(111)
plt.axis([-6, 12, -6, 12])
plt.xlabel("x")
plt.ylabel("y")
plt.plot(gauss_1, gauss_2, linestyle='none', marker='.', ms=0.5)
arr = linspace(-sigma_1+3, sigma_1+3, 1000)
plt.plot(arr, ellipse_up(arr, sigma_1, sigma_2), ms=4, color="y")
plt.plot(arr, ellipse_down(arr, sigma_1, sigma_2), ms=4, color="y")
"""
plt.figure(1)
plt.subplot(111)
plt.axis([-6, 12, -6, 12])
plt.xlabel("x")
plt.ylabel("y")
plt.plot(gauss_1rot, gauss_2rot, linestyle='none', marker='.', ms=1)
arr = linspace(-sigma_1+3, sigma_1+3, 1000)
#plt.plot(arr, ellipse_up(arr, sigma_1, sigma_2), ms=4, color="y")
#plt.plot(arr, ellipse_down(arr, sigma_1, sigma_2), ms=4, color="y")
plt.draw()
plt.show() | [
"blaaa"
] | blaaa |
664f40dd60dc8f4a78fa4378fc0e6826c176a2e8 | 05ef172891f1d5d4bd1e9a41e7bd228b7d50cc55 | /tools/prepare_RBM_param.py | 6a339f07b2598fffde89a7d41162099d40d20c1f | [] | no_license | YifanCheng/VIC_RBM | 389f7d146429088a8197ec3cf8f0b91ec04fb915 | 49c5f4f0938a7090c5c6d0beb12b8f8db96665ee | refs/heads/master | 2022-06-22T10:57:54.372594 | 2016-06-01T19:06:56 | 2016-06-01T19:06:56 | 49,667,077 | 1 | 0 | null | 2016-01-14T18:39:32 | 2016-01-14T18:39:32 | null | UTF-8 | Python | false | false | 4,665 | py | #!/usr/local/anaconda/bin/python
# This script generates:
# - A routing station file (for the next step to generate flow and energy file)
# - RBM control file (with running period and flow and energy file missing and to be subsitute)
# Note:
# - For different basin or different Mohseni parameters, this script needs to be rerun
import numpy as np
import sys
import subprocess
import my_functions
cfg = my_functions.read_config(sys.argv[1])  # Read config file (path given on the command line)
#==========================================================#
# Read config file parameters
#==========================================================#
# [INPUT]
# Flow direction file, arcGIS ascii format
# 1-8 for direction, 9 for basin outlet (only one outlet), -1 for inactive grid cells
flowdir_asc = cfg['INPUT']['flowdir_asc']
# Template control file for RBM parameter preparation
# The following options will be filled in:
#    - <OUTPUT_DIR>  # Directory for output routing station files
#    - <BASIN_CODE>  # basin_code
#    - <TOPOLOGY_FILE>
#    - <NETWORK_FILE>  # RBM control file to be generated
#    - <MOHSENI_FILE>
# The following options will NOT be filled in (but will be filled in directly in the RBM control file when preparing flow and energy input files for RBM):
#    - <START_DATE>
#    - <END_DATE>
#    - <OUTPUT_FLOW_FILE>
#    - <OUTPUT_ENERGY_FILE>
control_template = cfg['INPUT']['control_template']
# [TOOLS]
# Perl script for building topology file
topo_pl = cfg['TOOLS']['topo_pl']
# Perl script for generating RBM control file & routing station file
build_input_pl = cfg['TOOLS']['build_input_pl']
# [MOHSENI]
# Mohseni parameters (currently spatially constants)
# alpha is actually 'alpha-mu'
mohseni_param = {}
mohseni_param['alpha'] = cfg['MOHSENI']['alpha']
mohseni_param['beta'] = cfg['MOHSENI']['beta']
mohseni_param['gamma'] = cfg['MOHSENI']['gamma']
mohseni_param['mu'] = cfg['MOHSENI']['mu']
mohseni_param['timelag'] = cfg['MOHSENI']['timelag']
# [OUTPUT]
# Output directory for all temporary files in the process
output_tmp_dir = cfg['OUTPUT']['output_tmp_dir']
# Basin code, will be used as basename for topology and Mohseni parameter files
basin_code = cfg['OUTPUT']['basin_code']
#==========================================================#
# Generate topology file (delegated to the Perl helper)
#==========================================================#
subprocess.call('perl {} {} {}/{}.Topology'\
        .format(topo_pl, flowdir_asc, output_tmp_dir, basin_code), \
        shell=True)
#==========================================================#
# Generate Mohseni parameter files
# (Currently, Mohseni parameters are set to spatially constants)
#==========================================================#
#=== Load flow direction file header ===#
# Read the 6-line arcGIS ascii header verbatim; it is re-used as the
# header of every Mohseni parameter file written below.
header = ''
f = open(flowdir_asc, 'r')
for i in range(6):
    line = f.readline()
    header = header + line
f.close()
# Extract number of rows and columns from the header fields
ncols = int(header.split()[1])
nrows = int(header.split()[3])
#=== Write Mohseni parameter files, one grid per parameter ===#
for param in mohseni_param.keys():
    # Create a spatially-constant parameter array covering the whole grid
    param_array = np.ones([nrows, ncols]) * mohseni_param[param]
    f = open('{}/{}.Mohseni.{}'.format(output_tmp_dir, basin_code, param), 'w')
    f.write(header)
    np.savetxt(f, param_array, fmt='%.2f')
    f.close()
#==========================================================#
# Prepare control file (for RBM input preparation)
#==========================================================#
#=== Read in template file ===#
f = open(control_template, 'r')
content = f.read()
f.close()
#=== Replace placeholder options with the configured paths ===#
content = content.replace('<OUTPUT_DIR>', \
                          '{}'.format(output_tmp_dir))
content = content.replace('<BASIN_CODE>', \
                          '{}'.format(basin_code))
content = content.replace('<TOPOLOGY_FILE>', \
                          '{}/{}.Topology'.format(output_tmp_dir, basin_code))
content = content.replace('<NETWORK_FILE>', \
                          '{}/{}_Network'.format(output_tmp_dir, basin_code))
content = content.replace('<MOHSENI_FILE>', \
                          '{}/{}.Mohseni'.format(output_tmp_dir, basin_code))
#=== Write new control file ===#
f = open('{}/{}.RBM_prep.Control'.format(output_tmp_dir, basin_code), 'w')
f.write(content)
f.close()
#==========================================================#
# Generate RBM control file & routing station file
#==========================================================#
subprocess.call("{} {}/{}.RBM_prep"\
                .format(build_input_pl, output_tmp_dir, basin_code), \
                shell=True)  # ".Control" is appended
| [
"ymao@hydro.washington.edu"
] | ymao@hydro.washington.edu |
9dfef73bdb4ca36d08e448d5637ff33d58b50b88 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_02_01/models/managed_cluster_addon_profile_py3.py | 71e05cd14c0e9e64885cfee910165304b5df4421 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 1,290 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ManagedClusterAddonProfile(Model):
    """Kubernetes add-on profile attached to a managed cluster.

    All required parameters must be populated in order to send to Azure.

    :param enabled: Required. Whether the add-on is enabled or not.
    :type enabled: bool
    :param config: Key-value pairs for configuring an add-on.
    :type config: dict[str, str]
    """

    # 'enabled' must always be supplied when serializing to the service.
    _validation = {
        'enabled': {'required': True},
    }

    # Maps Python attribute names to wire-format keys and msrest type codes.
    _attribute_map = {
        'enabled': {'key': 'enabled', 'type': 'bool'},
        'config': {'key': 'config', 'type': '{str}'},
    }

    def __init__(self, *, enabled: bool, config=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.enabled = enabled
| [
"wx44@cornell.edu"
] | wx44@cornell.edu |
419db0786d502a3cf9c1eae20144f684848c9409 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/gensim/test/test_utils.py | 240900129cf6621eddafef08f2c921360b47d10e | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 6,864 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking various utils functions.
"""
import logging
import unittest
import numpy as np
from six import iteritems
from gensim import utils
class TestIsCorpus(unittest.TestCase):
    def test_None(self):
        # None is not a corpus.
        self.assertEqual((False, None), utils.is_corpus(None))

    def test_simple_lists_of_tuples(self):
        # Lists of documents, each a list of (int, float) tuples, are
        # corpora: single/multiple documents, single/multiple words.
        candidates = [
            [[(0, 4.)]],
            [[(0, 4.), (1, 2.)]],
            [[(0, 4.), (1, 2.), (2, 5.), (3, 8.)]],
            [[(0, 4.)], [(1, 2.)]],
            [[(0, 4.)], [(1, 2.)], [(2, 5.)], [(3, 8.)]],
        ]
        for candidate in candidates:
            self.assertEqual((True, candidate), utils.is_corpus(candidate))

    def test_int_tuples(self):
        # Integer weights are accepted as well as floats.
        candidate = [[(0, 4)]]
        self.assertEqual((True, candidate), utils.is_corpus(candidate))

    def test_invalid_formats(self):
        # None of these qualify: they do not consist of 2-tuples with
        # the form (int, float).
        invalid = [
            ["human"],
            "human",
            ["human", "star"],
            [1, 2, 3, 4, 5, 5],
            [[(0, 'string')]],
        ]
        for bad in invalid:
            self.assertEqual((False, bad), utils.is_corpus(bad))
class TestUtils(unittest.TestCase):
    def test_decode_entities(self):
        # create a string that fails to decode with unichr on narrow python builds;
        # the numeric HTML entities (&#146; and the two emoji) must decode
        # to the corresponding unicode characters.
        body = u'It&#146;s the Year of the Horse. YES VIN DIESEL &#128588; &#128175;'
        expected = u'It\x92s the Year of the Horse. YES VIN DIESEL \U0001f64c \U0001f4af'
        self.assertEqual(utils.decode_htmlentities(body), expected)
class TestSampleDict(unittest.TestCase):
    def test_sample_dict(self):
        """sample_dict returns n (key, value) items drawn from the dict."""
        d = {1: 2, 2: 3, 3: 4, 4: 5}
        expected_dict = [(1, 2), (2, 3)]
        expected_dict_random = [(k, v) for k, v in iteritems(d)]
        # Deterministic sampling (use_random=False) takes the first n items.
        sampled_dict = utils.sample_dict(d, 2, False)
        self.assertEqual(sampled_dict, expected_dict)
        # Bug fix: the old random-sampling check
        # (`if sampled in expected: self.assertTrue(True)`) was vacuous and
        # could never fail.  Assert instead that n items were returned and
        # that each one is a genuine (key, value) pair of d.
        sampled_dict_random = utils.sample_dict(d, 2)
        self.assertEqual(len(sampled_dict_random), 2)
        for item in sampled_dict_random:
            self.assertIn(item, expected_dict_random)
class TestWindowing(unittest.TestCase):
    # Expected sliding windows of size 5 over arange(10).
    arr10_5 = np.array([
        [0, 1, 2, 3, 4],
        [1, 2, 3, 4, 5],
        [2, 3, 4, 5, 6],
        [3, 4, 5, 6, 7],
        [4, 5, 6, 7, 8],
        [5, 6, 7, 8, 9]
    ])
    def _assert_arrays_equal(self, expected, actual):
        # Helper: arrays must match in shape and in every element.
        self.assertEqual(expected.shape, actual.shape)
        self.assertTrue((actual == expected).all())
    def test_strided_windows1(self):
        # Size-2 windows over a 5-element range.
        out = utils.strided_windows(range(5), 2)
        expected = np.array([
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 4]
        ])
        self._assert_arrays_equal(expected, out)
    def test_strided_windows2(self):
        input_arr = np.arange(10)
        out = utils.strided_windows(input_arr, 5)
        expected = self.arr10_5.copy()
        self._assert_arrays_equal(expected, out)
        # Mutating the output must affect the input: windows are views.
        out[0, 0] = 10
        self.assertEqual(10, input_arr[0], "should make view rather than copy")
    def test_strided_windows_window_size_exceeds_size(self):
        # Window larger than the data yields an empty (0, 0) array.
        input_arr = np.array(['this', 'is', 'test'], dtype='object')
        out = utils.strided_windows(input_arr, 4)
        expected = np.ndarray((0, 0))
        self._assert_arrays_equal(expected, out)
    def test_strided_windows_window_size_equals_size(self):
        # Window equal to the data length yields exactly one window.
        input_arr = np.array(['this', 'is', 'test'], dtype='object')
        out = utils.strided_windows(input_arr, 3)
        expected = np.array([input_arr.copy()])
        self._assert_arrays_equal(expected, out)
    def test_iter_windows_include_below_window_size(self):
        # ignore_below_size=False keeps documents shorter than the window;
        # the default drops them.
        texts = [['this', 'is', 'a'], ['test', 'document']]
        out = utils.iter_windows(texts, 3, ignore_below_size=False)
        windows = [list(w) for w in out]
        self.assertEqual(texts, windows)
        out = utils.iter_windows(texts, 3)
        windows = [list(w) for w in out]
        self.assertEqual([texts[0]], windows)
    def test_iter_windows_list_texts(self):
        texts = [['this', 'is', 'a'], ['test', 'document']]
        windows = list(utils.iter_windows(texts, 2))
        list_windows = [list(iterable) for iterable in windows]
        expected = [['this', 'is'], ['is', 'a'], ['test', 'document']]
        self.assertListEqual(list_windows, expected)
    def test_iter_windows_uses_views(self):
        # Windows over ndarray inputs are views: mutating a window is
        # visible in the original text.
        texts = [np.array(['this', 'is', 'a'], dtype='object'), ['test', 'document']]
        windows = list(utils.iter_windows(texts, 2))
        list_windows = [list(iterable) for iterable in windows]
        expected = [['this', 'is'], ['is', 'a'], ['test', 'document']]
        self.assertListEqual(list_windows, expected)
        windows[0][0] = 'modified'
        self.assertEqual('modified', texts[0][0])
    def test_iter_windows_with_copy(self):
        # copy=True decouples the windows from the input arrays.
        texts = [
            np.array(['this', 'is', 'a'], dtype='object'),
            np.array(['test', 'document'], dtype='object')
        ]
        windows = list(utils.iter_windows(texts, 2, copy=True))
        windows[0][0] = 'modified'
        self.assertEqual('this', texts[0][0])
        windows[2][0] = 'modified'
        self.assertEqual('test', texts[1][0])
    def test_flatten_nested(self):
        nested_list = [[[1, 2, 3], [4, 5]], 6]
        expected = [1, 2, 3, 4, 5, 6]
        self.assertEqual(utils.flatten(nested_list), expected)
    def test_flatten_not_nested(self):
        # Already-flat input is returned unchanged.
        not_nested = [1, 2, 3, 4, 5, 6]
        expected = [1, 2, 3, 4, 5, 6]
        self.assertEqual(utils.flatten(not_nested), expected)
if __name__ == '__main__':
    # Silence info/debug logging during the test run.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| [
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
0f87baff0c56e542582a7e5170dea054a1e969c7 | 2be196d073367f3cf19e8b449ef755d454003864 | /manimlib/once_useful_constructs/arithmetic.py | 76a3fd1222764779551f2a09b750f67afa5382ac | [] | no_license | yxy1996/manim_practise | 61eb4188bed52775f81ae44b63a93a36d11b2376 | f1815166a194a8d98ad9a493db93ff1ed01054cd | refs/heads/master | 2022-11-07T19:34:32.017740 | 2020-06-24T01:41:07 | 2020-06-24T01:41:07 | 274,540,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,635 | py | import numpy as np
from manimlib.animation.animation import Animation
from manimlib.constants import *
from manimlib.mobject.svg.tex_mobject import TexMobject
from manimlib.scene.scene import Scene
class RearrangeEquation(Scene):
    # Scene that morphs one TeX expression into another, pairing terms
    # according to index_map; unmatched end terms grow from points and
    # unmatched start terms shrink away (unless leave_start_terms).
    def construct(
        self,
        start_terms,
        end_terms,
        index_map,
        path_arc=np.pi,
        start_transform=None,
        end_transform=None,
        leave_start_terms=False,
        transform_kwargs={},  # NOTE(review): mutable default argument -- shared across calls
    ):
        # NOTE(review): `path` is undefined here (NameError at runtime);
        # presumably an arc path built from `path_arc` was intended -- confirm.
        # `Mobject`, `Point` and `Transform` are also not imported by this
        # module's visible import block.
        transform_kwargs["path_func"] = path
        start_mobs, end_mobs = self.get_mobs_from_terms(
            start_terms, end_terms
        )
        # Optional whole-expression transforms applied before pairing.
        if start_transform:
            start_mobs = start_transform(Mobject(*start_mobs)).split()
        if end_transform:
            end_mobs = end_transform(Mobject(*end_mobs)).split()
        # Indices not mentioned in index_map are unmatched on each side.
        unmatched_start_indices = set(range(len(start_mobs)))
        unmatched_end_indices = set(range(len(end_mobs)))
        unmatched_start_indices.difference_update(
            [n % len(start_mobs) for n in index_map]
        )
        unmatched_end_indices.difference_update(
            [n % len(end_mobs) for n in list(index_map.values())]
        )
        # Matched pairs morph into each other; unmatched end terms grow
        # out of a point at their own centre.
        mobject_pairs = [
            (start_mobs[a], end_mobs[b])
            for a, b in index_map.items()
        ] + [
            (Point(end_mobs[b].get_center()), end_mobs[b])
            for b in unmatched_end_indices
        ]
        if not leave_start_terms:
            # Unmatched start terms collapse into points.
            mobject_pairs += [
                (start_mobs[a], Point(start_mobs[a].get_center()))
                for a in unmatched_start_indices
            ]
        self.add(*start_mobs)
        if leave_start_terms:
            self.add(Mobject(*start_mobs))
        self.wait()
        self.play(*[
            Transform(*pair, **transform_kwargs)
            for pair in mobject_pairs
        ])
        self.wait()
    def get_mobs_from_terms(self, start_terms, end_terms):
        """
        Need to ensure that all image mobjects for a tex expression
        stemming from the same string are point-for-point copies of one
        and other. This makes transitions much smoother, and not look
        like point-clouds.
        """
        num_start_terms = len(start_terms)
        all_mobs = np.array(
            TexMobject(start_terms).split() + TexMobject(end_terms).split())
        all_terms = np.array(start_terms + end_terms)
        for term in set(all_terms):
            # Replace every duplicate term's mobject by a copy of one base
            # mobject resized onto the duplicate's position.
            matches = all_terms == term
            if sum(matches) > 1:
                base_mob = all_mobs[list(all_terms).index(term)]
                all_mobs[matches] = [
                    base_mob.copy().replace(target_mob)
                    for target_mob in all_mobs[matches]
                ]
        return all_mobs[:num_start_terms], all_mobs[num_start_terms:]
class FlipThroughSymbols(Animation):
    """Animation that steps through a list of TeX strings as ``alpha``
    advances, sliding the displayed mobject from ``start_center`` to
    ``end_center``.
    """
    CONFIG = {
        "start_center": ORIGIN,
        "end_center": ORIGIN,
    }

    def __init__(self, tex_list, **kwargs):
        # Bug fixes: the original never stored tex_list, read the undefined
        # local `start_center`, and used self.curr_tex before it existed.
        self.tex_list = tex_list
        self.curr_tex = tex_list[0]
        start_center = kwargs.get("start_center", self.CONFIG["start_center"])
        mobject = TexMobject(self.curr_tex).shift(start_center)
        # Animation.__init__ digests CONFIG/kwargs, so self.start_center and
        # self.end_center are available afterwards (used below) -- this
        # mirrors how other manimlib Animations are configured.
        Animation.__init__(self, mobject, **kwargs)

    def interpolate_mobject(self, alpha):
        # Bug fix: np.ceil returns a float, which is not a valid list index;
        # also clamp to 0 so alpha == 0 shows the first symbol instead of
        # wrapping around to the last via index -1.
        index = max(int(np.ceil(alpha * len(self.tex_list))) - 1, 0)
        new_tex = self.tex_list[index]

        if new_tex != self.curr_tex:
            self.curr_tex = new_tex
            self.mobject = TexMobject(new_tex).shift(self.start_center)

        if not all(self.start_center == self.end_center):
            # Linearly interpolate the position between the two centres.
            self.mobject.center().shift(
                (1 - alpha) * self.start_center + alpha * self.end_center
            )
)
| [
"ckbaby1996@163.com"
] | ckbaby1996@163.com |
547f56cae470648424b7485f6231f2167b17b872 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-hbase/aliyunsdkhbase/request/v20190101/CreateInstanceRequest.py | 6dcc7d5ca183ba80569cfe098efcfdd438b27383 | [
"Apache-2.0"
] | permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 6,117 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class CreateInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'CreateInstance','hbase')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClusterName(self):
return self.get_query_params().get('ClusterName')
def set_ClusterName(self,ClusterName):
self.add_query_param('ClusterName',ClusterName)
def get_DbInstanceConnType(self):
return self.get_query_params().get('DbInstanceConnType')
def set_DbInstanceConnType(self,DbInstanceConnType):
self.add_query_param('DbInstanceConnType',DbInstanceConnType)
def get_EngineVersion(self):
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self,EngineVersion):
self.add_query_param('EngineVersion',EngineVersion)
def get_DepMode(self):
return self.get_query_params().get('DepMode')
def set_DepMode(self,DepMode):
self.add_query_param('DepMode',DepMode)
def get_BackupId(self):
return self.get_query_params().get('BackupId')
def set_BackupId(self,BackupId):
self.add_query_param('BackupId',BackupId)
def get_DbInstanceType(self):
return self.get_query_params().get('DbInstanceType')
def set_DbInstanceType(self,DbInstanceType):
self.add_query_param('DbInstanceType',DbInstanceType)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_AutoRenew(self):
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self,AutoRenew):
self.add_query_param('AutoRenew',AutoRenew)
def get_NetType(self):
return self.get_query_params().get('NetType')
def set_NetType(self,NetType):
self.add_query_param('NetType',NetType)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_CoreDiskType(self):
return self.get_query_params().get('CoreDiskType')
def set_CoreDiskType(self,CoreDiskType):
self.add_query_param('CoreDiskType',CoreDiskType)
def get_PricingCycle(self):
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self,PricingCycle):
self.add_query_param('PricingCycle',PricingCycle)
def get_CoreInstanceQuantity(self):
return self.get_query_params().get('CoreInstanceQuantity')
def set_CoreInstanceQuantity(self,CoreInstanceQuantity):
self.add_query_param('CoreInstanceQuantity',CoreInstanceQuantity)
def get_Duration(self):
return self.get_query_params().get('Duration')
def set_Duration(self,Duration):
self.add_query_param('Duration',Duration)
def get_Engine(self):
return self.get_query_params().get('Engine')
def set_Engine(self,Engine):
self.add_query_param('Engine',Engine)
def get_RestoreTime(self):
return self.get_query_params().get('RestoreTime')
def set_RestoreTime(self,RestoreTime):
self.add_query_param('RestoreTime',RestoreTime)
def get_SrcDBInstanceId(self):
return self.get_query_params().get('SrcDBInstanceId')
def set_SrcDBInstanceId(self,SrcDBInstanceId):
self.add_query_param('SrcDBInstanceId',SrcDBInstanceId)
def get_MasterInstanceType(self):
return self.get_query_params().get('MasterInstanceType')
def set_MasterInstanceType(self,MasterInstanceType):
self.add_query_param('MasterInstanceType',MasterInstanceType)
def get_ColdStorageSize(self):
return self.get_query_params().get('ColdStorageSize')
def set_ColdStorageSize(self,ColdStorageSize):
self.add_query_param('ColdStorageSize',ColdStorageSize)
def get_CoreDiskQuantity(self):
return self.get_query_params().get('CoreDiskQuantity')
def set_CoreDiskQuantity(self,CoreDiskQuantity):
self.add_query_param('CoreDiskQuantity',CoreDiskQuantity)
def get_IsColdStorage(self):
return self.get_query_params().get('IsColdStorage')
def set_IsColdStorage(self,IsColdStorage):
self.add_query_param('IsColdStorage',IsColdStorage)
def get_CoreInstanceType(self):
return self.get_query_params().get('CoreInstanceType')
def set_CoreInstanceType(self,CoreInstanceType):
self.add_query_param('CoreInstanceType',CoreInstanceType)
def get_CoreDiskSize(self):
return self.get_query_params().get('CoreDiskSize')
def set_CoreDiskSize(self,CoreDiskSize):
self.add_query_param('CoreDiskSize',CoreDiskSize)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_DbType(self):
return self.get_query_params().get('DbType')
def set_DbType(self,DbType):
self.add_query_param('DbType',DbType)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
9062cdef499ad5fb2dfca69831fc27f6e76eb076 | bae7013487de0c23eea1781e527e560f24fb5a2b | /Sources/Python/MSCGeneretor/ping_pong/client.py | d1ed5e9196777033c3531517b48b8337ee0cf2ee | [] | no_license | yannisVentura/MSC_Research | 6bcf0e7b9e65b966aae8ded1fa6115d174e3f3cc | 6ce23cecacf57d267df7f7716c535fbe555801ba | refs/heads/master | 2021-03-22T05:14:03.569250 | 2018-03-29T11:44:31 | 2018-03-29T11:44:31 | 121,608,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | from socket import *
import sys
from log import Log
# UDP client: handshakes with the server at <ip>:<port>, then repeatedly
# sends a test message and logs each exchange until Ctrl-C.
TAILLE_TAMPON = 256  # receive buffer size in bytes
# NOTE(review): this line assumes sys.argv[1] exists; the usage check
# below runs too late to guard it -- confirm intended behaviour.
log = Log("client" + format(sys.argv[1]) + ".log")
if len(sys.argv) != 3:
    print("Usage: {} <ip> <port>".format(sys.argv[0]), file=sys.stderr)
    log.write_critical("Usage: {} <ip> <port>".format(sys.argv[0]))
    log.write_critical("Exit 1")
    sys.exit(1)
with socket(AF_INET, SOCK_DGRAM) as sock:
    # Handshake: announce ourselves and learn the server's address/port.
    mess = "Initialise communication..."
    sock.sendto(mess.encode(), (sys.argv[1], int(sys.argv[2])))
    reponse, address = sock.recvfrom(TAILLE_TAMPON)
    server_adress = str(address[0])
    ip = str(address[1])
    print("the server adress is : " + server_adress)
    print("listening on " + ip)
    log.write_info('The server address is ' + server_adress)
    log.write_info('Server Ip : ' + ip)
    while True:
        try:
            # Note: no bind() needed -- the OS picks the local port.
            sock.sendto(mess.encode(), (sys.argv[1], int(sys.argv[2])))
            reponse, address = sock.recvfrom(TAILLE_TAMPON)
            mess = "this is a simple test message "
            # Send the request to the server (ip, port) after encoding str to bytes.
            log.write_info(sys.argv[1] + "send :" + mess + "to " + server_adress)
            sock.sendto(mess.encode(), (sys.argv[1], int(sys.argv[2])))
            # Receive the server's reply and decode the bytes back to str.
            print("Réponse = " + reponse.decode())
            log.write_info("receive response : " + reponse.decode() + "to " + server_adress)
        except KeyboardInterrupt:
            # Ctrl-C ends the exchange loop; log it and fall through to exit.
            log.write_critical("KeyboardInterrupt")
            break
    log.write_warning("Client close")
| [
"yventura@RCFNET.rockwellcollins.com"
] | yventura@RCFNET.rockwellcollins.com |
afd5e9a732ae36b23155af1e2cba98c435520645 | dde6faa0857c8c7e46cbe3c48dbe80b1ac9c9bcf | /suspend_resume/scripts/suspend_resume_handler_3.py | 18f59181cb17badae55a3e34d125fbf2cc356724 | [] | no_license | ROSDevoloper/Atlas80EVO-Gazebo | 7119270f4421b1a077e3c4abbb90dcf11281023b | 468d068584e71c3cca2169b365ec43faaac33f47 | refs/heads/master | 2022-10-16T10:02:48.121404 | 2020-06-15T05:08:46 | 2020-06-15T05:08:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py | #!/usr/bin/env python
"""
Author: (1) Arefeen Ridwan
(2) Samuel Chieng Kien Ho
Function: Suspend Resume based on service request
"""
import rospy
from atlas80evo_msgs.msg import FSMState
from atlas80evo_msgs.srv import SetFSMState
from std_srvs.srv import Empty
from geometry_msgs.msg import Twist
from std_msgs.msg import String
class SuspendResumeHandler():
    # Toggles the vehicle FSM between SUSPEND and the previously active
    # state on each /suspend/request service call, publishing zero-velocity
    # commands while suspended.
    def __init__(self):
        # Internal USE Variables - Modify with consultation
        self.rate = rospy.Rate(30)   # main-loop rate [Hz]
        self.sleep = rospy.Rate(2)   # debounce delay around service calls
        self.pre_state="NONE"        # last non-SUSPEND/ERROR/MANUAL state, used for resume
        self.current_state="NONE"
        # Publisher
        self.drive_pub = rospy.Publisher(rospy.get_param("~drive_topic", "/twist_cmd_mux/input/suspend"), Twist, queue_size=1)
        # Subscriber
        self.state_sub= rospy.Subscriber("/fsm_node/state", FSMState, self.stateCB, queue_size=1) #get current state from ros
        # Service Server
        self.suspend_srv = rospy.Service("/suspend/request", Empty, self.suspendSRV)
        # Service Client
        self.set_state_call = rospy.ServiceProxy("/fsm_node/set_state", SetFSMState)
        # Main Loop (blocks until shutdown)
        self.main_loop()
    # FSM State Callback
    def stateCB(self, msg):
        """Track the current FSM state; remember the last resumable one."""
        self.current_state = msg.state
        if str(msg.state)!="SUSPEND" and str(msg.state)!="ERROR" and str(msg.state)!="MANUAL":
            self.pre_state=str(msg.state)
    def suspendSRV(self, req):
        """Service handler: suspend if active, otherwise resume pre_state."""
        self.sleep.sleep()
        if self.current_state!="SUSPEND":
            self.set_state_call("SUSPEND")
            self.stopping()
            #print("suspend")
        else:
            # Resume the state that was active before the suspension.
            self.set_state_call(self.pre_state)
            print self.pre_state
            self.sleep.sleep()
        return ()
    # Main Loop
    def main_loop(self):
        """Keep publishing stop commands for as long as we are suspended."""
        while not rospy.is_shutdown():
            if(self.current_state=="SUSPEND"):
                self.stopping()
                #print("suspend")
            self.rate.sleep()
    # Stopping Vehicle
    def stopping(self):
        """Publish a zero Twist so the vehicle halts."""
        stop_cmd=Twist()
        self.drive_pub.publish(stop_cmd)
# Entry point: start the node, run the handler (its constructor blocks in
# main_loop until shutdown), then let rospy drain remaining callbacks.
if __name__=="__main__":
    rospy.init_node("suspend_resume_handler")
    SuspendResumeHandler()
    rospy.spin()
| [
"kienho91@gmail.com"
] | kienho91@gmail.com |
f64feda20504dccac97a40d5747a0a3c49125432 | d05298a88638fd62f74e8f26c5a1959f821367d1 | /src/words_baseline/reddit_output_att.py | 413266f01e93721f50de7639a7e50fc75bac1c43 | [
"MIT"
] | permissive | rpryzant/causal-text-embeddings | d4b93f5852f1854d52a09e28b81ee784015e296a | 2966493f86a6f808f0dfa71d590e3403a840befc | refs/heads/master | 2022-12-22T09:33:23.654291 | 2020-03-05T19:41:33 | 2020-03-05T19:41:33 | 298,045,006 | 1 | 0 | MIT | 2020-09-23T17:28:18 | 2020-09-23T17:28:18 | null | UTF-8 | Python | false | false | 4,087 | py | from semi_parametric_estimation.att import att_estimates, psi_plugin, psi_q_only
from reddit.data_cleaning.reddit_posts import load_reddit_processed
from .helpers import filter_document_embeddings, make_index_mapping, assign_split
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
from sklearn.metrics import mean_squared_error as mse
import argparse
import sys
from scipy.special import logit
from scipy.sparse import load_npz
def get_log_outcomes(outcomes):
    """Clamp outcomes at zero, shift by +1, and return their natural log."""
    shifted = np.maximum(np.asarray(outcomes, dtype=float), 0.0) + 1.0
    return np.log(shifted)
def predict_expected_outcomes(model, features):
    """Delegate to the fitted model's predict() for the given features."""
    predictions = model.predict(features)
    return predictions
def fit_conditional_expected_outcomes(outcomes, features):
    # Fit a ridge regression estimating E[outcome | features].
    # `verbose` is a module-level global assigned in the __main__ block.
    model = Ridge()
    model.fit(features, outcomes)
    predict = model.predict(features)
    if verbose:
        # In-sample error only; no held-out evaluation here.
        print("Training MSE:", mse(outcomes, predict))
    return model
def predict_treatment_probability(labels, features):
    # Propensity model: logistic regression of treatment on term counts.
    # `verbose` is a module-level global assigned in the __main__ block.
    model = LogisticRegression(solver='liblinear')
    model.fit(features, labels)
    if verbose:
        # In-sample accuracy only.
        print("Training accuracy:", model.score(features, labels))
    # Column 1 of predict_proba is P(treatment == 1).
    treatment_probability = model.predict_proba(features)[:,1]
    return treatment_probability
def load_simulated_data():
    """Read the simulation TSV (module-global `simulation_file`) and
    normalize its 'index' column to 'post_index'."""
    frame = pd.read_csv(simulation_file, delimiter='\t')
    return frame.rename(columns={'index': 'post_index'})
def load_term_counts(path='../dat/reddit/'):
    """Load the sparse term-count matrix under *path* as a dense ndarray."""
    sparse_counts = load_npz(path + 'term_counts.npz')
    return sparse_counts.toarray()
def main():
    """Estimate the ATT from simulated reddit data with bag-of-words features.

    Fits a propensity model and two conditional-outcome models (treated /
    untreated), then reports the Q-only and TMLE (plug-in) estimators.
    Relies on module globals `dat_dir` and the simulation file set in the
    __main__ block.
    """
    if not dat_dir:
        term_counts = load_term_counts()
    else:
        term_counts = load_term_counts(path=dat_dir)
    sim_df = load_simulated_data()
    treatment_labels = sim_df.treatment.values
    indices = sim_df.post_index.values
    # Rows of the term-count matrix for every simulated post.
    all_words = term_counts[indices, :]
    treated_sim = sim_df[sim_df.treatment==1]
    untreated_sim = sim_df[sim_df.treatment==0]
    treated_indices = treated_sim.post_index.values
    untreated_indices = untreated_sim.post_index.values
    all_outcomes = sim_df.outcome.values
    outcomes_st_treated = treated_sim.outcome.values
    outcomes_st_not_treated = untreated_sim.outcome.values
    words_st_treated = term_counts[treated_indices,:]
    words_st_not_treated = term_counts[untreated_indices,:]
    # Propensity scores over all posts.
    treatment_probability = predict_treatment_probability(treatment_labels, all_words)
    # Outcome models fit separately within each treatment arm, then
    # evaluated on every post.
    model_outcome_st_treated = fit_conditional_expected_outcomes(outcomes_st_treated, words_st_treated)
    model_outcome_st_not_treated = fit_conditional_expected_outcomes(outcomes_st_not_treated, words_st_not_treated)
    expected_outcome_st_treated = predict_expected_outcomes(model_outcome_st_treated, all_words)
    expected_outcome_st_not_treated = predict_expected_outcomes(model_outcome_st_not_treated, all_words)
    # truncate_level clips extreme propensities for stability.
    q_hat = psi_q_only(expected_outcome_st_not_treated, expected_outcome_st_treated,
        treatment_probability, treatment_labels, all_outcomes, truncate_level=0.03, prob_t=treatment_labels.mean())
    tmle = psi_plugin(expected_outcome_st_not_treated, expected_outcome_st_treated,
        treatment_probability, treatment_labels, all_outcomes, truncate_level=0.03, prob_t=treatment_labels.mean())
    print("Q hat:", q_hat)
    print("TMLE:", tmle)
if __name__ == '__main__':
    # CLI: pick the data directory, simulation directory, subreddit subset,
    # simulation mode, and (beta0, beta1, gamma) simulation parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dat-dir", action="store", default=None)
    parser.add_argument("--sim-dir", action="store", default='../dat/sim/reddit_subreddit_based/')
    parser.add_argument("--subs", action="store", default='13,6,8')
    parser.add_argument("--mode", action="store", default="simple")
    parser.add_argument("--params", action="store", default="1.0,1.0,1.0")
    parser.add_argument("--verbose", action='store_true')
    args = parser.parse_args()
    # These module-level names are read by main() and the helpers above.
    sim_dir = args.sim_dir
    dat_dir = args.dat_dir
    subs = None
    if args.subs != '':
        subs = [int(s) for s in args.subs.split(',')]
    verbose = args.verbose
    params = args.params.split(',')
    # Simulation file name encodes the parameter triple, e.g.
    # "beta01.0.beta11.0.gamma1.0.tsv".
    sim_setting = 'beta0' + params[0] + '.beta1' + params[1] + '.gamma' + params[2]
    subs_string = ', '.join(args.subs.split(','))
    mode = args.mode
    simulation_file = sim_dir + 'subreddits['+ subs_string + ']/mode' + mode + '/' + sim_setting + ".tsv"
main() | [
"victorveitch@gmail.com"
] | victorveitch@gmail.com |
7728830707ecc457434b7c68b75d9de245ba5c7c | fd4a20c0c640b706da8213d9a19c4cbc8ac9def9 | /python/DP/edit_distance.py | 02bdcf13d59b612bcb232061b117437d0a225a42 | [] | no_license | murtzdhulz/algorithms_practice | 448991887a66fa7f7e4c260c13b4bbaafb48bd90 | ae5ea1f69c611f9b91bd28df4ab4742e7afb1aa0 | refs/heads/master | 2020-05-21T19:13:13.474160 | 2019-02-22T20:42:18 | 2019-02-22T20:42:18 | 62,366,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | __author__ = 'Murtaza'
# Given two strings. Find the minimum edit distance between them
def minDistance(word1, word2):
    """
    :type word1: str
    :type word2: str
    :rtype: int

    Levenshtein edit distance via bottom-up dynamic programming:
    table[i][j] is the distance between word1[:i] and word2[:j].
    """
    rows, cols = len(word1), len(word2)
    # Create the lookup table; borders hold distances against the empty string.
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i in range(rows + 1):
        table[i][0] = i
    for j in range(cols + 1):
        table[0][j] = j
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if word1[i - 1] == word2[j - 1]:
                table[i][j] = table[i - 1][j - 1]
            else:
                # deletion, insertion, substitution
                table[i][j] = 1 + min(table[i - 1][j], table[i][j - 1], table[i - 1][j - 1])
    return table[rows][cols]
# Quick demo: edit distance between two sample words.
word1 = "hello"
word2 = "yelkyulo"
# Bug fix: this was a Python 2 print statement, a syntax error under Python 3;
# print() is valid in both.
print(minDistance(word1, word2))
"murtzdhulz@gmail.com"
] | murtzdhulz@gmail.com |
1f29a3f91ed832a3645931407070d9b3577ca4c7 | 21369596ff395de0d56b1032fa4a1e88fcd4bf79 | /joysticktest.py | 727ab40852ba2e9c2c8c0e4c577cbe18ebc3f797 | [] | no_license | josh-r-dick/flappybird | a99410f8f39536decc97cae3bb8b699aeb00e085 | 0a6ba12d3ea4e5ffde9780a0c18b7cacd552132e | refs/heads/master | 2021-05-03T17:01:00.278987 | 2017-05-18T18:40:15 | 2017-05-18T18:40:15 | 72,026,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | import pygame
# Define some colors
BLACK = (0, 0, 0)        # RGB used for rendered text
WHITE = (255, 255, 255)  # RGB used to clear the screen each frame
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputting the
# information.
class TextPrint:
    """Prints successive lines of text onto a pygame surface, tracking its
    own cursor position and indentation level. Purely an output helper —
    it has nothing to do with reading the joysticks."""

    def __init__(self):
        self.reset()
        self.font = pygame.font.Font(None, 20)

    def prnt(self, screen, textString):
        """Render one line at the cursor, then advance to the next line."""
        rendered = self.font.render(textString, True, BLACK)
        screen.blit(rendered, [self.x, self.y])
        self.y += self.line_height

    def reset(self):
        """Return the cursor to the top-left and restore line spacing."""
        self.x, self.y = 10, 10
        self.line_height = 15

    def indent(self):
        self.x += 10

    def unindent(self):
        self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# -------- Main Program Loop -----------
while done == False:
    # EVENT PROCESSING STEP
    for event in pygame.event.get(): # User did something
        if event.type == pygame.QUIT: # If user clicked close
            done = True # Flag that we are done so we exit this loop
        # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
        if event.type == pygame.JOYBUTTONDOWN:
            print(">>>>>>> Joystick button pressed.")
        if event.type == pygame.JOYBUTTONUP:
            print(">>>>>> Joystick button released.")
    # DRAWING STEP
    # First, clear the screen to white. Don't put other drawing commands
    # above this, or they will be erased with this command.
    screen.fill(WHITE)
    textPrint.reset()
    # Get count of joysticks
    joystick_count = pygame.joystick.get_count()
    textPrint.prnt(screen, "Number of joysticks: {}".format(joystick_count))
    textPrint.indent()
    # For each joystick: dump its name, axes, buttons and hats as text lines.
    for i in range(joystick_count):
        joystick = pygame.joystick.Joystick(i)
        joystick.init()
        textPrint.prnt(screen, "Joystick {}".format(i))
        textPrint.indent()
        # Get the name from the OS for the controller/joystick
        name = joystick.get_name()
        textPrint.prnt(screen, "Joystick name: {}".format(name))
        # Usually axis run in pairs, up/down for one, and left/right for
        # the other.
        axes = joystick.get_numaxes()
        textPrint.prnt(screen, "Number of axes: {}".format(axes))
        textPrint.indent()
        for i in range(axes):
            axis = joystick.get_axis(i)
            textPrint.prnt(screen, "Axis {} value: {:>6.3f}".format(i, axis))
        textPrint.unindent()
        buttons = joystick.get_numbuttons()
        textPrint.prnt(screen, "Number of buttons: {}".format(buttons))
        textPrint.indent()
        for i in range(buttons):
            button = joystick.get_button(i)
            textPrint.prnt(screen, "Button {:>2} value: {}".format(i, button))
        textPrint.unindent()
        # Hat switch. All or nothing for direction, not like joysticks.
        # Value comes back in an array.
        hats = joystick.get_numhats()
        textPrint.prnt(screen, "Number of hats: {}".format(hats))
        textPrint.indent()
        for i in range(hats):
            hat = joystick.get_hat(i)
            textPrint.prnt(screen, "Hat {} value: {}".format(i, str(hat)))
        textPrint.unindent()
        textPrint.unindent()
    # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # Limit to 5 frames per second
    clock.tick(5)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
| [
"josh.r.dick@greenimaging.com"
] | josh.r.dick@greenimaging.com |
f8f17817cc26bf8e89af712f747790baf69950e9 | 35fa22b5ff5413d80d0fbab4d2072b2e97186167 | /6file/13.py | 3c95032fd0ce617b76d7178f9f6d1a4f47bc3a79 | [] | no_license | Aleksiysh/OpenUDUPython | 7c60b59e3238eb64898b44fccd5f00f3b204fcc9 | ae33f82c05c0ba5bee452009a01c6c94eba39c19 | refs/heads/master | 2020-08-07T07:46:12.622040 | 2019-10-26T10:44:37 | 2019-10-26T10:44:37 | 213,359,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | import openpyxl
# Build a roster from 1000 spreadsheets and write it, sorted, to out13.txt.
# Each workbook contributes one "B2-value D2-value" line from its active sheet.
records = []
for sheet_number in range(1, 1001):
    workbook = openpyxl.load_workbook('../rogaikpyta/' + str(sheet_number) + '.xlsx')
    sheet = workbook.active
    records.append(sheet['B2'].value + ' ' + str(sheet['D2'].value))
records.sort()
with open('out13.txt', 'w', encoding='utf8') as fout:
    for record in records:
        print(record, file=fout)
pass | [
"aleks-sh@mail.ru"
] | aleks-sh@mail.ru |
bf3628287d6912c3ae78c55e67f21dd80313b222 | b95e71dcc1b42ebf3459ee57bd0119c618a79796 | /Array/maximum_subarray.py | 562be03611c865ee216e753a51da805015ca258d | [] | no_license | anton-dovnar/LeetCode | e47eece7de28d76b0c3b997d4dacb4f151a839b5 | 6ed9e1bd4a0b48e343e1dd8adaebac26a3bc2ed7 | refs/heads/master | 2023-06-29T07:21:06.335041 | 2021-07-31T15:08:05 | 2021-07-31T15:08:05 | 361,205,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | class Solution:
def maxSubArray(self, nums: List[int]) -> int:
    """Return the largest sum over all contiguous subarrays (Kadane's
    algorithm, O(n) time, O(1) extra space)."""
    best = float('-inf')
    running = 0
    for value in nums:
        # Either extend the current run or restart it at this element.
        running = max(value, running + value)
        if running > best:
            best = running
    return best
| [
"fode4cun@gmail.com"
] | fode4cun@gmail.com |
cd14f97434cea94fade2b7a39e1da3f02ed8223b | 566997eebb7ec228b371f9309b8f0204dfeb3619 | /gan.py | a4858008fc618573dcad057e3ef343e27bd4aebb | [] | no_license | hayago/generative-adversarial-nets-mnist | 889b71d604025f8ff68ed0d467db9ec2f27962cc | ecb1c6c0ebd518c7fe71c333e02a22952bd64f25 | refs/heads/master | 2021-01-22T06:02:05.866307 | 2017-02-12T13:52:55 | 2017-02-12T14:11:37 | 81,729,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,131 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
class GAN:
    """
    Generative Adversarial Networks

    Minimal fully-connected GAN for MNIST: the generator maps 100-d uniform
    noise to a 784-pixel image; the discriminator scores images as real or
    fake. Both networks use a single 128-unit hidden layer.
    """

    IMAGE_SIZE = 784  # 28 x 28 = 784

    def __init__(self):
        # ---------- Generator's variables ----------
        self.Z = tf.placeholder(tf.float32, shape=[None, 100])
        self.G_W1 = tf.Variable(xavier_initial_value([100, 128]))
        self.G_b1 = tf.Variable(tf.zeros([128]))
        self.G_W2 = tf.Variable(xavier_initial_value([128, GAN.IMAGE_SIZE]))
        self.G_b2 = tf.Variable(tf.zeros([GAN.IMAGE_SIZE]))
        self.theta_G = [self.G_W1, self.G_W2, self.G_b1, self.G_b2]
        # ---------- Discriminator's variables ----------
        self.X = tf.placeholder(tf.float32, shape=[None, GAN.IMAGE_SIZE])
        self.D_W1 = tf.Variable(xavier_initial_value([GAN.IMAGE_SIZE, 128]))
        self.D_b1 = tf.Variable(tf.zeros([128]))
        self.D_W2 = tf.Variable(xavier_initial_value([128, 1]))
        self.D_b2 = tf.Variable(tf.zeros([1]))
        self.theta_D = [self.D_W1, self.D_W2, self.D_b1, self.D_b2]

    def train(self):
        """Run the adversarial training loop on MNIST.

        Every `plot_count` iterations, prints the current losses and saves
        a 4x4 grid of generator samples under output/.
        """
        G_sample = self.__generator(self.Z)
        D_real = self.__descriminator(self.X)
        D_fake = self.__descriminator(G_sample)
        # Loss function: minimax GAN objective. Each optimizer only updates
        # its own network's parameters via var_list.
        D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
        G_loss = tf.reduce_mean(tf.log(1. - D_fake))
        D_train_step = tf.train.AdamOptimizer(1e-5).minimize(D_loss, var_list=self.theta_D)
        G_train_step = tf.train.AdamOptimizer(1e-5).minimize(G_loss, var_list=self.theta_G)
        # Load data sets
        mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
        sess = tf.Session()
        sess.run(tf.initialize_all_variables())
        if not os.path.exists('output/'):
            os.makedirs('output/')
        batch_size = 100
        Z_dim = 100
        plot_count = 1000
        for i in range(1000000):
            batch, _ = mnist.train.next_batch(batch_size)
            sess.run([D_train_step, G_train_step], feed_dict={
                self.Z: sample_random_uniform([batch_size, Z_dim]),
                self.X: batch
            })
            if i % plot_count == 0:
                D_loss_current, G_loss_current = sess.run([D_loss, G_loss], feed_dict={
                    self.Z: sample_random_uniform([batch_size, Z_dim]),
                    self.X: batch
                })
                print('Iteration: {}'.format(i))
                print('D loss: {:.10}'.format(D_loss_current))
                print('G loss: {:.10}'.format(G_loss_current))
                print()
                samples = sess.run(G_sample, feed_dict={
                    self.Z: sample_random_uniform([16, Z_dim])
                })
                # plot
                fig = plot(samples)
                # Bug fix: zfill(4) was applied to the whole path string — a
                # no-op since the path is always longer than 4 characters.
                # Zero-pad the frame index itself so files sort correctly.
                plt.savefig('output/{}.png'.format(str(i // plot_count).zfill(4)), bbox_inches='tight')
                plt.close(fig)

    def __generator(self, z):
        """Map noise z of shape (batch, 100) to sigmoid images (batch, 784)."""
        G_h1 = tf.nn.relu(tf.matmul(z, self.G_W1) + self.G_b1)
        G_h2 = tf.nn.sigmoid(tf.matmul(G_h1, self.G_W2) + self.G_b2)
        return G_h2

    def __descriminator(self, x):
        """Score images x of shape (batch, 784) with a real-probability in (0, 1)."""
        D_h1 = tf.nn.relu(tf.matmul(x, self.D_W1) + self.D_b1)
        D_h2 = tf.nn.sigmoid(tf.matmul(D_h1, self.D_W2) + self.D_b2)
        return D_h2
def xavier_initial_value(size):
    """Draw an initial weight tensor of the given [fan_in, fan_out] shape
    using Xavier initialization: normal noise with stddev sqrt(2 / fan_in)."""
    fan_in = size[0]
    stddev = (2.0 / fan_in) ** 0.5
    return tf.random_normal(size, stddev=stddev)
def sample_random_uniform(size):
    """Sample i.i.d. noise uniformly from [-1, 1) with the given shape."""
    low, high = -1.0, 1.0
    return np.random.uniform(low, high, size=size)
def plot(samples):
    """Render up to 16 flattened 28x28 samples on a tight 4x4 grid and
    return the matplotlib figure (caller saves/closes it)."""
    fig = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)
    for idx, image in enumerate(samples):
        axis = plt.subplot(grid[idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(image.reshape(28, 28), cmap='Greys_r')
    return fig
# Script entry point: build the model graph and start training.
if __name__ == '__main__':
    gan = GAN()
    gan.train()
| [
"go.hayakawa@gmail.com"
] | go.hayakawa@gmail.com |
4464b2855336752fff3ecd8ebabbeba6eadebcd9 | d3eaf3fa1f16474652efcc806c975eb491234d8d | /Account/models.py | d63970fd9d120bacf98e813d6767960926ecbf20 | [] | no_license | Arash3f/simple_django_portfolio | 28681bcc4bff3a7314c4f0aa7c883b6cbc4462c7 | 95e3586700ac4d936e58a780d866e342eeeec61b | refs/heads/master | 2023-07-15T20:06:46.359651 | 2021-08-26T12:03:08 | 2021-08-26T12:03:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | from django.db import models
from ckeditor.fields import RichTextField
from django.contrib.auth.models import User
class User(models.Model):
    # Portfolio profile attached one-to-one to Django's auth user.
    # NOTE(review): this model shadows the imported django.contrib.auth User
    # name for the rest of the module; the OneToOneField below still targets
    # the auth User because the local name is not bound until this class body
    # finishes. Consider renaming the model (e.g. Profile) — TODO confirm.
    user = models.OneToOneField( User , related_name='user', on_delete=models.CASCADE)
    picture = models.ImageField ( 'user_pictures' , upload_to='user_pictures/' , blank=True , null=True)
    about_me = models.TextField()
    # Optional free-text occupation shown on the profile.
    job = models.CharField ( 'Job' , max_length=30 , blank=True , null=True)
    date_of_birth= models.DateField ( 'Date of birth' , blank=True , null=True)
    # Social handles/links stored as plain text (not validated as URLs).
    github = models.CharField ( 'Github' , max_length=100 , blank=True , null=True)
    skype = models.CharField ( 'Skype' , max_length=100 , blank=True , null=True)
    twitter = models.CharField ( 'Twitter' , max_length=100 , blank=True , null=True)
    instagram = models.CharField ( 'Instagram' , max_length=100 , blank=True , null=True)
    class Meta:
        verbose_name = ("User")
        verbose_name_plural = ("Users")
    def __str__(self):
        # Displayed in the admin as the linked auth user's username.
        return self.user.username
class Skill(models.Model):
    # A named skill with a numeric proficiency (presumably a 0-100 value for
    # a progress bar — TODO confirm against the templates).
    title = models.CharField ( 'Title' , max_length=30 , blank=True , null=True)
    amount = models.IntegerField( 'Amount' , default=0 , blank=True , null=True)
    class Meta:
        verbose_name = ("Skill")
        verbose_name_plural = ("Skills")
    def __str__(self):
        # NOTE(review): title is nullable, so __str__ can return None and
        # break admin rendering — confirm whether title should be required.
        return self.title
class Experience(models.Model):
    # A single work/education experience entry with an optional image.
    title = models.CharField( 'Title' , max_length=30 , blank=True , null=True)
    body = models.TextField( 'About',max_length=500 , blank=True , null=True )
    # NOTE(review): field name "Date" is capitalized (non-PEP8); renaming it
    # would require a migration, so it is documented rather than changed.
    Date = models.DateField( 'Date' , blank=True , null=True)
    picture= models.ImageField('user_experience' , upload_to='experience_pictures/' , blank=True , null=True)
    class Meta:
        verbose_name = ("Experience")
        verbose_name_plural = ("Experiences")
    def __str__(self):
        # NOTE(review): title is nullable; __str__ may return None.
        return self.title
class Service(models.Model):
    # An offered service: a short title plus a longer description.
    title = models.CharField( 'Title' , max_length=30 , blank=True , null=True)
    about = models.TextField()
    class Meta:
        verbose_name = ("Service")
        verbose_name_plural = ("Services")
    def __str__(self):
        # NOTE(review): title is nullable; __str__ may return None.
        return self.title
"arash.alfooneh@gmail.com"
] | arash.alfooneh@gmail.com |
379d3b447bd21211255d268b91155f490c3c172c | e0267836691f84391780c68d3f972fd46ec612d5 | /01-Python-Primer/Exercises/Creativity/C1-15.py | aa75619d3ad9a772cac82400cc3019a8af7be1a3 | [] | no_license | ursu1964/Algorithms-in-Python-1 | 14e5e7e7d05533e22c2d85059dab84f2ac154f18 | 7df5fea064a2391703f7da8f891d9c69d79af1c7 | refs/heads/master | 2023-03-16T18:41:34.379471 | 2017-07-18T17:59:02 | 2017-07-18T17:59:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | def distinct(data):
temp = data
for item in data:
temp.remove(item)
if item in temp:
return False
return True
# Ad-hoc demo: report whether each sample list contains only unique values.
input_list = [10, 2, -1, 100, 48, 0]
print(distinct(input_list))
test_2 = [100,3, 4 , 5, 2]
print(distinct(test_2))
test_3 = [1, 1, 1, 1, 1]
print(distinct(test_3))
"mayankshah11996@gmail.com"
] | mayankshah11996@gmail.com |
0a73bcc828c1d8a1bbb22e0861d0e9418fb8e765 | a6d33a4f864889bec0ec21b5b8106d99a263141c | /kubernetes/models/v1/SecretKeySelector.py | 5eeb07ef465f363ee936f7942c20707316caff90 | [
"Apache-2.0"
] | permissive | riconnon/kubernetes-py | bffd2a89b2f445706381c01f46c60cce804f49d6 | 42a4537876985ed105ee44b6529763ba5d57c179 | refs/heads/master | 2020-03-28T01:35:43.082333 | 2018-08-21T19:46:24 | 2018-08-21T19:46:24 | 147,515,935 | 0 | 0 | Apache-2.0 | 2018-09-05T12:42:13 | 2018-09-05T12:42:12 | null | UTF-8 | Python | false | false | 1,742 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.utils import is_valid_string
class SecretKeySelector(object):
    """
    http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_secretkeyselector

    Selects a single key from a named Kubernetes Secret. Both attributes are
    validated as strings on assignment and omitted from serialize() while
    unset.
    """

    def __init__(self, model=None):
        super(SecretKeySelector, self).__init__()
        self._name = None
        self._key = None
        if model is not None:
            self._build_with_model(model)

    def _build_with_model(self, model=None):
        # Route each present field through its property setter for validation.
        for attr in ('name', 'key'):
            if attr in model:
                setattr(self, attr, model[attr])

    # ------------------------------------------------------------------------------------- name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, n=None):
        if not is_valid_string(n):
            raise SyntaxError('SecretKeySelector: name: [ {} ] is invalid.'.format(n))
        self._name = n

    # ------------------------------------------------------------------------------------- key

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, k=None):
        if not is_valid_string(k):
            raise SyntaxError('SecretKeySelector: key: [ {} ] is invalid.'.format(k))
        self._key = k

    # ------------------------------------------------------------------------------------- serialize

    def serialize(self):
        """Return a dict with only the fields that have been set."""
        serialized = {}
        for attr in ('name', 'key'):
            value = getattr(self, attr)
            if value is not None:
                serialized[attr] = value
        return serialized
| [
"francis@manafont.net"
] | francis@manafont.net |
96302dbfad171e64160534464df2b0add5495106 | 59e613d6a0bcb8570c89defa77da398f69b82c77 | /qcengine/tests/test_config.py | 40178a4cfe0e32b0d39d4a31efe3c27904365901 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | ffangliu/QCEngine | 3e081e7f5e236c434016e222f716e6b34b24030b | 835b291986069669e4be0e708ec4846ebfd858d6 | refs/heads/master | 2020-04-22T23:57:35.470503 | 2019-02-14T16:57:30 | 2019-02-14T16:57:30 | 170,760,404 | 0 | 0 | null | 2019-02-14T21:27:14 | 2019-02-14T21:27:14 | null | UTF-8 | Python | false | false | 5,049 | py | """
Tests the DQM compute module configuration
"""
import copy
import os
import pydantic
import pytest
import qcengine
from qcengine.testing import environ_context
def test_node_blank():
    # NodeDescriptor needs only a name and a hostname pattern.
    node = qcengine.config.NodeDescriptor(name="something", hostname_pattern="*")

def test_node_auto():
    # jobs_per_node splits a node's cores and memory evenly between jobs.
    desc = {
        "name": "something",
        "hostname_pattern": "*",
        "jobs_per_node": 1,
        "ncores": 4,
        "memory": 10,
        "memory_safety_factor": 0,
    }
    node1 = qcengine.config.NodeDescriptor(**desc)
    job1 = qcengine.get_config(hostname=node1)
    assert job1.ncores == 4
    assert pytest.approx(job1.memory) == 10.0

    desc["jobs_per_node"] = 2
    node2 = qcengine.config.NodeDescriptor(**desc)
    job2 = qcengine.get_config(hostname=node2)
    assert job2.ncores == 2
    assert pytest.approx(job2.memory) == 5.0

def test_node_environ():
    # "$VAR" in scratch_directory is expanded from the environment.
    scratch_name = "myscratch1234"
    with environ_context({"QCA_SCRATCH_DIR": scratch_name}):
        description = {
            "name": "something",
            "hostname_pattern": "*",
            "scratch_directory": "$QCA_SCRATCH_DIR",
        }
        node = qcengine.config.NodeDescriptor(**description)
        assert node.scratch_directory == scratch_name

def test_node_skip_environ():
    # An unset environment variable resolves to None, not the literal text.
    description = {
        "name": "something",
        "hostname_pattern": "*",
        "scratch_directory": "$RANDOM_ENVIRON",
    }
    node = qcengine.config.NodeDescriptor(**description)
    assert node.scratch_directory is None
@pytest.fixture
def opt_state_basic():
    """
    Capture the options state and temporarily override.

    Installs three NodeDescriptors (dragonstooth, newriver, and a catch-all
    default) for the duration of a test, then restores the originals.
    """
    # Snapshot env
    old_node = copy.deepcopy(qcengine.config.NODE_DESCRIPTORS)

    scratch_name = "myscratch1234"
    with environ_context({"QCA_SCRATCH_DIR": scratch_name}):
        # dragonstooth's scratch points at a deliberately unset variable;
        # default's scratch comes from the variable set just above.
        configs = [{
            "name": "dragonstooth",
            "hostname_pattern": "dt*",
            "jobs_per_node": 2,
            "ncores": 12,
            "memory": 120,
            "scratch_directory": "$NOVAR_RANDOM_ABC123"
        }, {
            "name": "newriver",
            "hostname_pattern": "nr*",
            "jobs_per_node": 2,
            "ncores": 24,
            "memory": 240
        }, {
            "name": "default",
            "hostname_pattern": "*",
            "jobs_per_node": 1,
            "memory": 4,
            "memory_safety_factor": 0,
            "ncores": 5,
            "scratch_directory": "$QCA_SCRATCH_DIR"
        }]
        for desc in configs:
            node = qcengine.config.NodeDescriptor(**desc)
            qcengine.config.NODE_DESCRIPTORS[desc["name"]] = node
        yield

    # Reset env
    qcengine.config.NODE_DESCRIPTORS = old_node
def test_node_matching(opt_state_basic):
    # Hostnames resolve to the first descriptor whose pattern matches,
    # falling back to the catch-all "default" entry.
    node = qcengine.config.get_node_descriptor("nomatching")
    assert node.name == "default"

    node = qcengine.config.get_node_descriptor("dt149")
    assert node.name == "dragonstooth"

    node = qcengine.config.get_node_descriptor("nr149")
    assert node.name == "newriver"

def test_node_env(opt_state_basic):
    node = qcengine.config.get_node_descriptor("dt")
    assert node.name == "dragonstooth"
    # dragonstooth's scratch references an unset variable -> None.
    assert node.scratch_directory is None

    node = qcengine.config.get_node_descriptor("nomatching")
    assert node.name == "default"
    assert node.scratch_directory == "myscratch1234"

def test_config_default(opt_state_basic):
    config = qcengine.config.get_config(hostname="something")
    assert config.ncores == 5
    assert config.memory == 4

    # dragonstooth: 12 cores / 120 GB split across 2 jobs per node, less
    # the descriptor's memory safety factor.
    config = qcengine.config.get_config(hostname="dt149")
    assert config.ncores == 6
    assert pytest.approx(config.memory, 0.1) == 54
def test_config_local_ncores(opt_state_basic):
    # local_options overrides core count without touching memory.
    config = qcengine.config.get_config(hostname="something", local_options={"ncores": 10})
    assert config.ncores == 10
    assert config.memory == 4

def test_config_local_njobs(opt_state_basic):
    # Raising jobs_per_node shrinks the per-job core and memory shares.
    config = qcengine.config.get_config(hostname="something", local_options={"jobs_per_node": 5})
    assert config.ncores == 1
    assert pytest.approx(config.memory) == 0.8

def test_config_local_njob_ncore(opt_state_basic):
    # NOTE(review): a function of the same name is defined again further
    # down this module, so pytest never collects this one — the duplicate
    # should be renamed for both to run.
    config = qcengine.config.get_config(hostname="something", local_options={"jobs_per_node": 3, "ncores": 1})
    assert config.ncores == 1
    assert pytest.approx(config.memory, 0.1) == 1.33
def test_config_local_njob_ncore_memory(opt_state_basic):
    """Explicit memory in local_options overrides the per-job computation.

    Renamed from test_config_local_njob_ncore: the original name duplicated
    the previous test's, so Python's module namespace kept only this
    definition and pytest silently never ran the other test.
    """
    config = qcengine.config.get_config(
        hostname="something", local_options={
            "jobs_per_node": 3,
            "ncores": 1,
            "memory": 6
        })
    assert config.ncores == 1
    assert pytest.approx(config.memory, 0.1) == 6
def test_config_validation(opt_state_basic):
    # Unknown local_options keys are rejected by the pydantic model.
    with pytest.raises(pydantic.ValidationError):
        config = qcengine.config.get_config(hostname="something", local_options={"bad": 10})

def test_global_repr():
    # Smoke test: the global configuration summary renders as a string.
    assert isinstance(qcengine.config.global_repr(), str)
| [
"malorian@me.com"
] | malorian@me.com |
3be54fa1efd83322f1fe7a892ea740e5bf3982c2 | 59027e8242ed21037f5de9de2c72e93d9f65d7e0 | /plot/env/bin/easy_install | 6b2e2d075e662c380b9581e724cd106adf21e5a6 | [] | no_license | ErichHoene/time-series-benchmarks | 37e31bc9e238011a46d5d8036ca5740d1cdd8041 | 1b435445d2165927168c01cb9b575b6efa6fe4fe | refs/heads/master | 2020-09-24T10:12:24.659923 | 2019-12-03T23:35:56 | 2019-12-03T23:35:56 | 225,737,280 | 0 | 0 | null | 2019-12-03T23:31:06 | 2019-12-03T23:31:06 | null | UTF-8 | Python | false | false | 289 | #!/mnt/c/Users/Erich/Documents/time-series-benchmarks/plot/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"erich@DESKTOP-GP8H4ER.localdomain"
] | erich@DESKTOP-GP8H4ER.localdomain | |
40da9441cf9ce77b344b374aacaee50aa21add22 | 99832c4632c6084c071e9eb5c2b4902b39d7bc54 | /acc_from_traj_v2/core/space_signal.py | 4b0c31d8687797ef76e20cddd6fa6ad751228366 | [] | no_license | LongStart/pyproj | 0a6b058b8a56824a61dbf37545c380552930e072 | 1f4fb23e01329ba6b50e14a0c90ae55440c1637a | refs/heads/master | 2020-08-17T14:16:53.567233 | 2019-10-16T05:44:18 | 2019-10-16T05:47:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,604 | py | import numpy as np
class SequenceXd():
    """A fixed-dimension sequence of per-axis samples stored as (dim, N) data.

    Calling the instance with no argument returns a fresh ndarray copy of the
    data; calling it with same-shaped data replaces the stored samples.
    """

    def __init__(self, dim, data):
        # data must already be shaped (dim, N); N is taken from the first row.
        assert np.shape(data) == (dim, len(data[0]))
        self._data = data
        self._len = len(data[0])

    def __call__(self, data=None):
        """Get (no argument) or set the underlying (dim, N) samples.

        Now also returns the updated array on set, matching TimeSequence
        (previously the set path returned None); getters still behave
        identically.
        """
        if data is None:
            return np.array(self._data)
        assert np.shape(self._data) == np.shape(data)
        self._data = data
        return np.array(self._data)
class TimeSequence():
    """Wrapper around a 1-D sequence of timestamps.

    Call with no argument to read a fresh ndarray copy; call with a
    same-shaped sequence to replace the stored timestamps.
    """

    def __init__(self, dim, data):
        # dim is accepted for signature symmetry with SequenceXd but unused.
        self._data = data

    def __call__(self, data=None):
        """Get or set the timestamps; always returns the current ndarray."""
        if data is not None:
            assert np.shape(self._data) == np.shape(data)
            self._data = data
        return np.array(self._data)
class Signal3d():
    """A timestamped 3-D signal stored as a (4, N) stack: [t; x; y; z]."""

    # Number of spatial components; class-level so classmethods can read it
    # (instances still see it as `self.dim`, as before).
    dim = 3

    def __init__(self, t_xyz):
        assert np.shape(t_xyz) == (self.dim + 1, len(t_xyz[0]))
        self.t = TimeSequence(1, t_xyz[0])
        self.xyz = SequenceXd(self.dim, t_xyz[1:])

    def __call__(self):
        """Return the full (4, N) stacked [t; xyz] array."""
        return np.vstack((self.t(), self.xyz()))

    @classmethod
    def from_t_xyz(cls, t, xyz=None):
        """Build a signal from timestamps and optional (3, N) samples.

        Bug fix: the missing-xyz branch referenced the undefined name
        `self` inside this classmethod (NameError); it now uses the
        class-level dimension.
        """
        if xyz is None:
            xyz = np.zeros((cls.dim, len(t)))
        return cls(np.vstack((t, xyz)))

    @property
    def t_xyz(self):
        return self
class Signal4d():
    """A timestamped 4-D signal (e.g. quaternion xyzw) stored as (5, N)."""

    # Number of components; class-level so classmethods can read it
    # (instances still see it as `self.dim`, as before).
    dim = 4

    def __init__(self, t_xyzw):
        assert np.shape(t_xyzw) == (self.dim + 1, len(t_xyzw[0]))
        self.t = TimeSequence(1, t_xyzw[0])
        self.xyzw = SequenceXd(self.dim, t_xyzw[1:])

    def __call__(self):
        """Return the full (5, N) stacked [t; xyzw] array."""
        return np.vstack((self.t(), self.xyzw()))

    @classmethod
    def from_t_xyzw(cls, t, xyzw=None):
        """Build a signal from timestamps and optional (4, N) samples.

        Bug fix: the missing-xyzw branch referenced the undefined name
        `self` inside this classmethod (NameError); it now uses the
        class-level dimension.
        """
        if xyzw is None:
            xyzw = np.zeros((cls.dim, len(t)))
        return cls(np.vstack((t, xyzw)))

    @property
    def t_xyzw(self):
        return self
class Trajectory3d():
    """A timestamped pose trajectory stored as an (8, N) stack:
    row 0 is time, rows 1-3 position xyz, rows 4-7 quaternion xyzw."""

    def __init__(self, t_xyz_xyzw):
        n_samples = len(t_xyz_xyzw[0])
        assert np.shape(t_xyz_xyzw) == (8, n_samples)
        self.t = TimeSequence(1, t_xyz_xyzw[0])
        self.xyz = SequenceXd(3, t_xyz_xyzw[1:4])
        self.xyzw = SequenceXd(4, t_xyz_xyzw[4:])

    def __call__(self):
        """Return the stacked (8, N) array [t; xyz; xyzw]."""
        rows = (self.t(), self.xyz(), self.xyzw())
        return np.vstack(rows)

    @classmethod
    def from_t_xyz_xyzw(cls, t, xyz=None, xyzw=None):
        """Build a trajectory; missing position/orientation default to zeros."""
        n_samples = len(t)
        if xyz is None:
            xyz = np.zeros((3, n_samples))
        if xyzw is None:
            xyzw = np.zeros((4, n_samples))
        return cls(np.vstack((t, xyz, xyzw)))

    @property
    def t_xyz(self):
        """Position-only view as a Signal3d."""
        return Signal3d.from_t_xyz(self.t(), self.xyz())

    @property
    def t_xyzw(self):
        """Orientation-only view as a Signal4d."""
        return Signal4d.from_t_xyzw(self.t(), self.xyzw())

    @property
    def t_xyz_xyzw(self):
        """The full trajectory itself."""
        return self
| [
"chen-xx@zju.edu.cn"
] | chen-xx@zju.edu.cn |
a6fdc4dd44a88f40d0d85f5b2d42702db7d6059c | a777a4af5cfc4270aa16db69a2076dd8c536dee6 | /11-28/chunkBrown.py | 5063ada08ab3ae71fc2132500607e73d33f285a7 | [] | no_license | llCUriel/Natural-Language-Processing | 80e31311610c53fc001457390a612ca6c0cad20a | 52b3bcb5627509f6f79bd01cd4b7f0c52b0605eb | refs/heads/master | 2021-03-01T10:44:29.282464 | 2020-03-08T08:36:59 | 2020-03-08T08:36:59 | 245,779,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,191 | py | import re
import os
# import resource
import string
from collections import Counter
import pickle
import nltk
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
from pprint import pprint
import numpy as np
import networkx
from gensim.summarization.summarizer import summarize
from sklearn.feature_extraction.text import TfidfVectorizer
def savePkl(tagger, filename="name.pkl"):
    """Serialize *tagger* to *filename* using the highest pickle protocol."""
    with open(filename, "wb") as handle:
        pickle.dump(tagger, handle, pickle.HIGHEST_PROTOCOL)
def loadPkl(filename="name.pkl"):
    """Deserialize and return the object pickled in *filename*."""
    with open(filename, "rb") as handle:
        return pickle.load(handle)
def lemmanize(tokens, taggin=False):
    """Map each token to its lemma (and POS tag when taggin=True).

    Lazily populates the module-level `lemma` dict from '../generate.txt'
    on first use. Returns (lemmanized, unknown): `unknown` collects the
    tokens (or (token, 'nounArtificial') pairs) missing from the dictionary.
    """
    global lemma
    # NOTE(review): `tagger` is declared global here but never used in this
    # function — presumably leftover; confirm before removing.
    global tagger
    if len(lemma) == 0:
        print("lemma init")
        # Each useful dictionary line seems to end with "... POS lemma"
        # (TODO confirm generate.txt's exact column layout).
        with open('../generate.txt', encoding='latin-1') as f:
            for l in f:
                s = l.replace('#', '')
                words = s.split()
                if len(words) > 2:
                    if taggin:
                        lemma[words[0]] = words[-1], str(words[-2]).lower()
                    else:
                        lemma[words[0]] = words[-1]
    lemmanized = []
    unknown = []
    for w in tokens:
        try:
            lemmanized.append(lemma[w])
        except KeyError:
            # Out-of-dictionary words are treated as artificial nouns.
            if taggin:
                lemmanized.append((w, 'nounArtificial'))
                unknown.append((w, 'nounArtificial'))
            else:
                lemmanized.append(w)
                unknown.append(w)
    return lemmanized, unknown
def deleteDiacriticMarks(word):
    """Lower-case *word* and replace Spanish accented characters
    (á é í ó ú -> plain vowels, ñ -> n) in one translation pass."""
    return word.lower().translate(str.maketrans('áéíóúñ', 'aeioun'))
def removeSpecialCharacters(tokens):
    """Blank out punctuation in each token and drop tokens left empty.

    Every punctuation character becomes a space; only tokens that were the
    empty string to begin with are filtered out (a token reduced to spaces
    is still truthy and therefore kept).
    """
    punct_pattern = re.compile('[{}]'.format(re.escape(string.punctuation)))
    blanked = (punct_pattern.sub(' ', tok) for tok in tokens)
    return [tok for tok in blanked if tok]
def cleanText(text, lemmanized=False, cleaningLevel=1, taggin=False):
    ''' A string is cleaned, depending on the level of cleaning:
    0 -> raw string
    1 - > lower case, special characters removed
    2 - > Stopwords are removed
    the cleaned string is return

    With ``taggin=True`` the function instead returns a
    ``(cleanedTokens, tagged)`` pair, where ``cleanedTokens`` is a
    space-joined string of the alphabetic tokens and ``tagged`` is a list
    of ``(token, tag)`` pairs produced by the module-level ``tagger``.
    '''
    tokens = nltk.word_tokenize(text)
    stopwords = nltk.corpus.stopwords.words('spanish')
    if cleaningLevel == 0:
        cleanedTokens = tokens
    elif cleaningLevel == 1:
        # Keep purely alphabetic tokens, lower-cased, punctuation stripped.
        cleanedTokens = removeSpecialCharacters([
            t.lower() for t in tokens if t.isalpha()
        ])
    elif cleaningLevel == 2: # Without stopwords
        cleanedTokens = removeSpecialCharacters([
            t.lower() for t in tokens
            if t.isalpha() and t.lower() not in stopwords
        ])
    # NOTE(review): a cleaningLevel outside {0, 1, 2} leaves cleanedTokens
    # unbound and the non-tagging branch below raises NameError.
    if taggin:
        sentences = nltk.sent_tokenize(text, language='spanish')
        tagged = []
        tokens = []
        # Tags using the tagger, as a fallback,
        # it uses the tag from generator.txt dictonary
        for sentence in sentences:
            taggedTokens = tagger.tag(nltk.word_tokenize(sentence, 'spanish'))
            for token, tag in taggedTokens:
                token = str(token).lower()
                tag = str(tag).lower()
                # When the token was a punctuation character token is empty
                if token.isalpha():
                    lemmanized, unknown = lemmanize([token], taggin)
                    # If it's a token that isn't on the dictonary
                    if len(unknown) == 1:
                        lemmanized[0] = lemmanized[0], 'nounArtificial'
                    else:
                        tag = lemmanized[0][1]
                    tokens.append(token)
                    tagged.append((token, tag))
        cleanedTokens = ' '.join(tokens)
        return cleanedTokens, tagged
    else:
        lemmanizedTokens, unkown = lemmanize(cleanedTokens, taggin)
        # NOTE(review): lemmanizedTokens is computed but never used — the
        # return value joins the *unlemmanized* cleanedTokens. Confirm intent.
        cleanedText = ' '.join(cleanedTokens)
        return cleanedText
def readMessages(path, tagger=None, cleaningLevel=1, lemmanized=False,
                 encoding='latin-1'):
    """Read every *.txt review under *path* and return its tagged sentences.

    Each file is split into sentences; each sentence is run through
    ``cleanText(..., taggin=True)`` and kept only if it yields more than
    two tagged tokens.  Returns a list of sentences, each a list of
    ``(token, tag)`` pairs.

    NOTE(review): the ``tagger``, ``cleaningLevel`` and ``lemmanized``
    parameters are currently unused — cleanText relies on the module-level
    tagger; the reviews/classifications accumulation below is commented out.
    """
    reviews = []
    tagged = []
    classifications = []
    sentences = []
    for filename in sorted(os.listdir(path)):
        if filename.endswith('txt'):
            with open(path+filename, encoding=encoding) as f:
                # print('Review ', filename, ' said: ')
                review = f.read().strip()
                # OriginalReviewLength
                osl = nltk.sent_tokenize(review, 'spanish')
                for sent in osl:
                    cleaned, tagged = cleanText(sent.strip(), False, 1, True)
                    if len(tagged) > 2:
                        sentences.append(tagged)
                # if len(osl) > 5:
                #     summary = summarize(review)
                #     # Summary Review Length
                #     ssl = nltk.sent_tokenize(summary, 'spanish')
                #     print('Original has ', len(osl), ' sentences.')
                #     print('Summary has ', len(ssl), ' sentences.')
                #     pprint(summary)
                # else:
                #     print(filename, ' cannot be summarized.')
                # review, tags = cleanText(review, lemmanized,
                #                          cleaningLevel, True)
                # classification = filename.split('.')[0].split('_')
                # tagged.append(tags)
                # reviews.append(review)
                # classifications.append((filename, classification))
    return sentences
def compute_ngrams(sequence, n):
    """Return a lazy iterator over the n-grams of *sequence* as tuples."""
    shifted = (sequence[offset:] for offset in range(n))
    return zip(*shifted)
def get_top_ngrams(corpus, ngram_val=1, limit=5):
    """Return the *limit* most frequent ngram_val-grams found in *corpus*
    as ("w1 w2 ...", count) pairs, most frequent first."""
    words = nltk.word_tokenize(corpus)
    freq_dist = nltk.FreqDist(compute_ngrams(words, ngram_val))
    # pprint(freq_dist.items())
    ranked = sorted(freq_dist.items(), key=lambda pair: pair[1],
                    reverse=True)
    return [(' '.join(gram), count) for gram, count in ranked[:limit]]
if __name__ == '__main__':
    taggerPath = 'tagger.pkl'
    tagger = []
    lemma = {}
    # Check if the file tagger.pkl exists
    # if so load tagger, if not create a tagger
    if os.path.isfile(taggerPath):
        print('Loading Tagger')
        tagger = loadPkl(taggerPath)
    else:
        print('Initialiazing Tagger')
        # Train a unigram POS tagger on the Spanish CESS treebank and cache it.
        spanishTags = nltk.corpus.cess_esp.tagged_sents()
        tagger = nltk.UnigramTagger(spanishTags)
        savePkl(tagger, taggerPath)
    # list of texts, each text is a string (a sms)
    sentences = readMessages(
        '../../Corpus/SFU_Spanish_Review_Corpus/moviles/', tagger)
    # Chunk grammar: a noun, a preposition (sps00), then another noun.
    grammar = "CHUNK: {<n.*><sps00><n.*>}"
    cp = nltk.RegexpParser(grammar)
    # Parse each tagged sentence and print every matched CHUNK subtree.
    for sent in sentences:
        # pprint(sent)
        tree = cp.parse(sent)
        # pprint(tree)
        for subtree in tree.subtrees():
            if subtree.label() == 'CHUNK':
                print(subtree)
| [
"48939025+llCUriel@users.noreply.github.com"
] | 48939025+llCUriel@users.noreply.github.com |
465b9336c6c23addd490124cbca063924fdb8f1b | afb50bf0615b7bacb35827106157584449a466e4 | /moldesign/_tests/conftest.py | 108f71148d6dac6c2cf26073731989bec2c3d7d5 | [
"Apache-2.0"
] | permissive | jellyr/molecular-design-toolkit | 4f7a22c5a36318022d0be6d89d889c8169b100fd | 72a81111c9257cdac88562e6ca3cd80614881da6 | refs/heads/master | 2020-12-02T21:24:01.359532 | 2017-07-01T04:39:52 | 2017-07-01T04:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py |
def pytest_itemcollected(item):
    """Copy a module-level ``__PYTEST_MARK__`` (if any) onto the collected
    test item, so a whole module can be marked in one place."""
    _missing = object()
    mark = getattr(item.module, '__PYTEST_MARK__', _missing)
    if mark is not _missing:
        item.add_marker(mark)
# TODO: nicer output strings for git commit status
# see https://docs.pytest.org/en/latest/example/simple.html#post-process-test-reports-failures
#@pytest.hookimpl(tryfirst=True, hookwrapper=True)
#def pytest_runtest_makereport(item, call):i
# pass
# Also possibly useful: item.add_report_section
| [
"avirshup@gmail.com"
] | avirshup@gmail.com |
25bb1e59fa52a1478f01a27db44ee8ae299b07d2 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/subprocess/repeater.py | cf01ca41051f6970c677e34642d0326924274e24 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 1,421 | py | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Echo anything written to stdin on stdout.
"""
__module_id__ = "$Id$"
#end_pymotw_header
import sys
# Announce startup on stderr so it does not pollute the echoed stream.
sys.stderr.write('repeater.py: starting\n')
sys.stderr.flush()
# Copy stdin to stdout line by line until EOF (readline() returns '').
while True:
    next_line = sys.stdin.readline()
    if not next_line:
        break
    sys.stdout.write(next_line)
    # Flush each line immediately so a parent process sees output promptly.
    sys.stdout.flush()
sys.stderr.write('repeater.py: exiting\n')
sys.stderr.flush()
| [
"350840291@qq.com"
] | 350840291@qq.com |
3f78c466709124429eaedfcbc4849133d80eb1be | 4c4509c34b57350b605af50600eefc0c24a74255 | /ecommerce/urls.py | 0dead703ab3eb8c660305689032883b343a6f140 | [] | no_license | sayanth123/ecomm | cd6dd7e8c3fb13048d35c272379a320c20eb3d24 | 67101ebbb08c82bbd15a7c1dfc22c3da5483e307 | refs/heads/main | 2023-05-05T03:20:16.660301 | 2021-05-27T04:21:44 | 2021-05-27T04:21:44 | 370,259,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,247 | py | """ecommerce URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from ecommerce import settings
from django.conf.urls.static import static
# Route the admin site and the three project apps; each app keeps its own
# urls.py included under its prefix.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('ecommerce_app/', include('ecommerceapp.urls')),
    path('search_app/', include('search_app.urls')),
    path('cart/', include('cart_app.urls')),
]
# Serve static and media files through Django only during development;
# in production the web server should handle these paths.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL,
                          document_root= settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
#search_app/
#ecommerce_app/ | [
"you@example.com"
] | you@example.com |
588d62e455ac9574347362ab3be396c056a9340d | db23803c56eb7f0f7e0239ba45ef5aa5f1e26fc6 | /scripts/processing_scripts/create_reference_files/rRNA_from_gff3.py | 6ec321246d58122cf8840cb19bd39097232ddb90 | [] | no_license | weibokong27/picrust2_manuscript | ed362146df5f917e3f92aa012ba5a864dc2928fa | dd4e2daa0b7058fa0ef46bc8f02c052f226c34ed | refs/heads/master | 2022-07-02T14:47:02.757223 | 2020-05-16T12:42:29 | 2020-05-16T12:42:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,062 | py | #!/usr/bin/python3
import argparse
import skbio.sequence
import os
import sys
def read_fasta(filename, cut_header=True):
    '''Read in FASTA file and return dictionary with each independent sequence
    id as a key and the corresponding sequence string as the value.

    Parameters
    ----------
    filename : str
        Path to the FASTA file.
    cut_header : bool
        If True (default) the id is the first whitespace-separated word of
        the header (without the leading ">"); otherwise the whole header
        line (without ">" and without the trailing newline) is used.
    '''
    # Intitialize empty dict.
    seq = {}
    # Most recently parsed header name; None until the first ">" line.
    name = None
    # Read in FASTA line-by-line.
    with open(filename, "r") as fasta:
        for line in fasta:
            if line[0] == ">":
                # Bug fix: strip the line terminator in BOTH modes — the
                # original kept the trailing newline when cut_header=False,
                # so dict keys differed between the two modes.
                header = line.rstrip("\r\n")
                if cut_header:
                    name = header.split()[0][1:]
                else:
                    name = header[1:]
                # Intitialize empty sequence with this id.
                seq[name] = ""
            else:
                # Append sequence line (minus terminator) to the current id.
                seq[name] += line.rstrip("\r\n")
    return seq
def main():
    """Parse CLI args, scan the GFF3 for rRNA features matching the product
    string, slice each feature out of the reference FASTA and print it in
    FASTA format on stdout (reverse-complemented for '-' strand)."""
    parser = argparse.ArgumentParser(description="Reads in rRNA coordinates\
from gff3 file and slices out sequence from assembly FASTA. User can specify\
other rRNAs to output besides 16S if needed. Note that other coordinates can\
be in the gff3 as well, but only elements annotated as \"rRNA\" will be\
parsed", epilog='''Example of usage:
python3 rRNA_from_gff3.py -p 16s_product GFF3 REF_GENOME
''', formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("gff", metavar="annotation_file", type=str,
                        help="gff3 file containing rRNA coordinates")
    parser.add_argument("ref", metavar="reference_genome", type=str,
                        help="fasta file with reference sequence")
    parser.add_argument("-p", "--product_str", metavar="rRNA_product_string",
                        type=str, help="String matching product of rRNA line\
(default=16s_rRNA)", default="16s_rRNA")
    args = parser.parse_args()
    ref = read_fasta(args.ref)
    # Get reference genome basename without extension.
    ref_name, ref_name_extension = os.path.splitext(
        os.path.basename(args.ref))
    rRNA_counter = 0
    # Read through gff3 line-by-line.
    with open(args.gff, "r") as gff3:
        for line in gff3:
            # If first character is comment then skip.
            if line[0] == "#":
                continue
            # Remove line terminator/newline characters.
            line = line.rstrip("\r\n")
            # Split line on tab characters.
            line_split = line.split("\t")
            # Check if element is defined as rRNA and matches product string.
            if line_split[2] == "rRNA" and args.product_str in line_split[8]:
                rRNA_counter += 1
                scaffold = line_split[0]
                start = int(line_split[3])
                stop = int(line_split[4])
                strand = line_split[6]
                # Get nucleotide sequence of rRNA based on coordinates.
                # GFF3 coordinates are 1-based inclusive, hence start-1.
                rRNA_slice = skbio.sequence.DNA(sequence=ref[scaffold]
                                                [start-1:stop].upper())
                # Define name of sequence based on genome and genomic
                # coordinates.
                name = "|".join([ref_name, scaffold, str(start), str(stop),
                                 strand])
                # Take reverse complement if gene is on negative strand.
                if strand == "-":
                    rRNA_slice = rRNA_slice.reverse_complement()
                # Print rRNA sequence in FASTA format.
                print(">" + name)
                print(rRNA_slice)
    if rRNA_counter == 0:
        print("Warning: file " + args.gff +
              " did not contain any rRNA matches.", file=sys.stderr)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"gavinmdouglas@gmail.com"
] | gavinmdouglas@gmail.com |
5d7b346a48b908e9726d1c407659bf42416ca287 | f7deb23c08fa89c73e44e7bf865f8c03ef6e20f6 | /政府报告词云.py | 24b95c181ad6a039949093fdbd2f595ff82c56b9 | [] | no_license | qinhew/MY_D_Python | b85a0263376826b1d29a7fd4fc327fc4a72d42f6 | 51bf823c742743291b8720541b498313c4760367 | refs/heads/master | 2022-12-19T03:42:52.181513 | 2020-09-30T02:56:03 | 2020-09-30T02:56:03 | 298,992,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | import jieba
import wordcloud
from scipy.misc import imread
# Mask image that constrains the word cloud to a map-of-China silhouette.
# NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — presumably an
# old SciPy is pinned here; imageio.imread is the modern replacement.
mask = imread("Chinamap.jpg")
f = open("新时代中国特色社会主义.txt", "r", encoding="utf-8")
t = f.read()
f.close()
# Segment the Chinese text with jieba and join with spaces so WordCloud
# can tokenize it like western text.
ls = jieba.lcut(t)
txt = " ".join(ls)
# msyh.ttc (Microsoft YaHei) is needed to render Chinese glyphs.
w = wordcloud.WordCloud(font_path="msyh.ttc",
                        mask=mask, width=1000, height=700, background_color="white")
w.generate(txt)
w.to_file("政府报告词云.png")
| [
"270240626@qq.com"
] | 270240626@qq.com |
5db87f98445a36fe18384ffb43ce34c037f4e457 | 70f1cd4a860c2936d192a55f796e1f25fb20a083 | /0x0A-python-inheritance/5-base_geometry.py | 2fce442b3d73bda1a9c7b2b9b19ee1c13da2b241 | [] | no_license | JacobBHartman/holbertonschool-higher_level_programming | 5641f8c00249284ab457fd6854d99bfe3320f618 | 2fd7a65d00e8f32d1dc8d46e2d00e3cc72ba3577 | refs/heads/master | 2021-09-24T08:32:00.338508 | 2018-10-05T22:24:22 | 2018-10-05T22:24:22 | 113,101,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | #!/usr/bin/python3
"""this module contains an empty class called BaseGeometry"""
class BaseGeometry:
    """Intentionally empty base class for geometry objects."""

    def __init__(self):
        """Create a BaseGeometry instance; there is no state to set up."""
        pass
| [
"209@holbertonschool.com"
] | 209@holbertonschool.com |
9170b0b21899081c2505bb3e82a8d26b4391d673 | d650da884a0a33dd1acf17d04f56d6d22a2287fd | /test/test_inspect.py | 894d8f97bc3c3456e7baeaaca34461ea1c6b61a8 | [] | no_license | GaelicGrime/rpnpy | debe3a79e9a456e13dcd1421d42f01c0bcbe9084 | 5a095dd024403daad93a3222bd190bbb867a8ae2 | refs/heads/master | 2023-04-03T11:19:16.737278 | 2020-12-03T08:26:40 | 2020-12-03T08:26:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | from unittest import TestCase
from math import log
from rpnpy.inspect import countArgs
class TestCountArgs(TestCase):
    """Exercise countArgs on callables of various arities."""

    def testZero(self):
        """A zero-argument callable must report an arity of 0."""
        func = lambda: 3
        self.assertEqual(0, countArgs(func))

    def testOne(self):
        """A one-argument callable must report an arity of 1."""
        func = lambda x: 3
        self.assertEqual(1, countArgs(func))

    def testTwo(self):
        """A two-argument callable must report an arity of 2."""
        func = lambda x, y: 3
        self.assertEqual(2, countArgs(func))

    def testLog(self):
        """math.log has no inspectable signature (at least in Python 3.7),
        so countArgs returns None."""
        self.assertEqual(None, countArgs(log))

    def testLogWithDefault(self):
        """For an uninspectable builtin the caller-supplied default value
        is returned instead."""
        self.assertEqual(3, countArgs(log, 3))
| [
"terry@jon.es"
] | terry@jon.es |
fb2fcf81d59459c7ce1dcbea2ffb9c738b0cc99f | b75e86e44f37d71ba6feaa1646670729cd6fe416 | /randgen_target.py | fa2c3fc4849a27b48886b495b8872c6ca0bc992a | [] | no_license | matthewr6/moral-transfer | fb11f8f498cb7736e65d30d5a8f70da9ff6bf863 | d3ec091d2d7a387f253a2db0181e6dacee30c2c2 | refs/heads/master | 2023-03-24T10:57:53.543164 | 2021-03-19T22:29:18 | 2021-03-19T22:29:18 | 342,505,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | import numpy as np
def rand_target_morals(input_vec):
    """Draw a random 10-dim binary moral vector that (a) differs from
    *input_vec* in at least one position and (b) never asserts both
    members of an opposing moral pair (indices 2k and 2k+1, e.g.
    care vs. harm) at the same time.  Rejection-samples until both
    constraints hold."""
    assert len(input_vec) == 10
    while True:
        candidate = np.random.randint(0, 2, 10)  # random candidate morals
        # Reject candidates identical to the input vector.
        if not any(a != b for a, b in zip(input_vec, candidate)):
            continue
        # Accept only if no opposing pair has both sides set to 1.
        if all(not (candidate[i] and candidate[i + 1])
               for i in range(0, 10, 2)):
            return candidate
# print(rand_target_morals([0 for i in range(10)]))
| [
"47877154+mustafakhan14@users.noreply.github.com"
] | 47877154+mustafakhan14@users.noreply.github.com |
6dd04baaa2c4b69230aff448cc5124f506c5e06e | 80b6e49aacba02cb1e2ecef7678667e0f82ae561 | /Lesson 3/PermMissingElem.py | a8fab1878c47f1774c6a6dae8ec9fba88050c04f | [] | no_license | juliapedrycz/codility-lessons | b93dc3eefba11ec94430321ccc36557d61189d24 | 3b1619059a3c6e6e5b563391991f37b99c256053 | refs/heads/main | 2023-03-12T16:38:46.844478 | 2021-03-01T19:46:52 | 2021-03-01T19:46:52 | 342,552,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | def solution(A):
sumA = 0
x = 0
for i in range(0,len(A)):
sumA += A[i]
x += i+1
return len(A) + 1 - (sumA-x)
def main():
    """Demo: print the missing element of a sample permutation."""
    sample = [3, 4, 5, 1]
    print("A: ", sample, " \tmissing element: ", solution(sample))
# Run the demo only when executed directly.
if __name__ == "__main__":
    main()
| [
"julkapedrycz@gmail.com"
] | julkapedrycz@gmail.com |
3ce912889455dcd9ee9fe906681e155a17711461 | f747bb741d17ec138a01b87a07bfd96f6ab4b723 | /clockwork/utils.py | c53e30fd8fc27dceb2973b1ab3e357a5daccdeda | [
"MIT"
] | permissive | iqbal-lab-org/clockwork_validate_spreadsheet | 996f69ae181203e00b3cb73e385cf5bb43f3b522 | 543df9d1af665ec3dd6c6dfa7b9457f1dd142ea6 | refs/heads/master | 2021-05-10T13:40:29.360846 | 2018-01-26T10:28:31 | 2018-01-26T10:28:31 | 118,486,146 | 0 | 0 | MIT | 2018-01-23T08:22:18 | 2018-01-22T16:50:25 | null | UTF-8 | Python | false | false | 47 | py | ../clockwork_original/python/clockwork/utils.py | [
"martinghunt@gmail.com"
] | martinghunt@gmail.com |
8bcfc63d79258fc7090fb849af95c660ea9fc560 | c1ead46041b6150b212a3cf6b5f5121a5c4177b8 | /sites/models.py | 17567913d4d1c6118ee597078ba7cf120ad05006 | [] | no_license | rfadams/yakgroups | 31d6c767c18bdea185b67621656284cdef0a3bdf | fb263cd176ab6b5e5663dce1d919cf70344b4553 | refs/heads/master | 2021-01-22T07:28:14.301724 | 2013-06-14T18:01:12 | 2013-06-14T18:03:19 | 10,694,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from django.db import models
import datetime
class GroupSite(models.Model):
    """GroupSite Model: site id and info for a site.

    ``random_name`` is shown as the site's name until the group leader
    renames it (``name_changed``); ``updated`` is refreshed on every save.
    """
    random_name = models.CharField(editable=False, max_length=10, unique=True)
    name = models.CharField(blank=False, max_length=25, unique=True)
    title = models.CharField(blank=False, max_length=25)
    name_changed = models.BooleanField(editable=False, default=False)
    group_leader = models.IntegerField(editable=False, default=0)
    created = models.DateTimeField(editable=False, default=datetime.datetime.now)
    updated = models.DateTimeField(editable=False)

    def save(self, *args, **kwargs):
        """Refresh 'updated' on each save, then delegate to Model.save().

        Bug fix: the override now accepts and forwards *args/**kwargs so
        callers can still use save(using=...), save(update_fields=...), etc.
        — the original signature silently broke those calls.
        """
        self.updated = datetime.datetime.now()
        super(GroupSite, self).save(*args, **kwargs)

    def __unicode__(self):
        return u'%s: %s' % (self.pk, self.name)

    def get_site_name(self):
        """Return the chosen name once renamed, else the random placeholder."""
        if self.name_changed:
            return self.name
        else:
            return self.random_name

    def get_site_title(self):
        """Return the chosen title once renamed, else "Unnamed"."""
        if self.name_changed:
            return self.title
        else:
            return "Unnamed"
| [
"rfadams@gmail.com"
] | rfadams@gmail.com |
d09e8cfd12158d7338f73096900aa2f29faece0c | 09cead98874a64d55b9e5c84b369d3523c890442 | /py210110c_python1a/day06_210214/homework/hw_5_yiding.py | 2b8e12eb578a330374112c74a1059c59eddd995b | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | """
1,1
2,1
3,1
4,0
5,0
6,1
7,0.5
8,1
"""
"""
q1.
none, false, else if, for, in, from, as
q2.
abc$, 0a
q3.
a
q4.
c
q5.
a
q6.
a
q7.
a
q8.
a
""" | [
"lada314@gmail.com"
] | lada314@gmail.com |
2809357943d5b037037bc13c4f28ae71a69980ad | cb7d6fb9faa8e1c7c68e9a156f997446eae47b83 | /BitcoinSpellbook.py | fa6929c4cdc70e92b5cce3a7a822215b4cde124d | [
"MIT"
] | permissive | WouterGlorieux/BitcoinSpellbook-v0.1 | 3beb7e9edfacf1bad9cb0a900e5a2f0d785ed0ab | 982428470ba977182b2519ca8ab15fe8f94ee607 | refs/heads/master | 2021-05-31T13:05:33.380354 | 2016-05-19T17:58:29 | 2016-05-19T17:58:29 | 57,211,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | __author__ = 'Wouter Glorieux'
import bitcoin
import BIP44
import BlockchainDataWrapper
import SimplifiedInputsListWrapper
import BlocklinkerWrapper
import ProportionalRandomWrapper
import BitvoterWrapper
import HDForwarderWrapper
import DistributeBTCWrapper
import BitcoinWellsWrapper
from pprint import pprint
import sys
# Default service endpoints for the Bitcoin Spellbook helper apps; each
# wrapper factory below takes one of these as its default URL.
BLOCKCHAINDATA_URL = 'https://blockchaindata.appspot.com'
SIL_URL = 'https://simplifiedinputslist.appspot.com'
BLOCKLINKER_URL = 'https://blocklinker.appspot.com'
PROPORTIONALRANDOM_URL = 'https://proportionalrandom.appspot.com'
BITVOTER_URL = 'https://bitvoter.appspot.com'
HDFORWARDER_URL = 'https://hdforwarder.appspot.com'
DISTRIBUTEBTC_URL = 'https://distributebtc.appspot.com'
BITCOINWELLS_URL = 'https://bitcoinwells.appspot.com'
class BitcoinSpellbook():
    """Facade over the Spellbook wrapper services plus a helper to sign and
    broadcast a raw Bitcoin transaction.  (Python 2 code: print statements.)"""
    def __init__(self):
        self.address = ''
    def BlockchainData(self, url=BLOCKCHAINDATA_URL):
        """Return a BlockchainData client bound to *url*."""
        return BlockchainDataWrapper.BlockchainData(url)
    def SIL(self, url=SIL_URL):
        """Return a Simplified-Inputs-List client bound to *url*."""
        return SimplifiedInputsListWrapper.SIL(url)
    def Blocklinker(self, url=BLOCKLINKER_URL):
        """Return a Blocklinker client bound to *url*."""
        return BlocklinkerWrapper.Blocklinker(url)
    def HDForwarder(self, url=HDFORWARDER_URL):
        """Return an HD-forwarder client bound to *url*."""
        return HDForwarderWrapper.HDForwarder(url)
    def DistributeBTC(self, url=DISTRIBUTEBTC_URL):
        """Return a DistributeBTC client bound to *url*."""
        return DistributeBTCWrapper.DistributeBTC(url)
    def BitcoinWells(self, url=BITCOINWELLS_URL):
        """Return a BitcoinWells client bound to *url*."""
        return BitcoinWellsWrapper.BitcoinWells(url)
    def ProportionalRandom(self, url=PROPORTIONALRANDOM_URL):
        """Return a ProportionalRandom client bound to *url*."""
        return ProportionalRandomWrapper.ProportionalRandom(url)
    def Bitvoter(self, url=BITVOTER_URL):
        """Return a Bitvoter client bound to *url*."""
        return BitvoterWrapper.Bitvoter(url)
    def BIP44(self):
        """Expose the BIP44 helper module."""
        return BIP44
    def sendCustomTransaction(self, privkeys, inputs, outputs, fee=0):
        """Build, sign and broadcast a transaction.

        Aborts (returning False) unless: the stated *fee* equals inputs
        minus outputs, every input address has a private key in *privkeys*,
        and every input is confirmed.  Returns True only if pushtx succeeds.
        """
        success = False
        totalInputValue = 0
        UTXOs = []
        # Sum the value of all unspent inputs (entries without a 'spend' key).
        for tx_input in inputs:
            if 'spend' not in tx_input:
                totalInputValue += tx_input['value']
                UTXOs.append(tx_input)
        totalOutputValue = 0
        for tx_output in outputs:
            totalOutputValue += tx_output['value']
        # The implied fee is whatever input value is not sent to an output.
        diff = totalInputValue - totalOutputValue
        if fee != diff:
            pprint("Warning: Fee incorrect! aborting transaction")
        else:
            allKeysPresent = True
            allInputsConfirmed = True
            for tx_input in UTXOs:
                if tx_input['address'] not in privkeys:
                    print 'not found:', tx_input['address']
                    allKeysPresent = False
                # block_height is None for unconfirmed (mempool) inputs.
                if tx_input['block_height'] == None:
                    allInputsConfirmed = False
            if allKeysPresent == True and allInputsConfirmed == True:
                tx = bitcoin.mktx(UTXOs, outputs)
                # Sign each input with the key for its address.
                for i in range(0, len(UTXOs)):
                    tx = bitcoin.sign(tx, i, str(privkeys[UTXOs[i]['address']]))
                try:
                    bitcoin.pushtx(tx)
                    success = True
                except:
                    # NOTE(review): bare except hides the failure cause;
                    # only the broadcast error is printed, not re-raised.
                    e = sys.exc_info()
                    print e
                    success = False
        return success
| [
"wouter@valyrian.tech"
] | wouter@valyrian.tech |
7a6ea156514e8fec2c46d6640f4d2fd9b8b57b5d | 80b7f2a10506f70477d8720e229d7530da2eff5d | /ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/statistics/misdirectedperflow/misdirectedperflow.py | 0562872acb73fb50d2a0e5450f633d42c7da8502 | [
"MIT"
] | permissive | OpenIxia/ixnetwork_restpy | 00fdc305901aa7e4b26e4000b133655e2d0e346a | c8ecc779421bffbc27c906c1ea51af3756d83398 | refs/heads/master | 2023-08-10T02:21:38.207252 | 2023-07-19T14:14:57 | 2023-07-19T14:14:57 | 174,170,555 | 26 | 16 | MIT | 2023-02-02T07:02:43 | 2019-03-06T15:27:20 | Python | UTF-8 | Python | false | false | 4,342 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
# NOTE: auto-generated by the ixnetwork_restpy SDM code generator — keep
# manual edits to comments only so regeneration stays trivial.
class MisdirectedPerFlow(Base):
    """Display misdirected statistics on a per-flow basis. When active this replaces port level misdirected statistics
    The MisdirectedPerFlow class encapsulates a required misdirectedPerFlow resource which will be retrieved from the server every time the property is accessed.
    """

    __slots__ = ()
    _SDM_NAME = "misdirectedPerFlow"
    # Maps the Python-side attribute name to the REST attribute name.
    _SDM_ATT_MAP = {
        "Enabled": "enabled",
    }
    _SDM_ENUM_MAP = {}

    def __init__(self, parent, list_op=False):
        super(MisdirectedPerFlow, self).__init__(parent, list_op)

    @property
    def Enabled(self):
        # type: () -> bool
        """
        Returns
        -------
        - bool: If true then misdirected per flow statistics will be enabled
        """
        return self._get_attribute(self._SDM_ATT_MAP["Enabled"])

    @Enabled.setter
    def Enabled(self, value):
        # type: (bool) -> None
        self._set_attribute(self._SDM_ATT_MAP["Enabled"], value)

    def update(self, Enabled=None):
        # type: (bool) -> MisdirectedPerFlow
        """Updates misdirectedPerFlow resource on the server.

        Args
        ----
        - Enabled (bool): If true then misdirected per flow statistics will be enabled

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(self, Enabled=None):
        # type: (bool) -> MisdirectedPerFlow
        """Finds and retrieves misdirectedPerFlow resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve misdirectedPerFlow resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all misdirectedPerFlow resources from the server.

        Args
        ----
        - Enabled (bool): If true then misdirected per flow statistics will be enabled

        Returns
        -------
        - self: This instance with matching misdirectedPerFlow resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of misdirectedPerFlow data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the misdirectedPerFlow resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
4aa8ab5f377a6923765d26773dc5b45edce5d5bc | 527e2b5aa7ed2053d4570997cc46b35c1e22ad1f | /tpfinal/imagenes/plotear imagenes.py | 7a8b2fc8adb372dce31f7f7944d0ecebcd109cfe | [] | no_license | fcaprile/instrumentacion | 8dac060e090a7f445c775e62b32e909f33b38db8 | 1de0ffc6c6017a8d1139b8238a068b45e7fbd823 | refs/heads/master | 2020-05-02T01:06:51.690038 | 2019-06-30T22:18:29 | 2019-06-30T22:18:29 | 177,681,383 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 30 19:00:06 2019
@author: ferchi
"""
import math as m
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from array import array
import os
import sys
from scipy.integrate import quad as inte
carpeta='C:/Users/ferchi/Desktop/github labo 6/instrumentacion/tpfinal/mediciones/pid/totodsloserrores/'
lista=[]
for archivo in os.listdir(carpeta):
if archivo.endswith(".txt"):
lista.append(archivo)
for nombre in lista:
#nombre='titas,derivadas,integrales,errores,kp=0,2,ki0,7.txt'
titas,derivadas,integrales,errores=np.loadtxt(carpeta+nombre)
plt.plot(titas,'b-',label='Proporcional')
plt.plot(derivadas,'g-',label='Derivativo')
plt.plot(integrales,'y-',label='Integral')
plt.plot(errores,'k-',label='Error total')
plt.grid(True) # Para que quede en hoja cuadriculada
plt.legend(loc = 'best')
plt.title(nombre)
plt.xlabel('Paso')
plt.ylabel('Error')
plt.savefig(nombre+'.png')
plt.clf()
plt.close()
| [
"fcaprile@gmail.com"
] | fcaprile@gmail.com |
33f2f552754a26206f2b192b5ce5639d80bcdbf5 | 5efc0271eb922da63a6825112b4de786915b1b89 | /22_qpdf简化使用/test.py | def4e13a983537dc89696317c9b91c7f06cd114e | [] | no_license | uestcmee/PiecemealScripts | 41e7d45cbcd5d720822da6cbc935efd2ce0fcee4 | a18b4dd58229897aafe53767448e3fd8fb9c1881 | refs/heads/master | 2021-04-21T16:04:04.006230 | 2021-01-07T06:47:45 | 2021-01-07T06:47:45 | 249,794,695 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | import re
files='{D:/Desktop/Geryler Karadi 2015.pdf} D:/Desktop/光伏产业\链.png'
files.replace('\\','/')
print(files) | [
"zikepeng@outlook.com"
] | zikepeng@outlook.com |
a00f94a9e471ce1f11eaa85e7c0a82d80b5634d4 | a356d83e4b4d87d3494f5a162ba16fa71fedd480 | /opencv_cnn_plant_v0.py | 4ff4634a1c7a7bb4f353ae9ae51bca4bbe230e39 | [
"MIT"
] | permissive | mjain72/OpenCV-and-CNN-for-plant-classification | 6e024270e409e2979397df6d2ab2496a7a72cead | 7e650697c6e192b82016e36bf96c234be7ec4856 | refs/heads/master | 2020-03-23T19:40:08.461722 | 2018-07-23T09:58:14 | 2018-07-23T09:58:14 | 141,994,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,855 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 21 12:37:24 2018
@author: mohit
"""
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from tqdm import tqdm #to show progress
from sklearn.model_selection import train_test_split
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, f1_score
#Load image
# Directories holding the labelled plant photos (one subfolder per species).
image_dir_test = 'images/plants/test/'
image_dir_train = 'images/plants/train/'
#define the range for green color
# Half-width of the accepted hue band around green (H=60) in HSV space.
sensitivity = 30
#define final image size
# Side length (pixels) of the square images fed to the CNN.
image_size = 64
'''
define a function to remove background from the image to only leave the green leaves. SUbsequenty transfer it to
gray scale, followed by resizing them to 64 x 64 size image
'''
def image_transformation(imageName, sensitivity):
    """Segment the green plant from an image and return a normalized
    single-channel array ready for the CNN.

    Pipeline: BGR->HSV, mask the green hue band, morphological closing to
    fill holes, grayscale, Gaussian blur, resize to
    (image_size, image_size), scale to [0, 1] and add a channel axis.

    Parameters:
        imageName (str): path of the image file passed to cv2.imread.
        sensitivity (int): half-width of the accepted hue band around
            green (hue 60) in HSV space.

    Returns:
        Array of shape (image_size, image_size, 1) with values in [0, 1].

    Improvements: removed the debug ``print(...shape)`` calls, and the
    final reshape now uses ``image_size`` instead of hard-coded 64s so it
    stays consistent with the resize above.
    """
    imagePlatCV = cv2.imread(imageName)  # BGR image
    hsvImage = cv2.cvtColor(imagePlatCV, cv2.COLOR_BGR2HSV)
    # Accept hues within +/- sensitivity of pure green (H=60).
    lower_green = np.array([60 - sensitivity, 100, 50])
    upper_green = np.array([60 + sensitivity, 255, 255])
    # threshold the hsv image to get only green colors
    mask = cv2.inRange(hsvImage, lower_green, upper_green)
    # apply bitwise_and between mask and the original image
    greenOnlyImage = cv2.bitwise_and(imagePlatCV, imagePlatCV, mask=mask)
    # Morphological closing with a 15x15 all-ones kernel fills small holes.
    kernel0 = np.ones((15, 15), np.uint8)
    closing0 = cv2.morphologyEx(greenOnlyImage, cv2.MORPH_CLOSE, kernel0)
    # convert to gray scale
    grayScale = cv2.cvtColor(closing0, cv2.COLOR_BGR2GRAY)
    # blur the edges before downsampling
    blurImage = cv2.GaussianBlur(grayScale, (15, 15), 0)
    # resize image
    resizeImage = cv2.resize(blurImage, (image_size, image_size),
                             interpolation=cv2.INTER_AREA)
    resizeImage = resizeImage / 255  # normalize pixel values to [0, 1]
    # Add the trailing channel axis expected by Keras.
    return resizeImage.reshape(image_size, image_size, 1)
#define the 12 plant species; the list index doubles as the integer class label
classes = ['Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat', 'Fat Hen', 'Loose Silky-bent', 'Maize', 'Scentless Mayweed'
, 'Shepherds Purse', 'Small-flowered Cranesbill', 'Sugar beet']
'''
Data extraction: The loop below will create a data list containing image file path name, the classifcation lable (0 -11) and the specific plant name
'''
train = [] #each entry: [file path, integer label, species name]
for species_lable, speciesName in enumerate(classes):
    for fileName in os.listdir(os.path.join(image_dir_train, speciesName)):
        train.append([image_dir_train + '{}/{}'.format(speciesName, fileName), species_lable, speciesName])
#convert the list into dataframe using Pandas
trainigDataFrame = pd.DataFrame(train, columns=['FilePath', 'PlantLabel', 'PlantName'])
#Shuffle the rows (reproducibly, via the fixed seed)
seed = 1234
trainigDataFrame = trainigDataFrame.sample(frac=1, random_state=seed)
trainigDataFrame = trainigDataFrame.reset_index()
#Preprocess every image into one slot of a (n_samples, image_size, image_size, 1) array
X = np.zeros((trainigDataFrame.shape[0], image_size, image_size, 1)) #array with image size of each procssed image after image_transformfunction
for i, fileName in tqdm(enumerate(trainigDataFrame['FilePath'])):
    print(fileName)
    newImage = image_transformation(fileName, sensitivity)
    X[i] = newImage
#One-hot encode the integer labels into a (n_samples, 12) numpy array
y = trainigDataFrame['PlantLabel']
y = pd.Categorical(y)
y = pd.get_dummies(y)
y = y.values
#Split: 90% train+dev / 10% validation, then the 90% again into 90/10 train/dev
X_train_dev, X_val, y_train_dev, y_val = train_test_split(X, y, test_size=0.10, random_state=seed)
X_train, X_dev, y_train, y_dev = train_test_split(X_train_dev, y_train_dev, test_size= 0.10, random_state=seed)
#Augment the training set with random affine transforms (applied on the fly)
datagen = ImageDataGenerator(rotation_range=360, # Degree range for random rotations
                             width_shift_range=0.2, # Range for random horizontal shifts
                             height_shift_range=0.2, # Range for random vertical shifts
                             zoom_range=0.2, # Range for random zoom
                             horizontal_flip=True, # Randomly flip inputs horizontally
                             vertical_flip=True) # Randomly flip inputs vertically
datagen.fit(X_train)
#define training model
def cnn_model():
    """Build and compile the 4-block CNN classifier.

    Architecture: four Conv2D(relu) + MaxPool + Dropout(0.1) blocks with
    64/32/16/16 filters, then Flatten and a 12-way softmax.

    Returns
    -------
    keras.models.Sequential
        Compiled model (adam optimizer, categorical crossentropy loss)
        expecting inputs of shape (image_size, image_size, 1).
    """
    classifier = Sequential()
    # Only the first layer needs input_shape; Keras infers it for the rest,
    # so the redundant input_shape arguments on later layers were dropped.
    classifier.add(Conv2D(64, kernel_size=(3,3), padding='same', strides=(1,1), input_shape=(image_size, image_size,1), activation='relu'))
    classifier.add(MaxPool2D(pool_size=(2,2)))
    classifier.add(Dropout(0.1))
    classifier.add(Conv2D(32, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    classifier.add(MaxPool2D(pool_size=(2,2)))
    classifier.add(Dropout(0.1))
    classifier.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    classifier.add(MaxPool2D(pool_size=(2,2)))
    classifier.add(Dropout(0.1))
    classifier.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    classifier.add(MaxPool2D(pool_size=(2,2)))
    classifier.add(Dropout(0.1))
    classifier.add(Flatten())
    classifier.add(Dense(units=12, activation='softmax'))
    classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    classifier.summary()  # print the architecture once for the training log
    return classifier
# Training hyper-parameters
batch_size = 64
epochs = 50
# Keep only the best model (lowest validation loss) seen during training
checkpoint = ModelCheckpoint('model3.h5', verbose=1, save_best_only=True)
model = cnn_model()
# Train on augmented batches from the generator.
# FIX: steps_per_epoch must be the number of *batches* per epoch
# (samples // batch_size); passing the raw sample count made every
# "epoch" iterate batch_size times over the training data.
trainingModel = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                                    steps_per_epoch=X_train.shape[0] // batch_size,
                                    epochs=epochs, verbose=1, callbacks=[checkpoint],
                                    validation_data=(X_dev, y_dev))
# Evaluate the checkpointed best model on the held-out validation split
final_model = load_model("model3.h5")
final_loss, final_accuracy = final_model.evaluate(X_val, y_val)
print('Final Loss: {}, Final Accuracy: {}'.format(final_loss, final_accuracy))
# Predictions and per-class diagnostics
y_pred = final_model.predict(X_val)
# Confusion matrix and classification report over the argmax class indices
cm = confusion_matrix(y_val.argmax(axis=1), y_pred.argmax(axis=1))
print(classification_report(y_val.argmax(axis=1), y_pred.argmax(axis=1), target_names=classes))
# Plot per-class F1 score
f1Score = f1_score(y_val.argmax(axis=1), y_pred.argmax(axis=1), average=None)
y_pos = np.arange(len(classes))
plt.bar(y_pos, f1Score)
plt.xticks(y_pos, classes)
plt.ylabel('F1 Score')
plt.title('F1 Score of various species after classification')
plt.show()
| [
"noreply@github.com"
] | mjain72.noreply@github.com |
2302883fc7ae5c935c28322e4dca3573cfdade73 | 4347bddb0b39aa60f758eead103454254cc6e467 | /proyecto/biblioteca/apps/libro/admin.py | eb95fd554d93d2dbd9e4e0fb42cb92cbc1059186 | [] | no_license | soledadfbarrios/proyecto | 4573542f57ef4fbe35a2f4519fa10c0ac9ea8d5e | a284b1ce187a243de45cebd560a85af86ba79ee3 | refs/heads/main | 2023-08-23T01:30:12.514942 | 2021-10-21T01:53:55 | 2021-10-21T01:53:55 | 419,546,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | from django.contrib import admin
# Register your models here.
from .models import Autor, Libro
# import the Autor (and Libro) models from this app's models module
admin.site.register(Autor)
# register both model classes so they appear in the Django admin site
admin.site.register(Libro)
"86332428+soledadfbarrios@users.noreply.github.com"
] | 86332428+soledadfbarrios@users.noreply.github.com |
0c6c3749e3e833c57bdc0ef645fd60c82467e7ff | 7b20bfea333946b7241ee48be7341d89fa11b3e4 | /main.py | 17661dd44ebacb4d52be871ef3f144887602a68f | [] | no_license | esperandote/solvecube | 8a363325d1c003fda3ed6a8d30810b10ff12a799 | 397a558632660c656dd52951622b7e7e2817c6ed | refs/heads/master | 2020-07-16T01:35:55.336989 | 2019-09-01T15:11:47 | 2019-09-01T15:11:47 | 205,692,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,044 | py |
#encoding:utf-8
import cube
def checke(state,a,b,c):#check edge piece a in position b and orientation c
if(state[b-1]==a and state[b+19]==c):
return True
else:
return False
def checkc(state,a,b,c):#check corner piece a in position b and orientation c
if(state[b+11]==a and state[b+31]==c):
return True
else:
return False
def uppercase(a):
if(a=='f'):return('F')
if(a=='b'):return('B')
if(a=='u'):return('U')
if(a=='d'):return('D')
if(a=='l'):return('L')
if(a=='r'):return('R')
return(a)
def solve(state):
if(state==False):
return '输入错误,请认真检查~'
t=0
solve=''
#c1 12*2=24种情况(经检验确认无误)
c1=[
[1,0,'ul2'],
[1,1,'Bl'],
[2,0,'l2'],
[2,1,'Lfd'],
[3,0,'u2l2'],
[3,1,'rFd'],
[4,0,'Ul2'],
[4,1,'fL'],
[5,0,'l'],
[5,1,'BD'],
[6,0,'Rd2'],
[6,1,'bD'],
[7,0,'L'],
[7,1,'fd'],
[8,0,'rd2'],
[8,1,'Fd'],
[9,0,'D'],
[9,1,'bl'],
[10,0,''],
[10,1,'lfd'],
[11,0,'d2'],
[11,1,'RFd'],
[12,0,'d'],
[12,1,'FL']
]
for i in c1:
if(checke(state,10,i[0],i[1])):#还可优化
cube.do(state,i[2])
solve+=i[2]
t=1
break
if(t==0):
return '输入错误,请认真检查~'
t=0
#c2 11*2=22种情况(经检验确认无误)
c2=[
[1,0,'u2f2'],
[1,1,'UrF'],
[2,0,'uf2'],
[2,1,'Lfl'],
[3,0,'Uf2'],
[3,1,'rF'],
[4,0,'f2'],
[4,1,'urF'],
[5,0,'dlD'],
[5,1,'d2Bd2'],
[6,0,'DRd'],
[6,1,'r2F'],
[7,0,'dLD'],
[7,1,'f'],
[8,0,'Drd'],
[8,1,'F'],
[9,0,'Br2F'],
[9,1,'b2UrF'],
[11,0,'r2Uf2'],
[11,1,'RF'],
[12,0,''],
[12,1,'f2urF'],
]
for i in c2:
if(checke(state,12,i[0],i[1])):
cube.do(state,i[2])
solve+=i[2]
t=1
break
if(t==0):
return '输入错误,请认真检查~'
t=0
#c3 10*2=20种情况(经检验确认无误)
c3=[
[1,0,'Ur2'],
[1,1,'bR'],
[2,0,'u2r2'],
[2,1,'UbR'],
[3,0,'r2'],
[3,1,'UFrf'],
[4,0,'ur2'],
[4,1,'Frf'],
[5,0,'b2R'],
[5,1,'DBd'],
[6,0,'R'],
[6,1,'Dbd'],
[7,0,'d2Ld2'],
[7,1,'dfD'],
[8,0,'r'],
[8,1,'dFD'],
[9,0,'BDbd'],
[9,1,'BR'],
[11,0,''],
[11,1,'rDbd'],
]
for i in c3:
if(checke(state,11,i[0],i[1])):
cube.do(state,i[2])
solve+=i[2]
t=1
break
if(t==0):
return '输入错误,请认真检查~'
t=0
#c4 9*2=18种情况(经检验确认无误)
c4=[
[1,0,'b2'],
[1,1,'URbr'],
[2,0,'Ub2'],
[2,1,'lBL'],
[3,0,'ub2'],
[3,1,'Rbr'],
[4,0,'u2b2'],
[4,1,'uRbr'],
[5,0,'Dld'],
[5,1,'B'],
[6,0,'dRD'],
[6,1,'b'],
[7,0,'DLd'],
[7,1,'d2fd2'],
[8,0,'drD'],
[8,1,'d2Fd2'],
[9,0,''],
[9,1,'BdRD'],
]
for i in c4:
if(checke(state,9,i[0],i[1])):
cube.do(state,i[2])
solve+=i[2]
t=1
break
if(t==0):
return '输入错误,请认真检查~'
t=0
#f1 8*2*8*3=384种情况,棱块7,角块7
f1=[
[1,0,1,0,'LUluLu2l2UL'],#棱块位置,棱块朝向,角块位置,角块朝向
[1,0,1,1,'lULuluL'],
[1,0,1,2,'u2lUL'],#未完待续(差得远..)
[1,0,2,0,'RUrURlu2L'],
[1,0,2,1,'BubluL'],
[1,0,2,2,'Ru2ru2luL'],
[1,0,3,0,'bu2Bu2lUL'],
[1,0,3,1,'fuFulUL'],
[1,0,3,2,'luL'],
[1,0,4,0,'buBu2lUL'],
[1,0,4,1,'FufUluL'],
[1,0,4,2,'ru2RlUL'],
[1,0,5,0,'ULulu2lUL'],
[1,0,5,1,'buBluL'],
[1,0,5,2,'Lu2l2UL'],
[1,0,6,0,'u2Bublu2L'],
[1,0,6,1,'UBUblUL'],
[1,0,6,2,'u2ru2RluL'],
[1,0,7,0,'FufulUL'],
[1,0,7,1,'uFUfu2lUL'],
[1,0,7,2,'lu2Lu2luL'],
[1,0,8,0,'uRurlUL'],
[1,0,8,1,'RurUluL'],
[1,0,8,2,'u2fUFlUL'],
[1,1,1,0,'LulubUBlu2L'],
[1,1,1,1,'bUBUFUf'],
[1,1,1,2,'lu2LuFUf'],
[1,1,2,0,'Bu2bUFUf'],
[1,1,2,1,'Ruru2FUf'],
[1,1,2,2,'Fu2f'],
[1,1,3,0,'BUbFu2f'],
[1,1,3,1,'fUf2Uf'],
[1,1,3,2,'Lu2lUFuf'],
[1,1,4,0,'Bu2bFu2f'],
[1,1,4,1,'FUfu2Fuf'],
[1,1,4,2,'UFUf'],
[1,1,5,0,'u2bUBFuf'],
[1,1,5,1,'ubFufB'],
[1,1,5,2,'u2Lu2lUFUf'],
[1,1,6,0,'urURFu2f'],
[1,1,6,1,'BUbUFUf'],
[1,1,6,2,'ruRFuf'],
[1,1,7,0,'UlULUFuf'],
[1,1,7,1,'lULFUf'],
[1,1,7,2,'u2lu2LFu2f'],
[1,1,8,0,'fUFu2Fuf'],
[1,1,8,1,'Ufu2f2uf'],
[1,1,8,2,'Ru2ruFUf'],
[2,0,1,0,'LulruRlUL'],
[2,0,1,1,'Bu2b2lu2LB'],
[2,0,1,2,'LulUluL'],
[2,0,2,0,'luLulUL'],
[2,0,2,1,'bu2Blu2L'],
[2,0,2,2,'Ruru2luL'],
[2,0,3,0,'lu2LUluL'],
[2,0,3,1,'ulUL'],
[2,0,3,2,'fUFluL'],
[2,0,4,0,'lu2LulUL'],
[2,0,4,1,'UluL'],
[2,0,4,2,'ruRlUL'],
[2,0,5,0,'u2Lulu2lUL'],
[2,0,5,1,'UbuBluL'],
[2,0,5,2,'ULu2l2UL'],
[2,0,6,0,'uBUbUluL'],
[2,0,6,1,'u2Blu2Lb'],
[2,0,6,2,'u2ruRuluL'],
[2,0,7,0,'UFufulUL'],
[2,0,7,1,'FUfu2lUL'],
[2,0,7,2,'luLUluL'],
[2,0,8,0,'RurlUL'],
[2,0,8,1,'fu2FuluL'],
[2,0,8,2,'uRUrulUL'],
[2,1,1,0,'Lu2lu2FUf'],
[2,1,1,1,'Fuf'],
[2,1,1,2,'BubuFUf'],
[2,1,2,0,'Lu2lFuf'],
[2,1,2,1,'u2FUf'],
[2,1,2,2,'RUruFuf'],
[2,1,3,0,'lULRUrFu2f'],
[2,1,3,1,'fu2f2Uf'],
[2,1,3,2,'lULu2FUf'],
[2,1,4,0,'LUlFuf'],
[2,1,4,1,'Fu2fu2Fuf'],
[2,1,4,2,'rURUFUf'],
[2,1,5,0,'ubUBFuf'],
[2,1,5,1,'bFufB'],
[2,1,5,2,'LUlu2FUf'],
[2,1,6,0,'rURFu2f'],
[2,1,6,1,'Bu2bFUf'],
[2,1,6,2,'UruRFuf'],
[2,1,7,0,'u2lULUFuf'],
[2,1,7,1,'uFUfuFUf'],
[2,1,7,2,'uluLu2Fuf'],
[2,1,8,0,'UfUFu2Fuf'],
[2,1,8,1,'u2fu2f2uf'],
[2,1,8,2,'URu2ruFUf'],
[3,0,1,0,'ru2Rlu2L'],
[3,0,1,1,'uluL'],
[3,0,1,2,'luLu2lUL'],
[3,0,2,0,'ru2RuluL'],
[3,0,2,1,'lu2L'],
[3,0,2,2,'bUBu2luL'],
[3,0,3,0,'ruRlu2L'],
[3,0,3,1,'fu2FulUL'],
[3,0,3,2,'Lul2uL'],
[3,0,4,0,'Ru2rfu2FulUL'],
[3,0,4,1,'Fu2fUluL'],
[3,0,4,2,'RuruluL'],
[3,0,5,0,'Lulu2lUL'],
[3,0,5,1,'ubuBluL'],
[3,0,5,2,'uLu2l2UL'],
[3,0,6,0,'UBublu2L'],
[3,0,6,1,'BUblUL'],
[3,0,6,2,'ruRuluL'],
[3,0,7,0,'uFufulUL'],
[3,0,7,1,'u2Fu2flu2L'],
[3,0,7,2,'ulu2Lu2luL'],
[3,0,8,0,'u2RurlUL'],
[3,0,8,1,'UfuFu2luL'],
[3,0,8,2,'URUrulUL'],
[3,1,1,0,'RUru2Fuf'],
[3,1,1,1,'Bu2bFuf'],
[3,1,1,2,'RurFUf'],
[3,1,2,0,'rURLUlUFuf'],
[3,1,2,1,'u2fu2f2Uf'],
[3,1,2,2,'rURFUf'],#经过修改
[3,1,3,0,'Ru2ru2Fuf'],
[3,1,3,1,'FUf'],
[3,1,3,2,'LUlUFuf'],
[3,1,4,0,'Ru2rFUf'],
[3,1,4,1,'u2Fuf'],
[3,1,4,2,'RurURu2ruFuf'],
[3,1,5,0,'UbUBFuf'],
[3,1,5,1,'u2bFufB'],
[3,1,5,2,'u2LUlu2FUf'],
[3,1,6,0,'u2rURFu2f'],
[3,1,6,1,'uBUbUFUf'],
[3,1,6,2,'uruRFuf'],
[3,1,7,0,'lULUFuf'],
[3,1,7,1,'UFUfuFUf'],
[3,1,7,2,'UluLu2Fuf'],
[3,1,8,0,'ufUFu2Fuf'],
[3,1,8,1,'fu2f2uf'],
[3,1,8,2,'RUrFUf'],
[4,0,1,0,'fuFlUL'],
[4,0,1,1,'BubuluL'],
[4,0,1,2,'lu2Lu2lUL'],
[4,0,2,0,'fu2FlUL'],
[4,0,2,1,'buBlu2L'],
[4,0,2,2,'u2luL'],
[4,0,3,0,'FufUruRlUL'],
[4,0,3,1,'Fufu2luL'],
[4,0,3,2,'Lu2l2uL'],
[4,0,4,0,'fu2Fu2luL'],
[4,0,4,1,'rURUluL'],
[4,0,4,2,'lUL'],
[4,0,5,0,'uLulu2lUL'],
[4,0,5,1,'u2buBluL'],
[4,0,5,2,'u2Lu2l2UL'],
[4,0,6,0,'Bublu2L'],
[4,0,6,1,'uBUblUL'],
[4,0,6,2,'ru2RluL'],
[4,0,7,0,'u2FufulUL'],
[4,0,7,1,'UFu2flu2L'],
[4,0,7,2,'UluLUluL'],
[4,0,8,0,'URurlUL'],
[4,0,8,1,'fuFu2luL'],
[4,0,8,2,'RUrulUL'],
[4,1,1,0,'Fu2fUFuf'],
[4,1,1,1,'BUbFuf'],
[4,1,1,2,'uFUf'],
[4,1,2,0,'FUfUFuf'],#changed
[4,1,2,1,'u2fUf2Uf'],
[4,1,2,2,'Ru2ruFuf'],
[4,1,3,0,'Fu2fuFUf'],
[4,1,3,1,'LulFUf'],
[4,1,3,2,'UFuf'],
[4,1,4,0,'fUFBUbFuf'],
[4,1,4,1,'fUFuFUf'],
[4,1,4,2,'ru2RUFUf'],
[4,1,5,0,'bUBFuf'],
[4,1,5,1,'UbuBUFuf'],
[4,1,5,2,'ULUlu2FUf'],
[4,1,6,0,'UrURFu2f'],
[4,1,6,1,'u2BUbUFUf'],
[4,1,6,2,'u2ruRFuf'],
[4,1,7,0,'ulULUFuf'],
[4,1,7,1,'FUfuFUf'],
[4,1,7,2,'luLu2Fuf'],
[4,1,8,0,'u2fUFu2Fuf'],
[4,1,8,1,'ufu2f2uf'],
[4,1,8,2,'uRUrFUf'],
[5,0,1,0,'Lul2UL'],
[5,0,1,1,'uLUl2uL'],
[5,0,1,2,'Ubub2Fufb'],
[5,0,2,0,'uLul2UL'],
[5,0,2,1,'bub2Fufb'],
[5,0,2,2,'u2LUl2uL'],
[5,0,3,0,'ULul2UL'],
[5,0,3,1,'u2bub2Fufb'],
[5,0,3,2,'LUl2uL'],
[5,0,4,0,'u2Lul2UL'],
[5,0,4,1,'ULUl2uL'],
[5,0,4,2,'ubub2Fufb'],
[5,0,5,0,'Lul2u2Lu2lUL'],
[5,0,5,1,'LuluLul2uL'],
[5,0,5,2,'LulfuFlUL'],
[5,0,6,0,'LulBublu2L'],
[5,0,6,1,'Lu2lBUblUL'],
[5,0,6,2,'buBUruRFuf'],
[5,0,7,0,'l2u2l2u2l2'],
[5,0,7,1,'LulUFu2flu2L'],
[5,0,7,2,'bu2BluLu2Fuf'],
[5,0,8,0,'LulURurlUL'],
[5,0,8,1,'bUBfu2f2uf'],
[5,0,8,2,'LulRUrulUL'],
[5,1,1,0,'ubuBlu2LulUL'],
[5,1,1,1,'uLulUFuf'],
[5,1,1,2,'UbUBlu2L'],
[5,1,2,0,'u2buBlu2LulUL'],
[5,1,2,1,'bUBlu2L'],
[5,1,2,2,'u2LulUFuf'],
[5,1,3,0,'buBlu2LulUL'],
[5,1,3,1,'u2bUBlu2L'],
[5,1,3,2,'LulUFuf'],
[5,1,4,0,'UbuBlu2LulUL'],
[5,1,4,1,'ULulUFuf'],
[5,1,4,2,'ubUBlu2L'],
[5,1,5,0,'bu2BlUL'],
[5,1,5,1,'LulBUbFuf'],
[5,1,5,2,'LulFu2fUFuf'],
[5,1,6,0,'LulUrURFu2f'],
[5,1,6,1,'bUb2lu2Lb'],
[5,1,6,2,'LUlruRFuf'],
[5,1,7,0,'Lu2l2ULUFuf'],
[5,1,7,1,'buBFUfu2lUL'],
[5,1,7,2,'Lul2u2LFu2f'],
[5,1,8,0,'LUlfUFu2Fuf'],
[5,1,8,1,'Lu2lfu2f2uf'],
[5,1,8,2,'bu2BRUrulUL'],
[6,0,1,0,'ruRu2fu2FlUL'],
[6,0,1,1,'BubFuf'],
[6,0,1,2,'u2rURlUL'],
[6,0,2,0,'uruRu2fu2FlUL'],
[6,0,2,1,'UrURlUL'],
[6,0,2,2,'uBubFuf'],
[6,0,3,0,'UruRu2fu2FlUL'],
[6,0,3,1,'urURlUL'],
[6,0,3,2,'UBubFuf'],
[6,0,4,0,'BUbRu2ru2Fuf'],
[6,0,4,1,'u2BubFuf'],
[6,0,4,2,'rURlUL'],
[6,0,5,0,'ruRULulu2lUL'],
[6,0,5,1,'Bub2FufB'],
[6,0,5,2,'ruRLu2l2UL'],
[6,0,6,0,'rUlULR'],
[6,0,6,1,'rURUruRlUL'],
[6,0,6,2,'rURfu2FlUL'],
[6,0,7,0,'ruRFufulUL'],
[6,0,7,1,'ru2RFUfu2lUL'],
[6,0,7,2,'Bu2bluLu2Fuf'],
[6,0,8,0,'ru2r2urlUL'],
[6,0,8,1,'BUbfu2f2uf'],
[6,0,8,2,'rUr2lULr'],
[6,1,1,0,'Uru2RFuf'],
[6,1,1,1,'BUbuluL'],
[6,1,1,2,'u2ruRUFUf'],
[6,1,2,0,'ru2RFuf'],
[6,1,2,1,'UruRUFUf'],
[6,1,2,2,'uBUbuluL'],
[6,1,3,0,'u2ru2RFuf'],
[6,1,3,1,'uruRUFUf'],
[6,1,3,2,'UBUbuluL'],
[6,1,4,0,'uru2RFuf'],
[6,1,4,1,'u2BUbuluL'],
[6,1,4,2,'ruRUFUf'],
[6,1,5,0,'rURbUBFuf'],
[6,1,5,1,'ru2RbFufB'],
[6,1,5,2,'BUbuLu2l2UL'],
[6,1,6,0,'rUr2u2rFu2f'],
[6,1,6,1,'BubluLulUL'],
[6,1,6,2,'rURFUfUFuf'],
[6,1,7,0,'rlULUFufR'],
[6,1,7,1,'BubFUfu2lUL'],
[6,1,7,2,'rURluLu2Fuf'],
[6,1,8,0,'ruRfUFu2Fuf'],
[6,1,8,1,'rURufu2f2uf'],
[6,1,8,2,'Bu2bRUrulUL'],
[7,0,1,0,'luLfuFlUL'],
[7,0,1,1,'u2Fufu2Fuf'],
[7,0,1,2,'lULu2lUL'],
[7,0,2,0,'FufRUru2Fuf'],
[7,0,2,1,'ulULu2lUL'],
[7,0,2,2,'UFufu2Fuf'],
[7,0,3,0,'UluLfuFlUL'],
[7,0,3,1,'UlULu2lUL'],
[7,0,3,2,'uFufu2Fuf'],
[7,0,4,0,'FUfLUlFuf'],
[7,0,4,1,'Fufu2Fuf'],
[7,0,4,2,'u2lULu2lUL'],
[7,0,5,0,'lu2l2ulu2lUL'],
[7,0,5,1,'FUfbFufB'],
[7,0,5,2,'lUl2u2l2UL'],
[7,0,6,0,'luLBublu2L'],
[7,0,6,1,'lu2LBlu2Lb'],
[7,0,6,2,'Fu2frFu2fR'],
[7,0,7,0,''],
[7,0,7,1,'FufRu2ru2Fuf'],
[7,0,7,2,'FufLUlUFuf'],
[7,0,8,0,'RFufu2rFuf'],
[7,0,8,1,'Fuf2u2f2uf'],
[7,0,8,2,'luRULr'],
[7,1,1,0,'uFuflu2L'],
[7,1,1,1,'u2FUfUluL'],
[7,1,1,2,'luLuFUf'],
[7,1,2,0,'u2Fuflu2L'],
[7,1,2,1,'uluLuFUf'],
[7,1,2,2,'UFUfUluL'],
[7,1,3,0,'Fuflu2L'],
[7,1,3,1,'UluLuFUf'],
[7,1,3,2,'uFUfUluL'],
[7,1,4,0,'UFuflu2L'],
[7,1,4,1,'FUfUluL'],
[7,1,4,2,'u2luLuFUf'],
[7,1,5,0,'luLbUBFuf'],
[7,1,5,1,'lULubFufB'],
[7,1,5,2,'Fu2fLu2l2UL'],
[7,1,6,0,'lULurURFu2f'],
[7,1,6,1,'FufBlu2Lb'],
[7,1,6,2,'lULruRFuf'],
[7,1,7,0,'Fuf2u2FulUL'],
[7,1,7,1,'lULfUf2Uf'],
[7,1,7,2,'lULuLUlFuf'],
[7,1,8,0,'lULfUFu2Fuf'],
[7,1,8,1,'lu2Lfu2f2uf'],
[7,1,8,2,'FufURlULr'],
[8,0,1,0,'u2Rlu2Lr'],
[8,0,1,1,'URUru2luL'],
[8,0,1,2,'ufuf2Uf'],
[8,0,2,0,'URlu2Lr'],
[8,0,2,1,'u2fuf2Uf'],
[8,0,2,2,'RUru2luL'],
[8,0,3,0,'uRlu2Lr'],
[8,0,3,1,'fuf2Uf'],
[8,0,3,2,'u2RUru2luL'],
[8,0,4,0,'Rlu2Lr'],
[8,0,4,1,'uRUru2luL'],
[8,0,4,2,'Ufuf2Uf'],
[8,0,5,0,'fUFubUBFuf'],
[8,0,5,1,'fUf2bufB'],
[8,0,5,2,'RurLu2l2UL'],
[8,0,6,0,'fUFrURFu2f'],
[8,0,6,1,'RUruBlu2Lb'],
[8,0,6,2,'fu2FrFu2fR'],
[8,0,7,0,'f2u2f2u2f2'],
[8,0,7,1,'Ru2rFu2flu2L'],
[8,0,7,2,'fUFulu2LFu2f'],
[8,0,8,0,'Rur2u2RlUL'],
[8,0,8,1,'fUFLUlFuf'],
[8,0,8,2,'Ruru2fuFlUL'],
[8,1,1,0,'RuruLUlFuf'],
[8,1,1,1,'URurFu2f'],
[8,1,1,2,'ufUFulUL'],
[8,1,2,0,'Ru2rLu2lFuf'],
[8,1,2,1,'u2fUFulUL'],
[8,1,2,2,'RurFu2f'],
[8,1,3,0,'fuFruRlu2L'],
[8,1,3,1,'fUFulUL'],
[8,1,3,2,'u2RurFu2f'],
[8,1,4,0,'RUruRUrFUf'],
[8,1,4,1,'uRurFu2f'],
[8,1,4,2,'UfUFulUL'],
[8,1,5,0,'RUrbUBFuf'],
[8,1,5,1,'Ru2rbFufB'],
[8,1,5,2,'fu2FLu2l2UL'],
[8,1,6,0,'Ru2r2URFu2f'],
[8,1,6,1,'fuFBUblUL'],
[8,1,6,2,'Rur2Fu2fR'],
[8,1,7,0,'fu2f2ufulUL'],
[8,1,7,1,'fUf2u2flu2L'],
[8,1,7,2,'RUrlu2LFu2f'],
[8,1,8,0,'Ru2rFuf'],
[8,1,8,1,'RurFUfu2Fuf'],
[8,1,8,2,'fUFruRlUL'],
]
for i in f1:
if(checke(state,7,i[0],i[1]) and checkc(state,7,i[2],i[3])):
cube.do(state,i[4])
solve+='<BR />'
solve+=i[4]
t=1
break
if(t==0):
return '输入错误,请认真检查hhh~'
t=0
#f2 7*2*7*3=294种情况,棱块5,角块5
f2=[
[1,0,1,0,'Lu2lUrUr2Lulr'],
[1,0,1,1,'bUBu2LUl'],
[1,0,1,2,'ufu2FULUl'],
[1,0,2,0,'URu2rULUl'],
[1,0,2,1,'RuruLUl'],
[1,0,2,2,'Lul'],
[1,0,3,0,'BUbLul'],
[1,0,3,1,'fUFULUl'],
[1,0,3,2,'Lu2lu2Lul'],
[1,0,4,0,'Bu2bLul'],
[1,0,4,1,'uRUrLul'],
[1,0,4,2,'u2LUl'],
[1,0,5,0,'u2bUBULul'],
[1,0,5,1,'ubu2BLu2l'],
[1,0,5,2,'uLUluLUl'],
[1,0,6,0,'urURLul'],
[1,0,6,1,'BUbu2LUl'],
[1,0,6,2,'rLulR'],
[1,0,8,0,'fUFLu2l'],
[1,0,8,1,'UfLu2lF'],
[1,0,8,2,'URUrULUl'],
[1,1,1,0,'Uru2RbuB'],
[1,1,1,1,'ufUFUbuB'],
[1,1,1,2,'ubUB'],
[1,1,2,0,'URu2rfu2FbUB'],
[1,1,2,1,'URurbuB'],
[1,1,2,2,'Ru2r2bu2BR'],
[1,1,3,0,'Uru2Ru2bUB'],
[1,1,3,1,'fuFbUB'],
[1,1,3,2,'UbuB'],
[1,1,4,0,'UruRu2bUB'],
[1,1,4,1,'uRur2bu2BR'],
[1,1,4,2,'ru2Rbu2B'],
[1,1,5,0,'ULulubUB'],
[1,1,5,1,'buBUbuB'],
[1,1,5,2,'Lu2lbu2B'],
[1,1,6,0,'u2Bubu2bUB'],
[1,1,6,1,'UBu2b2UB'],
[1,1,6,2,'UruRbuB'],
[1,1,8,0,'uRurbu2B'],
[1,1,8,1,'u2fuFubuB'],
[1,1,8,2,'u2Rbu2Br'],
[2,0,1,0,'Lu2luLUl'],
[2,0,1,1,'ULul'],
[2,0,1,2,'BubLUl'],
[2,0,2,0,'Lu2lULul'],
[2,0,2,1,'uLUl'],
[2,0,2,2,'RUrLul'],
[2,0,3,0,'fUFBUbLul'],
[2,0,3,1,'fu2FULUl'],
[2,0,3,2,'ufUFLUl'],
[2,0,4,0,'LUlULul'],
[2,0,4,1,'uRu2rLul'],
[2,0,4,2,'rUr2Lulr'],
[2,0,5,0,'ubUBULul'],
[2,0,5,1,'bu2BLu2l'],
[2,0,5,2,'LUluLUl'],
[2,0,6,0,'rURLul'],
[2,0,6,1,'UBUbu2LUl'],
[2,0,6,2,'UrLulR'],
[2,0,8,0,'UfUFLu2l'],
[2,0,8,1,'u2fLu2lF'],
[2,0,8,2,'u2RUrULUl'],
[2,1,1,0,'LulruRbu2B'],
[2,1,1,1,'Bu2b2uB'],
[2,1,1,2,'Lulu2buB'],
[2,1,2,0,'ufuFbu2B'],
[2,1,2,1,'u2fu2FbUB'],
[2,1,2,2,'RurubuB'],
[2,1,3,0,'u2ru2RbuB'],
[2,1,3,1,'bUB'],
[2,1,3,2,'fUFUbuB'],
[2,1,4,0,'ufu2Fbu2B'],
[2,1,4,1,'u2buB'],
[2,1,4,2,'ruRbu2B'],
[2,1,5,0,'u2LulubUB'],
[2,1,5,1,'UbuBUbuB'],
[2,1,5,2,'ULu2lbu2B'],
[2,1,6,0,'uBubu2bUB'],
[2,1,6,1,'u2Bu2b2UB'],
[2,1,6,2,'u2ruRbuB'],
[2,1,8,0,'Rurbu2B'],
[2,1,8,1,'ufuFubuB'],
[2,1,8,2,'uRbu2Br'],
[3,0,1,0,'RUrLu2l'],
[3,0,1,1,'URu2rLul'],
[3,0,1,2,'ufUFULUl'],
[3,0,2,0,'rURLUlu2Lul'],
[3,0,2,1,'Uru2r2Lul'],
[3,0,2,2,'rURULUl'],
[3,0,3,0,'Ru2rLu2l'],
[3,0,3,1,'ULUl'],
[3,0,3,2,'LUlu2Lul'],
[3,0,4,0,'Ru2rULUl'],
[3,0,4,1,'Lu2l'],
[3,0,4,2,'uRuruLUl'],
[3,0,5,0,'UbUBULul'],
[3,0,5,1,'u2bu2BLu2l'],
[3,0,5,2,'u2LUluLUl'],
[3,0,6,0,'u2rURLul'],
[3,0,6,1,'uBUbu2LUl'],
[3,0,6,2,'urLulR'],
[3,0,8,0,'ufUFLu2l'],
[3,0,8,1,'fLu2lF'],
[3,0,8,2,'RUrULUl'],
[3,1,1,0,'ru2Ru2bUB'],
[3,1,1,1,'buB'],
[3,1,1,2,'ufuFbUB'],
[3,1,2,0,'ru2RbuB'],
[3,1,2,1,'u2bUB'],
[3,1,2,2,'UrURu2buB'],
[3,1,3,0,'ruRu2bUB'],
[3,1,3,1,'fu2FbUB'],
[3,1,3,2,'LulUbuB'],
[3,1,4,0,'Ru2rfu2FbUB'],
[3,1,4,1,'uRu2r2bu2BR'],
[3,1,4,2,'RurbuB'],
[3,1,5,0,'LulubUB'],
[3,1,5,1,'ubuBUbuB'],
[3,1,5,2,'uLu2lbu2B'],
[3,1,6,0,'UBubu2bUB'],
[3,1,6,1,'Bu2b2UB'],
[3,1,6,2,'ruRbuB'],
[3,1,8,0,'u2Rurbu2B'],
[3,1,8,1,'UfuFubuB'],
[3,1,8,2,'URbu2Br'],
[4,0,1,0,'uRu2rLu2l'],
[4,0,1,1,'URUrLul'],
[4,0,1,2,'LUl'],
[4,0,2,0,'ULUlULul'],
[4,0,2,1,'UrURu2LUl'],
[4,0,2,2,'Ru2rLul'],
[4,0,3,0,'uRu2rULUl'],
[4,0,3,1,'LulULUl'],
[4,0,3,2,'u2Lul'],
[4,0,4,0,'fu2FRu2rLul'],
[4,0,4,1,'fUFLUl'],
[4,0,4,2,'ru2Ru2LUl'],
[4,0,5,0,'bUBULul'],
[4,0,5,1,'Ubu2BLu2l'],
[4,0,5,2,'ULUluLUl'],
[4,0,6,0,'UrURLul'],
[4,0,6,1,'u2BUbu2LUl'],
[4,0,6,2,'u2rLulR'],
[4,0,8,0,'u2fUFLu2l'],
[4,0,8,1,'ufLu2lF'],
[4,0,8,2,'uRUrULUl'],
[4,1,1,0,'fuFbu2B'],
[4,1,1,1,'Bub2uB'],
[4,1,1,2,'ufu2FbUB'],
[4,1,2,0,'fu2Fbu2B'],
[4,1,2,1,'UruRbu2B'],
[4,1,2,2,'ubuB'],
[4,1,3,0,'uRu2rfu2FbUB'],
[4,1,3,1,'uRurbuB'],
[4,1,3,2,'Lu2lUbuB'],
[4,1,4,0,'uru2RbuB'],
[4,1,4,1,'rURu2buB'],
[4,1,4,2,'bu2B'],
[4,1,5,0,'uLulubUB'],
[4,1,5,1,'u2buBUbuB'],
[4,1,5,2,'u2Lu2lbu2B'],
[4,1,6,0,'Bubu2bUB'],
[4,1,6,1,'uBu2b2UB'],
[4,1,6,2,'uruRbuB'],
[4,1,8,0,'URurbu2B'],
[4,1,8,1,'fuFubuB'],
[4,1,8,2,'Rbu2Br'],
[5,0,1,0,'ULUlBu2bLul'],
[5,0,1,1,'uLulu2Lul'],
[5,0,1,2,'UbUBu2bUB'],
[5,0,2,0,'LUlBu2bLul'],
[5,0,2,1,'bUBu2bUB'],
[5,0,2,2,'u2Lulu2Lul'],
[5,0,3,0,'LUlBUbLul'],
[5,0,3,1,'u2bUBu2bUB'],
[5,0,3,2,'Lulu2Lul'],
[5,0,4,0,'LulULUlULul'],
[5,0,4,1,'ULulu2Lul'],
[5,0,4,2,'ubUBu2bUB'],
[5,0,5,0,''],
[5,0,5,1,'LulURUrLul'],
[5,0,5,2,'Lulu2Bu2bLul'],
[5,0,6,0,'LulUrURLul'],
[5,0,6,1,'bUb2u2b2UB'],
[5,0,6,2,'LUruRl'],
[5,0,8,0,'LUlfUFLu2l'],
[5,0,8,1,'Lu2lfLu2lF'],
[5,0,8,2,'Lu2lRUrULUl'],
[5,1,1,0,'Lulbu2B'],
[5,1,1,1,'uLUlUbuB'],
[5,1,1,2,'UbuBuLUl'],
[5,1,2,0,'uLulbu2B'],
[5,1,2,1,'buBuLUl'],
[5,1,2,2,'u2LUlUbuB'],
[5,1,3,0,'ULulbu2B'],
[5,1,3,1,'u2buBuLUl'],
[5,1,3,2,'LUlUbuB'],
[5,1,4,0,'u2Lulbu2B'],
[5,1,4,1,'LulubuB'],
[5,1,4,2,'LruRlbu2B'],
[5,1,5,0,'Lulufu2FbUB'],
[5,1,5,1,'LulBub2uB'],
[5,1,5,2,'LulfuFbu2B'],
[5,1,6,0,'Bub2uBuLUl'],
[5,1,6,1,'Lu2lBu2b2UB'],
[5,1,6,2,'ru2LUlRUbuB'],
[5,1,8,0,'RUrLUlUbuB'],
[5,1,8,1,'bUBfLu2lF'],
[5,1,8,2,'LulRbu2Br'],
[6,0,1,0,'UrLu2lR'],
[6,0,1,1,'BUb2uB'],
[6,0,1,2,'u2ruRu2LUl'],
[6,0,2,0,'rLu2lR'],
[6,0,2,1,'UruRu2LUl'],
[6,0,2,2,'uBUb2uB'],
[6,0,3,0,'u2rLu2lR'],
[6,0,3,1,'uruRu2LUl'],
[6,0,3,2,'UBUb2uB'],
[6,0,4,0,'urLu2lR'],
[6,0,4,1,'u2BUb2uB'],
[6,0,4,2,'ruRu2LUl'],
[6,0,5,0,'rURbUBULul'],
[6,0,5,1,'ru2Rbu2BLu2l'],
[6,0,5,2,'rURLu2lu2LUl'],
[6,0,6,0,'rUr2u2rLul'],
[6,0,6,1,'rURUrURu2LUl'],
[6,0,6,2,'rURULUlULul'],
[6,0,8,0,'ruRfUFLu2l'],
[6,0,8,1,'rURufLu2lF'],
[6,0,8,2,'rur2u2rLUl'],
[6,1,1,0,'ruRbu2BubUB'],
[6,1,1,1,'BubULul'],
[6,1,1,2,'u2rURbu2B'],
[6,1,2,0,'UruRbuBubUB'],
[6,1,2,1,'UrURbu2B'],
[6,1,2,2,'uBubULul'],
[6,1,3,0,'uruRbuBubUB'],
[6,1,3,1,'urURbu2B'],
[6,1,3,2,'LBu2blu2Lul'],
[6,1,4,0,'ruRbuBubUB'],
[6,1,4,1,'ru2Ru2buB'],
[6,1,4,2,'rURbu2B'],
[6,1,5,0,'rURuLulubUB'],
[6,1,5,1,'Bub2u2BLu2l'],
[6,1,5,2,'ruRLu2lbu2B'],
[6,1,6,0,'ru2RbUB'],
[6,1,6,1,'BubLu2lULul'],
[6,1,6,2,'rURfu2Fbu2B'],
[6,1,8,0,'ru2r2urbu2B'],
[6,1,8,1,'BUbfLu2lF'],
[6,1,8,2,'rUr2bu2Br'],
[8,0,1,0,'fUFufuFbu2B'],
[8,0,1,1,'URurLul'],
[8,0,1,2,'ufUFbUB'],
[8,0,2,0,'fuFru2Ru2bUB'],
[8,0,2,1,'u2fUFbUB'],
[8,0,2,2,'RurLul'],
[8,0,3,0,'fuFruRu2bUB'],
[8,0,3,1,'fUFbUB'],
[8,0,3,2,'u2RurLul'],
[8,0,4,0,'fLu2lUFbuB'],
[8,0,4,1,'uRurLul'],
[8,0,4,2,'UfUFbUB'],
[8,0,5,0,'RUrbUBULul'],
[8,0,5,1,'Ru2rbu2BLu2l'],
[8,0,5,2,'fu2FLu2lbu2B'],
[8,0,6,0,'Ru2r2URLul'],
[8,0,6,1,'fuFBu2b2UB'],
[8,0,6,2,'Rur2Lul'],
[8,0,8,0,'RuLulr'],
[8,0,8,1,'RuruRUrLul'],
[8,0,8,2,'fUFruRbu2B'],
[8,1,1,0,'u2bRuBr'],
[8,1,1,1,'URUrubuB'],
[8,1,1,2,'ufuFULUl'],
[8,1,2,0,'UbRuBr'],
[8,1,2,1,'u2fuFULUl'],
[8,1,2,2,'RUrubuB'],
[8,1,3,0,'ubRuBr'],
[8,1,3,1,'fuFULUl'],
[8,1,3,2,'u2RUrubuB'],
[8,1,4,0,'bRuBr'],
[8,1,4,1,'uRUrubuB'],
[8,1,4,2,'UfuFULUl'],
[8,1,5,0,'RUruLulubUB'],
[8,1,5,1,'fUFbu2BLu2l'],
[8,1,5,2,'RurLu2lbu2B'],
[8,1,6,0,'fUFrURLul'],
[8,1,6,1,'RUruBu2b2UB'],
[8,1,6,2,'fu2FrLulR'],
[8,1,8,0,'Rur2u2Rbu2B'],
[8,1,8,1,'fUFLUlULul'],
[8,1,8,2,'fUFrUr2Lulr']
]
for i in f2:
if(checke(state,5,i[0],i[1]) and checkc(state,5,i[2],i[3])):
cube.do(state,i[4])
solve+='<BR />'
solve+=i[4]
t=1
break
if(t==0):
return '输入错误,请认真检查hhh~'
t=0
#f3 6*2*6*3=216种情况
f3=[
[1,0,1,0,'Uru2RUruR'],
[1,0,1,1,'ufUFu2ruR'],
[1,0,1,2,'rUR'],
[1,0,2,0,'BubfuFru2R'],
[1,0,2,1,'ru2RfuFu2rUR'],
[1,0,2,2,'Ru2r2uR'],
[1,0,3,0,'Uru2RurUR'],
[1,0,3,1,'fuFru2R'],
[1,0,3,2,'u2ruR'],
[1,0,4,0,'UruRurUR'],
[1,0,4,1,'uRur2uR'],
[1,0,4,2,'ru2Ru2rUR'],
[1,0,6,0,'u2BuburUR'],
[1,0,6,1,'UBu2bru2R'],
[1,0,6,2,'UruRUruR'],
[1,0,8,0,'uRuru2rUR'],
[1,0,8,1,'RururuR'],
[1,0,8,2,'u2Ru2r2UR'],
[1,1,1,0,'UrURu2RUrUBub'],
[1,1,1,1,'UrURu2BUb'],
[1,1,1,2,'ufu2Fu2BUb'],
[1,1,2,0,'Bu2buBUb'],
[1,1,2,1,'RurBUb'],
[1,1,2,2,'UBub'],
[1,1,3,0,'URUrBub'],
[1,1,3,1,'fUFu2BUb'],
[1,1,3,2,'u2Ru2rUBub'],
[1,1,4,0,'URu2rBub'],
[1,1,4,1,'uRUrUBub'],
[1,1,4,2,'uBUb'],
[1,1,6,0,'urURUBub'],
[1,1,6,1,'BUbuBUb'],
[1,1,6,2,'ru2RBu2b'],
[1,1,8,0,'fUFBub'],
[1,1,8,1,'UfBubF'],
[1,1,8,2,'Ru2rUBUb'],
[2,0,1,0,'UBubfuFru2R'],
[2,0,1,1,'URu2r2uR'],
[2,0,1,2,'u2RurUruR'],
[2,0,2,0,'u2ruRurUR'],
[2,0,2,1,'Uru2Ru2rUR'],
[2,0,2,2,'Rur2uR'],
[2,0,3,0,'ufu2FruR'],
[2,0,3,1,'ru2R'],
[2,0,3,2,'fUFu2ruR'],
[2,0,4,0,'ufu2Fu2rUR'],
[2,0,4,1,'uruR'],
[2,0,4,2,'ruRu2rUR'],
[2,0,6,0,'uBuburUR'],
[2,0,6,1,'u2Bu2bru2R'],
[2,0,6,2,'uru2Ru2ruR'],
[2,0,8,0,'Ruru2rUR'],
[2,0,8,1,'fu2FUruR'],
[2,0,8,2,'uRUr2u2R'],
[2,1,1,0,'UBu2buBUb'],
[2,1,1,1,'u2Bub'],
[2,1,1,2,'URurBUb'],
[2,1,2,0,'u2Ru2rBub'],
[2,1,2,1,'BUb'],
[2,1,2,2,'RUrUBub'],
[2,1,3,0,'ufuFufBubF'],
[2,1,3,1,'fu2Fu2BUb'],
[2,1,3,2,'ufUFUBUb'],
[2,1,4,0,'u2RUrBub'],
[2,1,4,1,'uRu2rUBub'],
[2,1,4,2,'rURuBUb'],
[2,1,6,0,'rURUBub'],
[2,1,6,1,'Bu2bu2BUb'],
[2,1,6,2,'Uru2RBu2b'],
[2,1,8,0,'UfUFBub'],
[2,1,8,1,'u2fBubF'],
[2,1,8,2,'u2RUru2BUb'],
[3,0,1,0,'ru2RurUR'],
[3,0,1,1,'UruR'],
[3,0,1,2,'ufuFru2R'],
[3,0,2,0,'ru2RUruR'],
[3,0,2,1,'urUR'],
[3,0,2,2,'UrURuruR'],
[3,0,3,0,'ruRurUR'],
[3,0,3,1,'fu2Fru2R'],
[3,0,3,2,'u2Rur2uR'],
[3,0,4,0,'RUrURu2r2UR'],
[3,0,4,1,'uRu2r2uR'],
[3,0,4,2,'RurUruR'],
[3,0,6,0,'UBuburUR'],
[3,0,6,1,'Bu2bru2R'],
[3,0,6,2,'ruRUruR'],
[3,0,8,0,'u2Ruru2rUR'],
[3,0,8,1,'uRururuR'],
[3,0,8,2,'URu2r2UR'],
[3,1,1,0,'RUrBub'],
[3,1,1,1,'URu2rUBub'],
[3,1,1,2,'u2rURuBUb'],
[3,1,2,0,'ruRuru2RBu2b'],
[3,1,2,1,'Uru2RuBUb'],
[3,1,2,2,'rURu2BUb'],
[3,1,3,0,'Ru2rBub'],
[3,1,3,1,'u2BUb'],
[3,1,3,2,'u2RUrUBub'],
[3,1,4,0,'Ru2ru2BUb'],
[3,1,4,1,'Bub'],
[3,1,4,2,'uRurBUb'],
[3,1,6,0,'u2rURUBub'],
[3,1,6,1,'u2Bu2bu2BUb'],
[3,1,6,2,'uru2RBu2b'],
[3,1,8,0,'ufUFBub'],
[3,1,8,1,'fBubF'],
[3,1,8,2,'RUru2BUb'],
[4,0,1,0,'fuFu2rUR'],
[4,0,1,1,'URur2uR'],
[4,0,1,2,'ufu2Fru2R'],
[4,0,2,0,'fu2Fu2rUR'],
[4,0,2,1,'UruRu2rUR'],
[4,0,2,2,'ruR'],
[4,0,3,0,'uRUrURu2r2UR'],
[4,0,3,1,'uRurUruR'],
[4,0,3,2,'u2Ru2r2uR'],
[4,0,4,0,'fu2FruR'],
[4,0,4,1,'rURuruR'],
[4,0,4,2,'u2rUR'],
[4,0,6,0,'BuburUR'],
[4,0,6,1,'uBu2bru2R'],
[4,0,6,2,'ru2Ru2ruR'],
[4,0,8,0,'URuru2rUR'],
[4,0,8,1,'fuFruR'],
[4,0,8,2,'Ru2r2UR'],
[4,1,1,0,'uRu2rBub'],
[4,1,1,1,'URUrUBub'],
[4,1,1,2,'UBUb'],
[4,1,2,0,'uRUrBub'],
[4,1,2,1,'UrURuBUb'],
[4,1,2,2,'Ru2rUBub'],
[4,1,3,0,'uRu2ru2BUb'],
[4,1,3,1,'u2RurBUb'],
[4,1,3,2,'Bu2b'],
[4,1,4,0,'fUFBUbu2Bub'],
[4,1,4,1,'fUFUBUb'],
[4,1,4,2,'ru2RuBUb'],
[4,1,6,0,'UrURUBub'],
[4,1,6,1,'UBu2bu2BUb'],
[4,1,6,2,'u2ru2RBu2b'],
[4,1,8,0,'u2fUFBub'],
[4,1,8,1,'ufBubF'],
[4,1,8,2,'u2Ru2rUBUb'],
[6,0,1,0,'ruRUru2RurUR'],
[6,0,1,1,'Bubu2Bub'],
[6,0,1,2,'u2rURu2rUR'],
[6,0,2,0,'UBUbRu2rBub'],
[6,0,2,1,'UrURu2rUR'],
[6,0,2,2,'uBubu2Bub'],
[6,0,3,0,'rURfuFu2rUR'],
[6,0,3,1,'urURu2rUR'],
[6,0,3,2,'UBubu2Bub'],
[6,0,4,0,'BUbRu2rBub'],
[6,0,4,1,'u2Bubu2Bub'],
[6,0,4,2,'rURu2rUR'],
[6,0,6,0,''],
[6,0,6,1,'rURUruRu2rUR'],
[6,0,6,2,'rURfu2Fu2rUR'],
[6,0,8,0,'ru2r2uru2rUR'],
[6,0,8,1,'BUfubF'],
[6,0,8,2,'rUr2u2r2UR'],
[6,1,1,0,'UrURBu2b'],
[6,1,1,1,'BUbUruR'],
[6,1,1,2,'u2ruRuBUb'],
[6,1,2,0,'rURBu2b'],
[6,1,2,1,'UruRuBUb'],
[6,1,2,2,'uBUbUruR'],
[6,1,3,0,'u2rURBu2b'],
[6,1,3,1,'uruRuBUb'],
[6,1,3,2,'UBUbUruR'],
[6,1,4,0,'urURBu2b'],
[6,1,4,1,'u2BUbUruR'],
[6,1,4,2,'ruRuBUb'],
[6,1,6,0,'rUr2u2rUBub'],
[6,1,6,1,'rURUrURuBUb'],
[6,1,6,2,'rURuRUrBub'],
[6,1,8,0,'ruRfUFBub'],
[6,1,8,1,'rURufBubF'],
[6,1,8,2,'rur2u2rUBUb'],
[8,0,1,0,'u2Rur2UR'],
[8,0,1,1,'URUr2uR'],
[8,0,1,2,'ufuFu2BUb'],
[8,0,2,0,'URur2UR'],
[8,0,2,1,'u2fuFu2BUb'],
[8,0,2,2,'RUr2uR'],
[8,0,3,0,'uRur2UR'],
[8,0,3,1,'fuFu2BUb'],
[8,0,3,2,'u2RUr2uR'],
[8,0,4,0,'Rur2UR'],
[8,0,4,1,'uRUr2uR'],
[8,0,4,2,'UfuFu2BUb'],
[8,0,6,0,'fUFrURUBub'],
[8,0,6,1,'ru2RuRur2UR'],
[8,0,6,2,'rURURur2UR'],
[8,0,8,0,'Rur2u2Ru2rUR'],
[8,0,8,1,'RuruRur2uR'],
[8,0,8,2,'RurUruRurUR'],
[8,1,1,0,'RurBUbUBub'],
[8,1,1,1,'URurUBub'],
[8,1,1,2,'ufUFru2R'],
[8,1,2,0,'RUruRUrBub'],
[8,1,2,1,'u2fUFru2R'],
[8,1,2,2,'RurUBub'],
[8,1,3,0,'fuFruRurUR'],
[8,1,3,1,'fUFru2R'],
[8,1,3,2,'u2RurUBub'],
[8,1,4,0,'UfuFruRurUR'],
[8,1,4,1,'uRurUBub'],
[8,1,4,2,'UfUFru2R'],
[8,1,6,0,'ru2RfUFru2R'],
[8,1,6,1,'fuFBu2bru2R'],
[8,1,6,2,'Rur2u2RBu2b'],
[8,1,8,0,'RuruBUb'],
[8,1,8,1,'RuruRUrUBub'],
[8,1,8,2,'RurURu2rBub'],
]
for i in f3:
if(checke(state,6,i[0],i[1]) and checkc(state,6,i[2],i[3])):
cube.do(state,i[4])
solve+='<BR />'
solve+=i[4]
t=1
break
if(t==0):
return '输入错误,请认真检查hhh~'
t=0
#f4 5*2*5*3=150种情况
f4=[
[1,0,1,0,'ufu2FRu2rURur'],
[1,0,1,1,'u2fUFu2RUr'],
[1,0,1,2,'ufu2FuRUr'],
[1,0,2,0,'Ru2ruRUrURur'],
[1,0,2,1,'RurURUr'],
[1,0,2,2,'u2Rur'],
[1,0,3,0,'URUrURur'],
[1,0,3,1,'fUFuRUr'],
[1,0,3,2,'u2Ru2ru2Rur'],
[1,0,4,0,'URu2rURur'],
[1,0,4,1,'uRUru2Rur'],
[1,0,4,2,'RUr'],
[1,0,8,0,'fUFURur'],
[1,0,8,1,'Ufu2FRu2r'],
[1,0,8,2,'Ru2ru2RUr'],
[1,1,1,0,'u2fu2FUfuF'],
[1,1,1,1,'ufUFufuF'],
[1,1,1,2,'fu2F'],
[1,1,2,0,'Ru2rfu2FufUF'],
[1,1,2,1,'URuru2fuF'],
[1,1,2,2,'Ru2rUfuF'],
[1,1,3,0,'u2fu2FufUF'],
[1,1,3,1,'fuFu2fUF'],
[1,1,3,2,'ufuF'],
[1,1,4,0,'u2fuFufUF'],
[1,1,4,1,'uRurUfuF'],
[1,1,4,2,'Ufu2Fu2fUF'],
[1,1,8,0,'uRurufUF'],
[1,1,8,1,'ufu2Fu2fuF'],
[1,1,8,2,'u2Ru2rfu2F'],
[2,0,1,0,'u2Ru2ruRUr'],
[2,0,1,1,'Ru2r'],
[2,0,1,2,'URurURUr'],
[2,0,2,0,'u2Ru2rURur'],
[2,0,2,1,'URUr'],
[2,0,2,2,'RUru2Rur'],
[2,0,3,0,'fu2FRu2rURur'],
[2,0,3,1,'fu2FuRUr'],
[2,0,3,2,'ufUFu2RUr'],
[2,0,4,0,'u2RUrURur'],
[2,0,4,1,'uRu2ru2Rur'],
[2,0,4,2,'UfUFuRUr'],
[2,0,8,0,'UfUFURur'],
[2,0,8,1,'u2fu2FRu2r'],
[2,0,8,2,'RUrfUFuRUr'],
[2,1,1,0,'URu2rfu2FufUF'],
[2,1,1,1,'URu2rUfuF'],
[2,1,1,2,'u2Ruru2fuF'],
[2,1,2,0,'ufuFufUF'],
[2,1,2,1,'u2fu2Fu2fUF'],
[2,1,2,2,'RurUfuF'],
[2,1,3,0,'ufu2FUfuF'],
[2,1,3,1,'u2fUF'],
[2,1,3,2,'fUFufuF'],
[2,1,4,0,'ufu2FufUF'],
[2,1,4,1,'fuF'],
[2,1,4,2,'UfuFu2fUF'],
[2,1,8,0,'RurufUF'],
[2,1,8,1,'fu2Fu2fuF'],
[2,1,8,2,'uRu2rfu2F'],
[3,0,1,0,'RUrURur'],
[3,0,1,1,'URu2ru2Rur'],
[3,0,1,2,'RuruRUr'],
[3,0,2,0,'UfUFURUru2Rur'],
[3,0,2,1,'u2fu2FuRUr'],
[3,0,2,2,'UfUFu2RUr'],
[3,0,3,0,'Ru2rURur'],
[3,0,3,1,'uRUr'],
[3,0,3,2,'u2RUru2Rur'],
[3,0,4,0,'Ru2ruRUr'],
[3,0,4,1,'URur'],
[3,0,4,2,'uRurURUr'],
[3,0,8,0,'ufUFURur'],
[3,0,8,1,'fu2FRu2r'],
[3,0,8,2,'RUruRUr'],
[3,1,1,0,'Ufu2FufUF'],
[3,1,1,1,'u2fuF'],
[3,1,1,2,'ufuFu2fUF'],
[3,1,2,0,'Ufu2FUfuF'],
[3,1,2,1,'fUF'],
[3,1,2,2,'u2fUFufuF'],
[3,1,3,0,'UfuFufUF'],
[3,1,3,1,'fu2Fu2fUF'],
[3,1,3,2,'u2RurUfuF'],
[3,1,4,0,'RurufuFu2fUF'],
[3,1,4,1,'uRu2rUfuF'],
[3,1,4,2,'Ruru2fuF'],
[3,1,8,0,'u2RurufUF'],
[3,1,8,1,'RUrUfuFufUF'],
[3,1,8,2,'URu2rfu2F'],
[4,0,1,0,'uRu2rURur'],
[4,0,1,1,'URUru2Rur'],
[4,0,1,2,'u2RUr'],
[4,0,2,0,'uRUrURur'],
[4,0,2,1,'u2fUFuRUr'],
[4,0,2,2,'Ru2ru2Rur'],
[4,0,3,0,'uRu2ruRUr'],
[4,0,3,1,'u2RurURUr'],
[4,0,3,2,'Rur'],
[4,0,4,0,'fUFURUru2Rur'],
[4,0,4,1,'fUFu2RUr'],
[4,0,4,2,'Ufu2FuRUr'],
[4,0,8,0,'u2fUFURur'],
[4,0,8,1,'ufu2FRu2r'],
[4,0,8,2,'URUrfUFuRUr'],
[4,1,1,0,'fuFufUF'],
[4,1,1,1,'URurUfuF'],
[4,1,1,2,'ufu2Fu2fUF'],
[4,1,2,0,'fu2FufUF'],
[4,1,2,1,'u2fuFu2fUF'],
[4,1,2,2,'UfuF'],
[4,1,3,0,'uRurufuFu2fUF'],
[4,1,3,1,'uRuru2fuF'],
[4,1,3,2,'u2Ru2rUfuF'],
[4,1,4,0,'fu2FUfuF'],
[4,1,4,1,'UfUFufuF'],
[4,1,4,2,'ufUF'],
[4,1,8,0,'URurufUF'],
[4,1,8,1,'fuFUfuF'],
[4,1,8,2,'RUru2fUF'],
[8,0,1,0,'RurURUrURur'],
[8,0,1,1,'URuru2Rur'],
[8,0,1,2,'ufUFu2fUF'],
[8,0,2,0,'RUruRUrURur'],
[8,0,2,1,'u2fUFu2fUF'],
[8,0,2,2,'Ruru2Rur'],
[8,0,3,0,'RUruRu2rURur'],
[8,0,3,1,'fUFu2fUF'],
[8,0,3,2,'u2Ruru2Rur'],
[8,0,4,0,'URUruRu2rURur'],
[8,0,4,1,'uRuru2Rur'],
[8,0,4,2,'UfUFu2fUF'],
[8,0,8,0,''],
[8,0,8,1,'fUFufu2FufUF'],
[8,0,8,2,'RurURu2rURur'],
[8,1,1,0,'u2Rurfu2F'],
[8,1,1,1,'URUrUfuF'],
[8,1,1,2,'ufuFuRUr'],
[8,1,2,0,'URurfu2F'],
[8,1,2,1,'u2fuFuRUr'],
[8,1,2,2,'RUrUfuF'],
[8,1,3,0,'uRurfu2F'],
[8,1,3,1,'fuFuRUr'],
[8,1,3,2,'u2RUrUfuF'],
[8,1,4,0,'Rurfu2F'],
[8,1,4,1,'uRUrUfuF'],
[8,1,4,2,'UfuFuRUr'],
[8,1,8,0,'RurUfu2Fu2fUF'],
[8,1,8,1,'fUFu2RUrURur'],
[8,1,8,2,'Ruru2fuFufUF']
]
for i in f4:
if(checke(state,8,i[0],i[1]) and checkc(state,8,i[2],i[3])):
cube.do(state,i[4])
solve+='<BR />'
solve+=i[4]
t=1
break
if(t==0):
return '输入错误,请认真检查hhh~'
t=0
#o 我们只需考虑:棱块1,2,3朝向,角块1,2,3朝向,共2*2*2*3*3*3=216种情况
o=[
[0,0,0,0,0,0,''],
[0,0,0,0,0,1,'u2r2dRu2rDRu2R'],
[0,0,0,0,0,2,'ULFrflFRf'],
[0,0,0,0,1,0,'u2LFrflFRf'],
[0,0,0,0,1,1,'rULuRUl'],
[0,0,0,0,1,2,'ufLFrflFR'],
[0,0,0,0,2,0,'ur2dRu2rDRu2R'],
[0,0,0,0,2,1,'UfLFrflFR'],
[0,0,0,0,2,2,'uRulUruL'],
[0,0,0,1,0,0,'u2fLFrflFR'],
[0,0,0,1,0,1,'LFrflFRf'],
[0,0,0,1,0,2,'LurUluR'],
[0,0,0,1,1,0,'r2dRu2rDRu2R'],
[0,0,0,1,1,1,'Ru2ruRUruRur'],
[0,0,0,1,1,2,'uRu2r2ur2ur2u2R'],
[0,0,0,1,2,0,'RulUruL'],
[0,0,0,1,2,1,'u2Ru2r2ur2ur2u2R'],
[0,0,0,1,2,2,'ru2RUrUR'],
[0,0,0,2,0,0,'fLFrflFR'],
[0,0,0,2,0,1,'UlURuLUr'],
[0,0,0,2,0,2,'Ur2dRu2rDRu2R'],
[0,0,0,2,1,0,'ulURuLUr'],
[0,0,0,2,1,1,'lURuLUr'],
[0,0,0,2,1,2,'Ru2r2ur2ur2u2R'],
[0,0,0,2,2,0,'uLFrflFRf'],
[0,0,0,2,2,1,'URu2r2ur2ur2u2R'],
[0,0,0,2,2,2,'URu2ruRUruRur'],
[0,0,1,0,0,0,'LFrflRURur'],
[0,0,1,0,0,1,'uburURB'],
[0,0,1,0,0,2,'UruFURurfR'],
[0,0,1,0,1,0,'u2RUburURBr'],
[0,0,1,0,1,1,'ULf2rfRfl'],
[0,0,1,0,1,2,'u2Ru2r2FRfRu2r'],
[0,0,1,0,2,0,'u2BULulb'],
[0,0,1,0,2,1,'FRuruRUrf'],
[0,0,1,0,2,2,'u2lb2RBrBL'],
[0,0,1,1,0,0,'RUrURururFRf'],
[0,0,1,1,0,1,'ULf2rfRflru2RUrUR'],
[0,0,1,1,0,2,'LFrFrDRdRf2l'],
[0,0,1,1,1,0,'RUrURu2rFRUruf'],
[0,0,1,1,1,1,'ULFrFRfrFRf2l'],
[0,0,1,1,1,2,'UfluLUluLUF'],
[0,0,1,1,2,0,'LFrFRf2l'],
[0,0,1,1,2,1,'rFr2br2fr2Br'],
[0,0,1,1,2,2,'uRUrUrFRfRu2r'],
[0,0,1,2,0,0,'uruRurURURbrB'],
[0,0,1,2,0,1,'ulbRbrb2L'],
[0,0,1,2,0,2,'uruRuru2RFRUruf'],
[0,0,1,2,1,0,'u2FRUrufUFRUruf'],
[0,0,1,2,1,1,'RUrurFr2Uruf'],
[0,0,1,2,1,2,'FRUruRUruf'],
[0,0,1,2,2,0,'u2lb2RBrBLRu2ruRur'],
[0,0,1,2,2,1,'uRbr2Fr2Br2fR'],
[0,0,1,2,2,2,'ulb2RBrbRBrBL'],
[0,1,0,0,0,0,'uLFrflRURur'],
[0,1,0,0,0,1,'UBULulb'],
[0,1,0,0,0,2,'URUburURBr'],
[0,1,0,0,1,0,'Ulb2RBrBLRu2ruRur'],
[0,1,0,0,1,1,'UFRUrufUFRUruf'],
[0,1,0,0,1,2,'u2ruRurURURbrB'],
[0,1,0,0,2,0,'uRUrURu2rFRUruf'],
[0,1,0,0,2,1,'uRUrURururFRf'],
[0,1,0,0,2,2,'uLFrFRf2l'],
[0,1,0,1,0,0,'URu2r2FRfRu2r'],
[0,1,0,1,0,1,'ruFURurfR'],
[0,1,0,1,0,2,'Ulb2RBrBL'],
[0,1,0,1,1,0,'u2ruRuru2RFRUruf'],
[0,1,0,1,1,1,'u2lb2RBrbRBrBL'],
[0,1,0,1,1,2,'uFRUruRUruf'],
[0,1,0,1,2,0,'u2RUrUrFRfRu2r'],
[0,1,0,1,2,1,'fluLUluLUF'],
[0,1,0,1,2,2,'uLFrFrDRdRf2l'],
[0,1,0,2,0,0,'uFRuruRUrf'],
[0,1,0,2,0,1,'Lf2rfRfl'],
[0,1,0,2,0,2,'u2burURB'],
[0,1,0,2,1,0,'uRUrurFr2Uruf'],
[0,1,0,2,1,1,'u2lbRbrb2L'],
[0,1,0,2,1,2,'u2Rbr2Fr2Br2fR'],
[0,1,0,2,2,0,'Lf2rfRflru2RUrUR'],
[0,1,0,2,2,1,'urFr2br2fr2Br'],
[0,1,0,2,2,2,'LFrFRfrFRf2l'],
[0,1,1,0,0,0,'URUruLrFRfl'],
[0,1,1,0,0,1,'UFRUruf'],
[0,1,1,0,0,2,'URUrurFRf'],
[0,1,1,0,1,0,'uRUrubrFRfB'],
[0,1,1,0,1,1,'ULFlRUruLfl'],
[0,1,1,0,1,2,'ULfluLUFul'],
[0,1,1,0,2,0,'rurFRfUR'],
[0,1,1,0,2,1,'uLfluLUFul'],
[0,1,1,0,2,2,'uFURur2fRURur'],
[0,1,1,1,0,0,'urFRUrufUR'],
[0,1,1,1,0,1,'URUrubrFRfB'],
[0,1,1,1,0,2,'urfRluLUrFR'],
[0,1,1,1,1,0,'uFRUruf'],
[0,1,1,1,1,1,'ULFlURurURurLfl'],
[0,1,1,1,1,2,'uBULulULulb'],
[0,1,1,1,2,0,'UrfRluLUrFR'],
[0,1,1,1,2,1,'ruRurUfUFR'],
[0,1,1,1,2,2,'UFURur2fRURur'],
[0,1,1,2,0,0,'UrFRUrufUR'],
[0,1,1,2,0,1,'UrFRUrfRFuf'],
[0,1,1,2,0,2,'u2rurFRfUR'],
[0,1,1,2,1,0,'urFRUrfRFuf'],
[0,1,1,2,1,1,'uLFlRUruLfl'],
[0,1,1,2,1,2,'u2ruRurUfUFR'],
[0,1,1,2,2,0,'uRUrurFRf'],
[0,1,1,2,2,1,'UBULulULulb'],
[0,1,1,2,2,2,'UrFURur2fr2UruR'],
[1,0,0,0,0,0,'RUruLrFRfl'],
[1,0,0,0,0,1,'ururFRfUR'],
[1,0,0,0,0,2,'u2RUrubrFRfB'],
[1,0,0,0,1,0,'u2RUrurFRf'],
[1,0,0,0,1,1,'u2rFRUrfRFuf'],
[1,0,0,0,1,2,'rFRUrufUR'],
[1,0,0,0,2,0,'u2FRUruf'],
[1,0,0,0,2,1,'u2rFRUrufUR'],
[1,0,0,0,2,2,'rfRluLUrFR'],
[1,0,0,1,0,0,'LfluLUFul'],
[1,0,0,1,0,1,'RUrurFRf'],
[1,0,0,1,0,2,'u2FURur2fRURur'],
[1,0,0,1,1,0,'UrurFRfUR'],
[1,0,0,1,1,1,'rFURur2fr2UruR'],
[1,0,0,1,1,2,'UruRurUfUFR'],
[1,0,0,1,2,0,'FURur2fRURur'],
[1,0,0,1,2,1,'u2BULulULulb'],
[1,0,0,1,2,2,'u2rfRluLUrFR'],
[1,0,0,2,0,0,'u2LfluLUFul'],
[1,0,0,2,0,1,'LFlRUruLfl'],
[1,0,0,2,0,2,'FRUruf'],
[1,0,0,2,1,0,'u2LFlRUruLfl'],
[1,0,0,2,1,1,'rFRUrfRFuf'],
[1,0,0,2,1,2,'BULulULulb'],
[1,0,0,2,2,0,'RUrubrFRfB'],
[1,0,0,2,2,1,'uruRurUfUFR'],
[1,0,0,2,2,2,'LFlURurURurLfl'],
[1,0,1,0,0,0,'ULFrflRURur'],
[1,0,1,0,0,1,'ruRuru2RFRUruf'],
[1,0,1,0,0,2,'u2Lf2rfRflru2RUrUR'],
[1,0,1,0,1,0,'u2ruFURurfR'],
[1,0,1,0,1,1,'lbRbrb2L'],
[1,0,1,0,1,2,'URUrURururFRf'],
[1,0,1,0,2,0,'burURB'],
[1,0,1,0,2,1,'ruRurURURbrB'],
[1,0,1,0,2,2,'ULFrFrDRdRf2l'],
[1,0,1,1,0,0,'UFRuruRUrf'],
[1,0,1,1,0,1,'ulb2RBrBLRu2ruRur'],
[1,0,1,1,0,2,'RUrUrFRfRu2r'],
[1,0,1,1,1,0,'uBULulb'],
[1,0,1,1,1,1,'lb2RBrbRBrBL'],
[1,0,1,1,1,2,'UrFr2br2fr2Br'],
[1,0,1,1,2,0,'ulb2RBrBL'],
[1,0,1,1,2,1,'Rbr2Fr2Br2fR'],
[1,0,1,1,2,2,'ULFrFRf2l'],
[1,0,1,2,0,0,'uRu2r2FRfRu2r'],
[1,0,1,2,0,1,'URUrurFr2Uruf'],
[1,0,1,2,0,2,'URUrURu2rFRUruf'],
[1,0,1,2,1,0,'u2Lf2rfRfl'],
[1,0,1,2,1,1,'uFRUrufUFRUruf'],
[1,0,1,2,1,2,'u2fluLUluLUF'],
[1,0,1,2,2,0,'uRUburURBr'],
[1,0,1,2,2,1,'UFRUruRUruf'],
[1,0,1,2,2,2,'u2LFrFRfrFRf2l'],
[1,1,0,0,0,0,'u2LFrflRURur'],
[1,1,0,0,0,1,'u2RUrURu2rFRUruf'],
[1,1,0,0,0,2,'lb2RBrBLRu2ruRur'],
[1,1,0,0,1,0,'uLf2rfRflru2RUrUR'],
[1,1,0,0,1,1,'u2RUrurFr2Uruf'],
[1,1,0,0,1,2,'u2FRuruRUrf'],
[1,1,0,0,2,0,'UruRuru2RFRUruf'],
[1,1,0,0,2,1,'Ru2r2FRfRu2r'],
[1,1,0,0,2,2,'URUrUrFRfRu2r'],
[1,1,0,1,0,0,'UruRurURURbrB'],
[1,1,0,1,0,1,'RUburURBr'],
[1,1,0,1,0,2,'u2LFrFRf2l'],
[1,1,0,1,1,0,'UburURB'],
[1,1,0,1,1,1,'uLFrFRfrFRf2l'],
[1,1,0,1,1,2,'URbr2Fr2Br2fR'],
[1,1,0,1,2,0,'u2LFrFrDRdRf2l'],
[1,1,0,1,2,1,'u2FRUruRUruf'],
[1,1,0,1,2,2,'lb2RBrBL'],
[1,1,0,2,0,0,'u2RUrURururFRf'],
[1,1,0,2,0,1,'FRUrufUFRUruf'],
[1,1,0,2,0,2,'BULulb'],
[1,1,0,2,1,0,'UlbRbrb2L'],
[1,1,0,2,1,1,'uLf2rfRfl'],
[1,1,0,2,1,2,'u2rFr2br2fr2Br'],
[1,1,0,2,2,0,'uruFURurfR'],
[1,1,0,2,2,1,'ufluLUluLUF'],
[1,1,0,2,2,2,'Ulb2RBrbRBrBL'],
[1,1,1,0,0,0,'lRBRBrbl2r2FRfl'],
[1,1,1,0,0,1,'u2FRUrUfu2fLFl'],
[1,1,1,0,0,2,'RlBRBrbrLrFRf'],
[1,1,1,0,1,0,'URlBRBrbrLrFRf'],
[1,1,1,0,1,1,'UBULulbUFRUruf'],
[1,1,1,0,1,2,'RUrUrFRfu2rFRf'],
[1,1,1,0,2,0,'uFRUrUfu2fLFl'],
[1,1,1,0,2,1,'u2RUrUrFRfu2rFRf'],
[1,1,1,0,2,2,'u2BULulbuFRUruf'],
[1,1,1,1,0,0,'uRUrUrFRfu2rFRf'],
[1,1,1,1,0,1,'uRlBRBrbrLrFRf'],
[1,1,1,1,0,2,'UBULulbuFRUruf'],
[1,1,1,1,1,0,'FRUrUfu2fLFl'],
[1,1,1,1,1,1,'URu2r2FRfu2rFRf'],
[1,1,1,1,1,2,'uFRUrufBULulb'],
[1,1,1,1,2,0,'uBULulbuFRUruf'],
[1,1,1,1,2,1,'u2FRUrufBULulb'],
[1,1,1,1,2,2,'BULulbuFRUruf'],
[1,1,1,2,0,0,'URUrUrFRfu2rFRf'],
[1,1,1,2,0,1,'BULulbUFRUruf'],
[1,1,1,2,0,2,'UFRUrUfu2fLFl'],
[1,1,1,2,1,0,'u2BULulbUFRUruf'],
[1,1,1,2,1,1,'uBULulbUFRUruf'],
[1,1,1,2,1,2,'FRUrufBULulb'],
[1,1,1,2,2,0,'u2RlBRBrbrLrFRf'],
[1,1,1,2,2,1,'UFRUrufBULulb'],
[1,1,1,2,2,2,'Ru2r2FRfu2rFRf']
]
for i in o:
if(state[20]==i[0] and state[21]==i[1] and state[22]==i[2] and state[32]==i[3] and state[33]==i[4] and state[34]==i[5]):
cube.do(state,i[6])
solve+='<BR />'
solve+=i[6]
t=1
break
if(t==0):
return '输入错误,请认真检查hhh~'
t=0
#p
p=[
[1,2,3,1,2,''],
[1,2,3,1,3,'RbRf2rBRf2r2'],
[1,2,3,1,4,'r2f2rbRf2rBr'],
[1,2,3,2,1,'FrfLFRflFRfLFrfl'],
[1,2,3,2,3,'u2r2f2rbRf2rBru2'],
[1,2,3,2,4,'Ur2f2rbRf2rBru'],
[1,2,3,3,1,'u2RbRf2rBRf2r2u2'],
[1,2,3,3,2,'uRbRf2rBRf2r2U'],
[1,2,3,3,4,'UFrfLFRflFRfLFrflu'],
[1,2,3,4,1,'URbRf2rBRf2r2u'],
[1,2,3,4,2,'ur2f2rbRf2rBrU'],
[1,2,3,4,3,'u2r2l2Dr2l2u2r2l2Dr2l2'],
[1,2,4,1,2,'lUru2Lulu2LRu'],
[1,2,4,1,3,'urUrubrb2ubUbRBRU'],
[1,2,4,1,4,'RUrfRUrurFr2uru'],
[1,2,4,2,1,'ru2Ru2rFRUrurfr2u'],
[1,2,4,2,3,'u2r2dFuFUfDr2BubU'],
[1,2,4,2,4,'uRuRURURurur2u2'],
[1,2,4,3,1,'u2r2URUrururUrU'],
[1,2,4,3,2,'uRu2ru2RbruRURBr2u2'],
[1,2,4,3,4,'Ur2DbUbuBdr2fUF'],
[1,2,4,4,1,'RUrf2dLulUlDf2U'],
[1,2,4,4,2,'u2FRuruRUrfRUrurFRfu2'],
[1,2,4,4,3,'uruRb2DlULuLdb2'],
[1,3,2,1,2,'u2rURur2fuFURFrfr2U'],
[1,3,2,1,3,'ULuRu2lUrLuRu2lUr'],
[1,3,2,1,4,'RUrurFr2uruRUrf'],
[1,3,2,2,1,'rURur2fuFURFrfr2u'],
[1,3,2,2,3,'UrURur2fuFURFrfr2'],
[1,3,2,2,4,'uruRuRURurURUr2ur'],
[1,3,2,3,1,'ruRuRURurURUr2urU'],
[1,3,2,3,2,'u2RUrurFr2uruRUrfu2'],
[1,3,2,3,4,'URUrurFr2uruRUrfU'],
[1,3,2,4,1,'urURur2fuFURFrfr2u2'],
[1,3,2,4,2,'lUru2LulRUru2LuRU'],
[1,3,2,4,3,'uRUrurFr2uruRUrfu'],
[1,3,4,1,2,'r2URUrururUr'],
[1,3,4,1,3,'UruRb2DlULuLdb2u'],
[1,3,4,1,4,'u2RUrfRUrurFr2uru2'],
[1,3,4,2,1,'UrUrubrb2ubUbRBR'],
[1,3,4,2,3,'URu2ru2RbruRURBr2U'],
[1,3,4,2,4,'u2ru2Ru2rFRUrurfr2u2'],
[1,3,4,3,1,'ur2DbUbuBdr2fUFu'],
[1,3,4,3,2,'r2dFuFUfDr2Bub'],
[1,3,4,3,4,'FRuruRUrfRUrurFRfU'],
[1,3,4,4,1,'u2RUrf2dLulUlDf2'],
[1,3,4,4,2,'u2lUru2Lulu2LRu2'],
[1,3,4,4,3,'URuRURURurur2U'],
[1,4,2,1,2,'RuRURURurur2'],
[1,4,2,1,3,'URUrfRUrurFr2urU'],
[1,4,2,1,4,'u2r2DbUbuBdr2fUFu2'],
[1,4,2,2,1,'rUrubrb2ubUbRBRu'],
[1,4,2,2,3,'ruRb2DlULuLdb2u2'],
[1,4,2,2,4,'ur2dFuFUfDr2Bubu'],
[1,4,2,3,1,'Ru2ru2RbruRURBr2'],
[1,4,2,3,2,'UlUru2Lulu2LRU'],
[1,4,2,3,4,'uFRuruRUrfRUrurFRf'],
[1,4,2,4,1,'Uru2Ru2rFRUrurfr2U'],
[1,4,2,4,2,'URUrf2dLulUlDf2u'],
[1,4,2,4,3,'ur2URUrururUru'],
[1,4,3,1,2,'uRUrfRUrurFr2ur'],
[1,4,3,1,3,'UFRuruRUrfRUrurFRfu'],
[1,4,3,1,4,'uru2Ru2rFRUrurfr2'],
[1,4,3,2,1,'u2Ru2ru2RbruRURBr2u'],
[1,4,3,2,3,'u2ruRb2DlULuLdb2U'],
[1,4,3,2,4,'u2RuRURURurur2u'],
[1,4,3,3,1,'Ur2URUrururUru2'],
[1,4,3,3,2,'ulUru2Lulu2LR'],
[1,4,3,3,4,'Ur2dFuFUfDr2Bubu2'],
[1,4,3,4,1,'r2DbUbuBdr2fUFU'],
[1,4,3,4,2,'u2rUrubrb2ubUbRBRu2'],
[1,4,3,4,3,'uRUrf2dLulUlDf2u2'],
[2,1,3,1,2,'u2ru2Ru2rFRUrurfr2U'],
[2,1,3,1,3,'UrUrubrb2ubUbRBRu'],
[2,1,3,1,4,'URu2ru2RbruRURBr2'],
[2,1,3,2,1,'u2lUru2Lulu2LRU'],
[2,1,3,2,3,'u2RUrf2dLulUlDf2u'],
[2,1,3,2,4,'URuRURURurur2'],
[2,1,3,3,1,'r2URUrururUru'],
[2,1,3,3,2,'u2RUrfRUrurFr2urU'],
[2,1,3,3,4,'UruRb2DlULuLdb2u2'],
[2,1,3,4,1,'r2dFuFUfDr2Bubu'],
[2,1,3,4,2,'FRuruRUrfRUrurFRf'],
[2,1,3,4,3,'ur2DbUbuBdr2fUFu2'],
[2,1,4,1,2,'UruRuRURurURUr2urU'],
[2,1,4,1,3,'rURur2fuFURFrfr2u2'],
[2,1,4,1,4,'UrURur2fuFURFrfr2u'],
[2,1,4,2,1,'lUru2LulRUru2LuR'],
[2,1,4,2,3,'urURur2fuFURFrfr2U'],
[2,1,4,2,4,'uRUrurFr2uruRUrfu2'],
[2,1,4,3,1,'u2rURur2fuFURFrfr2'],
[2,1,4,3,2,'RUrurFr2uruRUrfu'],
[2,1,4,3,4,'u2LuRu2lUrLuRu2lUr'],
[2,1,4,4,1,'u2RUrurFr2uruRUrfU'],
[2,1,4,4,2,'URUrurFr2uruRUrf'],
[2,1,4,4,3,'ruRuRURurURUr2ur'],
[2,3,1,1,2,'u2RuRURURurur2u2'],
[2,3,1,1,3,'u2Ru2ru2RbruRURBr2u2'],
[2,3,1,1,4,'u2ruRb2DlULuLdb2'],
[2,3,1,2,1,'u2rUrubrb2ubUbRBRU'],
[2,3,1,2,3,'r2DbUbuBdr2fUF'],
[2,3,1,2,4,'uRUrf2dLulUlDf2U'],
[2,3,1,3,1,'uRUrfRUrurFr2uru'],
[2,3,1,3,2,'uru2Ru2rFRUrurfr2u'],
[2,3,1,3,4,'UFRuruRUrfRUrurFRfu2'],
[2,3,1,4,1,'ulUru2Lulu2LRu'],
[2,3,1,4,2,'Ur2dFuFUfDr2BubU'],
[2,3,1,4,3,'Ur2URUrururUrU'],
[2,3,4,1,2,'ur2dFuFUfDr2Bubu2'],
[2,3,4,1,3,'rUrubrb2ubUbRBRu2'],
[2,3,4,1,4,'ruRb2DlULuLdb2U'],
[2,3,4,2,1,'URUrf2dLulUlDf2u2'],
[2,3,4,2,3,'Uru2Ru2rFRUrurfr2'],
[2,3,4,2,4,'ur2URUrururUru2'],
[2,3,4,3,1,'RuRURURurur2u'],
[2,3,4,3,2,'u2r2DbUbuBdr2fUFU'],
[2,3,4,3,4,'URUrfRUrurFr2ur'],
[2,3,4,4,1,'UlUru2Lulu2LR'],
[2,3,4,4,2,'uFRuruRUrfRUrurFRfu'],
[2,3,4,4,3,'Ru2ru2RbruRURBr2u'],
[2,4,1,1,2,'Ur2f2rbRf2rBru2'],
[2,4,1,1,3,'FrfLFRflFRfLFrflu'],
[2,4,1,1,4,'u2r2f2rbRf2rBrU'],
[2,4,1,2,1,'ur2f2rbRf2rBr'],
[2,4,1,2,3,'URbRf2rBRf2r2u2'],
[2,4,1,2,4,'Ur2l2Dr2l2u2r2l2Dr2l2'],
[2,4,1,3,1,'u'],
[2,4,1,3,2,'r2f2rbRf2rBru'],
[2,4,1,3,4,'RbRf2rBRf2r2u'],
[2,4,1,4,1,'uRbRf2rBRf2r2'],
[2,4,1,4,2,'uFrfLFRflFRfLFrfl'],
[2,4,1,4,3,'u2RbRf2rBRf2r2U'],
[2,4,3,1,2,'uRuRURURurur2U'],
[2,4,3,1,3,'ru2Ru2rFRUrurfr2u2'],
[2,4,3,1,4,'u2r2dFuFUfDr2Bub'],
[2,4,3,2,1,'u2FRuruRUrfRUrurFRfU'],
[2,4,3,2,3,'RUrf2dLulUlDf2'],
[2,4,3,2,4,'uruRb2DlULuLdb2u'],
[2,4,3,3,1,'lUru2Lulu2LRu2'],
[2,4,3,3,2,'RUrfRUrurFr2uru2'],
[2,4,3,3,4,'urUrubrb2ubUbRBR'],
[2,4,3,4,1,'uRu2ru2RbruRURBr2U'],
[2,4,3,4,2,'Ur2DbUbuBdr2fUFu'],
[2,4,3,4,3,'u2r2URUrururUr'],
[3,1,2,1,2,'u2r2URUrururUru2'],
[3,1,2,1,3,'Ur2DbUbuBdr2fUFU'],
[3,1,2,1,4,'uRu2ru2RbruRURBr2u'],
[3,1,2,2,1,'urUrubrb2ubUbRBRu2'],
[3,1,2,2,3,'RUrfRUrurFr2ur'],
[3,1,2,2,4,'lUru2Lulu2LR'],
[3,1,2,3,1,'uruRb2DlULuLdb2U'],
[3,1,2,3,2,'RUrf2dLulUlDf2u2'],
[3,1,2,3,4,'u2FRuruRUrfRUrurFRfu'],
[3,1,2,4,1,'u2r2dFuFUfDr2Bubu2'],
[3,1,2,4,2,'ru2Ru2rFRUrurfr2'],
[3,1,2,4,3,'uRuRURURurur2u'],
[3,1,4,1,2,'u2RbRf2rBRf2r2u'],
[3,1,4,1,3,'UFrfLFRflFRfLFrfl'],
[3,1,4,1,4,'uRbRf2rBRf2r2u2'],
[3,1,4,2,1,'RbRf2rBRf2r2U'],
[3,1,4,2,3,'r2f2rbRf2rBrU'],
[3,1,4,2,4,'U'],
[3,1,4,3,1,'ur2l2Dr2l2u2r2l2Dr2l2'],
[3,1,4,3,2,'URbRf2rBRf2r2'],
[3,1,4,3,4,'ur2f2rbRf2rBru2'],
[3,1,4,4,1,'u2r2f2rbRf2rBru'],
[3,1,4,4,2,'FrfLFRflFRfLFrflU'],
[3,1,4,4,3,'Ur2f2rbRf2rBr'],
[3,2,1,1,2,'Ru2ru2RbruRURBr2U'],
[3,2,1,1,3,'uFRuruRUrfRUrurFRfU'],
[3,2,1,1,4,'UlUru2Lulu2LRu2'],
[3,2,1,2,1,'URUrfRUrurFr2uru2'],
[3,2,1,2,3,'u2r2DbUbuBdr2fUFu'],
[3,2,1,2,4,'RuRURURurur2U'],
[3,2,1,3,1,'ur2URUrururUr'],
[3,2,1,3,2,'Uru2Ru2rFRUrurfr2u2'],
[3,2,1,3,4,'URUrf2dLulUlDf2'],
[3,2,1,4,1,'ruRb2DlULuLdb2u'],
[3,2,1,4,2,'rUrubrb2ubUbRBR'],
[3,2,1,4,3,'ur2dFuFUfDr2Bub'],
[3,2,4,1,2,'Ur2URUrururUru'],
[3,2,4,1,3,'Ur2dFuFUfDr2Bubu'],
[3,2,4,1,4,'ulUru2Lulu2LRU'],
[3,2,4,2,1,'UFRuruRUrfRUrurFRf'],
[3,2,4,2,3,'uru2Ru2rFRUrurfr2U'],
[3,2,4,2,4,'uRUrfRUrurFr2urU'],
[3,2,4,3,1,'uRUrf2dLulUlDf2u'],
[3,2,4,3,2,'r2DbUbuBdr2fUFu2'],
[3,2,4,3,4,'u2rUrubrb2ubUbRBRu'],
[3,2,4,4,1,'u2ruRb2DlULuLdb2u2'],
[3,2,4,4,2,'u2Ru2ru2RbruRURBr2'],
[3,2,4,4,3,'u2RuRURURurur2'],
[3,4,1,1,2,'ruRuRURurURUr2uru2'],
[3,4,1,1,3,'URUrurFr2uruRUrfu2'],
[3,4,1,1,4,'u2RUrurFr2uruRUrfu'],
[3,4,1,2,1,'LuRu2lUrLuRu2lUr'],
[3,4,1,2,3,'RUrurFr2uruRUrfU'],
[3,4,1,2,4,'u2rURur2fuFURFrfr2u2'],
[3,4,1,3,1,'uRUrurFr2uruRUrf'],
[3,4,1,3,2,'urURur2fuFURFrfr2u'],
[3,4,1,3,4,'lUru2LulRUru2LuRu2'],
[3,4,1,4,1,'UrURur2fuFURFrfr2U'],
[3,4,1,4,2,'rURur2fuFURFrfr2'],
[3,4,1,4,3,'UruRuRURurURUr2uru'],
[3,4,2,1,2,'ur2DbUbuBdr2fUF'],
[3,4,2,1,3,'FRuruRUrfRUrurFRfu2'],
[3,4,2,1,4,'r2dFuFUfDr2BubU'],
[3,4,2,2,1,'UruRb2DlULuLdb2'],
[3,4,2,2,3,'u2RUrfRUrurFr2uru'],
[3,4,2,2,4,'r2URUrururUrU'],
[3,4,2,3,1,'URuRURURurur2u2'],
[3,4,2,3,2,'u2RUrf2dLulUlDf2U'],
[3,4,2,3,4,'u2lUru2Lulu2LRu'],
[3,4,2,4,1,'URu2ru2RbruRURBr2u2'],
[3,4,2,4,2,'UrUrubrb2ubUbRBRU'],
[3,4,2,4,3,'u2ru2Ru2rFRUrurfr2u'],
[4,1,2,1,2,'uRUrf2dLulUlDf2'],
[4,1,2,1,3,'u2rUrubrb2ubUbRBR'],
[4,1,2,1,4,'r2DbUbuBdr2fUFu'],
[4,1,2,2,1,'Ur2dFuFUfDr2Bub'],
[4,1,2,2,3,'ulUru2Lulu2LRu2'],
[4,1,2,2,4,'Ur2URUrururUr'],
[4,1,2,3,1,'u2RuRURURurur2U'],
[4,1,2,3,2,'u2ruRb2DlULuLdb2u'],
[4,1,2,3,4,'u2Ru2ru2RbruRURBr2U'],
[4,1,2,4,1,'uru2Ru2rFRUrurfr2u2'],
[4,1,2,4,2,'UFRuruRUrfRUrurFRfU'],
[4,1,2,4,3,'uRUrfRUrurFr2uru2'],
[4,1,3,1,2,'ur2URUrururUrU'],
[4,1,3,1,3,'URUrf2dLulUlDf2U'],
[4,1,3,1,4,'Uru2Ru2rFRUrurfr2u'],
[4,1,3,2,1,'uFRuruRUrfRUrurFRfu2'],
[4,1,3,2,3,'UlUru2Lulu2LRu'],
[4,1,3,2,4,'Ru2ru2RbruRURBr2u2'],
[4,1,3,3,1,'ur2dFuFUfDr2BubU'],
[4,1,3,3,2,'ruRb2DlULuLdb2'],
[4,1,3,3,4,'rUrubrb2ubUbRBRU'],
[4,1,3,4,1,'u2r2DbUbuBdr2fUF'],
[4,1,3,4,2,'URUrfRUrurFr2uru'],
[4,1,3,4,3,'RuRURURurur2u2'],
[4,2,1,1,2,'URuRURURurur2u'],
[4,2,1,1,3,'u2lUru2Lulu2LR'],
[4,2,1,1,4,'u2RUrf2dLulUlDf2u2'],
[4,2,1,2,1,'FRuruRUrfRUrurFRfu'],
[4,2,1,2,3,'r2dFuFUfDr2Bubu2'],
[4,2,1,2,4,'ur2DbUbuBdr2fUFU'],
[4,2,1,3,1,'u2ru2Ru2rFRUrurfr2'],
[4,2,1,3,2,'URu2ru2RbruRURBr2u'],
[4,2,1,3,4,'UrUrubrb2ubUbRBRu2'],
[4,2,1,4,1,'u2RUrfRUrurFr2ur'],
[4,2,1,4,2,'UruRb2DlULuLdb2U'],
[4,2,1,4,3,'r2URUrururUru2'],
[4,2,3,1,2,'uRUrurFr2uruRUrfU'],
[4,2,3,1,3,'lUru2LulRUru2LuRu'],
[4,2,3,1,4,'urURur2fuFURFrfr2'],
[4,2,3,2,1,'URUrurFr2uruRUrfu'],
[4,2,3,2,3,'u2RUrurFr2uruRUrf'],
[4,2,3,2,4,'ruRuRURurURUr2uru'],
[4,2,3,3,1,'UruRuRURurURUr2ur'],
[4,2,3,3,2,'UrURur2fuFURFrfr2u2'],
[4,2,3,3,4,'rURur2fuFURFrfr2U'],
[4,2,3,4,1,'RUrurFr2uruRUrfu2'],
[4,2,3,4,2,'uLuRu2lUrLuRu2lUr'],
[4,2,3,4,3,'u2rURur2fuFURFrfr2u'],
[4,3,1,1,2,'uruRb2DlULuLdb2u2'],
[4,3,1,1,3,'u2FRuruRUrfRUrurFRf'],
[4,3,1,1,4,'RUrf2dLulUlDf2u'],
[4,3,1,2,1,'Ur2DbUbuBdr2fUFu2'],
[4,3,1,2,3,'uRu2ru2RbruRURBr2'],
[4,3,1,2,4,'u2r2URUrururUru'],
[4,3,1,3,1,'uRuRURURurur2'],
[4,3,1,3,2,'u2r2dFuFUfDr2Bubu'],
[4,3,1,3,4,'ru2Ru2rFRUrurfr2U'],
[4,3,1,4,1,'RUrfRUrurFr2urU'],
[4,3,1,4,2,'urUrubrb2ubUbRBRu'],
[4,3,1,4,3,'lUru2Lulu2LRU'],
[4,3,2,1,2,'r2l2Dr2l2u2r2l2Dr2l2'],
[4,3,2,1,3,'ur2f2rbRf2rBru'],
[4,3,2,1,4,'URbRf2rBRf2r2U'],
[4,3,2,2,1,'UFrfLFRflFRfLFrflU'],
[4,3,2,2,3,'uRbRf2rBRf2r2u'],
[4,3,2,2,4,'u2RbRf2rBRf2r2'],
[4,3,2,3,1,'Ur2f2rbRf2rBrU'],
[4,3,2,3,2,'u2r2f2rbRf2rBr'],
[4,3,2,3,4,'u2FrfLFRflFRfLFrfl'],
[4,3,2,4,1,'r2f2rbRf2rBru2'],
[4,3,2,4,2,'RbRf2rBRf2r2u2'],
[4,3,2,4,3,'u2']
]
for i in p:
if(state[0]==i[0] and state[1]==i[1] and state[2]==i[2] and state[12]==i[3] and state[13]==i[4]):
cube.do(state,i[5])
solve+='<BR />'
solve+=i[5]
t=1
break
if(t==0):
return '输入错误,请认真检查hhh~'
listo=['f','b','u','d','l','r']
solveconvert=[i for i in solve]
ls=len(solveconvert)
for i in range(0,ls-1):
if(solveconvert[i] in listo):
if(solveconvert[i+1])=='2':
solveconvert[i]=uppercase(solveconvert[i])
else:
solveconvert[i]=uppercase(solveconvert[i])+'\''
# if(solveconvert[ls-1] in listo):
# solveconvert[ls-1]=uppercase(solveconvert[ls-1])
if(solveconvert[ls-1] in listo):
solveconvert[ls-1]=uppercase(solveconvert[ls-1])+'\''
solve=''
for i in solveconvert:
solve+=i
return solve
| [
"unnino@126.com"
] | unnino@126.com |
006123581571814076c0c5a650ae638e95c97553 | 6fe2d3c27c4cb498b7ad6d9411cc8fa69f4a38f8 | /algorithms/algorithms-python/leetcode/Question_010_Regular_Expression_Matching.py | 68ae34c4b6feaa6b7adadbf0450d28621463d895 | [] | no_license | Lanceolata/code | aae54af632a212c878ce45b11dab919bba55bcb3 | f7d5a7de27c3cc8a7a4abf63eab9ff9b21d512fb | refs/heads/master | 2022-09-01T04:26:56.190829 | 2021-07-29T05:14:40 | 2021-07-29T05:14:40 | 87,202,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | #!/usr/bin/python
# coding: utf-8
class Solution(object):
    def isMatch(self, s, p):
        """Report whether pattern ``p`` matches the WHOLE string ``s``.

        Pattern grammar: '.' matches any single character; '*' matches
        zero or more repetitions of the element immediately before it.

        :type s: str
        :type p: str
        :rtype: bool
        """
        memo = {}

        def matches(i, j):
            # True iff s[i:] is matched by p[j:].
            if (i, j) in memo:
                return memo[(i, j)]
            if j == len(p):
                ans = i == len(s)
            else:
                head_ok = i < len(s) and p[j] in (s[i], '.')
                if j + 1 < len(p) and p[j + 1] == '*':
                    # Either skip the "x*" element entirely, or consume one
                    # matching character and stay on the same element.
                    ans = matches(i, j + 2) or (head_ok and matches(i + 1, j))
                else:
                    ans = head_ok and matches(i + 1, j + 1)
            memo[(i, j)] = ans
            return ans

        return matches(0, 0)
| [
"lanceolatayuan@gmail.com"
] | lanceolatayuan@gmail.com |
53098885dc20c023f509ece3f40ecf25383fd679 | 381c0b67d9078f1c88e29535643bfd22a897b754 | /helloWorld/venv/bin/pip3.7 | cabf51f8dfe2c2c7218591c85fc1ce659604aa25 | [] | no_license | zrshishir/python-programming | 9c41cdbc563b82f378eb3db686c530cd548dff7f | df93f09a3e4a93d8656441e3c515cd532e229925 | refs/heads/master | 2022-12-01T04:46:21.950559 | 2020-08-11T18:18:48 | 2020-08-11T18:18:48 | 286,817,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | 7 | #!/Users/zrshishir/PycharmProjects/helloWorld/venv/bin/python
# -*- coding: utf-8 -*-
# Auto-generated console-script wrapper: forwards to pip's internal CLI.
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip any trailing "-script.pyw" / ".exe" suffix from argv[0] so pip
    # reports a consistent program name across platforms.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"zrshishir@gmail.com"
] | zrshishir@gmail.com |
c90437d7489788001cdc9b5f5959fdffad3add2b | b2fc177dc3ec04e9f2a7b745b8c6cf885e01ee83 | /lab8/lab8/shop/api/admin.py | f37c38704c68f9983f5bd9466f5190b1d185408b | [] | no_license | Nurdaulet08/WebDev | 1be154f1174ffb3ecae5e76e7b2765588319a6e8 | aa13c9f4e88e654df472d5a302a5faff939cee08 | refs/heads/main | 2023-04-06T03:11:29.905226 | 2021-04-15T13:54:10 | 2021-04-15T13:54:10 | 337,661,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from django.contrib import admin
from .models import product
from .models import category
# Expose the shop models in the Django admin interface.
admin.site.register(product)
admin.site.register(category)
| [
"noreply@github.com"
] | Nurdaulet08.noreply@github.com |
d32573cf52bfd3b5afcb5fdd67737d39274ea1c5 | 0c71b3b47314dfb26b74e1886039e4f58404084b | /飞机大战/main.py | 1ca0ff99aa40814f2f4d433e6ce326dcd8724539 | [] | no_license | Ceeeeeeeeeeeeeb/AircraftBattle | a33dd6ac0326186ac9fb0b101f5097ab4eafa4fd | c9f90b4921cd56924c4c20bda1a6490b27937a32 | refs/heads/main | 2023-03-16T08:53:57.134516 | 2021-03-12T06:05:18 | 2021-03-12T06:05:18 | 346,951,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,618 | py | import pygame
import sys
import traceback
import myplane
import enemy
import bullet
import supply
from pygame.locals import *
from random import *
import os
# Create the high-score record file on first run.
if not os.path.exists('record.txt'):
    with open("record.txt", mode='w', encoding='utf-8') as ff:
        ff.write('0')
pygame.init()
pygame.mixer.init()
# Window size: 480 x 700 pixels.
bg_size = width, height = 480, 700
screen = pygame.display.set_mode(bg_size)
pygame.display.set_caption('飞机大战')
background = pygame.image.load('images/background.png').convert()
# Colour constants (RGB).
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
# Load the background music.
pygame.mixer.music.load('sound/game_music.ogg')
pygame.mixer.music.set_volume(0.2)
# Load the sound effects used by the game loop.
bullet_sound = pygame.mixer.Sound('sound/bullet.wav')
bullet_sound.set_volume(0.2)
bomb_sound = pygame.mixer.Sound('sound/use_bomb.wav')
bomb_sound.set_volume(0.2)
supply_sound = pygame.mixer.Sound('sound/supply.wav')
supply_sound.set_volume(0.2)
get_bomb_sound = pygame.mixer.Sound('sound/get_bomb.wav')
get_bomb_sound.set_volume(0.2)
get_bullet_sound = pygame.mixer.Sound('sound/get_bullet.wav')
get_bullet_sound.set_volume(0.2)
upgrade_sound = pygame.mixer.Sound('sound/upgrade.wav')
upgrade_sound.set_volume(0.2)
enemy3_fly_sound = pygame.mixer.Sound('sound/enemy3_flying.wav')
enemy3_fly_sound.set_volume(0.2)
enemy1_down_sound = pygame.mixer.Sound('sound/enemy1_down.wav')
enemy1_down_sound.set_volume(0.1)
enemy2_down_sound = pygame.mixer.Sound('sound/enemy2_down.wav')
enemy2_down_sound.set_volume(0.2)
enemy3_down_sound = pygame.mixer.Sound('sound/enemy3_down.wav')
enemy3_down_sound.set_volume(0.5)
me_down_sound = pygame.mixer.Sound('sound/me_down.wav')
me_down_sound.set_volume(0.2)
def add_small_enemies(group1, group2, num):
    """Spawn *num* small enemy planes and register each one in both groups."""
    for _ in range(num):
        plane = enemy.SamllEnemy(bg_size)
        group1.add(plane)
        group2.add(plane)
def add_mid_enemies(group1, group2, num):
    """Spawn *num* mid-size enemy planes and register each one in both groups."""
    for _ in range(num):
        plane = enemy.MidEnemy(bg_size)
        group1.add(plane)
        group2.add(plane)
def add_big_enemies(group1, group2, num):
    """Spawn *num* big enemy planes and register each one in both groups."""
    for _ in range(num):
        plane = enemy.BigEnemy(bg_size)
        group1.add(plane)
        group2.add(plane)
def inc_speed(target, inc):
    """Raise the ``speed`` attribute of every sprite in *target* by *inc*."""
    for sprite in target:
        sprite.speed = sprite.speed + inc
def main():
    """Run one full game session.

    Sets up the player, enemy waves, bullet pools, timers and UI assets,
    then drives the 60 FPS frame loop until the player quits.  On game
    over it shows the score screen and either restarts (by calling
    ``main()`` again) or exits.
    """
    pygame.mixer.music.play(-1)
    # Create the player's plane.
    me = myplane.MyPlane(bg_size)
    # Group holding every enemy plane (all sizes).
    enemies = pygame.sprite.Group()
    # Create the small enemy planes.
    small_enemies = pygame.sprite.Group()
    add_small_enemies(small_enemies, enemies, 15)
    # Create the mid-size enemy planes.
    mid_enemies = pygame.sprite.Group()
    add_mid_enemies(mid_enemies, enemies, 4)
    # Create the big enemy planes.
    big_enemies = pygame.sprite.Group()
    add_big_enemies(big_enemies, enemies, 2)
    # Pool of normal bullets, recycled round-robin via bullet1_index.
    bullet1 = []
    bullet1_index = 0
    BULLET1_NUM = 4
    for i in range(BULLET1_NUM):
        bullet1.append(bullet.Bullet1(me.rect.midtop))
    # Pool of super (double) bullets, fired in left/right pairs.
    bullet2 = []
    bullet2_index = 0
    BULLET2_NUM = 8
    for i in range(BULLET2_NUM // 2):
        bullet2.append(bullet.Bullet2((me.rect.centerx - 33, me.rect.centery)))
        bullet2.append(bullet.Bullet2((me.rect.centerx + 30, me.rect.centery)))
    clock = pygame.time.Clock()
    # Frame indices for the destruction animations.
    e1_destroy_index = 0
    e2_destroy_index = 0
    e3_destroy_index = 0
    me_destroy_index = 0
    # Score counter and its font.
    score = 0
    score_font = pygame.font.Font('font/font.ttf', 36)
    # Pause flag plus the pause/resume button images.
    paused = False
    pause_nor_image = pygame.image.load('images/pause_nor.png').convert_alpha()
    pause_pressed_image = pygame.image.load('images/pause_pressed.png').convert_alpha()
    resume_nor_image = pygame.image.load('images/resume_nor.png').convert_alpha()
    resume_pressed_image = pygame.image.load('images/resume_pressed.png').convert_alpha()
    pause_rect = pause_nor_image.get_rect()
    pause_rect.left, pause_rect.top = width - pause_rect.width - 10, 10
    pause_image = pause_nor_image
    # Difficulty level (raised as the score grows).
    level = 1
    # Full-screen bomb assets and remaining count.
    bomb_image = pygame.image.load('images/bomb.png').convert_alpha()
    bomb_rect = bomb_image.get_rect()
    bomb_font = pygame.font.Font('font/font.ttf', 48)
    bomb_num = 3
    # Deliver a supply pack every 30 seconds.
    bullet_supply = supply.Bullet_Supply(bg_size)
    bomb_supply = supply.Bomb_Supply(bg_size)
    SUPPLY_TIME = USEREVENT
    pygame.time.set_timer(SUPPLY_TIME, 30 * 1000)
    # Timer event that ends the super-bullet period.
    DOUBLE_BULLET_TIME = USEREVENT + 1
    # Whether super bullets are currently active.
    is_double_bullet = False
    # Timer event that ends the player's invincible state after a respawn.
    INVINCIBLE_TIME = USEREVENT + 2
    # Remaining lives.
    life_image = pygame.image.load('images/life.png').convert_alpha()
    life_rect = life_image.get_rect()
    life_num = 3
    # Guard so the record file is only read/written once per game over.
    recorded = False
    # Game-over screen resources.
    gameover_font = pygame.font.Font('font/font.ttf', 48)
    again_image = pygame.image.load('images/again.png').convert_alpha()
    again_rect = again_image.get_rect()
    gameover_image = pygame.image.load('images/gameover.png').convert_alpha()
    gameover_rect = gameover_image.get_rect()
    # Toggles between the two animation frames of a sprite.
    switch_image = True
    # Frame counter used for firing/animation cadence (counts 100 -> 1).
    delay = 100
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONDOWN:
                # Left click on the pause button toggles pause/resume.
                if event.button == 1 and pause_rect.collidepoint(event.pos):
                    paused = not paused
                    if paused:
                        pygame.time.set_timer(SUPPLY_TIME, 0)
                        pygame.mixer.music.pause()
                        pygame.mixer.pause()
                    else:
                        pygame.time.set_timer(SUPPLY_TIME, 30 * 1000)
                        pygame.mixer.music.unpause()
                        pygame.mixer.unpause()
            elif event.type == MOUSEMOTION:
                # Highlight the pause button while the mouse hovers over it.
                if pause_rect.collidepoint(event.pos):
                    if paused:
                        pause_image = resume_pressed_image
                    else:
                        pause_image = pause_pressed_image
                else:
                    if paused:
                        pause_image = resume_nor_image
                    else:
                        pause_image = pause_nor_image
            elif event.type == KEYDOWN:
                # Space bar detonates a full-screen bomb if any are left:
                # every enemy currently on screen is destroyed.
                if event.key == K_SPACE:
                    if bomb_num:
                        bomb_num -= 1
                        bomb_sound.play()
                        for each in enemies:
                            if each.rect.bottom > 0:
                                each.active = False
            elif event.type == SUPPLY_TIME:
                # Drop a random supply: either a bomb or a super-bullet pack.
                supply_sound.play()
                if choice([True, False]):
                    bomb_supply.reset()
                else:
                    bullet_supply.reset()
            elif event.type == DOUBLE_BULLET_TIME:
                is_double_bullet = False
                pygame.time.set_timer(DOUBLE_BULLET_TIME, 0)
            elif event.type == INVINCIBLE_TIME:
                me.invincible = False
                pygame.time.set_timer(INVINCIBLE_TIME, 0)
        # Raise the difficulty according to the player's score.
        if level == 1 and score > 50000:
            level = 2
            upgrade_sound.play()
            # Add 3 small, 2 mid-size and 1 big enemy plane.
            add_small_enemies(small_enemies, enemies, 3)
            add_mid_enemies(mid_enemies, enemies, 2)
            add_big_enemies(big_enemies, enemies, 1)
            # Speed up the small enemies.
            inc_speed(small_enemies, 1)
        if level == 2 and score > 300000:
            level = 3
            upgrade_sound.play()
            # Add 5 small, 3 mid-size and 2 big enemy planes.
            add_small_enemies(small_enemies, enemies, 5)
            add_mid_enemies(mid_enemies, enemies, 3)
            add_big_enemies(big_enemies, enemies, 2)
            # Speed up the enemies.
            inc_speed(small_enemies, 1)
            inc_speed(mid_enemies, 1)
        if level == 3 and score > 600000:
            level = 4
            upgrade_sound.play()
            # Add 5 small, 3 mid-size and 2 big enemy planes.
            add_small_enemies(small_enemies, enemies, 5)
            add_mid_enemies(mid_enemies, enemies, 3)
            add_big_enemies(big_enemies, enemies, 2)
            # Speed up the enemies.
            inc_speed(small_enemies, 1)
            inc_speed(mid_enemies, 1)
        if level == 4 and score > 1000000:
            level = 5
            upgrade_sound.play()
            # Add 5 small, 3 mid-size and 2 big enemy planes.
            add_small_enemies(small_enemies, enemies, 5)
            add_mid_enemies(mid_enemies, enemies, 3)
            add_big_enemies(big_enemies, enemies, 2)
            # Speed up the enemies.
            inc_speed(small_enemies, 1)
            inc_speed(mid_enemies, 1)
        screen.blit(background, (0, 0))
        if life_num and not paused:
            # Handle keyboard movement (WASD or arrow keys).
            key_pressed = pygame.key.get_pressed()
            if key_pressed[K_w] or key_pressed[K_UP]:
                me.moveUP()
            if key_pressed[K_s] or key_pressed[K_DOWN]:
                me.moveDown()
            if key_pressed[K_a] or key_pressed[K_LEFT]:
                me.moveLeft()
            if key_pressed[K_d] or key_pressed[K_RIGHT]:
                me.moveRight()
            # Draw the bomb supply and check whether the player picked it up.
            if bomb_supply.active:
                bomb_supply.move()
                screen.blit(bomb_supply.image, bomb_supply.rect)
                if pygame.sprite.collide_mask(bomb_supply, me):
                    get_bomb_sound.play()
                    if bomb_num < 3:
                        bomb_num += 1
                    bomb_supply.active = False
            # Draw the super-bullet supply and check whether it was picked up.
            if bullet_supply.active:
                bullet_supply.move()
                screen.blit(bullet_supply.image, bullet_supply.rect)
                if pygame.sprite.collide_mask(bullet_supply, me):
                    get_bullet_sound.play()
                    # Switch to super bullets for 18 seconds.
                    is_double_bullet = True
                    pygame.time.set_timer(DOUBLE_BULLET_TIME, 18 * 1000)
                    bullet_supply.active = False
            # Fire a bullet every 10 frames.
            if not (delay % 10):
                bullet_sound.play()
                if is_double_bullet:
                    bullets = bullet2
                    bullets[bullet2_index].reset((me.rect.centerx - 33, me.rect.centery))
                    bullets[bullet2_index + 1].reset((me.rect.centerx + 30, me.rect.centery))
                    bullet2_index = (bullet2_index + 2) % BULLET2_NUM
                else:
                    bullets = bullet1
                    bullets[bullet1_index].reset(me.rect.midtop)
                    bullet1_index = (bullet1_index + 1) % BULLET1_NUM
            # Move the bullets and check whether they hit an enemy.
            for b in bullets:
                if b.active:
                    b.move()
                    screen.blit(b.image, b.rect)
                    enemy_hit = pygame.sprite.spritecollide(b, enemies, False, pygame.sprite.collide_mask)
                    if enemy_hit:
                        b.active = False
                        for e in enemy_hit:
                            # Mid/big enemies absorb hits until energy runs out;
                            # small enemies die from a single hit.
                            if e in mid_enemies or e in big_enemies:
                                e.hit = True
                                e.energy -= 1
                                if e.energy == 0:
                                    e.active = False
                            else:
                                e.active = False
            # Draw the big enemy planes.
            for each in big_enemies:
                if each.active:
                    each.move()
                    # Draw the hit-flash effect.
                    if each.hit:
                        screen.blit(each.image_hit, each.rect)
                        each.hit = False
                    else:
                        if switch_image:
                            screen.blit(each.image1, each.rect)
                        else:
                            screen.blit(each.image2, each.rect)
                    # Draw the health bar above the plane.
                    pygame.draw.line(screen, BLACK, \
                                     (each.rect.left, each.rect.top - 5), \
                                     (each.rect.right, each.rect.top - 5), \
                                     2)
                    # Green while above 20% health, red otherwise.
                    energy_remain = each.energy / enemy.BigEnemy.energy
                    if energy_remain > 0.2:
                        energy_color = GREEN
                    else:
                        energy_color = RED
                    pygame.draw.line(screen, energy_color, \
                                     (each.rect.left, each.rect.top - 5), \
                                     (each.rect.left + each.rect.width * energy_remain, \
                                      each.rect.top - 5), 2)
                    # Play the flying sound just before it enters the screen.
                    if each.rect.bottom == -50:
                        enemy3_fly_sound.play(-1)
                else:
                    # Destruction animation.
                    if not (delay % 3):
                        if e3_destroy_index == 0:
                            enemy3_down_sound.play()
                        screen.blit(each.destroy_images[e3_destroy_index], each.rect)
                        e3_destroy_index = (e3_destroy_index + 1) % 6
                        if e3_destroy_index == 0:
                            enemy3_fly_sound.stop()
                            score += 10000
                            each.reset()
            # Draw the mid-size enemy planes.
            for each in mid_enemies:
                if each.active:
                    each.move()
                    # Draw the hit-flash effect.
                    if each.hit:
                        screen.blit(each.image_hit, each.rect)
                        each.hit = False
                    else:
                        screen.blit(each.image, each.rect)
                    # Draw the health bar above the plane.
                    pygame.draw.line(screen, BLACK, \
                                     (each.rect.left, each.rect.top - 5), \
                                     (each.rect.right, each.rect.top - 5), \
                                     2)
                    # Green while above 20% health, red otherwise.
                    energy_remain = each.energy / enemy.MidEnemy.energy
                    if energy_remain > 0.2:
                        energy_color = GREEN
                    else:
                        energy_color = RED
                    pygame.draw.line(screen, energy_color, \
                                     (each.rect.left, each.rect.top - 5), \
                                     (each.rect.left + each.rect.width * energy_remain, \
                                      each.rect.top - 5), 2)
                else:
                    # Destruction animation.
                    if not (delay % 3):
                        if e2_destroy_index == 0:
                            enemy2_down_sound.play()
                        screen.blit(each.destroy_images[e2_destroy_index], each.rect)
                        e2_destroy_index = (e2_destroy_index + 1) % 4
                        if e2_destroy_index == 0:
                            score += 6000
                            each.reset()
            # Draw the small enemy planes.
            for each in small_enemies:
                if each.active:
                    each.move()
                    screen.blit(each.image, each.rect)
                else:
                    # Destruction animation.
                    if not (delay % 3):
                        if e1_destroy_index == 0:
                            enemy1_down_sound.play()
                        screen.blit(each.destroy_images[e1_destroy_index], each.rect)
                        e1_destroy_index = (e1_destroy_index + 1) % 4
                        if e1_destroy_index == 0:
                            score += 1000
                            each.reset()
            # Check whether the player's plane collided with an enemy.
            enemies_down = pygame.sprite.spritecollide(me, enemies, False, \
                                                       pygame.sprite.collide_mask)
            if enemies_down and not me.invincible:
                me.active = False
                for e in enemies_down:
                    e.active = False
            # Draw the player's plane.
            if me.active:
                if switch_image:
                    screen.blit(me.image1, me.rect)
                else:
                    screen.blit(me.image2, me.rect)
            else:
                # Destruction animation; when it finishes, lose a life,
                # respawn and start a 3-second invincibility window.
                if not (delay % 3):
                    if me_destroy_index == 0:
                        me_down_sound.play()
                    screen.blit(me.destroy_images[me_destroy_index], me.rect)
                    me_destroy_index = (me_destroy_index + 1) % 4
                    if me_destroy_index == 0:
                        life_num -= 1
                        me.reset()
                        pygame.time.set_timer(INVINCIBLE_TIME, 3 * 1000)
            # Draw the remaining full-screen bomb count.
            bomb_text = bomb_font.render('× %d' % bomb_num, True, WHITE)
            text_rect = bomb_text.get_rect()
            screen.blit(bomb_image, (10, height - 10 - bomb_rect.height))
            screen.blit(bomb_text, (20 + bomb_rect.width, height - 5 - text_rect.height))
            # Draw the remaining lives.
            if life_num:
                for i in range(life_num):
                    screen.blit(life_image, \
                                (width - 10 - (i + 1) * life_rect.width, \
                                 height - 10 - life_rect.height))
            # Draw the score.
            score_text = score_font.render('Score : %s' % str(score), True, WHITE)
            screen.blit(score_text, (10, 5))
        # Game over.
        elif life_num == 0:
            # Stop the background music.
            pygame.mixer.music.stop()
            # Stop every sound effect.
            pygame.mixer.stop()
            # Stop delivering supplies.
            pygame.time.set_timer(SUPPLY_TIME, 0)
            if not recorded:
                recorded = True
                # Read the historical best score.
                with open('record.txt', 'r') as f:
                    record_score = int(f.read())
                # If the player beat the best score, save the new record.
                if score > record_score:
                    record_score = score
                    with open('record.txt', 'w') as f:
                        f.write(str(score))
            # Draw the game-over screen.
            record_score_text = score_font.render('Best: %d' % record_score, True, WHITE)
            screen.blit(record_score_text, (50, 50))
            gameover_text1 = gameover_font.render("You Score", True, WHITE)
            gameover_text1_rect = gameover_text1.get_rect()
            gameover_text1_rect.left, gameover_text1_rect.top = \
                (width - gameover_text1_rect.width) // 2, height // 3
            screen.blit(gameover_text1, gameover_text1_rect)
            gameover_text2 = gameover_font.render(str(score), True, WHITE)
            gameover_text2_rect = gameover_text2.get_rect()
            gameover_text2_rect.left, gameover_text2_rect.top = \
                (width - gameover_text2_rect.width) // 2, \
                gameover_text1_rect.bottom + 10
            screen.blit(gameover_text2, gameover_text2_rect)
            again_rect.left, again_rect.top = \
                (width - again_rect.width) // 2, \
                gameover_text2_rect.bottom + 50
            screen.blit(again_image, again_rect)
            gameover_rect.left, gameover_rect.top = \
                (width - again_rect.width) // 2, \
                again_rect.bottom + 10
            screen.blit(gameover_image, gameover_rect)
            # Check the player's mouse input.
            # If the left mouse button is pressed:
            if pygame.mouse.get_pressed()[0]:
                # Get the mouse position.
                pos = pygame.mouse.get_pos()
                # If the player clicked "play again":
                if again_rect.left < pos[0] < again_rect.right and \
                   again_rect.top < pos[1] < again_rect.bottom:
                    # Call main() to restart the game.
                    main()
                # If the player clicked "end game":
                elif gameover_rect.left < pos[0] < gameover_rect.right and \
                     gameover_rect.top < pos[1] < gameover_rect.bottom:
                    # Quit the game.
                    pygame.quit()
                    sys.exit()
        # Draw the pause button.
        screen.blit(pause_image, pause_rect)
        # Swap the animation frame every 5 frames.
        if not (delay % 5):
            switch_image = not switch_image
        delay -= 1
        if not delay:
            delay = 100
        pygame.display.flip()
        clock.tick(60)
clock.tick(60)
if __name__ == '__main__':
    # Entry point: run the game loop; keep the console open on a crash so the
    # traceback can be read before the window closes.
    try:
        main()
    except SystemExit:
        pass  # normal termination via sys.exit() -- nothing to report
    except:  # deliberately broad: surface any unexpected crash to the console
        traceback.print_exc()
        pygame.quit()
        input()  # block until keypress so the traceback stays visible
| [
"w18500351019@163.com"
] | w18500351019@163.com |
a66a9fd2e8e3f5ce2f3ec138056b33f7f578050a | eb977cdfc8d2333d4d36efc2e742004f25640139 | /mac/shop/views.py | a5deb77d3a1bab27478f652762d7f73bb4152ff5 | [] | no_license | surajrawat1210/ecommerce | 775faf207a4a3c0579d673691190d45b57c9fa87 | 6f6e950ebaf56d233ca6a22639222b720599511f | refs/heads/master | 2023-05-06T05:20:02.785202 | 2021-05-21T08:05:57 | 2021-05-21T08:05:57 | 369,461,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,752 | py | from django.http import HttpResponse
from django.shortcuts import render
from . models import Product,Contact
from math import ceil
def index(request):
    """Render the shop home page with one carousel row per product category.

    For every category holding more than one product, appends
    ``[queryset, range(1, nslides), nslides]`` to ``allprods`` for the
    template's slide loop (4 products per slide).

    NOTE(review): categories with exactly one product are skipped --
    presumably the carousel template needs at least two items; confirm
    before changing.
    """
    allprods = []
    # Distinct category names (one DB query instead of fetching all products).
    categories = {item['category'] for item in Product.objects.values('category')}
    for cat in categories:
        prod = Product.objects.filter(category=cat)
        n = len(prod)
        # Number of 4-product slides needed for this category.
        nslides = ceil(n / 4)
        if n > 1:
            allprods.append([prod, range(1, nslides), nslides])
    return render(request, "shop/index.html", {'allprods': allprods})
def about(request):
    # Static "about" page; no context data needed.
    return render(request,"shop/about.html")
def contact(request):
    """Contact page: on POST, persist the submitted message, then render the form.

    Missing fields default to empty strings rather than failing.
    NOTE(review): no validation and no redirect after POST -- refreshing the
    page re-submits the form; consider redirecting on success.
    """
    if request.method == 'POST':
        email = request.POST.get('email', "")
        message = request.POST.get('message', "")
        phone = request.POST.get('phone', "")
        # 'entry' avoids shadowing this view function's own name.
        entry = Contact(email=email, message=message, phone=phone)
        entry.save()
    return render(request, "shop/contact.html")
def tracker(request):
    # Order-tracker page placeholder (no context data yet).
    return render(request,'shop/tracker.html')
def search(request):
    # Search page placeholder.
    # NOTE(review): this view is defined twice in this module with an
    # identical body; the later definition silently overrides this one --
    # deduplicate.
    return render(request,'shop/search.html')
def productview(request, id):
    """Render the detail page for the product whose product_id equals `id`.

    NOTE(review): raises IndexError when no product matches `id`; Django's
    get_object_or_404 would return a proper 404, but needs an extra import.
    """
    product = Product.objects.filter(product_id=id)
    return render(request, 'shop/productview.html', {'product': product[0]})
def search(request):
    # NOTE(review): duplicate of the earlier `search` view (identical body);
    # this definition is the one that wins at import time.
    return render(request,'shop/search.html')
def checkout(request):
    # Placeholder checkout endpoint; returns static text instead of a template.
    return HttpResponse("we are on checkout page")
def cart(request):
    # Render the shopping-cart page (cart contents handled elsewhere, presumably
    # client-side -- no context is passed; verify against the template).
    return render(request,'shop/cart.html')
"surajrawat09859@gmail.com"
] | surajrawat09859@gmail.com |
9e0224d54a64afde8a18b0b2ae04e85b56617093 | d110546d747d7e3865ce5742d5fca09f404623c0 | /pkg/windows/nsis/tests/config_tests/test_install_dir_custom_minion.py | fcdf2146e0a35e6c3723e06236f4a534092dffa9 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"LGPL-2.0-or-later"
] | permissive | saltstack/salt | 354fc86a7be1f69514b3dd3b2edb9e6f66844c1d | 1ef90cbdc7203f97775edb7666db86a41eb9fc15 | refs/heads/master | 2023-07-19T20:56:20.210556 | 2023-06-29T23:12:28 | 2023-07-19T11:47:47 | 1,390,248 | 11,026 | 6,296 | Apache-2.0 | 2023-09-14T20:45:37 | 2011-02-20T20:16:56 | Python | UTF-8 | Python | false | false | 1,375 | py | import os
import pytest
@pytest.fixture(scope="module")
def inst_dir():
    # Non-default installation directory used to exercise the /install-dir flag.
    return "C:\\custom_location"
@pytest.fixture(scope="module")
def install(inst_dir):
    # Silent install into the custom directory with a custom config file and an
    # explicit minion id from the command line; the environment is cleaned both
    # before and after the module's tests.
    # NOTE: pytest.helpers / pytest.INST_BIN are project-level plugin attributes
    # registered elsewhere in the suite -- TODO confirm against conftest.
    pytest.helpers.clean_env(inst_dir)

    # Create a custom config
    pytest.helpers.custom_config()

    pytest.helpers.run_command(
        [
            pytest.INST_BIN,
            "/S",
            f"/install-dir={inst_dir}",
            "/custom-config=custom_conf",
            "/minion-name=cli_minion",
        ]
    )
    yield
    pytest.helpers.clean_env(inst_dir)
def test_binaries_present(install, inst_dir):
    # The `install` fixture (unused directly) performs the installation;
    # ssm.exe must land in the custom install directory.
    assert os.path.exists(rf"{inst_dir}\ssm.exe")
def test_config_present(install):
    # A minion config file must be written under the default data directory
    # even though the binaries went to the custom install dir.
    assert os.path.exists(rf"{pytest.DATA_DIR}\conf\minion")
def test_config_correct(install):
    # The config file should be the custom config with only minion set
    # (i.e. the custom template's comment lines preserved, `master` from the
    # custom config, and `id` overridden by the /minion-name CLI flag).
    expected = [
        "# Custom config from test suite line 1/6\n",
        "master: custom_master\n",
        "# Custom config from test suite line 2/6\n",
        "id: cli_minion\n",
        "# Custom config from test suite line 3/6\n",
        "# Custom config from test suite line 4/6\n",
        "# Custom config from test suite line 5/6\n",
        "# Custom config from test suite line 6/6\n",
    ]

    with open(rf"{pytest.DATA_DIR}\conf\minion") as f:
        result = f.readlines()

    assert result == expected
| [
"leesh@vmware.com"
] | leesh@vmware.com |
693f078cfc8b1df7350051c51f2a3374af94075f | 91639fea573828d08e8642a9022fe2ec62319414 | /telegram/forcereply.py | ed4d9c821ee6c8043822fa94bc0db9cf3bec18eb | [
"MIT"
] | permissive | agincel/AdamTestBot | 9787a22f25a3bfc2bbab0b6c6e66b857cb369f32 | fee093c3dd944881bd92c9180fbb3a13700673da | refs/heads/master | 2020-05-22T04:26:39.241479 | 2016-12-29T22:15:04 | 2016-12-29T22:15:04 | 44,931,116 | 0 | 8 | null | 2016-10-18T22:04:33 | 2015-10-25T21:40:35 | Python | UTF-8 | Python | false | false | 1,753 | py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents a Telegram ForceReply."""
from telegram import ReplyMarkup
class ForceReply(ReplyMarkup):
    """This object represents a Telegram ForceReply reply-markup.

    Attributes:
        force_reply (bool): Always coerced to ``bool``; defaults to ``True``.
        selective (bool): Taken from ``kwargs`` and coerced to ``bool``;
            defaults to ``False``.

    Args:
        force_reply (bool): Whether to show the forced-reply interface.
        **kwargs: Arbitrary keyword arguments.

    Keyword Args:
        selective (Optional[bool]): Restrict the forced reply to specific
            users only (per the Bot API semantics -- confirm against the
            Telegram documentation).
    """

    def __init__(self,
                 force_reply=True,
                 **kwargs):
        # Required
        self.force_reply = bool(force_reply)

        # Optionals
        self.selective = bool(kwargs.get('selective', False))

    @staticmethod
    def de_json(data):
        """Deserialize a parsed Bot API payload into a ForceReply.

        Args:
            data (dict): Parsed JSON payload; any falsy value yields ``None``.

        Returns:
            telegram.ForceReply: The deserialized object, or ``None`` when
            ``data`` is empty.
        """
        if not data:
            return None

        return ForceReply(**data)
| [
"adam@DESKTOP-TA850RC.localdomain"
] | adam@DESKTOP-TA850RC.localdomain |
7612e8fe4a585cf142934c9de78f0311d029a988 | b6a2a61ebdbec779b504e2a92446200b540311f0 | /detect_with_classifier_sliding_window.py | 2f122d68473f4b8620525a002c63c1bfeb17d9ec | [] | no_license | Walid-Ahmed/Object-Detection-with-Sliding-Window-and-classification | 375b7a4a8602b8f78d4e05103784da2ac8ce096a | e10b5c1513d5c3dff1e335a8ed256ff4db02b964 | refs/heads/main | 2023-05-26T17:38:59.769367 | 2021-06-20T11:38:48 | 2021-06-20T11:38:48 | 378,446,914 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,511 | py | '''
Credit
Adrian Rosebrock, urning any CNN image classifier into an object detector with Keras, TensorFlow, and OpenCV , PyImageSearch, https://www.pyimagesearch.com/2020/06/22/turning-any-cnn-image-classifier-into-an-object-detector-with-keras-tensorflow-and-opencv/, accessed on 16 April 2021
'''
# USAGE
# python detect_with_classifier_sliding_window.py --image images/test3.jpeg --min-conf 0.8 --modelFile soccer_Classifier.h5
# import the necessary packages
from tensorflow.keras.preprocessing.image import img_to_array
import imutils
import sys
from imutils.object_detection import non_max_suppression
import numpy as np
import argparse
import imutils
import cv2
from tensorflow.keras.models import load_model
import os
import shutil
np.set_printoptions(threshold=sys.maxsize)
def sliding_window(image, step, ws):
    # Walk a (ws[0] x ws[1]) window across the image left-to-right and
    # top-to-bottom, advancing `step` pixels each move, yielding
    # (x, y, window) at every position. Windows that would overrun the
    # right/bottom edge are not emitted.
    win_w, win_h = ws
    y = 0
    while y < image.shape[0] - win_h:
        x = 0
        while x < image.shape[1] - win_w:
            yield (x, y, image[y:y + win_h, x:x + win_w])
            x += step
        y += step
def image_pyramid(image, scale=1.5, minSize=(224, 224)):
    # Generator of progressively smaller copies of `image`. The first level
    # is the original; each subsequent level is `scale` times narrower
    # (aspect ratio preserved by imutils.resize). Stops once a level drops
    # below `minSize` in either dimension -- that level is not yielded.
    yield image

    min_w, min_h = minSize
    while True:
        image = imutils.resize(image, width=int(image.shape[1] / scale))
        if image.shape[0] < min_h or image.shape[1] < min_w:
            return
        yield image
demoFrame = np.zeros([512,1300,3],dtype=np.uint8)
demoFrame.fill(255)
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case
fileNameToSaveVideo="demo_Object_Detection_with_Sliding_Window_and_binary_classification.mp4"
fps=70
video_creator = cv2.VideoWriter(fileNameToSaveVideo,fourcc, fps, (1300,512))
# prepare folders to store cropped rois ,rois classiffied as positive images , rois classiffied as negative images for further analysis
foldersToCreate=["cropped" ,"positive","negative"]
for folderToCreate in foldersToCreate:
if os.path.exists(folderToCreate):
shutil.rmtree(folderToCreate)
os.mkdir(folderToCreate)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--image", required=True,
help="path to the input image")
ap.add_argument("-s", "--size", type=str, default="(200, 150)",
help="ROI size (in pixels)")
ap.add_argument("--min-conf", type=float, default=0.9,
help="minimum probability to filter weak detections")
ap.add_argument("--modelFile", type=str, default=-1,
help="model used for classification")
args = vars(ap.parse_args())
args["modelFile"]
# initialize variables used for the object detection procedure
WIDTH = 600
PYR_SCALE = 1.5
WIN_STEP = 16
ROI_SIZE = (64, 64)
INPUT_SIZE = (32,32) #This will be the input size to the model
# load our the network weights from disk
print("[INFO] loading network...")
pathToModelFile=args["modelFile"]
model = load_model(pathToModelFile)
print("[INFO] Model successfully loaded from {}".format(pathToModelFile))
# load the input image from disk, resize it such that it has the
# has the supplied width, and then grab its dimensions
orig = cv2.imread(args["image"])
orig = imutils.resize(orig, width=WIDTH)
(H, W) = orig.shape[:2]
# initialize the image pyramid
pyramid = image_pyramid(orig, scale=PYR_SCALE, minSize=ROI_SIZE)
rois = [] # store rois
locs = [] #store coordinate of roi
i=949
# loop over the image pyramid
for image in pyramid:
# determine the scale factor between the *original* image
# dimensions and the *current* layer of the pyramid
scale = W / float(image.shape[1])
print("[INFO] Working with image with size {} after a scale factor {} ".format(image.shape,scale))
# for each layer of the image pyramid, loop over the sliding
# window locations
#x,y are the coordinates within image returned from pyramid
for (x, y, roiOrig) in sliding_window(image, WIN_STEP, ROI_SIZE):
# scale the (x, y)-coordinates of the ROI with respect to the
# *original* image dimensions
x_modified = int(x * scale)
y_modified = int(y * scale)
w_modified = int(ROI_SIZE[0] * scale)
h_modified = int(ROI_SIZE[1] * scale)
w= int(ROI_SIZE[0])
h= int(ROI_SIZE[1])
pathToSave=os.path.join("cropped","crop_"+str(i)+".png")
cv2.imwrite(pathToSave, roiOrig)
i=i+1
roi = cv2.resize(roiOrig, INPUT_SIZE) #resize to input size needed for model
roi = img_to_array(roi) #change image to numpy array
roi=roi/255
# update our list of ROIs and associated coordinates
rois.append(roi)
locs.append((x_modified, y_modified, x_modified + w_modified, y_modified + h_modified))
clone=image.copy()
cv2.rectangle(clone, (x, y), (x + w, y + h),(0, 255, 0), 2)
h1,w1,_=clone.shape
h2,w2,_=roiOrig.shape
demoFrame.fill(255)
a1=10+int(300-0.5*w1) +350
b1=100
b=300+350
demoFrame[b1:b1+h1,a1:a1+w1]=clone
demoFrame[10:10+h2,b:b+w2]=roiOrig
cv2.imshow("Demo", demoFrame)
video_creator.write(demoFrame)
cv2.waitKey(1)
# convert the ROIs to a NumPy array
rois = np.array(rois, dtype="float32")
# classify each of the proposal ROIs using the model
probs = model.predict(rois)
y_pred=probs.argmax(axis=1)
label="Scoccer Ball"
boxes=[]
probs_soccer=[]
# loop over the predictions
p=0
n=0
for (i, classIndex) in enumerate(y_pred):
# grab the prediction information for the current ROI
#(imagenetID, label, prob) = p[0]
if classIndex== 1: # the roi was classifies positive
box = locs[i]
out=probs[i]
if (out[1]>= args["min_conf"]): ## filter out weak detections
boxes.append(box)
probs_soccer.append(out[1])
positiveImage=rois[i]*255
pathToSave=os.path.join("positive","_pos_"+str(p)+".png")
p=p+1
cv2.imwrite(pathToSave, positiveImage)
else:
pathToSave=os.path.join("negative","neg_"+str(n)+".png")
n=n+1
negativeImage=rois[i]*255
cv2.imwrite(pathToSave, negativeImage)
# loop over all bounding boxes for the current label
clone=orig.copy()
for (box, prob) in zip(boxes,probs_soccer):
# draw the bounding box on the image
(startX, startY, endX, endY) = box
cv2.rectangle(clone, (startX, startY), (endX, endY),(0, 255, 0), 2)
h1,w1,_=clone.shape
demoFrame.fill(255)
a=10+int(300-0.5*w1)
i=int((a+w1)/2)-220
j=30
cv2.putText(demoFrame, "Detected Soccer ball before NMS", (i, j),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2)
demoFrame[100:100+h1,a:a+w1]=clone
video_creator.write(demoFrame)
boxes = np.array(boxes, dtype=np.float32)
boxes = non_max_suppression(boxes, probs_soccer)
clone = orig.copy()
for (startX, startY, endX, endY) in boxes:
# draw the bounding box and label on the image
cv2.rectangle(clone, (startX, startY), (endX, endY),
(0, 255, 0), 2)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.putText(clone, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
# show the output after apply non-maxima suppression
#cv2.imshow("After", clone)
i=int((a+w1)/2)-90+ w1-50
cv2.putText(demoFrame, "Detected Soccer ball after NMS", (i, j),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2)
demoFrame[100:100+h1,a+w1+20:a+w1+20+w1]=clone
cv2.imshow("Demo", demoFrame)
#shlow last frame for a longer time
for i in range(300):
video_creator.write(demoFrame)
cv2.waitKey(3000)
video_creator.release()
print("[INFO] Demo video saved to file {}".format(fileNameToSaveVideo))
| [
"noreply@github.com"
] | Walid-Ahmed.noreply@github.com |
99e24fc1e58762f96e053af6d8df04e9c57e9ee0 | e0d34ce5603b04f50ac019c54e524e91b818bf73 | /world_cup_sim.py | 7d483cdcbd5938234b6cc59367e521d57538b93e | [] | no_license | alck11/world-cup-elo | 9828b1a7e592fc9e4a19aea395d107f2d02a760d | 38a29b8099f3fcdaf2b321ddb5e0b96578da992a | refs/heads/master | 2020-03-19T12:33:03.994206 | 2018-06-08T07:05:35 | 2018-06-08T07:05:35 | 136,525,001 | 1 | 0 | null | 2018-06-07T20:00:29 | 2018-06-07T20:00:28 | null | UTF-8 | Python | false | false | 7,545 | py |
import random, math
from Match import *
from standings import init
from constants import *
from matches import matches
def get_updated_elo(prev, exp, outcome, k=60):
    """Return the post-match Elo rating.

    prev:    rating before the match.
    exp:     expected score for the rated team (from Match.get_expectation()).
    outcome: actual score -- presumably 1 win / 0.5 draw / 0 loss, per
             Match.get_outcome(); TODO confirm against the Match class.
    k:       K-factor weight; defaults to 60, the constant previously
             hard-coded for World Cup matches (generalized to a parameter,
             backward-compatible).
    """
    return prev + k * (outcome - exp)
def update_points(match_data, teams):
    """Award group-stage points for one match: 3 to the winner, 1 each on a draw.

    `match_data` carries per-side dicts under "a"/"b" with the team's index
    into `teams`; `teams` is mutated in place and also returned.
    WIN/DRAW and the PTS key come from the `constants` module.
    """
    a_i = match_data["a"]["index"]
    b_i = match_data["b"]["index"]
    outcome = match_data["outcome"]

    if outcome == WIN:
        teams[a_i][PTS] += 3
    elif outcome == DRAW:
        teams[a_i][PTS] += 1
        teams[b_i][PTS] += 1
    else:
        teams[b_i][PTS] += 3
    return teams
def update_standings(match_data, teams):
    """Update both teams' Elo ratings in `teams` after one match.

    Ratings are taken from the pre-match snapshot in `match_data` (not from
    `teams`) so both updates use the same baseline, then rounded to ints.
    Team B's expectation/outcome are the complements of team A's; the
    `abs()` guards the complement without changing 1/0.5/0 outcomes.
    """
    side_a = match_data["a"]
    side_b = match_data["b"]
    exp = match_data["exp"]
    outcome = match_data["outcome"]

    new_a_elo = get_updated_elo(side_a[ELO], exp, outcome)
    new_b_elo = get_updated_elo(side_b[ELO], 1 - exp, abs(1 - outcome))

    teams[side_a["index"]][ELO] = int(round(new_a_elo))
    teams[side_b["index"]][ELO] = int(round(new_b_elo))
    return teams
def update_team_goals(match_data, teams):
    """Accumulate goals-for (GF) and goals-against (GA) for both teams.

    `teams` is mutated in place and also returned; GF/GA keys come from the
    `constants` module.
    """
    side_a = match_data["a"]
    side_b = match_data["b"]
    a_i, a_goals = side_a["index"], side_a["scored"]
    b_i, b_goals = side_b["index"], side_b["scored"]

    teams[a_i][GF] += a_goals
    teams[a_i][GA] += b_goals
    teams[b_i][GF] += b_goals
    teams[b_i][GA] += a_goals
    return teams
def sim_match(a_name, b_name, teams):
    """Simulate one group-stage match and fold the result into `teams`.

    Looks up both teams by name, plays a Match on their current Elo ratings,
    then updates goals, points and ratings. Returns the updated `teams`.

    Raises:
        ValueError: if either team name is not present in `teams` (the
        original silently produced a wrong lookup via index -1).
    """
    # Single pass to resolve both indices (was two comprehensions + a loop).
    a_index = b_index = -1
    for i, team in enumerate(teams):
        if team["name"] == a_name:
            a_index = i
        if team["name"] == b_name:
            b_index = i
    if a_index < 0 or b_index < 0:
        raise ValueError(f"unknown team(s): {a_name!r} vs {b_name!r}")

    a_elo = teams[a_index][ELO]
    b_elo = teams[b_index][ELO]

    # Preserve the original call order -- Match may draw random numbers.
    match = Match(a_elo, b_elo)
    outcome = match.get_outcome()
    exp = match.get_expectation()
    a_goals, b_goals = match.get_score()

    match_data = {"a": {"name": a_name, "scored": a_goals, "elo": a_elo, "index": a_index},
                  "b": {"name": b_name, "scored": b_goals, "elo": b_elo, "index": b_index},
                  "exp": exp,
                  "outcome": outcome}

    teams = update_team_goals(match_data, teams)
    teams = update_points(match_data, teams)
    teams = update_standings(match_data, teams)
    return teams
def print_group(standings, group_name):
    """Print the named group's teams, highest points first."""
    members = [team for team in standings if team["group"] == group_name]
    # reversed(ascending sort) keeps the exact tie ordering of the original
    # `sorted(...)[::-1]` formulation.
    for team in reversed(sorted(members, key=lambda team: team["points"])):
        print(team)
def pretty_print(standings):
    """Dump all eight groups' standings with a blank line between groups."""
    for group_name in "ABCDEFGH":
        print_group(standings, group_name)
        print("")
def sim_group_stage():
    """Play every scheduled group-stage fixture once and return the standings."""
    standings = init()
    for fixture in matches:
        standings = sim_match(fixture["home"], fixture["away"], standings)
    return standings
def get_top_two_from(curr_standings, group_name):
    """Return the top two finishers (by points) of the named group."""
    members = (team for team in curr_standings if team["group"] == group_name)
    by_points = sorted(members, key=lambda team: team["points"], reverse=True)
    return by_points[:2]
def get_random_float_between(lower_bound, upper_bound):
    """Return a uniformly distributed random float in [lower_bound, upper_bound]."""
    return random.uniform(lower_bound, upper_bound)
def get_knockout_match_winner(team_a, team_b):
    """Decide a single-elimination tie: team_a advances with probability equal
    to its Elo expectation, otherwise team_b does."""
    expectation = Match(team_a[ELO], team_b[ELO]).get_expectation()
    roll = get_random_float_between(0, 1)
    return team_a if roll < expectation else team_b
def sim_knockout_round(curr_standings, master_dict):
    """Simulate the whole knockout bracket once and tally results into master_dict.

    Per team the tallies are: a round-of-16 win ("qf", i.e. a quarter-final
    berth), a quarter-final win ("sf") and winning the tournament
    ("champion"). Semi-final wins (reaching the final) are deliberately not
    tallied, matching the original bookkeeping. Returns master_dict.

    This replaces 60 lines of copy-pasted per-match variables with a
    data-driven bracket; the match order (and therefore the random-draw
    order) is preserved.
    """
    # Group winner / runner-up for each group.
    firsts = {}
    seconds = {}
    for group in "ABCDEFGH":
        firsts[group], seconds[group] = get_top_two_from(curr_standings, group)

    # Round of 16: winner of one group vs runner-up of its partner group,
    # in the official bracket order (A1-B2, C1-D2, ..., B1-A2, ..., H1-G2).
    r16_pairings = [("A", "B"), ("C", "D"), ("E", "F"), ("G", "H"),
                    ("B", "A"), ("D", "C"), ("F", "E"), ("H", "G")]
    qf_teams = [get_knockout_match_winner(firsts[w], seconds[r])
                for w, r in r16_pairings]

    # Quarter-finals: adjacent round-of-16 winners meet.
    sf_teams = [get_knockout_match_winner(qf_teams[i], qf_teams[i + 1])
                for i in range(0, 8, 2)]

    # Semi-finals, then the final.
    finalists = [get_knockout_match_winner(sf_teams[0], sf_teams[1]),
                 get_knockout_match_winner(sf_teams[2], sf_teams[3])]
    champion = get_knockout_match_winner(finalists[0], finalists[1])

    # Tally; team names are unique, so membership tests match the original
    # one-increment-per-team behavior.
    qf_names = {team["name"] for team in qf_teams}
    sf_names = {team["name"] for team in sf_teams}
    for entry in master_dict:
        if entry["name"] in qf_names:
            entry["qf"] += 1
        if entry["name"] in sf_names:
            entry["sf"] += 1
        if entry["name"] == champion["name"]:
            entry["champion"] += 1
    return master_dict
def update_qualifications(curr_standings, master_dict, name):
    """Credit group-stage qualification for group `name`.

    The group's points leader gets +1 on its "first" counter, the runner-up
    +1 on "second", in `master_dict` (matched by team name). Returns
    master_dict. (Removed two dead variables and the IndexError that the
    original raised when a group had fewer than two teams.)
    """
    group = [team for team in curr_standings if team["group"] == name]
    ranked = sorted(group, key=lambda team: team["points"], reverse=True)
    # Position 0 -> group winner, position 1 -> runner-up.
    counters = ["first", "second"]
    for position, qualifier in enumerate(ranked[:2]):
        for entry in master_dict:
            if entry["name"] == qualifier["name"]:
                entry[counters[position]] += 1
    return master_dict
def run_sim_n_times(n):
    """Run the full tournament simulation `n` times and print aggregate stats.

    Accumulates goals and points per team across runs, plus qualification and
    knockout tallies, then pretty-prints the totals. (The original shadowed
    the outer loop variable `i` with the inner index loop; fixed.)
    """
    master_dict = init()
    group_names = ["A", "B", "C", "D", "E", "F", "G", "H"]
    accumulate_keys = ["GF", "GA", "points"]

    for _ in range(n):
        curr_standings = sim_group_stage()
        # NOTE(review): assumes sim_group_stage() returns teams in the same
        # order as init() -- same assumption as the original index-based loop.
        for master_entry, team in zip(master_dict, curr_standings):
            for key in accumulate_keys:
                master_entry[key] += team[key]
        for group in group_names:
            master_dict = update_qualifications(curr_standings, master_dict, group)
        master_dict = sim_knockout_round(curr_standings, master_dict)
    pretty_print(master_dict)
#m = Match(100, 200)
run_sim_n_times(10000) | [
"afreymiller10@gmail.com"
] | afreymiller10@gmail.com |
6ca464214a42ba84a9d25de3821c229dff2289b3 | 87dd7f0df1e73bcfeb2d3e1c80ff368f3768499b | /test/test_model3.py | 63340eb1cb6c6fb93fee5da9393f1e4d850d4d13 | [] | no_license | torloneg/fabulator-sdk-python | 18bf6b3996062ef28c5731ec67955734a2da87ce | c9480ed4ea4acee2b067b2471759ab68a5eca4b1 | refs/heads/master | 2020-04-18T07:07:55.481187 | 2019-01-24T10:32:08 | 2019-01-24T10:32:08 | 167,349,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # coding: utf-8
"""
Fabulator API
Endpoint Fabulator project # noqa: E501
OpenAPI spec version: 1.0.0
Contact: torloneg@gmail.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.model3 import Model3 # noqa: E501
from swagger_client.rest import ApiException
class TestModel3(unittest.TestCase):
    """Model3 unit test stubs (swagger-codegen generated; fill in the FIXME)."""

    def setUp(self):
        # No per-test fixtures needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testModel3(self):
        """Test Model3"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.model3.Model3()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"torloneg@gmail.com"
] | torloneg@gmail.com |
72c49aa9bbe684e1346f0668477ce89c841a9b0a | 74192ed5b9b5529c7b6ab9f238e9f44d5dce6aca | /backend/manage.py | abefc5fc0c9913675279a1ecc7a3677e5d3465ed | [] | no_license | crowdbotics-apps/black-mart-23466 | 47691415229f97248b7aaf18c6efca45f81d6e84 | 5be279d61e74cc543782b8879ca4d7abeb3ad06c | refs/heads/master | 2023-02-03T22:51:54.843742 | 2020-12-24T19:41:41 | 2020-12-24T19:41:41 | 322,783,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the black_mart_23466 project."""
    # Default settings module; an externally exported DJANGO_SETTINGS_MODULE
    # takes precedence over this default.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "black_mart_23466.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
ed60f4604756b8ab463685159c294e5bedba1161 | ec25c4bfaf364b1aed31245d5ad20e1acbb4c723 | /scripts/distpass_createjson_dataset_bruteforce.py | 4d48be662f4704caa8b25ae382612401727ed9cf | [] | no_license | lclms/distpass | 28cce1086b37f844dfc3b74ac0286e6dbf8349fe | dad7eda68551315911dec2a951d4c759da65038a | refs/heads/master | 2020-03-30T08:24:45.200309 | 2018-10-02T22:36:34 | 2018-10-02T22:36:34 | 151,012,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: luismarques
"""
# Split a wordlist into 50k-word chunks in the distpass brute-force import
# format. Each chunk opens with a ']}{"l": ... "w": [' header; the last word
# of a chunk is written without a trailing comma.
# NOTE(review): the output is not standalone valid JSON (it begins with a
# closing fragment) -- presumably the downstream importer concatenates or
# post-processes it; verify before "fixing" the format.
WORDS_PER_CHUNK = 50000
wordlist_path = 'az09.txt'
output_path = 'file_importa_distpass_brute_force.json'

# Both files in one `with` so the output handle is not leaked if the input
# is missing (the original opened it eagerly and only closed on success);
# also renamed the handle that shadowed the `file` builtin.
with open(wordlist_path) as src, open(output_path, 'w') as out:
    var = WORDS_PER_CHUNK       # running word counter (starts at chunk size)
    dobra = WORDS_PER_CHUNK     # next chunk boundary
    s = "4"                     # "l" field written into every chunk header
    for line in src:
        word = line.strip()
        # Tag the chunk type: "i" when the word parses as an int, else "s".
        try:
            int(word)
            ty = "i"
        except ValueError:
            ty = "s"
        if var == dobra:
            # Start of a new chunk.
            out.write(']}{"l": ' + s + ', "k": "b","t":"' + ty + '","w": [')
            dobra += WORDS_PER_CHUNK
        if var + 1 == dobra:
            # Last word of the chunk: no trailing comma.
            out.write('"' + word + '"\n')
        else:
            out.write('"' + word + '",\n')
        var += 1
| [
"noreply@github.com"
] | lclms.noreply@github.com |
65b8f432c0f9ad2a8d3da0b263fbe75b60296b9d | 1d4a0e2ea3923ee9799bf696123cb82262e61619 | /drenaj/utils/drnj_time.py | afcec7ecd3c94e507f4114ce3b84839da924a19d | [
"MIT"
] | permissive | kyzn/drenaj | f5e026a374df7a998fdd7b6ab0f8a7c0f87e9bbe | 89c0da419ea936d48bf63766cdff40432aacfe44 | refs/heads/master | 2021-01-11T02:56:59.978637 | 2017-03-05T18:45:29 | 2017-03-05T18:45:29 | 70,877,162 | 0 | 0 | null | 2016-10-14T05:32:25 | 2016-10-14T05:32:24 | null | UTF-8 | Python | false | false | 1,247 | py | # This file defines the functions for time conversion
#
import time
# Days between 0000-01-01 and the Unix epoch (1970-01-01) in the proleptic
# calendar used by MATLAB-style datenums; "drnj time" is a fractional day
# count from 1-Jan-0000.
number_of_days_from_1_Jan_0000_to_1_Jan_1970 = 719529.0

# Seconds in one day, for Unix-seconds <-> fractional-days conversion.
_SECONDS_PER_DAY = 24.0 * 60 * 60

# Twitter-style UTC timestamp, e.g. "Thu Jan 01 00:00:00 +0000 1970".
_UTC_TIME_FORMAT = '%a %b %d %H:%M:%S +0000 %Y'


def now_in_drnj_time():
    """Return the current time as a drnj timestamp (days since 1-Jan-0000)."""
    return py_time2drnj_time(time.time())


def py_time2drnj_time(t):
    """Convert Unix time `t` (seconds since 1-Jan-1970) to days since 1-Jan-0000."""
    return number_of_days_from_1_Jan_0000_to_1_Jan_1970 + t / _SECONDS_PER_DAY


# javascript version: utc_time2drnj_time = function (date_str) { return 719529.0 + new Date(date_str).getTime()/(24.0*60*60*1000) }
def py_utc_time2drnj_time(time_str):
    """Parse a UTC timestamp ('Thu Jan 01 00:00:00 +0000 1970') into drnj time.

    NOTE: %a/%b parsing via time.strptime is locale-dependent; this assumes
    an English (C) locale, as the original did.
    """
    import calendar  # local import preserved to avoid changing module import side effects
    t = calendar.timegm(time.strptime(time_str, _UTC_TIME_FORMAT))
    return py_time2drnj_time(t)


def xdays_before_now_in_drnj_time(days):
    """Return the drnj timestamp `days` days before now."""
    return now_in_drnj_time() - days


def drnj_time2py_time(dt):
    """Convert drnj time `dt` (days since 1-Jan-0000) back to Unix seconds."""
    return (dt - number_of_days_from_1_Jan_0000_to_1_Jan_1970) * _SECONDS_PER_DAY


def py_time2str(t):
    """Format Unix time `t` as a UTC timestamp string."""
    return time.strftime(_UTC_TIME_FORMAT, time.gmtime(t))


def drnj_time2str(dt):
    """Format drnj time `dt` as a UTC timestamp string."""
    return py_time2str(drnj_time2py_time(dt))
| [
"onurgu@gmail.com"
] | onurgu@gmail.com |
a71a9e09267dcf33aa66e75ad4fb6119b393b175 | 5dea72cb1d1978c2cef9792d273af74e7a4d3031 | /api_1.3/containerd/services/namespaces/v1/namespace_pb2_grpc.py | 599bf6f0e012a31eebfb7a79a0d3a2134dcfd5d0 | [
"Apache-2.0"
] | permissive | Silvanoc/pycontainerd | 1cd019828260bef51d566fb4b996ba8166e659d3 | 7245ce623d978f65cd8a4cf0d685a3318640a305 | refs/heads/master | 2020-11-24T22:45:30.072647 | 2019-12-16T12:05:42 | 2019-12-16T12:05:42 | 228,370,707 | 0 | 0 | Apache-2.0 | 2019-12-16T11:33:02 | 2019-12-16T11:20:22 | Python | UTF-8 | Python | false | false | 6,631 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from containerd.services.namespaces.v1 import namespace_pb2 as containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2
from containerd.vendor.google.protobuf import empty_pb2 as containerd_dot_vendor_dot_google_dot_protobuf_dot_empty__pb2
class NamespacesStub(object):
"""Namespaces provides the ability to manipulate containerd namespaces.
All objects in the system are required to be a member of a namespace. If a
namespace is deleted, all objects, including containers, images and
snapshots, will be deleted, as well.
Unless otherwise noted, operations in containerd apply only to the namespace
supplied per request.
I hope this goes without saying, but namespaces are themselves NOT
namespaced.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/containerd.services.namespaces.v1.Namespaces/Get',
request_serializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.GetNamespaceRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.GetNamespaceResponse.FromString,
)
self.List = channel.unary_unary(
'/containerd.services.namespaces.v1.Namespaces/List',
request_serializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.ListNamespacesRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.ListNamespacesResponse.FromString,
)
self.Create = channel.unary_unary(
'/containerd.services.namespaces.v1.Namespaces/Create',
request_serializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.CreateNamespaceRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.CreateNamespaceResponse.FromString,
)
self.Update = channel.unary_unary(
'/containerd.services.namespaces.v1.Namespaces/Update',
request_serializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.UpdateNamespaceRequest.SerializeToString,
response_deserializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.UpdateNamespaceResponse.FromString,
)
self.Delete = channel.unary_unary(
'/containerd.services.namespaces.v1.Namespaces/Delete',
request_serializer=containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2.DeleteNamespaceRequest.SerializeToString,
response_deserializer=containerd_dot_vendor_dot_google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class NamespacesServicer(object):
    """Namespaces provides the ability to manipulate containerd namespaces.
    All objects in the system are required to be a member of a namespace. If a
    namespace is deleted, all objects, including containers, images and
    snapshots, will be deleted, as well.
    Unless otherwise noted, operations in containerd apply only to the namespace
    supplied per request.
    I hope this goes without saying, but namespaces are themselves NOT
    namespaced.
    """

    def _abort_unimplemented(self, context):
        # Shared stub behaviour for every RPC: mark the call UNIMPLEMENTED on
        # the gRPC context and raise, exactly as generated servicer stubs do.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Get(self, request, context):
        # Override in a concrete servicer; the base class only aborts.
        self._abort_unimplemented(context)

    def List(self, request, context):
        # Override in a concrete servicer; the base class only aborts.
        self._abort_unimplemented(context)

    def Create(self, request, context):
        # Override in a concrete servicer; the base class only aborts.
        self._abort_unimplemented(context)

    def Update(self, request, context):
        # Override in a concrete servicer; the base class only aborts.
        self._abort_unimplemented(context)

    def Delete(self, request, context):
        # Override in a concrete servicer; the base class only aborts.
        self._abort_unimplemented(context)
def add_NamespacesServicer_to_server(servicer, server):
    """Register all Namespaces RPC handlers from servicer on server."""
    _ns_pb2 = containerd_dot_services_dot_namespaces_dot_v1_dot_namespace__pb2
    _empty_pb2 = containerd_dot_vendor_dot_google_dot_protobuf_dot_empty__pb2
    # (method name, request deserializer, response serializer) per RPC.
    rpc_specs = [
        ('Get',
         _ns_pb2.GetNamespaceRequest.FromString,
         _ns_pb2.GetNamespaceResponse.SerializeToString),
        ('List',
         _ns_pb2.ListNamespacesRequest.FromString,
         _ns_pb2.ListNamespacesResponse.SerializeToString),
        ('Create',
         _ns_pb2.CreateNamespaceRequest.FromString,
         _ns_pb2.CreateNamespaceResponse.SerializeToString),
        ('Update',
         _ns_pb2.UpdateNamespaceRequest.FromString,
         _ns_pb2.UpdateNamespaceResponse.SerializeToString),
        ('Delete',
         _ns_pb2.DeleteNamespaceRequest.FromString,
         _empty_pb2.Empty.SerializeToString),
    ]
    rpc_method_handlers = {
        name: grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=deserializer,
            response_serializer=serializer,
        )
        for name, deserializer, serializer in rpc_specs
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'containerd.services.namespaces.v1.Namespaces', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| [
"silvano.cirujano-cuesta@siemens.com"
] | silvano.cirujano-cuesta@siemens.com |
406a6a5350cfcd2121bd46613212bf6cb3324b4b | 5c7887eedad07ab764ad247ba6b4e9cd73675993 | /embeddings/words.py | ecfaf38507836a43f0c7f537ad5f2091d25a00d2 | [
"MIT"
class IndexWords:
    """Bidirectional word <-> integer-id mapping for feeding text to a model.

    The vocabulary can be built either from a list of sentences or loaded
    from a plain-text file with one word per line. The padding word always
    receives id 0 when the vocabulary is built from sentences.
    """

    def __init__(self, sentences=None, vocabulary_file='', pad_word='<s>'):
        """
        :sentences: list of sentences providing the vocabulary (optional).
        :vocabulary_file: path to a pre-built vocabulary file (optional).
        :pad_word: token used for padding; indexed first (id 0).
        """
        # BUG FIX: the original default was a mutable `sentences=[]`.
        self.id2word = []
        self.word2id = dict()
        self.pad_word = pad_word
        self.vocabulary_file = vocabulary_file
        if sentences:
            self.__create_vocabulary_from_sentences(sentences)
        elif self.vocabulary_file:
            self.__load_vocabulary()

    @property
    def size(self):
        """Return the number of entries of the vocabulary."""
        return len(self.id2word)

    def __add(self, token):
        # Assign the next free id to token and record it in both maps.
        self.word2id[token] = self.size
        self.id2word.append(token)

    def __create_index(self, vocabulary):
        """
        :vocabulary: an ordered iterable of words to index.
        """
        for word in vocabulary:
            self.__add(word)

    def __load_vocabulary(self):
        """Load the vocabulary from a file (one word per line)."""
        with open(self.vocabulary_file, 'r') as vocab:
            vocabulary = (line.rstrip() for line in vocab)
            self.__create_index(vocabulary)

    def __create_vocabulary_from_sentences(self, sentences):
        """Build the vocabulary from whitespace-split sentence tokens."""
        vocab = set()
        for sent in sentences:
            vocab.update(sent.split(" "))
        # Guard against the pad word also appearing in the sentences, which
        # would otherwise be indexed twice.
        vocab.discard(self.pad_word)
        vocabulary = [self.pad_word] + list(vocab)
        # BUG FIX: the original indexed the raw `vocab` set, so `vocabulary`
        # (and with it the pad word) was built but never used.
        self.__create_index(vocabulary)

    def get_vocabulary(self):
        """Return the full id-ordered vocabulary list."""
        return self.id2word

    def get_word(self, idx):
        """
        :idx: return the word at that id.
        Returns '' when idx is out of range (the original raised IndexError,
        contradicting its own docstring).
        """
        if 0 <= idx < self.size:
            return self.id2word[idx]
        return ''

    def get_index(self, token):
        """
        :token: return the id of a word, or -1 if it is unknown.
        """
        return self.word2id.get(token, -1)

    def is_in_vocab(self, token):
        # BUG FIX: the original referenced the undefined name `word`,
        # raising NameError on every call. Dict lookup is also O(1).
        return token in self.word2id
"Jonathan.Mutal@unige.ch"
] | Jonathan.Mutal@unige.ch |
342d7838454b684e37cb8847a8e2eb7083a265e8 | bae75bf1de75fb1b76e19b0d32c778e566de570a | /smodels/test/testFiles/scanExample/smodels-output/100488230.slha.py | 61b10a9b406b2f585c269025d1c544acdcb3f737 | [] | no_license | andlessa/RDM | 78ae5cbadda1875c24e1bb726096b05c61627249 | ac6b242871894fee492e089d378806c2c2e7aad8 | refs/heads/master | 2023-08-16T00:47:14.415434 | 2021-09-21T20:54:25 | 2021-09-21T20:54:25 | 228,639,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,108 | py | smodelsOutput = {'ExptRes': [{'AnalysisSqrts (TeV)': 8, 'chi2': -0.01262596850888705, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.242, 'likelihood': 0.24134326901519393, 'AnalysisID': 'ATLAS-CONF-2013-093', 'upper limit (fb)': 0.21, 'theory prediction (fb)': 1.9895558829990775e-05, 'lumi (fb-1)': 20.3, 'TxNames': ['T2tt'], 'DataSetID': 'SRBh'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.080725933861111418, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.517, 'likelihood': 0.080915004649873834, 'AnalysisID': 'ATLAS-CONF-2013-053', 'upper limit (fb)': 0.45, 'theory prediction (fb)': 4.641807353583199e-05, 'lumi (fb-1)': 20.1, 'TxNames': ['T2tt'], 'DataSetID': 'SRA mCT250'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 1.3268179470754997, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.235, 'likelihood': 0.082051203901957165, 'AnalysisID': 'ATLAS-CONF-2013-062', 'upper limit (fb)': 0.38, 'theory prediction (fb)': 0.00014170357283113728, 'lumi (fb-1)': 20.3, 'TxNames': ['T2tt'], 'DataSetID': 'incHL3j_m'}, {'AnalysisSqrts (TeV)': 8.0, 'chi2': 0.18071342418780731, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 1.23, 'likelihood': 0.030109490456771158, 'AnalysisID': 'ATLAS-SUSY-2013-04', 'upper limit (fb)': 1.04, 
'theory prediction (fb)': 0.00015365831214172594, 'lumi (fb-1)': 20.3, 'TxNames': ['T2tt'], 'DataSetID': 'GtGrid_SR_8ej50_2ibjet'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.11881841528273665, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.292, 'likelihood': 0.19205573525299049, 'AnalysisID': 'ATLAS-CONF-2013-061', 'upper limit (fb)': 0.23, 'theory prediction (fb)': 0.00016085993231827376, 'lumi (fb-1)': 20.1, 'TxNames': ['T2tt'], 'DataSetID': 'SR-0l-4j-A'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.15264247723847224, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.303, 'likelihood': 0.16330613782184344, 'AnalysisID': 'ATLAS-CONF-2013-048', 'upper limit (fb)': 0.23, 'theory prediction (fb)': 0.00016367804967709967, 'lumi (fb-1)': 20.3, 'TxNames': ['T2tt'], 'DataSetID': 'SR M120'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.51517066264090305, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.35, 'likelihood': 0.090423569611299118, 'AnalysisID': 'ATLAS-CONF-2013-037', 'upper limit (fb)': 0.4, 'theory prediction (fb)': 0.0006599393876471776, 'lumi (fb-1)': 20.7, 'TxNames': ['T2tt'], 'DataSetID': 'SRtN3'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.7460827902608741, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 0.239, 'likelihood': 0.18762627815864522, 'AnalysisID': 'ATLAS-CONF-2013-024', 'upper limit (fb)': 0.19, 'theory prediction (fb)': 0.0007758367207617888, 'lumi (fb-1)': 20.5, 'TxNames': ['T2tt'], 'DataSetID': 'SR3: MET > 350'}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.09139882929374292, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 0.463, 'likelihood': 0.091787344807384222, 'AnalysisID': 'ATLAS-CONF-2013-054', 'upper limit (fb)': 0.5, 'theory prediction (fb)': 
0.0013047848687137208, 'lumi (fb-1)': 20.3, 'TxNames': ['T2', 'T2tt'], 'DataSetID': '7j80 flavor 0 b-jets'}, {'AnalysisSqrts (TeV)': 13, 'chi2': 0.035181360747816408, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 1.26, 'likelihood': 0.34814937255663714, 'AnalysisID': 'ATLAS-SUSY-2015-02', 'upper limit (fb)': 1.26, 'theory prediction (fb)': 0.012020001372145027, 'lumi (fb-1)': 3.2, 'TxNames': ['T2tt'], 'DataSetID': 'SR2'}, {'AnalysisSqrts (TeV)': 8, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-PAS-SUS-13-023', 'upper limit (fb)': 4.488370799999999, 'theory prediction (fb)': 0.034444562449142735, 'lumi (fb-1)': 18.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-13-004', 'upper limit (fb)': 6.707422400000001, 'theory prediction (fb)': 0.034444562449142735, 'lumi (fb-1)': 19.3, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-13-019', 'upper limit (fb)': 28.95454, 'theory prediction (fb)': 0.034444562449142735, 'lumi (fb-1)': 19.5, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8, 'chi2': 2.0775669740848599, 'dataType': 'efficiencyMap', 'Mass (GeV)': [[435.0, 321.4], [435.0, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': 0.8395, 'likelihood': 0.018118314232756384, 'AnalysisID': 'ATLAS-SUSY-2014-03', 'upper limit (fb)': 0.5018, 'theory prediction (fb)': 0.0425236728155903, 'lumi (fb-1)': 20.3, 'TxNames': ['TScharm'], 'DataSetID': 'mCT150'}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 340.04999999999995], [808.3, 340.04999999999995]], 
'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'ATLAS-SUSY-2015-02', 'upper limit (fb)': 103.54699999999993, 'theory prediction (fb)': 0.2796967836994387, 'lumi (fb-1)': 3.2, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'ATLAS-SUSY-2015-02', 'upper limit (fb)': 77.06399999999996, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 3.2, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-033', 'upper limit (fb)': 16.03673333333333, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 35.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-036', 'upper limit (fb)': 9.556988800000001, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 35.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-049', 'upper limit (fb)': 13.280713076923078, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 35.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13.0, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-050', 'upper limit (fb)': 14.412808000000002, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 35.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 
321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-051', 'upper limit (fb)': 10.6244916, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 35.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[808.3, 321.4], [808.3, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-17-001', 'upper limit (fb)': 35.44316, 'theory prediction (fb)': 0.3496303949578995, 'lumi (fb-1)': 35.9, 'TxNames': ['T2tt'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8.0, 'chi2': 0.0046844499378386195, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 0.743, 'likelihood': 0.050115430028928237, 'AnalysisID': 'CMS-SUS-13-012', 'upper limit (fb)': 1.21, 'theory prediction (fb)': 0.5057031943004956, 'lumi (fb-1)': 19.5, 'TxNames': ['T2', 'T2tt', 'T6bbWWoff'], 'DataSetID': '3NJet6_1000HT1250_600MHTinf'}, {'AnalysisSqrts (TeV)': 8.0, 'dataType': 'upperLimit', 'Mass (GeV)': [[1233.6, 321.4], [1233.6, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'ATLAS-SUSY-2013-02', 'upper limit (fb)': 4.1627674417777785, 'theory prediction (fb)': 0.5683519371938481, 'lumi (fb-1)': 20.3, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8, 'chi2': 0.047123556716875459, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 2.1, 'likelihood': 0.018033175934649193, 'AnalysisID': 'ATLAS-CONF-2013-047', 'upper limit (fb)': 2.52, 'theory prediction (fb)': 0.8775048582679014, 'lumi (fb-1)': 20.3, 'TxNames': ['T1', 'T1bbqq', 'T2', 'T2tt'], 'DataSetID': 'A Medium'}, {'AnalysisSqrts (TeV)': 8.0, 'chi2': 2.3816461538326896, 'dataType': 'efficiencyMap', 'Mass (GeV)': None, 'maxcond': 0.0, 'expected upper limit (fb)': 1.5124, 'likelihood': 0.0078918609084519288, 'AnalysisID': 'ATLAS-SUSY-2013-02', 'upper limit (fb)': 1.8181, 'theory prediction (fb)': 1.6613836669444895, 
'lumi (fb-1)': 20.3, 'TxNames': ['T1', 'T2'], 'DataSetID': 'SR2jt'}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[1233.6, 321.4], [1233.6, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-033', 'upper limit (fb)': 6.181469576852964, 'theory prediction (fb)': 4.941443621858816, 'lumi (fb-1)': 35.9, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[1233.6, 321.4], [1233.6, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-036', 'upper limit (fb)': 4.1513288, 'theory prediction (fb)': 4.941443621858816, 'lumi (fb-1)': 35.9, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8.0, 'dataType': 'upperLimit', 'Mass (GeV)': [[356.00988231550116, 321.4], [357.072205314013, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'ATLAS-SUSY-2013-02', 'upper limit (fb)': 5701.500237088296, 'theory prediction (fb)': 7.728246211660869, 'lumi (fb-1)': 20.3, 'TxNames': ['T1'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8.0, 'dataType': 'upperLimit', 'Mass (GeV)': [[438.1951789713725, 321.4], [438.3774682703211, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'ATLAS-SUSY-2013-02', 'upper limit (fb)': 967.6734743323401, 'theory prediction (fb)': 871.0003509943216, 'lumi (fb-1)': 20.3, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8.0, 'dataType': 'upperLimit', 'Mass (GeV)': [[438.12625289533446, 321.4], [438.642216742945, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-13-012', 'upper limit (fb)': 1466.966818632756, 'theory prediction (fb)': 935.8148705756644, 'lumi (fb-1)': 19.5, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 8, 'dataType': 'upperLimit', 'Mass (GeV)': [[437.5209910776753, 321.4], [439.33102549677807, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 
'CMS-SUS-13-019', 'upper limit (fb)': 1061.9665100007187, 'theory prediction (fb)': 1160.4935751426733, 'lumi (fb-1)': 19.5, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[438.3496031120908, 321.4], [438.757496342467, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-033', 'upper limit (fb)': 399.35386729516733, 'theory prediction (fb)': 4591.861877556004, 'lumi (fb-1)': 35.9, 'TxNames': ['T2'], 'DataSetID': None}, {'AnalysisSqrts (TeV)': 13, 'dataType': 'upperLimit', 'Mass (GeV)': [[438.3496031120908, 321.4], [438.757496342467, 321.4]], 'maxcond': 0.0, 'expected upper limit (fb)': None, 'AnalysisID': 'CMS-SUS-16-036', 'upper limit (fb)': 514.2516169165423, 'theory prediction (fb)': 4591.861877556004, 'lumi (fb-1)': 35.9, 'TxNames': ['T2'], 'DataSetID': None}], 'OutputStatus': {'minmassgap': 5.0, 'input file': '../data/Bino_excluded_slha/100488230.slha', 'decomposition status': 1, 'warnings': 'Input file ok', 'ncpus': -1, 'maxcond': 0.2, 'smodels version': '1.1.1.post3', 'database version': '1.1.2', 'sigmacut': 0.03, 'file status': 1}, 'Long Cascades': [{'weight (fb)': 636.8858663515357, 'sqrts (TeV)': 13.0, 'mother PIDs': [[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {'weight (fb)': 546.6288974698384, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000002, 1000002], [1000002, 1000004]]}, {'weight (fb)': 394.6239937857626, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000002, 1000004], [1000004, 1000004]]}, {'weight (fb)': 391.487766702734, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000001, 1000001], [1000001, 1000003], [1000003, 1000003]]}, {'weight (fb)': 299.66172224145225, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000001, 1000002], [1000002, 1000003]]}, {'weight (fb)': 131.62539640945383, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000002, 2000001], [1000002, 2000003]]}, {'weight (fb)': 73.16356174908559, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000001, 2000001], [1000001, 
2000003], [1000003, 2000001], [1000003, 2000003]]}, {'weight (fb)': 30.624523126002337, 'sqrts (TeV)': 13.0, 'mother PIDs': [[2000001, 2000002], [2000002, 2000003]]}, {'weight (fb)': 13.94121696982233, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000001, 1000004], [1000003, 1000004]]}, {'weight (fb)': 13.793056107933312, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000002, 2000002]]}], 'Missed Topologies': [{'weight (fb)': 1040.8566993139923, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[jet,jet],[jet,jet]]]'}, {'weight (fb)': 540.876223519754, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[jet,jet]]]'}, {'weight (fb)': 298.78807678620154, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[jet,jet],[l,nu]]]'}, {'weight (fb)': 298.2006301817304, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[l,nu],[jet,jet]]]'}, {'weight (fb)': 147.30767131214859, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[jet,jet],[nu,ta]]]'}, {'weight (fb)': 145.60259457504057, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[nu,ta],[jet,jet]]]'}, {'weight (fb)': 139.21146559705483, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[l,nu]]]'}, {'weight (fb)': 96.36675859548866, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[l,nu],[l,nu]]]'}, {'weight (fb)': 86.6200993215025, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[photon]]]'}, {'weight (fb)': 68.68906468596393, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet],[nu,ta]]]'}], 'Asymmetric Branches': [{'weight (fb)': 532.9335789742406, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000002, 1000002], [1000002, 1000004], [1000004, 1000004]]}, {'weight (fb)': 172.3783326016151, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000001, 1000002], [1000001, 1000004], [1000002, 1000003], [1000003, 1000004]]}, {'weight (fb)': 90.56041427502541, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000001, 1000001], [1000001, 1000003], [1000003, 1000003]]}, {'weight (fb)': 64.90475182488875, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000002, 2000001], [1000002, 2000003], [1000004, 
2000001], [1000004, 2000003]]}, {'weight (fb)': 50.11555613127241, 'sqrts (TeV)': 13.0, 'mother PIDs': [[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {'weight (fb)': 18.454477729999994, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000022, 1000024]]}, {'weight (fb)': 17.826953089865842, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000023, 1000024]]}, {'weight (fb)': 12.383551689803406, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000022, 1000023]]}, {'weight (fb)': 11.485848817914249, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000022, 2000001], [1000022, 2000003]]}, {'weight (fb)': 10.472686420984749, 'sqrts (TeV)': 13.0, 'mother PIDs': [[1000024, 1000024]]}], 'Outside Grid': [{'weight (fb)': 88.48133368234193, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet]]]'}, {'weight (fb)': 1.8492721985478566, 'sqrts (TeV)': 13.0, 'element': '[[[b],[jet,jet]],[[b],[l,nu]]]'}, {'weight (fb)': 1.028127629776797, 'sqrts (TeV)': 13.0, 'element': '[[[l,nu]],[[l,nu]]]'}, {'weight (fb)': 0.9124591650782516, 'sqrts (TeV)': 13.0, 'element': '[[[b],[jet,jet]],[[b],[nu,ta]]]'}, {'weight (fb)': 0.7673101098593308, 'sqrts (TeV)': 13.0, 'element': '[[[jet]],[[jet,jet]]]'}, {'weight (fb)': 0.5315720611960791, 'sqrts (TeV)': 13.0, 'element': '[[[l,l]],[[l,nu]]]'}, {'weight (fb)': 0.30837059437025616, 'sqrts (TeV)': 13.0, 'element': '[[[b],[l,nu]],[[b],[l,nu]]]'}, {'weight (fb)': 0.3043095281427132, 'sqrts (TeV)': 13.0, 'element': '[[[b],[l,nu]],[[b],[nu,ta]]]'}, {'weight (fb)': 0.20401965403968503, 'sqrts (TeV)': 13.0, 'element': '[[[jet],[photon]],[[jet],[photon]]]'}, {'weight (fb)': 0.07507548596482914, 'sqrts (TeV)': 13.0, 'element': '[[[b],[nu,ta]],[[b],[nu,ta]]]'}]} | [
"lessa.a.p@gmail.com"
] | lessa.a.p@gmail.com |
362880e0fbf0290665972c5cd1dc295ac48f3de4 | 4ac17d029103e277c2f5776fa34a458e2456585e | /network/models/pranet/pranetv0.py | 635e0412271bdcdd23aeea9cffcf9ea38dc76d5a | [] | no_license | taintpro98/polyp_segmentation | 2784ed243a616c2ab9384f088b0e47b61dbfeaba | 91086ed1d0a4a2d2411aa3d58d6fd93caf628b2e | refs/heads/master | 2023-06-03T12:55:36.591420 | 2021-06-18T06:22:08 | 2021-06-18T06:22:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,332 | py | from .pranet import BasicConv2d, RFB_modified, aggregation
from torch.nn import BatchNorm2d, BatchNorm1d
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...encoders import res2net50_v1b_26w_4s
from ...contextagg import DualGCNHead, DualGCN
class PraNetv0(nn.Module):
    """PraNet variant with DualGCN context aggregation.

    Inserts DualGCN graph-reasoning modules after the three deepest Res2Net
    stages before the RFB / partial-decoder / reverse-attention pipeline of
    the original PraNet. forward() returns four side-output maps, each
    upsampled to the input resolution for deep supervision.
    """

    def __init__(self, channel=32):
        """
        :channel: common channel width of the RFB outputs fed to the
                  partial decoder.
        """
        super(PraNetv0, self).__init__()
        # ---- ResNet Backbone ----
        print("PraNetv0")
        self.resnet = res2net50_v1b_26w_4s(pretrained=True)
        # NOTE(review): `self.head` is constructed but never used in
        # forward(); kept so existing checkpoints keep loading.
        self.head = DualGCNHead(2048, 512, 1)
        # Graph-convolution context modules, one per backbone stage depth.
        self.dualgcn_2 = DualGCN(512)
        self.dualgcn_3 = DualGCN(1024)
        self.dualgcn_4 = DualGCN(2048)
        # ---- Receptive Field Block like module ----
        self.rfb2_1 = RFB_modified(512, channel)
        self.rfb3_1 = RFB_modified(1024, channel)
        self.rfb4_1 = RFB_modified(2048, channel)
        # ---- Partial Decoder ----
        self.agg1 = aggregation(channel)
        # ---- reverse attention branch 4 ----
        self.ra4_conv1 = BasicConv2d(2048, 256, kernel_size=1)
        self.ra4_conv2 = BasicConv2d(256, 256, kernel_size=5, padding=2)
        self.ra4_conv3 = BasicConv2d(256, 256, kernel_size=5, padding=2)
        self.ra4_conv4 = BasicConv2d(256, 256, kernel_size=5, padding=2)
        self.ra4_conv5 = BasicConv2d(256, 1, kernel_size=1)
        # ---- reverse attention branch 3 ----
        self.ra3_conv1 = BasicConv2d(1024, 64, kernel_size=1)
        self.ra3_conv2 = BasicConv2d(64, 64, kernel_size=3, padding=1)
        self.ra3_conv3 = BasicConv2d(64, 64, kernel_size=3, padding=1)
        self.ra3_conv4 = BasicConv2d(64, 1, kernel_size=3, padding=1)
        # ---- reverse attention branch 2 ----
        self.ra2_conv1 = BasicConv2d(512, 64, kernel_size=1)
        self.ra2_conv2 = BasicConv2d(64, 64, kernel_size=3, padding=1)
        self.ra2_conv3 = BasicConv2d(64, 64, kernel_size=3, padding=1)
        self.ra2_conv4 = BasicConv2d(64, 1, kernel_size=3, padding=1)

    def forward(self, x):
        """Return (lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2),
        four single-channel maps at input resolution (spatial sizes below
        assume a 352x352 input)."""
        x = self.resnet.conv1(x)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)      # bs, 64, 88, 88
        # ---- low-level features ----
        x1 = self.resnet.layer1(x)      # bs, 256, 88, 88
        x2 = self.resnet.layer2(x1)     # bs, 512, 44, 44
        x3 = self.resnet.layer3(x2)     # bs, 1024, 22, 22
        x4 = self.resnet.layer4(x3)     # bs, 2048, 11, 11
        # Enrich the three deepest stages with graph-reasoning context.
        x2 = self.dualgcn_2(x2)
        x3 = self.dualgcn_3(x3)
        x4 = self.dualgcn_4(x4)
        x2_rfb = self.rfb2_1(x2)        # channel --> 32 [bs, 32, 44, 44]
        x3_rfb = self.rfb3_1(x3)        # channel --> 32 [bs, 32, 22, 22]
        x4_rfb = self.rfb4_1(x4)        # channel --> 32 [bs, 32, 11, 11]
        ra5_feat = self.agg1(x4_rfb, x3_rfb, x2_rfb)    # [bs, 1, 44, 44]
        lateral_map_5 = F.interpolate(
            ra5_feat, scale_factor=8, mode="bilinear"
        )  # NOTES: Sup-1 (bs, 1, 44, 44) -> (bs, 1, 352, 352)
        # ---- reverse attention branch_4 ----
        crop_4 = F.interpolate(ra5_feat, scale_factor=0.25, mode="bilinear")
        # Reverse attention: 1 - sigmoid(prediction) highlights the missed
        # regions, which are then refined against the stage-4 features.
        x = -1 * (torch.sigmoid(crop_4)) + 1
        x = x.expand(-1, 2048, -1, -1).mul(x4)
        x = self.ra4_conv1(x)
        x = F.relu(self.ra4_conv2(x))
        x = F.relu(self.ra4_conv3(x))
        x = F.relu(self.ra4_conv4(x))
        ra4_feat = self.ra4_conv5(x)
        x = ra4_feat + crop_4           # residual refinement of the prediction
        lateral_map_4 = F.interpolate(
            x, scale_factor=32, mode="bilinear"
        )  # NOTES: Sup-2 (bs, 1, 11, 11) -> (bs, 1, 352, 352)
        # ---- reverse attention branch_3 ----
        crop_3 = F.interpolate(x, scale_factor=2, mode="bilinear")
        x = -1 * (torch.sigmoid(crop_3)) + 1
        x = x.expand(-1, 1024, -1, -1).mul(x3)
        x = self.ra3_conv1(x)
        x = F.relu(self.ra3_conv2(x))
        x = F.relu(self.ra3_conv3(x))
        ra3_feat = self.ra3_conv4(x)
        x = ra3_feat + crop_3
        lateral_map_3 = F.interpolate(
            x, scale_factor=16, mode="bilinear"
        )  # NOTES: Sup-3 (bs, 1, 22, 22) -> (bs, 1, 352, 352)
        # ---- reverse attention branch_2 ----
        crop_2 = F.interpolate(x, scale_factor=2, mode="bilinear")
        x = -1 * (torch.sigmoid(crop_2)) + 1
        x = x.expand(-1, 512, -1, -1).mul(x2)
        x = self.ra2_conv1(x)
        x = F.relu(self.ra2_conv2(x))
        x = F.relu(self.ra2_conv3(x))
        ra2_feat = self.ra2_conv4(x)
        x = ra2_feat + crop_2
        lateral_map_2 = F.interpolate(
            x, scale_factor=8, mode="bilinear"
        )  # NOTES: Sup-4 (bs, 1, 44, 44) -> (bs, 1, 352, 352)
        return lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2

    def restore_weights(self, restore_from):
        """Load model weights from a checkpoint file and return its stored
        learning rate."""
        # BUG FIX: the original called torch.load(restore_from) twice,
        # deserializing the whole checkpoint from disk a second time just
        # to read the learning rate. Load once and reuse it.
        checkpoint = torch.load(restore_from)
        self.load_state_dict(checkpoint["model_state_dict"], strict=False)
        return checkpoint["lr"]
if __name__ == "__main__":
    # Smoke test: push one dummy 352x352 RGB batch through the network on GPU.
    model = PraNetv0().cuda()
    dummy_batch = torch.randn(1, 3, 352, 352).cuda()
    outputs = model(dummy_batch)
| [
"hunglhp1998@gmail.com"
] | hunglhp1998@gmail.com |
e5878d1630a04a9819cd721e83e981494c4ab280 | 4deaf8f4f1a91fc12c3a3d10bc8794fa62f54657 | /scroll.py | 12b341c435a70848dc667627818fd7ffcd2411ea | [] | no_license | AdopaX/Manimouse | 03554d81ef5a1a8576ec7041d361541dff0c5096 | 3b02fca16b93be98af093274e6249465b9ee2295 | refs/heads/master | 2021-06-14T21:35:03.308346 | 2017-01-29T17:27:14 | 2017-01-29T17:27:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,119 | py | # All packages needed for the program are imported ahead
import cv2
import numpy as np
import pyautogui
import time
# Some global variables, and others that need prior initialization, are set up here.
# HSV colour ranges ([lower, upper] bounds) fed to the cv2.inRange calls in makeMask()
blue_range = np.array([[88,78,20],[128,255,255]])
yellow_range = np.array([[21,70,80],[61,255,255]])
red_range = np.array([[158,85,72],[180 ,255,255]])
# Prior initialization of all centers for safety (centre of a 480x640 frame -- TODO confirm capture size)
b_cen, y_pos, r_cen = [240,320],[240,320],[240,320]
cursor = [960,540]  # assumes a 1920x1080 screen -- TODO confirm
# Accepted contour-area (min, max) ranges per colour, used by drawCentroid() to filter noise
r_area = [00,1700]
b_area = [00,1700]
y_area = [00,1700]
# Rectangular kernel for eroding and dilating the mask for primary noise removal
kernel = np.ones((7,7),np.uint8)
# Status flags: 'perform' enables mouse simulation, 'showCentroid' enables centroid drawing
perform = False
showCentroid = False
cv2.namedWindow('Frame')
# No-op callback for cv2.createTrackbar(): OpenCV insists on a callable,
# but this program polls trackbar positions itself, so nothing happens here.
def nothing(x):
    return None
# To bring to the top the contours with largest area in the specified range
# Used in drawContour()
def swap(array, i, j):
    """Swap elements i and j of array in place."""
    # Pythonic tuple swap: no temporary variable needed.
    array[i], array[j] = array[j], array[i]
# Distance between two centroids
def distance(c1, c2):
    """Return the Euclidean distance between points c1 and c2 ((x, y) pairs)."""
    # Expression form replaces the original nested pow() calls.
    return ((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2) ** 0.5
# Dispatch a key press to the matching state toggle / recalibration action.
def changeStatus(key):
    global perform
    global showCentroid
    global yellow_range, red_range, blue_range

    if key == ord('p'):
        # Toggle mouse simulation on/off.
        perform = not perform
        # Parenthesized single-arg print works identically on Python 2 and 3.
        print('Mouse simulation ON...' if perform else 'Mouse simulation OFF...')
    elif key == ord('c'):
        # Toggle drawing of the detected centroids.
        showCentroid = not showCentroid
        print('Showing Centroids...' if showCentroid else 'Not Showing Centroids...')
    elif key == ord('r'):
        # Re-enter interactive colour calibration for all three markers.
        print('**********************************************************************')
        print(' You have entered recalibration mode.')
        print(' Use the trackbars to calibrate and press SPACE when done.')
        print(' Press D to use the default settings')
        print('**********************************************************************')
        yellow_range = calibrateColor('Yellow', yellow_range)
        red_range = calibrateColor('Red', red_range)
        blue_range = calibrateColor('Blue', blue_range)
# Build a clean binary mask isolating one colour from an HSV frame.
def makeMask(hsv_frame, color_Range):
    """Threshold hsv_frame to color_Range and denoise the result."""
    # Keep only the pixels inside the calibrated HSV range.
    raw = cv2.inRange(hsv_frame, color_Range[0], color_Range[1])
    # Morphological opening (erode then dilate) strips small noise blobs.
    opened = cv2.dilate(cv2.erode(raw, kernel, iterations=1),
                        kernel, iterations=1)
    return opened
# Contours on the mask are detected.. Only those lying in the previously set area
# range are filtered out and the centroid of the largest of these is drawn and returned
def drawCentroid(vid, color_area, mask, showCentroid):
    """Return the centroid (x, y) of the largest valid contour in mask,
    drawing it on vid when showCentroid is set; (-1, -1) on failure."""
    _, contour, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Pick the largest contour whose area falls inside the accepted range.
    # (The original partial-sort/swap machinery could still select a contour
    # whose area was outside the range when no valid one existed.)
    best = None
    best_area = 0
    for cnt in contour:
        a = cv2.contourArea(cnt)
        if color_area[0] < a < color_area[1] and a > best_area:
            best_area = a
            best = cnt

    if best is not None:
        # Centroid via image moments: (m10/m00, m01/m00).
        M = cv2.moments(best)
        if M['m00'] != 0:
            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
            if showCentroid:
                cv2.circle(vid, center, 5, (0, 0, 255), -1)
            return center
    # BUG FIX: the original implicitly returned None when contours existed
    # but m00 was zero; callers expect the (-1, -1) error tuple.
    return (-1, -1)
# This function helps in filtering the required colored objects from the background
def calibrateColor(color, def_range):
    """Interactively calibrate the HSV range for one marker colour.

    Opens a 'Calibrate <color>' window with Hue/Sat/Val trackbars seeded
    from *def_range* and shows the live, morphologically-cleaned mask.
    SPACE accepts the current sliders and returns a new 2x3 range array;
    'd' returns *def_range* unchanged.  Blocks until one of the two keys
    is pressed.  Uses the module-level cap, kernel and nothing() callback.
    """
    global kernel
    name = 'Calibrate '+ color
    cv2.namedWindow(name)
    # Hue slider stores the range centre; +/-20 is applied around it below.
    cv2.createTrackbar('Hue', name, def_range[0][0]+20, 180, nothing)
    cv2.createTrackbar('Sat', name, def_range[0][1] , 255, nothing)
    cv2.createTrackbar('Val', name, def_range[0][2] , 255, nothing)
    while(1):
        ret , frameinv = cap.read()
        frame=cv2.flip(frameinv ,1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        lower = np.array([hue-20,sat,val])
        upper = np.array([hue+20,255,255])
        mask = cv2.inRange(hsv, lower, upper)
        # Same opening as makeMask: preview exactly what detection will see.
        eroded = cv2.erode( mask, kernel, iterations=1)
        dilated = cv2.dilate( eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):
            cv2.destroyWindow(name)
            return np.array([[hue-20,sat,val],[hue+20,255,255]])
        elif k == ord('d'):
            cv2.destroyWindow(name)
            return def_range
'''
This function takes as input the center of yellow region (yc) and
the previous cursor position (pyp). The new cursor position is calculated
in such a way that the mean deviation for desired steady state is reduced.
'''
def setCursorPos(yc, pyp):
    """Smooth the cursor position.

    Blends the new yellow-marker centroid *yc* with the previous cursor
    position *pyp*.  Small movements (< 5 px on both axes) are damped
    heavily (70% weight on the old position) so the steady-state cursor
    does not jitter; larger movements track the marker almost directly
    (10% weight on the old position).  Returns a length-2 numpy array.
    """
    jitter_only = abs(yc[0] - pyp[0]) < 5 and abs(yc[1] - pyp[1]) < 5
    old_weight = .7 if jitter_only else .1
    return np.array([yc[0] + old_weight * (pyp[0] - yc[0]),
                     yc[1] + old_weight * (pyp[1] - yc[1])])
# Depending upon the relative positions of the three centroids, this function chooses whether
# the user desires free movement of cursor, left click, right click or dragging
def chooseAction(yp, rc, bc):
    """Classify the user's gesture from the three marker centroids.

    yp -- smoothed yellow (cursor) position; rc / bc -- red / blue
    centroids ((-1, -1) when not detected).  Returns a length-2 numpy
    *string* array [action, drag_flag] where action is one of 'move',
    'drag', 'left', 'right', 'up', 'down', or '-1' when red or blue is
    missing.  Uses the module-level distance() helper (defined elsewhere
    in this file).
    """
    # NOTE(review): numpy stores fixed-width strings here, so the
    # out[0] = -1 assignment in the error branch stores the STRING '-1';
    # the caller's `output[0] != -1` check is therefore always true.
    out = np.array(['move', 'false'])
    if rc[0]!=-1 and bc[0]!=-1:
        # All three markers pinched close together -> drag.
        if distance(yp,rc)<50 and distance(yp,bc)<50 and distance(rc,bc)<50 :
            out[0] = 'drag'
            out[1] = 'true'
            return out
        # Red and blue pinched -> left click.
        elif distance(rc,bc)<40:
            out[0] = 'left'
            return out
        # Yellow and red pinched -> right click.
        elif distance(yp,rc)<40:
            out[0] = 'right'
            return out
        # Red far below blue -> scroll down; blue far below red -> scroll up.
        elif distance(yp,rc)>40 and rc[1]-bc[1]>120:
            out[0] = 'down'
            return out
        elif bc[1]-rc[1]>110:
            out[0] = 'up'
            return out
        else:
            return out
    else:
        out[0] = -1
        return out
# Movement of cursor on screen, left click, right click,scroll up, scroll down
# and dragging actions are performed here based on value stored in 'action'.
def performAction( yp, rc, bc, action, drag, perform):
    """Execute one recognised gesture with pyautogui.

    Maps the camera-space cursor (active window x:110-590, y:120-390)
    onto the screen at a 4x scale -- (590-110)*4 = 1920 and
    (390-120)*4 = 1080, i.e. a 1920x1080 screen appears to be assumed,
    with an 8 px inset when the marker leaves the active window
    (TODO confirm against the target display).  'drag' enters a nested
    capture loop that holds the mouse button down until the three
    markers separate.  Does nothing unless *perform* is true.
    """
    if perform:
        cursor[0] = 4*(yp[0]-110)
        cursor[1] = 4*(yp[1]-120)
        if action == 'move':
            # Inside the active window: move freely; otherwise clamp to
            # the nearest screen edge/corner (8 px from the border).
            if yp[0]>110 and yp[0]<590 and yp[1]>120 and yp[1]<390:
                pyautogui.moveTo(cursor[0],cursor[1])
            elif yp[0]<110 and yp[1]>120 and yp[1]<390:
                pyautogui.moveTo( 8 , cursor[1])
            elif yp[0]>590 and yp[1]>120 and yp[1]<390:
                pyautogui.moveTo(1912, cursor[1])
            elif yp[0]>110 and yp[0]<590 and yp[1]<120:
                pyautogui.moveTo(cursor[0] , 8)
            elif yp[0]>110 and yp[0]<590 and yp[1]>390:
                pyautogui.moveTo(cursor[0] , 1072)
            elif yp[0]<110 and yp[1]<120:
                pyautogui.moveTo(8, 8)
            elif yp[0]<110 and yp[1]>390:
                pyautogui.moveTo(8, 1072)
            elif yp[0]>590 and yp[1]>390:
                pyautogui.moveTo(1912, 1072)
            else:
                pyautogui.moveTo(1912, 8)
        elif action == 'left':
            pyautogui.click(button = 'left')
        elif action == 'right':
            pyautogui.click(button = 'right')
            # Debounce: avoid repeated right-clicks on consecutive frames.
            time.sleep(0.3)
        elif action == 'up':
            pyautogui.scroll(5)
            # time.sleep(0.3)
        elif action == 'down':
            pyautogui.scroll(-5)
            # time.sleep(0.3)
        elif action == 'drag' and drag == 'true':
            global y_pos
            drag = 'false'
            pyautogui.mouseDown()
            # Nested capture loop: keep tracking and moving (recursive
            # 'move' call below) while the button is held down.
            while(1):
                k = cv2.waitKey(10) & 0xFF
                changeStatus(k)
                _, frameinv = cap.read()
                # flip horizontaly to get mirror image in camera
                frame = cv2.flip( frameinv, 1)
                hsv = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV)
                b_mask = makeMask( hsv, blue_range)
                r_mask = makeMask( hsv, red_range)
                y_mask = makeMask( hsv, yellow_range)
                py_pos = y_pos
                b_cen = drawCentroid( frame, b_area, b_mask, showCentroid)
                r_cen = drawCentroid( frame, r_area, r_mask, showCentroid)
                y_cen = drawCentroid( frame, y_area, y_mask, showCentroid)
                if py_pos[0]!=-1 and y_cen[0]!=-1:
                    y_pos = setCursorPos(y_cen, py_pos)
                performAction(y_pos, r_cen, b_cen, 'move', drag, perform)
                cv2.imshow('Frame', frame)
                # Markers separated -> drag gesture released.
                if distance(y_pos,r_cen)>60 or distance(y_pos,b_cen)>60 or distance(r_cen,b_cen)>60:
                    break
            pyautogui.mouseUp()
cap = cv2.VideoCapture(0)
print '**********************************************************************'
print ' You have entered calibration mode.'
print ' Use the trackbars to calibrate and press SPACE when done.'
print ' Press D to use the default settings.'
print '**********************************************************************'
yellow_range = calibrateColor('Yellow', yellow_range)
red_range = calibrateColor('Red', red_range)
blue_range = calibrateColor('Blue', blue_range)
print '**********************************************************************'
print ' Press P to turn ON and OFF mouse simulation.'
print ' Press C to display the centroid of various colours.'
print ' Press R to recalibrate color ranges.'
print ' Press ESC to exit.'
print '**********************************************************************'
# Main loop: grab a frame, locate the three colour markers, smooth the
# yellow (cursor) marker, classify the gesture and act on it.  ESC exits.
while(1):
    k = cv2.waitKey(10) & 0xFF
    changeStatus(k)   # handle mode-toggle keys (defined elsewhere in this file)
    _, frameinv = cap.read()
    # flip horizontaly to get mirror image in camera
    frame = cv2.flip( frameinv, 1)
    hsv = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV)
    b_mask = makeMask( hsv, blue_range)
    r_mask = makeMask( hsv, red_range)
    y_mask = makeMask( hsv, yellow_range)
    py_pos = y_pos
    b_cen = drawCentroid( frame, b_area, b_mask, showCentroid)
    r_cen = drawCentroid( frame, r_area, r_mask, showCentroid)
    y_cen = drawCentroid( frame, y_area, y_mask, showCentroid)
    # Only smooth when both the previous and the new position are valid.
    if py_pos[0]!=-1 and y_cen[0]!=-1 and y_pos[0]!=-1:
        y_pos = setCursorPos(y_cen, py_pos)
    output = chooseAction(y_pos, r_cen, b_cen)
    # NOTE(review): output is a numpy string array, so output[0] is a
    # string and this != -1 test is always true ('-1' != -1); the error
    # case still reaches performAction, which then matches no action.
    if output[0]!=-1:
        performAction(y_pos, r_cen, b_cen, output[0], output[1], perform)
    cv2.imshow('Frame', frame)
    if k == 27:
        break
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | AdopaX.noreply@github.com |
17570f54174509c08a7569382cdcdaf5f2c2349b | 177230d7823a5950dea301276c714a9a305b8aa0 | /contentBasedPlotDescription.py | 8f0312e8aa3c51b71769233d9805cc9454c4868c | [
"MIT"
] | permissive | szels/recommender_system | 19480c97aa51c38a1b4ff30fc188ff62691c39a7 | 06839a1e6d4c81cbffe5970f4d4fa60e363b5481 | refs/heads/master | 2020-05-03T17:25:57.588412 | 2019-04-02T20:06:14 | 2019-04-02T20:06:14 | 178,743,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | # read https://www.datacamp.com/community/tutorials/recommender-systems-python
import pandas as pd
# Load movies metadata
metadata = pd.read_csv('../data/movies_metadata.csv', low_memory=False)
#print metadata.head(3)
#Print plot overviews of the first 5 movies.
#print metadata['overview'].head()
#Import TfIdfVectorizer from scikit-learn
from sklearn.feature_extraction.text import TfidfVectorizer
#Define a TF-IDF Vectorizer Object. Remove all english stop words such as 'the', 'a'
tfidf = TfidfVectorizer(stop_words='english')
#Replace NaN with an empty string
metadata['overview'] = metadata['overview'].fillna('')
#Construct the required TF-IDF matrix by fitting and transforming the data
tfidf_matrix = tfidf.fit_transform(metadata['overview'])
#Output the shape of tfidf_matrix
#print tfidf_matrix.shape
# Import linear_kernel
from sklearn.metrics.pairwise import linear_kernel
# Compute the cosine similarity matrix.
# TfidfVectorizer L2-normalises each row by default, so the plain dot
# product computed by linear_kernel equals cosine similarity (and is
# cheaper than cosine_similarity).
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
#Construct a reverse map of indices and movie titles
# (drop_duplicates keeps the first row when two movies share a title)
indices = pd.Series(metadata.index, index=metadata['title']).drop_duplicates()
def get_recommendations(title, cosine_sim=cosine_sim):
# Get the index of the movie that matches the title
idx = indices[title]
# Get the pairwsie similarity scores of all movies with that movie
sim_scores = list(enumerate(cosine_sim[idx]))
# Sort the movies based on the similarity scores
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
# Get the scores of the 10 most similar movies
sim_scores = sim_scores[1:11]
# Get the movie indices
movie_indices = [i[0] for i in sim_scores]
# Return the top 10 most similar movies
return metadata['title'].iloc[movie_indices]
# print get_recommendations('The Dark Knight Rises')
#print get_recommendations('The Godfather') #
| [
"szels@gmx.de"
] | szels@gmx.de |
73f5ed3543f2d49ad1f6a7f276439bce2babf8af | 7fbb895133aa287007f09573d09fa2cfe796e89d | /examples/Multilabel/multilabel.py | 63dcebfcd333115b4ae784adb6ec77d747f39dd1 | [
"Apache-2.0"
] | permissive | xiaosongwang/tensorpack | cde523c656ae1094107151d8502be3d04b749dbf | b7e676c77866d4e86ec8b90507bcf63583255846 | refs/heads/master | 2021-01-19T01:10:46.737590 | 2017-05-08T19:35:01 | 2017-05-08T19:35:01 | 87,231,953 | 1 | 1 | null | 2017-04-04T20:30:16 | 2017-04-04T20:30:15 | null | UTF-8 | Python | false | false | 8,562 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: multilabel_v1.py
# Author: Xiaosong Wang <xswang82@gmail.com>
import cv2
import tensorflow as tf
import argparse
import numpy as np
from six.moves import zip
import os
import sys
from tensorpack import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
class Model(ModelDesc):
    """VGG-style (conv1_1 .. conv5_3) multi-label classifier for 14 labels.

    NOTE(review): _build_graph references side outputs b1..b5 below, but
    the `branch` helper and every `bN = branch(...)` line are commented
    out, so building the graph raises NameError as written.  Either the
    branch code must be re-enabled or the b1..b5 uses removed.
    """
    def _get_inputs(self):
        # NHWC float image batch plus a 14-way binary label vector per image.
        return [InputDesc(tf.float32, [None, None, None, 3], 'image'),
                InputDesc(tf.int32, [None, 14], 'multilabel')]
    def _build_graph(self, inputs):
        image, imageLabel = inputs
        # Per-channel mean subtraction (ImageNet BGR means, presumably
        # -- TODO confirm the dataset statistics).
        image = image - tf.constant([104, 116, 122], dtype='float32')
        # imageLabel = tf.expand_dims(imageLabel, 3, name='imageLabel4d')
        # def branch(name, l, up):
        #     with tf.variable_scope(name) as scope:
        #         l = Conv2D('convfc', l, 1, kernel_shape=1, nl=tf.identity,
        #                    use_bias=True,
        #                    W_init=tf.constant_initializer(),
        #                    b_init=tf.constant_initializer())
        #         while up != 1:
        #             l = BilinearUpSample('upsample{}'.format(up), l, 2)
        #             up = up / 2
        #         return l
        with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu):
            l = Conv2D('conv1_1', image, 64)
            l = Conv2D('conv1_2', l, 64)
            # b1 = branch('branch1', l, 1)
            l = MaxPooling('pool1', l, 2)
            l = Conv2D('conv2_1', l, 128)
            l = Conv2D('conv2_2', l, 128)
            # b2 = branch('branch2', l, 2)
            l = MaxPooling('pool2', l, 2)
            l = Conv2D('conv3_1', l, 256)
            l = Conv2D('conv3_2', l, 256)
            l = Conv2D('conv3_3', l, 256)
            # b3 = branch('branch3', l, 4)
            l = MaxPooling('pool3', l, 2)
            l = Conv2D('conv4_1', l, 512)
            l = Conv2D('conv4_2', l, 512)
            l = Conv2D('conv4_3', l, 512)
            # b4 = branch('branch4', l, 8)
            l = MaxPooling('pool4', l, 2)
            l = Conv2D('conv5_1', l, 512)
            l = Conv2D('conv5_2', l, 512)
            l = Conv2D('conv5_3', l, 512)
            # b5 = branch('branch5', l, 16)
        # NOTE(review): b1..b5 are undefined here (see class docstring).
        final_map = Conv2D('convfcweight',
                           tf.concat([b1, b2, b3, b4, b5], 3), 1, 1,
                           W_init=tf.constant_initializer(0.2),
                           use_bias=False, nl=tf.identity)
        costs = []
        # One sigmoid output + class-balanced cross-entropy per side
        # output and for the fused map.
        for idx, b in enumerate([b1, b2, b3, b4, b5, final_map]):
            output = tf.nn.sigmoid(b, name='output{}'.format(idx + 1))
            xentropy = multilabel_class_balanced_sigmoid_cross_entropy(
                b, imageLabel,
                name='xentropy{}'.format(idx + 1))
            costs.append(xentropy)
        # some magic threshold
        # (`output` here is the last loop value, i.e. the fused map)
        pred = tf.cast(tf.greater(output, 0.5), tf.int32, name='prediction')
        wrong = tf.cast(tf.not_equal(pred, imageLabel), tf.float32)
        wrong = tf.reduce_mean(wrong, name='train_error')
        if get_current_tower_context().is_training:
            # Decaying L2 weight-decay coefficient on all conv weights.
            wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),
                                              80000, 0.7, True)
            wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
            costs.append(wd_cost)
            add_param_summary(('.*/W', ['histogram']))   # monitor W
        self.cost = tf.add_n(costs, name='cost')
        add_moving_summary(costs + [wrong, self.cost])
    def _get_optimizer(self):
        # Adam with a tunable LR; scale down the fusion weights' gradient
        # and boost the conv5 block's gradient.
        lr = get_scalar_var('learning_rate', 1e-3, summary=True)
        opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)
        return optimizer.apply_grad_processors(
            opt, [gradproc.ScaleGradient(
                [('convfcweight.*', 0.1), ('conv5_.*', 5)])])
def get_data(name):
    """Build the ChestX-ray14 dataflow for split *name* ('train' or 'val').

    Binarises the label component at 0.5, then batches: shape-grouped
    batches of 8 with a prefetch process for training, single-image
    batches for validation.  The commented-out blocks are the BSDS500
    augmentation pipeline this file was adapted from.
    """
    isTrain = name == 'train'
    # ds = dataset.BSDS500(name, shuffle=True)
    ds = dataset.CHESTXRAY14(name, shuffle=True)
    # class CropMultiple16(imgaug.ImageAugmentor):
    #     def _get_augment_params(self, img):
    #         newh = img.shape[0] // 16 * 16
    #         neww = img.shape[1] // 16 * 16
    #         assert newh > 0 and neww > 0
    #         diffh = img.shape[0] - newh
    #         h0 = 0 if diffh == 0 else self.rng.randint(diffh)
    #         diffw = img.shape[1] - neww
    #         w0 = 0 if diffw == 0 else self.rng.randint(diffw)
    #         return (h0, w0, newh, neww)
    #
    #     def _augment(self, img, param):
    #         h0, w0, newh, neww = param
    #         return img[h0:h0 + newh, w0:w0 + neww]
    # if isTrain:
    #     # IMAGE_SHAPE = (480, 480)
    #     shape_aug = [
    #         # imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
    #         #                     aspect_ratio_thres=0.15),
    #         # imgaug.RotationAndCropValid(90),
    #         # CropMultiple16(),
    #         imgaug.Flip(horiz=True)
    #         # imgaug.Flip(vert=True)
    #         # imgaug.CenterCrop(IMAGE_SHAPE)
    #     ]
    # else:
    #     # the original image shape (321x481) in BSDS is not a multiple of 16
    #     IMAGE_SHAPE = (512, 512)
    #     shape_aug = [imgaug.CenterCrop(IMAGE_SHAPE)]
    # ds = AugmentImageComponents(ds, shape_aug, (0, 1))
    def f(m): # thresholding
        # Binarise labels in place: >= 0.5 -> 1, < 0.5 -> 0.
        m[m >= 0.50] = 1
        m[m < 0.50] = 0
        return m
    ds = MapDataComponent(ds, f, 1)
    if isTrain:
        # augmentors = [
        #     imgaug.Brightness(63, clip=False),
        #     imgaug.Contrast((0.4, 1.5)),
        # ]
        # ds = AugmentImageComponent(ds, augmentors)
        ds = BatchDataByShape(ds, 8, idx=0)
        ds = PrefetchDataZMQ(ds, 1)
    else:
        ds = BatchData(ds, 1)
    return ds
def view_data():
    """Debug helper: loop over the training dataflow forever, showing
    each image and its label component for one second apiece."""
    ds = RepeatedData(get_data('train'), -1)
    ds.reset_state()
    for ims, imageLabels in ds.get_data():
        for im, imageLabel in zip(ims, imageLabels):
            # Image sides must be multiples of 16 (four 2x poolings in Model).
            assert im.shape[0] % 16 == 0 and im.shape[1] % 16 == 0, im.shape
            cv2.imshow("im", im / 255.0)
            cv2.waitKey(1000)
            cv2.imshow("edge", imageLabel)
            cv2.waitKey(1000)
def get_config():
    """Build the tensorpack TrainConfig: 40x-stretched epochs, periodic
    LR schedule, manual LR override, and validation measuring binary
    classification stats on the 'prediction' output; 100 epochs max.

    NOTE(review): the schedule lists epoch 10 twice -- (10, 1e-2) and
    (10, 1e-3) -- which looks like a typo; confirm the intended epochs.
    """
    logger.auto_set_dir()
    dataset_train = get_data('train')
    steps_per_epoch = dataset_train.size() * 40
    dataset_val = get_data('val')
    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            ScheduledHyperParamSetter('learning_rate', [(10, 1e-2), (10, 1e-3), (10, 1e-4)]),
            HumanHyperParamSetter('learning_rate'),
            InferenceRunner(dataset_val,
                            BinaryClassificationStats('prediction', 'imageLabel4d'))
        ],
        model=Model(),
        steps_per_epoch=steps_per_epoch,
        max_epoch=100,
    )
## sTODO: update to the ChestXray version
def run(model_path, image_path, output):
    """Run inference on one image and write the sigmoid maps as PNGs.

    Loads weights from *model_path*, feeds the image (cropped down to
    side lengths that are multiples of 16) and fetches output1..output6.
    With *output* None, writes out1..out5.png plus out-fused.png;
    otherwise writes only the fused (6th) map to *output*.
    """
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_path),
        input_names=['image'],
        output_names=['output' + str(k) for k in range(1, 7)])
    predictor = OfflinePredictor(pred_config)
    im = cv2.imread(image_path)
    assert im is not None
    # Round sides down to multiples of 16 to match the network's stride.
    im = cv2.resize(im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16))
    outputs = predictor([[im.astype('float32')]])
    if output is None:
        for k in range(6):
            pred = outputs[k][0]
            # Sigmoid outputs are in [0, 1]; scale to 8-bit for PNG.
            cv2.imwrite("out{}.png".format(
                '-fused' if k == 5 else str(k + 1)), pred * 255)
    else:
        pred = outputs[5][0]
        cv2.imwrite(output, pred * 255)
# CLI entry point: --display previews the dataflow, --test runs inference
# on one image, otherwise the model is trained (optionally resuming from
# --load, on the GPUs listed in --gpu).
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load pre-trained model and fine-tune')
    parser.add_argument('--display', help='view dataset', action='store_true')
    parser.add_argument('--test', help='run model on images')
    parser.add_argument('--output', help='fused output filename. default to out-fused.png')
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.display:
        view_data()
    elif args.test:
        # BUG FIX: was `run(args.load, arg.test, args.output)` -- `arg`
        # is undefined, so the inference path always raised NameError.
        run(args.load, args.test, args.output)
    else:  # train the model
        config = get_config()
        if args.load:
            config.session_init = get_model_loader(args.load)
        if args.gpu:
            config.nr_tower = len(args.gpu.split(','))
        SyncMultiGPUTrainer(config).train()
| [
"xiaosong.wang@live.com"
] | xiaosong.wang@live.com |
6c2cf63addd9d3664eeabb0d446ac9beeed5c449 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_287/ch16_2020_03_21_00_14_52_868139.py | 94be6d91f54a3fead1d374afc0cf87ba1bac9fca | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | f=float(input('conta:'))
# BUG FIX: was "f=f*1,1" -- the comma is a tuple constructor, not a
# decimal separator, so f was rebound to the tuple (f, 1) and the
# "{0:.2f}" format below raised TypeError.  Multiply by 1.1 to add the
# 10% service charge.
f = f * 1.1
print('Valor da conta com 10%:R$ {0:.2f}'.format(f))
| [
"you@example.com"
] | you@example.com |
ee3473b10902f6c6c697639c370c76082fa54da6 | 06919b9fd117fce042375fbd51d7de6bb9ae14fc | /py/dcp/problems/graph/find_order.py | 65b844789e326bb2a11db792095d06afc91af167 | [
"MIT"
] | permissive | bmoretz/Daily-Coding-Problem | 0caf2465579e81996869ee3d2c13c9ad5f87aa8f | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | refs/heads/master | 2022-12-07T15:41:06.498049 | 2021-11-18T19:45:19 | 2021-11-18T19:45:19 | 226,376,236 | 1 | 0 | MIT | 2022-11-22T09:20:23 | 2019-12-06T17:17:00 | C++ | UTF-8 | Python | false | false | 1,593 | py | '''Topological sort.
We are given a hasmap associating each courseId key with a list of courseIds values, which tells us that the prerequisites of courseId
are course Ids. Return a sorted ordering of courses such that we can complete the curriculum.
Return null if there is no such ordering.
For example, given the following prerequisites:
{
'CSC300' : ['CSC100', 'CSC200'],
'CSC200' : ['CSC100'],
'CSC100' : []
}
You should return ['CSC100', 'CSC200', 'CSC300'].
'''
from collections import deque, defaultdict
def find_order1(courses_to_prereqs: dict):
    """Topologically order courses so every prerequisite comes first.

    *courses_to_prereqs* maps each course id to the list of course ids
    that must be taken before it.  Returns one valid ordering as a list,
    or None when the prerequisites contain a circular dependency.
    Implements Kahn's algorithm with in-degree counting.
    """
    # Number of distinct, still-unsatisfied prerequisites per course.
    pending = {course: len(set(prereqs))
               for course, prereqs in courses_to_prereqs.items()}
    # Reverse edges: prerequisite -> courses it unlocks when completed.
    unlocks = defaultdict(list)
    for course, prereqs in courses_to_prereqs.items():
        for prereq in set(prereqs):
            unlocks[prereq].append(course)
    # Seed the queue with every course that requires nothing first.
    ready = deque(course for course, count in pending.items() if count == 0)
    ordering = []
    while ready:
        course = ready.popleft()
        ordering.append(course)
        # Completing this course may free up its successors.
        for successor in unlocks[course]:
            pending[successor] -= 1
            if pending[successor] == 0:
                ready.append(successor)
    # Anything left unordered sits on (or behind) a cycle.
    if len(ordering) < len(pending):
        return None
    return ordering
"bmoretz82@gmail.com"
] | bmoretz82@gmail.com |
f7383bb07b5a685e539266485c37b94cae869f20 | 992b6058a66a6d7e05e21f620f356d1ebe347472 | /fluent_pages/migrations/0001_initial.py | bebe90e32b27fb10654917cc68d5847990affc35 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | masschallenge/django-fluent-pages | 5ffb47a6dc4d7333ccbef9973cea4e6cf79569fe | 8beb083d89fba935ef3bfeda8cacf566f28b1334 | refs/heads/master | 2021-07-15T14:27:46.078658 | 2015-11-12T16:52:00 | 2015-11-12T16:52:00 | 28,341,345 | 0 | 0 | NOASSERTION | 2021-03-24T18:53:09 | 2014-12-22T14:17:52 | Python | UTF-8 | Python | false | false | 9,911 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UrlNode'
db.create_table('fluent_pages_urlnode', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')(related_name='polymorphic_fluent_pages.urlnode_set', null=True, to=orm['contenttypes.ContentType'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['fluent_pages.UrlNode'])),
('parent_site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('status', self.gf('django.db.models.fields.CharField')(default='d', max_length=1)),
('publication_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('expire_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('in_navigation', self.gf('django.db.models.fields.BooleanField')(default=True)),
('sort_order', self.gf('django.db.models.fields.IntegerField')(default=1)),
('override_url', self.gf('django.db.models.fields.CharField')(max_length=300, blank=True)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modification_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('_cached_url', self.gf('django.db.models.fields.CharField')(default='', max_length=300, db_index=True, blank=True)),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('fluent_pages', ['UrlNode'])
# Adding model 'PageLayout'
db.create_table('fluent_pages_pagelayout', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('key', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('template_path', self.gf('fluent_pages.models.fields.TemplateFilePathField')(path='/srv/www/webapps/edoburu.nl/edoburu_site/themes/edoburu/templates/', max_length=100, recursive=True, match='.*\\.html$')),
))
db.send_create_signal('fluent_pages', ['PageLayout'])
    def backwards(self, orm):
        """Revert the migration: drop both tables created by forwards()."""
        # Deleting model 'UrlNode'
        db.delete_table('fluent_pages_urlnode')
        # Deleting model 'PageLayout'
        db.delete_table('fluent_pages_pagelayout')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fluent_pages.pagelayout': {
'Meta': {'ordering': "('title',)", 'object_name': 'PageLayout'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'template_path': ('fluent_pages.models.fields.TemplateFilePathField', [], {'path': "'/srv/www/webapps/edoburu.nl/edoburu_site/themes/edoburu/templates/'", 'max_length': '100', 'recursive': 'True', 'match': "'.*\\\\.html$'"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'fluent_pages.urlnode': {
'Meta': {'ordering': "('lft', 'sort_order', 'title')", 'object_name': 'UrlNode'},
'_cached_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'db_index': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expire_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'override_url': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['fluent_pages.UrlNode']"}),
'parent_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_pages.urlnode_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'d'", 'max_length': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['fluent_pages']
| [
"vdboor@edoburu.nl"
] | vdboor@edoburu.nl |
8e508f5de15158bc323cee8419d2fbd8c6e85167 | d4f485d9e331a8019dcffbc8467e403103ed209e | /python/p053.py | 70bcebcd59a12d96593b3c96da5a39079dac6e75 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | doboy/euler | 777a8c5c7ab5474796e9baab1403934bda645065 | 75a73d7a48f7e02b772a96bec168194ba6f3b666 | refs/heads/master | 2020-04-09T23:54:53.184650 | 2019-01-13T20:06:15 | 2019-01-13T20:06:15 | 2,723,288 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from utils import product, choose
def countGen():
    """Yield one count per binomial coefficient C(n, r) > 1,000,000
    for 1 <= n <= 100 (Project Euler 53).

    Only r <= n // 2 is scanned: by the symmetry C(n, r) == C(n, n - r)
    each hit counts twice, except the central term (r == n - r) which
    counts once.  `choose` comes from the local utils module; this is a
    Python 2 file (xrange, print statement below).
    """
    for n in xrange( 1, 101 ):
        for r in xrange( 1, n // 2 + 1 ):
            if choose( n, r ) > 10 ** 6:
                yield 2 if n - r != r else 1
print sum( countGen() )
| [
"Doboy@Huans-MacBook-Pro.local"
] | Doboy@Huans-MacBook-Pro.local |
b3d94c75f6e50153a6dcd11c78d49a3a93aa63e6 | a13e369f7d98f6a97021a654421c71d957b2cb38 | /design/design.py | a1c23c88cf6caeb91bab7b703ab6a98f30da75cf | [] | no_license | eginez/bigdl-jdks | 557f8a65c1dca81bcfabff58b12438b389b67498 | f43d37270507a2b62111a8f4f4a802b06ff750d2 | refs/heads/master | 2023-01-01T13:35:19.329676 | 2020-10-24T13:28:25 | 2020-10-24T13:28:25 | 298,538,355 | 0 | 0 | null | 2020-10-03T08:25:37 | 2020-09-25T10:15:18 | HCL | UTF-8 | Python | false | false | 3,026 | py | import json
import pandas as pd
from argparse import ArgumentParser
from pyDOE2 import fullfact, ff2n, fracfact_by_res
def frac_by_max_res(factor_count, target_resolution=6):
    """Generate a fractional factorial design sign table with maximum resolution.

    Tries resolutions from *target_resolution* down to 3; pyDOE2's
    fracfact_by_res raises ValueError when a resolution cannot be
    achieved for the given number of factors.  Falls back to a full
    two-level design (ff2n) when no resolution >= 3 is reachable.
    """
    # BUG FIX: the original wrapped the ff2n() fallback inside the same
    # `except ValueError` and recursed, so a ValueError raised by ff2n
    # (e.g. an invalid factor count) caused unbounded recursion instead
    # of surfacing the error.  Only the fracfact_by_res attempts retry.
    for resolution in range(target_resolution, 2, -1):
        try:
            return fracfact_by_res(factor_count, resolution)
        except ValueError:
            continue  # this resolution is unreachable; try the next lower one
    return ff2n(factor_count)
def min_max(levels):
    """Return the indices of the smallest and largest entries of *levels*.

    Categorical (string) levels have no numeric ordering, so the first
    and last positions (0, -1) stand in for the minimum and maximum.
    Raises ValueError when *levels* is empty.
    """
    if len(levels) == 0:
        raise ValueError("Cannot determine minimum value of empty list")
    if type(levels[0]) is str:
        return 0, -1
    lowest = levels.index(min(levels))
    highest = levels.index(max(levels))
    return lowest, highest
def generate_frac(factors):
    """Generate a fractional factorial design with maximum resolution.

    *factors* maps factor names to their level lists.  The +/-1 sign
    table from frac_by_max_res is rewritten in place so each cell holds
    the index of that factor's minimum (for -) or maximum (for +) level.
    NOTE(review): an empty *factors* dict makes the zip-unpack below
    raise ValueError -- confirm callers never pass one.
    """
    # Generate sign table
    design = frac_by_max_res(len(factors))
    # Replace signs with min and max indices
    min_indices, max_indices = zip(*[min_max(levels) for _, levels in factors.items()])
    for i in range(len(design)):
        for j in range(len(design[i])):
            if design[i][j] < 0:
                design[i][j] = min_indices[j]
            else:
                design[i][j] = max_indices[j]
    return design
def generate_full(factors):
    """Generate a full factorial design covering every level combination."""
    # fullfact expects the number of levels per factor, in factor order.
    level_counts = [len(levels) for levels in factors.values()]
    return fullfact(level_counts)
def main():
    """Generate experiment configurations from the provided factors and store them in a CSV file"""
    # Parse arguments
    argument_parser = ArgumentParser()
    argument_parser.add_argument("-t", "--type", metavar="TYPE", choices=["full", "frac"], required=True, help="type of experiment design")
    argument_parser.add_argument("-f", "--factors", metavar="PATH", required=True, help="path to JSON configuration file")
    argument_parser.add_argument("-o", "--output", metavar="PATH", required=True, help="path to CSV output file")
    arguments = argument_parser.parse_args()
    # Load factors
    # (JSON object mapping factor names to their list of levels)
    with open(arguments.factors) as file:
        factors = json.load(file)
    # Generate design
    if "frac" in arguments.type:
        design = generate_frac(factors)
    elif "full" in arguments.type:
        design = generate_full(factors)
    else:
        raise ValueError(f"Invalid design type: {arguments.type}")
    # Convert indices to integers
    # (each row becomes a lazy map object, consumed exactly once below)
    design = [map(int, indices) for indices in design]
    # Generate configurations
    # Translate each row of level indices back into the level values.
    configurations = []
    for indices in design:
        configuration = []
        for factor, index in zip(factors, indices):
            configuration.append(factors[factor][index])
        configurations.append(configuration)
    # Store configurations
    data_frame = pd.DataFrame(data=configurations, columns=list(map(str.lower, factors.keys())))
    data_frame.to_csv(arguments.output, index_label="index")
# Script entry point: build the experiment design and write the CSV.
if __name__ == '__main__':
    main()
| [
"wvdbrug@gmail.com"
] | wvdbrug@gmail.com |
07b0ee808e6b529d4d78ec6d7216bfd8955af654 | 9f82983f5f119635931a0233ec86aa223f5f57ec | /base/myfields.py | 1d4f1eb4da76105d460d99c280ed7ac274e61ae4 | [] | no_license | Arox/d_and_d | d49532cd0b0a824aea3f4767200fa9463d2ae6a0 | d707c5cdb557f23f12c99ac8f1b7bd7c86e2a935 | refs/heads/master | 2020-06-02T00:34:12.648579 | 2015-03-21T10:21:01 | 2015-03-21T10:21:01 | 32,378,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from django.db import models
import ast
class ListField(models.TextField):
    """Django TextField that stores a Python list as its text repr.

    Values are serialised with ``unicode()`` (i.e. ``repr``-style text)
    and parsed back with ``ast.literal_eval``, which evaluates literals
    only and executes no code.
    NOTE(review): Python 2 / old-Django idioms -- ``models.SubfieldBase``
    was removed in Django 1.10 (use ``from_db_value`` instead) and
    ``unicode`` does not exist on Python 3.
    """
    __metaclass__ = models.SubfieldBase
    description = "Stores a python list"
    def __init__(self, *args, **kwargs):
        super(ListField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        # Convert the stored text back into a list; empty/None -> [].
        if not value:
            value = []
        if isinstance(value, list):
            return value
        return ast.literal_eval(value)
    def get_prep_value(self, value):
        # Serialise for the database: text representation of the list.
        if value is None:
            return value
        return unicode(value)
    def value_to_string(self, obj):
        # Serialization hook (e.g. for dumpdata): reuse the DB form.
        value = self._get_val_from_obj(obj)
        return self.get_db_prep_value(value)
"mailofarox@gmail.com"
] | mailofarox@gmail.com |
b0ee96afdbb8d940aeeedbe2f8276662709cd207 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200913b_python2m8/day06_201018/filedir_4_remove.py | 0740189b7058ab68253a539e1376c26eddba0f08 | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | """
remove dir or file
remove(name)
"""
import os
# os.remove("mydir3a")
# remove a file
os.remove("rename_file_new.py")
| [
"lada314@gmail.com"
] | lada314@gmail.com |
6e5e1b9cb98e0bf40839ba51dd140cf6e32afa14 | fe75b011ad3072ba80db7676a13b20483f515ecf | /hp.py | d80e582da3fa01699dc9d5393ee0d54d52e8d8a3 | [
"MIT"
] | permissive | meelement/noise_adversarial_tacotron | 87f91f03f421ea374082c61c4e16564035fcc080 | 7a7fda49eb8bf82f5139743d55639d48ff204e9e | refs/heads/master | 2022-02-13T20:47:45.626008 | 2019-08-15T00:58:31 | 2019-08-15T00:58:31 | 204,584,234 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,480 | py | from pathlib import Path
from os.path import expanduser
from os import makedirs
from random import sample
from os import system
from math import exp
from platform import node
# Debug Trigger ###############################################################
## Set your debug trigger here.
debug = "sorcerer" in node()
if debug:
print("Working Under DEBUG Mode ! ")
# GLOBAL HOOKS ################################################################
# you can hook global variables here to print or observe their value.
# This is easier than wiring them out of the model.
# This resembles tensorflows' GraphKeys
hook = {}
# CONFIG ######################################################################
# This model works on VCTK and LJSpeech together, the model samples data from both dataset.
# This is a baseline multi-speaker model.
encoder_model = "CNNRNN"
assert encoder_model in ("CNN", "CNNRNN"), "Unknown Encoder Model"
model_id = f"{encoder_model}_" + "".join(sample("0123456789qwertyuiopasdfghjklzxcvbnm", 3))
print("My ID is ", model_id)
## Audio Signal Processing ####################################################
sampling_rate = 16000
n_fft = 2048
fft_bins = n_fft // 2 + 1
n_mels = 128
hop_length = int(sampling_rate * 12.5 // 1000)
win_length = int(sampling_rate * 60 // 1000) # 25 ~ 128
min_f = 40
max_f = 8000
## Dataset ####################################################################
vctk_wav_path = Path(expanduser("~/datasets/vctk/cut16/"))
vctk_text_path = Path(expanduser("~/datasets/vctk/txt16/"))
ljs_wav_path = Path(expanduser("~/datasets/ljspeech/wavs_16000/"))
ljs_text_path = Path(expanduser("~/datasets/ljspeech/metadata.csv"))
whole_chime_path = Path(expanduser("~/datasets/chime/backgrounds/"))
part_chime_path = Path(expanduser("~/datasets/chime/segmented_backgrounds/"))
n_loading_threads = 6
## Logging Paths ##############################################################
# low pass exponential decay rate used for filtering all kind of loss
decay = 0.9
root_path = Path(__file__).parent
makedirs(str(root_path / "log"), exist_ok=True)
log_root = root_path / "log" / model_id
training_log_path = log_root / "log.txt"
checkpoint_path = log_root / "checkpoint"
attention_plot_path = log_root / "attention"
mel_plot_path = log_root / "mel"
linear_plot_path = log_root / "linear"
speaker_hidden_plot_path = log_root / "speaker_hidden"
noise_hidden_plot_path = log_root / "noise_hidden"
speaker_encode_plot_path = log_root / "speaker_encode"
noise_encode_plot_path = log_root / "noise_encode"
## Automatically creating paths
temporary_paths = [
log_root,
checkpoint_path
]
for name, value in list(globals().items()):
if "plot_path" in name:
temporary_paths.append(value)
for path in temporary_paths:
path.mkdir()
## Copying current hyper-parameters to the log dir.
system(f"cp {__file__} {str(log_root)}")
## Network Parameters #########################################################
embed_dims = 256
encoder_dims = 128
decoder_dims = 256
assert encoder_dims * 2 == decoder_dims, "Mismatch dimensions"
postnet_dims = 128
encoder_K = 16
lstm_dims = 512
postnet_K = 8
num_highways = 4
dropout = 0.5
cleaner_names = ['english_cleaners']
## Training Parameters ########################################################
device = "cuda"
schedule = [
(5, 70000, 32), # (r, lr, step, batch_size)
(3, 180000, 16),
]
init_lr = 0.0007
warmup_steps = 4000
# LR Decay is necessary for multi-speaker model to converge fast.
def learning_rate(step):
    """Noam-style schedule: linear warmup, then inverse-sqrt decay.

    Under debug mode a small constant rate is returned instead.  Depends on
    the module-level ``debug``, ``init_lr`` and ``warmup_steps`` settings.
    """
    if debug:
        return 0.0004
    warmup_term = step * warmup_steps ** -1.5
    decay_term = (step + 1) ** -0.5
    return init_lr * warmup_steps ** 0.5 * min(warmup_term, decay_term)
min_sample_length = sampling_rate
max_sample_length = sampling_rate * 8
bin_lengths = True
clip_grad_norm = 1.0
checkpoint_interval = 2000
plot_interval = 50 if debug else 200
### VAE Training
max_kl_ratio = 0.00000001
annealing_offset = 10000.0
annealing_ratio = 5000.0
classification_ratio = 1.0
noise_augment_probability = 0.5
def sigmoid(x):
    """Numerically stable logistic sigmoid, 1 / (1 + e^-x).

    The naive ``1.0 / (1.0 + exp(-x))`` raises OverflowError for large
    negative x (math.exp overflows near exp(710)).  Branching on the sign
    keeps the exponent non-positive, so exp() can only underflow to 0.0,
    never overflow; results are identical in the non-overflowing range.
    """
    if x >= 0:
        return 1.0 / (1.0 + exp(-x))
    # For x < 0 use the algebraically equivalent form e^x / (1 + e^x).
    z = exp(x)
    return z / (1.0 + z)
def kl_loss_ratio(step):
    """KL-annealing weight: a sigmoid ramp from ~0 up to ``max_kl_ratio``.

    The ramp is centred at ``annealing_offset`` steps and its width is set
    by ``annealing_ratio`` (both module-level settings).
    """
    progress = (step - annealing_offset) / annealing_ratio
    return max_kl_ratio * sigmoid(progress)
n_speakers = 200
speaker_encoder_dims = 256
speaker_latent_dims = 64
noise_encoder_dims = 256
noise_latent_dims = 8
enable_speaker_guide = True
enable_adversarial = True
### Binning Loading ###########################################################
q_size = 1000
redundancy = 10
load_weight_file = root_path / "log" / "restore.pyt"
| [
"mr.jimmy@foxmail.com"
] | mr.jimmy@foxmail.com |
0d7f1fc9b62199e37dce2ba1ac1f4d5ec06cf21a | affc88ec2d455414261949d6e87ad1fedee3db90 | /02.Python/day4-函数模块与包/package1/subPack1/module_12.py | 9cbe82bdc5e46fb1c24dafbff87f28a3e725b104 | [] | no_license | Wilson-Sunshine/Machine-Learning-Kaikeba | 6c7d12e4fbbb68a42fff5b99e464a64265bc199d | 40b8919805e4b32ec21c274f1bddcdb79e31d54a | refs/heads/master | 2020-08-02T22:24:35.179330 | 2020-02-19T22:20:19 | 2020-02-19T22:20:19 | 211,524,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | def funcB():
print("funcB in module_12")
return | [
"wilson@wilson.local"
] | wilson@wilson.local |
260b2eda0001da5dd016cc606b317e4f77282597 | f23e648a63750dc66645177426717dee549351bf | /stock_czsc_tools.py | d0b08eab05fc257c7a60c49e3bd427668e17fdb8 | [] | no_license | billlaw6/my_python | 6d348d4febe7a2492dfc67e2abe04f5d67e57155 | fe322953e603d70484c69cd73fe7ac71d0353290 | refs/heads/master | 2021-01-10T23:56:39.376887 | 2016-12-30T08:46:34 | 2016-12-30T08:46:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,367 | py | #!/usr/bin/env python
# -*- coding: utf8 -*-
# File Name: stock_czsc_tools.py
# Author: bill_law6
# mail: bill_law6@163.com
# Created Time: Wed 23 Nov 2016 01:37:38 PM CST
import tushare as ts
import time
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.finance as mpf
import numpy as np
import pandas as pd
import talib as ta
plt.rcParams['font.family'] = ['sans-serif'] # so that CJK axis labels/titles render correctly
plt.rcParams['font.sans-serif'] = ['Liberation Sans'] # font used for the (Chinese) labels
plt.rcParams['axes.unicode_minus'] = False # render minus signs correctly with non-ASCII fonts
def baohan_process(data = None):
    """Merge candles that stand in a containing ("baohan") relation.

    Chan-lun preprocessing (lecture 65): of any three candles 1/2/3 only
    2 and 3 are checked for containment, and how an overlap is merged is
    decided by the up/down direction established by candles 1 and 2.
    Containment must be processed strictly left to right; a single pass
    suffices, because merging only shortens candles and never creates new
    overlaps.

    Returns the frame with contained rows dropped, or None if *data* is
    None.  NOTE(review): uses the DataFrame .ix indexer, i.e. targets a
    pre-1.0 pandas.
    """
    if data is None:
        return None
    up_down_flag = 'up'
    for i in range(0, len(data)-1):
        # The up/down type of the current candle is fixed by comparing it
        # with the following candle.
        if data.ix[i, 'high'] < data.ix[i+1, 'high'] \
           and data.ix[i, 'low'] < data.ix[i+1, 'low']:
            data.ix[i, 'type'] = 'up'
            up_down_flag = 'up'
        elif data.ix[i, 'high'] > data.ix[i+1, 'high'] \
             and data.ix[i, 'low'] > data.ix[i+1, 'low']:
            data.ix[i, 'type'] = 'down'
            up_down_flag = 'down'
        # Earlier candle contains the later one: mark the later candle for
        # deletion, then keep scanning forward for further containment.
        elif data.ix[i, 'high'] >= data.ix[i+1, 'high'] \
             and data.ix[i, 'low'] <= data.ix[i+1, 'low']:
            # Merge the bounds according to the prevailing direction.
            data.ix[i+1, 'delete'] = True
            if i == 0:
                data.ix[i, 'low'] = data.ix[i+1, 'low']
            elif up_down_flag == 'up':
                data.ix[i, 'low'] = data.ix[i+1, 'low']
            elif up_down_flag == 'down':
                data.ix[i, 'high'] = data.ix[i+1, 'high']
            # Continue merging subsequent candles that are also contained.
            for j in range(2, len(data) - i):
                if data.ix[i, 'high'] >= data.ix[i+j, 'high'] \
                   and data.ix[i, 'low'] <= data.ix[i+j, 'low']:
                    data.ix[i+j, 'delete'] = True
                    if i == 0:
                        data.ix[i, 'low'] = data.ix[i+j, 'low']
                    elif up_down_flag == 'up':
                        data.ix[i, 'low'] = data.ix[i+j, 'low']
                    elif up_down_flag == 'down':
                        data.ix[i, 'high'] = data.ix[i+j, 'high']
                else:
                    break
        # Later candle contains the earlier one.
        elif data.ix[i, 'high'] <= data.ix[i+1, 'high'] \
             and data.ix[i, 'low'] >= data.ix[i+1, 'low']:
            # Mark the contained (earlier) candle; default direction is UP.
            data.ix[i, 'delete'] = True
            if i == 0:
                data.ix[i+1, 'low'] = data.ix[i, 'low']
            elif up_down_flag == 'up':
                data.ix[i+1, 'low'] = data.ix[i, 'low']
            elif up_down_flag == 'down':
                # NOTE(review): writes the earlier candle's *low* into the
                # later candle's 'high' -- symmetry suggests 'high' was
                # intended; confirm before changing.
                data.ix[i+1, 'high'] = data.ix[i, 'low']
        else:
            print("Shem Ma K xian guanxi?")
            data.ix[i, 'type'] = 'unknown'
    return data.drop(data[data.delete==True].index)
def find_possible_ding_di(data = None):
    """Find and mark candidate fractal tops ('ding') and bottoms ('di').

    *data* should be a DataFrame in the layout returned by
    tushare.get_hist_data() (after containment merging).  Rows that are not
    fractals copy their 'type' value into the 'fenxing' column.  The first
    and last two rows are never tagged (not enough neighbours).
    """
    if data is None:
        return None
    for i in range(2, len(data)-2):
        # Top fractal: the highest high of the two candles on each side,
        # with a low above both immediate neighbours' lows.
        if data.ix[i, 'high'] > data.ix[i - 1, 'high'] \
            and data.ix[i, 'high'] > data.ix[i - 2, 'high'] \
            and data.ix[i, 'high'] > data.ix[i + 1, 'high'] \
            and data.ix[i, 'high'] > data.ix[i + 2, 'high'] \
            and data.ix[i, 'low'] > data.ix[i - 1, 'low'] \
            and data.ix[i, 'low'] > data.ix[i + 1, 'low']:
            data.ix[i, 'fenxing'] = 'ding'
        # Bottom fractal: the mirror-image condition on lows/highs.
        elif data.ix[i, 'high'] < data.ix[i - 1, 'high'] \
            and data.ix[i, 'high'] < data.ix[i + 1, 'high'] \
            and data.ix[i, 'low'] < data.ix[i - 1, 'low'] \
            and data.ix[i, 'low'] < data.ix[i - 2, 'low'] \
            and data.ix[i, 'low'] < data.ix[i + 1, 'low'] \
            and data.ix[i, 'low'] < data.ix[i + 2, 'low']:
            data.ix[i, 'fenxing'] = 'di'
        else:
            data.ix[i, 'fenxing'] = data.ix[i, 'type']
    return data
def tag_bi_line_mm(data = None):
    """Tag 'bi' (stroke) endpoints on data whose fractals are already marked.

    Walks the sequence of candidate tops/bottoms produced by
    find_possible_ding_di() and writes confirmed stroke endpoints into a
    'bi_value' column.  An endpoint is only confirmed once an opposite
    fractal appears more than 3 candles away (the Chan-lun minimum stroke
    length).  Returns *data*, or None when *data* is None.
    """
    if data is None:
        return None
    # Collect every fractal row into a plain list for easier iteration.
    # Note: 'di' is a substring of 'ding', so find('di') matches both
    # bottoms and tops.
    ding_di_list = []
    for i in range(0, len(data)-1):
        if isinstance(data.ix[i, 'fenxing'], str):
            if data.ix[i, 'fenxing'].find('di') != -1:
                ding_di_list.append({
                    'loc': i,
                    'fenxing': data.ix[i, 'fenxing'],
                    'high': data.ix[i, 'high'],
                    'low': data.ix[i, 'low'],
                })
    # Fewer than 4 fractals cannot form strokes yet; return untouched.
    # (Fixed: the original had a bare `exit` here, which is a no-op
    # expression -- execution fell through anyway.  Returning matches
    # tag_duan_line's handling of the same situation.)
    if len(ding_di_list) < 4:
        print("Number of ding_di less than 4, please wait!")
        return data
    possible_ding_start = {'loc': -1, 'value': 0}
    possible_di_start = {'loc': -1, 'value': 0}
    ding_list = []
    di_list = []
    # Re-evaluate the candidate stroke endpoints each time a new fractal
    # is encountered.
    for i in range(0, len(ding_di_list) - 1):
        # Current fractal is a bottom.
        if ding_di_list[i]['fenxing'] == 'di':
            di_list.append(ding_di_list[i]['low'])
            # No candidate top yet: tentatively take this (lowest-so-far)
            # bottom as a stroke start.
            if possible_ding_start['loc'] < 0 and ding_di_list[i]['low'] == min(di_list):
                possible_di_start['loc'] = ding_di_list[i]['loc']
                possible_di_start['value'] = ding_di_list[i]['low']
                continue
            # Candidate top exists within 3 candles and no candidate bottom
            # yet: tentatively take this bottom.
            elif possible_ding_start['loc'] >= 0 \
                    and ding_di_list[i]['loc'] - possible_ding_start['loc'] <= 3 \
                    and possible_di_start['loc'] < 0:
                possible_di_start['loc'] = ding_di_list[i]['loc']
                possible_di_start['value'] = ding_di_list[i]['low']
                continue
            # Candidate top within 3 candles, an existing candidate bottom,
            # and this bottom is lower: move the candidate bottom here.
            elif possible_ding_start['loc'] >= 0 \
                    and ding_di_list[i]['loc'] - possible_ding_start['loc'] <= 3 \
                    and possible_di_start['loc'] > 0 \
                    and ding_di_list[i]['low'] < possible_di_start['value']:
                possible_di_start['loc'] = ding_di_list[i]['loc']
                possible_di_start['value'] = ding_di_list[i]['low']
                ding_list = []
                continue
            # Candidate top more than 3 candles back, this bottom is the
            # lowest so far and sits below that top's high: a downward
            # stroke may end here.
            elif possible_ding_start['loc'] >= 0 \
                    and ding_di_list[i]['loc'] - possible_ding_start['loc'] > 3 \
                    and ding_di_list[i]['low'] == min(di_list) \
                    and data.ix[possible_ding_start['loc'], 'high'] > ding_di_list[i]['low']:
                # If an earlier candidate bottom precedes that top by more
                # than 3 candles, confirm it as a stroke endpoint.
                if possible_di_start['loc'] >= 0 \
                        and possible_ding_start['loc'] - possible_di_start['loc'] > 3:
                    data.ix[possible_di_start['loc'], 'bi_value'] = possible_di_start['value']
                possible_di_start['loc'] = ding_di_list[i]['loc']
                possible_di_start['value'] = ding_di_list[i]['low']
                ding_list = []
                continue
        # Current fractal is a top (mirror image of the bottom handling).
        elif ding_di_list[i]['fenxing'] == 'ding':
            ding_list.append(ding_di_list[i]['high'])
            if possible_di_start['loc'] < 0 and ding_di_list[i]['high'] == max(ding_list):
                possible_ding_start['loc'] = ding_di_list[i]['loc']
                possible_ding_start['value'] = ding_di_list[i]['high']
                continue
            elif possible_di_start['loc'] > 0 \
                    and ding_di_list[i]['loc'] - possible_di_start['loc'] <= 3 \
                    and possible_ding_start['loc'] < 0:
                possible_ding_start['loc'] = ding_di_list[i]['loc']
                possible_ding_start['value'] = ding_di_list[i]['high']
                continue
            elif possible_di_start['loc'] > 0 \
                    and ding_di_list[i]['loc'] - possible_di_start['loc'] <= 3 \
                    and possible_ding_start['loc'] > 0 \
                    and ding_di_list[i]['high'] > possible_ding_start['value']:
                possible_ding_start['loc'] = ding_di_list[i]['loc']
                possible_ding_start['value'] = ding_di_list[i]['high']
                di_list = []
                continue
            elif possible_di_start['loc'] >= 0 \
                    and ding_di_list[i]['loc'] - possible_di_start['loc'] > 3 \
                    and ding_di_list[i]['high'] == max(ding_list) \
                    and data.ix[possible_di_start['loc'], 'low'] < ding_di_list[i]['high']:
                if possible_ding_start['loc'] >= 0 \
                        and possible_di_start['loc'] - possible_ding_start['loc'] > 3:
                    data.ix[possible_ding_start['loc'], 'bi_value'] = data.ix[possible_ding_start['loc'], 'high']
                possible_ding_start['loc'] = ding_di_list[i]['loc']
                possible_ding_start['value'] = ding_di_list[i]['high']
                di_list = []
                continue
    return data
def tag_bi_line(data = None):
    """Tag 'bi' (stroke) endpoints on data whose fractals are already marked.

    State-machine variant: tracks the current and previous candidate
    top/bottom (``ding_start``/``di_start`` and their ``pre_*``
    predecessors) and confirms a predecessor as a stroke endpoint
    ('bi_value' column) once the opposite fractal is more than 3 candles
    away.  Returns *data*, or None when *data* is None.
    """
    if data is None:
        return None
    # Collect every fractal row for easier iteration.  'di' is a substring
    # of 'ding', so find('di') matches both bottoms and tops.
    ding_di_list = []
    for i in range(0, len(data)-1):
        if isinstance(data.ix[i, 'fenxing'], str):
            if data.ix[i, 'fenxing'].find('di') != -1:
                ding_di_list.append({
                    'loc': i,
                    'fenxing': data.ix[i, 'fenxing'],
                    'high': data.ix[i, 'high'],
                    'low': data.ix[i, 'low'],
                })
    # Fewer than 4 fractals cannot form strokes yet; return untouched.
    # (Fixed: the original had a bare `exit` here -- a no-op expression, so
    # execution fell through.  Returning matches tag_duan_line's handling.)
    if len(ding_di_list) < 4:
        print("Number of ding_di less than 4, please wait!")
        return data
    pre_ding_start = {'loc': -1, 'value': 0}
    pre_di_start = {'loc': -1, 'value': 0}
    ding_start = {'loc': -1, 'value': 0}
    di_start = {'loc': -1, 'value': 0}
    # Each new top/bottom triggers a re-check of whether the current stroke
    # endpoint should be moved or a previous one confirmed.
    for i in range(0, len(ding_di_list) - 1):
        if ding_di_list[i]['fenxing'] == 'ding':
            # First top seen: adopt it as the candidate top.
            if ding_start['loc'] < 0:
                pre_ding_start['loc'] = ding_start['loc']
                pre_ding_start['value'] = ding_start['value']
                ding_start['loc'] = ding_di_list[i]['loc']
                ding_start['value'] = ding_di_list[i]['high']
                continue
            elif ding_start['loc'] >= 0:
                # No candidate bottom yet: only replace the candidate top
                # with a higher one.
                if di_start['loc'] < 0:
                    if ding_di_list[i]['high'] > ding_start['value']:
                        pre_ding_start['loc'] = ding_start['loc']
                        pre_ding_start['value'] = ding_start['value']
                        ding_start['loc'] = ding_di_list[i]['loc']
                        ding_start['value'] = ding_di_list[i]['high']
                    continue
                elif di_start['loc'] >= 0:
                    # Bottom precedes the top: a higher top far enough from
                    # the bottom replaces the top and may confirm the
                    # previous top as an endpoint.
                    if di_start['loc'] < ding_start['loc']:
                        if ding_di_list[i]['loc'] - di_start['loc'] > 3 \
                                and ding_di_list[i]['high'] > ding_start['value']:
                            pre_ding_start['loc'] = ding_start['loc']
                            pre_ding_start['value'] = ding_start['value']
                            ding_start['loc'] = ding_di_list[i]['loc']
                            ding_start['value'] = ding_di_list[i]['high']
                            if pre_ding_start['loc'] > 0 \
                                    and di_start['loc'] - pre_ding_start['loc'] > 3:
                                data.ix[pre_ding_start['loc'], 'bi_value'] = pre_ding_start['value']
                        continue
                    # Bottom follows the top: a top above that bottom and
                    # far enough away starts a new upward stroke.
                    elif di_start['loc'] > ding_start['loc']:
                        if ding_di_list[i]['loc'] - di_start['loc'] > 3 \
                                and ding_di_list[i]['high'] > di_start['value']:
                            pre_ding_start['loc'] = ding_start['loc']
                            pre_ding_start['value'] = ding_start['value']
                            ding_start['loc'] = ding_di_list[i]['loc']
                            ding_start['value'] = ding_di_list[i]['high']
                            if pre_ding_start['loc'] > 0 \
                                    and di_start['loc'] - pre_ding_start['loc'] > 3:
                                data.ix[pre_ding_start['loc'], 'bi_value'] = pre_ding_start['value']
                        continue
                    # Too close to the bottom: a higher top supersedes the
                    # candidate top, and the bottom rolls back to its
                    # predecessor.
                    elif ding_di_list[i]['loc'] - di_start['loc'] <= 3:
                        if ding_di_list[i]['high'] > ding_start['value']:
                            ding_start['loc'] = ding_di_list[i]['loc']
                            ding_start['value'] = ding_di_list[i]['high']
                            di_start['loc'] = pre_di_start['loc']
                            di_start['value'] = pre_di_start['value']
                            pre_di_start['loc'] = -1
                            pre_di_start['value'] = 0
                            if pre_ding_start['loc'] > 0 \
                                    and di_start['loc'] - pre_ding_start['loc'] > 3:
                                data.ix[pre_ding_start['loc'], 'bi_value'] = pre_ding_start['value']
                        continue
        elif ding_di_list[i]['fenxing'] == 'di':
            # Mirror image of the 'ding' handling above.
            if di_start['loc'] < 0:
                pre_di_start['loc'] = di_start['loc']
                pre_di_start['value'] = di_start['value']
                di_start['loc'] = ding_di_list[i]['loc']
                di_start['value'] = ding_di_list[i]['low']
                continue
            elif di_start['loc'] >= 0:
                if ding_start['loc'] < 0:
                    # NOTE(review): compares a price ('low') against an
                    # index ('loc'); symmetry with the 'ding' branch
                    # suggests di_start['value'] was intended -- confirm
                    # before changing.
                    if ding_di_list[i]['low'] < di_start['loc']:
                        pre_di_start['loc'] = di_start['loc']
                        pre_di_start['value'] = di_start['value']
                        di_start['loc'] = ding_di_list[i]['loc']
                        di_start['value'] = ding_di_list[i]['low']
                    continue
                elif ding_start['loc'] >= 0:
                    if di_start['loc'] > ding_start['loc']:
                        if ding_di_list[i]['loc'] - ding_start['loc'] > 3 \
                                and ding_di_list[i]['low'] < di_start['value']:
                            pre_di_start['loc'] = di_start['loc']
                            pre_di_start['value'] = di_start['value']
                            di_start['loc'] = ding_di_list[i]['loc']
                            di_start['value'] = ding_di_list[i]['low']
                            if pre_di_start['loc'] > 0 \
                                    and ding_start['loc'] - pre_di_start['loc'] > 3:
                                data.ix[pre_di_start['loc'], 'bi_value'] = pre_di_start['value']
                        continue
                    elif di_start['loc'] < ding_start['loc']:
                        if ding_di_list[i]['loc'] - ding_start['loc'] > 3 \
                                and ding_di_list[i]['low'] < ding_start['value']:
                            pre_di_start['loc'] = di_start['loc']
                            pre_di_start['value'] = di_start['value']
                            di_start['loc'] = ding_di_list[i]['loc']
                            di_start['value'] = ding_di_list[i]['low']
                            if pre_di_start['loc'] > 0 \
                                    and ding_start['loc'] - pre_di_start['loc'] > 3:
                                data.ix[pre_di_start['loc'], 'bi_value'] = pre_di_start['value']
                        continue
                    elif ding_di_list[i]['loc'] - ding_start['loc'] <= 3:
                        if ding_di_list[i]['low'] < di_start['value']:
                            di_start['loc'] = ding_di_list[i]['loc']
                            di_start['value'] = ding_di_list[i]['low']
                            ding_start['loc'] = pre_ding_start['loc']
                            ding_start['value'] = pre_ding_start['value']
                            pre_ding_start['loc'] = -1
                            pre_ding_start['value'] = 0
                            if pre_di_start['loc'] > 0 \
                                    and ding_start['loc'] - pre_di_start['loc'] > 3:
                                data.ix[pre_di_start['loc'], 'bi_value'] = pre_di_start['value']
                        continue
    return data
def tag_duan_line(data = None):
    """Tag 'duan' (segment) endpoints from the stroke ('bi') tags.

    Chan-lun lecture 67: build the characteristic sequences of upward and
    downward strokes, merge containing relations inside each sequence, then
    mark fractal tops/bottoms of those sequences as segment endpoints in
    the 'duan_value' / 'bi_fenxing' columns.  Requires tag_bi_line() to
    have run first ('bi_value' column present).
    """
    if data is None:
        return None
    # Collect every fractal row that was confirmed as a stroke endpoint.
    # ('di' is a substring of 'ding', so find('di') matches both kinds.)
    if 'bi_value' in data.columns:
        bi_ding_di_list = []
        for i in range(0, len(data)):
            if type(data.ix[i, 'fenxing']) == str:
                if data.ix[i, 'fenxing'].find('di') != -1 \
                   and data.ix[i, 'bi_value'] > 0:
                    ding_di = {}
                    ding_di['loc'] = i
                    ding_di['fenxing'] = data.ix[i, 'fenxing']
                    ding_di['high'] = data.ix[i, 'high']
                    ding_di['low'] = data.ix[i, 'low']
                    bi_ding_di_list.append(ding_di)
            else:
                pass
    else:
        print("No bi tag in data")
        return data
    # Fewer than 4 stroke endpoints cannot form a segment.
    if len(bi_ding_di_list) < 4:
        print("Number of ding_di less than 4, please wait!")
        return data
    shang = {}
    shang_list = []
    xia = {}
    xia_list = []
    # Split strokes into the upward (shang: bottom -> next top) and
    # downward (xia: top -> next bottom) characteristic sequences.
    for i in range(0, len(bi_ding_di_list)-1):
        if bi_ding_di_list[i]['fenxing'] == 'ding':
            xia['high_loc'] = bi_ding_di_list[i]['loc']
            xia['high_value'] = bi_ding_di_list[i]['high']
            xia['low_loc'] = bi_ding_di_list[i+1]['loc']
            xia['low_value'] = bi_ding_di_list[i+1]['low']
            xia_list.append(xia)
            xia = {}
        elif bi_ding_di_list[i]['fenxing'] == 'di':
            shang['low_loc'] = bi_ding_di_list[i]['loc']
            shang['low_value'] = bi_ding_di_list[i]['low']
            shang['high_loc'] = bi_ding_di_list[i+1]['loc']
            shang['high_value'] = bi_ding_di_list[i+1]['high']
            shang_list.append(shang)
            shang = {}
    pd_shang = pd.DataFrame(shang_list)
    pd_xia = pd.DataFrame(xia_list)
    # Merge containing relations in the upward sequence, biased towards the
    # bottoms (keep lows, shrink highs).
    for i in range(0, len(pd_shang) - 1):
        if pd_shang.ix[i, 'low_value'] < pd_shang.ix[i+1, 'low_value'] \
           and pd_shang.ix[i, 'high_value'] > pd_shang.ix[i+1, 'high_value']:
            for j in range(1, len(pd_shang) - i - 1):
                if pd_shang.ix[i, 'low_value'] < pd_shang.ix[i+j, 'low_value'] \
                   and pd_shang.ix[i, 'high_value'] > pd_shang.ix[i+j, 'high_value']:
                    pd_shang.ix[i, 'high_value'] = pd_shang.ix[i+j, 'high_value']
                    pd_shang.ix[i+j, 'remove'] = True
                else:
                    break
        elif pd_shang.ix[i, 'low_value'] > pd_shang.ix[i+1, 'low_value'] \
             and pd_shang.ix[i, 'high_value'] < pd_shang.ix[i+1, 'high_value']:
            pd_shang.ix[i+1, 'high_value'] = pd_shang.ix[i, 'high_value']
            pd_shang.ix[i, 'remove'] = True
    if 'remove' in pd_shang.columns:
        pd_shang = pd_shang.drop(pd_shang[pd_shang.remove==True].index)
    pd_shang = pd_shang.set_index('low_loc')
    pd_shang = pd_shang.reset_index()
    # Merge containing relations in the downward sequence, biased towards
    # the tops (keep highs, raise lows).
    for i in range(0, len(pd_xia) - 1):
        if pd_xia.ix[i, 'low_value'] < pd_xia.ix[i+1, 'low_value'] \
           and pd_xia.ix[i, 'high_value'] > pd_xia.ix[i+1, 'high_value']:
            for j in range(1, len(pd_xia) - i - 1):
                if pd_xia.ix[i, 'low_value'] < pd_xia.ix[i+j, 'low_value'] \
                   and pd_xia.ix[i, 'high_value'] > pd_xia.ix[i+j, 'high_value']:
                    pd_xia.ix[i, 'high_value'] = pd_xia.ix[i+j, 'high_value']
                    pd_xia.ix[i+j, 'remove'] = True
                else:
                    break
        elif pd_xia.ix[i, 'low_value'] > pd_xia.ix[i+1, 'low_value'] \
             and pd_xia.ix[i, 'high_value'] < pd_xia.ix[i+1, 'high_value']:
            pd_xia.ix[i+1, 'low_value'] = pd_xia.ix[i, 'low_value']
            pd_xia.ix[i, 'remove'] = True
    if 'remove' in pd_xia.columns:
        pd_xia = pd_xia.drop(pd_xia[pd_xia.remove==True].index)
    pd_xia = pd_xia.set_index('high_loc')
    pd_xia = pd_xia.reset_index()
    # Mark fractals of the characteristic sequences: look for bottoms in
    # the upward-stroke sequence, tops in the downward-stroke sequence.
    for i in range(1, len(pd_shang) - 1):
        if pd_shang.ix[i, 'low_value'] <= pd_shang.ix[i-1, 'low_value'] \
           and pd_shang.ix[i, 'low_value'] <= pd_shang.ix[i+1, 'low_value'] \
           and pd_shang.ix[i, 'high_value'] <= pd_shang.ix[i-1, 'high_value'] \
           and pd_shang.ix[i, 'high_value'] <= pd_shang.ix[i+1, 'high_value']:
            pd_shang.ix[i, 'duan_value'] = pd_shang.ix[i, 'low_value']
            data.ix[pd_shang.ix[i, 'low_loc'], 'bi_fenxing'] = 'di'
            data.ix[pd_shang.ix[i, 'low_loc'], 'duan_value'] = pd_shang.ix[i, 'low_value']
    for i in range(1, len(pd_xia) - 1):
        if pd_xia.ix[i, 'high_value'] >= pd_xia.ix[i-1, 'high_value'] \
           and pd_xia.ix[i, 'high_value'] >= pd_xia.ix[i+1, 'high_value'] \
           and pd_xia.ix[i, 'low_value'] >= pd_xia.ix[i-1, 'low_value'] \
           and pd_xia.ix[i, 'low_value'] >= pd_xia.ix[i+1, 'low_value']:
            pd_xia.ix[i, 'duan_value'] = pd_xia.ix[i, 'high_value']
            data.ix[pd_xia.ix[i, 'high_loc'], 'bi_fenxing'] = 'ding'
            data.ix[pd_xia.ix[i, 'high_loc'], 'duan_value'] = pd_xia.ix[i, 'high_value']
    # print(pd_shang[pd_shang.duan_value > 0])
    # print(pd_xia[pd_xia.duan_value > 0])
    # Debug plotting of the merged characteristic sequences (kept for reference):
    # fig, axes = plt.subplots(2, 1, sharex=True, figsize=(8,6))
    # mpf.candlestick2_ochl(axes[0], pd_shang['low_value'],pd_shang['low_value'],pd_shang['high_value'],pd_shang['low_value'], width=0.6, colorup='r', colordown='g')
    # axes[0].set_title("Shang")
    # di_pd_shang = pd_shang[pd_shang.duan_value>0]
    # axes[0].plot(np.array(di_pd_shang.index), np.array(di_pd_shang.low_value), '^')
    # mpf.candlestick2_ochl(axes[1], pd_xia['low_value'],pd_xia['low_value'],pd_xia['high_value'],pd_xia['low_value'], width=0.6, colorup='r', colordown='g')
    # axes[1].set_title("Xia")
    # ding_pd_xia = pd_xia[pd_xia.duan_value>0]
    # axes[1].plot(np.array(ding_pd_xia.index), np.array(ding_pd_xia.high_value), 'v')
    # plt.show()
    return data
def plot_data(data = None, single=False):
    """Plot candlesticks with optional fractal / stroke / segment overlays.

    When *single* is False, draws a three-panel figure (price, volume,
    MACD); when True, a single candlestick panel.  Columns 'fenxing',
    'bi_value' and 'duan_value', if present, are overlaid as markers and
    lines.  Mutates *data* (adds a 't' matplotlib-date column).
    """
    if data is None:
        print("Data is None!")
        # NOTE(review): bare `exit` is a no-op expression -- execution falls
        # through and data.index below raises AttributeError on None.
        exit
    # Index strings of length 10 are daily dates ('%Y-%m-%d'); anything
    # else is assumed to carry a time component.
    if len(str(data.index[0])) == 10:
        dates = [datetime.datetime(*time.strptime(str(i), '%Y-%m-%d')[:6]) for i in data.index]
    else:
        dates = [datetime.datetime(*time.strptime(str(i), '%Y-%m-%d %H:%M:%S')[:6]) for i in data.index]
    # Multi-panel figure: price + volume + MACD share the x axis.
    if not single:
        data['t'] = mdates.date2num(dates)
        adata = data[['t','open','close','high','low','volume']]
        ddata = zip(np.array(adata.t), np.array(adata.open), np.array(adata.close), np.array(adata.high), np.array(adata.low), np.array(adata.volume))
        fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,6))
        mpf.candlestick_ochl(axes[0], ddata, width=0.6, colorup='r', colordown='g')
        axes[0].set_title(u'宝鹰股份')
        axes[0].set_ylabel('price')
        axes[0].grid(True)
        axes[0].xaxis_date()
        axes[0].autoscale_view()
        axes[1].bar(np.array(adata.t), np.array(adata.volume))
        axes[1].set_ylabel('volume')
        axes[1].grid(True)
        axes[1].autoscale_view()
        plt.setp(plt.gca().get_xticklabels(), rotation=30)
        # dflen = df.shape[0]
        dflen = len(data)
        # MACD(12,26,9) needs ~34 bars of history before it is defined.
        if dflen > 35:
            macd, macdsignal, macdhist = ta.MACD(np.array(data['close']),
                fastperiod=12, slowperiod=26,
                signalperiod=9)
            data['macd']=pd.Series(macd,index=data.index)
            data['macdsignal']=pd.Series(macdsignal,index=data.index)
            data['macdhist']=pd.Series(macdhist,index=data.index)
            data.set_index('t')
            data = data.set_index('t')
            data[['macd','macdsignal','macdhist']].plot(ax=axes[2])
            axes[2].axhline()
    else:
        data['t'] = mdates.date2num(dates)
        adata = data[['t','open','close','high','low','volume']]
        ddata = zip(np.array(adata.t), np.array(adata.open), np.array(adata.close), np.array(adata.high), np.array(adata.low), np.array(adata.volume))
        fig, ax = plt.subplots(1, 1, figsize=(8,6))
        mpf.candlestick_ochl(ax, ddata, width=0.6, colorup='r', colordown='g')
        ax.set_ylabel('price')
        ax.grid(True)
        ax.xaxis_date()
        ax.autoscale_view()
    # Overlay fractal markers when tops/bottoms are tagged.
    if 'fenxing' in data.columns:
        p_data = data[data.fenxing == 'ding']
        b_data = data[data.fenxing == 'di']
        ax = plt.gca()
        ax.plot(np.array(p_data.t), np.array(p_data.high), 'v')
        ax.plot(np.array(b_data.t), np.array(b_data.low), '^')
    # Overlay the stroke ('bi') polyline when stroke endpoints are tagged.
    if 'bi_value' in data.columns:
        bi_data = data[~np.isnan(data.bi_value)]
        ax.plot(np.array(bi_data.t), np.array(bi_data.bi_value))
    # Overlay the segment ('duan') polyline when segment endpoints are tagged.
    if 'duan_value' in data.columns:
        duan_data = data[~np.isnan(data.duan_value)]
        # print("duan_data %s" % duan_data)
        ax.plot(np.array(duan_data.t), np.array(duan_data.duan_value), color='b', linewidth=2)
    plt.show()
def main():
    """Demo driver: fetch SSE composite daily bars, run the full Chan-lun
    pipeline (containment merge -> fractals -> strokes -> segments), and
    plot the tagged result."""
    # Alternative inputs kept for reference:
    #data = ts.get_hist_data('sh','2003-01-11',ktype='D').sort_index()
    #data = ts.get_hist_data('sh','2016-11-01',ktype='30').sort_index()
    frame = ts.get_hist_data('sh','2010-10-01').sort_index()
    # data = pd.read_csv(u'./sh_M.csv')
    # data = data.set_index('date')
    print("Before baohan process: %s" % len(frame))
    #plot_data(data, single=True)
    frame = baohan_process(frame)
    print("After baohan process: %s" % len(frame))
    frame = find_possible_ding_di(frame)
    print("After find ding di: %s" % len(frame))
    frame = tag_bi_line(frame)
    # plot_data(data, single=True)
    frame = tag_duan_line(frame)
    plot_data(frame, single=True)
if __name__ == '__main__':
main()
| [
"liubin6@gmail.com"
] | liubin6@gmail.com |
12cf2c51c229d7b623d42256e911e7269940fea2 | 04e38597b547424b0d0b0db163372f8a68cc6f92 | /publichealth/home/migrations/0003_auto_20170223_0959.py | 486ad8cf15b2c2fd853a833161d6e0ff009d696c | [
"MIT",
"BSD-3-Clause"
] | permissive | datalets/public-health-ch | 9888dc2561377f024c77fb36a53022958f47a114 | 1cf81f055562afb6954c6b462447fed2957f1006 | refs/heads/master | 2022-11-24T05:30:08.594863 | 2022-11-17T13:30:26 | 2022-11-17T13:30:26 | 88,725,732 | 4 | 4 | NOASSERTION | 2022-11-09T23:49:26 | 2017-04-19T09:19:51 | Python | UTF-8 | Python | false | false | 4,824 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-23 08:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0032_add_bulk_delete_page_permission'),
('wagtailimages', '0018_remove_rendition_filter'),
('home', '0002_create_homepage'),
]
operations = [
migrations.CreateModel(
name='ArticleIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('title_fr', models.CharField(default='', max_length=255)),
],
options={
'verbose_name': 'Rubrik',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ArticlePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('title_fr', models.CharField(default='', max_length=255)),
('date', models.DateField(verbose_name='Date')),
('intro_de', wagtail.core.fields.RichTextField(default='')),
('intro_fr', wagtail.core.fields.RichTextField(default='')),
('body_de', wagtail.core.fields.StreamField([('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('section', wagtail.core.blocks.CharBlock(classname='full title'))], blank=True, null=True)),
('body_fr', wagtail.core.fields.StreamField([('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('section', wagtail.core.blocks.CharBlock(classname='full title'))], blank=True, null=True)),
('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'verbose_name': 'Artikel',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ArticleRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('name', models.CharField(max_length=255)),
('url', models.URLField()),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='home.ArticlePage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.AddField(
model_name='homepage',
name='body_de',
field=wagtail.core.fields.RichTextField(default=''),
),
migrations.AddField(
model_name='homepage',
name='body_fr',
field=wagtail.core.fields.RichTextField(default=''),
),
migrations.AddField(
model_name='homepage',
name='infos_de',
field=wagtail.core.fields.StreamField([('info', wagtail.core.blocks.StructBlock([(b'title', wagtail.core.blocks.CharBlock(required=True)), (b'photo', wagtail.images.blocks.ImageChooserBlock()), (b'summary', wagtail.core.blocks.RichTextBlock(required=True)), (b'action', wagtail.core.blocks.CharBlock()), (b'url', wagtail.core.blocks.URLBlock())]))], blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='infos_fr',
field=wagtail.core.fields.StreamField([('info', wagtail.core.blocks.StructBlock([(b'title', wagtail.core.blocks.CharBlock(required=True)), (b'photo', wagtail.images.blocks.ImageChooserBlock()), (b'summary', wagtail.core.blocks.RichTextBlock(required=True)), (b'action', wagtail.core.blocks.CharBlock()), (b'url', wagtail.core.blocks.URLBlock())]))], blank=True, null=True),
),
migrations.AddField(
model_name='homepage',
name='intro_de',
field=wagtail.core.fields.RichTextField(default=''),
),
migrations.AddField(
model_name='homepage',
name='intro_fr',
field=wagtail.core.fields.RichTextField(default=''),
),
]
| [
"oleg@utou.ch"
] | oleg@utou.ch |
b99d30b0d196ba1a95380cde7bcc023fc44bddf2 | a4de363190f23f5c7d61e743dcee9cfc674d055d | /testbed/tektronix-afg3252-function-generator/afgtest.py | b069b23be01d913ad360c8448bb6a8cd9838a273 | [
"MIT"
] | permissive | kaikai581/t2k-mppc-daq-deprecated | 150816beb3af39349392e6e87b7244422c1365ba | c4e792eaf6617e603671b04bab9a033a39b1e245 | refs/heads/master | 2022-12-06T09:44:13.042956 | 2020-08-27T18:13:07 | 2020-08-27T18:13:07 | 269,753,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | from AFG3252 import *
import argparse
import time
import socket
if __name__ == '__main__':
# Command line argument to use another IP
parser = argparse.ArgumentParser()
# Default IP address of the function generator assigned by me.
parser.add_argument('-i','--ip', help='IP address of the function generator', default='192.168.0.101', type=str)
args = parser.parse_args()
fg = socket.gethostbyname(args.ip)
print('Function generator on IP {0}...'.format(fg))
dev = AFG3252(fg)
dev.disableOutput(1)
dev.disableOutput(2)
dev.outputType('pulse')
dev.outputPolarity(1,'INVerted')
dev.setFrequency(7000)
#dev.setVoltage(1, "50mV")
dev.setVoltageHigh(1,"50mV")
dev.setVoltageLow(1,"0mV")
dev.setLeading(1, "4ns")
dev.setTrailing(1, "55us")
dev.enableOutput(1)
time.sleep(2)
dev.disableOutput(1)
| [
"kaikai581@hotmail.com"
] | kaikai581@hotmail.com |
f2bd15544ee3c84be073387e3bc96998892d8e21 | 5ca07360f2350a193251099b379d3105ee9ab566 | /projekt/urls.py | 920e304adbff0c395963a393211f70a8449fbb90 | [] | no_license | MateuszO99/Projekt | b4d4ebbd7af109e14a30566bc22e96b483223fdf | d1cb1206c1ab5a259c828574e9756204886d1432 | refs/heads/master | 2022-08-27T19:20:38.094470 | 2020-05-18T09:35:28 | 2020-05-18T09:35:28 | 247,076,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | """projekt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('video.urls')),
path('users/', include('users.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"315352@uwr.edu.pl"
] | 315352@uwr.edu.pl |
9e673189f7b3663b9f3c1004c0d52e8ed3aec3bb | 871c8b48a58b3e7dc7821e14bc451acb92dfe33e | /cms/migrations/0009_auto_20160308_1456.py | e5cf4eb94dfd3f19f01071c28af85f5df2715bea | [
"BSD-3-Clause"
] | permissive | sonsandco/djangocms2000 | 6f3937e2185707c32f15e5e42d06e138751d85e4 | 25131e9e8659a7a30a8fd58b7da011cbb928c8ac | refs/heads/master | 2022-08-25T22:18:17.173639 | 2022-08-17T11:36:36 | 2022-08-17T11:36:36 | 121,998,739 | 0 | 0 | NOASSERTION | 2022-07-24T05:16:48 | 2018-02-18T23:00:47 | Python | UTF-8 | Python | false | false | 1,935 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-08 01:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0008_auto_20150216_1649'),
]
operations = [
migrations.AddField(
model_name='block',
name='language',
field=models.CharField(choices=[('en', 'English'), ('ja', 'Japanese'), ('fr', 'French'), ('es', 'Spanish'), ('pt', 'Portuguese')], default='en-us', max_length=5),
),
migrations.AlterField(
model_name='block',
name='content',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='block',
name='format',
field=models.CharField(choices=[('attr', 'Attribute'), ('plain', 'Plain text'), ('html', 'HTML')], default='plain', max_length=10),
),
migrations.AlterField(
model_name='image',
name='file',
field=models.ImageField(blank=True, upload_to='cms/%y_%m'),
),
migrations.AlterField(
model_name='page',
name='is_live',
field=models.BooleanField(default=True, help_text='If this is not checked, the page will only be visible to logged-in users.'),
),
migrations.AlterField(
model_name='page',
name='template',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='page',
name='url',
field=models.CharField(db_index=True, help_text='e.g. /about/contact', max_length=255, verbose_name='URL'),
),
migrations.AlterUniqueTogether(
name='block',
unique_together=set([('content_type', 'object_id', 'language', 'label')]),
),
]
| [
"gregplaysguitar@gmail.com"
] | gregplaysguitar@gmail.com |
6202d9a57108677f07b79083de4263b4ff9620f9 | 79df6b960b2902dc04fad41ef40a48d9305de625 | /setup.py | ddbf6c0698a7800d1bed1f6497413d3ee5cff2c3 | [] | no_license | dagheyman/rovarsprak | 70fc3e14010823bf8e6db58b9689574826127e02 | 7de2e4b63872b2d1b9534f92d585855a6b573775 | refs/heads/master | 2021-01-19T06:36:25.129869 | 2016-08-03T16:05:19 | 2016-08-03T16:05:19 | 64,612,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | from setuptools import setup
setup(name='rovarsprak',
version='0.1',
test_suite='test')
| [
"dag@yubico.com"
] | dag@yubico.com |
1c8af3684fa3376ab710147e87f4ca6dae8accac | b83bd5b9b0403223e6a1fbd370dbe74091835534 | /helper_scripts/gmx_msd.py | 05d668e1def26c9240d2b5e7b2073e0fc8de6b66 | [] | no_license | eboek/AlkaneStudy.Gromacs | e7403c2de9a16c7df3f7927952c773af8748346c | 88ac8d2248a8638f34ec5106bef0a549d99a3620 | refs/heads/master | 2020-09-03T16:49:12.200265 | 2019-11-01T14:40:29 | 2019-11-01T14:40:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | # subprocess module is used to call gromacs commands
import subprocess
import re
startTime = '1000' # picoseconds (as a string)
gmxCmd = 'gmx_d' # Usually 'gmx', but 'gmx_d' for double precision
# Set filenames
xtcFile = 'xtc_'+suffix+'.xtc'
tprFile = 'tpr_'+suffix+'.tpr'
# Call again to extract properties
shiftStrs = ['0.010', '0.012'] #, '0.014', '0.016', '0.018', '0.020', '0.022', '0.024', '0.026', '0.028', '0.030']
for shift in shiftStrs:
suffix = 'NPT_sim_'+shift
xtcFile = 'xtc_'+suffix+'.xtc'
tprFile = 'tpr_'+suffix+'.tpr'
msdGet = subprocess.Popen([gmxCmd, 'msd', '-f', xtcFile, '-s', tprFile, '-b', startTime], stdin = subprocess.PIPE)
msd_stdout, msd_err = msdGet.communicate(b'0\n')
msdText = msd_err.decode('utf-8')
msdTextArr = msdText.splitlines()
print(msdTextArr[-2])
# Use regular expression to extract diffucion coeff: D[ System] 2.0968 (+/- 0.0391) 1e-5 cm^2/s
msdGet.terminate() | [
"stephen.burrows@hotmail.com"
] | stephen.burrows@hotmail.com |
66922cde445ad633e85b3360be8a3feddb8824b6 | 2a98d60ee861cc2e5003348c0903b2eebc8b4e77 | /GUI_Tests/first_gui.py | 48785eb3898e5afdaafb015d3893adcc0d996ee7 | [] | no_license | ljkeller/Sorting-Visualization | f68abc4a9ff37435230b18fd58465f7f13659a7d | 80d6bb546db5ea167494cb813d37fe09c7bceffa | refs/heads/master | 2020-11-27T23:51:59.122242 | 2019-12-26T23:41:27 | 2019-12-26T23:41:27 | 229,652,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,876 | py | import math
import random
import heapq
import PySimpleGUI as sg
CANVAS_HEIGHT = 820
CANVAS_WIDTH = 1400
NUM_ELEMENTS = 175
BLOCK_WIDTH = int(CANVAS_WIDTH / NUM_ELEMENTS)
COUNTER = 0
def new_dataset():
return [random.randint(0, CANVAS_HEIGHT) for i in range(NUM_ELEMENTS)]
def clear_graph(graph, graph_elements):
for i in range(len(graph_elements)):
graph.DeleteFigure(graph_elements[i])
graph_elements.clear()
def merge_sort(graph, boxes, xs):
# This is an interative, in place merge sort. Using an im-place merge
# sort allows me to easily manage drawing the rectangles. I DID NOT
# WRITE THIS. Credit to:
# https://gist.github.com/m00nlight/a076d3995406ca92acd6
# In the future, I may write a new helper method to avoid using an
# in-place merge, but for now, this works.
# The likely best solution is using the "generated" array as the "extra"
# array space, but the biggest difficulty is graphing all of the blocks
# easily
unit = 1
while unit <= len(xs):
h = 0
for h in range(0, len(xs), unit * 2):
l, r = h, min(len(xs), h + 2 * unit)
mid = h + unit
# merge xs[h:h + 2 * unit]
p, q = l, mid
while p < mid and q < r:
# use <= for stable merge merge
if xs[p] <= xs[q]:
p += 1
else:
tmp = xs[q]
xs[p + 1: q + 1] = xs[p:q]
xs[p] = tmp
p, mid, q = p + 1, mid + 1, q + 1
draw_boxes_and_read(graph, boxes, xs, 30)
unit *= 2
def sift_down(graph, rectangles, arr: list, node: int, n: int) -> None:
"""
A log(h) time function to be used on an array node to heapify a subtree.
Works bottom-up, as in parents smaller than children will swap,
and iteratively continues.
:param arr: Array to be sifted through
:param node: Current parent node index of interest
:param n: Size of array/tree
"""
index = node
if index >= n or index*2 + 1 >= n:
return
has_left_child = True
left_child = arr[index * 2 + 1]
has_right_child = True if index * 2 + 2 < n else False
if has_right_child:
right_child = arr[index * 2 + 2]
else:
right_child = None
while has_left_child and arr[index] < arr[index * 2 + 1] or (
has_right_child and arr[index] < arr[index * 2 + 2]):
if has_left_child and has_right_child:
max_child_index = 2 * index + 1 if left_child >= right_child \
else 2 * index + 2
else:
max_child_index = 2 * index + 1
arr[max_child_index], arr[index] = arr[index], arr[max_child_index]
index = max_child_index
draw_boxes_and_read(graph, rectangles, arr, 30)
if index*2 + 1 >= n:
return
else:
left_child = arr[index*2 + 1]
has_right_child = True if index * 2 + 2 < n else False
if has_right_child:
right_child = arr[index * 2 + 2]
def heapify(graph, boxes, arr: list) -> None:
"""
A linear time, in place function for creating a max-heap
:param arr: Array to be turned into a max-heap
:param graph: Graph object we are displaying to
:param boxes: Previous collection of graphed elements
"""
for i in range(NUM_ELEMENTS - 1, -1, -1):
sift_down(graph, boxes, arr, i, NUM_ELEMENTS)
def quick_sort(graph, boxes, arr, left, right):
if left >= right:
return
p = partition(graph, boxes, arr, left, right)
draw_boxes_and_read(graph, boxes, arr, 100)
quick_sort(graph, boxes, arr, left, p - 1)
quick_sort(graph, boxes, arr, p + 1, right)
def partition(graph, boxes, arr, left, right):
pivot = arr[left]
i, j = left + 1, right
while True: # consider even or odd length lists
while i <= j and arr[i] <= pivot:
i += 1
while i <= j and arr[j] >= pivot:
j -= 1
if i <= j:
arr[j], arr[i] = arr[i], arr[j]
draw_boxes_and_read(graph, boxes, arr, 30)
else:
break
arr[left], arr[j] = arr[j], arr[left] # swap pivot in
draw_boxes_and_read(graph, boxes, arr, 30)
return j
def draw_boxes(graph, rectangles, elements):
clear_graph(graph, rectangles) # clear graph elements & clear their IDs
for i in range(len(elements)): # When appending to list, allows us to
# save
# all figures
rectangles.append(graph.DrawRectangle((i * BLOCK_WIDTH, elements[i]),
(i * BLOCK_WIDTH + BLOCK_WIDTH,
0),
fill_color='black',
line_color='white'))
def draw_boxes_and_read(graph, rectangles, elements, timeout):
draw_boxes(graph, rectangles, elements)
window.read(timeout)
sg.ChangeLookAndFeel('DarkAmber')
layout = [
[sg.Graph(canvas_size=(CANVAS_WIDTH, CANVAS_HEIGHT),
graph_bottom_left=(0, 0),
graph_top_right=(CANVAS_WIDTH, CANVAS_HEIGHT),
background_color='grey',
key='graph')],
[sg.T('Generate, and select sorting method:'), sg.Button(
'Generate'), sg.Button('Clear'), sg.Button('Bubble Sort'),
sg.Button('Insertion Sort'), sg.Button('Selection Sort'), sg.Button(
'Quick Sort'), sg.Button('Merge Sort'), sg.Button('Radix Sort'),
sg.Button('Heap Sort')]
]
window = sg.Window('Sorting Visualization', layout)
window.Finalize()
graph = window['graph']
boxes = []
while True:
event, values = window.read()
if event == None:
break
if event == 'Bubble Sort':
swap = True
for i in range(len(generated)):
swap = False
for j in range(len(generated) - i - 1):
if generated[j] > generated[j + 1]:
generated[j], generated[j + 1] = generated[j + 1], \
generated[j]
swap = True
draw_boxes_and_read(graph, boxes, generated, 3)
if not swap:
break
draw_boxes_and_read(graph, boxes, generated, 30)
elif event == 'Clear':
COUNTER = 0
clear_graph(graph, boxes)
elif event == 'Insertion Sort':
for i in range(len(generated)):
insert = generated[i]
j = i - 1
while j >= 0 and insert < generated[j]: # right to left, stable
generated[j + 1] = generated[j]
draw_boxes_and_read(graph, boxes, generated, 30)
j -= 1
generated[j + 1] = insert
draw_boxes_and_read(graph, boxes, generated, 30)
elif event == 'Selection Sort':
for i in range(len(generated)):
min_index = i
for j in range(i + 1, len(generated)):
if generated[min_index] > generated[j]:
min_index = j
generated[min_index], generated[i] = generated[i], \
generated[min_index]
draw_boxes_and_read(graph, boxes, generated, 30)
elif event == 'Merge Sort':
merge_sort(graph, boxes, generated)
elif event == 'Heap Sort':
heapify(graph, boxes, generated)
for i in range(NUM_ELEMENTS-1, 0, -1):
generated[0], generated[i] = generated[i], generated[0]
sift_down(graph, boxes, generated, 0, i)
elif event == 'Radix Sort':
num_digits = math.ceil(math.log(CANVAS_HEIGHT, 10))
count_sort = [[] for i in range(10)]
for digit in range(num_digits):
for i in range(NUM_ELEMENTS):
remainder = int((generated[i] // math.pow(10, digit))) % 10
count_sort[remainder].append(generated[i])
flat = [leaves for tree in count_sort for leaves in tree]
draw_boxes_and_read(graph, boxes, flat + generated[i:], 30)
generated[:] = [leaves for tree in count_sort for leaves in tree]
for digit_sublist in count_sort:
digit_sublist.clear()
draw_boxes_and_read(graph, boxes, generated, 100)
elif event == 'Quick Sort':
quick_sort(graph, boxes, generated, 0, NUM_ELEMENTS - 1)
elif event == 'Generate':
COUNTER = 0 # reset counter
generated = new_dataset()
draw_boxes(graph, boxes, generated)
# elif event == 'Iterate':
# COUNTER += 1
# # graph.MoveFigure(boxes[0], 10,10)
# graph.TKCanvas.itemconfig(boxes[COUNTER], fill="red")
# if (COUNTER >= 1):
# graph.TKCanvas.itemconfig(boxes[COUNTER - 1], fill="black")
| [
"kellerjlucas@gmail.com"
] | kellerjlucas@gmail.com |
f34b8ffa6417786f7e33668349eb60cae8dfed9a | dadd2955aad6e9435e1fd168301eb54edc668e3a | /hello_world_python_django/hello_world_python_django/wsgi.py | b080316525d197f32a8254bfd9e79ad6d9e7e0f5 | [] | no_license | trackness/hello-world-python-django | a73921275d11deb73917b09397ef601b570d6e28 | 4b8d3e9c3d93a28f2638b7cb62e3442637e30c98 | refs/heads/master | 2022-12-02T01:50:14.418745 | 2020-08-15T18:25:50 | 2020-08-15T18:25:50 | 287,792,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for hello_world_python_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hello_world_python_django.settings')
application = get_wsgi_application()
| [
"40263851+trackness@users.noreply.github.com"
] | 40263851+trackness@users.noreply.github.com |
6e0472e4471dd8faeb439276a7fdb64178bb84d7 | d5496ee23dccc763d4f8b1e8ae7e1b29d21c18ce | /core/api/serializers.py | b0d47ae0396e67afe7a38d8989aad7373b8a3b15 | [] | no_license | And3rson-Paiva/pontos_turisticos | c685097c3711043eb0fd46459531187feb60d70d | f2ee3cca95234ffc1e21e36437e553c1e7c0cfef | refs/heads/master | 2022-12-27T15:32:33.038773 | 2020-10-16T15:57:20 | 2020-10-16T15:57:20 | 304,116,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from rest_framework.serializers import ModelSerializer
from core.models import PontoTuristico
class PontoTuristicoSerializer(ModelSerializer):
class Meta:
model = PontoTuristico
fields = ('id', 'nome', 'descricao')
| [
"and3rsonpaiva@gmail.com"
] | and3rsonpaiva@gmail.com |
dc8acecc08120972247d355d7d49943cc3d80b05 | 5ed35806523dd5587cb2dc31e55aa086dda87801 | /dataanalysis/modules/plot/GainAnimation.py | 72de0c0b884ea747a5f2ceaacb7c4a60adba299f | [] | no_license | oscarmartinezrubi/ledama | 97d3e188432e42f37dd0229080a8e0750acbf128 | 75fca624dfd38e58af32f23a96113b04248cc251 | refs/heads/master | 2020-12-31T04:16:59.640490 | 2016-09-16T12:04:12 | 2016-09-16T12:04:12 | 68,370,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56,061 | py | ################################################################################
# Created by Oscar Martinez #
# martinez@astro.rug.nl #
################################################################################
import os, time, multiprocessing, sys
from multiprocessing import Pipe
from multiprocessing.managers import SyncManager
from ledama import utils
from ledama import diagoperations
from ledama.LModule import LModule
from ledama.LModuleOptions import LModuleOptions
from ledama.DiagnosticFile import DiagnosticFile
from ledama.MovieInfoFile import MovieInfoFile
from ledama.leddb.Connector import DEF_DBNAME, DEF_DBHOST, Connector
from ledama.leddb.query.QueryManager import QueryManager
from ledama.leddb.Naming import GAIN_KEY, GAIN, ID, STATION, MS, VALUES, SBINDEX,\
CENTFREQ, LDSBP, BW, TSTEP, FSTEP
import numpy as np
import matplotlib
matplotlib.use('Agg')
import pylab
# Valid values for the animation x-axis
TIME = 'time'
FREQ = 'freq'
# Default channel selection string: "initIndex,endIndex,step"
DEFAULT_CHANNELS = '0,0,1'
# Default y-range strings: "min0,max0,min1,max1" for the two plotted
# coordinates, i.e. (amp, phase) in polar mode or (real, imag) in cartesian
DEFAULT_POLAR_YRANGE = '0,0.0025,-3.14,3.14'
DEFAULT_CARTESIAN_YRANGE = '-0.0025,0.0025,-0.0025,0.0025'
# Shared secret for the multiprocessing SyncManager server/client connections
AUTH_KEY = 'animationdistributionkey'
DEF_OUTPUT_INFO_NAME = 'INFO'
# Number of plotted coordinates per solution: (amp, phase) or (real, imag)
COORD_NUM = 2
# Default figure size in inches, as "width,height"
FIG_SIZE = '12,7'
# MESSAGE FROM WORKERS (CHILDREN IN CLIENTS)
MESS_TYPE_WORKER_OK_GAIN = 0
MESS_TYPE_WORKER_KO_GAIN = 1
MESS_TYPE_WORKER_SEND_GAIN = 2
MESS_TYPE_WORKER_IMAGE_CREATED = 3
MESS_TYPE_WORKER_END = 4
# MESSAGE FROM CLIENTS
MESS_TYPE_CLIENT_END = 5
def setLabels(axis, polar, antIndex, cordIndex, xLabel):
    """ Set the axis labels on a pylab sub-plot.

    Labels are only drawn for the first antenna panel (bottom-left);
    the top row (cordIndex != 0) only gets a y-label. In polar mode the
    two coordinates are (Amp, Pha), otherwise (Real, Imag)."""
    if antIndex != 0:
        # Only the first antenna panel carries labels
        return
    if cordIndex == 0:
        axis.set_xlabel(xLabel)
        axis.set_ylabel("Amp" if polar else "Real")
    else:
        axis.set_ylabel("Pha" if polar else "Imag")
def getGain(clientMSId, gainIds, gainPartTable, sbCentFreq, sbBW, freqStep, timeStep, stations, jonesElements, polar, tIndexInit, tIndexEnd, tIndexStep, cIndexInit, cIndexEnd, cIndexStep, timeout, dbname, dbuser, dbhost):
"""Get the gain solutions indicated by gainIds from the LEDDB"""
try:
connection = Connector(dbname, dbuser, dbhost).getConnection()
qm = QueryManager()
queryOption = GAIN_KEY
names = [STATION, VALUES]
qc = qm.initConditions()
qm.addCondition(queryOption, qc, MS+ID, clientMSId, '=')
qm.addCondition(queryOption, qc, GAIN+ID, tuple(gainIds))
(query, queryDict) = qm.getQuery(queryOption, qc, names, [STATION,])
#query = query.replace(GAIN+' ', gainPartTable + ' ').replace(GAIN+'.', '') # trick to directly query to the partition that has the data
cursor = connection.cursor()
print query
print qm.executeQuery(connection, cursor, query, queryDict, True, timeout)
qm.executeQuery(connection, cursor, query, queryDict, False, timeout)
(Gains, times, freqs) = (None,None,None)
for row in cursor:
rowDict = qm.rowToDict(row, names)
stationIndex = stations.index(rowDict.get(STATION))
# values is corr,freq,time,cord
rowValues = rowDict.get(VALUES)
jonesValues = []
for jones in jonesElements:
jonesValues.append(rowValues[jones])
# jonesValues is corr,channel,times,cord(real,imag) (only the select jones)
rowGains = utils.convertValues(jonesValues, np.complex64)
# rowGains is corr,channel,times (with numpy.complexs)
if Gains == None:
# Theoretically Gains will be None in the first row. So, then we
# read this information since, in principle, it is the same
# for all the rows. We also create the large Gain matrix
numAllCorrs = len(rowGains)
numAllFreqs = len(rowGains[0])
numAllTimes = len(rowGains[0,0])
# Compute the initial frequency of the channels
allfreqs = (np.array(range(0,numAllFreqs)) * float(freqStep)) + (sbCentFreq - (sbBW / 2.))
if cIndexEnd == -1 or cIndexEnd > numAllFreqs:
cIndexEnd = numAllFreqs
else:
cIndexEnd += 1
freqs = allfreqs[cIndexInit:cIndexEnd:cIndexStep] # Get only the requested channels
Nf = len(freqs)
# Compute the time samples relative to initial time of observing
alltimes = np.array(range(0,numAllTimes)) * float(timeStep)
if tIndexEnd == -1 or tIndexEnd > numAllTimes:
tIndexEnd = numAllTimes
else:
tIndexEnd += 1
times = alltimes[tIndexInit:tIndexEnd:tIndexStep]
Nt = len(times)
# Create a full matrix with zeros for all Gains solutions to be read
Gains = np.zeros((len(stations),numAllCorrs,Nf,Nt),dtype=np.complex64)
# We fill in the matrix the part related to the current station
Gains[stationIndex,:,:,:] = rowGains[:,cIndexInit:cIndexEnd:cIndexStep,tIndexInit:tIndexEnd:tIndexStep]
cursor.close()
connection.close()
if polar:
Gains = np.array((np.abs(Gains),np.angle(Gains)))
else:
Gains = np.array((np.real(Gains),np.imag(Gains)))
# Gains is cord, station, jones, freq, times
Gains = Gains.transpose((3,4,1,0,2))
# We return freq, times, station, cord, jones
return (Gains, times, freqs)
except Exception,e:
print 'ERROR getting gain: ' + str(e)
sys.stdout.flush()
return (None,None,None)
def runPlotWorker(workerName, sharedJobQueue, sharedResultQueue, stations, outputFolder, times, polar, refStationIndex, acoord, margx, margy, ylim, dx, dy, delay, figSize):
    """ Plot-Worker that is executed in remote clients when xaxis==freq.
    They have an input queue where to get the data to plot.
    And, also a output queue where to indicate what they have done.
    The images are stored in the outputFolder (which should be a directory
    shared by all the involved machines)"""
    # Get number of stations and the refStation name
    numStations = len(stations)
    refStation = stations[refStationIndex]
    # Initialize the min and max freqs (assigned when the first job arrives)
    (minFreq,maxFreq) = (None, None)
    # Initialize the figure
    fig = pylab.figure(figsize=figSize)
    # Initialize the variable that will indicate when the killing-job is received
    kill_received = False
    while not kill_received:
        job = None
        try:
            job = sharedJobQueue.get() # wait until new job is available
            if job is None:
                # A None job is the sentinel meaning this worker can stop
                kill_received = True
                log(workerName + ' kill received!')
            else:
                ti, tgains, freqs = job # tgains dimensions are time, station, cord, jones, freq
                if minFreq is None:
                    (minFreq,maxFreq) = (float(np.min(freqs)/1e6),float(np.max(freqs)/1e6))
                # For all the times indexes (relative to ti) we create images
                for tir in range(len(tgains)):
                    # Clean the figure
                    pylab.clf()
                    fig.clf()
                    imageName = outputFolder + ('/img%06d.png' % (ti+tir))
                    Jones2Ref = None
                    if polar:
                        # Phase of the reference station, subtracted from all
                        # stations so phases are relative to refStation
                        Jones2Ref = tgains[tir,refStationIndex,1]
                    for antIndex in range(numStations):
                        # NOTE(review): this is a view into tgains, so the
                        # in-place phase corrections below modify the job data;
                        # each time index is plotted only once, so this is safe
                        # here — verify if jobs are ever reused.
                        stationGains = tgains[tir,antIndex]
                        if polar:
                            # Reference, remove the station delay term and
                            # wrap the phase into [-pi, pi)
                            stationGains[1] -= Jones2Ref
                            stationGains[1] -= 2 * np.pi * freqs * 1e6 * delay[stations[antIndex]]
                            stationGains[1] = np.remainder(stationGains[1],2*np.pi) - np.pi
                        a = None
                        addposy = [margy, dy]
                        # for (Real and Imag) or (Ampl and Phase)
                        for cordIndex in range(COORD_NUM):
                            # we create a sub-plot
                            a = fig.add_axes([acoord[antIndex,0]+margx, acoord[antIndex,1]+addposy[cordIndex], dx-2.*margx,dy-margy])
                            # for each element in the jones matrix (that we selected to plot)
                            for j in range(len(stationGains[cordIndex])):
                                a.plot(freqs,stationGains[cordIndex,j,:],marker='.',ls='',mew=0.1,ms=1.5,color='black')
                            pylab.setp(a, xticks=[], yticks=[],ylim=ylim[cordIndex])
                            setLabels(a, polar, antIndex, cordIndex, "Freq")
                        a.title.set_text(stations[antIndex])
                        a.title.set_fontsize(9)
                    # Set general title
                    imageTitle = "t=%.1f, Freqs=[%5.1f, %5.1f]MHz" % (times[(ti+tir)],minFreq,maxFreq)
                    if polar:
                        imageTitle += ", RefStation=%s" % refStation
                    a.annotate(imageTitle, xy=(0.5, 0.97), xycoords='figure fraction', horizontalalignment="center")
                    # Create the image
                    fig.savefig(imageName)
                    log(workerName + ' ' + imageName + ' created')
                    # Put in the output queue that we created a new image
                    sharedResultQueue.put([MESS_TYPE_WORKER_IMAGE_CREATED,])
        except:
            # Any error quits the generation of pictures. Log what happened
            # instead of swallowing it silently (the original gave no clue why
            # a worker died). Catch-all on purpose so the worker always exits
            # cleanly and sends its END message below.
            log(workerName + ' ERROR: ' + str(sys.exc_info()[1]))
            kill_received = True
    sharedResultQueue.put([MESS_TYPE_WORKER_END,])
    log(workerName + ' Exiting...')
def runReadWorker(workerName, recvJobPipeRW, sharedResultQueue, clientMSId, msIdData, stations, jonesElements, polar, tIndexInit, tIndexEnd,tIndexStep, cIndexInit,cIndexEnd,cIndexStep, chunkSize, timeout, dbname, dbuser, dbhost, dbinterval):
    """ Read Worker that is executed in remote clients when xaxis=freq.
    First it loads its related SB and then, under request, it sends the
    chunks of data to the main server
    """
    (gainIds, (gainPartTable, sbCounter, sbIndex, sbCentFreq, sbBW, timeStep , freqStep)) = msIdData
    try:
        # Stagger the DB queries so all read-workers do not hit LEDDB at once
        time.sleep(sbCounter*dbinterval)
        # Loads the gains (dimensions are freq, time, station, cord, jones)
        (gains, times, freqs) = getGain(clientMSId, gainIds, gainPartTable, sbCentFreq, sbBW, freqStep, timeStep, stations, jonesElements, polar, tIndexInit, tIndexEnd,tIndexStep, cIndexInit,cIndexEnd,cIndexStep,timeout, dbname, dbuser, dbhost)
        # Check if data was successfully loaded. "is None" is required: on
        # success gains is a numpy array and "== None" would be an ambiguous
        # elementwise comparison.
        if gains is None:
            message = workerName + ' could not load gains from LEDDB for MS ID ' + str(clientMSId)
            sharedResultQueue.put([MESS_TYPE_WORKER_KO_GAIN, message,],)
        else:
            # Send message that we are done reading the gains, so we are ready to starting
            # the plotters and the queries for time chunks
            message = workerName + ' loaded gains from LEDDB for MS ID ' + str(clientMSId) + (' (%5.1f MHz' % float(sbCentFreq / 1e6)) + ')'
            sharedResultQueue.put([MESS_TYPE_WORKER_OK_GAIN, message, clientMSId, sbIndex, freqs, times])
            log(message)
            # Next part is not executed until server has info from all SBs
            continueRecv = True
            while continueRecv:
                data = recvJobPipeRW.recv() # we recieve message from the main client process that the server is asking for cerain chunk
                initTimeIndex = data['TIMEINDEX']
                # A negative time index is the sentinel to stop serving chunks
                if (gains is not None) and (initTimeIndex >= 0):
                    endTimeIndex = initTimeIndex + chunkSize
                    if endTimeIndex>=len(times):
                        endTimeIndex = len(times)
                    # we get the gains solutions for the requested time chunk
                    gainsToSend = gains[:,initTimeIndex:endTimeIndex]
                    log(workerName + ' sending data for t=[' + str(initTimeIndex) + ',' + str(endTimeIndex) + ') ALL: ' + str(gains.shape) + ', SEND: ' + str(gainsToSend.shape))
                    sharedResultQueue.put([MESS_TYPE_WORKER_SEND_GAIN,gainsToSend, freqs],)
                else:
                    continueRecv = False
    except Exception as e:
        log(workerName + ' ERROR: ' + str(e))
    # Always announce termination so the server does not wait forever
    sharedResultQueue.put([MESS_TYPE_WORKER_END,])
    log(workerName + ' exiting...')
def runWorker(workerName, node, clientJobQueue, sharedResultQueue, stations, jonesElements, polar, tIndexInit, tIndexEnd, tIndexStep, cIndexInit, cIndexEnd, cIndexStep, refStationIndex, yRange, delay, outputFolder, figSize, timeout, dbname, dbuser, dbhost, dbinterval):
    """ Worker that is executed in the remote clients when xaxis==time.
    They get from the queue the SB, and for each one they create their
    related images"""
    kill_received = False
    numStations = len(stations)
    refStation = stations[refStationIndex]
    # Time range labels for the image titles, assigned on the first loaded SB
    (minTime,maxTime) = (None, None)
    while not kill_received:
        job = None
        try:
            job = clientJobQueue.get() # wait until new job is available
            if job is None:
                # A None job is the sentinel meaning this worker can stop
                kill_received = True
                log(workerName + ' kill received!')
            else:
                [clientMSId, msIdData] = job
                (gainIds, (gainPartTable, sbCounter, sbIndex, sbCentFreq, sbBW, timeStep , freqStep)) = msIdData
                # Stagger the DB queries so workers do not hit LEDDB at once
                time.sleep(sbCounter*dbinterval)
                # Loads the gains (freq, time, station, cord, jones)
                (gains, times, freqs) = getGain(clientMSId, gainIds, gainPartTable, sbCentFreq, sbBW, freqStep, timeStep, stations, jonesElements, polar, tIndexInit, tIndexEnd,tIndexStep, cIndexInit,cIndexEnd,cIndexStep,timeout, dbname, dbuser, dbhost)
                # "is None" is required: on success gains is a numpy array and
                # "== None" would be an ambiguous elementwise comparison
                if gains is None:
                    message = workerName + ' could not load gains from LEDDB for MS ID ' + str(clientMSId)
                    log(message)
                    sharedResultQueue.put([MESS_TYPE_WORKER_KO_GAIN, message,],)
                else:
                    if minTime is None:
                        (minTime,maxTime) = (times[0],times[-1])
                    # Initialize the figure and get layout parameters
                    log(workerName + ' loaded gains from LEDDB for MS ID ' + str(clientMSId) + (' (%5.1f MHz' % float(sbCentFreq / 1e6)) + '). Starting plotting...')
                    (acoord, margx, margy, ylim, dx, dy) = getPlotArgs(numStations, yRange)
                    addposy = [margy, dy]
                    fig = pylab.figure(figsize=figSize)
                    # For all the channel indexes we create images
                    for ci in range(len(gains)):
                        # Clean the figure
                        pylab.clf()
                        fig.clf()
                        imageName = outputFolder + ('/timg%03d%03d.png' % (sbIndex,ci))
                        Jones2Ref = None
                        if polar:
                            # Phase of the reference station for this channel
                            Jones2Ref = gains[ci,:,refStationIndex,1]
                        for antIndex in range(numStations):
                            stationName = stations[antIndex]
                            stationGains = gains[ci,:,antIndex] #stationGains is time, cord, jones
                            if polar:
                                # Reference, remove the station delay term and
                                # wrap the phase into [-pi, pi)
                                stationGains[:,1] -= Jones2Ref
                                stationGains[:,1] -= 2 * np.pi * freqs[ci] * 1e6 * delay[stationName]
                                stationGains[:,1] = np.remainder(stationGains[:,1],2*np.pi) - np.pi
                            a = None
                            # for (Real and Imag) or (Ampl and Phase)
                            for cordIndex in range(COORD_NUM):
                                # we create a sub-plot
                                a = fig.add_axes([acoord[antIndex,0]+margx, acoord[antIndex,1]+addposy[cordIndex], dx-2.*margx,dy-margy])
                                # for each element in the jones matrix (that we selected to plot)
                                for j in range(len(stationGains[0][cordIndex])):
                                    a.plot(times,stationGains[:,cordIndex,j],marker='.',ls='',mew=0.1,ms=1.5,color='black')
                                pylab.setp(a, xticks=[], yticks=[],ylim=ylim[cordIndex])
                                setLabels(a, polar, antIndex, cordIndex, "Time")
                            a.title.set_text(stationName)
                            a.title.set_fontsize(9)
                        # Set general title
                        imageTitle = "f=%.1fMHz, Times=[%5.1f, %5.1f]s" % (freqs[ci]/1e6,minTime,maxTime)
                        if polar:
                            imageTitle += ", RefStation=%s"%refStation
                        a.annotate(imageTitle, xy=(0.5, 0.97), xycoords='figure fraction', horizontalalignment="center")
                        # Save the image
                        fig.savefig(imageName)
                        log(workerName + ' ' + imageName + ' created')
                        # Put in the output queue that we created a new image
                        sharedResultQueue.put([MESS_TYPE_WORKER_IMAGE_CREATED,])
                    # Send message to indicate we are done with this SB
                    sharedResultQueue.put([MESS_TYPE_WORKER_OK_GAIN, clientMSId, freqs[ci], sbIndex, freqs])
        except:
            # Any error quits the generation. Log what happened instead of
            # swallowing it silently; catch-all on purpose so the worker
            # always exits cleanly and sends its END message below.
            log(workerName + ' ERROR: ' + str(sys.exc_info()[1]))
            kill_received = True
    sharedResultQueue.put([MESS_TYPE_WORKER_END,])
    log(workerName + ' exiting...')
def runClient(snode, port):
    """ The function which is run in each remote client.
    First of all, from the recv_job_p pipe the client receives the info on
    which (and how) gains solutions (measurement set) to query from LEDDB.
    Then, depending on xaxis:
    1 - if xaxis == time, we create processes in charge of both querying
    and plotting. When they are done, they will the server know and the
    server will send message to main client process indicating that it can
    terminate its execution.
    2 - if xaxis == freqs, we create other processes (read-workers)
    in charge of querying the data. There are as many read-workers as SBs
    assigned to this node. Then these new read-workers load in local memory
    the gains solutions and let the server know that they are ready to start
    further processing. Then, the main client process will create other
    processes in charge of creation of images (plot-workers). Third, it
    remains listening to the recv_job_p where the server will query for
    chunks of the loaded gains and forward these info to read-workers.
    When all the gains solutions have been sent (under server request) and
    all the plot-workers have finished their plotting tasks the life of
    this main process is over

    snode: host name of the server node to connect to.
    port: TCP port (int or str) where the server SyncManager listens.
    """
    # Connect to the server-side SyncManager to obtain the shared pipes/queues
    manager = makeClientManager(snode, int(port), AUTH_KEY)
    node = utils.getHostName()
    print 'Getting job receiving pipe'
    recvJobPipe = manager.get_job_p_r(node)
    print 'Getting queues'
    sharedResultQueue = manager.get_result_q()
    # First data we receive is the info (a dict describing what to query/plot)
    print 'Getting initial data...'
    data = recvJobPipe.recv()
    outputFolder = data['OUTPUT']
    mssDict= data['MSDICT']
    numWorkers = data['NUMWORKERS']
    xaxis = data['XAXIS']
    jonesElements = data['JONES']
    stations = data['STATIONS']
    refStationIndex = data['REFSTATIONINDEX']
    polar = data['POLAR']
    timeslots = data['TIMES']
    channelsslots = data['CHANNELS']
    yRange = data['YRANGE']
    chunkSize = data['TIMECHUNKSIZE']
    delay = data['DELAYS']
    figSize = data['FIGSIZE']
    timeout = data['TIMEOUT']
    dbname = data['DBNAME']
    dbuser = data['DBUSER']
    dbhost = data['DBHOST']
    dbinterval = data['DBINTERVAL']
    # Unpack the [start, end, step] index triples for times and channels
    tIndexInit,tIndexEnd,tIndexStep = timeslots
    cIndexInit,cIndexEnd,cIndexStep = channelsslots
    workers = []
    workersNames = []
    if xaxis == TIME:
        # Create queue in client node to communicate with workers
        clientJobQueue = multiprocessing.Queue()
        # One job per measurement set; workers pick them up until they hit a None
        for clientMSId in mssDict:
            clientJobQueue.put([clientMSId,mssDict[clientMSId]])
        for i in range(numWorkers):
            workersNames.append(node + ('-worker%02d' % i))
            clientJobQueue.put(None) # we put a None in the job for each worker (they are used to indicate them to finish)
            workers.append(multiprocessing.Process(target=runWorker,
                args=(workersNames[-1], node, clientJobQueue, sharedResultQueue, stations, jonesElements, polar,
                    tIndexInit, tIndexEnd, tIndexStep, cIndexInit, cIndexEnd, cIndexStep,
                    refStationIndex, yRange, delay, outputFolder, figSize, timeout, dbname, dbuser, dbhost, dbinterval)))
            workers[-1].start()
    else: # xaxis == FREQ
        sharedJobQueue = manager.get_job_q()
        numReadWorkers = len(mssDict)
        # NOTE(review): relies on Python 2 list-returning dict.keys(); under
        # Python 3 this would need list(mssDict.keys()) before indexing below.
        clientMSIds = mssDict.keys()
        numPlotWorkers = numWorkers
        log('Starting ' + str(numReadWorkers) + ' read-workers')
        sendJobPipeRWs = []
        for i in range(numReadWorkers):
            workersNames.append(node + ('-readWorker%02d' % i))
            # Create pipe to communicate the main client process with the several read-workers
            recvJobPipeRW, sendJobPipeRW = Pipe(False)
            sendJobPipeRWs.append(sendJobPipeRW)
            workers.append(multiprocessing.Process(target=runReadWorker,
                args=(workersNames[-1], recvJobPipeRW, sharedResultQueue, clientMSIds[i], mssDict[clientMSIds[i]],
                    stations, jonesElements, polar, tIndexInit, tIndexEnd, tIndexStep,
                    cIndexInit,cIndexEnd,cIndexStep, chunkSize, timeout, dbname, dbuser, dbhost, dbinterval)))
            workers[-1].start()
        # Once server has acknowledge of initialization of all SBs in all nodes
        # we will get a message with indications
        data = recvJobPipe.recv()
        times = data['TIMES']
        log('Starting ' + str(numPlotWorkers) + ' plot workers')
        # Compute the variables needed for plots (sub-plot grid layout)
        (acoord, margx, margy, ylim, dx, dy) = getPlotArgs(len(stations), yRange)
        for i in range(numPlotWorkers):
            workersNames.append(node + ('-plotWorker%02d' % i))
            workers.append(multiprocessing.Process(target=runPlotWorker,
                args=(workersNames[-1], sharedJobQueue, sharedResultQueue, stations, outputFolder,
                    times, polar, refStationIndex, acoord, margx, margy, ylim, dx, dy, delay, figSize)))
            workers[-1].start()
        # Forward each chunk request from the server to every read-worker,
        # until the server sends a negative time index (termination signal)
        continueRecv = True
        while continueRecv:
            data = recvJobPipe.recv() # this message indicates a new chunk is requested
            if data['TIMEINDEX'] < 0:
                # this means the server does not want more chunks, we can exit
                # (after telling so to the read-workers)!
                continueRecv = False
            for i in range(numReadWorkers):
                sendJobPipeRWs[i].send(data)
        # In this point, we can close these ends of the pipes
        for i in range(numReadWorkers):
            sendJobPipeRWs[i].close()
    # We wait until all the workers are done
    for i in range(len(workers)): # we do not use numWorkers because in case of xaxis == FREQ there are also the readWorkers
        workers[i].join()
        log(workersNames[i] + ' joined!')
    log('All workers exited!')
    # Indicate that all workers are done
    sharedResultQueue.put([MESS_TYPE_CLIENT_END,],)
    return
def getPlotArgs(numStations, yRange):
    """ Get plotting fields arguments of the pictures layout.

    Lays out a grid of nx x ny sub-plot cells (aiming at a ~16:9, i.e. 1.77,
    width/height ratio), one cell per station. Each cell is split vertically
    in two sub-plots (ampl/phase or real/imag).

    numStations: number of stations (sub-plots) to lay out (>= 1).
    yRange: sequence of four floats [min1, max1, min2, max2].

    Returns (acoord, margx, margy, ylim, dx, dy) where:
      acoord: (numStations, 2) array with the figure-fraction (x, y) origin
              of each station cell.
      margx, margy: margins (figure fraction) inside each sub-plot.
      ylim: [[min1, max1], [min2, max2]] y-limits for the two coordinates.
      dx, dy: width and half-height of each grid cell (figure fraction).
    """
    # Clamp ny to at least 1 so small station counts (e.g. numStations == 1)
    # do not cause a ZeroDivisionError in the nx computation below.
    ny = max(1, int(np.sqrt(numStations / 1.77)))
    nx = int(numStations / ny)
    if nx * ny < numStations:
        nx += 1
    xcoord = np.linspace(0.05, .95, nx + 1)
    ycoord = np.linspace(0.05, .95, ny + 1)
    # np.float was removed in recent NumPy releases; the builtin is equivalent
    acoord = np.zeros((numStations, 2), dtype=float)
    for ant in range(numStations):
        i = int(ant / nx)   # grid row of this station
        j = ant - i * nx    # grid column of this station
        acoord[ant, 0] = xcoord[j]
        acoord[ant, 1] = ycoord[i]
    dx = xcoord[1] - xcoord[0]
    dy = (ycoord[1] - ycoord[0]) / 2.
    margx = 4e-3
    margy = 2.e-2
    ylim = [yRange[0:2], yRange[2:4]]
    return (acoord, margx, margy, ylim, dx, dy)
def makeClientManager(server, port, authkey):
    """ Create a manager for a client. This manager connects to a server on the
    given address:port and exposes the get_job_p_r, get_job_q and
    get_result_q methods for accessing the pipes and shared queue from the
    server. Return a manager object.

    server: host name/address of the server process.
    port: TCP port (int) the server manager listens on.
    authkey: shared secret used to authenticate the connection.
    """
    # Register only the accessors (no callables: they are served remotely)
    class JobManager(SyncManager): pass
    JobManager.register('get_job_p_r')
    #JobManager.register('get_job_p_s') #NOT REQUIRED
    JobManager.register('get_job_q')
    JobManager.register('get_result_q')
    manager = JobManager(address=(server, port), authkey=authkey)
    manager.connect()
    print 'Client connected to %s:%s' % (server, port)
    sys.stdout.flush()
    return manager
def log(message):
    """Write *message* plus a trailing newline to stdout, flushing at once
    so the output is visible immediately (e.g. through an ssh pipe)."""
    out = sys.stdout
    out.write('%s\n' % message)
    out.flush()
class GainAnimation(LModule):
    """LModule that generates per-station images of LOFAR gain solutions
    (queried from the LEDDB) which can later be assembled into a movie.

    This process acts as the server: it spawns one remote client per node
    (via ssh); each client spawns worker processes that query the LEDDB
    and plot the images (see runClient/runServer for the protocol).
    """
    def __init__(self,userName = None):
        # userName: LEDDB user name; when None the current OS user is used.
        self.userName = userName
        if self.userName == None:
            self.userName = utils.getUserName()
        # Declare all the command-line options of this module
        options = LModuleOptions()
        options.add('diagfile', 'i', 'Input DiagFile', helpmessage='. When plotting with xaxis==' + FREQ + ', it is recommended that the given DiagFile does not reference gain solutions from more than 40 SBs. Given the resolution of the station plots, giving more SBs will not have a noticeable effect.')
        options.add('output', 'o', 'Output folder', helpmessage=' in the current node where the logs, images and the info file will be stored. This directory must be shared between the used nodes.')
        options.add('xaxis', 'x', 'X axis', choice=[FREQ,TIME], default=FREQ, helpmessage='. The animation will be as a function of the other one (by default ' + TIME + ')')
        options.add('jones', 'j', 'Jones', default='0,3', helpmessage='. Elements of the Jones matrix to plot.')
        options.add('refstation', 'r', 'Reference Station', mandatory=False, helpmessage='. You can specify the reference station name to be used for the phase (if plotting polar coordinates). If not specified, the first station is used.')
        options.add('cartesian', 'c', 'Plot cartesian?', default=False,helpmessage=' instead of polar coordinates')
        options.add('timeslots', 't', 'Timeslots to use',default='0,-1,1', helpmessage=' (comma separated and zero-based: start,end[inclusive],step) (for the last time sample specify -1). If xaxis==' + TIME + ', we suggest to use large value for the step given the resolution of the station plots.')
        options.add('channels', 'f', 'Channels to use', mandatory = False, helpmessage=' in each SB (comma separated and zero-based: start,end[inclusive],step) (for the last channel specify -1). [default is to use only the first channel, i.e. 0,0,1]')
        options.add('yrange', 'y', 'Y range', mandatory=False, helpmessage=', specify four values comma separated, i.e. minAmpl,maxAmpl,minPhase,maxPhase in case of polar [default is ' + DEFAULT_POLAR_YRANGE + ' in polar and ' + DEFAULT_CARTESIAN_YRANGE + ' in cartesian]')
        options.add('nodes', 'n', 'Nodes to use', helpmessage=' to use for the remote clients (and its workers).')
        options.add('numworkers', 'w', 'Num. plot-workers', default = 16, helpmessage=' (for pictures generation) used per node. Take into account that, if xaxis==' + FREQ + ', for each node also read-workers (specific worker querying the LEDDB) will be created (as many as number of SBs in the node). In the case of xaxis==' + TIME + ', the plot-workers are also in charge of LEDDB queries')
        options.add('port', 'p', 'Port', default = 1234, helpmessage=' number to be used for TCP communication between current node and the nodes with the data')
        options.add('chunkin', 'a', 'Chunk Size In', default = 512, helpmessage=' (only used if xaxis==' + FREQ + ') of the gains (in time) requested by the server (in local machine) to the remote nodes. The higher the number the more RAM the local machine will need to combine the chunks from remote nodes. If possible, the recommended value is numworkers * numnodes * chunkout [default is 512]')
        options.add('chunkout', 'b', 'Chunk Size Out', default = 1, helpmessage=' (only used if xaxis==' + FREQ + ') of the combined gains (in time) send by the server (in local machine) to the remote nodes (that will generate the images).')
        options.add('delay', 'd', 'Delay File', mandatory=False, helpmessage=', file with the delays per station to be applied in the phases')
        options.add('figsize', 'g', 'Size of the figures', default=FIG_SIZE, helpmessage=' in inches.')
        options.add('dbtimeout', 'm', 'DB connection timeout', default = 300)
        options.add('dbname', 'l', 'DB name', default=DEF_DBNAME)
        options.add('dbuser', 'u', 'DB user', default=self.userName)
        options.add('dbhost', 'z', 'DB host', default=DEF_DBHOST)
        options.add('dbinterval', 'e', 'DB interval', default=1.5,helpmessage=' (in seconds) between LEDDB queries.')
        # the information
        information = """Generate a movie of all the stations gains."""
        # Initialize the parent class
        LModule.__init__(self, options, information)
        self.anim = None
    def getSlots(self, slots):
        """ Parse the slots from the args to a list of ints.
        It returns: [initialIndex, endIndex, step] """
        slotssplit = slots.split(',')
        if len(slotssplit) != 3:
            raise Exception('ERROR: slots format is start,end,step')
        for i in range(len(slotssplit)): slotssplit[i] = int(slotssplit[i])
        return slotssplit
    def getYRange(self, yrange, polar):
        """ Parse the yrange from the args to a list of floats
        (if required, it assigns the default values)
        """
        if yrange == '':
            # Empty string means "not given": pick the default for the mode
            if polar:
                yrange = DEFAULT_POLAR_YRANGE
            else:
                yrange = DEFAULT_CARTESIAN_YRANGE
        yrangesplit = yrange.split(',')
        if len(yrangesplit) != 4:
            raise Exception('Error: YRange format is min1,max1,min2,max2')
        for i in range(len(yrangesplit)):
            yrangesplit[i] = float(yrangesplit[i])
        return yrangesplit
    def getFigSize(self, figsize):
        """ Parse a 'xsize,ysize' string into a tuple of two ints (inches)."""
        figsize = figsize.split(',')
        if len(figsize) != 2:
            raise Exception('ERROR: figsize format is xsize,ysize')
        for i in range(len(figsize)): figsize[i] = int(figsize[i])
        return tuple(figsize)
    def getDelayDict(self, delayFile, stations):
        """Get a delay dictionary from the given file.
        It returns all 0 if none file is provided"""
        delayDict = {}
        for station in stations:
            delayDict[station] = 0.
        if delayFile != None:
            # Each non-empty line is expected to start with: <station> <delay>
            lines = open(delayFile,'r').read().split('\n')
            for line in lines:
                if line != '':
                    (station,delay) = line.split()[:2]
                    if station in stations:
                        delayDict[station] = float(delay)
        return delayDict
    def logImagesProgress(self, counter, total, numWorkers):
        """ Show dynamic progress of created images"""
        # '\r' rewinds the line so the progress is updated in place
        message = "\rImages completed: %d of %d (%3.1f%%). Num. workers: %4d" % (counter,total, float(counter) * 100./float(total), numWorkers)
        sys.stdout.write(message)
        sys.stdout.flush()
    def showMakeMovieCommand(self, outputFolder, animFile, xaxis):
        """ Shows the commands that the user should execute to create the movie"""
        # Frame rate depends on what varies between frames
        if xaxis == FREQ:
            rate = 20
        else:
            rate = 8
        print 'Renaming images...'
        os.system('rm -f ' + outputFolder + '/img*png')
        # We need to rename the images to consecutive indexes
        imageIndex = 0
        for imagefile in sorted(os.listdir(outputFolder)):
            if imagefile.endswith('png'):
                imagefilepath = outputFolder + '/' + imagefile
                newimagefilepath = outputFolder + ('/img%06d.png' % imageIndex)
                os.system('mv ' + imagefilepath + ' ' + newimagefilepath)
                imageIndex += 1
        print 'To generate the movie with mencoder:'
        print "cd " + outputFolder + "; mencoder -ovc lavc -lavcopts vcodec=mpeg4:vpass=1:vbitrate=4620000:mbd=2:keyint=132:v4mv:vqmin=3:lumi_mask=0.07:dark_mask=0.2:mpeg_quant:scplx_mask=0.1:tcplx_mask=0.1:naq -mf type=png:fps=" + str(rate) + " -nosound -o "+animFile+" mf://\*.png"
        print
        print 'To generate the movie with ffmpeg:'
        print "ffmpeg -r " + str(rate) + " -i " + outputFolder + "/img%06d.png -vcodec mpeg4 -b:v 4620000 -y " + animFile
    def runRemoteClient(self, node, snode, port, outputFolder):
        """ Function to make an ssh to run the client code in a remote machine
        We assume the node is reachable via ssh (and the internal network is
        properly configured). We also assume that in the remote machine the current
        script file is in the same location that in current machine.
        """
        scriptpath = os.path.abspath(__file__)
        parentpath = os.path.abspath(os.path.join(scriptpath, '..'))
        scriptname = scriptpath.split('/')[-1].split('.')[0]
        # Build a remote 'python -c' one-liner that imports this module and
        # calls runClient(serverNode, port) on the remote machine
        command = 'python -c "import ' + scriptname + '; ' + scriptname + '.' + runClient.__name__ + '(\\\"' + snode +'\\\", \\\"' + str(port) +'\\\")"'
        logging = (' > ' + outputFolder + '/' + node + '.log')
        return (os.popen("ssh " + node + " 'cd " + parentpath + " ; " + command + "'" + logging)).read()
    def runServer(self, manager, outputFolder, xaxis, jonesElements, stations, refStationIndex, polar, timeslots, channelsslots, yRange, numWorkers, chunkInSize, chunkOutSize, mssPerNodeDict, delayDict, figSize, timeout, dbname, dbuser, dbhost, dbinterval):
        """ The server which is run in current machine. From the manager we get the
        involved pipes and queues. We send to the remote clients a message to
        processing the gain solutions
        Then, depending on xaxis:
        1- For xaxis==time each client will create workers (as many as numWorkers).
        Each worker will process a SB (or more) and for each SB create its
        related images. We do not require further communication in this case
        2- For xaxis==freq we need communication with central server to join,
        for each time sample, all the solutions from different SBs.
        So, each remote client will create as many read-workers as SBs assigned to the node.
        Then we split the times in chunks and request the gains solutions to all
        the read-wrokers for each chunk. When we have all the gains (for a given chunk)
        for all SBs, we summit to the job queue all the tasks for the plot workers

        Returns (usedData, imagesCounter, message): the (msId, freq, sbIndex)
        tuples actually used, the number of images created, and a possible
        coherence warning message ('' if none).
        """
        numClients = len(mssPerNodeDict)
        finishClientCounter = 0
        imagesCounter = 0
        usedData = []
        message = ''
        finishWorkersCounter = 0
        numTotalWorkers = None
        numSBs = 0
        sendJobsPipes = {}
        for node in mssPerNodeDict:
            sendJobsPipes[node] = manager.get_job_p_s(node)
            numSBs += len(mssPerNodeDict[node])
            # We send the messages on what data to load to all the clients
            data = {}
            data['OUTPUT'] = outputFolder
            data['MSDICT'] = mssPerNodeDict[node]
            data['NUMWORKERS'] = numWorkers
            data['XAXIS'] = xaxis
            data['JONES'] = jonesElements
            data['STATIONS'] = stations
            data['REFSTATIONINDEX'] = refStationIndex
            data['POLAR'] = polar
            data['TIMES'] = timeslots
            data['CHANNELS'] = channelsslots
            data['YRANGE'] = yRange
            data['TIMECHUNKSIZE'] = chunkInSize
            data['FIGSIZE'] = figSize
            data['DELAYS'] = delayDict
            data['TIMEOUT'] = timeout
            data['DBNAME'] = dbname
            data['DBUSER'] = dbuser
            data['DBHOST'] = dbhost
            data['DBINTERVAL'] = dbinterval
            sendJobsPipes[node].send(data) # We send the messages with details to all the clients
        # The queue to receive gains and message from remote clients (from their read-workers more concretely)
        sharedResultQueue = manager.get_result_q()
        if xaxis == TIME:
            # We can close the pipes since in this case we do not need them anymore
            for node in mssPerNodeDict:
                sendJobsPipes[node].close()
            # In this case, the clients have already created the workers and started
            # creating the images (we do not require communication with server)
            errorMessages = []
            numChannelsDict = {}
            numTotalWorkers = numWorkers * numClients
            freqs = [None,]
            while finishWorkersCounter < numTotalWorkers:
                # Gets the results from the result queue. there can be several types of messages
                rmess = sharedResultQueue.get()
                rmessType = rmess[0]
                if rmessType == MESS_TYPE_WORKER_END:
                    finishWorkersCounter += 1
                elif rmessType == MESS_TYPE_WORKER_KO_GAIN:
                    [emessage, ] = rmess[1:]
                    errorMessages.append(emessage)
                elif rmessType == MESS_TYPE_WORKER_OK_GAIN:
                    [clientMSId, freq, sbIndex, freqs] = rmess[1:]
                    if clientMSId not in numChannelsDict:
                        # First time we receive data from certain SB
                        numChannelsDict[clientMSId] = len(freqs)
                    usedData.append((clientMSId, freq, sbIndex))
                elif rmessType == MESS_TYPE_WORKER_IMAGE_CREATED:
                    imagesCounter += 1
                elif rmessType == MESS_TYPE_CLIENT_END: # Clients where all the workers finished
                    finishClientCounter += 1
                self.logImagesProgress(imagesCounter, numSBs * len(freqs), numTotalWorkers - finishWorkersCounter)
            print # we print blank line after the dynamic writting
            # Show possible accumulated error messages
            if len(errorMessages):
                print str(len(errorMessages)) + ' SBs had errors while querying Gains:'
                for errorMessage in errorMessages:
                    print '    ' + errorMessage
            # All workers finished loading and plotting data. Waiting clients finalization
            while (finishClientCounter < numClients):
                try:
                    sharedResultQueue.get() # They are all MESS_TYPE_CLIENT_END_WORKERS messages
                    finishClientCounter += 1
                except:
                    pass
            if len(numChannelsDict): # check that all SBs had the same number of channels
                # NOTE(review): dict.keys()[0] only works on Python 2, where
                # keys() returns a list
                numChannels = numChannelsDict[numChannelsDict.keys()[0]]
                for clientMSId in numChannelsDict:
                    if numChannelsDict[clientMSId] != numChannels:
                        message = 'WARNING: GAINS MAY NOT BE COHERENT (different SBs having different number of channels)'
                        print message
                        break
        else: # xaxis==FREQ
            # the queue to send jobs for the plot-workers
            sharedJobQueue = manager.get_job_q()
            # Wait until all messages from read-workers (one for each SB) are back
            times = []
            numReadWorkers = numSBs
            numTotalWorkers = numReadWorkers + (numWorkers * numClients)
            readWorkersCounter = 0
            while readWorkersCounter < numReadWorkers:
                rmess = sharedResultQueue.get()
                rmessType = rmess[0]
                if rmessType == MESS_TYPE_WORKER_KO_GAIN:
                    [emessage, ] = rmess[1:]
                    print emessage
                elif rmessType == MESS_TYPE_WORKER_OK_GAIN:
                    [omessage, clientMSId, sbIndex, freqs, times] = rmess[1:]
                    usedData.append((clientMSId, freqs[0], sbIndex))
                    print omessage
                elif rmessType == MESS_TYPE_WORKER_END:
                    finishWorkersCounter += 1
                readWorkersCounter += 1
            numCorrectSBs = len(usedData) # There will be as many active read-workers as SBs with Gains
            numTimes = len(times)
            log('All clients (read-workers) finished loading data. Starting querying chunks and creating images. Num. freqs (read-workers) = ' + str(numCorrectSBs) + ', Num. stations = ' + str(len(stations)) + ', Num. times = ' + str(numTimes) + '...')
            # Send to the clients the details of the readed data
            data = {}
            data['TIMES'] = times
            for node in mssPerNodeDict:
                sendJobsPipes[node].send(data)
            # In this point all read-workers have finished reading data and are ready to start
            # receiving queries for chunks of data, as well as the plot-workers to plot
            numImages = 0
            for ti in range(0,numTimes,chunkInSize):
                # for each chunk of times
                receivedGain = 0
                sdata = {'TIMEINDEX':ti}
                chunkfreqs = []
                chunkgains = []
                # send all the messages to query for this chunk of times
                for node in mssPerNodeDict:
                    sendJobsPipes[node].send(sdata)
                # we wait until we receive all the gains from all the clients
                while receivedGain < numCorrectSBs:
                    rmess = sharedResultQueue.get()
                    rmessType = rmess[0]
                    if rmessType in (MESS_TYPE_WORKER_IMAGE_CREATED,MESS_TYPE_CLIENT_END):
                        if rmessType == MESS_TYPE_WORKER_IMAGE_CREATED:
                            # in the meanwhile it may happend that we receive messages of finished images
                            imagesCounter += 1
                        else: # rmessType == MESS_TYPE_CLIENT_END_WORKERS:
                            # in the meanwhile it may happen that we receive messaged of
                            # clients that have finished their tasks (readers and plotters)
                            finishClientCounter += 1
                        self.logImagesProgress(imagesCounter, numTimes, numWorkers * (numClients - finishClientCounter))
                    elif rmessType == MESS_TYPE_WORKER_END:
                        finishWorkersCounter += 1
                    elif rmessType == MESS_TYPE_WORKER_SEND_GAIN:
                        receivedGain += 1
                        [fgains, freqs] = rmess[1:] #fgains dim freq, time, station, cord, jones
                        chunkgains.extend(fgains)
                        chunkfreqs.extend(freqs)
                # we have received all the clients contributions for this chunk
                chunkgains = np.array(chunkgains) #dimensions are: freq, time, station, cord, jones
                chunkfreqs = np.array(chunkfreqs)
                if len(chunkgains.shape) == 5:
                    # if shape is not 5, it means that probably some gains have number of time samples
                    # all gains must be dimensioned to: time, station, cord, jones, freq
                    allfgains = chunkgains.transpose((1,2,3,4,0))
                    # For the received chunk we create other smallet chunks of times.
                    # Those are send to the queue to be plotted
                    for tic in range(0, len(allfgains), chunkOutSize):
                        numImages += 1
                        sharedJobQueue.put([tic+ti, allfgains[tic:tic+chunkOutSize], chunkfreqs])
            # We send negative index to indicate to the readers that they can finish
            sdata = {'TIMEINDEX':-1}
            for node in mssPerNodeDict:
                sendJobsPipes[node].send(sdata)
            # in this point we are done reading all the gains from remote clients
            # We can close the pipes since we already have all of them ready
            for node in mssPerNodeDict:
                sendJobsPipes[node].close()
            # We add in the end of the queue as many None as numworkers, this is the
            # ending task of each plot-worker and it is used to tell them theirs tasks are done!
            for node in mssPerNodeDict:
                for i in range(numWorkers):
                    sharedJobQueue.put(None)
            # Wait all the workers to finish their tasks
            while finishClientCounter < numClients:
                try:
                    rmess = sharedResultQueue.get()
                    rmessType = rmess[0]
                    if rmessType == MESS_TYPE_WORKER_IMAGE_CREATED:
                        imagesCounter += 1
                    elif rmessType == MESS_TYPE_WORKER_END:
                        finishWorkersCounter += 1
                    elif rmessType == MESS_TYPE_CLIENT_END:
                        finishClientCounter += 1
                    self.logImagesProgress(imagesCounter, numImages, numWorkers * (numClients - finishClientCounter))
                except:
                    pass
            print # we print blank line after the dynamic writting
            # NOTE(review): the next two prints concatenate an int to a str,
            # which would raise TypeError if these branches are ever taken;
            # str(...) seems to be missing — left unchanged here
            if imagesCounter < numImages: # theoretically if all clients are done, all the images should have been generated
                print 'ERROR: there are still images in the queue (' + (numImages-imagesCounter) + ') but all clients are finished!'
            if finishWorkersCounter < numTotalWorkers: # theoretically if all cleints are donem all workers should be done
                print 'ERROR: there are alive workers (' + (numTotalWorkers-finishWorkersCounter) + ') but all clients are finished!'
            if numImages != numTimes:
                message = 'WARNING: GAINS MAY NOT BE COHERENT (different SBs having different number of time samples). Num. Times = ' + str(numTimes) + '. Images generated: ' + str(numImages)
                print message
        # Independently of xaxis, in both cases we must return this data
        return (usedData, imagesCounter, message)
    def makeServerManager(self, port, authkey, nodeDict):
        """ Create a manager for the server, listening on the given port.
        Return a manager object with get_job_p_s, get_job_p_r, get_job_q and
        get_result_q methods.
        """
        # We create as many pipes as nodes, we use these pipes to send the job that
        # each node must do. We also create a queue for summiting the jobs for
        # creating images and another one to send all the results back to the server
        pipesDict = {}
        for node in nodeDict:
            # False because we do not need duplex connection
            pipesDict[node] = Pipe(False)
        # Create the queue to recieve the mesages (and gains) from the clients
        resultQueue = multiprocessing.Queue()
        # Create queue to send the plot jobs to the plot-workers
        jobQueue = multiprocessing.Queue()
        class JobManager(SyncManager): pass
        # Index 0 of each pipe pair is the receiving end, index 1 the sending end
        JobManager.register('get_job_p_r', callable=lambda k: pipesDict[k][0])
        JobManager.register('get_job_p_s', callable=lambda k: pipesDict[k][1])
        JobManager.register('get_result_q', callable=lambda: resultQueue)
        JobManager.register('get_job_q', callable=lambda: jobQueue)
        manager = JobManager(address=('', port), authkey=authkey)
        manager.start()
        print 'Server started at port %s' % port
        return manager
    def process(self,diagfile,output,xaxis,jones,refstation,cartesian,timeslots,channels,yrange,nodes,numworkers,port,chunkin,chunkout,delay,figsize,dbtimeout,dbname,dbuser,dbhost,dbinterval):
        """Entry point of the module: parse and validate the arguments, query
        the LEDDB for the involved gain solutions, distribute the measurement
        sets over the nodes, start one remote client per node and run the
        server that coordinates them. Finally, write the movie info file and
        show the commands to assemble the generated images into a movie.
        """
        diagfile = os.path.abspath(diagfile)
        if not os.path.isfile(diagfile):
            print 'Input diagnostics file not found!'
            return
        outputFolder = os.path.abspath(output)
        # Read the rest of arguments
        jonesElements = utils.getElements(jones)
        polar = not cartesian
        timeslots = self.getSlots(timeslots)
        if channels == '':
            channels = DEFAULT_CHANNELS
        channelsslots = self.getSlots(channels)
        yRange = self.getYRange(yrange, polar)
        nodes = utils.getNodes(nodes)
        numNodes = len(nodes)
        # Check the chunk sizes
        if chunkout > chunkin:
            print 'ERROR: chunkin must be higher than chunkout.'
            return
        delayFile = None
        if delay != '':
            delayFile = os.path.abspath(delay)
        figSize = self.getFigSize(figsize)
        print
        print 'Input DiagFile: ' + str(diagfile)
        print 'Output folder (logs, images and info file): ' + str(outputFolder)
        print 'XAxis: ' + str(xaxis)
        print 'Jones elements: ' + str(jonesElements)
        print 'Use polar coord.: ' + str(polar)
        print 'Times: ' + str(timeslots)
        print 'Channels: ' + str(channelsslots)
        print 'YRange: ' + str(yRange)
        print 'Delay file: ' + str(delayFile)
        print 'Num. nodes: ' + str(numNodes)
        if xaxis == FREQ:
            print 'Maximum plot-workers per node: ' + str(numworkers)
            print 'Chunk size read-workers->server: ' + str(chunkin)
            print 'Chunk size server->plot-workers: ' + str(chunkout)
        else:
            print 'Maximum workers per node: ' + str(numworkers)
        print 'Figures size [inches]: ' + str(figSize)
        print
        # Create the outputFolder
        os.system('mkdir -p ' + outputFolder)
        currentNode = utils.getHostName()
        # We do an initial query just to find the involved gainIds
        # and check if they are related to same LDSBP (and have same timeStep)
        connection = Connector(dbname, dbuser, dbhost).getConnection()
        # We get the different partitions of the GAIN table
        (partitionsSuffix, nodePoints) = diagoperations.getPartitionSchema(connection)
        qm = QueryManager()
        diagFile = DiagnosticFile(diagfile)
        names = [GAIN+ID,LDSBP+ID,MS+ID,SBINDEX,CENTFREQ,BW, STATION,TSTEP,FSTEP]
        (query, queryDict) = qm.getQuery(diagFile.queryOption, diagFile.queryConditions, names)
        cursor = connection.cursor()
        qm.executeQuery(connection, cursor, query, queryDict, timeout=dbtimeout)
        # We create a dictionary for the gainIds, we separate them by station
        mssDict = {}
        stationsSet = set([])
        ldsbpId = None
        timeStep = None
        freqStep = None
        for row in cursor:
            msId = row[2]
            stationsSet.add(row[6])
            if msId not in mssDict:
                # we extract the freq-time info, then we do not need to do it again
                mssDict[msId] = ([],(diagoperations.getPartition(GAIN, partitionsSuffix, nodePoints, msId), len(mssDict), row[3], row[4] * 1e6, row[5] * 1e6, row[7], row[8])) # sbIndex, centFreq, BW, timeStep , freqStep
            mssDict[msId][0].append(row[0])
            if ldsbpId == None:
                ldsbpId = row[1]
                timeStep = row[7]
                freqStep = row[8]
            elif (ldsbpId != row[1]) or (timeStep != row[7]) or (freqStep != row[8]):
                print 'Gain solutions must be related to the same LDSBP! (and have the same timeStep and freqStep)'
                return
        cursor.close()
        connection.close()
        timeStep = float(timeStep)
        freqStep = float(freqStep)
        stations = sorted(stationsSet)
        refStationIndex = 0
        if refstation != '' and refstation in stations:
            refStationIndex = stations.index(refstation)
        # Get the delay dictionary
        delayDict = self.getDelayDict(delayFile, stations)
        print 'Num. MSs: ' + str(len(mssDict))
        print 'Num. Stations: ' + str(len(stations))
        print 'Ref. station: ' + stations[refStationIndex]
        print 'Times step: ' + str(timeStep)
        print 'Freq. step: ' + str(freqStep)
        print
        # We create a many sub-dict, one for each node
        # hence, each node will have assigned a list of MSs
        # and will be in charge of the gains related to them
        nodesDictList = utils.splitDictionary(mssDict, numNodes)
        mssPerNodeDict = {}
        for i in range(numNodes):
            mssPerNodeDict[nodes[i]] = nodesDictList[i]
        # We create the manager for the server (current process) that will handle
        # the pipes and the shared queues
        manager = self.makeServerManager(port, AUTH_KEY, mssPerNodeDict)
        # We create one remote client in each node
        remoteClients = []
        for node in nodes:
            remoteClients.append(multiprocessing.Process(target=self.runRemoteClient, args=(node, currentNode, port, outputFolder)))
            remoteClients[-1].start()
        if xaxis == FREQ:
            print 'All remote clients correctly initialized. Starting read-workers for querying LEDDB data...'
        else:
            print 'All remote clients correctly initialized. Starting workers for querying LEDDB and plotting the data...'
        # Run the server code manager
        (usedData,plotCounter,message) = self.runServer(manager, outputFolder, xaxis, jonesElements, stations, refStationIndex,
            polar, timeslots, channelsslots, yRange, numworkers, chunkin, chunkout, mssPerNodeDict, delayDict, figSize, dbtimeout, dbname, dbuser, dbhost, dbinterval)
        # Join the processes to terminate the main process
        for i in range(numNodes):
            remoteClients[i].join()
        print # we print a blank line after the dynamic writting in stdout
        print 'All remote clients finished!'
        # Sleep a bit before shutting down the server - to give clients time to
        # realize the job queue is empty and exit in an orderly way.
        time.sleep(2)
        manager.shutdown()
        # usedData contains the used measurement sets an dplotCounter the number of
        # generated images
        if len(usedData) and plotCounter:
            # Create the file with info on the generated video (well, the one that
            # should be generated from the created images)
            MovieInfoFile(outputFolder + '/' + DEF_OUTPUT_INFO_NAME).write(usedData, xaxis, jonesElements, stations, stations[refStationIndex], polar, timeslots, channelsslots, yRange, delayDict, message)
            # Create the movie
            self.showMakeMovieCommand(outputFolder, os.path.basename(diagfile).split('.')[0] + '.mp4', xaxis)
| [
"omrubi@gmail.com"
] | omrubi@gmail.com |
e7737a9da1906380651bf68b089dacca80d6427a | 0eb37240203486cc224be79ecf67614ead966109 | /Morgan stanley/qstn2.py | 5e9ff1c7d4894db247b41058c8e9197b0c6a3f9b | [] | no_license | kiranpuligorla/Algorithms | ec41a2a97843b6073c8d94d8269c1f3e8d1c9879 | 9d979874f3f36973300cc716bf3b003368cccfdc | refs/heads/master | 2021-08-09T00:27:15.043047 | 2017-11-11T18:47:33 | 2017-11-11T18:47:33 | 109,561,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | #!/bin/python3
def sort_numeric(st, bl):
    """Print 'key value' records ordered by their integer value.

    st: list of strings, each of the form 'key value' with an integer
        value; when a key repeats, only its last value is kept.
    bl: when True, sort in descending order (ties broken by key).
    """
    mapping = {}
    for record in st:
        key, value = record.split()
        mapping[key] = value
    ordered = sorted(((int(value), key) for key, value in mapping.items()), reverse=bl)
    for _, key in ordered:
        print(key, mapping[key])
def sort_lxg(st, bl):
    """Sort the strings lexicographically and print them, one per line.

    st: list of strings; it is sorted in place as a side effect.
    bl: when True, sort in descending order.
    """
    ordered = sorted(st, reverse=bl)
    st[:] = ordered
    print('\n'.join(ordered))
if __name__ == "__main__":
st = []
for i in range(int(input())):
st.append(input())
ky, bl, st_type = input().split()
if bl == 'false':
bl=False
elif bl =='true':
bl=True
if st_type == "numeric":
sort_numeric(st, bl)
else:
sort_lxg(st, bl) | [
"kiran.puligorla@gmail.com"
] | kiran.puligorla@gmail.com |
9b607339c99c4efbe0cfbe4fda6f6c3457d9e823 | 73767b6a308cf11908bb17d7fac6f0412652d177 | /src/descriptions.py | a7a50a09b1a7b9eead1700e60b2ef1bcc66215bd | [] | no_license | ocarmieo/github-trends | ab36065319b19821b9767a493049eb9a088649e4 | 13aad00f0cf640bee0f4915684afed4bf898d3ad | refs/heads/master | 2021-01-19T02:21:01.856085 | 2020-08-11T17:57:05 | 2020-08-11T17:57:05 | 75,510,616 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | py | from __future__ import division
from collections import Counter
from pandas.io.json import json_normalize
import json
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import lda
def load_json_df(filename):
    '''
    Load a newline-delimited JSON file into a flat DataFrame.

    INPUT: path to a file containing one JSON object per line
    OUTPUT: pandas dataframe with nested dicts flattened into
        dot-separated column names (via json_normalize)
    '''
    data = []
    # Bug fix: use a context manager so the file handle is closed even if
    # a line fails to parse (the original leaked the open handle).
    with open(filename, 'r') as f:
        for line in f:
            data.append(json.loads(line))
    # Call json_normalize through the pandas namespace: the old
    # `pandas.io.json.json_normalize` location is deprecated/removed.
    df = pd.json_normalize(data)
    return df
def describe_nmf_results(document_term_mat, W, H, n_top_words = 15, vocabulary=None):
    """Print the top-weighted terms for each NMF topic.

    document_term_mat, W: unused here; kept for interface compatibility
        with existing callers (W is the document-topic matrix).
    H: topic-term matrix, one row per topic, one column per term.
    n_top_words: number of highest-weighted terms to print per topic.
    vocabulary: sequence of term names aligned with H's columns.  When
        omitted, falls back to the module-level `feature_words` built in
        the __main__ block (the original implicit-global behavior).
    """
    # Backward-compatible fallback to the implicit global the original
    # version depended on.
    words = vocabulary if vocabulary is not None else feature_words
    for topic_num, topic in enumerate(H):
        print("Topic %d:" % topic_num)
        # argsort is ascending; the [:-n-1:-1] slice takes the last
        # n_top_words indices in reverse, i.e. highest weight first.
        print(" ".join(words[i]
                       for i in topic.argsort()[:-n_top_words - 1:-1]))
if __name__ == '__main__':
    # Load the scraped repo docstrings (newline-delimited JSON).
    df = load_json_df('data/docstrings')
    # Each record stores its docstrings field as a one-element list; unwrap.
    df['docstrings'] = df['docstrings'].map(lambda x: x[0])
    # NOTE(review): the original computed df['docstrings'].map(len).mean()
    # here and discarded the result — a no-op, removed.
    # Drop placeholder records whose docstring payload is the string '0'.
    df = df[df['docstrings'] != '0']
    # Package name is the final path component of "owner/repo".
    df['package'] = df['repo_name'].map(lambda x: x.split('/')[-1])

    # ===== Topic modeling =====
    n_features = 5000
    n_topics = 8

    doc_bodies = df['docstrings']
    # TF-IDF features feed the NMF model; a plain CountVectorizer was the
    # commented-out alternative in the original.
    vectorizer = TfidfVectorizer(max_features=n_features, stop_words='english')
    document_term_mat = vectorizer.fit_transform(doc_bodies)
    feature_words = vectorizer.get_feature_names()

    # NMF: W maps documents to topics, H maps topics to terms.
    nmf = NMF(n_components=n_topics)
    W_sklearn = nmf.fit_transform(document_term_mat)
    H_sklearn = nmf.components_
    describe_nmf_results(document_term_mat, W_sklearn, H_sklearn)

    # LDA model, fit on raw term counts from a separate CountVectorizer.
    cnt_vectorizer = CountVectorizer(max_features=n_features)
    cv_doc_term_mat = cnt_vectorizer.fit_transform(doc_bodies)
    vocab = cnt_vectorizer.get_feature_names()
    model = lda.LDA(n_topics=5, n_iter=1500, random_state=1)
    model.fit_transform(cv_doc_term_mat)  # model.fit_transform(X) is also available
    topic_word = model.components_  # model.components_ also works
    n_top_words = 7
    for i, topic_dist in enumerate(topic_word):
        # Highest-weight terms first for each LDA topic.
        topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]
        print('Topic {}: {}'.format(i, ' '.join(topic_words)))
| [
"cglai8@gmail.com"
] | cglai8@gmail.com |
2d97358a016a1bf55d4b27eb069b7ec83eb0808e | 2a5f45d8c61bc211ad44cb2df775fbceef86aeb4 | /app/config.py | 6bbd254585df4513870cbe99ae1bed11494f941d | [] | no_license | LABIOQUIM/Plasmodium-Classifier | dae58ac60dcd883f8a27a3556af0ca7f20aa14b0 | 407d27e4a440c247378ab196515572824b312b07 | refs/heads/master | 2023-03-13T10:00:05.672664 | 2021-03-05T00:08:38 | 2021-03-05T00:08:38 | 296,375,197 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | import os
basedir = os.path.abspath(os.path.dirname(__file__))


class Config(object):
    """Flask app configuration: secret key, database URI, upload rules."""

    # Session/signing secret used by Flask.
    SECRET_KEY = 'gvon475gvqn5q5AISWDU'
    # Prefer the DATABASE_URL environment variable; otherwise fall back
    # to a local SQLite database stored next to this module.
    SQLALCHEMY_DATABASE_URI = (
        os.environ.get('DATABASE_URL')
        or 'sqlite:///' + os.path.join(basedir, 'app.db')
    )
    # Disable SQLAlchemy's modification-tracking event system.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Image file extensions the app accepts for upload.
    ALLOWED_EXTENSIONS = {'png', 'jpeg', 'jpg'}
"thales3238junior@gmail.com"
] | thales3238junior@gmail.com |
7e96884df88998e1cd4b4b6f2f635021055b5322 | c317f99691f549b393562db200b1e9504ce11f95 | /algorithms_learn/what_can_be_computed/src/simulateDfa.py | efedb724f8a32d4de40d6a61ff15aa0d1e302d68 | [
"CC-BY-4.0"
] | permissive | RRisto/learning | 5349f9d3466150dbec0f4b287c13333b02845b11 | 618648f63a09bf946a50e896de8aed0f68b5144a | refs/heads/master | 2023-09-01T00:47:23.664697 | 2023-08-30T17:56:48 | 2023-08-30T17:56:48 | 102,286,332 | 15 | 24 | null | 2023-07-06T21:22:48 | 2017-09-03T18:42:58 | Jupyter Notebook | UTF-8 | Python | false | false | 1,135 | py | # SISO program simulateDfa.py
# Simulate a given dfa with a given input.
# dfaString: ASCII description of the dfa M to be simulated
# inString: the initial content I of M's tape
# returns: 'yes' if M accepts I and 'no' otherwise
# Example:
# >>> simulateDfa(rf('multipleOf5.dfa'), '3425735')
# 'yes'
import utils; from utils import rf; from turingMachine import TuringMachine
import re, sys; from dfa import Dfa
def simulateDfa(dfaString, inString):
    """Simulate the dfa described by dfaString on input inString.

    Returns the machine's result: 'yes' if it accepts inString,
    'no' otherwise.
    """
    machine = Dfa(dfaString)
    machine.reset(inString)
    return machine.run()
# see testCheckDfa() in checkTuringMachine.py for more detailed tests
def testSimulateDfa():
    """Smoke-test simulateDfa on known machine/input/result triples."""
    cases = [
        ('containsGAGA.dfa', 'CCCCCCCCCAAAAAA', 'no'),
        ('containsGAGA.dfa', 'CCCGAGACCAAAAAA', 'yes'),
        ('multipleOf5.dfa', '12345', 'yes'),
        ('multipleOf5.dfa', '1234560', 'yes'),
        ('multipleOf5.dfa', '123456', 'no'),
    ]
    for filename, inString, expected in cases:
        result = simulateDfa(rf(filename), inString)
        utils.tprint('filename:', filename, 'inString:', inString, 'result:', result)
        assert expected == result
| [
"ristohinno@gmail.com"
] | ristohinno@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.