blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
93db1965da7b6efb5ec2b81be3c712e4fccf59a2 | 17192928ecbca9a66ea00036e8a96f2a8d0b81b0 | /workshops/programacion_python_ESO/pong_v0b.py | ccc25a926889afd683c4ea4af6c36f99cadaa1bc | [
"CC0-1.0"
] | permissive | vicente-gonzalez-ruiz/YAPT | 3ebd893d4205278a1aa2f859f2630cda215ed4ae | 19c9800f12098d725f5d9277714fa4d8b0b9e412 | refs/heads/master | 2023-08-08T23:13:38.022338 | 2023-07-28T08:16:26 | 2023-07-28T08:16:26 | 73,827,591 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 4,705 | py | import pygame
import threading
import time
from empty_display import EmptyDisplay
import lib.colors as Color
# Indices into a (width, height) display_size tuple:
# display_size[WIDTH] is the pixel width, display_size[HEIGHT] the pixel height.
WIDTH = 0
HEIGHT = 1
class BallPosition:
    """Module-global holder for the ball's most recent x coordinate.

    Ball.update() writes the ball's rect.x here each frame so other code can
    read the position without a reference to the Ball instance.
    """
    x = 1  # last known x position in pixels; 1 is just a placeholder start value
class Ball(pygame.sprite.Sprite):
    """Square sprite that bounces off the four edges of the display.

    NOTE(review): the ball_hits_* method names do not match the axes they
    actually handle -- update() calls ball_hits_bottom() when the ball crosses
    the *right* edge and ball_hits_right() for both *vertical* edges; confirm
    the intended naming before relying on these hooks.
    """
    def __init__(self,
                 color,
                 width,
                 height,
                 initial_x_coordinate,
                 initial_y_coordinate,
                 display_size):
        """Build the ball's surface and place it at its starting position.

        color : RGB tuple used to draw the ball
        width, height : size of the ball in pixels
        initial_x_coordinate, initial_y_coordinate : starting top-left corner
        display_size : (width, height) of the display, used for edge tests
        """
        super().__init__()
        self.color = color
        self.width = width
        self.height = height
        self.display_size = display_size
        # Draw the ball on its own surface; black is set as the colorkey so it
        # is treated as transparent when the sprite is blitted.
        self.image = pygame.Surface([width, height])
        self.image.fill(Color.black)
        self.image.set_colorkey(Color.black)
        self.rect = self.image.get_rect()
        pygame.draw.rect(self.image,
                         color,
                         [self.rect.x, self.rect.y, width, height])
        self.rect.x = initial_x_coordinate
        self.rect.y = initial_y_coordinate
        self.x_direction_step = 8 # Move right 8 pixels per update() call
        self.y_direction_step = 8 # Move down 8 pixels per update() call
    def horizontal_rebound(self):
        # Reverse the horizontal (x axis) direction of travel.
        self.x_direction_step = -self.x_direction_step
    def vertical_rebound(self):
        # Reverse the vertical (y axis) direction of travel.
        self.y_direction_step = -self.y_direction_step
    def ball_hits_bottom(self):
        # Called by update() when the ball crosses the right edge (see NOTE above).
        self.horizontal_rebound()
    def ball_hits_top(self):
        # Not called by update(); reverses the x direction.
        self.horizontal_rebound()
    def ball_hits_left(self):
        # Not called by update(); reverses the y direction.
        self.vertical_rebound()
    def ball_hits_right(self):
        # Called by update() for both top and bottom edge crossings (see NOTE above).
        self.vertical_rebound()
    def update(self):
        """Advance the ball one step, bounce off display edges, publish x."""
        display_width = self.display_size[0]
        display_height = self.display_size[1]
        self.rect.x += self.x_direction_step
        self.rect.y += self.y_direction_step
        if (self.rect.x + self.width) > display_width:
            # Crossed the right edge: clamp inside the display, reverse x.
            self.rect.x = display_width - self.width - 1
            self.ball_hits_bottom()
        elif self.rect.x < 0:
            # Crossed the left edge: clamp and reverse x.
            self.rect.x = 0
            self.horizontal_rebound()
        if (self.rect.y + self.height) > display_height:
            # Crossed the bottom edge: clamp inside the display, reverse y.
            self.rect.y = display_height - self.height - 1
            self.ball_hits_right()
        elif self.rect.y < 0:
            # Crossed the top edge: clamp and reverse y.
            self.rect.y = 0
            self.ball_hits_right()
        # Publish the latest x position for external readers.
        BallPosition.x = self.rect.x
class Pong_v0(EmptyDisplay):
    """Pygame demo: a single 16x16 ball bouncing inside the window.

    The measured frame rate is printed to the console once per second from a
    background thread while the main thread runs the draw/update loop.
    """
    def __init__(self,
                 width = 800,
                 height = 600,
                 caption = "A bouncing ball of size 16x16"):
        """Set up the display (via EmptyDisplay) and create the ball sprite.

        width, height : display size in pixels
        caption : window title
        """
        super().__init__(width, height, caption)
        self.running = True  # cleared by process_events() on a QUIT event
        self.ball_width = 16
        self.ball_height = 16
        # Start horizontally centred, three quarters of the way down the display.
        self.initial_x_coordinate = self.display_size[WIDTH]//2 - self.ball_width//2
        self.initial_y_coordinate = 3*self.display_size[HEIGHT]//4 - self.ball_height//2
        self.ball_color = Color.white
        self.ball = Ball(
            color = self.ball_color,
            width = self.ball_width,
            height = self.ball_height,
            initial_x_coordinate = self.initial_x_coordinate,
            initial_y_coordinate = self.initial_y_coordinate,
            display_size = self.display_size
        )
        self.all_sprites_list = pygame.sprite.Group()
        self.all_sprites_list.add(self.ball)
        self.FPS = 0  # last frame rate measured by draw(), read by print_FPS()
    def process_events(self):
        """Poll the pygame event queue; stop the main loop on window close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.running = False
    def update_model(self):
        """Advance every sprite (currently just the ball) by one step."""
        self.all_sprites_list.update()
    def draw_frame(self):
        """Clear the display to black and draw all sprites onto it."""
        self.display.fill(Color.black)
        self.all_sprites_list.draw(self.display)
    def draw(self):
        """Main loop: draw, update, flip and poll events, capped at 60 FPS."""
        clock = pygame.time.Clock()
        while self.running:
            self.draw_frame()
            self.update_model()
            pygame.display.update()
            self.process_events()
            clock.tick(60)
            self.FPS = clock.get_fps()
    # def run_model(self):
    #     clock = pygame.time.Clock()
    #     while self.running:
    #         #self.all_sprites_list.draw(self.display)
    #         self.update_model()
    #         clock.tick(1000)
    def print_FPS(self):
        """Print the current FPS once per second (runs in its own thread)."""
        while self.running:
            print(f"FPS={self.FPS:04.2f}", end='\r' )
            time.sleep(1)
    def run(self):
        """Start the FPS-printer thread, run the main loop, then join."""
        #self.draw_frame__thread = multiprocessing.Process(target = self.draw_frame)
        #self.draw_frame__thread.start()
        self.print_FPS__thread = threading.Thread(target = self.print_FPS)
        self.print_FPS__thread.start()
        #self.run_model()
        self.draw()
        #self.draw_frame__thread.join()
        self.print_FPS__thread.join()
if __name__ == "__main__":
    # Create the game window and run until the user closes it.
    display = Pong_v0()
    display.run()
| [
"vicente.gonzalez.ruiz@gmail.com"
] | vicente.gonzalez.ruiz@gmail.com |
1c180ef08be3a1b5de480328ea4ffdb1327129a5 | 2e7a740bfa64475c8e8a36b7b9b588f905e6fbae | /fbprep/convert_to_linked_list.py | e3cfbf7623cb9877900ca84d9833961beddf039f | [] | no_license | steph-meyering/DSandAlgorithms | 4fdb17a7e7432856859f50e52d78058061faa292 | 540f5958d8b84a6574c8fc5c0ee0779370c35e6d | refs/heads/master | 2023-03-15T15:36:31.772227 | 2021-04-28T00:43:42 | 2021-04-28T00:43:42 | 254,006,896 | 0 | 0 | null | 2023-03-03T15:09:55 | 2020-04-08T06:42:39 | Python | UTF-8 | Python | false | false | 783 | py | # def convert_to_linked_list(root):
# head, _ = dfs(root)
# return head
# def dfs(root):
# if root is None:
# return None, None
# head, prev = dfs(root.left)
# next, tail = dfs(root.right)
# root.left = prev
# root.right = next
# if head is None:
# head = root
# if tail is None:
# tail = root
# return head, tail
def convert_to_linked_list(root):
    """Convert a binary search tree into a sorted doubly linked list, in place.

    The tree nodes themselves are reused: after conversion each node's
    ``left`` points to its in-order predecessor and ``right`` to its in-order
    successor.

    :param root: root node of the BST (nodes expose ``left``/``right``), or None
    :return: head node of the resulting list, or None for an empty tree
    """
    in_order = []

    def _inorder(node):
        # Recursive in-order walk: left subtree, node, right subtree.
        if node is not None:
            _inorder(node.left)
            in_order.append(node)
            _inorder(node.right)

    _inorder(root)
    if not in_order:
        # Empty tree: the original implementation raised IndexError here.
        return None
    # Rewire each node's left/right pointers to its neighbours in sorted order.
    last = len(in_order) - 1
    for i, node in enumerate(in_order):
        node.left = in_order[i - 1] if i > 0 else None
        node.right = in_order[i + 1] if i < last else None
    return in_order[0]
def dfs(root, arr):
    """Append the tree's nodes to *arr* in in-order (left, node, right) sequence."""
    if root is None:
        return
    dfs(root.left, arr)
    arr.append(root)
    dfs(root.right, arr)
| [
"stephane.meyering@gmail.com"
] | stephane.meyering@gmail.com |
7dd82ad7b8efdfa97922d5161038f9805e99e879 | 2beb11caac290bb67811df76cf501a1c37f947ed | /cfrPython/src/cfrMNIST/mnist_deep.py | 56d29c425e15ff11f076dc68a63adf1b4e96bc47 | [] | no_license | cfrcfrrr/JavaDemo | 4763975d93db10815999d8b94d33b2bcfe95ed1f | ed9f47a56826ac02a88a74e33793e27c11c696a5 | refs/heads/master | 2020-03-29T13:48:56.781742 | 2018-11-18T03:38:11 | 2018-11-18T03:38:11 | 149,983,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,047 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/pros
"""
# Disable linter warnings to maintain consistency with tutorial.
# pylint: disable=invalid-name
# pylint: disable=g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Populated with parsed command-line flags by the __main__ block below.
FLAGS = None
def deepnn(x):
  """deepnn builds the graph for a deep net for classifying digits.
  Args:
    x: an input tensor with the dimensions (N_examples, 784), where 784 is the
    number of pixels in a standard MNIST image.
  Returns:
    A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values
    equal to the logits of classifying the digit into one of 10 classes (the
    digits 0-9). keep_prob is a scalar placeholder for the probability of
    dropout.
  """
  # Reshape to use within a convolutional neural net.
  # Last dimension is for "features" - there is only one here, since images are
  # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
  with tf.name_scope('reshape'):
    x_image = tf.reshape(x, [-1, 28, 28, 1])
  # First convolutional layer - maps one grayscale image to 32 feature maps.
  with tf.name_scope('conv1'):
    W_conv1 = weight_variable([5, 5, 1, 32])  # 5x5 kernels, 1 in / 32 out channels
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
  # Pooling layer - downsamples by 2X (28x28 -> 14x14).
  with tf.name_scope('pool1'):
    h_pool1 = max_pool_2x2(h_conv1)
  # Second convolutional layer -- maps 32 feature maps to 64.
  with tf.name_scope('conv2'):
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
  # Second pooling layer (14x14 -> 7x7).
  with tf.name_scope('pool2'):
    h_pool2 = max_pool_2x2(h_conv2)
  # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
  # is down to 7x7x64 feature maps -- maps this to 1024 features.
  with tf.name_scope('fc1'):
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
  # Dropout - controls the complexity of the model, prevents co-adaptation of
  # features.  keep_prob is fed at run time (feeding 1.0 disables dropout).
  with tf.name_scope('dropout'):
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
  # Map the 1024 features to 10 classes, one for each digit
  with tf.name_scope('fc2'):
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
  return y_conv, keep_prob
def conv2d(x, W):
  """Apply a 2-D convolution with stride 1 in every dimension and SAME padding."""
  unit_strides = [1, 1, 1, 1]
  return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
  """Halve each spatial dimension of a feature map via 2x2 max pooling."""
  two_by_two = [1, 2, 2, 1]
  return tf.nn.max_pool(x, ksize=two_by_two, strides=two_by_two, padding='SAME')
def weight_variable(shape):
  """Create a trainable weight tensor initialised from a truncated normal (stddev 0.1)."""
  return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
  """Create a trainable bias tensor of the given shape filled with 0.1."""
  return tf.Variable(tf.constant(0.1, shape=shape))
def main(_):
  """Train the CNN on MNIST for 20000 mini-batches and print test accuracy.
  _: unused argv placeholder supplied by tf.app.run.
  """
  # Import data
  mnist = input_data.read_data_sets(FLAGS.data_dir)
  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  # Define loss and optimizer
  y_ = tf.placeholder(tf.int64, [None])
  # Build the graph for the deep net
  y_conv, keep_prob = deepnn(x)
  with tf.name_scope('loss'):
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(
        labels=y_, logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)
  with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
  with tf.name_scope('accuracy'):
    # Fraction of predictions matching the integer labels.
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), y_)
    correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
  # Write the graph definition to a temp dir so TensorBoard can display it.
  graph_location = tempfile.mkdtemp()
  print('Saving graph to: %s' % graph_location)
  train_writer = tf.summary.FileWriter(graph_location)
  train_writer.add_graph(tf.get_default_graph())
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
      batch = mnist.train.next_batch(50)
      if i % 100 == 0:
        # Evaluate on the current batch with dropout disabled (keep_prob=1.0).
        train_accuracy = accuracy.eval(feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print('step %d, training accuracy %g' % (i, train_accuracy))
      train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print('test accuracy %g' % accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
if __name__ == '__main__':
  # Parse the data-directory flag; unrecognised CLI arguments are kept in
  # `unparsed` and forwarded to tf.app.run below.
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str,
                      default='/tmp/tensorflow/mnist/input_data',
                      help='Directory for storing input data')
  FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | [
"33483958+cfrcfrrr@users.noreply.github.com"
] | 33483958+cfrcfrrr@users.noreply.github.com |
05b2d8aa80a938aba52bdf7ca4afbcb77177ee21 | 71707d97f1a61f77d1b6ba17568a8cfe4ffa0e65 | /15 英鎊比.py | b90e6e57276eead1fb45b32a65a31ddec09ddfd7 | [] | no_license | vivian2943/01 | b0dbb42d1ba8678e28759cc906b8d740a7e529ef | 26742b9ffe9cb6750d7a3f97e7def6434a606e2e | refs/heads/main | 2023-06-04T16:28:47.978530 | 2021-06-22T12:40:23 | 2021-06-22T12:40:23 | 372,204,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | rate = input('輸入英鎊比率:')
# Read the salary, convert it with the previously entered GBP rate, and
# print the result in New Taiwan dollars.
salary_text = input('輸入薪水:')
amount_twd = float(rate) * int(salary_text)
print(str(amount_twd) + '台幣')
| [
"noreply@github.com"
] | vivian2943.noreply@github.com |
0f702ff15d1d5b9145082f6402c50e7a282d49a8 | 5b3d8b5c612c802fd846de63f86b57652d33f672 | /Python/eight_kyu/make_negative.py | 1ced2d2e37e6381d69e9df3fff51514a55f71b75 | [
"Apache-2.0"
] | permissive | Brokenshire/codewars-projects | 1e591b57ed910a567f6c0423beb194fa7f8f693e | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | refs/heads/master | 2021-07-22T18:50:25.847592 | 2021-01-25T23:27:17 | 2021-01-25T23:27:17 | 228,114,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # Python solution for 'Return Negative' codewars question.
# Level: 8 kyu
# Tags: FUNDAMENTALS and NUMBERS.
# Author: Jack Brokenshire
# Date: 11/04/2020
import unittest
def make_negative(number):
    """
    Return the negative counterpart of a given number.

    :param number: a numeric value.
    :return: the value made negative; already-negative input and zero are
             returned unchanged.
    """
    return number if number <= 0 else -number
class TestMakeNegative(unittest.TestCase):
    """Unit tests for the make_negative function."""
    def test_make_negative(self):
        # (input, expected) pairs covering positive, negative and zero inputs.
        cases = [(42, -42), (1, -1), (-5, -5), (0, 0)]
        for value, expected in cases:
            self.assertEqual(make_negative(value), expected)
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"29889878+Brokenshire@users.noreply.github.com"
] | 29889878+Brokenshire@users.noreply.github.com |
75d55fc725194a32fcecd23a78c767b5279c1bb5 | 96f6390a000254f942a68ab4bc349f1403a35ae3 | /recipes/migrations/0002_rename_daily_recipe.py | 41f3af48beca1a13ad67abd06e4667ca174df5fb | [] | no_license | tak-ka3/django_Restframework | c8e91c2582bd6cb903e9339cdc243fe37d632348 | 8d8458a229fb63fcab711e3f4adb1e37bfb08d56 | refs/heads/main | 2023-05-23T17:22:34.657803 | 2021-06-13T04:57:11 | 2021-06-13T04:57:11 | 376,421,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # Generated by Django 3.2.3 on 2021-06-13 01:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('recipes', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Daily',
new_name='Recipe',
),
]
| [
"hi.ta9.23.333@gmail.com"
] | hi.ta9.23.333@gmail.com |
be5c1b5992e68428d06e14747e5ee74245b52472 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/elimination-game/365996335.py | a8932065ba6959fe4df1131bf0761ece4fd6de2d | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | # title: elimination-game
# detail: https://leetcode.com/submissions/detail/365996335/
# datetime: Mon Jul 13 18:50:53 2020
# runtime: 52 ms
# memory: 13.7 MB
class Solution:
def lastRemaining(self, n: int) -> int:
return (2 * (n // 2 - self.lastRemaining(n // 2) + 1)) if n > 1 else 1
| [
"ljm51689@gmail.com"
] | ljm51689@gmail.com |
c1ad3f14fa4ee87e676d126d504d451079462174 | 6831b5a2e1b31094c8edc5ec68157efb838a6051 | /processing/scripts/__init__.py | 49217b24c3360ffa0d7cfe741599bd7cd1059716 | [] | no_license | optionalg/backend | c7f60518a72b6b953d7797b20eb03df8d8cedfca | 905888926cd57a1120f65ab94b9db9491ad9bfd9 | refs/heads/master | 2020-03-18T08:00:10.479204 | 2017-04-25T19:00:12 | 2017-04-25T19:00:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,331 | py | import logging
import click
from helpers.topics import get_static_topics, transform_topic_name
from main import app
from processing import db, Tweet
@app.cli.command()
def cli_update_sentiment_and_region_classification():
    """Flask CLI entry point: re-process every stored tweet (see below)."""
    click.echo("Running update_region_and_topic_classification")
    update_sentiment_and_region_classification()
    click.echo("Done")
def update_sentiment_and_region_classification():
    """Re-run processing for every stored tweet and persist the refreshed fields."""
    for raw_document in db.tweets.find({}):
        tweet = Tweet.load_stripped_tweet(raw_document)
        tweet.process()
        db.tweets.update_one({"_id": tweet.id}, {"$set": tweet.get_full_dict()})
@app.cli.command()
def cli_remove_irrelevant_tweets():
    """Flask CLI entry point: purge mis-classified tweets and report counts."""
    click.echo("Running remove_irrelevant_tweets")
    nb_invalid_classification, nb_no_topics, nb_transformed_topic_names = remove_irrelevant_tweets()
    click.echo("Removed {} because they were assigned to an invalid topic".format(nb_invalid_classification))
    click.echo("Removed {} because they had no topic assigned".format(nb_no_topics))
    click.echo("Removed {} because they had an invalid topic name".format(nb_transformed_topic_names))
    click.echo("Done")
def remove_irrelevant_tweets():
    """Clean up stored tweets whose topic assignment is missing or invalid.

    Three cases are handled per stored document:
      * tweet assigned to a known static topic its text does not match:
        re-inserted under every topic that does match (unless already stored),
        then the mis-classified document is deleted;
      * tweet with no topic at all: deleted;
      * tweet whose topic name normalises to a different name: re-inserted
        under the transformed name (unless already stored), old document deleted.

    :return: tuple (nb_invalid_classification, nb_no_topics,
             nb_transformed_topic_names) counting removals per case.
    """
    cursor = db.tweets.find({})
    static_topics = get_static_topics()
    static_topics = {topic.topic_name: topic for topic in static_topics}
    nb_invalid_classification = 0
    nb_no_topics = 0
    nb_transformed_topic_names = 0
    for t in cursor:
        tweet = Tweet.load_stripped_tweet(t)
        if tweet.topic in static_topics:
            if static_topics[tweet.topic].tweet_is_about_topic(tweet.text):
                continue  # classification is valid; keep the document
            logging.info("Invalid topic classification ({}) for tweet {}".format(tweet.topic, tweet.text))
            for topic_name, topic in static_topics.items():
                if topic.tweet_is_about_topic(tweet.text):
                    logging.info("Instead classifying as {}".format(topic_name))
                    current = db.tweets.find_one({"tweet_id": tweet.tweet_id, "topic": topic_name})
                    if current is not None:
                        logging.info("Already present")
                        continue
                    logging.info("Newly added")
                    updated_dict = tweet.get_full_dict()
                    updated_dict["topic"] = topic_name
                    db.tweets.insert_one(updated_dict)
            nb_invalid_classification += 1
            db.tweets.delete_one({"_id": tweet.id})
        elif tweet.topic is None:
            nb_no_topics += 1
            db.tweets.delete_one({"_id": tweet.id})
        else:
            transformed_topic = transform_topic_name(tweet.topic)
            if transformed_topic == tweet.topic:
                continue
            db.tweets.delete_one({"_id": tweet.id})
            nb_transformed_topic_names += 1
            # Single existence check: the original duplicated this query both
            # before and after building updated_dict, but nothing between the
            # two checks could change the result, so one check suffices.
            if db.tweets.find_one({"tweet_id": tweet.tweet_id, "topic": transformed_topic}) is not None:
                continue
            updated_dict = tweet.get_full_dict()
            updated_dict["topic"] = transformed_topic
            db.tweets.insert_one(updated_dict)
    return nb_invalid_classification, nb_no_topics, nb_transformed_topic_names
"floris.kint@gmail.com"
] | floris.kint@gmail.com |
099cbf71916f59418e98d995adba9f81835ae2f0 | cc3e56faea644ddd001e237d6d2f0f607e288dba | /Scripts/bugs.py | 842072e45834df63243376636148ed542dc32ff5 | [] | no_license | ChristopheBunn/Udacity-Data-Scientist-Nanodegree | 4b4d10f6ac3713f3068a1753703a03a5fecf992f | 7d2dc8cb063f8ef7915d903e7d98fa659fd362c2 | refs/heads/master | 2022-12-22T00:23:29.348577 | 2020-01-21T21:24:58 | 2020-01-21T21:24:58 | 208,598,058 | 1 | 3 | null | 2022-12-08T06:53:45 | 2019-09-15T13:26:42 | Jupyter Notebook | UTF-8 | Python | false | false | 3,078 | py | import pandas as pd
# Load the bug dataset and report its basic composition.
bugs = pd.read_csv('ml-bugs.csv')
print(bugs)
print("\nbugs is of type {}".format(type(bugs)))
num_total = bugs['Species'].size
print("\nThere are a total of {} bugs.".format(num_total))
# Per-species and per-colour occurrence counts (pandas Series).
species = bugs.groupby(['Species'])['Species'].count()
print("\n{} Species:\n{}".format(num_total, species))
colors = bugs.groupby(['Color'])['Color'].count()
print("\n{} Colors:\n{}".format(num_total, colors))
# Candidate decision-tree splits: for each criterion, count species inside
# and outside the split so information_gain() can compare the partitions.
blue_bugs = bugs[bugs['Color'] == 'Blue'].groupby(['Species'])['Species'].count()
not_blue_bugs = bugs[bugs['Color'] != 'Blue'].groupby(['Species'])['Species'].count()
brown_bugs = bugs[bugs['Color'] == 'Brown'].groupby(['Species'])['Species'].count()
not_brown_bugs = bugs[bugs['Color'] != 'Brown'].groupby(['Species'])['Species'].count()
green_bugs = bugs[bugs['Color'] == 'Green'].groupby(['Species'])['Species'].count()
not_green_bugs = bugs[bugs['Color'] != 'Green'].groupby(['Species'])['Species'].count()
length17_bugs = bugs[bugs['Length (mm)'] < 17].groupby(['Species'])['Species'].count()
not_length17_bugs = bugs[bugs['Length (mm)'] >= 17].groupby(['Species'])['Species'].count()
length20_bugs = bugs[bugs['Length (mm)'] < 20].groupby(['Species'])['Species'].count()
not_length20_bugs = bugs[bugs['Length (mm)'] >= 20].groupby(['Species'])['Species'].count()
print("\nspecies, colors, and lengths are of type {}".format(type(species)))
import math
def entropy(elements):
counts = list()
counts_sum = 0
entropy = 0
for element in elements.iteritems():
counts.append(element[1]) # put all counts in a list
counts_sum += element[1] # add all counts
#print("elements = {}".format(elements))
#print("counts = {}, sum = {}".format(counts, counts_sum))
for count in counts:
probability = count / counts_sum
entropy -= probability*math.log2(probability)
return pd.Series(data = [counts_sum, entropy], index = ['total', 'entropy'])
def information_gain(parent, child1, child2):
    """Information gain of splitting *parent* into *child1* and *child2*.

    Each argument is a Series of per-label counts, summarised by entropy().
    """
    parent_stats = entropy(parent)
    child1_stats = entropy(child1)
    child2_stats = entropy(child2)
    n_parent = parent_stats['total']
    weighted_child_entropy = (
        child1_stats['total'] / n_parent * child1_stats['entropy']
        + child2_stats['total'] / n_parent * child2_stats['entropy']
    )
    return parent_stats['entropy'] - weighted_child_entropy
# --------------------------------------------------------------------
# Report the information gain of each candidate split (rounded to 5 places);
# the split with the highest gain is the best decision-tree root.
print("Split Blue Information Gain = {}".format(round(information_gain(species, blue_bugs, not_blue_bugs), 5)))
print("Split Brown Information Gain = {}".format(round(information_gain(species, brown_bugs, not_brown_bugs), 5)))
print("Split Green Information Gain = {}".format(round(information_gain(species, green_bugs, not_green_bugs), 5)))
print("Split < 17 Information Gain = {}".format(round(information_gain(species, length17_bugs, not_length17_bugs), 5)))
print("Split < 20 Information Gain = {}".format(round(information_gain(species, length20_bugs, not_length20_bugs), 5)))
"christophe.bunn@gmail.com"
] | christophe.bunn@gmail.com |
989b4a490afd630ec9d6eb1ab26d1e0490707b00 | 6a4680f5a0130edd3dc7bfd4f085e97ab0819fff | /linearregression/linearregression.py | ed6da1ddc71282d6b1eb7c481f854821b0ac42f4 | [] | no_license | shristiparajuli/linearregression | 3c14cdbc55f65ef1ab8c870d5b1f7094611cd09c | 2cb0ed822e34f5f0a85f1c206113e19d0b0ccd34 | refs/heads/master | 2022-12-21T08:01:40.556499 | 2020-09-28T09:02:44 | 2020-09-28T09:02:44 | 299,247,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | import pandas as pd
import numpy as np
import sklearn
from sklearn import linear_model
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pickle
from matplotlib import style
# Load the student performance dataset (semicolon-separated).
# NOTE(review): absolute Windows path -- adjust for other machines.
data = pd.read_csv("C:\\student-mat.csv", sep=";")
# Keep the two prior grades, study time, failures and absences; G3 (the
# final grade) is the value to predict.
data = data[["G1", "G2", "G3","studytime", "failures", "absences"]]
predict = "G3"
x = np.array(data.drop([predict], 1))
y = np.array(data[predict])
# Hold out 10% of the samples for evaluation.
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(x, y, test_size = 0.1)
linear = linear_model.LinearRegression()
linear.fit(x_train, y_train)
acc = linear.score(x_test, y_test)  # model score on the held-out split
print (acc)
# Persist the fitted model, then reload it (demonstrates pickling a model).
with open("studentmodel.pickle", "wb") as f:
    pickle.dump(linear, f)
pickle_in = open("studentmodel.pickle", "rb")
linear = pickle.load(pickle_in)
print("Coefficient: " + str(linear.coef_))
print("Intercept: "+ str(linear.intercept_))
# Show each prediction next to its input features and the true grade.
predictions = linear.predict(x_test)
for x in range (len(predictions)):
    print(predictions[x], x_test[x], y_test[x])
| [
"noreply@github.com"
] | shristiparajuli.noreply@github.com |
1dc16a63a83e65662628b2453ff91ff337eff28d | 3de21fc587c02f2702bd5770f11a31d5558a4666 | /django_ac22/apps/avisos/forms.py | f0f90b51cfdad481b5d8887b01638b45daf0f108 | [] | no_license | juanros13/ac22 | 8c20d59de62d596a73d6d7190f551ef3accf2b8e | d8ecf0686f3d8a57a747503b231b46277db71a6e | refs/heads/master | 2020-04-16T11:24:07.344404 | 2016-09-22T23:51:39 | 2016-09-22T23:51:39 | 65,859,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # -*- encoding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import authenticate
from django.forms.widgets import Select, Textarea
from apps.avisos.models import Aviso, ComentarioAviso
class AvisoAddForm(forms.ModelForm):
    """ModelForm for creating/editing an Aviso (announcement).

    Exposes tipo, titulo, contenido and mantener_al_principio; the text
    widgets carry Bootstrap's form-control CSS class.
    """
    # Title field with a styled text input and Spanish placeholder/label.
    titulo = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder' : 'Ingresa el titulo del aviso',
            }
        ),
        label = "Titulo del aviso",
    )
    class Meta:
        model = Aviso
        fields = ('tipo','titulo', 'contenido','mantener_al_principio')
        widgets = {
            'contenido': Textarea(
                attrs={
                    'class': 'form-control',
                }
            ),
        }
class ComentarioAddForm(forms.ModelForm):
    """ModelForm for adding a ComentarioAviso (comment on an announcement)."""
    class Meta:
        model = ComentarioAviso
        fields = ('comentario',)
        widgets = {
            'comentario': Textarea(
                attrs={
                    'class': 'form-control',
                }
            ),
        }
| [
"juanros13@gmail.com"
] | juanros13@gmail.com |
0cc92092dbd744015e475b3cd777349197c893ed | acbaf0ac316ceffad927da213c6a4cd02bf91549 | /Django/Craiglist/MyApp/urls.py | 30e0fdf65523a78b4f36737abd2f0e47b34a70c4 | [] | no_license | Rohyth/Python | be4a177b454c46d77e6c1b5b894e3cf78d136280 | 8e4dbbed704ccc3efe85fae4349e4b164b66ae3e | refs/heads/master | 2022-12-22T02:40:30.511768 | 2021-05-08T19:45:28 | 2021-05-08T19:45:28 | 254,495,454 | 0 | 0 | null | 2022-12-08T10:00:56 | 2020-04-09T22:56:34 | Python | UTF-8 | Python | false | false | 172 | py | from django.urls import path
from MyApp import views
# Route table for MyApp: the landing page and the search endpoint.
urlpatterns = [
    path('', views.home, name='home'),
    path('new_search', views.new_search, name='new_search'),
]
| [
"Rohitratawal@gmail.com"
] | Rohitratawal@gmail.com |
9e6d98b674fe76be7f90c72ef6ee93503565d747 | aa731f3e006a75079dde2471351d49b7ef93e8d6 | /setup.py | d8d1f68ec2dd02bc91d2667878389e5df1db2ae2 | [
"MIT"
] | permissive | c1au6i0/pokerpy | 0e40f38104c2e2e536283b266a0c320f5418bf68 | 5846ebe323f13e5852c48b18c052b2f3f14a1efe | refs/heads/master | 2022-11-13T13:14:05.218000 | 2020-07-11T04:12:49 | 2020-07-11T04:12:49 | 259,125,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | import setuptools
# Read the README for use as the package's long description.
# NOTE(review): the value read here is never used -- the setup() call below
# passes a hard-coded long_description string instead; confirm which is intended.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="pokerpy-c1au6io-dottorDav", # Replace with your own username
    version="0.0.0.9",
    author="Claudio Zanettini, Davide Colella",
    author_email="claudio.zanettini@gmail.com, dottordav@gmail.com ",
    description="An implementation of the classical poker in Python",
    long_description="""
The aim of this project is to develop in `python` the classical power game, and in the process to learn more about `python`,
probability and game theory and deep inside about our-selves and the meaning of life.
We are well aware that there are many other people that did it already in python (es: [link](https://pypi.org/project/poker/))
and that poker is quite complex, but again this is an exercise for us, and being able to take a peak at the work of someone
else (much more experience then us) makes it even more informative.
"""
    ,
    long_description_content_type="text/markdown",
    # NOTE(review): template leftover -- this URL points at pypa/sampleproject,
    # not this repository.
    url="https://github.com/pypa/sampleproject",
    packages=setuptools.find_packages(exclude=('tests', 'docs')),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires = [
        'pandas',
        'numpy'
    ],
    python_requires='>=3.6',
) | [
"claudio.zanettini@gmail.com"
] | claudio.zanettini@gmail.com |
9d970a6420fe907b2979185d2b48aa7ae78262f1 | 5c61990fc1a79f389111a3e449c1fadf65fc1b8c | /portnet_reports/indicateurs_financier/__init__.py | b8f88826d9a9cd5ecc4a5bc263880c787dbaefa2 | [] | no_license | brahim94/portnet | 3befb64009fd014b74e01151cc429a613d3d2f11 | f1120ce4806ba2fd7e26132ca918d1ce8b9ad32c | refs/heads/master | 2023-04-14T07:17:40.956207 | 2021-04-27T16:37:48 | 2021-04-27T16:37:48 | 356,211,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | import financial_indicator
| [
"brahim-ayad@hotmail.com"
] | brahim-ayad@hotmail.com |
4a16bae85cdddfd2118cbf68bd4399070109330e | 1520f013535ebcb19578083059316194b236eff1 | /zoo/inception/inception_v2_c.py | a84e6ff36802158a80029605530de20d232064df | [
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | koyo-jakanees/keras-idiomatic-programmer | ed12bb67dbc6c866148db44c2952ecf83b024ac1 | 0669dff3a14c41b8aedd2605b21a9c6b546e17a3 | refs/heads/master | 2020-09-23T01:39:35.056403 | 2019-12-02T00:27:55 | 2019-12-02T00:27:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,117 | py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Inception v2 (GoogLeNet) - Composable
# Paper: https://arxiv.org/pdf/1409.4842.pdf
import tensorflow as tf
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Conv2D, ReLU, ZeroPadding2D, Flatten, Dropout, BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, Dense, Concatenate, AveragePooling2D
class InceptionV2(object):
    """ Construct an Inception Convolutional Neural Network (v2 / BN-Inception).

    The network is assembled as: stem -> inception groups (with two auxiliary
    classifiers branching off group 4) -> classifier.  Every convolution is
    bias-free and followed by BatchNormalization + ReLU.
    """
    # Default kernel initializer shared by all conv/dense layers
    init_weights = 'glorot_uniform'
    _model = None

    def __init__(self, dropout=0.4, input_shape=(224, 224, 3), n_classes=1000):
        """ Construct an Inception Convolutional Neural Network
            dropout    : percentage of dropout (applied in the classifier head)
            input_shape: input shape to the neural network
            n_classes  : number of output classes
        """
        # BUG FIX: the dropout argument is honored; a previous version
        # unconditionally overwrote it with the literal 0.4.

        # The input tensor
        inputs = Input(shape=input_shape)

        # The stem convolutional group
        x = self.stem(inputs)

        # The learner (also collects the auxiliary classifier outputs)
        x, aux = self.learner(x, n_classes)

        # The classifier
        outputs = self.classifier(x, n_classes, dropout)

        # Instantiate the Model: main output first, then the auxiliary outputs
        self._model = Model(inputs, [outputs] + aux)

    @property
    def model(self):
        """The constructed Keras Model."""
        return self._model

    @model.setter
    def model(self, _model):
        # BUG FIX: previously assigned the undefined name `model`, which
        # raised NameError whenever the setter was used.
        self._model = _model

    def stem(self, inputs):
        """ Construct the Stem Convolutional Group
            inputs : the input vector
        """
        # The 224x224 images are zero padded (black - no signal) to be 230x230 images prior to the first convolution
        x = ZeroPadding2D(padding=(3, 3))(inputs)

        # First Convolutional layer which uses a large (coarse) filter
        x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', use_bias=False, kernel_initializer=self.init_weights)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Pooled feature maps will be reduced by 75%
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)

        # Second Convolutional layer which uses a mid-size filter
        x = Conv2D(64, (1, 1), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=self.init_weights)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(192, (3, 3), strides=(1, 1), padding='valid', use_bias=False, kernel_initializer=self.init_weights)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Pooled feature maps will be reduced by 75%
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        return x

    def learner(self, x, n_classes):
        """ Construct the Learner
            x        : input to the learner
            n_classes: number of output classes
            Returns the feature tensor and the list of auxiliary outputs.
        """
        aux = []  # Auxiliary Outputs

        # Group 3
        x, o = InceptionV2.group(x, [((64,), (96, 128), (16, 32), (32,)),     # 3a
                                     ((128,), (128, 192), (32, 96), (64,))])  # 3b
        aux += o

        # Group 4 (None entries mark where auxiliary classifiers branch off)
        x, o = InceptionV2.group(x, [((192,), (96, 208), (16, 48), (64,)),      # 4a
                                     None,                                      # auxiliary classifier
                                     ((160,), (112, 224), (24, 64), (64,)),     # 4b
                                     ((128,), (128, 256), (24, 64), (64,)),     # 4c
                                     ((112,), (144, 288), (32, 64), (64,)),     # 4d
                                     None,                                      # auxiliary classifier
                                     ((256,), (160, 320), (32, 128), (128,))],  # 4e
                                  n_classes=n_classes)
        aux += o

        # Group 5 (no trailing max pooling; the classifier pools globally)
        x, o = InceptionV2.group(x, [((256,), (160, 320), (32, 128), (128,)),   # 5a
                                     ((384,), (192, 384), (48, 128), (128,))],  # 5b
                                  pooling=False)
        aux += o
        return x, aux

    @staticmethod
    def group(x, blocks, pooling=True, n_classes=1000, init_weights=None):
        """ Construct an Inception group
            x         : input into the group
            blocks    : filters for each block in the group
            pooling   : whether to end the group with max pooling
            n_classes : number of classes for auxiliary classifier
            init_weights : kernel initializer (class default when None)
        """
        if init_weights is None:
            init_weights = InceptionV2.init_weights

        aux = []  # Auxiliary Outputs

        # Construct the inception blocks (modules)
        for block in blocks:
            # A None entry marks the position of an auxiliary classifier
            if block is None:
                aux.append(InceptionV2.auxiliary(x, n_classes))
            else:
                x = InceptionV2.inception_block(x, block[0], block[1], block[2], block[3])

        if pooling:
            x = ZeroPadding2D(padding=(1, 1))(x)
            x = MaxPooling2D((3, 3), strides=2)(x)
        return x, aux

    @staticmethod
    def inception_block(x, f1x1, f3x3, f5x5, fpool, init_weights=None):
        """ Construct an Inception block (module)
            x    : input to the block
            f1x1 : filters for 1x1 branch
            f3x3 : filters for 3x3 branch
            f5x5 : filters for 5x5 branch
            fpool: filters for pooling branch
        """
        if init_weights is None:
            init_weights = InceptionV2.init_weights

        # 1x1 branch
        b1x1 = Conv2D(f1x1[0], (1, 1), strides=1, padding='same', use_bias=False, kernel_initializer=init_weights)(x)
        b1x1 = BatchNormalization()(b1x1)
        b1x1 = ReLU()(b1x1)

        # 3x3 branch
        # 3x3 reduction
        b3x3 = Conv2D(f3x3[0], (1, 1), strides=1, padding='same', use_bias=False, kernel_initializer=init_weights)(x)
        b3x3 = BatchNormalization()(b3x3)
        b3x3 = ReLU()(b3x3)
        b3x3 = ZeroPadding2D((1, 1))(b3x3)
        b3x3 = Conv2D(f3x3[1], (3, 3), strides=1, padding='valid', use_bias=False, kernel_initializer=init_weights)(b3x3)
        b3x3 = BatchNormalization()(b3x3)
        b3x3 = ReLU()(b3x3)

        # 5x5 branch
        # 5x5 reduction
        # NOTE: despite the branch name, a 3x3 kernel is used below — the v2
        # design factorizes the v1 5x5 convolution into a cheaper 3x3.
        b5x5 = Conv2D(f5x5[0], (1, 1), strides=1, padding='same', use_bias=False, kernel_initializer=init_weights)(x)
        b5x5 = BatchNormalization()(b5x5)
        b5x5 = ReLU()(b5x5)
        b5x5 = ZeroPadding2D((1, 1))(b5x5)
        b5x5 = Conv2D(f5x5[1], (3, 3), strides=1, padding='valid', use_bias=False, kernel_initializer=init_weights)(b5x5)
        b5x5 = BatchNormalization()(b5x5)
        b5x5 = ReLU()(b5x5)

        # Pooling branch
        bpool = MaxPooling2D((3, 3), strides=1, padding='same')(x)
        # 1x1 projection
        bpool = Conv2D(fpool[0], (1, 1), strides=1, padding='same', use_bias=False, kernel_initializer=init_weights)(bpool)
        bpool = BatchNormalization()(bpool)
        bpool = ReLU()(bpool)

        # Concatenate the outputs (filters) of the branches
        x = Concatenate()([b1x1, b3x3, b5x5, bpool])
        return x

    @staticmethod
    def auxiliary(x, n_classes, init_weights=None):
        """ Construct an auxiliary classifier
            x        : input to the auxiliary classifier
            n_classes: number of output classes
        """
        if init_weights is None:
            init_weights = InceptionV2.init_weights
        x = AveragePooling2D((5, 5), strides=(3, 3))(x)
        x = Conv2D(128, (1, 1), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=init_weights)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Flatten()(x)
        x = Dense(1024, activation='relu', kernel_initializer=init_weights)(x)
        x = Dropout(0.7)(x)
        output = Dense(n_classes, activation='softmax', kernel_initializer=init_weights)(x)
        return output

    def classifier(self, x, n_classes, dropout=0.4):
        """ Construct the Classifier Group
            x         : input to the classifier
            n_classes : number of output classes
            dropout   : percentage for dropout rate
        """
        # Pool at the end of all the convolutional residual blocks
        x = AveragePooling2D((7, 7))(x)
        x = Flatten()(x)
        x = Dropout(dropout)(x)

        # Final Dense Outputting Layer for the outputs
        outputs = Dense(n_classes, activation='softmax', kernel_initializer=self.init_weights)(x)
        return outputs
# Example
# inception = InceptionV2()
| [
"noreply@github.com"
] | koyo-jakanees.noreply@github.com |
81b9658d7beef3f5af94d215949d0df32e66dc26 | df8ec66b10e97956f80ec52503dd456372c03c4a | /plotter/objects/selections.py | b4d32bd707d2c9a8132f64b9d47eda95be2cf1ba | [] | no_license | amlyon/plotter | 3670820faf9864501b666f2e157e435a8285a766 | 5a3295fbf5d0875fd4a1c53164ac45e92d3ccd05 | refs/heads/master | 2022-11-17T02:27:42.072710 | 2020-07-07T20:19:25 | 2020-07-07T20:19:25 | 270,580,472 | 0 | 0 | null | 2020-06-23T14:55:25 | 2020-06-08T07:56:20 | Python | UTF-8 | Python | false | false | 10,048 | py | from collections import OrderedDict
class Selections(object):
    """Event-selection cut strings for one analysis channel.

    `selections` maps a selection name to a ROOT-style cut string built from
    ntuple branch names (l0/l1/l2 lepton variables, hnl_* pair variables).
    `selections_pd` holds the same cuts translated into pandas `DataFrame.query`
    syntax (`&`/`|`/`!` -> `and`/`or`/`not`).
    Supported channels: 'mmm', 'mem', 'eem', 'eee'.
    """

    def __init__(self, channel):
        self.channel = channel
        self.base = None
        self.selections = OrderedDict()

        # Channel-dependent trigger pt thresholds and lepton identification.
        if self.channel == 'mmm':
            self.selections['pt_iso'] = ' & '.join([
                'l0_pt > 25',
                'l2_pt > 5',
                'l1_pt > 5',
                'l0_id_m == 1',
                'l1_id_hnl_m == 1',
                'l2_id_hnl_m == 1',
            ])

        if self.channel == 'mem':
            self.selections['pt_iso'] = ' & '.join([
                'l0_pt > 25',
                'l2_pt > 5',
                'l1_pt > 5',
                'l0_id_m == 1',
                'l1_id_hnl_l_niso == 1',
                'l2_id_hnl_m == 1',
            ])

        if self.channel == 'eem':
            self.selections['pt_iso'] = ' & '.join([
                'l0_pt > 32',
                'l2_pt > 5',
                'l1_pt > 5',
                'l0_id_mva_niso_90 == 1',
                'l1_id_hnl_l_niso == 1',
                'l2_id_hnl_m == 1',
            ])

        if self.channel == 'eee':
            self.selections['pt_iso'] = ' & '.join([
                'l0_pt > 32',
                'l2_pt > 5',
                'l1_pt > 5',
                'l0_id_mva_niso_90 == 1',
                'l1_id_hnl_l_niso == 1',
                'l2_id_hnl_l_niso == 1',
            ])

        # BUG FIX: for an unknown channel the original expression
        # `self.selections['pt_iso']` raised a bare KeyError *before* the
        # assert could fire; .get() lets the intended AssertionError (with
        # its message) be raised instead.
        assert self.selections.get('pt_iso'), 'Error: No channel specific selection applied!'

        self.selections['pre_baseline'] = ' & '.join([
            'abs(l0_eta) < 2.4',
            'abs(l0_dxy) < 0.05',
            'abs(l0_dz) < 0.1',
            'l0_reliso_rho_03 < 0.1',
            'abs(l1_eta) < 2.4',
            'l1_reliso_rho_03 < 10',
            'abs(l2_eta) < 2.4',
            'l2_reliso_rho_03 < 10',
            'hnl_q_12 == 0',
            'hnl_dr_12 < 1.',
            'hnl_dr_12 > 0.02',
            'hnl_m_12 < 20',
            'abs(hnl_dphi_01)>1.',
            'abs(hnl_dphi_02)>1.',  # dphi a la facon belgique
            'pass_met_filters==1',
        ])

        self.selections['baseline'] = ' & '.join([
            self.selections['pre_baseline'],
            'nbj == 0',
            'hnl_2d_disp_sig>20',
            'hnl_pt_12>15',
            'sv_cos>0.99',
            'sv_prob>0.001',
            'abs(l1_dz)<10',
            'abs(l2_dz)<10',
            'abs(l1_dxy) > 0.01',
            'abs(l2_dxy) > 0.01',
        ])

        self.selections['sideband'] = '!(hnl_w_vis_m > 50. & hnl_w_vis_m < 80.)'  # THIS IS IMPORTANT!
        self.selections['signal_region'] = '(hnl_w_vis_m > 50. & hnl_w_vis_m < 80.)'  # THIS IS IMPORTANT!

        # FSR veto
        # remove events where the three leptons make the Z mass
        # and at least two same flavour OS leptons are present
        self.selections['fsr_veto'] = '( (abs(hnl_w_vis_m-91.19)>10. & (l0_pdgid==-l1_pdgid | l0_pdgid==-l2_pdgid)) | !(l0_pdgid==-l1_pdgid | l0_pdgid==-l2_pdgid))'

        # self.selections['vetoes_12_OS'] = ' & '.join([
        #     # vetoes 12 (always OS anyways)
        #     'abs(hnl_m_12-3.0969) > 0.08',  # jpsi veto
        #     'abs(hnl_m_12-3.6861) > 0.08',  # psi (2S) veto
        #     'abs(hnl_m_12-0.7827) > 0.08',  # omega veto
        #     'abs(hnl_m_12-1.0190) > 0.08',  # phi veto
        # ])

        # after discussing with Martina 9/1/2020
        self.selections['vetoes_12_OS'] = ' & '.join([
            # vetoes 12 (always OS anyways), applied only at low displacement
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-3.0969) < 0.08)',  # jpsi veto
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-3.6861) < 0.08)',  # psi (2S) veto
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-0.7827) < 0.08)',  # omega veto
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-1.0190) < 0.08)',  # phi veto
        ])

        self.selections['vetoes_01_OS'] = ' & '.join([
            # vetoes 01 (only if OS)
            '!(hnl_q_01==0 & abs(hnl_m_01-91.1876) < 10)',    # Z veto
            '!(hnl_q_01==0 & abs(hnl_m_01- 9.4603) < 0.08)',  # Upsilon veto
            '!(hnl_q_01==0 & abs(hnl_m_01-10.0233) < 0.08)',  # Upsilon (2S) veto
            '!(hnl_q_01==0 & abs(hnl_m_01-10.3552) < 0.08)',  # Upsilon (3S) veto
            '!(hnl_q_01==0 & abs(hnl_m_01-3.0969) < 0.08)',   # jpsi veto
            '!(hnl_q_01==0 & abs(hnl_m_01-3.6861) < 0.08)',   # psi (2S) veto
            '!(hnl_q_01==0 & abs(hnl_m_01-0.7827) < 0.08)',   # omega veto
            '!(hnl_q_01==0 & abs(hnl_m_01-1.0190) < 0.08)',   # phi veto
        ])

        self.selections['vetoes_02_OS'] = ' & '.join([
            # vetoes 02 (only if OS)
            '!(hnl_q_02==0 & abs(hnl_m_02-91.1876) < 10)',    # Z veto
            '!(hnl_q_02==0 & abs(hnl_m_02- 9.4603) < 0.08)',  # Upsilon veto
            '!(hnl_q_02==0 & abs(hnl_m_02-10.0233) < 0.08)',  # Upsilon (2S) veto
            '!(hnl_q_02==0 & abs(hnl_m_02-10.3552) < 0.08)',  # Upsilon (3S) veto
            '!(hnl_q_02==0 & abs(hnl_m_02-3.0969) < 0.08)',   # jpsi veto
            '!(hnl_q_02==0 & abs(hnl_m_02-3.6861) < 0.08)',   # psi (2S) veto
            '!(hnl_q_02==0 & abs(hnl_m_02-0.7827) < 0.08)',   # omega veto
            '!(hnl_q_02==0 & abs(hnl_m_02-1.0190) < 0.08)',   # phi veto
        ])

        self.selections['tight'] = ' & '.join([
            'l1_reliso_rho_03 < 0.2',
            'l2_reliso_rho_03 < 0.2',
        ])

        # RM is this wrong? this allows for one of the two displaced leptons to be
        # neither prompt nor conversion
        # self.selections['is_prompt_lepton'] = '(%s)' %(' | '.join([
        #     'l1_gen_match_isPrompt==1',
        #     'l1_gen_match_pdgid==22',
        #     'l2_gen_match_isPrompt==1',
        #     'l2_gen_match_pdgid==22',
        # ]))

        self.selections['is_prompt_lepton'] = ' & '.join([
            '(l1_gen_match_isPrompt==1 | l1_gen_match_pdgid==22)',
            '(l2_gen_match_isPrompt==1 | l2_gen_match_pdgid==22)',
        ])

        self.selections['zmm'] = ' & '.join([
            'l0_pt > 40',
            'abs(l0_eta) < 2.4',
            'abs(l0_dxy) < 0.05',
            'abs(l0_dz) < 0.2',
            'l0_reliso_rho_03 < 0.2',
            'l0_id_t == 1',
            'l1_pt > 35',
            'abs(l1_eta) < 2.4',
            'abs(l1_dxy) < 0.05',
            'abs(l1_dz) < 0.2',
            'l1_reliso_rho_03 < 0.2',
            'l1_id_t == 1',
            'hnl_q_01==0',
            'abs(hnl_dphi_01)>1.',
            'pass_met_filters==1',
        ])

        self.selections['zee'] = ' & '.join([
            'l0_pt > 40',
            'abs(l0_eta) < 2.4',
            'abs(l0_dxy) < 0.05',
            'abs(l0_dz) < 0.2',
            'l0_reliso_rho_03 < 0.2',
            'l0_id_mva_niso_90 == 1',
            'l1_pt > 35',
            'abs(l1_eta) < 2.4',
            'abs(l1_dxy) < 0.05',
            'abs(l1_dz) < 0.2',
            'l1_reliso_rho_03 < 0.2',
            'l1_id_mva_niso_90 == 1',
            'hnl_q_01==0',
            'abs(hnl_dphi_01)>1.',
            'pass_met_filters==1',
        ])

        self.selections['ttbar_me*'] = ' & '.join([
            'l0_pt > 28',
            'abs(l0_eta) < 2.4',
            'abs(l0_dxy) < 0.05',
            'abs(l0_dz) < 0.2',
            'l0_reliso_rho_03 < 0.2',
            'l0_id_m == 1',
            'l1_pt > 10',
            'abs(l1_eta) < 2.4',
            'abs(l1_dxy) < 0.05',
            'abs(l1_dz) < 0.2',
            'l1_reliso_rho_03 < 0.2',
            'l1_id_mva_iso_90 == 1',
            'hnl_q_01==0',
            'nbj>=1',
            'abs(hnl_dphi_01)>1.',
            'pass_met_filters==1',
        ])

        self.selections['ttbar_em*'] = ' & '.join([
            'l0_pt > 28',
            'abs(l0_eta) < 2.4',
            'abs(l0_dxy) < 0.05',
            'abs(l0_dz) < 0.2',
            'l0_reliso_rho_03 < 0.2',
            'l0_id_mva_iso_90 == 1',
            'l2_pt > 10',
            'abs(l2_eta) < 2.4',
            'abs(l2_dxy) < 0.05',
            'abs(l2_dz) < 0.2',
            'l2_reliso_rho_03 < 0.2',
            'l2_id_m == 1',
            'hnl_q_02==0',
            'nbj>=1',
            'abs(hnl_dphi_02)>1.',
            'pass_met_filters==1',
        ])

        # convert to pandas readable queries
        self.selections_pd = OrderedDict()
        for k, v in self.selections.items():
            # BUG FIX: protect '!=' before translating '!' to 'not'; the
            # original replaced '!=' with 'not', which would turn a cut like
            # 'a != b' into the invalid pandas query 'a not b'.
            vv = v.replace('&', 'and').replace('|', 'or')
            vv = vv.replace('!=', '\x00NE\x00').replace('!', 'not').replace('\x00NE\x00', '!=')
            self.selections_pd[k] = vv
| [
"riccardo.manzoni@cern.ch"
] | riccardo.manzoni@cern.ch |
7efb8ef9da9d77a2dea29542cdfeae246c6ad6d6 | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /Cookbook/Array/最小路径和.py | 8fcbf61420a03b424278ab65480d35b31e907523 | [] | no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | '''
Minimum Path Sum (LeetCode 64).

Given an m x n grid `grid` filled with non-negative integers, find a path
from the top-left corner to the bottom-right corner that minimizes the sum
of the numbers along the path.

Note: you can only move either down or right at any point in time.

Example 1:
    Input:  grid = [[1,3,1],[1,5,1],[4,2,1]]
    Output: 7
    Explanation: the path 1 -> 3 -> 1 -> 1 -> 1 minimizes the sum.

Example 2:
    Input:  grid = [[1,2,3],[4,5,6]]
    Output: 12

Constraints:
    m == grid.length
    n == grid[i].length
    1 <= m, n <= 200
    0 <= grid[i][j] <= 100
'''
from typing import List
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Minimum sum of a top-left to bottom-right path, moving only right/down.

        Dynamic programming performed in place: each cell is replaced by the
        cheapest path cost to reach it, so `grid` is mutated by this call.
        """
        n_rows, n_cols = len(grid), len(grid[0])
        for i in range(n_rows):
            for j in range(n_cols):
                if i == 0 and j == 0:
                    continue  # start cell keeps its own value
                if i == 0:
                    grid[i][j] += grid[i][j - 1]          # top row: only from the left
                elif j == 0:
                    grid[i][j] += grid[i - 1][j]          # left column: only from above
                else:
                    grid[i][j] += min(grid[i - 1][j], grid[i][j - 1])
        return grid[-1][-1]
return grid[-1][-1]
if __name__ == '__main__':
    # Smoke test: expected minimum path sum is 7 (path 1 -> 3 -> 1 -> 1 -> 1).
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    sol = Solution()
    print(sol.minPathSum(grid))
| [
"sqw123az@sina.com"
] | sqw123az@sina.com |
d62c8c6b220fb81051692a7991dc7c594ab69286 | 2a229d37b001b3714ce3b246e1e89bf8ac2a0130 | /scripts/arcpy_geom/Vertex.py | 5d17ea1285527dbd4497f7acfdae0160a4adaad3 | [] | no_license | glennvorhes/CurveFinderHelper | 86e85dd4a3f2c810660b53b9317c76f930010010 | e6a2feccf909f49e5ce2c9a070e5b50fa2a33e28 | refs/heads/master | 2021-06-02T04:54:11.241563 | 2021-02-06T16:25:54 | 2021-02-06T16:25:54 | 95,932,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,753 | py | import math
class Vertex:
    """A 2D point with optional elevation (z) and measure (m) values."""

    def __init__(self, x, y, z=None, m=None):
        self.x = x
        self.y = y
        self.z = z  # optional elevation; None when the geometry is 2D
        self.m = m  # optional linear-referencing measure

    @property
    def kw(self):
        """Coordinates as a dict with uppercase keys.

        NOTE(review): presumably shaped for arcpy keyword arguments — confirm.
        """
        return {
            'X': self.x,
            'Y': self.y,
            'Z': self.z,
            'M': self.m
        }

    def get_2d_dist(self, v2):
        """
        Planar (XY) Euclidean distance to another vertex.
        :param v2:
        :type v2: Vertex
        :return:
        :rtype: float
        """
        return math.sqrt(
            math.pow(self.x - v2.x, 2) +
            math.pow(self.y - v2.y, 2)
        )

    def get_3d_dist(self, v2):
        """
        3D Euclidean distance to another vertex.

        Falls back to the 2D distance when either vertex lacks a z value.
        :param v2:
        :type v2: Vertex
        :return:
        :rtype: float
        """
        diff_3d = 0
        if self.z is not None and v2.z is not None:
            # BUG FIX: the original computed `self.z - v2` (a Vertex, not a
            # number), which raised a TypeError whenever both z values were set.
            diff_3d = self.z - v2.z

        return math.sqrt(
            math.pow(self.x - v2.x, 2) +
            math.pow(self.y - v2.y, 2) +
            math.pow(diff_3d, 2)
        )

    def get_m_diff(self, v2):
        """
        Signed measure difference (v2.m - self.m), or None if either m is unset.
        :param v2:
        :type v2: Vertex
        :return:
        :rtype: float | None
        """
        if self.m is not None and v2.m is not None:
            return v2.m - self.m
        else:
            return None

    def as_list(self):
        """
        Coordinates as [x, y(, z)(, m)], omitting unset optional values.
        :return:
        :rtype: list[float]
        """
        c = [self.x, self.y]
        if self.z is not None:
            c.append(self.z)
        if self.m is not None:
            c.append(self.m)
        return c

    def __str__(self):
        out_str = 'x: {0}, y: {1}'.format(self.x, self.y)
        if self.z is not None:
            out_str += ', z: {0}'.format(self.z)
        if self.m is not None:
            out_str += ', m: {0}'.format(self.m)
        return out_str
| [
"gavorhes@wisc.edu"
] | gavorhes@wisc.edu |
1c8f9228323474b915e9a7d5edc370244f52c75d | 6aa706a8644d0c758366d5cf3e01051664612f07 | /ex22/ex22-challenge.py | 3ce5b6dd3e5419ea3b4dd5759f68fb96095d4592 | [] | no_license | dspina79/lp3thw | d792aa38278ab3b6b8829e7e54f191f64d9e2bc1 | 2968d12c40fb9f5e00ceca38ed41d50615d7af5f | refs/heads/main | 2023-03-07T13:28:34.855956 | 2021-02-19T15:17:05 | 2021-02-19T15:17:05 | 322,066,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | # Basic Revew
# Just come code to highlight previous lessons learned
from sys import argv
script, arg1 = argv
def read_file(f, num_lines_to_read):
num_lines_total = get_num_lines(f)
f.seek(0)
print(f"There are {num_lines_total} total lines.")
line_num = 1
while line_num <= num_lines_total and line_num <= num_lines_to_read:
line = f.readline()
print(line_num, line)
line_num += 1
return
def get_num_lines(f):
f.seek(0)
line_num = 0
while f.readline():
line_num += 1
return line_num
input_file = open(arg1)
lines_to_read = input('How many lines to read? ')
read_file(input_file, int(lines_to_read))
input_file.close()
| [
"dspina79@gmail.com"
] | dspina79@gmail.com |
77fe34446a2c183a54a1da6a8e62c8415843b819 | 5cd02f24090472ab94f942ccf780835df4d5fd46 | /pi-dashboard/pi_dashboard/asgi.py | 5b656e44266c47d54f7421a51dcbe325ab7d36eb | [] | no_license | saenzjonathan11/CSCE-462-Raspberry-Pi-Documentation | 48e817c0533c66ef78e8899074193a229defff23 | 8d427886719e1b6aed174cc4e3301f9a47f1602c | refs/heads/master | 2021-05-18T12:50:32.232566 | 2020-05-13T12:30:08 | 2020-05-13T12:30:08 | 251,249,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for pi_dashboard project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pi_dashboard.settings')

# Module-level ASGI callable picked up by ASGI servers (uvicorn, daphne, ...).
application = get_asgi_application()
| [
"saenzjonathan11@gmail.com"
] | saenzjonathan11@gmail.com |
6ccaf0b7923ddbcf05dbd74de253ca863a8a52de | 57db61160494659af43ee255d1e6ab2af6617114 | /ultron-api/contact/admin.py | 92b8324a11994ac12b4367be09b970e401577cbe | [] | no_license | gloompi/ultron-studio | fc667d563467b386a8dec04a6079e7cdcfedc5a7 | ec2ae8051644df2433b931c7e0228e75eaf20990 | refs/heads/master | 2023-06-25T19:22:45.119315 | 2019-12-08T05:53:02 | 2019-12-08T05:53:02 | 226,545,035 | 0 | 0 | null | 2023-06-10T00:22:15 | 2019-12-07T16:44:16 | JavaScript | UTF-8 | Python | false | false | 205 | py | from django.contrib import admin
from .models import Contact
class ContactAdmin(admin.ModelAdmin):
    """Admin configuration for the Contact model."""
    # Columns shown in the admin changelist view.
    list_display = ('id', 'title')

# Register your models here.
admin.site.register(Contact, ContactAdmin)
"gloompi@gmail.com"
] | gloompi@gmail.com |
533b7742f803c5e8623281c3f038e47250afcebd | f7fcdb32ba79620a140def0d16aa37784750d781 | /P-Divisors.py | 9cd4671af22ca43597ee8beaccb8ccd3d158a36c | [] | no_license | Msarkis/python | b87888558dbae66823c73356645b89e6a1582af6 | a10689ff9d6064796a8f6c57e735e1102ce760fb | refs/heads/master | 2022-07-18T23:06:37.172593 | 2020-05-22T16:39:03 | 2020-05-22T16:39:03 | 263,748,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | """
FROM PracticePython.org
Exercise 4: Divisors
Create a program that:
1. asks the user for a number
2. prints out a list of all the divisors of that number.
"""
# Hardcoded input; the original exercise asks the user for this number.
ask_num = 30


def divisors(n):
    """Return all divisors of n that are strictly less than n, ascending."""
    return [i for i in range(1, n) if n % i == 0]


print("The divisors of " + str(ask_num) + " are ")
# BUG FIX: the original printed the complementary factor with true division,
# producing floats such as 30.0, including n itself and omitting 1 — which
# disagreed with the second solution below.  Print integer divisors instead.
for d in divisors(ask_num):
    print(d)

print("\nAnother solution\n")
print(str(ask_num) + " is divisible by ", str(divisors(ask_num)))
"noreply@github.com"
] | Msarkis.noreply@github.com |
aca820fb2f94f242539ff4b7b1b2ab02fbc5a555 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/plugins/attack/db/sqlmap/tamper/charencode.py | 6d1a46727fed80594ad45d9e5cbf3e7aa2e118f8 | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import string
from lib.core.enums import PRIORITY
# Relative ordering of this tamper script when sqlmap chains several of them
# (see lib.core.enums.PRIORITY).
__priority__ = PRIORITY.LOWEST
def dependencies():
    # This tamper script has no prerequisites to announce.
    pass
def tamper(payload, **kwargs):
    """
    Url-encodes all characters in a given payload (not processing already
    encoded)

    Tested against:
        * Microsoft SQL Server 2005
        * MySQL 4, 5.0 and 5.5
        * Oracle 10g
        * PostgreSQL 8.3, 8.4, 9.0

    Notes:
        * Useful to bypass very weak web application firewalls that do not
          url-decode the request before processing it through their ruleset
        * The web server will anyway pass the url-decoded version behind,
          hence it should work against any DBMS

    >>> tamper('SELECT FIELD FROM%20TABLE')
    '%53%45%4C%45%43%54%20%46%49%45%4C%44%20%46%52%4F%4D%20%54%41%42%4C%45'
    """

    if not payload:
        return payload

    pieces = []
    pos = 0
    total = len(payload)

    while pos < total:
        ch = payload[pos]
        # A '%' followed by two hex digits is an existing escape; keep it as-is.
        if ch == '%' and pos < total - 2 and payload[pos + 1] in string.hexdigits and payload[pos + 2] in string.hexdigits:
            pieces.append(payload[pos:pos + 3])
            pos += 3
        else:
            pieces.append('%%%.2X' % ord(ch))
            pos += 1

    return "".join(pieces)
| [
"everping@outlook.com"
] | everping@outlook.com |
38b2275bab017121700f29468db3da539f3d450e | bab33c23fc02dc171395b34c5c88fcf83a95cb96 | /test/Transforms/test_Transforms.py | ec1905520dfce9a46bb05990c38fae7639a0f5b3 | [] | no_license | heliy/nornir-imageregistration | a623ad00c0c253bcc925306920824affaa414810 | 368bc245ef2c7be630f0cdc8c448adb62b797d5a | refs/heads/master | 2020-05-07T16:59:02.268951 | 2018-02-27T01:22:57 | 2018-02-27T01:22:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,922 | py | '''
Created on Mar 18, 2013
@author: u0490822
'''
import os
import unittest
from nornir_imageregistration.transforms import *
from nornir_imageregistration.transforms.rbftransform import \
RBFWithLinearCorrection
import numpy as np
### MirrorTransformPoints###
### A simple four control point mapping on two 20x20 grids centered on 0,0###
### Fixed Space WarpedSpace ###
# . . . . . . . . . . 2 . . . . . . . . . 3 . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . 0 . . . . . . . . . 1 1 . . . . . . . . . 0 . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# . . . . . . . . . . . . . . . . . . . . . 3 . . . . . . . . . 2 . . . . . . . . . .
# Coordinates are CY, CX, MY, MX
# Mirror mapping: each fixed point (CY, CX) maps to warped (-CY, -CX).
MirrorTransformPoints = np.array([[0, 0, 0, 0],
                                  [0, 10, 0, -10],
                                  [10, 0, -10, 0],
                                  [10, 10, -10, -10]])

# Identity mapping on the unit square: warped coordinates equal fixed ones.
IdentityTransformPoints = np.array([[0, 0, 0, 0],
                                    [1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [1, 1, 1, 1]])

# Translate points by (1,2)
TranslateTransformPoints = np.array([[0, 0, 1, 2],
                                     [1, 0, 2, 2],
                                     [0, 1, 1, 3],
                                     [1, 1, 2, 3]])

# Used to test IsOffsetAtZero
OffsetTransformPoints = np.array([[1, 1, 0, 0],
                                  [2, 1, 1, 0],
                                  [1, 2, 0, 1],
                                  [2, 2, 1, 1]])
def TransformCheck(test, transform, warpedPoint, fixedPoint):
    '''Round-trip check: the forward transform must land on fixedPoint and the
       inverse must return to warpedPoint (compared after rounding to 2 decimals).'''
    forward = transform.Transform(warpedPoint)
    rounded_forward = np.around(forward, 2)
    test.assertTrue(np.array_equal(rounded_forward, fixedPoint))

    backward = transform.InverseTransform(forward)
    rounded_backward = np.around(backward, 2)
    test.assertTrue(np.array_equal(rounded_backward, warpedPoint))
def NearestFixedCheck(test, transform, fixedPoints, testPoints):
    '''Ensures that the nearest fixed point can be found for a test point.'''
    (distance, index) = transform.NearestFixedPoint(testPoints)
    nearest = np.around(transform.FixedPoints[index, :], 2)
    test.assertTrue(np.array_equal(nearest, fixedPoints))
def NearestWarpedCheck(test, transform, warpedPoints, testPoints):
    '''Ensures that the nearest warped point can be found for a test point.'''
    (distance, index) = transform.NearestWarpedPoint(testPoints)
    nearest = np.around(transform.WarpedPoints[index, :], 2)
    test.assertTrue(np.array_equal(nearest, warpedPoints))
class Test(unittest.TestCase):
    """Unit tests for the nornir_imageregistration transform classes."""

    def testIdentity(self):
        """An identity control-point set must map every point onto itself."""
        T = meshwithrbffallback.MeshWithRBFFallback(IdentityTransformPoints)

        warpedPoint = np.array([[0, 0],
                                [0.25, 0.25],
                                [1, 1],
                                [-1, -1]])
        TransformCheck(self, T, warpedPoint, warpedPoint)

    def testTranslate(self):
        """A (1,2)-translation control-point set must shift points accordingly."""
        T = meshwithrbffallback.MeshWithRBFFallback(TranslateTransformPoints)

        warpedPoint = np.array([[1, 2],
                                [1.25, 2.25],
                                [2, 3],
                                [0, 1]])

        controlPoint = np.array([[0, 0],
                                 [0.25, 0.25],
                                 [1, 1],
                                 [-1, -1]])

        TransformCheck(self, T, warpedPoint, controlPoint)

    def testTriangulation(self):
        """Exercise Triangulation on the mirror mapping: round-trips,
        nearest-point queries, point addition, and duplicate rejection."""
        # os.chdir('C:\\Buildscript\\Test\\Stos')
        # MToCStos = IrTools.IO.stosfile.StosFile.Load('27-26.stos')
        # CToVStos = IrTools.IO.stosfile.StosFile.Load('26-25.stos')
        #
        # # I'll need to make sure I remember to set the downsample factor when I warp the .mosaic files
        # (CToV, cw, ch) = IrTools.Transforms.factory.TransformFactory.LoadTransform(CToVStos.Transform)
        # (MToC, mw, mh) = IrTools.Transforms.factory.TransformFactory.LoadTransform(MToCStos.Transform)
        #
        # MToV = CToV.AddTransform(MToC)
        #
        # MToCStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToC, mw, mh)
        # MToCStos.Save("27-26_Test.stos")
        #
        # MToVStos = copy.deepcopy(MToCStos)
        # MToVStos.ControlImageFullPath = CToVStos.ControlImageFullPath
        # MToVStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToV, mw, mh)
        # MToVStos.ControlImageDim = CToVStos.ControlImageDim
        # MToVStos.MappedImageDim = MToCStos.MappedImageDim
        #
        # MToVStos.Save("27-25.stos")

        global MirrorTransformPoints
        T = triangulation.Triangulation(MirrorTransformPoints)
        self.assertEqual(len(T.FixedTriangles), 2)
        self.assertEqual(len(T.WarpedTriangles), 2)

        warpedPoint = np.array([[-5, -5]])
        TransformCheck(self, T, warpedPoint, -warpedPoint)

        NearestFixedCheck(self, T, MirrorTransformPoints[:,0:2], MirrorTransformPoints[:,0:2] - 1)
        NearestWarpedCheck(self, T, MirrorTransformPoints[:,2:4], MirrorTransformPoints[:,2:4] - 1)

        # Add a point to the mirror transform, make sure it still works
        T.AddPoint([5.0, 5.0, -5.0, -5.0])

        #Make sure the new point can be found correctly
        NearestFixedCheck(self, T, T.FixedPoints, T.FixedPoints - 1)
        NearestWarpedCheck(self, T, T.WarpedPoints, T.WarpedPoints - 1)

        #Add a duplicate and see what happens
        NumBefore = T.NumControlPoints
        T.AddPoint([5.0, 5.0, -5.0, -5.0])
        NumAfter = T.NumControlPoints
        self.assertEqual(NumBefore, NumAfter)

        # We should have a new triangulation if we added a point
        self.assertTrue(len(T.FixedTriangles) > 2)
        self.assertTrue(len(T.WarpedTriangles) > 2)

        TransformCheck(self, T, warpedPoint, -warpedPoint)

        # Try points not on the transform points
        warpedPoints = np.array([[-2.0, -4.0],
                                 [-4.0, -2.0],
                                 [0.0, -9.0],
                                 [-9.0, 0.0]])
        TransformCheck(self, T, warpedPoints, -warpedPoints)

    def testRBFTriangulation(self):
        """Same scenario as testTriangulation, but through the
        RBFWithLinearCorrection transform (warped/fixed columns swapped
        into positional arguments)."""
        # os.chdir('C:\\Buildscript\\Test\\Stos')
        # MToCStos = IrTools.IO.stosfile.StosFile.Load('27-26.stos')
        # CToVStos = IrTools.IO.stosfile.StosFile.Load('26-25.stos')
        #
        # # I'll need to make sure I remember to set the downsample factor when I warp the .mosaic files
        # (CToV, cw, ch) = IrTools.Transforms.factory.TransformFactory.LoadTransform(CToVStos.Transform)
        # (MToC, mw, mh) = IrTools.Transforms.factory.TransformFactory.LoadTransform(MToCStos.Transform)
        #
        # MToV = CToV.AddTransform(MToC)
        #
        # MToCStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToC, mw, mh)
        # MToCStos.Save("27-26_Test.stos")
        #
        # MToVStos = copy.deepcopy(MToCStos)
        # MToVStos.ControlImageFullPath = CToVStos.ControlImageFullPath
        # MToVStos.Transform = IrTools.Transforms.factory.TransformFactory.TransformToIRToolsGridString(MToV, mw, mh)
        # MToVStos.ControlImageDim = CToVStos.ControlImageDim
        # MToVStos.MappedImageDim = MToCStos.MappedImageDim
        #
        # MToVStos.Save("27-25.stos")

        global MirrorTransformPoints
        T = RBFWithLinearCorrection(MirrorTransformPoints[:,2:4], MirrorTransformPoints[:,0:2])
        self.assertEqual(len(T.FixedTriangles), 2)
        self.assertEqual(len(T.WarpedTriangles), 2)

        warpedPoint = np.array([[-5, -5]])
        TransformCheck(self, T, warpedPoint, -warpedPoint)

        NearestFixedCheck(self, T, T.FixedPoints, T.FixedPoints - 1)
        NearestWarpedCheck(self, T, T.WarpedPoints, T.WarpedPoints - 1)

        # Add a point to the mirror transform, make sure it still works
        T.AddPoint([5.0, 5.0, -5.0, -5.0])
        NearestFixedCheck(self, T, T.FixedPoints, T.FixedPoints - 1)
        NearestWarpedCheck(self, T, T.WarpedPoints, T.WarpedPoints - 1)

        #Add a duplicate and see what happens
        NumBefore = T.NumControlPoints
        T.AddPoint([5.0, 5.0, -5.0, -5.0])
        NumAfter = T.NumControlPoints
        self.assertEqual(NumBefore, NumAfter)

        # We should have a new triangulation if we added a point
        self.assertTrue(len(T.FixedTriangles) > 2)
        self.assertTrue(len(T.WarpedTriangles) > 2)

        TransformCheck(self, T, warpedPoint, -warpedPoint)

        #Try removing a point

        # Try points not on the transform points
        warpedPoints = np.array([[-2.0, -4.0],
                                 [-4.0, -2.0],
                                 [0.0, -9.0],
                                 [-9.0, 0.0]])
        TransformCheck(self, T, warpedPoints, -warpedPoints)

        T.AddPoints([[2.5,2.5,-2.5,-2.5],
                     [7.5,7.5,-7.5,-7.5]])

        TransformCheck(self, T, warpedPoints, -warpedPoints)

    def test_OriginAtZero(self):
        """utils.IsOriginAtZero: true for identity, false for offset points,
        true again when both transforms are considered together."""
        global IdentityTransformPoints
        global OffsetTransformPoints
        IdentityTransform = triangulation.Triangulation(IdentityTransformPoints)
        OffsetTransform = triangulation.Triangulation(OffsetTransformPoints)
        self.assertTrue(utils.IsOriginAtZero([IdentityTransform]), "Origin of identity transform is at zero")
        self.assertFalse(utils.IsOriginAtZero([OffsetTransform]), "Origin of Offset Transform is not at zero")
        self.assertTrue(utils.IsOriginAtZero([IdentityTransform, OffsetTransform]), "Origin of identity transform and offset transform is at zero")

    def test_bounds(self):
        """NOTE(review): this test is effectively dead code — everything below
        the transform construction is commented out (and refers to a variable
        `T` that is never defined), so the test only verifies that the
        Triangulation constructor does not raise."""
        global IdentityTransformPoints
        IdentityTransform = triangulation.Triangulation(IdentityTransformPoints)
        # print "Fixed Verts"
        # print T.FixedTriangles
        # print "\nWarped Verts"
        # print T.WarpedTriangles
        #
        # T.AddPoint([5, 5, -5, -5])
        # print "\nPoint added"
        # print "Fixed Verts"
        # print T.FixedTriangles
        # print "\nWarped Verts"
        # print T.WarpedTriangles
        #
        # T.AddPoint([5, 5, 5, 5])
        # print "\nDuplicate Point added"
        # print "Fixed Verts"
        # print T.FixedTriangles
        # print "\nWarped Verts"
        # print T.WarpedTriangles
        #
        # warpedPoint = [[-5, -5]]
        # fp = T.ViewTransform(warpedPoint)
        # print("__Transform " + str(warpedPoint) + " to " + str(fp))
        # wp = T.InverseTransform(fp)
        #
        # T.UpdatePoint(3, [10, 15, -10, -15])
        # print "\nPoint updated"
        # print "Fixed Verts"
        # print T.FixedTriangles
        # print "\nWarped Verts"
        # print T.WarpedTriangles
        #
        # warpedPoint = [[-9, -14]]
        # fp = T.ViewTransform(warpedPoint)
        # print("__Transform " + str(warpedPoint) + " to " + str(fp))
        # wp = T.InverseTransform(fp)
        #
        # T.RemovePoint(1)
        # print "\nPoint removed"
        # print "Fixed Verts"
        # print T.FixedTriangles
        # print "\nWarped Verts"
        # print T.WarpedTriangles
        #
        # print "\nFixedPointsInRect"
        # print T.GetFixedPointsRect([-1, -1, 14, 4])
if __name__ == "__main__":
    # Run the full suite; uncomment below to run a single named test.
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| [
"james.r.andreson@utah.edu"
] | james.r.andreson@utah.edu |
b75bb8a8354bc94be4833f4eef3a1d81adafe76e | 3095db2b8eb39c560c1ea5918492b015a975e2c5 | /sample_ros_time_domain_uwb/catkin_ws/devel/lib/python2.7/dist-packages/pkg_timedomain_uwb/msg/_msg_timedomain_uwb.py | cbaa92a3d5ae56f98bb572bf23f9c5b9d956c770 | [] | no_license | jungwonkang/qdrone_all | 7a1b689a4576452749eeb971a1a98d9911c4bb48 | 6d3c4e12450c43f427d0203aa3632e83c0085ed2 | refs/heads/master | 2021-06-12T12:41:11.179182 | 2021-03-08T17:42:59 | 2021-03-08T17:42:59 | 157,916,901 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,713 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pkg_timedomain_uwb/msg_timedomain_uwb.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class msg_timedomain_uwb(genpy.Message):
_md5sum = "5832270dc2da80beb97f2d958efffd99"
_type = "pkg_timedomain_uwb/msg_timedomain_uwb"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
uint32 id_module_uwb
float64 range
float64 range_err
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','id_module_uwb','range','range_err']
_slot_types = ['std_msgs/Header','uint32','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,id_module_uwb,range,range_err
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(msg_timedomain_uwb, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.id_module_uwb is None:
self.id_module_uwb = 0
if self.range is None:
self.range = 0.
if self.range_err is None:
self.range_err = 0.
else:
self.header = std_msgs.msg.Header()
self.id_module_uwb = 0
self.range = 0.
self.range_err = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_I2d().pack(_x.id_module_uwb, _x.range, _x.range_err))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 20
(_x.id_module_uwb, _x.range, _x.range_err,) = _get_struct_I2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_I2d().pack(_x.id_module_uwb, _x.range, _x.range_err))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 20
(_x.id_module_uwb, _x.range, _x.range_err,) = _get_struct_I2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_I2d = None
def _get_struct_I2d():
global _struct_I2d
if _struct_I2d is None:
_struct_I2d = struct.Struct("<I2d")
return _struct_I2d
| [
"kctown99@gmail.com"
] | kctown99@gmail.com |
3bba74f04259265887d2ec648d851419eac34d2e | 40578c9f7370d2905ed66cbec9ebd7998ea0d462 | /test-old-chart.py | 9e295988705f4aa6b0aea16c62cfe0a2e9619ead | [] | no_license | boonleng/rats | 1f6d63eadab005cc7a5966ea90c32f07575e8922 | 8bea912ab8ac3f6c7a23ae902c545aeaaf65f5a6 | refs/heads/master | 2021-10-10T05:29:55.312987 | 2019-01-07T05:28:55 | 2019-01-07T05:28:55 | 103,794,888 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import data
import chart
stock = data.file('NVDA')
# Some global variables
K = 130 # Number of days to show
sma_sizes = [10, 50, 200] # SMA window sizes
L = K + max(sma_sizes)
s = stock.iloc[-L:]
view = chart.showChart(s)
view['figure'].savefig('blob/NVDA_test.png')
| [
"boonleng@ou.edu"
] | boonleng@ou.edu |
5d86860ca2ec5fa6bbeab18e86e8b56a4012a878 | db2ae028c001fb47c21fa8024d2e9abb6214da3c | /apply_ng_correction_to_ninv.py | af04e2b6a1b631a4bd73c3d2fcc885bf915bad1b | [
"MIT"
] | permissive | tbs1980/mice-number-crunch | b98211a443cabb9246cbd2c8020f219a186dcd37 | 0e1fc8997709a66f69e1ad74cc5e23b2753a378f | refs/heads/master | 2021-01-10T02:48:05.064646 | 2015-12-04T14:21:50 | 2015-12-04T14:21:50 | 36,322,850 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
input_data_path = "/resource/data/MICE/maps/"
res = "512"
# read the g_ninv
g_ninv_file_name = input_data_path + res + "/mice_v2_0_shear_g_ninv.fits"
g_ninv = hp.read_map(g_ninv_file_name)
# read the G_ninv
G_ninv_file_name = input_data_path + res + "/mice_v2_0_shear_G_ninv.fits"
G0_ninv = hp.read_map(G_ninv_file_name,field=0)
G1_ninv = hp.read_map(G_ninv_file_name,field=1)
# get n_bar from the g_ninv
n_bar = np.mean(1./g_ninv[g_ninv>0])
print "n_bar = ",n_bar
# multiply G0 and G1 ninv by n_bar
G0_ninv *= n_bar
G1_ninv *= n_bar
# now write new maps
G_ninv_out_file_name = input_data_path + res + "/mice_v2_0_shear_G_corr_ninv.fits"
hp.write_map(G_ninv_out_file_name,m=[G0_ninv,G1_ninv])
G0_ninv_png_out_file_name = input_data_path + res + "/mice_v2_0_shear_G0_corr_ninv.png"
hp.mollview(G0_ninv)
plt.savefig(G0_ninv_png_out_file_name)
G1_ninv_png_out_file_name = input_data_path + res + "/mice_v2_0_shear_G1_corr_ninv.png"
hp.mollview(G1_ninv)
plt.savefig(G1_ninv_png_out_file_name)
| [
"tbs1980@gmail.com"
] | tbs1980@gmail.com |
c512c3d28683e706f9ec457ece4dd43b632b1478 | 8fbe8c1512b1139540b1620f7372d57c9ad58663 | /gpxjoin.py | 6505f8f9310e58b3035dd8c9991c97dd1b49fbf2 | [] | no_license | uskudnik/gpxjoin | d1f3013b47fb122d14bc2c551390522738612e66 | 0892f3812642897bf0448c2e0cd2d8bdfebc51ac | refs/heads/master | 2020-04-10T16:43:12.721578 | 2012-05-19T20:10:14 | 2012-05-19T20:10:14 | 4,380,592 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,685 | py | #!/usr/bin/env python
# encoding: utf-8
"""
gpxjoin.py
Licensed under MIT License.
Copyright (c) 2012, Urban Skudnik <urban.skudnik@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
import sys
import argparse
from BeautifulSoup import BeautifulStoneSoup
import datetime
gpx_time_format = "%Y-%m-%dT%H:%M:%SZ"
def main():
parser = argparse.ArgumentParser(description="Join multiple gpx files")
parser.add_argument("gpx_files", metavar='GPX XML file', nargs="+", type=str, action='append')
args = parser.parse_args(sys.argv[1:])
files = list()
# To make sure our data files are attached in correct order; we don't trust file system (download order, ...)
for ffile in args.gpx_files[0]:
ffile = open(ffile, "r")
filecontent = ffile.readlines()[0]
xml = BeautifulStoneSoup(filecontent)
starttime = datetime.datetime.strptime(xml.find("metadata").find("time").string, gpx_time_format)
files += [[starttime, filecontent]]
ffiles = sorted(files, key=lambda *d: d[0])
# GPX end tag is unnecessary from initial file
joined_gpx = ffiles[0][1].split("</gpx>")[0]
# "Header" data (initial xml tag, gpx tag, metadata, etc.) is unnecessary
# in subsequent file, therefore we remove it, along with end GPX tag.
for date, ffile in ffiles[1:]:
header, content = ffile.split("</metadata>")
joined_gpx += content.split("</gpx>")[0]
# Processed all files, append end GPX tag
joined_gpx += "</gpx>"
# Filename is a combination of all files names
output_filename = " + ".join([f.split(".gpx.xml")[0] for f in args.gpx_files[0]]) + ".gpx.xml"
output_gpx = file(output_filename, "w")
output_gpx.write(joined_gpx)
output_gpx.close()
if __name__ == '__main__':
main()
| [
"urban.skudnik@gmail.com"
] | urban.skudnik@gmail.com |
89953cc562f5821db41a06a6c2c67cef8e4197ab | 67cfe3567f0a961123c561538624be28044ec852 | /backend/girltalk_15424/urls.py | d6ab5c8e6f4bb8b2bb8d6c9afad9be43542c8a78 | [] | no_license | crowdbotics-apps/girltalk-15424 | b732f7f6fc04fedd1acd99a2acfd129af71cc010 | 770efb300bc8297faea15e7b6a94c7a755fa8cf7 | refs/heads/master | 2023-02-04T02:55:52.708635 | 2020-04-04T05:21:02 | 2020-04-04T05:21:02 | 252,916,119 | 0 | 0 | null | 2023-01-26T16:28:35 | 2020-04-04T05:20:13 | JavaScript | UTF-8 | Python | false | false | 1,914 | py | """girltalk_15424 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Girltalk"
admin.site.site_title = "Girltalk Admin Portal"
admin.site.index_title = "Girltalk Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="Girltalk API",
default_version="v1",
description="API documentation for Girltalk App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d1ca6c351122bf427bc8a5012370cfc1a2fa3cc8 | 6570bfdb26a41d99620debec5541e789b3c613f3 | /Others/Mercari/binary.py | c7514cc5228a9257b4679b4b065a41dd9d90ea3f | [] | no_license | ameet-1997/Competitive_Coding | bc30f37ae034efe7bb63f71241792fc53c323a50 | a9824430cf0458516ddd88655c1eca1f42ff3f0a | refs/heads/master | 2021-05-10T14:07:15.209770 | 2018-01-22T19:22:13 | 2018-01-22T19:22:13 | 118,500,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | n=input()+1
while'11'in bin(n):n+=1
print n
| [
"ameetsd97@gmail.com"
] | ameetsd97@gmail.com |
1dcceb03a81d2043094a05404cce1de425ad775d | 4ff90da76cb447db065896a06573258a27fc8878 | /itchat/components/login.py | 709b8307350c9a7bc7e422e7fe610d2d585d68f0 | [
"MIT"
] | permissive | wosenbo/ItChat | 9455c79a57f7b5d5f03be86d652b428eb248299f | 36a3a027bb8dc19f0ceb4b8a44e943a60b2286ef | refs/heads/master | 2021-01-12T09:18:38.605240 | 2017-04-07T01:18:53 | 2017-04-07T01:18:53 | 76,820,262 | 0 | 0 | null | 2016-12-19T02:13:56 | 2016-12-19T02:13:56 | null | UTF-8 | Python | false | false | 12,311 | py | import os, sys, time, re, io
import threading
import json, xml.dom.minidom
import copy, pickle, random
import traceback, logging
import requests
from .. import config, utils
from ..returnvalues import ReturnValue
from .contact import update_local_chatrooms
from .messages import produce_msg
logger = logging.getLogger('itchat')
def load_login(core):
core.login = login
core.get_QRuuid = get_QRuuid
core.get_QR = get_QR
core.check_login = check_login
core.web_init = web_init
core.show_mobile_login = show_mobile_login
core.start_receiving = start_receiving
core.get_msg = get_msg
core.logout = logout
def login(self, enableCmdQR=False, picDir=None, qrCallback=None,
loginCallback=None, exitCallback=None):
if self.alive:
logger.debug('itchat has already logged in.')
return
while 1:
for getCount in range(10):
logger.info('Getting uuid of QR code.')
while not self.get_QRuuid(): time.sleep(1)
logger.info('Downloading QR code.')
qrStorage = self.get_QR(enableCmdQR=enableCmdQR,
picDir=picDir, qrCallback=qrCallback)
if qrStorage:
break
elif 9 == getCount:
logger.info('Failed to get QR code, please restart the program.')
sys.exit()
logger.info('Please scan the QR code to log in.')
isLoggedIn = False
while not isLoggedIn:
status = self.check_login()
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=self.uuid, status=status, qrcode=qrStorage.getvalue())
if status == '200':
isLoggedIn = True
elif status == '201':
if isLoggedIn is not None:
logger.info('Please press confirm on your phone.')
isLoggedIn = None
elif status != '408':
break
if isLoggedIn: break
logger.info('Log in time out, reloading QR code')
self.web_init()
self.show_mobile_login()
self.get_contact(True)
if hasattr(loginCallback, '__call__'):
r = loginCallback()
else:
utils.clear_screen()
if os.path.exists(picDir or config.DEFAULT_QR):
os.remove(picDir or config.DEFAULT_QR)
logger.info('Login successfully as %s' % self.storageClass.nickName)
self.start_receiving(exitCallback)
def get_QRuuid(self):
url = '%s/jslogin' % config.BASE_URL
params = {
'appid' : 'wx782c26e4c19acffb',
'fun' : 'new', }
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)";'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
self.uuid = data.group(2)
return self.uuid
def get_QR(self, uuid=None, enableCmdQR=False, picDir=None, qrCallback=None):
uuid = uuid or self.uuid
picDir = picDir or config.DEFAULT_QR
url = '%s/qrcode/%s' % (config.BASE_URL, uuid)
headers = { 'User-Agent' : config.USER_AGENT }
try:
r = self.s.get(url, stream=True, headers=headers)
except:
return False
qrStorage = io.BytesIO(r.content)
if hasattr(qrCallback, '__call__'):
qrCallback(uuid=uuid, status='0', qrcode=qrStorage.getvalue())
else:
with open(picDir, 'wb') as f: f.write(r.content)
if enableCmdQR:
utils.print_cmd_qr(picDir, enableCmdQR=enableCmdQR)
else:
utils.print_qr(picDir)
return qrStorage
def check_login(self, uuid=None):
uuid = uuid or self.uuid
url = '%s/cgi-bin/mmwebwx-bin/login' % config.BASE_URL
localTime = int(time.time())
params = 'loginicon=true&uuid=%s&tip=0&r=%s&_=%s' % (
uuid, localTime / 1579, localTime)
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.code=(\d+)'
data = re.search(regx, r.text)
if data and data.group(1) == '200':
process_login_info(self, r.text)
return '200'
elif data:
return data.group(1)
else:
return '400'
def process_login_info(core, loginContent):
''' when finish login (scanning qrcode)
* syncUrl and fileUploadingUrl will be fetched
* deviceid and msgid will be generated
* skey, wxsid, wxuin, pass_ticket will be fetched
'''
regx = r'window.redirect_uri="(\S+)";'
core.loginInfo['url'] = re.search(regx, loginContent).group(1)
headers = { 'User-Agent' : config.USER_AGENT }
r = core.s.get(core.loginInfo['url'], headers=headers, allow_redirects=False)
core.loginInfo['url'] = core.loginInfo['url'][:core.loginInfo['url'].rfind('/')]
for indexUrl, detailedUrl in (
("wx2.qq.com" , ("file.wx2.qq.com", "webpush.wx2.qq.com")),
("wx8.qq.com" , ("file.wx8.qq.com", "webpush.wx8.qq.com")),
("qq.com" , ("file.wx.qq.com", "webpush.wx.qq.com")),
("web2.wechat.com" , ("file.web2.wechat.com", "webpush.web2.wechat.com")),
("wechat.com" , ("file.web.wechat.com", "webpush.web.wechat.com"))):
fileUrl, syncUrl = ['https://%s/cgi-bin/mmwebwx-bin' % url for url in detailedUrl]
if indexUrl in core.loginInfo['url']:
core.loginInfo['fileUrl'], core.loginInfo['syncUrl'] = \
fileUrl, syncUrl
break
else:
core.loginInfo['fileUrl'] = core.loginInfo['syncUrl'] = core.loginInfo['url']
core.loginInfo['deviceid'] = 'e' + repr(random.random())[2:17]
core.loginInfo['msgid'] = int(time.time() * 1000)
core.loginInfo['BaseRequest'] = {}
for node in xml.dom.minidom.parseString(r.text).documentElement.childNodes:
if node.nodeName == 'skey':
core.loginInfo['skey'] = core.loginInfo['BaseRequest']['Skey'] = node.childNodes[0].data
elif node.nodeName == 'wxsid':
core.loginInfo['wxsid'] = core.loginInfo['BaseRequest']['Sid'] = node.childNodes[0].data
elif node.nodeName == 'wxuin':
core.loginInfo['wxuin'] = core.loginInfo['BaseRequest']['Uin'] = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
core.loginInfo['pass_ticket'] = core.loginInfo['BaseRequest']['DeviceID'] = node.childNodes[0].data
def web_init(self):
url = '%s/webwxinit?r=%s' % (self.loginInfo['url'], int(time.time()))
data = { 'BaseRequest': self.loginInfo['BaseRequest'], }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
utils.emoji_formatter(dic['User'], 'NickName')
self.loginInfo['InviteStartCount'] = int(dic['InviteStartCount'])
self.loginInfo['User'] = utils.struct_friend_info(dic['User'])
self.loginInfo['SyncKey'] = dic['SyncKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncKey']['List']])
self.storageClass.userName = dic['User']['UserName']
self.storageClass.nickName = dic['User']['NickName']
self.memberList.append(dic['User'])
return dic
def show_mobile_login(self):
url = '%s/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'Code' : 3,
'FromUserName' : self.storageClass.userName,
'ToUserName' : self.storageClass.userName,
'ClientMsgId' : int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT, }
r = self.s.post(url, data=json.dumps(data), headers=headers)
return ReturnValue(rawResponse=r)
def start_receiving(self, exitCallback=None, getReceivingFnOnly=False):
self.alive = True
def maintain_loop():
retryCount = 0
while self.alive:
try:
i = sync_check(self)
if i is None:
self.alive = False
elif i == '0':
continue
else:
msgList, contactList = self.get_msg()
if contactList:
chatroomList, otherList = [], []
for contact in contactList:
if '@@' in contact['UserName']:
chatroomList.append(contact)
else:
otherList.append(contact)
chatroomMsg = update_local_chatrooms(self, chatroomList)
self.msgList.put(chatroomMsg)
if msgList:
msgList = produce_msg(self, msgList)
for msg in msgList: self.msgList.put(msg)
retryCount = 0
except:
retryCount += 1
logger.debug(traceback.format_exc())
if self.receivingRetryCount < retryCount:
self.alive = False
else:
time.sleep(1)
self.logout()
if hasattr(exitCallback, '__call__'):
exitCallback()
else:
logger.info('LOG OUT!')
if getReceivingFnOnly:
return maintain_loop
else:
maintainThread = threading.Thread(target=maintain_loop)
maintainThread.setDaemon(True)
maintainThread.start()
def sync_check(self):
url = '%s/synccheck' % self.loginInfo.get('syncUrl', self.loginInfo['url'])
params = {
'r' : int(time.time() * 1000),
'skey' : self.loginInfo['skey'],
'sid' : self.loginInfo['wxsid'],
'uin' : self.loginInfo['wxuin'],
'deviceid' : self.loginInfo['deviceid'],
'synckey' : self.loginInfo['synckey'],
'_' : int(time.time() * 1000),}
headers = { 'User-Agent' : config.USER_AGENT }
r = self.s.get(url, params=params, headers=headers)
regx = r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}'
pm = re.search(regx, r.text)
if pm is None or pm.group(1) != '0':
logger.debug('Unexpected sync check result: %s' % r.text)
return None
return pm.group(2)
def get_msg(self):
url = '%s/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.loginInfo['url'], self.loginInfo['wxsid'],
self.loginInfo['skey'],self.loginInfo['pass_ticket'])
data = {
'BaseRequest' : self.loginInfo['BaseRequest'],
'SyncKey' : self.loginInfo['SyncKey'],
'rr' : ~int(time.time()), }
headers = {
'ContentType': 'application/json; charset=UTF-8',
'User-Agent' : config.USER_AGENT }
r = self.s.post(url, data=json.dumps(data), headers=headers)
dic = json.loads(r.content.decode('utf-8', 'replace'))
if dic['BaseResponse']['Ret'] != 0: return None, None
self.loginInfo['SyncKey'] = dic['SyncCheckKey']
self.loginInfo['synckey'] = '|'.join(['%s_%s' % (item['Key'], item['Val'])
for item in dic['SyncCheckKey']['List']])
return dic['AddMsgList'], dic['ModContactList']
def logout(self):
if self.alive:
url = '%s/webwxlogout' % self.loginInfo['url']
params = {
'redirect' : 1,
'type' : 1,
'skey' : self.loginInfo['skey'], }
headers = { 'User-Agent' : config.USER_AGENT }
self.s.get(url, params=params, headers=headers)
self.alive = False
self.s.cookies.clear()
del self.chatroomList[:]
del self.memberList[:]
del self.mpList[:]
return ReturnValue({'BaseResponse': {
'ErrMsg': 'logout successfully.',
'Ret': 0, }})
| [
"i7meavnktqegm1b@qq.com"
] | i7meavnktqegm1b@qq.com |
537c25b232e64293c8f21c5c30fb20a3296bc9fe | a69f551e9dcc118e730c4e85e096993535fa0c70 | /Codeforces/1200/A. Sweet Problem.py | c3db0f24cb1b06012a87541db57a2091ee919690 | [] | no_license | DHaythem/Competitive-Programming-Solutions | ef30c9418f47e522b8f5096f6077c487ed247500 | 5c4e6b78ee9657ee46abd1fce082ef11acd6a13c | refs/heads/master | 2021-07-07T16:29:55.013246 | 2021-03-18T22:12:03 | 2021-03-18T22:12:03 | 229,136,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #https://codeforces.com/contest/1263/problem/A
t=int(input())
for _ in range(t):
l=list(map(int,input().split()))
l.sort()
if l[0]+l[1]<l[-1]:
print(l[0]+l[1])
else:
print(sum(l)//2)
| [
"noreply@github.com"
] | DHaythem.noreply@github.com |
d0f83f7d1e095a684adc028c82436ae2a547ae84 | 53fd34c6f4544c999fb44a19bfb09ac5379d3381 | /.c9/metadata/environment/manage.py | 96e162979620a446001118d73186f2a4b83a5225 | [] | no_license | pazcm/django-blog | de1290983f8a7ee722e5b462319d4810ff1821c2 | 889186f53725191a73b19ba29b9e2a94f84f54d1 | refs/heads/master | 2020-07-30T10:15:10.268024 | 2019-09-23T21:27:29 | 2019-09-23T21:27:29 | 210,188,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | {"filter":false,"title":"manage.py","tooltip":"/manage.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1569172955503,"hash":"061598ccdf948a3d248fc3bb7a078dae02cec539"} | [
"ubuntu@ip-172-31-88-117.ec2.internal"
] | ubuntu@ip-172-31-88-117.ec2.internal |
dca21316cca42b67e54b9d630d8524b9ddf48686 | 28e54666a9d30fee0bfba30e7f0206cb38943ef4 | /문자열/3. Longest Substring Without Repeating Characters.py | 6f607115d43068bd404276fd7ea22d4c2b68b5eb | [] | no_license | SunghyunChoi/Algorithm | 804808b16b1c70f1b52ee55b2a927fd663ea8b48 | 736fd18c1bc6e6c2fed72b01ae67075f8bf0683f | refs/heads/master | 2023-06-05T11:22:17.411474 | 2021-06-24T14:31:23 | 2021-06-24T14:31:23 | 299,884,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | class Solution:
# @return an integer
def lengthOfLongestSubstring(self, s):
start = maxLength = 0
usedChar = {}
for i in range(len(s)):
if s[i] in usedChar and start <= usedChar[s[i]]:
start = usedChar[s[i]] + 1
else:
maxLength = max(maxLength, i - start + 1)
usedChar[s[i]] = i
return maxLength
solution = Solution()
print(solution.lengthOfLongestSubstring("dvdf")) | [
"chltjdgus99@naver.com"
] | chltjdgus99@naver.com |
598e66cd794150397c8cf73002b440126b93541a | 951fc0da7384b961726999e5451a10e2783462c4 | /script.module.ATFTV/addon.py | 08dc093ce00ace1411bebb0134af1dcc39de1c05 | [] | no_license | vphuc81/MyRepository | eaf7b8531b2362f0e0de997a67b889bc114cd7c2 | 9bf8aca6de07fcd91bcec573f438f29e520eb87a | refs/heads/master | 2022-01-02T15:07:35.821826 | 2021-12-24T05:57:58 | 2021-12-24T05:57:58 | 37,680,232 | 6 | 10 | null | null | null | null | UTF-8 | Python | false | false | 7,622 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016,2017,2018 RACC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sys
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
from xbmcgui import ListItem
from routing import Plugin
import os
import traceback
import requests
import requests_cache
from datetime import timedelta
from base64 import b64decode, urlsafe_b64encode
from pyDes import des, PAD_PKCS5
try:
from urllib.parse import quote_from_bytes as orig_quote
except ImportError:
from urllib import quote as orig_quote
addon = xbmcaddon.Addon()
plugin = Plugin()
plugin.name = addon.getAddonInfo("name")
user_agent = "Dalvik/2.1.0 (Linux; U; Android 5.1.1; AFTS Build/LVY48F)"
player_user_agent = "mediaPlayerhttp/2.1 (Linux;Android 5.1) ExoPlayerLib/2.6.1"
USER_DATA_DIR = xbmc.translatePath(addon.getAddonInfo("profile")).decode("utf-8") # !!
CACHE_TIME = int(addon.getSetting("cache_time"))
CACHE_FILE = os.path.join(USER_DATA_DIR, "cache")
expire_after = timedelta(hours=CACHE_TIME)
if not os.path.exists(USER_DATA_DIR):
os.makedirs(USER_DATA_DIR)
s = requests_cache.CachedSession(CACHE_FILE, allowable_methods="POST", expire_after=expire_after, old_data_on_error=True)
s.hooks = {"response": lambda r, *args, **kwargs: r.raise_for_status()}
s.headers.update({"User-Agent": "USER-AGENT-tvtap-APP-V2"})
token_url = "http://tvtap.net/tvtap1/index_new.php?case=get_channel_link_with_token_tvtap"
list_url = "http://tvtap.net/tvtap1/index_new.php?case=get_all_channels"
def quote(s, safe=""):
return orig_quote(s.encode("utf-8"), safe.encode("utf-8"))
@plugin.route("/")
def root():
categories = {
"01": "UK & USA Channels",
"02": "Movies",
"03": "Music",
"04": "News",
"05": "Sport",
"06": "Documentary",
"07": "Kids",
"08": "Food",
"09": "Religious",
}
list_items = []
for cat in categories.keys():
li = ListItem(categories[cat])
url = plugin.url_for(list_channels, cat_id=cat.lstrip("0"))
list_items.append((url, li, True))
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addDirectoryItems(plugin.handle, list_items)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/list_channels/<cat_id>")
def list_channels(cat_id=None):
list_items = []
r = s.post(list_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"username": "603803577"}, timeout=15)
if "Could not connect" in r.content:
s.cache.clear()
ch = r.json()
for c in ch["msg"]["channels"]:
if c["cat_id"] == cat_id:
image = "http://tvtap.net/tvtap1/{0}|User-Agent={1}".format(quote(c.get("img"), "/"), quote(user_agent))
li = ListItem(c["channel_name"].rstrip("."))
li.setProperty("IsPlayable", "true")
li.setArt({"thumb": image, "icon": image})
li.setInfo(type="Video", infoLabels={"Title": c["channel_name"].rstrip("."), "mediatype": "video"})
try:
li.setContentLookup(False)
except AttributeError:
pass
url = plugin.url_for(play, ch_id=c["pk_id"])
list_items.append((url, li, False))
xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addDirectoryItems(plugin.handle, list_items)
xbmcplugin.endOfDirectory(plugin.handle)
@plugin.route("/play/<ch_id>/play.pvr")
def play(ch_id):
    """Resolve channel *ch_id* to a playable stream URL and hand it to Kodi.

    Stream links come back DES-encrypted and base64-encoded from the token
    endpoint; every "stream"/"chrome_cast" field is decrypted and collected,
    then one link is auto-played or chosen by the user, and finally wrapped
    for inputstream.adaptive / livestreamer / direct playback as configured.
    """
    # 178.132.6.54 81.171.8.162
    key = b"19087321"  # static DES key used by the upstream app
    r = s.post(list_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"username": "603803577"}, timeout=15)
    ch = r.json()
    # Find the catalogue record for ch_id.  NOTE(review): assumes ch_id is
    # always present -- otherwise selected_channel is unbound below.
    for c in ch["msg"]["channels"]:
        if c["pk_id"] == ch_id:
            selected_channel = c
            break
    title = selected_channel.get("channel_name")
    image = "http://tvtap.net/tvtap1/{0}|User-Agent={1}".format(quote(c.get("img"), "/"), quote(user_agent))
    # Token responses must always be fresh -- never serve them from cache.
    with s.cache_disabled():
        r = s.post(token_url, headers={"app-token": "9120163167c05aed85f30bf88495bd89"}, data={"channel_id": ch_id, "username": "603803577"}, timeout=15)
    links = []
    for stream in r.json()["msg"]["channel"][0].keys():
        if "stream" in stream or "chrome_cast" in stream:
            d = des(key)
            link = d.decrypt(b64decode(r.json()["msg"]["channel"][0][stream]), padmode=PAD_PKCS5)
            if link:
                link = link.decode("utf-8")
                # "dummytext" marks unused slots; also drop duplicates.
                if not link == "dummytext" and link not in links:
                    links.append(link)
    if addon.getSetting("autoplay") == "true":
        link = links[0]
    else:
        # NOTE(review): dialog.select returns -1 on cancel, which picks
        # links[-1] here -- confirm whether cancel should abort instead.
        dialog = xbmcgui.Dialog()
        ret = dialog.select("Choose Stream", links)
        link = links[ret]
    if link.startswith("http"):
        media_url = "{0}|User-Agent={1}".format(link, quote(player_user_agent))
    else:
        media_url = link
    if "playlist.m3u8" in media_url:
        if addon.getSetting("inputstream") == "true":
            # Play HLS through inputstream.adaptive with the UA header attached.
            li = ListItem(title, path=media_url)
            li.setArt({"thumb": image, "icon": image})
            li.setMimeType("application/vnd.apple.mpegurl")
            li.setProperty("inputstreamaddon", "inputstream.adaptive")
            li.setProperty("inputstream.adaptive.manifest_type", "hls")
            li.setProperty("inputstream.adaptive.stream_headers", media_url.split("|")[-1])
        elif addon.getSetting("livestreamer") == "true":
            # Proxy the HLS stream through the bundled local livestreamer proxy,
            # starting it (up to 11 attempts) if port 19001 is not answering.
            serverPath = os.path.join(xbmc.translatePath(addon.getAddonInfo("path")), "livestreamerXBMCLocalProxy.py")
            runs = 0
            while not runs > 10:
                try:
                    requests.get("http://127.0.0.1:19001/version")
                    break
                except Exception:
                    xbmc.executebuiltin("RunScript(" + serverPath + ")")
                    runs += 1
                    xbmc.sleep(600)
            livestreamer_url = "http://127.0.0.1:19001/livestreamer/" + urlsafe_b64encode("hlsvariant://" + media_url)
            li = ListItem(title, path=livestreamer_url)
            li.setArt({"thumb": image, "icon": image})
            li.setMimeType("video/x-mpegts")
        else:
            # Plain HLS playback through Kodi's own player.
            li = ListItem(title, path=media_url)
            li.setArt({"thumb": image, "icon": image})
            li.setMimeType("application/vnd.apple.mpegurl")
            try:
                li.setContentLookup(False)
            except AttributeError:
                # Not available on older Kodi versions.
                pass
    else:
        li = ListItem(title, path=media_url)
        li.setArt({"thumb": image, "icon": image})
    xbmcplugin.setResolvedUrl(plugin.handle, True, li)
if __name__ == "__main__":
    try:
        plugin.run(sys.argv)
        s.close()
    except requests.exceptions.RequestException as e:
        # Network failure: show a Kodi notification instead of dying silently,
        # then close the directory listing as failed.
        dialog = xbmcgui.Dialog()
        dialog.notification(plugin.name, str(e), xbmcgui.NOTIFICATION_ERROR)
        traceback.print_exc()
        xbmcplugin.endOfDirectory(plugin.handle, False)
| [
"vinhphuc_81@yahoo.com"
] | vinhphuc_81@yahoo.com |
17f77bc38e3f754124901201d5eb455d4e1664d0 | fb668e010cf15aae811a2951afc6628e5021c9c3 | /trademarks/migrations/0004_convert_userreaction_word.py | 17f756be3d954cd0980b46b396edc43b9453420a | [] | no_license | niggin/Trademarks_Deployment | ef36f5f321623352d1d8a3bae5f23e50920caa06 | bca70184e64abf3d5e42c595c2bb8cc2a3825daf | refs/heads/master | 2021-01-19T16:59:39.101234 | 2014-12-08T11:19:17 | 2014-12-08T11:19:17 | 17,259,621 | 0 | 0 | null | 2014-12-01T08:55:12 | 2014-02-27T18:34:19 | Python | UTF-8 | Python | false | false | 3,374 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: link each UserReaction to its History row.

    Populates the new ``user_word`` foreign key from the free-text
    ``input_word`` field introduced in an earlier schema migration.
    """
    def forwards(self, orm):
        "Write your forwards methods here."
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        # Resolve the reaction's raw input_word string to the matching
        # History row.  NOTE(review): History.objects.get raises if no
        # matching word exists -- confirm every input_word has a History row.
        for userreaction in orm.UserReaction.objects.all():
            userreaction.user_word = orm.History.objects.get(word=userreaction.input_word)
            userreaction.save()
    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: forwards only fills a nullable foreign key.
    # Frozen ORM definitions generated by South -- do not edit by hand.
    models = {
        u'trademarks.history': {
            'Meta': {'object_name': 'History'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'requests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'word': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'trademarks.session': {
            'Meta': {'object_name': 'Session'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'word': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trademarks.History']"})
        },
        u'trademarks.userreaction': {
            'Meta': {'object_name': 'UserReaction'},
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'dislike': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'input_word': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'like': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'to_word': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trademarks.Word']"}),
            'user_word': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trademarks.History']", 'null': 'True'})
        },
        u'trademarks.word': {
            'Meta': {'object_name': 'Word'},
            'fullipa': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ipa': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'lang': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'meaning': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'meaning_eng': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'transcription': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'word': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }
    complete_apps = ['trademarks']
    symmetrical = True
| [
"rubik303@mail.ru"
] | rubik303@mail.ru |
b4bcab68b3b7a21477735f4d6559fe1ff0a17484 | 20588b6ea02e4ea2b7ad61d4bd77de31bd940256 | /Librera/librera/apps.py | d4b5a445632b54d876b47b5971ba6f78b31fab8f | [] | no_license | antoniogomez093/Librera | 7629787f47c4a80bcd22ff784010f0d219d27b79 | 23a87181758548695df5a5637253eab853205c03 | refs/heads/master | 2020-04-30T06:15:59.798258 | 2019-03-20T03:56:42 | 2019-03-20T03:56:42 | 176,647,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class LibreraConfig(AppConfig):
    """Django application configuration for the 'librera' app."""
    name = 'librera'
| [
"antoniogomez093@gmail.com"
] | antoniogomez093@gmail.com |
fda56427bc53e80b66f30c189b2cf682de401ada | bf720a2b404dbab0b08838a805fd8d37f1017b38 | /noaaTideSoapHL.py | 70f57ec46135c9c818fd537fd9e5b66ecacac0ce | [] | no_license | ChadChapman/tidesProject | 6f5b53ceecdfe0737370b5cbc37b7212d784cc39 | 3ccaa6a149953699c20edc81475e48cea512c1e3 | refs/heads/master | 2021-01-01T20:40:41.023931 | 2017-08-04T23:50:22 | 2017-08-04T23:50:22 | 98,911,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | from zeep import Client
# Build a SOAP client for NOAA's high/low tide prediction service.
# Bug fix: the original assigned the bare WSDL URL *string* to ``client``
# (the imported zeep ``Client`` was never called), so the
# ``client.service`` attribute access below could never work.
client = Client("https://opendap.co-ops.nos.noaa.gov/axis/webservices/highlowtidepred/wsdl")
client.service.submit_order(user_id=1, order={
    'stationId': '9446484',  # first Tacoma station id found with quick search
    'beginDate': '20060920 00:00',
    'endDate': '20060922 23:59',
    'datum': 'MLLW',  # Mean Lower Low Water reference datum
    'unit': '0',
    'timeZone': '0',
})
"noreply@github.com"
] | ChadChapman.noreply@github.com |
c70e076147bb1a64bc5b42f37dd0777575a12e34 | 0b076ae5b9962844549a0590975b7963c38f6627 | /youdaoledu2/login.py | 0fda8666ece497c0b20e46ecae8dcd76a95f6354 | [] | no_license | 1111xu/yread-Android-UI | 7c6c171481acc605529411f4220419318436e924 | 24051e74bd7c5030d9dc6b2e4efb5f7367776f2c | refs/heads/master | 2022-09-03T18:27:55.017909 | 2020-05-21T11:26:09 | 2020-05-21T11:26:09 | 258,079,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,014 | py | # coding=utf-8
import logging
from youdaoledu2.open import appium_desired,webdriver
from youdaoledu2.First_start import First_start
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.common.exceptions import TimeoutException
from time import sleep
class Login_phone(First_start):
    """Page object: log into the app with a phone number and password."""
    # Locators for the password-login form fields and the submit button.
    PhoneNumber=(By.ID,'com.youdao.yread:id/etPhoneNumber')
    userPwd=(By.ID,'com.youdao.yread:id/etPassword')
    loginBtn=(By.ID,'com.youdao.yread:id/btnLogin')
    def login_pwd(self,phoneNumber,userpwd):
        """Dismiss startup dialogs, navigate to password login and submit.

        :param phoneNumber: phone number typed into the account field
        :param userpwd: password typed into the password field
        """
        # Dismiss the privacy dialog and the promo popup shown on first start.
        self.check_PrivacyRightBtn()
        self.check_PopupCloseBtn()
        logging.info('==========into_loginpage=========')
        self.driver.find_element(By.ID, 'com.youdao.yread:id/ivAvatar').click()
        self.driver.find_element(By.ID,'com.youdao.yread:id/btnLoginByPhone').click() # 进入手机登录
        logging.info('==========into_pwdlogin=========')
        # Switch from SMS-code login to password login.
        self.driver.find_element(By.ID,"com.youdao.yread:id/tvLoginByPassword").click()
        logging.info('==========pwdlogin=========')
        logging.info('input username:%s'%phoneNumber)
        self.driver.find_element(*self.PhoneNumber).send_keys(phoneNumber)
        logging.info('input userpwd:%s'%userpwd)
        self.driver.find_element(*self.userPwd).send_keys(userpwd)
        logging.info('click loginBtn')
        self.driver.find_element(*self.loginBtn).click()
def get_toast(driver, text=None, timeout=5, poll_frequency=0.5):
    """Locate an Android toast message.

    :param driver: driver
    :param text: toast text to match; when None, match any toast widget
    :param timeout: Number of seconds before timing out, By default, it is 5 second.
    :param poll_frequency: sleep interval between calls, By default, it is 0.5 second.
    :return: the toast WebElement, or the string "Toast not found" on timeout
    """
    if text:
        toast_loc = ("//*[contains(@text, '%s')]" % text)
    else:
        toast_loc = "//*[@class='android.widget.Toast']"
    try:
        # Bug fix: the original referenced an undefined alias ``EC`` (this
        # module imports ``expected_conditions`` un-aliased), raising
        # NameError on every call; the bare ``except:`` then masked it.
        WebDriverWait(driver, timeout, poll_frequency).until(
            expected_conditions.presence_of_element_located(('xpath', toast_loc)))
        toast_elm = driver.find_element_by_xpath(toast_loc)
        return toast_elm
    except TimeoutException:
        # Keep the original sentinel return value for existing callers.
        return "Toast not found"
if __name__ == '__main__':
    # Manual smoke test: start the app via Appium and log in with a test account.
    driver=appium_desired()
    L=Login_phone(driver)
    L.login_pwd('15888509413','abc12345')
| [
"wb.xurunze@cn.net.ntes"
] | wb.xurunze@cn.net.ntes |
3040be782248c917cdc83a55505739f977559922 | bf2d010229aece071359662f4fef44e48ba57951 | /dynamic_range_parallel_pipeline.py | 6432414ec8f72d79b72df4a68b82b80d29b6a4bc | [] | no_license | Osrip/CriticalEvolution | b97398f74e2fc5b54c9ab92765b08ce3bf97257e | f77cae8acc626cb4c6d64d5a44fdf00310309c2e | refs/heads/master | 2021-06-24T03:44:03.283017 | 2021-04-03T13:09:42 | 2021-04-03T13:09:42 | 215,332,038 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,763 | py | import os
from multiprocessing import Pool
import argparse
import train
import copy
from automatic_plot_helper import detect_all_isings
from automatic_plot_helper import load_isings_from_list
from automatic_plot_helper import load_settings
from automatic_plot_helper import all_sim_names_in_parallel_folder
import time
import ray
from switch_season_repeat_plotting import plot_pipeline
import pickle
from run_combi import RunCombi
import numpy as np
def dynamic_pipeline_all_sims(folder_names, pipeline_settings):
    """Run the dynamic-range pipeline for every simulation in *folder_names*.

    Depending on ``pipeline_settings['parallelize_each_sim']``, simulations
    are processed sequentially or fanned out with ray (one remote task per
    simulation), optionally with a per-task memory reservation.

    Bug fix: the original used an accidental ``for``/``else`` construct, so
    the ray branch executed unconditionally after the sequential loop,
    processing every simulation a second time whenever
    ``parallelize_each_sim`` was False.
    """
    if not pipeline_settings['parallelize_each_sim']:
        for folder_name in folder_names:
            sim_names = all_sim_names_in_parallel_folder(folder_name)
            for i, sim_name in enumerate(sim_names):
                # Optionally limit how many simulations per folder are processed.
                if pipeline_settings['only_plot_certain_num_of_simulations'] is None:
                    dynamic_pipeline_one_sim(sim_name, pipeline_settings)
                elif pipeline_settings['only_plot_certain_num_of_simulations'] > i:
                    dynamic_pipeline_one_sim(sim_name, pipeline_settings)
    else:
        all_sim_names = np.array([])
        for folder_name in folder_names:
            sim_names = all_sim_names_in_parallel_folder(folder_name)
            all_sim_names = np.append(all_sim_names, sim_names)
        ray.init(num_cpus=pipeline_settings['cores'])
        if pipeline_settings['specify_memory_usage']:
            ray_funcs = [dynamic_pipeline_one_sim_remote_memory.remote(sim_name, pipeline_settings) for sim_name in all_sim_names]
        else:
            ray_funcs = [dynamic_pipeline_one_sim_remote.remote(sim_name, pipeline_settings) for sim_name in all_sim_names]
        ray.get(ray_funcs)
        ray.shutdown()
ray.shutdown()
@ray.remote
def dynamic_pipeline_one_sim_remote(sim_name, pipeline_settings):
    # Ray task: run all repeat experiments for one trained simulation.
    original_settings = load_settings(sim_name)
    settings = create_settings_for_repeat(original_settings, sim_name, pipeline_settings)
    run_all_repeats(settings, original_settings, pipeline_settings)
# Exact copy of dynamic_pipeline_one_sim_remote but with an explicit memory
# reservation.  Note: the reservation is per ray task!
@ray.remote(memory=1500 * 1024 * 1024)
def dynamic_pipeline_one_sim_remote_memory(sim_name, pipeline_settings):
    # Ray task: run all repeat experiments for one trained simulation.
    original_settings = load_settings(sim_name)
    settings = create_settings_for_repeat(original_settings, sim_name, pipeline_settings)
    run_all_repeats(settings, original_settings, pipeline_settings)
# Exact copy of dynamic_pipeline_one_sim_remote but without the ray.remote
# decorator, used for sequential processing.
def dynamic_pipeline_one_sim(sim_name, pipeline_settings):
    # Run all repeat experiments for one trained simulation.
    original_settings = load_settings(sim_name)
    settings = create_settings_for_repeat(original_settings, sim_name, pipeline_settings)
    run_all_repeats(settings, original_settings, pipeline_settings)
def create_settings_for_repeat(settings, sim_name, pipeline_settings):
    """Derive the settings dict used by the non-evolving repeat runs.

    Deep-copies the trained simulation's settings and switches off
    evolution, plotting and data saving so the loaded generation can be
    re-run unchanged under different food / time-step conditions.

    Bug fix: the deep copy is now taken *before* any mutation.  The
    original switched off ``random_time_steps`` / ``random_food_seasons``
    on the caller's dict first, so ``run_all_repeats`` later read the
    already-overwritten flags from ``original_settings``.
    """
    settings = copy.deepcopy(settings)
    # settings['TimeSteps'] = 5
    if pipeline_settings['varying_parameter'] == 'time_steps':
        settings['random_time_steps'] = False
    elif pipeline_settings['varying_parameter'] == 'food':
        settings['random_food_seasons'] = False
    complete_sim_folder = sim_name
    settings['loadfile'] = complete_sim_folder
    if pipeline_settings['load_last_generation']:
        # Load the last saved generation and remember its number for plotting.
        settings['iter'] = detect_all_isings(complete_sim_folder)[-1]
        pipeline_settings['load_generation'] = detect_all_isings(complete_sim_folder)[-1]
    else:
        settings['iter'] = pipeline_settings['load_generation']
    settings['LoadIsings'] = True
    settings['switch_off_evolution'] = True
    settings['save_data'] = False
    settings['switch_seasons_repeat_pipeline'] = True
    settings['dynamic_range_pipeline'] = True
    # Animations:
    settings['plot_generations'] = pipeline_settings['animation_for_repeats']
    settings['repeat_pipeline_switched_boo'] = None
    settings['random_time_steps_power_law'] = False
    settings['commands_in_folder_name'] = False
    settings['plot_pipeline'] = False
    # switches off animation:
    settings['plot'] = False
    settings['save_energies_velocities_last_gen'] = False
    settings['compress_save_isings'] = pipeline_settings['compress_save_isings']
    return settings
def run_all_repeats(settings, original_settings, pipeline_settings):
    """Run the repeat experiments across the whole range of the varying parameter.

    Computes the original simulation's mean food number (or time-step count --
    see the warning below), samples ``resolution`` values between
    ``lowest_food_percent`` and ``highest_food_percent`` of it, and launches
    one repeat run per value, optionally parallelized with ray.
    """
    # WATCH OUT !!! PARAMETERS WITH "FOOD" IN THEM CAN ALSO BECOME TIME STEPS !!!
    if pipeline_settings['varying_parameter'] == 'time_steps':
        if not original_settings['random_time_steps']:
            original_mean_food_num = original_settings['TimeSteps']
        else:
            # Mean of the random time-step limits.
            original_mean_food_num = (settings['random_time_step_limits'][0] + settings['random_time_step_limits'][1]) / 2
        # if original_settings['random_time_steps_power_law']:
        #     print('!!! random_time_steps_power_law is not supported !!!')
    elif pipeline_settings['varying_parameter'] == 'food':
        if not original_settings['random_food_seasons']:
            original_mean_food_num = original_settings['food_num']
        else:
            original_mean_food_num = (settings['rand_food_season_limits'][0] + settings['rand_food_season_limits'][1]) / 2
    lowest_food_num = original_mean_food_num * (pipeline_settings['lowest_food_percent'] / 100.0)
    if lowest_food_num < 1:
        lowest_food_num = 1
    highest_food_num = original_mean_food_num * (pipeline_settings['highest_food_percent'] / 100.0)
    resolution = pipeline_settings['resolution']
    # Reconstructed: the original line was truncated ("food_num_arr = l",
    # an undefined name).  Sample `resolution` evenly spaced integer values
    # over the requested range -- TODO confirm against the original intent.
    food_num_arr = np.linspace(lowest_food_num, highest_food_num, resolution).astype(int)
    # Append food_num of original simulation if not already in list
    if not original_mean_food_num in food_num_arr:
        food_num_arr = np.append(food_num_arr, original_mean_food_num)
        food_num_arr = np.sort(food_num_arr)
    if pipeline_settings['parallelize_run_repeats']:
        ray.init(num_cpus=pipeline_settings['cores']) #, ignore_reinit_error=True
        ray_funcs = [run_repeat_remote.remote(food_num, settings, pipeline_settings, food_num_arr, original_mean_food_num) for food_num in food_num_arr]
        ray.get(ray_funcs)
        ray.shutdown()
    else:
        [run_repeat(food_num, settings, pipeline_settings, food_num_arr, original_mean_food_num) for food_num in food_num_arr]
    # run_repeat(20, settings, pipeline_settings)
# run_repeat(20, settings, pipeline_settings)
@ray.remote
def run_repeat_remote(num_foods, settings, pipeline_settings, food_num_arr, original_mean_food_num):
    # Ray task: run `num_repeats` repeats with a fixed food number or
    # time-step count (despite the name, num_foods can be either -- see
    # the warning in run_all_repeats).
    if pipeline_settings['varying_parameter'] == 'time_steps':
        settings['TimeSteps'] = num_foods
        # Activate saving of energies and velocities during life time for simulation with similar varying param as
        # original simulation and for largest varying param
        if num_foods == original_mean_food_num or num_foods == np.max(food_num_arr):
            settings['save_energies_velocities_last_gen'] = True
            print(num_foods)
    elif pipeline_settings['varying_parameter'] == 'food':
        settings['food_num'] = num_foods
    # Encode the varying parameter's value in the save-file name.
    if pipeline_settings['varying_parameter'] == 'food':
        settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_foods_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
    elif pipeline_settings['varying_parameter'] == 'time_steps':
        settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_time_step_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
    Iterations = pipeline_settings['num_repeats']
    train.run(settings, Iterations)
# Exact copy of run_repeat_remote but without ray.remote decorator
def run_repeat(num_foods, settings, pipeline_settings, food_num_arr, original_mean_food_num):
    # Run `num_repeats` repeats with a fixed food number or time-step count
    # (despite the name, num_foods can be either -- see run_all_repeats).
    if pipeline_settings['varying_parameter'] == 'time_steps':
        settings['TimeSteps'] = num_foods
        # Activate saving of energies and velocities during life time for simulation with similar varying param as
        # original simulation and for largest varying param
        if num_foods == original_mean_food_num or num_foods == np.max(food_num_arr):
            settings['save_energies_velocities_last_gen'] = True
            print(num_foods)
    elif pipeline_settings['varying_parameter'] == 'food':
        settings['food_num'] = num_foods
    # Encode the varying parameter's value in the save-file name.
    if pipeline_settings['varying_parameter'] == 'food':
        settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_foods_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
    elif pipeline_settings['varying_parameter'] == 'time_steps':
        settings['dynamic_range_pipeline_save_name'] = '{}dynamic_range_run_time_step_{}'.format(pipeline_settings['add_save_file_name'], num_foods)
    Iterations = pipeline_settings['num_repeats']
    train.run(settings, Iterations)
if __name__=='__main__':
    '''
    BETTER NAME: FOOD or TIME STEP DENSITY RESPONSE CURVE
    This module explores the dynamic range of random food simulations:
    It expects a file with with random food season parameter active
    It then takes the last generation of that simulation and puts it into different environments with fixed amount of
    foods. There the organisms do not evolve but the experiment is repeated from scratch a given amount of times, which
    is defined by "num_repeats" to get statistically meaningful results.
    Cores should be about equal to the resolution, which should also be int
    '''
    # All pipeline configuration is collected in one dict and threaded
    # through the functions above.
    pipeline_settings = {}
    pipeline_settings['varying_parameter'] = 'time_steps' # 'food'
    pipeline_settings['cores'] = 58
    pipeline_settings['num_repeats'] = 3
    # Range of the varying parameter, as percentages of the original
    # simulation's mean value.
    if pipeline_settings['varying_parameter'] == 'food':
        pipeline_settings['lowest_food_percent'] = 1
        pipeline_settings['highest_food_percent'] = 1000
    elif pipeline_settings['varying_parameter'] == 'time_steps':
        pipeline_settings['lowest_food_percent'] = 1
        pipeline_settings['highest_food_percent'] = 2500
    pipeline_settings['resolution'] = 40
    # !!!!!!!! add_save_file_name has to be unique each run and must not be a substring of previous run !!!!!!!!!
    # !!!!!!!! otherwise runs are indistringuishible !!!!!!!!!
    pipeline_settings['add_save_file_name'] = 'res_40_3_repeats_gen_4000' #'resulotion_80_hugeres_3_repeats_gen_100' # 'resulotion_80_hugeres_3_repeats_last_gen'
    # list of repeats, that should be animated, keep in mind, that this Creates an animation for each REPEAT!
    # If no animations, just emtpy list, if an animation should be created f.e. [0]
    pipeline_settings['animation_for_repeats'] = []
    # This loads last / highest generation from trained simulation
    pipeline_settings['load_last_generation'] = False
    # Otherwise specify generation, that shall be loaded, make sure thsi generation exists in all loaded simulations:
    pipeline_settings['load_generation'] = 4000
    # The following command allows to only plot a certain number of simulations in each parallel simulations folder
    # If all simulations in those folders shall be plotted, set to None
    pipeline_settings['only_plot_certain_num_of_simulations'] = None
    # The following settings define the level of parallelization. Use 'parallelize_run_repeats' for low level
    # parallelization when plotting few simulations. use high level parallelization with 'parallelize_each_sim' when
    # plotting many simulations. Both does not work at the same time. 'parallelize_each_sim' particularly recommended
    # when varying time steps
    pipeline_settings['parallelize_each_sim'] = True
    pipeline_settings['parallelize_run_repeats'] = False
    # Specific memory usage per parallel task has to be specified in dynamic_pipeline_one_sim_remote_memory
    # only works for pipeline_settings['parallelize_each_sim'] = True
    pipeline_settings['specify_memory_usage'] = True
    pipeline_settings['compress_save_isings'] = True
    # folder_names = ['sim-20201022-184145_parallel_TEST_repeated']
    # folder_names = ['sim-20201022-190553_parallel_b1_normal_seas_g4000_t2000', 'sim-20201022-190615_parallel_b10_normal_seas_g4000_t2000']#, 'sim-20201105-202455_parallel_b1_random_ts_2000_lim_100_3900', 'sim-20201105-202517_parallel_b10_random_ts_2000_lim_100_3900']
    # folder_names = ['sim-20201026-224639_parallel_b1_fixed_4000ts_', 'sim-20201026-224709_parallel_b10_fixed_4000ts_', 'sim-20201022-190553_parallel_b1_normal_seas_g4000_t2000', 'sim-20201022-190615_parallel_b10_normal_seas_g4000_t2000', 'sim-20201026-224655_parallel_b1_random_100-7900ts_', 'sim-20201026-224722_parallel_b10_random_100-7900ts_', 'sim-20201105-202455_parallel_b1_random_ts_2000_lim_100_3900', 'sim-20201105-202517_parallel_b10_random_ts_2000_lim_100_3900']
    folder_names = ['sim-20210206-122918_parallel_b1_normal_run_g4000_t2000_54_sims']#, 'sim-20201119-190204_parallel_b10_normal_run_g4000_t2000_54_sims']
    dynamic_pipeline_all_sims(folder_names, pipeline_settings)
| [
"jan.prosi@hotmail.com"
] | jan.prosi@hotmail.com |
6853de685212b455aebebb14ca3cee9e2f0e9813 | b42a9da792bbc79ce22c06ef1d5019263e48eac4 | /settings.py | 4c917651914189b203f15460b589a8188cca5804 | [] | no_license | foolishwolf/spider4dianping | 5fe39c7a1fd173d57299aae4c52829dbe4bc5c60 | b409dfbdfd5e64513a4200bd516df54916cb61d1 | refs/heads/master | 2021-01-10T11:45:50.160988 | 2015-10-18T05:35:23 | 2015-10-18T05:35:23 | 44,080,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | # -*- coding: utf-8 -*-
# Scrapy settings for dishRec project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'dishRec'
SPIDER_MODULES = ['dishRec.spiders']
NEWSPIDER_MODULE = 'dishRec.spiders'
# Disable Scrapy's built-in user-agent middleware and replace it with the
# project's random user-agent rotator (helps avoid trivial bot blocking).
DOWNLOADER_MIDDLEWARES = {
    'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware' : None,
    'dishRec.randomUserAgent.RandomUserAgentMiddleware' :400
}
# USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'dishRec (+http://www.yourdomain.com)'
| [
"xuruihan1990@163.com"
] | xuruihan1990@163.com |
2638d539fd17db3f9a443926f3c761356f6d33a5 | f007cce421676dbfb2e12237ce1769698b84ffdd | /repo_dependencies.py | 775c755781b28795f9bd6094b148fe97994c4a1b | [] | no_license | slatex/lmhtools | 0222582f2430e0621dce10c50de0edd856ecad9c | c744baa23616dcadc373ef494d95e4c5bd3b58da | refs/heads/master | 2021-06-10T13:09:36.195041 | 2021-05-13T15:14:58 | 2021-05-13T15:14:58 | 184,758,704 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,339 | py | #!/usr/bin/env python3
"""
Script for fixing the repository dependencies in META-INF/MANIFEST.MF
"""
import os
import re
import lmh_harvest as harvest
# Extra token ids for macros not covered by lmh_harvest (negative values to
# avoid clashing with harvest's own TOKEN_* constants).
TOKEN_MHINPUTREF = -1
TOKEN_MHGRAPHICS = -2
# \mhinputref[repo]{arg} -- the optional argument names the repository.
re_mhinputref = re.compile(
        r"\\n?mhinputref\s*"
        r"(?:\[(?P<params>[^\]]*)\])?\s*"          # parameter
        r"\{(?P<arg>" + harvest.re_arg + r")\}"        # arg
        )
# \mhgraphics[key=value,...]{arg} -- the repository is in the 'mhrepos' key.
re_mhgraphics = re.compile(
        r"\\mhgraphics\s*"
        r"(?:\[(?P<params>[^\]]*)\])?\s*"          # parameter
        r"\{(?P<arg>" + harvest.re_arg + r")\}"        # arg
        )
# All (regex, token id) pairs that gather_repos scans for.
REGEXES = [
        (harvest.re_guse, harvest.TOKEN_GUSE),
        (harvest.re_gimport, harvest.TOKEN_GIMPORT),
        (harvest.re_importmhmodule, harvest.TOKEN_IMPORTMHMODULE),
        (harvest.re_usemhmodule, harvest.TOKEN_USEMHMODULE),
        (re_mhinputref, TOKEN_MHINPUTREF),
        (re_mhgraphics, TOKEN_MHGRAPHICS),
    ]
def gather_repos(path, REPOS):
    """Collect repository dependencies referenced in the TeX file *path*.

    *REPOS* maps a repository name to a human-readable evidence string
    (file, position and matched macro) for its first occurrence; already
    known repositories are left untouched.
    """
    with open(path, "r") as fp:
        content = harvest.preprocess_string(fp.read())

    def record(repo, match):
        # Only the first piece of evidence per repository is kept.
        if repo and repo not in REPOS:
            REPOS[repo] = f"{path}:{harvest.get_file_pos_str(content, match.start())}: {match.group(0)}"

    optional_arg_tokens = (harvest.TOKEN_GUSE, harvest.TOKEN_GIMPORT, TOKEN_MHINPUTREF)
    keyed_param_tokens = (harvest.TOKEN_IMPORTMHMODULE, harvest.TOKEN_USEMHMODULE, TOKEN_MHGRAPHICS)
    for match, token_type in harvest.parse(content, REGEXES):
        if token_type in optional_arg_tokens:
            # The repository name is the macro's optional argument.
            record(match.group("params"), match)
        elif token_type in keyed_param_tokens:
            params = harvest.get_params(match.group("params"))
            key = "mhrepos" if token_type == TOKEN_MHGRAPHICS else "repos"
            if key in params:
                record(params[key], match)
        else:
            assert False
def get_olddeps(line):
    """Parse the manifest's ``dependencies:`` line into a list of repo names.

    *line* is the full line without its trailing newline.  The prefix and
    any leading spaces are stripped and the remainder is split on commas
    with optional surrounding whitespace.
    """
    # lstrip(" ") replaces the original character-by-character while loop;
    # re.split replaces compiling a separator pattern on every call.
    deps = line[len("dependencies:"):].lstrip(" ")
    return re.split(r",\s*", deps)
def adjust_manifest(dir_path, REPOS):
    """Rewrite the dependencies line of ../META-INF/MANIFEST.MF next to *dir_path*.

    Compares the existing entry with the repositories collected in *REPOS*,
    prints a diff-style summary, and only writes the file after interactive
    confirmation.  Returns early (without writing) when the manifest is
    already up-to-date or contains duplicate dependency entries.
    """
    new_manifest = ""
    found_deps = False
    new_line = "dependencies: " + ",".join(REPOS.keys())
    with open(os.path.join(dir_path, "../META-INF/MANIFEST.MF"), "r") as fp:
        for line in fp:
            if line.startswith("dependencies: "):
                if found_deps:
                    print("ERROR: Multiple entries for dependencies found in manifest")
                    return
                # line[:-1] drops the trailing newline before parsing.
                old_entries = set(get_olddeps(line[:-1]))
                new_entries = set(REPOS.keys())
                if old_entries == new_entries:
                    print("The dependencies are already up-to-date")
                    return
                if new_entries - old_entries:
                    print("Adding the following dependencies:", ",".join(list(new_entries - old_entries)))
                    print()
                if old_entries - new_entries:
                    print("Removing the following dependencies:", repr(old_entries - new_entries)) # .join(["'" + s + "'" for s in list(old_entries - new_entries)]))
                    print()
                print("old " + line[:-1])
                print("new " + new_line)
                new_manifest += new_line + "\n"
                found_deps = True
            else:
                new_manifest += line
    if not found_deps:
        # No dependencies entry yet: append one at the end of the manifest.
        print()
        print("No entry for dependencies found in " + os.path.join(dir_path, "META-INF/MANIFEST.MF"))
        print("Appending the following entry:")
        print(new_line)
        new_manifest += new_line + "\n"
    print()
    i = input("Do you want to apply these changes? (enter 'y' to confirm): ")
    if i == 'y':
        with open(os.path.join(dir_path, "../META-INF/MANIFEST.MF"), "w") as fp:
            fp.write(new_manifest)
        print("Dependecies successfully updated")
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Script for fixing repo dependencies in META-INF/MANIFEST.MF",
            epilog="Example call: repo_dependencies.py -v0 ../../sets")
    parser.add_argument("-v", "--verbosity", type=int, default=1, choices=range(4), help="the verbosity (default: 1)")
    parser.add_argument("DIRECTORY", nargs="+", help="git repo or higher level directory for which statistics are generated")
    args = parser.parse_args()
    if args.verbosity >= 2:
        print("GATHERING DATA\n")
    # NOTE(review): logger and mathhub_repo are computed but never used below.
    logger = harvest.SimpleLogger(args.verbosity)
    # determine mathhub folder
    mathhub_repo = os.path.abspath(args.DIRECTORY[0])
    while not mathhub_repo.endswith("MathHub"):
        new = os.path.split(mathhub_repo)[0]
        if new == mathhub_repo:
            raise Exception("Failed to infer MathHub directory")
        mathhub_repo = new
    for directory in args.DIRECTORY:
        if not os.path.isdir(os.path.join(directory, ".git")):    ## TODO: Is there a better way?
            raise Exception("'" + directory + "' doesn't appear to be a git repository")
        # Scan every .tex file under <repo>/source and collect dependencies.
        REPOS = {}    # repo name to evidence
        dir_path = os.path.join(directory, "source")
        for root, dirs, files in os.walk(dir_path):
            for file_name in files:
                if file_name.endswith(".tex"):
                    gather_repos(os.path.join(root, file_name), REPOS)
        for repo in REPOS.keys():
            print("I found this dependency:", repo)
            print("Evidence:", REPOS[repo])
            print()
        # Sanity checks: warn about missing repo directories and drop a
        # self-reference (a repo should not list itself as a dependency).
        to_ignore = None
        for repo in REPOS.keys():
            rp = os.path.abspath(os.path.join(dir_path, "../../..", repo))
            if not os.path.isdir(rp):
                print("WARNING: I didn't find the directory " + rp)
            if directory.endswith(repo):
                print("WARNING: It appears that you self-reference the repo:")
                print("    " + REPOS[repo])
                print("    -> I'm going to ignore this entry")
                to_ignore = repo
        if to_ignore:
            del REPOS[to_ignore]
        print()
        print()
        adjust_manifest(dir_path, REPOS)
| [
"jfschaefer@outlook.com"
] | jfschaefer@outlook.com |
2de100b76ac7bc9504f4c7ba8c897b2f81c28b72 | 58affdc32985e377e0c8c78c2d49871fad7da600 | /Dimuon/test/genSimCrab/crabConfig_GenSim_CIToMuMu_M300_L16000_LL_Con.py | 6af19953ea1b319e9481b2f68c1f21fbffd5e385 | [] | no_license | jarvislam1999/Pythia8-dilepton | 86d764f43c295a30f600b6ef623b41c38623a043 | 5bd749bdc0463176a0c0e7da21f3f336697ad648 | refs/heads/master | 2020-03-25T05:32:15.570262 | 2018-08-03T18:43:38 | 2018-08-03T18:43:38 | 143,452,501 | 0 | 0 | null | 2018-08-03T16:57:06 | 2018-08-03T16:57:06 | null | UTF-8 | Python | false | false | 1,023 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# NOTE(review): this rebinds the imported `config` factory name to the
# config instance -- conventional in CRAB configs, but the factory is
# unreachable afterwards.
config = config()
config.General.requestName = 'Dimuon_GENSIM17_M300to800_CI_L16000_LL_Con_13TeV_Feb5'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'PrivateMC'
config.JobType.psetName = 'mc17genSimcfg.py'
config.JobType.numCores = 8
# Generator parameters: mass window, compositeness scale Lambda and
# helicity structure passed through to the CMSSW config.
config.JobType.pyCfgParams = ['minMass=300','maxMass=800','Lambda=16000','helicityLL=-1','ciGen=1','pdgId=13']
#config.Data.inputDataset = '/GenericTTbar/HC-CMSSW_5_3_1_START53_V5-v1/GEN-SIM-RECO'
config.Data.outputPrimaryDataset = 'CITo2Mu_L16TeV_GENSIM17_Test'
#config.Data.inputDBS = 'global'
config.Data.splitting = 'EventBased'
config.Data.unitsPerJob = 500
NJOBS = 100
# Total events = events per job x number of jobs.
config.Data.totalUnits = config.Data.unitsPerJob * NJOBS
config.Data.outLFNDirBase = '/store/user/szaleski/'
config.Data.publication = True
config.Data.outputDatasetTag = 'MuMu_16TeV_GENSIM17_LLConM300'
config.Site.storageSite = 'T3_US_FNALLPC'
| [
"ek7121@wayne.edu"
] | ek7121@wayne.edu |
dac3f89e9ff6dcef5bdf7d2f7588c8933dd9afa1 | c5291e50a3c72c885922378573a0ad423fcedf05 | /elastic/MainApp/__init__.py | a45576712bb6d54a6826b931b0fc69c4e1e0d94d | [] | no_license | raghurammanyam/django-projects | bcc3ed6285882af437a2995514cef33760fb063e | dd20ae354f7f111a0176a1cc047c099bd23e9f05 | refs/heads/master | 2022-12-12T19:22:31.698114 | 2018-12-09T09:41:45 | 2018-12-09T09:41:45 | 137,443,359 | 0 | 0 | null | 2022-11-22T03:01:07 | 2018-06-15T05:08:15 | Python | UTF-8 | Python | false | false | 51 | py | #default_app_config = 'MainApp.apps.MainappConfig'
| [
"manyamraghuram@gmail.com"
] | manyamraghuram@gmail.com |
5eb44788937ca1fbf4a8a624dde9701a7a41231b | 8ac22dadac75a6968209997eae693db312deeef3 | /tenant_customer/__init__.py | 108c3893fe1910b15d16abd89dfadfd2b7dfad02 | [
"BSD-2-Clause"
] | permissive | smegurus/smegurus-django | 9b7c420d35806850da7e3ce66cffccfbc263bea2 | 053973b5ff0b997c52bfaca8daf8e07db64a877c | refs/heads/master | 2022-11-29T08:43:43.596459 | 2019-01-09T01:47:03 | 2019-01-09T01:47:03 | 159,753,141 | 1 | 0 | BSD-4-Clause | 2022-11-22T01:37:38 | 2018-11-30T01:52:03 | HTML | UTF-8 | Python | false | false | 65 | py | default_app_config = 'tenant_customer.apps.TenantCustomerConfig'
| [
"bart@mikasoftware.com"
] | bart@mikasoftware.com |
54b620345c53ba0b5b2b5bdff024aa2eff3d165c | 94c050f223ca8b6bb84154d742a532b6b8e6a608 | /backend/domain_auth/models.py | 316bdb7ad6eb4bfba83cf8c9afeea7b0755d4b50 | [] | no_license | AryamannNingombam/Webloom-Task | 83c38cd72f62d9d5ec613f7c854056e013fdd205 | cdbe4e664c96c4d9a55159ce9a1ca2e61f4efd53 | refs/heads/master | 2023-07-14T14:46:25.820728 | 2021-08-27T10:59:24 | 2021-08-27T10:59:24 | 397,211,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# this model is there to check if the user
# has verified his account or not, if not, he cannot log in
# . an email is sent to him to verify his account
class UserVerified(models.Model):
    """Per-user account-verification state (see the module comment above:
    unverified users cannot log in; an email is sent to verify the account)."""
    id = models.AutoField(primary_key=True)
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    # False until the user completes verification.
    verified = models.BooleanField(default=False, null=False)
    # Token checked during verification -- presumably embedded in the emailed
    # link; "XXXX" is the unset placeholder (confirm against the email view).
    verification_id = models.CharField(max_length=100,default="XXXX")
    def __str__(self):
        return self.user.username
| [
"aryamannsingh9@gmail.com"
] | aryamannsingh9@gmail.com |
1c29c68135ad71f0956060a5adc6f9ee554ac33e | 284a2d79bb38a9f1a87f429300d10f0458231667 | /zoo.py | 0ce741bde0e8468e25e55ed29ec7b1566fb743ff | [
"MIT"
] | permissive | tollefj/ABSA-PyTorch | 69dca912eea28563a98477883109dff0682ce40d | a88c08a0f1161031fd2e4f7231e079385e4b8680 | refs/heads/master | 2022-09-06T07:59:04.649378 | 2020-06-04T10:41:31 | 2020-06-04T10:41:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | from models import LSTM, TD_LSTM, IAN, RAM, TNet_LF, LCF_BERT, AEN_BERT
import torch
# Model classes selectable by name (classes come from the project's
# `models` package import above).
models = {
    'lstm': LSTM,
    'td_lstm': TD_LSTM,
    'ian': IAN,
    'ram': RAM,
    'tnet_lf': TNet_LF,
    'aen_bert': AEN_BERT,
    'lcf_bert': LCF_BERT
    # default hyper-parameters for LCF-BERT model is as follws:
    # lr: 2e-5
    # l2: 1e-5
    # batch size: 16
    # num epochs: 5
}
# torch optimizer classes selectable by name.
optimizers = {
    'adadelta': torch.optim.Adadelta, # default lr=1.0
    'adagrad': torch.optim.Adagrad, # default lr=0.01
    'adam': torch.optim.Adam, # default lr=0.001
    'adamax': torch.optim.Adamax, # default lr=0.002
    'asgd': torch.optim.ASGD, # default lr=0.01
    'rmsprop': torch.optim.RMSprop, # default lr=0.01
    'sgd': torch.optim.SGD
}
# Weight-initialization functions selectable by name.
initializers = {
    'xavier_uniform_': torch.nn.init.xavier_uniform_,
    # Fixed: previously mapped to torch.nn.init.xavier_normal (a deprecated
    # alias); the in-place initializer matching the key is xavier_normal_.
    'xavier_normal_': torch.nn.init.xavier_normal_,
    'orthogonal_': torch.nn.init.orthogonal_
}
| [
"tollefj@gmail.com"
] | tollefj@gmail.com |
1139cbb6126257f41ecf599562d900110ee97fa1 | 7ac0577053bbb8cf83a9d5feb4c7d0b325a2dd28 | /zip、lambda和map.py | d99f276d8eab87cc9059c5ecb56ffdf709138f56 | [] | no_license | QiangBB/python_learning | 1007aa9807fbe4a7f2ddc5f843e9c3290581ef21 | 573378070b28ea057a413f75b64e56bf61b7cef9 | refs/heads/master | 2020-06-20T13:17:49.190903 | 2019-07-16T06:34:37 | 2019-07-16T06:34:37 | 197,134,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | #zip
# zip: pair two sequences element-wise.
a = [1, 2, 3]
b = [2, 3, 4]
print(list(zip(a, b)))
for left, right in zip(a, b):
    print(left, right)

# lambda: fun2 below is the anonymous-function equivalent of fun1.
def fun1(x, y):
    """Return the sum of x and y."""
    return x + y

print(fun1(1, 2))
fun2 = lambda x, y: x + y
print(fun2(1, 2))

# map: apply fun1 across the paired argument lists (lazy until consumed).
map(fun1, [1], [2])
print(list(map(fun1, [1], [2])))
| [
"752865241@qq.com"
] | 752865241@qq.com |
8dafbf450ae3b49c12a5ec6091ee4839c11488fa | f3aebc85f9d001226207900c6320c3ce9b1e96e2 | /ROS_Tuts/Tut3_ROS_Service/src/add_two_int/scripts/add_two_ints_client.py | eb1eea7cb4dd42b6127f9df8a2b91e78f27f7297 | [] | no_license | m-loay/ROS_Tutorials | 0f3f006c8e4c24d7f52409404214ae10b4e31dbe | 3f376eee3b85849b82d73ddb87560ef1c2cf5730 | refs/heads/main | 2023-04-27T19:50:28.054313 | 2021-04-30T03:06:33 | 2021-04-30T03:06:33 | 349,783,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #!/usr/bin/env python3
import rospy
from rospy_tutorials.srv import AddTwoInts
if __name__ == '__main__':
    # name under which this ROS node registers itself
    nodeName = "add_two_ints_client"
    # name of the remote service to call
    serviceName = "/add_two_ints"
    #Init Node with its name
    rospy.init_node(nodeName)
    #Send a Log msg in terminal
    rospy.loginfo(nodeName+" has been created")
    # Block until the add_two_ints service is advertised.
    rospy.wait_for_service(serviceName)
    try:
        # Typed proxy forwarding calls to the remote AddTwoInts service.
        add_two_ints = rospy.ServiceProxy(serviceName, AddTwoInts)
        response = add_two_ints(5,-1)
        rospy.loginfo("Sum is: " + str(response.sum))
    except rospy.ServiceException as e:
        # The service call itself failed; log and continue.
        rospy.logwarn("Service Failed " + str(e)) | [
"mohamed.loay.ali@gmail.com"
] | mohamed.loay.ali@gmail.com |
0b6b413ca12cd9ea166c27c6d69de41cbdbbae3c | 021ba3c7a50090deb289ae6d592e36e7edf369f3 | /open_parser/search.py | e3f6fb5da1e95449d24afe2d111e7f39a040bc80 | [
"BSD-3-Clause"
] | permissive | nikitcha/open_parser | b23dc2782c666bb9e29796d665c6f1467aaff9f3 | 0716a4938715856493ef213446d7e69c84a4c2ea | refs/heads/main | 2023-09-04T10:40:21.442405 | 2021-10-22T14:18:07 | 2021-10-22T14:18:07 | 415,002,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import sys
from open_parser import Biorxiv, Nature, PNAS, PLOS, RoyalSociety
engines ={'biorxiv':Biorxiv, 'nature':Nature, 'pnas':PNAS, 'plos':PLOS, 'royal_society':RoyalSociety}
def search(term, journal):
    """Search `term` in the given journal, parse the hits, and save results.

    `journal` is matched case-insensitively against the `engines` registry
    above; unknown journal names are silently ignored.
    """
    jname = journal.lower()
    if jname in engines:
        parser = engines[jname]()
        parser.search(term)
        parser.parse_articles()
        parser.save() # Check #home/.open_parser
# CLI usage: python search.py <journal> <search-term>
if __name__=='__main__':
    journal, term = sys.argv[1:]
    search(term, journal) | [
"nikitcha@yahoo.com"
] | nikitcha@yahoo.com |
6b93c25a26b7b34f934e5ad0a9bb905b0bbda736 | 3b9e1f87b68f2859fb38dda8cd0f95761a5e8c74 | /issubstring.py | 2325208bcac41effc6c5c026cd2a840179838cb3 | [] | no_license | Bhavdeep21/hacktoberfest2021 | ce36178e556305b974445343450e5cea392add9c | c9890c4464bf0d1cd91a78e41d58c9b1317f41c2 | refs/heads/main | 2023-08-24T13:27:54.822998 | 2021-10-30T11:28:55 | 2021-10-30T11:28:55 | 422,652,823 | 0 | 1 | null | 2021-10-30T11:28:56 | 2021-10-29T17:02:02 | C++ | UTF-8 | Python | false | false | 650 | py | # a string is substring of other.
# Returns true if s1 is substring of s2
def isSubstring(s1, s2):
    """Naive substring search: return the lowest index in s2 where s1
    occurs, or -1 if s1 is not a substring of s2.

    An empty s1 matches at index 0 (same convention as str.find).
    """
    M = len(s1)
    N = len(s2)
    # Slide the pattern over the text one position at a time.
    for i in range(N - M + 1):
        for j in range(M):
            if s2[i + j] != s1[j]:
                break
        else:
            # Inner loop completed without a break: all M characters matched.
            # (Fixed: the original tested `j + 1 == M` after the loop, which
            # falsely reported a match whenever the comparison broke on the
            # last character, and raised NameError when s1 was empty.)
            return i
    return -1
# Driver Code
if __name__ == "__main__":
s1 = "for"
s2 = "geeksforgeeks"
res = isSubstring(s1, s2)
if res == -1 :
print("Not present")
else:
print("Present at index " + str(res))
| [
"noreply@github.com"
] | Bhavdeep21.noreply@github.com |
ec6aeb30c25573caf0925da3c3ed23837c47509c | c5347ba3bbd2f4f2c7eefa50b2be2cdef94fa8d1 | /src/plugin/binkit/functions_match_viewer.py | e51dfe5ede3784977760930618561dbc7c858513 | [] | no_license | ohjeongwook/binkit | ebc1d58db6ff6950a632cbc8f98ce7078475670f | cfd183d5fa2860f78071d35424d55cae8ca80e60 | refs/heads/master | 2022-12-23T16:43:59.812706 | 2020-10-05T01:34:57 | 2020-10-05T01:34:57 | 266,231,657 | 68 | 9 | null | null | null | null | UTF-8 | Python | false | false | 9,088 | py | import thread
import traceback
import idaapi
import idc
import ida_bytes
from PyQt5 import QtGui, QtCore, QtWidgets
from client import *
from Queue import Queue
from threading import Thread
def sync_worker(queue):
    """Daemon loop that forwards queued command batches to peer IDA sessions.

    Each queue entry is a dict holding the peer database's 'md5' and a
    'list' of commands.  Connections are cached per md5 and re-established
    lazily after a failure.
    """
    syncers = {}
    while True:
        commands = queue.get()
        queue.task_done()
        # Connect on first use (or after a failure cleared the cached entry).
        if not commands['md5'] in syncers or syncers[commands['md5']] == None:
            syncers[commands['md5']] = IDASessions.connect(commands['md5'])
        connection = syncers[commands['md5']]
        try:
            if connection:
                connection.root.run_commands(commands['list'])
        except:
            traceback.print_exc()
            # Drop the cached connection so the next batch reconnects.
            del syncers[commands['md5']]
class NumberSortModel(QtCore.QSortFilterProxyModel):
    """Proxy model that sorts the match table's numeric columns numerically.

    Columns 4-6 hold decimal block counts and columns 1/3 hold '%.8x'
    hexadecimal addresses (see FunctionsMatchViewer.add_item below).
    """
    def lessThan(self, left, right):
        if left.column() in (4, 5, 6):
            lvalue = int(left.data())
            rvalue = int(right.data())
            return lvalue < rvalue
        elif left.column() in (1, 3):
            lvalue = int(left.data(), 16)
            rvalue = int(right.data(), 16)
            return lvalue < rvalue
        else:
            # NOTE(review): this compares the index objects themselves rather
            # than their data -- presumably `left.data() < right.data()` was
            # intended for the string columns; verify.
            return left < right
class FunctionsMatchViewer(idaapi.PluginForm):
    """IDA form listing function matches between this database and a peer.

    Double-clicking a row jumps to the function locally, colors its matched
    and unidentified basic blocks, and queues the mirrored commands for the
    peer IDA session (delivered by sync_worker above).
    """
    def color_lines(self, start, end, color):
        """Color every item from start to end (offsets relative to imagebase)."""
        address = idaapi.get_imagebase() + start
        while address < idaapi.get_imagebase() + end:
            idaapi.set_item_color(address, color)
            address += ida_bytes.get_item_size(address)
    def color_node(self, addresses, bg_color, frame_color = 0x000000):
        """Color flowchart nodes whose start offsets appear in `addresses`.

        Offsets are relative to the imagebase; the function containing the
        first address supplies the flowchart.
        """
        if len(addresses) <= 0:
            return
        func = idaapi.get_func(idaapi.get_imagebase() + addresses[0])
        flowchart_ = idaapi.FlowChart(func)
        address_map = {}
        for address in addresses:
            address_map[idaapi.get_imagebase() + address] = 1
        for code_block in flowchart_:
            if not code_block.start_ea in address_map:
                continue
            node_info = idaapi.node_info_t()
            node_info.bg_color = bg_color
            node_info.frame_color = frame_color
            idaapi.set_node_info(func.start_ea, code_block.id, node_info, idaapi.NIF_BG_COLOR | idaapi.NIF_FRAME_COLOR)
    def set_basic_blocks_color(self):
        # NOTE(review): matched_block_color_function_match is not defined on
        # this class, so calling this method raises AttributeError -- confirm
        # the intended method name.
        for function_match in self.function_matches:
            self.matched_block_color_function_match(function_match)
    def tree_view_double_clicked_handler(self, ix):
        """Jump/color locally, then queue the mirrored commands for the peer."""
        item = ix.data(QtCore.Qt.UserRole)
        idaapi.jumpto(idaapi.get_imagebase() + item.function_match[item.self_name])
        commands = {'md5': item.peer_md5, 'list': []}
        commands['list'].append(({'name': 'jumpto', 'address': item.function_match[item.peer_name]}))
        self_basic_block_addresses = []
        peer_basic_block_addresses = []
        if 'matches' in item.function_match:
            # Matched basic blocks: color on both sides.
            for match_data in item.function_match['matches']:
                self_basic_block_addresses.append(match_data[self.self_name])
                peer_basic_block_addresses.append(match_data[self.peer_name])
                self.color_lines(match_data[self.self_name], match_data[self.self_name+'_end'], self.matched_block_color)
                commands['list'].append({'name': 'color_lines', 'start': match_data[self.peer_name], 'end': match_data[self.peer_name+'_end'], 'color': self.matched_block_color})
            self.color_node(self_basic_block_addresses, self.matched_block_color)
            commands['list'].append({'name': 'color_node', 'addresses': peer_basic_block_addresses, 'bg_color': self.matched_block_color})
        if 'unidentified_blocks' in item.function_match:
            # Blocks with no counterpart: color each side with its own list.
            self_basic_block_addresses = []
            for basic_block in item.function_match['unidentified_blocks'][self.self_name+'s']:
                self_basic_block_addresses.append(basic_block['start'])
                self.color_lines(basic_block['start'], basic_block['end'], self.unidentified_block_color)
            self.color_node(self_basic_block_addresses, self.unidentified_block_color)
            peer_basic_block_addresses = []
            for basic_block in item.function_match['unidentified_blocks'][self.peer_name+'s']:
                peer_basic_block_addresses.append(basic_block['start'])
                commands['list'].append({'name': 'color_lines', 'start': basic_block['start'], 'end': basic_block['end'], 'color': self.unidentified_block_color})
            commands['list'].append({'name': 'color_node', 'addresses': peer_basic_block_addresses, 'bg_color': self.unidentified_block_color})
        item.queue.put(commands)
    def count_blocks(self, function_match):
        """Return matched / self-only / peer-only basic-block counts."""
        matched_block_counts = 0
        self_unidentified_block_counts = 0
        peer_unidentified_block_counts = 0
        if 'matches' in function_match:
            matched_block_counts = len(function_match['matches']) * 2
        if 'unidentified_blocks' in function_match:
            self_unidentified_block_counts += len(function_match['unidentified_blocks'][self.self_name+'s'])
            peer_unidentified_block_counts += len(function_match['unidentified_blocks'][self.peer_name+'s'])
        counts = {}
        counts['matched_block_counts'] = matched_block_counts
        counts['self_unidentified_block_counts'] = self_unidentified_block_counts
        counts['peer_unidentified_block_counts'] = peer_unidentified_block_counts
        return counts
    def add_item(self, function_match):
        """Append one table row for `function_match` and attach its payload."""
        imagebase = idaapi.get_imagebase()
        self_address = imagebase + function_match[self.self_name]
        counts = self.count_blocks(function_match)
        root = self.model.invisibleRootItem()
        columns = [
            QtGui.QStandardItem(idaapi.get_short_name(self_address)),
            QtGui.QStandardItem('%.8x' % self_address),
            QtGui.QStandardItem(function_match[self.peer_name+'_name']),
            QtGui.QStandardItem('%.8x' % function_match[self.peer_name]),
            QtGui.QStandardItem('%d' % counts['matched_block_counts']),
            QtGui.QStandardItem('%d' % counts['self_unidentified_block_counts']),
            QtGui.QStandardItem('%d' % counts['peer_unidentified_block_counts'])
        ]
        root.appendRow(columns)
        # Lightweight record stored on every cell so the double-click handler
        # recovers the match data regardless of which column was clicked.
        class Item:
            def __init__(self, **kwargs):
                self.__dict__.update(kwargs)
        item_data = Item(
            function_match = function_match,
            self_name = self.self_name,
            peer_name = self.peer_name,
            peer_md5 = self.peer_md5,
            queue = self.queue
        )
        for column_item in columns:
            column_item.setData(item_data, QtCore.Qt.UserRole)
    def add_items(self, function_matches, self_name, peer_name, peer_md5, matched_block_color, unidentified_block_color):
        """Populate the table with match results and fix up column widths."""
        self.matched_block_color = matched_block_color
        self.unidentified_block_color = unidentified_block_color
        self.function_matches = function_matches
        self.self_name = self_name
        self.peer_name = peer_name
        self.peer_md5 = peer_md5
        for function_match in self.function_matches:
            self.add_item(function_match)
        self.tree_view.setRootIsDecorated(False)
        self.tree_view.setColumnWidth(0, 100)
        self.tree_view.setColumnWidth(1, 50)
        self.tree_view.setColumnWidth(2, 100)
        self.tree_view.setColumnWidth(3, 50)
        self.tree_view.setColumnWidth(4, 30)
        self.tree_view.setColumnWidth(5, 30)
        self.tree_view.setColumnWidth(6, 30)
    def search_input_changed(self, text):
        """Filter rows as the user types (wildcard match on the proxy model)."""
        self.proxy_model.setFilterWildcard(text)
    def OnCreate(self, form):
        """Build the widget tree and start the background sync worker."""
        self.parent = idaapi.PluginForm.FormToPyQtWidget(form)
        self.columns = ("Source", "Address", "Target", "Address", "Matched", "Removed", "Added")
        self.tree_view = QtWidgets.QTreeView()
        self.tree_view.setSortingEnabled(True)
        self.tree_view.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.tree_view.doubleClicked.connect(self.tree_view_double_clicked_handler)
        self.item_map = {}
        self.model = QtGui.QStandardItemModel(self.tree_view)
        self.model.setHorizontalHeaderLabels(self.columns)
        self.proxy_model = NumberSortModel(self.tree_view)
        self.proxy_model.setSourceModel(self.model)
        self.tree_view.setModel(self.proxy_model)
        self.search_input = QtWidgets.QLineEdit()
        self.search_input.textChanged.connect(self.search_input_changed)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.tree_view)
        layout.addWidget(self.search_input)
        self.parent.setLayout(layout)
        self.queue = Queue(maxsize=0)
        worker = Thread(target=sync_worker, args=(self.queue,))
        worker.setDaemon(True)
        worker.start()
    def Show(self, title):
        """Display the form as a persistent IDA tab."""
        return idaapi.PluginForm.Show(self, title, options = idaapi.PluginForm.FORM_PERSIST)
if __name__ == "__main__":
form = FunctionsMatchViewer()
form.Show("Function Matches")
form.AddTestItems()
| [
"oh.jeongwook@gmail.com"
] | oh.jeongwook@gmail.com |
2c4efdd70a087ebee39af990a0fae554ea083000 | 52c990629932dcc5f13b4753af23c7d395bb4b1b | /STOCK/WIG/tests.py | f47315a3f7882846ef334f67f64ccd8b36345ff8 | [] | no_license | Strzelba2/STOCK | 4a0158534cf3a231df59ead0873d1ac50d6b1ee8 | b1904057a40f74f54abd7629fd8726807229c44c | refs/heads/main | 2023-03-14T17:19:04.662137 | 2021-03-21T19:45:08 | 2021-03-21T19:45:08 | 313,441,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from django.test import TestCase
from .WIG_udate import UPDATE_SCRAP
from .models import CompanyData , Quotes,Wares,WaresData
# Create your tests here.
| [
"artur_strzelczyk@wp.pl"
] | artur_strzelczyk@wp.pl |
077564b3952bc54b073286c6b30b9b5496fcc8eb | 10491a5bcced9a444e20f11de625c0eeac370833 | /JWTConfig.py | 759e9825464fa0f8c8ac2cbfa72b23d523e9d4c2 | [] | no_license | webclinic017/Otacon | 4c5caa48a44f0be3d3ad13d850dbf81cb8b5bbdd | 610f66b5286cb884a65fbea070359c3049af7d75 | refs/heads/master | 2022-04-11T06:44:23.981621 | 2019-06-19T11:15:58 | 2019-06-19T11:15:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | from flask_jwt_extended import (
JWTManager, jwt_required, get_jwt_identity,
create_access_token, create_refresh_token,
jwt_refresh_token_required, get_raw_jwt
)
from flask import jsonify,session
from mongoConnection import MongoDB
from Config import Configuration
import ast
import JsonEncoder as json
import requests
import json
from logger import generate_log
import IbConnection
class JWT:
    """Flask view helpers implementing JWT auth backed by MongoDB records."""
    def login(self, request):
        """Validate email/password from the JSON body and issue a token pair.

        Returns (json, 200) with access/refresh tokens on success, or
        (json, 401) for an unknown user or a wrong password.
        """
        try:
            data = json.loads(request.data.decode())
            self.ip = Configuration().GetData()['PrivateIp']
            self.port = Configuration().GetData()['MongoPort']
            self.db = Configuration().GetData()['MongoDB']
            obj = MongoDB()
            obj.ConnectMongo(self.ip, self.port, self.db)
            record = obj.ReadValue("users", data["email"])
            if (record != None):
                record = ast.literal_eval(record['Data'])
                # NOTE(review): plaintext password comparison -- passwords
                # appear to be stored unhashed; confirm and consider hashing.
                if (record['password'] == data["password"]):
                    ret = {
                        'access_token': create_access_token(identity=data["email"]),
                        'refresh_token': create_refresh_token(identity=data["email"]),
                        'status': "True"
                    }
                    return jsonify(ret), 200
                else:
                    return jsonify({"status": "Invalid username or password"}), 401
            else:
                return jsonify({"status": "Invalid username or password"}), 401
        except Exception as e:
            generate_log('auth', str(e), str(request))
    def logout(self):
        """Stateless logout acknowledgement (no token revocation here)."""
        return jsonify({"status": "Successfully logged out"}), 200
    @jwt_refresh_token_required
    def logout2(self):
        """Logout variant requiring a valid refresh token (returns a plain dict)."""
        return ({"status": "Successfully logged out"}), 200
    @jwt_refresh_token_required
    def refresh(self):
        """Exchange a valid refresh token for a fresh access/refresh pair."""
        try:
            current_user = get_jwt_identity()
            ret = {
                'access_token': create_access_token(identity=current_user),
                'refresh_token': create_refresh_token(identity=current_user),
                'status': "Successfully Refreshed"
            }
            return jsonify(ret), 200
        except Exception as e:
            generate_log('refresh', str(e), 'Creating Refresh Token')
    @jwt_required
    def get_user(self):
        """Return the authenticated user's record with the password removed."""
        try:
            email = get_jwt_identity()
            self.ip = Configuration().GetData()['PrivateIp']
            self.port = Configuration().GetData()['MongoPort']
            self.db = Configuration().GetData()['MongoDB']
            obj = MongoDB()
            obj.ConnectMongo(self.ip, self.port, self.db)
            record = obj.ReadValue("users", email)
            record = ast.literal_eval(record['Data'])
            record.pop('password', None)
            return jsonify(record)
        except Exception as e:
            generate_log('get_user', str(e), 'get_user method')
    @jwt_required
    def get_history(self):
        """Return the authenticated user's stored history, if any."""
        try:
            email = get_jwt_identity()
            self.ip = Configuration().GetData()['PrivateIp']
            self.port = Configuration().GetData()['MongoPort']
            self.db = Configuration().GetData()['MongoDB']
            obj = MongoDB()
            obj.ConnectMongo(self.ip, self.port, self.db)
            record = obj.ReadValue("history", email)
            if record != None:
                record = ast.literal_eval(record["Data"])
                toreturn = {"status":"True","record":record}
            else:
                # record = ast.literal_eval(record["Data"])
                toreturn = {"status": "False"}
            return jsonify(toreturn)
        except Exception as e:
            generate_log('get_history', str(e))
    @jwt_required
    def ibconn(self):
        """Open a test connection to the IB gateway at 127.0.0.1:4002."""
        try:
            i = 0
            i=i+1
            # NOTE(review): i is always 1 here; looks like a leftover counter.
            IbConnection.TestApp('127.0.0.1',"4002",i)
        except Exception as e:
            generate_log('TestTws', str(e))
| [
"mfaizan@codexnow.com"
] | mfaizan@codexnow.com |
de03b8c94f8b5b3b9f58ebb303e721051a4875f7 | 13909b445f71750b2f59c18d1f8c28625f63d11d | /001_Most Frequently Used Words in a Text/build2.py | 8fba8a6d2aa8d48077e374b9f4c161ccce0019fa | [] | no_license | kaivantaylor/Code-Wars | 26d66ecc741556a6959532ec7528cb2e39779ce5 | d4a2f46159dec818208c643d0dfcced236752c56 | refs/heads/master | 2022-02-15T02:49:53.412692 | 2019-08-31T06:19:01 | 2019-08-31T06:19:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | import string
def top_3_words(text):
    """Return up to the three most frequent words in `text`.

    Words are lower-cased and stripped of leading/trailing punctuation.
    Ties are broken by descending word order; tokens that reduce to the
    empty string (pure punctuation) are excluded from the result.
    """
    counts = {}
    for token in text.split():
        word = token.lower().strip(string.punctuation)
        counts[word] = counts.get(word, 0) + 1
    # Descending by (frequency, word) -- same order the original produced by
    # sorting (count, word) tuples ascending and reversing.
    ranked = sorted(counts.items(), key=lambda item: (item[1], item[0]), reverse=True)
    top = []
    for word, _ in ranked:
        if word and len(top) < 3:
            top.append(word)
    return top
| [
"38149120+speedykai@users.noreply.github.com"
] | 38149120+speedykai@users.noreply.github.com |
853dd81191ac5d776ca0be57819da47923026a97 | 10b04efdf156b7fe6e1ed20f72f21435d1284c78 | /client_errors/urls.py | b584ba45d9fd7856bf21c6191233439cdd148c6d | [
"MIT"
] | permissive | sorensen/django-client-errors | 0b512fef6070053732b4c92a74858c7dbf428bff | 3f60cf863dd358d09eead873d86368b0c97660d5 | refs/heads/master | 2021-01-25T08:54:52.688919 | 2012-08-03T18:34:25 | 2012-08-03T18:34:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py |
from django.conf.urls.defaults import patterns, include, url
# URL prefix shared by the client-error endpoints below.
_PREFIX = '__error__'
urlpatterns = patterns('client_errors.views',
    # Serves the app's bundled media files.
    url(
        regex = r'^%s/media/(.*)$' % _PREFIX,
        view = 'media'
    ),
    # Error-reporting endpoint, reversible by the name 'client_error'.
    url(
        regex = r'^%s/client/$' % _PREFIX,
        view = 'error',
        name = 'client_error'
    )
)
| [
"mail@beausorensen.com"
] | mail@beausorensen.com |
f16b930bc98b0cd76057efb34b5997ae34cc6883 | c2506832377a0b7d68f70fe0b0e9bfc1b784c046 | /reports/open-changesets-by-owner-newbie.py | f77fc7768b6edbb3884056be5e938967c07aa5fd | [
"LicenseRef-scancode-public-domain"
] | permissive | nemobis/gerrit-reports | 44e852c515620d529fa92ca7bb3d828531dd51ab | 84577888454d3e2a1e6979d295bf1fb2db93cf8b | refs/heads/master | 2020-12-24T13:00:14.943350 | 2015-02-02T11:48:49 | 2015-02-02T11:49:27 | 12,793,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,611 | py | #! /usr/bin/env python
# Public domain; MZMcBride; 2013
import ConfigParser
import os
import sqlite3
import wikitools
config = ConfigParser.ConfigParser()
config.read([os.path.expanduser('~/.gerrit-reports.ini')])
database_name = config.get('gerrit-reports', 'database_name')
wiki_api_url = config.get('gerrit-reports', 'wiki_api_url')
root_page = config.get('gerrit-reports', 'wiki_root_page')
report_title = root_page + 'Open changesets by newbie owner'
report_template = u'''\
%s
{| class="wikitable sortable plainlinks"
|- style="white-space:nowrap;"
! Owner
! Changesets<br>(total)
! Changesets<br>(mediawiki/*)
! Changesets<br>(mediawiki/core)
%s
|- class="sortbottom"
! Total
! %s
! %s
! %s
|}
%s
'''
conn = sqlite3.connect(database_name)
cursor = conn.cursor()
cursor.execute('''
SELECT
gc_owner,
COUNT(*) as open_total,
SUM( gc_project LIKE 'mediawiki/%' ) as open_mediawiki,
SUM( gc_project == 'mediawiki/core' ) as open_core
FROM changesets
WHERE gc_status = 'NEW'
AND gc_owner NOT IN (
SELECT gc_owner
FROM changesets
WHERE gc_status = 'MERGED'
GROUP BY gc_owner
HAVING COUNT( gc_owner ) >= 5
)
GROUP BY gc_owner;
''')
output = []
open_total = 0
open_mediawiki = 0
open_core = 0
for row in cursor.fetchall():
table_row = u"""
|-
| %s
| [https://gerrit.wikimedia.org/r/#/q/{{urlencode:owner:"%s" status:open}},n,z %s]
| [https://gerrit.wikimedia.org/r/#/q/{{urlencode:owner:"%s" project:^mediawiki/.+ status:open}},n,z %s]
| [https://gerrit.wikimedia.org/r/#/q/{{urlencode:owner:"%s" project:mediawiki/core status:open}},n,z %s]
""".strip() % (row[0],
row[0], row[1],
row[0], row[2],
row[0], row[3])
output.append(table_row)
open_total += int(row[1])
open_mediawiki += int(row[2])
open_core += int(row[3])
wiki = wikitools.Wiki(config.get('gerrit-reports', 'wiki_api_url'))
wiki.login(config.get('gerrit-reports', 'wiki_username'),
config.get('gerrit-reports', 'wiki_password'))
report = wikitools.Page(wiki, report_title)
report_text = report_template % (config.get('gerrit-reports',
'wiki_header_template'),
'\n'.join(output),
open_total, open_mediawiki, open_core,
config.get('gerrit-reports',
'wiki_footer_template'))
report_text = report_text.encode('utf-8')
report.edit(report_text,
summary=config.get('gerrit-reports', 'wiki_edit_summary'),
bot=1)
cursor.close()
conn.close()
| [
"federicoleva@tiscali.it"
] | federicoleva@tiscali.it |
1d27bbb460fe161649bc2030c097b1b42ea69426 | c00572d792ce674fbdf72f54963bad1e300524e8 | /python/code/diff/diff.py | ac6a6a0572ca436b1697e66fc47c06e0daec1c2e | [] | no_license | lovebugss/notes | 4165a18b7e9a69c418859aa6c40b36911670c6ef | ff5be4cb35a500b1c5b8ee96fb3a033a3e334f12 | refs/heads/master | 2021-07-11T16:25:17.214007 | 2020-07-02T00:15:30 | 2020-07-02T00:15:30 | 160,685,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | """文本对比工具
目前支持 并集, 差集, 交集
Usage:
diff.py <file1> <file2> [-t <type>] [-o <output>]
Options:
filename: 文件名
-t : 类型 ins uni dif
"""
from docopt import docopt
from difflib import Differ,HtmlDiff
import sys, difflib
def read(path):
    """Return the lines of the text file at `path` (trailing newlines kept)."""
    with open(path, 'r') as source:
        return list(source)
def main(file1, file2):
    """Write a line-by-line Differ comparison of the two files to stdout."""
    d1 = read(file1)
    d2 = read(file2)
    differ = Differ()
    d = differ.compare(d1, d2)
    # Fixed: HtmlDiff.make_file() requires the two line sequences; calling it
    # with no arguments raised TypeError on every run.  The HTML report is
    # currently unused (kept for the planned `-o <output>` option).
    hd = HtmlDiff().make_file(d1, d2)
    sys.stdout.writelines(d)
if __name__ == '__main__':
    # docopt parses the module docstring above to extract <file1>/<file2>.
    arguments = docopt(__doc__, version='0.0.1')
    main(arguments['<file1>'], arguments['<file2>'])
| [
"renjianpeng@goclouds.cn"
] | renjianpeng@goclouds.cn |
bc687d5bb4cf86f031a3ecd8470bf3c53f0497b8 | 4fd3f6c6ce06199d554101f796c0f6fc7eca074f | /0x04-python-more_data_structures/4-only_diff_elements.py | 383927b3db341ed3619e6a785f0868335cd45a56 | [] | no_license | Joldiazch/holbertonschool-higher_level_programming | 64f453aaf492b5473319a1b5e7e338bc7964fa7b | c9127882ffed3b72b2a517824770adafa63a9042 | refs/heads/master | 2020-09-29T03:12:47.497695 | 2020-05-15T04:05:13 | 2020-05-15T04:05:13 | 226,935,286 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | #!/usr/bin/python3
def only_diff_elements(set_1, set_2):
    """Return a set of the elements present in exactly one of the two sets."""
    return set_1.symmetric_difference(set_2)
| [
"jluis.diaz@udea.edu.co"
] | jluis.diaz@udea.edu.co |
9e59a3cd7de02ea03e9b64c87de384f8e393ea5a | 293b12bf3dc8902c904b1ce7740724c4c5f0e7fd | /librapp/librapp/settings_test.py | 20e60ef31b336945b9671b922ac9a39c98d7d1c3 | [] | no_license | rabinutam/librapp_api | 0fb12d8f2e04fa499bf604280daec30ec8496f88 | 3c15bd19ffab5e4c9673f78b310122f48932c2df | refs/heads/master | 2023-01-04T03:22:03.281319 | 2016-07-11T03:48:02 | 2016-07-11T03:48:02 | 62,640,316 | 0 | 0 | null | 2022-12-26T20:14:55 | 2016-07-05T13:46:57 | Python | UTF-8 | Python | false | false | 4,410 | py | """
Django settings for librapp project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&^dg!j10c5ef$rw=9k_=cfwjmih7t0(6l^pgr@+x5#7ix@q0w*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # added
    'rest_framework',
    'corsheaders',
    'librapp', #module containing models.py
    'docs',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'librapp.middleware.disable_csrf.DisableCSRF',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'librapp.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'librapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# NOTE(review): credentials are hard-coded test values; keep them out of any
# production settings module.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'librdb',
        'USER': 'librapp',
        'PASSWORD': 'librm7dev',
        'HOST': 'localhost',
        #'HOST': '127.0.0.1',
        'PORT': '3306',
        'OPTIONS': { #not sure about this
            #'init_command': 'SET default_storage_engine=INNODB',
        },
    }
}
# DRF test client sends JSON request bodies by default.
REST_FRAMEWORK = {
    'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# customizing authentication
# https://docs.djangoproject.com/en/1.9/topics/auth/customizing/#authentication-backends
# The order of AUTHENTICATION_BACKENDS matters, so if the same username and password is valid in multiple backends,
# Django will stop processing at the first positive match.
AUTHENTICATION_BACKENDS = [
    'librapp.lib.token_auth_backend.TokenAuthBackend',
    'django.contrib.auth.backends.ModelBackend'
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/home/ubuntu/static'
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
# http://www.django-rest-framework.org/topics/ajax-csrf-cors/
# https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = True # temp fix, will revisit
# Location of the built docs served by the 'docs' app -- presumably a Sphinx
# html build; confirm against the docs app configuration.
DOCS_ROOT = os.path.join(BASE_DIR, 'librapp/docs/build/html')
DOCS_ACCESS = 'public'
| [
"gautam.prabin@gmail.com"
] | gautam.prabin@gmail.com |
fcdbcd844b3f96af28ce889e505fa5cfa7b6bae6 | 4b640f91e6cbfaa8a85b31e513db2acfb951f89b | /dynamic page without database/travello/App/models.py | 5be04be88f6fdf153a31fa25fa98083a872726b0 | [
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | rawanvib/Django | 7558084d500045b954480e503a1b2ff102ad9bee | 0471190897874efb475d1d2ee6680cf23590811a | refs/heads/master | 2023-04-20T16:00:20.798926 | 2021-05-13T05:03:09 | 2021-05-13T05:03:09 | 356,786,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | from django.db import models
# Create your models here.
class Destination:
    # Plain annotation-only data holder used instead of a Django model in
    # this "dynamic page without database" demo; no values are assigned here.
    id: int
    name:str
    img : str
    descrip:str
    price:int
    offer:bool | [
"vibharawan12@gmail.com"
] | vibharawan12@gmail.com |
17e52a019c6e9ba4667dc4e73fa5a384e804e5d3 | e24b25b92ee5a6041a807bddf35316858544d0b8 | /hw01/templates/ultra.py | b3dfe299056db3fbed74faf5fee48525d1fcafdf | [] | no_license | isa43461/ADA | 970cae4a66b28f744a3615bb7f430c910375cd11 | f66dec6691bdf152a3320443da20a64046b45095 | refs/heads/master | 2022-11-12T05:08:52.137380 | 2020-06-23T23:12:43 | 2020-06-23T23:12:43 | 253,364,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | from sys import stdin
def solve(num, n):
    """Template stub: compute the answer for one test case of n integers."""
    ans = None
    # ...
    return ans
def main():
    # Input format: a count n, then n integers, repeated; n == 0 terminates.
    n = int(stdin.readline().strip())
    while n!=0:
        num = [ int(stdin.readline()) for _ in range(n) ]
        print(solve(num, n))
        n = int(stdin.readline().strip())
main()
| [
"noreply@github.com"
] | isa43461.noreply@github.com |
fc9d27bcb01c7fe4e3ef1115a053ef8ac3b732cd | 1925c535d439d2d47e27ace779f08be0b2a75750 | /microsoft/implement_rand10_with_rand7.py | 0f89680adba0923d2798aa8ebf8bb297ca0fc640 | [] | no_license | arthurDz/algorithm-studies | ee77d716041671c4b8bb757d8d96f3d10b6589f7 | 1e4d23dd0c40df34f58d71c7ca3e6491be732075 | refs/heads/master | 2023-04-27T12:17:06.209278 | 2021-04-30T20:16:18 | 2021-04-30T20:16:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | # Given a function rand7 which generates a uniform random integer in the range 1 to 7, write a function rand10 which generates a uniform random integer in the range 1 to 10.
# Do NOT use system's Math.random().
# Example 1:
# Input: 1
# Output: [7]
# Example 2:
# Input: 2
# Output: [8,4]
# Example 3:
# Input: 3
# Output: [8,1,10]
# Note:
# rand7 is predefined.
# Each testcase has one argument: n, the number of times that rand10 is called.
# Follow up:
# What is the expected value for the number of calls to rand7() function?
# Could you minimize the number of calls to rand7()?
def rand10(self):
    # Rejection sampling: rand7() + (rand7() - 1) * 7 is uniform over 1..49,
    # and conditioning on temp <= 10 keeps the accepted value uniform on 1..10.
    # NOTE(review): only 10/49 draws are accepted; accepting temp <= 40 and
    # returning 1 + (temp - 1) % 10 would stay uniform with far fewer rand7()
    # calls -- TODO confirm before changing.
    temp = rand7() + (rand7() - 1) * 7
    while temp > 10:
        temp = rand7() + (rand7() - 1) * 7
    return temp | [
"yunfan.yang@minerva.kgi.edu"
] | yunfan.yang@minerva.kgi.edu |
82723bfb1a14e8ec87516d1c338ac304371d2d50 | 4f5d128cdc02d513067c4b55895ab93705ed6b3d | /plugin_manager/launch_window/views.py | 6da9fcdbbcdf6a26138f09314fd87e772140f717 | [
"MIT"
] | permissive | ahharu/plugin-manager | 3e2834ce0e616fe1e8197afbe96ef4a5a1fd6bb9 | 43d5e2c6e25ed8f50eedf7fd876fbc04f75d94bb | refs/heads/master | 2021-01-10T14:53:28.352401 | 2016-10-05T10:30:01 | 2016-10-05T10:30:01 | 47,773,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from django_tables2.views import SingleTableView
from plugin_manager.core.mixins.views import MultipleGroupRequiredMixin
from plugin_manager.launch_window import models, tables, forms
from django.conf import settings
class LaunchWindowList(MultipleGroupRequiredMixin, SingleTableView):
    """Paginated table listing of all launch windows (Admins and Deployers)."""
    group_required = ['Admin', 'Deployer', ]
    table_class = tables.LaunchWindowTable
    model = models.LaunchWindow
    # Page size comes from settings.NUM_RESULTS_PER_PAGE when defined.
    table_pagination = {"per_page": getattr(settings, "NUM_RESULTS_PER_PAGE", None)}
class LaunchWindowDetail(MultipleGroupRequiredMixin, DetailView):
    """Read-only detail page for a single launch window (Admins and Deployers)."""
    group_required = ['Admin', 'Deployer', ]
    model = models.LaunchWindow
class LaunchWindowCreate(MultipleGroupRequiredMixin, CreateView):
    """View for creating a launch window."""
    group_required = ['Admin', 'Deployer', ]
    model = models.LaunchWindow
    form_class = forms.LaunchWindowCreateForm
    template_name_suffix = '_create'

    def form_valid(self, form):
        """Delegate saving to the parent view, then flash a success message."""
        response = super(LaunchWindowCreate, self).form_valid(form)
        messages.success(self.request, 'Launch Window {} Successfully Created'.format(self.object))
        return response

    def get_success_url(self):
        """Redirect to the detail view of the launch window just created."""
        return reverse('launch_window_launchwindow_detail', kwargs={'pk': self.object.pk})
class LaunchWindowUpdate(MultipleGroupRequiredMixin, UpdateView):
    """Admin-only view for editing an existing launch window."""
    group_required = ['Admin', ]
    model = models.LaunchWindow
    form_class = forms.LaunchWindowUpdateForm
    template_name_suffix = '_update'

    def form_valid(self, form):
        """Delegate saving to the parent view, then flash a success message."""
        response = super(LaunchWindowUpdate, self).form_valid(form)
        messages.success(self.request, 'Launch Window {} Successfully Updated'.format(self.object))
        return response

    def get_success_url(self):
        """Redirect to the detail view of the launch window just updated."""
        return reverse('launch_window_launchwindow_detail', kwargs={'pk': self.object.pk})
class LaunchWindowDelete(MultipleGroupRequiredMixin, DeleteView):
    """Admin-only view for deleting a launch window."""
    group_required = 'Admin'
    model = models.LaunchWindow
    success_url = reverse_lazy('launch_window_launchwindow_list')

    def delete(self, request, *args, **kwargs):
        """Flash a success message, then perform the deletion."""
        messages.success(self.request, 'Launch Window {} Successfully Deleted'.format(self.get_object()))
        # Bug fix: the bound super() call must not receive `self` again --
        # the original passed it as the first positional argument, shifting
        # every parameter by one (so `request` received `self`, etc.).
        return super(LaunchWindowDelete, self).delete(request, *args, **kwargs)
| [
"alex.ruiz@scytl.com"
] | alex.ruiz@scytl.com |
a7c6d4cd74e48c1c702c3b7e8fe87d9224d65306 | 22125054e91da837a611ab7e441df7b957ccc550 | /part_1/readable/main.py | 3ec7b593fdd99fb76b5504505b40d1ebe3a5af3f | [] | no_license | ixxnafri/Enterprise-Networking | 9184b77763ab5d3e5426690d6e26cefcee1ac842 | b8c36db172d8983e0a946c86e4db4c73b87a1697 | refs/heads/master | 2020-04-19T21:25:33.377092 | 2019-01-31T01:18:03 | 2019-01-31T01:18:03 | 168,440,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #! /usr/bin/env python
import pandas as pd
import matplotlib.pyplot as plt

# Load the '|'-separated log: column 2 is named 'action', column 5 'prefix'.
# (Presumably BGP/routing update records -- confirm against the producer of q1_out.txt.)
data = pd.read_csv("q1_out.txt", delimiter="|", usecols=[2, 5],
                   names=['action', 'prefix'], dtype={'action': 'str', 'prefix': 'str'})

# Number of 'action' entries per prefix, least frequent first.
counted = data.groupby(['prefix'])['action'].count().reset_index(name='count').sort_values(['count'], ascending=True)

# Bug fix: `print counted` is Python-2-only syntax; print(counted) works on
# both Python 2 and Python 3.
print(counted)

# Optional plotting, left disabled by the original author:
#counted.unstack().plot()
#plt.legend(ncol=14,loc='upper center',bbox_to_anchor=(0.5,1.15))
#plt.show()
| [
"ixz@wirelessprv-10-193-58-164.near.illinois.edu"
] | ixz@wirelessprv-10-193-58-164.near.illinois.edu |
c5bc8947d0baa64518eee129fd2101da1f01cf22 | c4b813eca5045d636cd2bea0f41960904e5bc3b3 | /using_asyncio/chapter03/05future.py | f22f298ba89c44d76f8a59802f3744edf9ec22c5 | [
"MIT"
] | permissive | renyalvarado/Using-Asyncio-in-Python---Exercises | 21ae336cd016f57f0c7b4a146656dea7f9f26d68 | f558eb72fd30ed88aa8ccf88c2b601bd7c3995dc | refs/heads/main | 2023-04-18T13:06:43.030345 | 2021-04-30T20:56:40 | 2021-04-30T20:56:40 | 341,087,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | import asyncio
async def main(f: asyncio.Future) -> None:
    """Sleep briefly, then resolve *f* with a completion message."""
    await asyncio.sleep(1)
    f.set_result("I have finished.")


# Drive the future to completion on the default event loop, printing its
# done() state before and after.
event_loop = asyncio.get_event_loop()
future = asyncio.Future()
print(f"fut: {future.done()}")
event_loop.create_task(main(future))
event_loop.run_until_complete(future)
print(f"fut: {future.done()}")
| [
"renyalvarado@gmail.com"
] | renyalvarado@gmail.com |
e1689a0347c11a71ab603021ca919b53a381eec6 | 8ba51848f957ff090d18956157cccba7a94f0f27 | /simulateCircuitsScript.py | 813d65de57d4e3b372acd10b2d60fd7d2647f978 | [
"BSD-3-Clause"
] | permissive | achoora/qubit-efficient-entanglement-spectroscopy | 1dba6e48891bfe17d0f7b6922972767b330d9c57 | 9a3bbc178b84ea08b43e76081a3f38864a8bf498 | refs/heads/main | 2023-07-15T17:25:46.919943 | 2021-08-25T20:16:21 | 2021-08-25T20:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,581 | py | """
This file runs simulations of various entanglement spectroscopy circuits using
qiskit and saves and plots the results.
See https://arxiv.org/abs/2010.03080
Authors:
Justin Yirka yirka@utexas.edu
Yigit Subasi ysubasi@lanl.gov
"""
"""
This file is an example of how to use the module entSpectroscopy.py.
Most of the experiments for https://arxiv.org/abs/2010.03080 were run using scripts
like this.
You can run it from the command line like:
python simulateCircuitsScript.py 2 20 1 1000 0.68 -f './folder'
or
python3 simulateCircuitsScript.py 2 6 1 20000 0.68 0.95 -f './folder' $'Graph Title' 1 5 3 2 800 800 10**(-7) 0.01 0.0005 0.0025 0 &
Change the parameters after `simulateCircuitsScript.py` based on the description below.
This script needs `entSpectroscopy.py` and `idle_scheduler.py` available to import.
`idle_scheduler` is available at https://github.com/gadial/qiskit-aer/blob/ff56889c3cf0486b1ad094634e88d7e756b6db3c/qiskit/providers/aer/noise/utils/idle_scheduler.py
You can adjust the number of qubits and the paramters for the noise model from the command line, like above.
Other changes will require modifying this script. Again, this is just one example of using entSpectroscopy.
If you want to change which circuits are simulated, then you'll have to edit the tuple `all_circuits`
in this file.
If you want a noise model different than the models provided in entSpectroscopy, then you'll have
to write you own.
If you want to simulate the circuits on different quantum states than the default state provided in
entSpectroscopy, then you'll have to write you own function.
"""
"""
Command line arguments:
1: Min n
2: Max n (exclusive, i.e. we compute up to maxN - 1)
3: k (size of rho_A) (entSpectroscopy is really only defined for k=1 right now)
4: Number of shots
5: Confidence level for error bars (as a decimal)
6: Noise choice: -f -t -r -g or -n
for full noise model, thermal noise only, readout noise only, gate and readout noise only, or no noise.
The most general is -f.
-t and -r are for convenience. You could achieve -r with -f if you set many
parameters to 0, but this makes it easier.
7: Output directory (what folder should the output files be placed in?) (don't include a slash at the end)
Optional:
8: Plot subtitle
Optional, but if give one, must give all:
If none of these are given, then we use the default error parameters listed in `entSpectroscopy.py`.
Note that while we've made this to be flexible, we have made assumptions; for example, this script
only takes 1 parameter for readout error, assuming that 1 should flip to 0 with the same probability
that 0 flips to 1. If you want more customization... write your own script.
8: Single qubit gate time
10: CX time
11: Measure time
12: Reset time
13: T1
14: T2
15: Thermal Population
16: Readout error probability
17: Single qubit gate error
18: CX gate error
19: Reset gate error
"""
######################
###### Imports #######
######################
import sys, os
from time import perf_counter
import numpy as np
from scipy.stats import linregress
from scipy.stats import t
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from entSpectroscopy import *
from qiskit import execute
from qiskit.providers.aer import QasmSimulator
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import thermal_relaxation_error, ReadoutError
###########################################
###### Command Line Args/Parameters #######
###########################################
# Positional command-line arguments; see the module docstring for the full list.
args = sys.argv
N_LIST = range(int(args[1]), int(args[2]))  # min n .. max n - 1 (exclusive upper bound)
K = int(args[3])  # size of rho_A
SHOTS = int(args[4])  # shots per circuit execution
CONFIDENCE_LEVEL = float(args[5])  # error-bar confidence level, as a decimal
NOISE_MODEL_CHOICE = args[6]  # one of -f, -t, -r, -g, -n
OUTPUT_DIRECTORY = args[7]  # output folder, no trailing slash
# Optional plot subtitle.
if len(args) >= 9:
    PLOT_SUBTITLE_STRING = "\n" + args[8]
else:
    PLOT_SUBTITLE_STRING = ""
# Optional noise parameters (all-or-nothing); otherwise fall back to the
# defaults defined in entSpectroscopy.
if len(args) >= 10:
    # Gate/operation durations used when scheduling thermal relaxation.
    op_times = {
        "u1" : int(args[9]),
        "u2" : int(args[9]),
        "u3" : int(args[9]),
        "cx" : int(args[10]),
        "measure" : int(args[11]),
        "reset" : int(args[12]),
        "id" : 1
    }
    # Keyword arguments forwarded to the noise-model constructor.  Note the
    # same CLI value feeds both flip directions of the readout error, and the
    # same value feeds both depolarization and Pauli error of each gate class.
    noiseArgs = {
        "op_times" : op_times,
        "t1" : float(args[13]),
        "t2" : float(args[14]),
        "thermal_population_1" : float(args[15]),
        "measurement_error_prob0_given_1" : float(args[16]),
        "measurement_error_prob1_given_0" : float(args[16]),
        "depolarization_prob_single_qubit" : float(args[17]),
        "pauli_error_prob_single_qubit" : float(args[17]),
        "depolarization_prob_two_qubit" : float(args[18]),
        "pauli_error_prob_two_qubit" : float(args[18]),
        "depolarization_prob_reset" : float(args[19]),
        "pauli_error_prob_reset" : float(args[19])
    }
else:
    op_times = DEFAULT_OP_TIMES
    noiseArgs = {}
# Map the noise flag onto the matching noise-model constructor (None = noiseless).
if NOISE_MODEL_CHOICE == "-f":
    noise_model_generator = construct_noise_model_full
elif NOISE_MODEL_CHOICE == "-t":
    noise_model_generator = construct_noise_model_thermalOnly
elif NOISE_MODEL_CHOICE == "-r":
    noise_model_generator = construct_noise_model_readoutOnly
elif NOISE_MODEL_CHOICE == "-g":
    noise_model_generator = construct_noise_model_noThermal
elif NOISE_MODEL_CHOICE == "-n":
    noise_model_generator = None
else:
    raise Exception("Did not recognize the noise flag you passed. Only -f,-t,-r,-g or -n are accepted.")
#####################################
###### Which circuits to run? #######
#####################################
# Order of tuple fields:
#   [0] generating function (must accept (n, k, prep_state))
#   [1] plot label                [2] name written to the results file
#   [3] results list (per theta)  [4] matplotlib color
#   [5] circuit type flag: 'h' = H-test, 't' = two-copy test
#   [6] slopes (per n)            [7] slope standard errors (per n)
hTestOrig = (hTest_original_circuit, "H-Test Original", "H-Test Original", [], 'c', 'h', [], [])
hTestEff4k = (hTest_qubitEfficient4k_circuit, "H-Test Q.Eff. 4k", "H-Test Qubit Eff 4k+1", [], 'm', 'h', [], [])
hTestEff3k = (hTest_qubitEfficient3k_circuit, "H-Test Q.Eff. 3k", "H-Test Qubit Eff 3k+1", [], 'k', 'h', [], [])
hTestEff_alt4k = (hTest_qubitEfficient_alternative4k_circ, "H-Test 4k Alt", "H-Test Qubit Eff Alternative 4k", [], 'r', 'h', [], [])
hTestEff_alt3k = (hTest_qubitEfficient_alternative3k_circ, "H-Test 3k Alt", "H-Test Qubit Eff Alternative 3k", [], 'y', 'h', [], [])
twoCopyOrig = (twoCopyTest_original_circuit, "Two-Copy Orig", "Two-copy test Original", [], 'g', 't', [], [])
twoCopyEff6k = (twoCopyTest_qubitEfficient6k_circuit, "Two-Copy Q.Eff. 6k", "Two-copy test Qubit Eff 6k", [], 'r', 't', [], [])
twoCopyEff4k = (twoCopyTest_qubitEfficient4k_circuit, "Two-Copy Q.Eff. 4k", "Two-copy test Qubit Eff 4k", [], 'y', 't', [], [])
# Edit this list to specify which circuits to run or not to run:
all_circuits = (hTestOrig, hTestEff4k, hTestEff3k, hTestEff_alt4k, hTestEff_alt3k, twoCopyOrig, twoCopyEff6k, twoCopyEff4k)
######################################
###### Thetas and Exact Values #######
######################################
# Change these functions if you want to change the inputs and ideal outputs
NUM_THETAS = 20  # number of theta sample points per n

def getThetaList(n):
    """
    Returns list of 20 thetas which give evenly spaced values of Tr(rho_A^n).
    Hardcoded values for convenience.
    Assumes we're using the default state prep function from spectroscopy, with k=1.
    These values were generated using this Mathematica code:
        n = 7;
        numIntervals = 19;
        numDigitsPrecision = 10;
        outs = Subdivide[2^(-(n-1)), 1, numIntervals];
        trace[t] = Sum[ Binomial[n,i]*Sin[t]^i, {i,0,n, 2}] / 2^(n-1);
        result = N[Map[Reduce[{trace[t] == #, 0 <= t <= Pi/2}, t, Reals]&, outs], numDigitsPrecision];
        StringReplace[ToString[result], {"t == "->"","{"->"[","}"->"]"}]

    NOTE(review): the tables for n == 2 and n == 3 are byte-identical, which
    the generator above should not produce for different n -- this looks like
    a copy-paste; confirm the n == 2 values against the Mathematica output.
    """
    if n == 2:
        theta_list = [0, 0.2314773640, 0.3304226479, 0.4086378551, 0.4766796116, 0.5386634661, 0.5967431472, 0.6522511548, 0.7061190231, 0.7590702093, 0.8117261175, 0.8646773037, 0.9185451720, 0.9740531796, 1.032132861, 1.094116715, 1.162158472, 1.240373679, 1.339318963, 1.570796327]
    elif n == 3:
        theta_list = [0, 0.2314773640, 0.3304226479, 0.4086378551, 0.4766796116, 0.5386634661, 0.5967431472, 0.6522511548, 0.7061190231, 0.7590702093, 0.8117261175, 0.8646773037, 0.9185451720, 0.9740531796, 1.032132861, 1.094116715, 1.162158472, 1.240373679, 1.339318963, 1.570796327]
    elif n == 4:
        theta_list = [0, 0.2491203095, 0.3543433131, 0.4366865759, 0.5076378996, 0.5716852714, 0.6311768187, 0.6875602412, 0.7418396403, 0.7947846259, 0.8470441546, 0.8992213155, 0.9519358743, 1.005893804, 1.061988158, 1.121480238, 1.186392369, 1.260572559, 1.353876403, 1.570796327]
    elif n == 5:
        theta_list = [0, 0.2794021424, 0.3935921523, 0.4808709997, 0.5546242519, 0.6201165657, 0.6801030414, 0.7362716601, 0.7897777908, 0.8414889475, 0.8921167248, 0.9423010413, 0.9926770691, 1.043945273, 1.096968442, 1.152941697, 1.213757347, 1.282990475, 1.369767528, 1.570796327]
    elif n == 6:
        theta_list = [0, 0.3199493272, 0.4426490985, 0.5332414006, 0.6079996178, 0.6732468307, 0.7322299041, 0.7868946105, 0.8385407416, 0.8881187231, 0.9363862185, 0.9840045982, 1.031611489, 1.079892021, 1.129672738, 1.182081773, 1.238888833, 1.303420250, 1.384147665, 1.570796327]
    elif n == 7:
        theta_list = [0, 0.3678313758, 0.4959520472, 0.5872307295, 0.6610521651, 0.7246517493, 0.7816281774, 0.8340827170, 0.8833879597, 0.9305272669, 0.9762694175, 1.021272932, 1.066161499, 1.111594881, 1.158359191, 1.207517971, 1.260730410, 1.321105684, 1.396551522, 1.570796327]
    elif n == 8:
        theta_list = [0, 0.4190980610, 0.5487218956, 0.6385481712, 0.7102223398, 0.7714770612, 0.8260582020, 0.8761131478, 0.9230247583, 0.9677718001, 1.011111010, 1.053683798, 1.096091665, 1.138965366, 1.183051185, 1.229353728, 1.279435279, 1.336218322, 1.407129918, 1.570796327]
    elif n == 9:
        theta_list = [0, 0.4699483644, 0.5980580911, 0.6852691420, 0.7543035643, 0.8130292719, 0.8651960287, 0.9129307474, 0.9575922479, 1.000135893, 1.041295848, 1.081690781, 1.121897674, 1.162518577, 1.204262943, 1.248083017, 1.295456906, 1.349146792, 1.416169103, 1.570796327]
    elif n == 10:
        theta_list = [0, 0.5178871080, 0.6428229977, 0.7269938361, 0.7933187827, 0.8495895696, 0.8994862332, 0.9450840890, 0.9877032578, 1.028268498, 1.067488042, 1.105956733, 1.144227554, 1.182875819, 1.222577841, 1.264239767, 1.309266550, 1.360282005, 1.423949179, 1.570796327]
    elif n == 11:
        theta_list = [0, 0.5618311107, 0.6829308316, 0.7640349883, 0.8277746957, 0.8817666042, 0.9295904649, 0.9732586582, 1.014048186, 1.052851733, 1.090351606, 1.127119732, 1.163686736, 1.200603658, 1.238517270, 1.278293144, 1.321272326, 1.369958243, 1.430707030, 1.570796327]
    elif n == 12:
        theta_list = [0, 0.6016113123, 0.7187601932, 0.7969478414, 0.8582967603, 0.9102117319, 0.9561635201, 0.9980997455, 1.037254578, 1.074489673, 1.110462801, 1.145724613, 1.180785326, 1.216174132, 1.252511494, 1.290627125, 1.331805837, 1.378445385, 1.436632811, 1.570796327]
    elif n == 13:
        theta_list = [0, 0.6374947672, 0.7508314977, 0.8263148487, 0.8854801079, 0.9355135269, 0.9797781857, 1.020159260, 1.057850304, 1.093683938, 1.128295237, 1.162215377, 1.195936075, 1.229966875, 1.264904742, 1.301547495, 1.341130084, 1.385956703, 1.441876305, 1.570796327]
    elif n == 14:
        theta_list = [0, 0.6699013348, 0.7796628239, 0.8526634711, 0.9098410717, 0.9581700326, 1.000911168, 1.039891076, 1.076265547, 1.110840399, 1.144229891, 1.176947451, 1.209468031, 1.242283462, 1.275969611, 1.311295749, 1.349452268, 1.392659851, 1.446555022, 1.570796327]
    elif n == 15:
        theta_list = [0, 0.6992688969, 0.8057167271, 0.8764438904, 0.9318105945, 0.9785912864, 1.019951286, 1.057662855, 1.092846933, 1.126284704, 1.158571373, 1.190204182, 1.221642854, 1.253363183, 1.285921989, 1.320062813, 1.356935956, 1.398686995, 1.450761486, 1.570796327]
    elif n == 16:
        theta_list = [0, 0.7259992723, 0.8293876787, 0.8980308021, 0.9517428409, 0.9971115976, 1.037213790, 1.073771462, 1.107873437, 1.140278259, 1.171563669, 1.202212111, 1.232669418, 1.263396793, 1.294933762, 1.328000541, 1.363711112, 1.404143059, 1.454569088, 1.570796327]
    elif n == 17:
        theta_list = [0, 0.7504420305, 0.8510056543, 0.9177332773, 0.9699277528, 1.014003334, 1.052954644, 1.088457310, 1.121570524, 1.153031985, 1.183403375, 1.213153560, 1.242715652, 1.272537518, 1.303142892, 1.335230726, 1.369881908, 1.409112095, 1.458036592, 1.570796327]
    elif n == 18:
        theta_list = [0, 0.7728943275, 0.8708449345, 0.9358060701, 0.9866032111, 1.029489282, 1.067382764, 1.101916298, 1.134121676, 1.164717338, 1.194250156, 1.223176488, 1.251917747, 1.280909547, 1.310661141, 1.341851985, 1.375532656, 1.413662103, 1.461211518, 1.570796327]
    elif n == 19:
        theta_list = [0, 0.7936065797, 0.8891335947, 0.9524599192, 1.001965421, 1.043752841, 1.080669888, 1.114309291, 1.145677424, 1.175474885, 1.204234833, 1.232402061, 1.260387176, 1.288614493, 1.317579915, 1.347944949, 1.380732269, 1.417848650, 1.464132692, 1.570796327]
    elif n == 20:
        theta_list = [0, 0.8127895528, 0.9060620298, 0.9678701650, 1.016177317, 1.056946117, 1.092958313, 1.125769477, 1.156362330, 1.185420882, 1.213465554, 1.240930408, 1.268216032, 1.295736274, 1.323974685, 1.353576174, 1.385537623, 1.421717585, 1.466832141, 1.570796327]
    else:
        raise Exception("Thetas have only been prepared for n = 2 to 20.")
    return theta_list
def getExactTraces(n):
    """Return NUM_THETAS evenly spaced trace values from 2^-(n-1) up to 1.

    The theta grids produced by getThetaList were chosen so that the exact
    values of Tr(rho_A^n) are precisely this evenly spaced list.  Use
    `computeExactTraces_forDefaultStatePrep` in spectroscopy for arbitrary
    thetas.
    """
    smallest_trace = 2 ** (-(n - 1))
    return np.linspace(smallest_trace, 1, NUM_THETAS)
###########################
###### *** MAIN *** #######
###########################
# Simulator backend shared by all runs.
backend = QasmSimulator()
# Open line-buffered output files; makedirs ensures the output folder exists.
os.makedirs(os.path.dirname(OUTPUT_DIRECTORY + "/results.txt"), exist_ok=True)
resultsFile = open(OUTPUT_DIRECTORY + "/results.txt", "w", buffering=1)
logFile = open(OUTPUT_DIRECTORY + "/results_log.txt", "w", buffering=1)
resultsFile.write("Arguments: \n" + str(args) + "\n\n")
# Record which circuits are being simulated (results-file name of each tuple).
for r in all_circuits:
    resultsFile.write(r[1] + "\n")
resultsFile.write("\n")
logFile.write("STARTING \n")
# Wall-clock bookkeeping for the log file.
nStartTime = perf_counter()
lastTime = nStartTime
newTime = nStartTime
for n in N_LIST:
    # r[3] accumulates the estimated traces for this n; reset between n values.
    for r in all_circuits:
        r[3].clear()
    theta_list = getThetaList(n)
    exact_values = getExactTraces(n)
    resultsFile.write("n = " + str(n) + "\n")
    resultsFile.write("Thetas: " + str(theta_list) + "\n")
    resultsFile.write("Exact Values: " + str(exact_values) + "\n")
    for thetaCounter, theta in enumerate(theta_list):
        prep_state = generate_default_prep_state_instruction(theta, K)
        for circTuple in all_circuits:
            circuit = circTuple[0](n, K, prep_state)
            if NOISE_MODEL_CHOICE != "-n":  # Noisy simulation
                # Insert identity/idle scheduling so thermal relaxation can be applied.
                circuitThermalReady = construct_modified_circuit_for_thermal_noise(circuit, op_times)
                noise = noise_model_generator(len(circuit.qubits), **noiseArgs)
                counts = execute(circuitThermalReady, backend=backend, shots=SHOTS, basis_gates=noise.basis_gates, noise_model=noise).result().get_counts()
            else:  # Noiseless simulation
                counts = execute(circuit, backend=backend, shots=SHOTS).result().get_counts()
            # circTuple[5] is the algorithm type flag: 'h' = H-test, 't' = two-copy test.
            if circTuple[5] == "h":
                answer = hTest_computeAnswer(counts)
            elif circTuple[5] == "t":
                answer = twoCopyTest_computeAnswer(n, K, counts)
            circTuple[3].append(answer)
            logFile.write(circTuple[2] + ". N = " + str(n) + ". Theta number " + str(thetaCounter) + "\n")
            newTime = perf_counter()
            logFile.write("Time to complete this simulation: " + str(newTime - lastTime) + "\n")
            lastTime = newTime
    # Dump the raw per-circuit results for this n.
    for r in all_circuits:
        resultsFile.write(str(r[3]) + "\n")
    resultsFile.write("\n")
    newTime = perf_counter()
    logFile.write("Total time taken for N=" + str(n) + " was " + str(newTime - nStartTime) + "\n")
    logFile.write("\n")
    nStartTime = newTime
    # Normal Plot: estimated traces vs theta, with error bars from the
    # compute*ErrorBars helpers; 'exact' curve in blue.
    plt.clf()
    plt.axis([-.02, 1.6, 0, 1.02])
    plt.plot(theta_list, exact_values, 'b')
    for r in all_circuits:
        if r[5] == 'h':
            plt.errorbar(theta_list, r[3], yerr = hTest_computeErrorBars(SHOTS, r[3], CONFIDENCE_LEVEL), color = r[4], linestyle = '--')
        elif r[5] == 't':
            plt.errorbar(theta_list, r[3], yerr = twoCopyTest_computeErrorBars(SHOTS, r[3], CONFIDENCE_LEVEL), color = r[4], linestyle = '-')
        else:
            # (Message mentions r[4] but the type flag actually checked is r[5].)
            print("ERROR! Unknown algorithm type in r[4]. Don't know how to plot.")
    plt.legend(["exact"] + [r[1] for r in all_circuits])
    plt.title("N = " + str(n) + " " + PLOT_SUBTITLE_STRING)
    plt.tight_layout()
    plt.savefig(OUTPUT_DIRECTORY + "/plot_n" +str(n)+ ".png", dpi=300)
    # Linear Plot: estimated vs exact trace (ideal result is the y = x line).
    plt.clf()
    plt.axis([-.02, 1.02, 0, 1.02])
    plt.plot(exact_values, exact_values, 'b')
    for r in all_circuits:
        if r[5] == 'h':
            plt.errorbar(exact_values, r[3], yerr = hTest_computeErrorBars(SHOTS, r[3], CONFIDENCE_LEVEL), color = r[4], linestyle = '--')
        elif r[5] == 't':
            plt.errorbar(exact_values, r[3], yerr = twoCopyTest_computeErrorBars(SHOTS, r[3], CONFIDENCE_LEVEL), color = r[4], linestyle = '-')
        else:
            print("ERROR! Unknown algorithm type in r[4]. Don't know how to plot.")
    plt.legend(["exact"] + [r[1] for r in all_circuits])
    plt.title("N = " + str(n) + " " + PLOT_SUBTITLE_STRING)
    plt.tight_layout()
    plt.savefig(OUTPUT_DIRECTORY + "/linearPlot_n" +str(n)+ ".png", dpi=300)
    # Calculate slopes: regress estimated traces against exact ones
    # (slope 1 would be a perfect estimator).
    resultsFile.write("Slopes, Std Err, R squared: \n")
    for r in all_circuits:
        regression = linregress(exact_values, r[3])
        resultsFile.write(str(regression[0]) + " , " + str(regression[4]) + " , " + str(regression[2]) + "\n")
        r[6].append(regression[0])  # slope
        r[7].append(regression[4])  # stderr
    resultsFile.write("\n")
resultsFile.close()
logFile.close()
# Plot Slopes
def calculateSlopeError(numPoints, stderr, confidence_level):
    """Margin of error for a regression slope at a given confidence level.

    numPoints : number of (x, y) points the linear regression was based on
    stderr : the slope's standard error, e.g. as reported by linregress
    confidence_level : a decimal, such as 0.95

    Uses the two-sided Student-t critical value with numPoints - 2 degrees
    of freedom, following the procedure at
    https://stattrek.com/regression/slope-confidence-interval.aspx
    """
    tail_probability = (1 - confidence_level) / 2
    critical_value = t.ppf(1 - tail_probability, numPoints - 2)
    return critical_value * stderr
# Summary plot: fitted slope (with confidence-interval error bars) vs n.
plt.clf()
plt.axis([N_LIST[0], N_LIST[-1] + 1, -0.1, 1.1])
# The ideal slope is exactly 1 for every n.
plt.plot(N_LIST, [1]*len(N_LIST), 'b')
for circTuple in all_circuits:
    slopes = circTuple[6]
    stdErrors = circTuple[7]
    errors = [calculateSlopeError(NUM_THETAS, stderr, CONFIDENCE_LEVEL) for stderr in stdErrors]
    # Dashed lines for H-test circuits, solid lines for two-copy tests.
    if circTuple[5] == "h":
        linestyle = '--'
    elif circTuple[5] == "t":
        linestyle = '-'
    # NOTE(review): an unknown type flag would leave `linestyle` unbound or
    # stale from the previous iteration; all current tuples use 'h' or 't'.
    plt.errorbar(N_LIST, slopes, yerr = errors, color = circTuple[4], linestyle = linestyle)
plt.legend(["exact"] + [r[1] for r in all_circuits])
plt.title("Slopes. " + PLOT_SUBTITLE_STRING)
plt.tight_layout()
plt.savefig(OUTPUT_DIRECTORY + "/slopePlot.png", dpi=300)
| [
"noreply@github.com"
] | achoora.noreply@github.com |
fc27042eaae21fea6ee015e954980fd672a2c584 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.40/8/1569575464.py | 03f7325643668c7c922036efc5b29701c3522051 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,719 | py | import functools
import typing
import string
import random
import pytest
class Leaf0:
    """Minimal leaf node: stores a single value."""

    def __init__(self, value):
        self.value = value
class Node0:
    """Binary-tree node with left/right children and an optional value."""

    def __init__(self, left, right, value=None):
        self.left = left
        self.right = right
        self.value = value
## Lösung Teil 1.
class Leaf(Leaf0):
    """Leaf of a binary tree.

    Bug fixes versus the original:
      * it was declared with ``def`` instead of ``class``, so ``Leaf(1)``
        just returned None instead of a leaf object;
      * preorder/postorder returned the bare value, but ``Node``
        concatenates child traversals with ``+=`` and therefore needs a
        list from every child.
    """

    def __init__(self, *args):
        super().__init__(*args)

    def preorder(self) -> list:
        """
        Returns a list of the leaf in preorder without any None values.
        """
        return [self.value]

    def postorder(self) -> list:
        """
        Returns a list of the leaf in postorder without any None values.
        """
        return [self.value]
class Node(Node0):
    """Inner binary-tree node supporting preorder/postorder traversal."""

    def __init__(self, *args):
        super().__init__(*args)

    def preorder(self) -> list:
        """
        Returns a list of the node in preorder (value, left, right)
        without any None values.
        """
        ls = []
        # Truthiness test also drops a value of 0, not just None --
        # matches the original behavior.
        if self.value:
            ls.append(self.value)
        if self.left:
            ls += self.left.preorder()
        if self.right:
            ls += self.right.preorder()
        return ls

    def postorder(self) -> list:
        """
        Returns a list of the node in postorder (left, right, value)
        without any None values.
        """
        ls = []
        # Bug fix: recurse with postorder() on the children; the original
        # called preorder(), which gives a wrong order for trees deeper
        # than one level.
        if self.left:
            ls += self.left.postorder()
        if self.right:
            ls += self.right.postorder()
        if self.value:
            ls.append(self.value)
        return ls
######################################################################
## Lösung Teil 2.
def test_tree():
    # Postorder of a two-leaf tree: left leaf, right leaf, then the root value.
    assert Node (Leaf(1), Leaf(2), 3).postorder() == [1, 2, 3]
######################################################################
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
f0b978319c8d6d217612f9f4b4e7602292c72ea1 | 3a0a437b8b9ec11a6852bd75c6305f63463183db | /base/excepts.py | 68228285c23d8ec85c6566ca68feac8b2aa8c322 | [] | no_license | vanshin/mss_core | 2ca827a48d670c6ec52a96ae919716d9d624250a | 83e9c0792860654e4f863e96dcb3bff464c7c3b6 | refs/heads/master | 2021-06-25T12:09:23.594977 | 2019-04-19T04:54:37 | 2019-04-19T04:54:37 | 88,000,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | class BaseError(Exception):
def __init__(self, code, msg):
self.code = code
self.msg = msg
class ParamError(BaseError):
    """Parameter-validation failure; uses error code '4001'."""
    def __init__(self, msg='参数错误'):
        # BaseError.__init__ already stores msg, so the duplicate
        # `self.msg = msg` assignment was removed as dead code.
        BaseError.__init__(self, '4001', msg)
class ServerError(BaseError):
    """Internal service failure; uses error code '4002'."""
    def __init__(self, msg='服务错误'):
        # BaseError.__init__ already stores msg, so the duplicate
        # `self.msg = msg` assignment was removed as dead code.
        BaseError.__init__(self, '4002', msg)
| [
"kfx721@hotmail.com"
] | kfx721@hotmail.com |
47a9b1d27f0c09c17766ed65797f6e07b8e706b2 | 20fda4969798cfacd45904e3d2a39520ef4798e3 | /Talker.py | 92177df1399ad8ee8a55205622c4de6a0481b00d | [] | no_license | pilad0hwtts/Hakaton_AnaSantin | 987bda5370a240f29f8077c1e51258d8fef85332 | df57bcba5675856aa8c091e5f1fcc1ffe804a133 | refs/heads/master | 2020-04-08T01:32:06.254134 | 2018-11-24T07:33:03 | 2018-11-24T07:33:03 | 158,898,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | import random
class Talker():
    """Hands out a calming piece of advice, refreshing it every tenth request."""

    def __init__(self):
        self.message = ""
        self.helpNeeded = False
        self.denied = False
        # Pool of advice messages (Russian), picked from at random.
        self.messageList = ["Попробуй выпить воды", "Сделай несколько глубоких вдохов", "Выйди на улицу и подыши свежим воздухом"]
        self.currentCountToNewAdvice = 0

    def getNewAdvice(self):
        """Return the current advice, picking a fresh random one every 10th call."""
        if self.currentCountToNewAdvice == 0:
            self.helpNeeded = True
            last_index = len(self.messageList) - 1
            self.message = self.messageList[random.randint(0, last_index)]
        self.currentCountToNewAdvice = (self.currentCountToNewAdvice + 1) % 10
        return self.message
| [
"pilad0hwtts@yandex.ru"
] | pilad0hwtts@yandex.ru |
8c529afc2e397516fb9ea02350c43786e5f6cf38 | 4dd9c9d1ff9e93c1d61251e59969fea3b4b2b243 | /utils/test/test_BitWriter.py | 812788bec6fcab09cece34b8b30368bd2a6db3a6 | [] | no_license | wildmb/pdfproto | 674685bc5fccb24ef7ccc655c51d5347ded6740b | 45aec1280ded5d492781772f8bd09555f082a97c | refs/heads/master | 2021-01-01T17:16:02.328742 | 2013-05-14T04:02:53 | 2013-05-14T04:02:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | #!/usr/bin/env python
# standard library imports
import random
# third party realted imports
import pytest
# local library imports
from pdfproto.utils.BitWriter import BitWriter, BitWriterError
class TestBitWriter:
    """Randomized tests for BitWriter.

    Written for Python 2: uses xrange and relies on map() returning a list.
    """

    def _rand_data(self, data_len):
        # data_len random byte values, each in 0..255.
        return map(lambda _: random.randint(0, 255), xrange(data_len))

    def test_write(self):
        """str(bw) must equal the written bits packed into bytes, with the
        final partial byte zero-padded on the right."""
        # test empty data: flushing with nothing written yields ''
        bw = BitWriter()
        bw.flush()
        assert str(bw) == ''
        # write random data: each value gets 0-10 leading zero bits plus its
        # binary representation; track the expected bitstream in `bitmap`.
        data = self._rand_data(random.randint(0, 255))
        bitmap = []
        for d in data:
            bits = '0' * random.randint(0, 10) + bin(d)[2:]
            bitmap.append(bits)
            bw.write(d, len(bits))
        bw.flush()
        # generate bitmap: concatenate and pad to a whole number of bytes
        bitmap = ''.join(bitmap)
        if len(bitmap) % 8 != 0:
            bitmap += '0' * (8 - (len(bitmap) % 8))
        # generate expected answer: one chr() per 8-bit group
        expect_chars = []
        for i in xrange(0, len(bitmap), 8):
            expect_chars.append(chr(int(bitmap[i:(i + 8)], 2)))
        assert str(bw) == ''.join(expect_chars)

    def test_len(self):
        """len(bw) must equal the total number of bits written (no padding)."""
        bw = BitWriter()
        assert len(bw) == 0
        data = self._rand_data(random.randint(0, 255))
        bitmap = []
        for d in data:
            bits = '0' * random.randint(0, 10) + bin(d)[2:]
            bitmap.append(bits)
            bw.write(d, len(bits))
        assert len(bw) == len(''.join(bitmap))
| [
"yu.liang@thekono.com"
] | yu.liang@thekono.com |
b38f73829c9287f18fdff9690765e16ee7d82575 | ad5b63cd791a75ea4104628497e6c61ba4c38dde | /Medium/largestDivisibleSubset.py | 22af70dda89fb3bd2c1f02c4a8b98d250319421d | [] | no_license | tranphibaochau/LeetCodeProgramming | 81f7d985ea5c3d4f36dcc4cfd7d6dc6bbc470751 | 22881c43c9acd206803b2ada0ea5b52d5816f94d | refs/heads/master | 2022-12-09T11:47:52.523064 | 2022-11-28T22:22:31 | 2022-11-28T22:22:31 | 228,126,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | class Solution(object):
    def largestDivisibleSubset(self, nums):
        """Return a largest subset of nums in which every pair (a, b)
        satisfies a % b == 0 or b % a == 0.

        Dynamic programming over the sorted values: EDS[v] holds
        [size of the largest divisible subset ending at v, predecessor of v
        in that subset].  O(n^2) time, O(n) space.

        NOTE(review): EDS is keyed by value, so this assumes the entries of
        nums are distinct (duplicates would collide) -- confirm against the
        problem's input guarantees.
        """
        if len(nums) == 0:
            return []
        EDS={}
        # Seed each value with a singleton subset; these entries are
        # overwritten in the main loop below.
        for i, n in enumerate(nums):
            EDS[n] = [1, i]
        nums.sort() #sort the list, takes O(n.log(n))
        largest = 1
        max_n = nums[0]
        for i, n in enumerate(nums): #find EDS(nums(i))
            maxsub = 0 #size of the best divisible subset nums[i] can extend
            max_item = nums[0] #last element of that best subset
            for j in range(i, -1, -1):
                if (nums[i] % nums[j] == 0 and nums[i] != nums[j]):
                    if EDS[nums[j]][0] > maxsub:
                        maxsub = EDS[nums[j]][0]
                        max_item = nums[j]
            EDS[nums[i]] = [(1+ maxsub), max_item] #current biggest divisible set must be 1 + the size of previous divisible subset
            if EDS[nums[i]][0] > largest: #record the biggest divisible subset
                largest = EDS[nums[i]][0]
                max_n = nums[i]
        #reconstruct the largest divisible subset by walking predecessor links
        #back from the element where the biggest subset ends
        result = [max_n]
        n = max_n
        while EDS[n][0] != 1:
            result.append(EDS[n][1])
            n = EDS[n][1]
        return result
| [
"tranphibaochau@gmail.com"
] | tranphibaochau@gmail.com |
56b31469f1e6aca7e3c73ce6778ea1b9eb222d08 | 2058cf7a85076745b3b0f0fe18165abe006d9d8c | /mesh.py | 3aad572ad1d0784bd09638faaeb9ba36b047dab1 | [] | no_license | skyfalldec99/xiaozhumao | 9f62970facea9e72f7bfba4fd94ab9c113d19c4f | f019431e503f48fcec299bf30822c20b87850a81 | refs/heads/master | 2020-12-13T23:26:03.263991 | 2020-01-17T14:39:53 | 2020-01-17T14:39:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | import meshio
import os
import easygui as g
import numpy as np
def gettxt():
    """Convert a .msh mesh file (chosen via GUI dialogs) into two text files.

    Writes eles7.txt (triangle connectivity) and nodes7.txt (node
    coordinates) into a user-chosen folder and returns both paths (c, d).
    """
    # Ask the user (Chinese-language dialogs) for the folder that will hold
    # the node/element files.
    title = g.msgbox(msg="选择的存放节点和单元信息文件的文件夹为",title="hjx有限元程序",ok_button="OK")
    file_path1=g.diropenbox(default="*")
    print('选择的存放节点和单元信息文件的文件夹为:'+str(file_path1))
    # Output paths (Windows-style separators).
    c=str(file_path1)+'\\eles7.txt'
    d=str(file_path1)+'\\nodes7.txt'
    # Ask for the generated .msh file and load it with meshio.
    title = g.msgbox(msg="请打开生成的.msh文件",title="hjx有限元程序",ok_button="OK")
    file_path2=g.fileopenbox(default=".msh")
    mesh = meshio.read(file_path2)
    points = mesh.points
    cells = mesh.cells
    point_data = mesh.point_data
    cell_data = mesh.cell_data
    # Element data: one row per triangle -> [id, 3, 0, n0, n1, n2]
    eles = cells["triangle"]
    els_array = np.zeros([eles.shape[0], 6], dtype=int)
    els_array[:, 0] = range(eles.shape[0])
    els_array[:, 1] = 3
    els_array[:, 3::] = eles
    # Nodes: one row per node -> [id, x, y, 0, 0]
    nodes_array = np.zeros([points.shape[0], 5])
    nodes_array[:, 0] = range(points.shape[0])
    nodes_array[:, 1:3] = points[:, :2]
    # Create files
    np.savetxt(c, els_array, fmt="%d")
    np.savetxt(d, nodes_array,fmt=("%d", "%.4f", "%.4f", "%d", "%d"))
    return c,d
| [
"noreply@github.com"
] | skyfalldec99.noreply@github.com |
7164ffb11701800d47c326e3f9b8921d3ebf3fe3 | 6fbdfacc22a31c805687d937177a0f8d1c010acc | /towhee/engine/pipeline.py | 8adbd698f5dbbe5f7755af4f4855b63183d27103 | [
"Apache-2.0"
] | permissive | claireyuw/towhee | 79a94c346969f80371d02e6469941091c775ae11 | 9debfd92ff4a946f67b22fa8a5f99b5b1e1a2f8b | refs/heads/main | 2023-08-05T08:45:04.694405 | 2021-09-17T11:19:48 | 2021-09-17T11:19:48 | 407,728,256 | 1 | 0 | Apache-2.0 | 2021-09-18T01:58:14 | 2021-09-18T01:58:13 | null | UTF-8 | Python | false | false | 3,173 | py | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from typing import Callable
from towhee.dag.graph_repr import GraphRepr
from towhee.dataframe import DataFrameIterator
from towhee.engine.graph_context import GraphContext
class Pipeline(threading.Thread):
"""
The runtime pipeline context
"""
def __init__(self, graph_repr: GraphRepr, parallelism: int = 1) -> None:
"""
Args:
graph_repr: the graph representation
parallelism: how many rows of inputs to be processed concurrently
"""
self._graph_repr = graph_repr
self._parallelism = parallelism
self.on_task_finish_handlers = []
self._graph_ctx = None
@property
def graph_ctx(self):
return self._graph_ctx
def build(self):
"""
Create GraphContexts and set up input iterators.
"""
for g in self._graph_ctxs:
g.on_task_finish_handlers.append(self.on_task_finish_handlers)
raise NotImplementedError
def run(self, inputs: list) -> DataFrameIterator:
"""
The Pipeline's main loop
Agrs:
inputs: the input data, organized as a list of DataFrame, feeding
to the Pipeline.
"""
# while we still have pipeline inputs:
# input = inputs.next()
# for g in graph contexts:
# if g.is_idle:
# g.start_op.inputs = input
# break
# if all graphs contexts are busy:
# wait for notification from _notify_run_loop
raise NotImplementedError
def on_start(self, handler: Callable[[], None]) -> None:
"""
Set a custom handler that called before the execution of the graph.
"""
self._on_start_handler = handler
raise NotImplementedError
def on_finish(self, handler: Callable[[], None]) -> None:
"""
Set a custom handler that called after the execution of the graph.
"""
self._on_finish_handler = handler
raise NotImplementedError
def _organize_outputs(self, graph_ctx: GraphContext):
"""
on_finish handler passing to GraphContext. The handler will organize the
GraphContext's output into Pipeline's outputs.
"""
raise NotImplementedError
def _notify_run_loop(self, graph_ctx: GraphContext):
"""
on_finish handler passing to GraphContext. The handler will notify the run loop
that a GraphContext is in idle state.
"""
raise NotImplementedError
| [
"noreply@github.com"
] | claireyuw.noreply@github.com |
dfafd504fae4b05424e5b3f67f5709f7c631bad4 | 44f89fec54a8a8a87a77c3102eac84787b61703e | /djangoApp/asgi.py | bb0599fd36cb32172b5162d8720bc5a179e59428 | [] | no_license | kashyapthakkar/Wisdome-Pet-Medicine-Django | 89e51299b18535484934d79f744e60831f048564 | 6e295da4a402230498e564f11fb4886a430c9195 | refs/heads/master | 2022-04-10T00:42:21.417163 | 2020-03-27T01:12:44 | 2020-03-27T01:12:44 | 250,409,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for djangoApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoApp.settings')
application = get_asgi_application()
| [
"noreply@github.com"
] | kashyapthakkar.noreply@github.com |
cc311648169c4a5d06d4625c3ead1b2421359e8b | e89d68ded29b1ffabefeec2cc0e5c437fd1d3572 | /recipes/migrations/0003_recipe_videofile.py | 80a6693965dd0b68ec7eb10efa20256b421d5312 | [] | no_license | Jeanca7/choppingboard.ie | 808db82bd1e41e71fbebf6a22b97a6abb7807e13 | 0eebe35582e5b8d7b491865a412ebb98433ee15c | refs/heads/master | 2022-12-10T08:33:45.261560 | 2019-03-05T11:37:22 | 2019-03-05T11:37:22 | 159,159,817 | 0 | 1 | null | 2022-12-08T01:19:17 | 2018-11-26T11:37:39 | JavaScript | UTF-8 | Python | false | false | 422 | py | # Generated by Django 2.0.8 on 2018-11-29 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0002_remove_recipe_video'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='videofile',
field=models.FileField(null=True, upload_to='videos/', verbose_name=''),
),
]
| [
"jsepulvedadominguez@gmail.com"
] | jsepulvedadominguez@gmail.com |
7e4e28d2c13d17fdd64f8dd33933b84f8a9c95db | cbcdf195338307b0c9756549a9bffebf3890a657 | /django-stubs/core/cache/backends/base.pyi | 52f2910b56950d0d0b50af70cb6a198f97a8879f | [
"MIT"
] | permissive | mattbasta/django-stubs | bc482edf5c6cdf33b85005c2638484049c52851b | 8978ad471f2cec0aa74256fe491e2e07887f1006 | refs/heads/master | 2020-04-27T08:38:22.694104 | 2019-03-06T09:05:08 | 2019-03-06T09:05:24 | 174,178,933 | 1 | 0 | MIT | 2019-03-06T16:18:01 | 2019-03-06T16:18:00 | null | UTF-8 | Python | false | false | 2,590 | pyi | from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union
from django.core.exceptions import ImproperlyConfigured
class InvalidCacheBackendError(ImproperlyConfigured): ...
class CacheKeyWarning(RuntimeWarning): ...
DEFAULT_TIMEOUT: Any
MEMCACHE_MAX_KEY_LENGTH: int
def default_key_func(key: Union[int, str], key_prefix: str, version: Union[int, str]) -> str: ...
def get_key_func(key_func: Optional[Union[Callable, str]]) -> Callable: ...
class BaseCache:
default_timeout: int = ...
key_prefix: str = ...
version: int = ...
key_func: Callable = ...
def __init__(self, params: Dict[str, Optional[Union[Callable, Dict[str, int], int, str]]]) -> None: ...
def get_backend_timeout(self, timeout: Any = ...) -> Optional[float]: ...
def make_key(self, key: Union[int, str], version: Optional[Union[int, str]] = ...) -> str: ...
def add(self, key: Any, value: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
def get(self, key: Any, default: Optional[Any] = ..., version: Optional[Any] = ...) -> Any: ...
def set(self, key: Any, value: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
def touch(self, key: Any, timeout: Any = ..., version: Optional[Any] = ...) -> None: ...
def delete(self, key: Any, version: Optional[Any] = ...) -> None: ...
def get_many(self, keys: List[str], version: Optional[int] = ...) -> Dict[str, Union[int, str]]: ...
def get_or_set(
self, key: str, default: Optional[Union[Callable, int, str]], timeout: Any = ..., version: Optional[int] = ...
) -> Optional[Union[int, str]]: ...
def has_key(self, key: Any, version: Optional[Any] = ...): ...
def incr(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def decr(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def __contains__(self, key: str) -> bool: ...
def set_many(
self,
data: Union[Dict[str, bytes], Dict[str, int], Dict[str, str], OrderedDict],
timeout: Any = ...,
version: Optional[Union[int, str]] = ...,
) -> List[Any]: ...
def delete_many(self, keys: Union[Dict[str, str], List[str]], version: None = ...) -> None: ...
def clear(self) -> None: ...
def validate_key(self, key: str) -> None: ...
def incr_version(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def decr_version(self, key: str, delta: int = ..., version: Optional[int] = ...) -> int: ...
def close(self, **kwargs: Any) -> None: ...
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
0165d25c1c0c68a71343c15d575f22e270017e69 | e29734c2b3543a05a28b6bc460c3248ea37aaf5c | /apps/course/migrations/0015_auto_20190424_1717.py | 36961cbabe8320fc898752c336f25bbec6d02e5d | [] | no_license | simida0755/PopularBlogs | fda6dbe06751dde013ba57f73c708fd7106a49ee | 3a86989232206d0727223306c0e2d2c62d35fa9b | refs/heads/master | 2020-05-21T15:54:09.853341 | 2019-05-13T02:15:28 | 2019-05-13T02:15:28 | 186,101,555 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Generated by Django 2.0.2 on 2019-04-24 17:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0014_auto_20190424_1716'),
]
operations = [
migrations.AlterField(
model_name='course',
name='image',
field=models.ImageField(null=True, upload_to='courses/%Y/%m', verbose_name='封面图'),
),
]
| [
"simida027@163.com"
] | simida027@163.com |
77f2701c9a194550da4676ee6d1b88a87d897bc8 | 3a53855cba33cd0d4ecb53bfe7a0b5e7d393e045 | /org_tree/urls.py | f2916a8fd4a800a262dc872d1bbd41c8baef65b5 | [
"MIT"
] | permissive | drogina/orgtree | ff6459329a2a52312c8174b692fb7c2439892fc0 | 4f159bdb3d7f88f1085cba27266c9f5fb94e3a5b | refs/heads/master | 2021-05-09T04:49:23.967560 | 2018-02-07T15:41:30 | 2018-02-07T15:41:30 | 119,288,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from django.conf.urls import url, include
from rest_framework import routers
from api import views
router = routers.DefaultRouter()
router.register(r'employees', views.EmployeeViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
| [
"osxserver@test.com"
] | osxserver@test.com |
82bbbe9b40505d1dc9a8185d84f30a264647c3a4 | 6c90a52d5be46fe4bd920acef07b2e53d2e4b42c | /runner.py | 52b14e24661e02f9b9948a2cb41441de6ec05b45 | [] | no_license | amoretti86/supervind | fb3f335f0400011af937fc0e5d29e98688ed885c | 6444b88acf0c51e32b54206619cb6bcb438bdd26 | refs/heads/master | 2021-04-26T23:25:05.347404 | 2018-03-05T04:58:46 | 2018-03-05T04:58:46 | 123,989,516 | 0 | 0 | null | 2018-03-05T22:55:18 | 2018-03-05T22:55:18 | null | UTF-8 | Python | false | false | 12,926 | py | # Copyright 2018 Daniel Hernandez Diaz, Columbia University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import tensorflow as tf
from code.LatEvModels import LocallyLinearEvolution
from code.ObservationModels import PoissonObs, GaussianObs
from code.Optimizer_VAEC import Optimizer_TS
from code.datetools import addDateTime
DTYPE = tf.float32
# CONFIGURATION
RUN_MODE = 'train' # ['train', 'generate']
# DIRECTORIES, SAVE FILES, ETC
LOCAL_ROOT = "./"
LOCAL_DATA_DIR = "./data/"
THIS_DATA_DIR = 'poisson_data_002/'
LOCAL_RLT_DIR = "./rslts/"
LOAD_CKPT_DIR = "" # TODO:
SAVE_DATA_FILE = "datadict"
SAVE_TO_VIND = False
IS_PY2 = True
# MODEL/OPTIMIZER ATTRIBUTES
LAT_MOD_CLASS = 'llinear'
GEN_MOD_CLASS = 'Poisson' # ['Gaussian', 'Poisson']
YDIM = 10
XDIM = 2
NNODES = 60
ALPHA = 0.3
INITRANGE_MUX = 0.3
INITRANGE_LAMBDAX = 2.0
INITRANGE_B = 3.0
INITRANGE_OUTY = 3.0
INIT_Q0 = 0.4
INIT_Q = 1.0
INITRANGE_GOUTMEAN = 0.03
INITRANGE_GOUTVAR = 1.0
INITBIAS_GOUTMEAN = 1.0
# TRAINING PARAMETERS
LEARNING_RATE = 2e-3
# GENERATION PARAMETERS
NTBINS = 30
NSAMPS = 100
DRAW_HEAT_MAPS = True
flags = tf.app.flags
flags.DEFINE_string('mode', RUN_MODE, "The mode in which to run. Can be ['train', 'generate']")
flags.DEFINE_string('local_root', LOCAL_ROOT, "The root directory of supervind.")
flags.DEFINE_string('local_data_dir', LOCAL_DATA_DIR, "The directory that stores all the datasets")
flags.DEFINE_string('local_rlt_dir', LOCAL_RLT_DIR, "The directory that stores all the results")
flags.DEFINE_string('this_data_dir', THIS_DATA_DIR, ("For the 'generate' mode, the directory that shall "
"store this dataset"))
flags.DEFINE_string('save_data_file', SAVE_DATA_FILE, ("For the 'generate' mode, the name of the file "
"to store the data"))
flags.DEFINE_string('load_data_file', LOAD_CKPT_DIR, ("For the 'train' mode, the directory storing "
"`tf` checkpoints."))
flags.DEFINE_boolean('save_to_vind', SAVE_TO_VIND, ("Should the data be saved in a format that can be "
"read by the old theano code"))
flags.DEFINE_boolean('is_py2', IS_PY2, "Was the data pickled in python 2?")
flags.DEFINE_integer('xDim', XDIM, "The dimensionality of the latent space")
flags.DEFINE_integer('yDim', YDIM, "The dimensionality of the data")
flags.DEFINE_string('lat_mod_class', LAT_MOD_CLASS, ("The evolution model class. Implemented "
"['llinear']"))
flags.DEFINE_string('gen_mod_class', GEN_MOD_CLASS, ("The generative model class. Implemented "
"['Poisson, Gaussian']"))
flags.DEFINE_float('alpha', ALPHA, ("The scale factor of the nonlinearity. This parameters "
"works in conjunction with initrange_B"))
flags.DEFINE_float('initrange_MuX', INITRANGE_MUX, ("Controls the initial ranges within "
"which the latent space paths are contained. Bigger "
"values here lead to bigger bounding box. It is im-"
"portant to adjust this parameter so that the initial "
"paths do not collapse nor blow up."))
flags.DEFINE_float('initrange_LambdaX', INITRANGE_LAMBDAX, ("Controls the initial ranges within "
"which the latent space paths are contained. Roughly "
"rangeX ~ 1/(Lambda + Q), so if Lambda very big, the "
"range is reduced. If Lambda very small, then it defers "
"to Q. Optimally Lambda ~ Q ~ 1."))
flags.DEFINE_float('initrange_B', INITRANGE_B, ("Controls the initial size of the nonlinearity. "
"Works in conjunction with alpha"))
flags.DEFINE_float('initrange_outY', INITRANGE_OUTY, ("Controls the initial range of the output of the "
"generative network"))
flags.DEFINE_float('init_Q0', INIT_Q0, ("Controls the initial spread of the starting points of the "
"paths in latent space."))
flags.DEFINE_float('init_Q', INIT_Q, ("Controls the initial noise added to the paths in latent space. "
"More importantly, it also controls the initial ranges within "
"which the latent space paths are contained. Roughly rangeX ~ "
"1/(Lambda + Q), so if Q is very big, the range is reduced. If "
"Q is very small, then it defers to Lambda. Optimally "
"Lambda ~ Q ~ 1."))
flags.DEFINE_float('initrange_Goutmean', INITRANGE_GOUTMEAN, "")
flags.DEFINE_float('initrange_Goutvar', INITRANGE_GOUTVAR, "")
flags.DEFINE_float('initbias_Goutmean', INITBIAS_GOUTMEAN, "")
flags.DEFINE_float('learning_rate', LEARNING_RATE, "It's the learning rate, silly")
flags.DEFINE_integer('genNsamps', NSAMPS, "The number of samples to generate")
flags.DEFINE_integer('genNTbins', NTBINS, "The number of time bins in the generated data")
flags.DEFINE_boolean('draw_heat_maps', DRAW_HEAT_MAPS, "Should I draw heat maps of your data?")
params = tf.flags.FLAGS
def write_option_file(path):
"""
Writes a file with the parameters that were used for this fit. Cuz - no doubt -
you will forget Daniel Hernandez.
"""
params_list = sorted([param for param in dir(params) if param
not in ['h', 'help', 'helpfull', 'helpshort']])
with open(path + 'params.txt', 'w') as option_file:
for par in params_list:
option_file.write(par + ' ' + str(getattr(params, par)) + '\n')
def generate_fake_data(lat_mod_class, gen_mod_class, params,
data_path=None,
save_data_file=None,
Nsamps=100,
NTbins=30,
write_params_file=False,
draw_quiver=False,
draw_heat_maps=True,
savefigs=False):
"""
Generates synthetic data and possibly pickles it for later use. Maybe you
would like to train a model? ;)
Args:
lat_mod_class: A string that is a key to the evolution model class. Currently
'llinear' -> `LocallyLinearEvolution` is implemented.
gen_mod_class: A string that is a key to the observation model class. Currently
'Poisson' -> `PoissonObs` is implemented
data_path: The local directory where the generated data should be stored. If None,
don't store shit.
save_data_file: The name of the file to hold your data
Nsamps: Number of trials to generate
NTbins: Number of time steps to run.
xDim: The dimensions of the latent space.
yDim: The dimensions of the data.
write_params_file: Would you like the parameters with which this data has been
generated to be saved to a separate txt file?
"""
print('Generating some fake data...!\n')
lat_mod_classes = {'llinear' : LocallyLinearEvolution}
gen_mod_classes = {'Poisson' : PoissonObs, 'Gaussian' : GaussianObs}
evolution_class = lat_mod_classes[lat_mod_class]
generator_class = gen_mod_classes[gen_mod_class]
if data_path:
if not type(save_data_file) is str:
raise ValueError("`save_data_file` must be string (representing the name of your file) "
"if you intend to save the data (`data_path` is not None)")
if not os.path.exists(data_path): os.makedirs(data_path)
if write_params_file:
write_option_file(data_path)
# Generate some fake data for training, validation and test
graph = tf.Graph()
with graph.as_default():
with tf.Session() as sess:
xDim = params.xDim
yDim = params.yDim
if not Nsamps: Nsamps = params.genNsamps
if not NTbins: NTbins = params.genNTbins
X = tf.placeholder(DTYPE, shape=[None, None, xDim], name='X')
Y = tf.placeholder(DTYPE, shape=[None, None, yDim], name='Y')
latm = evolution_class(X, params)
genm = generator_class(Y, X, params, latm, is_out_positive=True)
Nsamps_train = int(4*Nsamps/5)
valid_test = int(Nsamps/10)
sess.run(tf.global_variables_initializer())
Ydata, Xdata = genm.sample_XY(sess, 'X:0', Nsamps=Nsamps, NTbins=NTbins,
with_inflow=True)
Ytrain, Xtrain = Ydata[:Nsamps_train], Xdata[:Nsamps_train]
Yvalid, Xvalid = Ydata[Nsamps_train:-valid_test], Xdata[Nsamps_train:-valid_test]
Ytest, Xtest = Ydata[valid_test:], Xdata[valid_test:]
# If xDim == 2, draw a cool path plot
if draw_quiver and xDim == 2:
latm.plot_2Dquiver_paths(sess, Xdata, 'X:0', rlt_dir=data_path,
with_inflow=True, savefig=savefigs)
if draw_heat_maps:
maxY = np.max(Ydata)
for i in range(1):
plt.figure()
sns.heatmap(Ydata[i].T, yticklabels=False, vmax=maxY).get_figure()
if savefigs:
plt.savefig(data_path + "heat" + str(i) + ".png")
else:
plt.show()
plt.pause(0.001)
input('Press Enter to continue.')
plt.close()
if data_path:
datadict = {'Ytrain' : Ytrain, 'Yvalid' : Yvalid, 'Xtrain' : Xtrain, 'Xvalid' : Xvalid,
'Ytest' : Ytest, 'Xtest' : Xtest}
with open(data_path + save_data_file, 'wb+') as data_file:
pickle.dump(datadict, data_file)
if params.save_to_vind:
with open(data_path + save_data_file + '_vind', 'wb+') as data_file:
pickle.dump(datadict, data_file, protocol=2)
return Ydata, Xdata
def main(_):
"""
Launches this whole zingamajinga.
"""
data_path = params.local_data_dir + params.this_data_dir
rlt_dir = params.local_rlt_dir + params.this_data_dir + addDateTime() + '/'
if params.mode == 'generate':
generate_fake_data(lat_mod_class=params.lat_mod_class,
gen_mod_class=params.gen_mod_class,
params=params,
data_path=data_path,
save_data_file=params.save_data_file,
Nsamps=params.genNsamps,
NTbins=params.genNTbins,
write_params_file=True,
draw_quiver=True,
draw_heat_maps=True,
savefigs=True)
if params.mode == 'train':
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
with sess.as_default():
with open(data_path+params.save_data_file, 'rb+') as f:
# Set encoding='latin1' for python 2 pickled data
datadict = pickle.load(f, encoding='latin1') if params.is_py2 else pickle.load(f)
Ytrain = datadict['Ytrain']
Yvalid = datadict['Yvalid']
params.yDim = Ytrain.shape[-1]
write_option_file(data_path)
opt = Optimizer_TS(params)
sess.run(tf.global_variables_initializer())
opt.train(sess, rlt_dir, Ytrain, Yvalid)
if __name__ == '__main__':
tf.app.run()
| [
"dh2832@columbia.edu"
] | dh2832@columbia.edu |
1525d9b4f6ff4a840598d8256cd0387f45f3099c | df17ab9d9d931c36a7aa23d5f365ad9459538d05 | /quiz01_answer/A.py | 1ee132843768cd04d201d012bb7c898b54515fab | [] | no_license | itsss/SASA_OOP | 1cdec9150e27ef92e5fcd7292726c66d785b6cef | 1b63fefef631ef869f3ca93c6c952f2fa59f014b | refs/heads/master | 2020-03-27T01:17:39.841618 | 2018-11-07T05:11:54 | 2018-11-07T05:11:54 | 145,697,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | '''
문제 A: [PYTHON] N개의 정수를 입력 받아, 역순으로 출력하기
사용자로부터 N개의 정수를 띄어쓰기로 구분하여, 역순으로 띄어쓰기로 구분하여 출력하시오.
입력
N개의 정수가 띄어쓰기로 구분되어 입력됨.
출력
N개의 정수가 역순으로 띄어쓰기로 구분하여 출력.
입력 예시
1 2 3 4 5
출력 예시
5 4 3 2 1
'''
a = list(map(int, input().split()))
a.reverse()
for i in range(len(a)):
print(a[i], end=' ') | [
"itschool@itsc.kr"
] | itschool@itsc.kr |
b4c03ba1f9e19aba9a78c79a81a3245394c1b6f4 | f31138ffa1a509bd8d65f276f0ab9f4c270bb719 | /arya_backend/routers/upload.py | ddedad6325f92aee864d0d38fdbe2dbdd5ed24b9 | [] | no_license | rychanya/arya_backend | abd032899c81b2d775448eb507466efbc2c159e6 | 24b13f796ae4f7e60abc05bc75a1314093c6a9f9 | refs/heads/master | 2023-08-24T12:39:41.002150 | 2021-08-05T22:36:01 | 2021-08-05T22:36:01 | 368,568,176 | 0 | 0 | null | 2021-08-05T22:02:49 | 2021-05-18T14:51:12 | Python | UTF-8 | Python | false | false | 1,020 | py | from bson.objectid import ObjectId
from fastapi import APIRouter, BackgroundTasks, Security
from arya_backend.db import upload_QA
from arya_backend.dependencies import get_current_user
from arya_backend.models.auth import User
from arya_backend.models.upload_QA import Payload, Upload
from arya_backend.parser import parse
router = APIRouter(prefix="/uploads")
@router.post("/")
async def upload(
bt: BackgroundTasks,
payload: list[Payload],
user: User = Security(get_current_user, scopes=["qa:add"]),
):
upload_id = upload_QA.create(user.id)
bt.add_task(parse, upload_id, user.id, payload)
return str(upload_id)
@router.get("/{id}")
def get_uplod_by_id(
id: str, user: User = Security(get_current_user, scopes=["qa:add"])
):
doc = upload_QA.get_by_id(ObjectId(id))
if doc and doc["by"] == user.id:
return Upload(**doc)
@router.get("/")
def get(user: User = Security(get_current_user, scopes=["qa:add"])):
upload = upload_QA.get_by_user(user.id)
return upload
| [
"rychanya@gmail.ru"
] | rychanya@gmail.ru |
d37a597b320a15c5c87894ea94cac78585063d3e | a11a5cf77b160bb968442bf59a542a2eb30af755 | /mooniverse/urls.py | 55aa96f85678c6c5d2d3785e8bfc2431673ce4bf | [] | no_license | kevinmarceloph/mooniverse-proto | a093ffa265b131154696b5ba5bcb96f2628906bb | 0771de0a6b8381dc20a46d36edc79c9d78926835 | refs/heads/master | 2016-09-05T10:26:01.086778 | 2015-01-26T00:45:09 | 2015-01-26T00:45:09 | 29,838,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
admin.autodiscover()
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + patterns('',
url(r'^dj-admin/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
url(r'^(?P<slug>.+)/$', 'proto.views.page_view', name='page_view'),
url(r'^$', 'proto.views.home', name='home'),
)
| [
"kevin@marcelo.ph"
] | kevin@marcelo.ph |
3082426d6af77afdbb12ae27aa6ad62c7a17533f | 5321e51ef751ff443cd2016585541afe4cba45a3 | /shop/admin.py | e7c4047e3ea3eb4ece57b07bcf5c3bc028795aaf | [] | no_license | kang-hyun/onlineshop | 853d6864454263e1634a6ede5cac7901eca64c26 | 809c82b1aa838df8c43539f714da8208b0aa0640 | refs/heads/master | 2023-05-14T13:43:47.337140 | 2021-05-25T04:55:02 | 2021-05-25T04:55:02 | 370,564,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | from django.contrib import admin
from .models import *
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name','slug']
prepopulated_fields = {'slug':('name',)}
admin.site.register(Category, CategoryAdmin)
class ProductAdmin(admin.ModelAdmin):
list_display = ['name','slug','category','price', 'stock', 'available_display', 'available_order', 'created', 'updated']
list_filter = ['available_display', 'created', 'updated', 'category']
prepopulated_fields = {'slug': ('name',)}
list_editable = ['price', 'stock', 'available_display', 'available_order']
admin.site.register(Product, ProductAdmin)
| [
"gusrn8959@naver.com"
] | gusrn8959@naver.com |
38ce8da96e7d15f553c74ea7daf37229603d5bb9 | 415bc146c18c339e11800fc14172146d25ee3685 | /fcc-api/opif-file-manager/test/test_download_api.py | 71998fc956cf7c2bbf55108c8f28cfd76472d971 | [] | no_license | ngrayluna/deepform | 901af90db5ed22e3b26d4f34bb348427c9e080da | acb57a0ab529fe9e2251c9dd29763f912515745b | refs/heads/master | 2021-04-23T00:54:51.440736 | 2020-05-03T01:10:16 | 2020-05-03T01:10:16 | 249,885,245 | 0 | 0 | null | 2020-03-25T04:16:25 | 2020-03-25T04:16:25 | null | UTF-8 | Python | false | false | 912 | py | # coding: utf-8
"""
OPIF Manager API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from api.download_api import DownloadApi # noqa: E501
from swagger_client.rest import ApiException
class TestDownloadApi(unittest.TestCase):
"""DownloadApi unit test stubs"""
def setUp(self):
self.api = api.download_api.DownloadApi() # noqa: E501
def tearDown(self):
pass
def test_download_folder_id_file_manager_id_pdf_get(self):
"""Test case for download_folder_id_file_manager_id_pdf_get
Dowload converted File # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"daniel.fennelly@gmail.com"
] | daniel.fennelly@gmail.com |
625c6de37d103930bacb03c6a8b19227b91476c9 | af0f9ecc34551da6db81722c76b2470f297599d8 | /luko_ws/src/speech_recognition/src/mic_array.py | c7388ca3ed0232da4e692e2b3b66aa8300af8dec | [
"Apache-2.0"
] | permissive | roastedpork/luko | 5997c6c53431d5d01c9c5a84609518189667c12c | e4c780f55a0329a13f37bb996a8253c83e8c80c0 | refs/heads/master | 2021-09-06T17:17:01.712839 | 2018-02-08T21:33:56 | 2018-02-08T21:33:56 | 108,414,121 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,880 | py | import pyaudio
import Queue
import threading
import numpy as np
from gcc_phat import gcc_phat
import math
import wave
SOUND_SPEED = 343.2
MIC_DISTANCE_6P1 = 0.064
MAX_TDOA_6P1 = MIC_DISTANCE_6P1 / float(SOUND_SPEED)
MIC_DISTANCE_4 = 0.08127
MAX_TDOA_4 = MIC_DISTANCE_4 / float(SOUND_SPEED)
class MicArray(object):
def __init__(self, rate=16000, channels=8, chunk_size=None):
self.pyaudio_instance = pyaudio.PyAudio()
self.queue = Queue.Queue()
self.quit_event = threading.Event()
self.channels = channels
self.sample_rate = rate
self.chunk_size = chunk_size if chunk_size else rate / 100
device_index = None
for i in range(self.pyaudio_instance.get_device_count()):
dev = self.pyaudio_instance.get_device_info_by_index(i)
name = dev['name'].encode('utf-8')
print(i, name, dev['maxInputChannels'], dev['maxOutputChannels'])
if dev['maxInputChannels'] == self.channels:
print('Use {}'.format(name))
device_index = i
break
if device_index is None:
raise Exception('can not find input device with {} channel(s)'.format(self.channels))
self.stream = self.pyaudio_instance.open(
input=True,
start=False,
format=pyaudio.paInt16,
channels=self.channels,
rate=int(self.sample_rate),
frames_per_buffer=int(self.chunk_size),
stream_callback=self._callback,
input_device_index=device_index,
)
def _callback(self, in_data, frame_count, time_info, status):
self.queue.put(in_data)
return None, pyaudio.paContinue
def start(self):
self.queue.queue.clear()
self.stream.start_stream()
def read_chunks(self):
self.quit_event.clear()
while not self.quit_event.is_set():
frames = self.queue.get()
if not frames:
break
frames = np.fromstring(frames, dtype='int16')
yield frames
def stop(self):
self.quit_event.set()
self.stream.stop_stream()
self.queue.put('')
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
if value:
return False
self.stop()
def _suppress_noise(self, buf, doa):
inc_mic = int(round(doa/60.0)%6+1)
far_mic = int((inc_mic+3)%6)
if far_mic == 0: far_mic=6
theta = (np.array([inc_mic,far_mic])-1)*60-(180-doa)
delta_frames = [int(round(self.sample_rate*MAX_TDOA_6P1*((1.0-math.cos(math.radians(x)))/2.0))) for x in theta]
frames_inc = buf[inc_mic+8*(3-delta_frames[0])::self.channels]
frames_far = buf[inc_mic+8*(3-delta_frames[1])::self.channels]
max_len = min(len(frames_inc),len(frames_far))
res = (frames_inc[:max_len]+frames_far[:max_len])/2.0
return res.astype(np.int16)
def suppress_noise(self, buff, doa):
# Initialise microphone angles relative to the signal DOA
angle_offsets = np.arange(0,360,60)-(180-doa)
# Calculate frame delay for each sensor proportional to array geometry & max delay
delay_frames = [(MAX_TDOA_6P1*self.sample_rate*(1.0-math.cos(math.radians(theta )))/2.0) for theta in angle_offsets]
delay_frames_discrete = [int(round(x)) for x in delay_frames]
delay_output = [[]]*6
for i in range(6):
delay_output[i] = np.array(buff[(i+1+(3-delay_frames_discrete[i])*8)::8])
max_len = min([len(out) for out in delay_output])
avg = delay_output[0][:max_len]
for i in range(1,len(delay_output)):
avg += delay_output[i][:max_len]
avg /= 6.0
return avg.astype(np.int16)
def get_direction(self, buf):
best_guess = None
if self.channels == 8:
MIC_GROUP_N = 3
MIC_GROUP = [[1, 4], [2, 5], [3, 6]]
tau = [0] * MIC_GROUP_N
theta = [0] * MIC_GROUP_N
# buf = np.fromstring(buf, dtype='int16')
for i, v in enumerate(MIC_GROUP):
tau[i], _ = gcc_phat(buf[v[0]::8], buf[v[1]::8], fs=self.sample_rate, max_tau=MAX_TDOA_6P1, interp=1)
theta[i] = math.asin(tau[i] / MAX_TDOA_6P1) * 180 / math.pi
min_index = np.argmin(np.abs(tau))
if (min_index != 0 and theta[min_index - 1] >= 0) or (min_index == 0 and theta[MIC_GROUP_N - 1] < 0):
best_guess = (theta[min_index] + 360) % 360
else:
best_guess = (180 - theta[min_index])
best_guess = (best_guess + 120 + min_index * 60) % 360
elif self.channels == 4:
MIC_GROUP_N = 2
MIC_GROUP = [[0, 2], [1, 3]]
tau = [0] * MIC_GROUP_N
theta = [0] * MIC_GROUP_N
for i, v in enumerate(MIC_GROUP):
tau[i], _ = gcc_phat(buf[v[0]::4], buf[v[1]::4], fs=self.sample_rate, max_tau=MAX_TDOA_4, interp=1)
theta[i] = math.asin(tau[i] / MAX_TDOA_4) * 180 / math.pi
if np.abs(theta[0]) < np.abs(theta[1]):
if theta[1] > 0:
best_guess = (theta[0] + 360) % 360
else:
best_guess = (180 - theta[0])
else:
if theta[0] < 0:
best_guess = (theta[1] + 360) % 360
else:
best_guess = (180 - theta[1])
best_guess = (best_guess + 90 + 180) % 360
best_guess = (-best_guess + 120) % 360
elif self.channels == 2:
pass
print(tau)
return best_guess
def test_4mic():
import signal
import time
is_quit = threading.Event()
def signal_handler(sig, num):
is_quit.set()
print('Quit')
signal.signal(signal.SIGINT, signal_handler)
with MicArray(16000, 4, 16000 / 4) as mic:
for chunk in mic.read_chunks():
direction = mic.get_direction(chunk)
print(int(direction))
if is_quit.is_set():
break
def test_8mic():
import signal
import time
from pixel_ring import pixel_ring
is_quit = threading.Event()
def signal_handler(sig, num):
is_quit.set()
print('Quit')
signal.signal(signal.SIGINT, signal_handler)
with MicArray(16000, 8, 16000 / 4) as mic:
for chunk in mic.read_chunks():
print(len(chunk))
direction = mic.get_direction(chunk)
pixel_ring.set_direction(direction)
print(int(direction))
if is_quit.is_set():
break
pixel_ring.off()
if __name__ == '__main__':
# test_4mic()
test_8mic()
| [
"al2114@ic.ac.uk"
] | al2114@ic.ac.uk |
2b9bee86ebd1b08f2a0f0400abf395c09608c7e8 | 5de3f612df0dbda712b39403dbafb0617e597651 | /build/pal_behaviour_msgs/catkin_generated/pkg.installspace.context.pc.py | 8706a70930366093c2aaea8520ef1c40fd260a4a | [] | no_license | AdriiTrujillo/tiago_public_ws | 1bd62d51c2eb694d07db83738f7bebd582d8126c | 6eaeabd1ec177df837b81fd9f42887318128766b | refs/heads/main | 2023-04-03T13:09:09.749190 | 2021-04-01T10:05:43 | 2021-04-01T10:05:43 | 350,026,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;actionlib_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_behaviour_msgs"
PROJECT_SPACE_DIR = "/home/adrii/tiago_public_ws/install"
PROJECT_VERSION = "0.13.2"
| [
"adrii.trujillo@gmail.com"
] | adrii.trujillo@gmail.com |
019289dc74206da27191b5df98bffab987e960b5 | 838830ebb91be6baaf0e79eaea7bf1b4e0513f17 | /put_user_entry.py | 333dd9314bc90c61d2d26ed71f7cc13222cdbff4 | [] | no_license | mgrechukh/nssdb-master | 9c92d8b0685c2fc45051e0dedc0f368640db33c8 | aacbca4a2c0fe64d9db0d13ce7980d1eb0480f5a | refs/heads/master | 2021-01-17T16:41:21.953547 | 2016-07-27T23:00:43 | 2016-07-27T23:00:43 | 63,964,100 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | #!/usr/bin/env python
# templates
passwd="%s:x:%s:0::/home/%s:"
shadow="%s:%s:17004:0:99999:7:::\x00"
# proof of concept: create files suitable for libnss-db from scratch by given password and username
import bsddb3
from passlib.hash import sha512_crypt
def useradd(_n, uid, name):
    """Store a passwd-style record for *name* in passwd.db under the three
    key forms libnss-db looks records up by: name, uid and position."""
    entry = passwd % (name, uid, name)
    db = bsddb3.btopen("passwd.db")
    for key in (".%s" % name, "=%s" % uid, "0%d" % _n):
        db[key] = entry
    db.sync()
def pwdset(_n, name, passw):
    """Hash *passw* with SHA-512 crypt and store the shadow record for
    *name* in shadow.db, keyed by name and by position."""
    entry = shadow % (name, sha512_crypt.encrypt(passw))
    db = bsddb3.btopen("shadow.db")
    for key in (".%s" % name, "0%d" % _n):
        db[key] = entry
    db.sync()
# Proof-of-concept driver: create account "usertest" (uid 300, record 0)
# and set its password.
useradd(0, 300, 'usertest')
pwdset(0, 'usertest', '10101')
| [
"mgrechukh@satelliz.com"
] | mgrechukh@satelliz.com |
7a7b7ac0f8c14cc1a4756aa69a85c707f0d0cb51 | 2826bdf11463b199f20be351f514bcb16f35d04e | /.history/ftp_20210407055256.py | b44e9d312cd6276dfc7c23b78b965740f32bf6a1 | [] | no_license | Roarcannotprogramming/Sec_Client_Server | 9efdb7e4c3e729cd6b5052b0ca0e23c14459ebc0 | 38f491e0e643e372c546eca73f9cf16c36513568 | refs/heads/master | 2023-04-11T12:40:12.780834 | 2021-04-17T15:53:47 | 2021-04-17T15:53:47 | 353,070,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,984 | py | import socket, ssl, os, sys
"""
00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
0x00 | version | hb | request | unused | path length |
0x04 | package length (1) | package length (2) |
0x08 | package length (3) | package length (4) |
0x0c | unused | unused |
"""
class ProtocalError(Exception):
    """Raised on any protocol violation: bad version, wrong socket type,
    malformed header fields, or an illegal path."""
    # NOTE(review): the name is a misspelling of "ProtocolError"; kept
    # because callers catch it under this name.
    pass
class FtpProtocol:
MAGIC = b'v1me'
# Requests
GET_FILE_LIST = 1
GET_FILE = 2
POST_FILE = 3
GET_CWD = 4
CHANGE_CWD = 5
MAKE_DIR = 6
DEL_FILE = 7
TRANS_ERROR = 8
# Max length of single content is 16M
CONTENT_MAX_LENGTH = 0xfffff0
HEADER_LEN = 0x10
BASE_PATH = '/FILES'
def __init__(self, ssock, version=1):
if version != 1:
raise ProtocalError("Version error")
if not isinstance(ssock, ssl.SSLSocket):
raise ProtocalError("Socket type error")
self.version = version
self.ssock = ssock
self.request = 0
self.hb = False
self.root = b''
self.current_recv = b''
def get_file_list(self, path):
assert(isinstance(path, bytes))
self.request = self.GET_FILE_LIST
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def get_file(self, path):
assert(isinstance(path, bytes))
self.request = self.GET_FILE
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def post_file(self, path, file_path = None, file_content = None):
if (file_path and file_content):
raise ProtocalError("File must be unique")
assert(isinstance(path, bytes))
self.request = self.POST_FILE
self.path = path
self.path_len = len(path)
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
if file_path:
self.package_len = self.HEADER_LEN + self.path_len + os.path.getsize(file_path)
self.content = b''
with open(file_path, 'rb') as f:
self.__send(self.__pack())
while True:
s = f.read(self.CONTENT_MAX_LENGTH)
if not s:
break
self.__send(s)
if file_content:
self.package_len = self.HEADER_LEN + self.path_len + len(file_content)
self.content = file_content
self.__send(self.__pack())
def get_cwd(self):
self.request = self.GET_CWD
self.path = b''
self.path_len = 0
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
self.__send(self.__pack())
def change_cwd(self, path):
assert(isinstance(path, bytes))
self.request = self.CHANGE_CWD
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def make_dir(self, path):
assert(isinstance(path, bytes))
self.request = self.MAKE_DIR
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def del_file(self, path):
assert(isinstance(path, bytes))
self.request = self.DEL_FILE
self.path = path
self.path_len = len(path)
self.content = b''
self.package_len = self.HEADER_LEN + self.path_len
if self.path_len <= 0 or self.path_len >= 0x10000:
raise ProtocalError("Path length error")
self.__send(self.__pack())
def server_deal(self):
while True:
header = self.__recv(self.HEADER_LEN)
self.version , self.hb, self.request, self.path_len, self.package_len = self.__check_format(header)
if self.hb:
self.path_len = 0
self.package_len = self.HEADER_LEN
self.path = b''
self.content = b''
# return self.__send(self.__pack())
return 0
if self.request == self.GET_FILE_LIST:
self.path = self.__recv(self.path_len)
self.content = self.__recv(self.package_len - self.HEADER_LEN - self.path_len)
try:
p = self.__os_check_path(self.path)
ls = '\n'.join(os.listdir(p))
self.content = ls
return self.__send(self.__pack())
except Exception:
self.content = 'Invalid path'
self.request = self.TRANS_ERROR
return self.__send(self.__pack())
if self.request == self.GET_FILE:
self.path = self.__recv(self.path_len)
self.content = self.__recv(self.package_len - self.HEADER_LEN - self.path_len)
try:
p = self.__os_check_path(self.path)
with open(p, 'rb') as f:
self.__send(self.__pack())
while True:
s = f.read(self.CONTENT_MAX_LENGTH)
if not s:
break
self.content = s
self.__send(s)
return 1
except Exception:
self.content = 'Invalid path'
self.request = self.TRANS_ERROR
return self.__send(self.__pack())
if self.request == self.POST_FILE:
self.path = self.__recv(self.path_len)
# TODO
self.content = self.__recv(self.package_len - self.HEADER_LEN - self.path)
try:
p = self.__os_check_path(self.path)
with open(p, 'wb+') as f:
f.write(self.content)
self.content = b''
return self.__send(self.__pack())
except Exception:
self.content = 'Invalid path'
self.request = self.TRANS_ERROR
return self.__send(self.__pack())
def __os_check_path(self, path):
p = os.path.normpath(path)
if p.startswith('..'):
ProtocalError('Invalid path')
return os.path.join(self.BASE_PATH, self.root, p)
def __check_format(self, pack):
version = pack[0] & 7
hb = (pack[0] >> 3) & 1
request = pack[0] >> 4
path_len = pack[2] + (pack[3] << 8)
package_len = pack[4] + (pack[5] << 8) + (pack[6] << 16) + (pack[7] << 24) + (pack[8] << 32) + (pack[9] << 40) + (pack[10] << 48) + (pack[11] << 56)
if version != 1:
raise ProtocalError("Version error")
if request not in range(1, 8):
raise ProtocalError("Request error")
if path_len < 0:
raise ProtocalError("Path error")
if package_len < 0:
raise ProtocalError("Package error")
return version, hb, request, path_len, package_len
def __pack(self):
self.path_len = len(self.path)
self.package_len = self.HEADER_LEN + self.path_len + len(self.content)
p = bytes([(self.version & 7) | (self.hb << 3) | (self.request << 4), 0,
self.path_len & 0xff, (self.path_len >> 8) & 0xff,
self.package_len & 0xff, (self.package_len >> 8) & 0xff,
(self.package_len >> 16) & 0xff, (self.package_len >> 24) & 0xff,
(self.package_len >> 32) & 0xff, (self.package_len >> 40) & 0xff,
(self.package_len >> 48) & 0xff, (self.package_len >> 56) & 0xff,
0, 0, 0, 0])
p += self.path
p += self.content
return p
def __send(self, pack):
self.ssock.send(pack)
"""
print(pack)
path_len = pack[2] + (pack[3] << 8)
package_len = pack[4] + (pack[5] << 8) + (pack[6] << 16) + (pack[7] << 24) + (pack[8] << 32) + (pack[9] << 40) + (pack[10] << 48) + (pack[11] << 56)
request = pack[0] >> 4
print("package_len: ", package_len)
print("path_len: ", path_len)
print("content_len: ", package_len - path_len - self.HEADER_LEN)
print("path: ", pack[self.HEADER_LEN: self.HEADER_LEN + path_len])
print("content: ", pack[self.HEADER_LEN + path_len:])
"""
return 1
def __recv(self, length):
current_len = len(self.current_recv)
while True:
s = self.ssock.recv(length - current_len)
current_len += len(s)
self.current_recv = self.current_recv + s
if current_len == length:
current_len = 0
ss = self.current_recv
self.current_recv = b''
return ss
if current_len > length:
raise ProtocalError("Length error")
# FtpProtocol(0).post_file(b'/root/admin/user/pwn', b'CA.key')
# client
def client():
    """Connect to the local FTP server over mutual TLS and request the cwd."""
    CA_FILE = "CA.crt"
    KEY_FILE = "Client.key"
    CERT_FILE = "Client.crt"
    # Mutual TLS: present the client cert and require the server's cert to
    # chain to our CA. Hostname checking is disabled for the loopback test.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.check_hostname = False
    context.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE)
    context.load_verify_locations(CA_FILE)
    context.verify_mode = ssl.CERT_REQUIRED
    with socket.socket() as sock:
        with context.wrap_socket(sock, server_side=False) as ssock:
            ssock.connect(('127.0.0.1', 5678))
            ftp = FtpProtocol(ssock)
            ftp.get_cwd()
            # NOTE(review): the reply is read as raw UTF-8 here rather than
            # through the FtpProtocol framing -- confirm intended.
            msg = ssock.recv(1024).decode("utf-8")
            print(f"receive msg from server : {msg}")
            ssock.close()
def server():
    """Accept TLS clients on 127.0.0.1:5678 and serve one request each."""
    CA_FILE = "CA.crt"
    KEY_FILE = "Server.key"
    CERT_FILE = "Server.crt"
    # Mutual TLS: clients must present a certificate signed by our CA.
    context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    context.load_cert_chain(certfile=CERT_FILE, keyfile=KEY_FILE)
    context.load_verify_locations(CA_FILE)
    context.verify_mode = ssl.CERT_REQUIRED
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
        with context.wrap_socket(sock, server_side=True) as ssock:
            ssock.bind(('127.0.0.1', 5678))
            ssock.listen(5)
            while True:
                client_socket, addr = ssock.accept()
                ftp = FtpProtocol(client_socket)
                ftp.server_deal()
                msg = client_socket.recv(1024).decode("utf-8")
                print(f"receive msg from client {addr}:{msg}")
                # NOTE(review): this reply message is built but never sent
                # before the socket is closed -- confirm intended.
                msg = f"yes , you have client_socketect with server.\r\n".encode("utf-8")
                client_socket.close()
client_socket.close()
if __name__ == "__main__":
    # Dispatch on the first CLI argument: "server" or "client".
    if sys.argv[1] == "server":
        server()
    if sys.argv[1] == "client":
        client()
| [
"z1991998920@gmail.com"
] | z1991998920@gmail.com |
73211ed950fbe7d9cfd91911fe33abe1ca433104 | 8dbb928b8ef4c92f3c9b12ebbe2412594f737df1 | /disco_aws_automation/disco_alarm.py | 5953df3890087ebe44923d69b2c23d7ae2971a2d | [
"BSD-2-Clause"
] | permissive | Angakkuit/asiaq | f2ccdf7a262c6311c94026160d9d041769003328 | f6c19d3784a0e43733eef5bbd04d908870a2ec27 | refs/heads/master | 2021-01-24T22:27:31.690862 | 2016-03-24T02:33:06 | 2016-03-24T02:33:06 | 54,143,584 | 0 | 0 | null | 2016-03-17T18:52:37 | 2016-03-17T18:52:37 | null | UTF-8 | Python | false | false | 3,411 | py | '''Contains DiscoAlarm class for orchestrating CloudWatch alarms'''
import logging
from boto.ec2.cloudwatch import CloudWatchConnection
from .disco_sns import DiscoSNS
from .disco_alarm_config import DiscoAlarmConfig
from .resource_helper import throttled_call
# Max batch size for alarm deletion http://goo.gl/vMQOrX
DELETE_BATCH_SIZE = 100
class DiscoAlarm(object):
    """
    Class orchestrating CloudWatch alarms
    """
    def __init__(self, disco_sns=None):
        # disco_sns may be injected (e.g. in tests); otherwise it is created
        # lazily by the disco_sns property on first use.
        self.cloudwatch = CloudWatchConnection()
        self._disco_sns = disco_sns
    def upsert_alarm(self, alarm):
        """
        Create an alarm, delete and re-create if it already exists
        """
        # CloudWatch has no atomic replace, so delete any alarm with the
        # same name first, then create. All API calls go through
        # throttled_call to back off on rate limiting.
        existing_alarms = self.cloudwatch.describe_alarms(alarm_names=[alarm.name])
        for existing_alarm in existing_alarms:
            throttled_call(
                existing_alarm.delete
            )
        throttled_call(
            self.cloudwatch.create_alarm,
            alarm
        )
    @property
    def disco_sns(self):
        """
        Lazy sns connection
        """
        self._disco_sns = self._disco_sns or DiscoSNS()
        return self._disco_sns
    def _sns_topic(self, alarm):
        """
        retrieve SNS topic correspoding to the alarm
        """
        return self.disco_sns.topic_arn_from_name(alarm.notification_topic)
    def create_alarms(self, alarms):
        """
        Create alarms from dict of DiscoAlarmConfig objects.
        """
        for alarm in alarms:
            self.upsert_alarm(
                alarm.to_metric_alarm(
                    self._sns_topic(alarm)
                )
            )
    def alarms(self):
        """
        Iterate alarms
        """
        # Generator that pages through describe_alarms using next_token
        # until CloudWatch stops returning a continuation token.
        next_token = None
        while True:
            alarms = throttled_call(
                self.cloudwatch.describe_alarms,
                next_token=next_token,
            )
            for alarm in alarms:
                yield alarm
            next_token = alarms.next_token
            if not next_token:
                break
    def get_alarms(self, desired=None):
        """
        Get all alarms for an environment filtered on the desired dictionary keys
        """
        desired = desired or {}
        keys = set(desired.keys())
        def _key_filter(dictionary, keys):
            # Project the decoded alarm-name dict onto the filter keys.
            # NOTE(review): iteritems() is a Python 2 idiom; this module
            # appears to target py2 -- use items() if ever ported.
            return {key: value for key, value in dictionary.iteritems() if key in keys}
        return [alarm for alarm in self.alarms()
                if _key_filter(DiscoAlarmConfig.decode_alarm_name(alarm.name), keys) == desired]
    def _delete_alarms(self, alarms):
        # CloudWatch caps delete_alarms at DELETE_BATCH_SIZE names per call,
        # so delete in slices.
        alarm_names = [alarm.name for alarm in alarms]
        alarm_len = len(alarm_names)
        logging.debug("Deleting %s alarms.", alarm_len)
        for index in range(0, alarm_len, DELETE_BATCH_SIZE):
            throttled_call(
                self.cloudwatch.delete_alarms,
                alarm_names[index:min(index + DELETE_BATCH_SIZE, alarm_len)]
            )
    def delete_hostclass_environment_alarms(self, environment, hostclass):
        """
        Delete alarm in an environment by hostclass name
        """
        self._delete_alarms(self.get_alarms({"env": environment, "hostclass": hostclass}))
    def delete_environment_alarms(self, environment):
        """
        Delete all alarms for an environment
        """
        self._delete_alarms(self.get_alarms({"env": environment}))
| [
"isukhanov@wgen.net"
] | isukhanov@wgen.net |
5be0509d53a356e848c58073847ec594a6815bd9 | 96287732427a69744be3cd5501e78403ec770429 | /TestCases/S3/output/strcmp.tac | 1ccb0714b7fa7cc60e85c78b284af14236dafb6e | [] | no_license | darknessnone/decaf_PA3_2018 | f853f63038adfe288ac838f3f603bf51ac683cc5 | 554f13e19d45ecf817f027c110dd2a5df9d85973 | refs/heads/master | 2020-04-08T07:07:54.808332 | 2018-11-28T06:35:23 | 2018-11-28T06:35:23 | 159,128,046 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,543 | tac | VTABLE(_Main) {
<empty>
Main
}
FUNCTION(_Main_New) {
memo ''
_Main_New:
_T4 = 4
parm _T4
_T5 = call _Alloc
_T6 = VTBL <_Main>
*(_T5 + 0) = _T6
return _T5
}
FUNCTION(_Main.compareString) {
memo '_T0:4 _T1:8'
_Main.compareString:
parm _T0
parm _T1
_T7 = call _StringEqual
if (_T7 == 0) branch _L12
_T8 = "Equal"
return _T8
branch _L13
_L12:
parm _T0
parm _T1
_T9 = call _StringEqual
_T10 = ! _T9
if (_T10 == 0) branch _L14
_T11 = "Unequal"
return _T11
branch _L15
_L14:
_T12 = "The impossible happens!"
return _T12
_L15:
_L13:
}
FUNCTION(_Main.printCompareString) {
memo '_T2:4 _T3:8'
_Main.printCompareString:
_T13 = "\""
parm _T13
call _PrintString
parm _T2
call _PrintString
_T14 = "\" and \""
parm _T14
call _PrintString
parm _T3
call _PrintString
_T15 = "\": "
parm _T15
call _PrintString
parm _T2
parm _T3
_T16 = call _Main.compareString
parm _T16
call _PrintString
_T17 = "\n"
parm _T17
call _PrintString
}
FUNCTION(main) {
memo ''
main:
_T18 = "Jobs"
_T19 = "Gates"
parm _T18
parm _T19
call _Main.printCompareString
_T20 = "case sensitive"
_T21 = "CASE SENSITIVE"
parm _T20
parm _T21
call _Main.printCompareString
_T22 = "Hello"
_T23 = "Hello"
parm _T22
parm _T23
call _Main.printCompareString
}
| [
"noreply@github.com"
] | darknessnone.noreply@github.com |
d952c125838bb94b58e87ab7fe246a91cb0f636e | 0f644cec7bfacd9a680126d63a53dd4e2b3eb7b1 | /networkx/classes/tests/test_graphviews.py | f6e69bd5e29365a2dddbdc997724d497b9f0ff4a | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | maniacs-oss/networkx | 604df07a2323e5511da597bc162bceea518bfff9 | 4affa3a48bb8d4e4ce8ab0dd73ec1b1a2ae477ae | refs/heads/master | 2021-01-19T00:09:11.614153 | 2017-08-16T20:39:50 | 2017-08-16T20:39:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,787 | py | from nose.tools import assert_in, assert_not_in, assert_equal
from nose.tools import assert_raises, assert_true, assert_false
import networkx as nx
from networkx.testing import assert_edges_equal
# Note: SubGraph views are not tested here. They have their own testing file
class TestReverseView(object):
    """Tests for nx.reverse_view over a directed path graph: the view must
    expose every edge reversed, pickle faithfully, and reject undirected
    input."""
    def setup(self):
        self.G = nx.path_graph(9, create_using=nx.DiGraph())
        self.rv = nx.reverse_view(self.G)
    def test_pickle(self):
        import pickle
        rv = self.rv
        prv = pickle.loads(pickle.dumps(rv, -1))
        # The round-tripped view must preserve nodes, adjacency and metadata.
        assert_equal(rv._node, prv._node)
        assert_equal(rv._adj, prv._adj)
        assert_equal(rv.graph, prv.graph)
    def test_contains(self):
        assert_in((2, 3), self.G.edges)
        assert_not_in((3, 2), self.G.edges)
        assert_not_in((2, 3), self.rv.edges)
        assert_in((3, 2), self.rv.edges)
    def test_iter(self):
        expected = sorted(tuple(reversed(e)) for e in self.G.edges)
        assert_equal(sorted(self.rv.edges), expected)
    def test_exceptions(self):
        # Reversing an undirected graph is not defined.
        nxg = nx.graphviews
        assert_raises(nx.NetworkXNotImplemented, nxg.ReverseView, nx.Graph())
class TestMultiReverseView(object):
    """Same reverse-view contract as TestReverseView, but for multigraphs:
    edge keys must survive reversal (parallel edge 4->5 gets key 1)."""
    def setup(self):
        self.G = nx.path_graph(9, create_using=nx.MultiDiGraph())
        self.G.add_edge(4, 5)
        self.rv = nx.reverse_view(self.G)
    def test_pickle(self):
        import pickle
        rv = self.rv
        prv = pickle.loads(pickle.dumps(rv, -1))
        assert_equal(rv._node, prv._node)
        assert_equal(rv._adj, prv._adj)
        assert_equal(rv.graph, prv.graph)
    def test_contains(self):
        assert_in((2, 3, 0), self.G.edges)
        assert_not_in((3, 2, 0), self.G.edges)
        assert_not_in((2, 3, 0), self.rv.edges)
        assert_in((3, 2, 0), self.rv.edges)
        # The parallel edge keeps its key (1) in the reversed direction.
        assert_in((5, 4, 1), self.rv.edges)
        assert_not_in((4, 5, 1), self.rv.edges)
    def test_iter(self):
        expected = sorted((v, u, k) for u, v, k in self.G.edges)
        assert_equal(sorted(self.rv.edges), expected)
    def test_exceptions(self):
        nxg = nx.graphviews
        MG = nx.MultiGraph(self.G)
        assert_raises(nx.NetworkXNotImplemented, nxg.MultiReverseView, MG)
class TestToDirected(object):
    """Tests for nx.to_directed views: every undirected edge must appear in
    both directions, and re-directing an already-directed view is a no-op."""
    def setup(self):
        self.G = nx.path_graph(9)
        self.dv = nx.to_directed(self.G)
        self.MG = nx.path_graph(9, create_using=nx.MultiGraph())
        self.Mdv = nx.to_directed(self.MG)
    def test_directed(self):
        assert_false(self.G.is_directed())
        assert_true(self.dv.is_directed())
    def test_already_directed(self):
        dd = nx.to_directed(self.dv)
        Mdd = nx.to_directed(self.Mdv)
        assert_edges_equal(dd.edges, self.dv.edges)
        assert_edges_equal(Mdd.edges, self.Mdv.edges)
    def test_pickle(self):
        import pickle
        dv = self.dv
        pdv = pickle.loads(pickle.dumps(dv, -1))
        assert_equal(dv._node, pdv._node)
        assert_equal(dv._succ, pdv._succ)
        assert_equal(dv._pred, pdv._pred)
        assert_equal(dv.graph, pdv.graph)
    def test_contains(self):
        # Undirected membership is symmetric; the directed view keeps both
        # orientations.
        assert_in((2, 3), self.G.edges)
        assert_in((3, 2), self.G.edges)
        assert_in((2, 3), self.dv.edges)
        assert_in((3, 2), self.dv.edges)
    def test_iter(self):
        revd = [tuple(reversed(e)) for e in self.G.edges]
        expected = sorted(list(self.G.edges) + revd)
        assert_equal(sorted(self.dv.edges), expected)
    def test_exceptions(self):
        # View classes must reject the wrong multigraph-ness.
        nxg = nx.graphviews
        assert_raises(nx.NetworkXError, nxg.DiGraphView, self.MG)
        assert_raises(nx.NetworkXError, nxg.MultiDiGraphView, self.G)
class TestToUndirected(object):
    """Tests for nx.to_undirected views: directed edges become symmetric,
    and re-undirecting an undirected view is a no-op."""
    def setup(self):
        self.DG = nx.path_graph(9, create_using=nx.DiGraph())
        self.uv = nx.to_undirected(self.DG)
        self.MDG = nx.path_graph(9, create_using=nx.MultiDiGraph())
        self.Muv = nx.to_undirected(self.MDG)
    def test_directed(self):
        assert_true(self.DG.is_directed())
        assert_false(self.uv.is_directed())
    def test_already_directed(self):
        uu = nx.to_undirected(self.uv)
        Muu = nx.to_undirected(self.Muv)
        assert_edges_equal(uu.edges, self.uv.edges)
        assert_edges_equal(Muu.edges, self.Muv.edges)
    def test_pickle(self):
        import pickle
        uv = self.uv
        puv = pickle.loads(pickle.dumps(uv, -1))
        assert_equal(uv._node, puv._node)
        assert_equal(uv._adj, puv._adj)
        assert_equal(uv.graph, puv.graph)
        # The view keeps a reference to the graph it wraps.
        assert_true(hasattr(uv, '_graph'))
    def test_contains(self):
        assert_in((2, 3), self.DG.edges)
        assert_not_in((3, 2), self.DG.edges)
        assert_in((2, 3), self.uv.edges)
        assert_in((3, 2), self.uv.edges)
    def test_iter(self):
        expected = sorted(self.DG.edges)
        assert_equal(sorted(self.uv.edges), expected)
    def test_exceptions(self):
        nxg = nx.graphviews
        assert_raises(nx.NetworkXError, nxg.GraphView, self.MDG)
        assert_raises(nx.NetworkXError, nxg.MultiGraphView, self.DG)
class TestChainsOfViews(object):
    """Tests for composing graph views (subgraph of subgraph, subgraph then
    to_directed/to_undirected, etc.) and for copy(as_view=...) returning the
    expected view vs. concrete class."""
    def setUp(self):
        self.G = nx.path_graph(9)
        self.DG = nx.path_graph(9, create_using=nx.DiGraph())
        self.Gv = nx.to_undirected(self.DG)
        self.DMG = nx.path_graph(9, create_using=nx.MultiDiGraph())
        self.MGv = nx.to_undirected(self.DMG)
    def test_subgraph_of_subgraph(self):
        SG = nx.induced_subgraph(self.G, [4, 5, 6])
        assert_equal(list(SG), [4, 5, 6])
        # Node 7 is outside SG, so the nested subgraph keeps only 6.
        SSG = SG.subgraph([6, 7])
        assert_equal(list(SSG), [6])
    def test_subgraph_todirected(self):
        SG = nx.induced_subgraph(self.G, [4, 5, 6])
        SSG = SG.to_directed()
        assert_equal(sorted(SSG), [4, 5, 6])
        assert_equal(sorted(SSG.edges), [(4, 5), (5, 4), (5, 6), (6, 5)])
    def test_subgraph_toundirected(self):
        SG = nx.induced_subgraph(self.G, [4, 5, 6])
        SSG = SG.to_undirected()
        assert_equal(list(SSG), [4, 5, 6])
        assert_equal(sorted(SSG.edges), [(4, 5), (5, 6)])
    def test_reverse_subgraph_toundirected(self):
        G = self.DG.reverse()
        SG = G.subgraph([4, 5, 6])
        SSG = SG.to_undirected()
        assert_equal(list(SSG), [4, 5, 6])
        assert_equal(sorted(SSG.edges), [(4, 5), (5, 6)])
    def test_subgraph_edgesubgraph_toundirected(self):
        G = self.G.copy()
        SG = G.subgraph([4, 5, 6])
        SSG = SG.edge_subgraph([(4, 5), (5, 4)])
        USSG = SSG.to_undirected()
        assert_equal(list(USSG), [4, 5])
        assert_equal(sorted(USSG.edges), [(4, 5)])
    def test_copy_subgraph(self):
        G = self.G.copy()
        SG = G.subgraph([4, 5, 6])
        CSG = SG.copy(as_view=True)
        DCSG = SG.copy(as_view=False)
        # as_view=True keeps a view class; as_view=False yields a real graph.
        assert_equal(CSG.__class__.__name__, 'GraphView')
        assert_equal(DCSG.__class__.__name__, 'Graph')
    def test_copy_disubgraph(self):
        G = self.DG.copy()
        SG = G.subgraph([4, 5, 6])
        CSG = SG.copy(as_view=True)
        DCSG = SG.copy(as_view=False)
        assert_equal(CSG.__class__.__name__, 'DiGraphView')
        assert_equal(DCSG.__class__.__name__, 'DiGraph')
    def test_copy_multidisubgraph(self):
        G = self.DMG.copy()
        SG = G.subgraph([4, 5, 6])
        CSG = SG.copy(as_view=True)
        DCSG = SG.copy(as_view=False)
        assert_equal(CSG.__class__.__name__, 'MultiDiGraphView')
        assert_equal(DCSG.__class__.__name__, 'MultiDiGraph')
    def test_copy_multisubgraph(self):
        G = self.MGv.copy()
        SG = G.subgraph([4, 5, 6])
        CSG = SG.copy(as_view=True)
        DCSG = SG.copy(as_view=False)
        assert_equal(CSG.__class__.__name__, 'MultiGraphView')
        assert_equal(DCSG.__class__.__name__, 'MultiGraph')
| [
"noreply@github.com"
] | maniacs-oss.noreply@github.com |
e221a1d2fe9e2a114d41425d4623d173129c5983 | c3ab0c8014f96c67ce4efb186b35e15e12fd483c | /decagon-pytorch/tests/decagon_pytorch/test_convolve.py | 8d0bdf7b915145600ae27ddd89568ed073c8c1b9 | [] | no_license | yangYlin/decagon-pytorch | 3bff14866ee84d170c0f4f3ffce8e6834d86bb55 | 3e57509efef50ae9d9113e824957585942c81016 | refs/heads/master | 2022-10-13T15:04:21.886411 | 2020-06-12T10:12:58 | 2020-06-12T10:12:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,421 | py | import decagon_pytorch.convolve
import decagon.deep.layers
import torch
import tensorflow as tf
import numpy as np
def prepare_data():
    """Build deterministic random binary test data.

    Returns a (5, 10) float32 latent matrix and five (5, 5) float32
    adjacency matrices, all with entries in {0.0, 1.0} (seeded with 0).
    """
    np.random.seed(0)
    def _binarize(arr):
        # Zero everything below .5, then ceil so survivors become exactly 1.
        arr[arr < .5] = 0
        return np.ceil(arr)
    latent = _binarize(np.random.random((5, 10)).astype(np.float32))
    n = len(latent)
    adjacency_matrices = [
        _binarize(np.random.random((n, n)).astype(np.float32))
        for _ in range(5)
    ]
    print('latent:', latent)
    print('adjacency_matrices[0]:', adjacency_matrices[0])
    return latent, adjacency_matrices
def dense_to_sparse_tf(x):
    # Convert a dense numpy array to a tf.sparse.SparseTensor by collecting
    # the coordinates and values of its nonzero entries.
    a, b = np.where(x)
    indices = np.array([a, b]).T
    values = x[a, b]
    return tf.sparse.SparseTensor(indices, values, x.shape)
def dropout_sparse_tf(x, keep_prob, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
    """
    noise_shape = [num_nonzero_elems]
    random_tensor = keep_prob
    # Draw the dropout mask from torch's RNG (seeded by the caller) so the
    # TF path uses the same random numbers as the torch implementation
    # being compared against.
    random_tensor += tf.convert_to_tensor(torch.rand(noise_shape).detach().numpy())
    # tf.convert_to_tensor(np.random.random(noise_shape))
    # tf.random_uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    # Inverted dropout: rescale survivors so the expectation is preserved.
    return pre_out * (1./keep_prob)
def dense_graph_conv_torch():
    # Run the dense torch graph convolution on the seeded test data and
    # return the resulting latent tensor.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent)
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat)
    conv = decagon_pytorch.convolve.DenseGraphConv(10, 10,
        adj_mat)
    latent = conv(latent)
    return latent
def dense_dropout_graph_conv_activation_torch(keep_prob=1.):
    # Dense torch variant with dropout + activation; keep_prob=1 disables
    # dropout so results can be compared deterministically.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent)
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat)
    conv = decagon_pytorch.convolve.DenseDropoutGraphConvActivation(10, 10,
        adj_mat, keep_prob=keep_prob)
    latent = conv(latent)
    return latent
def sparse_graph_conv_torch():
    # Sparse torch graph convolution on the same seeded data.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    print('latent.dtype:', latent.dtype)
    latent = torch.tensor(latent).to_sparse()
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat).to_sparse()
    print('adj_mat.dtype:', adj_mat.dtype,
        'latent.dtype:', latent.dtype)
    conv = decagon_pytorch.convolve.SparseGraphConv(10, 10,
        adj_mat)
    latent = conv(latent)
    return latent
def sparse_graph_conv_tf():
    # TensorFlow reference computation, reusing the torch layer's weight so
    # both frameworks multiply identical matrices.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
        torch.tensor(adjacency_matrices[0]).to_sparse())
    weight = tf.constant(conv_torch.weight.detach().numpy())
    latent = dense_to_sparse_tf(latent)
    adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
    latent = tf.sparse_tensor_dense_matmul(latent, weight)
    latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
    return latent
def sparse_dropout_graph_conv_activation_torch(keep_prob=1.):
    # Sparse torch dropout+conv+activation on the seeded test data.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent).to_sparse()
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat).to_sparse()
    conv = decagon_pytorch.convolve.SparseDropoutGraphConvActivation(10, 10,
        adj_mat, keep_prob=keep_prob)
    latent = conv(latent)
    return latent
def sparse_dropout_graph_conv_activation_tf(keep_prob=1.):
    # TensorFlow reference: same weight as the torch layer, dropout mask
    # drawn from torch's RNG (see dropout_sparse_tf), then conv + ReLU.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    conv_torch = decagon_pytorch.convolve.SparseGraphConv(10, 10,
        torch.tensor(adjacency_matrices[0]).to_sparse())
    weight = tf.constant(conv_torch.weight.detach().numpy())
    nonzero_feat = np.sum(latent > 0)
    latent = dense_to_sparse_tf(latent)
    latent = dropout_sparse_tf(latent, keep_prob,
        nonzero_feat)
    adj_mat = dense_to_sparse_tf(adjacency_matrices[0])
    latent = tf.sparse_tensor_dense_matmul(latent, weight)
    latent = tf.sparse_tensor_dense_matmul(adj_mat, latent)
    latent = tf.nn.relu(latent)
    return latent
def test_sparse_graph_conv():
    # torch and TF must produce bit-identical conv output given the same
    # seeded data and shared weights.
    latent_torch = sparse_graph_conv_torch()
    latent_tf = sparse_graph_conv_tf()
    assert np.all(latent_torch.detach().numpy() == latent_tf.eval(session = tf.Session()))
def test_sparse_dropout_graph_conv_activation():
    # Sweep keep_prob over [eps, 1+eps]; torch and TF results must agree to
    # within float tolerance at every setting.
    for i in range(11):
        keep_prob = i/10. + np.finfo(np.float32).eps
        latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob)
        latent_tf = sparse_dropout_graph_conv_activation_tf(keep_prob)
        latent_torch = latent_torch.detach().numpy()
        latent_tf = latent_tf.eval(session = tf.Session())
        print('latent_torch:', latent_torch)
        print('latent_tf:', latent_tf)
        assert np.all(latent_torch - latent_tf < .000001)
def test_sparse_multi_dgca():
    # Accumulate the per-keep_prob outputs in both frameworks, L2-normalize
    # the sums, and compare within tolerance.
    latent_torch = None
    latent_tf = []
    for i in range(11):
        keep_prob = i/10. + np.finfo(np.float32).eps
        latent_torch = sparse_dropout_graph_conv_activation_torch(keep_prob) \
            if latent_torch is None \
            else latent_torch + sparse_dropout_graph_conv_activation_torch(keep_prob)
        latent_tf.append(sparse_dropout_graph_conv_activation_tf(keep_prob))
    latent_torch = torch.nn.functional.normalize(latent_torch, p=2, dim=1)
    latent_tf = tf.add_n(latent_tf)
    latent_tf = tf.nn.l2_normalize(latent_tf, dim=1)
    latent_torch = latent_torch.detach().numpy()
    latent_tf = latent_tf.eval(session = tf.Session())
    assert np.all(latent_torch - latent_tf < .000001)
def test_graph_conv():
    # Dense and sparse torch implementations must agree exactly.
    latent_dense = dense_graph_conv_torch()
    latent_sparse = sparse_graph_conv_torch()
    assert np.all(latent_dense.detach().numpy() == latent_sparse.detach().numpy())
# def setup_function(fun):
# if fun == test_dropout_graph_conv_activation or \
# fun == test_multi_dgca:
# print('Disabling dropout for testing...')
# setup_function.old_dropout = decagon_pytorch.convolve.dropout, \
# decagon_pytorch.convolve.dropout_sparse
#
# decagon_pytorch.convolve.dropout = lambda x, keep_prob: x
# decagon_pytorch.convolve.dropout_sparse = lambda x, keep_prob: x
#
#
# def teardown_function(fun):
# print('Re-enabling dropout...')
# if fun == test_dropout_graph_conv_activation or \
# fun == test_multi_dgca:
# decagon_pytorch.convolve.dropout, \
# decagon_pytorch.convolve.dropout_sparse = \
# setup_function.old_dropout
def flexible_dropout_graph_conv_activation_torch(keep_prob=1.):
    # Same computation as the sparse/dense variants but through the
    # input-type-agnostic DropoutGraphConvActivation layer.
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent = torch.tensor(latent).to_sparse()
    adj_mat = adjacency_matrices[0]
    adj_mat = torch.tensor(adj_mat).to_sparse()
    conv = decagon_pytorch.convolve.DropoutGraphConvActivation(10, 10,
        adj_mat, keep_prob=keep_prob)
    latent = conv(latent)
    return latent
def _disable_dropout(monkeypatch):
    # Replace every dropout implementation with the identity so tests can
    # compare the deterministic parts of the layers.
    monkeypatch.setattr(decagon_pytorch.convolve.dense, 'dropout',
        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve.sparse, 'dropout_sparse',
        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve.universal, 'dropout',
        lambda x, keep_prob: x)
    monkeypatch.setattr(decagon_pytorch.convolve.universal, 'dropout_sparse',
        lambda x, keep_prob: x)
def test_dropout_graph_conv_activation(monkeypatch):
    # With dropout disabled, dense / sparse / flexible layers must agree on
    # every entry where both are nonzero, across the keep_prob sweep.
    _disable_dropout(monkeypatch)
    for i in range(11):
        keep_prob = i/10.
        if keep_prob == 0:
            keep_prob += np.finfo(np.float32).eps
        print('keep_prob:', keep_prob)
        latent_dense = dense_dropout_graph_conv_activation_torch(keep_prob)
        latent_dense = latent_dense.detach().numpy()
        print('latent_dense:', latent_dense)
        latent_sparse = sparse_dropout_graph_conv_activation_torch(keep_prob)
        latent_sparse = latent_sparse.detach().numpy()
        print('latent_sparse:', latent_sparse)
        latent_flex = flexible_dropout_graph_conv_activation_torch(keep_prob)
        latent_flex = latent_flex.detach().numpy()
        print('latent_flex:', latent_flex)
        # Compare pairwise only on mutually nonzero entries.
        nonzero = (latent_dense != 0) & (latent_sparse != 0)
        assert np.all(latent_dense[nonzero] == latent_sparse[nonzero])
        nonzero = (latent_dense != 0) & (latent_flex != 0)
        assert np.all(latent_dense[nonzero] == latent_flex[nonzero])
        nonzero = (latent_sparse != 0) & (latent_flex != 0)
        assert np.all(latent_sparse[nonzero] == latent_flex[nonzero])
def test_multi_dgca(monkeypatch):
    # Sparse and dense multi-layer DGCA must share weights and produce the
    # same output when dropout is disabled.
    _disable_dropout(monkeypatch)
    keep_prob = .5
    torch.random.manual_seed(0)
    latent, adjacency_matrices = prepare_data()
    latent_sparse = torch.tensor(latent).to_sparse()
    latent = torch.tensor(latent)
    assert np.all(latent_sparse.to_dense().numpy() == latent.numpy())
    adjacency_matrices_sparse = [ torch.tensor(a).to_sparse() for a in adjacency_matrices ]
    adjacency_matrices = [ torch.tensor(a) for a in adjacency_matrices ]
    for i in range(len(adjacency_matrices)):
        assert np.all(adjacency_matrices[i].numpy() == adjacency_matrices_sparse[i].to_dense().numpy())
    # Seed identically before each construction so both models draw the
    # same initial weights.
    torch.random.manual_seed(0)
    multi_sparse = decagon_pytorch.convolve.SparseMultiDGCA([10,] * len(adjacency_matrices), 10, adjacency_matrices_sparse, keep_prob=keep_prob)
    torch.random.manual_seed(0)
    multi = decagon_pytorch.convolve.DenseMultiDGCA([10,] * len(adjacency_matrices), 10, adjacency_matrices, keep_prob=keep_prob)
    print('len(adjacency_matrices):', len(adjacency_matrices))
    print('len(multi_sparse.sparse_dgca):', len(multi_sparse.sparse_dgca))
    print('len(multi.dgca):', len(multi.dgca))
    for i in range(len(adjacency_matrices)):
        assert np.all(multi_sparse.sparse_dgca[i].sparse_graph_conv.weight.detach().numpy() == multi.dgca[i].graph_conv.weight.detach().numpy())
    # torch.random.manual_seed(0)
    latent_sparse = multi_sparse([latent_sparse,] * len(adjacency_matrices))
    # torch.random.manual_seed(0)
    latent = multi([latent,] * len(adjacency_matrices))
    assert np.all(latent_sparse.detach().numpy() == latent.detach().numpy())
| [
"yuuto.0902.toko.mcas@icloud.com"
] | yuuto.0902.toko.mcas@icloud.com |
8f1a593498c67a5f2e79f4e502f58f638ac7aad2 | 75dcd22937caa082524b6a974e1b3734c3b5f4c0 | /CLI_automation_project/aws_cli.py | cae31feb10459b54bd0e4f5104564717fb374761 | [] | no_license | KunalKumarJaiswal/CLI_automation_project | 9b110be860be02c027e7ec31fadbe7d30dedc335 | 1d305ae9b780304c8dbaf38a3a25182fcabc7a57 | refs/heads/main | 2023-01-10T20:57:47.971152 | 2020-10-30T11:54:49 | 2020-10-30T11:54:49 | 310,178,606 | 0 | 0 | null | 2020-11-05T03:17:58 | 2020-11-05T03:17:57 | null | UTF-8 | Python | false | false | 1,776 | py | import os
def load_cmds_aws():
    """Show the AWS menu, read the user's choice and dispatch it.

    Choice 2 starts interactive EC2 creation, choice 3 lists the current
    EC2 instances, and choice 0 leaves the AWS menu by returning to the
    caller.  Any other input falls through without action.
    """
    print("""
    Press 1 to login using access key
    Press 2 to setup EC2 instance
    Press 3 to check current EC2 instances
    press 0 to exit AWS menu
    """)
    aws_a = input()
    if int(aws_a) == 2:
        create_ec2()
    if int(aws_a) == 3:
        check_running_instances()
    if int(aws_a) == 0:
        # Bug fix: the original line read `if int(aws_a)==0` with no colon
        # and no body, which is a SyntaxError.  "Exit the AWS menu" simply
        # means returning to whatever menu called us.
        return
def clear_screen_aws():
    """Clear the terminal, then redraw the AWS menu."""
    # NOTE(review): 'clear' assumes a POSIX terminal; on Windows the
    # equivalent command is 'cls' — confirm the target platform.
    os.system("clear")
    load_cmds_aws()
def create_ec2():
    """Interactively collect AMI, instance size and count, then launch the
    EC2 instances by shelling out to ``aws ec2 run-instances``.
    """
    # Menu position (1-based) -> concrete AMI id; order matches the printed
    # menu below.
    ami_type=["ami-0e306788ff2473ccb","ami-052c08d70def0ac62","ami-0b2f6494ff0b07a0e","ami-0cda377a1b884a1bc"]
    # Menu position (1-based) -> EC2 instance type.
    machine_size=['t2.nano','t2.micro','t2.small','t2.medium']
    # Accumulates the user's choices for the final CLI invocation.
    preference=dict()
    print("""select the AMI you want to use
    1.ami-0e306788ff2473ccb Amazon Linux 2
    2.ami-052c08d70def0ac62 RHEL 8
    3.ami-0b2f6494ff0b07a0e Windows Server (GUI)
    4.ami-0cda377a1b884a1bc Ubuntu 20.04
    """)
    machine_type=input()
    # Menus are 1-based, the lookup lists are 0-based.
    preference['machine_type']=ami_type[int(machine_type)-1]
    print("""Select machine type
    1. t2.nano
    2. t2.micro(free tier eligible)
    3. t2.small
    4. t2.medium """
    )
    size=input()
    preference['size']=machine_size[int(size)-1]
    num_of_inputs=int(input("How many instances you want to create \n"))
    preference['number_of_instances']=num_of_inputs
    # Echo the exact command before running it so the user sees what executes.
    print(f"aws ec2 run-instances --image-id {preference['machine_type']} --count {preference['number_of_instances']} --instance-type {preference['size']}")
    os.system(f"aws ec2 run-instances --image-id {preference['machine_type']} --count {preference['number_of_instances']} --instance-type {preference['size']}")
def check_running_instances():
    """Print the account's EC2 instances via the AWS CLI."""
    command = "aws ec2 describe-instances"
    os.system(command)
| [
"noreply@github.com"
] | KunalKumarJaiswal.noreply@github.com |
d156167ce165ac16bab92f480187ddf3da7430eb | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/b2e3e8c0718142d4cb0387f46cd77c15b67cc1e9-<get_random_string>-bug.py | 9b873176526ac647a2e151598420e0deb76c070d | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | def get_random_string(length=8, choices=(string.ascii_letters + string.digits)):
'\n Generate random string\n '
return ''.join([choice(choices) for i in range(length)]) | [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
9d9bec682b8409ccc2d18ac3c64f1c22b5a01199 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2551/60603/312775.py | 3038d6419acf664fc4ed2b489ef7cb65c0727f17 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | def change(a,b):
for i in range(a-1,b):
li[i]=0 if li[i]==1 else 1
def que(a, b):
    """Return the sum of the global list ``li`` over the 1-based,
    inclusive index range [a, b]."""
    segment = li[a - 1:b]
    return sum(segment)
# Driver: read the array length n and the operation count m, then process m
# operations of the form "op a b" on a 0/1 array:
#   op == 0 -> flip every cell in [a, b] (via change)
#   op == 1 -> print how many cells in [a, b] are set (via que)
n,m = [int(x) for x in input().split()]
li = [0]*n  # the toggle array, initially all zeros; mutated by change()
for i in range(m):
    s = [int(x) for x in input().split()]
    if s[0]==0:
        change(s[1],s[2])
    elif s[0]==1:
        print(que(s[1],s[2]))
"1069583789@qq.com"
] | 1069583789@qq.com |
51999790613f281e419c5b219103e0886aa44ba0 | a569c668bd8d1a5e3dbde715bc893300bf0847e5 | /app/worker.py | 76ff8a7f1bed8c7bddf561139a3485a82428b73e | [] | no_license | teopeurt/flask_redis_queue | ac05d8a291e50e68545745639f58254c3e35f8fa | 0f541c3a4b2311b3e2e5fee26fb12969dae77996 | refs/heads/master | 2021-01-10T03:00:27.717150 | 2016-03-21T22:23:54 | 2016-03-21T22:23:54 | 54,426,292 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import os
import redis
from rq import Worker, Queue, Connection
listen = ['foo']
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)
if __name__ == '__main__':
with Connection(conn):
worker = Worker(list(map(Queue, listen)))
worker.work() | [
"don@pigstycoders.com"
] | don@pigstycoders.com |
b0b638794415687755cbdb2be2b4c90db79e1c55 | bc2cdb1e438efaf67131e975ac4db80b4dc43385 | /src/public/message/migrations/0003_pushmessage.py | a4cc7fb3829a923d5a18ec9f447e1971018bd4f1 | [] | no_license | Shadow-linux/ops-for-study | cf4d55409ebc6f27d454bea60886cd154c994484 | 115b567948d25a64e423a6cdc89bc8337896afe2 | refs/heads/master | 2023-01-14T13:35:56.880896 | 2019-09-23T05:01:31 | 2019-09-23T05:01:31 | 209,781,758 | 2 | 0 | null | 2023-01-04T10:55:45 | 2019-09-20T12:08:11 | Python | UTF-8 | Python | false | false | 1,062 | py | # Generated by Django 2.0.1 on 2019-04-17 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PushMessage model (backing table ``common_push_message``)."""

    # Must run after the previous migration of the 'message' app.
    dependencies = [
        ('message', '0002_auto_20190416_1144'),
    ]

    operations = [
        migrations.CreateModel(
            name='PushMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='str; 标题', max_length=100)),
                ('content', models.TextField(help_text='str; 消息内容')),
                # NOTE(review): user ids and send types are stored as
                # delimited strings rather than relations — presumably
                # serialized lists; confirm the delimiter with the app code.
                ('user_id_list', models.CharField(help_text='str; 用户ID', max_length=500)),
                ('send_type_list', models.CharField(help_text='str; 发送消息类型', max_length=500)),
                ('created', models.DateTimeField(auto_now_add=True, help_text='str; 创建时间')),
            ],
            options={
                'verbose_name': '消息推送',
                'db_table': 'common_push_message',
            },
        ),
    ]
| [
"liangyedong@qipeipu.com"
] | liangyedong@qipeipu.com |
1d33194bec7731fe3ff67d062b92dded4e6ce2c0 | e08adb1bdef5426e5756c7739d501b74c926efa7 | /tests/base.py | 327193a7d69bced8a63606b48eca240b931ec96c | [
"Apache-2.0"
] | permissive | sassoftware/python-debpkgr | ba962a09f223b06b283c22485d860f7fb37b322f | 7d2c9a1ce160aeba671dea0825f6f13916e7cb86 | refs/heads/master | 2022-08-29T17:10:05.496937 | 2022-08-25T17:55:02 | 2022-08-25T17:55:02 | 83,701,285 | 7 | 5 | Apache-2.0 | 2019-02-15T15:17:18 | 2017-03-02T16:43:47 | Python | UTF-8 | Python | false | false | 2,104 | py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import pkg_resources
import tempfile
import unittest
import pytest
from six import text_type
# flake8: noqa
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
try:
from unittest import mock # noqa
except ImportError:
import mock # noqa
class BaseTestCase(unittest.TestCase):
    """Base class for debpkgr tests.

    Creates a scratch directory seeded with the bundled ``test_data``
    repository, chdirs into it for the duration of the test, and restores
    the original working directory and removes the scratch tree afterwards.
    """

    # Prefix for the per-test temporary directory.
    test_dir_pre = 'debpkgr-test-'

    def setUp(self):
        self.test_dir = tempfile.mkdtemp(prefix=self.test_dir_pre)
        self.current_repo_dir = os.path.join(self.test_dir, 'cur_repo')
        self.new_repo_dir = self.mkdir('new_repo')
        self.pool_dir = os.path.join(self.current_repo_dir, 'pool', 'main')
        test_data = pkg_resources.resource_filename(
            __name__, 'test_data/')
        shutil.copytree(test_data, self.current_repo_dir)
        # Bug fix: remember the original working directory *before*
        # changing it.  The original code registered
        # ``self.addCleanup(os.chdir, os.getcwd())`` after os.chdir(), so
        # teardown "restored" the test directory itself — which the other
        # cleanup then deleted, leaving the process in a removed cwd.
        original_cwd = os.getcwd()
        os.chdir(self.test_dir)
        # Cleanups run LIFO: first chdir back out, then remove the tree.
        self.addCleanup(shutil.rmtree, self.test_dir, ignore_errors=True)
        self.addCleanup(os.chdir, original_cwd)

    def mkfile(self, path, contents=None):
        """Create *path* (relative to the test dir) containing *contents*.

        Text contents are written in text mode, bytes in binary mode; a
        missing *contents* defaults to a single newline.  Returns the
        absolute path of the created file.
        """
        if contents is None:
            contents = "\n"
        fpath = os.path.join(self.test_dir, path)
        if isinstance(contents, text_type):
            mode = 'w'
        else:
            mode = 'wb'
        with open(fpath, mode) as fh:
            fh.write(contents)
        return fpath

    def mkdir(self, path):
        """Create directory *path* (relative to the test dir) and return
        its absolute path."""
        path = os.path.join(self.test_dir, path)
        os.makedirs(path)
        return path
| [
"bc.smith@sas.com"
] | bc.smith@sas.com |
16a9374b8c7244cdc0cd714adb4f32636af432e4 | 68b8aaeb6d2c73a652d4e833e432d041c90bc017 | /scripts/vis_poly.py | 6bbb950dc85ad664894ac7cc7a803ad08fdb6679 | [] | no_license | xibaomo/fx_poly2tri | 96c5c8767f65a8c21c39117a4a224b98f229e666 | 7d8bbed7763697c25f9c40d2a3cbb2844abca4f2 | refs/heads/master | 2020-12-03T14:20:21.987769 | 2020-01-02T09:41:03 | 2020-01-02T09:41:03 | 231,351,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,343 | py | #!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import sys, os
import pdb
def plot_origin_polys():
    """Draw every polygon listed in polys/poly.cli as an unfilled blue
    outline.

    The file holds blocks of ``x,y`` vertex lines, each block introduced
    by a header line containing ``polygon:``.
    """
    with open('polys/poly.cli', 'r') as f:
        x = []
        y = []
        for line in f:
            line = line.strip()
            if line.find('polygon:') >= 0:
                # A new polygon starts: flush the vertices collected so far.
                if len(x) > 0:
                    plt.fill(x, y, facecolor='none', edgecolor='b')
                x = []
                y = []
                continue
            xy = line.split(',')
            x.append(float(xy[0]))
            y.append(float(xy[1]))
        # Bug fix: the last polygon has no trailing 'polygon:' header, so
        # the loop above never flushed it and it was silently not drawn.
        if len(x) > 0:
            plt.fill(x, y, facecolor='none', edgecolor='b')
    plt.axis('equal')
def plot_vis_poly(vpfile):
    """Draw the visibility polygon stored in *vpfile* (a two-column CSV)
    and mark the viewpoint encoded in the file name as
    ``<prefix>_<x>_<y>.csv``."""
    frame = pd.read_csv(vpfile)
    xs = frame.iloc[:, 0].values
    ys = frame.iloc[:, 1].values
    plt.fill(xs, ys, ls='-.', lw=2)
    plt.plot(xs, ys, 'x')
    # Recover the viewpoint coordinates from the file name.
    start = vpfile.find('_')
    end = vpfile.find('.csv')
    parts = vpfile[start + 1:end].split('_')
    viewpoint_x = float(parts[0])
    viewpoint_y = float(parts[1])
    plt.plot(viewpoint_x, viewpoint_y, 'ro')
def disp_all_vispoly():
    """Show, one window at a time, every visibility polygon (*.csv) saved
    under polys/, each drawn over the original polygon outlines."""
    fs = os.listdir('polys')
    for f in fs:
        if not f.endswith(".csv"):
            continue
        plot_origin_polys()
        # Bug fix: the original used the Python-2-only statement `print f`,
        # a SyntaxError under Python 3; print(f) behaves identically on both.
        print(f)
        plot_vis_poly("polys/" + f)
        plt.show()
# Entry point: with no argument, show every saved visibility polygon;
# otherwise display only the file named on the command line.
if __name__ == "__main__":
    if len(sys.argv) == 1:
        disp_all_vispoly()
        sys.exit(0)
    vpfile = sys.argv[1]
    plot_origin_polys()
    plot_vis_poly(vpfile)
    plt.show()
| [
"fxua@gmail.com"
] | fxua@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.