blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c646056ce26a51c41069113ac3ea417b26a8726 | 89a5bb6d7ca44b081b6b6e6359918d3ec9a48b89 | /forms.py | 450a4a45cbe7a01de41965dc9ab5cb423be0e593 | [] | no_license | jetamartin/flask-MyNotes-App | 2719089c5a0371089ee51870cead14a340369cf6 | 0555f77ad0f4e04065656abcb2f03b51738ff5ac | refs/heads/master | 2023-03-24T12:32:41.048956 | 2020-05-14T21:10:01 | 2020-05-14T21:10:01 | 264,022,910 | 0 | 0 | null | 2021-03-20T03:59:22 | 2020-05-14T20:54:22 | Python | UTF-8 | Python | false | false | 1,567 | py | """ Forms for MyNotes App """
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired, Length, NumberRange, Optional
from flask_wtf import FlaskForm
class RegisterForm(FlaskForm):
    """ User registration form.

    Collects the credentials and contact details needed to create a new
    account; every field is required. Length caps presumably mirror the
    DB column sizes -- TODO confirm against the User model.
    """

    username = StringField(
        "Username",
        validators=[InputRequired(), Length(min=1, max=20)]
    )
    # Minimum length enforced here; hashing/storage happens elsewhere.
    password = PasswordField(
        "Password",
        validators=[InputRequired(), Length(min=6, max=30)]
    )
    email = StringField(
        "Email",
        validators=[InputRequired(), Length(max=30)]
    )
    first_name = StringField(
        "First Name",
        validators=[InputRequired(), Length(max=30)]
    )
    last_name = StringField(
        "Last Name",
        validators=[InputRequired(), Length(max=30)]
    )
class LoginForm(FlaskForm):
    """ User login form.

    Only username and password; lengths are validated but authentication
    itself is handled by the view that consumes this form.
    """

    username = StringField(
        "Username",
        validators=[InputRequired(), Length(min=2, max=20)]
    )
    password = PasswordField(
        "Password",
        validators=[InputRequired(), Length(min=6, max=30)]
    )
class AddNoteForm(FlaskForm):
    """ Form to add a new note (title plus content, both required). """

    title = StringField(
        "Title",
        validators=[InputRequired(), Length(min=2, max=30)]
    )
    # NOTE(review): a StringField renders a single-line input; a TextAreaField
    # may be intended for 200-character note content -- check the template.
    content = StringField(
        "Content",
        validators=[InputRequired(), Length(max=200)]
    )
class UpdateNoteForm(FlaskForm):
    """ Form to update an existing note.

    Field set and validation intentionally identical to AddNoteForm.
    """

    title = StringField(
        "Title",
        validators=[InputRequired(), Length(min=2, max=30)]
    )
    content = StringField(
        "Content",
        validators=[InputRequired(), Length(max=200)]
    )
class DeleteForm(FlaskForm):
    """ Intentionally empty form: submitting it supplies only the CSRF token
    for delete requests -- presumably posted from note templates (verify). """
| [
"jetamartin@gmail.com"
] | jetamartin@gmail.com |
a77f8eebd37076a3c6fe53a769845787c5aa43d9 | 737988155d6949005600e4e72c85bb8096608607 | /sort/__init__.py | 577b19ed9b11349d727f2b51a895eda1efe10f63 | [] | no_license | nxexox/aist | 85d716521a111d4d54debeb8a6766c868d929820 | da86184ad2b8c597c71562f551763bec5c3c50be | refs/heads/master | 2021-01-22T13:30:45.729867 | 2018-05-04T08:32:32 | 2018-05-04T08:32:32 | 100,676,375 | 0 | 8 | null | 2017-10-30T06:23:13 | 2017-08-18T05:33:48 | Python | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/python
# coding: utf8
from .comb_sort import comb_sort
from .heap_sort import heap_sort
from .insert_sort import insert_sort
from .quick_sort import quick_sort
from .radix_sort import radix_sort
from .shell_sort import shell_sort
if __name__ == "__main__":
    # Smoke test: run every sorting algorithm on the same sample list.
    # NOTE(review): assumes each *_sort returns the sorted list rather than
    # sorting in place -- confirm against the individual modules.
    test_list = [12, 68, 213, 1, 59, 394, 43, 12, 645, 324, 98, 999, 998]
    print(insert_sort(test_list))  # Insertion sort.
    print(radix_sort(test_list))  # Radix sort.
    print(comb_sort(test_list))  # Comb sort.
    print(shell_sort(test_list))  # Shell sort.
    print(quick_sort(test_list))  # Quick sort.
    print(heap_sort(test_list))  # Heap sort.
| [
"nxexox@gmail.com"
] | nxexox@gmail.com |
99d30b344e559124924b3313cdd62dc0b265257b | fb5adbcbd169cdcebd7205bc9704edd6278f2391 | /code/Smith-Waterman similarity.py | 3dd647b58a28a4de7c93855ba6d55c6e3ac1057f | [] | no_license | zyk2118216069/LPI-MiRNA | 7c42625d5a51d3b63505e78ed5c400ffecadfb25 | a233d427c4859c8beed1c44fbd703a937e2956d4 | refs/heads/master | 2022-03-25T01:29:28.711872 | 2019-12-05T13:26:54 | 2019-12-05T13:26:54 | 223,330,813 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | import numpy as np
import pandas as pd

# Sequence lengths: column 5 of the lncRNA species table.
species_path = r'C:\Users\zyk\Desktop\PAPER\data\5_select based on common miRNA\lncRNA species.csv'
species_table = pd.read_csv(species_path, header=None, index_col=None, low_memory=False)
seq_lengths = np.array(species_table)[:, 5]

# Raw Smith-Waterman alignment scores between every pair of lncRNAs.
score_path = r'C:\Users\zyk\Desktop\PAPER\data\9_similarity\Sequence similarity\lncRNA score.csv'
score_table = pd.read_csv(score_path, header=None, index_col=None, low_memory=False)
raw_scores = np.array(score_table)

# Normalise each pairwise score by the summed sequence lengths.  The matrix
# is symmetric and the diagonal (self-similarity) stays at 0.
n_rna = 331
similarity = np.zeros((n_rna, n_rna))
for row in range(n_rna):
    for col in range(row + 1, n_rna):
        value = raw_scores[row][col] / (seq_lengths[row] + seq_lengths[col])
        similarity[row][col] = value
        similarity[col][row] = value

np.savetxt(r"C:\Users\zyk\Desktop\PAPER\data\9_similarity\Sequence similarity\lncRNA-lncRNA similarity.csv", similarity, delimiter=',', fmt='%f')
"noreply@github.com"
] | noreply@github.com |
fd13a713a5caf9c48c60ad83f504415838e20c7c | 710d2f31b6808187c4895a618101c25b36d25b3c | /backend/home/migrations/0002_customtext_homepage_message.py | 306e32a28d93fb23006c97f446b674b7a449a077 | [] | no_license | crowdbotics-apps/haplen-28237 | 920231c921a3aa490acc97a7debacc6858c520de | 938cef97a8534941cb5f4de9684c95361ae27ef3 | refs/heads/master | 2023-05-31T20:26:37.979544 | 2021-06-25T20:12:49 | 2021-06-25T20:12:49 | 380,344,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | # Generated by Django 2.2.20 on 2021-06-25 20:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration adding the CustomText, HomePage and
    Message models.  Generated code -- avoid hand-editing the operations."""

    initial = True

    dependencies = [
        ('dating', '0001_initial'),
        ('home', '0001_load_initial_data'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomText',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=150)),
            ],
        ),
        migrations.CreateModel(
            name='HomePage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                # Nullable: a message may exist before being filed in an inbox.
                ('inbox', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='message_inbox', to='dating.Inbox')),
                ('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_match', to='dating.Match')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
f3d4d9b99d9b282e026c3b0ecc9e4acaedb53641 | b72929066ea8cc1ac0baf2109b7b267c31dcc0ed | /GuiaColombiana/Apps/AdministracionGuia/apps.py | 9bfe5bd6ac7c36895e22db14bfba8ae4331438ea | [] | no_license | dfnino10/GuiaColombiana | 87c4767eebe2f9a390d9233ac1ba5d73b7e88706 | 67c1443124078ed169861441695845229e40e79c | refs/heads/master | 2023-01-10T05:52:55.341689 | 2020-02-14T13:05:07 | 2020-02-14T13:05:07 | 203,075,644 | 0 | 1 | null | 2023-01-07T09:11:47 | 2019-08-19T01:18:50 | CSS | UTF-8 | Python | false | false | 111 | py | from django.apps import AppConfig
class AdministracionguiaConfig(AppConfig):
    """Django app configuration for the AdministracionGuia application."""
    # Dotted path Django uses to locate the app package.
    name = 'AdministracionGuia'
| [
"df.nino10@uniandes.edu.co"
] | df.nino10@uniandes.edu.co |
14d9e93beee2ce1dc6ba45d5720eae2e039aa2fc | 45b911eee552869cdd61061e546491e4945d0804 | /pyserver.py | b3d23b8db216ccc60c89d1ffdb77be36ec58b3c7 | [] | no_license | eschmalzy/messageLog | a432da1b3e6cf4ddec165f2cc3be9c14c86349fc | e58588de2f9647c0da2f28266fc5b14721cd7f1e | refs/heads/master | 2021-01-12T12:43:54.702112 | 2016-10-03T18:30:30 | 2016-10-03T18:30:30 | 69,290,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,029 | py | from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs
import json
import os
import ast
class MyServer(BaseHTTPRequestHandler):
    """Tiny message-log HTTP API.

    GET  /messages -> stored messages as a JSON list of dicts
    GET  /chickens -> static HTML probe
    POST /messages -> append the form-encoded request body to the log
    Every response carries a permissive CORS header.

    Fixes over the original: the log file is no longer opened
    unconditionally on every GET (a missing file used to crash the
    handler with an unhandled exception), and all file handles are
    managed with context managers so they cannot leak.
    """

    # Flat-file backing store; each line is repr() of a parse_qs() dict.
    MESSAGES_FILE = "messages.txt"

    def _load_messages(self):
        """Return the stored messages as a list of dicts, or None when the
        log file is missing or empty."""
        try:
            if os.path.getsize(self.MESSAGES_FILE) == 0:
                return None
        except OSError:
            # Missing/unreadable file: treat the same as an empty log.
            return None
        parsed = []
        with open(self.MESSAGES_FILE, 'r') as f:
            for line in f:
                # literal_eval safely rebuilds the stored dict literal
                # (no arbitrary code execution, unlike eval).
                parsed.append(ast.literal_eval(line.rstrip("\n")))
        return parsed

    def do_GET(self):
        # Status code and CORS header are common to every route.
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', ' * ')
        if self.path == "/chickens":
            self.send_header("Content-Type", "text/html")
            self.end_headers()
            self.wfile.write(bytes("<p>YES</p>", "utf-8"))
        elif self.path.startswith("/messages"):
            messages = self._load_messages()
            if messages is not None:
                self.send_header("Content-Type", "application/json")
                self.end_headers()
                print(messages)
                self.wfile.write(bytes(json.dumps(messages), "utf-8"))
            else:
                self.send_header("Content-Type", "text/html")
                self.end_headers()
                print("Couldn't read from file.")
                self.wfile.write(bytes("<p>Couldn't read from file.</p>", "utf-8"))
        else:
            self.send_header("Content-Type", "text/html")
            self.end_headers()
            self.wfile.write(bytes("<p>Hello, World!</p>", "utf-8"))
        return

    def do_POST(self):
        if not self.path.endswith("/messages"):
            self.send_response(404)
            return
        self.send_response(201)
        self.send_header('Access-Control-Allow-Origin', ' * ')
        # Re-read the current log, normalising every line to end in "\n"
        # (a missing file simply means an empty log).
        existing = ""
        try:
            with open(self.MESSAGES_FILE, "r") as f:
                for line in f:
                    existing += line if line.endswith("\n") else line + "\n"
        except OSError:
            pass
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        length = int(self.headers['Content-Length'])
        data = self.rfile.read(length).decode('utf-8')
        parsed_data = parse_qs(data)
        print(data)
        print(parsed_data)
        # Rewrite the whole file with the new message appended.
        with open(self.MESSAGES_FILE, 'w') as f:
            f.write(existing + str(parsed_data))
        return
def run():
    """Start the HTTP server on 127.0.0.1:8080 and serve until interrupted."""
    listen = ("127.0.0.1", 8080)
    server = HTTPServer(listen, MyServer)
    # Uncomment to wipe the message log at startup:
    #file = open("messages.txt", 'w').close()
    print("Listening......")
    server.serve_forever()

run()
"schmalzys@gmail.com"
] | schmalzys@gmail.com |
e309786604352cf226db5f2fd9c227d02a538eb4 | 1dbcab22f6480f4c64e2c0ba8ad288b78a8d2cee | /src/front.py | 964e15cf378de944c44ed1df5aa12b279d4d29a7 | [
"Apache-2.0"
] | permissive | MLH-Fellowship/Auto-Tagger | e18dadf665d192e3d0b936c3639cb4123eb44fbf | 9157c7b8d698f579fbc8cd02238f645e47ac4e24 | refs/heads/master | 2022-12-22T06:57:58.772728 | 2020-10-07T18:52:12 | 2020-10-07T18:52:12 | 298,593,960 | 4 | 5 | Apache-2.0 | 2020-10-07T18:52:14 | 2020-09-25T14:20:09 | Python | UTF-8 | Python | false | false | 1,242 | py | from flask import Flask, render_template, request, redirect, abort
from nltk.tokenize.treebank import TreebankWordDetokenizer
from nltk import word_tokenize
import bentoml
import itertools
app = Flask(__name__)
# Path to the exported BentoML PyTorch NER service bundle on this machine.
LOAD_PATH = "/home/parthparikh/bentoml/repository/PyTorchModel/20200926132654_E3B49E"
@app.route('/')
def index():
    """Render the landing page with an empty result area."""
    return render_template('index.html', word_range=0)
@app.route('/predict', methods=['POST'])
def predict():
    """Run the NER service on the submitted text and render tagged output.

    NOTE(review): bentoml.load() runs on every request; loading the bundle
    once at startup would avoid the per-request cost.
    """
    if request.method == 'POST':
        bento_service = bentoml.load(LOAD_PATH)
        # request debug
        print({"sentence": request.form["ner_text"]})
        sentence = request.form["ner_text"]
        result = bento_service.predict([{"sentence": request.form["ner_text"]}])
        # Presumably one comma-separated tag string per input sentence --
        # verify against the bento service's output format.
        tokens = result[0].split(",")
        # Tokenize each whitespace-separated word, re-insert single spaces,
        # then flatten the nested [[tokens], ' '] structure into one list.
        all_words = [[word_tokenize(w), ' '] for w in sentence.split()]
        all_words = list(itertools.chain(*list(itertools.chain(*all_words))))
        print(all_words)
        return render_template('index.html',
                               word_range=len(all_words),
                               words=all_words,
                               tokens=tokens,
                               sentence=sentence)
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger (arbitrary code
    # execution) -- keep it off outside local development.
    app.run(debug=True)
"parthparikh1999p@gmail.com"
] | parthparikh1999p@gmail.com |
09925f31f19351fda75ef9c39ecb6ecb186a5c99 | 24f354c0a362c0a44fe0946f0a947930f0724f4d | /tests/unit/config/test_ini.py | d3608a6f540a216b4c83bd97783eb9170438e817 | [
"MIT"
] | permissive | pypa/virtualenv | 783cf226c806bcb44ee63fd87c37d76e90c121ce | 6d22da631fd289f89f921a4010047ad969b7bfa7 | refs/heads/main | 2023-09-04T06:50:16.410634 | 2023-08-30T14:32:38 | 2023-08-30T14:32:38 | 1,446,474 | 4,313 | 1,073 | MIT | 2023-09-12T14:54:09 | 2011-03-06T14:33:27 | Python | UTF-8 | Python | false | false | 842 | py | from __future__ import annotations
import sys
from textwrap import dedent
import pytest
from virtualenv.info import IS_PYPY, IS_WIN, fs_supports_symlink
from virtualenv.run import session_via_cli
@pytest.mark.skipif(not fs_supports_symlink(), reason="symlink is not supported")
@pytest.mark.xfail(IS_PYPY and IS_WIN and sys.version_info[0:2] == (3, 9), reason="symlink is not supported")
def test_ini_can_be_overwritten_by_flag(tmp_path, monkeypatch):
    """An explicit --symlinks CLI flag must take precedence over the
    `copies = True` setting from the VIRTUALENV_CONFIG_FILE ini file."""
    custom_ini = tmp_path / "conf.ini"
    custom_ini.write_text(
        dedent(
            """
            [virtualenv]
            copies = True
            """,
        ),
        encoding="utf-8",
    )
    monkeypatch.setenv("VIRTUALENV_CONFIG_FILE", str(custom_ini))

    result = session_via_cli(["venv", "--symlinks"])

    symlinks = result.creator.symlinks
    assert symlinks is True
| [
"noreply@github.com"
] | noreply@github.com |
578b59740c5979f7b07443a56487b3cabe0040ae | 18105618d16d1699cb3f273eef63477d254e3709 | /Convolutional_neural_network/Convolutional_neural_network_tensorflow_v2.py | ab51e291cc346202897120a4fc5b1cf554d18df9 | [
"MIT"
] | permissive | Jaehoon-Cha-Data/TF2.0 | c33911b55e8218662c81020f59976b011a19fad2 | 9dbbffaf52178b94b7b337efe32d26408d5a37d8 | refs/heads/master | 2020-08-12T03:06:37.112028 | 2019-10-13T18:42:28 | 2019-10-13T18:42:28 | 214,676,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,234 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 13 17:00:20 2019
@author: jaehooncha
@email: chajaehoon79@gmail.com
CNN in tensorflow_v2
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D
from tensorflow.keras import Model
import numpy as np
import matplotlib.pyplot as plt
import argparse
from collections import OrderedDict
tf.keras.backend.set_floatx('float64')
def parse_args(argv=None):
    """Parse the training run's command-line options.

    Args:
        argv: optional list of argument strings.  None (the default) reads
            sys.argv[1:] exactly as before, so existing callers are
            unaffected, while tests can pass an explicit list.

    Returns:
        OrderedDict with 'epochs' and 'batch_size' (in that order).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type = int, default = 10)
    parser.add_argument('--batch_size', type = int, default =128)
    args = parser.parse_args(argv)

    config = OrderedDict([
            ('epochs', args.epochs),
            ('batch_size', args.batch_size)])

    return config
config = parse_args()
@tf.function()
def random_jitter(input_image, input_label):
    """Augment one MNIST image: upscale to 34x34 then random-crop back to
    28x28, i.e. a small random translation.  The label passes through."""
    input_image = tf.image.resize(input_image, [34, 34],
                                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    input_image = tf.image.random_crop(input_image, size=[28, 28, 1])
    # Horizontal flip left disabled -- digits are not mirror-invariant.
    # if tf.random.uniform(()) > 0.5:
    # input_image = tf.image.flip_left_right(input_image)
    return input_image, input_label
### data load ###
# MNIST: scale pixels to [0, 1] and add a trailing channel axis so each
# image is (28, 28, 1).
mnist = tf.keras.datasets.mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x, test_x = train_x / 255.0, test_x / 255.0
train_x = train_x[..., tf.newaxis]
test_x = test_x[..., tf.newaxis]
n_samples = len(train_x)
# Training pipeline: jitter augmentation, full shuffle, then batching;
# the test pipeline is only batched.
train_ds = tf.data.Dataset.from_tensor_slices((train_x, train_y))
train_ds = train_ds.map(random_jitter,
                        num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_ds = train_ds.shuffle(n_samples).batch(config['batch_size'])
test_ds = tf.data.Dataset.from_tensor_slices((test_x, test_y)).batch(config['batch_size'])
class CNN(Model):
    """Minimal convnet: one 3x3 conv (32 filters, same padding) + 2x2
    max-pool, then a 128-unit dense layer and a 10-way softmax output."""
    def __init__(self):
        super(CNN, self).__init__()
        self.c1 = Conv2D(32, 3, 1, padding = 'same', activation='relu')
        self.p1 = MaxPool2D(2)
        self.flatten = Flatten()
        self.d1 = Dense(128, activation='relu')
        self.d2 = Dense(10, activation='softmax')

    def call(self, x):
        # x: (batch, 28, 28, 1) images -> (batch, 10) class probabilities.
        x = self.c1(x)
        x = self.p1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)
model = CNN()
# Labels are integer class ids, hence the sparse categorical variants.
optimizer = tf.keras.optimizers.Adam(lr=0.01)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
# Streaming metrics: they accumulate over every batch they are fed.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
@tf.function
def train_step(X, Y):
    """One optimisation step: forward pass, gradients, parameter update;
    also feeds the streaming train metrics."""
    with tf.GradientTape() as tape:
        predictions = model(X)
        loss = loss_object(Y, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(Y, predictions)
@tf.function
def test_step(X, Y):
    """Evaluate one batch (no gradient update) and feed the test metrics."""
    predictions = model(X)
    t_loss = loss_object(Y, predictions)

    test_loss(t_loss)
    test_accuracy(Y, predictions)
### run ###
def runs():
    """Train for config['epochs'] epochs, evaluating on the test set after
    each epoch and printing per-epoch loss/accuracy."""
    for epoch in range(config['epochs']):
        # Reset the streaming metrics so each epoch reports its own numbers;
        # without this the printed values are running averages over ALL
        # epochs so far.
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()

        for epoch_x, epoch_y in train_ds:
            train_step(epoch_x, epoch_y)

        for epoch_x, epoch_y in test_ds:
            test_step(epoch_x, epoch_y)

        template = 'epoch: {}, train_loss: {}, train_acc: {}, test_loss: {}, test_acc: {}'
        print(template.format(epoch+1,
                              train_loss.result(),
                              train_accuracy.result()*100,
                              test_loss.result(),
                              test_accuracy.result()*100))

runs()
### results ###
test_predict_y = model(test_x)

### test accuracy ###
# (The original header said "mean squared error"; this is actually the
# streaming sparse-categorical accuracy, updated once more with the full
# test-set predictions here.)
test_acc = test_accuracy(test_y, test_predict_y)
print('test acc is %.4f' %(test_acc))

### font size ###
plt.rcParams.update({'font.size': 15})

### draw outputs ###
plt.figure(figsize=(5,5))
plt.imshow(test_x[0].reshape(28,28))
# Predicted class for the first test image.
print(np.argmax(test_predict_y[0]))
| [
"chajaehoon79@gmail.com"
] | chajaehoon79@gmail.com |
5d7458f2c2d6962f5be50183283d1437f8dc2908 | 68f757e7be32235c73e316888ee65a41c48ecd4e | /python_book(이것이 코딩테스트다)/03 그리디/3-2 큰수의 법칙.py | b1f160381acf71c965174db1df86f096e154ed49 | [] | no_license | leejongcheal/algorithm_python | b346fcdbe9b1fdee33f689477f983a63cf1557dc | f5d9bc468cab8de07b9853c97c3db983e6965d8f | refs/heads/master | 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | n, m, k = map(int,input().split())
L = list(map(int,input().split()))
f = max(L)
L.remove(f)
s = max(L)
flag = 0
result = 0
for i in range(m):
flag += 1
if flag >= k:
result += s
flag = 0
else:
result += f
print(result)
| [
"aksndk123@naver.com"
] | aksndk123@naver.com |
d8ca473834215464d9cf5cbef9af09d117952933 | fcb517b1d2fa8dfb7dce1ca9e156bcee835e1f20 | /qqbot GroupEmotions/main.py | b94420ea021275423b56f851132720fc3a1c0885 | [] | no_license | acdzh/python_projects | 2ae6b0d6eeab3883c5ad688bd1da08b4e6a6434f | d165da54dc9d6e21b2ec618cac8a75c7e8e9b3f6 | refs/heads/master | 2021-06-03T16:53:19.784523 | 2020-10-03T07:33:54 | 2020-10-03T07:33:54 | 135,989,177 | 6 | 5 | null | 2020-10-03T07:35:18 | 2018-06-04T07:44:39 | Python | UTF-8 | Python | false | false | 632 | py | from qqbot import QQBotSlot as qqbotslot, RunBot
import json
import baidu_ai as bd
@qqbotslot
def onQQMessage(bot, contact, member, content):
print('get a msg: ' + content)
txt = content
if '@ME' in content:
out = bd.ai_get(txt,1)
bot.SendTo(contact,out)
print (out)
elif not '表情' in content:
neg = bd.ai_get(txt,'n')
sen = bd.ai_get(txt,'s')
if (neg > 0.6) & (sen == 0):
alert = bd.ai_get(txt,'s')
bot.SendTo(contact, alert)
else:
print('not replay')
if __name__ == '__main__':
    # Start the qqbot event loop; onQQMessage above handles incoming messages.
    RunBot()
| [
"1069436872@qq.com"
] | 1069436872@qq.com |
93638e6c349607a0107252696396903b701dc859 | f4329f3234ad1f4e3b8f666503ada7665c34098f | /gc.py | a4d02b32b8c112e25c4bfc0191ffb8cfb7495756 | [] | no_license | Andrea-DM95/Homework4 | 30f07425cd1539eef9fd39ad240860bb9cccc097 | 90601e3d47bc58059c4fd4c90107748eb57b0b55 | refs/heads/main | 2023-02-05T17:31:33.465644 | 2020-12-06T09:31:10 | 2020-12-06T09:31:10 | 318,992,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from collections import Counter
def gc(d):
    """Return "ID\\nGC%" for the record in *d* with the highest GC content.

    *d* maps record identifiers to DNA sequence strings.  On ties the
    record iterated last wins (matching the dict-overwrite behaviour of
    the original lookup-table implementation).
    """
    name_by_score = {}
    scores = []
    for name, seq in d.items():
        counts = Counter(seq)
        pct = (counts['G'] + counts['C']) / len(seq) * 100
        scores.append(pct)
        name_by_score[pct] = name
    top = max(scores)
    return "{}\n{}".format(name_by_score[top], top)
# Parse the FASTA file into {record_id: sequence}.
fasta={}
with open("rosalind_gc.txt", 'r') as f:
    for line in f:
        if line.startswith('>'):
            # Header: drop the leading '>' and trailing newline.
            # NOTE(review): [1:-1] assumes the line ends with "\n"; a final
            # header without one would lose its last character.
            key=line[1:-1]
            fasta[key]=""
        else:
            # Sequence line: append to the current record.
            fasta[key]+=line.strip()
# Write the highest-GC record (id and percentage) to the output file.
nf=open("output_gc.txt","w")
nf.write(gc(fasta))
nf.close() | [
"noreply@github.com"
] | noreply@github.com |
338a60d0851ccf757b65b9116b86de9062ee4777 | 95f4c29adc4ebd1553efc95c71a16da592bfde80 | /apps/foscam_app_v2/homeassistant0.54/foscam.py | d491f553efae8c59031c3c658d1a6b63c402e8ee | [] | no_license | ganice/My-AppDaemon | c4210f2c550ad8657ec41f6ebdcc6971e6db4640 | a4ee9e81bf5f512508db8ebabdd85d7c29873e68 | refs/heads/master | 2020-09-08T11:46:21.024835 | 2019-09-13T11:57:40 | 2019-09-13T11:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,598 | py | ###########################################################################################
# #
# Rene Tode ( hass@reot.org ) #
# #
# 2017/10/10 Germany #
# #
# an app to control a foscam camera. #
# dependecies: #
# untangle (pip install untangle) #
# #
# args: #
# see the readme on #
# https://github.com/ReneTode/My-AppDaemon/tree/master/apps/foscam_app_v2 #
# #
###########################################################################################
import appdaemon.appapi as appapi
import datetime
import untangle
from urllib.request import urlopen
import urllib.request
from socket import timeout
import time
class foscam(appapi.AppDaemon):
  def initialize(self):
    """Validate the app configuration, register state listeners for every
    camera-control entity and schedule the periodic sensor poll."""
    # First sensor poll runs 5 seconds after startup.
    runtime = datetime.datetime.now() + datetime.timedelta(seconds=5)
    # Numeric severities for the textual log levels used in the settings.
    self.loglevels = {
      "CRITICAL": 50,
      "ERROR": 40,
      "WARNING": 30,
      "INFO": 20,
      "DEBUG": 10,
      "NOTSET": 0
      }
    self.logsettings = self.args["logsettings"]
    if "loglevel" in self.logsettings:
      self.loglevel = self.logsettings["loglevel"]
    else:
      self.loglevel = "INFO"
    if "logsensorlevel" in self.logsettings:
      self.logsensorlevel = self.logsettings["logsensorlevel"]
    else:
      self.logsensorlevel = "INFO"
    self.knowntype = 0
    # Camera families: knowntypes1 = PTZ + infrared, knowntypes2 = fixed
    # cams without infrared control; knowntypes3 is currently unused below.
    self.knowntypes1 = ["F19828P","F19828P V2","R2","F19928P","F19821W V2"]
    self.knowntypes2 = ["C1 lite","C1"]
    self.knowntypes3 = ["F18918","F18918W"]
    self.camsettings = self.args["camsettings"]
    self.picsettings = self.args["picsettings"]
    self.ptzsettings = self.args["ptzsettings"]
    self.alarmsettings = self.args["alarmsettings"]
    self.recordsettings = self.args["recordsettings"]
    self.dashboardsettings = self.args["dashboardsettings"]
    self.type = self.camsettings["camera_type"]
    if self.type in self.knowntypes1:
      self.PTZ = True
      self.infraredcam = True
    elif self.type in self.knowntypes2:
      self.PTZ = False
      self.infraredcam = False
    else:
      self.log("unknown camera type. try 1 of the known types, and if it works please report back on the forum")
      return
    # Mandatory settings; abort initialisation if any is missing.
    try:
      self.camhost = self.camsettings["host"]
      self.portnr = str(self.camsettings["port"])
      self.user = self.camsettings["user"]
      self.password = self.camsettings["password"]
      self.last_error_sensor = self.logsettings["last_error_sensor"]
      self.camera_name = self.camsettings["camera_name"]
      repeat = int(self.alarmsettings["sensor_update_time"])
      self.motion_sensor = self.alarmsettings["motion_sensor"]
      self.soundalarm_sensor = self.alarmsettings["soundalarm_sensor"]
      self.motion_switch = self.alarmsettings["motion_switch"]
      self.brightness_slider = self.picsettings["brightness_slider"]
      self.contrast_slider = self.picsettings["contrast_slider"]
      self.hue_slider = self.picsettings["hue_slider"]
      self.saturation_slider = self.picsettings["saturation_slider"]
      self.sharpness_slider = self.picsettings["sharpness_slider"]
      self.default_pic_settings_switch = self.picsettings["default_pic_settings_switch"]
      self.mirror_switch = self.picsettings["mirror_switch"]
      self.flip_switch = self.picsettings["flip_switch"]
      self.recording_sensor = self.recordsettings["recording_sensor"]
      self.snap_picture_switch = self.recordsettings["snap_picture_switch"]
      self.save_snap_dir = self.recordsettings["save_snap_dir"]
    except:
      self.log("some arguments are not given or wrong", level = "ERROR")
      return
    # Infrared entities are only required for infrared-capable models.
    if self.infraredcam:
      try:
        self.infrared_switch = self.picsettings["infrared_switch"]
        self.auto_infrared_switch = self.picsettings["auto_infrared_switch"]
      except:
        self.log("some infrared arguments are not given or wrong and you selected an infraredcam", level = "ERROR")
        return
    # PTZ entities are only required for pan/tilt/zoom models.
    if self.PTZ:
      try:
        self.up_down_slider = self.ptzsettings["up_down_slider"]
        self.left_right_slider = self.ptzsettings["left_right_slider"]
        self.zoom_slider = self.ptzsettings["zoom_slider"]
        self.preset_points_select = self.ptzsettings["preset_points_select"]
        self.start_cruise_select = self.ptzsettings["start_cruise_select"]
        self.stop_cruise_switch = self.ptzsettings["stop_cruise_switch"]
      except:
        self.log("some PTZ arguments are not given or wrong and you selected a PTZ cam", level = "ERROR")
        return
    # Poll the camera state on a fixed interval.
    self.run_every(self.get_sensors,runtime,repeat)
    self.listen_state(self.snap_picture,self.snap_picture_switch)
    # Base URL for the Foscam CGI proxy; individual commands are appended.
    self.url = "http://"+ self.camhost + ":" + str(self.portnr) + "/cgi-bin/CGIProxy.fcgi?&usr=" + self.user + "&pwd=" + self.password + "&cmd="
    self.listen_state(self.input_boolean_changed, self.mirror_switch, on_command="mirrorVideo&isMirror=1", off_command="mirrorVideo&isMirror=0", reset=False)
    self.listen_state(self.input_boolean_changed, self.flip_switch, on_command="flipVideo&isFlip=1", off_command="flipVideo&isFlip=0", reset=False)
    # Picture-setting sliders all share one callback, parameterised by type.
    self.listen_state(self.pic_setting_input_slider_changed,self.brightness_slider,settingstype = "Brightness")
    self.listen_state(self.pic_setting_input_slider_changed,self.contrast_slider,settingstype = "Contrast")
    self.listen_state(self.pic_setting_input_slider_changed,self.hue_slider,settingstype = "Hue")
    self.listen_state(self.pic_setting_input_slider_changed,self.saturation_slider,settingstype = "Saturation")
    self.listen_state(self.pic_setting_input_slider_changed,self.sharpness_slider,settingstype = "Sharpness")
    self.listen_state(self.pic_setting_input_slider_changed,self.default_pic_settings_switch,settingstype = "Default")
    # Initial read so the sliders reflect the camera's current settings.
    self.pic_setting_input_slider_changed("","","","",{"settingstype":"JustCheck"})
    if self.PTZ:
      self.listen_state(self.input_select_changed, self.preset_points_select, on_command="ptzGotoPresetPoint&name=")
      self.listen_state(self.input_select_changed, self.start_cruise_select, on_command="ptzStartCruise&mapName=")
      self.listen_state(self.input_slider_changed, self.zoom_slider, stop_command="zoomStop", speed_command="setZoomSpeed&speed=", left_command="zoomOut", right_command="zoomIn")
      self.listen_state(self.input_slider_changed, self.up_down_slider, stop_command="ptzStopRun", speed_command="setPTZSpeed&speed=", left_command="ptzMoveDown", right_command="ptzMoveUp")
      self.listen_state(self.input_slider_changed, self.left_right_slider, stop_command="ptzStopRun", speed_command="setPTZSpeed&speed=", left_command="ptzMoveLeft", right_command="ptzMoveRight")
      self.listen_state(self.input_boolean_changed, self.stop_cruise_switch, on_command="ptzStopCruise", reset=True)
    if self.infraredcam:
      self.listen_state(self.input_boolean_changed, self.infrared_switch, on_command="openInfraLed", off_command="closeInfraLed", reset=False)
      self.listen_state(self.input_boolean_changed, self.auto_infrared_switch, on_command="setInfraLedConfig&mode=0", off_command="setInfraLedConfig&mode=1", reset=False)
    if self.dashboardsettings["use_dashboard"]:
      self.fullscreenalarm = self.dashboardsettings["full_screen_alarm_switch"]
      if self.dashboardsettings["show_full_screen_dashboard"]:
        self.lastshown = datetime.datetime.now()
        self.listen_state(self.toondash,self.motion_sensor,constrain_input_boolean=self.fullscreenalarm)
      if self.dashboardsettings["create_dashboard"]:
        self.create_dashboard()
      if self.dashboardsettings["create_alarm_dashboard"]:
        self.create_alarm_dashboard()
    self.log(" App started without errors", "INFO")
    self.set_state(self.last_error_sensor, state = self.time().strftime("%H:%M:%S") + " App started without errors")
def get_sensors(self, kwargs):
data = self.send_command("getDevState")
if data == "":
return
DevState = untangle.parse(data)
try:
motion_alarm = DevState.CGI_Result.motionDetectAlarm.cdata
if motion_alarm == "0":
motion_alarm_text = "Disabled"
elif motion_alarm == "1":
motion_alarm_text = "No Alarm"
elif motion_alarm == "2":
motion_alarm_text = "Alarm!"
sound_alarm = DevState.CGI_Result.soundAlarm.cdata
if sound_alarm == "0":
sound_alarm_text = "Disabled"
elif sound_alarm == "1":
sound_alarm_text = "No Alarm"
elif sound_alarm == "2":
sound_alarm_text = "Alarm!"
recording = DevState.CGI_Result.record.cdata
if recording == "0":
recording_text = "No"
elif recording == "1":
recording_text = "Yes"
infrared = DevState.CGI_Result.infraLedState.cdata
self.set_state(self.motion_sensor,state = motion_alarm_text)
self.set_state(self.soundalarm_sensor,state = sound_alarm_text)
self.set_state(self.recording_sensor,state = recording_text)
if motion_alarm == "0":
self.turn_off(self.motion_switch)
else:
self.turn_on(self.motion_switch)
if infrared == "0":
self.turn_off(self.infrared_switch)
else:
self.turn_on(self.infrared_switch)
except:
self.my_log(" Unexpected error", "WARNING")
self.log(data, level = "WARNING")
data = self.send_command("getInfraLedConfig")
if data == "":
return
DevState = untangle.parse(data)
infraredstate = DevState.CGI_Result.mode.cdata
if infraredstate == "1":
self.turn_off(self.auto_infrared_switch)
else:
self.turn_on(self.auto_infrared_switch)
data = self.send_command("getMirrorAndFlipSetting")
if data == "":
return
DevState = untangle.parse(data)
mirrorstate = DevState.CGI_Result.isMirror.cdata
if mirrorstate == "0":
self.turn_off(self.mirror_switch)
else:
self.turn_on(self.mirror_switch)
flipstate = DevState.CGI_Result.isFlip.cdata
if flipstate == "0":
self.turn_off(self.flip_switch)
else:
self.turn_on(self.flip_switch)
def input_boolean_changed(self, entity, attribute, old, new, kwargs):
if new == "on":
data = self.send_command(kwargs["on_command"])
else:
if "off_command" in kwargs:
data = self.send_command(kwargs["off_command"])
else:
return
if "<result>0</result>" in data:
self.my_log(" Changed " + self.friendly_name(entity), "INFO")
else:
self.my_log("Failed to change " + self.friendly_name(entity), "WARNING")
if kwargs["reset"]:
self.turn_off(entity)
def input_select_changed(self, entity, attribute, old, new, kwargs):
data = self.send_command(kwargs["on_command"] + new)
if "<result>0</result>" in data:
self.my_log("Changed " + self.friendly_name(entity),"INFO")
else:
self.my_log("Failed to change " + self.friendly_name(entity), "WARNING")
def input_slider_changed(self, entity, attribute, old, new, kwargs):
data0 = ""
data1 = ""
data2 = ""
if float(new) == 0:
data0 = self.send_command(kwargs["stop_command"])
if "<result>0</result>" in data0:
self.my_log("Stopped" + self.friendly_name(entity), "INFO")
elif data0 == "":
self.my_log("Failed to stop " + self.friendly_name(entity), "WARNING")
else:
self.log(data0, "WARNING")
elif float(new) > 0:
data1 = self.send_command(kwargs["speed_command"] + str((float(new)-1))[:-2])
data2 = self.send_command(kwargs["right_command"])
self.run_in(self.reset_after_a_second,1,entityname=entity)
elif float(new) < 0:
data1 = self.send_command(kwargs["speed_command"] + str((-float(new)-1))[:-2])
data2 = self.send_command(kwargs["left_command"])
self.run_in(self.reset_after_a_second,1,entityname=entity)
if float(new) != 0:
if "<result>0</result>" in data1:
self.my_log("Speed set", "INFO")
elif data1 == "":
self.my_log("Failed to set speed", "WARNING")
else:
self.log(data1)
if "<result>0</result>" in data2:
self.my_log("Started " + self.friendly_name(entity), "INFO")
elif data2 == "":
self.my_log("Failed to start " + self.friendly_name(entity), "WARNING")
else:
self.log(data2)
    def pic_setting_input_slider_changed(self, entity, attribute, old, new, kwargs):
        """Push an image-setting slider change to the camera and re-sync all sliders.

        ``settingstype`` selects the CGI command; "Default" resets the camera's
        image settings, "JustCheck" only reads them back.  After any change the
        camera's current settings are fetched and written back to the HA sliders.
        """
        if kwargs["settingstype"] != "Default" and kwargs["settingstype"] != "JustCheck":
            if kwargs["settingstype"] == "Contrast":
                # NOTE: "constrast" matches the camera CGI's own parameter
                # spelling (intentional, not a typo) — TODO confirm against
                # the Foscam CGI API documentation.
                data = self.send_command("setContrast&constrast=" + new)
            else:
                data = self.send_command("set" + kwargs["settingstype"] + "&" + kwargs["settingstype"].lower() + "=" + new)
        else:
            if kwargs["settingstype"] == "Default":
                data = self.send_command("resetImageSetting")
                self.turn_off(self.default_pic_settings_switch)
        # Read back the camera's actual settings so the UI reflects reality.
        data = self.send_command("getImageSetting")
        if data == "":
            self.my_log("Failed to get settingsdata", "WARNING")
            return
        try:
            pic_settings = untangle.parse(data)
            brightness = pic_settings.CGI_Result.brightness.cdata
            contrast = pic_settings.CGI_Result.contrast.cdata
            hue = pic_settings.CGI_Result.hue.cdata
            saturation = pic_settings.CGI_Result.saturation.cdata
            sharpness = pic_settings.CGI_Result.sharpness.cdata
            self.call_service("input_slider/select_value", entity_id=(self.brightness_slider), value=brightness)
            self.call_service("input_slider/select_value", entity_id=(self.contrast_slider), value=contrast)
            self.call_service("input_slider/select_value", entity_id=(self.hue_slider), value=hue)
            self.call_service("input_slider/select_value", entity_id=(self.saturation_slider), value=saturation)
            self.call_service("input_slider/select_value", entity_id=(self.sharpness_slider), value=sharpness)
        except:
            self.my_log("image setting got wrong data", "WARNING")
    def snap_picture(self, entity, attribute, old, new, kwargs):
        """Save a timestamped camera snapshot to disk when the snap switch turns on.

        The switch is flipped back off afterwards so it acts like a button.
        """
        if new == "on":
            savetime = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            try:
                urllib.request.urlretrieve(self.url + "snapPicture2",self.save_snap_dir + savetime + ".jpg")
            except:
                self.my_log("Failed to snap picture", "WARNING")
            self.turn_off(entity)
    def reset_after_a_second(self, kwargs):
        """Timer callback: snap the pan/tilt slider back to its 0 rest position."""
        self.call_service("input_slider/select_value", entity_id=kwargs["entityname"], value="0")
def send_command(self, command):
try:
with urlopen(self.url + command, timeout=10) as response:
data = response.read().decode()
except timeout:
self.my_log(" Camera took more then 10 seconds", "WARNING")
return ""
if "<result>0</result>" in data:
self.my_log(" Camera state ok", "INFO")
return data
elif "<result>-1</result>" in data:
self.my_log(" String format error", "WARNING")
self.log(self.url + command)
return ""
elif "<result>-2</result>" in data:
self.my_log(" Username or password error", "WARNING")
return ""
elif "<result>-3</result>" in data:
self.my_log(" Access denied", "WARNING")
return ""
elif "<result>-4</result>" in data:
self.my_log(" CGI execute failed", "WARNING")
return ""
elif "<result>-5</result>" in data:
self.my_log(" Timeout", "WARNING")
return ""
else:
self.my_log(" Unknown error", "WARNING")
return ""
    def toondash(self, entity, attribute, old, new, kwargs):
        """On an alarm state, pop up the alarm dashboard, rate-limited.

        A new popup is only triggered when more than ``time_between_shows``
        seconds have passed since the last one.
        """
        timegoneby = datetime.datetime.now() - self.lastshown
        if new == "Alarm!" and timegoneby > datetime.timedelta(seconds=self.dashboardsettings["time_between_shows"]):
            self.dash_navigate(self.dashboardsettings["alarm_dashboard_file_name"], self.dashboardsettings["show_time"])
            self.lastshown = datetime.datetime.now()
    def my_log(self, message, level = "INFO"):
        """Log *message* and mirror it to the last-error sensor when severe enough.

        Two independent thresholds apply: ``loglevel`` gates the AppDaemon log
        and ``logsensorlevel`` gates the HA sensor update (prefixed with the
        current time).
        """
        self.last_error = message
        if self.loglevels[level] >= self.loglevels[self.loglevel]:
            self.log(self.last_error, level = level)
        if self.loglevels[level] >= self.loglevels[self.logsensorlevel]:
            self.set_state(self.last_error_sensor, state = self.time().strftime("%H:%M:%S") + self.last_error)
    def create_dashboard(self):
        """Generate the main HADashboard .dash file for this camera.

        Widget dimensions are derived from the configured screen size so the
        10x6 grid (minus margins) fills the screen.  Any failure is logged but
        never raised, so the app keeps running without a dashboard.
        """
        try:
            with open(self.config["HADashboard"]["dash_dir"] + "/" + self.dashboardsettings["dashboard_file_name"] + ".dash", 'w') as dashboard:
                screenwidth = self.dashboardsettings["screen_width"]
                screenheight = self.dashboardsettings["screen_height"]
                # 22/14 px total margins over 10 columns / 6 rows of widgets.
                widgetwidth = round((screenwidth - 22) / 10)
                widgetheight = round((screenheight - 14) / 6)
                # The dashboard is emitted line by line: header, layout grid,
                # then one widget definition per camera entity.
                dashboardlines = [
                    'title: camera',
                    'widget_dimensions: [' + str(widgetwidth) + ', ' + str(widgetheight) + ']',
                    'widget_size: [1,1]',
                    'widget_margins: [2, 2]',
                    'columns: 10',
                    'global_parameters:',
                    '  use_comma: 1',
                    '  precision: 0',
                    '  use_hass_icon: 1',
                    '',
                    'layout:',
                    '  - my_camera(7x4), ' + self.saturation_slider + '(2x1), ' + self.flip_switch,
                    '  - ' + self.contrast_slider + '(2x1), ' + self.mirror_switch,
                    '  - ' + self.brightness_slider + '(2x1), ' + self.auto_infrared_switch,
                    '  - ' + self.hue_slider + '(2x1), ' + self.infrared_switch,
                    '  - ' + self.left_right_slider + '(2x1), ' + self.zoom_slider + '(1x2), ' + self.preset_points_select + '(2x1), ' + self.motion_sensor + ', ' + self.default_pic_settings_switch + ',' + self.sharpness_slider + '(2x1), ' + self.motion_switch,
                    '  - ' + self.up_down_slider + '(2x1), ' + self.start_cruise_select + '(2x1),' + self.recording_sensor + ', ' + self.soundalarm_sensor + ', ' + self.last_error_sensor + '(2x1), ' + self.snap_picture_switch,
                    '',
                    'my_camera:',
                    '  widget_type: camera',
                    '  entity_picture: ' + self.config["HASS"]["ha_url"] + '/api/camera_proxy_stream/camera.' + self.camera_name + '?api_password=' + self.config["HASS"]["ha_key"],
                    '  title: ' + self.camera_name,
                    '  refresh: 120',
                    self.recording_sensor + ':',
                    '  widget_type: sensor',
                    '  entity: ' + self.recording_sensor,
                    '  title: Recording',
                    self.motion_sensor + ':',
                    '  widget_type: sensor',
                    '  entity: ' + self.motion_sensor,
                    '  title: Motion',
                    self.soundalarm_sensor + ':',
                    '  widget_type: sensor',
                    '  entity: ' + self.soundalarm_sensor,
                    '  title: Sound alarm',
                    self.saturation_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.saturation_slider,
                    '  title: Saturation',
                    self.contrast_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.contrast_slider,
                    '  title: Contrast',
                    self.brightness_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.brightness_slider,
                    '  title: Brightness',
                    self.hue_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.hue_slider,
                    '  title: Hue',
                    self.sharpness_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.sharpness_slider,
                    '  title: Sharpness',
                    self.left_right_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.left_right_slider,
                    '  title: Move Left',
                    '  title2: Move Right',
                    self.up_down_slider + ':',
                    '  widget_type: new_input_slider',
                    '  entity: ' + self.up_down_slider,
                    '  title: Move Up(right)',
                    '  title2: Move Down(left)',
                    self.zoom_slider + ':',
                    '  widget_type: vertical_input_slider',
                    '  entity: ' + self.zoom_slider,
                    '  title: Zoom',
                ]
                for line in dashboardlines:
                    dashboard.write(line + '\n')
        except IOError as e:
            self.log("I/O error({0}): {1} : dashboard couldnt be written".format(e.errno, e.strerror),"ERROR")
            self.log("tried to write: " + self.config["HADashboard"]["dash_dir"] + "/" + self.dashboardsettings["dashboard_file_name"] + ".dash","ERROR")
        except TypeError:
            self.log("one of the arguments has the wrong type","ERROR")
        except ValueError:
            self.log("width or height isnt given as a correct integer","ERROR")
        except:
            self.log("unexpected error: dashboard couldnt be written", "ERROR")
            self.log("tried to write: " + self.config["HADashboard"]["dash_dir"] + "/" + self.dashboardsettings["dashboard_file_name"] + ".dash","ERROR")
    def create_alarm_dashboard(self):
        """Generate the full-screen alarm .dash file (a single 10x6 camera widget).

        Uses the same screen-size-derived widget dimensions as the main
        dashboard; failures are logged and swallowed.
        """
        try:
            with open(self.config["HADashboard"]["dash_dir"] + "/" + self.dashboardsettings["alarm_dashboard_file_name"] + ".dash", 'w') as dashboard:
                screenwidth = self.dashboardsettings["screen_width"]
                screenheight = self.dashboardsettings["screen_height"]
                widgetwidth = round((screenwidth - 22) / 10)
                widgetheight = round((screenheight - 14) / 6)
                dashboardlines = [
                    'title: camera',
                    'widget_dimensions: [' + str(widgetwidth) + ', ' + str(widgetheight) + ']',
                    'widget_size: [1,1]',
                    'widget_margins: [2, 2]',
                    'columns: 10',
                    'global_parameters:',
                    '  use_comma: 1',
                    '  precision: 0',
                    '  use_hass_icon: 1',
                    '',
                    'layout:',
                    '  - my_camera(10x6)',
                    '',
                    'my_camera:',
                    '  widget_type: camera',
                    '  entity_picture: ' + self.config["HASS"]["ha_url"] + '/api/camera_proxy_stream/camera.' + self.camera_name + '?api_password=' + self.config["HASS"]["ha_key"],
                    '  title: ' + self.camera_name,
                ]
                for line in dashboardlines:
                    dashboard.write(line + '\n')
        except IOError as e:
            self.log("I/O error({0}): {1} : dashboard couldnt be written".format(e.errno, e.strerror),"ERROR")
            self.log("tried to write: " + self.config["HADashboard"]["dash_dir"] + "/" + self.dashboardsettings["alarm_dashboard_file_name"] + ".dash")
        except TypeError:
            self.log("one of the arguments has the wrong type","ERROR")
        except ValueError:
            self.log("width or height isnt given as a correct integer","ERROR")
        except:
            self.log("unexpected error: dashboard couldnt be written", "ERROR")
            self.log("tried to write: " + self.config["HADashboard"]["dash_dir"] + "/" + self.dashboardsettings["alarm_dashboard_file_name"] + ".dash")
| [
"noreply@github.com"
] | noreply@github.com |
b4732a7f6c96f3017dae541e6ef9294eb8632c9c | 6982c3c54ee9199d93fb89c61cfdcba15b9b7012 | /python3_cookbook/chapter08/demo02.py | 85770265aaaf5341ddc89a3e76168dd08817c360 | [] | no_license | gzgdouru/python_study | a640e1097ebc27d12049ded53fb1af3ba9729bac | e24b39e82e39ee5a5e54566781457e18c90a122a | refs/heads/master | 2020-03-29T11:33:13.150869 | 2019-03-08T09:24:29 | 2019-03-08T09:24:29 | 149,858,658 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | '''
自定义字符串的格式化
'''
# Named layouts usable as format() specifiers on Date instances.
_formats = {
    'ymd': '{d.year}-{d.month}-{d.day}',
    'mdy': '{d.month}/{d.day}/{d.year}',
    'dmy': '{d.day}/{d.month}/{d.year}'
}


class Date:
    """Simple date holder that supports custom format() specifiers via _formats."""

    def __init__(self, year, month, day):
        self.year = year
        self.month = month
        self.day = day

    def __format__(self, format_spec):
        # An empty spec falls back to the default 'ymd' layout.
        spec = format_spec or "ymd"
        return _formats[spec].format(d=self)
if __name__ == "__main__":
    # Demo: render the same date in every supported layout.
    demo_date = Date(2019, 1, 17)
    outputs = (
        format(demo_date),
        format(demo_date, 'dmy'),
        "this date is {:ymd}".format(demo_date),
        "this date is {:dmy}".format(demo_date),
    )
    for rendered in outputs:
        print(rendered)
| [
"18719091650@163.com"
] | 18719091650@163.com |
8d00fdfee6c74af3a17a7795f687be123a3c4aca | ce42c96065ee487c9715d8dacc4b5be78e68ca55 | /Optimization/nnOptimizationNew.py | 6ece5dc48c2f4edf04f341465f4d220ccc78ca7a | [] | no_license | farhan-toddywala/VisualOdometry | 091880fa01d56d83294ce49990720691f63fa602 | a754e2ea79c3afa9f5daebd7b2c9600095ffab7d | refs/heads/master | 2021-01-06T10:54:51.403919 | 2020-02-18T08:29:54 | 2020-02-18T08:29:54 | 241,303,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,541 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import cv2
import seaborn as sns
# Load the four point-patch datasets: the same base points with and without
# noise (X1/X2) and the same points before/after camera motion (X3/X4).
X1 = np.loadtxt("data/base_point_no_noise.txt")
X2 = np.loadtxt("data/base_point_noise.txt")
X3 = np.loadtxt("data/point_before_moving.txt")
X4 = np.loadtxt("data/point_after_moving.txt")
# Drop column 112 — presumably the centre element of the flattened 15x15
# patch, which is always zero after centre subtraction; TODO confirm.
X1 = np.delete(X1, 112, axis = 1)
X2 = np.delete(X2, 112, axis = 1)
X3 = np.delete(X3, 112, axis = 1)
X4 = np.delete(X4, 112, axis = 1)
#X1 = np.concatenate([X1, np.ones((X1.shape[0], 1))], axis = 1)
#X2 = np.concatenate([X2, np.ones((X2.shape[0], 1))], axis = 1)
#print(X1.shape)
#print(X2.shape)
#print(X3.shape)
#print(X4.shape)
# Shuffle each paired set with the SAME permutation so rows stay aligned.
p = np.random.permutation(len(X1))
X1 = X1[p]
X2 = X2[p]
p = np.random.permutation(len(X3))
X3 = X3[p]
X4 = X4[p]
# 75/25 train/test split.
X1tr, X2tr, X3tr, X4tr = X1[:int(0.75 * len(X1))],X2[:int(0.75 * len(X2))], X3[:int(0.75 * len(X3))],X4[:int(0.75 * len(X4))]
X1te, X2te, X3te, X4te = X1[int(0.75 * len(X1)):],X2[int(0.75 * len(X2)):], X3[int(0.75 * len(X3)):],X4[int(0.75 * len(X4)):]
def shrink(v, lam):
    """Soft-thresholding operator: move each entry of v toward zero by lam.

    Entries with |v| <= lam become exactly zero; others keep their sign.
    """
    magnitude = np.maximum(np.abs(v) - lam, 0)
    return np.sign(v) * magnitude
# Hyperparameters for the sparse two-layer detector (proximal gradient with
# L1 shrinkage and per-step renormalisation of the weights).
max_iter = 1000
lambda_ = 0.0005 #0.00025 -> great
w = np.ones(X1tr.shape[1])#np.random.normal(loc = 0, scale = 1, size = X1tr.shape[1])
w = w / np.linalg.norm(w)
# Gram matrices of the pairwise differences; their largest eigenvalue sets
# the step sizes (a Lipschitz-style bound).
X_1 = (X1tr - X2tr).T @ (X1tr - X2tr)
X_3 = (X3tr - X4tr).T @ (X3tr - X4tr)
alpha_1 = 0.1 / np.max(np.linalg.eig(X_1)[0]) #0.035 / len(X1tr) #0.035 -> great
alpha_3 = 0.1 / np.max(np.linalg.eig(X_3)[0])
loss = []
epsilon = 0.001
k = 3
W1 = np.random.normal(loc = 0, scale = 1, size = (X1.shape[1], k))
W1 = W1 / np.linalg.norm(W1)
W2 = np.random.normal(loc = 0, scale = 1, size = (k,))
W2 = W2 / np.linalg.norm(W2)
w1prev = np.zeros(W1.shape)
w2prev = np.zeros(W2.shape)
for i in range(max_iter):
    # NOTE(review): the forward passes below use the FULL sets X1..X4, not the
    # X*tr training splits computed above — confirm whether that is intended.
    linear1_move1 = X1 @ W1
    linear1_move2 = X2 @ W1
    relu_move1 = np.maximum(linear1_move1, 0)
    relu_move2 = np.maximum(linear1_move2, 0)
    linear2_move1 = relu_move1 @ W2
    linear2_move2 = relu_move2 @ W2
    linear1_noise1 = X1 @ W1
    linear1_noise2 = X2 @ W1
    relu_noise1 = np.maximum(linear1_noise1, 0)
    relu_noise2 = np.maximum(linear1_noise2, 0)
    linear2_noise1 = relu_noise1 @ W2
    linear2_noise2 = relu_noise2 @ W2
    # NOTE(review): these chained assignments set mask1..mask4 to the SCALAR 1
    # and mutate the relu_* arrays in place into 0/1 masks.  The gradients
    # below therefore use masks where activations were likely intended —
    # consider mask = (relu > 0).astype(float) instead; verify intent.
    mask1 = relu_move1[relu_move1 > 0] = 1
    mask2 = relu_move2[relu_move2 > 0] = 1
    mask3 = relu_noise1[relu_noise1 > 0] = 1
    mask4 = relu_noise2[relu_noise2 > 0] = 1
    W2_grad = alpha_1 * relu_move1.T @ relu_move1 @ W2 + alpha_3 * relu_noise1.T @ relu_noise1 @ W2
    W1_grad = alpha_1 * np.sum(X1[:, :, np.newaxis] @ (W2 * mask1).reshape((1, k)), axis = 0) - alpha_1 * np.sum(X2[:, :, np.newaxis] @ (W2 * mask2).reshape((1, k)), axis = 0) + alpha_3 * np.sum(X3[:, :, np.newaxis] @ (W2 * mask3).reshape((1, k)), axis = 0)+ alpha_3 * np.sum(X4[:, :, np.newaxis] @ (W2 * mask4).reshape((1, k)), axis = 0)
    # Proximal step: gradient descent then L1 shrinkage, then renormalise.
    W1 = shrink(W1 - W1_grad, lambda_)
    W1 = W1 / np.linalg.norm(W1)
    W2 = shrink(W2 - W2_grad, lambda_)
    W2 = W2 / np.linalg.norm(W2)
    # Stop once both weight matrices have converged.
    if (np.linalg.norm(W1 - w1prev) < epsilon and np.linalg.norm(W2 - w2prev) < epsilon):
        break
    w1prev = W1.copy()
    w2prev = W2.copy()
    loss.append(lambda_ * np.linalg.norm(W1, ord = 1) + lambda_ * np.linalg.norm(W2, ord = 1) + (1 / len(linear2_move1)) * np.linalg.norm(linear2_move1 - linear2_move2) + (1 / len(linear2_noise1)) * np.linalg.norm(linear2_noise1 - linear2_noise2))
plt.plot(loss)
plt.show()
# NOTE(review): `w` is the untrained unit vector from above, so this sparsity
# count does not reflect the learned weights — confirm it should use W1/W2.
print("sparsity:", len(np.where(w > epsilon)[0]))
# NOTE(review): bare `print` is a no-op expression in Python 3 — likely meant
# `print()` for a blank line.
print
# Load KITTI sequence 04: ground-truth poses, two consecutive grayscale
# frames, their rotation blocks, and the calibration file.
img_id = 80
#U, S, Vt = np.linalg.svd(X)
#w = U[:,-1]
X = np.loadtxt('/Users/farhantoddywala/desktop/dataset2/poses/04.txt')
X = X.reshape((X.shape[0], 3, 4))
img1 = cv2.imread('/Users/farhantoddywala/desktop/dataset/sequences/04/image_0/'+str(img_id).zfill(6)+'.png', 0)
img2 = cv2.imread('/Users/farhantoddywala/desktop/dataset/sequences/04/image_0/'+str(img_id + 1).zfill(6)+'.png', 0)
R1 = X[img_id][:,:3]
R2 = X[img_id + 1][:,:3]
K = np.loadtxt('/Users/farhantoddywala/desktop/dataset/sequences/04/calib.txt')
# NOTE(review): this overwrites the trained (225, k) matrix W1 with a 1-D
# vector derived from the untrained `w`; the later `W2 @ np.maximum(W1.T @
# tmp, 0)` expression expects a matrix W1 — verify which weights are intended.
W1 = np.insert(w, 112, 0, axis = 0)
def corresponding(x1, R1, R2, K):
    """Map a homogeneous pixel seen under pose R1 to its location under pose R2.

    Back-projects through the intrinsics K, swaps the rotation, and
    re-projects: K @ R2 @ pinv(R1) @ pinv(K) @ x1.
    """
    back_projected = np.linalg.pinv(K) @ x1
    reoriented = R2 @ np.linalg.pinv(R1) @ back_projected
    return K @ reoriented
def kp_to_homog(kp):
    """Return the set of truncated integer (x, y) coordinates of OpenCV keypoints.

    Each keypoint's float ``pt`` is cast element-wise to int.  The original
    version also accumulated a homogeneous-coordinate list that was never
    returned or used; that dead accumulator has been removed.
    """
    return {tuple(int(c) for c in marker.pt) for marker in kp}
# --- Noise-free evaluation: compare the learned detector against FAST. ---
fast = cv2.FastFeatureDetector_create(25)
offset = 7
# Score every interior pixel of img1 with the learned detector: the 15x15
# patch (centre-subtracted, /255) is pushed through the two-layer net.
interesting = np.zeros(img1.shape)
values = []
for i in range(offset, img1.shape[0] - offset):
    for j in range(offset, img1.shape[1] - offset):
        tmp = (img1[i - offset : i + offset + 1, j - offset : j + offset + 1] - img1[i][j]).flatten() / 255
        interesting[i][j] = W2 @ np.maximum(W1.T @ tmp, 0)
        values.append(interesting[i][j])
# Keep only the top 0.5% of responses.
thres = sorted(values)[int(0.995 * len(values))]
interesting[interesting < thres] = 0
# Same scoring for img2, counting how many pixels clear the img1 threshold.
interesting2 = np.zeros(img1.shape)
cnt = 0
for i in range(offset, img1.shape[0] - offset):
    for j in range(offset, img1.shape[1] - offset):
        tmp = (img2[i - offset : i + offset + 1, j - offset : j + offset + 1] - img2[i][j]).flatten() / 255
        interesting2[i][j] = W2 @ np.maximum(W1.T @ tmp, 0)
        if (interesting2[i][j] >= thres):
            cnt += 1
interesting2[interesting2 < thres] = 0
#plt.imshow(interesting, cmap = 'gray')
#plt.show()
#plt.imshow(interesting2, cmap = 'gray')
#plt.show()
coordinates1_true = []
coordinates_pred = []
corr = []
corrkp = []
kp1 = fast.detect(img1, None)
kp2 = fast.detect(img2, None)
MSE = 0
s1 = kp_to_homog(kp1)
s2 = kp_to_homog(kp2)
# Tolerance window (in pixels) when matching a projected point in frame 2.
window = 4
for i in range(offset, img1.shape[0] - offset):
    for j in range(offset, img1.shape[1] - offset):
        # FAST repeatability: does a frame-1 corner reappear near its
        # pose-predicted location in frame 2?
        if ((i, j) in s1):
            c2 = corresponding(np.array([i, j, 1]), R1, R2, K)
            c2 = (c2 / c2[2]).astype("int")
            worked2 = 0
            for n in range(-window, window + 1):
                for m in range(-window, window + 1):
                    if ((c2[0] - n, c2[1] - m) in s2):
                        worked2 = 1
            corrkp.append(worked2)
        # Same repeatability test for the learned detector's points.
        if (interesting[i][j] >= thres):
            coor = np.array([i, j, 1])
            coordinates1_true.append(coor)
            coor2 = corresponding(coor, R1, R2, K)
            coor2 = (coor2 / coor2[2]).astype("int")
            worked = 0
            for n in range(-window, window + 1):
                for m in range(-window, window + 1):
                    if (interesting2[coor2[0] + n][coor2[1] + m] >= thres):
                        worked = 1
                    if (n == 0 and m == 0):
                        MSE += (interesting[i][j] - interesting2[i][j]) ** 2
            corr.append(worked)
        if (interesting2[i][j] >= thres):
            coor = np.array([i, j, 1])
            coordinates_pred.append(coor)
MSE = MSE / len(coordinates1_true)
print("MSE no noise:", MSE)
print("number of interesting points in image 1:", int(0.005 * len(values)))
print("number of interesting points in image 2:", cnt)
print("percentage of interesting points moved: " + str(100 * sum(corr)/len(corr)) + "%")
coordinates1_true = np.array(coordinates1_true)
corr = np.array(corr)
coordinates_pred = np.array(coordinates_pred)
print("number of corners in image 1:", len(kp1))
print("number of corners image 2:", len(kp2))
print("percentage of corners moved: " + str(100 * sum(corrkp)/len(corrkp)) + "%")
print()
'''
plt.imshow(interesting, cmap = "hot")
plt.show()
plt.imshow(interesting2, cmap = "hot")
plt.show()
'''
sns.heatmap(interesting)
sns.heatmap(interesting2)
def test_between_images_with_noise(img1, img2, R1, R2, K, noise, thres, w, old1, old2):
    """Repeat the detector-vs-FAST evaluation after adding Gaussian noise.

    Adds N(0, noise) to both frames (clipped to uint8), rescoring every pixel
    with the global W1/W2 detector, and reports repeatability plus the MSE of
    the noisy response maps against the noise-free maps *old1*/*old2*.
    NOTE(review): the *w* parameter is unused — the function reads the global
    W1/W2 instead; confirm whether it should be used.
    """
    img1 = img1 + np.random.normal(loc = 0, scale = noise, size = img1.shape)
    img2 = img2 + np.random.normal(loc = 0, scale = noise, size = img1.shape)
    # Clip back into valid pixel range before casting to uint8.
    img1[img1 > 255] = 255
    img1[img1 < 0] = 0
    img1 = img1.astype("uint8")
    img2[img2 > 255] = 255
    img2[img2 < 0] = 0
    img2 = img2.astype("uint8")
    offset = 7
    # Score img1's interior pixels with the learned detector.
    interesting = np.zeros(img1.shape)
    cnt2 = 0
    for i in range(offset, img1.shape[0] - offset):
        for j in range(offset, img1.shape[1] - offset):
            tmp = (img1[i - offset : i + offset + 1, j - offset : j + offset + 1] - img1[i][j]).flatten() / 255
            interesting[i][j] = W2 @ np.maximum(W1.T @ tmp, 0)
            if (interesting[i][j] >= thres):
                cnt2 += 1
    interesting[interesting < thres] = 0
    interesting2 = np.zeros(img1.shape)
    cnt = 0
    for i in range(offset, img1.shape[0] - offset):
        for j in range(offset, img1.shape[1] - offset):
            tmp = (img2[i - offset : i + offset + 1, j - offset : j + offset + 1] - img2[i][j]).flatten() / 255
            interesting2[i][j] = W2 @ np.maximum(W1.T @ tmp, 0)
            if (interesting2[i][j] >= thres):
                cnt += 1
    interesting2[interesting2 < thres] = 0
    #plt.imshow(interesting, cmap = 'gray')
    #plt.show()
    #plt.imshow(interesting2, cmap = 'gray')
    #plt.show()
    coordinates1_true = []
    coordinates_pred = []
    corr = []
    corrkp = []
    kp1 = fast.detect(img1, None)
    kp2 = fast.detect(img2, None)
    MSE = 0
    s1 = kp_to_homog(kp1)
    s2 = kp_to_homog(kp2)
    for i in range(offset, img1.shape[0] - offset):
        for j in range(offset, img1.shape[1] - offset):
            # FAST repeatability under noise (uses the module-level `window`).
            if ((i, j) in s1):
                c2 = corresponding(np.array([i, j, 1]), R1, R2, K)
                c2 = (c2 / c2[2]).astype("int")
                worked2 = 0
                for n in range(-window, window + 1):
                    for m in range(-window, window + 1):
                        if ((c2[0] - n, c2[1] - m) in s2):
                            worked2 = 1
                corrkp.append(worked2)
            # Learned-detector repeatability under noise.
            if (interesting[i][j] >= thres):
                coor = np.array([i, j, 1])
                coordinates1_true.append(coor)
                coor2 = corresponding(coor, R1, R2, K)
                coor2 = (coor2 / coor2[2]).astype("int")
                worked = 0
                for n in range(-window, window + 1):
                    for m in range(-window, window + 1):
                        if (interesting2[coor2[0] + n][coor2[1] + m] >= thres):
                            worked = 1
                        if (n == 0 and m == 0):
                            MSE += (interesting[i][j] - interesting2[i][j]) ** 2
                corr.append(worked)
            if (interesting2[i][j] >= thres):
                coor = np.array([i, j, 1])
                coordinates_pred.append(coor)
    MSE = MSE / len(coordinates1_true)
    print("MSE noise", noise, ":", MSE)
    print("number of interesting points in image 1:", cnt2)
    print("number of interesting points in image 2:", cnt)
    print("percentage of interesting points moved: " + str(100 * sum(corr)/len(corr)) + "%")
    coordinates1_true = np.array(coordinates1_true)
    corr = np.array(corr)
    coordinates_pred = np.array(coordinates_pred)
    print("number of corners in image 1:", len(kp1))
    print("number of corners image 2:", len(kp2))
    print("percentage of corners moved: " + str(100 * sum(corrkp)/len(corrkp)) + "%")
    # Compare the noisy response maps against the noise-free baselines, only
    # over the pixels that survive thresholding in both.
    interesting[old1 == 0] = 0
    interesting2[old2 == 0] = 0
    print("MSE vs non-noisy version of image 1:", (np.linalg.norm(interesting - old1) ** 2) / np.count_nonzero(interesting))
    print("MSE vs non-noisy version of image 2:", (np.linalg.norm(interesting2 - old2) ** 2) / np.count_nonzero(interesting2))
    print()
# Sweep Gaussian noise levels to measure detector robustness.
for noise in [5, 15, 25, 35, 45, 55, 65, 75, 85, 95]:
    test_between_images_with_noise(img1, img2, R1, R2, K, noise, thres, w, interesting, interesting2)
| [
"noreply@github.com"
] | noreply@github.com |
e6aaeb095bc5f80a528def54a281d8534449f9a1 | a1566f5733611a930c82990e4c7c5d05664c1eb0 | /jspy1/test.py | 8809144780e3fc2a8e4db10d0a0ba8223aaefaa0 | [] | no_license | sjbrown/misc_work | 95e7020c9fb0a2fe840c7a4e25bf30a30b26aaea | 4215e3d612811426686b1a90db4f975052c4023d | refs/heads/master | 2022-10-21T02:05:04.928161 | 2022-10-12T23:43:56 | 2022-10-12T23:43:56 | 18,151,228 | 5 | 3 | null | 2021-05-29T21:47:17 | 2014-03-26T19:38:07 | Python | UTF-8 | Python | false | false | 47 | py | import os
def foo():
return False
foo()
| [
"github@ezide.com"
] | github@ezide.com |
85135c0dda32ef85af2237a0e39b66d110e0c90d | a7f47db2b4889a1a3528c4729a5b87388b932b1c | /2014201426/paper_detect_w2v/src/getSegment.py | 08405c0ce67034d6a06a3d24a2485129a471e040 | [] | no_license | fanyuqing111/ai-project-2016 | 4dca9e8c48d041e7a651556d41ff3ae09d7c0438 | 1c81933a94d0d869a7f0085c8925fca69a721f52 | refs/heads/master | 2021-12-21T15:17:11.458947 | 2017-09-29T13:50:03 | 2017-09-29T13:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | from utils import *
import os,sys
import math
traindir = "TXT-final/True/"
testdir = "TXT-final/False/"
files = os.listdir(traindir)
cnt = 0
for f in files:
cnt+=1
if cnt%100==0:
print cnt
make_seg(traindir+f,Dir='segment/True/')
files = os.listdir(testdir)
cnt=0
for f in files:
cnt+=1
if cnt%100==0:
print cnt
make_seg(testdir+f,Dir='segment/False/')
files = os.listdir('segment/True/Train/')
cnt=0
ff = open("Train/True/segment.dat",'w')
for f in files:
cnt+=1
if cnt%100==0:
print cnt
f2 = open('segment/True/Train/'+f,'r')
ff.write(f2.readline())
f2.close()
ff.close()
files = os.listdir('segment/False/Train/')
cnt=0
ff = open("Train/False/segment.dat",'w')
for f in files:
cnt+=1
if cnt%100==0:
print cnt
f2 = open('segment/False/Train/'+f,'r')
ff.write(f2.readline())
f2.close()
ff.close()
files = os.listdir('segment/True/Test/')
cnt=0
ff = open("Test/True/segment.dat",'w')
for f in files:
cnt+=1
if cnt%100==0:
print cnt
f2 = open('segment/True/Test/'+f,'r')
ff.write(f2.readline())
f2.close()
ff.close()
files = os.listdir('segment/False/Test/')
cnt=0
ff = open("Test/False/segment.dat",'w')
for f in files:
cnt+=1
if cnt%100==0:
print cnt
f2 = open('segment/False/Test/'+f,'r')
ff.write(f2.readline())
f2.close()
ff.close()
| [
"oysq@ruc.edu.cn"
] | oysq@ruc.edu.cn |
3033e6d1836363f7bb5afdf44a0c0c1d5e093bf0 | ad372f7753c70e3997d035097ee03f740a5fb068 | /pygym/custom_storage.py | 83d139b7b163479af1d7152929f4ca060d13b04d | [] | no_license | Insper/servidor-de-desafios | a5f09fe9368887b06b98800f2bb8f35ff13f80a9 | 9875e9b9248c14237161ca73983595f7d929e963 | refs/heads/master | 2022-12-14T17:28:42.963112 | 2022-09-12T19:18:36 | 2022-09-12T19:18:36 | 167,026,050 | 3 | 42 | null | 2022-12-08T07:36:47 | 2019-01-22T16:19:46 | Python | UTF-8 | Python | false | false | 1,053 | py | # Source: https://github.com/druids/django-chamber/blob/master/chamber/storages/boto3.py
from django.core.files.base import ContentFile
from storages.backends.s3boto3 import S3Boto3Storage
def force_bytes_content(content, blocksize=1024):
    """Return ``(content, was_cast)`` where *content* is guaranteed to yield bytes.

    Reads a probe block of up to *blocksize* to sniff whether the file-like
    object produces text; text content is re-encoded (using its ``encoding``
    attribute, defaulting to UTF-8) and wrapped in a ``ContentFile``.
    """
    probe = content.read(blocksize)
    content.seek(0)
    if isinstance(probe, bytes):
        return content, False
    encoding = getattr(content, 'encoding', None)
    if encoding is None:
        encoding = 'utf-8'
    return ContentFile(bytes(content.read(), encoding)), True
class MediaStorage(S3Boto3Storage):
    """S3 storage backend writing media files to the softdes-static bucket."""

    bucket_name = 'softdes-static'
    location = 'media'

    def _clean_name(self, name):
        # Accept pathlib.Path objects by stringifying before delegating.
        return super()._clean_name(str(name))

    def save(self, name, content, max_length=None):
        # Ensure the payload yields bytes before handing it to boto3.
        coerced, _ = force_bytes_content(content)
        return super().save(name, coerced, max_length)
| [
"andrew.kurauchi@gmail.com"
] | andrew.kurauchi@gmail.com |
84a51f1c66522d2338158587e627aa28ee1c0298 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /pyDemMDspSSFdWsh4_2.py | b7cb001bbe31b50d1e5bc5559aa8530b83963619 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py |
def digital_decipher(eMessage, key):
    """Decode a sequence of shifted letter codes using a repeating digit key.

    Each element of *eMessage* is reduced by the corresponding digit of *key*
    (cycling through the key's digits) and mapped to a lowercase letter, where
    1 -> 'a', 2 -> 'b', etc.
    """
    key_digits = str(key)
    decoded = []
    for pos, digit in enumerate(eMessage):
        shift = int(key_digits[pos % len(key_digits)])
        decoded.append(chr(int(digit) - shift + 96))
    return "".join(decoded)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
cef2087d11823bc458cbf93cd06433c58f41febc | b980c3c7e47c970e662f218782b6c8eac721ab70 | /job_web/handlers/job.py | a99dab80b6654a74a145620aa1ffa4bfc5990bc2 | [
"MIT"
] | permissive | csudragonzl/master | 24466d016a2cbb0a00daa96b77a4457c6125ca01 | e5ea6418ff9676b8c68bf4bbe14cdf06ccae27d8 | refs/heads/master | 2023-03-24T23:39:25.002815 | 2021-03-27T01:35:36 | 2021-03-27T01:35:36 | 351,474,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,890 | py | # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, abort,\
current_app, redirect, url_for, flash, request
from flask_login import current_user, login_required
from ..decorators import company_required
from ..models import Job, db, Delivery
from ..forms import EXP, JobForm
job = Blueprint('job', __name__, url_prefix='/job')
@job.route('/')
def index():
    """List enabled jobs, newest first, optionally filtered by keyword.

    Fix: the original built the filter with Python identity (`Job.is_enable
    is True`), which evaluates to the literal False instead of a SQL
    expression, and left a stray no-op `Job.is_enable==True` statement.
    """
    page = request.args.get('page', default=1, type=int)
    kw = request.args.get('kw')
    # Must be a SQLAlchemy expression (.is_()/==), never Python `is`.
    flt = [Job.is_enable.is_(True)]
    if kw:  # kw is None or str; empty string means "no keyword filter"
        flt.append(Job.name.like('%{}%'.format(kw)))
    pagination = Job.query.filter(*flt).order_by(
        Job.created_at.desc()).paginate(
        page=page,
        per_page=current_app.config['JOB_INDEX_PER_PAGE'],
        error_out=False
    )
    return render_template('job/index.html', pagination=pagination,
                           kw=kw, filter=EXP, active='job')
@job.route('/<int:job_id>')
def detail(job_id):
    """Show a job's detail page.

    Disabled jobs are hidden (404) from everyone except the owning company.
    """
    job_obj = Job.query.get_or_404(job_id)
    # NOTE(review): for anonymous visitors current_user.id may not exist —
    # presumably the login manager's anonymous user provides it; confirm.
    if not job_obj.is_enable and job_obj.company_id != current_user.id:
        abort(404)
    return render_template('job/detail.html', job=job_obj)
# Static event pages — each just renders a fixed template.
@job.route("/event_address", methods=['GET', 'POST'])
def event_address():
    return render_template("job/event_address.html")
@job.route("/event2", methods=['GET', 'POST'])
def event2():
    return render_template("job/event2.html")
@job.route("/event3", methods=['GET', 'POST'])
def event3():
    return render_template("job/event3.html")
@job.route("/event4", methods=['GET', 'POST'])
def event4():
    return render_template("job/event4.html")
@job.route('/<int:job_id>/apply', methods=['GET', 'POST'])
@login_required
def apply(job_id):
    """Submit the current user's resume for a job.

    Requires a logged-in regular user with an uploaded resume; duplicate
    applications to the same job are rejected with a warning.
    """
    job_obj = Job.query.get_or_404(job_id)
    # Companies/admins cannot apply to jobs.
    if not current_user.is_user():
        abort(404)
    if not current_user.resume:
        flash('请先上传简历', 'warning')
        return redirect(url_for('user.resume'))
    elif job_obj.is_applied():
        flash('已经投递过该职位', 'warning')
        return redirect(url_for('job.detail', job_id=job_id))
    # Snapshot the resume on the delivery record so later resume changes
    # don't alter past applications.
    delivery = Delivery(
        job_id=job_id,
        user_id=current_user.id,
        company_id=job_obj.company.id,
        resume=current_user.resume
    )
    db.session.add(delivery)
    db.session.commit()
    flash('简历投递成功', 'success')
    return redirect(url_for('job.detail', job_id=job_id))
@job.route('/create', methods=['GET', 'POST'])
@company_required
def create():
    """Company-only: create a new job posting owned by the current company."""
    form = JobForm()
    if form.validate_on_submit():
        company_id = current_user.id
        form.create_job(company_id)
        flash('职位创建成功', 'success')
        return redirect_job_index()
    return render_template('job/create.html', form=form, active='manage', panel='create')
@job.route('/<int:job_id>/edit', methods=['GET', 'POST'])
@company_required
def edit(job_id):
    """Edit a job posting; companies may edit only their own, admins any."""
    job_obj = Job.query.get_or_404(job_id)
    if job_obj.company_id != current_user.id and not current_user.is_admin():
        abort(404)
    # Pre-populate the form with the existing job on GET.
    form = JobForm(obj=job_obj)
    if form.validate_on_submit():
        form.update_job(job_obj)
        flash('职位更新成功', 'success')
        return redirect_job_index()
    return render_template('job/edit.html', form=form, job_id=job_id)
@job.route('/<int:job_id>/delete', methods=['GET', 'POST'])
@company_required
def delete(job_id):
    """Permanently delete a job; companies may delete only their own, admins any."""
    job_obj = Job.query.get_or_404(job_id)
    if job_obj.company_id != current_user.id and not current_user.is_admin():
        abort(404)
    db.session.delete(job_obj)
    db.session.commit()
    flash('职位删除成功', 'success')
    return redirect_job_index()
@job.route('<int:job_id>/disable')
@company_required
def disable(job_id):
    """Take a job offline (hide from the public listing)."""
    job_obj = Job.query.get_or_404(job_id)
    # Only admins or the owning company may change job visibility.
    if not current_user.is_admin() and current_user.id != job_obj.company.id:
        abort(404)
    if not job_obj.is_enable:
        flash('职位已下线', 'warning')
    else:
        job_obj.is_enable = False
        db.session.add(job_obj)
        db.session.commit()
        flash('职位下线成功', 'success')
    return redirect_job_index()
@job.route('<int:job_id>/enable')
@company_required
def enable(job_id):
    """Put a job back online (show in the public listing)."""
    job_obj = Job.query.get_or_404(job_id)
    # Only admins or the owning company may change job visibility.
    if not current_user.is_admin() and current_user.id != job_obj.company.id:
        abort(404)
    if job_obj.is_enable:
        flash('职位已上线', 'warning')
    else:
        job_obj.is_enable = True
        db.session.add(job_obj)
        db.session.commit()
        flash('职位上线成功', 'success')
    return redirect_job_index()
def redirect_job_index():
    """Redirect to the role-appropriate job listing for the current user."""
    if current_user.is_admin():
        target = 'admin.job'
    elif current_user.is_company():
        target = 'company.jobs'
    else:
        target = 'front.index'
    return redirect(url_for(target))
| [
"1359622058@qq.com"
] | 1359622058@qq.com |
2e26315e5deace2c6c9e35f2b77d72e8ed8732f3 | 51c7482c02c1b6695ceebd6c4e011b6b3a7f01ab | /renku/service/serializers/cache.py | c42415245e1abbf01b30240f67310d3238e87eda | [
"Apache-2.0",
"Python-2.0"
] | permissive | cyberhck/renku-python | aa60c2204e2507a07aa11efd5ffdfb8ac49aef9d | 2e52dff9dd627c93764aadb9fd1e91bd190a5de7 | refs/heads/master | 2020-12-12T08:55:08.112921 | 2020-01-15T12:05:07 | 2020-01-15T12:05:07 | 234,093,844 | 0 | 0 | Apache-2.0 | 2020-01-15T14:06:13 | 2020-01-15T14:06:12 | null | UTF-8 | Python | false | false | 5,007 | py | # -*- coding: utf-8 -*-
#
# Copyright 2019 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku service cache serializers."""
import time
import uuid
from urllib.parse import urlparse
from marshmallow import Schema, ValidationError, fields, post_load, pre_load, \
validates
from werkzeug.utils import secure_filename
from renku.core.errors import ConfigurationError
from renku.core.models.git import GitURL
from renku.service.config import PROJECT_CLONE_DEPTH_DEFAULT
from renku.service.serializers.rpc import JsonRPCResponse
def extract_file(request):
    """Extract file from Flask request.

    :raises: `ValidationError`
    """
    uploaded_files = request.files
    if 'file' not in uploaded_files:
        raise ValidationError('missing key: file')

    uploaded = uploaded_files['file']
    if uploaded:
        if not uploaded.filename:
            raise ValidationError('wrong filename: {0}'.format(uploaded.filename))
        # Sanitize the client-supplied name before it touches the filesystem.
        uploaded.filename = secure_filename(uploaded.filename)

    return uploaded
class FileUploadRequest(Schema):
"""Request schema for file upload."""
override_existing = fields.Boolean(missing=False)
unpack_archive = fields.Boolean(missing=False)
class FileUploadContext(Schema):
"""Context schema for file upload."""
file_id = fields.String(missing=lambda: uuid.uuid4().hex)
# measured in ms
timestamp = fields.Integer(missing=time.time() * 1e+3)
content_type = fields.String(missing='unknown')
file_name = fields.String(required=True)
# measured in bytes (comes from stat() - st_size)
file_size = fields.Integer(required=True)
relative_path = fields.String(required=True)
is_archive = fields.Boolean(missing=False)
unpack_archive = fields.Boolean(missing=False)
class FileUploadResponse(Schema):
"""Response schema for file upload."""
files = fields.List(fields.Nested(FileUploadContext), required=True)
class FileUploadResponseRPC(JsonRPCResponse):
"""RPC response schema for file upload response."""
result = fields.Nested(FileUploadResponse)
class FileListResponse(Schema):
"""Response schema for files listing."""
files = fields.List(fields.Nested(FileUploadContext), required=True)
class FileListResponseRPC(JsonRPCResponse):
"""RPC response schema for files listing."""
result = fields.Nested(FileListResponse)
class ProjectCloneRequest(Schema):
"""Request schema for project clone."""
git_url = fields.String(required=True)
depth = fields.Integer(missing=PROJECT_CLONE_DEPTH_DEFAULT)
class ProjectCloneContext(ProjectCloneRequest):
"""Context schema for project clone."""
project_id = fields.String(missing=lambda: uuid.uuid4().hex)
name = fields.String(required=True)
fullname = fields.String(required=True)
email = fields.String(required=True)
owner = fields.String(required=True)
token = fields.String(required=True)
@validates('git_url')
def validate_git_url(self, value):
"""Validates git url."""
try:
GitURL.parse(value)
except ConfigurationError as e:
raise ValidationError(str(e))
return value
@post_load()
def format_url(self, data, **kwargs):
"""Format URL with username and password."""
git_url = urlparse(data['git_url'])
url = 'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)
data['url_with_auth'] = git_url._replace(netloc=url).geturl()
return data
@pre_load()
def set_owner_name(self, data, **kwargs):
"""Set owner and name fields."""
git_url = GitURL.parse(data['git_url'])
data['owner'] = git_url.owner
data['name'] = git_url.name
return data
class ProjectCloneResponse(Schema):
"""Response schema for project clone."""
project_id = fields.String(required=True)
git_url = fields.String(required=True)
class ProjectCloneResponseRPC(JsonRPCResponse):
"""RPC response schema for project clone response."""
result = fields.Nested(ProjectCloneResponse)
class ProjectListResponse(Schema):
"""Response schema for project listing."""
projects = fields.List(fields.Nested(ProjectCloneResponse), required=True)
class ProjectListResponseRPC(JsonRPCResponse):
"""RPC response schema for project listing."""
result = fields.Nested(ProjectListResponse)
| [
"noreply@github.com"
] | noreply@github.com |
f7b21a906007932e12e6b9b620df1487e6bf7182 | 180d59e5ff42d724db49b46860640e14cb8b198c | /myFace.py | dc2415c98850bd6e50b3178adb79ff78fb063dfb | [] | no_license | KaanKesgin/Generating-Realistic-Faces-w-GANs | 409f53423a0158964e6e22eabe324357c5d1aedb | 1fdd62fcc49cdd2143a8a655c77be272404e48db | refs/heads/master | 2020-07-18T01:06:59.447004 | 2019-09-03T18:00:38 | 2019-09-03T18:00:38 | 206,140,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,254 | py | #GANS for face generation
import os
import helper
from glob import glob
from matplotlib import pyplot
import warnings
import tensorflow as tf
import numpy as np
#check for GPU because why not
#if not tf.test.gpu_device_name():
# warnings.warn('No GPU found, You are going to want to use a GPU for this')
#else:
# print('Default GPU device: {}'.format(tf.test.gpu_device_name))
data_dir = 'data'
show_n_images = 25
helper.download_extract('mnist', data_dir)
#helper.download_extract('celeba', data_dir)
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')
def model_inputs(image_width, image_height, image_channels, z_dim):
""""create model_inputs
:param image_width: the input image width
:param image _height: input image height
:param image_channels: the number of image channels
:param z_dim: the dimension of Z
:return: Tuple of (tensor of real input images, tensor of z data, learning rate)
"""
realInputImages = tf.placeholder(tf.float32, (None,image_width,image_height,image_channels), name='realInputImages')
inputs_Z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
learning_rate = tf.placeholder(tf.float32)
return realInputImages, inputs_Z, learning_rate
#Discriminator network that discriminates on images
#This function should be able to reuse variables in the neural network, use a scope name of discriminator
#The function should return a tuple of (tensor output of the discriminator, tensor logits of the discriminator)
def discriminator(images, reuse=False):
"""Create the discriminator network
:param images: tensor of input image(s)
:param reuse: boolean if the weights should be reused
:return: tuple of (tensor output of the discriminator, tensor logits of the discriminator)"""
alpha = 0.2
with tf.variable_scope('discriminator', reuse=reuse):
#input layer is 28*28*3(?!_1)
x1 = tf.layers.conv2d(images, 64, 5,strides=2, padding='same')
# 64 is filter: dimensionality of the output space (number of filters in the convolution)
# 5 is the stride of convolution along the height and width (can be a tuple with 2 dimensions)
relu1 = tf.maximum(alpha*x1, x1)
# dimensions are now 14*14*64
x2 = tf.layers.conv2d(relu1, 128, 5, strides=2, padding='same')
bn2 = tf.layers.batch_normalization(x2, training=True)
relu2 = tf.maximum(alpha*bn2, bn2)
# dimensions are now 7*7*128
# time to flatten
flat = tf.reshape(relu2, (-1, 7*7*128))
logits = tf.layers.dense(flat,1)
out = tf.sigmoid(logits)
return out, logits
#implement a generator to generate an image using 'z'.
#similar to discriminator this function should be able to reuse the variables in the neural network
#in a similar manner with the scope defined in the discriminator network
#this function should return the generated 28*28*out_channel_dim images
def generator(z, out_channel_dim, is_train=True):
"""Create the generator network
:param z: Input z
:param out_channel_dim: the number of channels in the output image
:param: is_train: boolean if generator is being used for training
:return: the tensor output of the generator"""
alpha = 0.2
with tf.variable_scope('generator', reuse=not is_train):
#first fully connected layer
x1 = tf.layers.dense(z, 7*7*128)
# reshape it to start the convolutional stack
x1 = tf.reshape(x1, (-1, 7, 7, 128))
x1 = tf.layers.batch_normalization(x1, training=is_train)
x1 = tf.maximum(alpha*x1, x1)
#current size 7*7*128
x2 = tf.layers.conv2d_transpose(x1, 64, 5, strides=2, padding='same')
x2 = tf.layers.batch_normalization(x2, training=is_train)
x2 = tf.maximum(alpha * x2, x2)
#current size is 14*14*64
logits = tf.layers.conv2d_transpose(x2, out_channel_dim, 5, strides=2, padding='same')
#current size = 28*28*3
out = tf.tanh(logits)
return out
#implement model_loss to build the GANs for training and calculate the loss.
#The functions should return a tuple of (discriminator loss, generator loss)
#use discriminator and generator functions
def model_loss(input_real, input_z, out_channel_dim):
"""Get the loss for the discriminator and generator
:param input_real: Images from the real dataset
:param input_z: Z inpput
:param out_channel_dim: the number of channels in the output image
:return: a tuple of (discriminator loss, generator loss)"""
g_model = generator(input_z, out_channel_dim)
d_model_real, d_logits_real = discriminator(input_real)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_model_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_model_fake)))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_model_fake)))
d_loss = d_loss_real + d_loss_fake
return d_loss, g_loss
#implement model_opt to create the optimization operations for the GAN
#the function should return a tuple of (discriminator training operatrion, generator treining operation)
def model_opt(d_loss, g_loss, learning_rate, beta1):
"""Get optimization operations
:param d_loss: Discriminator loss tensor
:param g_loss: Generator loss tensor
:param learning_rate: learning rate placeholder
:param beta1: the exponential decay rate for the 1st moment in the optimizer
:return: A tuple of(discriminator training operation, generator training operation)
"""
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
g_vars = [var for var in t_vars if var.name.startswith('generator')]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
return d_train_opt, g_train_opt
#following function is to show the output of the generator during training to help determine how well GAN is training
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
"""show example output for the generator
:param sess: tensorflow session
:param n_images: number of images to display
:param input_z: input Z tensor
:param out_channel_dim: the number of channels in the output image
:param image_mode: the mode to use for images ("RGB" or "L)"""
cmap = None if image_mode == 'RGB' else 'gray'
z_dim = input_z.get_shape().as_list()[-1]
example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
samples = sess.run(generator(input_z, out_channel_dim, False), feed_dict={input_z: example_z})
images_grid = helper.images_square_grid(samples, image_mode)
pyplot.imshow(images_grid, cmap=cmap)
#implementation of train to build and train GANs
#its recommended to show generator output once every 100 batches as it is a computational expense to show the output
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):
"""Train the GAN
:param epoch_count: number of epochs
:param batch_size: batch size
:param z_dim: Z dimension
:param learning _rate: learning rate
:param beta1: exponential decay rate for the 1st moment in the optimizer
:param get_batches: function to get batches
:param data_shape: shape of the data
:param data_image_mode the image mode ("RGB" or "L")"""
inputs_real, inputs_z, learning_rate = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)
learnRate = 0.0002
d_loss, g_loss = model_loss(inputs_real, inputs_z, data_shape[3])
d_opt, g_opt = model_opt(d_loss, g_loss, learnRate, beta1)
step = 0
losses = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epoch_count):
for batch_images in get_batches(batch_size):
step +=1
batch_images *=2
batch_z = np.random.uniform(-1,1,size=(batch_size, z_dim))
sess.run(d_opt, feed_dict={inputs_real: batch_images, inputs_z: batch_z, learning_rate: learnRate})
sess.run(g_opt, feed_dict={inputs_z: batch_z, inputs_real: batch_images, learning_rate: learnRate})
if step%128 == 0:
train_loss_d = d_loss.eval({inputs_z:batch_z, inputs_real: batch_images})
train_loss_g = g_loss.eval({inputs_z: batch_z})
print("Epoch {}/{}...".format(epoch_i+1, epochs),
"Discriminator Loss: {:.4f} ...".format(train_loss_d),
"Generator Loss: {:.4f} ...".format(train_loss_g))
losses.append((train_loss_d, train_loss_g))
show_generator_output(sess, 16, inputs_z, data_shape[3], data_image_mode)
batch_size = 32
z_dim = 100
learning_rate = 0.0002
beta1 = 0.1
epochs = 1
celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches,
celeba_dataset.shape, celeba_dataset.image_mode)
| [
"noreply@github.com"
] | noreply@github.com |
9df13b60fe7e01a4f1d644f01e37df3490769058 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp156_6000.py | d472674e752a50fc2c150d7c07922e1993e4e721 | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,771 | py | ITEM: TIMESTEP
6000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-2.7215234007832265e+00 4.9921523400776607e+01
-2.7215234007832265e+00 4.9921523400776607e+01
-2.7215234007832265e+00 4.9921523400776607e+01
ITEM: ATOMS id type xs ys zs
1622 1 0.483866 0.6696 0.0502502
1525 1 0.0892975 0.141091 0.0758595
815 1 0.205158 0.0643884 0.178165
585 1 0.0492387 0.136937 0.19374
18 1 0.106243 0.0579593 0.0985426
155 1 0.0507332 0.042377 0.152979
1496 1 0.417022 0.200629 0.381304
1871 1 0.0493648 0.066403 0.0397444
1238 1 0.344257 0.110556 0.0896849
1357 1 0.192934 0.0796272 0.0895359
852 1 0.0236587 0.513427 0.154936
1254 1 0.298657 0.980446 0.0572751
1641 1 0.161095 0.159221 0.0760137
1748 1 0.357619 0.0908315 0.174926
586 1 0.268144 0.119384 0.0504549
549 1 0.343336 0.16553 0.156985
1322 1 0.246871 0.0327068 0.120771
1352 1 0.479376 0.59961 0.260596
1359 1 0.451406 0.0179069 0.0734731
420 1 0.272947 0.131492 0.136173
1334 1 0.331776 0.24152 0.113246
1420 1 0.387949 0.0423638 0.0589836
1293 1 0.192485 0.681525 0.0235019
1759 1 0.302003 0.0578136 0.0901349
804 1 0.16988 0.31171 0.13637
1423 1 0.01381 0.332269 0.178131
1268 1 0.471821 0.118791 0.462719
671 1 0.0260388 0.349169 0.430271
1620 1 0.109044 0.333926 0.0059734
733 1 0.226852 0.140259 0.22027
2027 1 0.124392 0.183455 0.15116
1670 1 0.410592 0.651008 0.0647709
896 1 0.357546 0.692507 0.0203623
1932 1 0.224796 0.0761756 0.243876
1073 1 0.302701 0.177569 0.080904
1771 1 0.191654 0.169475 0.146355
1085 1 0.192225 0.276039 0.0551283
2018 1 0.270152 0.203908 0.154698
1106 1 0.229948 0.265733 0.126304
1222 1 0.247279 0.213697 0.0558782
560 1 0.0308248 0.277388 0.271511
443 1 0.446264 0.203666 0.00346344
1973 1 0.306554 0.249728 0.0489461
697 1 0.374909 0.173534 0.0854787
1734 1 0.441042 0.11816 0.0688306
1382 1 0.386391 0.00553615 0.122214
1261 1 0.0157243 0.572904 0.275918
726 1 0.483122 0.188514 0.0808325
708 1 0.138855 0.376148 0.058544
745 1 0.137014 0.382484 0.13446
857 1 0.317975 0.727283 0.476744
1912 1 0.00382869 0.388565 0.13246
1168 1 0.132705 0.369722 0.252162
299 1 0.122185 0.310537 0.0816508
493 1 0.138899 0.8265 0.472695
636 1 0.0725038 0.41664 0.0768961
1830 1 0.4183 0.815832 0.0205347
1471 1 0.240489 0.376823 0.110267
314 1 0.364801 0.314022 0.0687149
77 1 0.276422 0.309631 0.0836881
61 1 0.213686 0.365702 0.0458131
1257 1 0.327797 0.373794 0.12158
1651 1 0.282014 0.357026 0.0261893
1782 1 0.49329 0.255314 0.041978
1544 1 0.0399534 0.265682 0.199401
399 1 0.201333 0.900543 0.0454325
21 1 0.46716 0.338451 0.0475941
1964 1 0.458008 0.459357 0.331051
1686 1 0.305292 0.408094 0.191945
790 1 0.318827 0.561141 0.00697293
1249 1 0.489129 0.585931 0.445332
639 1 0.378009 0.477792 0.0241742
1637 1 0.073647 0.467397 0.133855
1189 1 0.00585068 0.199621 0.255641
554 1 0.140448 0.445631 0.0874117
634 1 0.149596 0.456036 0.15993
798 1 0.218015 0.462438 0.0703786
1456 1 0.0244078 0.968952 0.197645
1450 1 0.0710672 0.585753 0.0546782
1048 1 0.416368 0.0729733 0.349807
1172 1 0.219372 0.595219 0.0315721
1020 1 0.0102313 0.853479 0.0509291
521 1 0.290558 0.446694 0.0664316
2014 1 0.213763 0.503303 0.15461
410 1 0.362256 0.545075 0.0657383
1319 1 0.00204304 0.322992 0.328492
1064 1 0.386025 0.475469 0.0918908
1897 1 0.204548 0.571401 0.173788
767 1 0.0194324 0.117569 0.292348
1107 1 0.12076 0.872165 0.32345
461 1 0.384477 0.529569 0.146594
552 1 0.0415801 0.440072 0.0137688
13 1 0.373904 0.871252 0.487999
407 1 0.40633 0.650536 0.41665
1183 1 0.410172 0.405893 0.0323969
1376 1 0.133309 0.611886 0.16266
1980 1 0.164674 0.68791 0.0877057
1439 1 0.356244 0.78798 0.427554
1709 1 0.0935858 0.475377 0.212098
1522 1 0.132965 0.548713 0.20603
1981 1 0.0519103 0.673502 0.140625
680 1 0.0539551 0.582624 0.127162
1494 1 0.0621276 0.505935 0.0748198
494 1 0.194497 0.586566 0.256727
1452 1 0.193198 0.529091 0.077242
1958 1 0.284743 0.601646 0.211071
1607 1 0.238582 0.718498 0.115319
1719 1 0.343217 0.642765 0.178728
26 1 0.263245 0.655388 0.0723762
1069 1 0.275149 0.660345 0.154563
806 1 0.195298 0.661058 0.147522
1557 1 0.25855 0.712974 0.201067
821 1 0.339161 0.582091 0.275418
487 1 0.340012 0.614336 0.0578631
517 1 0.421659 0.671662 0.154329
1788 1 0.39392 0.591654 0.109754
1722 1 0.352657 0.682255 0.108319
1684 1 0.440434 0.720711 0.0905676
995 1 0.388555 0.411849 0.362093
371 1 0.442397 0.590743 0.0396707
470 1 0.017124 0.2041 0.114881
653 1 0.487129 0.20578 0.469929
1530 1 0.110026 0.843105 0.205851
682 1 0.104818 0.751129 0.0990502
1061 1 0.0871148 0.85978 0.0427872
982 1 0.174039 0.749127 0.142528
367 1 0.166628 0.895638 0.112145
1821 1 0.196858 0.438413 0.464433
381 1 0.10792 0.772422 0.18576
642 1 0.0818784 0.818484 0.132096
510 1 0.469564 0.872451 0.36641
1921 1 0.152271 0.822168 0.143109
1634 1 0.222516 0.747744 0.0550109
1837 1 0.193264 0.778491 0.212812
880 1 0.221796 0.799142 0.142191
954 1 0.247516 0.290947 0.00564548
246 1 0.221438 0.822577 0.0767767
266 1 0.156852 0.787589 0.0513247
970 1 0.303509 0.726039 0.0659358
1702 1 0.337957 0.81078 0.197879
637 1 0.330978 0.73895 0.150336
66 1 0.446756 0.808833 0.119088
488 1 0.37115 0.75709 0.0557082
503 1 0.411781 0.747182 0.155946
1742 1 0.285631 0.853902 0.0713214
1218 1 0.0659605 0.242343 0.0538262
404 1 0.086972 0.895618 0.120177
1169 1 0.181257 0.0411055 0.0173737
483 1 0.0751966 0.912805 0.188378
1438 1 0.204136 0.981767 0.145377
193 1 0.146352 -0.00176401 0.0738577
673 1 0.269764 0.679185 0.430396
1895 1 0.469652 0.369803 0.349189
1834 1 0.332902 0.413315 0.0236871
610 1 0.0673337 0.98407 0.0512626
1807 1 0.153785 0.0312405 0.496127
1281 1 0.227514 0.982361 0.0670085
408 1 0.260453 0.0496816 0.0373235
1878 1 0.301151 0.166323 0.003567
357 1 0.31351 0.997439 0.135443
1515 1 0.273072 0.940571 0.181624
1078 1 0.235561 0.912142 0.125067
184 1 0.376028 0.952468 0.0190259
164 1 0.453359 0.290961 0.272484
48 1 0.29736 0.793669 0.118187
1392 1 0.415483 0.881458 0.0997325
1385 1 0.363477 0.826667 0.127686
486 1 0.354476 0.934201 0.108967
1694 1 0.322314 0.880581 0.159394
428 1 0.389438 0.327637 0.483379
1448 1 0.0983204 0.904983 0.263716
472 1 0.466976 0.945951 0.327448
1049 1 0.185899 0.277118 0.478588
57 1 0.13024 0.0301861 0.2766
1528 1 0.0919083 0.00390295 0.200257
530 1 0.161331 0.121979 0.195784
927 1 0.102341 0.0848775 0.227258
2045 1 0.03147 0.182296 0.336931
1988 1 0.0688094 0.00742275 0.298411
1735 1 0.245548 0.00274071 0.202496
578 1 0.273665 0.0502451 0.296492
1132 1 0.201507 0.0554274 0.335264
360 1 0.3725 0.147153 0.258049
529 1 0.33227 0.0795435 0.248999
1671 1 0.372516 0.0227447 0.29961
182 1 0.473602 0.240855 0.13003
1030 1 0.432889 0.123131 0.199093
1408 1 0.369683 0.228965 0.242896
1425 1 0.486934 0.0308626 0.146846
1615 1 0.45293 0.186878 0.241639
1273 1 0.450049 0.158815 0.132616
356 1 0.41751 0.070802 0.12054
689 1 0.223078 0.667828 0.490259
364 1 0.269796 0.1868 0.426349
103 1 0.0796575 0.242 0.132427
786 1 0.15976 0.101113 0.289875
1789 1 0.256899 0.514566 0.47629
1011 1 0.106414 0.183485 0.233839
67 1 0.0568578 0.199758 0.429103
865 1 0.0883007 0.290357 0.335037
365 1 0.312385 0.136311 0.21799
280 1 0.400452 0.221167 0.140076
105 1 0.33618 0.245459 0.18476
794 1 0.196124 0.171117 0.279133
770 1 0.186666 0.244595 0.24936
388 1 0.133146 0.116073 0.132097
1 1 0.282572 0.0660715 0.180107
1822 1 0.295918 0.209139 0.230935
1766 1 0.00825946 0.517181 0.0255241
971 1 0.275141 0.555078 0.418052
323 1 0.303919 0.28077 0.263427
261 1 0.376522 0.296108 0.286098
808 1 0.0613197 0.722629 0.047541
684 1 0.378485 0.286979 0.138742
267 1 0.432296 0.253296 0.329693
1876 1 0.295073 0.173664 0.291593
353 1 0.111802 0.283701 0.256865
776 1 0.162063 0.23368 0.118168
1050 1 0.403528 0.163103 0.469499
877 1 0.239027 0.437142 0.188904
80 1 0.414975 0.0371346 0.456889
1311 1 0.0693176 0.340679 0.268739
202 1 0.0855497 0.305023 0.420641
135 1 0.0880526 0.33027 0.203097
24 1 0.260733 0.274514 0.199533
1656 1 0.30762 0.298064 0.144493
490 1 0.20643 0.373654 0.281525
1117 1 0.180265 0.318149 0.220319
717 1 0.237498 0.30777 0.265178
1582 1 0.199886 0.346939 0.372689
1144 1 0.163035 0.471136 0.373726
1936 1 0.272606 0.29563 0.332409
1609 1 0.0196685 0.865747 0.394211
1956 1 0.419358 0.253246 0.0781785
277 1 0.252112 0.352262 0.178309
406 1 0.48257 0.393363 0.243109
2024 1 0.350552 0.383975 0.243983
415 1 0.374929 0.357321 0.173934
1412 1 0.0636634 0.50132 0.307699
163 1 0.0805611 0.418162 0.177358
844 1 0.189989 0.578451 0.331068
1457 1 0.0638928 0.432863 0.268574
1097 1 0.0229876 0.395102 0.317612
901 1 0.155974 0.434545 0.269738
87 1 0.0137158 0.438444 0.199665
151 1 0.105212 0.566751 0.2702
1486 1 0.136315 0.50199 0.288798
1320 1 0.119193 0.536058 0.367558
1799 1 0.063662 0.563952 0.208734
1619 1 0.313825 0.494178 0.154774
259 1 0.267249 0.520202 0.0786244
1563 1 0.261941 0.460638 0.342903
1403 1 0.336031 0.556785 0.198538
348 1 0.393982 0.506189 0.216301
899 1 0.285221 0.411328 0.29788
555 1 0.192393 0.476946 0.226631
1484 1 0.27637 0.674597 0.324974
1725 1 0.280293 0.514613 0.293397
337 1 0.437746 0.464927 0.163205
633 1 0.427287 0.3784 0.103285
1712 1 0.497984 0.461422 0.216186
30 1 0.392226 0.59192 0.202698
1849 1 0.321297 0.322707 0.214857
333 1 0.319829 0.472595 0.229327
438 1 0.458304 0.465596 0.0795514
1159 1 0.360877 0.463093 0.304899
690 1 0.316546 0.505645 0.360378
1015 1 0.371848 0.435252 0.184217
859 1 0.114754 0.699297 0.157781
1858 1 0.433299 0.724626 0.0211737
1344 1 0.0741369 0.626388 0.259654
720 1 0.0465314 0.36264 0.00957701
1193 1 0.0946984 0.679171 0.311312
874 1 0.355518 0.538354 0.496216
459 1 0.0518772 0.808554 0.256665
1816 1 0.219152 0.646332 0.215644
625 1 0.250032 0.522304 0.2241
1997 1 0.230028 0.73539 0.324439
1942 1 0.316065 0.665182 0.249742
455 1 0.416574 0.561549 0.380235
1632 1 0.448023 0.65706 0.2263
1187 1 0.209239 0.717585 0.259522
1179 1 0.207797 0.491855 0.313868
663 1 0.418938 0.601873 0.306238
1340 1 0.492443 0.699405 0.266133
1432 1 0.467476 0.539973 0.203499
1591 1 0.48647 0.838833 0.4252
645 1 0.465595 0.538927 0.33025
1026 1 0.4934 0.0665068 0.215854
715 1 0.347835 0.68023 0.31582
79 1 0.12714 0.729434 0.249218
890 1 0.0361423 0.825665 0.187425
1487 1 0.440551 0.675556 0.320933
706 1 0.431773 0.806493 0.461608
676 1 0.0435212 0.868871 0.309296
16 1 0.0573563 0.705808 0.227714
1082 1 0.491147 0.408213 0.0631251
1355 1 0.000170218 0.762564 0.229993
1490 1 0.233553 0.82703 0.293448
1724 1 0.263044 0.785277 0.23061
1551 1 0.243406 0.794281 0.370577
336 1 0.395853 0.752776 0.302724
147 1 0.287298 0.856622 0.271983
1115 1 0.175623 0.878746 0.270136
949 1 0.138321 0.806145 0.27056
572 1 0.255018 0.857497 0.178192
1191 1 0.372347 0.694199 0.212989
1024 1 0.424453 0.738516 0.236994
1451 1 0.413233 0.272503 0.203584
801 1 0.467627 0.79047 0.306412
614 1 0.416212 0.831717 0.178498
1501 1 0.391186 0.803131 0.247048
1314 1 0.362936 0.871671 0.228518
1437 1 0.460545 0.840452 0.24104
1654 1 0.146174 0.939278 0.195385
190 1 0.290181 0.918048 0.45829
1643 1 0.137073 0.956314 0.294743
1499 1 0.0364567 0.248716 0.377285
658 1 0.00978924 0.870885 0.246424
528 1 0.160445 0.00391666 0.2229
1659 1 0.141856 0.0272609 0.153267
1695 1 0.21224 0.942026 0.251342
173 1 0.371639 0.95877 0.252425
522 1 0.181683 0.857515 0.198777
864 1 0.283511 0.926278 0.322867
883 1 0.196712 0.593424 0.107512
1272 1 0.339968 0.0232865 0.205614
1847 1 0.404741 0.0513271 0.200091
923 1 0.353904 0.948866 0.175064
473 1 0.44891 0.892519 0.18458
1160 1 0.445592 0.00502982 0.255303
1776 1 0.438402 0.915861 0.263569
254 1 0.416442 0.962502 0.195676
1296 1 0.408076 0.984168 0.38453
354 1 0.242677 0.596189 0.497795
1827 1 0.126272 0.0226259 0.424922
1290 1 0.139119 0.0225616 0.353219
1868 1 0.461507 0.875403 0.0456829
604 1 0.311028 0.142538 0.486167
700 1 0.0516796 0.100697 0.378399
1745 1 0.083648 0.152546 0.297683
820 1 0.104464 0.154379 0.363447
1935 1 0.404016 0.673321 0.497112
627 1 0.0889709 0.122427 0.450871
1721 1 0.24151 0.124222 0.331183
655 1 0.290997 0.0555414 0.359757
1947 1 0.281085 0.118303 0.390833
1090 1 0.147537 0.607474 0.0443984
1952 1 0.245839 0.268451 0.437798
1285 1 0.353083 0.0338139 0.389395
583 1 0.178828 0.149436 0.376397
1813 1 0.0141431 0.285117 0.0405492
1618 1 0.423469 0.140407 0.320155
730 1 0.41881 0.0699972 0.267941
275 1 0.358172 0.115637 0.426322
557 1 0.340065 0.101599 0.332243
431 1 0.450052 0.96445 0.457728
95 1 0.393491 0.0977628 0.498911
1308 1 0.0600522 0.295303 0.490045
1963 1 0.15547 0.280764 0.395975
1440 1 0.103846 0.548181 0.459239
1116 1 0.105347 0.22381 0.298049
40 1 0.108507 0.241701 0.452794
1297 1 0.1312 0.177581 0.431332
650 1 0.298875 0.589237 0.107551
775 1 0.00104041 0.687027 0.490531
372 1 0.185311 0.287282 0.316868
28 1 0.168887 0.221672 0.337655
1714 1 0.462985 0.939345 0.120867
520 1 0.111067 0.520879 0.00220536
1446 1 0.371153 0.269543 0.426384
1031 1 0.243672 0.200756 0.356863
15 1 0.447362 0.896409 0.484672
436 1 0.327255 0.33223 0.396132
574 1 0.424488 0.454628 0.42575
1835 1 0.430224 0.323777 0.41521
759 1 0.471464 0.242673 0.39558
931 1 0.250666 0.235194 0.287485
659 1 0.359139 0.192311 0.316029
1431 1 0.231336 0.0658019 0.412045
1806 1 0.122195 0.348194 0.470685
65 1 0.173349 0.469172 0.0157279
355 1 0.0749504 0.405072 0.447611
1832 1 0.0254639 0.750262 0.165262
54 1 0.178829 0.389772 0.189895
736 1 0.128583 0.378105 0.329141
1101 1 0.0885554 0.448163 0.3602
362 1 0.134221 0.956268 0.49018
1841 1 0.123335 0.373244 0.396999
1505 1 0.353344 0.266758 0.348012
1100 1 0.225363 0.362841 0.452397
217 1 0.016221 0.381166 0.243564
904 1 0.00511055 0.815427 0.345264
838 1 0.0627194 0.355863 0.365529
1001 1 0.0322636 0.113256 0.127858
1361 1 0.33287 0.439004 0.383384
216 1 0.368143 0.384978 0.435699
1679 1 0.273144 0.410283 0.492311
1948 1 0.407726 0.27798 0.0021874
255 1 0.0134614 0.868112 0.49063
4 1 0.410383 0.403516 0.282654
1419 1 0.118138 0.257301 0.00729662
1688 1 0.461865 0.510344 0.261998
753 1 0.480085 0.0118728 0.405836
1657 1 0.195462 0.210821 0.418839
1287 1 0.0263044 0.655621 0.310871
766 1 0.0489084 0.339393 0.0813783
845 1 0.0865658 0.598615 0.33017
143 1 0.0458714 0.527319 0.377897
1531 1 0.100724 0.474697 0.455698
929 1 0.272076 0.610742 0.362205
342 1 0.224096 0.527131 0.377091
802 1 0.177726 0.507478 0.437163
1310 1 0.266633 0.610117 0.282684
1594 1 0.330331 0.367906 0.334387
1017 1 0.2711 0.446664 0.426292
305 1 0.271253 0.373927 0.387492
433 1 0.488483 0.53072 0.0479932
1309 1 0.162045 0.0986956 0.446668
468 1 0.361057 0.53196 0.418337
805 1 0.0408654 0.918252 0.0540287
1255 1 0.417479 0.575946 0.454536
1903 1 0.354902 0.465694 0.462919
325 1 0.376711 0.528512 0.303694
430 1 0.473082 0.732072 0.355079
1028 1 0.481493 0.0804316 0.374043
1203 1 0.137391 0.658987 0.223337
789 1 0.0292921 0.71 0.377006
335 1 0.0325517 0.605758 0.400729
1143 1 0.336264 0.0879007 0.00226226
935 1 0.0526789 0.740118 0.303207
957 1 0.0773359 0.703428 0.457273
313 1 0.16255 0.602734 0.403152
977 1 0.028823 0.635161 0.0125259
1236 1 0.350049 0.576905 0.354664
712 1 0.324138 0.741947 0.258241
1330 1 0.260218 0.755776 0.430959
681 1 0.184467 0.650923 0.313929
1601 1 0.177283 0.741389 0.419479
702 1 0.200871 0.680202 0.391092
508 1 0.311117 0.739682 0.372914
1114 1 0.36286 0.705079 0.426753
1756 1 0.350911 0.619786 0.445609
1393 1 0.338465 0.653499 0.377612
791 1 0.466627 0.620871 0.377423
1715 1 0.405601 0.718645 0.37242
2011 1 0.484328 0.782177 0.185464
489 1 0.444142 0.719898 0.4381
1033 1 0.0281741 0.940163 0.27692
543 1 0.112204 0.728992 0.375062
110 1 0.0868614 0.862715 0.417482
1395 1 0.067843 0.788667 0.473825
181 1 0.150701 0.903532 0.432849
1521 1 0.491567 0.106483 0.288504
2028 1 0.164334 0.76546 0.328433
919 1 0.460051 0.012582 0.336795
243 1 0.0652428 0.775973 0.409043
224 1 0.322105 0.904488 0.0175257
239 1 0.0841322 0.657781 0.378436
1391 1 0.0860221 0.801942 0.338185
1703 1 0.207544 0.808647 0.430789
1443 1 0.451889 0.348204 0.193855
1321 1 0.226926 0.0196864 0.478939
380 1 0.165415 0.833896 0.372247
1044 1 0.00336234 0.462896 0.327407
126 1 0.281575 0.837507 0.444873
1896 1 0.393055 0.838601 0.389467
90 1 0.05844 0.0559116 0.433632
611 1 0.110573 0.0511445 0.00930101
1930 1 0.318955 0.817964 0.373009
497 1 0.376667 0.82316 0.317589
1235 1 0.467055 0.528362 0.131685
1055 1 0.443529 0.7878 0.390057
1517 1 0.0448693 0.497302 0.499286
117 1 0.147858 0.958323 0.387289
1416 1 0.20976 0.948369 0.455624
58 1 0.00514178 0.968392 0.34249
448 1 0.48006 0.152968 0.38164
376 1 0.0146178 0.544914 0.435955
1463 1 0.0771431 0.989677 0.383702
1665 1 0.213272 0.911066 0.33852
1614 1 0.0792928 0.938073 0.332106
304 1 0.278491 0.881577 0.384099
2009 1 0.201221 0.999414 0.297067
1944 1 0.297713 0.968939 0.244439
1979 1 0.425501 0.11568 0.410394
1663 1 0.232754 -0.000394719 0.37927
869 1 0.340784 0.861609 0.427989
1037 1 0.0646118 0.623152 0.476947
233 1 0.42434 0.907492 0.41095
242 1 0.351686 0.970066 0.476978
46 1 0.340475 0.952163 0.376473
1353 1 0.468843 0.60253 0.146325
1794 1 0.0402093 0.935966 0.417051
1991 1 0.3069 0.797297 0.0283287
912 1 0.493699 0.318651 0.116809
1198 1 0.224186 0.139943 0.45792
2013 1 0.0884616 0.969026 0.128729
727 1 0.146861 0.609987 0.481613
722 1 0.0370234 0.182379 0.0124987
1548 1 0.494411 0.286665 0.19264
12 1 0.146207 0.720129 0.480599
515 1 0.00101851 0.138638 0.472105
509 1 0.232898 0.206259 0.484237
1786 1 0.000852815 0.00507774 0.402765
1480 1 0.38115 0.755308 0.485879
1341 1 0.484753 0.49736 0.463241
351 1 0.0179296 0.955009 0.120167
1389 1 0.0793425 0.000357977 0.499293
1587 1 0.00695859 0.155114 0.39981
1010 1 0.00568505 0.0181926 0.480703
130 1 0.480496 0.405007 0.175293
1497 1 0.161177 0.1553 0.496648
121 1 0.219824 0.803055 0.0118939
1959 1 0.127824 0.187657 0.00131914
1488 1 0.324933 0.299305 0.00456966
941 1 0.306949 0.632706 -0.0018836
573 1 0.134894 0.0089005 0.560542
1145 1 0.16607 0.109391 0.582571
161 1 0.105167 0.0848857 0.521968
863 1 0.403596 0.950022 0.856108
201 1 0.0221128 0.265854 0.69201
872 1 0.117593 0.142174 0.684392
1583 1 0.191948 0.0248687 0.611599
840 1 0.0824378 0.994927 0.617334
1626 1 0.445028 0.595475 0.544084
1156 1 0.210242 0.104255 0.515104
1882 1 0.460185 0.376177 0.862688
115 1 0.231458 0.457217 0.535354
564 1 0.0340691 0.656274 0.664785
1843 1 0.226719 0.0924276 0.611971
1844 1 0.257814 0.155176 0.527621
146 1 0.287983 0.0503006 0.580887
1373 1 0.282363 0.0432722 0.649603
331 1 0.490505 0.0334389 0.58649
1675 1 0.382059 0.960718 0.668176
707 1 0.332518 0.0966365 0.634551
1113 1 0.377633 0.899777 0.70738
1855 1 0.401843 0.079959 0.574284
1652 1 0.478267 0.212696 0.659899
512 1 0.476083 0.0793353 0.714954
373 1 0.0625567 0.103007 0.639269
339 1 0.388354 0.143914 0.992333
1993 1 0.173223 0.227091 0.526414
2030 1 0.0703815 0.0478873 0.686097
1052 1 0.12126 0.172447 0.597224
1639 1 0.201168 0.29366 0.569621
1250 1 0.124604 0.268555 0.598651
525 1 0.0162282 0.0037413 0.55142
1210 1 0.218269 0.27297 0.635463
1032 1 0.0130824 0.39494 0.761133
1838 1 0.173366 0.219441 0.63118
444 1 0.255448 0.22552 0.562719
142 1 0.336256 0.29084 0.632238
1814 1 0.395312 0.217039 0.597288
84 1 0.327323 0.216095 0.618066
1586 1 0.295345 0.180323 0.678149
908 1 0.476029 0.837887 0.662956
1312 1 0.442603 0.284048 0.73225
1242 1 0.418511 0.298401 0.58959
843 1 0.47795 0.347498 0.604622
175 1 0.384186 0.157821 0.538036
534 1 0.360648 0.162271 0.666955
366 1 0.448374 0.125393 0.536653
191 1 0.450073 0.199764 0.543308
1904 1 0.357113 0.240518 0.687116
1951 1 0.281229 0.0752945 0.511133
1277 1 0.126062 0.310525 0.532391
1216 1 0.00559104 0.426649 0.526566
813 1 0.111438 0.368483 0.580097
345 1 0.109766 0.424114 0.640199
10 1 0.0657443 0.307155 0.579215
1737 1 0.0725302 0.282502 0.73653
185 1 0.0750452 0.561851 0.887564
1178 1 0.0119703 0.847522 0.899566
1056 1 0.186688 0.343486 0.698186
972 1 0.392561 0.351197 0.987966
457 1 0.241512 0.308117 0.741386
1914 1 0.173356 0.39149 0.530361
329 1 0.174282 0.436675 0.586208
384 1 0.240188 0.349648 0.537545
784 1 0.256485 0.32561 0.665121
696 1 0.258759 0.40214 0.59626
683 1 0.17944 0.369351 0.605606
1274 1 0.0224418 0.285379 0.843627
114 1 0.36756 0.35264 0.578042
992 1 0.382182 0.480031 0.633756
1568 1 0.276127 0.517806 0.565181
139 1 0.459801 0.438144 0.586226
1130 1 0.115051 0.340212 0.658341
1240 1 0.0436686 0.570394 0.641579
1978 1 0.0135796 0.568375 0.965195
1065 1 0.0393035 0.549738 0.736094
499 1 0.0559012 0.476068 0.598704
1940 1 0.034149 0.361176 0.936158
1109 1 0.0726951 0.0585557 0.579751
1110 1 0.096885 0.588525 0.526968
51 1 0.482619 0.062366 0.839054
640 1 0.0371764 0.44297 0.670507
1662 1 0.00834321 0.422114 0.891934
441 1 0.458232 0.713705 0.519516
1518 1 0.157187 0.43388 0.718948
1677 1 0.22266 0.508343 0.610112
1898 1 0.325661 0.459092 0.562487
569 1 0.209332 0.4012 0.666071
111 1 0.180136 0.496297 0.505344
1574 1 0.426293 0.894972 0.660061
1387 1 0.0273104 0.620232 0.756227
361 1 0.37804 0.532305 0.566633
514 1 0.47326 0.51333 0.577358
1237 1 0.447237 0.492518 0.661973
1472 1 0.388957 0.423594 0.577192
1325 1 0.273933 0.282223 0.507892
1589 1 0.332288 0.112508 0.561541
617 1 0.0956559 0.621217 0.639386
1529 1 0.0502032 0.360446 0.697556
917 1 0.0990268 0.582771 0.701939
1267 1 0.110851 0.413734 0.986975
860 1 0.0708845 0.220171 0.956022
1900 1 0.474857 0.30855 0.82204
900 1 0.179251 0.604515 0.622502
668 1 0.259007 0.607082 0.595062
1757 1 0.27168 0.821947 0.583045
1504 1 0.336275 0.6625 0.57333
1728 1 0.233126 0.665051 0.581469
149 1 0.0225556 0.495082 0.8809
144 1 0.178806 0.718609 0.537489
1765 1 0.324533 0.525905 0.634444
1215 1 0.29434 0.978134 0.919914
1464 1 0.365298 0.588281 0.668712
1260 1 0.284974 0.999907 0.504904
1181 1 0.421806 0.897148 0.929849
1511 1 0.351962 0.555244 0.723417
519 1 0.447741 0.387675 0.549021
709 1 0.451563 0.350914 0.749934
1552 1 0.369881 0.607305 0.528191
605 1 0.421354 0.992133 0.971725
1447 1 0.0811391 0.238054 0.523334
758 1 0.0820748 0.73987 0.626731
1916 1 0.0484605 0.669097 0.586285
1950 1 0.10986 0.718043 0.548835
544 1 0.496314 0.25288 0.78856
834 1 0.0944433 0.78552 0.555833
854 1 0.0514924 0.375223 0.504185
1875 1 0.165402 0.790322 0.528175
1259 1 0.158 0.745019 0.60187
228 1 0.239617 0.757196 0.512326
2007 1 0.11875 0.692718 0.67714
3 1 0.230864 0.73975 0.610163
797 1 0.292296 0.707001 0.626536
748 1 0.26333 0.913116 0.597443
1442 1 0.283224 0.692818 0.529004
1520 1 0.461276 0.916321 0.879024
612 1 0.422118 0.656283 0.575064
1687 1 0.0877832 0.616064 0.839112
1564 1 0.314447 0.862137 0.523911
1629 1 0.475705 0.131859 0.929639
1228 1 0.365236 0.817027 0.547106
635 1 0.383102 0.722197 0.55707
1469 1 0.448159 0.716338 0.592737
293 1 0.435179 0.790667 0.553547
607 1 0.184194 0.94366 0.82674
386 1 0.0291018 0.828644 0.553886
1262 1 0.286267 0.904085 0.926109
1375 1 0.0172612 0.958663 0.605968
322 1 0.134407 0.902486 0.590198
710 1 0.488797 0.517955 0.724674
1041 1 0.104132 0.923535 0.659677
209 1 0.450649 0.168045 0.845151
1793 1 0.239033 0.0148604 0.971099
1747 1 0.252908 0.988575 0.580247
1045 1 0.173436 0.811516 0.667134
1339 1 0.246497 0.928647 0.518888
1080 1 0.296461 0.793045 0.517788
213 1 0.221515 0.0455195 0.550626
1483 1 0.326187 0.947508 0.553957
588 1 0.307636 0.964176 0.633396
1655 1 0.0624602 0.928313 0.551133
1103 1 0.249699 0.978838 0.668016
1743 1 0.498332 0.140201 0.677097
1578 1 0.46958 0.289859 0.662636
397 1 0.396705 0.883603 0.558244
1508 1 0.383899 0.950398 0.589135
1137 1 0.343753 0.794378 0.668654
945 1 0.421973 0.021179 0.602602
1887 1 0.0206426 0.124538 0.58046
1127 1 0.0532541 0.0655504 0.862622
1470 1 0.046056 0.1149 0.734752
449 1 0.0412015 0.197397 0.717328
1661 1 0.141528 0.988068 0.656398
52 1 0.221268 0.160244 0.657097
1027 1 0.058445 0.0440336 0.780357
132 1 0.227949 0.104054 0.732792
672 1 0.131162 0.101053 0.742198
464 1 0.208813 0.167776 0.583145
198 1 0.122715 0.0602165 0.630202
903 1 0.331477 0.980437 0.832628
1740 1 0.335567 0.0566208 0.70203
743 1 0.21595 0.20738 0.856298
888 1 0.257243 0.0966712 0.798989
1025 1 0.421977 0.117239 0.801374
502 1 0.280441 0.101517 0.691162
1571 1 0.401905 0.803927 0.9528
231 1 0.346928 0.0114972 0.604775
1459 1 0.431311 0.0144729 0.719384
298 1 0.428671 0.997475 0.79856
738 1 0.0109694 0.478774 0.954045
1972 1 0.362358 0.213762 0.96868
571 1 0.419865 0.0880845 0.673719
1095 1 0.450022 0.145338 0.724257
1256 1 0.381047 0.0528845 0.775786
946 1 0.16906 0.145032 0.797143
906 1 0.14502 0.220514 0.842955
8 1 0.0889684 0.439501 0.537041
1194 1 0.0886096 0.12828 0.78879
1323 1 0.141213 0.940698 0.895572
669 1 0.0984773 0.240496 0.680316
1965 1 0.17447 0.160123 0.722611
1083 1 0.157424 0.280509 0.6926
1939 1 0.214195 0.224173 0.701465
989 1 0.256433 0.174356 0.7413
582 1 0.213853 0.233057 0.786747
2033 1 0.28421 0.267896 0.698976
1374 1 0.282435 0.155489 0.599293
1553 1 0.334229 0.136336 0.901018
1949 1 0.301087 0.237661 0.768395
425 1 0.441186 0.201401 0.769903
1624 1 0.0119858 0.984882 0.759957
1182 1 0.443897 0.146381 0.622079
1859 1 0.407046 0.133428 0.891518
1569 1 0.421873 0.211243 0.697974
1549 1 0.392541 0.246698 0.813862
1305 1 0.37003 0.303908 0.757989
312 1 0.0646594 0.23197 0.591409
1066 1 0.488752 0.64342 0.503176
1962 1 0.0930805 0.298978 0.892061
432 1 0.0385307 0.378528 0.628778
703 1 0.125714 0.337848 0.834753
1131 1 0.107814 0.271767 0.800236
1184 1 0.106899 0.355543 0.760907
1541 1 0.351595 0.0443236 0.509018
1863 1 0.0447407 0.338281 0.795208
1356 1 0.236632 0.268591 0.885547
1244 1 0.229875 0.456742 0.759064
480 1 0.25529 0.29251 0.813413
1918 1 0.173131 0.381817 0.785895
256 1 0.387541 0.310256 0.688421
1826 1 0.307881 0.34135 0.716138
1060 1 0.172151 0.306931 0.769841
116 1 0.35538 0.166756 0.818736
1479 1 0.00185292 0.767316 0.920853
1908 1 0.444395 0.401242 0.683702
975 1 0.401031 0.360114 0.636562
741 1 0.440725 0.447209 0.735924
317 1 0.388807 0.378034 0.725592
959 1 0.431693 0.30536 0.915668
445 1 0.266911 0.329797 0.875053
2020 1 0.20226 0.554022 0.556074
1396 1 0.405691 0.31998 0.840665
1913 1 0.0822874 0.869412 0.89189
44 1 0.145209 0.484947 0.631781
566 1 0.0836773 0.431286 0.732467
1711 1 0.265821 0.630442 0.778806
546 1 0.116217 0.404248 0.815895
222 1 0.0173396 0.635406 0.84551
769 1 0.0382843 0.558164 0.807433
1219 1 0.158668 0.476398 0.772932
1263 1 0.043821 0.142684 0.935863
987 1 0.46635 0.434106 0.901579
746 1 0.148855 0.529654 0.718215
1872 1 0.250276 0.370579 0.806176
1378 1 0.0894812 0.495518 0.680843
1138 1 0.311403 0.430697 0.793916
1150 1 0.215461 0.547995 0.769035
504 1 0.292273 0.502846 0.754726
169 1 0.333056 0.593711 0.929206
427 1 0.402794 0.414289 0.812683
200 1 0.322656 0.410519 0.702139
930 1 0.312437 0.373865 0.636197
1739 1 0.459782 0.583055 0.678812
751 1 0.126266 0.653836 0.974456
817 1 0.298968 0.493998 0.827548
32 1 0.432189 0.509393 0.879002
1124 1 0.0225088 0.755287 0.518716
593 1 0.0920712 0.670018 0.782959
1369 1 0.185441 0.663688 0.688869
1223 1 0.11733 0.645533 0.722024
516 1 0.175465 0.645038 0.774373
454 1 0.0669979 0.750947 0.885996
1275 1 0.253389 0.476956 0.687364
799 1 0.141578 0.680726 0.610632
1795 1 0.201035 0.595551 0.721991
814 1 0.25436 0.672989 0.711923
835 1 0.23852 0.559079 0.667592
1288 1 0.309351 0.631462 0.664717
1575 1 0.216757 0.592578 0.870881
1850 1 0.158512 0.595261 0.821783
1911 1 0.322488 0.712551 0.69093
1560 1 0.237883 0.726622 0.840816
692 1 0.472074 0.68238 0.66643
1148 1 0.460731 0.632321 0.782014
1195 1 0.254255 0.945341 0.978903
1660 1 0.47182 0.681027 0.884228
1752 1 0.277119 0.592533 0.714793
1039 1 0.484994 0.533863 0.928687
1229 1 0.412875 0.566476 0.776349
327 1 0.425169 0.677464 0.817122
1919 1 0.038848 0.776082 0.679074
150 1 0.226524 0.72862 0.686592
1969 1 0.0521996 0.922409 0.773907
674 1 0.0585728 0.802791 0.747506
34 1 0.115336 0.725124 0.745567
1938 1 0.432409 0.525864 0.501987
781 1 0.134593 0.847404 0.727828
414 1 0.119881 0.828631 0.817443
341 1 0.121153 0.768629 0.68324
394 1 0.263564 0.828079 0.7063
1335 1 0.192936 0.762635 0.751648
1770 1 0.344408 0.595665 0.603085
1818 1 0.323263 0.761332 0.586411
250 1 0.223665 0.826471 0.909274
387 1 0.399037 0.827869 0.610093
724 1 0.290443 0.769556 0.724864
1466 1 0.298049 0.876882 0.767516
188 1 0.40218 0.725359 0.755587
1270 1 0.260931 0.942345 0.845825
31 1 0.424037 0.559081 0.615287
284 1 0.468336 0.75393 0.707374
1856 1 0.43035 0.778102 0.651376
72 1 0.482013 0.844181 0.864851
1954 1 0.383574 0.658647 0.635809
1797 1 0.48906 0.694394 0.745417
1510 1 0.482068 0.734008 0.820743
1680 1 0.091537 0.122483 0.990789
68 1 0.340044 0.777404 0.903817
7 1 0.0739206 0.976477 0.812332
754 1 0.0572822 0.890518 0.708992
916 1 0.442052 0.529318 0.990759
762 1 0.0865504 0.983859 0.71468
279 1 0.128577 0.0254425 0.868839
2006 1 0.142873 0.993579 0.795807
965 1 0.218614 0.891677 0.702147
601 1 0.187035 0.868146 0.794532
1606 1 0.0485838 0.877102 0.609543
1749 1 0.27895 0.772481 0.645495
589 1 0.264111 0.863313 0.831444
1866 1 0.195138 0.95374 0.733841
1732 1 0.131526 0.928074 0.755731
1699 1 0.291231 0.928662 0.701584
876 1 0.077516 0.822102 0.654701
1590 1 0.354716 0.793334 0.743023
1970 1 0.349602 0.898774 0.62875
292 1 0.47 0.930507 0.813363
718 1 0.350421 0.91788 0.776148
1295 1 0.430839 0.94559 0.744062
167 1 0.414897 0.87798 0.769931
1604 1 0.350257 0.981783 0.742349
1081 1 0.334078 0.826378 0.808221
1996 1 0.322077 0.905863 0.848239
324 1 0.0998495 0.559353 0.774087
850 1 0.377198 0.724078 0.629671
330 1 0.117025 0.169368 0.924071
1362 1 0.135955 0.0866231 0.910786
1093 1 0.0902266 0.864522 0.517625
1303 1 0.110147 0.217224 0.746422
505 1 0.148143 0.0834991 0.826703
1763 1 0.481333 0.363815 0.936994
1108 1 0.0273244 4.89503e-05 0.900455
1902 1 0.220512 0.0286767 0.769928
1350 1 0.418428 0.438197 0.507012
27 1 0.00352152 0.189889 0.622906
393 1 0.198417 0.0619229 0.691152
91 1 0.0187193 0.170804 0.786216
1123 1 0.222159 0.119862 0.869606
83 1 0.355045 0.131589 0.745776
1226 1 0.202903 0.0568832 0.913007
596 1 0.0321865 0.646561 0.918303
603 1 0.270654 0.0138739 0.853939
1067 1 0.46075 0.308042 0.519077
1774 1 0.338661 0.00705315 0.982428
158 1 0.0100853 0.342791 0.559002
398 1 0.40268 0.0486161 0.84615
889 1 0.473733 0.00272913 0.888392
244 1 0.440502 0.0575033 0.938558
1338 1 0.278428 0.0770884 0.900016
809 1 0.387043 0.00269398 0.903999
961 1 0.20714 0.0335447 0.84076
1208 1 0.153697 0.589564 0.904377
699 1 0.0868788 0.160036 0.527971
1540 1 0.0943037 0.147874 0.857056
460 1 0.196719 0.900328 0.957873
964 1 0.179922 0.194051 0.920128
823 1 0.0796814 0.290858 0.95872
1716 1 0.13793 0.740601 0.999063
1220 1 0.081322 0.989568 0.940053
511 1 0.166043 0.96441 0.979982
1201 1 0.0530798 0.196928 0.873328
881 1 0.308414 0.210714 0.910331
947 1 0.309478 0.250836 0.843906
1852 1 0.0239391 0.059129 0.969101
1227 1 0.272859 0.166327 0.847777
1023 1 0.484748 0.630885 0.611782
33 1 0.174441 0.799079 0.956218
666 1 0.260573 0.946448 0.773965
679 1 0.385037 0.227946 0.883067
194 1 0.290652 0.23002 0.979101
195 1 0.45033 0.246338 0.857229
1126 1 0.105066 0.545322 0.603314
1192 1 0.463368 0.228477 0.926891
891 1 0.479865 0.297936 0.972818
870 1 0.34738 0.404389 0.506888
137 1 0.171951 0.299219 0.880767
1810 1 0.276144 0.0920075 0.973176
661 1 0.477991 0.959838 0.525763
456 1 0.0576468 0.379888 0.862342
153 1 0.0884414 0.464294 0.861323
828 1 0.180242 0.355348 0.982154
1336 1 0.1373 0.253603 0.925946
1367 1 0.222539 0.392698 0.881756
1380 1 0.250417 0.34685 0.953825
1243 1 0.17454 0.87531 0.53048
1475 1 0.255651 0.151814 0.92768
1014 1 0.316477 0.332083 0.79681
221 1 0.323236 0.361397 0.95687
839 1 0.190042 0.278531 0.965083
570 1 0.363908 0.0820627 0.93658
218 1 0.395927 0.388547 0.902284
894 1 0.296004 0.295975 0.932115
382 1 0.150353 0.0165243 0.719225
1453 1 0.0743097 0.944697 0.873737
1099 1 0.0855641 0.591239 0.985335
422 1 0.176285 0.131869 0.979553
1961 1 0.0622254 0.428873 0.93571
133 1 0.493302 0.964936 0.695223
1012 1 0.217245 0.514556 0.845401
1394 1 0.0668785 0.493403 0.765955
1089 1 0.40995 0.966711 0.522905
265 1 0.0446524 0.437205 0.805088
1306 1 0.232973 0.834003 0.51662
539 1 0.0816222 0.505479 0.938799
531 1 0.264341 0.562118 0.939888
43 1 0.163839 0.451631 0.856222
1125 1 0.0949857 0.916481 0.938354
214 1 0.382584 0.851209 0.854141
1204 1 0.350928 0.434949 0.941961
619 1 0.277776 0.561306 0.849576
1042 1 0.180186 0.428542 0.939957
1870 1 0.239476 0.481641 0.917156
760 1 0.310969 0.404149 0.862835
478 1 0.271316 0.416234 0.938077
1343 1 0.431146 0.8166 0.726185
1351 1 0.355466 0.520947 0.962148
1945 1 0.454121 0.59083 0.840704
429 1 0.367001 0.483916 0.850858
779 1 0.318388 0.505576 0.903097
1674 1 0.438397 0.502074 0.799509
723 1 0.372854 0.554182 0.850628
347 1 0.427765 0.588307 0.917996
276 1 0.361621 0.932604 0.938671
1034 1 0.175191 0.663642 0.857174
507 1 0.149169 0.798128 0.881004
1386 1 0.150499 0.0132099 0.932053
311 1 0.21082 0.618768 0.941357
506 1 0.140948 0.528058 0.860234
624 1 0.0970886 0.69769 0.842728
1397 1 0.305854 0.467704 0.994105
122 1 0.261967 0.653531 0.853819
1751 1 0.293348 0.651707 0.93068
11 1 0.324284 0.568047 0.791845
1975 1 0.331969 0.697151 0.78882
1071 1 0.231702 0.183657 0.988908
1347 1 0.156118 0.533555 0.945814
933 1 0.00292613 0.0901342 0.801229
1730 1 0.213251 0.811343 0.834701
990 1 0.345309 0.698264 0.876397
1280 1 0.104893 0.364269 0.930077
1957 1 0.343879 0.855515 0.921803
1664 1 0.480532 0.659608 0.977355
830 1 0.339778 0.24935 0.558013
638 1 0.382149 0.631608 0.858149
1445 1 0.409872 0.659839 0.993571
1209 1 0.362762 0.850523 0.991983
647 1 0.401658 0.735097 0.944659
942 1 0.418331 0.456127 0.958802
70 1 0.00127611 0.0480131 0.659315
1474 1 0.0940743 0.67563 0.918396
219 1 0.162068 0.732752 0.822775
1831 1 0.468677 0.0511005 0.652282
1676 1 0.118761 0.910091 0.826393
675 1 0.0538817 0.805277 0.843953
1008 1 0.147345 0.870429 0.912045
1365 1 0.14308 0.71967 0.893057
561 1 0.483167 0.797432 0.98827
2047 1 0.0209879 0.987276 0.98051
1153 1 0.279378 0.800342 0.86667
2004 1 0.250792 0.693262 0.969033
303 1 0.102155 0.786703 0.950008
1755 1 0.220837 0.71935 0.910175
166 1 0.277817 0.797151 0.95199
950 1 0.00732535 0.289474 0.615408
165 1 0.020672 0.934921 0.925325
701 1 0.161017 0.634779 0.55395
969 1 0.359216 0.75653 0.807935
1434 1 0.423983 0.654322 0.714596
446 1 0.203201 0.891659 0.875154
1689 1 0.218528 0.961164 0.906743
119 1 0.447794 0.801634 0.804676
136 1 0.41081 0.791913 0.880389
725 1 0.47813 0.880958 0.550788
1072 1 0.281143 0.292052 0.580448
829 1 0.494397 0.582658 0.748774
523 1 0.0279319 0.0790824 0.51976
421 1 0.455599 0.0306483 0.514776
562 1 0.0338663 0.760407 0.987716
1966 1 0.0253152 0.544554 0.57126
1258 1 0.474852 0.419941 0.983764
1573 1 0.00488556 0.740531 0.62483
999 1 0.476343 0.958165 0.603453
1252 1 0.231561 0.514315 0.991878
1532 1 0.388331 0.5865 0.988966
334 1 0.483559 0.78183 0.912325
551 1 0.47494 0.936746 0.992037
479 1 0.0237903 0.266018 0.921912
1792 1 0.135094 0.868554 0.993489
1907 1 0.330634 0.330117 0.518504
264 1 0.00756798 0.214536 0.53382
1697 1 0.00258787 0.429883 0.598327
498 1 0.411479 0.0760592 0.997771
1726 1 0.248934 0.866667 0.994222
56 1 0.387806 0.245609 0.502007
1825 1 0.226668 0.417514 0.994108
340 1 0.682326 0.74564 0.0586216
1139 1 0.594403 0.982338 0.038838
1649 1 0.641239 0.115147 0.0722476
1364 1 0.710462 0.00878996 0.147516
471 1 0.521987 0.0879638 0.0301279
118 1 0.74815 0.566248 0.085912
290 1 0.984522 0.0354067 0.296112
810 1 0.997558 0.609772 0.479076
2032 1 0.702749 0.0713283 0.0963703
785 1 0.774631 0.980079 0.164739
235 1 0.868282 0.978463 0.165478
212 1 0.954531 0.179685 0.0422658
439 1 0.637237 0.0544936 0.144182
1348 1 0.655876 0.00141903 0.0783334
1925 1 0.908299 0.0883887 0.043643
1485 1 0.840405 0.112634 0.0972968
1566 1 0.538092 0.340129 0.187635
99 1 0.784942 0.154471 0.0543351
1853 1 0.954011 0.317319 0.0736562
882 1 0.98778 0.0988658 0.0451183
1750 1 0.98498 0.477037 0.424783
125 1 0.978171 0.040493 0.132374
1000 1 0.932695 0.729253 0.468615
1435 1 0.826307 0.982603 0.0875245
1016 1 0.804803 0.0569065 0.16037
1302 1 0.611673 0.126091 0.145574
1328 1 0.532825 0.131958 0.086664
75 1 0.683127 0.253363 0.154597
1282 1 0.575831 0.184548 0.121639
237 1 0.667196 0.863943 0.0109084
1923 1 0.694783 0.197274 0.0726649
1823 1 0.586501 0.0467832 0.414843
1690 1 0.884263 0.944164 0.0282134
1086 1 0.766629 0.0964487 0.115805
1576 1 0.727672 0.0482489 0.00974281
735 1 0.685511 0.265664 0.0736713
1370 1 0.760404 0.167067 0.120784
1910 1 0.731447 0.230201 0.12234
1291 1 0.572069 0.348407 0.120992
1068 1 0.821056 0.290829 0.0139513
922 1 0.899453 0.688209 0.0821724
1976 1 0.897016 0.0753125 0.132158
192 1 0.882665 0.307877 0.244079
1327 1 0.87361 0.169176 0.0679967
1022 1 0.904566 0.166647 0.144192
1035 1 0.952615 0.218093 0.158007
565 1 0.920903 0.229995 0.077192
392 1 0.888912 0.332156 0.180493
807 1 0.670359 0.352563 0.0534775
291 1 0.572873 0.415176 0.0424826
878 1 0.643807 0.461611 0.247384
241 1 0.567464 0.965582 0.314202
465 1 0.53994 0.231411 0.163405
1669 1 0.52897 0.576488 0.36556
1543 1 0.615734 0.272658 0.0450359
210 1 0.63439 0.237155 0.443088
600 1 0.730059 0.315032 0.105806
861 1 0.595355 0.340217 0.057609
1824 1 0.830537 0.317045 0.123597
648 1 0.524236 0.367907 0.00697098
1142 1 0.82732 0.448635 0.0612608
632 1 0.770404 0.278428 0.160673
1761 1 0.715531 0.390705 0.111761
729 1 0.963771 0.0353024 0.014337
1733 1 0.690558 0.537187 0.0262612
1877 1 0.773239 0.364045 0.0545135
1524 1 0.857349 0.405378 0.12245
1248 1 0.83043 0.329127 0.283954
2040 1 0.93979 0.350754 0.128969
1644 1 0.718762 0.113231 0.0423735
513 1 0.850557 0.658135 0.0266162
978 1 0.881895 0.299765 0.0713397
1580 1 0.630791 0.515671 0.0485753
772 1 0.797754 0.440227 0.155868
1723 1 0.970028 0.388789 0.0560693
145 1 0.985231 0.313171 0.490157
225 1 0.543986 0.570555 0.185679
1112 1 0.745066 0.500146 0.00579013
154 1 0.807002 0.239936 0.107485
1247 1 0.625439 0.497321 0.168719
2037 1 0.701943 0.467416 0.0996669
1611 1 0.551331 0.502864 0.223575
1986 1 0.626695 0.398286 0.0963942
1738 1 0.560717 0.491903 0.0651688
251 1 0.95893 0.651023 0.0446542
778 1 0.554215 0.426783 0.123309
1696 1 0.622861 0.423092 0.171133
1441 1 0.575827 0.167644 0.201607
613 1 0.850563 0.499047 0.113398
948 1 0.628342 0.46615 0.102372
897 1 0.763229 0.49601 0.0780003
128 1 0.769494 0.575514 0.210957
765 1 0.788202 0.498901 0.217878
1053 1 0.985789 0.0847825 0.414729
691 1 0.950953 0.52826 0.0803678
1533 1 0.904495 0.455568 0.0918857
761 1 0.795347 0.536675 0.284919
868 1 0.95034 0.439954 0.233641
1681 1 0.952253 0.498422 0.187388
375 1 0.941239 0.434176 0.159275
1744 1 0.872173 0.463314 0.177921
1315 1 0.995715 0.789071 0.432317
848 1 0.67045 0.645391 0.155195
591 1 0.536765 0.557568 0.106875
924 1 0.613125 0.551838 0.110225
993 1 0.946777 0.582667 0.425039
657 1 0.543974 0.628596 0.0714586
2038 1 0.512452 0.736365 0.0399855
1476 1 0.506623 0.915514 0.400308
199 1 0.583479 0.620105 0.142104
96 1 0.960885 0.94268 0.411335
667 1 0.730079 0.911622 0.00249097
368 1 0.802548 0.552282 0.128892
1854 1 0.737415 0.675055 0.115595
898 1 0.602601 0.672597 0.10481
134 1 0.732333 0.517257 0.153605
1539 1 0.734291 0.627519 0.0592689
1783 1 0.776215 0.0498246 0.0600073
1329 1 0.681093 0.571207 0.104945
1424 1 0.658338 0.66835 0.0610585
1006 1 0.945158 0.368947 0.48939
928 1 0.897269 0.613512 0.0791149
873 1 0.603789 0.0606637 0.484038
628 1 0.526816 0.70946 0.198053
1076 1 0.913395 0.693791 0.219602
1535 1 0.86112 0.0355351 0.0255271
1239 1 0.580239 0.707541 0.431088
63 1 0.820816 0.668303 0.0978028
998 1 0.538202 0.101349 0.427178
1968 1 0.620695 0.81094 0.12383
568 1 0.880023 0.505331 0.0449374
1804 1 0.949009 0.388021 0.319089
403 1 0.671882 0.869882 0.140345
734 1 0.59021 0.749578 0.153147
670 1 0.552289 0.214461 0.0446989
795 1 0.600915 0.82793 0.0598759
1523 1 0.601299 0.751351 0.0670226
1038 1 0.810102 0.0923385 0.428805
229 1 0.942217 0.100467 0.279071
1767 1 0.782624 0.683081 0.0443539
2002 1 0.752931 0.845809 0.0340247
1773 1 0.656183 0.737484 0.125031
739 1 0.674914 0.82298 0.0766476
608 1 0.753411 0.772395 0.0827946
69 1 0.525092 0.985348 0.0671102
1120 1 0.827033 0.83608 0.0509281
1449 1 0.815568 0.855588 0.475942
1556 1 0.838972 0.888981 0.126301
1140 1 0.535653 0.848654 0.359345
98 1 0.81994 0.803972 0.123318
1205 1 0.983021 0.274304 0.433211
450 1 0.976719 0.587424 0.341419
1246 1 0.870645 0.718164 0.142308
983 1 0.921708 0.756892 0.0242346
1704 1 0.942986 0.74235 0.168782
768 1 0.541129 0.917775 0.0531518
1136 1 0.535195 0.929267 0.132217
1481 1 0.501878 0.200356 0.316053
1812 1 0.746034 0.84916 0.118448
412 1 0.650006 0.94079 0.0219171
17 1 0.703695 0.909355 0.0663481
955 1 0.978294 0.810015 0.277134
1171 1 0.811494 0.603511 0.0580804
1934 1 0.755729 0.962141 0.0676218
559 1 0.928146 0.112233 0.466963
1133 1 0.597841 0.920299 0.102749
485 1 0.818024 0.918703 0.188185
1881 1 0.52515 0.812456 0.0501152
282 1 0.813045 0.910912 0.0626169
1717 1 0.986054 0.40396 0.395875
1404 1 0.563657 0.898924 0.308835
936 1 0.940846 0.88015 0.0163042
1865 1 0.953837 0.951181 0.0460492
631 1 0.512148 0.265571 0.324168
1094 1 0.964907 0.822295 0.195903
606 1 0.885543 0.936601 0.107019
1989 1 0.904017 0.836441 0.0553881
749 1 0.940268 0.968676 0.146061
1388 1 0.503497 0.947456 0.267845
793 1 0.75137 0.432614 0.0509985
1602 1 0.586944 0.99596 0.11405
884 1 0.650361 0.996718 0.210372
1915 1 0.646177 0.439074 0.0352638
2008 1 0.617837 0.174186 0.0316194
652 1 0.514673 0.0164095 0.277377
1283 1 0.747393 0.104682 0.388927
1354 1 0.565478 0.0965916 0.190698
1390 1 0.796838 0.974869 0.246298
1102 1 0.692333 0.151144 0.143183
1864 1 0.702533 0.107919 0.224095
102 1 0.634582 0.130229 0.28616
656 1 0.732181 0.0525691 0.204993
287 1 0.91238 0.0247893 0.255198
1004 1 0.808244 0.0952474 0.350743
1613 1 0.687301 0.0737666 0.301208
1196 1 0.707942 0.952113 0.230505
440 1 0.932378 0.977099 0.214095
1413 1 0.972291 0.0511849 0.216548
1926 1 0.856617 0.138599 0.247327
1077 1 0.845162 0.0355057 0.218337
475 1 0.863156 0.939621 0.24407
1018 1 0.932992 0.0394054 0.442575
518 1 0.937338 0.051457 0.36022
892 1 0.962837 0.95393 0.278662
1682 1 0.542028 0.915239 0.464828
851 1 0.643284 0.141841 0.207739
1245 1 1.0013 0.718813 0.0920475
1170 1 0.649741 0.324019 0.139141
260 1 0.654617 0.205758 0.286775
1406 1 0.568918 0.216701 0.324535
187 1 0.628201 0.0759613 0.235693
1318 1 0.695842 0.327361 0.237867
138 1 0.518513 0.227548 0.235083
1289 1 0.683162 0.0810207 0.412496
2042 1 0.739693 0.360129 0.188501
100 1 0.840497 0.266874 0.188151
2034 1 0.824069 0.254055 0.281811
1982 1 0.805681 0.186519 0.177124
538 1 0.726785 0.19502 0.189397
756 1 0.693696 0.243462 0.237364
980 1 0.79069 0.179426 0.25278
537 1 0.734756 0.265571 0.362868
1104 1 0.961648 0.115067 0.356882
907 1 0.817727 0.0848231 0.265477
1762 1 0.84621 0.125056 0.176631
2025 1 0.872303 0.195591 0.292293
1206 1 0.913468 0.208579 0.237071
1058 1 0.872241 0.00323329 0.467489
29 1 0.533925 0.34398 0.400893
55 1 0.622188 0.287453 0.219149
1009 1 0.607321 0.256155 0.282896
45 1 0.984329 0.710067 0.016564
1428 1 0.600256 0.892353 0.0230876
236 1 0.615929 0.177734 0.469739
5 1 0.596819 0.27715 0.150709
25 1 0.560365 0.416418 0.208345
1333 1 0.604867 0.352841 0.201055
2010 1 0.612608 0.454607 0.425428
1869 1 0.818566 0.377245 0.375154
1265 1 0.757028 0.437148 0.224132
1514 1 0.772949 0.273629 0.234479
630 1 0.697406 0.445822 0.176607
694 1 0.940214 0.172085 0.308383
622 1 0.877502 0.28647 0.349396
85 1 0.749436 0.237095 0.293222
270 1 0.751956 0.313884 0.28987
1928 1 0.816117 0.331949 0.195525
1070 1 0.945044 0.291492 0.370267
1829 1 0.878029 0.581191 0.00297087
1493 1 0.883389 0.356176 0.330916
1999 1 0.552294 0.503004 0.144371
1512 1 0.648742 0.427581 0.354433
1307 1 0.61505 0.544443 0.236254
1646 1 0.636572 0.575793 0.170849
1161 1 0.560188 0.322177 0.309733
405 1 0.519503 0.641448 0.31482
390 1 0.536344 0.547269 0.289223
1129 1 0.783887 0.368612 0.247405
1779 1 0.70335 0.524409 0.22136
532 1 0.691173 0.399485 0.224347
249 1 0.809175 0.636469 0.236681
307 1 0.848765 0.440618 0.263772
1955 1 0.845313 0.397017 0.207877
1313 1 0.836615 0.582726 0.398992
89 1 0.758976 0.546355 0.35776
592 1 0.910691 0.566963 0.355917
796 1 0.661212 0.524612 0.294395
721 1 0.676932 0.577134 0.415375
1207 1 0.954177 0.365462 0.196904
1500 1 0.820693 0.594285 0.33003
1943 1 0.973087 0.516148 0.358953
1920 1 0.842589 0.581137 0.184363
1491 1 0.796349 0.405199 0.315738
418 1 0.977775 0.328415 0.256582
157 1 0.899731 0.478549 0.295702
885 1 0.899901 0.623728 0.320145
952 1 0.832363 0.305214 0.398556
1658 1 0.534924 0.643611 0.236823
932 1 0.640858 0.761196 0.207043
1819 1 0.600266 0.615079 0.221918
168 1 0.666944 0.597503 0.246611
822 1 0.678565 0.648005 0.318543
1781 1 0.604751 0.697405 0.205613
1316 1 0.594424 0.582384 0.0473753
914 1 0.539914 0.687711 0.362432
1901 1 0.909355 0.0171614 0.0898782
1746 1 0.728432 0.564766 0.285667
925 1 0.57058 0.97215 0.400099
1234 1 0.785735 0.725589 0.135026
1754 1 0.722957 0.736368 0.180518
395 1 0.690341 0.732255 0.351357
905 1 0.673452 0.677267 0.227473
887 1 0.758116 0.634276 0.344821
1692 1 0.765199 0.616846 0.13852
81 1 0.764533 0.673063 0.190845
1567 1 0.628619 0.541413 0.466163
232 1 0.938297 0.547569 0.291412
1616 1 0.593913 0.976199 0.488332
401 1 0.900966 0.762714 0.325502
867 1 0.918374 0.646906 0.158729
524 1 0.814599 0.676384 0.301512
649 1 0.89713 0.584137 0.236603
106 1 0.995059 0.582277 0.0837205
991 1 0.925381 0.760266 0.244473
744 1 0.779699 0.707859 0.375876
580 1 0.989668 0.940493 0.477071
1666 1 0.751517 0.922911 0.366933
824 1 0.717033 0.787562 0.236245
1005 1 0.536793 0.753557 0.290289
662 1 0.66127 0.714665 0.288243
1848 1 0.738517 0.863524 0.28077
1241 1 0.589 0.686085 0.292445
1384 1 0.698269 0.797137 0.141085
547 1 0.837777 0.783294 0.202324
389 1 0.77467 0.740808 0.240521
1931 1 0.772238 0.794317 0.171455
263 1 0.778135 0.86028 0.195726
247 1 0.745342 0.706082 0.30725
757 1 0.819522 0.937624 0.421365
1197 1 0.802474 0.752299 0.325745
1062 1 0.599652 0.73968 0.350897
1880 1 0.827806 0.703564 0.202128
750 1 0.619347 0.781671 0.30079
1635 1 0.846359 0.747429 0.273677
1888 1 0.963176 0.626376 0.262399
1059 1 0.882389 0.889068 0.387322
1174 1 0.559171 0.671446 0.0218448
1889 1 0.965412 0.340663 0.00685101
474 1 0.578328 0.850767 0.176161
140 1 0.56681 0.0154981 0.18701
374 1 0.604601 0.947902 0.170643
2046 1 0.593406 0.000714651 0.250527
1118 1 0.632026 0.930898 0.258422
1842 1 0.718288 0.854116 0.352749
728 1 0.641099 0.830473 0.218352
1673 1 0.627371 0.970062 0.358578
1299 1 0.694893 0.939295 0.154106
156 1 0.774177 0.12109 0.189527
909 1 0.804674 0.906321 0.252754
1088 1 0.813925 0.815345 0.272689
1760 1 0.704729 0.879094 0.208795
774 1 0.732411 0.938325 0.296759
620 1 0.868327 0.881479 0.30263
1713 1 0.80095 0.879307 0.331577
286 1 0.923472 0.974165 0.331669
1427 1 0.880286 0.24381 0.0235873
1780 1 0.834303 0.949461 0.334782
771 1 0.909559 0.913469 0.175802
1720 1 0.864145 0.861452 0.222566
576 1 0.91975 0.89631 0.24951
283 1 0.562387 0.151224 0.282512
1883 1 0.907585 0.846958 0.147922
1953 1 0.657269 0.0324644 0.366986
159 1 0.620573 0.0489418 0.303924
1398 1 0.508591 0.977478 0.193126
2005 1 0.499677 0.430318 0.416141
285 1 0.620524 0.0968515 0.3636
2000 1 0.564081 0.0791672 0.289837
1796 1 0.543518 0.115921 0.35936
352 1 0.896557 0.935447 0.454568
616 1 0.640287 0.00464305 0.445238
996 1 0.967777 0.780515 0.0698204
997 1 0.811328 0.32216 0.497073
1603 1 0.676347 0.981243 0.297892
651 1 0.729002 0.10706 0.463768
1784 1 0.763208 0.528485 0.42561
1617 1 0.877527 0.0129303 0.394098
1538 1 0.828262 0.157923 0.403975
577 1 0.773618 0.199137 0.440439
252 1 0.908226 0.159887 0.405379
495 1 0.557718 0.039023 0.352398
97 1 0.752491 0.972518 0.414666
1430 1 0.869966 0.0420185 0.322592
1595 1 0.895408 0.377365 0.0512997
71 1 0.54921 0.520193 0.429514
332 1 0.654975 0.115555 0.484344
1513 1 0.966051 0.211895 0.385742
501 1 0.995297 0.453306 0.082934
370 1 0.603223 0.0647696 0.0126999
476 1 0.501773 0.4173 0.494394
186 1 0.988993 0.830096 0.126108
1995 1 0.603756 0.124252 0.427003
35 1 0.534291 0.193715 0.388298
545 1 0.647158 0.234244 0.352324
385 1 0.611266 0.184644 0.381581
1650 1 0.974262 0.145264 0.111268
1633 1 0.56652 0.842866 0.430395
230 1 0.573722 0.311271 0.454863
893 1 0.80473 0.366272 0.443102
910 1 0.677911 0.14286 0.350259
1547 1 0.844078 0.113177 0.0194386
1455 1 0.577902 0.400733 0.490549
915 1 0.856874 0.201038 0.469404
172 1 0.742976 0.135575 0.284858
1874 1 0.723253 0.380813 0.400833
1346 1 0.823151 0.263531 0.453518
1668 1 0.723925 0.19332 0.350302
248 1 0.96511 0.242343 0.297597
41 1 0.796927 0.986815 0.00656881
434 1 0.797562 0.170475 0.321788
1482 1 0.53883 0.875594 0.233961
109 1 0.563948 0.0707829 0.126452
74 1 0.923908 0.451033 0.469587
484 1 0.635095 0.924053 0.463409
1693 1 0.522392 0.763345 0.107414
1269 1 0.576413 0.252292 0.399106
686 1 0.602974 0.376761 0.410253
1884 1 0.661045 0.856816 0.27406
1630 1 0.664449 0.3118 0.305656
1163 1 0.730392 0.488244 0.379987
1345 1 0.7376 0.363262 0.476223
1758 1 0.997316 0.684007 0.187486
1861 1 0.60929 0.844449 0.346419
1468 1 0.876338 0.214333 0.356576
1933 1 0.807314 0.241853 0.360614
926 1 0.767495 0.327218 0.384781
226 1 0.712685 0.412798 0.300195
1119 1 0.77817 0.442156 0.406876
1960 1 0.69612 0.910224 0.42641
1135 1 0.814185 0.499522 0.375737
1414 1 0.917378 0.366268 0.383474
599 1 0.900373 0.31689 0.439646
1599 1 0.879245 0.11557 0.315455
1368 1 0.94627 0.293336 0.176497
800 1 0.998391 0.287025 0.121033
1332 1 0.905844 0.244488 0.410551
78 1 0.938015 0.201511 0.46004
711 1 0.973478 0.900569 0.338589
1165 1 0.800134 0.0241246 0.331478
1046 1 0.535302 0.0331911 0.466339
223 1 0.887217 0.0952702 0.385979
1585 1 0.572296 0.46894 0.334188
2029 1 0.972756 0.570296 0.198886
1698 1 0.534516 0.401764 0.347602
1202 1 0.6067 0.536782 0.390256
400 1 0.532253 0.656783 0.43826
1791 1 0.510375 0.149105 0.226629
644 1 0.565967 0.478697 0.501911
587 1 0.86063 0.429954 0.405985
1899 1 0.745309 0.485481 0.289184
826 1 0.67143 0.447229 0.467449
383 1 0.703424 0.524274 0.479846
1833 1 0.867667 0.514162 0.460469
832 1 0.701861 0.601328 0.484491
206 1 0.770447 0.661206 0.483317
1266 1 0.976805 0.749192 0.322108
1405 1 0.911215 0.384077 0.260376
238 1 0.864691 0.677052 0.358687
1461 1 0.889198 0.390494 0.454389
1775 1 0.884663 0.423721 0.342574
553 1 0.574031 0.242543 0.486648
871 1 0.876715 0.594368 0.468915
1401 1 0.921823 0.710018 0.39319
641 1 0.600427 0.618975 0.301944
1477 1 0.98524 0.696589 0.267917
316 1 0.522964 0.475678 0.282664
976 1 0.635021 0.631161 0.445087
974 1 0.601341 0.626623 0.373051
678 1 0.758234 0.0547881 0.277159
1111 1 0.809504 0.667726 0.423974
296 1 0.77626 0.734947 0.475908
160 1 0.646926 0.694722 0.403121
50 1 0.996388 0.496482 0.247958
1537 1 0.634162 0.769135 0.444742
308 1 0.767911 0.601244 0.409169
1941 1 0.744631 0.811125 0.486139
124 1 0.717299 0.656302 0.409533
2048 1 0.666985 0.576177 0.338954
1885 1 0.515101 0.857617 0.122545
1561 1 0.764775 0.577428 0.48607
1230 1 0.900704 0.488743 0.379727
2015 1 0.852512 0.735029 0.474829
731 1 0.9603 0.670139 0.344615
862 1 0.914683 0.783636 0.412974
1421 1 0.977245 0.655362 0.416506
1987 1 0.90084 0.657881 0.430386
811 1 0.549109 0.782688 0.393185
170 1 0.647782 0.791367 0.376796
1349 1 0.636214 0.857096 0.426062
253 1 0.699277 0.973654 0.369448
1221 1 0.576504 0.785246 0.481514
1710 1 0.591304 0.903259 0.383876
492 1 0.655952 0.907635 0.334882
53 1 0.901319 0.696754 0.289637
1177 1 0.737889 0.786862 0.316206
1967 1 0.980593 0.140799 0.212605
1122 1 0.930843 0.848878 0.344422
1946 1 0.715887 0.737256 0.414002
1707 1 0.772269 0.776338 0.38802
812 1 0.574882 0.589355 0.442715
1409 1 0.709794 0.813327 0.425913
1600 1 0.857778 0.822437 0.336861
527 1 0.55047 0.281436 0.0796719
1994 1 0.771069 0.877796 0.424695
1579 1 0.845818 0.74795 0.392719
1507 1 0.826157 0.793783 0.440505
271 1 0.942513 0.87121 0.441801
747 1 0.728326 0.300417 0.0246706
853 1 0.682311 0.160871 0.422032
1360 1 0.534202 0.689095 0.115973
911 1 0.890983 0.8311 0.474844
409 1 0.898105 0.32669 0.00169017
1892 1 0.951995 0.80479 0.494119
714 1 0.629328 0.62477 0.00197386
1805 1 0.990108 0.821297 0.797203
742 1 0.823401 0.740516 0.996709
836 1 0.988338 0.882317 0.826252
1092 1 0.786281 0.931071 0.532153
1199 1 0.669474 0.136275 0.587714
1768 1 0.53834 0.0294966 0.645072
875 1 0.622198 0.996864 0.644693
626 1 0.559932 0.963966 0.688709
825 1 0.986454 0.0657598 0.578777
1802 1 0.551254 0.995812 0.547657
918 1 0.505297 0.849673 0.772922
1924 1 0.701713 0.0647875 0.524477
1154 1 0.807998 0.0593889 0.610289
2035 1 0.905053 0.186346 0.526695
1358 1 0.786077 0.194447 0.695667
1839 1 0.893136 0.858866 0.968724
1492 1 0.746925 0.170756 0.562224
1815 1 0.740181 0.102655 0.595271
719 1 0.843459 0.229291 0.577829
1418 1 0.70885 0.153766 0.651041
951 1 0.883435 0.137599 0.750713
274 1 0.610523 0.60172 0.536561
1638 1 0.998924 0.888958 0.980229
1764 1 0.86308 0.11353 0.625229
1929 1 0.959746 0.133292 0.534858
2039 1 0.784032 0.226086 0.513916
866 1 0.512551 0.76693 0.510733
1772 1 0.876905 0.118285 0.552079
1324 1 0.924424 0.0590839 0.524008
763 1 0.579687 0.658221 0.509703
1701 1 0.932163 0.0983044 0.594802
939 1 0.557918 0.924561 0.977086
183 1 0.619882 0.18676 0.619191
227 1 0.513174 0.0879917 0.519111
1545 1 0.558267 0.0661555 0.574555
536 1 0.57192 0.56394 0.937753
2021 1 0.512817 0.253429 0.51776
1301 1 0.519774 0.176352 0.549961
1176 1 0.687082 0.215752 0.621611
618 1 0.732795 0.274977 0.578156
481 1 0.640539 0.203712 0.547546
426 1 0.963028 0.022203 0.933568
1175 1 0.645855 0.558161 0.891287
986 1 0.877194 0.311295 0.598742
590 1 0.823556 0.354689 0.564179
1495 1 0.723264 0.230902 0.701828
350 1 0.753642 0.299457 0.66509
533 1 0.794909 0.127181 0.63757
782 1 0.978061 0.290654 0.776547
1636 1 0.634826 0.726454 0.510029
1785 1 0.921626 0.278148 0.645772
1801 1 0.772126 0.35697 0.622597
1158 1 0.871552 0.176145 0.695222
1891 1 0.656638 0.31051 0.567237
1562 1 0.618429 0.319418 0.722161
458 1 0.768564 0.0926859 0.528416
203 1 0.591845 0.462175 0.590239
842 1 0.620944 0.0419616 0.554625
1279 1 0.575456 0.238608 0.57637
1800 1 0.699212 0.355622 0.606984
2003 1 0.670782 0.293989 0.674247
629 1 0.884528 0.402198 0.705835
396 1 0.818745 0.464171 0.651692
1909 1 0.678142 0.372439 0.532226
2 1 0.793479 0.530063 0.671164
956 1 0.745415 0.34607 0.550727
841 1 0.813615 0.161531 0.5414
496 1 0.881762 0.302561 0.715809
705 1 0.931965 0.359552 0.587272
1460 1 0.725581 0.887582 0.50203
677 1 0.539194 0.747752 0.779321
86 1 0.81811 0.424741 0.588504
2001 1 0.855593 0.438429 0.515941
643 1 0.969988 0.259843 0.974549
1550 1 0.99115 0.855277 0.701619
973 1 0.530106 0.634079 0.709136
220 1 0.652678 0.425387 0.602173
1105 1 0.689578 0.450499 0.692154
1121 1 0.990117 0.967331 0.856051
1731 1 0.5635 0.537828 0.593607
968 1 0.959522 0.648535 0.782481
19 1 1.00047 0.134741 0.669906
1577 1 0.653426 0.552147 0.598687
211 1 0.565775 0.746214 0.991934
902 1 0.604862 0.639134 0.69007
615 1 0.608621 0.5539 0.666818
417 1 0.988247 0.119893 0.863648
419 1 0.986757 0.484776 0.769274
294 1 0.585774 0.414165 0.673223
1422 1 0.72464 0.421589 0.580346
346 1 0.937422 0.428891 0.783976
1593 1 0.830076 0.0436065 0.516971
257 1 0.754086 0.491659 0.614649
359 1 0.732008 0.587957 0.632154
819 1 0.803416 0.527254 0.577449
687 1 0.720996 0.548353 0.562534
391 1 0.930547 0.976433 0.511691
245 1 0.906717 0.495983 0.532077
1433 1 0.51152 0.387276 0.808607
1411 1 0.545284 0.565575 0.524146
963 1 0.862058 0.56475 0.532956
319 1 0.837486 0.596165 0.643503
1628 1 0.944859 0.472421 0.670887
1029 1 0.872178 0.495614 0.616078
1906 1 0.569713 0.744178 0.55575
1173 1 0.595233 0.729715 0.635955
1007 1 0.64267 0.67736 0.639216
1798 1 0.680719 0.634342 0.571124
1554 1 0.542758 0.67167 0.652683
1998 1 0.597838 0.236701 0.980689
94 1 0.805248 0.598864 0.577327
816 1 0.868458 0.668304 0.632236
240 1 0.566586 0.618039 0.590206
2017 1 0.72542 0.72743 0.611156
981 1 0.725435 0.66335 0.638262
1790 1 0.7453 0.617763 0.539058
1489 1 0.799005 0.689652 0.638458
1555 1 0.606909 0.680243 0.573664
1407 1 0.597808 0.00236326 0.7708
424 1 0.898651 0.612939 0.58905
20 1 0.636235 0.934082 0.534504
349 1 0.50502 0.463336 0.837164
1983 1 0.921615 0.165015 0.647139
1736 1 0.972687 0.714951 0.554863
1087 1 0.842816 0.643065 0.70244
1536 1 0.527563 0.027512 0.748805
1075 1 0.61194 0.797578 0.54852
1542 1 0.519411 0.318553 0.76321
289 1 0.594601 0.867849 0.501445
301 1 0.571451 0.871427 0.590773
107 1 0.547642 0.922654 0.53903
1519 1 0.682629 0.833946 0.572906
1608 1 0.689061 0.859026 0.65399
1845 1 0.516871 0.734425 0.619552
1683 1 0.75806 0.92546 0.65022
1098 1 0.540566 0.315269 0.564492
654 1 0.668837 0.826367 0.500815
943 1 0.714737 0.900616 0.585097
49 1 0.787186 0.760841 0.639855
148 1 0.729111 0.782013 0.563717
1605 1 0.7533 0.835624 0.61303
22 1 0.850174 0.729355 0.668757
205 1 0.846244 0.718235 0.580512
1581 1 0.828588 0.803533 0.701646
1890 1 0.972621 0.849468 0.622727
787 1 0.904269 0.699959 0.534081
62 1 0.939743 0.016534 0.736286
207 1 0.949948 0.573071 0.553262
1598 1 0.950968 0.676664 0.642883
550 1 0.909055 0.734162 0.615041
204 1 0.822249 0.80728 0.597649
152 1 0.621531 0.430032 0.537781
1592 1 0.666662 0.970999 0.598649
1700 1 0.54324 0.595052 0.993854
1478 1 0.530833 0.347356 0.500454
1278 1 0.846959 0.96609 0.562867
1054 1 0.676127 0.251112 0.993029
1467 1 0.841992 0.927184 0.953147
953 1 0.632423 0.902833 0.618304
1653 1 0.960639 0.789256 0.740125
780 1 0.937012 0.0560482 0.794397
1627 1 0.738042 0.996834 0.578645
101 1 0.779388 0.855018 0.546143
1146 1 0.787417 0.951387 0.712624
1040 1 0.668645 0.940684 0.67914
1224 1 0.714552 0.00249947 0.661184
1417 1 0.604068 0.36065 0.55259
1036 1 0.522661 0.222393 0.964752
803 1 0.894268 0.924772 0.814691
1741 1 0.796947 0.784352 0.532408
732 1 0.816076 0.902143 0.603137
985 1 0.871334 0.843523 0.565967
535 1 0.85222 0.906874 0.517586
447 1 0.796817 0.988699 0.640765
1128 1 0.880377 0.80432 0.645593
36 1 0.891082 0.0284371 0.579976
463 1 0.527642 0.385074 0.558667
1706 1 0.741488 0.535116 0.935955
1051 1 0.592843 0.0426594 0.716761
921 1 0.685277 0.195712 0.783422
1860 1 0.610296 0.122701 0.640719
849 1 0.657697 0.184859 0.708296
2023 1 0.606079 0.156185 0.834292
1803 1 0.628951 0.291516 0.816362
818 1 0.682654 0.0623883 0.594116
1003 1 0.551706 0.102325 0.742359
1454 1 0.654868 0.00839513 0.720326
1371 1 0.566766 0.0701739 0.854549
960 1 0.751299 0.064381 0.65785
920 1 0.810796 0.278165 0.703026
594 1 0.72511 0.11697 0.710994
162 1 0.755334 0.144921 0.786185
713 1 0.800528 0.0984726 0.84542
141 1 0.723688 0.0543058 0.849541
462 1 0.646635 0.280209 0.500375
197 1 0.974409 0.414176 0.99071
556 1 0.930861 0.0975977 0.695155
542 1 0.92639 0.0185873 0.650624
1233 1 0.837392 0.191042 0.761798
1217 1 0.851337 0.0833024 0.792069
1623 1 0.9935 0.966621 0.687196
1162 1 0.804914 0.131016 0.731419
1977 1 0.75766 0.0794351 0.761021
579 1 0.5527 0.303518 0.687947
967 1 0.790933 0.285132 0.842555
1817 1 0.682277 0.338979 0.780644
895 1 0.594384 0.184367 0.753322
1927 1 0.790325 0.808757 0.937673
189 1 0.578459 0.280442 0.766989
621 1 0.525015 0.216824 0.713425
2016 1 0.564025 0.231602 0.822621
1399 1 0.811579 0.879307 0.995579
1155 1 0.657393 0.267719 0.747295
123 1 0.76281 0.227881 0.640696
1610 1 0.595899 0.241826 0.680976
597 1 0.84641 0.374684 0.649586
740 1 0.740644 0.223614 0.857403
1366 1 0.742078 0.252972 0.78531
1383 1 0.757818 0.171556 0.900408
1498 1 0.81561 0.258002 0.782626
1588 1 0.857037 0.30937 0.814545
526 1 0.761042 0.917845 0.876498
1894 1 0.873316 0.226984 0.827335
344 1 0.884158 0.240267 0.900469
1708 1 0.81581 0.197659 0.865865
1718 1 0.857757 0.152683 0.826615
2043 1 0.940548 0.347099 0.748916
315 1 0.941491 0.156235 0.802107
623 1 0.937007 0.729105 0.954291
215 1 0.590584 0.528108 0.844877
1857 1 0.603351 0.296265 0.627154
343 1 0.97236 0.813944 0.987563
288 1 0.7189 0.396968 0.74347
127 1 0.943619 0.164381 0.715513
1151 1 0.567623 0.380067 0.728748
1778 1 0.653399 0.375564 0.692947
1753 1 0.49926 0.351717 0.689778
540 1 0.531617 0.483312 0.983712
979 1 0.709313 0.294532 0.849036
855 1 0.643327 0.397661 0.796348
2019 1 0.78693 0.371314 0.711933
1612 1 0.719827 0.457258 0.794216
934 1 0.733618 0.298259 0.734501
827 1 0.833656 0.312875 0.90021
837 1 0.633756 0.477149 0.655293
321 1 0.740449 0.359045 0.83295
1021 1 0.823105 0.378123 0.847602
1828 1 0.791312 0.492131 0.772521
500 1 0.53485 0.72598 0.860957
1079 1 0.904968 0.424307 0.619734
453 1 0.899897 0.36518 0.891279
177 1 0.888409 0.174114 0.889951
1372 1 0.946206 0.272346 0.711172
1429 1 0.791899 0.321559 0.772635
1645 1 0.972073 0.334293 0.668694
1596 1 0.663701 0.316012 0.952408
300 1 0.862076 0.433062 0.78949
1473 1 0.733468 0.779889 0.990485
1937 1 0.661577 0.607048 0.656495
1570 1 0.578153 0.216706 0.906167
1621 1 0.537331 0.569935 0.665743
2012 1 0.552949 0.201482 0.636819
320 1 0.621036 0.531103 0.753643
1509 1 0.56254 0.560204 0.726639
120 1 0.558274 0.476219 0.770484
278 1 0.706399 0.517197 0.672043
113 1 0.743631 0.651005 0.729161
379 1 0.788757 0.398934 0.780634
174 1 0.686758 0.502301 0.749477
777 1 0.753578 0.562614 0.751763
698 1 0.779629 0.598024 0.685565
104 1 0.936755 0.265007 0.530836
1002 1 0.905185 0.496099 0.801467
1647 1 0.939016 0.563567 0.623196
2036 1 0.805934 0.511464 0.506039
1286 1 0.97712 0.456459 0.831776
716 1 0.817365 0.548059 0.898585
886 1 0.83233 0.548793 0.759812
402 1 0.580286 0.603206 0.860972
1271 1 0.556569 0.733361 0.710964
1458 1 0.67032 0.667211 0.731671
1678 1 0.631314 0.766807 0.805232
1836 1 0.720495 0.736986 0.759677
1381 1 0.692802 0.688681 0.83969
1685 1 0.591732 0.605232 0.783895
1502 1 0.806664 0.620073 0.755237
1506 1 0.687197 0.579718 0.718171
1284 1 0.755947 0.656601 0.803306
2031 1 0.815658 0.694659 0.750771
1276 1 0.66148 0.596556 0.832512
467 1 0.767114 0.611796 0.923382
548 1 0.800147 0.642933 0.861604
281 1 0.667749 0.72696 0.690307
1294 1 0.871224 0.534836 0.683524
328 1 0.978836 0.701912 0.891677
1167 1 0.847938 0.574088 0.844833
60 1 0.53087 0.43073 0.624876
363 1 0.907213 0.600264 0.741446
297 1 0.666264 0.154307 0.96782
477 1 0.960423 0.771998 0.862278
1971 1 0.879848 0.637288 0.80628
82 1 0.616038 0.736001 0.740945
2022 1 0.672083 0.783633 0.635101
1867 1 0.588371 0.801604 0.642202
1503 1 0.576942 0.680643 0.753432
1251 1 0.525791 0.677914 0.555646
423 1 0.536438 0.951394 0.776204
966 1 0.634259 0.838418 0.791615
937 1 0.547089 0.891373 0.731655
93 1 0.569126 0.816298 0.755189
112 1 0.648617 0.804638 0.720339
595 1 0.720284 0.787503 0.700461
1527 1 0.761958 0.718478 0.695965
176 1 0.839432 0.833272 0.781952
1667 1 0.86884 0.765142 0.751589
1264 1 0.761865 0.700521 0.874952
442 1 0.789577 0.765148 0.745754
1186 1 0.868379 0.809507 0.905477
1152 1 0.579705 0.378546 0.964244
1253 1 0.942858 0.90589 0.685272
1691 1 0.902807 0.682 0.736139
755 1 0.772264 0.86997 0.728641
413 1 0.916279 0.829547 0.709457
847 1 0.910532 0.744459 0.697283
1985 1 0.87382 0.777604 0.830337
39 1 0.616868 0.867571 0.714815
2044 1 0.806046 0.282729 0.606431
646 1 0.524346 0.156758 1.00103
1084 1 0.633428 0.785225 0.992185
858 1 0.679851 0.0667317 0.674555
1584 1 0.9456 0.623491 0.846892
378 1 0.608054 0.932541 0.767727
6 1 0.534963 0.920318 0.866501
1558 1 0.717908 0.027077 0.730777
108 1 0.798496 0.866061 0.665486
1047 1 0.697961 0.866343 0.726006
1377 1 0.66292 0.0587611 0.767673
944 1 0.706379 0.897997 0.793339
704 1 0.59598 0.953199 0.834119
1212 1 0.865888 0.0485696 0.657892
665 1 0.801811 0.0446937 0.702307
846 1 0.873354 0.981978 0.774042
1180 1 0.927237 0.868148 0.773477
1225 1 0.824989 0.951378 0.84053
318 1 0.559201 0.818841 0.981914
196 1 0.686392 0.945092 0.941915
1326 1 0.85812 0.901394 0.753126
73 1 0.874588 0.0621303 0.727006
1157 1 0.868557 0.986238 0.699985
558 1 0.858681 0.909617 0.682
338 1 0.541901 0.997055 0.839636
491 1 0.591509 0.00845036 0.967258
1213 1 0.541029 0.0443004 0.919259
1922 1 0.506514 0.111112 0.602188
764 1 0.655435 0.0857838 0.851712
326 1 0.635067 0.125969 0.761947
1546 1 0.884201 0.332005 0.510082
258 1 0.739383 0.964526 0.808312
38 1 0.794424 0.0086675 0.779912
451 1 0.72411 0.289248 0.501549
208 1 0.736512 0.0156676 0.938333
1873 1 0.773651 0.946992 0.94629
306 1 0.779366 0.993608 0.865163
1134 1 0.606222 0.99705 0.901587
1990 1 0.508571 0.598876 0.898456
1400 1 0.771321 0.997185 0.511213
1705 1 0.78686 0.402781 0.522951
1164 1 0.98434 0.365319 0.823219
273 1 0.805551 0.0539324 0.969469
1672 1 0.984758 0.0434965 0.856426
1232 1 0.709161 0.651476 0.989754
178 1 0.880561 0.0493261 0.910877
1331 1 0.936316 0.981537 0.801721
879 1 0.919658 0.114291 0.928676
1840 1 0.597231 0.152129 0.916261
1188 1 0.548434 0.388733 0.881048
88 1 0.986749 0.917096 0.557872
664 1 0.947823 0.481956 0.995222
1292 1 0.554998 0.31495 0.848863
752 1 0.735216 0.227318 0.951168
1091 1 0.659619 0.209637 0.920709
688 1 0.517064 0.429833 0.700928
1886 1 0.646558 0.273073 0.895628
1214 1 0.75679 0.0924929 0.913584
268 1 0.692569 0.140581 0.90474
309 1 0.643674 0.221446 0.847769
269 1 0.993284 0.0646117 0.72927
1565 1 0.789374 0.471216 0.949687
1893 1 0.803793 0.259809 0.950523
1231 1 0.819384 0.168429 0.981877
64 1 0.981484 0.619324 0.702998
773 1 0.988946 0.192135 0.931953
1147 1 0.757887 0.578422 0.996833
14 1 0.928125 0.632449 0.509114
737 1 0.955647 0.209901 0.864099
1846 1 0.834408 0.122573 0.908774
581 1 0.916053 0.0899415 0.85611
1777 1 0.9103 0.170055 0.977609
129 1 0.895524 0.245399 0.761346
693 1 0.515144 0.805138 0.596885
685 1 0.638528 0.35348 0.898075
1787 1 0.510941 0.270014 0.889022
962 1 0.592991 0.344761 0.791346
1465 1 0.600109 0.451889 0.907111
783 1 0.721961 0.346708 0.906568
262 1 0.661918 0.382155 0.987645
1727 1 0.851149 0.453497 0.905989
1298 1 0.690721 0.165635 0.508619
1063 1 0.743821 0.381037 0.986581
1597 1 0.899196 0.413201 0.956347
584 1 0.802683 0.391056 0.928937
272 1 0.978746 0.21846 0.746806
1185 1 0.838287 0.34789 0.965959
1410 1 0.531723 0.797258 0.693143
295 1 0.971422 0.317249 0.928457
437 1 0.895538 0.42687 0.853395
234 1 0.817972 0.538132 0.969291
466 1 0.935642 0.286783 0.863866
1190 1 0.90972 0.562428 0.810085
302 1 0.964287 0.536638 0.924341
1559 1 0.555514 0.101467 0.957445
1534 1 0.855437 0.26955 0.519918
482 1 0.6536 0.50156 0.81797
1917 1 0.625009 0.436183 0.733543
1300 1 0.597353 0.440823 0.830159
563 1 0.692049 0.409789 0.850016
1811 1 0.744709 0.476535 0.874649
1149 1 0.551051 0.502115 0.909212
59 1 0.551464 0.322886 0.926047
1526 1 0.716761 0.427038 0.932988
1402 1 0.769628 0.449263 0.71495
9 1 0.776475 0.659057 0.980018
856 1 0.78455 0.55607 0.823834
1096 1 0.839157 0.486629 0.834474
42 1 0.691915 0.824132 0.838506
452 1 0.725005 0.583332 0.860547
1426 1 0.921892 0.426431 0.550688
469 1 0.640011 0.505474 0.547451
411 1 0.554889 0.496842 0.653587
1013 1 0.90937 0.97946 0.875006
1809 1 0.894568 0.503392 0.892775
1019 1 0.945777 0.459902 0.917683
92 1 0.672074 0.990034 0.519626
833 1 0.885763 0.501649 0.969936
1851 1 0.830888 0.992372 0.918673
602 1 0.975549 0.526828 0.83994
435 1 0.677995 0.503556 0.919028
1200 1 0.524239 0.612216 0.816075
831 1 0.640135 0.640818 0.900945
76 1 0.500632 0.717522 0.953753
1516 1 0.843621 0.430676 0.989205
37 1 0.949392 0.87721 0.51184
416 1 0.623939 0.524302 0.965858
1879 1 0.691421 0.580003 0.962315
1342 1 0.566584 0.362441 0.620717
171 1 0.734849 0.72528 0.937881
660 1 0.714851 0.755242 0.86095
1057 1 0.981016 0.111467 0.971121
358 1 0.836844 0.617 0.96022
1043 1 0.86983 0.663899 0.872741
1074 1 0.905853 0.0533773 0.975576
1436 1 0.983998 0.738113 0.794001
1820 1 0.665659 0.734217 0.569936
1640 1 0.996235 0.583757 0.890362
1905 1 0.945044 0.632168 0.914389
598 1 0.890733 0.668239 0.973634
1379 1 0.639998 0.0636397 0.939914
788 1 0.89087 0.575585 0.914071
179 1 0.683553 0.80059 0.920256
1974 1 0.616322 0.843362 0.931805
1444 1 0.561497 0.777949 0.915814
1337 1 0.568701 0.668627 0.833545
1642 1 0.566995 0.875606 0.798783
131 1 0.612467 0.809244 0.86512
1211 1 0.733087 0.827899 0.777767
1415 1 0.685482 0.902528 0.871782
1141 1 0.778223 0.881251 0.801653
695 1 0.510744 0.539793 0.817708
1631 1 0.67885 0.970982 0.876005
1462 1 0.631475 0.716075 0.869993
1808 1 0.807033 0.769163 0.823936
1304 1 0.72551 0.935149 0.735228
1984 1 0.817648 0.689414 0.928526
1729 1 0.891836 0.909898 0.895171
1363 1 0.817116 0.864426 0.884641
377 1 0.891298 0.735195 0.890412
1166 1 0.734726 0.861623 0.914906
984 1 0.954562 0.979519 0.580757
938 1 0.908114 0.840601 0.841892
609 1 0.868677 0.782678 0.518677
994 1 0.932985 0.707574 0.829057
1648 1 0.945498 0.827879 0.912995
47 1 0.618846 0.886571 0.854396
23 1 0.659866 0.961623 0.802359
792 1 0.549056 0.970245 0.92409
940 1 0.602324 0.924131 0.927946
567 1 0.629161 0.0255365 0.830368
541 1 0.901191 0.937784 0.600856
2041 1 0.543315 0.859191 0.903105
1862 1 0.888072 0.783177 0.966459
180 1 0.848938 0.0287615 0.8411
958 1 0.766653 0.818082 0.854854
913 1 0.755397 0.708676 0.554973
1625 1 0.541261 0.811012 0.836937
1992 1 0.987439 0.903024 0.754873
575 1 0.996184 0.728197 0.719436
1572 1 0.502182 0.247736 0.591546
1317 1 0.49962 0.00343709 0.965863
988 1 0.992795 0.524164 0.690209
2026 1 0.517106 0.840671 0.506215
310 1 0.953446 0.555779 0.995529
369 1 0.85318 0.654951 0.500881
1769 1 0.551821 0.286594 0.988603
| [
"scheuclu@gmail.com"
] | scheuclu@gmail.com |
e3fa02b862a4442c9a2a11c88c611c5f3b54bd12 | 264bc7fd19e67ab1eed7892a896c47e9a031434d | /programs/finalsdc/SDC-tarun/graph.py | 6a64587d1dc52d80d677873019d80766c3bc000a | [] | no_license | tfpf/PRML_project | ccd8e1281ffc91b78dd79fb68d643ad798b514cf | 9490488c847db253281bd879572f881bd158f135 | refs/heads/master | 2020-03-14T16:39:29.952377 | 2018-05-04T16:12:43 | 2018-05-04T16:12:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | from graphviz import Digraph
dot = Digraph(comment='OBDD')
print ("enter splitting order")
order=list(input())
numbers = len(order)
file=open("input.txt","r") #complements are represented with capital letters
input1=file.readlines()
for i in range(len(input1)) :
input1[i]=list(input1[i][:-1])
input1 = np.array(input1,dtype='object')
print(len(input1))
print(int(numbers))
output = [[0 for y in range(int(numbers))] for x in range(len(input1))]
print(input1)
print(output)
for i in range(len(input1)) :
for j in range(len(order)):
for k in range(len(input1[i])):
if order[j] == input1[i][k]:
output[i][j] = '01'
print (output)
if(k+1<len(input1[i])):
if input1[i][k+1] == "'":
output[i][j] = '10'
break
if k == len(input1[i])-1:
output[i][j] = '11'
print(len(output))
print(output[0])
print(output[1])
output = np.array(output,dtype='object')
dot.render('test-output/round-table.gv', view=True) | [
"rohanbfg@gmail.com"
] | rohanbfg@gmail.com |
d20971ac7d122528ac943d5a46e7e8f529aa93db | 19a4365d81507587ef09488edc7850c2227e7165 | /159.py | 1269b3a84de6cd5b5911a7d86d4ea7721918348b | [] | no_license | akauntotesuto888/Leetcode-Lintcode-Python | 80d8d9870b3d81da7be9c103199dad618ea8739a | e2fc7d183d4708061ab9b610b3b7b9e2c3dfae6d | refs/heads/master | 2023-08-07T12:53:43.966641 | 2021-09-17T19:51:09 | 2021-09-17T19:51:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # Time: O(n)
# Space: O(1)
class Solution:
def lengthOfLongestSubstringTwoDistinct(self, s: str) -> int:
d = {}
count = 0
start, end = 0, 0
result = 0
while end < len(s):
c = s[end]
d[c] = d.get(c, 0) + 1
if d[c] == 1:
count += 1
end += 1
while count > 2 and start < len(s):
curr = s[start]
if curr in d:
d[curr] -= 1
if d[curr] == 0:
count -= 1
start += 1
result = max(result, end-start)
return result | [
"tiant@qualtrics.com"
] | tiant@qualtrics.com |
414a502ec52a6351df06b41af65aede0299e8ef0 | 2de259bc9e3306c2ddbe72b83df31df83436091b | /gol/view.py | a146a7f010b3e120e654cfa04215a76e67cf1a7c | [
"MIT"
] | permissive | FlipEnergy/gol | 102b7f3b7aa710a13badb8bd81c37d381ce6f7c5 | d5d37621c2b3ed6d0718b6ccd8d433c9b44d97ec | refs/heads/master | 2023-04-08T00:10:46.424338 | 2020-05-17T16:03:50 | 2020-05-17T16:03:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | import curses
import time
from gol.board import Size
from gol.errors import BoardTooLarge
class View:
def __init__(self):
self.symbols = {
'alive': '\N{FULL BLOCK}',
'dead': '\N{LIGHT SHADE}',
}
def play(self, stdscr, generate_boards):
curses.use_default_colors()
visibility = 0
curses.curs_set(visibility)
max_size = Size(
curses.LINES - 1,
curses.COLS - 1,
)
for board in generate_boards():
if (
board.size.height > max_size.height
or board.size.width > max_size.width
):
raise BoardTooLarge((
"Board size is too large and "
"cannot fit into screen."
), board.size, max_size)
stdscr.erase()
s = self.stringify_view(board)
stdscr.addstr(s)
stdscr.refresh()
def stringify_view(self, board):
status = f"Generation: {board.generation}"
s = '\n'.join([
status,
self.stringify_board(board),
])
return s
def stringify_cell(self, cell):
k = 'alive' if cell.alive else 'dead'
c = self.symbols[k]
return c
def stringify_row(self, row):
s = ''.join(
self.stringify_cell(cell)
for cell in row
)
return s
def stringify_board(self, board):
s = '\n'.join(
self.stringify_row(row)
for row in board.cells
)
return s
| [
"sohyongsheng@gmail.com"
] | sohyongsheng@gmail.com |
e286a0a6a6ef5d3669680cd9ceacd7caf05f7a4b | 7d91ec17756f64c5dec8d1d2c7cf028162a87fb8 | /online_library/urls.py | 8ea4ec61f2b20744a30897255352f7cd27cf26ed | [
"MIT"
] | permissive | svaradinov/online_library | a24dbe0eb0bdcbe16f2e1fd0305f368fa34f0ac4 | 183f841dc27e899136a437b37221e1385abe0c33 | refs/heads/main | 2023-08-14T11:15:07.107456 | 2021-09-11T22:57:55 | 2021-09-11T22:57:55 | 395,108,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('online_library.book.urls')),
path('user', include('online_library.user.urls')),
]
| [
"sv.varadinov@gmail.com"
] | sv.varadinov@gmail.com |
baf9e01690d9b7617c973a0ffbeaf8dff30a2ba2 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python网络爬虫部分/day02/lxmlTest.py | 5436f0ebe77b8b9ef01a6d55ce88f6ab04779c42 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 12 17:20:25 2018
@author: jyb
"""
from lxml import etree
lxmlStr = '''
<bookstore>
<book>
<title lang="en">Harry Potter</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
<book>
<title lang="zh">hello world</title>
<author>J K. Rowling</author>
<year>2005</year>
<price>29.99</price>
</book>
</bookstore>
'''
# 根节点
root = etree.fromstring(lxmlStr)
print(root)
elements = root.xpath('//book/title')
print(elements[0].text)
print(elements[0].attrib)
attrs = root.xpath('//@lang')
print(attrs) | [
"yabing_ji@163.com"
] | yabing_ji@163.com |
39dcc0a8cb19d050d40f63af8512633fbbddc8e7 | f17fe3c240aeda4205d934a34fc2fc407c6d9f8a | /backend/silent_lake_29099/wsgi.py | 25d60da238c4e278e073595fb4c11c76d1b79f0b | [] | no_license | crowdbotics-apps/silent-lake-29099 | d5baaa2272c8362af6cc2adacf738c99fa4bb770 | 6993bc68a87d0e835d5d3b4240e9e1412d851528 | refs/heads/master | 2023-06-22T22:49:18.030820 | 2021-07-23T23:51:23 | 2021-07-23T23:51:23 | 388,954,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for silent_lake_29099 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'silent_lake_29099.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
be23945603db0d48e9bf6c9b3d89f7c8c219bc1d | 7d85c42e99e8009f63eade5aa54979abbbe4c350 | /game/tools/build_tools/make.py | 2e55e59e68a8d04a495b61e0b90078d11af6cf1a | [] | no_license | ToontownServerArchive/Cog-Invasion-Online-Alpha | 19c0454da87e47f864c0a5cb8c6835bca6923f0e | 40498d115ed716f1dec12cf40144015c806cc21f | refs/heads/master | 2023-03-25T08:49:40.878384 | 2016-07-05T07:09:36 | 2016-07-05T07:09:36 | 348,172,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,312 | py | '''
Use this script to invoke Nirai builder and compile the game.
This process consists of 3 step:
1. Pack models into a models.mf.
2. Compile src/sample.cxx and generate sample.exe using NiraiCompiler.
3. Generate sample.nri, which contains the Python modules.
'''
import argparse
import sys
import os
from niraitools import *
parser = argparse.ArgumentParser()
parser.add_argument('--compile-cxx', '-c', action='store_true',
help='Compile the CXX codes and generate coginvasion.exe into built.')
parser.add_argument('--make-nri', '-n', action='store_true',
help='Generate coginvasion.nri.')
parser.add_argument('--is-launcher', '-l', action='store_true',
help='Are we compiling the launcher?')
parser.add_argument('--models', '-m', action='store_true',
help='Pack models.mf.')
args = parser.parse_args()
def niraicall_obfuscate(code):
# We'll obfuscate if len(code) % 4 == 0
# This way we make sure both obfuscated and non-obfuscated code work.
if len(code) % 4:
return False, None
# There are several ways to obfuscate it
# For this example, we'll invert the string
return True, code[::-1]
niraimarshal.niraicall_obfuscate = niraicall_obfuscate
class CIOPackager(NiraiPackager):
HEADER = 'COGINVASIONONLINE'
BASEDIR = '.'
def __init__(self, outfile):
if args.is_launcher:
self.HEADER = 'COGINVASIONLAUNCHER'
NiraiPackager.__init__(self, outfile)
self.__manglebase = self.get_mangle_base(self.BASEDIR)
self.add_panda3d_dirs()
self.add_default_lib()
self.add_directory(self.BASEDIR, mangler=self.__mangler)
def __mangler(self, name):
# N.B. Mangler can be used to strip certain files from the build.
# The file is not included if it returns an empty string.
return name[self.__manglebase:].strip('.')
def generate_niraidata(self):
print 'Generating niraidata'
config = self.get_file_contents('tools/build_tools/config.prc', True)
niraidata = 'CONFIG = %r' % config
self.add_module('niraidata', niraidata, compile=True)
def process_modules(self):
'''
This method is called when it's time to write the output.
For sample.nri, we use an encrypted datagram.
The datagram is read by sample.cxx, which populates Python frozen array.
Datagram format:
uint32 numModules
for each module:
string name
int32 size *
data(abs(size))
* Negative size means the file was an __init__
'''
dg = Datagram()
dg.addUint32(len(self.modules))
for moduleName in self.modules:
data, size = self.modules[moduleName]
dg.addString(moduleName)
dg.addInt32(size)
dg.appendData(data)
data = dg.getMessage()
iv = '\0' * 16
if args.is_launcher:
key = 'mmkfcaaph_cil_bm'
else:
key = 'mmkfcaaph_cio_bm'
return aes.encrypt(data, key, iv)
if args.compile_cxx and not args.is_launcher:
compiler = NiraiCompiler('coginvasion.exe')
compiler.add_nirai_files()
compiler.add_source('tools/build_tools/coginvasion.cxx')
compiler.run()
elif args.is_launcher and args.compile_cxx:
compiler = NiraiCompiler('launcher.exe')
compiler.add_nirai_files()
compiler.add_source('tools/build_tools/launcher.cxx')
compiler.run()
if args.make_nri and not args.is_launcher:
pkg = CIOPackager('built/coginvasion.bin')
pkg.add_file('lib/coginvasion/base/CIStartGlobal.py')
pkg.add_directory('lib\\coginvasion')
pkg.generate_niraidata()
pkg.write_out()
elif args.is_launcher and args.make_nri:
pkg = CIOPackager('built/launcher.bin')
pkg.add_file('lib/launcher.py')
pkg.add_file('../Panda3D-CI/python/DLLs/_tkinter.pyd')
pkg.generate_niraidata()
pkg.write_out()
if args.models:
os.chdir('..')
cmd = 'multify -cf build/built/models.mf models'
p = subprocess.Popen(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=True)
v = p.wait()
if v != 0:
print 'The following command returned non-zero value (%d): %s' % (v, cmd[:100] + '...')
sys.exit(1)
| [
"brianlach72@gmail.com"
] | brianlach72@gmail.com |
979fdb825b0a5335d0686544a45de62b21f44394 | 747f759311d404af31c0f80029e88098193f6269 | /addons/product_size/__terp__.py | 993d5bef698daa83448dd32d4d3643e51c4d6420 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | /home/openerp/production/extra-addons/product_size/__terp__.py | [
"geerish@omerp.net"
] | geerish@omerp.net |
3334adb8db9c8e79af700a28b088578f390eedec | b128e285d084b12caf0856c64a588134c71840c3 | /optomedik/migrations/0022_auto_20201201_1846.py | 2266b37ac4044dcd785f429d1267749686a323d4 | [] | no_license | headlerb/Proyecto-poloTic | 781e8350fef30f4c9d35e206b4c8c9fd4688e9c9 | 9b96393fd4626805bc2447b490d492e19e476798 | refs/heads/main | 2023-02-09T19:17:10.616464 | 2021-01-05T15:32:29 | 2021-01-05T15:32:29 | 318,217,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 3.1.3 on 2020-12-01 23:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('optomedik', '0021_language'),
]
operations = [
migrations.AlterModelOptions(
name='language',
options={'verbose_name': 'registro', 'verbose_name_plural': 'optomedik'},
),
]
| [
"arielbedoya@gmail.com"
] | arielbedoya@gmail.com |
905eaafec477c4313a60d72e343b277f8b846d0f | f36a012b948408badca1b0931d0b499ccb655283 | /figure_scripts/02_plot-geodesic-scatterplots.py | 46a4e049d4cce13e4e7e882f0b15c1cb2504b4c2 | [] | no_license | alisonpeard/spatial-nets | 352ac2523e5d1b5583eacb3e58e66570ad6ab10b | 58e765a650ca3547f3bea2137bf7404946e4b470 | refs/heads/main | 2023-07-16T20:35:53.568525 | 2021-07-31T12:57:52 | 2021-07-31T12:57:52 | 367,377,694 | 1 | 0 | null | 2021-05-14T13:52:34 | 2021-05-14T13:52:34 | null | UTF-8 | Python | false | false | 2,390 | py | import os
from pathlib import Path
import pickle
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from spatial_nets import LocationsDataClass
from spatial_nets import utils
from spatial_nets import draw
from config import SAVEFIG, LEGEND, FORMAT
# Change the default colors
colors = ["aqua", "plum", "gainsboro"]
custom_cycler = cycler(color=(draw.named_colors[c] for c in colors))
plt.rcParams.update({"axes.prop_cycle": custom_cycler})
raw_data_dir = Path("raw")
data_dir = Path("output_data")
output_dir = Path("output_figures")
dmat = utils.load_dmat(raw_data_dir / "UK_geodesic_dmat.npz")
data = utils.load_flows(raw_data_dir / "UK_commute2011.npz")
locs = LocationsDataClass(data, coords=dmat)
print("\nLoading DC gravity data\n")
descriptor = "geo_grav_doubly"
prediction = np.load(data_dir / f"model_{descriptor}.npy")
with open(data_dir / f"pvalues_{descriptor}.pkl", "rb") as f:
pvals = pickle.load(f)
pvals.set_significance(0.01)
with open(data_dir / f"pvalues-a_{descriptor}.pkl", "rb") as f:
pvals_a = pickle.load(f)
pvals_a.set_significance(0.01)
# First plot
fig, ax = plt.subplots(figsize=(5, 5))
draw.signed_scatterplot(locs, prediction, pvals, ax, verbose=True)
draw.critical_enveloppes(locs, prediction, pvals_a, ax, verbose=True)
if LEGEND:
leg = ax.legend()
for lh in leg.legendHandles:
lh._legmarker.set_alpha(1)
if SAVEFIG:
filename = f"scatter_{descriptor}.{FORMAT}"
print("Writing: ", filename)
fig.savefig(output_dir / filename, bbox_inches="tight")
print("\nLoading PC Radiation data\n")
descriptor = "geo_rad_prod"
prediction = np.load(data_dir / f"model_{descriptor}.npy")
with open(data_dir / f"pvalues_{descriptor}.pkl", "rb") as f:
pvals = pickle.load(f)
pvals.set_significance(0.01)
with open(data_dir / f"pvalues-a_{descriptor}.pkl", "rb") as f:
pvals_a = pickle.load(f)
pvals_a.set_significance(0.01)
# Second plot
fig, ax = plt.subplots(figsize=(5, 5))
draw.signed_scatterplot(locs, prediction, pvals, ax, verbose=True)
draw.critical_enveloppes(locs, prediction, pvals_a, ax, verbose=True)
if LEGEND:
leg = ax.legend()
for lh in leg.legendHandles:
lh._legmarker.set_alpha(1)
if SAVEFIG:
filename = f"scatter_{descriptor}.{FORMAT}"
print("Writing: ", filename)
fig.savefig(output_dir / filename, bbox_inches="tight")
| [
"rodrigolealc@ciencias.unam.mx"
] | rodrigolealc@ciencias.unam.mx |
ab492279e4cf87cdf5fa60d1fa58f80a9ca98732 | 3e4c5e56d1a1bcae3c0ee5eae2eea9e12d7bb6c6 | /tests.py | 826b067a34dd3b83c526fec4e2768191b7fbfb49 | [
"MIT"
] | permissive | glentakahashi/python-tuya-experimental | 3ce1d6c7d8eebd646b621293866644433ffe7a97 | 86434700ae5552ec1aa972a632bc45da941528ec | refs/heads/master | 2020-04-16T11:26:58.370361 | 2019-01-13T18:06:57 | 2019-01-13T18:06:57 | 165,536,629 | 0 | 0 | MIT | 2019-01-13T17:56:22 | 2019-01-13T17:56:22 | null | UTF-8 | Python | false | false | 7,220 | py | #!/usr/bin/env python3
import logging
import unittest
try:
from unittest.mock import MagicMock # Python 3
except ImportError:
from mock import MagicMock # py2 use https://pypi.python.org/pypi/mock
from hashlib import md5
import json
import logging
import struct
# Enable info logging to see version information
log = logging.getLogger('pytuya')
log.setLevel(level=logging.INFO)
#log.setLevel(level=logging.DEBUG) # Debug hack!
import pytuya
LOCAL_KEY = '0123456789abcdef'
mock_byte_encoding = 'utf-8'
def compare_json_strings(json1, json2, ignoring_keys=None):
    """Return True when two JSON texts encode the same object.

    Keys listed in *ignoring_keys* (e.g. timestamps) are excluded from the
    comparison by copying the second document's value over the first's
    before the canonical (sorted-keys) dumps are compared.
    """
    first = json.loads(json1)
    second = json.loads(json2)
    for ignored in (ignoring_keys or []):
        first[ignored] = second[ignored]
    return json.dumps(first, sort_keys=True) == json.dumps(second, sort_keys=True)
def check_data_frame(data, expected_prefix, encrypted=True):
    """Split a raw Tuya protocol frame and validate its envelope.

    Frame layout: 15-byte prefix, then for encrypted frames a 1-byte
    payload length, 3-byte protocol version (b"3.1"), 16-byte MD5
    checksum and AES-encrypted JSON; the last 8 bytes are the suffix
    (``...aa55`` terminator).  Returns ``(json_data, frame_ok)`` where
    *json_data* is the decrypted/decoded payload text and *frame_ok*
    is True only if prefix, suffix (and, for encrypted frames, the
    declared payload length and version) all check out.
    """
    prefix = data[:15]
    suffix = data[-8:]
    if encrypted:
        payload_len = struct.unpack(">B",data[15:16])[0] # big-endian, unsigned char
        version = data[16:19]
        checksum = data[19:35]
        encrypted_json = data[35:-8]
        # Decrypt with the test device key; mirrors what the real device does.
        json_data = pytuya.AESCipher(LOCAL_KEY.encode(mock_byte_encoding)).decrypt(encrypted_json)
    else:
        json_data = data[16:-8].decode(mock_byte_encoding)
    frame_ok = True
    if prefix != pytuya.hex2bin(expected_prefix):
        frame_ok = False
    elif suffix != pytuya.hex2bin("000000000000aa55"):
        frame_ok = False
    elif encrypted:
        # payload_len counts everything after the length byte itself.
        if payload_len != len(version) + len(checksum) + len(encrypted_json) + len(suffix):
            frame_ok = False
        elif version != b"3.1":
            frame_ok = False
    return json_data, frame_ok
def mock_send_receive_set_timer(data):
    """Stateful mock for ``Device._send_receive`` during ``set_timer()``.

    ``set_timer`` performs two exchanges: a status query first (call 0),
    answered with a canned zero-padded status frame, then the actual
    timer command (call 1), whose frame is validated against the
    expected payload.  The ``call_counter`` attribute must be reset to 0
    before each test.  NOTE: a third call would raise UnboundLocalError,
    since ``ret`` is only assigned for counters 0 and 1.
    """
    if mock_send_receive_set_timer.call_counter == 0:
        # First exchange: pretend to be a device status reply (padded frame).
        ret = 20*chr(0x0) + '{"devId":"DEVICE_ID","dps":{"1":false,"2":0}}' + 8*chr(0x0)
    elif mock_send_receive_set_timer.call_counter == 1:
        # Second exchange: validate the encrypted SET command ("t" is a
        # timestamp, so it is excluded from the comparison).
        expected = '{"uid":"DEVICE_ID_HERE","devId":"DEVICE_ID_HERE","t":"","dps":{"2":6666}}'
        json_data, frame_ok = check_data_frame(data, "000055aa0000000000000007000000")
        if frame_ok and compare_json_strings(json_data, expected, ['t']):
            ret = '{"test_result":"SUCCESS"}'
        else:
            ret = '{"test_result":"FAIL"}'
    ret = ret.encode(mock_byte_encoding)
    mock_send_receive_set_timer.call_counter += 1
    return ret
def mock_send_receive_set_status(data):
    """Mock ``_send_receive`` for ``set_status(True, 1)``: validate the
    outgoing frame and reply with a marker JSON ("t" is ignored)."""
    expected = '{"dps":{"1":true},"uid":"DEVICE_ID_HERE","t":"1516117564","devId":"DEVICE_ID_HERE"}'
    json_data, frame_ok = check_data_frame(data, "000055aa0000000000000007000000")
    matched = frame_ok and compare_json_strings(json_data, expected, ['t'])
    if not matched:
        logging.error("json data not the same: {} != {}".format(json_data, expected))
    reply = '{"test_result":"SUCCESS"}' if matched else '{"test_result":"FAIL"}'
    return reply.encode(mock_byte_encoding)
def mock_send_receive_status(data):
    """Mock ``_send_receive`` for ``status()``.

    The status request is plaintext (``encrypted=False``); the reply is
    zero-padded like a real device frame so ``status()`` must strip the
    padding before parsing the JSON marker.
    """
    expected = '{"devId":"DEVICE_ID_HERE","gwId":"DEVICE_ID_HERE"}'
    json_data, frame_ok = check_data_frame(data, "000055aa000000000000000a000000", False)
    # FIXME dead code block -- NOTE(review): unclear which branch the
    # original author considered dead; both look reachable here.
    if frame_ok and compare_json_strings(json_data, expected):
        ret = '{"test_result":"SUCCESS"}'
    else:
        logging.error("json data not the same: {} != {}".format(json_data, expected))
        ret = '{"test_result":"FAIL"}'
    # Pad like a real device frame: 20 leading and 8 trailing NUL bytes.
    ret = 20*chr(0) + ret + 8*chr(0)
    ret = ret.encode(mock_byte_encoding)
    return ret
def mock_send_receive_set_colour(data):
    """Mock ``_send_receive`` for ``set_colour(255, 255, 255)``: validate
    the outgoing frame and reply with a marker JSON ("t" is ignored)."""
    expected = '{"dps":{"2":"colour", "5":"ffffff000000ff"}, "devId":"DEVICE_ID_HERE","uid":"DEVICE_ID_HERE", "t":"1516117564"}'
    json_data, frame_ok = check_data_frame(data, "000055aa0000000000000007000000")
    matched = frame_ok and compare_json_strings(json_data, expected, ['t'])
    if not matched:
        logging.error("json data not the same: {} != {}".format(json_data, expected))
    reply = '{"test_result":"SUCCESS"}' if matched else '{"test_result":"FAIL"}'
    return reply.encode(mock_byte_encoding)
def mock_send_receive_set_white(data):
    """Mock ``_send_receive`` for ``set_white(255, 255)``: validate the
    outgoing frame and reply with a marker JSON ("t" is ignored)."""
    expected = '{"dps":{"2":"white", "3":255, "4":255}, "devId":"DEVICE_ID_HERE","uid":"DEVICE_ID_HERE", "t":"1516117564"}'
    json_data, frame_ok = check_data_frame(data, "000055aa0000000000000007000000")
    matched = frame_ok and compare_json_strings(json_data, expected, ['t'])
    if not matched:
        logging.error("json data not the same: {} != {}".format(json_data, expected))
    reply = '{"test_result":"SUCCESS"}' if matched else '{"test_result":"FAIL"}'
    return reply.encode(mock_byte_encoding)
class TestXenonDevice(unittest.TestCase):
    """End-to-end pytuya device tests with the network layer mocked out.

    Each test replaces ``_send_receive`` with a mock that validates the
    outgoing frame and returns a ``{"test_result": ...}`` marker, so the
    assertions check that the device built the correct request.
    """

    def test_set_timer(self):
        d = pytuya.OutletDevice('DEVICE_ID_HERE', 'IP_ADDRESS_HERE', LOCAL_KEY)
        d._send_receive = MagicMock(side_effect=mock_send_receive_set_timer)
        # Reset call_counter and start test
        mock_send_receive_set_timer.call_counter = 0
        result = d.set_timer(6666)
        # Strip the zero padding around the JSON marker before decoding.
        result = result[result.find(b'{'):result.rfind(b'}')+1]
        result = result.decode(mock_byte_encoding) # Python 3 (3.5.4 and earlier) workaround to json stdlib "behavior" https://docs.python.org/3/whatsnew/3.6.html#json
        result = json.loads(result)
        # Make sure mock_send_receive_set_timer() has been called twice with correct parameters
        self.assertEqual(result['test_result'], "SUCCESS")

    def test_set_status(self):
        d = pytuya.OutletDevice('DEVICE_ID_HERE', 'IP_ADDRESS_HERE', LOCAL_KEY)
        d._send_receive = MagicMock(side_effect=mock_send_receive_set_status)
        result = d.set_status(True, 1)
        result = result.decode(mock_byte_encoding) # Python 3 (3.5.4 and earlier) workaround to json stdlib "behavior" https://docs.python.org/3/whatsnew/3.6.html#json
        result = json.loads(result)
        self.assertEqual(result['test_result'], "SUCCESS")

    def test_status(self):
        d = pytuya.OutletDevice('DEVICE_ID_HERE', 'IP_ADDRESS_HERE', LOCAL_KEY)
        d._send_receive = MagicMock(side_effect=mock_send_receive_status)
        result = d.status()
        # status() already returns the parsed dict, so no decode step here.
        self.assertEqual(result['test_result'], "SUCCESS")

    def test_set_colour(self):
        d = pytuya.BulbDevice('DEVICE_ID_HERE', 'IP_ADDRESS_HERE', LOCAL_KEY)
        d._send_receive = MagicMock(side_effect=mock_send_receive_set_colour)
        result = d.set_colour(255,255,255)
        result = result.decode(mock_byte_encoding)
        result = json.loads(result)
        self.assertEqual(result['test_result'], "SUCCESS")

    def test_set_white(self):
        d = pytuya.BulbDevice('DEVICE_ID_HERE', 'IP_ADDRESS_HERE', LOCAL_KEY)
        d._send_receive = MagicMock(side_effect=mock_send_receive_set_white)
        result = d.set_white(255, 255)
        result = result.decode(mock_byte_encoding)
        result = json.loads(result)
        self.assertEqual(result['test_result'], "SUCCESS")
# Allow running the test module directly: `python tests.py`.
if __name__ == '__main__':
    unittest.main()
| [
"dmbp35@gmail.com"
] | dmbp35@gmail.com |
a8a55c5ceaf4f047f4055dd69e03d9e78b0fb41b | e8c4392a4470abd770be6805e6032ef36cb50ea9 | /dev/prepare_jvm_release.py | 045adf5bd2e93b90499e8c464ec286e10128b43c | [
"Apache-2.0"
] | permissive | vishalbelsare/xgboost | 3f133a97c20654e1ada64af4d89da2493a0197f0 | b124a27f57c97123daf9629555aa07e90dc77aed | refs/heads/master | 2023-08-17T01:50:45.285904 | 2021-11-23T08:45:36 | 2021-11-23T08:45:36 | 129,266,376 | 0 | 0 | Apache-2.0 | 2021-11-23T18:35:32 | 2018-04-12T14:44:31 | C++ | UTF-8 | Python | false | false | 2,918 | py | import os
import sys
import errno
import subprocess
import glob
import shutil
from contextlib import contextmanager
def normpath(path):
    """Translate a '/'-separated (UNIX-style) path into the native form.

    Relative paths become platform-separator joined; absolute paths are
    re-anchored at the platform's filesystem root.
    """
    native = os.path.join(*path.split("/"))
    return os.path.abspath("/") + native if os.path.isabs(path) else native
def cp(source, target):
    """Copy *source* to *target* (both '/'-separated), echoing the command."""
    src = normpath(source)
    dst = normpath(target)
    print("cp {0} {1}".format(src, dst))
    shutil.copy(src, dst)
def maybe_makedirs(path):
    """``mkdir -p`` equivalent: create *path* and parents, tolerating an
    already-existing target; any other OS error propagates."""
    target = normpath(path)
    print("mkdir -p " + target)
    try:
        os.makedirs(target)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
@contextmanager
def cd(path):
    """Context manager: chdir into *path* (echoed), restoring the previous
    working directory on exit, even on error."""
    target = normpath(path)
    previous = os.getcwd()
    os.chdir(target)
    print("cd " + target)
    try:
        yield target
    finally:
        os.chdir(previous)
def run(command, **kwargs):
    """Echo *command* and execute it through the shell.

    Raises ``subprocess.CalledProcessError`` on a nonzero exit status;
    extra keyword arguments are forwarded to ``subprocess.check_call``.
    """
    print(command)
    subprocess.check_call(command, shell=True, **kwargs)
def main():
    """Stage resources for an XGBoost JVM-packages release.

    Copies the pure-Python tracker and test fixtures into the Maven
    modules, regenerates the regression fixtures, and creates the
    per-platform directories that will hold native binaries.  Must be
    run from the repository root (it chdirs into ``jvm-packages/``).
    """
    with cd("jvm-packages/"):
        print("====copying pure-Python tracker====")
        for use_cuda in [True, False]:
            xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
            cp("../python-package/xgboost/tracker.py", f"{xgboost4j}/src/main/resources")
        print("====copying resources for testing====")
        # Regenerate the machine.txt regression fixtures in place.
        with cd("../demo/CLI/regression"):
            run(f"{sys.executable} mapfeat.py")
            run(f"{sys.executable} mknfold.py machine.txt 1")
        for use_cuda in [True, False]:
            xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
            xgboost4j_spark = "xgboost4j-spark-gpu" if use_cuda else "xgboost4j-spark"
            maybe_makedirs(f"{xgboost4j}/src/test/resources")
            maybe_makedirs(f"{xgboost4j_spark}/src/test/resources")
            for file in glob.glob("../demo/data/agaricus.*"):
                cp(file, f"{xgboost4j}/src/test/resources")
                cp(file, f"{xgboost4j_spark}/src/test/resources")
            for file in glob.glob("../demo/CLI/regression/machine.txt.t*"):
                cp(file, f"{xgboost4j_spark}/src/test/resources")
        print("====Creating directories to hold native binaries====")
        # 'os_name' (not 'os') avoids shadowing the module-level os import.
        for os_name, arch in [("linux", "x86_64"), ("windows", "x86_64"), ("macos", "x86_64")]:
            output_dir = f"xgboost4j/src/main/resources/lib/{os_name}/{arch}"
            maybe_makedirs(output_dir)
        for os_name, arch in [("linux", "x86_64")]:
            output_dir = f"xgboost4j-gpu/src/main/resources/lib/{os_name}/{arch}"
            maybe_makedirs(output_dir)
        print("====Next Steps====")
        print("1. Obtain Linux and Windows binaries from the CI server")
        print("2. Put them in xgboost4j(-gpu)/src/main/resources/lib/[os]/[arch]")
        print("3. Now on a Mac machine, run:")
        print("   GPG_TTY=$(tty) mvn deploy -Prelease -DskipTests")
# Allow running this file directly as a release-prep script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
ffe3dc8e3432d0c52d4ba7a768b8ceff1583aca5 | cc7c792dbae584b0f2874a9ded97b66079d9ac48 | /Dashin_API/CheckConnection.py | da71a5fa0b73a470b23aff19813576a064e4eaa4 | [] | no_license | kk13332488/Python-Algorithm-Trading | 7f2a55df6d6c89c0d57fa0ef4ec7409030ceae4a | 0ce0ba8740f1628c4e85db14e40cf317552267d9 | refs/heads/main | 2023-04-19T04:50:13.727769 | 2021-05-06T15:28:10 | 2021-05-06T15:28:10 | 363,645,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import win32com.client
# Instantiate the Daishin (Creon Plus) CpCybos utility COM object and
# print its connection flag: IsConnect is 1 when the client is connected
# to the broker's server, 0 otherwise.
instCpCybos = win32com.client.Dispatch("CpUtil.CpCybos") # check connection status
print(instCpCybos.IsConnect)
| [
"kk13332488@gmail.com"
] | kk13332488@gmail.com |
32edd15bb9e860bcd3c4952389654eabd58f83b4 | e500972531cc39476a9bbc65b0a284e77ca928b2 | /CeaserCipherDecoder.py | 5825c74af4ac5cb1030a7384ca7a655370784153 | [] | no_license | bivin1999/InterestingCodes | 0c5dcffab5af4a6448ccfaa80029eb602593136a | 1266764294473b97a7ef772b6f8c9f6b2ee590c3 | refs/heads/main | 2023-03-10T18:10:58.487674 | 2021-02-25T04:15:56 | 2021-02-25T04:15:56 | 342,119,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | cipher=input().strip()
# `cipher` holds the raw input.  The ciphertext was produced by reversing
# the plaintext and shifting each letter forward by 3; undo both steps.
cipher = cipher[::-1]
plain = ""
for c in cipher:
    # shift back by 3: (ord(c) - ord('A') - 3) mod 26, re-anchored at 'A'
    # (68 == ord('A') + 3, 65 == ord('A')); assumes uppercase A-Z input.
    plain += chr(((ord(c) - 68) % 26) + 65)
print(plain)
"noreply@github.com"
] | noreply@github.com |
372b0cfb9f859bbd24bc6c5acc254a8040ea3761 | cc112f5ad28288b5e17e559f68ac4b7465c6ef03 | /exercise-2/pnseb-allocation-tool.py | a235b382131515e1943abcc2c2181062d725795f | [] | no_license | zachary-vinyard/business-innovations-interview | c3ea65dc8e018c7c3f9feb3d13e0546fcaf295f2 | 0755e31ee813e10c7452dcf70a5011ecefe96898 | refs/heads/master | 2020-05-29T16:06:39.899511 | 2019-05-30T11:02:53 | 2019-05-30T11:02:53 | 189,240,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,455 | py | """
@author: zachary.vinyard@oneacrefund.org
example script for business innovations associate interview
"""
import os, sys
import pandas as pd
import math
from datetime import datetime
#version number
version = '2-15-6'
#FILEPATH - CHANGE THIS TO THE FILE DIRECTORY FOR YOUR COMPUTER
filepath = os.path.expanduser('~' ) + r'\Documents\Tubura\data'
#FILENAME - CHANGE TO MATCH THE INPUT FILE NAME
filename = 'pnseb-input.xlsx'
#sheet names
reps_sheet = 'REPS'
groups_sheet = 'GROUPS'
#voucher sizes
max_fert_vouchers = 12
fert_voucher_size = 25
max_lime_vouchers = 15
lime_voucher_size = 50
#prioritization algorithm - choose prioritize, no_prioritization
prioritization_algorithm = 'prioritize'
#DEBUG
debug = False
def main():
time = datetime.now()
datestring_for_file = '%04d%02d%02d%02d%02d%02d' % (time.year, time.month, time.day, time.hour,time.minute, time.second)
outfile_name ='pnseb_matching_out_%s_%s.xlsx' % (version, datestring_for_file)
debug_outfile_name = 'pnseb_matching_out_debug_%s_%s.xlsx' % (version, datestring_for_file)
prog_announcement('starting - %04d-%02d-%02d %02d:%02d:%02d\nversion - %s' % (time.year, time.month, time.day, time.hour,time.minute, time.second, version))
prog_announcement('loading data - %s' % filename)
os.chdir(filepath)
try:
xl_file = pd.ExcelFile(filename)
reps = xl_file.parse(sheet_name = reps_sheet)
groups = xl_file.parse(sheet_name = groups_sheet)
del xl_file
except TypeError:
xl_file = pd.ExcelFile(filename)
reps = xl_file.parse(sheet_name = reps_sheet)
groups = xl_file.parse(sheet_name = groups_sheet)
del xl_file
except FileNotFoundError:
prog_announcement('file not found - program terminated')
return None
except KeyboardInterrupt:
prog_announcement('keyboard interrupt - program terminated')
return None
prog_announcement('cleaning columns')
try:
reps, groups = col_clean(reps), col_clean(groups)
except KeyboardInterrupt:
prog_announcement('keyboard interrupt - program terminated')
return None
except:
prog_announcement('columns improperly formated - program terminated')
return None
try:
assigned, unassigned, m_groups, no_reps = allocate_vouchers(groups, reps)
if debug:
prog_announcement('writing to file - %s' % debug_outfile_name)
assigned_df = pd.DataFrame(assigned, columns = ['oaf_unique_id', 'name', 'assigned_group', 'group_id', 'match_type', 'ucid', 'group', 'lime_vouchers', 'dap_vouchers', 'npk_vouchers', 'uree_vouchers', 'government_province', 'government_commune', 'government_colline'])
assigned_df.set_index('oaf_unique_id', inplace = True)
#assigned_df.sort_index(axis = 0, inplace = True)
unassigned_df = pd.DataFrame(unassigned, columns = ['oaf_unique_id', 'name', 'assigned_group', 'group_id', 'match_type', 'ucid', 'group', 'lime_vouchers', 'dap_vouchers', 'npk_vouchers', 'uree_vouchers', 'government_province', 'government_commune', 'government_colline'])
unassigned_df.set_index('oaf_unique_id', inplace = True)
#unassigned_df.sort_index(axis = 0, inplace = True)
groups_df = pd.DataFrame(groups, columns = ['group_id', 'group_name', 'ucid', 'lime_demand', 'lime_remaining', 'dap_demand', 'dap_remaining', 'npk_demand', 'npk_remaining', 'uree_demand', 'uree_remaining', 'government_province', 'government_commune', 'government_colline'])
groups_df.set_index('group_id', inplace = True)
groups_df.sort_index(axis = 0, inplace = True)
no_reps_df = pd.DataFrame(no_reps)
xl_writer_debug = pd.ExcelWriter(debug_outfile_name)
assigned_df.to_excel(xl_writer_debug,sheet_name = 'assigned_to_group')
unassigned_df.to_excel(xl_writer_debug, sheet_name = 'unassigned')
groups_df.to_excel(xl_writer_debug, sheet_name = 'groups')
no_reps_df.to_excel(xl_writer_debug, sheet_name = 'no_reps')
try:
xl_writer_debug.save()
del xl_writer_debug
except PermissionError:
prog_announcement('file is open, cannot save debug file')
s_assigned, s_unassigned, s_groups, s_no_reps = allocate_vouchers(pd.DataFrame(m_groups), pd.DataFrame(unassigned), match_type = 'site', priority = algorithms[prioritization_algorithm])
except GroupsEmptyError:
prog_announcement('data contains no groups - programe terminated')
return None
except KeyboardInterrupt:
prog_announcement('keyboard interrupt - program terminated')
return None
except DataFormatError:
prog_announcement('critical error - program terminated')
return None
except:
prog_announcement('unexpected error - program terminated')
return None
prog_announcement('writing to file - %s' % outfile_name)
assigned_df = pd.DataFrame(s_assigned + assigned, columns = ['oaf_unique_id', 'name', 'assigned_group', 'group_id', 'match_type', 'ucid', 'group', 'lime_vouchers', 'dap_vouchers', 'npk_vouchers', 'uree_vouchers', 'government_province', 'government_commune', 'government_colline']) if len(s_assigned) > 0 else pd.DataFrame(assigned, columns = ['oaf_unique_id', 'name', 'assigned_group', 'group_id', 'match_type', 'ucid', 'group', 'lime_vouchers', 'dap_vouchers', 'npk_vouchers', 'uree_vouchers', 'government_province', 'government_commune', 'government_colline'])
assigned_df.set_index('oaf_unique_id', inplace = True)
assigned_df.sort_index(axis = 0, inplace = True)
unassigned_df = pd.DataFrame(s_unassigned, columns = ['oaf_unique_id', 'name', 'assigned_group', 'group_id', 'match_type', 'ucid', 'group', 'lime_vouchers', 'dap_vouchers', 'npk_vouchers', 'uree_vouchers', 'government_province', 'government_commune', 'government_colline'])
unassigned_df.set_index('oaf_unique_id', inplace = True)
unassigned_df.sort_index(axis = 0, inplace = True)
groups_df = pd.DataFrame(s_groups, columns = ['group_id', 'priority', 'group_name', 'ucid', 'lime_demand', 'lime_remaining', 'dap_demand', 'dap_remaining', 'npk_demand', 'npk_remaining', 'uree_demand', 'uree_remaining', 'government_province', 'government_commune', 'government_colline'])
if not debug:
groups_df.drop('priority', axis = 1, inplace = True)
groups_df.set_index('group_id', inplace = True)
groups_df.sort_index(axis = 0, inplace = True)
#no_reps_df = pd.DataFrame(no_reps)
file_information = {'version' : version, 'input_file' : filename, 'outfile_name' : outfile_name, 'debug' : debug, 'elapsed_time' : datetime.now() - time}
if debug:
file_information['input_reps'] = len(reps)
file_information['output_reps'] = len(assigned_df) + len(unassigned_df)
if debug:
file_information['input_groups'] = len(groups)
file_information['output_groups'] = len(groups_df)
file_information_df = pd.DataFrame.from_dict(data = file_information, orient = 'index')
file_information_df.columns = ['information']
xl_writer = pd.ExcelWriter(outfile_name)
assigned_df.to_excel(xl_writer,sheet_name = 'assigned_to_group')
unassigned_df.to_excel(xl_writer, sheet_name = 'unassigned')
groups_df.to_excel(xl_writer, sheet_name = 'groups')
file_information_df.to_excel(xl_writer, sheet_name = 'file_information')
#no_reps_df.to_excel(xl_writer, sheet_name = 'no_reps')
try:
xl_writer.save()
del xl_writer
except PermissionError:
prog_announcement('file is open, cannot save - program terminated')
return None
except KeyboardInterrupt:
prog_announcement('keyboard interrupt - program terminated')
return None
time2 = datetime.now()
elapsed = time2 - time
prog_announcement('elapsed time : %s\ncomplete - %04d-%02d-%02d %02d:%02d:%02d\n' % (elapsed, time2.year, time2.month, time2.day, time2.hour, time2.minute, time2.second))
def col_clean(dataframe):
    """Normalise column labels and add a ``ucid`` site key, in place.

    String column labels are stripped, have spaces replaced with
    underscores and are lower-cased; non-string labels are left alone.
    A ``ucid`` column ('province-commune-colline', lower-cased) is added
    for site-level matching.  Returns the (mutated) frame.

    Raises AttributeError (caught by the caller's bare except) if the
    three government_* columns are missing or non-string.
    """
    # BUGFIX: the original guard was isinstance(x, (str, 'unicode')) -- a
    # Python-2 leftover.  For any non-string label, isinstance reaches the
    # string 'unicode' and raises "TypeError: isinstance() arg 2 must be a
    # type", which is exactly the case the `else x` branch should handle.
    dataframe.columns = dataframe.columns.map(
        lambda x: x.strip().replace(' ', '_').lower() if isinstance(x, str) else x)
    dataframe['ucid'] = dataframe.apply(
        lambda row: '-'.join([row.government_province.strip().lower(),
                              row.government_commune.strip().lower(),
                              row.government_colline.strip().lower()]),
        axis=1)
    return dataframe
def prog_bar(loc, length, barlen=20):
    """Redraw an in-place ASCII progress bar for item *loc* of *length*.

    Uses carriage-return rewriting; a newline is emitted once 100% is
    reached so subsequent output starts on a fresh line.
    """
    sys.stdout.flush()
    done = loc + 1
    percent = int(done / length * 100)
    filled = int(done / length * barlen)
    sys.stdout.write('\r')
    sys.stdout.write('[%s%s] - %s %%' % ('=' * filled, ' ' * (barlen - filled), percent))
    if percent == 100:
        sys.stdout.write('\n')
def prog_announcement(text):
    """Print *text* on its own line, first returning the cursor to the
    start of the line so any progress bar there is overwritten."""
    stream = sys.stdout
    stream.flush()
    for piece in ('\r', text, '\n'):
        stream.write(piece)
def allocate_vouchers(groups, reps, match_type = 'group', priority = None):
    """Allocate fertiliser/lime vouchers from group demand to farmer reps.

    Two passes are supported:

    * ``match_type='group'`` -- reps absorb their own group's demand
      (the ``*_demand`` columns); ``priority`` is unused.
    * ``match_type='site'``  -- leftover demand (``*_remaining`` columns)
      is offered to still-unassigned reps on the same site (``ucid``),
      visiting groups in the order produced by the ``priority`` callable
      (``prioritize`` / ``no_prioritization``).

    Per-client caps come from the module-level constants:
    ``max_fert_vouchers`` shared across NPK + DAP + uree, and
    ``max_lime_vouchers`` for lime; one voucher represents
    ``fert_voucher_size`` / ``lime_voucher_size`` kg.

    Returns ``(assigned, unassigned, groups_updated, unassigned_client_ids)``
    where the first three are lists of dicts suitable for ``pd.DataFrame``
    (in site mode ``unassigned`` is a DataFrame ``.to_dict()``).  The
    input frames are re-indexed on 'ucid' as a side effect.

    NOTE(review): relies on pre-2.0 pandas ``Series.iteritems``; the
    nesting was reconstructed from a whitespace-mangled dump -- confirm
    against version history.
    """
    # --- input validation -------------------------------------------------
    if not( match_type == 'group' or match_type == 'site'):
        raise ValueError('match_type must be \'group\' or \'site\'')
    elif len(groups) == 0:
        raise GroupsEmptyError('data includes no groups')
    elif match_type != 'group' and priority == None:
        raise DataFormatError('no matching algorithm included')
    elif len(groups['group_id']) != len(groups['group_id'].unique()):
        raise DataFormatError('group ids not unique')
    prog_announcement('starting matching on %s' % match_type)
    unassigned = []
    assigned = []
    no_reps = []            # sites that have groups but no reps at all
    groups_updated = []     # per-group rows with post-allocation remainders
    assigned_client_ids = []
    unassigned_client_ids = []
    sites = groups['ucid'].unique()
    alg = priority
    if match_type == 'site':
        # Keep an un-indexed copy so unassigned reps can be reported at
        # the end, and rank the groups with the priority algorithm.
        try:
            reps_backup = reps.set_index('oaf_unique_id')
            unassigned_client_ids = list(set(reps['oaf_unique_id']))
        except KeyError:
            if len(reps) == 0:
                prog_announcement('no reps available for site - level matching')
                return (reps, reps, groups, unassigned_client_ids)
            else:
                raise KeyError
        groups = alg(groups)
    groups, reps = groups.set_index('ucid'), reps.set_index('ucid')
    # --- main allocation loop: site by site, group by group ---------------
    for site in sites:
        prog_bar(list(sites).index(site), len(sites))
        no_site_reps = False
        g = groups.loc[site]
        try:
            if match_type == 'group':
                r = reps
            elif match_type == 'site':
                r = reps.loc[site]
        except KeyError:
            # No reps registered on this site; demand still gets recorded.
            no_reps.append(site)
            no_site_reps = True
        if match_type == 'group':
            gids = pd.Series(g['group_id']).unique()
        elif match_type == 'site':
            gids = pd.Series(g['priority'])
        for gid in gids:
            # Translate the group's kg figures into voucher counts; the
            # KeyError path handles a single-group site, where .loc[site]
            # returned a Series rather than a one-row DataFrame.
            try:
                if match_type == 'group':
                    g_row = g.loc[g['group_id'] == gid]
                    npk_voucher_count = int(g_row.loc[site, 'npk_demand']) / fert_voucher_size
                    uree_voucher_count = int(g_row.loc[site, 'uree_demand']) / fert_voucher_size
                    dap_voucher_count = int(g_row.loc[site, 'dap_demand']) / fert_voucher_size
                    lime_voucher_count = int(g_row.loc[site, 'lime_demand']) / lime_voucher_size
                elif match_type == 'site':
                    g_row = g.loc[g['priority'] == gid]
                    npk_voucher_count = int(g_row.loc[site, 'npk_remaining']) / fert_voucher_size
                    uree_voucher_count = int(g_row.loc[site, 'uree_remaining']) / fert_voucher_size
                    dap_voucher_count = int(g_row.loc[site, 'dap_remaining']) / fert_voucher_size
                    lime_voucher_count = int(g_row.loc[site, 'lime_remaining']) / lime_voucher_size
            except KeyError:
                g_row = g
                if match_type == 'group':
                    npk_voucher_count = int(g_row['npk_demand']) / fert_voucher_size
                    uree_voucher_count = int(g_row['uree_demand']) / fert_voucher_size
                    dap_voucher_count = int(g_row['dap_demand']) / fert_voucher_size
                    lime_voucher_count = int(g_row['lime_demand']) / lime_voucher_size
                elif match_type == 'site':
                    npk_voucher_count = int(g_row['npk_remaining']) / fert_voucher_size
                    uree_voucher_count = int(g_row['uree_remaining']) / fert_voucher_size
                    dap_voucher_count = int(g_row['dap_remaining']) / fert_voucher_size
                    lime_voucher_count = int(g_row['lime_remaining']) / lime_voucher_size
            if not no_site_reps:
                try:
                    if match_type == 'group':
                        clients = r.loc[r['group_id'] == gid]['oaf_unique_id']
                    elif match_type == 'site':
                        clients = pd.Series(r['oaf_unique_id'])
                except KeyError:
                    clients = pd.Series(r['oaf_unique_id'])
                for client in clients:
                    # In site mode each client may only be matched once.
                    if match_type == 'site' and client in assigned_client_ids:
                        if client in unassigned_client_ids:
                            unassigned_client_ids.remove(client)
                        continue
                    client_npk, client_dap, client_uree, client_lime = 0, 0, 0, 0
                    client_voucher_count = 0
                    # Fill the fertiliser cap in fixed order: NPK, DAP, uree.
                    if npk_voucher_count > 0:
                        npk_available_to_add = min(npk_voucher_count, max_fert_vouchers - client_voucher_count)
                        client_npk += npk_available_to_add
                        client_voucher_count += npk_available_to_add
                        npk_voucher_count = npk_voucher_count - npk_available_to_add
                    if client_voucher_count < max_fert_vouchers and dap_voucher_count > 0:
                        dap_available_to_add = min(dap_voucher_count, max_fert_vouchers - client_voucher_count)
                        client_dap += dap_available_to_add
                        client_voucher_count += dap_available_to_add
                        dap_voucher_count = dap_voucher_count - dap_available_to_add
                    if client_voucher_count < max_fert_vouchers and uree_voucher_count > 0:
                        uree_available_to_add = min(uree_voucher_count, max_fert_vouchers - client_voucher_count)
                        client_uree += uree_available_to_add
                        client_voucher_count += uree_available_to_add
                        uree_voucher_count = uree_voucher_count - uree_available_to_add
                    # Lime has its own, independent cap.
                    if lime_voucher_count > 0:
                        client_lime += min(lime_voucher_count, max_lime_vouchers - client_lime)
                        lime_voucher_count -= client_lime
                    # Copy the rep's row into a plain dict and attach the
                    # allocation results (fractions rounded up per client).
                    client_dict = {}
                    if match_type == 'group':
                        client_row = r.loc[r['oaf_unique_id'] == client]
                        for label in client_row.columns:
                            client_dict[label] = client_row.iloc[0][label]
                    elif match_type == 'site':
                        try:
                            client_row = r.loc[r['oaf_unique_id'] == client]
                            for label in client_row.columns:
                                client_dict[label] = client_row.loc[site, label]
                        except KeyError:
                            # Single-rep site: r is a Series, not a frame.
                            for label, val in r.iteritems():
                                client_dict[label] = val
                    client_dict['npk_vouchers'], client_dict['dap_vouchers'], client_dict['uree_vouchers'], client_dict['lime_vouchers'] = math.ceil(client_npk), math.ceil(client_dap), math.ceil(client_uree), math.ceil(client_lime)
                    client_dict['match_type'] = match_type
                    if match_type == 'group':
                        client_dict['assigned_group'] = gid
                    if match_type == 'site':
                        try:
                            client_dict['assigned_group'] = g_row.loc[site, 'group_id']
                        except KeyError:
                            client_dict['assigned_group'] = g_row['group_id']
                        except:
                            raise DataFormatError('error')
                    client_dict['ucid'] = site
                    if (client_voucher_count <= 0 and client_lime <= 0):
                        # Nothing allocated: in group mode the rep goes on to
                        # the site-level pass; in site mode they simply stay
                        # in unassigned_client_ids.
                        if match_type == 'group':
                            unassigned.append(client_dict)
                    else:
                        assigned.append(client_dict)
                        assigned_client_ids.append(client)
                        if match_type == 'site':
                            if client in unassigned_client_ids:
                                unassigned_client_ids.remove(client)
            # Record the group's post-allocation remainders (kg) whether or
            # not any reps existed on the site.
            group_dict = {}
            group_dict['ucid'] = site
            try:
                for label in g_row.columns:
                    group_dict[label] = g_row.loc[site, label]
            except (AttributeError, KeyError) as e:
                for label, val in g_row.iteritems():
                    group_dict[label] = val
            group_dict['npk_remaining'] = int(npk_voucher_count) * fert_voucher_size
            group_dict['dap_remaining'] = int(dap_voucher_count) * fert_voucher_size
            group_dict['uree_remaining'] = int(uree_voucher_count) * fert_voucher_size
            group_dict['lime_remaining'] = int(lime_voucher_count) * lime_voucher_size
            groups_updated.append(group_dict)
    if match_type == 'site':
        # Re-rank with the (unchanged-by-this-loop) priority algorithm so
        # the frame used below carries a consistent 'priority' column.
        groups = alg(groups, re = True)
    #prog_announcement('finalizing client lists for %s matching' % match_type)
    # --- reps whose group_id matches no group at all ----------------------
    try:
        clients_not_in_group = reps.loc[~reps['group_id'].isin(groups['group_id'])]['oaf_unique_id']
    except ValueError:
        clients_not_in_group = pd.Series(r['oaf_unique_id'])
    if len(clients_not_in_group) >0:
        prog_announcement('finalizing client lists for %s matching' % match_type)
        for client in clients_not_in_group:
            prog_bar(list(clients_not_in_group).index(client), len(clients_not_in_group))
            client_npk, client_dap, client_uree, client_lime = 0, 0, 0, 0
            client_dict = {}
            try:
                client_row = reps.loc[reps['oaf_unique_id'] == client]
                for label in client_row.columns:
                    client_dict[label] = client_row.iloc[0][label]
            except KeyError:
                raise DataFormatError
            client_dict['npk_vouchers'], client_dict['dap_vouchers'], client_dict['uree_vouchers'], client_dict['lime_vouchers'] = client_npk, client_dap, client_uree, client_lime
            client_dict['match_type'] = match_type
            client_dict['assigned_group'] = client_dict['group_id']
            client_dict['ucid'] = '-'.join([client_dict['government_province'].strip().lower(),client_dict['government_commune'].strip().lower(),client_dict['government_colline'].strip().lower()])
            unassigned.append(client_dict)
    if match_type == 'site':
        # In site mode the unassigned list is rebuilt from the id set so
        # duplicates are impossible.
        unassigned = reps_backup.loc[list(set(unassigned_client_ids))].reset_index().to_dict()
    prog_announcement('completed matching on %s' % match_type)
    return (assigned, unassigned, groups_updated, unassigned_client_ids)
class DataFormatError (Exception):
    """Raised when the input workbook's structure or contents are invalid."""
    pass
class GroupsEmptyError (Exception):
    """Raised when the GROUPS sheet contains no rows to allocate from."""
    pass
def prioritize(groups, re=False):
    """Rank groups by total remaining fertiliser, largest first.

    Sorts the frame in place on dap + npk + uree remaining and writes a
    1-based 'priority' column; any stale 'priority' column is dropped
    first (always when *re* is True).  Returns the mutated frame.
    """
    if re or 'priority' in groups.columns:
        groups.drop('priority', axis=1, inplace=True)
    groups['total_fert'] = groups.apply(
        lambda row: row.dap_remaining + row.npk_remaining + row.uree_remaining,
        axis=1)
    groups.sort_values(by='total_fert', ascending=False, axis=0, inplace=True)
    groups['priority'] = list(range(1, len(groups) + 1))
    groups.drop('total_fert', axis=1, inplace=True)
    return groups
def no_prioritization(groups, re=False):
    """Identity ranking: each group's priority is simply its group_id.

    When *re* is True the existing 'priority' column is dropped before
    being rewritten.  Returns the mutated frame.
    """
    if re:
        groups.drop('priority', axis=1, inplace=True)
    groups['priority'] = groups.apply(lambda row: row.group_id, axis=1)
    return groups
# Registry mapping the `prioritization_algorithm` config string (top of
# file) to the site-matching priority function passed to allocate_vouchers.
algorithms = {'prioritize' : prioritize, 'no_prioritization' : no_prioritization}

if __name__ == '__main__':
    main()
| [
"30409881+zachary-vinyard@users.noreply.github.com"
] | 30409881+zachary-vinyard@users.noreply.github.com |
a7bd3d34bdfa7aa034d660135acf803ebf2ef5f4 | 3faf5550dd55c62e5a23d213b4dfb54f924cfc8f | /models.py | 434305d05cd665e4352e43a70405d3491c403842 | [] | no_license | MihirSahu/Flask-Tutorial | 70bac2dc432323cc831570c60e9a534bc4fa2179 | 78496c6f2b97715432c6872a1a30725578cd03ce | refs/heads/main | 2023-08-17T14:50:06.685468 | 2021-10-01T04:00:30 | 2021-10-01T04:00:30 | 412,315,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | #Classes are considered individual tables in sqlalchemy
# Classes are considered individual tables in sqlalchemy
class Item(db.Model):
    """One sellable market item; maps to its own table via Flask-SQLAlchemy."""
    # surrogate primary key (best practice: always define one explicitly)
    id = db.Column(db.Integer(), primary_key = True)
    # display name: at most 30 characters, required, no duplicates allowed
    name = db.Column(db.String(length = 30), nullable = False, unique = True)
    # price in whole currency units, required
    price = db.Column(db.Integer(), nullable = False)
    # 12-character barcode, required and unique per item
    barcode = db.Column(db.String(length = 12), nullable = False, unique = True)
    # free-text description, up to 1024 characters, required
    description = db.Column(db.String(length = 1024), nullable = False)

    # Human-readable form so Item.query.all() shows names, not object ids.
    def __repr__(self):
        return f'Item {self.name}'
#After setting this up, go to a python terminal in the same directory as this file, import the database (db in this case) and use db.create_all(). Also import Item to be able to create items
# from market import db, Item
# db.create_all()
#Then you can create an item using Item() (the id is not added because it will automatically assign a primary key)
# item1 = Item(name = "Iphone 10", price = 500, barcode = '2463746234', description = 'description')
#Then add the item into the database using db.session.add(item1)
#Then commit the data using db.session.commit()
#View items using Item.query.all()
#Loop through items by using 'for item in Item.query.all()'
#Use Item.query.filter_by() to filter by item property
#To see all data in a graphical interface, download a database browser; we'll use sqlite browser
| [
"2002mihir@gmail.com"
] | 2002mihir@gmail.com |
1fb32508e1d364d7b765c2f2940ca3d1f940a816 | f1875c53990cd87944bf7268d1c5a13263b5762d | /pathseg/models/encoders/deeplabv3plus_encoder.py | fd18606e329d808681b0e187e4c9d2be547181e3 | [] | no_license | yinchimaoliang/pathology-segmentation | c4aa9eb7c29e1e7a9d326548288d81366da47505 | 77bb9424d6f89cfa5ede82c6c7365baeb6ed5ddc | refs/heads/master | 2021-02-28T21:16:26.495200 | 2020-11-26T11:52:02 | 2020-11-26T11:52:02 | 245,733,003 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,513 | py | import torch
import torch.nn as nn
from torch.nn import functional as F
from pathseg.core.common.blocks import SeparableConv2d
from pathseg.core.utils.dilate_utils import replace_strides_with_dilation
from ..builder import ENCODERS
from .base_encoder import BaseEncoder
class ASPPConv(nn.Sequential):
    """One ASPP branch: 3x3 dilated conv -> BatchNorm -> ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        # padding == dilation keeps the spatial size unchanged for a 3x3
        # kernel, so all ASPP branches can be concatenated later.
        layers = [
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=3,
                padding=dilation,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        ]
        super().__init__(*layers)
class ASPPSeparableConv(nn.Sequential):
    """Depthwise-separable variant of ASPPConv: separable 3x3 dilated conv
    followed by BatchNorm and ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        layers = [
            SeparableConv2d(
                in_channels,
                out_channels,
                kernel_size=3,
                padding=dilation,
                dilation=dilation,
                bias=False,
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        ]
        super().__init__(*layers)
class ASPPPooling(nn.Sequential):
    """Image-level pooling branch of ASPP: global average pool, 1x1 conv,
    BatchNorm, ReLU, then upsample back to the input resolution."""

    def __init__(self, in_channels, out_channels):
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, x):
        spatial = x.shape[-2:]
        out = x
        for layer in self:
            out = layer(out)
        # Restore the input's spatial size so this branch can be
        # concatenated with the other ASPP branches.
        return F.interpolate(
            out, size=spatial, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: a 1x1 branch, three dilated 3x3
    branches and an image-pooling branch, fused by a 1x1 projection."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 atrous_rates,
                 separable=False):
        super(ASPP, self).__init__()
        conv_cls = ASPPSeparableConv if separable else ASPPConv
        rate1, rate2, rate3 = tuple(atrous_rates)
        branches = [
            # Plain 1x1 convolution branch.
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(),
            ),
            conv_cls(in_channels, out_channels, rate1),
            conv_cls(in_channels, out_channels, rate2),
            conv_cls(in_channels, out_channels, rate3),
            # Global-context branch.
            ASPPPooling(in_channels, out_channels),
        ]
        self.convs = nn.ModuleList(branches)

        # Fuse the five concatenated branch outputs back to out_channels.
        self.project = nn.Sequential(
            nn.Conv2d(
                5 * out_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
        )

    def forward(self, x):
        branch_outputs = [conv(x) for conv in self.convs]
        return self.project(torch.cat(branch_outputs, dim=1))
@ENCODERS.register_module()
class DeeplabV3PlusEncoder(BaseEncoder):
    """DeepLabV3+ encoder: a (possibly dilated) backbone followed by an
    ASPP head plus a separable 3x3 refinement convolution.

    Args:
        backbone: backbone spec consumed by ``BaseEncoder``.
        encoder_output_stride (int): overall output stride; must be 8 or
            16.  Later backbone stages are converted to dilated convs to
            reach the requested stride.
        out_channels (int): channel count produced by the ASPP head.
        atrous_rates (tuple): the three dilation rates of the ASPP branches.
    """

    def __init__(self,
                 backbone,
                 encoder_output_stride=16,
                 out_channels=256,
                 atrous_rates=(12, 24, 36)):
        super().__init__(backbone)
        if encoder_output_stride == 8:
            # Dilate the last two stages to keep resolution at stride 8.
            self.make_dilated(stage_list=[3, 4], dilation_list=[2, 4])
        elif encoder_output_stride == 16:
            # Only the final stage needs dilating for stride 16.
            self.make_dilated(stage_list=[4], dilation_list=[2])
        else:
            raise ValueError(
                'Encoder output stride should be 8 or 16, got {}'.format(
                    encoder_output_stride))
        # ASPP over the deepest feature map, then a separable 3x3 conv to
        # refine the fused features.
        self.aspp = nn.Sequential(
            ASPP(
                self.backbone.out_shapes[0],
                out_channels,
                atrous_rates,
                separable=True),
            SeparableConv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def make_dilated(self, stage_list, dilation_list):
        """Replace strides with dilations in the selected backbone stages."""
        stages = self.backbone.stages
        for stage_indx, dilation_rate in zip(stage_list, dilation_list):
            replace_strides_with_dilation(
                module=stages[stage_indx],
                dilation_rate=dilation_rate,
            )

    def forward(self, x):
        # features[0] is the deepest map fed to ASPP; features[3] is
        # presumably the high-resolution skip used by the decoder --
        # confirm against BaseEncoder's output ordering.
        features = super().forward(x)
        aspp_features = self.aspp(features[0])
        return [aspp_features, features[3]]
| [
"liyinhao@sensetime.com"
] | liyinhao@sensetime.com |
796544094ff9099277358b51572925655de465ba | 5928703295f6d49a7639f305e56b8b0c465fae49 | /nadocoding/module, package/run_internal.py | b1b8d15c31e39afa0e2c785dd409fd990b31b6aa | [] | no_license | jinxue0907/python | fc568d75b28975a9dbecac70e3b83d525b8a4281 | 9e58ca331c630407aa118f9b316686f2e246bb77 | refs/heads/master | 2023-08-28T10:42:18.637516 | 2021-11-12T01:03:03 | 2021-11-12T01:03:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import math
# Demo of math rounding/power/trig helpers.
print(math.ceil(3.5))
print(math.ceil(3.4))
print(math.floor(3.5))
print(math.floor(3.4))
print(math.pow(2, 10))
print(math.sin(math.pi/2))
import random
print('-'*20)
# NOTE: the random calls below must stay in this exact order if the
# output is ever compared against a seeded run.
print(random.random()) # 0.0 <= r < 1.0
print(random.randrange(1, 10)) # 1 <= r < 10 (integer)
print(random.randint(1, 10)) # 1 <= r <= 10 (integer)
list1 = ['김치찌개', '비빔면', '안먹고 잠']
print(random.choice(list1))
print('before: ', list1)
random.shuffle(list1)
print('after: ', list1)
print(random.sample(list1, 2))
import datetime
print('-'*20)
# Subtracting two datetimes yields a timedelta (elapsed time since then).
now = datetime.datetime.now()
birthday = datetime.datetime(2004, 1, 5, 14, 0)
print(birthday)
print(now - birthday)
"80078131+lasini123@users.noreply.github.com"
] | 80078131+lasini123@users.noreply.github.com |
21efc7c417f88971eaa082808bcee2525569477c | 60cafea63b2aa301b2a8603e4952395cdee0e49a | /ProblemSets/ProblemSet4_Kirimhan.py | 64d8cbf3fb019785e37078b6d8c84bf6320be648 | [] | no_license | destankirimhan/CompEcon_Fall17 | 65959dca426e6740241ff5a678cf2881770ef63d | 065bc5ea74d5b117e6496bd0e0073b598bd39c60 | refs/heads/master | 2021-01-22T17:48:27.946758 | 2017-12-14T19:53:26 | 2017-12-14T19:53:26 | 102,405,186 | 0 | 0 | null | 2017-09-04T21:45:31 | 2017-09-04T21:45:31 | null | UTF-8 | Python | false | false | 7,684 | py | # Importing the required packages
import pandas as pd
import numpy as np
import scipy.optimize as opt
import time
from scipy.optimize import differential_evolution
from geopy.distance import vincenty as vc
# Reading in the dataset
ps4_data = pd.read_excel('radio_merger_data.xlsx')
# Taking the natural logarithm of the variables.
# Population and price are rescaled to thousands first; the station count
# is shifted by 1 so buyers with zero stations stay defined under the log.
ps4_data['pop_ths_log'] = np.log(ps4_data['population_target'] / 1000)
ps4_data['price_ths_log'] = np.log(ps4_data['price'] / 1000)
ps4_data['num_stations_log'] = np.log(1 + ps4_data['num_stations_buyer'])
ps4_data['hhi_log'] = np.log(ps4_data['hhi_target'])
# Defining a function to calculate the distance of observed matches
def distance_calc1 (row):
    """Log of the Vincenty distance (in miles) between the buyer and
    target coordinates of one observed merger pair (a DataFrame row)."""
    buyer = (row['buyer_lat'], row['buyer_long'])
    target = (row['target_lat'], row['target_long'])
    return np.log(vc(buyer, target).miles)
# Calculating the corresponding variables for observed matches
ps4_data['var1'] = ps4_data['num_stations_log'] * ps4_data['pop_ths_log']
ps4_data['var2'] = ps4_data['corp_owner_buyer'] * ps4_data['pop_ths_log']
# var3 is the log-distance between each observed buyer/target pair.
ps4_data['var3'] = ps4_data.apply (lambda row: distance_calc1 (row),axis = 1)
# Creating different dataframes for each year
ps4_data_2007 = ps4_data[(ps4_data['year'] == 2007)].copy()
ps4_data_2007['index'] = ps4_data_2007['buyer_id'] - 1 # This creates my own index to fix the indexing/location problem
ps4_data_2007 = ps4_data_2007.set_index('index')
ps4_data_2008 = ps4_data[(ps4_data['year'] == 2008)].copy()
ps4_data_2008['index'] = ps4_data_2008['buyer_id'] - 1
ps4_data_2008 = ps4_data_2008.set_index('index')
# Defining a function to calculate the distance of counterfactual matches
def distance_calc (data,row1,row2):
    """Log of the Vincenty distance (in miles) between the buyer
    coordinates of row ``row1`` and the target coordinates of row ``row2``
    (columns 3/4 are buyer lat/long, 5/6 are target lat/long)."""
    buyer = (data.iloc[row1, 3], data.iloc[row1, 4])
    target = (data.iloc[row2, 5], data.iloc[row2, 6])
    return np.log(vc(buyer, target).miles)
# Defining a function to calculate all the variables used in the payoff function
def payoff(data):
    """Build the observed- and counterfactual-match variables used in the
    maximum-score objective for one year's merger data.

    For each buyer/target pair (b, t) with b < t, the returned frame holds
    the observed-match variables of both deals (ob1-ob10) on the left-hand
    side of the inequalities and the counterfactual cross-match variables
    (cf1-cf10) obtained by swapping the buyers' targets.

    Args:
        data: one year's merger DataFrame, indexed by buyer_id - 1.

    Returns:
        pandas DataFrame with one row per (b < t) pair and columns
        ob1-ob10 / cf1-cf10.
    """
    # Accumulator arrays start with a dummy all-zero row that is dropped
    # below.  NOTE: the original used dtype=np.int, an alias removed in
    # NumPy 1.24 -- the builtin int is the equivalent spelling.
    np_temp1 = np.zeros(10, dtype=int).reshape(1,10)
    np_temp2 = np.zeros(5, dtype=int).reshape(1,5)
    np_temp3 = np.zeros(5, dtype=int).reshape(1,5)
    for b in data['buyer_id']:
        for t in data['target_id']:
            if b < t:
                # Observed-match variables of both deals (left-hand side).
                ob1 = data['var1'][b - 1]
                ob2 = data['var2'][b - 1]
                ob3 = data['var3'][b - 1]
                ob4 = data['var1'][t - 1]
                ob5 = data['var2'][t - 1]
                ob6 = data['var3'][t - 1]
                # Two additional variables used by model 2 (with transfers).
                ob7 = data['hhi_log'][b - 1]
                ob8 = data['price_ths_log'][b - 1]
                ob9 = data['hhi_log'][t - 1]
                ob10 = data['price_ths_log'][t - 1]
                np_temp1 = np.vstack([np_temp1, [ob1, ob2, ob3, ob4, ob5, ob6, ob7, ob8, ob9, ob10]])
                # First half of the counterfactual match (buyer b with
                # target t).
                cf1 = data['num_stations_log'][b - 1] * data['pop_ths_log'][t - 1]
                cf2 = data['corp_owner_buyer'][b - 1] * data['pop_ths_log'][t - 1]
                cf3 = distance_calc(data, b-1, t-1)
                # Model-2 extras for the counterfactual target.
                cf7 = data['hhi_log'][t - 1]
                cf8 = data['price_ths_log'][t - 1]
                np_temp2 = np.vstack([np_temp2, [cf1, cf2, cf3, cf7, cf8]])
            if b > t:
                # Second half of the counterfactual match (buyer b with
                # target t, b > t).
                cf4 = data['num_stations_log'][b - 1] * data['pop_ths_log'][t - 1]
                cf5 = data['corp_owner_buyer'][b - 1] * data['pop_ths_log'][t - 1]
                cf6 = distance_calc(data, b-1, t-1)
                cf9 = data['hhi_log'][t - 1]
                cf10 = data['price_ths_log'][t - 1]
                np_temp3 = np.vstack([np_temp3, [cf4, cf5, cf6, cf9, cf10]])
    # Drop the dummy zero rows used to seed the accumulators.
    np_temp1 = np.delete(np_temp1, 0, 0)
    np_temp2 = np.delete(np_temp2, 0, 0)
    np_temp3 = np.delete(np_temp3, 0, 0)
    # Combine all stacked variables into a single DataFrame.
    ps4_mse = pd.DataFrame({'ob1':np_temp1[:,0], 'ob2':np_temp1[:,1], 'ob3':np_temp1[:,2], 'ob4':np_temp1[:,3], 'ob5':np_temp1[:,4],
                            'ob6':np_temp1[:,5], 'ob7':np_temp1[:,6], 'ob8':np_temp1[:,7], 'ob9':np_temp1[:,8], 'ob10':np_temp1[:,9],
                            'cf1':np_temp2[:,0], 'cf2':np_temp2[:,1], 'cf3':np_temp2[:,2], 'cf7':np_temp2[:,3], 'cf8':np_temp2[:,4],
                            'cf4':np_temp3[:,0], 'cf5':np_temp3[:,1], 'cf6':np_temp3[:,2], 'cf9':np_temp3[:,3], 'cf10':np_temp3[:,4]})
    return ps4_mse
# Combining dataframes of two years together
ps4_mse_2007 = payoff(ps4_data_2007) # This calls the function above twice to get dataframes of two years
ps4_mse_2008 = payoff(ps4_data_2008)
together = [ps4_mse_2007, ps4_mse_2008]
# Stack both years into one frame used by the objective functions below.
ps4_mse_both = pd.concat(together, ignore_index=True) # I re-index the data
# Writing indicator function of model1
def mse(coefs):
    """Negative count of pairs whose observed match beats the
    counterfactual swap (model 1 maximum-score objective).

    Minimizing this value maximizes the number of satisfied inequalities.

    Args:
        coefs: sequence (alpha, beta) of payoff coefficients.

    Returns:
        -1 * number of satisfied inequalities.
    """
    alpha, beta = coefs
    # The comparison is fully vectorized over all rows, so it only needs
    # to be evaluated once.  (The original re-evaluated the identical
    # expression inside a loop over the index without using the loop
    # variable -- an accidental O(n^2).)
    indicator = (ps4_mse_both['ob1'] + alpha * ps4_mse_both['ob2'] + beta * ps4_mse_both['ob3'] +
                 ps4_mse_both['ob4'] + alpha * ps4_mse_both['ob5'] + beta * ps4_mse_both['ob6'] >=
                 ps4_mse_both['cf1'] + alpha * ps4_mse_both['cf2'] + beta * ps4_mse_both['cf3'] +
                 ps4_mse_both['cf4'] + alpha * ps4_mse_both['cf5'] + beta * ps4_mse_both['cf6'])
    total = -1 * sum(indicator)
    return total
# Writing indicator function of model2 (the transferred model)
def mse_tansf(coefs):
    """Negative count of pairs satisfying BOTH transfer inequalities
    (model 2 with observed prices as transfers).

    Name kept as-is (including the historical typo) because callers
    reference ``mse_tansf`` directly.

    Args:
        coefs: sequence (sigma, alpha, gamma, beta) of payoff coefficients.

    Returns:
        -1 * number of pairs for which both inequalities hold.
    """
    sigma, alpha, gamma, beta = coefs
    # Vectorized over all rows; evaluated once instead of once per row as
    # in the original loop (which never used the loop variable).
    indicator = ((sigma * ps4_mse_both['ob1'] + alpha * ps4_mse_both['ob2'] + beta * ps4_mse_both['ob3'] +
                  gamma * ps4_mse_both['ob7'] - ps4_mse_both['ob8'] >=
                  sigma * ps4_mse_both['cf1'] + alpha * ps4_mse_both['cf2'] + beta * ps4_mse_both['cf3'] +
                  gamma * ps4_mse_both['cf7'] - ps4_mse_both['cf8']) &
                 (sigma * ps4_mse_both['ob4'] + alpha * ps4_mse_both['ob5'] + beta * ps4_mse_both['ob6'] +
                  gamma * ps4_mse_both['ob9'] - ps4_mse_both['ob10'] >=
                  sigma * ps4_mse_both['cf4'] + alpha * ps4_mse_both['cf5'] + beta * ps4_mse_both['cf6'] +
                  gamma * ps4_mse_both['cf9'] - ps4_mse_both['cf10']))
    total = -1 * sum(indicator)
    return total
# Calling the minimizer for model1
# Nelder-Mead is used because the maximum-score objective is a step
# function (non-differentiable), so gradient-based methods would fail.
params_initial = [1, 1]
mse_results = opt.minimize(mse, params_initial, method='Nelder-Mead', tol = 1e-12, options={'maxiter': 5000})
# Calling the minimizer for model2
params_initial_transf = [1,1,1,1]
mse_results_transf = opt.minimize(mse_tansf, params_initial_transf, method='Nelder-Mead', tol = 1e-12, options={'maxiter': 5000})
# Showing the results
coefs = (['alpha', 'beta'])
for i in range(2):
    print('Estimated ', coefs[i], "in model(1) = ", mse_results['x'][i])
print()
coefs_transf = (['sigma', 'alpha', 'gamma', 'beta'])
for i in range(4):
    print('Estimated ', coefs_transf[i], "in model(2) = ", mse_results_transf['x'][i])
| [
"destan.kirimhan@grad.moore.sc.edu"
] | destan.kirimhan@grad.moore.sc.edu |
b92b30135c9cc2240589abc7f74df6e0a477dc6a | 53fa91eb6107a0c7ff401cc69a4d2c0b397956c6 | /network_monitoring/get_net_info.py | 676808e562d26a30698548545ee779f2c1c4b134 | [] | no_license | SahanaSatya/network_management | e10b01e16e315592913fe7607f2292437d5067d8 | 356a08190466dade7631e09a1283a48720897919 | refs/heads/master | 2020-07-03T07:13:44.138998 | 2019-08-12T02:19:31 | 2019-08-12T02:19:31 | 201,834,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | from netmiko import ConnectHandler
# Connection parameters for the lab IOS router.
ios_r1 = {
    'device_type': 'cisco_ios',
    'username': 'lab',
    'password': 'lab123',
    'ip': '192.168.1.1',
}

net_connect = ConnectHandler(**ios_r1)

# (banner, command) pairs are sent to the device in this exact order.
for banner, command in [
        ("----------Routing Table ------------", 'show ip route'),
        ("-----------ARP Table --------------", 'show arp'),
        ("--------A Brief Status of all the Interfaces ---------",
         'show ip interface brief')]:
    print(banner)
    print(net_connect.send_command(command))
| [
"noreply@github.com"
] | noreply@github.com |
2655711d5568b9d61c4250f84b97d70cb56775f5 | 34594aa8303b76a3f309e9c82f501cdd16c7b797 | /worldbank.py | 8d790b132a0cc983e0f21b7d07599eac7c7d9280 | [] | no_license | vskalkat/DataStructures | 8a817dc9ee342c6f26e8c261f8168b8c4b3bd1f2 | 2cf4806dd31d5d861c87fe800c7c39adc7da282f | refs/heads/master | 2021-05-03T13:01:47.658198 | 2018-01-05T03:17:21 | 2018-01-05T03:17:21 | 120,508,232 | 0 | 0 | null | 2018-02-06T18:56:32 | 2018-02-06T18:56:31 | null | UTF-8 | Python | false | false | 7,649 | py | import urllib2
import json
import csv
from collections import defaultdict
from csv_tables import *
def extract_raw_country_information(iso2code):
    """Fetch one country's raw metadata record from the World Bank API.

    (Python 2 code: uses urllib2.)
    """
    url = ('http://api.worldbank.org/v2/countries/%s?format=json' % iso2code)
    payload = urllib2.urlopen(url).read()
    parsed = json.loads(payload)
    # The API returns [paging_info, [country_record]].
    if not parsed[1][0]:
        raise ValueError('Invalid response from World Bank countries API')
    return parsed[1][0]
def extract_raw_country_indicator(country_iso2code, indicator, start_year, end_year):
    """Fetch the raw yearly values of one indicator for one country over
    the inclusive [start_year, end_year] range."""
    url = ('http://api.worldbank.org/v2/countries/%s/indicators/%s?date=%s:%s&format=json' %
           (country_iso2code, indicator, start_year, end_year))
    payload = urllib2.urlopen(url).read()
    # Element 0 is paging info; element 1 is the list of yearly records.
    return json.loads(payload)[1]
def transform_raw_country_information(raw_country_information):
    """Map one raw API country record onto a CountryInformation row."""
    return CountryInformation(
        raw_country_information['id'],
        raw_country_information['iso2Code'],
        raw_country_information['name'],
        raw_country_information['region']['value'],
        raw_country_information['capitalCity'])
def transform_raw_country_indicator_information(raw_country_information):
    """Map a list of raw per-year indicator records onto CountryIndicators
    rows, one per year."""
    return [
        CountryIndicators(
            info['country']['id'],
            info['date'],
            info['indicator']['id'],
            info['value'])
        for info in raw_country_information
    ]
def transform_country_info_to_dimensions(country_information_dictionary, country_indicators_dictionary):
    """Build CountryDimension rows (surrogate keys 0..N-1), enriching each
    country with its 2016 population and GDP.

    (Python 2 code: iterates with itervalues.)
    """
    current_year = 2016
    dimensions = []
    for country_key, info in enumerate(country_information_dictionary.itervalues()):
        latest = country_indicators_dictionary[(info['iso2Code'], str(current_year))]
        dimensions.append(CountryDimension(
            country_key, info['name'], info['iso3Code'], info['iso2Code'],
            info['capital_city'], info['region_name'],
            latest['population'], latest['gdp']))
    return dimensions
def transform_country_indicator_transitions(country_dimensions, country_indicators_dictionary):
    """Build one CountryIndicatorTransitions row per country per year
    (2000-2016), including year-over-year population and GDP deltas."""
    transitions = []
    for year in range(2000, 2017):
        for dim in country_dimensions:
            this_year = country_indicators_dictionary[dim.iso2code, str(year)]
            current_population = this_year['population']
            current_gdp = this_year['gdp']
            if year == 2000:
                # 1999 is outside the fetched range, so deltas start at 0.
                population_change = 0
                gdp_change = 0
            else:
                prev_year = country_indicators_dictionary[dim.iso2code, str(year - 1)]
                population_change = int(current_population) - int(prev_year['population'])
                gdp_change = float(current_gdp) - float(prev_year['gdp'])
            transitions.append(CountryIndicatorTransitions(
                dim._country_key, year, current_population,
                population_change, current_gdp, gdp_change))
    return transitions
def load_country_info_to_dimensions(country_dimensions, filename):
    """Write the CountryDimension rows to ``filename`` as CSV with a
    header row."""
    header = ['_country_key', 'name', 'iso3code', 'iso2code', 'capital_city',
              'region_name', 'current_population', 'current_gdp']
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(header)
        for dim in country_dimensions:
            writer.writerow([
                dim._country_key,
                dim.name,
                dim.iso3code,
                dim.iso2code,
                dim.capital_city,
                dim.region_name,
                dim.current_population,
                dim.current_gdp])
def load_country_indicator_transitions(country_indicator_transitions, filename):
    """Write the CountryIndicatorTransitions rows to ``filename`` as CSV
    with a header row."""
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['_country_key', 'year', 'population', 'population_change', 'gdp_usd', 'gdp_usd_change'])
        for transition in country_indicator_transitions:
            writer.writerow([
                transition._country_key,
                transition.year,
                transition.population,
                transition.population_change,
                transition.gdp_usd,
                transition.gdp_usd_change])
def load_country_information(transformed_country_information_array, filename):
    """Write the CountryInformation rows to ``filename`` as CSV.

    (Python 2 csv usage: binary 'wb' mode avoids blank lines on Windows.)
    """
    with open(filename, 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['iso3Code', 'iso2Code', 'name', 'region_name', 'capital_city'])
        for record in transformed_country_information_array:
            writer.writerow([
                record.country_id,
                record.iso2Code,
                record.name,
                record.region_name,
                record.capital_city])
def load_country_indicator_information(transformed_indicator_information_array, filename):
    """Write all CountryIndicators rows (a list of per-country lists) to
    ``filename`` as CSV with a header row."""
    with open(filename, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        writer.writerow(['iso2Code', 'year', 'indicator_code', 'indicator_value'])
        # The input is nested: one list of yearly rows per country/indicator.
        for country_rows in transformed_indicator_information_array:
            for record in country_rows:
                writer.writerow([
                    record.country_iso2code,
                    record.year,
                    record.indicator_code,
                    record.indicator_value])
def etl_job_1():
    """First ETL stage: download raw country metadata and indicator values
    for the G7 countries and stage them as CSV files
    (raw_countries.csv and raw_indicator.csv)."""
    g7_countries = ['ca', 'fr', 'de', 'it', 'jp', 'gb', 'us']
    indicators = ['NY.GDP.MKTP.CD', 'SP.POP.TOTL']
    country_rows = []
    indicator_rows = []
    for country in g7_countries:
        raw_country = extract_raw_country_information(country)
        country_rows.append(transform_raw_country_information(raw_country))
        for indicator in indicators:
            # TODO: also fetch 1999 so that the year-2000 deltas computed
            # downstream do not default to zero.
            start_year = 2000
            end_year = 2016
            raw_indicator = extract_raw_country_indicator(
                country, indicator, start_year, end_year)
            indicator_rows.append(
                transform_raw_country_indicator_information(raw_indicator))
    load_country_information(country_rows, 'raw_countries.csv')
    load_country_indicator_information(indicator_rows, 'raw_indicator.csv')
def etl_job_2():
    """Second ETL stage: read the raw CSVs produced by etl_job_1 and emit
    the dimension (dim_country.csv) and fact
    (fct_country_indicator_transitions.csv) tables."""
    country_information_dictionary = defaultdict(defaultdict)
    with open('raw_countries.csv', 'rb') as csvfile:
        for row in csv.DictReader(csvfile):
            country_information_dictionary[row['iso2Code']].update(row)

    country_indicators_dictionary = defaultdict(defaultdict)
    with open('raw_indicator.csv', 'rb') as csvfile:
        for row in csv.DictReader(csvfile):
            key = (row['iso2Code'], row['year'])
            # NY.GDP.MKTP.CD is GDP in current USD; the only other code
            # staged by etl_job_1 is total population.
            if row['indicator_code'] == 'NY.GDP.MKTP.CD':
                country_indicators_dictionary[key]['gdp'] = row['indicator_value']
            else:
                country_indicators_dictionary[key]['population'] = row['indicator_value']

    country_dimensions = transform_country_info_to_dimensions(
        country_information_dictionary, country_indicators_dictionary)
    load_country_info_to_dimensions(country_dimensions, 'dim_country.csv')

    country_indicator_transitions = transform_country_indicator_transitions(
        country_dimensions, country_indicators_dictionary)
    load_country_indicator_transitions(
        country_indicator_transitions, 'fct_country_indicator_transitions.csv')
if __name__ == "__main__":
    # Stage 1 downloads the raw data; stage 2 derives the warehouse tables.
    etl_job_1()
    etl_job_2()
| [
"s26mehta@gmail.com"
] | s26mehta@gmail.com |
dcb6a6d9db8bcaafab7bfe2da66350daa43a69e6 | c36a8b52ffeeeed8242f10e8d6f3615bf8b7cbab | /sides_of_a_triangle.py | 6683e01bf21254ccfdb22b48d4c2963a9cf77919 | [] | no_license | parveen99/infytqpy | ee86e148a89c2f290963e1961740716923b111f0 | 44a31659410dea6d1d1c70d815fbb8dc5efc1a25 | refs/heads/master | 2020-05-09T12:56:16.196252 | 2019-06-04T08:24:52 | 2019-06-04T08:24:52 | 181,130,537 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | #PF-Assgn-24
def form_triangle(num1,num2,num3):
    """Check the triangle inequality for three side lengths.

    Returns "Triangle can be formed" when each side is strictly shorter
    than the sum of the other two, else "Triangle can't be formed".
    """
    # Do not change these messages -- callers compare against them.
    success="Triangle can be formed"
    failure="Triangle can't be formed"
    # Once sorted, only the largest side needs checking against the sum
    # of the other two.
    sides = sorted([num1, num2, num3])
    return success if sides[2] < sides[0] + sides[1] else failure
#Use the following messages to return the result wherever necessary
#Provide different values for the variables, num1, num2, num3 and test your program
# Smoke test: 3, 3, 5 satisfies the strict triangle inequality.
num1=3
num2=3
num3=5
res=form_triangle(num1, num2, num3)
print(res)
| [
"noreply@github.com"
] | noreply@github.com |
7a67c47173425d5c73bb905a499cbf5a91d248db | f2df88dd0e6e9b2dbffe9b12e98366d116022deb | /Academic Projects/Advertisement duplicate Detection/script.py | ae02f54322ece19ed847039766b43e2dcb6d9d85 | [] | no_license | ZouCheng321/Codes | 6ea2aae30bccd19dbb9ed2a5a5ebd61bc298471b | 18048a26fcfc25f0781df1ebd3b9b6257cfb3e02 | refs/heads/master | 2020-07-20T19:25:09.145432 | 2016-06-11T14:17:31 | 2016-06-11T14:17:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,591 | py | import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# Reproducibility seed for the shuffle below; sentinel for missing values.
seed = 1358
nanfill = -1000
# --- Train set: join the pair list with per-item info on both sides ---
trainInfo = pd.read_csv("../input/ItemInfo_train.csv")
trainInfo = trainInfo.drop(['locationID', 'metroID', 'categoryID', 'attrsJSON'], axis = 1)
train = pd.read_csv("../input/ItemPairs_train.csv")
train = pd.merge(pd.merge(train, trainInfo, how = 'inner', left_on = 'itemID_1', right_on = 'itemID'), trainInfo, how = 'inner', left_on = 'itemID_2', right_on = 'itemID')
del trainInfo
train = train.drop(['itemID_1', 'itemID_2', 'generationMethod', 'itemID_x', 'itemID_y'], axis = 1)
print('Find distance of train pairs ...')
# Manhattan distance in raw lat/lon degrees between the two ads.
train['dist'] = np.abs(train['lat_x']-train['lat_y'])+np.abs(train['lon_x']-train['lon_y'])
train = train.drop(['lat_x', 'lon_x', 'lat_y', 'lon_y'], axis = 1)
print('Find relative price difference of train pairs ...')
# Price gap relative to the cheaper ad; both-prices-missing pairs get 0.
train['price_diff'] = np.abs(train['price_x']-train['price_y'])*1./np.min(train[['price_x','price_y']], axis =1)
train.loc[(train.price_x.isnull()) & (train.price_y.isnull()),'price_diff'] = 0
train = train.drop(['price_x', 'price_y'], axis = 1)
print('Find relative title difference of train pairs ...')
# 1.0 when the titles match exactly, 0.0 otherwise.
train['title_diff'] = train[['title_x', 'title_y']].apply(lambda x:(x[0]==x[1])+0.0, axis = 1)
train = train.drop(['title_x', 'title_y'], axis = 1)
print('Find relative description difference of train pairs ...')
train['description_diff'] = train[['description_x', 'description_y']].apply(lambda x:(x[0]==x[1])+0.0, axis = 1)
train = train.drop(['description_x', 'description_y'], axis = 1)
print('Find difference of number of images in train pairs ...')
# images_array holds a space-separated id list; count the entries per side.
train['images_array_x'] = train['images_array_x'].apply(lambda x:len(x.split()) if isinstance(x, str) else 0)
train['images_array_y'] = train['images_array_y'].apply(lambda x:len(x.split()) if isinstance(x, str) else 0)
train['images_num_diff'] = train[['images_array_x', 'images_array_y']].apply(lambda x: abs(x[0]-x[1]), axis = 1)
train = train.drop(['images_array_x', 'images_array_y'], axis = 1)
y = train.isDuplicate.values
train = train.drop('isDuplicate', axis = 1)
train = train.fillna(nanfill)
print(train.columns)
# --- Test set: identical feature engineering as for train ---
testInfo = pd.read_csv("../input/ItemInfo_test.csv")
testInfo = testInfo.drop(['locationID', 'metroID', 'categoryID', 'attrsJSON'], axis = 1)
test = pd.read_csv("../input/ItemPairs_test.csv")
test = pd.merge(pd.merge(test, testInfo, how = 'inner', left_on = 'itemID_1', right_on = 'itemID'), testInfo, how = 'inner', left_on = 'itemID_2', right_on = 'itemID')
del testInfo
# Keep the pair ids for the submission file before dropping them.
ids = test['id'].values
test = test.drop(['id', 'itemID_1', 'itemID_2', 'itemID_x', 'itemID_y'], axis = 1)
print('Find distance of test pairs ...')
test['dist'] = np.abs(test['lat_x']-test['lat_y'])+np.abs(test['lon_x']-test['lon_y'])
test = test.drop(['lat_x', 'lon_x', 'lat_y', 'lon_y'], axis = 1)
print('Find relative price difference of test pairs ...')
test['price_diff'] = np.abs(test['price_x']-test['price_y'])*1./np.min(test[['price_x','price_y']], axis =1)
test.loc[(test.price_x.isnull()) & (test.price_y.isnull()),'price_diff'] = 0
test = test.drop(['price_x', 'price_y'], axis = 1)
print('Find relative title difference of test pairs ...')
test['title_diff'] = test[['title_x', 'title_y']].apply(lambda x:(x[0]==x[1])+0.0, axis = 1)
test = test.drop(['title_x', 'title_y'], axis = 1)
print('Find relative description difference of test pairs ...')
test['description_diff'] = test[['description_x', 'description_y']].apply(lambda x:(x[0]==x[1])+0.0, axis = 1)
test = test.drop(['description_x', 'description_y'], axis = 1)
print('Find difference of number of images in test pairs ...')
test['images_array_x'] = test['images_array_x'].apply(lambda x:len(x.split()) if isinstance(x, str) else 0)
test['images_array_y'] = test['images_array_y'].apply(lambda x:len(x.split()) if isinstance(x, str) else 0)
test['images_num_diff'] = test[['images_array_x', 'images_array_y']].apply(lambda x: abs(x[0]-x[1]), axis = 1)
test = test.drop(['images_array_x', 'images_array_y'], axis = 1)
# NOTE(review): train.csv is written AFTER fillna but test.csv BEFORE its
# fillna below, so the two staged files are inconsistent -- confirm whether
# that asymmetry is intended.
train.to_csv('train.csv')
test.to_csv('test.csv')
test = test.fillna(nanfill)
print(test.columns)
# Standardize features (fit on train only), shuffle, fit a logistic
# regression and write the probability submission.
scaler = StandardScaler()
train = scaler.fit_transform(train.values)
test = scaler.transform(test.values)
np.random.seed(seed)
shflidx = np.random.permutation(train.shape[0])
train = train[shflidx, :]
y = y[shflidx]
clf = LogisticRegression()
clf.fit(train, y)
# Probability of the positive (duplicate) class.
preds = clf.predict_proba(test)[:,1]
sub = pd.DataFrame()
sub['id'] = ids
sub['probability'] = preds
sub.to_csv('submission.csv', index = False)
"cpsamdavid@gmail.com"
] | cpsamdavid@gmail.com |
b80be6fb025b30540edd6d726ca50c4756a7d739 | fd1ed6e7fdad1bbb2fb8b6df02aed796c336fe1e | /exit.py | e9e94a21cb640f2b5686acfac3a77d14ffe26aab | [] | no_license | gauravwt63/Iris-Data-analysis-and-prediction | b033d788f0c4956a5e6a712b4229fb65c999b894 | a0101481887bea7c151cbf8dba9c8905bd6a8720 | refs/heads/master | 2023-06-08T20:05:33.495570 | 2021-06-22T18:43:37 | 2021-06-22T18:43:37 | 379,368,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | while True:
again = input("Do you want to Exit(yes / no):").strip().lower()
if again == "yes":
break
elif again=="no":
continue
| [
"gauravwt63@gmail.com"
] | gauravwt63@gmail.com |
89e2373d1870ea26db666a16121383000169882e | 8a081742c8a58c872a15f01d6d4d8c1028e4f7eb | /1404.py | b7f2fe45af35e2bd9b49f8c76ce20624ed64ad2c | [] | no_license | dibery/leetcode | 01b933772e317ccd4885b508de503b7873a4b65f | 096218b5d0b47ce38874c4b7141aca35e9d678c9 | refs/heads/master | 2022-05-20T03:50:45.525256 | 2022-05-17T00:57:48 | 2022-05-17T00:57:48 | 211,606,152 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | class Solution:
def numSteps(self, s: str) -> int:
s, ans = int(s, 2), 0
while s > 1:
ans += 1
s += 1 if s % 2 else s //= 2
return ans
| [
"bor810818@yahoo.com.tw"
] | bor810818@yahoo.com.tw |
0ded20645f7f188aff3c3657537ad2ed8f4a01b1 | f61cfb446a9286d897e481f8a9ce3c8cd13e8bb6 | /scrappers/sulbar.py | e634687c764c6a461f9ffebc2a3d806f7b8150fd | [] | no_license | agusedyc/covid19-province-scrapper | 3305192dac003d5f00ae752fe03d85f10b361444 | 02c63c43da1979720443ebe929e41e82880c6c1d | refs/heads/master | 2022-07-08T22:04:40.576758 | 2020-05-06T11:33:45 | 2020-05-06T11:33:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,430 | py | import datetime
import dateparser
import pandas as pd
import requests
from bs4 import BeautifulSoup as soup
from peewee import fn
from models import Province, KabupatenKota, Data
def scrape():
    """Scrape the Sulawesi Barat COVID-19 dashboard table and upsert it
    into the database.

    Returns today's cached Data rows when they already exist; otherwise
    fetches the province page, parses the per-kabupaten table, stores new
    KabupatenKota/Data rows, and returns the parsed items as
    ``{"result": [...]}``.
    """
    prov = __name__.split(".")[-1]

    # Find-or-create the province record.
    propinsi = Province.select().where(Province.nama_prov == "Sulawesi Barat")
    if propinsi.count() < 1:
        propinsi = Province.create(nama_prov="Sulawesi Barat", alias=prov)
    else:
        propinsi = propinsi.get()

    sekarang = datetime.datetime.now().date()
    try:
        # BUG FIX: ``Province.alias == prov`` used to be passed as a second
        # positional argument to list(), which raised TypeError that the
        # bare except silently swallowed -- so the daily cache was never
        # hit.  Both conditions now live inside the where() clause, and
        # the except is narrowed to Exception.
        result = list(
            Data.select()
            .join(Province)
            .where(
                (fn.date_trunc("day", Data.last_update) == sekarang)
                & (Province.alias == prov)))
    except Exception:
        result = []
    if len(result) > 0:
        # Already scraped today; serve the cached rows.
        return result

    link = "https://covid19.sulbarprov.go.id/utama/data"
    output = {}
    output["result"] = []
    with requests.session() as s:
        r = s.get(link, verify=True)
        data = r.text
        url = soup(data, "lxml")
        table = url.find("table", attrs={"class": "table-responsive"})
        if table is not None:
            table_rows = table.find_all("tr")
            num_rows = len(table_rows)
            i = 0
            for tr in table_rows:
                td = tr.find_all("td")
                row = [tr.text.strip() for tr in td if tr.text.strip()]
                # Skip the header row (i == 0) and the trailing totals row.
                if i >= 1 and i < num_rows - 1:
                    if row:
                        # Column layout (0-based), inferred from the
                        # indexing below: 1=kab/kota name, 2=ODP, 6=OTG,
                        # 10=PDP, 14=confirmed, 17=recovered and
                        # 5/9/12/18=deaths per category -- confirm against
                        # the live page if it changes.
                        list_item = {}
                        list_item["provinsi"] = "Sulawesi Barat"
                        list_item["kode_kab_kota"] = "N/A"
                        list_item["kab_kota"] = str(row[1]).rstrip()
                        list_item["kecamatan"] = "N/A"
                        list_item["populasi"] = "N/A"
                        list_item["lat_kab_kota"] = "N/A"
                        list_item["long_kab_kota"] = "N/A"
                        list_item["n_odr"] = "N/A"
                        list_item["n_otg"] = int(str(row[6]).rstrip())
                        list_item["n_odp"] = int(str(row[2]).rstrip())
                        list_item["n_pdp"] = int(str(row[10]).rstrip())
                        list_item["n_confirm"] = int(str(row[14]).rstrip())
                        list_item["n_meninggal"] = (
                            int(str(row[5]).rstrip())
                            + int(str(row[9]).rstrip())
                            + int(str(row[12]).rstrip())
                            + int(str(row[18]).rstrip())
                        )
                        list_item["n_sembuh"] = int(str(row[17]).rstrip())
                        list_item["last_update"] = "N/A"

                        # Find-or-create the kabupaten/kota record.
                        kabkota = KabupatenKota.select().where(
                            KabupatenKota.prov_id == propinsi,
                            KabupatenKota.nama == row[1],
                        )
                        if kabkota.count() < 1:
                            kabkota = KabupatenKota.create(
                                prov_id=propinsi, nama=row[1]
                            )
                        else:
                            kabkota = kabkota.get()

                        # NOTE(review): this equality check against now()
                        # (microsecond precision) will virtually never
                        # match, so a new Data row is inserted on every
                        # scrape -- confirm whether day-level dedup was
                        # intended here.
                        datum = Data.select().where(
                            Data.kabupaten == kabkota,
                            Data.last_update == datetime.datetime.now(),
                        )
                        if datum.count() < 1:
                            datum = Data.create(
                                kabupaten=kabkota,
                                n_otg=int(str(row[6]).rstrip()),
                                n_odp=int(str(row[2]).rstrip()),
                                n_pdp=int(str(row[10]).rstrip()),
                                n_confirm=int(str(row[14]).rstrip()),
                                n_meninggal=int(str(row[5]).rstrip())
                                + int(str(row[9]).rstrip())
                                + int(str(row[12]).rstrip())
                                + int(str(row[18]).rstrip()),
                                n_sembuh=int(str(row[17]).rstrip()),
                                last_update=datetime.datetime.now(),
                            )
                        output["result"].append(list_item)
                i = i + 1
    return output
| [
"muhammad.robee@gmail.com"
] | muhammad.robee@gmail.com |
1bccf0f17e21a5f80aa85d92e8131607b1f3fa1c | 9818262abff066b528a4c24333f40bdbe0ae9e21 | /Day 28/UtopianTree.py | 1bc5b897230286bb4173fdca69d29b1cdd03d6f9 | [
"MIT"
] | permissive | skdonepudi/100DaysOfCode | 749f62eef5826cb2ec2a9ab890fa23e784072703 | af4594fb6933e4281d298fa921311ccc07295a7c | refs/heads/master | 2023-02-01T08:51:33.074538 | 2020-12-20T14:02:36 | 2020-12-20T14:02:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | '''
The Utopian Tree goes through 2 cycles of growth every year. Each spring, it doubles in height. Each summer, its height increases by 1 meter.
A Utopian Tree sapling with a height of 1 meter is planted at the onset of spring. How tall will the tree be after growth cycles?
For example, if the number of growth cycles is , the calculations are as follows:
Period Height
0 1
1 2
2 3
3 6
4 7
5 14
Function Description
Complete the utopianTree function in the editor below.
utopianTree has the following parameter(s):
int n: the number of growth cycles to simulate
Returns
int: the height of the tree after the given number of cycles
Input Format
The first line contains an integer, , the number of test cases.
subsequent lines each contain an integer, , the number of cycles for that test case.
Sample Input
3
0
1
4
Sample Output
1
2
7
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
if n < 3:
return n + 1
if n % 2 == 0:
return (utopianTree(n - 2) * 2) + 1
else:
return (utopianTree(n - 2) + 1) * 2
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
n = int(input())
result = utopianTree(n)
fptr.write(str(result) + '\n')
fptr.close()
| [
"sandeepiiitn@gmail.com"
] | sandeepiiitn@gmail.com |
45aba41af0d535134768e5f9a3a6b157615a01f0 | b7b12fbb05a95e14fe350cc8332dae7f474f5584 | /estruturaSequencial/exercicio15.py | e037a0f7f0c913b961cae64ab2728ff15f0a11c6 | [] | no_license | florianodev/maratona_datasciencebrazil | eb43506393dae86f1562d2d6bd22343b98c7679e | e456e88988a42701f265ca407871510e86fd8202 | refs/heads/master | 2020-04-12T19:31:51.530267 | 2018-12-27T01:51:35 | 2018-12-27T01:51:35 | 162,711,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | import module.MyLib as ml
hh = ml.entradaFloat("Quanto ganha por hora? ")
ht = ml.entradaFloat("Horas trabalhadas no mês? ")
salario = hh*ht;
ir = salario * 0.11
inss = salario * 0.08
sindicato = salario * 0.05
liquido = salario - ir - inss - sindicato
print("+ Salário Bruto: R$ %.2f" % salario)
print("- IR: R$ %.2f" % ir)
print("- INSS:R$ %.2f" % inss)
print("- Sindicato:R$ %.2f" % sindicato)
print("= Salário liquido: R$ %.2f" % liquido)
| [
"floriano.nunes@gmail"
] | floriano.nunes@gmail |
f776d1a93a69794decb2e66b335e5c397fc247ed | 7e0f18afb297a221f79495c03bce1c35d653d27a | /shopping-cart/phone.py | 4fd41ee05a7b4dbaff0365f7d84bd802ca30ac90 | [] | no_license | mounipopuri/Shoppingcart | 669e0c85d28774362e5bd841f32a7c64bef151a4 | bc305bb732c197c19e0105a888b7a98642c800c2 | refs/heads/master | 2020-05-03T22:30:19.041293 | 2019-04-08T09:36:02 | 2019-04-08T09:36:02 | 178,844,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import nexmo
responseData = client.send_message(
{
"from": "Acme Inc",
"to": '917702592673',
"text": "A text message sent using the Nexmo SMS API",
}
)
if responseData["messages"][0]["status"] == "0":
print("Message sent successfully.")
else:
print("Message failed with error: {responseData['messages'][0]['error-text']}")
| [
"muqeeth.23@gmail.com"
] | muqeeth.23@gmail.com |
48e691d9db78dd5311f2cf2ccafe9a0f023f231f | a1fd7d56240180e59ff272b1bbb78e31daadb460 | /cmc/manage.py | eecab2757d789171ce11b6866d43191313965710 | [] | no_license | daranedag/cmc | 10572110a5b8e78bbb90eb0ce06b0bf2672a081d | 83616ed25db9c7f5a530b442028760ce6805ee84 | refs/heads/master | 2020-03-06T18:21:23.001855 | 2018-04-04T14:53:49 | 2018-04-04T14:53:49 | 127,005,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cmc.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"daraneda@allware.cl"
] | daraneda@allware.cl |
5af889034a242a85d9b2af11a9bffb472f0ac57a | bf9ae1e4269952622c7f03dc86c418d21eb20ec7 | /PythonCode-OldBoy/考核/4_For实现加法计算.py | 78f161cc14d31dcc8db5fe4393529f7e24a1eaf8 | [] | no_license | ChuixinZeng/PythonStudyCode | 5692ca7cf5fe9b9ca24e9f54f6594f3a79b0ffb5 | 2986c83c804da51ef386ca419d0c4ebcf194cf8f | refs/heads/master | 2021-01-21T16:09:58.622069 | 2019-12-01T14:30:36 | 2019-12-01T14:30:36 | 91,876,874 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # -*- coding:utf-8 -*-
# Author:Chuixin Zeng
# 使用for循环和range实现输出 1-2+3-4+5-6...+99 的和
b = 0 #结果
for i in range(1,100):
if i%2 == 0:#判断是偶数
b = b - i #结果加当前的i
else:#基数
b = b + i #结果减当前i
i += 1 #判断完自增一
print(b)
#结果:50 | [
"zengchuixin@le.com"
] | zengchuixin@le.com |
3a8f518a84296bd33cd4f2020765e60f958f8856 | 5e83d62064ea4fd954820960306fb06cc8f0f391 | /newsletter/forms.py | b32cd71b5fa39b823de5b46be2724352afed7ec5 | [] | no_license | bharatkumarrathod/cfe_ecommerce2_RESTapi | eff2fad0cbff7cb3def2c13de282b085aba7291d | a081cdbf10c1fbde58e128b9c9b287443c726071 | refs/heads/master | 2020-12-25T21:43:44.166109 | 2015-10-27T21:04:19 | 2015-10-27T21:04:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from django import forms
from .models import SignUp
class SignUpForm(forms.ModelForm):
""" create form using our model """
class Meta:
model = SignUp
fields = ['full_name', 'email']
## DEBUG ONLY ## use this to manually add entries for different dates.
# fields = ['full_name', 'email', 'created']
def clean_email(self):
email = self.cleaned_data.get('email')
user, domain = email.split('@')
if not domain == 'mydomain.com':
raise forms.ValidationError(
"Please make sure you use your University email."
)
return email
class ContactForm(forms.Form):
""" create form that will not be saved to our database """
name = forms.CharField(max_length=255)
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea)
| [
"carlofusiello@gmail.com"
] | carlofusiello@gmail.com |
0c8ef2128af545fdb92c81391e5d815bbd2a1371 | 37087383bbb91dbd76b50634111e273aaf2786f5 | /venv/Lib/site-packages/comtypes/gen/_00020430_0000_0000_C000_000000000046_0_2_0.py | 041f2deac1da41636cf3efe0eef6ae5f780a5eb3 | [] | no_license | akhtar016/Pdf-Reader-with-Python | 3a5e5f6ea76d414b8c4ef2a3b4ef2f863c24a2a0 | a9a62dd914fe15006a12d93e9a4f13dfcde6f521 | refs/heads/master | 2022-11-29T04:29:36.929501 | 2020-08-11T07:17:22 | 2020-08-11T07:17:22 | 286,673,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,761 | py | # -*- coding: mbcs -*-
typelib_path = 'C:\\Windows\\SysWOW64\\stdole2.tlb'
_lcid = 0 # change this if required
from ctypes import *
FONTSIZE = c_longlong
from ctypes.wintypes import VARIANT_BOOL
FONTBOLD = VARIANT_BOOL
from comtypes.automation import IDispatch
from comtypes import GUID
from comtypes import BSTR
from comtypes import dispid
from comtypes import DISPMETHOD, DISPPROPERTY, helpstring
FONTITALIC = VARIANT_BOOL
FONTUNDERSCORE = VARIANT_BOOL
OLE_HANDLE = c_int
OLE_XSIZE_HIMETRIC = c_int
OLE_YSIZE_HIMETRIC = c_int
OLE_XPOS_HIMETRIC = c_int
OLE_YPOS_HIMETRIC = c_int
FONTSTRIKETHROUGH = VARIANT_BOOL
from comtypes import IUnknown
from ctypes import HRESULT
from comtypes import helpstring
from comtypes import COMMETHOD
from comtypes.automation import IEnumVARIANT
from comtypes import CoClass
from comtypes.automation import EXCEPINFO
OLE_COLOR = c_ulong
OLE_XPOS_PIXELS = c_int
OLE_YPOS_PIXELS = c_int
OLE_XSIZE_PIXELS = c_int
OLE_YSIZE_PIXELS = c_int
from comtypes import GUID
OLE_XPOS_CONTAINER = c_float
OLE_YPOS_CONTAINER = c_float
OLE_XSIZE_CONTAINER = c_float
OLE_YSIZE_CONTAINER = c_float
from comtypes.automation import DISPPARAMS
OLE_OPTEXCLUSIVE = VARIANT_BOOL
OLE_CANCELBOOL = VARIANT_BOOL
FONTNAME = BSTR
OLE_ENABLEDEFAULTBOOL = VARIANT_BOOL
# values for enumeration 'OLE_TRISTATE'
Unchecked = 0
Checked = 1
Gray = 2
OLE_TRISTATE = c_int # enum
class Font(IDispatch):
_case_insensitive_ = True
_iid_ = GUID('{BEF6E003-A874-101A-8BBA-00AA00300CAB}')
_idlflags_ = []
_methods_ = []
Font._disp_methods_ = [
DISPPROPERTY([dispid(0)], BSTR, 'Name'),
DISPPROPERTY([dispid(2)], c_longlong, 'Size'),
DISPPROPERTY([dispid(3)], VARIANT_BOOL, 'Bold'),
DISPPROPERTY([dispid(4)], VARIANT_BOOL, 'Italic'),
DISPPROPERTY([dispid(5)], VARIANT_BOOL, 'Underline'),
DISPPROPERTY([dispid(6)], VARIANT_BOOL, 'Strikethrough'),
DISPPROPERTY([dispid(7)], c_short, 'Weight'),
DISPPROPERTY([dispid(8)], c_short, 'Charset'),
]
class Picture(IDispatch):
_case_insensitive_ = True
_iid_ = GUID('{7BF80981-BF32-101A-8BBB-00AA00300CAB}')
_idlflags_ = []
_methods_ = []
Picture._disp_methods_ = [
DISPPROPERTY([dispid(0), 'readonly'], OLE_HANDLE, 'Handle'),
DISPPROPERTY([dispid(2)], OLE_HANDLE, 'hPal'),
DISPPROPERTY([dispid(3), 'readonly'], c_short, 'Type'),
DISPPROPERTY([dispid(4), 'readonly'], OLE_XSIZE_HIMETRIC, 'Width'),
DISPPROPERTY([dispid(5), 'readonly'], OLE_YSIZE_HIMETRIC, 'Height'),
DISPMETHOD([dispid(6)], None, 'Render',
( [], c_int, 'hdc' ),
( [], c_int, 'x' ),
( [], c_int, 'y' ),
( [], c_int, 'cx' ),
( [], c_int, 'cy' ),
( [], OLE_XPOS_HIMETRIC, 'xSrc' ),
( [], OLE_YPOS_HIMETRIC, 'ySrc' ),
( [], OLE_XSIZE_HIMETRIC, 'cxSrc' ),
( [], OLE_YSIZE_HIMETRIC, 'cySrc' ),
( [], c_void_p, 'prcWBounds' )),
]
class IFont(IUnknown):
_case_insensitive_ = True
'Font Object'
_iid_ = GUID('{BEF6E002-A874-101A-8BBA-00AA00300CAB}')
_idlflags_ = ['hidden']
IFont._methods_ = [
COMMETHOD(['propget'], HRESULT, 'Name',
( ['out', 'retval'], POINTER(BSTR), 'pname' )),
COMMETHOD(['propput'], HRESULT, 'Name',
( ['in'], BSTR, 'pname' )),
COMMETHOD(['propget'], HRESULT, 'Size',
( ['out', 'retval'], POINTER(c_longlong), 'psize' )),
COMMETHOD(['propput'], HRESULT, 'Size',
( ['in'], c_longlong, 'psize' )),
COMMETHOD(['propget'], HRESULT, 'Bold',
( ['out', 'retval'], POINTER(VARIANT_BOOL), 'pbold' )),
COMMETHOD(['propput'], HRESULT, 'Bold',
( ['in'], VARIANT_BOOL, 'pbold' )),
COMMETHOD(['propget'], HRESULT, 'Italic',
( ['out', 'retval'], POINTER(VARIANT_BOOL), 'pitalic' )),
COMMETHOD(['propput'], HRESULT, 'Italic',
( ['in'], VARIANT_BOOL, 'pitalic' )),
COMMETHOD(['propget'], HRESULT, 'Underline',
( ['out', 'retval'], POINTER(VARIANT_BOOL), 'punderline' )),
COMMETHOD(['propput'], HRESULT, 'Underline',
( ['in'], VARIANT_BOOL, 'punderline' )),
COMMETHOD(['propget'], HRESULT, 'Strikethrough',
( ['out', 'retval'], POINTER(VARIANT_BOOL), 'pstrikethrough' )),
COMMETHOD(['propput'], HRESULT, 'Strikethrough',
( ['in'], VARIANT_BOOL, 'pstrikethrough' )),
COMMETHOD(['propget'], HRESULT, 'Weight',
( ['out', 'retval'], POINTER(c_short), 'pweight' )),
COMMETHOD(['propput'], HRESULT, 'Weight',
( ['in'], c_short, 'pweight' )),
COMMETHOD(['propget'], HRESULT, 'Charset',
( ['out', 'retval'], POINTER(c_short), 'pcharset' )),
COMMETHOD(['propput'], HRESULT, 'Charset',
( ['in'], c_short, 'pcharset' )),
COMMETHOD(['propget'], HRESULT, 'hFont',
( ['out', 'retval'], POINTER(OLE_HANDLE), 'phfont' )),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IFont)), 'ppfont' )),
COMMETHOD([], HRESULT, 'IsEqual',
( ['in'], POINTER(IFont), 'pfontOther' )),
COMMETHOD([], HRESULT, 'SetRatio',
( ['in'], c_int, 'cyLogical' ),
( ['in'], c_int, 'cyHimetric' )),
COMMETHOD([], HRESULT, 'AddRefHfont',
( ['in'], OLE_HANDLE, 'hFont' )),
COMMETHOD([], HRESULT, 'ReleaseHfont',
( ['in'], OLE_HANDLE, 'hFont' )),
]
################################################################
## code template for IFont implementation
##class IFont_Impl(object):
## def _get(self):
## '-no docstring-'
## #return pname
## def _set(self, pname):
## '-no docstring-'
## Name = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return psize
## def _set(self, psize):
## '-no docstring-'
## Size = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return pbold
## def _set(self, pbold):
## '-no docstring-'
## Bold = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return pitalic
## def _set(self, pitalic):
## '-no docstring-'
## Italic = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return punderline
## def _set(self, punderline):
## '-no docstring-'
## Underline = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return pstrikethrough
## def _set(self, pstrikethrough):
## '-no docstring-'
## Strikethrough = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return pweight
## def _set(self, pweight):
## '-no docstring-'
## Weight = property(_get, _set, doc = _set.__doc__)
##
## def _get(self):
## '-no docstring-'
## #return pcharset
## def _set(self, pcharset):
## '-no docstring-'
## Charset = property(_get, _set, doc = _set.__doc__)
##
## @property
## def hFont(self):
## '-no docstring-'
## #return phfont
##
## def Clone(self):
## '-no docstring-'
## #return ppfont
##
## def IsEqual(self, pfontOther):
## '-no docstring-'
## #return
##
## def SetRatio(self, cyLogical, cyHimetric):
## '-no docstring-'
## #return
##
## def AddRefHfont(self, hFont):
## '-no docstring-'
## #return
##
## def ReleaseHfont(self, hFont):
## '-no docstring-'
## #return
##
IFontDisp = Font
class StdPicture(CoClass):
_reg_clsid_ = GUID('{0BE35204-8F91-11CE-9DE3-00AA004BB851}')
_idlflags_ = []
_typelib_path_ = typelib_path
_reg_typelib_ = ('{00020430-0000-0000-C000-000000000046}', 2, 0)
class IPicture(IUnknown):
_case_insensitive_ = True
'Picture Object'
_iid_ = GUID('{7BF80980-BF32-101A-8BBB-00AA00300CAB}')
_idlflags_ = ['hidden']
StdPicture._com_interfaces_ = [Picture, IPicture]
class StdFont(CoClass):
_reg_clsid_ = GUID('{0BE35203-8F91-11CE-9DE3-00AA004BB851}')
_idlflags_ = []
_typelib_path_ = typelib_path
_reg_typelib_ = ('{00020430-0000-0000-C000-000000000046}', 2, 0)
class FontEvents(IDispatch):
_case_insensitive_ = True
'Event interface for the Font object'
_iid_ = GUID('{4EF6100A-AF88-11D0-9846-00C04FC29993}')
_idlflags_ = ['hidden']
_methods_ = []
StdFont._com_interfaces_ = [Font, IFont]
StdFont._outgoing_interfaces_ = [FontEvents]
IPicture._methods_ = [
COMMETHOD(['propget'], HRESULT, 'Handle',
( ['out', 'retval'], POINTER(OLE_HANDLE), 'phandle' )),
COMMETHOD(['propget'], HRESULT, 'hPal',
( ['out', 'retval'], POINTER(OLE_HANDLE), 'phpal' )),
COMMETHOD(['propget'], HRESULT, 'Type',
( ['out', 'retval'], POINTER(c_short), 'ptype' )),
COMMETHOD(['propget'], HRESULT, 'Width',
( ['out', 'retval'], POINTER(OLE_XSIZE_HIMETRIC), 'pwidth' )),
COMMETHOD(['propget'], HRESULT, 'Height',
( ['out', 'retval'], POINTER(OLE_YSIZE_HIMETRIC), 'pheight' )),
COMMETHOD([], HRESULT, 'Render',
( ['in'], c_int, 'hdc' ),
( ['in'], c_int, 'x' ),
( ['in'], c_int, 'y' ),
( ['in'], c_int, 'cx' ),
( ['in'], c_int, 'cy' ),
( ['in'], OLE_XPOS_HIMETRIC, 'xSrc' ),
( ['in'], OLE_YPOS_HIMETRIC, 'ySrc' ),
( ['in'], OLE_XSIZE_HIMETRIC, 'cxSrc' ),
( ['in'], OLE_YSIZE_HIMETRIC, 'cySrc' ),
( ['in'], c_void_p, 'prcWBounds' )),
COMMETHOD(['propput'], HRESULT, 'hPal',
( ['in'], OLE_HANDLE, 'phpal' )),
COMMETHOD(['propget'], HRESULT, 'CurDC',
( ['out', 'retval'], POINTER(c_int), 'phdcOut' )),
COMMETHOD([], HRESULT, 'SelectPicture',
( ['in'], c_int, 'hdcIn' ),
( ['out'], POINTER(c_int), 'phdcOut' ),
( ['out'], POINTER(OLE_HANDLE), 'phbmpOut' )),
COMMETHOD(['propget'], HRESULT, 'KeepOriginalFormat',
( ['out', 'retval'], POINTER(VARIANT_BOOL), 'pfkeep' )),
COMMETHOD(['propput'], HRESULT, 'KeepOriginalFormat',
( ['in'], VARIANT_BOOL, 'pfkeep' )),
COMMETHOD([], HRESULT, 'PictureChanged'),
COMMETHOD([], HRESULT, 'SaveAsFile',
( ['in'], c_void_p, 'pstm' ),
( ['in'], VARIANT_BOOL, 'fSaveMemCopy' ),
( ['out'], POINTER(c_int), 'pcbSize' )),
COMMETHOD(['propget'], HRESULT, 'Attributes',
( ['out', 'retval'], POINTER(c_int), 'pdwAttr' )),
COMMETHOD([], HRESULT, 'SetHdc',
( ['in'], OLE_HANDLE, 'hdc' )),
]
################################################################
## code template for IPicture implementation
##class IPicture_Impl(object):
## @property
## def Handle(self):
## '-no docstring-'
## #return phandle
##
## def _get(self):
## '-no docstring-'
## #return phpal
## def _set(self, phpal):
## '-no docstring-'
## hPal = property(_get, _set, doc = _set.__doc__)
##
## @property
## def Type(self):
## '-no docstring-'
## #return ptype
##
## @property
## def Width(self):
## '-no docstring-'
## #return pwidth
##
## @property
## def Height(self):
## '-no docstring-'
## #return pheight
##
## def Render(self, hdc, x, y, cx, cy, xSrc, ySrc, cxSrc, cySrc, prcWBounds):
## '-no docstring-'
## #return
##
## @property
## def CurDC(self):
## '-no docstring-'
## #return phdcOut
##
## def SelectPicture(self, hdcIn):
## '-no docstring-'
## #return phdcOut, phbmpOut
##
## def _get(self):
## '-no docstring-'
## #return pfkeep
## def _set(self, pfkeep):
## '-no docstring-'
## KeepOriginalFormat = property(_get, _set, doc = _set.__doc__)
##
## def PictureChanged(self):
## '-no docstring-'
## #return
##
## def SaveAsFile(self, pstm, fSaveMemCopy):
## '-no docstring-'
## #return pcbSize
##
## @property
## def Attributes(self):
## '-no docstring-'
## #return pdwAttr
##
## def SetHdc(self, hdc):
## '-no docstring-'
## #return
##
class Library(object):
'OLE Automation'
name = 'stdole'
_reg_typelib_ = ('{00020430-0000-0000-C000-000000000046}', 2, 0)
FontEvents._disp_methods_ = [
DISPMETHOD([dispid(9)], None, 'FontChanged',
( ['in'], BSTR, 'PropertyName' )),
]
# values for enumeration 'LoadPictureConstants'
Default = 0
Monochrome = 1
VgaColor = 2
Color = 4
LoadPictureConstants = c_int # enum
IPictureDisp = Picture
IFontEventsDisp = FontEvents
__all__ = [ 'OLE_XSIZE_CONTAINER', 'FONTSIZE', 'FONTSTRIKETHROUGH',
'FONTBOLD', 'FONTUNDERSCORE', 'OLE_XSIZE_HIMETRIC',
'OLE_XSIZE_PIXELS', 'OLE_TRISTATE', 'Monochrome',
'OLE_XPOS_CONTAINER', 'IFontEventsDisp',
'OLE_YPOS_CONTAINER', 'Font', 'OLE_XPOS_PIXELS',
'OLE_YSIZE_HIMETRIC', 'OLE_YSIZE_PIXELS', 'IFont',
'Default', 'Unchecked', 'OLE_XPOS_HIMETRIC',
'OLE_ENABLEDEFAULTBOOL', 'FontEvents', 'Picture',
'Checked', 'IPicture', 'FONTITALIC', 'IPictureDisp',
'VgaColor', 'OLE_COLOR', 'LoadPictureConstants',
'OLE_HANDLE', 'StdPicture', 'Gray', 'OLE_CANCELBOOL',
'OLE_YPOS_HIMETRIC', 'StdFont', 'Color', 'IFontDisp',
'OLE_OPTEXCLUSIVE', 'FONTNAME', 'OLE_YPOS_PIXELS',
'OLE_YSIZE_CONTAINER']
from comtypes import _check_version; _check_version('')
| [
"akhtaruzzamankhan016@gmail.com"
] | akhtaruzzamankhan016@gmail.com |
c36443c259db9ea6eeb60c6aa6b75b4990e29d85 | c070464ca88ced58abd25783822eb293ebd34769 | /chap6/stack.py | 8e62e8cc24bd3e5babd837f5a18d1c3522550b76 | [] | no_license | debrekXuHan/core_python | 8b3d034b335826f22eee6e98fe1c244906ebf2ad | ebdbc934334eaa6e8070b213b390a99247386145 | refs/heads/master | 2021-05-05T15:57:35.412331 | 2019-04-29T09:30:53 | 2019-04-29T09:30:53 | 103,156,101 | 0 | 0 | null | 2017-09-11T23:49:42 | 2017-09-11T15:50:59 | Python | UTF-8 | Python | false | false | 829 | py | #!/usr/bin/env python
stack = []
def pushit():
stack.append(input('Enter New string: ').strip())
#delete the blank space on both sides of the input string
def popit():
if len(stack) == 0:
print('Cannot pop from an empty stack!')
else:
print('Removed [', repr(stack.pop()), ']')
def viewstack():
print(stack) # calls str() internally
CMDs = {'u':pushit, 'o':popit, 'v':viewstack}
def showmenu():
pr = """p(U)sh
p(O)p
(V)iew
(Q)uit
Enter choice: """
while True:
while True:
try:
choice = input(pr).strip()[0].lower()
except (EOFError, KeyboardInterrupt, IndexError):
choice = 'q'
print('\nYou picked: [%s]' %choice)
if choice not in 'uovq':
print('Invalid option, try again!')
else:
break
if choice == 'q':
break
CMDs[choice]()
if __name__ == '__main__':
showmenu() | [
"debrek_xuhan@hotmail.com"
] | debrek_xuhan@hotmail.com |
9902c2894b1ebc6917d9c4228758ad6c8af16999 | 7aa686f395ffcc94933f95ca3ccc0f960f8ccb19 | /recurrent-neural-networks/char-rnn/Character_Level_RNN_Exercise.py | c65c19fa223096cb77d1203485b2a29a1b654365 | [
"MIT"
] | permissive | chorus12/deep-learning-v2-pytorch | ca1495ff0b612457a311f1a5def544e20bfea8d7 | 69873120ef8537c212a7398e78ba1c756a075793 | refs/heads/master | 2021-08-03T20:40:22.402289 | 2021-07-27T14:20:09 | 2021-07-27T14:20:09 | 241,337,731 | 0 | 0 | MIT | 2020-02-18T10:53:23 | 2020-02-18T10:53:22 | null | UTF-8 | Python | false | false | 33,201 | py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !jupytext --to py Character_Level_RNN_Exercise.ipynb
# # Character-Level LSTM in PyTorch
#
# In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**
#
# This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
#
# <img src="assets/charseq.jpeg" width="500">
# First let's load in our required resources for data loading and model creation.
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
# ## Load in Data
#
# Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
# open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
# Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
# peek at the opening of the novel (first 101 characters)
print(text[:101])
# ### Tokenization
#
# In the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
# +
# encode the text and map each character to an integer and vice versa
# we build two lookup tables:
#   1. int2char: integer index -> character
#   2. char2int: character -> integer index
chars = tuple(set(text))
int2char = {ii: ch for ii, ch in enumerate(chars)}
char2int = {ch: ii for ii, ch in int2char.items()}

# encode the whole corpus as an array of integer character ids
encoded = np.array([char2int[ch] for ch in text])
# -
# And we can see those same characters from above, encoded as integers.
# the same opening passage, now as integer character ids
print(encoded[:101])
# ## Pre-processing the data
#
# As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only it's corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
#
def one_hot_encode(arr, n_labels):
    """Return the one-hot encoding of an integer array.

    Parameters
    ----------
    arr : array-like of int
        Integer labels; any shape (also accepts plain lists/tuples).
    n_labels : int
        Size of the one-hot dimension (number of distinct labels).

    Returns
    -------
    np.ndarray of float32 with shape ``arr.shape + (n_labels,)``: all zeros
    except for a 1 at each element's label index.
    """
    arr = np.asarray(arr)
    # Row i of the identity matrix is exactly the one-hot vector for label i,
    # so fancy-indexing the identity with `arr` encodes every element at once.
    return np.eye(n_labels, dtype=np.float32)[arr]
# +
# check that the function works as expected
# expect shape (1, 4, 8) with exactly one 1 per encoded element
test_seq = np.array([[3, 5, 1, 7]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
# -
# ## Making training mini-batches
#
#
# To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
#
# <img src="assets/sequence_batching@1x.png" width=500px>
#
#
# <br>
#
# In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.
#
# ### Creating Batches
#
# **1. The first thing we need to do is discard some of the text so we only have completely full mini-batches.**
#
# Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.
#
# **2. After that, we need to split `arr` into $N$ batches.**
#
# You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
#
# **3. Now that we have this array, we can iterate through it to get our mini-batches.**
#
# The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `seq_length` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.
#
# > **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**
def get_batches(arr, batch_size, seq_length):
    '''Yield (x, y) training batches of shape (batch_size, seq_length).

    `x` is a window of encoded characters and `y` is the same window
    shifted left by one position (the next-character targets); the very
    last target of the final window wraps around to the first column.

    Arguments
    ---------
    arr: Array you want to make batches from
    batch_size: Batch size, the number of sequences per batch
    seq_length: Number of encoded chars in a sequence
    '''
    # Trim the array so it splits evenly into batch_size rows made of
    # full seq_length windows, then lay it out one sequence per row.
    chars_per_batch = batch_size * seq_length
    n_batches = len(arr) // chars_per_batch
    arr = arr[:n_batches * chars_per_batch].reshape(batch_size, -1)

    # Slide a seq_length-wide window across the columns.
    for start in range(0, arr.shape[1], seq_length):
        x = arr[:, start:start + seq_length]
        y = np.zeros_like(x)
        y[:, :-1] = x[:, 1:]
        if start + seq_length < arr.shape[1]:
            y[:, -1] = arr[:, start + seq_length]
        else:
            # no column after the last window: wrap targets to column 0
            y[:, -1] = arr[:, 0]
        yield x, y
def get_batches_overlapped(arr, batch_size, seq_len):
    '''Yield (x, y) batches whose windows overlap by one character.

    The array is padded with its own head so it reshapes evenly into
    batch_size rows; every yielded pair satisfies "y == x shifted left by
    one column", and consecutive windows share one column. When a full
    shifted pair no longer fits, the final window is anchored against the
    right edge instead.

    Arguments
    ---------
    arr: Array you want to make batches from
    batch_size: Batch size, the number of sequences per batch
    seq_len: Number of encoded chars in a sequence
    '''
    # Pad by wrapping to the start of the array so len(arr) divides evenly
    # by batch_size.
    leftover = len(arr) % batch_size
    if leftover != 0:
        arr = np.append(arr, arr[:batch_size - leftover])

    arr = arr.reshape(batch_size, -1)
    n_cols = arr.shape[1]

    # Step by seq_len - 1 so adjacent windows overlap by one column.
    for start in range(0, n_cols - 2, seq_len - 1):
        if start + seq_len >= n_cols:
            # Not enough room for a full shifted pair: take the last
            # seq_len-wide window ending one short of the right edge,
            # with targets flush against the edge.
            yield arr[:, -seq_len - 1:-1], arr[:, -seq_len:]
        else:
            yield arr[:, start:start + seq_len], arr[:, start + 1:start + seq_len + 1]
# quick visual check: print every overlapped (x, y) window for a toy array
arr = np.array(range(0,28))
batch_size = 2
seq_len = 3
for item in get_batches_overlapped(arr, batch_size, seq_len):
    for rowx, rowy in zip(item[0], item[1]):
        print(rowx, '\n', rowy)
    print()
# ### Test Your Implementation
#
# Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
encoded  # bare expression: displays the encoded corpus in a notebook cell
batches = get_batches_overlapped(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
# If you implemented `get_batches` correctly, the above output should look something like
# ```
# x
# [[25 8 60 11 45 27 28 73 1 2]
# [17 7 20 73 45 8 60 45 73 60]
# [27 20 80 73 7 28 73 60 73 65]
# [17 73 45 8 27 73 66 8 46 27]
# [73 17 60 12 73 8 27 28 73 45]
# [66 64 17 17 46 7 20 73 60 20]
# [73 76 20 20 60 73 8 60 80 73]
# [47 35 43 7 20 17 24 50 37 73]]
#
# y
# [[ 8 60 11 45 27 28 73 1 2 2]
# [ 7 20 73 45 8 60 45 73 60 45]
# [20 80 73 7 28 73 60 73 65 7]
# [73 45 8 27 73 66 8 46 27 65]
# [17 60 12 73 8 27 28 73 45 27]
# [64 17 17 46 7 20 73 60 20 80]
# [76 20 20 60 73 8 60 80 73 17]
# [35 43 7 20 17 24 50 37 73 36]]
# ```
# although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
# ---
# ## Defining the network with PyTorch
#
# Below is where you'll define the network.
#
# <img src="assets/charRNN.png" width=500px>
#
# Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
# ### Model Structure
#
# In `__init__` the suggested structure is as follows:
# * Create and store the necessary dictionaries (this has been done for you)
# * Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)
# * Define a dropout layer with `drop_prob`
# * Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)
# * Finally, initialize the weights (again, this has been given)
#
# Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
# ---
# ### LSTM Inputs/Outputs
#
# You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows
#
# ```python
# self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
# dropout=drop_prob, batch_first=True)
# ```
#
# where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the `forward` function, we can stack up the LSTM cells into layers using `.view`. With this, you pass in a list of cells and it will send the output of one cell into the next cell.
#
# We also need to create an initial hidden state of all zeros. This is done like so
#
# ```python
# self.init_hidden()
# ```
# check if GPU is available and report which device training will use
train_on_gpu = torch.cuda.is_available()
message = ('Training on GPU!' if train_on_gpu
           else 'No GPU available, training on CPU; consider making n_epochs very small.')
print(message)
# +
class CharRNN(nn.Module):
    """Character-level LSTM language model.

    One-hot encoded character sequences are run through a stacked LSTM,
    dropout is applied to the outputs, and a linear layer projects every
    timestep onto a score per vocabulary character.
    """

    def __init__(self, tokens, n_hidden=256, n_layers=2,
                 drop_prob=0.5, lr=0.001):
        """
        tokens:     iterable of the unique characters in the corpus.
        n_hidden:   number of features in the LSTM hidden state.
        n_layers:   number of stacked LSTM layers.
        drop_prob:  dropout probability (between LSTM layers and on outputs).
        lr:         learning rate; stored for convenience, not used here.
        """
        super(CharRNN, self).__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr
        # Character <-> integer lookup tables.
        self.chars = tokens
        self.n_chars = len(tokens)
        self.int2char = dict(enumerate(self.chars))
        self.char2int = {ch: ii for ii, ch in self.int2char.items()}
        # batch_first=True: tensors are (batch, seq, feature).
        # dropout is applied between LSTM layers (all but the last).
        self.rnn = torch.nn.LSTM(input_size=self.n_chars,
                                 hidden_size=self.n_hidden,
                                 num_layers=self.n_layers,
                                 batch_first=True,
                                 dropout=self.drop_prob)
        # Map each timestep's LSTM features to a score per character.
        self.fc = torch.nn.Linear(self.n_hidden, self.n_chars)
        self.dropout = torch.nn.Dropout(p=drop_prob)

    def num_parameters(self):
        '''Return a string listing each parameter's shape plus the total count.'''
        s = ""
        for k, v in self.named_parameters():
            s += f'{k:20} {v.shape}\n'
        # Bug fix: count THIS model's parameters (self), not the global `net`.
        s += f'Total number of parameters = {sum(p.numel() for p in self.parameters())}'
        return s

    def forward(self, x, hidden):
        ''' Forward pass through the network.

        x:      (batch, seq_len, n_chars) one-hot input.
        hidden: tuple (h_n, c_n), each (n_layers, batch, n_hidden).
        Returns (logits, hidden) where logits is (batch*seq_len, n_chars).
        '''
        out, hidden = self.rnn(x, hidden)
        # out: (batch, seq_len, n_hidden)
        out = self.dropout(out)
        out = self.fc(out)
        # Flatten batch and time so every timestep is one row of scores.
        return out.view(-1, self.n_chars), hidden

    def init_hidden(self, batch_size):
        ''' Initialize the hidden state.

        Returns zeroed (h_0, c_0) tensors, each of shape
        (n_layers, batch_size, n_hidden), created on the same device and
        dtype as the model's parameters.
        '''
        weight = next(self.parameters()).data
        hidden = (weight.new_zeros(self.n_layers, batch_size, self.n_hidden),
                  weight.new_zeros(self.n_layers, batch_size, self.n_hidden))
        return hidden
# -
# get the 1st batch
batches = get_batches(encoded, 2, 50)
x, y = next(batches)
print(x.shape)
x, y
# one hot encode the data
x = one_hot_encode(x, len(chars))
print('Vocab size ', len(chars))
print(x.shape)
# the 1st element is 8 and we see the one on 9th position
x[0,1]
# create network and send it to device
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#device = 'cpu'
print(device)
net = CharRNN(chars).to(device)
net
# make an input tensor
tr_x = torch.FloatTensor(x).to(device)
tr_x[0,1]
# initialize the hidden state
hidden = net.init_hidden(2)
hidden
hidden[0].requires_grad
out, h = net(tr_x,hidden)
out.shape, h[0].shape, h[1].shape
out[:1].sum()
print(net.num_parameters())
# ## Time to train
#
# The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
#
# Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
#
# A couple of details about training:
# * Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
# * We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
a = torch.ones(4)
a.requires_grad
b = a + 4
b.requires_grad
# +
from tqdm.notebook import tqdm, trange
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10, val_loss_min=np.inf):
    ''' Training a network
    Arguments
    ---------
    net: CharRNN network
    data: text data to train the network
    epochs: Number of epochs to train
    batch_size: Number of mini-sequences per mini-batch, aka batch size
    seq_length: Number of character steps per mini-batch
    lr: learning rate
    clip: gradient clipping
    val_frac: Fraction of data to hold out for validation
    print_every: Number of steps for printing training and validation loss
    val_loss_min: best validation loss so far; checkpoints are written only
                  when the running mean validation loss beats this value.
    Returns the best (lowest) validation loss observed.
    '''
    net.train()
    opt = torch.optim.Adam(net.parameters(), lr=lr)
    #opt = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, nesterov=True)
    #opt = torch.optim.RMSprop(net.parameters(), lr=lr, momentum=0.9)
    # Cosine LR schedule; T_max is sized to roughly 5 epochs' worth of steps.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=opt, T_max = 5*len(data)//(batch_size*seq_length), eta_min=1e-7)
    criterion = nn.CrossEntropyLoss()
    # create training and validation data (validation taken from the FRONT of data)
    # val_idx = int(len(data)*(1-val_frac))
    # data, val_data = data[:val_idx], data[val_idx:]
    val_idx = int(len(data)*(val_frac))
    val_data, data = data[:val_idx], data[val_idx:]
    if(train_on_gpu):
        net.cuda()
    counter = 0
    n_chars = len(net.chars)
    for e in trange(epochs):
        # initialize hidden state once per epoch; it is carried across batches
        h = net.init_hidden(batch_size)
        for x, y in get_batches(data, batch_size, seq_length):
            counter += 1
            # One-hot encode our data and make them Torch tensors
            x = one_hot_encode(x, n_chars)
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            if(train_on_gpu):
                inputs, targets = inputs.cuda(), targets.cuda()
            # Creating new variables for the hidden state, otherwise
            # we'd backprop through the entire training history
            h = tuple([each.data for each in h])
            # basically the same stuff
            # h = tuple([each.detach() for each in h])
            # zero accumulated gradients
            net.zero_grad()
            # get the output from the model
            output, h = net(inputs, h)
            # calculate the loss and perform backprop
            # output is (batch*seq_len, n_chars); targets flattened to match.
            loss = criterion(output, targets.view(batch_size*seq_length).long())
            loss.backward()
            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            opt.step()
            scheduler.step()
            # loss stats
            if counter % print_every == 0:
                # Get validation loss
                val_h = net.init_hidden(batch_size)
                val_losses = []
                net.eval()
                with torch.no_grad():
                    for x, y in get_batches_overlapped(val_data, batch_size, seq_length):
                        # One-hot encode our data and make them Torch tensors
                        x = one_hot_encode(x, n_chars)
                        x, y = torch.from_numpy(x), torch.from_numpy(y)
                        inputs, targets = x, y
                        if(train_on_gpu):
                            inputs, targets = inputs.cuda(), targets.cuda()
                        # Creating new variables for the hidden state, otherwise
                        # we'd backprop through the entire training history
                        # val_h = tuple([each.data for each in val_h])
                        output, val_h = net(inputs, val_h)
                        # NOTE(review): unlike the training loss above, targets are
                        # not cast to .long() here — presumably y is already int64;
                        # confirm against get_batches_overlapped.
                        val_loss = criterion(output, targets.view(batch_size*seq_length))
                        val_losses.append(val_loss.item())
                    # Checkpoint only when mean validation loss improves.
                    if np.mean(val_losses) < val_loss_min:
                        print('Validation loss decreases, saving model parameters')
                        torch.save(net.state_dict(),'anna_model.pt')
                        val_loss_min = np.mean(val_losses)
                net.train() # reset to train mode after iterating through validation data
                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Step: {}...".format(counter),
                      "Loss: {:.4f}...".format(loss.item()),
                      "Val Loss: {:.4f}".format(np.mean(val_losses)))
    return val_loss_min
# -
# ## Instantiating the model
#
# Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!
# +
## TODO: set your model hyperparameters
# define and print the net
n_hidden = 256
n_layers = 3
net = CharRNN(chars, n_hidden, n_layers, drop_prob=0.5)
print(net, f'\n\nNumber of parameters in network : {net.num_parameters()}', f'\n\nNumber of chars in text : {len(text):,}')
# -
# ### Set your training hyperparameters!
net.load_state_dict(torch.load('anna_model.pt'))
#val_loss = 1.3498
val_loss = 1.1958
val_loss = 1.2043
# +
batch_size = 10
seq_length = 140
n_epochs = 50 # start small if you are just testing initial behavior
# train the model
val_loss = train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=1000, val_loss_min=val_loss, val_frac=0.1)
print(val_loss)
# -
# ## Getting the best model
#
# To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
# ## Hyperparameters
#
# Here are the hyperparameters for the network.
#
# In defining the model:
# * `n_hidden` - The number of units in the hidden layers.
# * `n_layers` - Number of hidden LSTM layers to use.
#
# We assume that dropout probability and learning rate will be kept at the default, in this example.
#
# And in training:
# * `batch_size` - Number of sequences running through the network in one pass.
# * `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
# * `lr` - Learning rate for training
#
# Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
#
# > ## Tips and Tricks
#
# >### Monitoring Validation Loss vs. Training Loss
# >If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
#
# > - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
# > - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
#
# > ### Approximate number of parameters
#
# > The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
#
# > - The number of parameters in your model. This is printed when you start training.
# > - The size of your dataset. 1MB file is approximately 1 million characters.
#
# >These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
#
# > - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.
# > - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
#
# > ### Best models strategy
#
# >The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
#
# >It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
#
# >By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
# ## Checkpoint
#
# After training, we'll save the model so we can load it again later if we need to. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
# +
# change the name, for saving multiple files
model_name = 'rnn_x_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
# -
# ---
# ## Making Predictions
#
# Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
#
# ### A note on the `predict` function
#
# The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.
#
# > To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.
#
# ### Top K sampling
#
# Our predictions come from a categorical probability distribution over all the possible characters. We can sample text and make it more reasonable to handle (with fewer variables) by only considering some $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).
#
# ### Show how it is done step by step
x = np.array([[net.char2int['B']]])
x
x = one_hot_encode(x, net.n_chars)
x
# returns a tuple of hidden state and cell state
h = net.init_hidden(1)
len(h), h[0].shape, h[1].shape
inp = torch.from_numpy(x).to(device)
inp.is_cuda
# hidden state is already on the device where the model is
h[0].is_cuda
net.eval()
with torch.no_grad():
next_char, h = net(inp,h)
next_char
next_char = F.softmax(next_char.squeeze_().to('cpu'), dim=0)
p, char = next_char.topk(5)
p, char
next_char = np.random.choice(char.numpy(), p=(p/p.sum()).numpy())
net.int2char[next_char]
def predict(net, char, h=None, top_k=None):
    """Feed one character through the net and sample the next one.

    Returns (predicted_character, new_hidden_state).
    """
    # Encode the input character as a one-hot tensor.
    encoded = np.array([[net.char2int[char]]])
    encoded = one_hot_encode(encoded, len(net.chars))
    inputs = torch.from_numpy(encoded)
    if train_on_gpu:
        inputs = inputs.cuda()

    # Forward pass without gradient tracking.
    with torch.no_grad():
        out, h = net(inputs, h)

    # Convert scores to a probability distribution over characters.
    p = F.softmax(out, dim=1)
    if train_on_gpu:
        p = p.cpu()

    # Optionally restrict sampling to the top_k most probable characters.
    if top_k is None:
        top_ch = np.arange(len(net.chars))
    else:
        p, top_ch = p.topk(top_k)
        top_ch = top_ch.numpy().squeeze()

    # Sample the next character index, weighted by renormalized probability.
    p = p.numpy().squeeze()
    choice = np.random.choice(top_ch, p=p / p.sum())
    return net.int2char[choice], h
# ### Priming and generating text
#
# Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
def sample(net, size, prime='The', top_k=None):
    """Generate `size` characters of text, seeded with the `prime` string."""
    # Place the model on the right device and switch to evaluation mode.
    if train_on_gpu:
        net.cuda()
    else:
        net.cpu()
    net.eval()

    # Warm up the hidden state by running through the priming characters.
    generated = list(prime)
    h = net.init_hidden(1)
    for ch in prime:
        char, h = predict(net, ch, h, top_k=top_k)
    generated.append(char)

    # Repeatedly feed back the last character to extend the text.
    for _ in range(size):
        char, h = predict(net, generated[-1], h, top_k=top_k)
        generated.append(char)

    return ''.join(generated)
print(sample(net, 200, prime='The ', top_k=10))
# ## Loading a checkpoint
# + jupyter={"outputs_hidden": true}
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_x_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# -
loaded = CharRNN(chars, n_hidden, n_layers, drop_prob=0.5)
loaded.load_state_dict(torch.load('anna_model.pt'))
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
# + jupyter={"outputs_hidden": true}
| [
"pa151501@mail.ru"
] | pa151501@mail.ru |
d7ce82cd92dd0e5ae2c3e33a2bbb2f04c5a3d44b | 987697512ce9b8d7c29bfd2f18d5aec0261a6863 | /最长回文串.py | 8b40ab6ad9297a3c3ba8188befafb8af968ff812 | [] | no_license | Luckyaxah/leetcode-python | 65e7ff59d6f19312defdc4d4b4103c39193b198a | 2b9c78ba88e7bf74a46a287fb1914b4d6ba9af38 | refs/heads/master | 2023-06-05T12:15:31.618879 | 2021-06-22T13:05:30 | 2021-06-22T13:05:30 | 262,287,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | class Solution:
def longestPalindrome(self, s: str) -> int:
d = {}
for i in s:
if not i in d:
d[i] = 1
else:
d[i] += 1
ret = 0
m = 0
for i in d:
if d[i] % 2 ==0:
ret += d[i]
else:
ret += d[i]-1
if ret < len(s):
ret += 1
return ret
if __name__ == "__main__":
    # Ad-hoc smoke test: print the longest-palindrome length for this string.
    a = Solution()
    print(a.longestPalindrome("civilwartestingwhee"))
"math_leqi@163.com"
] | math_leqi@163.com |
5e051a55c197cdb3591a9123e13532896aadb30b | 46a9527550716765c28f61d015d1526d5b918a54 | /users/views.py | df9c118e4c6d53f3520736474489e1ac8e61d9e8 | [] | no_license | Maratan/DZ | 1ede6d25478936aa9a6e64e0d90dac1b6601aca1 | 1a7bd32a7c8d62e9a48597a0daf85d1359c7fd55 | refs/heads/master | 2021-01-11T10:55:30.067437 | 2016-12-11T10:09:11 | 2016-12-11T10:09:11 | 76,165,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | from django.contrib.auth import login
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib import auth
from django.http import HttpResponseRedirect
from django.views.generic.edit import FormView
from .models import User
class login_f(FormView):
    """Login view: authenticates a user via Django's AuthenticationForm."""
    form_class = AuthenticationForm
    # Same as registration, but rendered with the authentication template.
    template_name = "entry.html"
    # On success, redirect to the home page.
    success_url = "/"
    def form_valid(self, form):
        # Get the user object from the validated form data.
        self.user = form.get_user()
        # Log the user in (attach them to the session).
        login(self.request, self.user)
        return super(login_f, self).form_valid(form)
def logout(request):
    """Log the current user out and redirect to the home page."""
    auth.logout(request)
    return HttpResponseRedirect('/')
class reg(FormView):
    """Registration view: creates a new user via Django's UserCreationForm."""
    form_class = UserCreationForm
    # URL the user is redirected to after successful registration.
    # Here it points at the page for already-registered users.
    success_url = "/"
    # Template used to render this view.
    template_name = "reg.html"
    def form_valid(self, form):
        # Create the user if the submitted form data is valid.
        form.save()
        # Delegate the redirect to the base class implementation.
        return super(reg, self).form_valid(form)
| [
"noreply@github.com"
] | noreply@github.com |
e06ad4aba91f9778ff9a8b6e7115461c5bf9d169 | dab9557fe8ee3e09a38743dfb970e72ee828cceb | /practice problems/p3.py | bcaf2f18c67d0f1559958b052f02ab46ba37aee0 | [] | no_license | Namanvijay/CP | 160b98b70a298584fbccdb153430c7731d04d80f | 1540636d84f1b2dd67767a2cad0552f9ef507504 | refs/heads/master | 2022-04-22T21:17:19.737761 | 2020-04-25T04:41:28 | 2020-04-25T04:41:28 | 258,688,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | s=input("Enter string:")
n=int(input(" No of Elements to remove "))
w=input("Elements to remove")
w=w.split(' ')
s=s.split(' ')
for i,j in enumerate(s):
if j in w:
s[i]="*"*len(j)
s=' '.join(s)
print(s)
| [
"namanvj26"
] | namanvj26 |
50200dd19d5504549379f554180fe7c6e288d37b | b09da8853a01164153499b949e5737cbf802f1fd | /basic/first.py | 1734338e4353f7f7b3b5a8c636d2b5f4dd9cb7e6 | [] | no_license | SHPStudio/LearnPython | e060d91f161b4a4359fae8bd56576ee99dc9a4e8 | 57cdfb68d2fb782e0947d0f93b99b55231043e15 | refs/heads/master | 2021-09-03T15:52:19.289533 | 2017-11-22T11:23:04 | 2017-11-22T11:23:04 | 108,791,407 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,613 | py | # 输入 name = input()
# 输出 print('name', name)
# a = -100 赋值
# 如果以:为结尾的话那么就代表缩进的为代码块
# if a > 0:
# print('大于0', a)
# else:
# print('小于0', -a)
# 如果要转义的字符串特别的多 可以用r''去表示不要转义字符串
# print(r'/r/s/e')
# 使用'''...''' 来输出多行数据
# print('''shpshp2
# shp3''')
# python代表Null是None
# python中的常量是全部大写的比如 PI=3.1415926
# 除法
# / 除法 就算是整除也是浮点数
# print(9/3)
# print(10/3)
#
# // 地板擦除 只保留整数
# print(10//3)
# 中文 python3使用的是unicode字符
# print('我的是')
# ord('A') ord是把字符转换为整数标识 chr()是把整数转换为字符
# print(ord('A'))
# print(chr(65))
# 字符的转码 比如如果要进行网络传输或者写入磁盘的话一般要把字符转换成字节byte流
# 把字符串转换成以字节为单位的bytes 每个字符只占一个字节 也就是ascii码
# x = b'abc'
# print(x)
# asc = 'ABC'.encode('ascii')
# unic = '中文'.encode('utf-8')
# asczh = '中文'.encode('ascii') # 如果用ascii码的话就会报错因为中文要占用两个字节
# print(unic)
# 字符的解码 从网络上获取到流或者从磁盘上获取到流需要对流进行解码
# bstream = b'\xe4\xb8\xad\xe6\x96\x87'
# dstream = bstream.decode('utf-8')
# print(dstream)
# 计算字符长度
# strLength = len('asdfadgadsg')
# print(strLength)
# 如果计算流的话就变成计算字节数了
# streamLength = len(b'sdfkljgwe')
# streamLengthZH = len('中文') # 2字符数
# streamLengthZh = len('中文'.encode('utf-8')) # 6字节数
# print(streamLengthZh)
# 避免乱码 因为python的源代码也是文本文件所以也需要进行编码的设置
# 在文件的开头加上
#!/usr/bin/env python3 # 这一行代表告诉Linux的操作系统这是python3的可执行文件
# -*- coding: utf-8 -*- # 告诉python解释器要用utf-8来编码
# 字符串格式化 就像c语言中的 %d %s
# %s 字符串
# %d 整数
# %x 十六进制数
# %f 浮点数
# 有几个%? 就对应几个变量 如果只有一个%? 那么就不需要写括号了 用% 来分割变量与要格式化的字符串
# print('hello %s my name is %s' % ('sunhaipeng','sunhaipeng2'))
# 整数可以用 %d %02d 0代表是否补0 2代表格式化的位数
# 浮点数也是一样 %.2
# 如果想要表示 30% 的 % 怎么办 那么就用 %% 来表示 %
# print('%2d-%03d' % (3,5))
# print('%.4f' % 3.03)
# print('%02d%%' % 3)
# list 列表 它是有序的 可以随便的添加和删除 用[]来表示
# customList = ['Mical','Mary','Jack']
# len(xx) 可以计算列表的长度
# print(len(customList))
# 可以和其他语言一样按照索引去访问列表中的元素 从0开始 如果超出索引范围会报错
# print(customList[0])
# print(customList[1])
# print(customList[2])
# print(customList[3])
# 索引为负数的时候表示按照倒序来访问 比如-1 代表访问倒数第一个元素 -2 -3 以此类推 注意倒序也有索引访问报错
# print(customList[-1])
# print(customList[-2])
# print(customList[-3])
# 往列表里添加元素用xxx.append('xxx')
# customList.append('NewOne')
# 指定位置添加xxx.insert(index,element)
# customList.insert(2,'newone')
# 删除末尾元素 使用pop()函数 还可以删除指定位置元素pop(index)
# print(customList.pop())
# 列表里可以添加列表也就可以来组成多维的列表
# customList.append(['imOk','imNotOk'])
# print(customList)
# 列表里的元素也可以为不同类型的元素
# customList.append(True)
# customList.append(123)
# customList.append(3.1546)
# print(customList)
# tuple 元组 用()表示 它与list区别就是他的元素是不可变的 同时它也没有insert等等的操作函数
# customTuple = (1,2,3)
# print(customTuple)
# 可变的元组 其实往元组里放入list 改变list中的值是可以的 只是不能修改元组的元素 改变元素的引用中的值是可以的
# 这也就变相改变了元组中的元素的值
# 能用tuple的地方就用tuple因为比较的安全
# 条件判断
# a = 28
# if a > 18:
# print('大于18')
# elif a <= 18 & a > 9:
# print('大于9小于18')
# else:
# print('小于9')
# 用户输入判断 用户输入的都当做字符串来处理 所以和数字比较的时候需要做类型转化 用int()函数
# birth = input('birth:')
# birth = int(birth)
# if birth > 2000:
# print('是00后')
# else:
# print('00前')
# 循环迭代 使用 for item in collection:
# customList = ['Mical','Mary','Jack']
# for item in customList:
# print(item)
# 生成数列 如果写一个从0到1000的数列手写的话太麻烦
# rang(number) 这个函数可以生成从0到number-1的整数数列 然后可以使用list() 把它转换成列表 这样就容易多了
# createList = list(range(101))
# sum = 0
# for item in createList:
# sum += item
# print(sum)
# while 循环
# n = 99
# sum = 0
# while n > 0:
# sum += n
# n = n - 2
# print(sum)
# break 和 continue的用法和其他语言一样 break结束循环 continue结束当前循环
# 字典 dict 也就是其他语言的map 用{}表示 以键值对的形式 并且插入key的顺序跟存储顺序不一致 是无序的
# customDict = {'mical' : 95, 'mary' : 100, 'shp' : 101}
# print(customDict['shp'])
# key只准有一个 如果有重复的key 后面的会把前面的值冲掉 并且如果key没有那么会报错
# 可以直接给key赋值
# customDict['shp2'] = 123
# 如果防止没有key会报错可以提前检查一下字典里是否有key
# print('Toms' in customDict)
# 还有一种办法就是使用dict.get('xxx') 这种如果没有这个key的话就会返回一个None值
# print(customDict.get('Tomca'))
# 删除字典中的值dict.pop('xxx')
# customDict.pop('shp2')
# print(customDict)
# set 是一组key的集合 不存在重复的key 用set([])表示 set其实就是一个函数 他可以把相应的不可变集合转换成set 只要集合中有可变元素 那么就会报错
# customSet = set([1,2,3,5,4,2])
# print(customSet)
# 可以使用add(element)和remove(element) 去添加元素和删除元素
# customSet.add('56454')
# print(customSet)
# customSet.remove('56454')
# print(customSet)
# set可以理解为数学事实上的无序集合 可以对set进行交集和并集的操作
# firstSet = set([1,2,3,4])
# secondSet = set([2,3,5,6])
# print(firstSet & secondSet) # 交集
# print(firstSet | secondSet) # 并集
# set和dict一样 都不能放入可变的key 比如把list作为key的话就会报错 | [
"sunhaipeng@jd.com"
] | sunhaipeng@jd.com |
2098d21ee99832c33038bc0f47ec05606c5c0ad2 | 9dc23b64eb10f0fdc7147b5d2ebb3621405c58cc | /joint_model_knowl_v7_with_domain_knowledge_context_window/joint_intent_slots_knowledge_domain_predict.py | fa68fccd03fb6bf241dffa365350904ff5a1ba8c | [] | no_license | Xnsam/slot_filling_intent_joint_model | b98f23d3787fdb80687e673582fd6ad847aea68b | f011729ae3b7726e0a0f3d5a99755b89c70d5d7d | refs/heads/master | 2020-03-21T20:12:31.895920 | 2018-06-28T09:37:07 | 2018-06-28T09:37:07 | 138,993,544 | 0 | 0 | null | 2018-06-28T09:02:06 | 2018-06-28T09:02:05 | null | UTF-8 | Python | false | false | 13,578 | py | # -*- coding: utf-8 -*-
#prediction using model.process--->1.load data. 2.create session. 3.feed data. 4.predict
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import tensorflow as tf
import numpy as np
import os
from joint_intent_slots_knowledge_domain_model import joint_knowledge_domain_model
from a1_data_util import *
import math
import pickle
#configuration
FLAGS=tf.app.flags.FLAGS
tf.app.flags.DEFINE_float("learning_rate",0.001,"learning rate")
tf.app.flags.DEFINE_integer("batch_size", 1, "Batch size for training/evaluating.") #批处理的大小 32-->128
tf.app.flags.DEFINE_integer("decay_steps", 1000, "how many steps before decay learning rate.") #6000批处理的大小 32-->128
tf.app.flags.DEFINE_float("decay_rate", 0.99, "Rate of decay for learning rate.") #0.87一次衰减多少
tf.app.flags.DEFINE_string("ckpt_dir","checkpoint_67800/","checkpoint location for the model")
tf.app.flags.DEFINE_integer("sequence_length",25,"max sentence length") #100
tf.app.flags.DEFINE_integer("embed_size",128,"embedding size")
tf.app.flags.DEFINE_boolean("is_training",False,"is traning.true:tranining,false:testing/inference")
tf.app.flags.DEFINE_integer("num_epochs",10,"number of epochs to run.")
tf.app.flags.DEFINE_integer("validate_step", 1000, "how many step to validate.") #1000做一次检验
tf.app.flags.DEFINE_integer("hidden_size",128,"hidden size")
tf.app.flags.DEFINE_float("l2_lambda", 0.0001, "l2 regularization")
tf.app.flags.DEFINE_boolean("enable_knowledge",True,"whether to use knwoledge or not.")
tf.app.flags.DEFINE_string("knowledge_path","knowledge_67800","file for data source") #skill3_train_20171114.txt
tf.app.flags.DEFINE_string("data_source","knowledge_67800/training_data _10w.txt","file for data source") #knowledge/sht_20171125.txt
tf.app.flags.DEFINE_boolean("test_mode",False,"whether use test mode. if true, only use a small amount of data")
tf.app.flags.DEFINE_string("validation_file","wzz_training_data_20171211_20w_validation.txt","validation file")
# create session and load the model from checkpoint
# load vocabulary for intent and slot name
# Forward maps (token -> id) come from the knowledge path; reverse maps are
# built by inverting them for decoding predictions back to text.
word2id = create_or_load_vocabulary(None,FLAGS.knowledge_path)
id2word = {value: key for key, value in word2id.items()}
word2id_intent = create_or_load_vocabulary_intent(None,FLAGS.knowledge_path)
id2word_intent = {value: key for key, value in word2id_intent.items()}
word2id_domain= create_or_load_vocabulary_domain(None,FLAGS.knowledge_path)
id2word_domain = {value: key for key, value in word2id_domain.items()}
word2id_slotname = create_or_load_vocabulary_slotname_save(None,FLAGS.knowledge_path)
id2word_slotname = {value: key for key, value in word2id_slotname.items()}
knowledge_dict=load_knowledge(FLAGS.knowledge_path)
# Question/answer pairs used by the similarity branch of the model.
basic_pair=FLAGS.knowledge_path+'/raw_data.txt'
q2a_dict,a2q_dict,q_list,q_list_index=process_qa(basic_pair,word2id,FLAGS.sequence_length)
intent_num_classes=len(word2id_intent)
domain_num_classes=len(word2id_domain)
vocab_size=len(word2id)
slots_num_classes=len(id2word_slotname)
# Allow GPU memory to grow on demand instead of pre-allocating it all.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Inference runs one sentence at a time.
FLAGS.batch_size = 1
sequence_length_batch = [FLAGS.sequence_length] * FLAGS.batch_size
S_Q_len=len(q_list_index)
print("S_Q_len:",S_Q_len)
model = joint_knowledge_domain_model(intent_num_classes, FLAGS.learning_rate, FLAGS.decay_steps, FLAGS.decay_rate,
                    FLAGS.sequence_length, vocab_size, FLAGS.embed_size, FLAGS.hidden_size,
                    sequence_length_batch, slots_num_classes, FLAGS.is_training,domain_num_classes,S_Q_len=S_Q_len)
# initialize Saver and restore the latest checkpoint into the session.
saver = tf.train.Saver()
print('restoring Variables from Checkpoint!')
saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpt_dir))
# Load slot values as a user dictionary so jieba segments them as whole words.
slot_values_file = FLAGS.knowledge_path+'/slot_values.txt'
jieba.load_userdict(slot_values_file)
def main(_):
    """Run a single hard-coded prediction as a sanity check, then drop into
    the interactive prediction loop."""
    sentence=u'开灯' #u'帮我打开厕所的灯'
    #indices=[240, 277, 104, 274, 344, 259, 19, 372, 235, 338, 338, 338, 338, 338, 338] #[283, 180, 362, 277, 99, 338, 338, 338, 338, 338, 338, 338, 338, 338, 338] #u'帮我把客厅的灯打开'
    # predict() returns: intent label, its logits, slots as a dict, slots as an
    # ordered list, the similarity-model result list, and the domain label.
    intent,intent_logits, slots,slot_list,similiarity_list_result,domain=predict(sentence)
    print(sentence)
    print('intent:{},intent_logits:{},domain:{}'.format(intent, intent_logits,domain))
    for slot_name,slot_value in slots.items():
        print('slot_name:{},slot_value:{}'.format(slot_name, slot_value))
    for i,element in enumerate(slot_list):
        slot_name,slot_value=element
        print('slot_name:{},slot_value:{}'.format(slot_name, slot_value))
    #accuracy_similiarity, accuracy_classification=accuarcy_for_similiarity_validation_set()
    #print("accuracy_similiarity:",accuracy_similiarity,";accuracy_classification:",accuracy_classification)
    # Hand control to the interactive REPL-style prediction loop.
    predict_interactive()
def accuarcy_for_similiarity_validation_set():#read validation data from outside file, and compute accuarcy for classification model and similiarity model
    """Evaluate both the classification model and the similarity model on a
    validation file read from disk.

    Returns (accuracy_similiarity, accuracy_classification).
    """
    #1.get validation set: {sentence -> {'intent': ..., ...}}
    source_file_name=FLAGS.knowledge_path+"/" +FLAGS.validation_file
    dict_pair=generate_raw_data(source_file_name, test_mode=False, knowledge_path=FLAGS.knowledge_path, target_file=source_file_name+'_raw_data')
    #2.loop each data
    count_similiarity_right=0
    count_classification_right=0
    len_validation=len(dict_pair)
    i=0
    for sentence,value in dict_pair.items():
        #3.call predict
        intent, intent_logits, slots, slot_list, similiarity_list_result,domain = predict(sentence)
        y_intent_target=value['intent']
        # The similarity model's top-ranked intent is its prediction.
        similiar_intent=similiarity_list_result[0]
        if similiar_intent ==y_intent_target:
            count_similiarity_right+=1
        if intent==y_intent_target:
            count_classification_right+=1
        # Print running accuracies every 10 examples.
        if i%10==0:
            print(i,"count_similiarity_right%:",str(float(count_similiarity_right)/float(i+1)),";count_classification_right%:",str(float(count_classification_right)/float(i+1)))
            print('sentence:{},y_intent_target:{},intent_classification:{},intent_similiar:{}'.format(sentence,y_intent_target,intent,similiar_intent))
        i=i+1
        #4.get accuracy
    accuracy_similiarity=float(count_similiarity_right)/float(len_validation)
    accuracy_classification = float(count_classification_right) / float(len_validation)
    return accuracy_similiarity,accuracy_classification
def accuarcy_for_similiarity_validation_setX(): #read cached validation data
    """Evaluate both the classification model and the similarity model on the
    cached validation split produced by generate_training_data.

    Returns (accuracy_similiarity, accuracy_classification).
    """
    #1.get validation set
    traing_data, valid_data, test_data, vocab_size, intent_num_classes, slots_num_classes = generate_training_data(FLAGS.data_source,FLAGS.knowledge_path,FLAGS.test_mode,sequence_length=FLAGS.sequence_length)
    x_valid, y_intent_valid, y_slots_valid = valid_data
    #2.loop each data
    count_similiarity_right=0
    count_classification_right=0
    len_validation=len(x_valid)
    for i, x_indices in enumerate(x_valid):
        y_intent=y_intent_valid[i]
        sentence=get_sentence_from_index(x_indices)
        #3.call predict
        # Bug fix: the last value returned by predict() is the domain label;
        # it was previously unpacked into a local named `model`, shadowing the
        # module-level TensorFlow model object.
        intent, intent_logits, slots, slot_list, similiarity_list_result, domain = predict(sentence)
        y_intent_target=id2word_intent[y_intent]
        # The similarity model's top-ranked intent is its prediction.
        similiar_intent=similiarity_list_result[0]
        if similiar_intent ==y_intent_target:
            count_similiarity_right+=1
        if intent==y_intent_target:
            count_classification_right+=1
        # Print running accuracies every 10 examples.
        if i%10==0:
            print(i,"count_similiarity_right%:",str(float(count_similiarity_right)/float(i+1)),";count_classification_right%:",str(float(count_classification_right)/float(i+1)))
            print('sentence:{},y_intent_target:{},intent_classification:{},intent_similiar:{}'.format(sentence,y_intent_target,intent,similiar_intent))
    #4.get accuracy
    accuracy_similiarity=float(count_similiarity_right)/float(len_validation)
    accuracy_classification = float(count_classification_right) / float(len_validation)
    return accuracy_similiarity,accuracy_classification
def get_sentence_from_index(x_indices):
sentence=[ id2word.get(index,UNK) for index in x_indices]
sentence="".join(sentence)
return sentence
def predict(sentence,enable_knowledge=1):
"""
:param sentence: a sentence.
:return: intent and slots
"""
#print("FLAGS.knowledge_path====>:",FLAGS.knowledge_path)
sentence_indices=index_sentence_with_vocabulary(sentence,word2id,FLAGS.sequence_length,knowledge_path=FLAGS.knowledge_path)
y_slots= get_y_slots_by_knowledge(sentence,FLAGS.sequence_length,enable_knowledge=enable_knowledge,knowledge_path=FLAGS.knowledge_path)
#print("predict.y_slots:",y_slots)
qa_list_length=len(q_list_index)
feed_dict = {model.x: np.reshape(sentence_indices,(1,FLAGS.sequence_length)),
model.y_slots:np.reshape(y_slots,(1,FLAGS.sequence_length)),
model.S_Q:np.reshape(q_list_index,(qa_list_length,FLAGS.sequence_length)), #should be:[self.S_Q_len, self.sentence_len]
model.dropout_keep_prob:1.0}
logits_intent,logits_slots,similiarity_list,logits_domain = sess.run([model.logits_intent,model.logits_slots,model.similiarity_list,model.logits_domain], feed_dict) #similiarity_list:[1,None]
intent,intent_logits,slots,slot_list,similiarity_list_result,domain=get_result(logits_intent,logits_slots,sentence_indices,similiarity_list,logits_domain)
return intent,intent_logits,slots,slot_list,similiarity_list_result,domain
def get_y_slots_by_knowledge(sentence,sequence_length,enable_knowledge=1,knowledge_path=None):
"""get y_slots using dictt.e.g. dictt={'slots': {'全部范围': '全', '房间': '储藏室', '设备名': '四开开关'}, 'user': '替我把储藏室四开开关全关闭一下', 'intent': '关设备<房间><全部范围><设备名>'}"""
#knowledge_dict=#{'储藏室': '房间', '全': '全部范围', '四开开关': '设备名'}
user_speech_tokenized=tokenize_sentence(sentence,knowledge_path=knowledge_path) #['替', '我', '把', '储藏室', '四开', '开关', '全', '关闭', '一下']
result=[word2id_slotname[O]]*sequence_length
if enable_knowledge=='1' or enable_knowledge==1:
for i,word in enumerate(user_speech_tokenized):
slot_name=knowledge_dict.get(word,None)
#print('i:{},word_index:{},word:{},slot_name:{}'.format(i,word,id2word.get(word,UNK),slot_name))
if slot_name is not None:
try:
result[i]=word2id_slotname[slot_name]
except:
pass
return result
def predict_interactive():
sys.stdout.write("Please Input Story.>")
sys.stdout.flush()
question = sys.stdin.readline()
enable_knowledge=1
while question:
if question.find("disable_knowledge")>=0:
enable_knowledge=0
print("knowledge disabled")
print("Please Input Story>")
sys.stdout.flush()
question = sys.stdin.readline()
elif question.find("enable_knowledge")>=0:
enable_knowledge=1
#3.read new input
print("knowledge enabled")
print("Please Input Story>")
sys.stdout.flush()
question = sys.stdin.readline()
#1.predict using quesiton
intent, intent_logits,slots,slot_list,similiarity_list,domain=predict(question,enable_knowledge=enable_knowledge)
#2.print
print('意图:{},置信度:{}'.format(intent, intent_logits))
print('技能:{}'.format(domain))
#for i,similiarity in enumerate(similiarity_list):
# print('i:{},similiarity:{}'.format(i, similiarity))
for slot_name, slot_value in slots.items():
print('slot_name:{}-->slot_value:{}'.format(slot_name, slot_value))
#for i, element in enumerate(slot_list):
# slot_name, slot_value = element
# print('slot_name:{},slot_value:{}'.format(slot_name, slot_value))
#3.read new input
print("Please Input Story>")
sys.stdout.flush()
question = sys.stdin.readline()
def get_result(logits_intent,logits_slots,sentence_indices,similiarity_list,logits_domain,top_number=3):
index_intent= np.argmax(logits_intent[0]) #index of intent
intent_logits=logits_intent[0][index_intent]
#print("intent_logits:",index_intent)
intent=id2word_intent[index_intent]
index_domain=np.argmax(logits_domain[0]) #index of domain
domain=id2word_domain[index_domain]
slots=[]
indices_slots=np.argmax(logits_slots[0],axis=1) #[sequence_length]
for i,index in enumerate(indices_slots):
slots.append(id2word_slotname[index])
slots_dict={}
slot_list=[]
for i,slot in enumerate(slots):
word=id2word[sentence_indices[i]]
#print(i,"slot:",slot,";word:",word)
if slot!=O and word!=PAD and word!=UNK:
slots_dict[slot]=word
slot_list.append((slot,word))
#get top answer for the similiarity list.
similiarity_list_top = np.argsort(similiarity_list)[-top_number:]
similiarity_list_top = similiarity_list_top[::-1]
similiarity_list_result=[]
print('最相关问题')
for k,index in enumerate(similiarity_list_top):
question=q_list[index]
answer=q2a_dict[question]
similiarity_list_result.append(answer)
print('index:{}.问题{}:{}, intent:{}'.format(index,k,question, answer))
return intent,intent_logits,slots_dict,slot_list,similiarity_list_result,domain
if __name__ == "__main__":
tf.app.run() | [
"xul@fitme.ai"
] | xul@fitme.ai |
c782dbe3a6069af1298cc9d88de6368616befaf6 | cb640818860c6388fc659a80a24824c5ec9afc8a | /AutoApiTest/tests/TestScript/infoApi/test_info.py | 0b1399c85b247163ed4e0a135857c7c28bffd5ce | [] | no_license | anqianqi5227/apiAutoTest | 211d16829b65ff4310b43eea3f1c366b5eac2073 | 7a2f5b2292b73fed5e27cfdfb6e866896f1e7dc1 | refs/heads/main | 2023-06-10T20:38:56.702181 | 2021-07-01T03:34:13 | 2021-07-01T03:34:13 | 381,894,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,781 | py | import pytest
import allure
from common.unit.initialize_yaml import ini_yaml
from common.unit.initialize_relevance import read_relevance
from common.unit.api_send_check import api_send_check
from common.unit.setupTest import setupTest
from Main import root_path, case_level, product_version
case_path = root_path + "/tests/TestCases/infoApi"
relevance_path = root_path + "/common/config_module/relevance"
case_dict = ini_yaml(case_path, "info")
@allure.feature(case_dict['test_info']['title'])
class TestInfo:
@pytest.fixture(scope='class')
def setupClass(self):
# 获取用例初始关联值
self.rel = read_relevance(path=case_path, key='relevance')
return self.rel
@pytest.mark.skipif(case_dict["test_info"]["product_version"] in product_version,
reason="该用例所属版本为:{0},在本次排除版本{1}内".format(case_dict["test_info"]["product_version"],
product_version))
@pytest.mark.skipif(case_dict["test_info"]["case_level"] not in case_level,
reason="该用例的用例等级为:{0},不在本次运行级别{1}内".format(case_dict["test_info"]["case_level"], case_level))
# @pytest.mark.run(order=case_dict["test_info"]["run_order"])
@allure.severity(case_dict["test_info"]["case_level"])
@pytest.mark.parametrize("case_data", case_dict["premise"], ids=[])
def test_premise(self, case_data, setupClass):
if case_data is not None:
self.relevance = setupTest(relevance_path, case_data, setupClass)
api_send_check(case_data, case_dict, case_path, self.relevance)
else:
pass
@pytest.mark.skipif(case_dict["test_info"]["product_version"] in product_version,
reason="该用例所属版本为:{0},在本次排除版本{1}内".format(case_dict["test_info"]["product_version"],
product_version))
@pytest.mark.skipif(case_dict["test_info"]["case_level"] not in case_level,
reason="该用例的用例等级为:{0},不在本次运行级别{1}内".format(case_dict["test_info"]["case_level"], case_level))
# @pytest.mark.run(order=case_dict["test_info"]["run_order"])
@allure.severity(case_dict["test_info"]["case_level"])
@pytest.mark.parametrize("case_data", case_dict["test_case"], ids=[])
@allure.story("info")
def test_case(self, case_data, setupClass):
self.relevance = setupTest(relevance_path, case_data, setupClass)
api_send_check(case_data, case_dict, case_path, self.relevance)
if __name__ == '__main__':
import subprocess
subprocess.call(['pytest', '-v']) | [
"591243446@qq.com"
] | 591243446@qq.com |
bcad2095d69a28b0cecd13f2126677211b59c099 | 8544d75deee3d8037c1cddc6c5ea6b6039abf084 | /test.py | 27817ab1343814600b6c03b7b9ce7d223310080b | [] | no_license | everpalm/TwoSum | b972ce0a28bc30ccb46db52d79ff5d7313d8b199 | d812c2e2fe6b40f28cc24db6323bf9a7c350d33f | refs/heads/master | 2020-06-26T15:08:09.573342 | 2019-08-01T15:31:15 | 2019-08-01T15:31:15 | 199,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | class Solution:
def twoSum(self, nums, target):
print(nums, target)
complement={}
for nums_count in range(len(nums)):
if (target-nums[nums_count]) not in complement:
#print("complement =", complement)
#print(target, "- nums[",nums_count,"] = ", target-nums[nums_count])
complement[nums[nums_count]]=nums_count
#print("1. ","complement[",nums[nums_count],"] = ", nums_count)
else:
#print(target, "- nums[",nums_count,"] = ", target-nums[nums_count])
#print("2. ","dic[",target-nums[nums_count],"] = ", nums_count)
return [complement[target-nums[nums_count]],nums_count]
#if __name__ == "__main__":
#my_solution = Solution
#my_output = my_solution().twoSum([-1,-2,-3,-4], -7)
#print(my_output)
| [
"everpalm@gmail.com"
] | everpalm@gmail.com |
df21640281926ce627e762e7a9f257f9b679b918 | 7538343b2acac2fad6a8adfe5fa987e7bcc34faa | /tests/test_fastapi_example.py | a337a603c5c6d90313f16ce1f0e00b25601dcf5b | [] | no_license | spyndutz/fastapi-example | 5d819d43763e2fd33820eb13f2a60fc6bd60806f | a8c77e1cb01466bc1061c82c62ccd581207ceef0 | refs/heads/main | 2023-03-04T11:19:46.543911 | 2021-02-10T05:56:59 | 2021-02-10T05:56:59 | 337,603,532 | 0 | 0 | null | 2021-02-10T05:57:00 | 2021-02-10T03:15:10 | Python | UTF-8 | Python | false | false | 133 | py | """
This is test example module
"""
from fastapi_example import __version__
def test_version():
assert __version__ == "0.1.0"
| [
"spyndutz@gmail.com"
] | spyndutz@gmail.com |
d13c548e1ff9bf8afe87ef0436455580acbe03df | c02c2e90309844aece0a6cd18c24abd5b7205501 | /backend/todo/urls.py | c6b1b06a93cef8998416f150d9aae3d3afcb66bb | [] | no_license | magnunleno/pair-django-pytest-vue | 612ee34886f03ea9095a1b1c5bdf3137dffca3a8 | 2d6c7ea9cc2ecb3a2b4c8a4fafb66a57fece1001 | refs/heads/master | 2020-03-18T08:28:03.202655 | 2018-05-23T04:05:57 | 2018-05-23T04:05:57 | 134,511,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.urls import path
from . import views
app_name = 'todo'
urlpatterns = [
path('', views.index, name='index'),
]
| [
"magnun.leno@gmail.com"
] | magnun.leno@gmail.com |
702e0a2739121304d414e430741208fedc77fff3 | b1528fbef030ce53d2373e8dee0c5e764e5ec02a | /segments.py | 218d7afe32f2009678168970e6ba41caaaa5622c | [] | no_license | VascoXu/CityScanner-API | dc26b99307e7ae49907355082d38ba8f9e705ca7 | 55cf3759132cd06b246de5073a9cbdbf1c27ed64 | refs/heads/master | 2023-07-08T07:03:13.581072 | 2021-08-04T01:42:03 | 2021-08-04T01:42:03 | 350,403,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,134 | py | import io
import os
import numpy as np
import pandas as pd
import osmnx as ox
from geojson import FeatureCollection, Feature, Point, dumps, dump
from mapbox import Uploader
from sklearn.cluster import DBSCAN
from shapely.geometry import MultiPoint
from sklearn.neighbors import BallTree
from datetime import datetime
from mongo.mongo import *
# configure MongoDB
MONGO_URL = os.environ.get("MONGO_URL")
MONGO_DB = os.environ.get("MONGO_DB")
mongodb = MongoConnection(url=MONGO_URL, db=MONGO_DB)
"""
# query MongoDB
collection = "NYC_2_MOB_DATA"
# rows = mongodb.find(collection=collection, limit=10)
# data = pd.DataFrame(rows)
data = pd.read_csv("AQ.csv")
data = data[["temp_ext", "hum_ext", "pm25", "pm10", "long", "lat", "time"]]
data = data.dropna()
# data = data[:10]
"""
"""
# configure mapbox
mapbox_token = "sk.eyJ1IjoidmFzY294dSIsImEiOiJja28yM2NteWswMzNrMm5vN2loZHpxcXVoIn0.HPJt_e8pbYkU3wM02Z_fcg"
bbox = [40.9457, 40.4774, -73.7002, -74.2591]
# G = ox.graph_from_bbox(37.79, 37.78, -122.41, -122.43, network_type='drive')
"""
# data = pd.read_csv("AQ.csv")
# data = data[["temp_ext", "hum_ext", "pm25", "pm10", "long", "lat", "time"]]
# data = data.dropna()
# data = data[1:10]
def create_segments(data, bbox):
"""
Matches data from DataFrames to the driving streets OpenStreetMap network area specified by bbox.
:param pd.DataFrame data: A dataframe with the data points to attach to segments. Must have "lat" and "long" columns
:param list bbox: A geographic bounding box of north, south, east, and west values, respectively
"""
nb, sb, eb, wb = bbox
G = ox.graph_from_bbox(nb, sb, eb, wb)
dist = 0.0001
edges = ox.utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
edges['index'] = range(1, len(edges)+1)
# Get closest point on each segment
lng = data['long']
lat = data['lat']
ne, dist = ox.distance.nearest_edges(G, lng, lat, return_dist=True)
# Format edges to upload to mapbox
all_edges = []
all_lines = []
for n in ne:
u, v, key = n
edge = edges.loc[(u, v, key), "geometry"]
index = edges.loc[(u, v, key), "index"]
if edge not in all_edges:
feature = Feature(id=int(index), geometry=edge)
all_edges.append(edge)
all_lines.append(feature)
all_lines = FeatureCollection(all_lines)
return all_lines
def upload_to_mapbox(geojson_object, tileset_name, mapbox_token):
"""
Uploads a GeoJSON object to Mapbox as a tileset.
:param geojson_object: A GeoJSON compatible object to encode
:param str tileset_name: The tileset name in Mapbox
:return: An object containing the response from Mapbox
"""
# Upload to mapbox
uploader = Uploader(access_token=mapbox_token)
geojson_file = io.BytesIO(dumps(geojson_object).encode())
upload_resp = uploader.upload(geojson_file, tileset_name)
geojson_file.close()
return upload_resp.json()
def averages(data, bbox):
"""
Matches data from DataFrames to the driving streets OpenStreetMap network area specified by bbox.
:param pd.DataFrame data: A dataframe with the data points to attach to segments. Must have "lat" and "long" columns
:param list bbox: A geographic bounding box of north, south, east, and west values, respectively
"""
# load mapbox
nb, sb, eb, wb = bbox
G = ox.graph_from_bbox(nb, sb, eb, wb)
dist = 0.0001
edges = ox.utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
edges['index'] = range(1, len(edges)+1)
all_data = dict()
for index, row in data.iterrows():
date = datetime.fromtimestamp(row['time'])
print(date)
if date not in all_data:
all_data[date] = [row]
else:
all_data[date].append(row)
rows = []
for key, value in all_data.items():
# get closest point on each segment
lng = value['long']
lat = data['lat']
ne, dist = ox.distance.nearest_edges(G, lng, lat, return_dist=True)
print(ne)
rows.append({""})
averages()
| [
"vamiguelxu@gmail.com"
] | vamiguelxu@gmail.com |
9e45ecb2e12786ff9bae318d2d14df78b0fb4e93 | 794f9f57c975345ce6f2b43f9823b94b01b49f44 | /api/project/apps/gatekeeper/tasks.py | 229830fac82e117991e56a040e3b26b89ef8e923 | [
"BSD-2-Clause"
] | permissive | buddyup/api | 367b7cb02de142ef81fbfacf692559c32974539b | ae42fb99de0104d63b6a087710c31e377958eb77 | refs/heads/master | 2021-05-31T05:13:01.388483 | 2016-04-11T08:55:59 | 2016-04-11T08:55:59 | 55,948,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,443 | py | import json
import requests
from binascii import hexlify
from simplecrypt import encrypt
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from celery.task import task
from utils.email import get_school_key_from_email
from gatekeeper.models import LoginAttempt, PasswordChangeAttempt,\
SignupAttempt, User, DeleteAttempt, AccountCheckAttempt
@task(name="log_signup_attempt", acks_late=True)
def log_signup_attempt(data, ip, successful):
SignupAttempt.objects.create(
# attempt_full_name=data["full_name"],
attempt_email=data["email"],
attempt_password=hexlify(encrypt(settings.ACCESS_LOG_KEY, data["password"].encode('utf8'))),
# attempt_terms=data["agreed_to_terms"],
attempting_ip=ip,
successful=successful
)
@task(name="log_delete_attempt", acks_late=True)
def log_delete_attempt(data, ip, successful):
DeleteAttempt.objects.create(
# attempt_full_name=data["full_name"],
# attempt_email=data["email"],
attempt_password=hexlify(encrypt(settings.ACCESS_LOG_KEY, data["password"].encode('utf8'))),
# attempt_terms=data["agreed_to_terms"],
attempting_ip=ip,
successful=successful
)
@task(name="log_access_attempt", acks_late=True)
def log_access_attempt(email, password, ip, successful):
LoginAttempt.objects.create(
attempt_email=email,
attempt_password=hexlify(encrypt(settings.ACCESS_LOG_KEY, password.encode('utf8'))),
attempting_ip=ip,
successful=successful
)
@task(name="log_account_check_attempt", acks_late=True)
def log_account_check_attempt(email, ip, successful):
AccountCheckAttempt.objects.create(
attempt_email=email,
attempting_ip=ip,
successful=successful
)
@task(name="log_password_attempt", acks_late=True)
def log_password_attempt(buid, ip, successful):
PasswordChangeAttempt.objects.create(
attempt_buid=buid,
attempting_ip=ip,
successful=successful
)
def notify_will(url, data):
print(data)
if "email" in data:
from utils.school_data import school_data
school_key = get_school_key_from_email(data["email"])
data["school"] = school_data[school_key]
try:
url = "%s/api%s" % (settings.WILL_URL, url)
print(url)
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(
url,
headers=headers,
data=json.dumps(data),
)
if not r.status_code == 200:
print(r.status_code)
print(r.json())
except:
import traceback
traceback.print_exc()
@task(name="new_signup", acks_late=True)
def notify_new_signup(buid, school_name):
u = User.objects.get(pk=buid)
data = {
"email": u.email,
"school_name": school_name,
}
notify_will("/signup", data)
@task(name="new_group", acks_late=True)
def notify_new_group(buid, event):
u = User.objects.get(buid=buid)
data = {
"email": u.email,
"event": event,
}
notify_will("/group", data)
@task(name="new_class", acks_late=True)
def notify_new_class(user_id, event):
u = User.objects.get(buid=user_id)
data = {
"email": u.email,
"event": event,
}
notify_will("/class", data)
@task(name="send_verification_email", acks_late=True)
def send_verification_email(user_id):
print("send_verification_email")
u = User.objects.get(pk=user_id)
context = {
"user": u,
}
subject = render_to_string('gatekeeper/confirm_email.subject.txt', context).replace("\n", "")
body = render_to_string('gatekeeper/confirm_email.body.txt', context)
send_mail(subject, body, settings.SERVER_EMAIL, [u.email], fail_silently=False)
print("sent")
@task(name="delete_user", acks_late=True, time_limit=7200)
def delete_user(user_pk):
from events.tasks import firebase_url, firebase_put, firebase_patch, firebase_post, firebase_get, firebase_delete
u = User.objects.get(pk=user_pk)
print("deleting %s" % u.buid)
profile = firebase_get("/users/%s/public" % u.buid)
profile["email"] = u.email
# Remove from classes
print("""users/%s/classes""")
resp = firebase_get("/users/%s/classes" % u.buid)
if resp:
for class_id, info in resp.items():
# Students list
firebase_delete("/classes/%s/students/%s" % (class_id, u.buid))
# News feed
feed = firebase_get("/classes/%s/news_feed/" % class_id)
if feed:
for feed_id, item in feed.items():
if item["creator"] == u.buid:
firebase_delete("/classes/%s/news_feed/%s" % (class_id, feed_id))
# Remove from buddies
print("""users/%s/buddies/""")
resp = firebase_get("/users/%s/buddies/" % u.buid)
if resp:
for buddy_id, item in resp.items():
firebase_delete("/users/%s/buddies/%s" % (buddy_id, u.buid))
# Pending requests incoming
print("""users/%s/buddy_requests/""")
resp = firebase_get("/users/%s/buddy_requests/" % u.buid)
if resp:
for buddy_id, item in resp.items():
firebase_delete("/users/%s/buddies-outgoing/%s" % (buddy_id, u.buid))
# Pending requests outgoing
print("""users/%s/buddies-outgoing/""")
resp = firebase_get("/users/%s/buddies-outgoing/" % u.buid)
if resp:
for buddy_id, item in resp.items():
firebase_delete("/users/%s/buddy_requests/%s" % (buddy_id, u.buid))
# Blocked
# Skipping for now.. ?
# Remove from inboxes
print("""users/%s/inbox/""")
resp = firebase_get("/users/%s/inbox/" % u.buid)
if resp:
for buddy_id, item in resp.items():
# Remove threads from both people.
thread_id = item["thread_id"]
firebase_delete("/users/%s/inbox/%s" % (buddy_id, u.buid))
firebase_delete("/message_threads/%s/" % (thread_id))
# Remove from groups
print("""users/%s/groups""")
resp = firebase_get("/users/%s/groups" % u.buid)
if resp:
for group_id, item in resp.items():
# Attending list
firebase_delete("/groups/%s/attending/%s" % (group_id, u.buid))
# If created the group... ?
if item["creator"] == u.buid:
# Remove all attendees
print("""groups/%""")
resp = firebase_get("/groups/%s/attending/" % group_id)
if resp:
for attendee_id, item in resp.items():
firebase_delete("/user/%s/groups/%s" % (attendee_id, group_id))
# Delete it from class
if item["profile"] and item["profile"]["subject"]:
firebase_delete("/classes/%s/groups/%s" % (
item["profile"]["subject"],
group_id
))
# Delete it from school
if item["school_id"]:
firebase_delete("/schools/%s/groups/%s" % (
item["school_id"],
group_id
))
# Delete it.
firebase_delete("/groups/%s" % group_id)
# Remove from main event feed
print("events")
event_list = firebase_get(
"events?orderBy=\"creator\"&startAt=\"%s\"&endAt=\"%s\"" % (u.buid, u.buid)
)
if event_list:
for event_id, item in event_list.items():
print(event_id)
firebase_delete("/events/%s" % event_id)
# print("""users/%s/news_feed""")
# resp = firebase_get("/users/%s/news_feed" % u.buid)
# if resp:
# for id, action in resp.items():
# # Do we remove from main firebase event list?
# print(id)
# print(action["type"])
# All types of events: covered.
# if action["type"] == "resend_verification_email":
# pass
# elif action["type"] == "unbuddied":
# pass
# elif action["type"] == "ignored_request":
# pass
# elif action["type"] == "buddy_request":
# pass
# elif action["type"] == "cancel_buddy_request":
# pass
# elif action["type"] == "blocked":
# pass
# elif action["type"] == "unblocked":
# pass
# elif action["type"] == "report_content":
# pass
# elif action["type"] == "cancel_report_content":
# pass
# elif action["type"] == "thread_created":
# pass
# elif action["type"] == "private_message":
# pass
# elif action["type"] == "added_class":
# pass
# elif action["type"] == "dropped_class":
# pass
# elif action["type"] == "updated_group":
# pass
# elif action["type"] == "attending_group":
# pass
# elif action["type"] == "cancel_group_attend":
# pass
# elif action["type"] == "group_reminder":
# pass
# elif action["type"] == "account_created":
# pass
# elif action["type"] == "signed_up":
# pass
# elif action["type"] == "logged_in":
# pass
# elif action["type"] == "update_profile_pic":
# pass
# elif action["type"] == "delete_account":
# pass
# Do the final wipe.
# Remove from schools
print("""users/%s/schools""")
resp = firebase_get("/users/%s/schools" % u.buid)
if resp:
for school_id, item in resp.items():
# Students list
firebase_delete("/schools/%s/students/%s" % (school_id, u.buid))
firebase_put("/users/%s/" % u.buid, {"deleted": {"marked_for_delete": True}})
u.delete()
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(
"%s/api/account-deleted" % settings.WILL_URL,
headers=headers,
data=json.dumps(profile)
)
assert r.status_code == 200
@task(name="merge_classes", acks_late=True, time_limit=7200)
def merge_classes(master_id=None, child_id=None):
from events.tasks import firebase_url, firebase_put, firebase_patch,\
firebase_post, firebase_get, firebase_delete, sync_class_data
assert master_id and child_id
print("Merging %s into %s" % (child_id, master_id))
master_profile = firebase_get("/classes/%s/profile/" % (master_id,))
child_profile = firebase_get("/classes/%s/profile/" % (child_id,))
# - add class for each student
child_students = firebase_get("/classes/%s/students/" % (child_id,))
if child_students:
master_data = {
"id": master_id,
"course_id": master_id,
"name": master_profile["name"],
"code": master_profile["code"],
"school_id": master_profile["school_id"],
"subject_code": master_profile["subject_code"],
"subject_name": master_profile["subject_name"],
"subject_icon": master_profile["subject_icon"],
}
for student, student_data in child_students.items():
firebase_put("/users/%s/classes/%s/" % (student, master_id), master_data)
# add to master class list
firebase_put("/classes/%s/students/%s/" % (master_id, student), {".value": True})
# - remove old class from students
for student, student_data in child_students.items():
firebase_delete("/users/%s/classes/%s/" % (student, child_id))
# - update any study groups to new class
groups = firebase_get("/classes/%s/groups/" % (child_id,))
if groups:
for group_id, group_data in groups.items():
group_data["subject"] = master_id
firebase_put("/classes/%s/groups/%s" % (master_id, group_id,), group_data)
# remove class from school.
firebase_delete("/schools/%s/classes/%s/" % (child_profile["school_id"], child_id))
# pull from analytics
firebase_delete("/analytics/classes/%s/" % child_id)
# Update master analytics
school_profile = firebase_get("/schools/%s/profile" % master_profile["school_id"])
sync_class_data.delay(master_id, school_profile["name"], master_profile["school_id"])
# - add message to chat?
# - delete old class
firebase_delete("/classes/%s/" % child_id)
print("Merge complete.")
| [
"steven@inkandfeet.com"
] | steven@inkandfeet.com |
e08986934f548993b70d30092dadbd0b8f412c85 | 620c7bd0f3642a87d9a69563f55857c47acba8d3 | /sniff_beacons.py | 0a08ede5aad32e7cc0712544f4be5a8ce8b0ef79 | [] | no_license | WhoTippedMyCows/Scripts | d4ab7e070b2365382315393d93bc0149f93c8cce | fb68d39d3692f7812fe9c534942cb3838fb01782 | refs/heads/main | 2023-02-27T16:25:09.198731 | 2021-02-11T02:20:37 | 2021-02-11T02:20:37 | 335,766,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Sniff wireless packets
# Setup monitor mode:
# ifconfig wlan0 down
# iwconfig wlan0 mode monitor
# ifconfig wlan0 up
# iwconfig
from scapy.all import *
def get_packets(packet):
if packet.haslayer(Dot11):
if packet.type == 0 and packet.subtype == 8:
print("BSSID: "+packet.addr2,
"ESSID: "+packet.info,
"Interval: "+str(packet.beacon_interval),
"Timestamp: "+str(packet.timestamp))
sniff(iface="wlan0", prn=get_packets)
| [
"noreply@github.com"
] | noreply@github.com |
c2e3ddd4b12b41ff8990d4864171cbf2b80ae6d7 | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pandas/tests/resample/test_resampler_grouper.py | 204efa0e5670fcd8e1e750dae7e7b34df513e81a | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 14,411 | py | from textwrap import dedent
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import async_mark
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
test_frame = DataFrame(
{"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
index=date_range("1/1/2000", freq="s", periods=40),
)
@async_mark()
@td.check_file_leaks
async def test_tab_complete_ipython6_warning(ip):
from IPython.core.completer import provisionalcompleter
code = dedent(
"""\
import pandas._testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
"""
)
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning;
# appears resolved 2021-02-02
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
def test_deferred_with_groupby():
# GH 12486
# support deferred resample ops with groupby
data = [
["2010-01-01", "A", 2],
["2010-01-02", "A", 3],
["2010-01-05", "A", 8],
["2010-01-10", "A", 7],
["2010-01-13", "A", 3],
["2010-01-01", "B", 5],
["2010-01-03", "B", 2],
["2010-01-04", "B", 1],
["2010-01-11", "B", 7],
["2010-01-14", "B", 3],
]
df = DataFrame(data, columns=["date", "id", "score"])
df.date = pd.to_datetime(df.date)
def f(x):
return x.set_index("date").resample("D").asfreq()
expected = df.groupby("id").apply(f)
result = df.set_index("date").groupby("id").resample("D").asfreq()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"date": date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": [5, 6, 7, 8],
}
).set_index("date")
def f(x):
return x.resample("1D").ffill()
expected = df.groupby("group").apply(f)
result = df.groupby("group").resample("1D").ffill()
tm.assert_frame_equal(result, expected)
def test_getitem():
g = test_frame.groupby("A")
expected = g.B.apply(lambda x: x.resample("2s").mean())
result = g.resample("2s").B.mean()
tm.assert_series_equal(result, expected)
result = g.B.resample("2s").mean()
tm.assert_series_equal(result, expected)
result = g.resample("2s").mean().B
tm.assert_series_equal(result, expected)
def test_getitem_multiple():
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{"id": 1, "buyer": "A"}, {"id": 2, "buyer": "B"}]
df = DataFrame(data, index=date_range("2016-01-01", periods=2))
r = df.groupby("id").resample("1D")
result = r["buyer"].count()
expected = Series(
[1, 1],
index=pd.MultiIndex.from_tuples(
[(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
names=["id", None],
),
name="buyer",
)
tm.assert_series_equal(result, expected)
result = r["buyer"].count()
tm.assert_series_equal(result, expected)
def test_groupby_resample_on_api_with_getitem():
# GH 17813
df = DataFrame(
{"id": list("aabbb"), "date": date_range("1-1-2016", periods=5), "data": 1}
)
exp = df.set_index("date").groupby("id").resample("2D")["data"].sum()
result = df.groupby("id").resample("2D", on="date")["data"].sum()
tm.assert_series_equal(result, exp)
def test_groupby_with_origin():
# GH 31809
freq = "1399min" # prime number that is smaller than 24h
start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
middle = "1/15/2000 00:00:00"
rng = date_range(start, end, freq="1231min") # prime number
ts = Series(np.random.randn(len(rng)), index=rng)
ts2 = ts[middle:end]
# proves that grouper without a fixed origin does not work
# when dealing with unusual frequencies
simple_grouper = pd.Grouper(freq=freq)
count_ts = ts.groupby(simple_grouper).agg("count")
count_ts = count_ts[middle:end]
count_ts2 = ts2.groupby(simple_grouper).agg("count")
with pytest.raises(AssertionError, match="Index are different"):
tm.assert_index_equal(count_ts.index, count_ts2.index)
# test origin on 1970-01-01 00:00:00
origin = Timestamp(0)
adjusted_grouper = pd.Grouper(freq=freq, origin=origin)
adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
adjusted_count_ts = adjusted_count_ts[middle:end]
adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)
# test origin on 2049-10-18 20:00:00
origin_future = Timestamp(0) + pd.Timedelta("1399min") * 30_000
adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future)
adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count")
adjusted2_count_ts = adjusted2_count_ts[middle:end]
adjusted2_count_ts2 = ts2.groupby(adjusted_grouper2).agg("count")
tm.assert_series_equal(adjusted2_count_ts, adjusted2_count_ts2)
# both grouper use an adjusted timestamp that is a multiple of 1399 min
# they should be equals even if the adjusted_timestamp is in the future
tm.assert_series_equal(adjusted_count_ts, adjusted2_count_ts2)
def test_nearest():
    # GH 17496: upsampling with .nearest() fills each new bin with the
    # closest original observation.
    idx = date_range("1/1/2000", periods=3, freq="T")
    upsampled = Series(range(3), index=idx).resample("20s").nearest()
    target_index = pd.DatetimeIndex(
        [
            "2000-01-01 00:00:00",
            "2000-01-01 00:00:20",
            "2000-01-01 00:00:40",
            "2000-01-01 00:01:00",
            "2000-01-01 00:01:20",
            "2000-01-01 00:01:40",
            "2000-01-01 00:02:00",
        ],
        dtype="datetime64[ns]",
        freq="20S",
    )
    expected = Series([0, 0, 1, 1, 1, 2, 2], index=target_index)
    tm.assert_series_equal(upsampled, expected)
def test_methods():
    # Every resampler method on a groupby-resample must match applying the
    # same method group-by-group.
    grouped = test_frame.groupby("A")
    resampled = grouped.resample("2s")

    def _expected(method, **kwargs):
        # Per-group reference implementation of ``method``.
        return grouped.apply(lambda x: getattr(x.resample("2s"), method)(**kwargs))

    for method in ["first", "last", "median", "sem", "sum", "mean", "min", "max"]:
        tm.assert_frame_equal(getattr(resampled, method)(), _expected(method))

    tm.assert_series_equal(resampled.size(), _expected("size"))
    tm.assert_frame_equal(resampled.count(), _expected("count"))

    # series only
    tm.assert_series_equal(
        resampled.B.nunique(),
        grouped.B.apply(lambda x: x.resample("2s").nunique()),
    )

    for method in ["nearest", "backfill", "ffill", "asfreq"]:
        tm.assert_frame_equal(getattr(resampled, method)(), _expected(method))

    tm.assert_frame_equal(
        resampled.ohlc(), grouped.apply(lambda x: x.resample("2s").ohlc())
    )

    for method in ["std", "var"]:
        tm.assert_frame_equal(
            getattr(resampled, method)(ddof=1), _expected(method, ddof=1)
        )
def test_apply():
    # apply with a resampling reduction must match the direct reduction.
    grouped = test_frame.groupby("A")
    resampled = grouped.resample("2s")

    # reduction
    expected = grouped.resample("2s").sum()
    result = resampled.apply(lambda x: x.resample("2s").sum())
    tm.assert_frame_equal(result, expected)

    result = grouped.apply(lambda x: x.resample("2s").apply(lambda y: y.sum()))
    # y.sum() results in int64 instead of int32 on 32-bit architectures
    expected = expected.astype("int64")
    tm.assert_frame_equal(result, expected)
def test_apply_with_mutated_index():
# GH 15169
index = date_range("1-1-2015", "12-31-15", freq="D")
df = DataFrame(data={"col1": np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=["a", "b"])
return s
expected = df.groupby(pd.Grouper(freq="M")).apply(f)
result = df.resample("M").apply(f)
tm.assert_frame_equal(result, expected)
# A case for series
expected = df["col1"].groupby(pd.Grouper(freq="M")).apply(f)
result = df["col1"].resample("M").apply(f)
tm.assert_series_equal(result, expected)
def test_apply_columns_multilevel():
    # GH 16231: apply must dispatch per column when columns are a MultiIndex.
    columns = pd.MultiIndex.from_tuples(
        [("A", "a", "", "one"), ("B", "b", "i", "two")]
    )
    idx = date_range(start="2017-01-01", freq="15Min", periods=8)
    frame = DataFrame(np.array([0] * 16).reshape(8, 2), index=idx, columns=columns)

    # sum for "...one" columns, mean for everything else
    per_column_agg = {
        col: (np.sum if col[3] == "one" else np.mean) for col in frame.columns
    }
    result = frame.resample("H").apply(lambda x: per_column_agg[x.name](x))

    expected = DataFrame(
        2 * [[0, 0.0]],
        index=date_range(start="2017-01-01", freq="1H", periods=2),
        columns=pd.MultiIndex.from_tuples(
            [("A", "a", "", "one"), ("B", "b", "i", "two")]
        ),
    )
    tm.assert_frame_equal(result, expected)
def test_resample_groupby_with_label():
    # GH 13235: label="left" must be honoured inside a groupby-resample.
    idx = date_range("2000-01-01", freq="2D", periods=5)
    frame = DataFrame(
        index=idx, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]}
    )
    result = frame.groupby("col0").resample("1W", label="left").sum()

    expected_index = pd.MultiIndex.from_arrays(
        [
            np.array([0, 0, 1, 2]),
            pd.to_datetime(
                np.array(["1999-12-26", "2000-01-02", "2000-01-02", "2000-01-02"])
            ),
        ],
        names=["col0", None],
    )
    expected = DataFrame(
        data={"col0": [0, 0, 2, 2], "col1": [1, 1, 2, 1]}, index=expected_index
    )
    tm.assert_frame_equal(result, expected)
def test_consistency_with_window():
    # resample and rolling on a groupby must produce the same MultiIndex
    # structure (group key as the first level).
    df = test_frame
    expected_level0 = pd.Int64Index([1, 2, 3], name="A")

    resampled = df.groupby("A").resample("2s").mean()
    assert resampled.index.nlevels == 2
    tm.assert_index_equal(resampled.index.levels[0], expected_level0)

    rolled = df.groupby("A").rolling(20).mean()
    assert rolled.index.nlevels == 2
    tm.assert_index_equal(rolled.index.levels[0], expected_level0)
def test_median_duplicate_columns():
    # GH 14233: duplicate column labels must not break resample().median().
    frame = DataFrame(
        np.random.randn(20, 3),
        columns=list("aaa"),
        index=date_range("2012-01-01", periods=20, freq="s"),
    )
    renamed = frame.copy()
    renamed.columns = ["a", "b", "c"]

    expected = renamed.resample("5s").median()
    result = frame.resample("5s").median()
    expected.columns = result.columns
    tm.assert_frame_equal(result, expected)
def test_apply_to_one_column_of_df():
    # GH: 36951
    frame = DataFrame(
        {"col": range(10), "col1": range(10, 20)},
        index=date_range("2012-01-01", periods=10, freq="20min"),
    )
    expected = Series(
        [3, 12, 21, 9], index=date_range("2012-01-01", periods=4, freq="H")
    )

    # access "col" via getattr -> make sure we handle AttributeError
    by_attribute = frame.resample("H").apply(lambda group: group.col.sum())
    tm.assert_series_equal(by_attribute, expected)

    # access "col" via __getitem__ -> make sure we handle KeyError
    by_key = frame.resample("H").apply(lambda group: group["col"].sum())
    tm.assert_series_equal(by_key, expected)
def test_resample_groupby_agg():
# GH: 33548
df = DataFrame(
{
"cat": [
"cat_1",
"cat_1",
"cat_2",
"cat_1",
"cat_2",
"cat_1",
"cat_2",
"cat_1",
],
"num": [5, 20, 22, 3, 4, 30, 10, 50],
"date": [
"2019-2-1",
"2018-02-03",
"2020-3-11",
"2019-2-2",
"2019-2-2",
"2018-12-4",
"2020-3-11",
"2020-12-12",
],
}
)
df["date"] = pd.to_datetime(df["date"])
resampled = df.groupby("cat").resample("Y", on="date")
expected = resampled.sum()
result = resampled.agg({"num": "sum"})
tm.assert_frame_equal(result, expected)
def test_resample_groupby_agg_listlike():
    # GH 42905: agg with a list of reductions on a selected column.
    stamp = Timestamp("2021-02-28 00:00:00")
    frame = DataFrame(
        {"class": ["beta"], "value": [69]}, index=Index([stamp], name="date")
    )
    selected = frame.groupby("class").resample("M")["value"]
    result = selected.agg(["sum", "size"])
    expected = DataFrame(
        [[69, 1]],
        index=pd.MultiIndex.from_tuples([("beta", stamp)], names=["class", "date"]),
        columns=["sum", "size"],
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
def test_empty(keys):
    # GH 26411: resampling a frame with an empty TimedeltaIndex must not
    # raise and must preserve the grouping key(s) in the index.
    frame = DataFrame([], columns=["a", "b"], index=TimedeltaIndex([]))
    result = frame.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
    expected = DataFrame(columns=["a", "b"]).set_index(keys, drop=False)
    if len(keys) == 1:
        expected.index.name = keys[0]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("consolidate", [True, False])
def test_resample_groupby_agg_object_dtype_all_nan(consolidate):
    # https://github.com/pandas-dev/pandas/issues/39329
    # Group "B" has no value in the object column, so the aggregated object
    # column must come back all-NaN for that group instead of raising.
    dates = date_range("2020-01-01", periods=15, freq="D")
    part_a = DataFrame(
        {"key": "A", "date": dates, "col1": range(15), "col_object": "val"}
    )
    part_b = DataFrame({"key": "B", "date": dates, "col1": range(15)})
    combined = pd.concat([part_a, part_b], ignore_index=True)
    if consolidate:
        combined = combined._consolidate()

    result = combined.groupby(["key"]).resample("W", on="date").min()

    expected_index = pd.MultiIndex.from_arrays(
        [
            ["A"] * 3 + ["B"] * 3,
            pd.to_datetime(["2020-01-05", "2020-01-12", "2020-01-19"] * 2),
        ],
        names=["key", "date"],
    )
    expected = DataFrame(
        {
            "key": ["A"] * 3 + ["B"] * 3,
            "date": pd.to_datetime(["2020-01-01", "2020-01-06", "2020-01-13"] * 2),
            "col1": [0, 5, 12] * 2,
            "col_object": ["val"] * 3 + [np.nan] * 3,
        },
        index=expected_index,
    )
    tm.assert_frame_equal(result, expected)
| [
"davidycliao@gmail.com"
] | davidycliao@gmail.com |
9eac965462f06dc9a3e77c216a9147f97de74a0e | 0cfcd625daf61159ea8869551c750e7ad34c730a | /migrations/versions/b0c8e2c5414c_created_loan.py | c9d9b4cb0a6a2f99d96c014681bed90338b710ad | [] | no_license | GaronSmith/webserver-loans | 96269342d535568c00db976282716f9bcdf269b4 | 9f08b279c737e2cab31407d81c918830b3e0938a | refs/heads/master | 2023-03-31T01:22:25.170835 | 2021-03-30T01:40:53 | 2021-03-30T01:40:53 | 352,091,024 | 1 | 0 | null | 2021-03-29T22:20:50 | 2021-03-27T14:17:28 | Python | UTF-8 | Python | false | false | 894 | py | """created-loan
Revision ID: b0c8e2c5414c
Revises:
Create Date: 2021-03-27 17:02:03.012084
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b0c8e2c5414c'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: create the ``loans`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('loans',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('amount', sa.Float(), nullable=False),
    sa.Column('interest_rate', sa.Float(), nullable=False),
    sa.Column('loan_length', sa.Integer(), nullable=False),
    sa.Column('monthly_payment', sa.Float(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the ``loans`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('loans')
    # ### end Alembic commands ###
| [
"wgaronsmith13@gmail.com"
] | wgaronsmith13@gmail.com |
efb53530e5f90e6d505ae760ff80f52b67f20ea1 | 004d8c592a3a88ea138c83516322d6825821a335 | /app.py | d9d3f8cb6a283dad7cc0a54a10451f0fce142c38 | [] | no_license | JoeBusLife/cupcakes | 8dd69b097e36cf1e1b3d7f0b94da2ac30a9ff511 | 9671c5d56f4313fdb012d51a48a412525787fb37 | refs/heads/main | 2023-06-05T06:06:17.635321 | 2021-06-21T19:26:41 | 2021-06-21T19:26:41 | 378,298,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,658 | py | """Flask app for Cupcakes"""
from flask import Flask, request, render_template, jsonify, redirect, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from models import db, connect_db, Cupcake
from forms import CupcakeForm
from sqlalchemy import desc
# Application factory-less setup: configure the app at import time.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///cupcakes'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # disable event-tracking overhead
app.config['SQLALCHEMY_ECHO'] = True  # log generated SQL to stdout (dev only)
app.config['SECRET_KEY'] = "soup"  # NOTE(review): move to an env var before deploying
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
debug = DebugToolbarExtension(app)
connect_db(app)
db.create_all()  # create tables for all models at import time
@app.route('/')
def index_page():
    """Renders html template that includes some JS - NOT PART OF JSON API!"""
    return render_template('index.html', form=CupcakeForm())
# *****************************
# RESTFUL CUPCAKES JSON API
# *****************************
@app.route('/api/cupcakes')
def list_cupcakes():
    """Returns JSON w/ all cupcakes"""
    serialized = [cake.serialize() for cake in Cupcake.query.all()]
    return jsonify(cupcakes=serialized)
@app.route('/api/cupcakes/<int:cup_id>')
def get_cupcake(cup_id):
    """Returns JSON for one cupcake in particular"""
    cake = Cupcake.query.get_or_404(cup_id)
    return jsonify(cupcake=cake.serialize())
@app.route('/api/cupcakes', methods=["POST"])
def create_cupcake():
    """Creates a new cupcake and returns JSON of that created cupcake"""
    payload = request.json
    cake = Cupcake(
        flavor=payload['flavor'],
        rating=payload['rating'],
        size=payload['size'],
        # empty string falls back to the model's default image
        image=payload['image'] or None,
    )
    db.session.add(cake)
    db.session.commit()
    return (jsonify(cupcake=cake.serialize()), 201)
@app.route('/api/cupcakes/<int:cup_id>', methods=["PATCH"])
def update_cupcake(cup_id):
    """Updates a particular cupcake and responds w/ JSON of that updated cupcake"""
    cake = Cupcake.query.get_or_404(cup_id)
    # Only overwrite fields that are present in the request body.
    for field in ('flavor', 'size', 'rating', 'image'):
        setattr(cake, field, request.json.get(field, getattr(cake, field)))
    db.session.commit()
    return jsonify(cupcake=cake.serialize())
@app.route('/api/cupcakes/<int:cup_id>', methods=["DELETE"])
def delete_cupcake(cup_id):
    """Deletes a particular cupcake"""
    cake = Cupcake.query.get_or_404(cup_id)
    db.session.delete(cake)
    db.session.commit()
    return jsonify(message="deleted")
"h.joseph91@hotmail.com"
] | h.joseph91@hotmail.com |
f37207209c196d663aa2e43026e64b1a2b9cd70e | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /great_year_and_same_woman/important_part/group/want_old_place.py | 611d05cfce1273ef655fed24e8b67caedddea3ff | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py |
#! /usr/bin/env python
def great_work(str_arg):
life(str_arg)
print('able_person')
def life(str_arg):
print(str_arg)
if __name__ == '__main__':
great_work('life_and_child')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
def build_third_list(first, second):
    """Return odd-index elements of *first* followed by even-index elements of *second*."""
    return first[1::2] + second[0::2]


if __name__ == "__main__":
    a = list(map(int, input().split()))
    b = list(map(int, input().split()))
    # BUG FIX: the original sliced a[0::2] and b[1::2], i.e. the EVEN indexes
    # of list one and the ODD indexes of list two -- the opposite of both the
    # printed labels and the stated exercise (odd-index from the first list,
    # even-index from the second).
    odd_elements = a[1::2]
    print("Element at odd-index positions from list one \n", odd_elements)
    even_elements = b[0::2]
    print("Element at even-index positions from list two \n", even_elements)
    print("Printing Final third list\n", odd_elements + even_elements)
| [
"noreply@github.com"
] | noreply@github.com |
350aba3f5ae51a3e7b63b960204bddbeee38d131 | d01822ba7fa5522c89c83f20907003f5c4823dde | /CNN_GPU.py | 559d15521dd4810f15c81c1a9f11094f08a722f2 | [] | no_license | cp4011/Neural-Network | 853aab63c871d19aeb73911af56ccf9351ad1f3c | 93b33b6a14fed7010285da8fb3efc4a62152cef3 | refs/heads/master | 2020-05-02T07:22:18.778871 | 2019-04-15T10:49:40 | 2019-04-15T10:49:40 | 177,816,249 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
# torch.manual_seed(1)
# Hyper-parameters
EPOCH = 1  # number of full passes over the training set
BATCH_SIZE = 50
LR = 0.001  # Adam learning rate
DOWNLOAD_MNIST = False  # set True on first run to download the dataset
train_data = torchvision.datasets.MNIST(root='./mnist/', train=True, transform=torchvision.transforms.ToTensor(), download=DOWNLOAD_MNIST,)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# !!!!!!!! Change in here !!!!!!!!! #
# Add a channel dim, scale pixel values to [0, 1], keep the first 2000 test
# samples, and move them to the GPU.
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000].cuda()/255.   # Tensor on GPU
test_y = test_data.test_labels[:2000].cuda()
class CNN(nn.Module):
    """Two conv blocks (conv -> ReLU -> 2x2 max-pool) plus a linear head.

    Input is a (N, 1, 28, 28) image batch; output is (N, 10) class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # (N, 1, 28, 28) -> (N, 16, 14, 14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # (N, 16, 14, 14) -> (N, 32, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)
        return self.out(flat)
cnn = CNN()
# !!!!!!!! Change in here !!!!!!!!! #
cnn.cuda()      # Moves all model parameters and buffers to the GPU.
optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()
# Training loop: one optimisation step per mini-batch.
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        # !!!!!!!! Change in here !!!!!!!!! #
        b_x = x.cuda()    # Tensor on GPU
        b_y = y.cuda()    # Tensor on GPU
        output = cnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Every 50 steps, evaluate accuracy on the held-out 2000 test images.
        if step % 50 == 0:
            test_output = cnn(test_x)
            # !!!!!!!! Change in here !!!!!!!!! #
            pred_y = torch.max(test_output, 1)[1].cuda().data  # move the computation in GPU
            accuracy = torch.sum(pred_y == test_y).type(torch.FloatTensor) / test_y.size(0)
            print('Epoch: ', epoch, '| train loss: %.4f' % loss.data.cpu().numpy(), '| test accuracy: %.2f' % accuracy)
# Show predictions for the first 10 test images next to the true labels.
test_output = cnn(test_x[:10])
# !!!!!!!! Change in here !!!!!!!!! #
pred_y = torch.max(test_output, 1)[1].cuda().data  # move the computation in GPU
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
| [
"957628963@qq.com"
] | 957628963@qq.com |
e43972840366d521837f8540cf4bf978710485ab | cba0257401ebc18932ffe998ea27385c83dfa6d0 | /self_pratice/Demo1.py | 1d7f0c064861945a246d44903455cde5c32ef2c0 | [] | no_license | Zero-Fire/Pycharm | 07bfde367c16790aacf76c267cfeb538f713b675 | 3d0738ae60775790eb7ea6b0422222b06168de18 | refs/heads/master | 2023-05-05T10:11:55.130661 | 2021-06-01T07:13:26 | 2021-06-01T07:13:26 | 317,237,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | # -*- coding: utf-8 -*-
#字典
# dict_a={"a":1,"b":2}
# dict_b=dict(a=1,b=2)
# b=dict_a.fromkeys((1,2,3),"a")
# print(b)
# print({i: i * 2 for i in range(1, 4)})
# print(dict_a.keys())
# print(dict_a.values())
# print(dict_b.pop("a"))
# print(dict_a)
# print(dict_b)
# #集合 a=set(1,2,3)
# a={1,2,3}
# b={1,4,5}
# print(a.union(b))
# print(a.intersection(b))
# print(a.difference(b))
# a.add('a')
# print(a)
# print({i for i in "werweffwwerfdfewr"})
# #去重
# c='dfsdfewfwef'
# print(set(c))
# a=[1,2,3] #元祖 a = (1,2,3) 不可变数据类型
# print(a)
# b=(1,2,a,1)
# b[2][0]="a"
# print(b)
# print(b.index(a))
#列表推导式
# a=[]
# for i in range(1,4):
# a.append(i**2)
# print(a)
# b=[i**2 for i in range(1,4)]
# print(b)
# c=[i*j*k for i in range(1,4) for j in range(4,9) for k in range(2,5)]
# print(c)
# d=[i**2 for i in range(1,5) if i==1]
# print(d)
# def fun1(a=1,b=1,c=1):
# print("这是一个函数")
# return a,b,c
# #看返回值需要打印出来,调用函数时,没有传递参数则使用默认参数
# # print(fun1(2,3,4))
# print(fun1())
# fun2=lambda x,y: y+x*2
# #= def fun3(x):
# # return x*2
# print(fun2(2,1))
def mul(a, b):
    """Return a * b when both arguments are numeric, otherwise a reminder string."""
    try:
        numeric = (int, float)
        if not (isinstance(a, numeric) and isinstance(b, numeric)):
            return "请输入数字"
        return a * b
    except Exception as e:
        print(e)


print(mul(1, ','))
"992106950@qq.com"
] | 992106950@qq.com |
0eb204f2777859997c66c81bb0b68cc25729608e | b5062ae8da600be085218398ecc8ad9f8482a7ed | /backend/backend/settings.py | 44defea99d26172969aecbd1d0d1d08f0b4d92df | [] | no_license | sankethkini/autowork | 22fe07b5049da3cffa9e01bcfecd62312d92ed8e | 8f5b892dbe2b0c4b91ea3594d40c947f7509974e | refs/heads/master | 2022-12-12T05:43:40.124746 | 2020-08-13T05:41:44 | 2020-08-13T05:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,384 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(8^d@4ma9q$fvt2t9^!#hz&9&n@i^@%nw*eh3ru4^#x6ui-5ce'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# BUG FIX: ALLOWED_HOSTS was assigned twice ([] and then ['*']); the first
# assignment was dead code. '*' accepts any Host header -- restrict before
# deploying to production.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'maruti',
    'hyundai',
    'corsheaders'
]

# BUG FIX: CorsMiddleware must be placed above any middleware that can
# generate responses (in particular CommonMiddleware) so CORS headers are
# attached to every response; the duplicated CommonMiddleware entry was
# also removed.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
]

CORS_ORIGIN_ALLOW_ALL = True

ROOT_URLCONF = 'backend.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            # Serve the built React frontend's templates.
            os.path.join(BASE_DIR, 'frontend', 'build'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'backend.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'frontend', 'build', 'static'),
]
| [
"kinisanketh@gmail.com"
] | kinisanketh@gmail.com |
c9c758f898830a0e19e8518f67c67299e5974c6d | 01a009e1f77d364020fc3c3159b0c679758b670d | /scrap_covid.py | ac7af93a4944458a6415bdf0a7f742327f1b1045 | [] | no_license | Surajcse5338/AIassistant | baf08650e2c107fb3ad5a48f6d30e740c03d4ca8 | 45e42b6e0452b9d5757b8ae4e8c2bab18a63b8ea | refs/heads/master | 2022-11-28T21:46:47.057073 | 2020-08-09T20:44:23 | 2020-08-09T20:44:23 | 283,849,588 | 0 | 0 | null | 2020-07-30T18:34:59 | 2020-07-30T18:31:55 | null | UTF-8 | Python | false | false | 1,543 | py | from bs4 import BeautifulSoup
from selenium import webdriver
import json
def getAllStateCovidData():
    """Scrape www.mohfw.gov.in and return per-state COVID-19 figures.

    Returns a dict keyed by state name; each value holds active cases,
    cured/discharged/migrated and deaths, together with their
    day-over-day change as shown on the site.
    """
    # NOTE(review): the chromedriver path is hard-coded and Windows-specific;
    # consider making it configurable.
    driver = webdriver.Chrome('D:/chromedriver')
    try:
        driver.get('https://www.mohfw.gov.in/')
        page_source = driver.execute_script("return document.documentElement.outerHTML")
    finally:
        # BUG FIX: quit the browser even when navigation/scripting fails,
        # otherwise a chromedriver process is leaked on every error.
        driver.quit()

    soup = BeautifulSoup(page_source, 'html.parser')
    tableparentDiv = soup.find('div', {'class': 'data-table table-responsive'})
    tableAllData = tableparentDiv.find('tbody', {})
    allStateRows = tableAllData.findAll('tr', {})

    # Iterate through each row and collect each state's cells into the dict.
    allStateData = {}
    for stateRow in allStateRows:
        allColumns = stateRow.findAll('td', {})
        dynamicShiftData = stateRow.findAll('span', {})
        # Only data rows have exactly 8 cells; header/footer rows are skipped.
        if len(allColumns) == 8:
            allStateData[allColumns[1].text] = {
                'activeCases': {
                    'total': int(allColumns[2].text),
                    'changeSinceYesterday': int(dynamicShiftData[0].text)
                },
                'cured/discharged/migrated': {
                    'cumulative': int(allColumns[4].text),
                    'changeSinceYesterday': int(dynamicShiftData[1].text)
                },
                'deaths': {
                    'cumulative': int(allColumns[6].text),
                    # NOTE(review): kept as raw text, unlike the other deltas --
                    # the source cell may hold non-numeric markers; confirm
                    # before converting to int.
                    'changeSinceYesterday': dynamicShiftData[2].text
                }
            }
    return allStateData
| [
"noreply@github.com"
] | noreply@github.com |
78dfd5e6cca3b9bcc1360d2a0781159697dff1f4 | 34f1e34416df71803635f5940554e0626b168818 | /bascal/code/pop.py | 966212413a3f6ecbf1e7bea780317021bb171d4b | [] | no_license | CCjerrymi/python | f2a268442648fee33046d2186d92ffb99c1be5f0 | 4fb7e8a5ff7181f1c2413210d8f63deb0191b6e1 | refs/heads/master | 2021-01-25T14:05:05.742062 | 2018-03-07T12:16:09 | 2018-03-07T12:16:09 | 123,654,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | #pop.py
"""Fetch POP3 mailbox statistics (message count, total size) for an account."""
import sys
from poplib import POP3
import socket
from getpass import getpass

# POP3 server (QQ mail)
POP3SVR = 'pop3.qq.com'

print("enter the email:")  # BUG FIX: prompt previously read "emali"
username = input()
password = getpass("enter the password")
try:
    recvSvr = POP3(POP3SVR)
    recvSvr.user(username)
    recvSvr.pass_(password)
    # stat() returns (message count, mailbox size in octets)
    ret = recvSvr.stat()
    print(ret)
    # close the session cleanly
    recvSvr.quit()
except (socket.gaierror, socket.error, socket.herror) as e:
    # BUG FIX: was `socket.gailerror` (typo); evaluating the except clause
    # would itself raise AttributeError instead of handling the network error.
    print(e)
    sys.exit(1)
"rongfu97@outlook.com"
] | rongfu97@outlook.com |
16e0e3b92cd1522b98412523e323e7d95c5b724b | 34c113622832088c2b419a6b472e50428199c2b7 | /functions/quotes_for_list_adjClose.py | a6bea0a833231c4f9d8233deb428e06f90e652ac | [] | no_license | michaellinp/PyTAAA | 64690a505ef30b610ca41bac41a408ca9a77fced | 78eb38d1f06859f450342ea1023d5e99d6dbdc10 | refs/heads/master | 2021-06-07T13:27:03.476539 | 2016-09-24T02:33:33 | 2016-09-24T02:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,773 | py | import numpy as np
import datetime
import datetime
from scipy import random
from scipy.stats import rankdata
import functions.quotes_adjClose
from functions.quotes_adjClose import *
from functions.TAfunctions import *
from functions.readSymbols import *
def get_SandP500List( verbose=True ):
    ###
    ### Query wikipedia.com for updated list of stocks in S&P 500 index.
    ### Return list with stock tickers.
    ###
    # NOTE: Python 2 code (print statements, urllib2).
    # Returns (symbolList, removedTickers, addedTickers); on any failure the
    # previously saved list is returned with empty change lists.
    import urllib2
    import os
    import datetime
    from bs4 import BeautifulSoup
    ###
    ### get symbol list from previous period
    ###
    # Read the last saved ticker list from symbols/SP500_symbols.txt and
    # strip trailing newlines.
    symbol_directory = os.path.join( os.getcwd(), "symbols" )
    symbol_file = "SP500_symbols.txt"
    symbols_file = os.path.join( symbol_directory, symbol_file )
    with open(symbols_file, "r+") as f:
        old_symbolList = f.readlines()
    for i in range( len(old_symbolList) ) :
        old_symbolList[i] = old_symbolList[i].replace("\n","")
    ###
    ### get current symbol list from wikipedia website
    ###
    try:
        base_url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
        content = urllib2.urlopen(base_url)
        #print "\n\n\n content = ", content
        print "... got web content"
        soup = BeautifulSoup(content.read())
        # First sortable wikitable on the page is the constituents table.
        t = soup.find("table", {"class" : "wikitable sortable"})
        print "... ran beautiful soup on web content"
        symbolList = [] # store all of the records in this list
        companyNamesList = []
        industry = []
        subIndustry = []
        for row in t.findAll('tr'):
            try:
                #print "\n\nrow = \n", row
                col = row.findAll('td')
                _ticker = col[0].string.strip()
                _company = col[1].string.strip()
                _sector = col[3].string.strip()
                _subIndustry = col[4].string.strip()
                symbolList.append(_ticker)
                companyNamesList.append(_company)
                industry.append(_sector)
                subIndustry.append(_subIndustry)
            except:
                # Header rows / malformed rows have no td cells; skip them.
                pass
        print "... retrieved SP500 companies lists from internet"
        companyName_file = os.path.join( symbol_directory, "SP500_companyNames.txt" )
        with open( companyName_file, "w" ) as f:
            for i in range( len(symbolList) ) :
                f.write( symbolList[i] + ";" + companyNamesList[i] + "\n" )
        print "... wrote SP500_companyNames.txt"
        ###
        ### compare old list with new list and print changes, if any
        ###
        # file for index changes history
        symbol_change_file = "SP500_symbolsChanges.txt"
        symbols_changes_file = os.path.join( symbol_directory, symbol_change_file )
        with open(symbols_changes_file, "r+") as f:
            old_symbol_changesList = f.readlines()
        old_symbol_changesListText = ''
        for i in range( len(old_symbol_changesList) ):
            old_symbol_changesListText = old_symbol_changesListText + old_symbol_changesList[i]
        # parse date
        year = datetime.datetime.now().year
        month = datetime.datetime.now().month
        day = datetime.datetime.now().day
        dateToday = str(year)+"-"+str(month)+"-"+str(day)
        # compare lists to check for tickers removed from the index
        # - printing will be suppressed if "verbose = False"
        # NOTE(review): removedTickersText is only initialised on the first
        # loop iteration; if old_symbolList were empty, the later f.write()
        # would raise NameError. Same pattern below for addedTickersText.
        removedTickers = []
        print ""
        for i, ticker in enumerate( old_symbolList ):
            if i == 0:
                removedTickersText = ''
            if ticker not in symbolList:
                removedTickers.append( ticker )
                if verbose:
                    print " Ticker ", ticker, " has been removed from the SP500 index"
                removedTickersText = removedTickersText + "\n" + dateToday + " Remove " + ticker
        # compare lists to check for tickers added to the index
        # - printing will be suppressed if "verbose = False"
        addedTickers = []
        print ""
        for i, ticker in enumerate( symbolList ):
            if i == 0:
                addedTickersText = ''
            if ticker not in old_symbolList:
                addedTickers.append( ticker )
                if verbose:
                    print " Ticker ", ticker, " has been added to the SP500 index"
                addedTickersText = addedTickersText + "\n" + dateToday + " Add " + ticker
        print ""
        # Prepend today's changes to the history file.
        with open(symbols_changes_file, "w") as f:
            f.write(addedTickersText)
            f.write(removedTickersText)
            f.write("\n")
            f.write(old_symbol_changesListText)
        print "****************"
        print "addedTickers = ", addedTickers
        print "removedTickers = ", removedTickers
        print "****************"
        ###
        ### update symbols file with current list. Keep copy of of list.
        ###
        if removedTickers != [] or addedTickers != []:
            # make copy of previous symbols list file
            symbol_directory = os.path.join( os.getcwd(), "symbols" )
            symbol_file = "SP500_symbols.txt"
            archive_symbol_file = "SP500_symbols__" + str(datetime.date.today()) + ".txt"
            symbols_file = os.path.join( symbol_directory, symbol_file )
            archive_symbols_file = os.path.join( symbol_directory, archive_symbol_file )
            with open( archive_symbols_file, "w" ) as f:
                for i in range( len(old_symbolList) ) :
                    f.write( old_symbolList[i] + "\n" )
            # make new symbols list file
            with open( symbols_file, "w" ) as f:
                for i in range( len(symbolList) ) :
                    f.write( symbolList[i] + "\n" )
    except:
        ###
        ### something didn't wor. print message and return old list.
        ###
        # Fallback: any failure (network, parsing, file IO) returns the
        # previously saved list with no changes reported.
        print "\n\n\n"
        print "! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! "
        print "  SP500 sysmbols list did not get updated from web."
        print "  ... check quotes_for_list_adjCloseVol.py in function 'get_SP500List' "
        print "  ... also check web at en.wikipedia.org/wiki/List_of_S%26P_500_companies"
        print "! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! "
        print "\n\n\n"
        symbolList = old_symbolList
        removedTickers = []
        addedTickers = []
    return symbolList, removedTickers, addedTickers
def get_Naz100List( verbose=True ):
    """Refresh the Nasdaq 100 ticker list from nasdaq.com.

    Reads the previous list from ./symbols/Naz100_symbols.txt, scrapes the
    current membership, records additions/removals (with today's date) in
    Naz100_symbolsChanges.txt, archives the old list, and rewrites the
    symbols file.  If anything fails, the previous list is returned with
    empty change lists.

    Returns (symbolList, removedTickers, addedTickers).
    """
    ###
    ### Query nasdaq.com for updated list of stocks in Nasdaq 100 index.
    ### Return list with stock tickers.
    ###
    #import urllib
    import requests
    import re
    import os
    import datetime
    ###
    ### get symbol list from previous period
    ###
    symbol_directory = os.path.join( os.getcwd(), "symbols" )
    symbol_file = "Naz100_symbols.txt"
    symbols_file = os.path.join( symbol_directory, symbol_file )
    with open(symbols_file, "r+") as f:
        old_symbolList = f.readlines()
    # strip trailing newlines in place
    for i in range( len(old_symbolList) ) :
        old_symbolList[i] = old_symbolList[i].replace("\n","")
    ###
    ### get current symbol list from nasdaq website
    ###
    try:
        base_url = 'http://www.nasdaq.com/quotes/nasdaq-100-stocks.aspx'
        content = requests.get(base_url).text
        #print "\n\n\n content = ", content
        # pull the javascript 'var table_body' array out of the page source
        m = re.search('var table_body.*?>*(?s)(.*?)<.*?>.*?<', content).group(0).split("],[")
        # handle exceptions in format for first and last entries in list
        m = m[0].split(",\r\n")
        m[0] = m[0].split("[")[2]
        m[-1] = m[-1].split("];")[0]
        print "****************"
        for i in range( len(m) ):
            print i, m[i]
        print "len of m = ",len(m)
        print "****************"
        # parse list items for symbol name
        symbolList = []
        companyNamesList = []
        for i in range( len(m) ):
            symbolList.append( m[i].split(",")[0].split('"')[1] )
            companyNamesList.append( m[i].split(",")[1].split('"')[1] )
        # persist "TICKER;Company Name" pairs for later lookups
        companyName_file = os.path.join( symbol_directory, "companyNames.txt" )
        with open( companyName_file, "w" ) as f:
            for i in range( len(symbolList) ) :
                f.write( symbolList[i] + ";" + companyNamesList[i] + "\n" )
        ###
        ### compare old list with new list and print changes, if any
        ###
        # file for index changes history
        symbol_change_file = "Naz100_symbolsChanges.txt"
        symbols_changes_file = os.path.join( symbol_directory, symbol_change_file )
        with open(symbols_changes_file, "r+") as f:
            old_symbol_changesList = f.readlines()
        old_symbol_changesListText = ''
        for i in range( len(old_symbol_changesList) ):
            old_symbol_changesListText = old_symbol_changesListText + old_symbol_changesList[i]
        # parse date
        year = datetime.datetime.now().year
        month = datetime.datetime.now().month
        day = datetime.datetime.now().day
        dateToday = str(year)+"-"+str(month)+"-"+str(day)
        # compare lists to check for tickers removed from the index
        # - printing will be suppressed if "verbose = False"
        # NOTE(review): removedTickersText is only initialized on the first
        # loop pass, so an empty old_symbolList leaves it undefined and the
        # later f.write() would fall through to the bare except below.
        removedTickers = []
        print ""
        for i, ticker in enumerate( old_symbolList ):
            if i == 0:
                removedTickersText = ''
            if ticker not in symbolList:
                removedTickers.append( ticker )
                if verbose:
                    print " Ticker ", ticker, " has been removed from the Nasdaq100 index"
                removedTickersText = removedTickersText + "\n" + dateToday + " Remove " + ticker
        # compare lists to check for tickers added to the index
        # - printing will be suppressed if "verbose = False"
        addedTickers = []
        print ""
        for i, ticker in enumerate( symbolList ):
            if i == 0:
                addedTickersText = ''
            if ticker not in old_symbolList:
                addedTickers.append( ticker )
                if verbose:
                    print " Ticker ", ticker, " has been added to the Nasdaq100 index"
                addedTickersText = addedTickersText + "\n" + dateToday + " Add " + ticker
        print ""
        # prepend today's changes to the history file (newest entries first)
        with open(symbols_changes_file, "w") as f:
            f.write(addedTickersText)
            f.write(removedTickersText)
            f.write("\n")
            f.write(old_symbol_changesListText)
        print "****************"
        print "addedTickers = ", addedTickers
        print "removedTickers = ", removedTickers
        print "****************"
        ###
        ### update symbols file with current list. Keep copy of old list.
        ###
        if removedTickers != [] or addedTickers != []:
            # make copy of previous symbols list file
            symbol_directory = os.path.join( os.getcwd(), "symbols" )
            symbol_file = "Naz100_symbols.txt"
            archive_symbol_file = "Naz100_symbols__" + str(datetime.date.today()) + ".txt"
            symbols_file = os.path.join( symbol_directory, symbol_file )
            archive_symbols_file = os.path.join( symbol_directory, archive_symbol_file )
            with open( archive_symbols_file, "w" ) as f:
                for i in range( len(old_symbolList) ) :
                    f.write( old_symbolList[i] + "\n" )
            # make new symbols list file
            with open( symbols_file, "w" ) as f:
                for i in range( len(symbolList) ) :
                    f.write( symbolList[i] + "\n" )
    except:
        ###
        ### something didn't work. print message and return old list.
        ###
        print "\n\n\n"
        print "! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! "
        print " Nasdaq sysmbols list did not get updated from web."
        print " ... check quotes_for_list_adjCloseVol.py in function 'get_Naz100List' "
        print " ... also check web at http://www.nasdaq.com/quotes/nasdaq-100-stocks.aspx"
        print "! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! "
        print "\n\n\n"
        symbolList = old_symbolList
        removedTickers = []
        addedTickers = []
    return symbolList, removedTickers, addedTickers
def get_Naz100PlusETFsList( verbose=True ):
###
### Query nasdaq.com for updated list of stocks in Nasdaq 100 index.
### Return list with stock tickers.
###
import urllib
import re
import os
import datetime
###
### get symbol list from previous period
###
symbol_directory = os.path.join( os.getcwd(), "symbols" )
symbol_file = "Naz100PlusETFs_symbols.txt"
symbols_file = os.path.join( symbol_directory, symbol_file )
with open(symbols_file, "r+") as f:
old_symbolList = f.readlines()
for i in range( len(old_symbolList) ) :
old_symbolList[i] = old_symbolList[i].replace("\n","")
###
### get current symbol list from nasdaq website
###
base_url = 'http://www.nasdaq.com/quotes/nasdaq-100-stocks.aspx'
content = urllib.urlopen(base_url).read()
m = re.search('var table_body.*?>*(?s)(.*?)<.*?>.*?<', content).group(0).split("],[")
# handle exceptions in format for first and last entries in list
m[0] = m[0].split("[")[2]
m[-1] = m[-1].split("]")[0].split("[")[0]
# parse list items for symbol name
symbolList = []
for i in range( len(m) ):
symbolList.append( m[i].split(",")[0].split('"')[1] )
###
### compare old list with new list and print changes, if any
###
# compare lists to check for tickers removed from the index
# - printing will be suppressed if "verbose = False"
removedTickers = []
print ""
for i, ticker in enumerate( symbolList ):
if ticker not in old_symbolList:
removedTickers.append( ticker )
if verbose:
print " Ticker ", ticker, " has been removed from the Nasdaq100 index"
# add GTAA asset classes to Naz100 tickers for extra diversity
ETF_List = ['AGG', 'CEW', 'DBC', 'EEM', 'EMB', 'FXE', 'GLD', 'HYG', 'IVV', 'LQD', 'TIP', 'TLT', 'USO', 'VNQ', 'XLF', 'XWD.TO' ]
for i in range( len(ETF_List) ) :
symbolList.append( ETF_List[i] )
# compare lists to check for tickers added to the index
# - printing will be suppressed if "verbose = False"
addedTickers = []
print ""
for i, ticker in enumerate( old_symbolList ):
if ticker not in symbolList:
addedTickers.append( ticker )
if verbose:
print " Ticker ", ticker, " has been added to the Nasdaq100 index"
print ""
###
### update symbols file with current list. Keep copy of of list.
###
if removedTickers != [] or addedTickers != []:
# make copy of previous symbols list file
symbol_directory = os.path.join( os.getcwd(), "symbols" )
symbol_file = "Naz100_Symbols.txt"
archive_symbol_file = "Naz100_Symbols__" + str(datetime.date.today()) + ".txt"
symbols_file = os.path.join( symbol_directory, symbol_file )
archive_symbols_file = os.path.join( symbol_directory, archive_symbol_file )
with open( archive_symbols_file, "w" ) as f:
for i in range( len(old_symbolList) ) :
f.write( old_symbolList[i] + "\n" )
# make new symbols list file
with open( symbols_file, "w" ) as f:
for i in range( len(symbolList) ) :
f.write( symbolList[i] + "\n" )
return symbolList.sort(), removedTickers, addedTickers
def arrayFromQuotesForList(symbolsFile, beginDate, endDate):
    """Build a clean 2-D price array for every symbol in ``symbolsFile``.

    Downloads split/dividend-adjusted closing quotes between ``beginDate``
    and ``endDate``, then repairs each series by linearly interpolating
    interior gaps and back-filling the first valid quote to the start.

    Returns (x, symbolList, datearray) where x is shaped
    [num stocks, num days] and datearray holds ISO date strings.
    """
    from functions.TAfunctions import interpolate
    from functions.TAfunctions import cleantobeginning
    # tickers to fetch, then their adjusted-close quote table
    symbols = readSymbolList(symbolsFile, verbose=True)
    quote = downloadQuotes(symbols, date1=beginDate, date2=endDate, adjust=True, Verbose=True)
    # rows = symbols, columns = trading days
    x = quote.values.T
    datearray = np.array([d.date().isoformat() for d in quote.index])
    symbolList = list(quote.columns.values)
    # repair every series: force float dtype, fill interior NaNs by linear
    # interpolation, then copy the first valid quote back to the beginning
    for row in range(x.shape[0]):
        series = np.array(x[row, :]).astype('float')
        series = interpolate(series)
        x[row, :] = cleantobeginning(series)
    return x, symbolList, datearray
def arrayFromQuotesForListWithVol(symbolsFile, beginDate, endDate):
    '''
    read in quotes and process to 'clean' ndarray plus date array
    - prices in array with dimensions [num stocks : num days ]
    - process stock quotes to show closing prices adjusted for splits, dividends
    - single ndarray with dates common to all stocks [num days]
    - clean up stocks by:
       - infilling empty values with linear interpolated value
       - repeat first quote to beginning of series

    Returns (x, symbolList, datearray).
    NOTE(review): despite the "WithVol" name this returns prices only, as
    the original did; volume handling was never implemented here.
    '''
    # bug fixes: the old body called the nonexistent quote.copyx() and
    # quote.getlabel(2), indexed the 2-D array as 3-D, never imported
    # interpolate/cleantobeginning, and returned an undefined symbolList.
    # Now mirrors arrayFromQuotesForList above.
    from functions.TAfunctions import interpolate
    from functions.TAfunctions import cleantobeginning
    # read symbols list
    symbols = readSymbolList(symbolsFile, verbose=True)
    # get quotes for each symbol in list (adjusted close)
    quote = downloadQuotes(symbols, date1=beginDate, date2=endDate, adjust=True, Verbose=True)
    # clean up quotes for missing values and varying starting date
    x = quote.values.T
    datearray = np.array([d.date().isoformat() for d in quote.index])
    symbolList = list(quote.columns.values)
    # - infill interior NaN values using nearest good values to linearly interpolate
    # - copy first valid quote from valid date to all earlier positions
    for ii in range(x.shape[0]):
        x[ii, :] = np.array(x[ii, :]).astype('float')
        x[ii, :] = interpolate(x[ii, :])
        x[ii, :] = cleantobeginning(x[ii, :])
    return x, symbolList, datearray
def get_quote_google( symbol ):
    """Scrape the (15-minute delayed) last price for ``symbol`` from Google
    Finance.  Returns the price as a string, or a 'no quote available'
    message when the page yields an empty match.
    """
    import urllib
    import re
    url = 'http://finance.google.com/finance?q=NASDAQ%3A' + symbol
    page = urllib.urlopen(url).read()
    # the price sits inside the element tagged class="pr"
    match = re.search('class="pr".*?>*(?s)(.*?)<.*?>.*?<', page)
    price = match.group(0).split(">")[-1].split("<")[0]
    if not price:
        return 'no quote available for: ' + symbol
    return price
def get_pe_google( symbol ):
    """Scrape the trailing P/E ratio for ``symbol`` from Google Finance.

    Returns the ratio as a float, or the empty string when the page layout
    does not match (missing pe_ratio marker or unparsable number).
    """
    import urllib
    base_url = 'http://finance.google.com/finance?q=NASDAQ%3A'
    content = urllib.urlopen(base_url + symbol).read()
    try:
        # the value follows the "pe_ratio" marker in the page source
        return float(content.split("pe_ratio")[1].split('\n')[2].split(">")[-1])
    except (IndexError, ValueError):
        # narrowed from a bare except: these are the failures the string
        # slicing and float() conversion can actually raise here
        return ""
def LastQuotesForSymbolList( symbolList ):
    """
    read in latest (15-minute delayed) quote for each symbol in list.
    Use google for each symbol's quote.
    """
    from time import sleep
    quotelist = []
    for itick, ticker in enumerate( symbolList ):
        if ticker == 'CASH':
            # CASH is a pseudo-ticker that is always worth 1.0
            print "ticker, quote = CASH 1.0"
            quotelist.append(1.0)
        else:
            try:
                data = get_quote_google( ticker )
                print "ticker, quote = ", ticker, data
                # Remove comma from data (thousands separator)
                data = data.replace(",", "")
                # NOTE(review): quotes are appended as strings here, while
                # CASH is appended as a float -- callers receive mixed types.
                quotelist.append( data )
            except:
                # retry trick: re-insert the failed ticker immediately after
                # the current position so enumerate() visits it again.  This
                # mutates the caller's list and can loop indefinitely if the
                # quote never succeeds.
                print "could not get quote for ", ticker, " will try again and again."
                sleep(3)
                symbolList[itick+1:itick+1] = [ticker]
    return quotelist
def LastQuotesForList( symbols_list ):
from time import sleep
from functions.StockRetriever import *
stocks = StockRetriever()
# remove 'CASH' from symbols_list, if present. Keep track of position in list to re-insert
cash_index = None
try:
cash_index = symbols_list.index('CASH')
if cash_index >= 0 and cash_index <= len(symbols_list)-1 :
symbols_list.remove('CASH')
except:
pass
attempt = 1
NeedQuotes = True
while NeedQuotes:
try:
a=stocks.get_current_info( symbols_list )
print "inside LastQuotesForList 1, symbols_list = ", symbols_list
print "inside LastQuotesForList 1, attempt = ", attempt
# convert from strings to numbers and put in a list
quotelist = []
for i in range(len(a)):
singlequote = np.float((a[i]['LastTradePriceOnly']).encode('ascii','ignore'))
quotelist.append(singlequote)
print symbols_list, quotelist
NeedQuotes = False
except:
attempt += 1
sleep(attempt)
print "inside LastQuotesForList... location 2"
# re-insert CASH in original position and also add curent price of 1.0 to quotelist
if cash_index != None:
if cash_index < len(symbols_list):
symbols_list[cash_index:cash_index] = 'CASH'
quotelist[cash_index:cash_index] = 1.0
else:
symbols_list.append('CASH')
quotelist.append(1.0)
print "attempts, sysmbols_list,quotelist =", attempt, symbols_list, quotelist
return quotelist
| [
"donpg1@yahoo.com"
] | donpg1@yahoo.com |
9615461a5293189ea9ccf409f475b6f55413cc97 | 7a550d2268bc4bc7e2fec608ffb1db4b2e5e94a0 | /1201-1300/1273-Delete Tree Nodes/1273-Delete Tree Nodes.py | 5aaf4cc9fdfc040b2b1d974afc0e5e5b2294eae5 | [
"MIT"
] | permissive | jiadaizhao/LeetCode | be31bd0db50cc6835d9c9eff8e0175747098afc6 | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | refs/heads/master | 2021-11-05T04:38:47.252590 | 2021-10-31T09:54:53 | 2021-10-31T09:54:53 | 99,655,604 | 52 | 28 | MIT | 2020-10-02T12:47:47 | 2017-08-08T05:57:26 | C++ | UTF-8 | Python | false | false | 545 | py | import collections
class Solution:
    def deleteTreeNodes(self, nodes: int, parent: List[int], value: List[int]) -> int:
        """Return how many nodes remain after repeatedly removing every
        subtree whose values sum to zero.

        parent[i] is the parent of node i (parent[0] == -1 for the root);
        value[i] is node i's value.  ``nodes`` is the node count supplied
        by the problem statement and is not needed by this approach.
        """
        children = collections.defaultdict(list)
        for node in range(1, len(parent)):
            children[parent[node]].append(node)

        def walk(node):
            # returns (subtree sum, surviving node count) for `node`
            subtotal, kept = value[node], 1
            for child in children[node]:
                child_total, child_kept = walk(child)
                subtotal += child_total
                kept += child_kept
            if subtotal == 0:
                kept = 0
            return subtotal, kept

        return walk(0)[1]
| [
"jiadaizhao@gmail.com"
] | jiadaizhao@gmail.com |
c182a9588efd965178c2f3e7b6f10c4a37750821 | 8435da9eccc5c12bd92bb21185ca1dcc6495dd5b | /coursera code/coursera1.py | 675c311d420dad96bc0e79eb17ddeee9b76c079b | [] | no_license | Lynn-Lau/Python-Practice | 2d0a240382e74c11a09d6f2e4724fd00a89ff508 | 54b569db3db32f3e0af013098cd146eaaa1ea28f | refs/heads/master | 2021-01-17T12:49:33.601611 | 2016-07-21T13:02:42 | 2016-07-21T13:02:42 | 56,281,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | '''
A Simple Program for Coursera Python Data Structure $ Chapter 8 List
Open a file and read the file line by line.Split the line into words
and print them.May be some words shoud be split twice.
Author: Lynn-Lau
Origin Date: 2016/07/14 Version: 1.0
Modified Date: 2016/07/16 Version: 1.1
'''
# Read D:romeo.txt, split each line into words, and print the unique words
# in first-seen order after an attempted sort (see NOTE below).
path = 'D:romeo.txt'
List = []          # every word from the file, in order of appearance
sortedList = []    # de-duplicated words
CountLine = 0      # number of lines read
file = open(path)
for line in file:
    Line = line.split()
    for item in Line:
        List.append(item)
    CountLine = CountLine + 1
# NOTE(review): list.sort() sorts List in place and returns None, so
# SortedList is always None; the dedup loop below relies on List already
# being sorted by this side effect.
SortedList = List.sort()
for item in List:
    if item not in sortedList:
        sortedList.append(item)
print sortedList
| [
"liu.silin@foxmail.com"
] | liu.silin@foxmail.com |
2166dde51f70387142d0bcdbdb804d5c418968d9 | 3155f0937b67dd99f2a583e5132acc2253c677a2 | /Chapter2-ApplicationLayer/Assignments/assign3/smtpClient.py | 34328c904b6b3fa77a0f602856afe2204de09aa1 | [] | no_license | BIT-zhaoyang/ComputerNetwork | 422d05db98ec339ea4079c5c982057c23e3b8b47 | 0fc832fb5c78d370fca17d503be9dc1e218cfe7e | refs/heads/master | 2021-04-15T17:08:59.307426 | 2018-03-26T01:58:28 | 2018-03-26T01:58:28 | 126,758,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,779 | py | #!/usr/bin/python
import socket,ssl,base64
# Minimal SMTP-over-SSL client: speaks the protocol by hand (HELO, AUTH
# LOGIN, MAIL FROM, RCPT TO, DATA, QUIT) against smtp.163.com:465 and
# prints every server reply for inspection.
# SECURITY NOTE(review): credentials are hard-coded below -- move them to
# environment variables before reusing this script.
msg = "\r\n I love computer networks!"    # NOTE(review): unused
endmsg = "\r\n.\r\n"                       # NOTE(review): unused
mailServer = ("smtp.163.com", 465)
# Build safe connect (implicit TLS on port 465)
context = ssl.create_default_context()
sslSocket = context.wrap_socket(socket.socket(socket.AF_INET, socket.SOCK_STREAM), server_hostname="smtp.163.com")
sslSocket.connect(mailServer)
recv = sslSocket.recv(1024)
print recv
if recv[:3] != '220':
    print '220 reply not received from server.'
# Send HELO command and print server response
heloCommand = 'HELO ZhaoYang \r\n'
sslSocket.send(heloCommand)
recv = sslSocket.recv(1024)
print recv
if recv[:3] != '250':
    print '250 reply not received from server.'
# Safe login: AUTH LOGIN expects base64-encoded username then password
user = "japan07033407702@163.com"
passwd = "LXkU54SC7gNQWJoV"
authCommand = 'AUTH LOGIN\r\n'
sslSocket.send(authCommand)
recv = sslSocket.recv(1024)
print recv
if recv[:3] != '334':
    print '334 reply not received from server.'
sslSocket.send(base64.b64encode(user) + '\r\n')
recv = sslSocket.recv(1024)
print recv
if recv[:3] != '334':
    print '334 reply not received from server.'
sslSocket.send(base64.b64encode(passwd) + '\r\n')
recv = sslSocket.recv(1024)
print recv
if recv[:3] != '235':
    print '235 reply not received from server. Login failed.'
# Send MAIL FROM command and print server response.
mailfromCommand = 'MAIL FROM: <japan07033407702@163.com>\r\n'
sslSocket.send(mailfromCommand)
print sslSocket.recv(1024)
# Send RCPT TO command and print server response
rcpttoCommand = 'RCPT TO: <japan07033407702@163.com>\r\n'
sslSocket.send(rcpttoCommand)
print sslSocket.recv(1024)
# Send DATA command and print server response
sslSocket.send('DATA\r\n')
print sslSocket.recv(1024)
# Send message data; the trailing "\r\n.\r\n" terminates the DATA section.
message = """Subject: How to send an email using SMTP
Hello World! I love you!
This is not a spam email, but a test one. Don\'t intercept this please!
The record needs to be set straight once and for all.
I'm absolutely trying to give you something and asking for nothing in return so you can just cash me outside how bow dah?
In all seriousness. This E-Book I'm giving you won't c0st you a dime or penny. But, it won't be around forever. After 7 days, I'm snatching it away. (I don't believe in no takey backeys).
If you wanna check it out just hit that link below and you'll be blessed with all types of knowledge.
Cash Me Outside
Peace,
Austin Dunham
PS: If she actually had the chance to read this E-Book she probably wouldn't have ever lost that fight in the first place. (Just Saying)
PPS: What are you doing at the bottom of this email? The clock is taking. Take advantage of this sweet deal before it goes away forever.
\r\n.\r\n"""
sslSocket.send(message)
print sslSocket.recv(1024)
# QUIT
sslSocket.send('QUIT\r\n')
print sslSocket.recv(1024)
| [
"bit.zhaoyang512@gmail.com"
] | bit.zhaoyang512@gmail.com |
82b58aed3ca85fe890296c8b74ef57978bd1651b | cb0ac5754bf5a2f2c9a9640e9816b02107ea8898 | /authentication/views.py | 6473b2ba2db16e368133234c2f46261dc17569d9 | [] | no_license | mnk179/dreamcather-backend | 342020a2857adca45760913c8aed98fc847d55c8 | e68cfec3b1b2e2b6f0c8aea3a78ae4c0ddc7f676 | refs/heads/master | 2022-05-04T00:21:17.462481 | 2019-11-16T18:03:39 | 2019-11-16T18:03:39 | 222,138,954 | 0 | 0 | null | 2022-04-22T22:43:13 | 2019-11-16T18:01:16 | Python | UTF-8 | Python | false | false | 1,764 | py | from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from . import serializers
@api_view(['POST'])
def create_user(request):
    """Register a new user and return the serialized user plus auth token.

    Responds 201 with the user data and DRF token on success, or 400 with
    the serializer errors when validation fails.
    """
    serialized = serializers.UserSerializer(data=request.data)
    if serialized.is_valid():
        user = serialized.create(request.data)
        # the post_save receiver in this module creates the token, so it
        # can be looked up immediately after user creation
        token = {"token": Token.objects.get(user=user).key}
        appended = {**serialized.data, **token}
        return Response(appended, status=status.HTTP_201_CREATED)
    else:
        # NOTE(review): uses the private _errors attribute; the public
        # `serialized.errors` is the documented equivalent -- confirm.
        return Response(serialized._errors, status=status.HTTP_400_BAD_REQUEST)
@csrf_exempt
@api_view(["POST"])
def login(request):
    """Exchange username/password credentials for a DRF auth token."""
    credentials = (request.data.get("username"), request.data.get("password"))
    if None in credentials:
        return Response(
            {'error': 'Please provide both username and password'},
            status=status.HTTP_400_BAD_REQUEST,
        )
    username, password = credentials
    user = authenticate(username=username, password=password)
    if user:
        token, _ = Token.objects.get_or_create(user=user)
        return Response(
            {'token': token.key, 'username': user.username},
            status=status.HTTP_200_OK,
        )
    return Response({'error': 'Invalid Credentials'},
                    status=status.HTTP_404_NOT_FOUND)
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Issue a DRF auth token automatically for every newly created User."""
    if created:
        Token.objects.create(user=instance)
| [
"martynas.nor@gmail.com"
] | martynas.nor@gmail.com |
b04b9e636a59a5c6f889dd245dae7adbd868af09 | 8f6946286dfad1d61be7425dde737daed7027c3f | /ckstyle/command/args.py | 0c7551eafb4fb344689f8901ef20f989f68d82c1 | [
"BSD-3-Clause"
] | permissive | ljspace/CSSCheckStyle | e75d7616d8c9444b581b38a91a10aff7b4f731ad | c12be2181d6576349bf52c218d8fb1809c11da12 | refs/heads/master | 2021-01-16T20:29:55.157662 | 2013-04-16T03:58:36 | 2013-04-16T03:58:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | class CommandArgs():
    def __init__(self):
        """Default options for a ckstyle command invocation."""
        self.operation = None
        # minimum severity of messages to report
        self.errorLevel = 2
        # recurse into sub-directories when collecting css files
        self.recursive = False
        # echo results to stdout instead of only writing report files
        self.printFlag = False
        # suffix for generated check-report files
        self.extension = '.ckstyle.txt'
        self.include = 'all'
        self.exclude = 'none'
        self.standard = ''
        self.exportJson = False
        self.ignoreRuleSets = ['@unit-test-expecteds']
        # suffix for auto-fixed css output
        self.fixedExtension = '.fixed.css'
        self.fixToSingleLine = False
        # nested options for the compress sub-command
        self.compressConfig = CompressArgs()
        self.safeMode = False
        self.noBak = False
        # current browser
        self._curBrowser = None
    def __str__(self):
        # one-line-per-option summary of the most commonly tweaked settings
        return 'errorLevel: %s\n recursive: %s\n printFlag: %s\n extension: %s\n include: %s\n exclude: %s' % (self.errorLevel, self.recursive, self.printFlag, self.extension, self.include, self.exclude)
class CompressArgs():
    """Options controlling CSS compression output."""
    def __init__(self):
        self.extension = '.min.css'   # suffix for compressed output files
        self.combineFile = True       # combine rules into a single file
        self.browsers = None          # target browsers (None = all)
        self.noBak = False            # skip writing backup files
    def __str__(self):
        # bug fix: the old template referenced attributes that do not exist
        # (recursive/combineAttr/combineRuleSet) and passed 6 values to 3
        # %s slots, so printing a CompressArgs instance always raised.
        return 'extension: %s, combineFile: %s, browsers: %s' % (self.extension, self.combineFile, self.browsers)
| [
"wangjeaf@gmail.com"
] | wangjeaf@gmail.com |
bf8c7a1eb3e446dca8f0a6dc4e01d8b8f26a1264 | 9a10aabe68f9e98e22362abd9ef79cc784ba55e0 | /snooty/cache.py | e8cfef1648860aed5dcc1b915d55d9d11322f7d8 | [
"Apache-2.0",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-free-unknown",
"Python-2.0"
] | permissive | mrlynn/snooty-parser | 5594aa3a082bee15dc543b574c125a93d7536986 | 2df825d324bf8f73646a8c2299cf765690c1a93f | refs/heads/master | 2023-04-15T17:38:26.310408 | 2020-10-21T17:32:15 | 2020-10-21T20:28:06 | 306,339,200 | 0 | 0 | Apache-2.0 | 2023-04-04T01:26:41 | 2020-10-22T13:03:06 | null | UTF-8 | Python | false | false | 1,475 | py | from collections import defaultdict
from dataclasses import dataclass, field
from typing import Set, Tuple, Dict, DefaultDict, Optional, Iterator, TypeVar, Generic
_T = TypeVar("_T")


@dataclass
class Cache(Generic[_T]):
    """A versioned cache keyed by (fileid, slot) pairs.

    Each key maps to an arbitrary object plus an integer version; the
    version is bumped the first time a key is stored.  All keys belonging
    to one fileid can be evicted together with ``del cache[fileid]``.
    """

    _cache: Dict[Tuple[_T, int], object] = field(default_factory=dict)
    _keys_of_each_fileid: DefaultDict[_T, Set[int]] = field(
        default_factory=lambda: defaultdict(set)
    )
    _versions: DefaultDict[Tuple[_T, int], int] = field(
        default_factory=lambda: defaultdict(int)
    )

    def __setitem__(self, key: Tuple[_T, int], value: object) -> None:
        first_time = key not in self._cache
        self._cache[key] = value
        if first_time:
            # NOTE(review): the version is bumped only the first time a key
            # is stored, not on re-assignment as the original docstring
            # claimed -- confirm intended semantics against callers.
            self._versions[key] += 1
            self._keys_of_each_fileid[key[0]].add(key[1])

    def __delitem__(self, fileid: _T) -> None:
        # pop with a default so an unknown fileid is a silent no-op,
        # matching the defaultdict-based behavior of the original
        stale = self._keys_of_each_fileid.pop(fileid, set())
        for slot in stale:
            del self._cache[(fileid, slot)]

    def __getitem__(self, key: Tuple[_T, int]) -> Optional[object]:
        return self._cache.get(key, None)

    def get_versions(self, fileid: _T) -> Iterator[int]:
        for key, version in self._versions.items():
            if key[0] == fileid:
                yield version
| [
"i80and@foxquill.com"
] | i80and@foxquill.com |
a6f529f5c6d00ff3aba3df255dba713e85eac766 | 1c07579679f8a4c861777cff4faf30a8064862db | /social/__init__.py | e993f2fda050c964968927a8b09d383d28ce9fcc | [
"BSD-3-Clause",
"Python-2.0",
"BSD-2-Clause"
] | permissive | florelui001/python-social-auth | 86591a1c12dfc011a0d755a7b397691e54821400 | 81093d2135c3eafd6fc5dd763f31a7889a9f1ce4 | refs/heads/master | 2021-01-15T08:10:42.444979 | 2014-02-27T21:00:17 | 2014-02-27T21:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | """
python-social-auth application, allows OpenId or OAuth user
registration/authentication just adding a few configurations.
"""
version = (0, 1, 22)
extra = '-dev'
# "major.minor.patch" joined with dots, plus the development suffix
__version__ = '.'.join(str(part) for part in version) + extra
| [
"matiasaguirre@gmail.com"
] | matiasaguirre@gmail.com |
20b8cce1f5fab387d925e323c5d9bd3f3d64c01a | 0bf2dfce698190e61cecc1bbb07ba03d98497157 | /patch_library.py | 8bbd041bf69fac354d7c0b2c1f5a99e9997f4a15 | [
"MIT"
] | permissive | Edwinngera/Brain-Tumour-Segmentation | 2cd9bad67ea304e97a28ef2c67620155d589ddbf | a87a3f4bc62a19fdf3e2699d70ad588e18e0ff6f | refs/heads/master | 2020-04-12T11:40:00.806007 | 2018-02-10T13:23:23 | 2018-02-10T13:23:23 | 162,466,847 | 1 | 0 | MIT | 2018-12-19T16:59:42 | 2018-12-19T16:59:42 | null | UTF-8 | Python | false | false | 7,373 | py | import numpy as np
import random
import os
from glob import glob
import matplotlib
import matplotlib.pyplot as plt
from skimage import io
from skimage.filters.rank import entropy
from skimage.morphology import disk
import progressbar
from sklearn.feature_extraction.image import extract_patches_2d
# shared progress bar reused by make_training_patches; fixed RNG seed keeps
# the numpy side of patch sampling reproducible across runs
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
np.random.seed(5)
class PatchLibrary(object):
    """Samples training patches from BRATS2015 brain-tumour PNG slices.

    Image PNGs are assumed to stack 5 channels of 240x240 pixels
    (4 MRI modalities + 1 extra plane that is discarded); ground-truth
    label PNGs live in a hard-coded Labels/ directory and are matched by
    filename with an 'L' suffix.  TODO(review): confirm the PNG layout
    and make the paths configurable.
    """
    def __init__(self, patch_size, train_data, num_samples):
        self.patch_size = patch_size    # (height, width) of patches to cut
        self.train_data = train_data    # list of training image paths
        self.num_samples = num_samples  # total patches to produce
        self.h = self.patch_size[0]
        self.w = self.patch_size[1]
    def find_patches_unet(self):
        """Sample full 208x208 images + per-class binary masks for a U-Net.

        Draws num_samples/5 images per label class (an image qualifies for
        class i when at least 10 pixels carry that label) and returns
        (images, one-hot mask stacks) as numpy arrays.
        """
        h, w = self.patch_size[0], self.patch_size[1]
        # Python 2 integer division: num_samples is split evenly over 5 classes
        per_class = self.num_samples/ 5
        patches, labels = [], []
        for i in xrange(5):
            print 'Finding patches of class {}...'.format(i)
            ct = 0
            while ct<per_class:
                # keep drawing random images until one contains enough
                # pixels of the target class
                im_path = random.choice(self.train_data)
                fn = os.path.basename(im_path)
                label = io.imread('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/Labels/' + fn[:-4] + 'L.png')
                #print("Reached here")
                while len(np.argwhere(label == i)) < 10:
                    #print("Stuck here")
                    im_path = random.choice(self.train_data)
                    fn = os.path.basename(im_path)
                    label = io.imread('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/Labels/' + fn[:-4] + 'L.png')
                # drop the last stacked plane, then crop a 16-pixel border
                img = io.imread(im_path).reshape(5, 240, 240)[:-1].astype('float')
                img = img[:, 16:-16, 16:-16]
                label = label[16:-16, 16:-16]
                # build a one-hot boolean mask per class
                patch_gt = []
                for j in xrange(5):
                    temp_label = (label ==j)
                    patch_gt.append(temp_label)
                patch_gt = np.array(patch_gt)
                patches.append(img)
                labels.append(patch_gt)
                ct = ct+1
                print (float(ct)/per_class) *100
        return np.array(patches), np.array(labels)
        #small_patch = np.array([i[p_six[0]:p_six[1], p_six[2]:p_six[3]] for i in img])
        #print("This too clear")
    def find_patches(self, class_num, num_patches):
        """Sample num_patches (4, h, w) patches centred on class_num pixels.

        Also cuts a matching 5x5 "small" patch around the same centre.
        Returns (patches, small_patches, labels) where each patch slice is
        normalized to [0, 1] by its own maximum.
        NOTE(review): the small patches are returned un-normalized here;
        make_training_patches normalizes them afterwards.
        """
        h, w = self.patch_size[0], self.patch_size[1]
        patches, labels = [], np.full(num_patches, class_num, 'float')
        small_patches = []
        print 'Finding patches of class {}...'.format(class_num)
        # NOTE(review): created but never used
        progress_another = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
        ct = 0
        while ct<num_patches:
            im_path = random.choice(self.train_data)
            fn = os.path.basename(im_path)
            label = io.imread('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/Labels/' + fn[:-4] + 'L.png')
            #print("Reached here")
            # resample until the image has at least 10 pixels of the class
            while len(np.argwhere(label == class_num)) < 10:
                #print("Stuck here")
                im_path = random.choice(self.train_data)
                fn = os.path.basename(im_path)
                label = io.imread('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/Labels/' + fn[:-4] + 'L.png')
            img = io.imread(im_path).reshape(5, 240, 240)[:-1].astype('float')
            # pick a random pixel of the class as the patch centre
            p = random.choice(np.argwhere(label == class_num))
            p_ix = (p[0]-(h/2), p[0]+((h+1)/2), p[1]-(w/2), p[1]+((w+1)/2))
            p_six = (p[0]-(5/2), p[0]+((5+1)/2), p[1]-(5/2), p[1]+((5+1)/2))
            #print("This clear")
            patch = np.array([i[p_ix[0]:p_ix[1], p_ix[2]:p_ix[3]] for i in img])
            small_patch = np.array([i[p_six[0]:p_six[1], p_six[2]:p_six[3]] for i in img])
            #print("This too clear")
            if patch.shape != (4, h, w) or len(np.unique(patch)) == 1: #So that patch is not entirely background
                continue
            patches.append(patch)
            small_patches.append(small_patch)
            ct = ct+1
            print (float(ct)/num_patches) *100
        # normalize every modality slice of every patch by its own maximum
        # (note: `slice` shadows the builtin of the same name)
        p = patches
        for img_ix in xrange(len(p)):
            for slice in xrange(len(p[img_ix])):
                if np.max(p[img_ix][slice]) != 0:
                    p[img_ix][slice] /= np.max(p[img_ix][slice])
        return np.array(p),np.array(small_patches), labels
    def make_training_patches(self, entropy = False, balanced_classes = True, classes = [0, 1, 2, 3, 4]):
        """Produce a balanced patch training set over `classes`.

        Returns (patches, small_patches, labels) reshaped to
        (num_samples, 4, h, w), (num_samples, 4, 5, 5) and (num_samples,).
        NOTE(review): `entropy` is accepted but unused, and the mutable
        default for `classes` is shared across calls (harmless here since
        it is never mutated).
        """
        if balanced_classes:
            per_class = self.num_samples/ len(classes)
            #per_class = [
            patches,s_patches, labels, labels_Unet = [],[], [], []
            progress.currval = 0
            for i in progress(xrange(len(classes))):
                p,q,l = self.find_patches(classes[i], per_class)
                # re-normalize each slice; find_patches already normalized p,
                # so this mainly normalizes the small patches q
                for img_ix in xrange(len(p)):
                    for slice in xrange(len(p[img_ix])):
                        if np.max(p[img_ix][slice]) != 0:
                            p[img_ix][slice] /= np.max(p[img_ix][slice])
                        if np.max(q[img_ix][slice]) != 0:
                            q[img_ix][slice] /= np.max(q[img_ix][slice])
                patches.append(p)
                s_patches.append(q)
                print(len(patches))
                labels.append(l)
            print(patches[0].shape)
            return np.array(patches).reshape(self.num_samples, 4, self.h, self.w), np.array(s_patches).reshape(self.num_samples, 4, 5, 5), np.array(labels).reshape(self.num_samples)
        else:
            print "Use balanced classes, random won't work."
if __name__ == '__main__':
    # Build a U-Net training set from the normalized BRATS PNGs and save
    # the image/mask arrays to disk (paths are machine-specific).
    train_data = glob('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/Norm_PNG/**')
    patches = PatchLibrary((32,32), train_data, 1000)
    #X,x,y = patches.make_training_patches()
    X, y = patches.find_patches_unet()
    y = y.astype(int)
    #X_o, x_o, y_o = patches.make_training_patches()
    np.save('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/X_unet_full.npy', X)
    #np.save('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/x.npy', x_o)
    #np.save('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/X.npy', X_o)
    np.save('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/y_unet_full.npy', y)
    #np.save('/media/hrituraj/New Volume/BRATS2015_Training/BRATS2015_Training/y.npy', y_o)
    #train_data = glob('/home/hrituraj/BRATS2015_Training/BRATS2015_Training/Norm_PNG/**')
    #Let_us_make_patches = PatchLibrary((33,33),train_data, 10000 )
    #Let_us_make_patches.make_training_images()
"noreply@github.com"
] | noreply@github.com |
a1cf96c8d43a4e0b3a16b690bbcbd7de73f7ad31 | 1cdea163c2b053d0ceb0c19ac0203a8d515736d6 | /venv/bin/easy_install | e58a660abf2a1ecf5715b7576976181e0e30fe72 | [] | no_license | git-nori/PortDjangoSampleBlog | 1cb4e52e699f89246a1912361b1fb9836e961d2e | ab8fddc392b19419ca393b2318f8f4ea94b30deb | refs/heads/dev | 2020-06-18T20:26:15.921897 | 2019-07-26T14:12:24 | 2019-07-26T14:12:24 | 196,435,019 | 0 | 0 | null | 2019-07-26T14:12:25 | 2019-07-11T17:04:30 | Python | UTF-8 | Python | false | false | 457 | #!/Users/satouhironori/PycharmProjects/PortDjangoSample/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # strip the "-script.py"/".exe" suffix so argv[0] matches the console name
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"maza.hs5360@gmail.com"
] | maza.hs5360@gmail.com | |
424d7fe665c85846cf1bb062915cf9447f3236be | ab3fb86c87c64c2ef59fa152b6a6e306edd07640 | /answer_key.py | 0b6276293645e3040f1da836e0d6a36466a6b308 | [] | no_license | joachimcordero/Crash-Course-in-Python---Final-Output | d2f2b1325185469097bb48b4ddc0ed5a70e887b9 | ddb982c46a49afc18e6b7128bd83adab7d8791ef | refs/heads/master | 2023-03-16T12:35:09.274321 | 2020-11-03T14:59:18 | 2020-11-03T14:59:18 | 345,056,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import re
import sys
import operator
import csv
# Tally INFO/ERROR log lines per user and count each distinct error
# message in syslog.log, then write two CSV reports.
users={}
errors={}
f=open("syslog.log")
for line in f:
    line=line.strip()
    # the username appears in parentheses, e.g. "... (jane.doe)"
    usrname=(re.search(r"\((.*)\)",line)).group(1)
    msg=(re.search(r"(ERROR|INFO)",line)).group(1)
    if (usrname not in users):
        user_count = {'INFO': 0, 'ERROR': 0}
        users[usrname] = user_count
    users[usrname][msg]+=1
    if msg=="ERROR":
        # the error description sits between "ERROR " and the final space
        err=(re.search(r"ERROR (.*) ",line)).group(1)
        if (err not in errors):
            errors[err]=0
        errors[err]+=1
f.close()
users2=[]
errors2=[]
# per-user rows sorted alphabetically by username
for key in sorted(users.keys()):
    users2.append([key,users[key]["INFO"],users[key]["ERROR"]])
# error rows sorted by descending frequency
for key, value in sorted(errors.items(), key=lambda item: item[1],reverse=True):
    errors2.append([key, value])
users2.insert(0,["Username","INFO","ERROR"])
errors2.insert(0,["Error","Count"])
fe=open("error_message.csv","w")
fu=open("user_statistics.csv","w")
writer1=csv.writer(fe)
writer2=csv.writer(fu)
writer1.writerows(errors2)
writer2.writerows(users2)
fe.close()
fu.close()
"joachimcorder@gmail.com"
] | joachimcorder@gmail.com |
aa7a161ab5068d20d258b20e3c6e0f813989f089 | efa856940078b88295fe11a6004b7e3b5065e4eb | /Problem Solving/Algorithms/Implementation/Easy/Cats and a Mouse.py | 7c9148e4b5e8efd798f1d3e6e9a89872f2c536ca | [] | no_license | AdithyaK47/HackerRank | ee3ed43e6345eee874f5664a71cabec48f091b86 | 6c1979f9c43e625d55741b545ce47ddbb6d048e3 | refs/heads/main | 2023-01-10T03:18:04.236606 | 2020-11-16T15:45:48 | 2020-11-16T15:45:48 | 301,372,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def catAndMouse(x, y, z):
if (abs(x - z) == abs(y - z)):
return "Mouse C"
elif (abs(x - z) < abs(y - z)):
return "Cat A"
else:
return "Cat B"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
xyz = input().split()
x = int(xyz[0])
y = int(xyz[1])
z = int(xyz[2])
result = catAndMouse(x, y, z)
fptr.write(result + '\n')
fptr.close()
| [
"adithyakrishna1@gmail.com"
] | adithyakrishna1@gmail.com |
b9608a0dee3287386b23d01f28dacb30bd1c0c08 | cec725bb2b9a2baa9ca97f8ee23be6db57f8be38 | /zemployed/bin/django-admin.py | fb6f32d253b52bc590141b8ea51357160308ff8d | [] | no_license | cdelaney314/zemployed | 3218d0e9fff3e56995bec8088dc459318e4867fe | 80609006332c8a8d88f2a81d71cda8a6a7ba8b1c | refs/heads/master | 2023-04-07T11:43:04.346453 | 2021-04-14T23:33:47 | 2021-04-14T23:33:47 | 358,065,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | #!/var/www/html/cse30246/zemployed/zemployed/bin/python3.9
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"wrobson@nd.edu"
] | wrobson@nd.edu |
b56cf17c850ee1b033aa5372bb53774fe8d95850 | dd221d1ab80a49190a0c93277e2471debaa2db95 | /hanlp/components/parsers/ud/ud_model.py | 729f49c1af9decf8c14572ed1691eed63ed91021 | [
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] | permissive | hankcs/HanLP | 29a22d4e240617e4dc67929c2f9760a822402cf7 | be2f04905a12990a527417bd47b79b851874a201 | refs/heads/doc-zh | 2023-08-18T12:48:43.533453 | 2020-02-15T17:19:28 | 2023-03-14T02:46:03 | 24,976,755 | 32,454 | 9,770 | Apache-2.0 | 2023-08-13T03:11:39 | 2014-10-09T06:36:16 | Python | UTF-8 | Python | false | false | 5,198 | py | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-15 14:21
from typing import Dict, Any
import torch
from hanlp.components.parsers.biaffine.biaffine_dep import BiaffineDependencyParser
from hanlp.components.parsers.biaffine.biaffine_model import BiaffineDecoder
from hanlp.components.parsers.ud.tag_decoder import TagDecoder
from hanlp.layers.embeddings.contextual_word_embedding import ContextualWordEmbeddingModule
from hanlp.layers.scalar_mix import ScalarMixWithDropout
class UniversalDependenciesModel(torch.nn.Module):
def __init__(self,
encoder: ContextualWordEmbeddingModule,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
num_rels,
num_lemmas,
num_upos,
num_feats,
mix_embedding: int = 13,
layer_dropout: int = 0.0):
super().__init__()
self.encoder = encoder
self.decoder = UniversalDependenciesDecoder(
encoder.get_output_dim(),
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
num_rels,
num_lemmas,
num_upos,
num_feats,
mix_embedding,
layer_dropout
)
def forward(self,
batch: Dict[str, torch.Tensor],
mask,
):
hidden = self.encoder(batch)
return self.decoder(hidden, batch=batch, mask=mask)
class UniversalDependenciesDecoder(torch.nn.Module):
def __init__(self,
hidden_size,
n_mlp_arc,
n_mlp_rel,
mlp_dropout,
num_rels,
num_lemmas,
num_upos,
num_feats,
mix_embedding: int = 13,
layer_dropout: int = 0.0,
) -> None:
super(UniversalDependenciesDecoder, self).__init__()
# decoders
self.decoders = torch.nn.ModuleDict({
'lemmas': TagDecoder(hidden_size, num_lemmas, label_smoothing=0.03, adaptive=True),
'upos': TagDecoder(hidden_size, num_upos, label_smoothing=0.03, adaptive=True),
'deps': BiaffineDecoder(hidden_size, n_mlp_arc, n_mlp_rel, mlp_dropout, num_rels),
'feats': TagDecoder(hidden_size, num_feats, label_smoothing=0.03, adaptive=True),
})
self.gold_keys = {
'lemmas': 'lemma_id',
'upos': 'pos_id',
'feats': 'feat_id',
}
if mix_embedding:
self.scalar_mix = torch.nn.ModuleDict({
task: ScalarMixWithDropout((1, mix_embedding),
do_layer_norm=False,
dropout=layer_dropout)
for task in self.decoders
})
else:
self.scalar_mix = None
def forward(self,
hidden,
batch: Dict[str, torch.Tensor],
mask) -> Dict[str, Any]:
mask_without_root = mask.clone()
mask_without_root[:, 0] = False
logits = {}
class_probabilities = {}
output_dict = {"logits": logits,
"class_probabilities": class_probabilities}
loss = 0
arc = batch.get('arc', None)
# Run through each of the tasks on the shared encoder and save predictions
for task in self.decoders:
if self.scalar_mix:
decoder_input = self.scalar_mix[task](hidden, mask)
else:
decoder_input = hidden
if task == "deps":
s_arc, s_rel = self.decoders[task](decoder_input, mask)
pred_output = {'class_probabilities': {'s_arc': s_arc, 's_rel': s_rel}}
if arc is not None:
# noinspection PyTypeChecker
pred_output['loss'] = BiaffineDependencyParser.compute_loss(None, s_arc, s_rel, arc,
batch['rel_id'],
mask_without_root,
torch.nn.functional.cross_entropy)
else:
pred_output = self.decoders[task](decoder_input, mask_without_root,
batch.get(self.gold_keys[task], None))
if 'logits' in pred_output:
logits[task] = pred_output["logits"]
if 'class_probabilities' in pred_output:
class_probabilities[task] = pred_output["class_probabilities"]
if 'loss' in pred_output:
# Keep track of the loss if we have the gold tags available
loss += pred_output["loss"]
if arc is not None:
output_dict["loss"] = loss
return output_dict
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
for task in self.tasks:
self.decoders[task].decode(output_dict)
return output_dict
| [
"jfservice@126.com"
] | jfservice@126.com |
ecb41bdb47bd56c47a86b55a3a537259cbad39f1 | 70a8d9379b503635cd101f10b39f00a4b2f0e13a | /DataBased/batchRun_nnConform.py | bf19d0941236a0a2ad05f056b8b35ea53a493830 | [] | no_license | tkushner/nnSensitivity-hscc2020 | 013edefc66f8f3b5370fc17e7d0331c998a3813d | 4d025b5842603db474b3614edb33697be85d5740 | refs/heads/master | 2022-09-19T18:52:54.440742 | 2020-06-03T17:51:04 | 2020-06-03T17:51:04 | 232,232,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,151 | py | ## script to batch run output range for networks
## HSCC 2020
## Taisa Kushner
## taisa.kushner@colorado.edu
import csv
import tempfile
import subprocess
import os
#pull the list of glucose initial trace files from glucIC directory
path = './glucICs/'
glucIC_files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for file in f:
if '.csv' in file:
glucIC_files.append(os.path.join(r, file))
# run output range analysis for three networks
# populates the OutputRanges directory with reachable sets for insulin doses ranging 0-11 units in 1 unit intervals
# for each input location
for IC in glucIC_files:
#Regular
subprocess.call(["./run_file", "0.0", "1.0", "../BGnetworks/M1_Regular_APNN.nt", IC, "./OutputRanges/M1_Regular_"+str(IC[-7:-4])+".csv"])
#Split structure
subprocess.call(["./run_file", "0.0", "1.0", "../BGnetworks/M2_SplitLayer_APNN.nt", IC, "./OutputRanges/M2_SplitLayer_"+str(IC[-7:-4])+".csv"])
#weight constrained
subprocess.call(["./run_file", "0.0", "1.0", "../BGnetworks/M3_WeightCons_APNN.nt", IC, "./OutputRanges/M3_WeightCons_"+str(IC[-7:-4])+".csv"])
| [
"Taisa.Kushner@Colorado.EDU"
] | Taisa.Kushner@Colorado.EDU |
3d7a846c6ebead37ae4742dbd4b30c43e3f5edcc | 9b3769a69736dccf4812d744c5d20c31f293790d | /FlaskSample/env/Lib/site-packages/adal/log.py | 9fbc2d8395419c45f41ceb7c41361202de024558 | [] | no_license | ToyamaDataCenter/FlaskLinebotEcho | 3b75662da77334ebd7781e7d488ce9a44ccf32e2 | 06c7947525bb77d70090bfbdaf067f07b224aeee | refs/heads/master | 2023-01-12T11:31:21.478181 | 2018-10-26T08:47:38 | 2018-10-26T08:47:38 | 153,573,536 | 2 | 3 | null | 2022-12-27T15:33:52 | 2018-10-18T06:23:01 | Python | UTF-8 | Python | false | false | 5,978 | py | #------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import logging
import uuid
import traceback
ADAL_LOGGER_NAME = 'adal-python'
def create_log_context(correlation_id=None, enable_pii=False):
return {
'correlation_id' : correlation_id or str(uuid.uuid4()),
'enable_pii': enable_pii}
def set_logging_options(options=None):
'''Configure adal logger, including level and handler spec'd by python
logging module.
Basic Usages::
>>>adal.set_logging_options({
>>> 'level': 'DEBUG'
>>> 'handler': logging.FileHandler('adal.log')
>>>})
'''
if options is None:
options = {}
logger = logging.getLogger(ADAL_LOGGER_NAME)
logger.setLevel(options.get('level', logging.ERROR))
handler = options.get('handler')
if handler:
handler.setLevel(logger.level)
logger.addHandler(handler)
def get_logging_options():
'''Get logging options
:returns: a dict, with a key of 'level' for logging level.
'''
logger = logging.getLogger(ADAL_LOGGER_NAME)
level = logger.getEffectiveLevel()
return {
'level': logging.getLevelName(level)
}
class Logger(object):
'''wrapper around python built-in logging to log correlation_id, and stack
trace through keyword argument of 'log_stack_trace'
'''
def __init__(self, component_name, log_context):
if not log_context:
raise AttributeError('Logger: log_context is a required parameter')
self._component_name = component_name
self.log_context = log_context
self._logging = logging.getLogger(ADAL_LOGGER_NAME)
def _log_message(self, msg, log_stack_trace=None):
correlation_id = self.log_context.get("correlation_id",
"<no correlation id>")
formatted = "{} - {}:{}".format(
correlation_id,
self._component_name,
msg)
if log_stack_trace:
formatted += "\nStack:\n{}".format(traceback.format_stack())
return formatted
def warn(self, msg, *args, **kwargs):
"""
The recommended way to call this function with variable content,
is to use the `warn("hello %(name)s", {"name": "John Doe"}` form,
so that this method will scrub pii value when needed.
"""
if len(args) == 1 and isinstance(args[0], dict) and not self.log_context.get('enable_pii'):
args = (scrub_pii(args[0]),)
log_stack_trace = kwargs.pop('log_stack_trace', None)
msg = self._log_message(msg, log_stack_trace)
self._logging.warning(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], dict) and not self.log_context.get('enable_pii'):
args = (scrub_pii(args[0]),)
log_stack_trace = kwargs.pop('log_stack_trace', None)
msg = self._log_message(msg, log_stack_trace)
self._logging.info(msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], dict) and not self.log_context.get('enable_pii'):
args = (scrub_pii(args[0]),)
log_stack_trace = kwargs.pop('log_stack_trace', None)
msg = self._log_message(msg, log_stack_trace)
self._logging.debug(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], dict) and not self.log_context.get('enable_pii'):
args = (scrub_pii(args[0]),)
msg = self._log_message(msg)
self._logging.exception(msg, *args, **kwargs)
def scrub_pii(arg_dict, padding="..."):
"""
The input is a dict with semantic keys,
and the output will be a dict with PII values replaced by padding.
"""
pii = set([ # Personally Identifiable Information
"subject",
"upn", # i.e. user name
"given_name", "family_name",
"email",
"oid", # Object ID
"userid", # Used in ADAL Python token cache
"login_hint",
"home_oid",
"access_token", "refresh_token", "id_token", "token_response",
# The following are actually Organizationally Identifiable Info
"tenant_id",
"authority", # which typically contains tenant_id
"client_id",
"_clientid", # This is the key name ADAL uses in cache query
"redirect_uri",
# Unintuitively, the following can contain PII
"user_realm_url", # e.g. https://login.windows.net/common/UserRealm/{username}
])
return {k: padding if k.lower() in pii else arg_dict[k] for k in arg_dict}
| [
"itou@tdc-ict.co.jp"
] | itou@tdc-ict.co.jp |
4532ea4272c0db0940cd69b69a8782cafd161afd | 118a3b794d1e8d62221a16d36f0ff08338a84ca1 | /04Linear_Regression_MV/03-1.py | 90034bfd1b65167bc250b42e058a235249e3052f | [] | no_license | oppa3109/MyTutor_TF_SungKim | 22e1dc0e789a6eab24f37cf992bf7ea53721ede2 | 5880f3224547d1433edcd9b693762b1e9c0445c1 | refs/heads/master | 2021-01-23T07:25:48.940435 | 2017-09-29T06:25:06 | 2017-09-29T06:25:06 | 80,501,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | import tensorflow as tf
x_data = [[73., 80., 75.],
[93., 88., 93.],
[89., 91., 90.],
[96., 98., 100.],
[73., 66., 70.]]
y_data = [[152.],
[185.],
[180.],
[196.],
[142.]]
X = tf.placeholder(tf.float32, shape=[None, 3])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
feed = {X: x_data, Y: y_data}
for step in range(2001):
cost_val, hy_val, _ = sess.run([cost, hypothesis, train], feed_dict=feed)
if step % 10 == 0:
print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)
| [
"byoungki.jeon@gmail.com"
] | byoungki.jeon@gmail.com |
34da4c02c5e9aef4bb76bf8ab68e179817b9db01 | 42a7b34bce1d2968079c6ea034d4e3f7bb5802ad | /ex3.py | da806348e797aa669ec5014ca90987dda6716f49 | [] | no_license | linpan/LPTHW | 45c9f11265b5e1ffe0387a56cec192fa12c6c4d5 | 227bfee3098e8ecb5f07ffc3a0b8e64a853106ce | refs/heads/master | 2021-04-26T13:42:56.859644 | 2014-12-18T15:21:14 | 2014-12-18T15:21:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | #! /usr/bin/env python
#coding:utf-8
print "I will now count my chieckens."
print "Hens", 25 + 30 / 6
print "Roosters", 100 - 25 * 3 % 4
print "Now I will count the eggs:"
print 3 + 2 + 1 - 5 + 4 % 2 - 1 / 4 +6
print "Is it true that 3 + 2 < 5 -7 ?"
print 3 + 2 < 5 -7
print "What is 3 +2 ?", 3 + 2
print "What is 5 -7 ?", 5 - 7
print "Oh,that's why it's False."
print "How about some more."
print "Is it greater?", 5 > -2
print "is it greater or equal?", 5 >= -2
print "Is it less or equal?", 5 <= -2
| [
"shine_forever@yeah.net"
] | shine_forever@yeah.net |
d33c81f835ede07edefa28e45721b9fe3610bf9b | ca74496b2b53ecc8bad1fb895ff37fa096e319a5 | /backend/ServerlessApplication/tests/integration/test_deleteBucketItem.py | ff28adaed81e0aebd60501954596911c7727b280 | [] | no_license | COS301-SE-2021/Ptarmigan | a4f98cd7ff4011118828c1e2f3af50ccf7724954 | 71ebe6bea2912e6d70905dea5b18d0b5ca8268b0 | refs/heads/master | 2023-08-28T12:24:52.406997 | 2021-10-13T20:10:42 | 2021-10-13T20:10:42 | 369,604,842 | 3 | 1 | null | 2021-10-05T09:24:04 | 2021-05-21T17:13:35 | Dart | UTF-8 | Python | false | false | 844 | py | import json
from unittest.mock import patch
import pytest
import unittest
import requests
baseUrl = "https://localhost:3000"
def fixture_event():
return { "content" : "IBM" }
class Test_DeleteBucketItem(unittest.TestCase):
def test_Delete_Bucket_return_success (self):
api_url = "https://cn9x0zd937.execute-api.eu-west-1.amazonaws.com/Prod/scraper/deleteBucketItem"
response = requests.post(api_url, json=fixture_event())
response.json()
print(response)
assert response.status_code == 200
def test_Delete_Bucket_Return_invalid_Input(self):
api_url = "https://cn9x0zd937.execute-api.eu-west-1.amazonaws.com/Prod/scraper/deleteBucketItem"
response = requests.post(api_url, json={})
response.json()
print(response)
assert response.status_code == 400 | [
"u18033882@tuks.co.za"
] | u18033882@tuks.co.za |
a4e9175d9ebdbf75f3903cdd5197dad03a4113e9 | 9c47fbb2761cc50b7b0be67decb20c377dd1d078 | /HackerRank/Python/Basic data types/finding-the-percentage-testcases/solution.py | b6a1da399d97474589f46398c9de2c6e14054b3a | [
"MIT"
] | permissive | IsFilimonov/Interviews | 782ec1f5d82373c20df0edaaeb56cfb0d493a9e7 | 3b9858f43ef6b7a2b5e565ef58406e4018edbf97 | refs/heads/main | 2022-12-12T13:16:25.750870 | 2022-11-30T11:31:38 | 2022-11-30T11:31:38 | 213,611,039 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | input00 = open("input/input00.txt", "r")
input01 = open("input/input01.txt", "r")
input00_list = input00.read().splitlines()
input01_list = input01.read().splitlines()
count_elements = int(input00_list[0])
dictionary = {}
for i in range(count_elements):
str_elements = input00_list[i+1].split()
dictionary[str_elements[0]] = [str_elements[1], str_elements[2], str_elements[3]]
average = sum(map(int, dictionary[input00_list[count_elements + 1]])) / len(input00_list)
print(average)
# query_scores = student_marks[query_name]
# print("{0:.2f}".format(sum(query_scores) / len(query_scores)))
| [
"Filimonov_IS@mail.ru"
] | Filimonov_IS@mail.ru |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.