index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
984,800 | ffdb8ad8b6135ffb9c3ae09efc589bd0d9ff365d | from django.db import models
from django.core.urlresolvers import reverse
from django_markdown.models import MarkdownField
class ProjectQuerySet(models.QuerySet):
    """QuerySet with a shortcut for publicly visible projects."""

    def published(self):
        # Filters on the Project.published boolean flag.
        return self.filter(published=True)
class Project(models.Model):
    """A portfolio project entry, addressable by slug or an external link."""

    name = models.CharField(max_length=200)
    subtitle = models.CharField(blank=True, max_length=1000)
    short_description = models.TextField(blank=True)
    description = MarkdownField(blank=True)
    # Visibility flag consumed by ProjectQuerySet.published().
    published = models.BooleanField(default=True)
    old = models.BooleanField(default=False)
    # NOTE(review): blank=True without null=True on a DateField keeps the DB
    # column NOT NULL — saving with an empty form value will fail; confirm.
    date = models.DateField(blank=True)
    type = models.CharField(blank=True, max_length=50)  # shadows builtin `type`
    thumbnail = models.ImageField(blank=True)
    # Optional external URL; when set it overrides the internal detail page
    # (see get_absolute_url below).
    link = models.URLField(blank=True)
    slug = models.SlugField(max_length=200, unique=True)
    objects = ProjectQuerySet.as_manager()

    def __str__(self):
        return self.name

    @property
    def page_url(self):
        # Alias kept for callers/templates that expect `page_url`.
        return self.get_absolute_url()

    def get_absolute_url(self):
        # The external link wins over the internal detail view.
        if self.link:
            return self.link
        return reverse('project_detail', kwargs={"slug": self.slug})

    class Meta:
        verbose_name = "Project"
        verbose_name_plural = "Projects"
        # Newest projects first.
        ordering = ["-date"]
|
984,801 | 1c18a951d1309db892948b3a4271557a4aec09e6 | import math
# Read an angle in degrees and report its sine, cosine and tangent.
angulo = float(input('Digite o angulo: '))
radianos = math.radians(angulo)  # convert once instead of per function call
sen = math.sin(radianos)
cos = math.cos(radianos)
tan = math.tan(radianos)
print('O valor de seno é {:.2f}, de cosseno é {:.2f} e da tangente é {:.2f}.'.format(sen, cos, tan))
984,802 | 6718d78f6489fb133b1d76ad493b8021be719083 | # -*- coding: utf-8 -*-
#sender identifying code by email or mobile
import sys
import smtplib
import requests
import random
from django.core.cache import cache
from celery import shared_task
from email.mime.text import MIMEText
from ourpro_config import email_password, mobile_api_key
reload(sys)
sys.setdefaultencoding('utf-8')
@shared_task
def sender_identifying_code_to_email(user_email):
    """Generate a 6-digit verification code, cache it for 3 minutes and
    e-mail it to *user_email* via QQ's SMTP-over-SSL service."""
    # zfill guarantees six characters: randint alone can return e.g. 5 -> "5",
    # which is shorter than the 6-digit code the message promises.
    identifying_code = str(random.randint(0, 999999)).zfill(6)
    _TIME_OUT = 3 * 60  # seconds: the code expires after 3 minutes
    cache.set(user_email + "_identifying_code", identifying_code, _TIME_OUT)
    _from_user = "843359825@qq.com"
    _pwd = email_password
    msg = MIMEText(user_email + " ,您好: \n" + " 您的注册码为: " + identifying_code + \
                   " 该验证码在3分钟内有效,请及时注册", 'plain', 'utf-8')
    msg["Subject"] = unicode("[ourpro666] 注册验证码", 'utf-8')
    msg["From"] = _from_user
    msg["To"] = user_email
    s = smtplib.SMTP_SSL("smtp.qq.com", 465)
    try:
        s.login(_from_user, _pwd)
        s.sendmail(_from_user, user_email, msg.as_string())
    finally:
        # Always close the SMTP connection, even if login/send raises.
        s.quit()
@shared_task
def sender_identifying_code_to_mobile(user_mobile):
    """Generate a 6-digit verification code, cache it for 3 minutes and
    SMS it to *user_mobile* through the Yunpian single-send API."""
    # zfill keeps codes with leading zeros six characters long
    # (randint alone can return e.g. "5").
    identifying_code = str(random.randint(0, 999999)).zfill(6)
    _TIME_OUT = 3 * 60  # seconds: the code expires after 3 minutes
    cache.set(user_mobile + "_identifying_code", identifying_code, _TIME_OUT)
    _api_key = mobile_api_key
    _yunpian_url = 'https://sms.yunpian.com/v2/sms/single_send.json'
    params = {  # renamed from the `parmas` typo
        'apikey': _api_key,
        'mobile': user_mobile,
        'text': '【ourpro测试】您的验证码是' + identifying_code + '。如非本人操作,请忽略本短信'
    }
    requests.post(_yunpian_url, data=params)
|
984,803 | 58d64628fa854eb57cf50e28faf34b1de31063b2 | # ======================
# | STANDARD EXERCISES |
# ======================
# EXERCISE 1
# Standard Exercise 1 - Creating a dictionary
# First, create an empty dictionary
# Then, add a key-value pair to it. Have the key be 'my_favorite_class'
# and the value be the name of your favorite Nueva class.
# Standard Exercise 2 - Adding to a dictionary
# Create a function that takes a dictionary as input. (It could be the one you created
# in the last problem).
# This function should add 2 to each input.
""" EXAMPLE
my_dict = {'carl': 100, 'justin_bieber': 5}
change_dictionary(my_dict)
print(my_dict)
> {'carl': 102, 'justin_bieber': 7}
"""
def change_dictionary(dictionary):
    """Add 2 to every value in *dictionary*, mutating it in place.

    Implements the documented example:
    {'carl': 100, 'justin_bieber': 5} -> {'carl': 102, 'justin_bieber': 7}
    """
    for key in dictionary:
        dictionary[key] += 2
# Exercise 3
# Create a function that takes two inputs: a list and a dictionary
# This function should modify the dictionary so that it counts the number
# of times a pokemon was captured.
# I've provided the list and dictionary for you below.
""" EXAMPLE
pokemon_zoo = {'pikachu': 0, 'blastoise': 1}
captured_pokemon = ['pikachu', 'pikachu', 'blastoise']
update_pokemon_zoo(pokemon_zoo, captured_pokemon)
print(pokemon_zoo)
pokemon_zoo = {'pikachu': 2, 'blastoise': 2}
"""
# This is all the Pokemon you've captured.
captured_pokemon = ['magikarp', 'mankey', 'pikachu', 'voltorb', 'mankey',
'venomoth','weedle', 'weedle', 'magikarp', 'kakuna']
# Here is a dictionary that describes each Pokemon and how many of each
# type you've captured.
# You don't have any Pokemon, so your zoo is empty.
pokemon_zoo = {'kakuna': 0,
'magikarp': 0,
'mankey': 0,
'pikachu': 0,
'venomoth': 0,
'voltorb': 0,
'weedle': 0}
# Write a function that changes pokemon_zoo to reflect the new
def update_pokemon_zoo(zoo, captured):
    """Increment *zoo*'s count once per capture listed in *captured* (in place).

    Uses dict.get so a pokemon missing from the zoo starts at 0 instead of
    raising KeyError.
    """
    for name in captured:
        zoo[name] = zoo.get(name, 0) + 1
# =======================
# | Challenge Exercises |
# =======================
# Challenge Exercise 1
# Write a function that checks if a password corresponds to a username in a
# database.
# It should return False if not, and True if the password is correct.
# This is a database of my username-password pairs.
my_database = {
'Carl': 'iamsuperman',
'Abigail': 'mypasswordisawesome',
'Scooby': 'ilovechickenpie',
'Justin Bieber': 'selenagomez'
}
def check_password(database, username, password):
    """Return True iff *username* exists in *database* with exactly *password*.

    dict.get avoids a KeyError for unknown users; .get returns None for a
    missing user, which never equals a password string, so unknown users
    yield False.
    """
    return database.get(username) == password
# Challenge Exercise 2
# Write two functions: encrypt and decrypt
# They should each take a string and dictionary and encrypt the string using the
# dictionary
"""EXAMPLE:
encryption_code = {'a': 'b'}
encrypt('He is a monkey.', encryption_code)
> 'He is b monkey.'
decryption_code = {'b': 'a'}
decrypt('He is b monkey.', decryption_code)
> 'He is a monkey.'
"""
def encrypt(to_encrypt, encryption_code):
    """Return *to_encrypt* with each character mapped through *encryption_code*.

    Characters without an entry in the code pass through unchanged, per the
    documented example: encrypt('He is a monkey.', {'a': 'b'}) -> 'He is b monkey.'
    """
    return ''.join(encryption_code.get(char, char) for char in to_encrypt)
def decrypt(to_decrypt, decryption_code):
    """Return *to_decrypt* with each character mapped through *decryption_code*.

    Mirror of encrypt(): characters without an entry pass through unchanged,
    e.g. decrypt('He is b monkey.', {'b': 'a'}) -> 'He is a monkey.'
    """
    return ''.join(decryption_code.get(char, char) for char in to_decrypt)
|
984,804 | b528c48661e468c371341ecfcdbc51cf434c6bbe | '''
Created on my MAC Jun 10, 2015-2:59:54 PM
What I do:
edge feature between doc obj
for now only use facc confidence?
What's my input:
doc, obj
What's my output:
hFeature for them
@author: chenyanxiong
'''
'''
Again, this is just a virtual base class that illustrates the API.
'''
import site
site.addsitedir('/bos/usr0/cx/PyCode/cxPyLib')
site.addsitedir('/bos/usr0/cx/PyCode/GraphRepresentation')
import os
from cxBase.Conf import cxConfC
from cxBase.base import cxBaseC
import logging
class DocObjEdgeFeatureExtractorC(cxBaseC):
    """Virtual base extractor for edge features between a doc and an object.

    Subclasses override process() to return a feature-name -> value dict.
    """

    def Init(self):
        cxBaseC.Init(self)
        # Prefix used by subclasses when naming their features.
        self.FeatureName = 'DocObjEdge'

    def process(self, doc, obj):
        """Return the feature dict for (doc, obj); the base class returns {}."""
        # logging.warn is deprecated; warning() is the supported spelling.
        logging.warning('please call my subclass')
        return {}
|
984,805 | c932220f1da87cc47d0b682ca9cfaaf51560e646 | import os
import json
# Ad-hoc smoke test: print the resourceType of the first configurationItem
# in a sample AWS Config history file (an AutoScalingGroup snapshot).
# NOTE(review): the filename is hard-coded and must exist in the CWD.
file = "528884874493_Config_us-east-1_ConfigHistory_AWS__AutoScaling__AutoScalingGroup_20181011T012430Z_20181011T012430Z_1.json"
with open (file, 'r') as json_file:
    json_obj = json.load(json_file)
    json_str = json_obj['configurationItems']
    for i in json_str:
        print(i['resourceType'])
        break  # only the first item is of interest
def main():
    """Download AWS Config snapshot archives, extract RDS instance fields
    from each JSON file, insert them into the Inventory_awsrds table, and
    delete each processed file.

    NOTE(review): relies on helpers/globals not defined in this file —
    download_objects, get_all_file_paths, unzip_files, bucket, work_dir and
    a DB `cursor` — presumably provided elsewhere; confirm before running.
    """
    download_objects(bucket, work_dir)
    file_paths = get_all_file_paths('.')
    unzip_files(file_paths)
    # Re-scan after unzipping so the extracted .json files are picked up.
    file_paths2 = get_all_file_paths('.')
    for file in file_paths2:
        if file.endswith('.json'):
            with open (file, 'r') as json_file:
                try:
                    json_obj = json.load(json_file)
                    json_str = json_obj['configurationItems']
                    new_dict = {}
                    for i in json_str:
                        # Flatten the fields of interest; each iteration
                        # overwrites new_dict, so only the last item's values
                        # survive outside the loop body.
                        new_dict['dBInstanceIdentifier'] = i['configuration']['dBInstanceIdentifier']
                        new_dict['dBInstanceClass'] = i['configuration']['dBInstanceClass']
                        new_dict['dBInstanceStatus'] = i['configuration']['dBInstanceStatus']
                        new_dict['dbiResourceId'] = i['configuration']['dbiResourceId']
                        new_dict['configurationItemCaptureTime'] = i['configurationItemCaptureTime']
                        new_dict['dBInstanceArn'] = i['configuration']['dBInstanceArn']
                        new_dict['instanceCreateTime'] = i['configuration']['instanceCreateTime']
                        new_dict['resourceId'] = i['resourceId']
                        new_dict['awsAccountId'] = i['awsAccountId']
                        try:
                            # NOTE(review): only 6 of the 9 collected keys are
                            # inserted (dbiResourceId, instanceCreateTime and
                            # awsAccountId are dropped) — confirm intentional.
                            cursor.execute("insert into Inventory_awsrds(resourceId, dBInstanceClass, dBInstanceStatus, configurationItemCaptureTime, dBInstanceArn, dBInstanceIdentifier) values (?,?,?,?,?,?)",str(new_dict['resourceId']),str(new_dict['dBInstanceClass']),str(new_dict['dBInstanceStatus']),str(new_dict['configurationItemCaptureTime']),str(new_dict['dBInstanceArn']),str(new_dict['dBInstanceIdentifier']))
                        except Exception as e:
                            print(e)
                except Exception as e:
                    print(e)
            # Remove each file once processed so reruns don't re-insert it.
            os.remove(file)
984,806 | 613a992e3327ee7da074a6a43462d2d05d6717f8 | #
# @lc app=leetcode.cn id=85 lang=python3
#
# [85] 最大矩形
#
# https://leetcode-cn.com/problems/maximal-rectangle/description/
#
# algorithms
# Hard (45.24%)
# Likes: 374
# Dislikes: 0
# Total Accepted: 23.7K
# Total Submissions: 52.3K
# Testcase Example: '[["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]]'
#
# 给定一个仅包含 0 和 1 的二维二进制矩阵,找出只包含 1 的最大矩形,并返回其面积。
#
# 示例:
#
# 输入:
# [
# ["1","0","1","0","0"],
# ["1","0","1","1","1"],
# ["1","1","1","1","1"],
# ["1","0","0","1","0"]
# ]
# 输出: 6
#
#
# @lc code=start
class Solution:
    def maximalRectangle(self, matrix: 'List[List[str]]') -> int:
        """Return the area of the largest all-'1' rectangle in a binary matrix.

        Row by row, maintain a histogram of consecutive-'1' column heights,
        then find the largest rectangle in that histogram with a monotonic
        stack (the technique from problem 84).  O(rows*cols) time, O(cols)
        extra space.

        The annotation is quoted because `List` is not imported in this file;
        an unquoted annotation would raise NameError when the class is created.
        """
        if not matrix or not matrix[0]:
            return 0
        n, ans = len(matrix[0]), 0
        # One extra slot that stays 0 forces the stack to flush at row end.
        heights = [0] * (n + 1)

        def find_max_rectangle_in_histogram(h):
            # Monotonic increasing stack of indices; -1 is a sentinel base
            # (h[-1] is the trailing 0, so nothing pops against it).
            stack, res = [-1], 0
            for i in range(len(h)):
                while h[i] < h[stack[-1]]:
                    height = h[stack.pop()]
                    w = i - 1 - stack[-1]
                    res = max(res, height * w)
                stack.append(i)
            return res

        for row in matrix:
            for i in range(n):
                # Extend the column's run of '1's, or reset it to 0.
                heights[i] = heights[i] + 1 if row[i] == '1' else 0
            ans = max(ans, find_max_rectangle_in_histogram(heights))
        return ans
# @lc code=end
|
984,807 | 12ac1d84657af0508b4ab8e9a321b58fdbdc14c3 | from . import util
from . import domain
#from . import plotting
|
984,808 | db6dcd75603941c9b0b364580fa842ac11c232c2 | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
import logging
from glob import glob
logger = logging.getLogger(__name__)
def main(app_dir):
    """Bootstrap a zc.buildout application living in *app_dir*.

    Steps: install the bundled setuptools archive via ez_setup, easy_install
    the bundled zc.buildout archive, then run buildout against buildout.cfg.
    Raises subprocess.CalledProcessError if any step fails.
    """
    pkg_dir = os.path.join(app_dir, 'packages')
    eggs_dir = os.path.join(app_dir, 'eggs')
    # Strip PYTHON* vars (e.g. PYTHONPATH) so child installs see a clean env.
    env = {k: os.environ[k] for k in os.environ if not k.startswith('PYTHON')}
    # ez_setup: takes the first archive after sorting ([0]);
    # NOTE(review): the buildout step below takes the last ([-1]) —
    # confirm the asymmetry is intentional.
    setuptools_src = sorted(
        glob(os.path.join(eggs_dir, 'setuptools-*gz')) +
        glob(os.path.join(eggs_dir, 'setuptools-*.zip'))
    )[0]
    ez_setup = [
        sys.executable, '-c',
        "import ez_setup; "
        "ez_setup._install('%(setuptools_src)s'); "
        % locals()
    ]
    subprocess.check_call(ez_setup, cwd=pkg_dir, env=env)
    # bootstrap: easy_install the bundled zc.buildout archive
    buildouts_src = sorted(
        glob(os.path.join(eggs_dir, 'zc.buildout-*gz')) +
        glob(os.path.join(eggs_dir, 'zc.buildout-*.zip'))
    )[-1]
    cfg = 'buildout.cfg'
    bootstrap_cmd = [
        sys.executable, '-m', 'easy_install', buildouts_src,
    ]
    subprocess.check_call(bootstrap_cmd, cwd=app_dir, env=env)
    # buildout: -U ignores user defaults, -v verbose, -c selects the config
    buildout_cmd = [
        sys.executable, '-c',
        "from zc.buildout.buildout import main; "
        "main(['-Uvc', '%(cfg)s']); "
        % locals()
    ]
    subprocess.check_call(buildout_cmd, cwd=app_dir, env=env)
if __name__ == '__main__':
    # CLI entry point: the optional single argument is the app directory;
    # otherwise default to the parent of the directory holding this script.
    if len(sys.argv) == 2:
        path = sys.argv[1]
    else:
        path = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
    if not os.path.exists(path):
        logger.error('"%s" is not directory', path)
        sys.exit(-2)
    main(path)
|
984,809 | fcfcf46792efba3fd19441a94ae66df015a97cd6 | bl_info = {
"name" : "My_addon",
"author" : "brandon humfleet",
"description" : "test addon",
"blender" : (2, 80, 0),
"location" : "View3D",
"warning" : "",
"category" : "Generic",
}
import bpy
class Test_PT_Panel(bpy.types.Panel):
    """Sidebar (N-panel) panel in the 3D Viewport, under the "Test Addon" tab."""

    bl_idname = "Test_PT_Panel"
    bl_label = "Test Panel"
    bl_category = "Test Addon"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"

    def draw(self, context):
        """Draw the panel contents (called by Blender on every redraw)."""
        layout = self.layout
        row = layout.row()
        # NOTE(review): 'view3d.cursor_center' is presumably a custom operator
        # registered elsewhere in this addon — confirm it exists, or the
        # button will be greyed out.
        row.operator('view3d.cursor_center', text="Center 3D cursor")
|
984,810 | 1fdc9abaac47d04f48ac834261669fdaa2c143ff | # $ python3 -m pip install pyserial
# /dev/cu.usbserial-<XYZ> for mac,
# ...you can find your XYZ using $ python3 -m serial.tools.list_ports -v
# ...it might also be something like /dev/cu.usbmodem<XYZ>, depending on the USB Serial adapter
# My XYZ is "FTVHYZXQ", which matches my USB Serial adapter, model no. TTL232RG-VIP
# ...another option for finding XYZ is to use $ ls /dev/cu.usb*
# Another useful pyserial utility is: $ python3 -m serial.tools.miniterm
# Here's a simple brute force example:
# ser = serial.Serial("/dev/cu.usbserial-FTVHYZXQ",9600)
# To run: $ python3 test_ee101_1wire.py
import sys
import serial
from serial.tools.list_ports import comports
def EE101Text(channel, text):
    """Send a NUL-terminated text message on *channel* to the EE101 analyzer."""
    EE101_SYNC = 0x50
    EE101_TEXT_TYPE = 0x00
    # Header byte: low 3 bits select the channel, upper bits mark sync + type.
    header = (int(channel) & 0x07) | EE101_SYNC | EE101_TEXT_TYPE
    ser.write(bytes([header]))
    ser.write(text.encode())
    ser.write(bytes([0]))  # terminator
def EE101Value(channel, value):
    """Send a 32-bit value on *channel* to the EE101 analyzer, big-endian.

    Each shifted byte is masked with 0xFF: bytes([...]) raises ValueError for
    ints outside 0..255, so the original unmasked shifts crashed for any
    value >= 2**24 (e.g. (value >> 16) exceeds 255 once value >= 2**24) and
    for negative values.
    """
    EE101_SYNC = 0x50
    EE101_VALUE_TYPE = 0x80
    ser.write(bytes([(int(channel) & 0x07) | EE101_SYNC | EE101_VALUE_TYPE]))
    v = int(value)
    ser.write(bytes([(v >> 24) & 0xFF]))
    ser.write(bytes([(v >> 16) & 0xFF]))
    ser.write(bytes([(v >> 8) & 0xFF]))
    ser.write(bytes([v & 0xFF]))
def ask_for_port():
    """\
    Show a list of ports and ask the user for an index choice.
    """
    # Lists every detected serial port (1-indexed) on stderr, then loops
    # until the user enters a valid index.  Uses the module-level
    # `raw_input` shim so the same code runs on Python 2 and 3.
    sys.stderr.write('\nAvailable ports: <index:> <name> <desc> <hwid>\n')
    ports = []
    for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
        sys.stderr.write('{:2}: {:40} {!r} {!r}\n'.format(n, port, desc, hwid))
        ports.append(port)
    while True:
        port = raw_input('Enter index ')
        try:
            index = int(port) - 1
            if not 0 <= index < len(ports):
                sys.stderr.write('Invalid index!\n')
                continue
        except ValueError:
            # Non-numeric input falls through and is returned verbatim,
            # so a device path can be typed directly instead of an index.
            pass
        else:
            port = ports[index]
        return port
# Python 2/3 compatibility shim: on Python 3 (where raw_input is gone),
# alias it to input, and unichr to chr.
try:
    raw_input
except NameError:
    # pylint: disable=redefined-builtin,invalid-name
    raw_input = input # in python3 it's "raw"
    unichr = chr
# Script entry: pick a port, open it, then stream demo text/value frames
# until the user interrupts with CTRL+C.
ser = None  # so the finally-clause is safe even if Serial() raises
try:
    user_selected_port_name = ask_for_port()
    print("You selected " + user_selected_port_name)
    ser = serial.Serial(user_selected_port_name)
    print("Press CTL+C to exit program")
    i = 0
    while True:
        EE101Text(0,"Hello")
        EE101Text(1,"Tim")
        EE101Text(2,"this")
        EE101Text(3,"is")
        EE101Text(4,"your")
        EE101Text(5,"ee101")
        EE101Text(6,"ported to")
        EE101Text(7,"Python on macOS")
        i += 1
        EE101Value(0, i)
        EE101Value(1, i)
        EE101Value(2, i)
        EE101Value(3, i)
        EE101Value(4, i)
        EE101Value(5, i)
        EE101Value(6, i)
        EE101Value(7, i)
except KeyboardInterrupt:
    print("Exiting Program")
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit; narrowed.
    print("Error Occurs, Exiting Program")
finally:
    # Guard: `ser` is still None when opening the port failed — the
    # original raised NameError on ser.close() in that case.
    if ser is not None:
        ser.close()
|
984,811 | 9dd7e183540b544ae1c93a2071e6954fec38b9fb | # The equation solved is the parabolic equaiton
#
# du d du
# -- = k -- --
# dt dx dx
#
# along with boundary conditions
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import scipy as sc
import scipy.sparse
import scipy.sparse.linalg
import time
# change some default values to make plots more readable on the screen
LNWDT = 5; FNT = 15
matplotlib.rcParams['lines.linewidth'] = LNWDT; matplotlib.rcParams['font.size'] = FNT
def explicit_python_solver(u_left=1.0, u_right=0.0, nx=20, r=0.5, xmin=0.0, xmax=1.0, tmin=0.0, tmax=1.0, k=1.0):
    """Solve du/dt = k d2u/dx2 with the explicit FTCS scheme.

    The interior starts at 0 with fixed Dirichlet boundaries u_left/u_right.
    The timestep comes from the Fourier number r = k*dt/dx^2; the scheme is
    stable for r <= 0.5.

    Returns (x, u): node coordinates (nx+1,) and the solution at tmax with
    shape (nx+1, 1), matching the original skeleton's layout.
    """
    dx = float(xmax-xmin)/nx
    u = np.zeros((nx+1, 1), 'd')
    u_old = np.zeros((nx+1, 1), 'd')
    x = np.linspace(xmin,xmax,nx+1)
    # set boundary conditions on both arrays (the skeleton only set u[0]
    # and u_old[-1]; both ends must be fixed for the update below)
    u[0] = u_left
    u[-1] = u_right
    u_old[0] = u_left
    u_old[-1] = u_right
    dt = r*dx**2/k # compute timestep based on Fourier number, dx and diffusivity
    m = int(round((tmax-tmin)/dt)) # number of temporal intervals
    # advance in time: u_new[i] = u[i] + r*(u[i+1] - 2*u[i] + u[i-1])
    for _ in range(m):
        u_old[:] = u
        u[1:-1] = u_old[1:-1] + r*(u_old[2:] - 2.0*u_old[1:-1] + u_old[:-2])
    return x, u
import functools
## Main program starts here
# NOTE(review): the `print cputime, ...` statement below is Python 2 syntax;
# this script will not parse under Python 3 as-is.
nx = 20 # number of nodes
L = 1.0 # length of beam
tmax = 0.025 # time length
# NOTE(review): theta is unused here — presumably reserved for an implicit
# solver not present in this file.
theta = 0.75 # parameter for implicitness: theta=0.5 Crank-Nicholson, theta=1.0 fully implicit
call = functools.partial
solverlist = [explicit_python_solver]
# solvers[0]: zero-argument callables (partials with all args bound);
# solvers[1]: matching legend labels.
solvers = [
    [call(solverlist[0], u_left=100.0, u_right=0.0, nx=nx, r=0.5, xmin=0.0, xmax=L, tmin=0.0, tmax=tmax, k=1.0)],
    ['explicit python solver']
]
lstyle = ['r-', ':', '.', '-.', '--']
legends = solvers[1]
i = 0
# Run each solver, time it, and plot its temperature profile.
for solve in solvers[0]:
    tic = time.time()
    x, u = solve()
    toc = time.time()
    cputime = toc - tic
    print cputime, 'seconds process time for', legends[i]
    plt.plot(x,u,lstyle[i])
    i += 1
plt.legend(legends)
plt.title('Temperature field')
plt.xlabel('Position on beam')
plt.ylabel('Temperature')
#plt.savefig('1dheat0_025.pdf')
plt.show()
984,812 | 400ce82f1014ed63276119cde683ea6bf3a4735e | from django_elasticsearch_dsl import Document
from django_elasticsearch_dsl.registries import registry
from elasticsearch_dsl.connections import connections
from wagtail.search import index
from .models import Employee
from .models import Company
connections.create_connection(hosts=['localhost'])
@registry.register_document
class EmployeeDocument(Document):
    """Elasticsearch-DSL document mapping for the Employee model."""

    class Index:
        # Target index name; single-shard/no-replica settings suit a
        # single-node (local) cluster.
        name = 'employees'
        settings = {'number_of_shards': 1,
                    'number_of_replicas': 0}

    class Django:
        # Source Django model and the concrete fields mirrored into ES.
        model = Employee
        fields = [
            'first_name',
            'last_name',
            'email',
            #index.RelatedFields('company_name'),
            'salary',
            'currency',
            'pan',
            'gender',
            'marital_status',
            'address_1',
            'address_2',
            'city',
            'state',
            'pin_code',
            'country',
        ]
        # Ignore auto updating of Elasticsearch when a model is saved
        # or deleted:
        # ignore_signals = True
        # Don't perform an index refresh after every update (overrides global setting):
        # auto_refresh = False
        # Paginate the django queryset used to populate the index with the specified size
        # (by default it uses the database driver's default setting)
        queryset_pagination = 5000
|
984,813 | b1f5f1ea38464965e32fbf9f75bc600f9d65b079 |
import numpy as np
import tensorflow as tf
import time
from tensorflow.contrib import slim
def trainer(input_dim, num_samples, dataset, learning_rate=1e-3, batch_size=100, num_epoch=75, n_z=10):
    """Train a VariantionalAutoencoder on *dataset* and track losses.

    *dataset* must expose .train/.validation/.test with a next_batch()
    method (the tf MNIST-style interface).  Checkpoints the session to
    tmp/model.ckpt and returns (model, training, validation, test) losses.
    """
    model = VariantionalAutoencoder(input_dim, learning_rate=learning_rate,
                                    batch_size=batch_size, n_z=n_z)
    totalLoss = []
    testLoss = []
    validateLoss = []
    for epoch in range(num_epoch):
        start_time = time.time()
        epochLoss = []
        # `_` instead of the original `iter`, which shadowed the builtin.
        for _ in range(num_samples // batch_size):
            # Obtain a batch (index [0] = images; labels are unused).
            batch = dataset.train.next_batch(batch_size)[0]
            # Execute the forward and the backward pass and record the loss.
            loss, recon_loss, latent_loss = model.run_single_step(batch)
            epochLoss.append(loss)
        totalLoss.append(sum(epochLoss) / len(epochLoss))
        delta_time = time.time() - start_time
        # Track generalization per epoch.  The original also called
        # model.reconstructor() here and discarded the result — pure
        # wasted compute, removed.
        batch = dataset.validation.next_batch(10000)[0]
        vloss, _, _ = model.compute_loss(batch)
        validateLoss.append(vloss)
        batch = dataset.test.next_batch(10000)[0]
        tloss, _, _ = model.compute_loss(batch)
        testLoss.append(tloss)
        if epoch % 1 == 0:
            print('[Epoch {} Time {}s] Loss: {}, Recon loss: {}, Latent loss: {}'.format(epoch, delta_time, loss, recon_loss, latent_loss))
    print('training losses: ', totalLoss)
    print('validation losses: ', validateLoss)
    print('testing losses: ', testLoss)
    saver = tf.train.Saver()
    save_path = saver.save(model.sess, "tmp/model.ckpt")
    print('Model saved in path:', save_path)
    print('Done!')
    return model, totalLoss, validateLoss, testLoss
class VariantionalAutoencoder(object):
    """Convolutional-encoder VAE for 784-dim (28x28 flattened) inputs,
    built with TF1 graph mode + tf.contrib.slim.

    (The class name keeps the original misspelling so external callers
    continue to work.)
    """

    def __init__(self, input_dim, learning_rate=1e-3, batch_size=100, n_z=10):
        # n_z is the dimensionality of the latent code z.
        # NOTE(review): input_dim is stored but the graph hard-codes
        # 784/28x28 — confirm whether it was meant to be used in build().
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_z = n_z
        self.input_dim = input_dim
        self.build()
        #self.sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())
        self.writer = tf.summary.FileWriter('graphs', self.sess.graph)

    # Build the netowrk and the loss functions
    def build(self):
        """Construct the encoder, reparameterized sampler, decoder, and the
        ELBO losses (reconstruction cross-entropy + KL term)."""
        self.x = tf.placeholder(name='x', dtype=tf.float32, shape=[None, 784])
        # Encode
        # x -> z_mean, z_sigma -> z
        net = tf.reshape(self.x, [-1, 28, 28, 1], name='reshape1')
        net = slim.conv2d(net, 64, [3, 3], scope='conv1_1', activation_fn=tf.nn.elu)
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.conv2d(net, 128, [3, 3], scope='conv3_2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        #net = slim.conv2d(net, 128, [3, 3], scope='conv3_3')
        #net = slim.max_pool2d(net, [2, 2], scope='pool3')
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024, scope='enc_fc1', activation_fn=tf.nn.elu)
        net = slim.dropout(net, 0.75, scope='dropout1')
        net = slim.fully_connected(net, 512, scope='enc_fc2', activation_fn=tf.nn.elu)
        net = slim.dropout(net, 0.75, scope='dropout2')
        f3 = slim.fully_connected(net, 256, scope='enc_fc3', activation_fn=tf.nn.elu)
        self.z_mu = slim.fully_connected(f3, self.n_z, scope='enc_fc4_mu', activation_fn=None)
        self.z_log_sigma_sq = slim.fully_connected(f3, self.n_z, scope='enc_fc4_sigma', activation_fn=None)
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
        # so gradients flow through mu and log(sigma^2).
        eps = tf.random_normal(shape=tf.shape(self.z_log_sigma_sq),
                               mean=0, stddev=1, dtype=tf.float32)
        self.z = self.z_mu + tf.sqrt(tf.exp(self.z_log_sigma_sq)) * eps
        # Decode
        # shape is [1, 1, 2, 1]
        # z -> x_hat
        net = slim.fully_connected(self.z, 256, scope='dec_fc1', activation_fn=tf.nn.elu)
        # shape is [1, 1, 256, 1]
        net = slim.dropout(net, 0.75, scope='dropout3')
        # net = tf.reshape(g1, [-1, 16, 16, 1], name='reshape1')
        # net = slim.conv2d_transpose(net, 8, 3, activation_fn=tf.nn.elu)
        # net = slim.flatten(net)
        net = slim.fully_connected(net, 512, scope='dec_fc2', activation_fn=tf.nn.elu)
        net = slim.dropout(net, 0.75, scope='dropout4')
        net = slim.fully_connected(net, 1024, scope='dec_fc3', activation_fn=tf.nn.elu)
        net = slim.dropout(net, 0.75, scope='dropout5')
        self.x_hat = slim.fully_connected(net, 784, scope='dec_fc4', activation_fn=tf.sigmoid)
        # Loss
        # Reconstruction loss
        # Minimize the cross-entropy loss
        # H(x, x_hat) = -\Sigma x*log(x_hat) + (1-x)*log(1-x_hat)
        # epsilon keeps log() away from 0.
        epsilon = 1e-10
        recon_loss = -tf.reduce_sum(
            self.x * tf.log(epsilon + self.x_hat) + (1 - self.x) * tf.log(epsilon + 1 - self.x_hat),
            axis=1
        )
        self.recon_loss = tf.reduce_mean(recon_loss)
        # Latent loss
        # Kullback Leibler divergence: measure the difference between two distributions
        # Here we measure the divergence between the latent distribution and N(0, 1)
        latent_loss = -0.5 * tf.reduce_sum(
            1 + self.z_log_sigma_sq - tf.square(self.z_mu) - tf.exp(self.z_log_sigma_sq), axis=1)
        self.latent_loss = tf.reduce_mean(latent_loss)
        self.total_loss = tf.reduce_mean(recon_loss + latent_loss)
        self.train_op = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.total_loss)
        return

    # Execute the forward and the backward pass
    def run_single_step(self, x):
        """One optimizer step on batch *x*; returns (total, recon, latent) losses."""
        _, loss, recon_loss, latent_loss = self.sess.run(
            [self.train_op, self.total_loss, self.recon_loss, self.latent_loss],
            feed_dict={self.x: x}
        )
        return loss, recon_loss, latent_loss

    def compute_loss(self, x):
        """Evaluate (total, recon, latent) losses on *x* without training."""
        loss, recon_loss, latent_loss = self.sess.run(
            [self.total_loss, self.recon_loss, self.latent_loss],
            feed_dict={self.x: x}
        )
        return loss, recon_loss, latent_loss

    # x -> x_hat
    def reconstructor(self, x):
        """Encode then decode *x*; returns the reconstruction."""
        x_hat = self.sess.run(self.x_hat, feed_dict={self.x: x})
        return x_hat

    # z -> x
    def generator(self, z):
        """Decode latent codes *z* into samples."""
        x_hat = self.sess.run(self.x_hat, feed_dict={self.z: z})
        return x_hat

    # x -> z
    def transformer(self, x):
        """Encode *x* to a sampled latent code z."""
        z = self.sess.run(self.z, feed_dict={self.x: x})
        return z

    # x -> z_mu
    def transformer2(self, x):
        """Encode *x* to the latent mean (deterministic, no sampling noise)."""
        z_mu = self.sess.run(self.z_mu, feed_dict={self.x: x})
        return z_mu
|
984,814 | d3c4838c5e2109fbbaa3316770282225c5a3f021 | from spotipy.oauth2 import SpotifyOAuth
import spotipy
from bs4 import BeautifulSoup
import requests
import os
#You need to declare env variables with your spotify developer account credentials. Create one in https://developer.spotify.com/
SPOTIPY_CLIENT_ID = os.getenv("SPOTIPY_CLIENT_ID")
SPOTIPY_CLIENT_SECRET = os.getenv("SPOTIPY_CLIENT_SECRET")
SPOTIPY_REDIRECT_URI = os.getenv("SPOTIPY_REDIRECT_URI")
#Get this data in your spotify user account. https://www.spotify.com/us/account/overview/
USER_ID = os.getenv("SPOTIFY_USER_ID")
URL = "https://www.billboard.com/charts/hot-100/"
# Scrape the Billboard Hot 100 for a given date and build a private Spotify
# playlist from the matching tracks.
date = input(
    'Which year do you want to travel to? Type the date in this format YYYY-MM-DD: ')
url_query = f"{URL}{date}"
soup = BeautifulSoup(requests.get(url=url_query).text, "html.parser")
# NOTE(review): these class names track Billboard's markup at time of
# writing; the page layout changes periodically — verify they still match.
span_title = soup.find_all(
    name="span", class_="chart-element__information__song text--truncate color--primary")
span_artist = soup.find_all(
    name="span", class_="chart-element__information__artist text--truncate color--secondary")
# song title -> artist name
songs = {title.getText(): artist.getText()
         for title, artist in zip(span_title, span_artist)}
scope = "playlist-modify-private"
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=SPOTIPY_CLIENT_ID,
                     client_secret=SPOTIPY_CLIENT_SECRET, redirect_uri=SPOTIPY_REDIRECT_URI, cache_path="token.txt", scope=scope))
track_list = []
for title in songs:
    # Search once per song (the original issued the identical search twice,
    # doubling the API calls) and skip titles with no results.
    results = sp.search(title, limit=5)['tracks']['items']
    if not results:
        continue
    # Keep the first hit whose artist and title both match.
    for item in results:
        if songs[title].lower() in item['artists'][0]['name'].lower() and title.lower() in item['name'].lower():
            track_list.append(item['uri'])
            break
playlist_id = sp.user_playlist_create(
    USER_ID, f"{date} Top Billboard 100", public=False)['id']
sp.user_playlist_add_tracks(USER_ID, playlist_id, track_list)
|
984,815 | 8a55b79fa28db96a95a86f8d29e056f37ed00480 | # BASIC PLOTTING WITH MATPLOTLIB
# You can show matplotlib figures directly in the notebook by using the %matplotlib notebook and %matplotlib
# inline magic commands.
# %matplotlib notebook provides an interactive environment.
# So what actually has happened when you run the magic function matplotlib with the inline parameter, is that matplotlib
# is configured to render into the browser. This configuration is called a backend, and matplotlib has a number of
# different backends available. A backend is an abstraction layer which knows how to interact with the operating
# environment, whether it's an operating system, or an environment like the browser, and knows how to render matplotlib
# commands. In fact, there's a number of different interactive backends, but there are also backends called hard copy
# backends, which support rendering to graphics formats, like scalable vector graphics, SVGs, or PNGs.
import matplotlib as mpl
mpl.get_backend()
# The next layer is where we'll spend most of our time though, and that's called the artist layer. The artist layer is
# an abstraction around drawing and layout primitives. The root of visuals is a set of containers which includes a
# figure object with one or more subplots, each with a series of one or more axes. It also contains primitives such as
# Line2D, recatangle and collections such as PathCollection
# there's one more layer which is extremely important for us as data scientists in particular, and this is called the
# scripting layer. if we were writing an application to use matplotlib, we might never care about the scripting layer.
# But this layer helps simplify and speed up our interaction with the environment in order to build plots quickly.
# It does this, frankly, by doing a bunch of magic for us. And the difference between someone who is effective with
# matplotlib and someone who isn't, is usually based on their understanding of this magic of the scripting layer. The
# scripting layer we use in this course is called pyplot.
# The pyplot scripting layer is a procedural method for building a visualization, in that we tell the underlying
# software which drawing actions we want it to take in order to render our data. There are also declarative methods for
# visualizing data. HTML is a great example of this. Instead of issuing command after command to the backend rendering
# agent, which is the browser with HTML, HTML documents are formatted as models of relationships in a document, often
# called the DOM, or Document Object Model. These are two fundamentally different ways of creating and representing
# graphical interfaces.
# The popular JavaScript library, for instance, D3.JS is an example of a declarative information visualization method.
# While matplotlib's pyplot is an example of a procedural information visualization method.
# MAKING GRAPHS USING PLOT FUNCTION
# A plot has two axes, an x-axis along the horizon, and a y-axis which runs vertically.
import matplotlib.pyplot as plt
# supports any number of named and unnamed arguments the arguments will be interpreted as x, y pairs
# because the default is the line style '-',
# nothing will be shown if we only pass in one point (3,2)
plt.plot(3,2)
# we can pass in '.' to plt.plot to indicate that we want
# the point (3,2) to be indicated with a marker '.'
plt.plot(3,2,'.')
# create a new figure
plt.figure()
# plot the point (3,2) using the circle marker
plt.plot(3, 2, 'o')
# get the current axes
ax = plt.gca()
# Set axis properties [xmin, xmax, ymin, ymax]
ax.axis([0,6,0,10])
# create a new figure
plt.figure()
# plot the point (1.5, 1.5) using the circle marker
plt.plot(1.5, 1.5, 'o')
# plot the point (2, 2) using the circle marker
plt.plot(2, 2, 'o')
# plot the point (2.5, 2.5) using the circle marker
plt.plot(2.5, 2.5, 'o')
# we can go further with the axes object to the point where we can actually get all of the child objects that that axes
# contains. We do this with the axes get_children function.
# get current axes
ax = plt.gca()
# get all the child objects the axes contains
ax.get_children()
# Here, we can see that there's actually three line to the objects contained in this axes, these are our data points.
# A number of spines which are actual renderings of the borders of the frame including tic markers, two axis objects,
# and a bunch of text which are the labels for the chart. There's even a rectangle which is the background for the axes.
# Scatterplots
# A scatterplot is a two dimensional plot similar to the line plots I've shown. The scatter function takes an x-axis
# value as a first argument and y-axis value as the second. If the two arguments are the same, we get a nice diagonal
# alignment of points.
import numpy as np
x=np.array([1,2,3,4,5,6,7,8])
y=x
plt.figure()
plt.scatter(x,y)
# similar to plt.plot(x, y, '.'), but the underlying child objects in the axes are not Line2D
# NOTE(review): assumes matplotlib.pyplot was already imported as `plt` earlier in the file.
import numpy as np
x = np.array([1,2,3,4,5,6,7,8])
y = x
# create a list of colors for each point to have
# ['green', 'green', 'green', 'green', 'green', 'green', 'green', 'red']
colors = ['green']*(len(x)-1)
colors.append('red')
plt.figure()
# plot the point with size 100 and chosen colors
plt.scatter(x, y, s=100, c=colors)
# convert the two lists into a list of pairwise tuples
zip_generator = zip([1,2,3,4,5], [6,7,8,9,10])
print(list(zip_generator))
# the above prints:
# [(1, 6), (2, 7), (3, 8), (4, 9), (5, 10)]
zip_generator = zip([1,2,3,4,5], [6,7,8,9,10])
# The single star * unpacks a collection into positional arguments
print(*zip_generator)
# the above prints:
# (1, 6) (2, 7) (3, 8) (4, 9) (5, 10)
# use zip to convert 5 tuples with 2 elements each to 2 tuples with 5 elements each
print(list(zip((1, 6), (2, 7), (3, 8), (4, 9), (5, 10))))
# the above prints:
# [(1, 2, 3, 4, 5), (6, 7, 8, 9, 10)]
zip_generator = zip([1,2,3,4,5], [6,7,8,9,10])
# let's turn the data back into 2 lists
x, y = zip(*zip_generator) # This is like calling zip((1, 6), (2, 7), (3, 8), (4, 9), (5, 10))
print(x)
print(y)
# the above prints:
# (1, 2, 3, 4, 5)
# (6, 7, 8, 9, 10)
plt.figure()
# plot a data series 'Tall students' in red using the first two elements of x and y
plt.scatter(x[:2], y[:2], s=100, c='red', label='Tall students')
# plot a second data series 'Short students' in blue using the last three elements of x and y
plt.scatter(x[2:], y[2:], s=100, c='blue', label='Short students')
# add a label to the x axis
plt.xlabel('The number of times the child kicked a ball')
# add a label to the y axis
plt.ylabel('The grade of the student')
# add a title
plt.title('Relationship between ball kicking and grades')
# add a legend (uses the labels from plt.scatter)
plt.legend()
# add the legend to loc=4 (the lower right hand corner), also gets rid of the frame and adds a title
# NOTE(review): this second plt.legend() call replaces the legend created just above.
plt.legend(loc=4, frameon=False, title='Legend')
# Line Plots
import numpy as np
linear_data=np.array([1,2,3,4,5,6,7,8])
exponential_data=linear_data**2
plt.figure()
plt.plot(linear_data,'-o',exponential_data,'-o')
# So there are a couple of things which are new about this versus the scatter plots. First, we only gave y-axes values
# to our plot call, no x axes values. Instead, the plot function was smart enough to figure out that what we wanted was
# to use the index of the series as the x value. Which is pretty handy when you want to make quick plots.
# Second we see that the plot identifies this as two series of data and that the colors of the data from the series are
# different including the data points and the lines between the data points. This is different from the scatter plot
# which required us to label the lines directly.
# plot another series with a dashed red line
plt.plot([22,44,55], '--r')
plt.xlabel('Some data')
plt.ylabel('Some other data')
plt.title('A title')
# add a legend with legend entries (because we didn't have labels when we plotted the data series)
plt.legend(['Baseline', 'Competition', 'Us'])
# fill the area between the linear data and exponential data
plt.gca().fill_between(range(len(linear_data))# length
,linear_data # lower bound
, exponential_data # upper bound
,facecolor='blue' # color to fill with
,alpha=0.25 # transparency
)
# working with dates
plt.figure()
observation_dates = np.arange('2017-01-01', '2017-01-09', dtype='datetime64[D]')
plt.plot(observation_dates, linear_data, '-o', observation_dates, exponential_data, '-o')
x = plt.gca().xaxis
# rotate the tick labels for the x axis
for item in x.get_ticklabels():
    item.set_rotation(45)
# adjust the subplot so the text doesn't run off the image
plt.subplots_adjust(bottom=0.25)
ax = plt.gca()
ax.set_xlabel('Date')
ax.set_ylabel('Units')
ax.set_title('Exponential vs. Linear performance')
# you can add mathematical expressions in any text element
ax.set_title("Exponential ($x^2$) vs. Linear ($x$) performance")
# Bar Charts
# Matplotlib has support for several kinds of bar charts. The most general case, we plot a bar chart by sending in a
# parameter of the x components, and a parameter of the height of the bar.
plt.figure()
xvals=range(len(linear_data))
plt.bar(xvals,linear_data,width=0.3)
new_xvals=[]
# plot another set of bars, adjusting the new xvals to make up for the first set of bars plotted
for item in xvals:
    new_xvals.append(item+0.3)
plt.bar(new_xvals,exponential_data,width=0.3,color='red')
# you can add error bars to each bar as well, using the y-error parameter.
#
# For example, each of our pieces of data in the linear data might actually be a mean value, computed from many
# different observations.
from random import randint
linear_err = [randint(0,15) for x in range(len(linear_data))]
# This will plot a new set of bars with errorbars using the list of random error values
plt.bar(xvals, linear_data, width = 0.3, yerr=linear_err)
# We can also do stacked bar charts as well. For instance, if we wanted to show cumulative values while also keeping the
# series independent, we could do this by setting the bottom parameter and our second plot to be equal to first set of
# data to plot.
# stacked bar charts are also possible
plt.figure()
xvals = range(len(linear_data))
plt.bar(xvals, linear_data, width = 0.3, color='b')
plt.bar(xvals, exponential_data, width = 0.3, bottom=linear_data, color='r')
# or use barh for horizontal bar charts
plt.figure()
xvals = range(len(linear_data))
plt.barh(xvals, linear_data, height = 0.3, color='b')
plt.barh(xvals, exponential_data, height = 0.3, left=linear_data, color='r')
984,816 | 8660a0eed2267f39cf838804476bb9bd14000803 | import os
class wifi():
    """Container for the attributes of one scanned wireless network."""

    def __init__(self):
        # Defaults describe an unnamed, open network until the scan parser
        # (wifilist) fills the fields in.
        self.ssid = ''
        self.psk = ''
        self.key_mgmt = ''
        self.wpa = 0
        self.wep = False
class wifilist():
    """Scan nearby wireless networks via `iwlist` and expose the results as
    a list of `wifi` objects, plus helpers to print/append wpa_supplicant
    configuration stanzas.  (Python 2 code; runs shell commands via sudo.)"""
    def __init__(self):
        # `sed` strips leading whitespace so each field line can be matched
        # with startswith() below.
        out = os.popen("sudo iwlist wlan0 scanning | sed 's/^ *//'").read()
        outArray = out.split('\n')
        idx = -1
        self.wlist = []
        # One entry is pre-appended so the first "Cell" header (which only
        # bumps idx to 0 without appending) has a slot to fill.
        self.wlist.append(wifi())
        for line in outArray:
            if line.startswith('Cell'):
                if idx < 0:
                    pass
                else:
                    self.wlist.append(wifi())
                idx += 1
            elif idx < 0:
                # Ignore any preamble before the first cell.
                pass
            elif line.startswith('Encryption key'):
                if line.split(":")[1].startswith('on'):
                    self.wlist[idx].wep = True
            elif line.startswith('ESSID'):
                # NOTE(review): keeps the surrounding quotes from iwlist;
                # justnames() strips them later.
                self.wlist[idx].ssid = line.split(":")[1]
            elif line.startswith('IE: IEEE 802.11i/WPA2'):
                self.wlist[idx].wpa = 2
            elif line.startswith('IE: IEEE 802.11i/WPA'):
                self.wlist[idx].wpa = 1
    def disp(self,idx):
        # Print a short human-readable summary of network `idx`.
        print "---- %d ----" %idx
        print "SSID: %s" %self.wlist[idx].ssid
        print "WPA: %d" %self.wlist[idx].wpa
        print "WEP: %s" %self.wlist[idx].wep
    def displayall(self):
        # Dump every scanned network plus a sample wpa_supplicant stanza.
        for i in range(0,len(self.wlist)):
            self.disp(i)
            print " "
            self.dispwpaconf(i)
    def dispwpaconf(self,idx,password='password'):
        # Print (do not save) a wpa_supplicant.conf network block for
        # network `idx`, using `password` as PSK or WEP key.
        print """
network={
ssid=%s """ %self.wlist[idx].ssid
        if self.wlist[idx].wpa:
            print " key_mgmt=WPA-PSK"
            print ' psk="%s"' %password
        else:
            print " key_mgmt=NONE"
            if password == '':
                pass
            else:
                print " wep_key0=%s" %password
        print "}"
    def justnames(self):
        # Return the SSIDs with their surrounding quotes removed.
        namelist = []
        for i in range(0,len(self.wlist)):
            namelist.append(self.wlist[i].ssid[1:-1])
        return namelist
    def appendwpaconf(self,idx,filename='/var/www/html/wpa_supplicant.conf',password='password'):
        # Append a wpa_supplicant network block for network `idx` to
        # `filename`; written through `sudo tee` so the target can stay
        # root-owned.
        os.popen('sudo echo " " | sudo tee --append %s' %filename).read()
        os.popen('sudo echo "network={" | sudo tee --append %s' %filename).read()
        os.popen('sudo echo \' ssid="%s"\' | sudo tee --append %s' %(self.wlist[idx].ssid[1:-1],filename))
        if self.wlist[idx].wpa:
            os.popen('sudo echo " key_mgmt=WPA-PSK" | sudo tee --append %s' %filename)
            os.popen('sudo echo \' psk="%s"\' | sudo tee --append %s' %(password,filename))
        else:
            os.popen('sudo echo " key_mgmt=NONE" | sudo tee --append %s' %filename)
            if password == "":
                pass
            else:
                os.popen('sudo echo " wep_key0=%s" | sudo tee --append %s' %(password,filename))
        os.popen('sudo echo "}" | sudo tee --append %s' %filename)
|
984,817 | 5f37b1c2be0763249300891a751840c6d0e3d286 | import datetime
import urllib.request
import discord
from discord.ext import commands
import os
import errno
class General():
    """Discord cog exposing OGame-API lookup commands (cache update, player
    info and alliance info)."""
    def __init__(self, bot):
        # `bot` is expected to provide `ogame_API` and `send_message`.
        self.bot = bot
    @commands.command()
    async def update(self, ctx, server_id: int = 150):
        """Refresh the cached OGame API data for one server.

        API reference:
        https://board.fr.ogame.gameforge.com/index.php/Thread/619580-Ogame-API/
        :param server_id: numeric OGame universe id (default 150).
        :return: None; confirms in-channel with :ok_hand:.
        """
        await self.bot.ogame_API.update(server_id)
        await self.bot.send_message(ctx=ctx, message=":ok_hand:")
    @commands.command()
    async def player_info(self, ctx, player, server_id: int = 150):
        # Look up a player by name and reply with three embeds: general
        # info, per-category scores, and planets (with moons).
        p = await self.bot.ogame_API.get_player_dict_from_name(server_id, player)
        if not p:
            await self.bot.send_message(ctx=ctx, message="Le joueur est introuvable")
            return
        e = discord.Embed()
        e.title = f"Informations sur {player}"
        e.set_footer(text=f"Requested by {ctx.author.name}#{ctx.author.discriminator}")
        e.add_field(name="Alliance", value=f"{p['alliance']['name']} ({p['alliance']['tag']})")
        e.add_field(name="ID", value=p["id"])
        await self.bot.send_message(ctx=ctx, embed=e)
        e = discord.Embed()
        e.title = f"Scores de {player}"
        e.set_footer(text=f"Requested by {ctx.author.name}#{ctx.author.discriminator}")
        # player_parsed = {"positions" : [], "planets": [], "alliance": {"name" : "Aucune", "tag": "NULL", "id": 000000}}
        for position in p["positions"]:
            e.add_field(name=position["name"], value=f"{position['score']} @ {position['position']}")
        await self.bot.send_message(ctx=ctx, mention=False, embed=e)
        e = discord.Embed()
        e.title = f"Planetes de {player}"
        e.set_footer(text=f"Requested by {ctx.author.name}#{ctx.author.discriminator}")
        for planet in p["planets"]:
            pstr = f"[{planet['coords']}]"
            if len(planet["moons"]) >= 1:
                for moon in planet["moons"]:
                    pstr += f"\n🌝: {moon['name']}"
            e.add_field(name=planet["name"], value=pstr)
        await self.bot.send_message(ctx=ctx, mention=False, embed=e)
    @commands.command()
    async def alliance_info(self, ctx, alliance_name, server_id: int = 150):
        # Reply with one embed describing the alliance; raw API fields are
        # converted to friendlier values in the loop below.
        a = await self.bot.ogame_API.get_alliance_dict_from_name(server_id, alliance_name)
        e = discord.Embed()  # .from_data({"Title": "Info sur un joueur", "fields" : p})
        e.title = f"Informations sur l'alliance {alliance_name}"
        e.set_footer(text=f"Requested by {ctx.author.name}#{ctx.author.discriminator}")
        for title, content in a.items():
            if title == "members":
                # Resolve member ids to names; the founder is shown in bold.
                liste_joueurs = []
                for player in content:
                    p = await self.bot.ogame_API.get_player_dict_from_id(server_id, int(player))
                    if p["id"] == a["founder"]:
                        name = f"**{p['name']}**"
                    else:
                        name = p['name']
                    liste_joueurs.append(name)
                content = "\n".join(liste_joueurs)
            elif title == "founder":
                content = (await self.bot.ogame_API.get_player_dict_from_id(server_id, int(content)))["name"]
            elif title == "foundDate":
                # The API returns a unix timestamp; render it readably.
                content = datetime.datetime.fromtimestamp(
                    int(content)
                ).strftime('%Y-%m-%d %H:%M:%S')
            elif title == "open":
                content = bool(int(content))
            e.add_field(name=title, value=content)
        await self.bot.send_message(ctx=ctx, embed=e)
def setup(bot):
    """Entry point used by discord.py's load_extension: registers the cog."""
    bot.add_cog(General(bot))
|
984,818 | f0b21fd7f3eab8a7780b4b4338d967847e24a990 |
import os

from flask import flash, redirect, render_template, url_for
from slugify import Slugify

from WaterTesting import app
from WaterTesting.parse_data import build_source_summary
#~ import flask, flask_frozen
#~ flask.url_for = flask_frozen.url_for
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SLUG = Slugify(to_lower=True)
#~ # Load test result data
# In testing mode read results from a local CSV; otherwise pull the sheet
# from the configured Google Sheets document.  `df` is the module-wide
# DataFrame every view below reads from.
if app.config['TESTING'] == True:
    from WaterTesting.gsload import load_csv
    df = load_csv(app.config['DATA_FILE'])
else:
    from WaterTesting.gsload import load_sheet
    df = load_sheet(
        app.config['GOOGLE_API_KEY'],
        app.config['GOOGLE_SHEET_ID'],
        app.config['GOOGLE_SHEET_TAB']
    )
@app.route('/')
def index():
    """Home page: show the five most recent test results, newest first."""
    latest = df.tail(n=5).sort_values(by='sample_date', ascending=False)
    return render_template('index.html', recent=latest.to_dict('records'))
# Setting route to test result
@app.route('/result/<int:sample_id>/')
def result(sample_id):
    """Render the report page for a single test result.

    Falls back to the index page with a flash message when `sample_id` is
    unknown.  Depends on flask's flash/redirect/url_for being importable.
    """
    test_result = df[df['sample_id'] == sample_id]
    if test_result.empty:
        flash('Test result {} not found.'.format(sample_id))
        return redirect(url_for('index'))
    report = test_result.to_dict('records')[0]
    return render_template('report.html', report=report )
# Setting route to test result
@app.route('/result/')
def result_index():
    """List every stored test result, most recent sample first."""
    ordered = df.sort_values(by='sample_date', ascending=False)
    return render_template('result_index.html', records=ordered.to_dict('records'))
# Setting route to location summary page with plots
@app.route('/location/<location>/')
def summary(location):
    """Render the summary page (plots + metadata) for one sample location.

    Redirects to the index page with a flash message when no data exists
    for the given location slug.
    """
    img_dir = os.path.join(ROOT_DIR, 'static', 'img')
    loc_df = df[df["slug"] == location]
    locations = [(SLUG(x), x) for x in df['sample_location'].unique()]
    #loc_df = df[df['sample_location'] == location]
    # build_source_summary() returns a dictionary with image filenames plus
    # any additional metadata used on the page.  Renamed from `summary` so
    # the local no longer shadows this view function.
    summary_data = build_source_summary(loc_df, location, img_dir)
    if summary_data is None:  # was `== None`; identity check is the idiom
        flash('Location data not found for {}.'.format(location))
        return redirect(url_for('index'))
    return render_template('location.html', summary=summary_data, pages=locations )
|
984,819 | a4ee2f15b4b08d3dce47167bf191df4818a574fb | import pickle
# file_path="../dataset/iwslt14.tokenized.de-en/"
bin_path='../dataset/test_bin/'
file_path='../dataset/test/'
data_name=['test','valid']
languages=['en','de']

for language in languages:
    # Vocabulary for this language; ids 0-3 are reserved special tokens.
    current_dict = {'<padding>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}

    # --- Training split: build the vocabulary while indexing. ---
    current_file = file_path + 'train.' + language
    with open(current_file, encoding='utf-8') as f:
        lines = f.readlines()
    indexs = []
    for line in lines:
        sentence_index = []
        for word in line.strip().split(' '):
            if word not in current_dict:
                current_dict[word] = len(current_dict)
            sentence_index.append(current_dict[word])
        indexs.append(sentence_index)

    with open(bin_path + 'train.' + language, 'wb') as f:
        pickle.dump(indexs, f)
    with open(bin_path + 'train.' + language + 'word', 'w') as f:
        for sent in indexs:
            f.write(' '.join(str(i) for i in sent) + '\n')
    # NOTE: the filename is intentionally 'dict' + language (no dot) to
    # stay byte-compatible with the original output layout.
    with open(bin_path + 'dict' + language + 'word', 'w') as f:
        for word in current_dict:
            f.write(word + '\n')
    with open(bin_path + 'dict.' + language, 'wb') as f:
        pickle.dump(current_dict, f)

    # --- Test/valid splits: unknown words map to <unk> (id 3). ---
    for split in data_name:
        with open(file_path + split + '.' + language, encoding='utf-8') as f:
            lines = f.readlines()
        indexes = []
        for line in lines:
            indexes.append([current_dict.get(word, 3)
                            for word in line.strip().split(' ')])
        with open(bin_path + split + '.' + language + 'word', 'w') as f:
            # BUG FIX: the original iterated the *training* indices
            # ("indexs") here, so test/valid word files contained train
            # data; write this split's own "indexes" instead.
            for sent in indexes:
                f.write(' '.join(str(i) for i in sent) + '\n')
        with open(bin_path + split + '.' + language, 'wb') as f:
            pickle.dump(indexes, f)
984,820 | 852f83462a1e03e8127726b4a412db1b76fbcb38 | import os
import numpy as np
import pandas as pd
import argparse
import matplotlib.pyplot as plt
from scipy import stats, sqrt
from sklearn.metrics import mean_squared_error
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso
from sklearn.model_selection import GridSearchCV
import xgboost
from xgboost import plot_importance
import statsmodels.api as sm
import statsmodels.formula.api as smf
from collections import OrderedDict
def fit_pca(data, ratio=0.9):
    """Return an (unfitted) PCA sized to explain at least `ratio` variance.

    A throwaway full PCA is fitted first to read the explained-variance
    spectrum; the returned PCA keeps the smallest number of leading
    components whose cumulative explained variance reaches `ratio`.
    """
    probe = PCA()
    probe.fit(data)
    ratios = probe.explained_variance_ratio_
    # Default: keep everything if the threshold is never reached.
    n_components = len(ratios)
    variance = 0.0
    for i, r in enumerate(ratios):
        variance += r
        if variance >= ratio:
            # BUG FIX: `i` is 0-based, so i + 1 components are needed.  The
            # original used n_components=i, always dropping one component
            # (and could even request 0 components).
            n_components = i + 1
            break
    print("Number of PCA components: %d" % n_components)
    return PCA(n_components=n_components)
def fit_svr(X_train, y_train):
    """Grid-search an SVR over RBF and linear kernels (10-fold CV, MSE).

    Returns the fitted GridSearchCV object (refit on the best params).
    """
    param_grid = [
        {'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]},
        {'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
    ]
    search = GridSearchCV(SVR(gamma='scale'), param_grid, cv=10,
                          refit=True, scoring='neg_mean_squared_error')
    search.fit(X_train, y_train)
    print("Best parameters set found on development set:")
    print()
    print(search.best_params_)
    return search
def fit_linear(X_train, y_train):
    """Fit an ordinary least-squares regression and return the model."""
    model = LinearRegression()
    model.fit(X_train, y_train)
    return model
def find_important_features(X_train, y_train):
    """Fit a fixed-hyperparameter XGBoost regressor and return feature
    indices ordered by importance (ascending — most important last)."""
    clf = xgboost.XGBRegressor(colsample_bytree=0.4,
                               gamma=0,
                               learning_rate=0.07,
                               max_depth=3,
                               min_child_weight=1.5,
                               n_estimators=1000,
                               reg_alpha=0.75,
                               reg_lambda=0.45,
                               subsample=0.6,
                               seed=28)
    clf.fit(X_train, y_train)
    # np.argsort is ascending, so the most important features come last.
    important_ft_inds = np.argsort(clf.feature_importances_)
    return important_ft_inds
def fit_xgb(X_train, y_train):
    """Exhaustive 5-fold grid search over an XGBoost regressor.

    Returns the refit GridSearchCV object (usable directly via .predict).
    NOTE(review): the full grid is large (thousands of fits) — the
    commented-out single-point grid below records the best params found.
    """
    parameters = {
        'colsample_bytree': [0.4, 0.6, 0.8],
        'gamma': [0, 0.03, 0.1, 0.3],
        'min_child_weight': [1.5, 6, 10],
        'learning_rate': [0.1, 0.07],
        'max_depth': [3, 5],
        'n_estimators': [10, 50, 100],
        'reg_alpha': [1e-5, 1e-2, 0.75],
        'reg_lambda': [1e-5, 1e-2, 0.45],
        'subsample': [0.6, 0.95]
    }
    # best params
    # parameters = {
    #     'colsample_bytree' : [0.4],
    #     'gamma' : [0],
    #     'learning_rate' : [0.07],
    #     'max_depth' : [3],
    #     'min_child_weight' : [10],
    #     'n_estimators' : [100],
    #     'reg_alpha' : [1e-05],
    #     'reg_lambda' : [0.01],
    #     'subsample' : [0.6]
    # }
    xgb = xgboost.XGBRegressor(objective='reg:squarederror', seed=28)
    clf = GridSearchCV(estimator=xgb, param_grid=parameters, refit=True, cv=5, scoring='neg_mean_squared_error')
    clf.fit(X_train, y_train)
    print('best params')
    print(clf.best_params_)
    return clf
def fit_mixedlm(X_train, y_train):
    """Placeholder for a mixed-effects model.

    NOTE(review): unfinished stub — it ignores X_train/y_train, loads an
    unrelated statsmodels example dataset and implicitly returns None, so
    predict(model_name="mixed") will crash at `clf.predict`.
    """
    data = sm.datasets.get_rdataset("dietox", "geepack").data
    print(data.columns)
def predict(X_train, y_train, X_test, y_test, model_name='linear', train_set="train", plot=False, parser=None):
    """Standardise + PCA-reduce the features, fit the requested model and
    report train/test RMSE, optionally saving a prediction plot.

    model_name: 'svr', 'xgb', 'mixed', anything else falls back to linear.
    train_set: tag used only in the plot filename.
    parser: parsed CLI args; only `log_dir` is read (plot output path).
    """
    # norm: fit the scaler on train only, then apply it to both splits.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # fit_pca returns an *unfitted* PCA; fit_transform below fits it.
    pca = fit_pca(X_train)
    X_train = pca.fit_transform(X_train)
    X_test = pca.transform(X_test)
    # training
    if model_name == 'svr':
        clf = fit_svr(X_train, y_train)
    elif model_name == 'xgb':
        clf = fit_xgb(X_train, y_train)
    elif model_name == "mixed":
        # NOTE(review): fit_mixedlm is a stub returning None — this path
        # crashes at clf.predict below.
        clf = fit_mixedlm(X_train, y_train)
    else:
        clf = fit_linear(X_train, y_train)
    # train performance
    # NOTE(review): `sqrt` is `from scipy import sqrt`, deprecated/removed
    # in modern SciPy — math.sqrt or numpy.sqrt would be safer.
    train_rmse = sqrt(mean_squared_error(y_train, clf.predict(X_train)))
    print('Train RMSE:', train_rmse)
    # test performance
    y_pred = clf.predict(X_test)
    pred_rmse = sqrt(mean_squared_error(y_test, y_pred))
    print('Prediction RMSE:', pred_rmse)
    if plot:
        samples = np.arange(len(y_test))
        plt.plot(samples, y_test, linewidth=1, ls='-', color='#c92508', label='y_true')
        plt.plot(samples, y_pred, linewidth=1, ls='--', color='#2348ff', label='y_pred')
        plt.suptitle(model_name)
        plt.title("Train RMSE: %.3f, Test RMSE: %.3f" % (train_rmse, pred_rmse))
        plt.legend(fontsize=12)
        plt.savefig(os.path.join(parser.log_dir, "%s_%s.png" % (model_name, train_set)))
if __name__ == '__main__':
    # CLI: choose the model and the train/test CSVs, then run predict().
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default="linear", help='Regression model')
    parser.add_argument('--data_dir', default="data", help='')
    parser.add_argument('--log_dir', default="logs", help='')
    parser.add_argument('--train_csv', default="train.csv", help='')
    parser.add_argument('--test_csv', default="test.csv", help='')
    parser = parser.parse_args()
    # reading data
    train_df = pd.read_csv(os.path.join(parser.data_dir, parser.train_csv))
    test_df = pd.read_csv(os.path.join(parser.data_dir, parser.test_csv))
    # All columns except the first are features — presumably the first is
    # an id/index column; confirm against the CSV schema.
    feature_cols = list(train_df.columns)[1:]
    # feature_cols = [name for name in list(train_df.columns) if name.endswith('5')]
    target_col = "AGB_Mean"
    print("Number of features:", len(feature_cols))
    X_train = train_df[feature_cols]
    print("Number of samples:", X_train.shape[0])
    y_train = train_df[target_col]
    X_test = test_df[feature_cols]
    y_test = test_df[target_col]
    predict(X_train, y_train, X_test, y_test, model_name=parser.model_name, train_set=parser.train_csv.split(".")[0], plot=True, parser=parser)
984,821 | be2ba44d29c3158b4b7ec024649eca8bdc5c8a61 | #Exercise 29: What if
people = 15
cats = 400
dogs = 20
if people < cats:
print "Too many cats! The world is doomed!"
if people > cats:
print "Not many cats! The world is saved!"
if people < dogs:
print "The world is Grand!"
if people > dogs:
print "The world is boring!"
dogs += 5
if people >=dogs:
print "People are greater than or equal to dogs."
if people <= dogs:
print "People are less than or equal to dogs."
if people ==dogs:
print "People are dogs." |
984,822 | 46704890599be7c329cf6415c4a30e9bacafeef0 | #!/usr/bin/env python3
# vim: fileencoding=utf-8 expandtab ts=4 nospell
# SPDX-FileCopyrightText: 2020 Benedict Harcourt <ben.harcourt@harcourtprogramming.co.uk>
#
# SPDX-License-Identifier: BSD-2-Clause
"""Create GitHub annotations from pylint output"""
from __future__ import annotations
from typing import List
import sys
def main() -> None:
    """Create GitHub annotations from pylint output"""
    annotations: List[str] = []

    # Echo every input line, collecting `path:line:col:message` records
    # as GitHub workflow-command annotations.
    for raw in sys.stdin.readlines():
        print(raw, end="")

        if ":" not in raw:
            continue

        try:
            path, row, column, message = raw.strip().split(":", 3)
        except ValueError:
            # Fewer than four fields — not a pylint diagnostic line.
            continue
        annotations.append(
            f"::error file={path},line={row},col={column}::{message}"
        )

    for note in annotations:
        print(note)

    # Non-zero exit status when any annotation was produced.
    sys.exit(len(annotations) > 0)
if __name__ == "__main__":
main()
|
984,823 | 9eef3293b55f243f375a198196c5aa8f518c704a | '''
Created on Jan 28, 2015
@author: kyleg
'''
if __name__ == '__main__':
pass
from arcpy import env, AcceptConnections, CreateVersion_management, Compress_management, RebuildIndexes_management, ListVersions, ReconcileVersions_management, AnalyzeDatasets_management
#import datetime
connection = r'D:\SCHED\SDEPROD_SDE.sde'
shared_schema = r'D:/SCHED/SDEPROD_SHARED.sde'
env.workspace = connection
env.overwriteOutput = True
# Reconcile/post/delete all child versions into DEFAULT, recreate them,
# then compress the geodatabase and rebuild indexes/statistics.
try:
    # Presumably toggles whether the geodatabase accepts connections during
    # maintenance (True = accept) — confirm intent against the arcpy docs.
    AcceptConnections(connection, True)
    versionList = ListVersions(connection)
    print "versions identified:"
    # NOTE(review): the nested loop iterates versionList twice with the
    # same loop variable (`version` is shadowed), so each version is
    # reconciled/recreated len(versionList) times — almost certainly only
    # one loop was intended.
    for version in versionList:
        for version in versionList:
            if version == "SDE.DEFAULT":
                print "no child versions from Default version"
            else:
                print version
                ReconcileVersions_management(connection, "ALL_VERSIONS", "sde.DEFAULT", versionList, "LOCK_ACQUIRED", "NO_ABORT", "BY_OBJECT", "FAVOR_TARGET_VERSION", "POST", "DELETE_VERSION", "D:/SCHED/LOGS/reconcilelog.txt")
                CreateVersion_management(shared_schema, "SDE.DEFAULT", version, "PUBLIC")
                print version + "version reconciled"
    AnalyzeDatasets_management(connection, "SYSTEM", "SDE.COMPRESS_LOG", "ANALYZE_BASE", "ANALYZE_DELTA", "ANALYZE_ARCHIVE")
    print "database analyzed"
    Compress_management(connection)
    print "database compressed"
    RebuildIndexes_management(connection,"SYSTEM","SDE.COMPRESS_LOG","ALL")
    print "indexes rebuilt"
    AnalyzeDatasets_management(connection, "SYSTEM", "SDE.COMPRESS_LOG", "ANALYZE_BASE", "ANALYZE_DELTA", "ANALYZE_ARCHIVE")
    print "database analyzed"
    AcceptConnections(connection, True)
except:
    # NOTE(review): bare except hides every failure; at minimum log the
    # exception before restoring connections.
    AcceptConnections(connection, True)
984,824 | e9b186a3f82899a1f3c742396cb3582d060b1e50 | import os
import boto3
from dotenv import load_dotenv
import pymongo
from pymongo import MongoClient
def get_file():
    """Fetch the configured pandas data file from S3.

    Reads credentials and bucket/key names from the environment (via
    .env) and returns the raw boto3 get_object response.
    """
    load_dotenv()

    key_id = os.getenv('aws_access_key_id', None)
    secret = os.getenv('aws_secret_access_key', None)
    bucket = os.getenv('bucket_pandas', None)
    object_key = os.getenv('file_name_pandas', None)

    # Fail fast when any required setting is missing from the environment.
    assert key_id
    assert secret
    assert bucket
    assert object_key

    client = boto3.client('s3', aws_access_key_id=key_id,
                          aws_secret_access_key=secret)
    return client.get_object(Bucket=bucket, Key=object_key)
def connect_mongo():
    """Connect to MongoDB Atlas using credentials from the environment.

    Reads user/password/database/collection names from .env and returns
    the pymongo collection object.
    """
    load_dotenv()
    mongoPass = os.getenv('mongoPass', None)
    mongoUser = os.getenv('mongoUser',None)
    mongoDataBase = os.getenv('mongoDataBase',None)
    mongoCollection = os.getenv('mongoCollection',None)
    # Fail fast when any required setting is missing.
    assert mongoPass
    assert mongoUser
    assert mongoDataBase
    assert mongoCollection
    cluster = pymongo.MongoClient(f"mongodb+srv://{mongoUser}:{mongoPass}@cluster0.cae74.mongodb.net/{mongoDataBase}?retryWrites=true&w=majority")
    db = cluster[str(mongoDataBase)]
    collection = db[str(mongoCollection)]
    return collection
|
984,825 | 38bb2df354088be5203fa008be5646833b5e762a | n = int(input())
if n % 2 != 0:
print(0)
else:
ans = 0
t = 10
while n >= t:
ans += n//t
t *= 5
print(ans) |
984,826 | f8bed1cc904fd81b718658b6021f8ef17ac4a2f5 | '''
Project: Gui Gin Rummy
File name: utils_extra.py
Author: William Hale
Date created: 3/14/2020
'''
from PIL import Image, ImageDraw, ImageFilter
def rounded_rectangle(self: ImageDraw, xy, corner_radius, fill=None, outline=None):  # FIXME: not used
    """Draw a rounded rectangle on an ImageDraw.

    xy is ((x0, y0), (x1, y1)); the shape is composed of two overlapping
    rectangles (full width / full height, each inset by the corner radius)
    plus four quarter-circle pie slices for the corners.
    """
    upper_left_point = xy[0]
    bottom_right_point = xy[1]
    # Vertical body: full width, inset top/bottom by the corner radius.
    self.rectangle(
        [
            (upper_left_point[0], upper_left_point[1] + corner_radius),
            (bottom_right_point[0], bottom_right_point[1] - corner_radius)
        ],
        fill=fill,
        outline=outline
    )
    # Horizontal body: full height, inset left/right by the corner radius.
    self.rectangle(
        [
            (upper_left_point[0] + corner_radius, upper_left_point[1]),
            (bottom_right_point[0] - corner_radius, bottom_right_point[1])
        ],
        fill=fill,
        outline=outline
    )
    # Top-left corner (angles 180-270).
    self.pieslice(
        [upper_left_point, (upper_left_point[0] + corner_radius * 2, upper_left_point[1] + corner_radius * 2)],
        180,
        270,
        fill=fill,
        outline=outline
    )
    # Bottom-right corner (angles 0-90).
    self.pieslice(
        [(bottom_right_point[0] - corner_radius * 2, bottom_right_point[1] - corner_radius * 2), bottom_right_point],
        0,
        90,
        fill=fill,
        outline=outline
    )
    # Bottom-left corner (angles 90-180).
    self.pieslice([(upper_left_point[0], bottom_right_point[1] - corner_radius * 2),
                   (upper_left_point[0] + corner_radius * 2, bottom_right_point[1])],
                  90,
                  180,
                  fill=fill,
                  outline=outline
                  )
    # Top-right corner (angles 270-360).
    self.pieslice([(bottom_right_point[0] - corner_radius * 2, upper_left_point[1]),
                   (bottom_right_point[0], upper_left_point[1] + corner_radius * 2)],
                  270,
                  360,
                  fill=fill,
                  outline=outline
                  )
# Attach the helper as a method so callers can use draw.rounded_rectangle(...).
ImageDraw.rounded_rectangle = rounded_rectangle  # FIXME: not used
def mask_rounded_rectangle_transparent(pil_img, corner_radius=8):  # FIXME: not used
    """Return a copy of `pil_img` with a rounded-rectangle alpha mask.

    Corners outside the rounded rectangle become fully transparent.
    """
    # Blur radius of 0 means the mask edge is left sharp; a positive value
    # would soften the corner transition (unused as written).
    blur_radius = 0  # FIXME: what is this for ??? wch
    mask = Image.new("L", pil_img.size, 0)
    draw = ImageDraw.Draw(mask)
    rounded_rectangle(draw, xy=((0, 0), (pil_img.size[0], pil_img.size[1])), corner_radius=corner_radius, fill=255)
    mask = mask.filter(ImageFilter.GaussianBlur(blur_radius))
    result = pil_img.copy()
    result.putalpha(mask)
    return result
|
984,827 | eefcde5990d146cc9fd9c0d25e9b10d0c9beaebb | # coding=utf-8
import ICP_pos
import ip
import Queue
import threading
import requests
import chardet
import urllib2
import datetime
import re
import time
import sys
sys.path.append("..") # 回退到上一级目录
import database.mysql_operation
'''同步队列'''
# Work queues shared by the crawler threads: domains to look up, pages
# awaiting on-page ICP extraction, and finished records to persist.
domain_q = Queue.Queue()
dm_page_icp_q = Queue.Queue()
res_q = Queue.Queue()
'''数据库连接'''
# Shared MySQL connection to the illegal_domains_profile database.
mysql_conn = database.mysql_operation.MysqlConn('10.245.146.38','root','platform','illegal_domains_profile','utf8')
'''线程数量'''
# Number of worker threads per pipeline stage.
thread_num = 2
def get_domains():
    '''
    Read domains (with their previously recorded authoritative/page ICP
    values) from the database and enqueue them onto the domain queue.
    Returns False when the read fails.
    '''
    global mysql_conn
    sql = "SELECT domain,auth_icp,page_icp FROM domain_icp LIMIT 10;"
    fetch_data = mysql_conn.exec_readsql(sql)
    if fetch_data == False:
        print "获取数据有误..."
        return False
    for item in fetch_data:
        domain = item[0]
        auth_icp = item[1]
        page_icp = item[2]
        domain_q.put([domain,auth_icp,page_icp])
def get_chinaz_icp_info():
    '''
    Worker: for each queued domain, fetch its ICP record page from
    icp.chinaz.com through a rotating proxy (the site bans crawlers) and
    enqueue the parsed authoritative ICP for the page-ICP stage.
    '''
    global domain_q
    global dm_page_icp_q
    print 'get chainz icp...'
    proxy = ip.available_IP_q.get() # grab an initial proxy
    print 'init proxy:', proxy
    while not domain_q.empty():
        domain,last_auth_icp,last_page_icp = domain_q.get()
        try:
            url = 'http://icp.chinaz.com/{query_domain}'.format(query_domain=domain)
            html = requests.get(url, proxies = proxy, timeout=5).text
            print proxy
            print html
        except Exception, e: # any other failure
            print str(e)
            if "Connection" in str(e):
                # Likely banned: requeue the domain and rotate the proxy.
                domain_q.put([domain,last_auth_icp,last_page_icp])
                proxy = ip.available_IP_q.get()
                print proxy
                continue
            else:
                print str(e)
                domain_q.put([domain,last_auth_icp,last_page_icp])
                print domain + "获取html异常"
                continue
        # Parse the authoritative ICP out of the fetched page.
        auth_icp = get_icp_info(html)
        if auth_icp:
            # Geolocate the ICP prefix unless there is no record ('--').
            auth_icp_locate = ICP_pos.get_icp_pos(auth_icp) if auth_icp != '--' else ''
            print domain,'auth_icp:', auth_icp
            dm_page_icp_q.put([domain,last_auth_icp,last_page_icp,auth_icp,auth_icp_locate])
        else:
            # Empty result usually means a bad page; retry the domain.
            domain_q.put([domain,last_auth_icp,last_page_icp])
    print '权威icp获取完成...'
def get_icp_info(html):
'''
功能: 提取chinaz页面的icp信息
'''
if "<h2>404" in html:
#获取icp异常 eg. www.365bet.cd的查询结果
print '===:', icp
icp = '--'
return icp
try:
content = re.compile(r'<p><font>(.+?)</font>').findall(html)
if content == []:
content = re.compile(r'<p class="tc col-red fz18 YaHei pb20">([^<]+?)<a href="javascript:" class="updateByVcode">').findall(html)
icp = content[0]
if icp == u"未备案或者备案取消,获取最新数据请":
print '---:',icp
icp = '--'
return icp
except:
# TODO:这里加一个处理
print '====================='
return ''
print "chinaz页面提取icp异常..."
# urllib2 responses may be gzip-compressed and/or in a non-utf8 charset;
# both are normalised here.
def pre_deal_html(req):
    '''
    Decompress a urllib2 response if gzip-encoded and decode it to text
    using the charset detected by chardet.
    NOTE(review): relies on StringIO and gzip, which are not imported in
    this module — the gzip branch would raise NameError as written.
    '''
    info = req.info()
    content = req.read()
    encoding = info.getheader('Content-Encoding')
    if encoding == 'gzip':
        buf = StringIO(content)
        gf = gzip.GzipFile(fileobj=buf)
        content = gf.read()
    charset = chardet.detect(content)['encoding']
    if charset != 'utf-8' and charset != None:
        content = content.decode(charset, 'ignore')
    return content
def get_page_icp_info():
    '''
    Worker: pull records off the authoritative-ICP queue, download each
    site's homepage, extract the on-page ICP string and push the combined
    record onto the result queue.
    Unreachable pages get page_icp = '-1'.
    '''
    global dm_page_icp_q
    global res_q
    while True:
        try:
            domain,last_auth_icp,last_page_icp,auth_icp,auth_icp_locate = dm_page_icp_q.get(timeout=500)
            url = 'http://www.' + domain
        except Queue.Empty:
            # No more work after the timeout: stop this worker.
            break
        print '页面icp获取完成...'
        try:
            resp = urllib2.urlopen(url,timeout=20)
            html = pre_deal_html(resp) # normalise compression/encoding
            code = resp.code
            # Extract the ICP string from the page body.
            page_icp = get_page_icp(html)
        except urllib2.HTTPError, e:
            code = e.code
            page_icp = '-1'
        except Exception, e:
            code = 'ERROR'
            page_icp = '-1'
        finally:
            if page_icp:
                # Geolocate only real ICP strings (not '--' / '-1').
                page_icp_locate = ICP_pos.get_icp_pos(page_icp) if page_icp != '--' and page_icp != '-1' else ''
                print domain,'page_icp:', page_icp
                # print domain,last_auth_icp,last_page_icp,auth_icp,auth_icp_locate,page_icp,page_icp_locate,code
                res_q.put([domain,last_auth_icp,last_page_icp,auth_icp,auth_icp_locate,page_icp,page_icp_locate,code])
    print 'download over ...'
def get_page_icp(html):
    '''
    Extract an ICP string from a page body, trying three patterns:
    pattern1: 备案:粤ICP备11007122号-2 (500.com)
    pattern2: 京ICP证 030247号 (icbc) 360soulou.com 豫ICP证041518号
    pattern2: 京ICP证000007 (sina) (this one may over-match)
    pattern3: 粤B2-20090059-111 (qq.com) (value-added telecom licence)
    Returns '--' when nothing matches, '' on error.
    '''
    try:
        pattern1 = re.compile(u'([\u4e00-\u9fa5]{0,1}ICP[\u5907][\d]{6,8}[\u53f7]*-*[\d]*)').findall(html)
        if pattern1 != []:
            icp = pattern1[0]
        else:
            pattern2 = re.compile(u'([\u4e00-\u9fa5]{0,1}ICP[\u8bc1].*[\d]{6,8}[\u53f7])').findall(html)
            if pattern2 != []:
                icp = pattern2[0]
            # value-added telecom business licence number
            else:
                pattern3 = re.compile(u'([\u4e00-\u9fa5]{0,1}[A-B][1-2]-[\d]{6,8}-*[\d]*)').findall(html)
                if pattern3 != []:
                    icp = pattern3[0]
                else:
                    icp = '--'
        if icp == '':
            icp = '--'
        return icp
    except:
        return ''
        # NOTE(review): the line below is unreachable (after return) and
        # `domain` is not defined in this scope.
        print domain + "get icp WRONG\n"
def cmp_whether_change(last_auth_icp,last_page_icp,auth_icp,page_icp):
    '''
    Report whether the ICP record changed between two crawls.

    Returns True when either the authoritative or the on-page ICP differs
    from the previously stored value; False otherwise.  A first run (both
    new values None) always counts as unchanged.
    '''
    # First run: nothing has been fetched yet, so nothing has "changed".
    if auth_icp is None and page_icp is None:
        return False
    return last_auth_icp != auth_icp or last_page_icp != page_icp
def mysql_save_icp():
    # Drain the result queue and persist each record: when the ICP changed,
    # archive the current row into domain_icp_was, then update domain_icp
    # in place.  Commits are batched (every 100 successful updates).
    global res_q
    counter = 0
    while True:
        try:
            domain,last_auth_icp,last_page_icp,auth_icp,auth_icp_locate,page_icp,page_icp_locate,code = res_q.get(timeout=500)
        except Queue.Empty:
            # Queue drained: stop this worker.
            break
        print '存储结束'
        # Did either ICP value change since the previous crawl?
        cmp_flag = cmp_whether_change(last_auth_icp,last_page_icp,auth_icp,page_icp)
        # On change, copy the existing row into the history table first.
        if cmp_flag:
            sql = "INSERT INTO domain_icp_was(domain,auth_icp,auth_icp_locate,page_icp,page_icp_locate,reuse_check,icp_tag,flag,get_icp_time,http_code)\
            SELECT domain,auth_icp,auth_icp_locate,page_icp,page_icp_locate,reuse_check,icp_tag,flag,get_icp_time,http_code\
            FROM domain_icp\
            WHERE domain = '%s';" %(domain)
            exec_res = mysql_conn.exec_cudsql(sql)
        insert_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        print domain,auth_icp,auth_icp_locate,page_icp,page_icp_locate,code,insert_time
        sql = "UPDATE domain_icp\
        SET auth_icp = '%s',auth_icp_locate = '%s',page_icp = '%s',page_icp_locate = '%s',http_code = '%s',get_icp_time = '%s',\
        flag = flag + 1\
        WHERE domain = '%s';" %(auth_icp,auth_icp_locate,page_icp,page_icp_locate,code,insert_time,domain)
        exec_res = mysql_conn.exec_cudsql(sql)
        if exec_res:
            counter += 1
            print "counter:" + str(counter)
        if counter == 100:
            mysql_conn.commit()
            counter = 0
    # Flush whatever is left in the final partial batch.
    mysql_conn.commit()
    print "存储完成... "
if __name__ == '__main__':
    # QUESTION: where is the flag reset handled??? New rows can then be
    # processed directly, as long as the UPDATE increments flag each run.
    # QUESTION: should reuse_check / icp_tag be cleared on every update?
    # flag = 2
    global mysql_conn
    # Start proxy harvesting, then wait for enough verified proxies before
    # launching the crawl (the ICP pipeline below is currently disabled).
    ip.run_Getter()
    time.sleep(20) # critical: gives the getters time to harvest proxy IPs
    ip.ip_Verify() # verify proxy usability
    time.sleep(90) # wait until enough IPs pass verification
    watcher = threading.Thread(target=ip.ip_watcher) # monitor usable IP count
    watcher.setDaemon(True)
    watcher.start()
    # '''开始icp批量获取'''
    # get_domains()
    # get_chinaz_icp_td = []
    # for _ in range(thread_num):
    #     get_chinaz_icp_td.append(threading.Thread(target=get_chinaz_icp_info))
    # print '开始获取权威icp信息...'
    # for td in get_chinaz_icp_td:
    #     td.start()
    # time.sleep(10)
    # get_page_icp_td = []
    # for _ in range(thread_num):
    #     get_page_icp_td.append(threading.Thread(target=get_page_icp_info))
    # print '开始获取页面icp信息...'
    # for page_td in get_page_icp_td:
    #     page_td.start()
    # print '开始存储icp信息...\n'
    # save_db_td = threading.Thread(target=mysql_save_icp)
    # save_db_td.start()
    # save_db_td.join()
    # mysql_conn.close_db()
    # print '运行结束,请检查是否有未完成数据;若完成,则令运行icp_analyze.py,输入flag= ' + str(flag+1)
984,828 | dfee3e5424e09e048a0b7b3c4b592d893cd9912e | from BinaryTress.BinarySearchTree import Tree
from array import array
tree = Tree()
tree.seed(50)
a = array('i', [3, 70, 1, 40, 6, 4, 120])
for val in a:
tree.insert(val)
tree.traverse()
root = tree.root
succ = Tree.inordersuccessor(70, root)
pred = Tree.inorderpredecessor(70, root)
print("Maximum Node :", tree.getmax(True))
print("Minimum Node :", tree.getmin(True))
print("The inorder predecessor : ", pred.data if pred is not None else None)
print("The inorder successor : ", succ.data if succ is not None else None)
|
984,829 | 661c87edceee157b7346df666613fa6a40fb4d22 | import re
import datetime
from django import forms
from django.forms.extras.widgets import SelectDateWidget
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext, ugettext_lazy as _
from . import models
def v_err(flaw):
    """Raise the ValidationError registered for *flaw*.

    Args:
        flaw: str key selecting one of the known validation messages.

    Raises:
        forms.ValidationError: always, carrying the selected message and
            *flaw* as the error code.
    """
    error_messages = {
        'no_season': _("Season must contain at least 4 alphanumeric characters."),
        'no_items': _("Menu must contain at least 1 item."),
        'no_name': _("Name field must contain at least 4 alphanumeric characters."),
        'no_desc': _("Description must contain at least 10 characters."),
        'no_chef': _("Item must belong to a chef."),
        'no_ing': _("Item must contain at least 1 ingredient."),
        'elapsed': _("This date has elapsed."),
    }
    raise forms.ValidationError(error_messages[flaw], code=flaw)
class MenuForm(forms.ModelForm):
    """Create or edit a Menu."""

    def __init__(self, *args, **kwargs):
        super(MenuForm, self).__init__(*args, **kwargs)
        # Replace the default widgets: multi-select for items, and a
        # year/month/day drop-down for the expiration date.
        self.fields["items"].widget = forms.widgets.SelectMultiple()
        self.fields["items"].queryset = models.Item.objects.all()
        self.fields["expiration_date"].widget = forms.SelectDateWidget(
            empty_label=(
                "Choose Year",
                "Choose Month",
                "Choose Day"
            ),
        )

    class Meta:
        model = models.Menu
        fields = (
            'season',
            'items',
            'expiration_date'
        )
        exclude = [
            'created_date',
        ]

    def clean_season(self):
        """Season needs at least 4 consecutive alphanumeric characters.

        Raises a ValidationError (via v_err) otherwise.
        """
        season = self.cleaned_data['season']
        # BUG FIX: the previous pattern r'[\w{4}\s*]+' was a character CLASS
        # (it matched any single character such as '{' or '4'), not "four
        # consecutive word characters". r'\w{4}' searched anywhere in the
        # string is what the docstring asks for.
        if not re.search(r'\w{4}', season) or len(season) < 4:
            v_err('no_season')
        return season

    def clean_items(self):
        """Menu needs at least 1 item."""
        items = self.cleaned_data['items']
        if len(items) < 1:
            v_err('no_items')
        return items

    def clean_expiration_date(self):
        """Expiration date must be later than the current date."""
        expiration_date = self.cleaned_data['expiration_date']
        # NOTE(review): .date() implies the field yields a datetime; if the
        # model uses a DateField this raises AttributeError — confirm.
        if expiration_date.date() <= datetime.date.today():
            v_err('elapsed')
        return expiration_date
class ItemForm(forms.ModelForm):
    """Create or edit an Item."""

    def __init__(self, *args, **kwargs):
        super(ItemForm, self).__init__(*args, **kwargs)
        # Multi-select widget over every known ingredient.
        self.fields["ingredients"].widget = forms.widgets.SelectMultiple()
        self.fields["ingredients"].queryset = models.Ingredient.objects.all()

    class Meta:
        model = models.Item
        fields = (
            'name',
            'description',
            'ingredients',
            'standard'
        )
        exclude = [
            'created_date',
        ]

    def clean_name(self):
        """Name needs at least 4 consecutive alphanumeric characters."""
        name = self.cleaned_data['name']
        # BUG FIX: r'[\w{4}\s*]+' was a character class matching any single
        # character like '{' or '4'; r'\w{4}' is the intended "four
        # consecutive word characters" test.
        if not re.search(r'\w{4}', name) or len(name) < 4:
            v_err('no_name')
        return name

    def clean_description(self):
        """Description must be at least 10 characters with real word content."""
        description = self.cleaned_data['description']
        # BUG FIX: same broken character class as clean_name; require at
        # least 4 consecutive word characters alongside the length check.
        if not re.search(r'\w{4}', description) or len(description) < 10:
            v_err('no_desc')
        return description

    def clean_ingredients(self):
        """Item needs at least 1 ingredient."""
        ingredients = self.cleaned_data['ingredients']
        if len(ingredients) < 1:
            v_err('no_ing')
        return ingredients

    def clean_chef(self):
        """Item needs a chef."""
        chef = self.cleaned_data['chef']
        if len(chef) < 1:
            v_err('no_chef')
        return chef
|
984,830 | 929dd17f4684e8d0848094864ebc5b1bee5bfada | import re
img_path = "/Users/DD/Developer/website/Classmates-Website/img/china.svg"
data_path = "/Users/DD/Developer/website/Classmates-Website/others/pname.txt"
# Extract the value of every title="..." attribute in the SVG, one per line.
pattern = "title=\"(.*)\""
# FIX/idiom: both files are now closed deterministically via `with` (the
# input file handle was previously never closed), and the manual
# readline()/break loop is replaced by direct line iteration.
with open(data_path, "w") as wfile, open(img_path, 'r') as f:
    for line in f:
        re_obj = re.search(pattern, line)
        if re_obj:
            wfile.write(re_obj[1])
            wfile.write("\n")
|
984,831 | 1a1611cb1df8f3c422fba6ae81a458af69814f26 | from terminaltables import SingleTable
from helpers import pretty_print
from messages import help as console_messages
class Help:
    """Build and immediately print the console help table (command -> description)."""
    def __init__(self):
        # Rows come from the shared console message catalogue, one per command.
        self.table_title = 'Help Section'
        self.table_data = (
            ('Command', 'Description'),
            (console_messages['account_info']['command'], console_messages['account_info']['description']),
            (console_messages['full_account_info']['command'], console_messages['full_account_info']['description']),
            (console_messages['find_balance']['command'], console_messages['find_balance']['description']),
            (console_messages['generate_new_address']['command'], console_messages['generate_new_address']['description']),
            (console_messages['send_transfer']['command'], console_messages['send_transfer']['description']),
            (console_messages['account_history']['command'], console_messages['account_history']['description']),
            (console_messages['full_account_history']['command'], console_messages['full_account_history']['description']),
            (console_messages['replay_bundle']['command'], console_messages['replay_bundle']['description']),
            (console_messages['settings']['command'], console_messages['settings']['description']),
            (console_messages['log_out']['command'], console_messages['log_out']['description']),
            (console_messages['exit']['command'], console_messages['exit']['description']),
        )
        self.print_content()
    def print_content(self):
        """Render the table with single-line borders and print it in green."""
        table_instance = SingleTable(self.table_data, self.table_title)
        table_instance.inner_row_border = True
        pretty_print(table_instance.table, color='green')
|
984,832 | ff434de032bc3bb8d17a21cd4c2e1765dfbcfd71 | #Author: Justin Roberts
#Date: 22 March 2018
#A helper script for running your IBM Quantum Experience scores
#The following implements a quantum score for examining 16 qubit entanglement on ibmqx5
print("\nconnecting....")
#import matplotlib.pyplot as plt
#from qiskit.tools.visualization import plot_histogram
#You can uncomment the above if you want to try and plot the histogram but I wouldn't if I were you :)
from qiskit import QuantumProgram
import Qconfig,time
def QuantumScript():
    """Build the 16-qubit entangling circuit for ibmqx5 and register it with
    the global QuantumProgram `qp`.

    The H/CNOT layout below follows the ibmqx5 coupling map. Paste a
    different circuit body here to run other scores.
    """
    global qp
    Q_SPECS = {
        'circuits': [{
            'name': 'Circuit',
            'quantum_registers': [{
                'name': 'qr',
                'size': 16
            }],
            'classical_registers': [{
                'name': 'cr',
                'size': 16
            }]}],
    }
    qp = QuantumProgram(specs=Q_SPECS)
    qp.set_api(Qconfig.APItoken, Qconfig.config['url'])
    circuit = qp.get_circuit('Circuit')
    quantum_r = qp.get_quantum_register('qr')
    classical_r = qp.get_classical_register('cr')
    # Layer 1: Hadamards, then CNOTs pairing neighbours (same gate order as
    # the original hand-written sequence, expressed as data).
    for q in (1, 2, 5, 6, 9, 11, 12, 15):
        circuit.h(quantum_r[q])
    for ctl, tgt in ((1, 0), (2, 3), (5, 4), (6, 7),
                     (9, 8), (11, 10), (12, 13), (15, 14)):
        circuit.cx(quantum_r[ctl], quantum_r[tgt])
    # Layer 2: stitch the entangled pairs together.
    for q in (2, 3, 5, 8, 11, 13):
        circuit.h(quantum_r[q])
    for ctl, tgt in ((1, 2), (3, 4), (6, 5), (8, 7), (9, 10),
                     (12, 11), (13, 14), (15, 14), (15, 0)):
        circuit.cx(quantum_r[ctl], quantum_r[tgt])
    # Layer 3: final basis change before measurement.
    for q in (0, 2, 4, 5, 7, 10, 11, 14):
        circuit.h(quantum_r[q])
    circuit.barrier(quantum_r)
    # Measure every qubit into its matching classical bit (replaces 16
    # copy-pasted measure calls).
    for q in range(16):
        circuit.measure(quantum_r[q], classical_r[q])
def BackendData():
    """Interactively choose a backend and optionally print its status,
    configuration, calibration and parameters; then read shot/credit limits.

    Sets the module globals `bk`, `shots` and `max_cred`.
    """
    global bk, shots, max_cred

    def _show(prompt, fetch):
        # Print fetched backend info when the user answers 'y'. Failures are
        # reported as "None".
        # BUG FIX: the original wrote `except:` followed by a bare
        # `LookupError` expression statement (a no-op) — the evident intent
        # was to catch the lookup failure, not every exception including
        # KeyboardInterrupt; Exception keeps the best-effort behaviour.
        if input(prompt) == "y":
            try:
                print('\n', fetch(bk))
            except Exception:
                print("\nNone")

    print("\nAvailable backends:\n")
    for i in range(len(qp.available_backends())):
        print(i, qp.available_backends()[i])
    bki = input("\nChoose backend (enter index no.): ")
    bk = qp.available_backends()[int(bki)]
    _show("\nGet current backend status (y/n): ", qp.get_backend_status)
    _show("\nGet backend configuration (y/n): ", qp.get_backend_configuration)
    _show("\nGet backend calibration data: (y/n): ", qp.get_backend_calibration)
    _show("\nGet backend parameters (y/n): ", qp.get_backend_parameters)
    qasm_source = input("\nPrint qasm source y/n: ")
    if qasm_source == "y":
        QASM_source = qp.get_qasm('Circuit')
        print('\n', QASM_source)
    shots = input("\nshots (1-8192): Default=1024: ")
    if shots == '':
        shots = str(1024)
    max_cred = input("\nmaximum credits to use. Default=3: ")
    if max_cred == '':
        max_cred = str(3)
        print("\n.....maximum credits set to 3")
        time.sleep(1)
QuantumScript()
BackendData()
warn = input("\nYou are about to run this circuit on "+str(bk)+".\nAre you sure (y/n): ")
# BUG FIX: `warn=="n" or ''` evaluated as (warn == "n") or ('') — the empty
# string is always falsy, so pressing Enter never re-prompted. Treat "n"
# and an empty answer both as "choose again".
if warn in ("n", ""):
    BackendData()
    warn = input("\nYou are about to run this circuit on "+str(bk)+".\nAre you sure (y/n): ")
circuits = ['Circuit']
print("\nProcessing....\n")
result = qp.execute(circuits, bk, shots=int(shots), max_credits=int(max_cred), wait=10, timeout=1000)
res1 = result.get_counts('Circuit')
#res2=result.get_data('Circuit')
#print(res1)
#print(res2)
# Print each measured bitstring, its decimal value and its count.
# (comment out this loop and uncomment the prints above for the raw dict)
for key, val in res1.items():
    print(key, int(key, 2), val)
#plot_histogram(res1)
compiled_qasm = input("\nGet compiled qasm y/n:")
if compiled_qasm == "y":
    ran_qasm = result.get_ran_qasm('Circuit')
    print('\n', ran_qasm)
else:
    print("\nDone...\n")
|
984,833 | be6c25240a60ec137b4120a02e1cb509ee5e7cb4 | from tkinter import *
import tkinter.messagebox as messagebox
JOGADOR = 0            # index of the player to move (0 -> 'X', 1 -> 'O')
VALORES = ['X', 'O']   # board symbol for each player index
BOTOES = {}            # (row, col) -> Button widget
TAB = []               # flat 3x3 board; -1 = empty, otherwise a player index
JOGANDO = True         # False once the game has ended
def callback(pos):
    """Handle a press of the board button at (row, col) *pos*.

    Marks the square for the current player, announces a win or a draw,
    and hands the turn to the other player.
    """
    global JOGADOR, BOTOES, TAB, VALORES, JOGANDO
    # Ignore presses on taken squares, or after the game has ended.
    if BOTOES[pos]["text"] != "" or not JOGANDO:
        return
    # Mark the button and the board.
    BOTOES[pos].config(text= VALORES[JOGADOR])
    TAB[pos[0]*3 + pos[1]] = JOGADOR
    if ganhou():
        messagebox.showinfo("Fim de Jogo", "O jogador %i (%s) ganhou"%(int(JOGADOR) + 1, VALORES[JOGADOR]))
        JOGANDO = False
    # BUG FIX: the draw check previously ran even after a win, so a winning
    # move that filled the board also popped the "Empate!" dialog. Only
    # check for a draw when nobody has won.
    elif sum(TAB) == 4:
        # sum == 4 only once all 9 squares are filled: five 0s + four 1s,
        # with no -1 placeholders left.
        messagebox.showinfo("Fim de Jogo", "Empate!")
        JOGANDO = False
    # Switch player (0 <-> 1).
    JOGADOR = not JOGADOR
def criaBotoes():
    """
    Create the 3x3 grid of board buttons and the empty board.
    """
    # Module-level state shared with callback()
    global BOTOES, TAB, frame
    # All (row, col) board positions
    posições = [ (0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2) ]
    for pos in posições:
        # One button per square; `p=pos` binds the position at definition
        # time (avoids the late-binding-closure pitfall)
        b = Button(frame, width = 10, height = 5, command = lambda p=pos: callback(p))
        # Remember the widget for callback()/redrawing
        BOTOES[pos] = b
        # Place it in its grid cell
        b.grid( row = pos[0], column = pos[1] )
        # -1 marks an empty square
        TAB.append(-1)
def ganhou():
    """Return True when the current player (JOGADOR) owns a complete line."""
    global JOGADOR
    linhas = (
        (6, 7, 8), (3, 4, 5), (0, 1, 2),   # horizontal rows (bottom/middle/top)
        (6, 3, 0), (7, 4, 1), (8, 5, 2),   # vertical columns
        (6, 4, 2), (8, 4, 0),              # diagonals
    )
    return any(all(TAB[i] == JOGADOR for i in linha) for linha in linhas)
if __name__ == "__main__":
    # Build the Tk window
    root = Tk()
    # Window title
    root.title("Jogo da Velha")
    # Frame that holds the board buttons
    frame = Frame(root)
    # Create the buttons and the board
    criaBotoes()
    # Pack the frame
    frame.pack()
    # Start the event loop
    root.mainloop()
|
984,834 | 3f0fd41fc1d1ca561fd9abb678b45568258226ff | from logger.station import Station, Metadata
from common.exceptions import StationParseError
import datetime
class XMHits1Station(Station):
    """Scraper for the SiriusXM "Hits 1" now-playing metadata feed."""
    _NAME = 'XMHits1'
    _SHORTNAME = 'XM1'
    # {XMTS} is filled with an MM-DD-HH:MM:00 UTC timestamp by getUrl().
    _URL = 'https://www.siriusxm.com/metadata/pdt/en-us/json/channels/siriushits1/timestamp/{XMTS}'
    def getUrl(self):
        """Build the metadata URL for one minute ago (lets the feed catch up)."""
        dt = datetime.datetime.utcnow() - datetime.timedelta(minutes=1)
        ts = '{d.month:02}-{d.day:02}-{d.hour:02}:{d.minute:02}:00'.format(d=dt)
        return self._URL.format(XMTS=ts)
    def parseResponse(self, payload):
        """Extract (title, artist) Metadata from the channel-metadata JSON.

        Raises:
            StationParseError: when the endpoint's message code is not 100.
        """
        data = payload.json()
        resp = data['channelMetadataResponse']
        if resp['messages']['code'] != 100:
            raise StationParseError('Endpoint returned invalid status code', resp['messages']['code'])
        current = resp['metaData']['currentEvent']
        # NOTE(review): 'artists' is indexed with ['name'] as if it were a
        # dict; if the API returns a list of artists this raises — confirm.
        title = str(current['song']['name']).strip()
        artist = str(current['artists']['name']).strip()
        return Metadata(title=title, artist=artist)
    def isDefaultMetadata(self, metadata):
        """True when the title/artist look like channel filler, not a song."""
        # TODO
        keywords = [
            'SiriusXM',
            'Hits1',
            'MorningMashUp'
        ]
        if any(kw.lower() in metadata.title.lower() for kw in keywords) \
        or any(kw.lower() in metadata.artist.lower() for kw in keywords):
            return True
        return False
984,835 | 41eb2f8170668fd71c0217b68996e9d4e638820e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author : RaXianch
# CreatDATE : 2021/8/21
# CreatTIME : 19:58
# Blog : https://blog.raxianch.moe/
# Github : https://github.com/DeSireFire
__author__ = 'RaXianch'
# MySQL connection settings (placeholder credentials).
MYSQL_HOST = '128.0.0.1'  # NOTE(review): likely a typo for loopback 127.0.0.1 — confirm
MYSQL_USER = '2333'
MYSQL_PORT = 3306
MYSQL_PASSWORD = '2333'
MYSQL_DB = "233"
MYSQL_ENCODING = "utf-8"  # NOTE(review): MySQL drivers usually expect "utf8"/"utf8mb4" — confirm
984,836 | da3aee51d7291b10c940e00d447c0f2e93c1bcbc | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 15 23:44:20 2017
@author: Alexandre
"""
############## FILE AKMC_test1.py
from time import time
a = time()
import module_site_deux as mc
from random import randint

# Lattice Monte-Carlo setup: a size x size system with two sites per cell
# and a single non-zero B-B link energy.
size = 50
e_aa = 0
e_ab = 0
e_bb = 0.21
proportion_b = 0.9

systeme = mc.system(size)
systeme.set_maille([(0, 0), (0.5, 0.5)])
systeme.set_link_energy([e_aa, e_ab, e_bb])
systeme.set_map()

# Flip randomly chosen sites to identity B until the target proportion is hit.
nombre_b = int(systeme.get_site_number() * proportion_b)
while nombre_b > 0:
    n = randint(0, systeme.get_site_number() - 1)  # draw a random site
    if not systeme.get_map()[n].get_identity():
        nombre_b -= 1
        systeme.get_map()[n].set_identity(True)

# FIX: write through a context manager so the file is flushed/closed even on
# error (it was previously opened and never closed), and stop shadowing the
# (Python 2) builtin name `file`.
with open("coordonnee.txt", "w") as out:
    out.writelines(str(systeme.get_site_number()) + "\n")
    out.writelines("\n")
    for i in systeme.get_map():
        out.writelines("{} {:3.1f} {:3.1f} 0 \n".format(i.get_identity(), *i.get_coordinate()))
print(time() - a)
984,837 | bcafe63907245417883e6dcfad56c2b69edf131e | from django.contrib import admin
from visa_app.models import Contact
from visa_app.models import Index
# Expose both models in the Django admin site.
admin.site.register(Contact)
admin.site.register(Index)
984,838 | 98e060e819f3db238aa50532b2ce09477c9ee2f9 | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.cbook as cbook
import time
import numpy as np
fig = plt.figure()
# NOTE(review): set_window_title moved to fig.canvas.manager in newer
# Matplotlib — confirm the installed version still exposes it on canvas.
fig.canvas.set_window_title('Frames Alive Graph')
ax1 = fig.add_subplot(1,1,1)
def animate(i):
    """FuncAnimation callback: re-read plot2.csv and redraw the line plot.

    Args:
        i: frame counter supplied by FuncAnimation (unused).
    """
    # FIX: dropped the unused cbook.get_sample_data lookup, and close the
    # file deterministically instead of leaking the open().read() handle.
    with open('plot2.csv', 'r') as fh:
        dataArray = fh.read().split('\n')
    xar = []
    yar = []
    # Skip the header row; each remaining line is "x,y".
    for line in dataArray[1:]:
        if len(line) > 1:
            x, y = line.split(',')
            xar.append(int(x))
            yar.append(float(y))
    ax1.clear()
    ax1.plot(xar, yar)
# Redraw every second; keep a reference so the animation is not GC'd.
ani = animation.FuncAnimation(fig,animate,interval=1000)
plt.show()
|
984,839 | a6c901f8081ee261f30a4bf8e7133eeaf196c24c | # Generated by Django 2.2.3 on 2019-07-25 06:58
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0007_auto_20190725_1558'),
]
operations = [
migrations.AddField(
model_name='product',
name='lowerlimit',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='product',
name='originalprice',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='product',
name='productimg_name',
field=models.CharField(max_length=40, null=True),
),
migrations.AddField(
model_name='product',
name='pub_date',
field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published'),
preserve_default=False,
),
]
|
984,840 | 21a8a7a45d5ac1cd05174de59e36f1ac44ca57d7 | import sys
from trade import main
if __name__ == '__main__':
    main.set_loggers()       # configure logging before anything runs
    main.main(sys.argv[1:])  # forward CLI arguments (minus the program name)
984,841 | f361fd23d36a6da6b01cd59004ef8177d464744e | t = int(raw_input ())
for i in range (t):
dict = {
'B':0,
'R':0,
'O':0,
'K':0,
'E':0,
'N':0,
}
s = raw_input ()
for c in s:
if c in dict:
dict[c] = dict[c] + 1
if len (set (dict.values())) == 1:
print "No Secure"
else:
print "Secure"
|
984,842 | 73c2ae2738aaf5af229e07f5f726e1096e2f8122 | import pyodbc
#import pymssql
import json
import socket
# Report this machine's outbound IP (useful for Azure firewall allow-listing).
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
print(s.getsockname()[0])
s.close()

# Load credentials from json file
with open("azure_db_settings.json", "r") as file:
    creds = json.load(file)

server = creds['server']
database = creds['database']
username = creds['username']
password = creds['password']
port = creds['port']
driver = creds['driver']

# FIX: the format(...) call previously ended with a stray line-continuation
# backslash in front of the commented-out alternative, silently gluing that
# comment onto this statement.
conn_str = "DRIVER={};SERVER={},{};DATABASE={};UID={};PWD={}".format(driver, server, port, database, username, password)
#conn_str = "DRIVER={};SERVER={},{};".format(driver, server, port)
print(conn_str)

#cnxn = pyodbc.connect(conn_str, user=username, password=password, database=database)
cnxn = pyodbc.connect(conn_str)
cursor = cnxn.cursor()
cursor.execute("SELECT * FROM INFORMATION_SCHEMA.tables")
# Idiom: iterate the cursor directly instead of a manual fetchone() loop.
for row in cursor:
    print(str(row[0]) + " " + str(row[1]))
984,843 | bcfc6405b1c200badfeb4d10676196848a4a1f6f | # -*- coding: utf-8 -*-
# Read a and print a! (factorial).
a = int(input('Digite a :'))
fatorial = 1
# BUG FIX: the loop multiplied by the literal 1 instead of the loop
# variable i, so the result was always 1; it now accumulates 1*2*...*a.
# (The redundant pre-loop `i=1` initialiser was removed.)
for i in range(1, (a + 1), 1):
    fatorial = fatorial * i
print(fatorial)
984,844 | e610f69fdb404ea2cb0d62fbbe24e862a680611d | import pyodbc
class Database:
    """Thin pyodbc wrapper around a single SQL Server table of films.

    NOTE(review): connection credentials are hard-coded defaults — consider
    moving them to configuration.
    """
    def __init__(self, server='DESKTOP-ALESSIA' , username='ALESSIA', password='database', database='TEST', table_name='FILM'):
        self.server = server
        self.database = database
        self.username = username
        self.password = password
        self.table_name = table_name
        # Connects immediately; construction fails if the server is unreachable.
        self.cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+self.server+';DATABASE='+self.database+';UID='+self.username+';PWD='+ self.password)
        self.cursor = self.cnxn.cursor()
    def select_record(self, search_by, record):
        """Return all rows where column *search_by* equals *record*.

        NOTE(review): the column name is concatenated into the SQL text —
        safe only while search_by never comes from untrusted input.
        """
        reply = []
        self.cursor.execute("SELECT * from " + self.table_name + " where "+ search_by + " = ?", record)
        for row in self.cursor.fetchall():
            reply.append(row)
        return reply
    def insert_record(self, record):
        """Insert [titolo, lingua, hard_disk, anno] unless the title/disk pair already exists."""
        count = self.cursor.execute("SELECT COUNT(*) from " + self.table_name + " where TITOLO = ? and HARD_DISK = ? ", record[0], record[2]).fetchone()[0]
        if count == 0:
            self.cursor.execute("INSERT into " + self.table_name + " VALUES (?,?,?,?)", record[0], record[1], record[2], record[3])
            self.cnxn.commit()
            reply = record[0] +' inserito sul database'
        else:
            reply = record[0] +' già presente sul database'
        return reply
    def update_record(self, column, name, value):
        """Set *column* = *value* on the row titled *name* and commit."""
        self.cursor.execute("UPDATE " +self.table_name+ " SET "+ column +" = ? WHERE TITOLO = ? ", value, name)
        self.cnxn.commit()
        return 'Record aggiornato correttamente'
    def delete_record(self, record):
        """Delete rows matching *record* after interactive Y/N confirmation on stdin."""
        count = self.cursor.execute("SELECT COUNT(*) from " + self.table_name + " where TITOLO = ? AND LINGUA = ? and HARD_DISK = ? and ANNO = ?", record[0], record[1], record[2], record[3]).fetchone()[0]
        if count == 0:
            reply = record[0] + ' non è presente sul database'
        else:
            # Anything other than Y/N aborts with a hint message.
            rep = input('Sono presenti ' + str(count) + ' records. Eliminare? Y/N ')
            if rep == 'Y':
                self.cursor.execute("DELETE from " + self.table_name + " WHERE TITOLO = ? AND LINGUA = ? and HARD_DISK = ? and ANNO = ?", record[0], record[1], record[2], record[3])
                self.cnxn.commit()
                reply = record[0] + ' eliminato'
            elif rep == 'N':
                reply = 'Il record non è stato eliminato'
            else:
                reply = 'Inserire "Y" oppure "N"'
        return reply
record = ['PROVA', 'nan', 'prova', '2019']  # sample record: [titolo, lingua, hard_disk, anno]
prova = Database()  # NOTE: connects immediately using the hard-coded defaults
|
984,845 | 2b4cfcdb558b92ffbf42ea6d17392b4a27b2e7d0 | import json
import requests
from datetime import datetime
from config import API_KEY, PLAYER_LIST
from DBOper import get_playing_game, update_playing_game
def gaming_status_watcher():
    """Poll Steam for each configured player's current game and report changes.

    Compares the current game (from GetPlayerSummaries) against the last
    recorded one in the DB, records the new state, and builds a human
    readable changelog.

    Returns:
        A newline-joined report string, or None when nothing changed.
    """
    replys = []
    # SteamID64 = 32-bit account id + the constant individual-account offset.
    sids = ','.join(str(p[1] + 76561197960265728) for p in PLAYER_LIST)
    r = requests.get(f'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key={API_KEY}&steamids={sids}')
    j = json.loads(r.content)
    for p in j['response']['players']:
        sid = int(p['steamid'])
        pname = p['personaname']
        # Empty string when the player is not in a game.
        cur_game = p.get('gameextrainfo', '')
        pre_game, last_update = get_playing_game(sid)
        # Game status changed since the last poll.
        # FIX: dropped `status_changed`, which was assigned but never read.
        if cur_game != pre_game:
            now = int(datetime.now().timestamp())
            minutes = (now - last_update) // 60
            if cur_game:
                if pre_game:
                    replys.append(f'{pname}玩了{minutes}分钟{pre_game}后,玩起了{cur_game}')
                else:
                    replys.append(f'{pname}启动了{cur_game}')
            else:
                replys.append(f'{pname}退出了{pre_game},本次游戏时长{minutes}分钟')
            update_playing_game(sid, cur_game, now)
    return '\n'.join(replys) if replys else None
984,846 | bfb4863d6c708a044ecc862c8c37ab38977f4b6b | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Backlink.href'
db.alter_column(u'trovetraces_backlink', 'href', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'Backlink.href'
db.alter_column(u'trovetraces_backlink', 'href', self.gf('django.db.models.fields.URLField')(max_length=200))
models = {
u'trovetraces.article': {
'Meta': {'object_name': 'Article'},
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'newspaper_id': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'newspaper_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'trovetraces.backlink': {
'Meta': {'object_name': 'Backlink'},
'anchor': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'context': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'href': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trovetraces.Page']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trovetraces.Article']", 'null': 'True', 'blank': 'True'})
},
u'trovetraces.page': {
'Meta': {'object_name': 'Page'},
'cleaned_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'file_id': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tld': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
}
}
complete_apps = ['trovetraces'] |
984,847 | c448e3bb4a13f931d187aa74b757f5ed977cc76e | import pyodbc
# Some other example server values are
# server = 'localhost\sqlexpress' # for a named instance
# server = 'myserver,port' # to specify an alternate port
server = 'tcp:172.20.200.252,8629'  # NOTE(review): unused — the DSN below carries the address
# NOTE(review): hard-coded credentials; move to config/environment.
cnxn = pyodbc.connect('DSN=TDB;UID=icam;PWD=kfam1801')
cursor = cnxn.cursor()
#Sample select query — fetch a single row first
cursor.execute("select user_id, username from dba_users")
row = cursor.fetchone()
if row:
    print(row)
# Then fetch and print the full result set by named column
cursor.execute("select user_id, username from dba_users")
rows = cursor.fetchall()
for row in rows:
    print(row.user_id, row.username)
#cursor.execute("delete from products where id <> ?", 'pyodbc')
#print('Deleted {} inferior products'.format(cursor.rowcount))
#cnxn.commit()
984,848 | 668d02ddc9d4bbb66c12d5b90f1d7f8666bb114e | #!/usr/bin/env python
def Permutations(iterable, r=None):
    """Yield successive r-length permutations of *iterable*.

    Pure-Python equivalent of itertools.permutations (the classic recipe
    from its documentation).

    Args:
        iterable: source elements.
        r: permutation length; defaults to len(iterable).

    Yields:
        Tuples of r elements, in lexicographic order relative to the input.
    """
    pool = tuple(iterable)
    n = len(pool)
    r = n if r is None else r
    if r > n:
        return
    # FIX: materialize as lists — on Python 3, range() objects do not
    # support the slice assignment used below (the original was py2-only;
    # list(range(...)) works on both).
    indices = list(range(n))
    cycles = list(range(n, n - r, -1))
    yield tuple(pool[i] for i in indices[:r])
    while n:
        for i in reversed(range(r)):
            cycles[i] -= 1
            if cycles[i] == 0:
                # Rotate the tail; reset this cycle counter.
                indices[i:] = indices[i+1:] + indices[i:i+1]
                cycles[i] = n - i
            else:
                j = cycles[i]
                indices[i], indices[-j] = indices[-j], indices[i]
                yield tuple(pool[i] for i in indices[:r])
                break
        else:
            return
def p68():
    """Solve Project Euler 68: the maximum 16-digit string for a "magic"
    5-gon ring.

    Outer ring values come from 6..10 and inner ring values from 1..5, so
    10 always sits outside and every solution string has 16 digits.

    Returns:
        The digit string of the best solution.
    """
    from itertools import permutations  # stdlib beats the hand-rolled clone

    l1, l2 = [1, 2, 3, 4, 5], [6, 7, 8, 9, 10]
    res, rs = 6, ''
    for i in permutations(l2):          # outer ring
        for j in permutations(l1):      # inner ring
            # Magic condition: all 5 lines (outer, inner, next inner) share
            # one total, i.e. the set of line sums collapses to one value.
            if len(set([i[k] + j[k] + j[(k+1) % 5] for k in range(5)])) == 1:
                t = ''.join([str(i[k]) + str(j[k]) + str(j[(k+1) % 5]) for k in range(5)])
                if len(rs) == 0:
                    res, rs = i[0], t
                elif i[0] == res:
                    # Same starting external node: keep the larger string.
                    if t > rs:
                        rs = t
                elif i[0] < res:
                    # Canonical form starts from the smallest external node.
                    res, rs = i[0], t
    return rs
print p68()
|
984,849 | 9d30de79f7773cb7300415cd41b049e08b2f3ad0 | from django.db import models
class Category(models.Model):
    """A named category with associated search keywords."""
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50, default='a')
    keywords = models.CharField(max_length=50, default='a')

    def __str__(self):
        # BUG FIX: the name was evaluated but never returned, so
        # str(category) raised TypeError (__str__ returned None).
        return self.name
# Create your models here.
|
984,850 | 9ab76b46d59a0383b9de21893c2a90296e32fcf3 | import pandas as pd
import sklearn.feature_selection as fs
import numpy as np
import os
def data_import(path):
    """Load a semicolon-separated CSV and normalise it.

    Decimal commas become dots, '.' in column names becomes '_', and
    columns that are entirely zero are dropped.
    """
    frame = pd.read_csv(path, sep=";")
    frame = frame.replace(',', '.', regex=True)
    frame.columns = [name.replace('.', '_') for name in frame.columns]
    # Keep only columns containing at least one non-zero value.
    keep = (frame != 0).any(axis=0)
    return frame.loc[:, keep]
# prepares the data for feature selection
def prep_data_feature_selection(data):
    """Split *data* into features, target and predictor names.

    Returns:
        (X, y, predictors): X without the two hypnogram columns, y as the
        user-scored hypnogram, and predictors as X's column-name list.
    """
    features = data.drop(['hypnogram_Machine', 'hypnogram_User'], axis=1).copy()
    names = features.columns.values.tolist()
    target = data['hypnogram_User']
    return features, target, names
# KBest function - helps us select the relevant features
def select_kbest(X_feature, y, number_of_besties):
    """Keep the *number_of_besties* features most associated with y (ANOVA F).

    Returns:
        (X_transformed, results, selector): the reduced matrix, the
        -log10(p-value) score per original feature, and the fitted selector.
    """
    chooser = fs.SelectKBest(k=number_of_besties, score_func=fs.f_classif)
    chooser.fit(X_feature, y)
    scores = -np.log10(chooser.pvalues_)  # higher = more informative
    reduced = chooser.fit_transform(X_feature, y).copy()
    return reduced, scores, chooser
# Percentile function - helps us select the relevant features
def select_percentile(X_feature, y, percentile):
    """Keep the top *percentile* percent of features by ANOVA F-test.

    Returns:
        (X_transformed, results): the reduced matrix and the
        -log10(p-value) score per original feature.
    """
    chooser = fs.SelectPercentile(percentile=percentile, score_func=fs.f_classif)
    chooser.fit(X_feature, y)
    scores = -np.log10(chooser.pvalues_)
    reduced = chooser.fit_transform(X_feature, y).copy()
    return reduced, scores
# see results of percentile or kbest function
def get_names(selector, X_feature):
    """Return the column names of X_feature kept by *selector*'s support mask."""
    mask = selector.get_support()
    return [X_feature.columns[i] for i in range(X_feature.shape[1]) if mask[i]]
# find top 10 features for the group of patients
def find_group_features(self, directory):
    """Collect the top-10 feature names across every patient CSV in *directory*.

    NOTE(review): `self` is unused and this is a module-level function — it
    looks like a leftover from a method copy/paste; confirm callers before
    removing it.

    Args:
        directory: appears to be an os.fsencode()-style bytes path;
            `str(directory)[2:-1]` below strips the b'...' repr — TODO
            confirm, and prefer os.fsdecode.

    Returns:
        Flat list of feature names, duplicates included, across all files.
    """
    features_all_patients = []
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".csv"):
            path = str(directory)[2:-1] + "/" + str(filename)
            df, n_features, feature_names = preprocess_any_file(path, n_features=10)
            features_all_patients.extend(feature_names)
    return features_all_patients
# this imports data as well as runs the select kbest features
def preprocess_any_file(path, n_features):
    """Load one CSV and reduce it to its *n_features* best columns plus target.

    Returns:
        (df, n_features, features): the reduced DataFrame with the
        'hypnogram_User' target re-attached, the feature count, and the
        selected column names.
    """
    raw = data_import(path)
    X_feature, y, _predictors = prep_data_feature_selection(raw)
    X_transformed, _scores, selector = select_kbest(X_feature, y, n_features)
    chosen = get_names(selector, X_feature)
    reduced = pd.DataFrame(X_transformed, columns=chosen)
    reduced['hypnogram_User'] = y
    return reduced, n_features, chosen
# to be done
# drops scoring of random series of rows for semisupervised learning of HMM
def my_train_test_split_individual_person():
    """TODO: drop the scoring of a random series of rows to enable
    semi-supervised HMM training for one person. Not implemented yet."""
    pass
984,851 | 913b0d16a68cd1df6dec4fa736466938291556e5 | #!/home/jhdavis/anaconda3/envs/default/bin/python3
# -*- coding: utf-8 -*-
"""
@author: jhdavis@mit.edu : github.com/jhdavislab
"""
import argparse
import os
import edit_cs
import numpy as np
def crop_stack(input_stack, output_stack, dim_x, dim_y, e2_path=''):
    """Crop a particle stack in real space by shelling out to e2proc2d.py.

    Args:
        input_stack: path to the source .mrcs stack.
        output_stack: path for the cropped stack.
        dim_x, dim_y: target box dimensions in pixels.
        e2_path: explicit path to e2proc2d.py; empty means rely on $PATH.

    NOTE(review): this function reads the module-level globals `more_args`
    and `test`, which are only assigned inside the __main__ block below —
    importing this module and calling crop_stack() directly raises
    NameError. Consider promoting both to parameters.
    """
    if e2_path == '':
        e2_path = 'e2proc2d.py'
    execution_string = e2_path + ' --clip ' + str(dim_x) + ',' + str(dim_y) + ' ' + more_args + ' ' + input_stack + ' ' + output_stack
    print("...preparing to execute the following:")
    print(execution_string)
    if not(test):
        print('...executing...')
        # Delegates the actual crop to EMAN2's e2proc2d.py.
        os.system(execution_string)
    else:
        print('...running in test mode, so no command was executed.')
if __name__ == '__main__':
    # CLI: parse arguments, crop the stack, then optionally patch the
    # cryoSPARC .cs file so 'blob/shape' matches the new box size.
    parser = argparse.ArgumentParser(description='Crop a particle stack in real space. Optionally edits a cryosparc .cs file to reflect the new box size. Requires e2proc2d.py is in your path! Note that this tool simply crops about the center of the image.',
                                     epilog='Example usage: python3 crop_real.py input_stack.mrcs output_stack.mrcs 128,96 --cs_file cryosparc_v2_csfile.cs')
    parser.add_argument('input_stack', type=str,
                        help='path to the input .mrcs stack')
    parser.add_argument('output_stack', type=str,
                        help='path to the output .mrcs stack')
    parser.add_argument('crop_dimensions', type=str,
                        help='dimensions to crop. Provided as x_dim,y_dim (e.g. 128,96)')
    parser.add_argument('--cs_file', default='',
                        help='path to the cryosparcv2.cs file (expected export output from a 2D classification job)')
    parser.add_argument('--test', default=False, action='store_true',
                        help='used for testing - will only display the command to be executed, but will not actually run')
    parser.add_argument('--e2path', default='',
                        help='provide a full path to the e2proc2d.py file. If not provided, this file MUST be in your PATH')
    parser.add_argument('--more_args', default='',
                        help='provide additional arguments to e2proc2d.py. These are parsed as one string in quotes. Example --more_args "--first 100 --last 200" would only crop images 100-200. See e2proc2d wiki for additional options.')
    args = parser.parse_args()
    input_stack= vars(args)['input_stack']
    output_stack= vars(args)['output_stack']
    cs_file = vars(args)['cs_file']
    e2_path = vars(args)['e2path']
    # NOTE: `test` and `more_args` are module globals consumed by crop_stack().
    test = vars(args)['test']
    more_args = vars(args)['more_args']
    dims = vars(args)['crop_dimensions']
    dim_x, dim_y = dims.split(',')
    crop_stack(input_stack, output_stack, dim_x, dim_y, e2_path)
    if cs_file != '':
        print("...preparing to edit .cs file to reflect the new box size. New file will be:")
        new_file_name = cs_file.split('.cs')[0]+'_'+dim_x+'x'+dim_y+'.cs'
        print(new_file_name)
        # Rewrite blob/shape for every particle row in the .cs file.
        np_cs = edit_cs.parse_cs(cs_file)
        index = slice(0,np_cs.shape[0])
        np_cs_new = edit_cs.edit_field(np_cs, 'blob/shape', index, np.array([dim_x,dim_y]))
        if not(test):
            print("...editing " + cs_file + ". producing new cs file: " + new_file_name)
            edit_cs.write_cs(new_file_name, np_cs_new)
        else:
            print("...running in test mode. No new .cs file was produced.")
984,852 | c0cdecfe121ec4616561ab8f42a4252006e9ceae | from sortedcontainers import SortedDict
"""
@author Anirudh Sharma
Given a binary tree, return the bottom view from left to right.
A node is included in the bottom view if it can be seen when we look at the tree from the bottom.
Constraints:
1 <= Number of nodes <= 10^5
1 <= Data of a node <= 10^5
"""
def bottomView(root):
    """Return the bottom view of a binary tree, left to right.

    A node belongs to the bottom view when it is the last node visited
    (in level order) at its horizontal distance from the root, i.e. the
    node visible from below at that column.

    Improvements over the original: a plain dict plus one final sort
    replaces the third-party SortedDict, and collections.deque gives
    O(1) queue pops instead of list.pop(0)'s O(n) shifts. The returned
    list is identical.
    """
    from collections import deque

    # Special case: empty tree has an empty bottom view.
    if root is None:
        return []
    # Maps horizontal distance -> data of the last node seen at that
    # distance during BFS; later (deeper) visits overwrite earlier ones.
    column_to_data = {}
    # The root sits at horizontal distance zero.
    root.horizontalDistance = 0
    queue = deque([root])
    while queue:
        node = queue.popleft()
        hd = node.horizontalDistance
        column_to_data[hd] = node.data
        # Children shift one column left / right of their parent.
        if node.left:
            node.left.horizontalDistance = hd - 1
            queue.append(node.left)
        if node.right:
            node.right.horizontalDistance = hd + 1
            queue.append(node.right)
    # Emit columns from leftmost to rightmost.
    return [column_to_data[hd] for hd in sorted(column_to_data)]
class Node:
    """Binary-tree node that also records its horizontal distance from the root."""

    def __init__(self, data):
        self.data = data
        # Column offset relative to the root; filled in during traversal.
        self.horizontalDistance = 0
        # Child links start empty.
        self.left = self.right = None
if __name__ == "__main__":
    # Build the sample tree from the classic bottom-view example:
    #             20
    #           /    \
    #          8      22
    #         / \    /  \
    #        5   3  4    25
    #           / \
    #          10  14
    root = Node(20)
    root.left = Node(8)
    root.right = Node(22)
    root.left.left = Node(5)
    root.left.right = Node(3)
    root.right.left = Node(4)
    root.right.right = Node(25)
    root.left.right.left = Node(10)
    root.left.right.right = Node(14)
    # Expected bottom view for this tree: [5, 10, 4, 14, 25]
    print(bottomView(root))
984,853 | 7e5f9155c0e22abb671c826d940da51c3ba46294 | # manual function to predict the y or f(x) value power of the input value speed of a wind turbine.
# Import linear_model from sklearn.
import matplotlib.pyplot as plt
import numpy as np
# Let's use pandas to read a csv file and organise our data.
import pandas as pd
import sklearn.linear_model as lm
# read the dataset (remote URL kept for reference; local copy is used)
#df = pd.read_csv('https://raw.githubusercontent.com/ianmcloughlin/2020A-machstat-project/master/dataset/powerproduction.csv')
df =pd.read_csv('powerproduction.csv')
# Plots styles.
# Plot size.
plt.rcParams['figure.figsize'] = (14, 10)
# Create a linear regression model instance.
m = lm.LinearRegression()
# Sanity check: count missing values per column.
df.isnull().sum()
# NOTE(review): 'power' (the target) is included among the features here,
# which makes the linear fit trivially perfect — confirm this is intended.
x=df[["speed","power"]]
y=df["power"]
m.fit(x,y)
m.intercept_
m.coef_
m.score(x,y)
z=df["speed"]
q=df["power"]
# Degree-1 fit (slope/intercept); NOTE(review): this rebinds `m`, which
# previously held the LinearRegression instance.
np.polyfit(z,q,1)
m,c =np.polyfit(z,q,1)
# Degree-3 (cubic) fit; these coefficients are consumed by findy() below.
a,b,c,d = np.polyfit(z,q,3)
def findy(x, coefficients=None):
    """Predict turbine power output for wind speed ``x`` via the cubic fit.

    Parameters
    ----------
    x : float
        Wind speed value.
    coefficients : sequence of 4 floats, optional
        Cubic polynomial coefficients, highest power first. Defaults to
        the module-level ``(a, b, c, d)`` obtained from
        ``np.polyfit(z, q, 3)``, so existing ``findy(x)`` callers are
        unaffected.

    Returns
    -------
    str
        Predicted power formatted to two decimal places. Negative
        predictions are clamped to 0 because a turbine cannot produce
        negative power.
    """
    print('x =', x)
    ca, cb, cc, cd = (a, b, c, d) if coefficients is None else coefficients
    y = (ca * x ** 3) + (cb * x ** 2) + (cc * x) + cd
    if y < 0:
        y = 0
    return '{:.2f}'.format(y)
# Demo: predict power output at wind speed 10 using the cubic fit.
print('y = ', findy(10))
|
984,854 | 326fa1c08f14885d644a823533d2c75865428d36 | #!/usr/bin/python
import sys
import os
import optparse
from gtclib import golive
DEPLOYMENT_NAME='deploy'
if __name__ =='__main__':
    # Python 2 script: deletes orphaned deploy.file records, i.e. those
    # whose referenced resource (template_id.model / res_id) no longer exists.
    usage = "usage: python %prog [options] dbname\n"
    parser = optparse.OptionParser(version='0.1', usage=usage)
    deploy_group=golive.get_deploy_options_group(parser)
    parser.add_option_group(deploy_group)
    opt, args = parser.parse_args(sys.argv)
    user_id,host_id = golive.get_env(opt)
    # First positional argument is the database name.
    dbname=args[1]
    server_path, config_file = golive.get_server_and_conf(opt, DEPLOYMENT_NAME)
    #server_path, config_file = '/home/jan/github.com/odoo7', '/home/jan/projects/server_pjbrefactoring.conf'
    # The OpenERP server is importable only after its path is appended.
    sys.path.append(server_path)
    import openerp
    import openerp.tools.config
    openerp.tools.config.parse_config(['--config=%s' % config_file])
    import openerp.addons.galtyslib.openerplib as openerplib
    r=openerp.registry(dbname)
    openerp.api.Environment.reset()
    cr=r.cursor()
    # uid 1 is the administrator account.
    uid=1
    env= openerp.api.Environment(cr, uid, {})
    pool=r
    file_ids=pool.get("deploy.file").search(cr, uid, [])
    delete_ids=[]
    # Collect deploy.file records whose target record no longer exists.
    for f in pool.get("deploy.file").browse(cr, uid, file_ids):
        i = pool.get(f.template_id.model).search(cr, uid, [('id','=',f.res_id)] )
        if len(i)==0:
            delete_ids.append( f.id)
    print delete_ids
    print pool.get("deploy.file").unlink(cr, uid, delete_ids)
    # Persist the deletions before closing the cursor.
    cr.commit()
    cr.close()
|
984,855 | 813cd1996e16971dd246e4c2646839143056289e | import os
# Collect unique e-mail user names from the emails_by_address directory:
# file names look like "from_<addr>@..." or "to_<addr>@...".
emails = set()
for dir in os.listdir("final_project/emails_by_address"):
    s = dir.find("from_")
    if s == -1:
        # Not a "from_" file: assume a "to_" prefix (3 chars) instead.
        # NOTE(review): if neither prefix is present, s stays -1 and the
        # slice below picks up garbage — confirm all files match.
        s = dir.find("to_")
        e = dir.find("@", s + 3)
        emails.add(dir[s + 3:e])
    else:
        # "from_" prefix is 5 characters long.
        e = dir.find("@", s + 5)
        emails.add(dir[s + 5:e])
        # Also harvest user names from "maildir/<name>/..." paths listed
        # inside the file. NOTE(review): only "from_" files are scanned
        # this way — confirm the asymmetry with "to_" files is intended.
        with open("final_project/emails_by_address/"+dir) as email:
            for line in email:
                s = line.find("maildir/")
                e = line.find("/", s+8)
                emails.add(line[s+8:e])
print(emails)
print(len(emails))
984,856 | a855b3b3dc8d05663f0a573ae912a418dc8b4e82 | from django.urls import path,include
from test_app import views
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
# URL routing for test_app: auth, admin/user dashboards, cart actions, API.
urlpatterns = [
    # Root shows the login view.
    path('', views.LoginView, name='login'),
    path('admin_dashboard', views.admin_Dashboard, name='admin_dashboard'),
    # Login,Logout authentication
    path('user_register', views.User_registration, name='user_register'),
    path('admin_register', views.admin_registration, name='admin_register'),
    # path('login/', auth_views.LoginView.as_view(template_name='login/login.html', authentication_form=LoginForm),
    #      name='login'),
    path('logout', auth_views.LogoutView.as_view(next_page='login'), name='logout'),
    path('add_product', views.Add_Product, name='add_product'),
    # User
    path('user_dashboard', views.user_Dashboard, name='user_dashboard'),
    path('cart', views.cart, name='cart'),
    # Cart quantity adjustments (no URL names registered for these).
    path('pluscart', views.pluscart),
    path('minuscart', views.minuscart),
    path('removecart', views.removecart),
    # API url
    path('api/', include('test_app.routers')),
    #
]
# Serve user-uploaded media directly only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
984,857 | cd71ffec76fb50dca68af0343da648d3a6db99eb | import os
import glob
from skimage import io
import numpy as np
import click
@click.command()
@click.option("--base-dir", required=True, help="PROBA-V root downloaded dir.")
@click.option("--out-dir", required=True, help="Output where to save the pickles.")
@click.option("--band", required=True, help="RED|NIR band.")
def data_to_pickle(base_dir, out_dir, band):
    '''Pickle the PROBA-V challenge images as numpy .npy files.

    base_dir: root PROBA-V directory (as downloaded from the challenge website)
    out_dir: directory where the .npy pickles are written
    band: "RED" or "NIR"
    '''
    # Strip trailing whitespace only (rstrip with no argument).
    out_dir = out_dir.rstrip()
    # --- training split ---
    train_dir = os.path.join(base_dir, 'train/'+band)
    dir_list = glob.glob(train_dir+'/imgset*')
    dir_list.sort()
    # Low-resolution frames: one inner list of LR images per scene.
    input_images_LR = np.array([[io.imread(fname) for fname in sorted(glob.glob(dir_name+'/LR*.png'))]
                                for dir_name in dir_list])
    input_images_LR.dump(os.path.join(out_dir, f'LR_dataset_{band}.npy'))
    # One high-resolution ground-truth image per scene.
    input_images_HR = np.array(
        [io.imread(glob.glob(dir_name+'/HR.png')[0]) for dir_name in dir_list])
    input_images_HR.dump(os.path.join(out_dir, f'HR_dataset_{band}.npy'))
    # Status (quality) masks: SM for HR, QM for each LR frame.
    mask_HR = np.array([io.imread(glob.glob(dir_name+'/SM.png')[0])
                        for dir_name in dir_list])
    mask_HR.dump(os.path.join(out_dir, f'HR_mask_{band}.npy'))
    mask_LR = np.array([[io.imread(fname,)for fname in sorted(glob.glob(dir_name+'/QM*.png'))]
                        for dir_name in dir_list])
    mask_LR.dump(os.path.join(out_dir, f'LR_mask_{band}.npy'))
    # --- test split (no HR ground truth available) ---
    train_dir = os.path.join(base_dir, 'test', band)
    dir_list = glob.glob(train_dir+'/imgset*')
    dir_list.sort()
    test_images_LR = np.array([[io.imread(fname) for fname in sorted(glob.glob(dir_name+'/LR*.png'))]
                               for dir_name in dir_list])
    test_images_LR.dump(os.path.join(out_dir, f'LR_test_{band}.npy'))
    test_mask_LR = np.array([[io.imread(fname) for fname in sorted(glob.glob(dir_name+'/QM*.png'))]
                             for dir_name in dir_list])
    test_mask_LR.dump(os.path.join(out_dir, f'LR_mask_{band}_test.npy'))
if __name__ == '__main__':
    data_to_pickle()
|
984,858 | 21967d930266438143ca2f6abf49d50572a5207a | #!/usr/bin/env python
# Print three fixed names right-aligned in a 30-character field.
name1 = "Dave"
name2 = "Shelly"
name3 = "Corey"
# NOTE(review): name4 is read from the user but never printed below —
# confirm whether a fourth print line is missing.
name4 = input("Enter fourth name: ")
print()
print("{:>30}".format(name1))
print("{:>30}".format(name2))
print("{:>30}".format(name3))
print()
|
984,859 | 0560fc1bd23b3ea7079eacbfab95c2ac91d1399c | BUCKET_NAME = 'company-crawler-check'
# S3 bucket / folder names used by the crawler pipeline.
AUTOMI_LOCAL_LAPTOP_PRODUCTION = "automi-local-laptops-production"
OFFLINE_RESPONSE = "Offline-Response"
FINAL_JSON = "finalJson"
JSON_UPLOAD = "jsonUpload"
PROFILE_JSON = 'profileJSON'
COMPANY_HTML = "companyHTML"
PROFILE_HTML = "profileHTML"
ZIP_FILE = "zipFile"
# Local file names for payloads and per-run logs.
LOCAL_FILE_PATH = 'payload.json'
INPUT_LOGS_CSV = 'input_logs.csv'
OUTPUT_LOGD_CSV = 'output_logs.csv'
ERROR_FINAL_JSON = 'final_json_error_logs.csv'
PREFIX='input/LOCAL_'
COMPANY_JSON_CHECK = 'COMPANY_JSONS_CHECK/'
MOBILE_CRAWLED_ZIPS = 'MOBILE_CRAWLED_ZIPS/'
POST_PROCESSING_COMPLETED_ZIP = 'POST_PROCESSING_COMPLETED_ZIP'
BATCH_PRESENT_FOLDER = "NIKHIL_BATCH_CHECK"
BATCH_PROCESSING_COMPLETE = "BATCH_PROCESSING_COMPLETE"
PROFILE_ZIP_PREFIX = 'Html_'
REQUEST_PATH = 'requests/'
REQUEST_ID = 'REQUEST_ID'
REQUEST_DIR = 'requests'
PROCESSED_BUCKET = 'processed/'
LAST_MODIFIED = 'LastModified'
# Batch range and step for processing runs.
START = 1300
END = 2000
INCREMENT = 1
# Flask bind address for the local service.
HOST = "0.0.0.0"
PORT = 5000
# Regexes used to normalise LinkedIn profile URLs.
LINKEDIN_URL_REGEX = 'linkedin.com\/in'
LINKEDIN_SUB_REGEX = '\?.*'
LINKEDIN_QUESTION_FIND_REGEX = '\?'
BLANK = ''
PROFILE_URL = "PROFILE_URL"
AUTH_ACTIVE_PATH ='AUTH/ACTIVE/'
# Slack notification settings.
SLACK_CHANNEL = '#special_projects_testing' #'#lead_crawler_testing'
# SECURITY(review): hardcoded Slack OAuth token committed to source —
# this credential should be revoked and loaded from the environment instead.
SLACK_TOKEN = 'xoxp-8164026197-896389526550-1086353458450-9b6c36f279f93387e71d6ebd43301a0e'
SLACK_BOT_USERNAME = "Crawl-Monitoring-Bot"
SLACK_BOT_EMOJI = ':robot_face'
# Local service endpoints driven by this pipeline.
COMPANY_CRAWL_API = "http://localhost:9090/startCompanyCrawl"
PROFILE_PROCESSING_API = "http://localhost:5000/uploadProfile?requestId="
|
984,860 | e4bae58f7c9d861a28f794b1eb56aae2722bb317 | from behave import *
from features.pages.main_page import MainPage
from pages import LoginPage
@given('I open {page_name} page')
def step_impl(context, page_name):
    """Navigate to the named page ("home" or "login") and set the page object."""
    base_url = context.config.get('settings', 'base_url')
    if page_name == "home":
        context.driver.get(f'{base_url}')
        context.current_page = MainPage(context.driver)
    elif page_name == "login":
        # Direct URL of the MediaWiki login special page.
        context.driver.get(f'{base_url}/w/index.php?title=Special:UserLogin')
        context.current_page = LoginPage(context.driver)
@when('I log in')
def step_impl(context):
    """Fill in the configured credentials and submit the login form."""
    context.current_page.type_in('username field', context.config.get('user', 'username'))
    context.current_page.type_in('password field', context.config.get('user', 'password'))
    context.current_page.click_on('login button')
    context.current_page = MainPage(context.driver)  # Verify user is on main page
# use of data table example
@then('I see validation message for')
def step_impl(context):
    """Re-run the login flow for each table row and assert its expected message."""
    for row in context.table:
        # Compose existing steps instead of duplicating their logic.
        context.execute_steps(f'''
            When I type "{row['username']}" in username field
            When I type "{row['password']}" in password field
            When I click on login button
            Then I see "{row['text']}" on the page
        ''')
|
984,861 | b15e5508a2636b554452f8cd44bd6297d0847082 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import cymysql
import time
import sys
import socket
import config
import json
import urllib2, urllib
class DbTransfer(object):
    """Python 2 bridge between a shadowsocks-style manage socket and MySQL.

    Periodically pulls per-port traffic counters over a UDP management
    socket and pushes them into the `transfer`/`user` tables; also
    starts/stops per-user servers based on quota, lock flag and password.
    """
    # Process-wide singleton instance (see get_instance()).
    instance = None
    def __init__(self):
        self.last_get_transfer = {}
    @staticmethod
    def get_instance():
        """Lazily create and return the singleton DbTransfer."""
        if DbTransfer.instance is None:
            DbTransfer.instance = DbTransfer()
        return DbTransfer.instance
    @staticmethod
    def get_mysql_config():
        """Build the cymysql connection kwargs from the config module."""
        return {
            'host': config.MYSQL_HOST,
            'port': config.MYSQL_PORT,
            'user': config.MYSQL_USER,
            'passwd': config.MYSQL_PASS,
            'db': config.MYSQL_DB,
            'charset': 'utf8'
        }
    @staticmethod
    def send_command(cmd):
        """Send one UDP command to the manage port; return the raw reply ('' on error)."""
        data = ''
        try:
            cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            cli.settimeout(1)
            cli.sendto(cmd, ('%s' % (config.MANAGE_BIND_IP), config.MANAGE_PORT))
            data, addr = cli.recvfrom(1500)
            cli.close()
            # TODO: crude workaround for timeouts — a short pause between commands.
            time.sleep(0.05)
        except:
            # Best-effort: log and return whatever (possibly empty) data we have.
            logging.warn('send_command response')
        return data
    @staticmethod
    def get_servers_transfer():
        """Collect per-port transfer counters from the manage socket.

        The server streams JSON datagrams and terminates with a literal 'e'.
        Returns a dict mapping port -> transferred bytes.
        """
        dt_transfer = {}
        cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        cli.settimeout(2)
        cli.sendto('transfer: {}', ('%s' % (config.MANAGE_BIND_IP), config.MANAGE_PORT))
        bflag = False
        while True:
            data, addr = cli.recvfrom(1500)
            # 'e' marks the end of the transfer stream.
            if data == 'e':
                break
            data = json.loads(data)
            # print data
            dt_transfer.update(data)
        cli.close()
        return dt_transfer
    @staticmethod
    def push_db_all_user():
        """Write collected traffic counters to the transfer and user tables."""
        dt_transfer = DbTransfer.get_instance().get_servers_transfer()
        conn = cymysql.connect(**DbTransfer.get_instance().get_mysql_config())
        cursor = conn.cursor()
        # Fetch the user-id <-> port mapping.
        sql = 'SELECT userId user_id, port from user'
        cursor.execute(sql)
        port_to_user = {}
        for item in cursor.fetchall():
            port_to_user[str(item[1])] = item[0]
        insert_rows = []
        insert_sql = 'INSERT INTO transfer (nodeId, userId, flowUp, flowDown, activeAt) VALUES (%s, %s, %s, %s, %s)'
        # The UPDATE is assembled as one CASE expression covering all users.
        update_head = 'UPDATE user'
        update_sub_when = ''
        update_sub_when2 = ''
        update_sub_in = None
        last_time = time.strftime('%Y-%m-%d %H:%M:%S')
        for id in dt_transfer.keys():
            # Ignore tiny traffic (e.g. caused by port scans).
            if (dt_transfer[id]) < 1024:
                continue
            user_id = port_to_user[str(id)]
            insert_rows.append([config.NODE_ID, user_id, 0, dt_transfer[id], last_time])
            update_sub_when += ' WHEN %s THEN flowUp+%s' % (user_id, 0) # all counted as download
            update_sub_when2 += ' WHEN %s THEN flowDown+%s' % (user_id, dt_transfer[id])
            if update_sub_in is not None:
                update_sub_in += ',%s' % user_id
            else:
                update_sub_in = '%s' % user_id
        cursor.executemany(insert_sql, insert_rows)
        conn.commit()
        # Nothing exceeded the threshold: no user rows to update.
        if update_sub_in is None:
            return
        update_sql = update_head + ' SET flowUp = CASE userId' + update_sub_when + \
                    ' END, flowDown = CASE userId' + update_sub_when2 + \
                    ' END, activeAt = "%s"' % (last_time) + \
                    ' WHERE userId IN (%s)' % update_sub_in
        cursor.execute(update_sql)
        cursor.close()
        conn.commit()
    @staticmethod
    def pull_db_all_user():
        """Refresh this node's heartbeat and return all user rows as lists.

        Row layout: [port, flow_up, flow_down, transfer_enable, password, is_locked].
        """
        conn = cymysql.connect(**DbTransfer.get_instance().get_mysql_config())
        cursor = conn.cursor()
        active_at = time.strftime('%Y-%m-%d %H:%M:%S')
        update_sql = 'UPDATE node SET activeAt = "%s" WHERE nodeId = %d' % (active_at, config.NODE_ID)
        cursor.execute(update_sql)
        conn.commit()
        cursor.execute("SELECT port, flowUp flow_up, flowDown flow_down, transferEnable transfer_enable, password, isLocked is_locked FROM user")
        rows = []
        for r in cursor.fetchall():
            rows.append(list(r))
        cursor.close()
        conn.close()
        return rows
    @staticmethod
    def del_server_out_of_bound_safe(rows):
        """Reconcile running servers with DB state: stop locked/over-quota users, start eligible ones."""
        for row in rows:
            server = json.loads(DbTransfer.get_instance().send_command('stat: {"server_port":%s}' % row[0]))
            if server['stat'] != 'ko':
                if row[5] == 'Y':
                    # stop disabled or switched-off user
                    logging.info('db stop server at port [%s] reason: disable' % (row[0]))
                    DbTransfer.send_command('remove: {"server_port":%s}' % row[0])
                elif row[1] + row[2] >= row[3]:
                    # stop user whose up+down traffic exceeds their quota
                    logging.info('db stop server at port [%s] reason: out bandwidth' % (row[0]))
                    DbTransfer.send_command('remove: {"server_port":%s}' % row[0])
                if server['password'] != row[4]:
                    # password changed: remove so it can be re-added with the new one
                    logging.info('db stop server at port [%s] reason: password changed' % (row[0]))
                    DbTransfer.send_command('remove: {"server_port":%s}' % row[0])
            else:
                # Not running: start it if unlocked and within quota.
                if row[5] == 'N' and row[1] + row[2] < row[3]:
                    logging.info('db start server at port [%s] pass [%s]' % (row[0], row[4]))
                    DbTransfer.send_command('add: {"server_port": %s, "password":"%s"}' % (row[0], row[4]))
                    # print('add: {"server_port": %s, "password":"%s"}'% (row[0], row[4]))
    @staticmethod
    def thread_db():
        """Worker loop: periodically pull user rows and reconcile servers."""
        import socket
        import time
        timeout = 30
        socket.setdefaulttimeout(timeout)
        while True:
            logging.info('db loop')
            try:
                rows = DbTransfer.get_instance().pull_db_all_user()
                DbTransfer.del_server_out_of_bound_safe(rows)
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.warn('db thread except:%s' % e)
            finally:
                time.sleep(config.CHECKTIME)
    @staticmethod
    def thread_push():
        """Worker loop: periodically push traffic counters into the database."""
        import socket
        import time
        timeout = 30
        socket.setdefaulttimeout(timeout)
        while True:
            logging.info('db loop2')
            try:
                DbTransfer.get_instance().push_db_all_user()
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.warn('db thread except:%s' % e)
            finally:
                time.sleep(config.SYNCTIME)
|
984,862 | c69876fc0eded570b99f93036f9bbef62452521d | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Users(db.Model):
    """Registered user; the e-mail address is the primary key."""
    __tablename__ = "users"
    # NOTE(review): password column appears to hold the raw value passed in —
    # confirm hashing happens before construction.
    email = db.Column(db.String(120), primary_key = True, nullable = False)
    password = db.Column(db.String(80), nullable = False)
    timestamp = db.Column(db.DateTime, nullable = False)
    def __init__(self, email, password, timestamp) :
        self.email = email
        self.password = password
        self.timestamp = timestamp
    def __repr__(self):
        return self.email
class Books(db.Model):
    """Book catalogue entry keyed by ISBN."""
    __tablename__="books"
    isbn=db.Column(db.String,primary_key=True)
    title=db.Column(db.String,nullable=False)
    author=db.Column(db.String,nullable=False)
    year=db.Column(db.String,nullable=False)
    def __init__(self,isbn,title,author,year):
        self.isbn = isbn
        self.title = title
        self.author = author
        self.year = year
    def get_book_details(self,isbn):
        """Look up a book by ISBN via the model query.

        NOTE(review): ignores self and queries by the argument —
        effectively a class-level lookup; consider @classmethod.
        """
        b = Books.query.get(isbn)
        return b
class Reviews(db.Model):
    """One user's review of one book; (email, book_isbn) forms the composite key."""
    __tablename__="reviews"
    email = db.Column(db.String, db.ForeignKey('users.email'))
    book_isbn = db.Column(db.String, db.ForeignKey('books.isbn'))
    review = db.Column(db.String)
    rating = db.Column(db.String)
    # Composite primary key: a user may review each book at most once.
    __table_args__ = (db.PrimaryKeyConstraint('email', 'book_isbn'),)
    def __init__(self, email, book_isbn, review, rating):
        self.email = email
        self.book_isbn = book_isbn
        self.review = review
        self.rating = rating
|
984,863 | 73c0071c72b93acd9e16db707896eabcf92f6368 | import os
import csv
from pathlib import Path

# Path to collect data from the Resources folder
bank_csv = os.path.join("Resources", "budget_data.csv")

# months[i] holds the month label of row i; profit[i] its profit/loss.
months = []
profit = []
# profit_change[i] = profit[i+1] - profit[i] (month-over-month delta).
profit_change = []

# Open the CSV and store the contents
with open(bank_csv) as bankfile:
    csvreader = csv.reader(bankfile, delimiter=",")
    # Skip the header
    header = next(csvreader)
    # Work through the rows
    for row in csvreader:
        months.append(row[0])
        profit.append(int(row[1]))

# Work through the profits to find the month-over-month change
for i in range(len(profit) - 1):
    profit_change.append(profit[i + 1] - profit[i])

# Largest rise and largest drop in the month-over-month change
max_increase = max(profit_change)
max_decrease = min(profit_change)
# A change at index i happened going INTO month i+1, hence the +1 offset.
max_month = profit_change.index(max_increase) + 1
min_month = profit_change.index(max_decrease) + 1

# Print
print("Total Months: " + str(len(months)))
print("Total: $" + str(sum(profit)))
print("Average Change: $" + str(round(sum(profit_change) / len(profit_change), 2)))
print("Greatest Increase in Profits: " + months[max_month] + " (" + str(max_increase) + ")")
# BUG FIX: previously printed min_month (a list index) where the decrease
# AMOUNT belongs; report max_decrease instead.
print("Greatest Decrease in Profits: " + months[min_month] + " (" + str(max_decrease) + ")")

# Output — newlines added so the report lines don't run together in the file.
output_file = Path("Bank_Analysis.csv")
with open(output_file, "w") as file:
    file.write("Total Months: " + str(len(months)) + "\n")
    file.write("Total: $" + str(sum(profit)) + "\n")
    file.write("Average Change: $" + str(round(sum(profit_change) / len(profit_change), 2)) + "\n")
    file.write("Greatest Increase in Profits: " + months[max_month] + " (" + str(max_increase) + ")\n")
    file.write("Greatest Decrease in Profits: " + months[min_month] + " (" + str(max_decrease) + ")\n")
|
984,864 | ea2bccf3ff227bb274f9c590d3a29b76dd577a79 | class Trabalhador:
    def __init__(self, nome) -> None:
        """Create a worker with a name and, initially, no tool."""
        self.__nome = nome  # private (name-mangled) attribute of the class
        self.__ferramenta = None
    @property
    def nome(self):
        """The worker's name (read-only)."""
        return self.__nome
    @property
    def ferramenta(self):
        """The worker's current tool, or None if unassigned."""
        return self.__ferramenta
    @ferramenta.setter
    def ferramenta(self, ferramenta):
        self.__ferramenta = ferramenta
class Computador:
    """A computer tool identified by its operating system."""

    def __init__(self, sistema) -> None:
        # The constructor doubles as the only setter: ``sistema`` is
        # read-only for the rest of the object's lifetime.
        self.__sistema = sistema

    @property
    def sistema(self):
        """The computer's operating system (read-only)."""
        return self.__sistema

    def usar(self):
        """Announce that a worker is using this computer."""
        print(f'Este funcionário está trabalhando com o computador {self.sistema}')
class Caneta:
    """A pen tool identified by its brand."""

    def __init__(self, marca) -> None:
        self.__marca = marca

    @property
    def marca(self):
        """The pen's brand (read-only)."""
        return self.__marca

    def usar(self):
        """Announce that a worker is using this pen."""
        print(f'Este funcionário está trabalhando com a caneta {self.marca}')
|
984,865 | d5489d4c489424ff11b99686e9d76885b69abb34 | class Posts:
    # Plain data holder for a scraped social-media post. NOTE(review):
    # the Java-style get_/set_ accessors add nothing over direct attribute
    # access; keep only because external callers use this interface.
    def __init__(self):
        self.text = None
        self.time = None
        self.reshare_count = None
        self.status_id = None
        self.user = None
        self.resharers = None
        self.keyword = None
    def set_text(self, text):
        self.text = text
    def set_time(self, time):
        self.time = time
    def set_reshare_count(self, reshare_count):
        self.reshare_count = reshare_count
    def set_status_id(self, status_id):
        self.status_id = status_id
    def set_user(self, user):
        self.user = user
    def set_resharer(self, resharers):
        self.resharers = resharers
    def set_keyword(self, keyword):
        self.keyword = keyword
    def get_text(self):
        return self.text
    def get_time(self):
        return self.time
    def get_reshare_count(self):
        return self.reshare_count
    def get_status_id(self):
        return self.status_id
    def get_resharer(self):
        return self.resharers
    def get_user(self):
        return self.user
    def get_keyword(self):
        return self.keyword
|
984,866 | 38b81c894e18d45a6161e271ae633710224ad2c7 | import cv2
import numpy as np
import matplotlib.pyplot as plt
#read image and IMREA_GRAYSCALE to convert image in grayscale substitude is 0
# Load the image in color (pass cv2.IMREAD_GRAYSCALE, or 0, for grayscale).
img = cv2.imread('D:/d.jpg', cv2.IMREAD_COLOR)
# Blue diagonal line, thickness 10 (OpenCV colours are BGR).
cv2.line(img, (0, 0), (150, 150), (255, 0, 0), 10)
# Green rectangle outline, thickness 5.
cv2.rectangle(img, (15, 25), (200, 150), (0, 255, 0), 5)
# Filled red circle (thickness -1 means "fill").
cv2.circle(img, (447, 63), 63, (0, 0, 255), -1)
# Closed polygon through the listed vertices.
pts = np.array([[10, 5], [20, 30], [70, 50], [50, 10]], np.int32)
cv2.polylines(img, [pts], True, (0, 255, 0), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'open tuts', (0, 140), font, 1, (200, 255, 255), 2, cv2.LINE_AA)
# BUG FIX: imshow previously ran before putText, so the window displayed the
# image without the text overlay. Show the finished image, then block on a key.
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
984,867 | 5293dde84aa260ab0054b6656e5f4cca3377611c | from PyQt5 import (QtWidgets, QtCore, QtGui)
from PyQt5.QtWidgets import qApp
import cyberchorus
import sys
# Initial main-window dimensions in pixels.
WINDOW_X_DIM = 500
WINDOW_Y_DIM = 100
# [dialog title, dialog body] pairs for the Help/About message boxes.
aboutText = ['About', 'Developed by Benjamin Swanson and Noah Brown. Thanks for using our software! <license info here>']
helpText = ['Help', '<help text here>']
# Displays text in a new window.
class TextViewWindow(QtWidgets.QMainWindow):
def __init__(self, text, parent=None):
super().__init__(parent)
self.text = text
self.textLabel = QtWidgets.QLabel(text)
self.textLabel.setWordWrap(True)
self.scrollArea = QtWidgets.QScrollArea()
self.scrollArea.setWidget(self.textLabel)
self.setCentralWidget(self.scrollArea)
self.text = QtWidgets.QLabel(text)
self.text.setMinimumSize(800, 800)
self.text.setSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
# Layout
self.layout = QtWidgets.QGridLayout()
self.layout.addWidget(self.textLabel, 0, 0)
self.layout.addWidget(self.text, 1, 0)
self.layout.setContentsMargins(15, 15, 15, 15)
self.layout.setSpacing(5)
self.setLayout(self.layout)
self.setMinimumSize(self.sizeHint())
# The main user window. Wraps WindowContents.
class MainWindow(QtWidgets.QMainWindow):
    """The main user window. Wraps WindowContent and adds the menu bar."""
    def __init__(self, windowName='Window', parent=None):
        super().__init__(parent)
        # Window config
        self.setWindowTitle(windowName)
        self.setGeometry(100, 100, WINDOW_X_DIM, WINDOW_Y_DIM)
        # centralize main widget contents
        self.form_widget = WindowContent()
        self.setCentralWidget(self.form_widget)
        # create menu bar
        bar = self.menuBar()
        barHelp = bar.addMenu('Help')
        barAbout = bar.addMenu('About')
        # Implement bar actions
        actionHelp = QtWidgets.QAction('Show Usage', self)
        actionAbout = QtWidgets.QAction('About CyberChorus', self)
        actionHelp.triggered.connect(lambda: self.DisplayText(helpText))
        actionAbout.triggered.connect(lambda: self.DisplayText(aboutText))
        # Add actions to tabs
        barHelp.addAction(actionHelp)
        barAbout.addAction(actionAbout)
    def DisplayText(self, text):
        """Pop up a message box given a [title, body] pair (see module constants)."""
        QtWidgets.QMessageBox.about(self, text[0], text[1])
# The contents of the MainWindow.
class WindowContent(QtWidgets.QWidget):
def __init__(self, parent=None):
super().__init__(parent)
# Button inits
self.buttonGetConfig = QtWidgets.QPushButton('Get Gonfiguration')
self.buttonRecord = QtWidgets.QPushButton('Record')
self.buttonUpload = QtWidgets.QPushButton('Upload Recording')
self.buttonQuit = QtWidgets.QPushButton('Quit')
# Set button callbacks
self.buttonGetConfig.clicked.connect(lambda: cyberchorus.GetConfig())
self.buttonRecord.clicked.connect(lambda: cyberchorus.RecordSinger())
self.buttonUpload.clicked.connect(lambda: cyberchorus.UploadRecording())
self.buttonQuit.clicked.connect(lambda: self.Quit())
# Label inits
self.labelEmpty = QtWidgets.QLabel('')
# Build layout
v_box = QtWidgets.QVBoxLayout()
v_box.addWidget(self.buttonGetConfig)
v_box.addWidget(self.buttonRecord)
v_box.addWidget(self.labelEmpty)
v_box.addWidget(self.buttonUpload)
self.setLayout(v_box)
def Quit(self):
qApp.quit()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the event loop until the user quits.
    app = QtWidgets.QApplication(sys.argv)
    app.setWindowIcon(QtGui.QIcon('note_icon.png'))
    mainWindow = MainWindow(windowName='CyberChorus')
    mainWindow.show()
    sys.exit(app.exec_())
|
984,868 | 2faaa9cdeee2c5cd76865ddae9a5528c9e478699 | from config import *
def main():
    """Bin uniform-sample stars per pointing by distance and dump their (Z, R).

    Relies on names star-imported from config (pickle, sys, np, and the
    rawdata/rbins/uni/zr directory constants).
    """
    # load the todo pointing list
    input_filename = rawdata_dir + 'todo_list.dat'
    sys.stderr.write('Loading from file {} ...\n'.format(input_filename))
    input_file = open(input_filename, 'rb')
    todo_list = pickle.load(input_file)
    input_file.close()
    # load the radial bin edges (lower/upper columns)
    bins_file = rbins_dir + 'rbins.dat'
    rlower, rupper = np.genfromtxt(bins_file, skip_header=1, unpack=True,
                                    usecols=[0,1])
    Nbins = len(rlower)
    for p in todo_list:
        # a progress indicator every 10 pointings
        if todo_list.index(p) % 10 == 0:
            sys.stderr.write('On pointing #{} of {} ..\n'
                                .format(todo_list.index(p), len(todo_list)))
        # uniform-sample stars for this pointing: distance plus Z, R coords
        random_file = uni_dir + 'uniform_' + p.ID + '.ascii.dat'
        distance, Z, R = np.genfromtxt( random_file, skip_header=1,
                                        unpack=True, usecols=[2,5,6] )
        for i in range(Nbins):
            # select stars with r1 < distance <= r2 for this bin
            r1 = rlower[i]
            r2 = rupper[i]
            indices = np.where((distance>r1)&(distance<=r2))[0]
            Z_bin = Z[indices]
            R_bin = R[indices]
            N_stars = len(Z_bin)
            # output format: star count, then one "Z<TAB>R" line per star
            output_file = ( zr_dir + 'uniform_ZR_' + p.ID + '_bin_'
                            + str(i) + '.dat' )
            with open(output_file, 'w') as f:
                f.write(str(N_stars))
                f.write('\n')
                for j in range(N_stars):
                    f.write('{}\t{}\n'.format(Z_bin[j], R_bin[j]))
if __name__ == '__main__':
    main()
|
984,869 | e1111bb43b865b90ed9bdd692f1059e4e39d3d18 | import os
import shutil
from collections import namedtuple
from functools import reduce
from tabulate import tabulate
# Raw shutil.disk_usage numbers plus the used fraction (0..1).
FileSystemUsage = namedtuple(
    'FileSystemUsage',
    ['total', 'used', 'free', 'percent_used']
)
# Per-directory totals: non-recursive file count and byte sum.
DirectoryDiskUsage = namedtuple(
    'DirectoryDiskUsage',
    ['name', 'file_count','total_bytes' ]
)
# Human-readable report aggregating the above (see summarize_usage).
Summary = namedtuple(
    'Summary',
    [
        'total_file_system',
        'total_file_system_used',
        'total_directory_space',
        'total_directory_files',
        'top_space_consumers',
    ]
)
def summarize_usage(path):
    """Build a Summary of file-system and per-directory usage under *path*.

    Byte totals are returned pre-formatted as human-readable strings
    (via _humanize_bytes); the top 20 space consumers are included.
    """
    file_system = filesystem_usage(path)
    per_directory = disk_usage_per_directory(path)
    # sum() over a generator is the idiomatic (and faster) form of the
    # previous reduce(lambda x, y: x + y.attr, ..., 0) calls.
    total_directory_space = sum(d.total_bytes for d in per_directory)
    total_directory_files = sum(d.file_count for d in per_directory)
    top_n_consumers = top_n_disk_space_consumers(per_directory, 20)
    # Re-wrap the winners with display-ready byte strings.
    top_n_consumers = [
        DirectoryDiskUsage(x.name, x.file_count, _humanize_bytes(x.total_bytes))
        for x in top_n_consumers
    ]
    return Summary(
        _humanize_bytes(file_system.total),
        _humanize_bytes(file_system.used),
        _humanize_bytes(total_directory_space),
        total_directory_files,
        top_n_consumers
    )
def tabulate_usage_summary(summary):
    """Render a Summary as two text tables: totals and top consumers.

    Returns a (summary_table, top_n_consumers_table) pair of strings
    produced by the third-party ``tabulate`` package.
    """
    summary_table = tabulate(
        [
            [
                summary.total_file_system,
                summary.total_file_system_used,
                summary.total_directory_space,
                summary.total_directory_files,
            ]
        ],
        headers=['File System Total', 'File System Used', 'Directory Space Used', 'Directory File Count']
    )
    top_n_consumers_rows = [[x.name, x.total_bytes, x.file_count] for x in summary.top_space_consumers]
    top_n_consumers_table = tabulate(top_n_consumers_rows, headers=['Directory Path', 'Directory Space Used', 'Directory File Count'])
    return summary_table, top_n_consumers_table
def filesystem_usage(path):
    """Return FileSystemUsage (total/used/free bytes plus used fraction) for *path*'s file system."""
    total, used, free = shutil.disk_usage(path)
    return FileSystemUsage(total, used, free, used / total)
def disk_usage_per_directory(path):
    """Walk *path* and return one DirectoryDiskUsage per directory.

    Each entry counts only the files directly inside that directory
    (byte totals are not recursive).
    """
    usages = []
    for root, _dirs, files in os.walk(path):
        size = 0
        for filename in files:
            size += os.path.getsize(os.path.join(root, filename))
        usages.append(DirectoryDiskUsage(root, len(files), size))
    return usages
def top_n_percent_disk_space_consumers(directory_disk_usage, top_n_percent):
    """Return the biggest consumers making up the given fraction (0..1) of entries."""
    keep = int(len(directory_disk_usage) * top_n_percent)
    return _first_n_items(directory_disk_usage, keep)
def top_n_disk_space_consumers(directory_disk_usage, n):
    """Return the *n* entries with the largest (non-zero) total_bytes, descending."""
    return _first_n_items(directory_disk_usage, n)
def _first_n_items(l, n):
s = sorted(l, key=lambda x: x.total_bytes, reverse=True)
s = list(filter(lambda x: x.total_bytes > 0, s))
return s[:n]
def _humanize_bytes(b):
base = 1e3
bounds = [
('KB', 1e6),
('MB',1e9),
('GB',1e12),
('TB',1e15),
('PB',1e18),
('EB',1e21),
('ZB',1e24),
('YB',1e27),
]
if b < 1e3:
return f'{b} Bytes'
for unit, bound in bounds:
if b <= bound:
return f'{b / (bound / base):.1f} {unit}'
return f'{b} Bytes'
|
984,870 | 8d93016c2ce4c86e05c3b2881e1d5f675d468787 | import os
from .trex_reader import TREx
def load_db_general(path, **kwargs):
    """Load train/dev/test splits of a T-REx-style database rooted at *path*.

    Each immediate subdirectory of *path* is expected to contain
    ``train.jsonl`` / ``dev.jsonl`` / ``test.jsonl``; one TREx reader is
    built per split covering all subdirectories. Extra kwargs are passed
    through to TREx. Returns {'train': TREx, 'dev': TREx, 'test': TREx}.
    """
    subs = os.listdir(path)
    print('loading from', path)
    # Build each split directly; the old code pre-filled the dict with
    # empty lists that were immediately overwritten.
    ret = {}
    for sp in ('train', 'dev', 'test'):
        files = {sub: os.path.join(path, sub, f'{sp}.jsonl') for sub in subs}
        ret[sp] = TREx(files, **kwargs)
    return ret
|
984,871 | 3b5b74388c2b1ca1a6ae8c48ba9e340bcb16e502 | # to run it
#python algPTR.py arg1 arg2 arg3
# db=twitter
# collection=refugees
#arg1 db, arg2 collection, arg3 nome file tweet, arg4 time or not (1,0), arg5 num iterations or num days , arg6 file con i seed, arg 7 param
def clean_collection(collection):
    """Remove the per-run classification fields from every document in the Mongo collection."""
    #print "Cleaning..."
    collection.update({}, {'$unset': {"us_final":1}}, multi=True)
    collection.update({}, {'$unset': {"tw_pr":1}}, multi=True)
def mostcommon(iterable, n=None):
    """Return a sorted list of the most common to least common elements and
    their counts. If n is specified, return only the n most common elements.

    Python 2 code (dict.iteritems). NOTE(review): relies on ``itemgetter``
    and ``nlargest`` which are not imported in this chunk — presumably
    imported elsewhere in the file; verify.
    """
    #import operator
    bag = {}
    bag_get = bag.get
    # Manual frequency count (equivalent to collections.Counter).
    for elem in iterable:
        bag[elem] = bag_get(elem, 0) + 1
    if n is None:
        return sorted(bag.iteritems(), key=itemgetter(1), reverse=True)
    # Index i breaks count ties deterministically inside nlargest.
    it = enumerate(bag.iteritems())
    nl = nlargest(n, ((cnt, i, elem) for (i, (elem, cnt)) in it))
    return [(elem, cnt) for cnt, i, elem in nl]
def get_data(file_class):
    """Read the seed file and return {"classN": [lowercased tokens of line N]}.

    Each line of *file_class* is a space-separated list of seed hashtags
    for one class/party. NOTE(review): the exec-based variable creation is
    fragile and unnecessary — a plain list per line would do the same; it is
    also unsafe if the file content were ever attacker-controlled.
    """
    final_dic={}
    rawfile=open(file_class, 'r')
    with rawfile as f:
        cnt=0
        for line in f:
            cnt+=1
            # Strip the trailing newline before splitting on spaces.
            if line[-1]=='\n':
                line=line[:-1]
            line=line.split(' ')
            exec("class"+str(cnt)+"=[]")
            for i in line:
                exec("class"+str(cnt)+".append(i.lower())")
            final_dic["class"+str(cnt)]=eval("class"+str(cnt))
    return final_dic
def polarization_track(collection, iteration, param, **final_dic):
    """Iteratively classify tweets, users and hashtags into groups.

    final_dic maps group names ("classN") to their seed hashtag lists.
    Each of the *iteration* passes:
      1. tags every tweet whose hashtags match exactly one group,
      2. assigns a user to a group when that group dominates the user's
         classified tweets (strictly more than twice the runner-up),
      3. re-scores every hashtag per group and rebuilds final_dic from the
         hashtags whose combined score exceeds *param*.
    Returns the textual report accumulated in `out`.

    NOTE(review): Python 2 code (has_key/iteritems, bare print statements,
    list-returning filter()); `clean_collection` and `mostcommon` are
    defined elsewhere in this file.
    """
    partiti=final_dic.keys()
    num_partiti=len(partiti)
    clean_collection(collection)
    #udic=[]
    #hdic=[]
    out=[]
    out.append("")
    print ("")
    out.append("Iterative procedure: " + str(iteration)+ " iterations.")
    print ("Iterative procedure: " + str(iteration)+ " iterations.")
    out.append("total tweets: " + str(collection.find().count()))
    print ("total tweets: " + str(collection.find().count()))
    out.append("total users: " + str(len(collection.find().distinct("user"))))
    print ("total users: " + str(len(collection.find().distinct("user"))))
    print ""
    # cic = iteration counter (translated from Italian).
    for cic in range(int(iteration)):
        out.append(datetime.now().time())
        print (datetime.now().time())
        out.append('STEP '+ str(cic+1))
        print ('STEP '+ str(cic+1))
        print ""
        out.append("tweets classification")
        print ("tweets classification")
        #logging.info('Step: '+str(cic))
        # user id -> list of group indexes of that user's classified tweets
        us_tw_dic={}
        for tweet in collection.find():
            # conto_p counts how many groups this tweet's hashtags match;
            # only tweets matching exactly one group get classified.
            conto_p=0
            for part in partiti:
                if conto_p>1:
                    break
                for i in tweet[u'ht']:
                    if i in final_dic[part]:
                        conto_p+=1
                        part_name=part
                        break
            if conto_p==1:
                collection.update({"_id":tweet['_id']},{'$set': {"tw_pr": part_name}})
                if us_tw_dic.has_key(int(tweet[u'user'])):
                    us_tw_dic[int(tweet[u'user'])].append(partiti.index(part_name))
                else:
                    us_tw_dic[int(tweet[u'user'])]=[partiti.index(part_name)]
        out.append("classified tweets:")
        print ("classified tweets:")
        out.append(collection.find({"tw_pr":{'$exists': True}}).count())
        print (collection.find({"tw_pr":{'$exists': True}}).count())
        #logging.info('tw '+str(collection.find({"tw_pr":{'$exists': True}}).count()))
        out.append(datetime.now().time())
        print (datetime.now().time())
        print ""
        #user_id=collection.find({"tw_pr":{'$exists': True}}).distinct("user")
        out.append("users classification")
        print ("users classification")
        us_dic={}
        pnt=0
        for i in us_tw_dic.keys():
            pnt+=1
            #if pnt%100000==0:
            #    print pnt
            list_part=us_tw_dic[i]
            most_list=mostcommon(list_part)
            # A user is polarized when one group strictly more than doubles
            # the runner-up (or is the only group present).
            if len(most_list)==1 or most_list[0][1]>(most_list[1][1])*2:
                us_dic[i]=most_list[0][0]
        #udic.append(us_dic)
        out.append("classified users:")
        print ("classified users:")
        out.append(len(us_dic.keys()))
        print (len(us_dic.keys()))
        out.append(datetime.now().time())
        print (datetime.now().time())
        #print len(collection.find({"us_final":{'$exists': True}}).distinct("user"))
        #logging.info('us '+str(len(us_dic)))
        #print datetime.now().time()
        #H_list=[]
        #Edges=[]
        # hashtag -> per-group score vector (one slot per group)
        hash_score={}
        #param=0.002
        part_ht=[]
        part_ht_dict=[]
        out.append("")
        print ("")
        for part in partiti:
            out.append(part)
            print (part)
            #print "graph construction"
            # Count, over tweets of users polarized towards `part`, how many
            # tweets mention each hashtag (once per tweet).
            hashtag={}
            num_tot_tw=0
            for a in collection.find():
                if us_dic.has_key(int(a[u'user'])):
                    if us_dic[int(a[u'user'])]==partiti.index(part):
                        num_tot_tw+=1
                        parole=[]
                        for i in a[u'ht']:
                            parole.append(i)
                        for p in set(parole):
                            if hashtag.has_key(p.lower()):
                                hashtag[p.lower()]+=1
                            else:
                                hashtag[p.lower()]=1
            #for q in polar_h:
            #    if hashtag.has_key(q):
            #        del hashtag[q]
            # Keep hashtags seen more than once, ordered by frequency.
            list_hashtag=[(k,v) for v,k in sorted([(v,k) for k,v in hashtag.items()],reverse=True) if v>1]
            #hot_hashtag=[]
            list_hash=[]
            for i in list_hashtag:
                #hot_hashtag.append(i[0])
                list_hash.append(i[0])
            out.append('retrieved hashtags')
            print ('retrieved hashtags')
            #H=nx.Graph()
            #for i in mostcommon(edges):
                #H.add_edge(i[0][0], i[0][1],weight=i[1]/float(mostcommon(edges)[0][1]))
            #H_list.append(H)
            #exec("%s_upd=list_hash" % part) in globals(), locals()
            #print part
            out.append(len(list_hash))
            print (len(list_hash))
            out.append(datetime.now().time())
            print (datetime.now().time())
            for h in list_hash:
                # score = fraction of this group's tweets containing h
                if hashtag.has_key(h):
                    score=hashtag[h]/float(num_tot_tw)
                else:
                    score=0
                #if h in eval(part):
                #    score=1
                #else:
                #    calc2=0
                #    for a in collection.find({"us_final2": part}):
                #        if h in a[u'ht']:
                #            if len(set(a[u'ht']).intersection(eval(part)))>0:
                #                calc2+=1
                #    val=calc2
                #    score=round(val/float(hashtag[h] + calc1 - val),5)
                if hash_score.has_key(h):
                    hash_score[h][partiti.index(part)]=score
                else:
                    hash_score[h]=[]
                    for np in range(len(partiti)):
                        hash_score[h].append(0)
                    hash_score[h][partiti.index(part)]=score
            #exec("%s=[]" % part)
            #exec("%sdic={}" % part)
            # Reset the seed list: it is rebuilt below from hash_score.
            final_dic[part]=[]
            #part_ht.append([])
            part_ht_dict.append({})
        #out.append(datetime.now().time())
        for k,v in hash_score.iteritems():
            max_h=max(v)
            cnt=v.index(max_h)
            # Python 2: filter() returns a list here; drops every slot equal
            # to the maximum, so the length check rejects ties for the max.
            v=filter(lambda a: a != max_h, v)
            if len(v)==(len(partiti)-1):
                # Combined score: max * product(1 - other group scores).
                kval=1
                for val_v in v:
                    kval=kval*(1-float(val_v))
                fscore=round(max_h*(kval),5)
                if fscore>param:
                    #print k
                    #print max_h
                    part=partiti[cnt]
                    #exec("%s.append(k)" % part)
                    #exec("%sdic[k]=fscore" % part)
                    final_dic[part].append(k)
                    part_ht_dict[partiti.index(part)][k]=fscore
        #print datetime.now().time()
        #for part in partiti:
        #    hdic.append(eval(part))
        for part in partiti:
            out.append('\n')
            print ('\n')
            out.append('TOPICS:')
            print ('TOPICS:')
            out.append(part)
            print (part)
            out.append(len(final_dic[part]))
            print (len(final_dic[part]))
            out.append([(k,v) for v,k in sorted([(v,k) for k,v in part_ht_dict[partiti.index(part)].items()],reverse=True)])
            print ([(k,v) for v,k in sorted([(v,k) for k,v in part_ht_dict[partiti.index(part)].items()],reverse=True)])
    #print collection.find().count()
    # Final summary; relies on `us_dic` leaking from the last loop pass.
    for part in partiti:
        print ('\n')
        out.append(part)
        print (part)
        out.append('polarized tweets: ' + str(collection.find({"tw_pr": part}).count()))
        print ('polarized tweets: ' + str(collection.find({"tw_pr": part}).count()))
        n2=len([i for i in us_dic.values() if i==partiti.index(part)])
        out.append('polarized users: ' + str(n2))
        print ('polarized users: ' + str(n2))
    out.append('\n')
    print ('\n')
    out.append('total polarized users: ' + str(len(us_dic)))
    print ('total polarized users: ' + str(len(us_dic)))
    out.append('\n')
    print ('\n')
    # Remove the temporary per-tweet classification field.
    collection.update({}, {'$unset': {"tw_pr":1}}, multi=True)
    return out
def time_polarization_track(collection, ndays, param, **final_dic):
    """Time-sliced variant of polarization_track().

    Instead of repeated passes over the whole collection, tweets are
    processed in consecutive windows of *ndays* distinct "day" values
    (days are stored as integers YYYYMMDD).  User classifications in
    `us_dic` accumulate across windows, and a group whose rebuilt hashtag
    list comes out empty falls back to the list from the previous window
    (`final_dic_b`).  Returns the textual report accumulated in `out`.

    NOTE(review): Python 2 code, same helpers as polarization_track.
    """
    partiti=final_dic.keys()
    num_partiti=len(partiti)
    clean_collection(collection)
    #udic=[]
    #hdic=[]
    # cic = window counter (translated from Italian).
    # Users classified so far; persists across all time windows.
    us_dic={}
    dlist=collection.find().distinct("day")
    dlist.sort()
    out=[]
    out.append("")
    print ("")
    out.append("Procedure with time iteration: every " + str(ndays)+ " days.")
    print ("Procedure with time iteration: every " + str(ndays)+ " days.")
    out.append("total tweets: " + str(collection.find().count()))
    print ("total tweets: " + str(collection.find().count()))
    out.append("total users: " + str(len(collection.find().distinct("user"))))
    print ("total users: " + str(len(collection.find().distinct("user"))))
    print ("")
    for cic in range(int(nup.ceil(len(dlist)/ndays))):
        out.append(datetime.now().time())
        print (datetime.now().time())
        out.append('step '+ str(cic+1))
        print ('STEP '+ str(cic+1))
        print ("")
        out.append("tweets classification")
        print ("tweets classification")
        us_tw_dic={}
        # End day (exclusive) of this window; the last window is clamped.
        if ((cic+1)*ndays)>len(dlist)-1:
            end_d=dlist[len(dlist)-1]+1
        else:
            end_d=dlist[(cic+1)*ndays]
        for tweet in collection.find({"day": { "$gt": dlist[cic*ndays]-1, "$lt": end_d}}):
            # Classify a tweet only when it matches exactly one group.
            conto_p=0
            for part in partiti:
                if conto_p>1:
                    break
                for i in tweet[u'ht']:
                    if i in final_dic[part]:
                        conto_p+=1
                        part_name=part
                        break
            if conto_p==1:
                collection.update({"_id":tweet['_id']},{'$set': {"tw_pr": part_name}})
                if us_tw_dic.has_key(int(tweet[u'user'])):
                    us_tw_dic[int(tweet[u'user'])].append(partiti.index(part_name))
                else:
                    us_tw_dic[int(tweet[u'user'])]=[partiti.index(part_name)]
        out.append("classified tweets:")
        print ("classified tweets:")
        out.append(collection.find({"tw_pr":{'$exists': True}}).count())
        print (collection.find({"tw_pr":{'$exists': True}}).count())
        #logging.info('tw '+str(collection.find({"tw_pr":{'$exists': True}}).count()))
        out.append(datetime.now().time())
        print (datetime.now().time())
        print ""
        #user_id=collection.find({"tw_pr":{'$exists': True}}).distinct("user")
        out.append("users classification")
        print ("users classification")
        pnt=0
        for i in us_tw_dic.keys():
            pnt+=1
            #if pnt%100000==0:
            #    print pnt
            list_part=us_tw_dic[i]
            most_list=mostcommon(list_part)
            # Dominant group must strictly more than double the runner-up.
            if len(most_list)==1 or most_list[0][1]>(most_list[1][1])*2:
                us_dic[i]=most_list[0][0]
        #udic.append(us_dic)
        out.append("classified users:")
        print ("classified users:")
        out.append(len(us_dic.keys()))
        print (len(us_dic.keys()))
        out.append(datetime.now().time())
        print (datetime.now().time())
        #print len(collection.find({"us_final":{'$exists': True}}).distinct("user"))
        #logging.info('us '+str(len(us_dic)))
        #print datetime.now().time()
        #H_list=[]
        #Edges=[]
        hash_score={}
        #param=0.002
        part_ht=[]
        part_ht_dict=[]
        out.append("")
        print ("")
        # Keep a backup of the current seed lists for the empty-list fallback.
        final_dic_b = final_dic.copy()
        for part in partiti:
            print part
            #print "graph construction"
            # Per-tweet hashtag frequencies among this group's users,
            # restricted to the current time window.
            hashtag={}
            num_tot_tw=0
            for a in collection.find({"day": { "$gt": dlist[cic*ndays]-1, "$lt": end_d}}):
                if us_dic.has_key(int(a[u'user'])):
                    if us_dic[int(a[u'user'])]==partiti.index(part):
                        num_tot_tw+=1
                        parole=[]
                        for i in a[u'ht']:
                            parole.append(i)
                        for p in set(parole):
                            if hashtag.has_key(p.lower()):
                                hashtag[p.lower()]+=1
                            else:
                                hashtag[p.lower()]=1
            #for q in polar_h:
            #    if hashtag.has_key(q):
            #        del hashtag[q]
            list_hashtag=[(k,v) for v,k in sorted([(v,k) for k,v in hashtag.items()],reverse=True) if v>1]
            #hot_hashtag=[]
            list_hash=[]
            for i in list_hashtag:
                #hot_hashtag.append(i[0])
                list_hash.append(i[0])
            out.append('retrieved hashtags')
            print ('retrieved hashtags')
            out.append(len(list_hash))
            print (len(list_hash))
            out.append(datetime.now().time())
            print (datetime.now().time())
            #H=nx.Graph()
            #for i in mostcommon(edges):
                #H.add_edge(i[0][0], i[0][1],weight=i[1]/float(mostcommon(edges)[0][1]))
            #H_list.append(H)
            #exec("%s_upd=list_hash" % part)
            out.append("")
            print ("")
            for h in list_hash:
                # score = fraction of this group's window tweets containing h
                if hashtag.has_key(h):
                    score=hashtag[h]/float(num_tot_tw)
                else:
                    score=0
                #if h in eval(part):
                #    score=1
                #else:
                #    calc2=0
                #    for a in collection.find({"us_final2": part}):
                #        if h in a[u'ht']:
                #            if len(set(a[u'ht']).intersection(eval(part)))>0:
                #                calc2+=1
                #    val=calc2
                #    score=round(val/float(hashtag[h] + calc1 - val),5)
                if hash_score.has_key(h):
                    hash_score[h][partiti.index(part)]=score
                else:
                    hash_score[h]=[]
                    for np in range(len(partiti)):
                        hash_score[h].append(0)
                    hash_score[h][partiti.index(part)]=score
            # Reset the seed list; rebuilt below (with fallback if empty).
            final_dic[part]=[]
            part_ht_dict.append({})
            #exec("%s=[]" % part)
            #exec("%sdic={}" % part)
            #part_ht=[]
            #part_ht_dict=[]
            #out.append(datetime.now().time())
        for k,v in hash_score.iteritems():
            max_h=max(v)
            cnt=v.index(max_h)
            # Python 2: filter() returns a list; ties for the max are rejected.
            v=filter(lambda a: a != max_h, v)
            if len(v)==(len(partiti)-1):
                # Combined score: max * product(1 - other group scores).
                kval=1
                for val_v in v:
                    kval=kval*(1-float(val_v))
                fscore=round(max_h*(kval),5)
                if fscore>param:
                    #print k
                    #print max_h
                    part=partiti[cnt]
                    #exec("%s.append(k)" % part)
                    #exec("%sdic[k]=fscore" % part)
                    final_dic[part].append(k)
                    part_ht_dict[partiti.index(part)][k]=fscore
        #print datetime.now().time()
        #for part in partiti:
        #    hdic.append(eval(part))
        for part in partiti:
            out.append('\n')
            print ('\n')
            out.append('TOPICS:')
            print ('TOPICS:')
            out.append(part)
            print (part)
            out.append(len(final_dic[part]))
            print (len(final_dic[part]))
            out.append([(k,v) for v,k in sorted([(v,k) for k,v in part_ht_dict[partiti.index(part)].items()],reverse=True)])
            print ([(k,v) for v,k in sorted([(v,k) for k,v in part_ht_dict[partiti.index(part)].items()],reverse=True)])
            # Fallback: keep the previous window's list if nothing survived.
            if len(final_dic[part])==0:
                final_dic[part]=final_dic_b[part]
        out.append("")
        print ('')
        out.append('total tweets in period:')
        print ('total tweets in period:')
        out.append(collection.find({"day": { "$gt": dlist[cic*ndays]-1, "$lt": end_d}}).count())
        print (collection.find({"day": { "$gt": dlist[cic*ndays]-1, "$lt": end_d}}).count())
        out.append('total users in period:')
        print ('total users in period:')
        out.append(str(len(collection.find({"day": { "$gt": dlist[cic*ndays]-1, "$lt": end_d}}).distinct("user"))))
        print (str(len(collection.find({"day": { "$gt": dlist[cic*ndays]-1, "$lt": end_d}}).distinct("user"))))
        out.append("")
        print ('')
        #print collection.find().count()
        for part in partiti:
            out.append(part)
            print (part)
            out.append('polarized tweets: ' + str(collection.find({"tw_pr": part}).count()))
            print ('polarized tweets: ' + str(collection.find({"tw_pr": part}).count()))
            n2=len([i for i in us_dic.values() if i==partiti.index(part)])
            out.append('polarized users: ' + str(n2))
            print ('polarized users: ' + str(n2))
        out.append('\n')
        print ('\n')
        out.append('total polarized users: ' + str(len(us_dic)))
        print ('total polarized users: ' + str(len(us_dic)))
        out.append('\n')
        print ('\n')
    # Remove the temporary per-tweet classification field.
    collection.update({}, {'$unset': {"tw_pr":1}}, multi=True)
    return out
import sys
from pymongo import MongoClient
from operator import itemgetter
from dateutil import parser
import ast
import numpy as nup
from datetime import datetime
import time
if __name__ == "__main__":
client = MongoClient()
exec("db = client." + sys.argv[1])
exec("collection = db." + sys.argv[2])
print ""
print 'Number of arguments: ', len(sys.argv), 'arguments.'
print 'Argument List: ', str(sys.argv)
print ""
print 'Database: ', str(sys.argv[1])
print 'Collection: ', str(sys.argv[2])
exec("file_or = '%s'" % sys.argv[3])
exec("time_version = %s" % sys.argv[4])
exec("iteration = %s" % sys.argv[5])
exec("file_class = '%s'" % sys.argv[6])
exec("param = %s" % sys.argv[7])
param=float(param)
#collection.remove({})
with open(file_or, 'r') as f:
cnt=0
for line in f:
if line[-1]=='\n':
line=line[:-1]
line=line.replace(': null', ': None')
line=line.replace(': false', ': False')
line=line.replace(': true', ': True')
tw=ast.literal_eval(line)
line2={}
line2[u'user']=tw[u'user_id']
line2[u'day']=parser.parse(tw['created_at']).year*10000+parser.parse(tw['created_at']).month*100+parser.parse(tw['created_at']).day
line2[u'tw_id']=tw[u'id_str']
ht=[]
for i in tw[u'hashtags']:
ht.append(i[u'text'].lower())
line2[u'ht']=ht
cnt+=1
collection.insert(line2)
#
print "Tweets inserted in MongoDB: ",str(cnt)
if cnt!=collection.find().count():
print "Error in MongoDB - partial data"
final_dic=get_data(file_class)
if time_version==0:
out=polarization_track(collection, iteration, param, **final_dic)
elif time_version==1:
out=time_polarization_track(collection, iteration, param, **final_dic)
else:
print "Error in parameter: type of algorithm (time or not)"
#for i in out:
# print str(i)
|
def classify_matrix(matrix):
    """Return the special-matrix label for a 3x3 matrix.

    Fixes two defects of the original inline checks: the last entry of each
    triangle group was only truth-tested (``matrix[1][2]`` instead of
    ``matrix[1][2] == 0``), and the "Diagonal matrix" branch was unreachable
    because the lower-triangle branch always matched first.  The most
    specific case (diagonal) is therefore tested first here.
    """
    upper_is_zero = matrix[0][1] == 0 and matrix[0][2] == 0 and matrix[1][2] == 0
    lower_is_zero = matrix[1][0] == 0 and matrix[2][0] == 0 and matrix[2][1] == 0
    if upper_is_zero and lower_is_zero:
        return "Diagonal matrix"
    if upper_is_zero:
        return "Lower triangle matrix"
    if lower_is_zero:
        return "Upper triangle matrix"
    return "Not a special matrix"


if __name__ == "__main__":
    # Read an N x M matrix value by value (the checks above assume >= 3x3,
    # as in the original script).
    input1 = int(input("Input N: "))
    input2 = int(input("input M: "))
    matrix = []
    for i in range(input1):
        row = []
        for j in range(input2):
            row.append(int(input(f"Input value A[{i+1}][{j+1}]")))
        matrix.append(row)
    # The original also counted positive entries into an unused variable;
    # that dead code was removed.
    print(classify_matrix(matrix))
    print(matrix)
984,873 | 64b6364df3d0c0117b151644a99477001b4691b8 | from math import *
import random
import sys
import pygame
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow
import keras
from keras import layers
from keras.models import Model
from keras import backend as K
from keras import utils as np_utils
from keras import optimizers
from keras import activations
from keras.models import load_model
# Display
dx = 920            # window width, px
dy = 480            # window height, px
rad = 20            # radius of the green ball, px
gLev = 400          # screen y of the ground level, px
# Goal/fail markers: screen x of the right/left quarter-turn of the curve.
WIN = pi*dx/8+dx/2 - 10
LOSE = -pi*dx/8+dx/2 - 10
# RGB colours
red = (255,0,0)
sky = (180,225,255)
earth = (149,69,53)
star = (255,230,20)
green = (0,120,0)
black = (0,0,0)
# Sample cos(x) on [-pi, pi) ...
cvals = [((x*pi/100),cos(x*pi/100)) for x in range(-100,100,1)]
# ... and map the samples to screen coordinates for drawing the track.
curve = [(x*dx/8 + dx/2,(y-1)*dy/4 + gLev) for (x,y) in cvals]
# IMPORTANT CONSTANTS
dt = 500            # physics time step, ms (also encoded in the model name)
gamma = 1.0         # reward discount factor
g = 1               # gravity scale; NOTE: rebound to a Game instance below
Episodes = 30000    # number of evaluation episodes
num_actions = 2     # push left / push right
class Game(object):
    """Mountain-car-style environment on a cosine track.

    State is (pos, vel); pos is the angle along the track, the episode ends
    at pos >= pi (win, +10), pos <= -pi (lose, -10) or after 10000 ms.
    NOTE(review): reads the module-level gravity scale `g` in __init__;
    later in this file `g` is rebound to a Game instance (`g = Game()`),
    which only works because __init__ runs before that rebinding.
    """
    def __init__(self):
        self.acc = 0.000001            # control acceleration per ms
        self.gc = 0.0000015 * g        # gravity coefficient
        self.reset()
    def reset(self):
        # Reset to the valley bottom at rest and return the initial state.
        self.pos = 0.0
        self.vel = 0.0
        self.gametime = 0
        S = np.array([self.pos, self.vel])
        return S
    def update(self, A, dt):
        # Advance the physics by dt ms for action A (0 = left, 1 = right).
        # Per-step reward is a small time penalty.
        # NOTE(review): -dt/1000 assumes Python 3 true division; under
        # Python 2 this would floor to whole numbers.
        R = -dt/1000
        end = False
        self.gametime += dt
        if A == 0:
            self.vel -= self.acc * dt
        if A == 1:
            self.vel += self.acc * dt # Add control force
        self.vel -= self.gc*sin(self.pos) * dt # Gravity
        self.vel -= self.vel * dt * 0.0001 # Friction
        self.pos += self.vel * dt # Update position
        if self.pos >= pi:
            R = 10.0
            end = True
        if self.pos <= -pi:
            R = -10.0
            end = True
        if self.gametime >= 10000:
            end = True
        if end:
            self.reset()
        # Returns (next state, reward, episode-finished flag).
        S = np.array([self.pos, self.vel])
        return S, R, end
class Agent(object):
    """Policy-gradient agent that samples actions from a pretrained model."""
    def __init__(self):
        # Load the pretrained Keras policy network from disk.
        self.model = load_model('mc-pg-g_1-dt_500.h5')
    def getAction(self, S):
        # Add a batch axis, get the action-probability vector, and sample
        # one of the num_actions actions from it.
        S = np.expand_dims(S, axis=0)
        probs = np.squeeze(self.model.predict(S))
        sample = np.random.choice(np.arange(num_actions), p=probs)
        return sample
# Stats keeping track of agent performance
matplotlib.style.use('ggplot')
stats_scores = np.zeros(Episodes)
stats_lengths = np.zeros(Episodes)
#initialize display
pygame.init()
screen = pygame.display.set_mode((dx,dy))
clock = pygame.time.Clock()
# Initialize the game and the agents
# NOTE: this rebinds the module-level gravity constant `g` (see above).
g = Game()
agent = Agent()
for e in range(Episodes):
    s = g.reset()
    total_score = 0
    S = []      # visited states
    A = []      # actions taken
    G = []      # per-step rewards (later overwritten with returns)
    for t in range(1,1000):
        # Cap rendering at 2 FPS; dt (ms since last tick) drives the physics.
        dt = clock.tick(2)
        # Draw sky, ground, track and the win/lose flags.
        screen.fill(sky)
        pygame.draw.rect(screen, earth, (0,gLev,dx,dy-gLev), 0)
        pygame.draw.lines(screen, black, False, curve, 3)
        pygame.draw.ellipse(screen, star, (WIN, -dy/2 + gLev - 40 ,20,40), 0)
        pygame.draw.ellipse(screen, red, (LOSE, -dy/2 + gLev - 40 ,20,40), 0)
        # QUIT GAME
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
        pygame.draw.circle(screen, green, (int(g.pos*dx/8 + dx/2), \
            int((cos(g.pos)-1)*dy/4 + gLev - rad)), rad, 0)
        pygame.display.update()
        a = agent.getAction(s)
        S.append(s)
        A.append(a)
        s, r, end = g.update(a, dt)
        G.append(r)
        total_score += r
        #print(S)
        if end:
            # Backwards pass: turn per-step rewards into discounted returns.
            acc = 0
            for i in reversed(range(len(G))):
                acc = gamma * acc + G[i]
                G[i] = acc
            A = np_utils.to_categorical(A, num_classes=num_actions)
            print("Game:", e, "completed in:", t, ", earning:", "%.2f"%total_score, "points.")
            stats_scores[e] = total_score
            stats_lengths[e] = t
            break
# Plot the moving average (window of 100 episodes) of the scores.
window = 100
score_ave = np.convolve(stats_scores, np.ones((window,))/window, mode="valid")
t_ave = np.arange(score_ave.size)
plt.rcParams['figure.figsize'] = [15, 5]
plt.plot(t_ave, score_ave)
plt.show()
984,874 | 6ad98bc71f0c5b433bb584470e7a39bc5eb4b276 |
#!/usr/bin/env python
# mocks.py: Mock objects for low-level peripheral/hardware-related modules
''' To-Do:
- add iterable support channels passed to GPIO
- add GPIO pull-up/pull-down support
- add event detection support
'''
__author__ = 'Trevor Allen'
# trevor's github https://raw.githubusercontent.com/TCAllen07/raspi-device-mocks/
# Standard Library
import random
import time
import math
## RPi.GPIO pins (from http://elinux.org/RPi_Low-level_peripherals):
'''
RPi A+ & B+ GPIO: J8 40-pin header
--------------------------------
+3V3 1 2 +5V
GPIO2 SDA1 3 4 +5V
GPIO3 SCL1 5 6 GND
GPIO4 GCLK 7 8 TXD0 GPIO14
GND 9 10 RXD0 GPIO15
GPIO17 GEN0 11 12 GEN1 GPIO18
GPIO27 GEN2 13 14 GND
GPIO22 GEN3 15 16 GEN4 GPIO23
+3V3 17 18 GEN5 GPIO24
GPIO10 MOSI 19 20 GND
GPIO9 MISO 21 22 GEN6 GPIO25
GPIO11 SCLK 23 24 CE0_N GPIO8
GND 25 26 CE1_N GPIO7
EEPROM ID_SD 27 28 ID_SC EEPROM
GPIO5 29 30 GND
GPIO6 31 32 GPIO12
GPIO13 33 34 GND
GPIO19 35 36 GPIO16
GPIO26 37 38 GPIO20
GND 39 40 GPIO21
--------------------------------
'''
class MockSMBus(object):
    ''' Mock of smbus.SMBus() class.

    Emulates just enough of the I2C API for an MPU-6050 sensor and an I2C
    LCD backpack: writes are validated no-ops, reads return the canned
    register values below.
    '''
    mpu_adr = 0x68
    lcd_adr = 0x3f
    pwr_mgt_1 = 0x6b
    ## HIGH byte registers for MPU-6050, value+1 for LOW registers
    accel_x = 0x3b
    accel_y = 0x3d
    accel_z = 0x3f
    gyro_x = 0x43
    gyro_y = 0x45
    gyro_z = 0x47
    temp = 0x41
    # Canned HIGH/LOW byte reads, built once at class creation instead of
    # on every read_byte_data() call as the original did.  Note that 0x3f
    # appears twice (accel_z and lcd_adr); the LCD entry wins, exactly as
    # in the original dict literal.
    read_results = {
        accel_x: 246, accel_x + 1: 88,
        accel_y: 4, accel_y + 1: 92,
        accel_z: 57, accel_z + 1: 104,
        gyro_x: 6, gyro_x + 1: 42,
        gyro_y: 0, gyro_y + 1: 184,
        gyro_z: 255, gyro_z + 1: 216,
        temp: 240, temp + 1: 16,
        lcd_adr: 0,
    }
    # Register/command values accepted by write_byte_data() (MPU-specific).
    valid_write_cmds = (mpu_adr, lcd_adr, pwr_mgt_1, accel_x, accel_y,
                        accel_z, gyro_x, gyro_y, gyro_z, temp)

    def __init__(self, bus_no):
        # bus_no: the I2C bus index; stored but otherwise unused by the mock.
        self.bus_no = bus_no

    def __repr__(self):
        return "<Mock: smbus.SMBus>"

    def _validate_addr(self, addr):
        """Raise ValueError unless *addr* is the MPU or LCD device address.

        Centralizes the identical check the original repeated in every
        method.
        """
        if addr not in (self.mpu_adr, self.lcd_adr):
            errmsg = 'Address argument not valid: %s' % hex(addr)
            raise ValueError(errmsg)

    def write_byte(self, addr, cmd):
        self._validate_addr(addr)
        return None

    def write_byte_data(self, addr, cmd, zero=0):
        ''' orig: sensor.Sensor.initialize() '''
        # res = bus.write_byte_data(0x68, 0x6b, 0) # res = None
        self._validate_addr(addr)
        ## This is specific to MPU
        if cmd not in self.valid_write_cmds:
            raise ValueError('Command invalid: %s' % hex(cmd))
        return None

    def write_block_data(self, addr, cmd, data):
        # I don't think this is actually used, just available in lcd_device.py
        self._validate_addr(addr)
        return None

    def read_byte_data(self, addr, cmd):
        ''' orig: sensor.Sensor.read_word() '''
        # addr is the device address (0x68 / 0x3f); cmd is a HIGH or LOW
        # register listed in read_results above.
        self._validate_addr(addr)
        ## This is specific to MPU
        ### Checking commands restricts to MPU, disallows LCD display,
        # unless one were to add all the LCD attributes below to this class
        if cmd not in self.read_results:
            raise ValueError('Argument %s not a valid command.' % hex(cmd))
        # The original wrapped this lookup in try/except KeyError, but the
        # membership check above makes that branch unreachable -- dropped.
        return self.read_results[cmd]

    def read_byte(self, addr):
        # I don't think this is actually used, just available in lcd_device.py
        self._validate_addr(addr)
        return None

    def read_block_data(self, addr, cmd):
        # I don't think this is actually used, just available in lcd_device.py
        self._validate_addr(addr)
        return None
class Mock_smbusModule(object):
    ''' Mock of smbus module, containing SMBus class '''
    ### Purpose here is to allow instantiation of a fake smbus 'library'
    # like so: smbus = Mock_smbusModule(); bus = smbus.SMBus(1)
    SMBus = MockSMBus  # class attribute aliasing the mock bus class above
class MockGPIO(object):
    """Mock of the RPi.GPIO module interface for testing off the Pi.

    Exposes the RPi.GPIO constants and the setmode/setup/output/input/
    cleanup call protocol; state is tracked but no hardware is touched.
    """
    # Map format is <BCM-#> : <BOARD-#>
    bcm_board_map = { 2 : 3,
        3 : 5, 4 : 7, 14 : 8, 15 : 10, 17 : 11,
        18 : 12, 27 : 13, 22 : 15, 23 : 16, 24 : 18,
        10 : 19, 9 : 21, 25 : 22, 11 : 23, 8 : 24,
        7 : 26, 5 : 29, 6 : 31, 12 : 32, 13 : 33,
        19 : 35, 16 : 36, 26 : 37, 20 : 38, 21 : 40}
    RPI_REVISION = 2
    BCM = 11
    BOARD = 10
    OUT = 0
    IN = 1
    LOW = 0
    HIGH = 1
    PUD_OFF = 20
    PUD_DOWN = 21
    PUD_UP = 22
    HARD_PWM = 43
    FALLING = 32
    BOTH = 33
    I2C = 42
    RISING = 31
    SERIAL = 40
    SPI = 41
    UNKNOWN = -1
    VERSION = '0.5.11'
    RPI_INFO = {'MANUFACTURER': 'Unknown', 'P1_REVISION': 3, 'PROCESSOR': 'Unknown',
                'RAM': 'Unknown', 'REVISION': '0010', 'TYPE': 'Unknown'}
    # Current function of each pin (1 == IN, the power-on default).
    # dict.fromkeys replaces the original dict comprehension, which cannot
    # see class-level names under Python 3 (class bodies are excluded from
    # comprehension scopes) and so raised NameError at import time.
    # NOTE(review): class-level, hence shared by all instances (as before).
    gpio_setting = dict.fromkeys(bcm_board_map, 1)

    def __init__(self):
        self.mode = -1              # pin-numbering scheme; -1 == UNKNOWN
        self.setmode_run = False    # setmode() must precede output()/input()
        self.setup_run = False      # setup() must precede output()/input()

    def __repr__(self):
        return "<Mock: RPi.GPIO>"

    @staticmethod
    def _as_channel_list(channels):
        """Normalise a single channel or an iterable of channels to a list."""
        if not hasattr(channels, '__iter__'):
            channels = [channels, ]
        return channels

    def setmode(self, mode):
        # mode should be GPIO.BCM or GPIO.BOARD
        if mode not in (self.BCM, self.BOARD):
            raise ValueError('An invalid mode was passed to setmode()')
        self.setmode_run = True
        self.mode = mode
        # Returns nothing

    def getmode(self):
        # Should return BCM, BOARD, or UNKNOWN
        return self.mode

    def _pin_validate(self, channel):
        ''' For test/mock purposes, to centralize validation checks of pin numbers & values '''
        if channel not in self.bcm_board_map.keys():
            raise ValueError('Channel is invalid on a Raspberry Pi: %s' % str(channel))

    def cleanup(self, channels=None):
        # Resets all to INPUT with no pullup/pulldown and no event detection
        if channels is None:
            channels = self.bcm_board_map.keys()
        elif not hasattr(channels, '__iter__'):
            channels = [channels, ]
        for pin in channels:
            self.gpio_setting[pin] = 1
        self.mode = -1
        self.setmode_run = False

    def setup(self, channels, direction, pull_up_down=None, initial=None):
        channels = self._as_channel_list(channels)
        for channel in channels:
            self._pin_validate(channel)
        if direction not in (self.IN, self.OUT):
            raise ValueError('An invalid direction was passed to setup()')
        if (pull_up_down is not None and
                pull_up_down not in (self.PUD_OFF, self.PUD_UP, self.PUD_DOWN)):
            raise ValueError('pull_up_down not in pre-defined PUD_OFF/UP/DOWN values')
        self.setup_run = True  # really should do this on a per-channel basis
        # Fixed: the original recorded the direction only for the loop's
        # last channel (it relied on the leaked loop variable).
        for channel in channels:
            self.gpio_setting[channel] = direction
        # Returns nothing

    def output(self, channel, value):
        # Fixed: the original body referred to an undefined name `channels`
        # (the parameter is `channel`), so every call raised NameError.
        channels = self._as_channel_list(channel)
        for ch in channels:
            self._pin_validate(ch)
        if value not in (self.LOW, self.HIGH):
            raise ValueError('An invalid value was passed to output()')
        if not self.setmode_run:
            raise RuntimeError('Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)')
        if not self.setup_run:
            raise RuntimeError('The GPIO channel has not been set up as an OUTPUT')
        # Returns nothing

    def input(self, channels):
        channels = self._as_channel_list(channels)
        for channel in channels:
            self._pin_validate(channel)
        if not self.setmode_run:
            raise RuntimeError('Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)')
        if not self.setup_run:
            raise RuntimeError('You must setup() the GPIO channel first')
        # Returns either 0 or 1.
        ### This may need to be customized depending on its intended use, perhaps
        # by using mock to specify the desired return value in tests. For me
        # leaving it to return 1 works fine.
        return self.HIGH

    def gpio_function(self, channel):
        self._pin_validate(channel)
        if not self.setmode_run:
            raise RuntimeError('Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)')
        return self.gpio_setting[channel]

    ## Following functions are placeholders that need filled in.
    def add_event_callback(self, *args):
        pass
    def add_event_detect(self, *args):
        pass
    def setwarnings(self, *args):
        pass
    def wait_for_edge(self, *args):
        pass
    def event_detected(self, *args):
        pass
    def remove_event_detect(self, *args):
        pass
class MockSPI(object):
    ''' Mock of spi module.

    Intentionally disabled: __init__ raises UserWarning immediately (the
    author marked it "not entirely operational"), so the assignments after
    the raise never execute.  Kept as-is pending completion.
    '''
    ### Designed for use with MFRC522 rfid module by mxgxw, available on GitHub:
    # https://github.com/mxgxw/MFRC522-python
    spi_reg_names = {
        # Attr name : [register, (spi.xver results)]
        'CRCResultRegM' : [0x21, (0, 255)],
        'CRCResultRegL' : [0x22, (0, 255)],
        'ErrorReg' : [0x06, (0,0)],
        'ControlReg' : [0x0C, (0,16)],
        'TxControlReg' : [0x14, (0,128)],
        'Status2Reg' : [0x08, (0,0)],
        'DivIrqReg' : [0x05, (0,0)],
        'FIFOLevelReg' : [0x0A, (0,0)],
        'CommIrqReg' : [0x04, (0,20)],
        'BitFramingReg' : [0x0D, (0,0)]
    }
    # Attr : [register]
    spi_write_names = {
        'BitFramingReg' : 0x0D,
        'CommandReg' : 0x01,
        'PCD_RESETPHASE' : 0x0F,
        'PCD_CALCCRC' : 0x03,
        'PCD_IDLE' : 0x00,
        'PCD_TRANSCEIVE' : 0x0C,
        'PCD_AUTHENT' : 0x0E,
        'CommIEnReg' : 0x02,
        'ModeReg' : 0x11,
        'TModeReg' : 0x2A,
        'TxAutoReg' : 0x15,
        'TPrescalerReg' : 0x2B,
        'TReloadRegL' : 0x2D,
        'TReloadRegH' : 0x2C
    }
    ''' Need to figure out how to fake these functions from mfrc522.py:
    def CalculateCRC(self, pIndata):
        i = 0
        while i<len(pIndata):
            self.Write_MFRC522(self.FIFODataReg, pIndata[i])
            i = i + 1
    def MFRC522_ToCard(self,command,sendData):
        while(i<len(sendData)):
            self.Write_MFRC522(self.FIFODataReg, sendData[i])
            i = i+1
        self.Write_MFRC522(self.FIFODataReg, sendData[i])
    '''
    def __init__(self):
        # Deliberate guard: the mock is unfinished, so refuse to construct.
        # Everything below this raise is currently unreachable.
        raise UserWarning('This mock spi class is not entirely operational yet.')
        self.spi_xfer_vals = {
            0x21 : (0, 255),
            0x22 : (0, 255),
            0x06 : (0,0),
            0x0C : (0,16),
            0x14 : (0,128),
            0x08 : (0,0),
            0x05 : (0,0),
            0x0A : (0,0),
            0x04 : (0,20),
            0x0D : (0,0)
        }
        # Writable Register : Val(s)
        self.spi_write_vals = {
            0x0D : [0x00, 0x07], # BitFramingReg
            0x01 : [0x0F, 0x03, 0x00, 0x0C, 0x0E], # CommandReg
            0x02 : [0x00, 0x12, 0x77, 0x80], # CommIEnReg
            0x11 : [0x3D], # ModeReg
            0x2A : [0x8D], # TModeReg
            0x15 : [0x40], # TxAutoReg
            0x2B : [0x3E], # TPrescalerReg
            0x2D : [30], # TReloadRegL
            0x2C : [0], # TReloadRegH
            0x80 : [0], # Fallback/Default
            # These following 'masks' may need to be included as "val"s
            # for all writable registers: 0x03, 0x80, 0x04, 0x08
        }
    def openSPI(self, device='/dev/spidev0.0', speed=1000000):
        # spi.transfer(((addr<<1)&0x7E,val))
        pass
    def closeSPI(self):
        pass
    def transfer(self, stuffs):
        # self.spi.transfer(((addr<<1)&0x7E,val))
        addr, val = stuffs
        ## A very poor attempt to mimic MFRC522's read/write functions:
        if ( (addr not in self.spi_xfer_vals.keys())
            and (addr not in self.spi_write_vals.keys()) ):
            raise KeyError('Addr invalid: %s' % str(addr))
        legit_value = any( [val in vals for vals in self.spi_write_vals.values()] )
        if not legit_value:
            raise ValueError('Value invalid: %s' % hex(val))
        try:
            res = self.spi_xfer_vals[addr]
        except KeyError:
            res = self.spi_write_vals[addr]
        return res
|
984,875 | 0e7ce8bb7a17812fa1569599cff5c2867d45915d |
# given a list of strings, return a list of the strings, omitting any string length 4 or more
def no_long(strings):
    """Return only the entries of *strings* shorter than 4 characters."""
    return [word for word in strings if len(word) < 4]
print(no_long(['this', 'not', 'too', 'long']))
print(no_long(['a', 'bbb', 'cccc']))
print(no_long(['cccc', 'cccc', 'cccc']))
|
984,876 | 981f2a932ae20fde05bffa24aeafd7773855c7d0 | # -*- coding: utf-8 -*-
from __future__ import division
def media(lista):
soma = 0
for i in range(0,len(lista),1):
soma = soma + lista[i]
resultado = soma/len(lista)
return resultado
n=input('digite a quantidade de termos das listas:')
a[]
b[]
for i in range(0,n,1):
a.append(input('digite o valor:'))
for i in range(0,n,1):
b.append(input('digite os elementos de b:'))
media_a=media(a)
media_b=media(b)
print media_a
print media_b |
984,877 | fe75a3bfea7869ddb588de6a4ee83b5563f0015e | # Generated by Django 2.0.2 on 2018-06-28 00:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('matches', '0024_auto_20180627_2353'),
]
operations = [
migrations.AddField(
model_name='matches',
name='score_after_penalties_guest',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='matches',
name='score_after_penalties_home',
field=models.IntegerField(default=0),
),
]
|
984,878 | 7370831ef303ebb17f38cfb0b4858b6406056c2b | """The SalesChannel class."""
class SalesChannel:
    """Container for Cloud Commerce Sales Channels."""

    def __init__(self, data):
        """Set attributes from API data."""
        self.json = data
        # attribute name -> key in the API payload; missing keys become None
        field_map = (
            ("id", "ID"),
            ("name", "Name"),
            ("domain", "Domain"),
            ("brand_id", "BrandID"),
            ("account_id", "AccountID"),
            ("brand_name", "BrandName"),
            ("country_id", "CountryID"),
            ("country_name", "CountryName"),
            ("account_name", "AccountName"),
            ("pre_order", "PreOrder"),
            ("type_id", "TypeID"),
            ("type_name", "TypeName"),
            ("nominal_code_id", "NominalCodeID"),
            ("external_shop_id", "ExtShopID"),
            ("pseudo_stock_level_type", "PseudoStockLevelType"),
            ("currency_symbol", "CurrencySymbol"),
            ("loyalty_point_per_value", "LoyaltyPointPerValue"),
            ("loyalty_value_per_point", "LoyaltyValuePerPoint"),
            ("disabled", "disabled"),
            ("deleted", "deleted"),
            ("note", "Note"),
        )
        for attribute, key in field_map:
            setattr(self, attribute, data.get(key))

    def __repr__(self):
        return "Sales Channel: {}".format(self.name)
|
984,879 | 624747133b122b41c43cc7109c2913137e96cd78 | # coding:utf-8
import ConfigParser
import cookielib
import urllib
import urllib2
import os
from scrapy.selector import Selector
from PIL import Image
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class WeiboCookie:
    """Log configured Weibo accounts in via weibo.cn and cache the session
    cookies in CookieConfig.ini.  Python 2 only (urllib2/cookielib,
    bare print statements); the captcha is solved interactively.
    """
    def __init__(self):
        # Build a dedicated opener whose CookieJar records the cookies
        # returned during the login flow.
        self.cookie = cookielib.CookieJar()
        self.opener = urllib2.build_opener(
            urllib2.HTTPCookieProcessor(self.cookie))
        # NOTE: only an old browser User-Agent works here; newer ones keep
        # failing the captcha check (per the original author's comment).
        self.headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"}
        # Local Weibo account credentials ({'no': user, 'psw': password}).
        self.weibos = []
        # Harvested login cookies, one {name: value} dict per account.
        self.cookies = []
        # Install the custom opener globally so urllib2.urlopen() uses it.
        urllib2.install_opener(self.opener)
        # Load account credentials from the local config file.
        cf = ConfigParser.ConfigParser()
        cf.readfp(open(os.getcwd() + '/WeiboAccount.ini'))
        accountNum = int(cf.get('AccountNum', 'num'))
        for i in range(accountNum):
            username = cf.get('Account' + str(i), 'username')
            password = cf.get('Account' + str(i), 'password')
            self.weibos.append({'no': username, 'psw': password})
    def processCookie(self):
        # Snapshot the current CookieJar into a plain dict and store it.
        dict = {}
        for c in self.cookie:
            dict[c.name] = c.value
        self.cookies.append(dict)
    def getCookie(self):
        """Log each configured account in and collect its cookies."""
        for elem in self.weibos:
            try:
                postDataUrl = 'https://weibo.cn/login/'
                request = urllib2.Request(
                    url=postDataUrl, headers=self.headers)
                response = urllib2.urlopen(request)
                selector = Selector(text=response.read())
                postData = {}
                # Scrape the per-session form action token and hidden fields.
                rand = selector.xpath(
                    'body/div/form/@action').extract()[0].split('&')[0]
                postDataUrl = postDataUrl + rand + \
                    '&backURL=http%3A%2F%2Fweibo.cn&backTitle=%E6%89%8B%E6%9C%BA%E6%96%B0%E6%B5%AA%E7%BD%91&vt=4'
                postData['remember'] = 'on'
                postData['submit'] = u'\u767b\u5f55'  # the button label "log in"
                postData['backURL'] = selector.xpath(
                    'body/div/form/div/input[@name="backURL"]/@value').extract()[0]
                postData['vk'] = selector.xpath(
                    'body/div/form/div/input[@name="vk"]/@value').extract()[0]
                postData['backTitle'] = selector.xpath(
                    'body/div/form/div/input[@name="backTitle"]/@value').extract()[0]
                postData['tryCount'] = selector.xpath(
                    'body/div/form/div/input[@name="tryCount"]/@value').extract()[0]
                postData['capId'] = selector.xpath(
                    'body/div/form/div/input[@name="capId"]/@value').extract()[0]
                postData['mobile'] = elem['no']
                # The password field name is derived from the vk token.
                passwordParameter = 'password_' + postData['vk'].split('_')[0]
                postData[passwordParameter] = elem['psw']
                # Fetch the captcha image and ask the user to solve it.
                captchaUrl = selector.xpath(
                    'body/div/form/div/img/@src').extract()[0]
                postData['code'] = self.getCaptcha(captchaUrl)
                postData = urllib.urlencode(postData).encode('utf-8')
                req = urllib2.Request(url=postDataUrl,
                                      data=postData,
                                      headers=self.headers)
                # On success this POST returns nothing and the URL expires
                # immediately; on failure it returns an error page.
                try:
                    response = urllib2.urlopen(req)
                    if (response.getcode() == 200):
                        print elem['no'], '\'s cookie receive failed'
                        print 'please check your input captcha'
                except urllib2.URLError as e:
                    # A 404 here paradoxically signals success (expired URL).
                    if hasattr(e, 'code') and e.code == 404:
                        print elem['no'], '\'s cookie has received'
                        self.processCookie()
                    else:
                        print elem['no'], '\'s cookie receive failed'
                        print 'Reason:', e.reason
                finally:
                    if response:
                        response.close()
            except Exception as e:
                print e
    def getCaptcha(self, captchaUrl):
        """Download the captcha image, display it, and read the solution."""
        request = urllib2.Request(url=captchaUrl, headers=self.headers)
        response = urllib2.urlopen(request)
        content = response.read()
        file_path = "captcha.jpg"
        local_pic = open(file_path, "wb")
        local_pic.write(content)
        local_pic.close()
        img = Image.open(file_path)
        img.show()
        captcha = raw_input('请输入验证码:\n')
        return captcha
    def updateCookie(self):
        """Re-login every account and write the cookies to CookieConfig.ini."""
        self.getCookie()
        print 'totally get %d cookies' % len(self.cookies)
        cf = ConfigParser.ConfigParser()
        cf.add_section("cookie")
        cf.set("cookie", "num", len(self.cookies))
        for i in range(len(self.cookies)):
            cf.set("cookie", "cookie" + str(i), self.cookies[i])
        cf.write(open(os.getcwd() + '/CookieConfig.ini', 'r+'))
    @staticmethod
    def supplyCookie():
        """Read the cached cookies back from CookieConfig/CookieConfig.ini."""
        cf = ConfigParser.ConfigParser()
        cf.readfp(open('CookieConfig/CookieConfig.ini'))
        cookies = []
        num = int(cf.get('cookie', 'num'))
        for i in range(num):
            # eval() turns the stored string back into a dict; the config
            # file is trusted local data (translated from Chinese).
            cookies.append(eval(cf.get('cookie', 'cookie' + str(i))))
        return cookies
if __name__ == '__main__':
    # Refresh and persist cookies for all configured accounts.
    wc = WeiboCookie()
    wc.updateCookie()
    # print WeiboCookie.supplyCookie()
|
984,880 | e35d4579c2b2f5d8b3d9f2b1a02005253ca38937 | NomeCompleto = str(input('Digite seu nome completo: ')).strip()
# Print the name in upper/lower case, its letter count (spaces excluded),
# and the first name with its own length.
print('Seu nome em letras MAIÚSCULAS fica: {}'.format(NomeCompleto.upper()))
print('Seu nome em letras minúsculas fica: {}'.format(NomeCompleto.lower()))
print('Seu nome possui {} letras'.format(len(NomeCompleto)-NomeCompleto.count(' ')))
PrimeiroNome = NomeCompleto.split()
# len(PrimeiroNome[0]) also works for single-word names, where the old
# NomeCompleto.find(' ') wrongly returned -1; also fixed the
# "MIÚSCULAS" -> "MAIÚSCULAS" typo in the first message.
print('O seu primeiro nome é {} e ele possui {} letras'.format(PrimeiroNome[0], len(PrimeiroNome[0])))
|
984,881 | 99033f65693588eadea1f514ab81dd4179cb78c7 | # if asked to return list of TreeNodes
class Solution(object):
    def generateTrees(self, n):
        """
        :type n: int
        :rtype: List[TreeNode]
        """
        return self.helper(1, n)

    def helper(self, min_, max_):
        """Build every structurally-unique BST holding the values min_..max_."""
        trees = []
        if min_ > max_:
            return trees
        for root_val in range(min_, max_ + 1):
            left_subtrees = self.helper(min_, root_val - 1)
            right_subtrees = self.helper(root_val + 1, max_)
            # [None] stands in for an absent side so the nested loops still
            # execute once; a child attribute is only assigned when a real
            # subtree exists, exactly as the original four-way branch did.
            for left in (left_subtrees or [None]):
                for right in (right_subtrees or [None]):
                    node = TreeNode(root_val)
                    if left is not None:
                        node.left = left
                    if right is not None:
                        node.right = right
                    trees.append(node)
        return trees
# if the return needs to be a list of lists of numbers
class Solution(object):
    def generateTrees(self, n):
        """
        :type n: int
        :rtype: List[List[int]] -- each unique BST serialized level-order
        """
        node_result = self.helper(1, n)
        result = []
        for root in node_result:
            result.append(self.node_to_list(root))
        return result

    def node_to_list(self, root):
        """Serialize a tree into a level-order list, trimming trailing Nones."""
        from collections import deque
        res = []
        q = deque([root])
        while q:
            for _ in range(len(q)):
                node = q.popleft()
                if node:
                    res.append(node.val)
                    q.append(node.left)
                    q.append(node.right)
                else:
                    res.append(None)
        # BUGFIX: test `is None` explicitly -- the old `not res[-1]` also
        # stripped legitimate trailing 0 node values from the serialization.
        while res and res[-1] is None:
            res.pop()
        return res

    def helper(self, min_, max_):
        """Return roots of all structurally-unique BSTs over min_..max_."""
        result = []
        if min_ > max_:
            return result
        for num in range(min_, max_+1):
            leftlist = self.helper(min_, num-1)
            rightlist = self.helper(num+1, max_)
            if len(leftlist) == 0 and len(rightlist) == 0:
                result.append(TreeNode(num))
            elif len(leftlist) == 0:
                for right in rightlist:
                    root = TreeNode(num)
                    root.right = right
                    result.append(root)
            elif len(rightlist) == 0:
                for left in leftlist:
                    root = TreeNode(num)
                    root.left = left
                    result.append(root)
            else:
                # every (left, right) combination yields a distinct tree
                for left in leftlist:
                    for right in rightlist:
                        root = TreeNode(num)
                        root.left = left
                        root.right = right
                        result.append(root)
        return result
|
984,882 | d2ce6dda9027dfd18c3609b65588d64c474f85e0 | import csv
# Load ISO 4217 currency definitions into a dict keyed by lowercase code.
currencyList = {}
with open('data/iso4217.csv') as csvfile:
    readData = csv.reader(csvfile, delimiter=',')
    next(readData)  # skip the header row
    for row in readData:
        # build a fresh dict per row instead of mutating and .copy()ing
        # a single shared scratch dict
        currencyList[row[0].lower()] = {
            'symbol': row[0],
            'numcode': int(row[1]),
            'decimals': int(row[2]),
            'name': row[3],
        }
|
984,883 | 9c1d542ccdf8ff3d5adb1ecdb637865da1fbbe95 | import unittest
from two_sum import two_sums, two_sums_brute_force
class TestTwoSums(unittest.TestCase):
    """Unit tests for two_sums and two_sums_brute_force."""

    @classmethod
    def setUpClass(cls):
        # shared, read-only fixtures for every test method;
        # the first parameter of a classmethod is conventionally `cls`
        cls.nums = [2, 7, 11, 15]
        cls.target = 9
        cls.wrong_target = 8

    def test_two_sums(self):
        """A solvable target returns the index pair string."""
        result = two_sums(nums=self.nums, target=self.target)
        self.assertNotEqual(result, 'No two sum solution')
        self.assertEqual(result, '[0, 1]')

    def test_two_sums_not_found(self):
        """An unsolvable target returns the sentinel message."""
        result = two_sums(nums=self.nums, target=self.wrong_target)
        self.assertNotEqual(result, '[0, 1]')
        self.assertEqual(result, 'No two sum solution')

    def test_two_sums_brute_force(self):
        """The brute-force variant agrees with the fast one."""
        self.assertEqual(two_sums_brute_force(
            nums=self.nums, target=self.target), '[0, 1]')
if __name__ == '__main__':
    # run the suite when executed directly
    unittest.main()
|
984,884 | c02bef95068e56b16ca3921a939554b6ca75b428 | from django.contrib import admin
from products.models import Product
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product: shows the slug next to the default
    string representation in the changelist.
    """
    list_display = ['__str__','slug']
    # NOTE: ModelAdmin does not read an inner `class Meta` the way ModelForm
    # does; the admin is bound to its model via admin.site.register(), so the
    # previous `class Meta: model = Product` was dead code and was removed.
admin.site.register(Product,ProductAdmin) |
984,885 | d6fe89d1c6bdd3fe5beedc9382c8611692a9ab53 | import discord
import asyncio
import youtube_dl
from os import system
from itertools import cycle
from discord.utils import get
from discord.ext import commands
from discord import FFmpegPCMAudio
# /********************INCLUDE TOKEN BEFORE TEST********************************/
TOKEN = ''  # Discord bot token -- must be filled in before running
client = commands.Bot(command_prefix = '.')  # all commands are invoked as ".name"
client.remove_command('help')  # replaced by the custom .help command below
states = ["bored face ... \U0001f61e", "with the API... \U0001f603", " with user requests... \U0001f916"]  # rotating presence texts
player = {}  # NOTE(review): never written or read in this file -- appears unused
queues = {}  # guild id -> list of queued song paths
async def change_status():
    # Background task: rotate the bot's "playing" status through `states`
    # every 5 seconds for as long as the client connection stays open.
    await client.wait_until_ready()
    msgs = cycle(states)
    while not client.is_closed():
        current_status = next(msgs)
        game = discord.Game(name=current_status)
        await client.change_presence(activity=game)
        await asyncio.sleep(5)
@client.event
async def on_ready():
    """Set the initial presence and log that the bot is connected."""
    await client.change_presence(activity=discord.Game(name="with the API... \U0001f603"))
    print('Bot {} is ready.'.format(client.user))
@client.event
async def on_member_join(ctx):
    # Auto-assign the "Example Role" to every member who joins.
    # NOTE(review): the parameter is actually a Member object despite the
    # `ctx` name, and discord.utils.get returns None when the role does not
    # exist on the guild -- confirm the role is present, otherwise add_roles
    # will fail.
    role = discord.utils.get(ctx.guild.roles, name = "Example Role")
    await ctx.add_roles(role)
@client.command(pass_context=True)
async def help(ctx):
    """DM the requesting user an embed listing the available commands."""
    embed = discord.Embed(colour=discord.Colour.orange())
    embed.set_author(name='Help')
    embed.add_field(name='.ping', value='Returns Pong!', inline=False)
    embed.add_field(name='.echo', value='Echoes what you say after the command')
    await ctx.message.author.send(embed=embed)
@client.command(pass_context=True)
async def join(ctx):
    """Join the voice channel the invoking user is currently in."""
    if ctx.message.author.voice:
        channel = ctx.message.author.voice.channel
        try:
            await channel.connect()
        except discord.ClientException:
            # connect() raises ClientException when already connected; the
            # old bare `except Exception` misreported every other failure
            # (permissions, network) as "already connected" too.
            await ctx.send("Already connected to a voice \U0001f61c")
    else:
        await ctx.send("You're not in a voice channel in order for me to join! \U0001f611")
@client.command(pass_context=True)
async def leave(ctx):
    """Disconnect from the current voice channel, if any."""
    vc = ctx.voice_client
    if not vc:
        await ctx.send("I'm not in a voice channel! \U0001f611")
        return
    await vc.disconnect()
    await ctx.message.add_reaction("☑")
@client.command()
async def ping(ctx):
    """Reply with Pong! as a simple liveness check."""
    await ctx.send('Pong!')
@client.command()
async def echo(ctx, *args):
    """Repeat the caller's words back, joined by single spaces."""
    message = ' '.join(args)
    await ctx.send(message)
@client.command(pass_context=True)
async def clearme(ctx, amount=100):
    """Bulk-delete messages by roughhead1#6919 among the last `amount`
    messages of the current channel.
    """
    channel = ctx.message.channel
    messages = []
    async for message in channel.history(limit=amount):
        # str(Member) renders the "name#discriminator" tag
        if str(message.author) == "roughhead1#6919":
            messages.append(message)
    # (removed the `counter` accumulator, which was incremented but never read)
    await channel.delete_messages(messages)
def displayembed(tit, description, colour, author, image_url, thumbnail_url, footer):
    """Build and return a demo Embed with footer, images, author, and three
    placeholder fields (one full-width, two inline).
    """
    embed = discord.Embed(title=tit, description=description, colour=colour)
    embed.set_footer(text=footer)
    embed.set_image(url=image_url)
    embed.set_thumbnail(url=thumbnail_url)
    embed.set_author(name=author)
    for inline_flag in (False, True, True):
        embed.add_field(name='Field Name', value='Field Value', inline=inline_flag)
    return embed
@client.command()
async def logout(ctx):
    # Log the bot out of Discord and close the connection.
    await client.logout()
@client.command()
async def clearbot(ctx, amount=100):
    """Bulk-delete this bot's own messages (tag T.DJ#4852) among the last
    `amount` messages of the current channel.
    """
    channel = ctx.message.channel
    messages = []
    async for message in channel.history(limit=amount):
        # str(Member) renders the "name#discriminator" tag
        if str(message.author) == "T.DJ#4852":
            messages.append(message)
    # (removed the `counter` accumulator, which was incremented but never read)
    await channel.delete_messages(messages)
@client.command(pass_context=True)
async def play(ctx):
    """Play the bundled track on the current voice connection, then hand
    control to check_queue when the track ends.
    """
    vc = ctx.voice_client
    print("Playing audio")
    # discord.py invokes the `after` callback with one argument (the playback
    # error or None), so the callback must accept it; the previous zero-arg
    # lambda raised TypeError as soon as the track finished.
    vc.play(discord.FFmpegPCMAudio('Don Toliver - Thank God.mp3'),
            after=lambda err: check_queue(ctx.guild.id))
    vc.volume = 100
    vc.is_playing()
@client.command(pass_context=True)
async def pause(ctx):
    """Pause playback; report when no voice connection exists."""
    try:
        # ctx.voice_client is None when not connected, so .pause() raises
        vc = ctx.voice_client
        vc.pause()
    except Exception:
        await ctx.send("Not connected to a voice channel \U0001f61e")
@client.command(pass_context=True)
async def stop(ctx):
    """Stop playback; report when no voice connection exists."""
    try:
        # ctx.voice_client is None when not connected, so .stop() raises
        vc = ctx.voice_client
        vc.stop()
    except Exception:
        await ctx.send("Not connected to a voice channel \U0001f61e")
@client.command(pass_context=True)
async def resume(ctx):
    """Resume playback; report when no voice connection exists."""
    try:
        # ctx.voice_client is None when not connected, so .resume() raises
        vc = ctx.voice_client
        vc.resume()
    except Exception:
        await ctx.send("Not connected to a voice channel \U0001f61e")
def check_queue(id):
    """Dequeue and play the next song for guild `id` once the current
    track has finished.

    BUGFIX: the original referenced an undefined `ctx` (NameError every
    time it fired); the voice client is now looked up from the guild id
    that callers already pass in. Also guards the queue lookup so an
    unknown guild id no longer raises KeyError.
    """
    guild = client.get_guild(id)
    vc = guild.voice_client if guild else None
    if vc and queues.get(id):
        song = queues[id].pop(0)
        vc.play(discord.FFmpegPCMAudio(str(song)), after=lambda e: print('done', e))
        vc.is_playing()
@client.command(pass_context=True)
async def queue(ctx, *args):
    """Append the given songs to this guild's queue and start playing them."""
    vc = ctx.voice_client
    guild_id = ctx.guild.id
    # BUGFIX: the original `[song for i in args]` referenced an undefined
    # name (`song`); each positional argument is itself a song path.
    songs = list(args)
    queues.setdefault(guild_id, []).extend(songs)
    await ctx.send("Song(s) queued")
    for song in songs:
        # `after` receives the playback error (or None) from discord.py
        vc.play(discord.FFmpegPCMAudio(str(song)), after=lambda e: check_queue(ctx.guild.id))
# Schedule the rotating-status background task, then start the bot
# (client.run blocks until the connection is closed).
client.loop.create_task(change_status())
client.run(TOKEN)
|
984,886 | 5f6e2b0f2b85ff07b6e8b57377d142e82846d5d1 | # App Sizes
APP_WIDTH = 800
APP_HEIGHT = 800
CELL_SIZE = 20
# Color Definitions
COLOR_BLACK = (0,0,0)
COLOR_WHITE = (255,255,255)
COLOR_GREY = (100,100,100)
COLOR_PURPLE = (255, 0, 255, 150)
COLOR_GREEN = (0, 255, 0, 100)
# Surface Colors
COLOR_DEFAULT_BG = COLOR_GREY
# App Render Speed
RENDER_SPEED = 60
|
984,887 | 41e0368001008d893db2db5ed65283ed3f45b9b0 | from math import pi
class Algorithm:
    """Converts raw force-sensor readings into timed motor instructions.

    Instruction strings have the form "<time>-<direction>-0", where <time>
    is the number of seconds the motor must run and <direction> is 0 or 1.
    """

    def __init__(self, thresholdSensor1, thresholdSensor2, maxAngle, armLength, speed):
        # Sensor activation thresholds (raw ADC units, 0-1023).
        self.thresholdSensor1 = thresholdSensor1
        self.thresholdSensor2 = thresholdSensor2
        # Physical arm parameters: maxAngle is the largest angle (degrees)
        # covered in one move; armLength and speed set the arc geometry.
        self.maxAngle = maxAngle
        self.armLength = armLength
        self.speed = speed
        # Runtime state of the arm.
        self.firstRun = True
        self.freeMode = False
        self.previousSensorData = [thresholdSensor1, thresholdSensor2]

    def getTimeFromAngle(self, degree, speed, armLength):
        """Return the motor-on time (seconds, 2 decimals) to sweep `degree`.

        BUGFIX: the `speed` and `armLength` parameters were previously
        ignored in favour of self.speed/self.armLength, so callers that
        passed explicit values (freeModeAlgorithm) silently got durations
        computed from the wrong geometry. The parameters are now honoured.
        """
        if degree == 0:
            return 0
        # Arc length of the swept sector of the arm's turning circle.
        distance = (degree / 360) * pi * armLength * 2
        return round(distance / speed, 2)

    def assistedAlgorithm(self, sensor1, sensor2, limitSensor1, limitSensor2):
        """Assisted mode: move proportionally to the force above threshold."""
        # Scale factor mapping raw sensor units to degrees.
        analogToAngle = (1024 - ((self.thresholdSensor1 + self.thresholdSensor2) / 2)) / self.maxAngle
        if limitSensor1 or limitSensor2:
            # A limit switch was hit: back off by a fixed corrective angle.
            direction = max(limitSensor2 - limitSensor1, 0)
            angle = 20
        elif sensor1 > self.thresholdSensor1 or sensor2 > self.thresholdSensor2:
            # Only the force in excess of the threshold contributes.
            sensor1 = sensor1 - self.thresholdSensor1 if sensor1 > self.thresholdSensor1 else 0
            sensor2 = sensor2 - self.thresholdSensor2 if sensor2 > self.thresholdSensor2 else 0
            direction = 1 if sensor1 > sensor2 else 0
            angle = (sensor1 - sensor2 if direction == 1 else sensor2 - sensor1) / analogToAngle
        else:
            # Below both thresholds: hold the current position.
            angle = 0
            direction = 0
        time = self.getTimeFromAngle(angle, self.speed, self.armLength)
        return str(time) + '-' + str(direction) + '-0'

    def freeModeAlgorithm(self, sensor1, sensor2, limitSensor1, limitSensor2):
        """Free mode: fixed small moves; limit hits force a correction."""
        angle = 0
        # BUGFIX: `direction` was previously unbound when no movement was
        # needed, and `time` was only assigned in the elif branch -- both
        # raised NameError for the limit-switch and idle paths.
        direction = 0
        if limitSensor1 or limitSensor2:
            angle = 20
            direction = max(limitSensor2 - limitSensor1, 0)
        elif sensor1 > self.thresholdSensor1 or sensor2 > self.thresholdSensor2:
            # In Free Mode the arm always moves by a fixed angle.
            angle = 10
            direction = 1 if sensor1 - sensor2 > 0 else 0
            # After the corrective move the arm leaves Free Mode.
            self.freeMode = False
        time = self.getTimeFromAngle(angle, 0.3, 0.27)
        return str(time) + '-' + str(direction) + '-0'

    def checkForOutliers(self, sensor1, sensor2):
        """Filter single-sample spikes out of the sensor stream.

        Returns the (possibly replaced) sensor pair; on unexpected errors
        both readings are zeroed so the arm never moves on bad data.
        """
        try:
            if self.firstRun:
                # The very first sample is often incorrect; pass it through
                # but use it only to arm the filter.
                self.firstRun = False
            else:
                # A sudden doubling is treated as a spike and replaced with
                # the previous sample, so a filter mistake never forces the
                # user to push harder than intended.
                if sensor1 > self.previousSensorData[0] * 2 or sensor2 > self.previousSensorData[1] * 2:
                    sensor1 = self.previousSensorData[0]
                    sensor2 = self.previousSensorData[1]
                # Only samples above the thresholds are remembered.
                if sensor1 > self.thresholdSensor1:
                    self.previousSensorData[0] = sensor1
                if sensor2 > self.thresholdSensor2:
                    self.previousSensorData[1] = sensor2
        except Exception as e:
            print(e)
            sensor1 = 0
            sensor2 = 0
        return sensor1, sensor2
984,888 | a62f59dd1468332dbeabf3194e528b604961d38a | #!/usr/bin/python
# -*- coding: utf-8 -*-
# General
import os, sys, math
# Happy
try :
from happy.utils import *
except :
from utils import *
try :
from happy.plot import *
except :
from plot import *
# Stats
from scipy.signal import savgol_filter # SmoothingAUC
from scipy.signal import find_peaks # Finding peaks
from scipy.signal import peak_widths
def auto_find_limits(peaks, thresholds) :
    """Given 2 peaks positions and thresholds, return a dict like
    {"low": int, "dip": int, "high": int} with limits to use"""
    diploid_peak, haploid_peak = min(peaks), max(peaks)
    limits = {}
    for span, boundary in thresholds.items():
        contains_dip = diploid_peak in span
        contains_hap = haploid_peak in span
        if contains_dip and contains_hap:
            limits["dip"] = boundary
        elif contains_dip:
            limits["low"] = boundary
        elif contains_hap:
            limits["high"] = boundary
    if any(key not in limits for key in ("low", "dip", "high")):
        raise Exception("Could not automatically find limits...")
    return limits
def get_threshold_between_peaks(smoothed, peaks, valleys) :
"""For each pairs of consecutive peaks, find the most "middle" valley to set the threshold"""
# For all pairs of consecutive : find the valley in between
valleys = valleys
tresholds = {}
first_peak_width = peak_widths(smoothed, [peaks[0]])[0][0] # Get peak widths
first_peak_boundary = int(peaks[0] - first_peak_width)
first_peak_boundary = 0 if first_peak_boundary < 0 else first_peak_boundary
last_peak_width = peak_widths(smoothed, [peaks[-1]])[0][0] # Get peak widths
last_peak_boundary = int(peaks[-1] + last_peak_width)
last_peak_boundary = len(smoothed) if last_peak_boundary > len(smoothed) else last_peak_boundary
tresholds[(0,peaks[0])] = first_peak_boundary
for p1, p2 in zip(peaks, peaks[1:]) :
valid_thresholds = []
for val in valleys :
if p1 < val < p2 :
valid_thresholds.append(val)
else :
continue
if len(valid_thresholds) > 1 :
most_middle_threshold, diff_size = None, None
for v in valid_thresholds :
x = v - p1
y = p2 - v
diff = abs(x - y)
if diff_size is None or diff < diff_size :
most_middle_threshold = v
diff_size = diff
else :
continue
tresholds[(p1,p2)] = most_middle_threshold
else :
tresholds[(p1,p2)] = valid_thresholds[0]
# last peak
tresholds[(peaks[-1], "inf")] = last_peak_boundary
return tresholds
def determine_peaks_and_limits(
    data, smoothed, limits,
    peak_prom, peak_height,
    valley_prom, valley_height,
    debug, smooth_window_size, outfile,
    skip_smooth,
):
    """Use a smoothed frequency histogram to find peaks and valleys
    Then use the determined peaks and valleys as limits for AUC computation

    Returns (relevant_peaks, thresholds): at most the last 3 peaks and the
    per-interval thresholds from get_threshold_between_peaks.
    Raises when no peaks are found at all.
    """
    mm = max(smoothed)
    peaks, props = find_peaks(smoothed, height=peak_height, prominence=peak_prom) # maxima (peaks positions)
    # Valleys are found as peaks of the vertically-flipped histogram.
    rpeaks, rprops = find_peaks([-i+mm for i in smoothed], height=valley_height, prominence=valley_prom) # minima (peaks limits)
    if len(peaks) > 3 :
        print("WARNING: More than 3 peaks detected.\nPossible erroneous detection:\n\t-Restart setting the -ll parameter.\n\t-check histogram and modify peak height and prominence arguments accordingly.\n\t-Contaminant peaks may also break detection, remove them with tools such as blobtools or by hard-filtering low coverage contigs.")
        print("NOTE: Assuming the last 2 peaks are diploid and haploid...")
    if debug :
        # debug_plot_peak_errors comes from happy.plot / plot (see imports)
        debug_plot_peak_errors(data, smoothed, peaks, limits.values(), rpeaks, smooth_window_size, outfile, skip_smooth)
    if len(peaks) > 0 :
        print("Peaks found: " + "x, ".join(str(p) for p in peaks) + "x")
    else :
        raise Exception("No peaks found! Try changing the input parameters or setting thresholds manually!")
    if len(rpeaks) > 0 :
        print("Valleys found: " + "x, ".join(str(p) for p in rpeaks) + "x")
    else :
        print("No valleys found!")
    # The histogram edges always count as valleys so the outermost peaks
    # still get thresholds on both sides.
    valleys = [0] + list(rpeaks) + [len(smoothed)]
    thresholds = get_threshold_between_peaks(smoothed, peaks, valleys)
    # Keep at most the last 3 peaks (low-coverage, diploid, haploid).
    relevant_peaks = peaks[-3:]
    #valleys = rpeaks[-3:]
    print("Relevant peaks: " + "x, ".join(str(p) for p in relevant_peaks) + "x")
    print("Thresholds:\n\t- " + "\t- ".join("{}: {}x\n".format(k,p) for k,p in thresholds.items()))
    return relevant_peaks, thresholds
def estimate_haploidy(
    infile, size, outfile, # required arguments
    limit_low, limit_dip_hap, limit_high, # manual thresholds (optional)
    peak_prom, peak_height, # peak detection
    valley_prom, valley_height, # valley detection
    window, skip_smooth=False,
    plot=False, debug=False,
):
    """Finds peaks and modality, then computes scores of haploidy

    Reads a coverage histogram, optionally smooths it (Savitzky-Golay),
    locates the diploid and haploid peaks (automatically or from the
    manual -ll/-ld/-lh thresholds), integrates the areas under the curve,
    and writes stats/plots. NOTE(review): ends with sys.exit(0), so it
    never returns to its caller.
    """
    # Get histogram file and size estimation
    HIST = check_files(infile)
    SIZE = size_from_string(size)
    print("# Hap.py estimate")
    print("Coverage histogram:\t{0}\nOutput file:\t{1}\n".format(HIST, outfile))
    print(
        "===============================================================================\n"
    )
    # Read coverage histogram (column 2), dropping the first and last rows
    log("Reading histogram...")
    freqs = [int(line.split()[1]) for line in open(HIST, "r")][1:-1]
    # Apply Savitzky-Golay filter to smooth coverage histogram
    log("Analysing curve...")
    if skip_smooth :
        smoothed = freqs
    else :
        if len(freqs) <= window :
            print("WARNING: SKIPPED smoothing because max coverage < window size setting.\nTo avoid this warning set a lower '-w' or use '--no-smooth'.")
            smoothed = freqs
        else :
            smoothed = [s for s in savgol_filter(freqs, window, 3) if s >= 0]
    limits = {
        "low": limit_low,
        "dip": limit_dip_hap,
        "high": limit_high,
    }
    auto_detect = False
    # Either all three thresholds are given, or none (auto-detection).
    if all(x is None for x in limits.values()) :
        log("Detecting peaks and thresholds automatically...")
        peaks, thresholds = determine_peaks_and_limits(
            freqs, smoothed, limits,
            peak_prom, peak_height, # peak detection
            valley_prom, valley_height, # valley detection
            debug, window, outfile, skip_smooth
        )
        auto_detect = True
    elif any(x is None for x in limits.values()) :
        raise Exception("You must set all values for thresholds -ll, -ld and -lh...")
    else :
        log("Using input thresholds to compute AUC...")
        # Peaks are only searched inside the user-provided coverage window.
        #peaks, props = find_peaks(smoothed, height=peak_height, prominence=peak_prom) # maxima (peaks positions)
        peaks, props = find_peaks(smoothed[limits["low"] : limits["high"]], height=peak_height, prominence=peak_prom) # maxima (peaks positions)
        peaks = [p+limits["low"] for p in peaks]
        auto_detect = False
    heights = [smoothed[i] for i in peaks]
    if debug:
        debug_smooth_histogram(smoothed, peaks, heights, outfile, skip_smooth=skip_smooth)
    params, cov = None, None
    peak_ratios = None
    if not auto_detect :
        # Manual thresholds: the peak ratio needs exactly 2 peaks in range.
        if len(peaks) > 2 :
            print("WARNING: detected more than 2 peaks in given limits: {}x to {}x".format(limits["low"], limits["high"]))
            print("WARNING: haplotigs peak ratio not computed...")
            haplotigs_peak_ratio = None
        elif len(peaks) < 2 :
            print("WARNING: detected less than 2 peaks in given limits: {}x to {}x".format(limits["low"], limits["high"]))
            print("WARNING: haplotigs peak ratio not computed...")
            haplotigs_peak_ratio = None
        else :
            # haplotigs_peak_ratio = diploid peak / haploid peak : if > 1 then more diploid if < 1 then more haploid
            haplotigs_peak_ratio = round(freqs[min(peaks)]/freqs[max(peaks)], 3)
        AUC_low = sum(freqs[: limits["low"]])
        AUC_diplo = sum(freqs[limits["low"] : limits["dip"]])
        AUC_haplo = sum(freqs[limits["dip"] : limits["high"]])
        AUC_high = sum(freqs[limits["high"]:])
    else :
        # Auto-detected peaks: interpret by how many were found.
        if len(peaks) == 0 : # Cannot compute
            raise Exception("No peak found! Try with --debug to check distribution.")
        elif len(peaks) == 1 : # If only one peak
            #log("Found 1 peak at: {}x".format(peaks[0]))
            log("WARNING: Only 1 peak found, either set thresholds manually with -ll, -ld and -lh or adapt peak detection parameters...")
            log("Finished!")
            sys.exit(0)
        elif len(peaks) == 2: # If only 2 peaks
            #log("Assuming peaks are not low...: {0}x and {1}x".format(peaks[0], peaks[1]))
            limits = auto_find_limits(peaks, thresholds)
            AUC_low = sum(freqs[: limits["low"]])
            AUC_diplo = sum(freqs[limits["low"] : limits["dip"]])
            AUC_haplo = sum(freqs[limits["dip"] : limits["high"]])
            AUC_high = sum(freqs[limits["high"]:])
            haplotigs_peak_ratio = round(freqs[min(peaks)]/freqs[max(peaks)], 3)
        elif len(peaks) == 3: # In case 3 peaks : 1 in each category
            #log("Assuming last 2 peaks are relevant: {1}x and {2}x...".format(peaks[1], peaks[2]))
            valid_peaks = peaks[-2:]
            limits = auto_find_limits(valid_peaks, thresholds)
            AUC_low = sum(freqs[: limits["low"]])
            AUC_diplo = sum(freqs[limits["low"] : limits["dip"]])
            AUC_haplo = sum(freqs[limits["dip"] : limits["high"]])
            AUC_high = sum(freqs[limits["high"]:])
            haplotigs_peak_ratio = round(freqs[min(valid_peaks)]/freqs[max(valid_peaks)], 3)
    log("Scoring assembly...")
    # NOTE(review): divides by AUC_haplo -- a zero haploid area would raise
    # ZeroDivisionError here.
    AUC_ratio = 1 - (AUC_diplo / AUC_haplo)
    Haploidy = AUC_haplo / ( (AUC_diplo)/2 + AUC_haplo )
    print("AUC(Haploid): H = {}".format(AUC_haplo))
    print("AUC(Diploid): D = {}".format(AUC_diplo))
    print("Ratio: 1-(D/H) = {}".format(round(AUC_ratio, 3)))
    print("Haploidy: H/(H + (D/2)) = {}".format(round(Haploidy, 3)))
    # Total-size score: how close haploid + half diploid area is to SIZE.
    TSS = 1 - abs(SIZE - (AUC_haplo + AUC_diplo / 2)) / SIZE
    write_stats(outfile, AUC_haplo, AUC_diplo, AUC_ratio, Haploidy)
    if plot:
        log("Outputting plots...")
        plot_metrics(
            outfile,
            smoothed,
            peaks,
            heights,
            limits,
            haplotigs_peak_ratio,
            AUC_ratio,
            TSS,
            AUC_low,
            AUC_diplo,
            AUC_haplo,
            AUC_high,
            skip_smooth,
        )
    log("Finished!")
    sys.exit(0)
def write_stats(outname: str, AUC_haplo: float, AUC_diplo: float, AUC_ratio: float,
    haploidy: float
):
    """Write the four haploidy statistics to <outname>.stats.txt."""
    log("Outputting stats...")
    # Context manager guarantees the handle is closed even if a write fails.
    with open(outname + ".stats.txt", "w") as f:
        f.write("AUC(Haploid) = {}\n".format(AUC_haplo))
        f.write("AUC(Diploid) = {}\n".format(AUC_diplo))
        f.write("Ratio = {}\n".format(AUC_ratio))
        f.write("Haploidy = {}\n".format(haploidy))
|
984,889 | 230ab9993b20eb715146ddcc3003b212acbd6241 | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# AD5252
# This code is designed to work with the AD5252_I2CPOT_1K I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Potentiometers?sku=AD5252_I2CPOT_1K#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)  # I2C bus 1 (the standard bus on Raspberry Pi)
# I2C address of the device
AD5252_DEFAULT_ADDRESS = 0x2C
# AD5252 Command Set -- note the device maps its two wipers to the
# RDAC1 and RDAC3 register addresses (0x01 and 0x03); 0x02 is unused here.
AD5252_WORD_RDAC1_EEMEM1 = 0x01 # Store RDAC1 setting to EEMEM1
AD5252_WORD_RDAC3_EEMEM3 = 0x03 # Store RDAC3 setting to EEMEM3
AD5252_WORD_STORE_EEMEM4 = 0x04 # Store user data to EEMEM4
AD5252_WORD_STORE_EEMEM5 = 0x05 # Store user data to EEMEM5
AD5252_WORD_STORE_EEMEM6 = 0x06 # Store user data to EEMEM6
AD5252_WORD_STORE_EEMEM7 = 0x07 # Store user data to EEMEM7
AD5252_WORD_STORE_EEMEM8 = 0x08 # Store user data to EEMEM8
AD5252_WORD_STORE_EEMEM9 = 0x09 # Store user data to EEMEM9
AD5252_WORD_STORE_EEMEM10 = 0x0A # Store user data to EEMEM10
AD5252_WORD_STORE_EEMEM11 = 0x0B # Store user data to EEMEM11
AD5252_WORD_STORE_EEMEM12 = 0x0C # Store user data to EEMEM12
AD5252_WORD_STORE_EEMEM13 = 0x0D # Store user data to EEMEM13
AD5252_WORD_STORE_EEMEM14 = 0x0E # Store user data to EEMEM14
AD5252_WORD_STORE_EEMEM15 = 0x0F # Store user data to EEMEM15
class AD5252():
    """Interactive helpers for the AD5252 dual digital potentiometer on I2C."""

    def set_channel(self):
        """Prompt until the user enters a valid channel number (0 or 1).

        BUGFIX: the old single `if` retry accepted whatever the second
        answer was, including another invalid channel or a negative number.
        """
        self.channel = int(input("Enter the Channel No. = "))
        while self.channel not in (0, 1):
            self.channel = int(input("Enter the Channel No. = "))
        return self.channel

    def set_resistance(self):
        """Prompt until the user enters a wiper position in 0-256.

        BUGFIX: same single-retry problem as set_channel, and negative
        values were never rejected.
        """
        self.resistance = int(input("Enter the Value from (0-256)= "))
        while not 0 <= self.resistance <= 256:
            self.resistance = int(input("Enter the Value from (0-256)= "))
        return self.resistance

    def write_resistance(self):
        """Write the stored wiper position to the selected channel's RDAC."""
        if self.channel == 0 :
            bus.write_i2c_block_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1, [self.resistance])
        elif self.channel == 1 :
            bus.write_i2c_block_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3, [self.resistance])

    def get_resistance(self):
        """Read data back from AD5252_WORD_RDACx_EEMEMx, 1 byte.

        Returns {'a': wiper-to-A ratio, 'b': wiper-to-B ratio}.
        """
        if self.channel == 0 :
            data = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC1_EEMEM1)
        elif self.channel == 1 :
            data = bus.read_byte_data(AD5252_DEFAULT_ADDRESS, AD5252_WORD_RDAC3_EEMEM3)
        # Convert the raw position to fractional resistances; the 0.075
        # offset accounts for the fixed wiper resistance.
        resistance_wb = (data / 256.0) * 1.0 + 0.075
        resistance_wa = 1.0 - resistance_wb
        return {'a' : resistance_wa, 'b' : resistance_wb}
# Example usage loop: interactively pick a channel and wiper position,
# write it to the potentiometer, and report the resulting resistance ratios.
from AD5252 import AD5252
ad5252 = AD5252()
while True:
    ad5252.set_channel()
    ad5252.set_resistance()
    ad5252.write_resistance()
    ad5252.get_resistance()  # NOTE(review): this first read is discarded
    res = ad5252.get_resistance()
    print "Resistance at WB : %.2f K"%(res['b'])
    print "Resistance at WA : %.2f K"%(res['a'])
    print " ******************************** "
    time.sleep(1)
|
984,890 | 63c4b38dfc8dd5fe1590c748ea951b5916f5caeb | import os
import errno
import numpy as np
from math import pi
import pandas as pd
import seaborn as sns
from decimal import Decimal
from collections import Counter
import matplotlib.pyplot as plt
from bokeh.transform import cumsum
from bokeh.io import output_file, show
from bokeh.core.properties import value
from bokeh.palettes import Category10,Spectral10,Paired,Category20
from bokeh.plotting import figure, show, output_file,save
from bokeh.models import HoverTool
import warnings
warnings.filterwarnings('ignore')
###########################################
### Function for checking missing values ##
###########################################
def check_missing(df, col, file):
    """Report missing values per column and save a sorted summary CSV.

    df:   DataFrame to inspect
    col:  column names, positionally parallel to df's columns
    file: basename for the output file output/<file>_missings.csv
    """
    ##### Replace customized missing value #####
    mis_value_code = None  # set to a sentinel (e.g. -999) to treat it as NaN
    if mis_value_code is not None:
        df = df.replace({mis_value_code: np.nan})
    ##### Search missing values #####
    # enumerate() keeps indexing positional (matching `col`) without the
    # deprecated integer fallback of Series[int] on a labeled index.
    null_counts = df.isnull().sum()
    misVariables = [
        [col[i], cnt, round(cnt / len(df), 3)]
        for i, cnt in enumerate(null_counts)
        if cnt != 0
    ]
    missing = len(misVariables)
    if missing == 0:
        print('Dataset is complete with no blanks.')
    else:
        print('Totally, %d features have missing values (blanks).' % missing)
        df_misVariables = pd.DataFrame.from_records(misVariables)
        # NOTE(review): the third column is a 0-1 fraction despite the
        # '(%)' label -- kept as-is for output compatibility.
        df_misVariables.columns = ['Variable', 'Missing', 'Percentage (%)']
        sort_table = df_misVariables.sort_values(by=['Percentage (%)'], ascending=False)
        outputFile = 'output/%s_missings.csv' % file
        os.makedirs(os.path.dirname(outputFile), exist_ok=True)
        sort_table.to_csv(outputFile)
        print('Check missing outcome is saved to Output/%s_missings.csv' % file)
    print('Missing values check is done!')
def data_describe(df, col, file):
    """Save df.describe() to output/<file>_describe.csv and print the shape."""
    target = 'output/%s_describe.csv' % file
    os.makedirs(os.path.dirname(target), exist_ok=True)
    df.describe().to_csv(target)
    print('There is %d rows and %d columns' % (len(df), len(col)))
    print('Data description is done!')
###########################################
### Function for plot Correlation Matrix ##
###########################################
def corr_Matrix(df, age_range, year):
    """Plot and save a correlation-matrix heatmap for an age range and year."""
    sns.set(style="white")
    corr = df.corr()  # [df_avg['SEX']=='M']
    # Mask for the upper triangle.
    # BUGFIX: the deprecated np.bool alias was removed in NumPy 1.24 and
    # raised AttributeError; the builtin `bool` is the supported spelling.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(15, 12))
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # NOTE(review): `mask` is computed but not passed to heatmap, so the
    # full matrix is drawn; add mask=mask to hide the upper triangle.
    sns.heatmap(corr, cmap=cmap, annot=False, vmax=0.7, vmin=-0.7, #mask=mask,#center=0,
                square=True, linewidths=.2, cbar_kws={"shrink": 0.8})
    plt.title('Correlation Matrix (Age between %d to %d) in %s' %(age_range[0],age_range[1],year))
    filename = 'output/Output_CM/%s/CM_%dTo%d.png'%(year,age_range[0],age_range[1])
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    plt.savefig(filename)
    print('Correlation Matrix plot is done')
    plt.clf()
########################################
### Function for plotting Pie Chart ###
#######################################
def merge(df):
    """Collapse related cost columns into aggregates and order the result.

    Drops the bookkeeping columns SUM/SEX/AGE, sums the GP, transport, GGZ,
    psychology and paramedical sub-columns, then orders surviving columns
    along the canonical `full_col` order.
    """
    ### Merge variables ###
    # BUGFIX: df_merge was only bound when a 'SUM' column existed,
    # raising a NameError for frames without it.
    if 'SUM' in df.columns:
        df_merge = df.drop('SUM', axis=1)
    else:
        df_merge = df.copy()
    merged_col = df_merge.columns
    if 'SEX' in merged_col:
        df_merge = df_merge.drop('SEX', axis=1)
    if 'AGE' in merged_col:
        df_merge = df_merge.drop('AGE', axis=1)
    if 'GP_registration' in merged_col and 'GP_consult' in merged_col and 'GP_others' in merged_col:
        df_merge['GP'] = df['GP_registration'] + df['GP_consult'] + df['GP_others']
        df_merge = df_merge.drop(['GP_registration', 'GP_consult', 'GP_others'], axis=1)
    if 'transport_seat' in merged_col and 'transport_land' in merged_col:
        df_merge['transport'] = df['transport_seat'] + df['transport_land']
        df_merge = df_merge.drop(['transport_seat', 'transport_land'], axis=1)
    if 'basicGGZ' in merged_col and 'specialGGZ' in merged_col:
        df_merge['GGZ'] = df['basicGGZ'] + df['specialGGZ']
        df_merge = df_merge.drop(['basicGGZ', 'specialGGZ'], axis=1)
    elif 'basicGGZ' in merged_col and 'longGGZ' in merged_col and 'specialGGZ' in merged_col:
        # NOTE(review): unreachable -- the two-column case above already
        # matches whenever basicGGZ and specialGGZ are both present.
        df_merge['GGZ'] = df['basicGGZ'] + df['longGGZ'] + df['specialGGZ']
        df_merge = df_merge.drop(['basicGGZ', 'longGGZ', 'specialGGZ'], axis=1)
    if 'firstLinePsy' in merged_col and 'secondLineGGZ' in merged_col:
        df_merge['1stPsy2ndGGZ'] = df['firstLinePsy'] + df['secondLineGGZ']
        df_merge = df_merge.drop(['firstLinePsy', 'secondLineGGZ'], axis=1)
    if 'paramedical_phy' in merged_col and 'paramedical_others' in merged_col:
        df_merge['paramedical'] = df['paramedical_phy'] + df['paramedical_others']
        df_merge = df_merge.drop(['paramedical_phy', 'paramedical_others'], axis=1)
    merged_col_new = list(df_merge.columns)
    full_col = ['medical_specialist', 'GP', 'pharmacy', 'dental', 'transport', 'abroad', 'paramedical',
                'others', '1stPsy2ndGGZ', 'GGZ', 'rehabilitation', 'nursing']
    # Keep only the canonical ordering of whatever columns survived.
    order_col = [c for c in full_col if c in merged_col_new]
    return df_merge[order_col]
def pie_Chart(df,age_range, year):
    """Render a bokeh pie chart of mean cost per category and save it as HTML.

    df is a per-person cost table (one column per category); age_range is a
    (low, high) pair and year a label -- both used only in titles/paths.
    """
    ### Merge variables ###
    df_merge = merge(df)
    # Mean cost per category over all rows.
    x = (df_merge.sum(axis=0)/len(df_merge)).to_dict()
    data = pd.Series(x).reset_index(name='value').rename(columns={'index':'Categories'})
    # Wedge angle = category's share of the full 2*pi circle.
    data['angle'] = data['value']/data['value'].sum() * 2*pi
    data['color'] = Paired[len(x)]
    data['percentage'] = data['value']/data['value'].sum()
    # NOTE(review): this hover references "@label", but the data source has no
    # 'label' column ('Categories'/'legend_new' exist) -- verify; the
    # figure-level tooltips below may be what is actually displayed.
    hover = HoverTool(tooltips=[("Categories","@label"),("percentage", "@percentage{%0.2f}")])
    p = figure(plot_height=400, title="Pie Chart on costs (Age between %d to %d) in %s" %(age_range[0],age_range[1],year), \
           tools=['save', hover], tooltips="@Categories: @value", x_range=(-0.5, 1.0))
    # Legend labels of the form "category: 12.34%".
    legend_new = []
    for i in range(0,len(data['percentage'])):
        legend_new.append(str(data['Categories'][i])+': '+ str(float(round(Decimal(data['percentage'][i]),4)*100))+"%")
    data['legend_new'] = legend_new
    p.wedge(x=0, y=1, radius=0.4,
        start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'),
        line_color="white", fill_color='color', legend='legend_new', source=data)
    # Hide axes and grid -- only wedges and legend should be visible.
    p.axis.axis_label=None
    p.axis.visible=False
    p.grid.grid_line_color = None
    filename = "output/Output_Pie/%s/Pie_%dTo%d.html"%(year,age_range[0],age_range[1])
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    save(p, filename=filename)
    print('Pie plot is done')
##########################################
### Function for plotting Distribution ###
##########################################
def make_hist_plot(title, hist, edges, x, pdf):
    """Build a bokeh figure showing a histogram with the fitted PDF overlaid."""
    fig = figure(title=title, toolbar_location='below',
                 background_fill_color="#fafafa")
    # Histogram bars: one quad per bin.
    fig.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:],
             fill_color="navy", line_color="white", alpha=0.5)
    # Fitted probability-density curve.
    fig.line(x, pdf, line_color="#ff8888", line_width=4, alpha=0.7, legend="PDF")
    # Fix the visible cost window and anchor the y axis at zero.
    fig.x_range.start = 0
    fig.x_range.end = 8000
    fig.y_range.start = 0
    fig.legend.location = "center_right"
    fig.legend.background_fill_color = "#fefefe"
    fig.xaxis.axis_label = 'Cost'
    fig.yaxis.axis_label = 'Pr(cost)'
    fig.grid.grid_line_color = "white"
    return fig
def dist_Plot (df,featureName,age_range,year):
    """Plot the histogram of one cost column with a fitted normal PDF; save as HTML."""
    F = featureName
    fea = df[F].dropna()
    mu = fea.mean()
    sigma = fea.std()
    hist, edges = np.histogram(fea, density=True, bins=120)
    # NOTE(review): the PDF is sampled at len(df) points rather than
    # len(fea) -- harmless for the curve, but confirm it is intentional
    # when the column contains NaNs.
    x = np.linspace(fea.min(), fea.max(), len(df))
    # Normal density using the sample mean and standard deviation.
    pdf = 1/(sigma * np.sqrt(2*np.pi)) * np.exp(-(x-mu)**2 / (2*sigma**2))
    # cdf = (1+scipy.special.erf((x-mu)/np.sqrt(2*sigma**2)))/2
    # NOTE(review): %d truncates mu to an integer in the title.
    p = make_hist_plot("Total healthcare cost in %s (Age between %d to %d) (μ=%d, σ=%s)" \
                   %(year, age_range[0], age_range[1], mu, sigma), hist, edges, x, pdf)
    # show(p)
    filename = "output/Output_Dist/%s/Dist_%dTo%d.html"%(year,age_range[0],age_range[1])
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    save(p, filename=filename)
    print('Distribution plot is done')
##########################################
######## Function for box plot ###########
##########################################
def box_Plot(df, featureSet, file):
    """Draw a nested seaborn boxplot (x, y, hue = featureSet[0..2]) and save it as PNG."""
    sns.set(style="ticks", palette="pastel")
    x_col, y_col, hue_col = featureSet[0], featureSet[1], featureSet[2]
    # One box per x-category, split by the hue column.
    sns.boxplot(x=x_col, y=y_col, hue=hue_col, palette=Spectral10, data=df)
    sns.despine(offset=10, trim=True)
    filename = "output/Output_BoxPlot/%s_%s_%s_%s.png" % (x_col, y_col, hue_col, file)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    plt.savefig(filename)
    plt.clf()
##########################################
### Function for plotting Stacked Area ###
##########################################
def stacked_Plot(df,ageRange,year):
    """Draw a bokeh stacked-area chart of all cost columns over age; save as HTML.

    df has one column per cost category; ageRange bounds the x axis; year is
    used only for the y-axis label and the output path.
    """
    N = len(df.columns)
    names = ["%s" % i for i in df.columns]
    # NOTE(review): the x_range passed here is immediately overridden by the
    # start/end assignments below.
    p = figure(x_range=(0, len(df)-1), y_range=(0, 10000))
    p.grid.minor_grid_line_color = '#eeeeee'
    p.varea_stack(stackers=names, x='index', color=Category20[N], \
                  legend=[value(x) for x in names], source=df)
    p.legend.location = "top_left"
    p.x_range.start = ageRange[0]
    p.x_range.end = ageRange[-1]
    p.xaxis.axis_label = 'Age'
    p.yaxis.axis_label = 'Total healthcare cost in %s' %year
    # reverse the legend entries to match the stacked order
    p.legend[0].items.reverse()
    # show(p)
    filename = "output/Output_Stacked/StackedArea_%s.html" %year
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    save(p, filename=filename)
### Give a age range (0-90) ###
def groupAgeRange(df_vektis, age_range, df_stack):
    """Select one age group and convert aggregated costs to per-person averages.

    Parameters
    ----------
    df_vektis : pandas.DataFrame
        Aggregated table with 'SEX', 'AGE', 'BSNs', 'Insured_year' and cost
        columns.
    age_range : int or (low, high)
        A single age (when df_stack is a DataFrame) or a half-open
        [low, high) age range.
    df_stack : any
        Mode switch: passing a DataFrame selects the single-age branch.

    Returns
    -------
    pandas.DataFrame with per-person average cost columns plus 'SUM'.
    """
    if isinstance(df_stack, pd.DataFrame):
        df_vektis_aged = df_vektis[df_vektis['AGE'] == age_range]
        # Bug fix: the original unconditionally printed age_range[0]/[1],
        # which raises TypeError for a scalar age in this branch.
        print("The number of insured people aged %s: " % age_range, len(df_vektis_aged))
    else:
        # Bug fix: the original built the upper-bound mask from the
        # unfiltered df_vektis and applied it to the filtered frame; build
        # one combined mask on the source frame instead.
        mask = (age_range[0] <= df_vektis['AGE']) & (df_vektis['AGE'] < age_range[1])
        df_vektis_aged = df_vektis[mask]
        print("The number of insured people between %d to %d: " % (age_range[0], age_range[1]), len(df_vektis_aged))
    ### As the original data is aggregated data
    ### so we need to calculate the average costs for per insured person
    # .copy() avoids SettingWithCopyWarning when adding columns below.
    df_avg = df_vektis_aged[['SEX', 'AGE']].copy()
    for c in df_vektis_aged.columns:
        if c not in ('SEX', 'AGE', 'BSNs', 'Insured_year'):
            # aggregate cost / #people, normalised by average insured-years.
            df_avg[c] = (df_vektis_aged[c] / df_vektis_aged['BSNs']) / (df_vektis_aged['Insured_year'] / df_vektis_aged['BSNs'])
    ### Add one column - total healthcare costs ###
    df_avg['SUM'] = df_avg.drop(['SEX', 'AGE'], axis=1).sum(axis=1, skipna=True)
    return df_avg
##########################################
### Function for num-num relation plot ###
##########################################
def plot_numNum(df, featureSet, file):
    """Plot the relation between two numeric columns, optionally split by a hue column; save a PNG."""
    x_col, y_col, hue_col = featureSet[0], featureSet[1], featureSet[2]
    if hue_col == 'None':
        # No target column: KDE joint plot with a scatter overlay.
        sns.set(style="white")
        grid = sns.jointplot(x=x_col, y=y_col, data=df, kind="kde", color="b")
        grid.plot_joint(plt.scatter, c="r", s=30, linewidth=1, marker="+")
        filename = "output/Output_NumNum/%s_%s_%s.png" % (x_col, y_col, hue_col)
    else:
        # Target column given: per-group linear regression fits.
        grid = sns.lmplot(x=x_col, y=y_col, hue=hue_col, data=df,
                          palette='magma', height=6)
        filename = "output/Output_NumNum/%s_%s_%s_%s.png" % (x_col, y_col, hue_col, file)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    grid.savefig(filename)
    print('Numerical-numerical feature plot is done')
    plt.clf()
####################################################
### Function for prepare datasets from all years ###
####################################################
def prepare(df, col, name_col, postcode):
    """Select and rename Vektis columns and attach a numeric 'AGE' column.

    col / name_col are parallel lists: original Vektis column names and the
    English names to give them.  Feature descriptions are provided by
    https://www.vektis.nl/intelligence/open-data -- not every feature exists
    in every year, so missing columns are skipped.  The first (grand total)
    row is removed.  postcode is currently unused (area filtering below is
    intentionally disabled).

    Returns a new DataFrame; df itself is not modified.
    """
    data_col = df.columns
    # Positions (into col/name_col) of the features present in this file.
    present = [idx for idx, c in enumerate(col) if c in data_col]
    # Bug fix: work on a copy -- the original added columns to a slice of
    # df, triggering SettingWithCopyWarning and potentially mutating the
    # caller's frame.
    df_vektis = df[np.array(col)[present]].copy()
    df_vektis.columns = np.array(name_col)[present]
    ### Change the types (int,float,str --> float) of values in the AGE column ###
    age = []
    for i in df['LEEFTIJDSKLASSE']:
        if isinstance(i, str):
            try:
                age.append(float(i))
            except ValueError:
                # Values such as '90+' -- drop the trailing marker.
                age.append(float(i[:-1]))
        else:
            # Bug fix: the original appended only for int/float and silently
            # skipped other types, producing a length-mismatched column.
            age.append(i)
    ### Add new age column ###
    df_vektis['AGE'] = age
    ### Remove the first row (sum) ###
    df_vektis = df_vektis[1:]
    # (postcode-based area filtering intentionally disabled)
    return df_vektis
##############################################################################
### 1. All categories in different years from the same age group (heatmap) ###
##############################################################################
def allCategoriesDiffYear(df_mean_allYears, ageRange_string, years, categories, fileName):
    """For each age group, draw a category x year heatmap of mean costs (PNG).

    df_mean_allYears maps age-group label -> year -> Series of per-category
    mean costs; category/year cells missing from the data are drawn as 0.
    fileName is unused here (the output path is built from the age group).
    """
    for elem in ageRange_string:
        avg_cost = []
        year_plt = []
        cate_plt = []
        # Flatten the nested dict into parallel columns for pivoting.
        for i in categories:
            for j in years:
                year_plt.append(j)
                cate_plt.append(i)
                if i in list(df_mean_allYears[elem][j].keys()):
                    avg_cost.append(df_mean_allYears[elem][j][i])
                else:
                    avg_cost.append(0)
        cost_plt = pd.DataFrame.from_records([cate_plt, year_plt, avg_cost]).transpose()
        cost_plt.columns = ['category', 'Year', 'Cost']
        sns.set()
        # Bug fix: positional DataFrame.pivot arguments were removed in
        # pandas 2.0 -- pass them by keyword (works on all versions).
        cost_df_pivot = cost_plt.pivot(index='category', columns='Year', values='Cost')
        cost_df_pivot.fillna(value=np.nan, inplace=True)
        # Draw a heatmap with the numeric values in each cell
        f, ax = plt.subplots(figsize=(13, 10))
        sns.heatmap(cost_df_pivot, annot=True, fmt="0.4g", linewidths=.5, ax=ax, vmax=500,
                    cmap=sns.cubehelix_palette(10), cbar=True)
        plt.title("Costs in different categories between 2011-2016 in age group of %s"%elem)
        # NOTE(review): indexing ageRange_string by the label elem implies it
        # is a mapping (label -> filename part) -- a plain list would raise
        # TypeError here; confirm against the caller.
        filename = 'Output_Vektis/withMedSpecialist/%s_Category_Years.png'%ageRange_string[elem]
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        plt.savefig(filename)
##############################################################################
### 2. All categories in the same year from different age groups (heatmap) ###
##############################################################################
def allCategoriesDiffAge(df_mean_allYears, ageRange_string, years, categories, fileName):
    """For each year, draw a category x age-group heatmap of mean costs (PNG).

    df_mean_allYears maps age-group label -> year -> Series of per-category
    mean costs; missing category/age cells are drawn as 0.
    """
    for elem in years:
        avg_cost = []
        ages_plt = []
        cate_plt = []
        # Flatten the nested dict into parallel columns for pivoting.
        for i in categories:
            for j in ageRange_string:
                ages_plt.append(j)
                cate_plt.append(i)
                if i in list(df_mean_allYears[j][elem].keys()):
                    avg_cost.append(df_mean_allYears[j][elem][i])
                else:
                    avg_cost.append(0)
        cost_plt = pd.DataFrame.from_records([cate_plt, ages_plt, avg_cost]).transpose()
        cost_plt.columns = ['category', 'Age range', 'Cost']
        sns.set()
        # Bug fix: positional DataFrame.pivot arguments were removed in
        # pandas 2.0 -- pass them by keyword (works on all versions).
        cost_df_pivot = cost_plt.pivot(index='category', columns='Age range', values='Cost')
        cost_df_pivot.fillna(value=np.nan, inplace=True)
        # Draw a heatmap with the numeric values in each cell
        f, ax = plt.subplots(figsize=(13, 10))
        sns.heatmap(cost_df_pivot, annot=True, fmt="0.4g", linewidths=.5, ax=ax, vmax=500,
                    cmap=sns.cubehelix_palette(10), cbar=True)
        plt.title("Costs in different categories from different age groups in %s"%fileName)
        # NOTE(review): the filename does not include elem, so every year
        # iteration overwrites the same PNG and only the last year survives
        # -- confirm whether the path should include the year.
        filename = 'Output_Vektis/%s_Category_Change.png'%fileName
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        plt.savefig(filename)
#################################################
### 3. Sum for different age groups and years ###
#################################################
def SumCost_pivot(df_mean_allYears, ageRange_string, years, categories):
    """Build an age-group x year pivot table of total (summed) costs.

    df_mean_allYears maps age-group label -> year -> Series of per-category
    mean costs; each output cell is that Series' sum.  categories is unused
    but kept for signature compatibility with the sibling helpers.
    """
    age_plt = []
    year_plt = []
    sum_plt = []
    for i in ageRange_string:
        for j in years:
            age_plt.append(i)
            year_plt.append(j)
            sum_plt.append(df_mean_allYears[i][j].sum())
    sum_cost_plt = pd.DataFrame.from_records([age_plt, year_plt, sum_plt]).transpose()
    sum_cost_plt.columns = ['Age range', 'Year', 'Sum of costs']
    # Bug fix: positional DataFrame.pivot arguments were removed in pandas
    # 2.0 -- pass them by keyword (works on all pandas versions).
    sum_cost_plt_pivot = sum_cost_plt.pivot(index='Age range', columns='Year',
                                            values='Sum of costs')
    # Keep the caller-supplied age-group order (pivot sorts alphabetically).
    sum_cost_plt_pivot = sum_cost_plt_pivot.reindex(ageRange_string)
    sum_cost_plt_pivot.fillna(value=np.nan, inplace=True)
    return sum_cost_plt_pivot
def SumCost_heatmap(sum_cost_plt_pivot):
    """Render the age-group x year total-cost pivot as an annotated heatmap PNG."""
    sns.set()
    # Draw a heatmap with the numeric values in each cell
    fig, axis = plt.subplots(figsize=(12, 9))
    sns.heatmap(sum_cost_plt_pivot, annot=True, fmt="0.4g", linewidths=.5,
                ax=axis, vmax=7000, cmap=sns.cubehelix_palette(10), cbar=True)
    plt.title("Sum of costs in different years from different age groups")
    # show() is invoked before saving, mirroring the original flow.
    plt.show()
    out_path = 'Output_Vektis/SumVisualization/SumofCost_FULL.png'
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    plt.savefig(out_path)
##############################
######## Sum line plot #######
##############################
def Sumcost_line (sum_cost_plt_pivot,ageRange_string, start_ageGroup, end_ageGroup):
    """Line plot of total costs per year for a slice of age groups; saves a PNG.

    start_ageGroup/end_ageGroup are positional row indices into the pivot
    (end exclusive); ageRange_string maps those indices to labels for the
    title and filename.
    """
    # start_ageGroup = 15
    # end_ageGroup = 20
    plt.subplots(figsize=(12, 9))
    # NOTE(review): palette is computed but never passed to lineplot.
    palette = sns.color_palette("muted")
    p = sns.lineplot(data=sum_cost_plt_pivot[start_ageGroup:end_ageGroup].transpose(),\
                  linewidth=2.5,legend='full',dashes=False)
    filename = 'Output_Vektis/SumVisualization/from %s - to %s.png'%(ageRange_string[start_ageGroup],
                                                                    ageRange_string[end_ageGroup-1])
    plt.xlabel('Year')
    # plt.ylim(1450,3800)
    plt.ylabel('Sum of costs')
    plt.title('Sum of costs from %s - to %s between 2011-2017' %(ageRange_string[start_ageGroup],
                                                                 ageRange_string[end_ageGroup-1]))
    plt.legend(loc='upper right')
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    plt.savefig(filename)
#################################################
######## 4. Single category plot (heatmap) ######
#################################################
def CatCost_pivot(df_mean_allYears, ageRange_string, years, categories, one_cate):
    """Build an age-group x year pivot of the cost of a single category.

    Age/year cells where one_cate is absent are filled with 0.  categories
    is unused but kept for signature compatibility with the siblings.
    """
    age_plt = []
    year_plt = []
    sglCat_plt = []
    for i in ageRange_string:
        for j in years:
            age_plt.append(i)
            year_plt.append(j)
            # Missing category for this age/year -> treat as zero cost.
            if one_cate in df_mean_allYears[i][j].keys():
                sglCat_plt.append(df_mean_allYears[i][j][one_cate])
            else:
                sglCat_plt.append(0)
    sglCat_cost_plt = pd.DataFrame.from_records([age_plt, year_plt, sglCat_plt]).transpose()
    sglCat_cost_plt.columns = ['Age range', 'Year', 'Cost from %s' % one_cate]
    # Bug fix: positional DataFrame.pivot arguments were removed in pandas
    # 2.0 -- pass them by keyword (works on all pandas versions).
    sglCat_cost_plt_pivot = sglCat_cost_plt.pivot(index='Age range', columns='Year',
                                                  values='Cost from %s' % one_cate)
    # Keep the caller-supplied age-group order (pivot sorts alphabetically).
    sglCat_cost_plt_pivot = sglCat_cost_plt_pivot.reindex(ageRange_string)
    sglCat_cost_plt_pivot.fillna(value=np.nan, inplace=True)
    return sglCat_cost_plt_pivot
def Catcost_heatmap(sglCat_cost_plt_pivot, one_cate):
    """Render the single-category age-group x year pivot as an annotated heatmap PNG."""
    sns.set()
    # Draw a heatmap with the numeric values in each cell
    fig, axis = plt.subplots(figsize=(12, 9))
    sns.heatmap(sglCat_cost_plt_pivot, annot=True, fmt="0.4g", linewidths=.5,
                ax=axis, cmap=sns.cubehelix_palette(10), cbar=True)
    plt.title("Costs from %s in different years from different age groups" %one_cate)
    out_path = 'Output_Vektis/%s/%s.png' %(one_cate,one_cate)
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    plt.savefig(out_path)
##########################################
####### 5. Sum line plot (heatmap) #######
##########################################
def Catcost_line(sglCat_cost_plt_pivot,ageRange_string, start_ageGroup,end_ageGroup, one_cate):
    """Line plot of one category's cost per age group over the years; saves a PNG.

    start_ageGroup/end_ageGroup are positional row indices into the pivot
    (end exclusive); ageRange_string maps those indices to labels.
    """
    # start_ageGroup = 14
    # end_ageGroup = 20
    plt.subplots(figsize=(12, 9))
    p = sns.lineplot(data=sglCat_cost_plt_pivot[start_ageGroup:end_ageGroup].transpose(), \
                   linewidth=2.5,legend='full',dashes=False)
    filename = 'Output_Vektis/%s/from %s to %s.png' \
                %(one_cate, ageRange_string[start_ageGroup],ageRange_string[end_ageGroup-1])
    # NOTE(review): the y-range is hard-coded -- values outside 35..135 are
    # clipped from view; confirm this suits every category.
    plt.ylim(35,135)
    plt.legend(loc='upper right')
    plt.xlabel('Age range')
    plt.ylabel('Costs from %s Package' %one_cate)
    plt.title('Costs from %s from %s to %s '\
              %(one_cate, ageRange_string[start_ageGroup],ageRange_string[end_ageGroup-1]))
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    plt.savefig(filename)
    # plt.show()
# plt.show() |
984,891 | 983070ba80ef997e57fccba159e9c909fec7785d | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 14 12:41:38 2020
@author: Jake Frazer
"""
import numpy as np
import matplotlib.pyplot as plt
# Blackjack per-hand outcome probabilities, draws (pushes) included.
# Bug fix: the original first assigned the no-draw pair
# (p_win = 0.464, p_lose = 0.536) and immediately overwrote it with the
# values below -- that dead code is removed.  (doubling_phase still
# hard-codes the no-draw win probability 0.464 for its all-in hands.)
p_win = 0.4243
p_lose = 0.4909
p_draw = 0.0848
# simulating playing one stretch of hands - true if make it to end - false if hit 0 at some point
def knocked_out(min_bet, value_required, cash_stack):
    """Simulate one stretch of minimum-bet hands toward a wagering requirement.

    Returns (survived, final_cash): False (with 0 cash) if the bankroll ever
    empties before the required amount has been wagered, otherwise True with
    the ending bankroll.
    TODO: carry the final value through so the EV of the strategy can be
    computed from the money actually pulled out.
    """
    # Wagering requirement already satisfied during the doubling phase.
    if value_required < 0:
        return True, cash_stack
    n_hands = value_required / min_bet
    bust_at = -(cash_stack / min_bet)   # net losses (in bets) that empty the bankroll
    net = 0                             # running win/loss count in units of min_bet
    # One uniform draw per hand decides its outcome.
    for roll in np.random.random(int(n_hands)):
        if roll >= p_win + p_draw:
            net -= 1                    # loss
        elif roll >= p_win:
            continue                    # draw: bankroll unchanged, no bust check
        else:
            net += 1                    # win
        if net == bust_at:
            return False, 0
    return True, cash_stack + net * min_bet
def doubling_phase(n, start_cash):
    """Model n consecutive all-in double-or-nothing blackjack hands.

    Returns (prob, end_cash, total_wagered): the probability of winning all
    n hands (0.464 win chance each), the bankroll after the doublings, and
    the total amount staked across them.
    """
    prob = 0.464 ** n
    cash = start_cash
    wagered = 0
    for _ in range(n):
        wagered += cash   # the entire bankroll is staked on each hand
        cash *= 2
    return prob, cash, wagered
def calculate_evs(doubling_hands, start_cash, min_bet, value_required):
    """Estimate the probability of any payout and the mean final cash.

    Runs the deterministic doubling phase, then Monte-Carlo simulates the
    minimum-bet grind toward the remaining wagering requirement.
    Returns (probability_of_return, mean_final_cash_over_simulations).
    """
    # simulates doubling phase for n hands
    prob, end_cash, total_wagered = doubling_phase(doubling_hands, start_cash)
    # make this as big as poss but computational pain!
    n = 10000
    # gives probability of making it to the end with some amount of money
    # sums[0] = prob, sums[1] = money
    # NOTE(review): dtype='int64' truncates fractional cash amounts from
    # knocked_out's (survived, cash) tuples -- confirm this is acceptable.
    result = np.array([knocked_out(min_bet, (value_required-total_wagered), end_cash) for x in range(n)],dtype='int64')
    sums = np.sum(result,0)/n
    probability_of_return = prob*sums[0]
    return probability_of_return, sums[1]
# gather some results for different starting values -- EV's
# Sweep 1..39 initial all-in doubling hands with a £10 start, £5 minimum
# bet and a £3500 wagering requirement (10000 simulations per point).
n_range = range(1,40)
results = np.array([calculate_evs(n,10,5,3500) for n in n_range])
x_axis = [x-1 for x in n_range]
# evs: P(return) * mean final cash; prob: P(return) as a percentage.
evs, prob = [], []
for x in results:
    evs.append(x[0]*x[1])
    prob.append(x[0]*100)
# my situation post doubling phase
my_scenario = calculate_evs(0,480,5,3025)
# ev plot
plt.plot(x_axis, evs)
plt.ylabel("Expectation Value £'s")
plt.xlabel("Initial all-in hands")
plt.title("Expectation value of free blackjack credit")
plt.show()
# prob of getting any return plot
plt.plot(x_axis, prob)
plt.ylabel("Probability of ANY return, %")
plt.xlabel("Initial all-in hands")
plt.title("Likelihood of a return from free blackjack credit")
plt.show()
|
984,892 | c9df73016547ba9230fa67eff32a73c251eabcbd | #!/mnt/ilustre/users/sanger/app/Python/bin/python
from mbio.workflows.single import SingleWorkflow
from biocluster.wsheet import Sheet
# Load the sheet describing the mapsplice mapping tool test and execute it
# as a single biocluster workflow.
wsheet = Sheet("/mnt/ilustre/users/sanger-dev/sg-users/jinlinfang/tooltest/mapsplice_map_single.json")
wf = SingleWorkflow(wsheet)
wf.run()
984,893 | 38b8faff9dfd248b60e6a44a69b48ed901037e66 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""MetadataRecord extractor for dataset information stored in Datalad's own core storage"""
import logging
import time
from uuid import UUID
from .base import DataOutputCategory, ExtractorResult, DatasetMetadataExtractor
lgr = logging.getLogger('datalad.metadata.extractors.metalad_example_dataset')
class MetaladExampleDatasetExtractor(DatasetMetadataExtractor):
    """Minimal example dataset-level extractor.

    Returns a small in-memory metadata record (dataset id, refcommit and a
    timestamped comment) instead of writing any files.
    """
    def get_id(self) -> UUID:
        # Stable UUID identifying this extractor implementation.
        return UUID("b3c487ea-e670-4801-bcdc-29639bf1269b")
    def get_version(self) -> str:
        return "0.0.1"
    def get_data_output_category(self) -> DataOutputCategory:
        # IMMEDIATE: the result is returned in-memory via immediate_data.
        return DataOutputCategory.IMMEDIATE
    def get_required_content(self) -> bool:
        # NOTE(review): presumably signals that dataset content must be
        # fetched before extraction -- confirm against the base class docs.
        return True
    def extract(self, _=None) -> ExtractorResult:
        """Produce the example metadata record for self.dataset."""
        return ExtractorResult(
            extractor_version=self.get_version(),
            extraction_parameter=self.parameter or {},
            extraction_success=True,
            datalad_result_dict={
                "type": "dataset",
                "status": "ok"
            },
            immediate_data={
                "id": self.dataset.id,
                "refcommit": self.dataset.repo.get_hexsha(),
                "comment": f"example dataset extractor "
                           f"executed at {time.time()}"
            })
|
984,894 | f8965ed141cd6ec3083ada04cc7a1e0118930e01 | def main():
    # Trip duration (hours) and average speed (km/h), read as integers.
    tempo = int(input())
    velocidade_media = int(input())
    # Vehicle efficiency: fixed at 12 km per liter.
    km_l_automovel = 12
    quant_litros = (velocidade_media/km_l_automovel)*tempo
    # Liters of fuel consumed, printed with 3 decimal places.
    print('%.3f'%quant_litros)
if __name__ == '__main__':
    main()
984,895 | 468be556aada157337bfb31a25d44479c4843960 | input_num = int(input())
def is_hansu(num):
    """Return 1 if num is a 'hansu' (its digits form an arithmetic progression), else 0."""
    # Every one- or two-digit number trivially qualifies.
    if num < 100:
        return 1
    digits = [int(ch) for ch in str(num)]
    gaps = [a - b for a, b in zip(digits, digits[1:])]
    # Arithmetic progression <=> every adjacent gap equals the first one.
    return 1 if all(g == gaps[0] for g in gaps) else 0
# Count hansu values from 1 through the input bound (inclusive) and print
# the total.
su = 0
for i in range(1, input_num+1):
    su += is_hansu(i)
print(su)
|
984,896 | 5093ab1d0e5d63e9e77de5fb05b9d745f1480a59 | ii = [('CookGHP3.py', 1), ('WilbRLW4.py', 1), ('CookGHP.py', 1), ('ClarGE2.py', 2), ('CarlTFR.py', 1), ('LyttELD.py', 1), ('AinsWRR3.py', 3), ('BailJD1.py', 2), ('LyelCPG.py', 1), ('DibdTRL2.py', 2), ('AinsWRR.py', 2), ('CrocDNL.py', 2), ('BackGNE.py', 1), ('MedwTAI2.py', 1), ('WheeJPT.py', 1), ('HogaGMM.py', 1), ('MartHRW.py', 1), ('MackCNH.py', 1), ('FitzRNS4.py', 2), ('RoscTTI.py', 1), ('LewiMJW.py', 1), ('AinsWRR2.py', 2), ('BrewDTO.py', 1), ('DibdTRL.py', 2), ('FitzRNS2.py', 3), ('EvarJSP.py', 2), ('BeckWRE.py', 1), ('DibdTBR.py', 1), ('ClarGE4.py', 1)] |
984,897 | 6fa96525d622a5011593b16b72c38df77fd374a7 | #!/usr/bin/env python3
import math
import socket
import numpy as np
HOST = '127.0.0.1'  # Standard loopback interface address (localhost)
PORT = 65432        # Port to listen on (non-privileged ports are > 1023)

# Simulated robot pose and bumper bitfield.
x = 0
y = 0
angle = 0
sensors = 0b000000

# 91 range readings covering 0..180 degrees in 2-degree steps;
# 64.5 is the "nothing detected" default distance.
sensor_data = np.full(91, 64.5)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    conn, addr = s.accept()
    with conn:
        print('Connected by', addr)
        start = conn.recv(1).decode('utf-8')
        print(start)
        if start == 'c':
            while True:
                # Manually enter a scan; a blank entry keeps the default.
                for i in range(91):
                    # Bug fix: the original concatenated int * 2 with a str
                    # (TypeError) and stored the raw string into the array.
                    temp = input('%d degrees: ' % (i * 2))
                    sensor_data[i] = float(temp) if temp != "" else 64.5
                # Bug fix: pose values are numbers -- build the message with
                # explicit formatting instead of int + str concatenation.
                cybot_out = '%s\n%s\n%s\n%s\n' % (x, y, angle, sensors)
                cybot_out += ''.join('%s\n' % reading for reading in sensor_data)
                cybot_out += ';'
                # Bug fix: send on the accepted connection, not the
                # listening socket, and send the whole payload in one call.
                conn.sendall(cybot_out.encode('utf-8'))
                # First byte selects the command; the digits until 'g' are
                # the value.
                cybot_in_dir = conn.recv(1).decode('utf-8')
                cybot_in_val = ""
                while True:
                    # Bug fix: recv on conn (not s) and compare str to str
                    # (the original compared bytes to 'g', never matching).
                    ch = conn.recv(1).decode('utf-8')
                    if ch == 'g':
                        break
                    cybot_in_val += ch
                # Bug fix: the accumulated value is text; convert before
                # doing arithmetic with it.
                amount = float(cybot_in_val)
                if cybot_in_dir == 'a':
                    angle += amount % 360
                elif cybot_in_dir == 'd':
                    angle -= amount % 360
                elif cybot_in_dir == 'w':
                    # Bug fix: angle is tracked in degrees, but math.cos/sin
                    # expect radians -- convert for the displacement.
                    x += amount * math.cos(math.radians(angle))
                    y += amount * math.sin(math.radians(angle))
                elif cybot_in_dir == 's':
                    x -= amount * math.cos(math.radians(angle))
                    y -= amount * math.sin(math.radians(angle))
984,898 | 1ac015b51d55804ffe74e32705b1fa4f5eecc5d2 | ##Write a Python program to find those numbers which are
##divisible by 7 and multiple of 5, between 1500 and 2700 (both included)
# A number divisible by both 7 and 5 is exactly a multiple of 35
# (7 and 5 are coprime), so a single modulus test suffices.
for candidate in range(1500, 2701):
    if candidate % 35 == 0:
        print(candidate)
|
984,899 | f1ab688732ddef09e3469c91d9079ff93b17c949 | #!python3
#-*- coding:utf-8 -*-
import random
import futurist
import timeit
import queue
#from __future__ import print_function
import psutil
import multiprocessing
import os , sys
"""
- java의 multi threading vs python multi threading vs Python Processing
의 performance를 비교하기 위한 예제 소스
- result :
mp_performance_2_result.txt
"""
# Shared work queue for the threaded benchmark.  NOTE(review): this rebinds
# the module name 'queue' to a Queue instance, shadowing the stdlib module
# for the rest of the file -- confirm nothing below needs queue.Queue again.
queue = queue.Queue()
def compute(_queue):
    """Sum one million random ints in [0, 100] and push the total onto _queue."""
    total = sum(random.randint(0, 100) for _ in range(1000000))
    _queue.put(total)
def resource_info(_startTime):
    """Print current CPU utilisation and the elapsed time since _startTime."""
    cpu = psutil.cpu_percent()
    elapsed = timeit.default_timer() - _startTime
    print('CPU usage : {}'.format(cpu))
    print('consume time : {}'.format(elapsed))
def mp_test():
    """Benchmark compute() on a process pool of 8 workers and report resources."""
    qu = multiprocessing.Manager().Queue()
    startTime = timeit.default_timer()
    with futurist.ProcessPoolExecutor(max_workers=8) as executors:
        pending = [executors.submit(compute, qu) for _ in range(8)]
        _ = [job.result() for job in pending]
    print('result : {}'.format(qu.get()))
    resource_info(startTime)
def mt_test():
    """Benchmark compute() on a thread pool of 8 workers and report resources."""
    startTime = timeit.default_timer()
    with futurist.ThreadPoolExecutor(max_workers=8) as executors:
        pending = [executors.submit(compute, queue) for _ in range(8)]
        _ = [job.result() for job in pending]
    print('result : {}'.format(queue.get()))
    resource_info(startTime)
def main():
    """Dispatch to the threaded ('1') or multiprocessing benchmark via argv[1]."""
    print('argv : {}'.format(sys.argv))
    if sys.argv[1] != '1':
        mp_test()
    else:
        mt_test()


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.