index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,700 | ce3c1a7210632d0a8475fe886d514eb91d3c75ac | ''' Model package should containt all data types for the database engine,
which means that projects like PyCIM can be included within ''' |
def formula(a, b):
    """Return (a + b) / b, or None (with a warning) when b is zero.

    Fix: converted the Python 2 `print` statements to the Python 3 print
    function and made the zero-divisor branch return None explicitly.
    """
    if b == 0:
        print("You can not divide by zero")
        return None
    return (a + b) / b


print(formula(4, 4))
print(formula(2, 0))
|
7,702 | 6199a2ac12e80395f4a7a54877c5b639315e64aa | import numpy as np
import sys
class NeuralNetworkClassifier():
    """One-hidden-layer feed-forward neural network classifier trained with
    mini-batch gradient descent.

    Layer 1: z_1 = W_1 x + b_1, h_1 = relu(z_1)
    Layer 2: z_2 = W_2 h_1 + b_2, y_hat = softmax(z_2)

    Each weight matrix has independent L1 (beta) and L2 (alpha)
    regularization strengths.
    """

    def __init__(self, hidden_units, learning_rate, batch_size, epochs,
                 l_1_beta_1, l_1_beta_2, l_2_alpha_1, l_2_alpha_2):
        self._hidden_units = hidden_units
        self._learning_rate = learning_rate
        self._batch_size = batch_size
        self._epochs = epochs
        # L1 strengths for W_1 / W_2
        self._l_1_beta_1 = l_1_beta_1
        self._l_1_beta_2 = l_1_beta_2
        # L2 strengths for W_1 / W_2
        self._l_2_alpha_1 = l_2_alpha_1
        self._l_2_alpha_2 = l_2_alpha_2

    def fit(self, X_train, Y_train):
        """Train on X_train (examples x features) against one-hot Y_train
        (examples x classes), printing loss/accuracy once per epoch."""
        num_input_dimensions = X_train.shape[1]
        self._num_classes = Y_train.shape[1]
        training_set_size = X_train.shape[0]
        # 1/sqrt(fan) scaled Gaussian initialization, small positive biases.
        self._W_1 = 1 / np.sqrt(self._hidden_units) * np.random.randn(self._hidden_units, num_input_dimensions)
        self._W_2 = 1 / np.sqrt(self._num_classes) * np.random.randn(self._num_classes, self._hidden_units)
        self._b_1 = 0.01 * np.ones((self._hidden_units, 1))
        self._b_2 = 0.01 * np.ones((self._num_classes, 1))
        for epoch in range(self._epochs):
            for batch_start in range(0, training_set_size, self._batch_size):
                batch_end = batch_start + self._batch_size
                X_batch = X_train[batch_start:batch_end]
                Y_batch = Y_train[batch_start:batch_end]
                num_examples = X_batch.shape[0]
                W_1_prime_total = 0
                W_2_prime_total = 0
                b_1_prime_total = 0
                b_2_prime_total = 0
                # Accumulate per-example gradients over the batch.
                for i in range(num_examples):
                    x = np.vstack(X_batch[i, :])  # column vector
                    y = np.vstack(Y_batch[i, :])
                    z_1, h_1, y_hat = self._forward_propagation(x)
                    W_1_prime, W_2_prime, b_1_prime, b_2_prime = self._backward_propagation(x, y, z_1, h_1, y_hat)
                    W_1_prime_total += W_1_prime
                    W_2_prime_total += W_2_prime
                    b_1_prime_total += b_1_prime
                    b_2_prime_total += b_2_prime
                # One gradient-descent step per batch.
                self._W_1 = self._W_1 - self._learning_rate * W_1_prime_total
                self._W_2 = self._W_2 - self._learning_rate * W_2_prime_total
                self._b_1 = self._b_1 - self._learning_rate * b_1_prime_total
                self._b_2 = self._b_2 - self._learning_rate * b_2_prime_total
            # Per-epoch diagnostics: loss on the last batch, accuracy on the
            # whole training set.
            Y_hats = self.predict(X_batch)
            y_hat = self.predict(X_train)
            print("Epoch %3d/%3d Loss = %.2f Training Accuracy = %.2f" % (epoch + 1, self._epochs, self._cross_entropy_loss(Y_batch, Y_hats), self.score(Y_train, y_hat)))

    def _forward_propagation(self, x):
        """Forward pass for one column-vector example x."""
        z_1 = self._W_1.dot(x) + self._b_1
        h_1 = self._relu(z_1)
        z_2 = self._W_2.dot(h_1) + self._b_2
        y_hat = self._softmax(z_2)
        return z_1, h_1, y_hat

    def _backward_propagation(self, x, y, z_1, h_1, y_hat):
        """Per-example gradients of the regularized cross-entropy loss.

        Bug fix: the bias gradients were previously pre-multiplied by the
        learning rate here AND again in fit()'s update step, so biases were
        effectively trained with learning_rate ** 2. All gradients are now
        returned unscaled and fit() applies the learning rate once.
        """
        df_dy = y_hat - y  # softmax + cross-entropy gradient w.r.t. z_2
        g = self._g(df_dy, self._W_2, z_1)
        W_1_prime = self._W_1_prime(x, g, self._W_1, self._l_2_alpha_1, self._l_1_beta_1)
        W_2_prime = self._W_2_prime(df_dy, h_1, self._W_2, self._l_2_alpha_2, self._l_1_beta_2)
        b_1_prime = self._b_1_prime(g)
        b_2_prime = self._b_2_prime(df_dy)
        return W_1_prime, W_2_prime, b_1_prime, b_2_prime

    def predict(self, X):
        """Return softmax class probabilities, one row per example."""
        num_examples = X.shape[0]
        Y_hat = np.zeros((num_examples, self._num_classes))
        for i in range(num_examples):
            x = np.vstack(X[i, :])
            _, _, y_hat = self._forward_propagation(x)
            Y_hat[i, :] = y_hat[:, 0]
        return Y_hat

    def _relu(self, x):
        """Element-wise max(x, 0)."""
        return np.maximum(x, 0)

    def _relu_prime(self, x):
        """Element-wise relu derivative: 1 where x > 0, else 0."""
        y = np.zeros((x.shape[0], x.shape[1]))
        y[x > 0] = 1.0
        return y

    def _softmax(self, Z):
        """Column-wise softmax; the column max is subtracted first so large
        logits cannot overflow np.exp (mathematically unchanged)."""
        exp = np.exp(Z - np.max(Z, axis=0))
        total = np.sum(exp, axis=0)
        return exp / total

    def _g(self, df_dy, W_2, z_1):
        """Back-propagated error at the hidden layer (column vector)."""
        return (df_dy.T.dot(W_2) * self._relu_prime(z_1.T)).T

    def _W_2_prime(self, df_dy, h_1, W_2, alpha_2, beta_2):
        """dLoss/dW_2 including L2 (alpha) and L1 (beta) penalty terms."""
        return df_dy.dot(h_1.T) + alpha_2 * W_2 + beta_2 * np.sign(W_2)

    def _b_2_prime(self, df_dy):
        """dLoss/db_2."""
        return df_dy

    def _W_1_prime(self, x, g, W_1, alpha_1, beta_1):
        """dLoss/dW_1 including L2 (alpha) and L1 (beta) penalty terms."""
        return g.dot(x.T) + alpha_1 * W_1 + beta_1 * np.sign(W_1)

    def _b_1_prime(self, g):
        """dLoss/db_1."""
        return g

    def _l_1_loss(self, W):
        """L1 penalty: sum of absolute weights."""
        return np.sum(np.absolute(W))

    def _l_2_loss(self, W):
        """L2 penalty: 0.5 * ||W||^2.

        Bug fix: the original returned 0.5 * ||W|| (unsquared), which is
        inconsistent with the alpha * W gradient used by the weight updates.
        """
        return 0.5 * np.linalg.norm(W) ** 2

    def _cross_entropy_loss(self, y, yhat):
        """Total regularized cross-entropy of yhat against one-hot y
        (both examples x classes)."""
        loss = 0
        yhat_log = np.log(yhat.T)
        for i in range(len(y)):
            loss -= y[i, :].dot(yhat_log[:, i])
        l_1_regularization = self._l_1_beta_1 * self._l_1_loss(self._W_1) + self._l_1_beta_2 * self._l_1_loss(self._W_2)
        l_2_regularization = self._l_2_alpha_1 * self._l_2_loss(self._W_1) + self._l_2_alpha_2 * self._l_2_loss(self._W_2)
        return loss + l_1_regularization + l_2_regularization

    def _toClassIndices(self, probabilities):
        """Row-wise argmax: probability matrix -> class-index vector."""
        return np.argmax(probabilities, axis=1)

    def loss(self, testing_labels, predicted_labels):
        """Cross-entropy loss of predictions against one-hot labels.

        Bug fix: this was a stub that always returned 0, so the validation
        loss printed during hyperparameter search was meaningless.
        """
        return self._cross_entropy_loss(testing_labels, predicted_labels)

    def score(self, expected_labels, predicted_labels):
        """Fraction of examples whose argmax class matches."""
        return np.mean(self._toClassIndices(expected_labels) == self._toClassIndices(predicted_labels))
def describe_hyperparameters(hyperparameters):
    """Render a 6-tuple (hidden units, learning rate, minibatch size,
    epochs, L1 strength, L2 strength) as a one-line summary string."""
    labels = ("Hidden Units", "Learning Rate", "Minibatch Size",
              "Epochs", "L1 Strength", "L2 Strength")
    pairs = " ".join("{0}: {1}".format(label, value)
                     for label, value in zip(labels, hyperparameters[:6]))
    return "\n" + pairs
def findBestHyperparameters(training_images, training_labels, validation_images, validation_labels):
    """Evaluate 10 predefined hyperparameter combinations and return the
    tuple with the best validation accuracy.

    Bug fix: the original indexed the candidate lists with an undefined
    `slice_start` variable (NameError on first iteration); the loop index
    is used directly instead.
    """
    print("Start training...")
    print()
    # Ten hand-picked candidate settings, read column-wise across the lists.
    all_hidden_units = [20, 20, 30, 30, 40, 40, 50, 50, 60, 30]
    all_learning_rates = [0.0001, 0.001, 0.01, 0.01, 0.01, 0.02, 0.02, 0.1, 0.2, 0.007]
    all_minibatch_sizes = [2, 5, 10, 10, 20, 20, 100, 50, 50, 25]
    all_num_epochs = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3]
    all_l1_strengths = [0.0, 0.0, 0, 0.01, 0.0, 0.001, 0.01, 0.02, 0.01, 0.001]
    all_l2_strengths = [0.0, 0.01, 0.001, 0.0, 0.01, 0.001, 0.01, 0.02, 0.01, 0.001]
    best_accuracy = 0
    best_hyperparamters = []
    for i in range(10):
        hyperparameters = (all_hidden_units[i],
                           all_learning_rates[i],
                           all_minibatch_sizes[i],
                           all_num_epochs[i],
                           all_l1_strengths[i],
                           all_l2_strengths[i])
        print(describe_hyperparameters(hyperparameters))
        # The same L1/L2 strength is applied to both layers.
        clf = NeuralNetworkClassifier(
            hidden_units=hyperparameters[0],
            learning_rate=hyperparameters[1],
            batch_size=hyperparameters[2],
            epochs=hyperparameters[3],
            l_1_beta_1=hyperparameters[4],
            l_1_beta_2=hyperparameters[4],
            l_2_alpha_1=hyperparameters[5],
            l_2_alpha_2=hyperparameters[5])
        clf.fit(training_images, training_labels)
        predicted_labels = clf.predict(validation_images)
        accuracy = clf.score(validation_labels, predicted_labels)
        print("Accuracy: %f" % accuracy)
        print("Cross Entropy Loss = %.2f" % (clf.loss(validation_labels, predicted_labels)))
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_hyperparamters = hyperparameters
            print("Found new best hyperparameters.")
    print("\n")
    print(describe_hyperparameters(best_hyperparamters))
    return best_hyperparamters
def main():
    """Load the MNIST .npy dumps, grid-search hyperparameters on a 16k
    training subset, retrain on the full training set, and predict on the
    test set."""
    training_images = np.load("mnist_train_images.npy")
    training_labels = np.load("mnist_train_labels.npy")
    testing_images = np.load("mnist_test_images.npy")
    testing_labels = np.load("mnist_test_labels.npy")
    validation_images = np.load("mnist_validation_images.npy")
    validation_labels = np.load("mnist_validation_labels.npy")
    # Search on the first 16000 training examples only (speed).
    parameters = findBestHyperparameters(training_images[0:16000, :], training_labels[0:16000, :],
                                         validation_images, validation_labels)
    clf = NeuralNetworkClassifier(hidden_units=parameters[0],
                                  learning_rate=parameters[1],
                                  batch_size=parameters[2],
                                  epochs=parameters[3], l_1_beta_1=parameters[4], l_1_beta_2=parameters[4], l_2_alpha_1=parameters[5], l_2_alpha_2=parameters[5])
    clf.fit(training_images, training_labels)
    # NOTE(review): predicted_labels (and testing_labels) are never scored
    # or printed — the final test accuracy is silently discarded.
    predicted_labels = clf.predict(testing_images)
if __name__ == "__main__":
if len(sys.argv) != 1:
print("Usage: python3 digit_recognizer.py")
exit()
main() |
7,703 | f398b724fc28bc25ddb8baf492f34075db0c1f61 | COPY_GOOGLE_DOC_KEY = '1CdafeVmmtNa_PMV99TapPHvLUVzYz0xkvHcpINQtQ6c'
DEPLOY_SLUG = 'al-qassemi'
NUM_SLIDES_AFTER_CONTENT = 2
# Configuration
AUDIO = True
VIDEO = False
FILMSTRIP = False
PROGRESS_BAR = False |
7,704 | cd9f25a2810b02f5588e4e9e8445e7aaec056bf8 | import scrapy
from yijing64.items import Yijing64Item
# import pymysql
class ZhouyiSpider(scrapy.Spider):
    """Scrapy spider for the 64 I-Ching hexagram pages on m.zhouyi.cc."""
    name = 'zhouyi'
    allowed_domains = ['m.zhouyi.cc']
    start_urls = ['https://m.zhouyi.cc/zhouyi/yijing64/']

    def parse(self, response):
        """Collect the hexagram list entries and follow each detail link."""
        li_list = response.xpath("//div[@class='gualist1 tip_text']/ul/li")
        for li in li_list:
            item = Yijing64Item()
            item['name'] = li.xpath("./a/text()").extract_first()
            # item['urls'] = li.xpath("./a/@href").extract_first()
            detail_urls = 'https://m.zhouyi.cc' + \
                li.xpath("./a/@href").extract_first()
            # NOTE(review): if extract_first() returns None the string
            # concatenation above raises TypeError *before* this guard runs,
            # so the check can never be False — confirm the intended order.
            if detail_urls is not None:
                yield scrapy.Request(detail_urls, callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        """Fill the four hexagram table cells of the item and yield it."""
        item = response.meta["item"]
        item['hexagram1'] = response.xpath("//div/table/tbody/tr[3]/td[1]/text()").extract_first().strip()
        item['hexagram2'] = response.xpath("//div/table/tbody/tr[3]/td[2]/text()").extract_first().strip()
        item['hexagram3'] = response.xpath("//div/table/tbody/tr[3]/td[3]/text()").extract_first().strip()
        item['hexagram4'] = response.xpath("//div/table/tbody/tr[3]/td[4]/text()").extract_first().strip()
        # item['hexagram'] = response.xpath("//div[@class='tip_text'][1]").extract_first().strip()
        # item['one_yao'] = response.xpath("//div[@class='tip_text'][2]").extract_first().strip()
        # item['two_yao'] = response.xpath("//div[@class='tip_text'][3]").extract_first().strip()
        # item['san_yao'] = response.xpath("//div[@class='tip_text'][4]").extract_first().strip()
        # item['si_yao'] = response.xpath("//div[@class='tip_text'][5]").extract_first().strip()
        # item['wu_yao'] = response.xpath("//div[@class='tip_text'][6]").extract_first().strip()
        # item['liu_yao'] = response.xpath("//div[@class='tip_text'][7]").extract_first().strip()
        yield item
        # hexagram_list = response.xpath(
        #     "//div/table/tbody/tr[3]/td/text()").extract()
        # for i, v in enumerate(hexagram_list):
        #     # print("=="*10)
        #     # print(i,index)
        #     if i == 0:
        #         item['hexagram1'] = v.strip()
        #     elif i == 1:
        #         item['hexagram2'] = v.strip()
        #     elif i == 2:
        #         item['hexagram3'] = v.strip()
        #     else:
        #         item['hexagram4'] = v.strip()
        #     yield item
        #     print(item)

    # def __init__(self):
    #     con = pymysql.connect(host=settings['MYSQL_HOST'], user=settings['MYSQL_USER'], passwd=settings['MYSQL_PASS'], db=settings['MYSQL_DB'],charset='utf8')
    #     cur = con.cursor()  # create the database connection and define the cursor
    #     con.close()
|
# Fibonacci sequence (each term = the sum of the previous two)
# Also called the golden-ratio sequence: term/next-term approaches 0.618
# list1 = []
# for i in range(20):
#     if i <= 1:
#         list1.append(1)
#     else:
#         list1.append(list1[-2]+list1[-1])
# print(list1)
import random

# Bucket 20 random scores (1..100) into grade bands.
# NOTE(review): only the A (>=90) and B (>=80) bands are ever filled; the
# 'C' and 'D' keys stay empty — confirm whether further elif bands were
# intended.  Also, the loop variable `re` shadows the name of the stdlib
# regex module.
dict1 = {'A': [], 'B': [], 'C': [], 'D': []}
for i in range(20):
    re = random.randint(1, 100)
    if re >= 90:
        dict1['A'].append(re)
    elif re >= 80:
        dict1['B'].append(re)
print(dict1)
7,706 | 8be6031caad26ec6b6b99b8d8b8f80d16ad243d4 | from django.db.models import Count
from django.utils.text import slugify
from rest_framework.serializers import ModelSerializer, SerializerMethodField, Serializer
from rest_framework import serializers
from category.models import Category
from product.models import Product, GalleryProduct, Stone, Color, Size
from category.api.serializers import CategorySerializer
from extensions.calculations import calculating_gold_jewelry
from taggit_serializer.serializers import (
TagListSerializerField,
TaggitSerializer
)
def _create_custom_uuid():
    """Build a product identifier of the form 'EUA' + zero-padded 7-digit id.

    NOTE(review): the id of the *last existing* product is reused, not
    incremented — confirm whether 'EUA' + (last id + 1) was intended for
    new products.
    """
    max_id = 1
    ex_last_product = Product.objects.last()
    if ex_last_product:
        max_id = ex_last_product.id
    # max_id can no longer be None here (it defaults to 1 above), so the
    # conditional expression below is effectively redundant.
    my_id = '{}{:07d}'.format('EUA', max_id if max_id is not None else 1)
    return my_id
class ColorSerializer(ModelSerializer):
    """Minimal serializer exposing a Color's id and colour value."""
    class Meta:
        model = Color
        fields = ['id', 'color']
class SizeSerializer(ModelSerializer):
    """Minimal serializer exposing a Size's id and size value."""
    class Meta:
        model = Size
        fields = ['id', 'size']
class StoneSerilizer(ModelSerializer):
    """Serializer exposing every Stone field.

    NOTE(review): class name misspells 'Serializer'; left as-is because
    renaming would break any importer of this module.
    """
    class Meta:
        model = Stone
        fields = '__all__'
class ImageCreateProductSerializer(serializers.ModelSerializer):
    """Serializer for uploading a single gallery image.

    Fix: the original subclassed `serializers.Serializer`, which ignores
    `Meta.model` / `Meta.fields`, so the declared `image` field never
    existed; `ModelSerializer` is required for the Meta options to take
    effect.
    """
    class Meta:
        model = GalleryProduct
        fields = ['image']
class ProductListSerializer(serializers.ModelSerializer):
    """List-view serializer for Product: core columns plus derived
    gallery, category and price fields."""
    gallery = serializers.SerializerMethodField()
    category = serializers.SerializerMethodField()
    price = serializers.SerializerMethodField()

    class Meta:
        model = Product
        fields = [
            'id',
            'rating',
            'title',
            'slug',
            'image',
            'gallery',
            'category',
            'price'
        ]

    def get_category(self, obj):
        """Nested representation of the product's category."""
        result = obj.category
        return CategorySerializer(instance=result).data

    def get_gallery(self, obj):
        """All gallery images attached to this product."""
        result = GalleryProduct.objects.filter(product_id=obj)
        return ImageProductSerializer(instance=result, many=True).data

    def get_price(self, obj):
        """Expose the model's price attribute as-is."""
        return obj.price
class ProductsOrderCartSerializer(ModelSerializer):
    """Compact Product representation used inside order/cart payloads."""
    class Meta:
        model = Product
        fields = ['id', 'title', 'slug', 'image']
class ProductDetailSerializer(TaggitSerializer, ModelSerializer):
    """Detail-view serializer for Product: every model field except the
    internal pricing/rate columns, plus derived tags, gallery, colours,
    sizes, category and price."""
    tags = TagListSerializerField()
    gallery = SerializerMethodField()
    color = SerializerMethodField()
    size = SerializerMethodField()
    category = SerializerMethodField()
    price = serializers.SerializerMethodField()

    class Meta:
        model = Product
        # Keep supplier-side pricing inputs out of the API response.
        exclude = [
            'site_rate',
            'is_rate_fixed',
            'provider_gold_rate',
            'provider_diamond_price',
        ]

    def get_color(self, obj):
        """All related colours (obj.color.all())."""
        result = obj.color.all()
        return ColorSerializer(instance=result, many=True).data

    def get_size(self, obj):
        """All related sizes (obj.size.all())."""
        result = obj.size.all()
        return SizeSerializer(instance=result, many=True).data

    def get_category(self, obj):
        """Nested representation of the product's category."""
        return CategorySerializer(instance=obj.category).data

    def get_gallery(self, obj):
        """All gallery images attached to this product."""
        result = GalleryProduct.objects.filter(product_id=obj)
        return ImageProductSerializer(instance=result, many=True).data

    def get_price(self, obj):
        """Expose the model's price attribute as-is."""
        return obj.price
class ImageProductSerializer(ModelSerializer):
    """Serializer for a gallery image together with its owning product."""
    class Meta:
        model = GalleryProduct
        fields = ['image', 'product']
|
7,707 | 8aeb7786984f27fabdcaffa54f52eb868c277fdb | # Global version information
__version__ = "0.6.1"
|
7,708 | 834469f9c6e065fb29dfe1fd3e421fbb752f5094 | from datetime import datetime
from app import db
class Vocabulary(db.Model):
    """A single vocabulary word tagged with its language and creation time."""
    _id = db.Column(db.Integer, primary_key=True)
    language = db.Column(db.String(64), index=True)
    # unique=True: the same word cannot be stored twice, even for
    # different languages.
    word = db.Column(db.String(64), index=True, unique=True)
    date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
|
7,709 | b670655e3a8e88b97eed35e187b01d6524a16af3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
plugins_list = []
class PluginType(type):
    """Metaclass that registers every class created with it in the global
    `plugins_list` registry."""
    def __init__(cls, name, bases, attrs):
        super(PluginType, cls).__init__(name, bases, attrs)
        # Register the plugin in the list exactly once.
        # (Idiom fix: `cls not in` instead of `not cls in`.)
        if cls not in plugins_list:
            plugins_list.append(cls)
class PluginBase(object):
    '''
    Base class for all plugins.

    NOTE(review): `__metaclass__` is the Python 2 spelling (consistent with
    this file's py2 shebang); under Python 3 this attribute is ignored and
    subclasses would not be auto-registered.
    '''
    __metaclass__ = PluginType
    pass
|
7,710 | 2783fc24806c323ab4ac44fbac55eef73142ab80 | from django.db import models
# Create your models here.
from django.db import models
# Create your models here.
class Project(models.Model):
    """A project record backed by the existing 'project' table."""
    # Primary key is supplied by the caller, not auto-generated.
    project_id = models.IntegerField(primary_key=True)
    project_name = models.CharField(max_length=50)
    project_description = models.CharField(max_length=200, blank=True, null=True)
    project_address = models.CharField(max_length=100, blank=True, null=True)
    project_city = models.CharField(max_length=50, blank=True, null=True)
    project_pincode = models.CharField(max_length=10, blank=True, null=True)
    project_status = models.CharField(max_length=10, blank=True, null=True)

    class Meta:
        db_table = 'project'
        managed = True
class Facility(models.Model):
    """A facility belonging to a Project (table 'facility')."""
    # Primary key is supplied by the caller, not auto-generated.
    facility_id = models.IntegerField(primary_key=True)
    facility_name = models.CharField(max_length=50)
    facility_description = models.CharField(max_length=100, blank=True, null=True)
    # DO_NOTHING: deleting a Project leaves facilities pointing at it.
    project = models.ForeignKey('Project', models.DO_NOTHING, null=True)
    locked_for_edit = models.BooleanField(blank=True, null=True)

    class Meta:
        db_table = 'facility'
        managed = True
class Zone(models.Model):
    """A zone within a Facility (table 'zone'); pk is auto-generated,
    unlike Project/Facility."""
    zone_id = models.AutoField(primary_key=True)
    zone_name = models.CharField(max_length=20)
    zone_description = models.CharField(max_length=100, blank=True, null=True)
    facility = models.ForeignKey(Facility, models.DO_NOTHING, null=True)

    class Meta:
        db_table = 'zone'
        managed = True
|
7,711 | c00db6d6fd903236de37ccc029ed30fd46dccdef | import numpy as np
import pandas as pd
import matplotlib as plt
import scipy.linalg
from distance_metrics import *
import time
import random
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
################################################################
# PCA #
################################################################
def project(X, U, p=None):
    """Project X (examples x features) onto the basis U, keeping the first
    p components exact and collapsing the remaining ones to their column
    means.  Returns (Z, X2): the projection and the reconstruction.

    Fix: `None` comparison now uses `is` (identity) instead of `==`.
    """
    if p is None:
        p = X.shape[1]
    Z = np.matmul(X, U)
    # Discarded trailing components are replaced by their per-column mean
    # so the reconstruction loses only the variance beyond component p.
    Z[:, p:] = np.mean(Z[:, p:], axis=0)
    X2 = np.matmul(Z, U.transpose())
    return (Z, X2)
def PCA(X, threshold = 0.9):
    """Principal component analysis via eigendecomposition of the
    (unnormalized) covariance matrix of X.

    Returns [Z, p, X3, U, W]: projection, number of components kept,
    reconstruction, eigenvectors (columns), eigenvalues (descending).
    """
    X2 = X - np.mean(X, axis = 0)
    S = np.matmul(X2.transpose(), X2) #Covariance Matrix (unnormalized)
    [W,U] = np.linalg.eigh(S) #eigen vectors in columns
    # eigh returns eigenvalues in ascending order; flip both to descending.
    W = np.flip(W, axis = 0)
    U = np.flip(U, axis = 1)
    validity = np.cumsum(W)/np.sum(W) #represents validity of choosing first i+1 eigenvalues
    p = np.argmax(validity>=threshold) + 1
    # Keep every dimension when the threshold is trivial (p<=1) or exact.
    if p<=1 or threshold == 1: p = X.shape[1]
    # NOTE(review): the *uncentered* X is projected although S was computed
    # from centered data — confirm this is intended.
    [Z, X3] = project(X, U, p)
    #Projection, P, Reconstruction, EigenVectors, EigenValues
    return [Z, p, X3, U, W]
################################################################
# Whitening #
################################################################
def whiteningTransform(X, W, U):
    """Whiten X given eigenvalues W and eigenvectors U:
    Z = (L^(-1/2) U^T (X - mean)^T)^T, rows remain examples.

    NOTE(review): a zero (or negative) eigenvalue in W makes L^(-1/2)
    undefined — presumably callers pass only retained positive eigenvalues.
    """
    L = np.diag(W)
    Z = np.transpose(np.matmul(np.matmul(scipy.linalg.fractional_matrix_power(L, -0.5), U.transpose()), (X - np.mean(X, axis = 0)).transpose()))
    return Z
|
# Container sequences (hold references to items of any type):
# list, tuple, collections.deque, dict
# Flat sequences (store their values directly):
# str
# Mutable sequences:
# list, collections.deque, dict
# Immutable sequences:
# tuple, str
"""
Read the x and y coordinates of a point in R² and print its distance
from the origin (0, 0).
"""
import math

print("Origem = 0")
# NOTE(review): int() rejects fractional input — float() may be intended
# for arbitrary points in R².
x = int(input("X: "))
y = int(input("Y: "))
aux = (x*x)+(y*y)
dist = math.sqrt(aux)
print("Distância da origem {:.2f}".format(dist))
7,714 | 00f8a56b160cab22bf73c0d2397eb2c411e8c966 | import sys, getopt
sys.path.append('.')
import RTIMU
import os.path
import time
import math
import encoders
import motors
#right is master, left is slave
master_power = .6
slave_power = -.6
right_num_revs = 0
left_num_revs = 0
kp = .5
encoders.init()
motors.init()
en_left, en_right = encoders.read()
SETTINGS_FILE = "RTIMULib"
def adjustMotorPowers():
    """One proportional correction step for the slave (left) motor, then
    clear the encoders and wait 100 ms.

    NOTE(review): a P controller normally *multiplies* the error by the
    gain; here the error is *divided* by kp (0.5), i.e. doubled — confirm
    `error * kp` was not intended.
    """
    global slave_power
    global en_left
    global en_right
    global kp
    # The motors run in opposite directions (master .6, slave -.6), so the
    # encoder counts presumably have opposite signs and their sum is drift.
    error = en_right + en_left
    slave_power -= error/kp
    encoders.clear()
    time.sleep(.1)
def readEncoder():
    """Poll both wheel encoders and accumulate revolution totals whenever
    either reading changes.

    NOTE(review): the full current readings are added to the totals on
    every change, not the delta since the last read — confirm whether
    encoders.read() returns incremental counts.
    """
    global en_left
    global en_right
    global right_num_revs
    global left_num_revs
    new_en_left, new_en_right = encoders.read()
    if(new_en_right != en_right or new_en_left != en_left):
        en_right = new_en_right
        right_num_revs += en_right
        en_left = new_en_left
        left_num_revs += en_left
print("Using settings file " + SETTINGS_FILE + ".ini")
if not os.path.exists(SETTINGS_FILE + ".ini"):
print("Settings file does not exist, will be created")
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
print("IMU Name: " + imu.IMUName())
if (not imu.IMUInit()):
print("IMU Init Failed")
sys.exit(1)
else:
print("IMU Init Succeeded")
# this is a good time to set any fusion parameters
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
poll_interval = imu.IMUGetPollInterval()
print("Recommended Poll Interval: %dmS\n" % poll_interval)
old_x = 0
old_y = 0
old_z = 0
while True:
if imu.IMURead():
# x, y, z = imu.getFusionData()
# print("%f %f %f" % (x,y,z))
data = imu.getIMUData()
fusionPose = data["fusionPose"]
x = math.degrees(fusionPose[0])
y = math.degrees(fusionPose[1])
z = math.degrees(fusionPose[2])
if(abs(x-old_x)>0.3 or abs(y-old_y)>0.3 or abs(z-old_z)>0.3):
print("r: %f p: %f y: %f" % (math.degrees(fusionPose[0]),math.degrees(fusionPose[1]), math.degrees(fusionPose[2])))
old_x = x
old_y = y
old_z = z
time.sleep(poll_interval*1.0/1000.0)
try:
print(str(right_num_revs)+" "+str(left_num_revs))
motors.speed(slave_power, master_power)
adjustMotorPowers()
readEncoder()
except KeyboardInterrupt:
break
motors.cleanup()
|
7,715 | 655e6531dc21dcdf8fa827184444cee483492b81 | __author__ = "Sarah Hazell Pickering (sarah.pickering@anu.edu.au)"
__date__ = "2018-11-15"
""" QC and Trimming with fastp
Trimming and QC with fastp.
Then subsampling of reads via seqtk.
Now starts with a sample/sample.file structure.
Number of reads to sample is can be supplied via pairs_to_sample
parameter of the sub_sample rule. Additional options can be passed
to fastp via the 'extra' parameter of the fastp rule.
Now can do multiple subsampling runs from the same script
"""
import random
#configfile: "config.json"
RAW = config["raw_dir"]
NOTATION = config["pair_notation"]
QC = "output_data/qc/"
SUB = QC + "subsamples/"
NO_READS = 50000
FASTP_PARAMS = ""
rule all:
version:
"3.0"
input:
expand(SUB + str(NO_READS) + "/{sample}_{notation}{pair}_subsample.fastq.gz",
sample = config["samples"],
notation = NOTATION,
pair = ["1", "2"])
rule fastp_simple_dir:
"""For use when all fastq files are in a single directory. """
version:
"3.5"
input:
r1_raw = RAW + "{sample}_{notation}1_001.fastq.gz",
r2_raw = RAW + "{sample}_{notation}2_001.fastq.gz"
params:
extra = FASTP_PARAMS,
html = QC + "{sample}.html",
json = QC + "{sample}.json"
#onda: "envs/fastp.yml"
output:
r1 = QC + "trimmed/{sample}_{notation}1_trim.fastq.gz",
r2 = QC + "trimmed/{sample}_{notation}2_trim.fastq.gz"
shell:
"fastp -i {input.r1_raw} -I {input.r2_raw} "
"-o {output.r1} -O {output.r2} "
"-h {params.html} -j {params.json} "
"{params.extra}"
rule fastp_twotier_dirs:
    """For use when fastq files are distributed between directories
    of the same name. e.g. base_dir/{sample}/{sample}.fastq
    """
    version:
        "3.5"
    params:
        extra = FASTP_PARAMS,
        html = QC + "{sample}.html",
        json = QC + "{sample}.json"
    input:
        r1_raw = RAW + "{sample}/{sample}_{notation}1_001.fastq.gz",
        # Fix: mirror r1_raw — the hard-coded "_R2_" broke runs whose
        # pair_notation config is anything other than "R".
        r2_raw = RAW + "{sample}/{sample}_{notation}2_001.fastq.gz"
    #conda: "envs/fastp.yml"
    output:
        r1 = QC + "trimmed/{sample}_{notation}1_trim.fastq.gz",
        r2 = QC + "trimmed/{sample}_{notation}2_trim.fastq.gz"
    shell:
        "fastp -i {input.r1_raw} -I {input.r2_raw} "
        "-o {output.r1} -O {output.r2} "
        "-h {params.html} -j {params.json} "
        "{params.extra}"
rule sub_sample:
version:
"4.0"
input:
trim_reads1 = QC + "trimmed/{sample}_{notation}1_trim.fastq.gz",
trim_reads2 = QC + "trimmed/{sample}_{notation}2_trim.fastq.gz"
params:
pairs_to_sample = NO_READS
#conda: "envs/fastp.yml"
output:
subsample1 = SUB + str(NO_READS) + "/{sample}_{notation}1_subsample.fastq",
subsample2 = SUB + str(NO_READS) + "/{sample}_{notation}2_subsample.fastq"
run:
seed = random.randrange(10*len(config["samples"]))
shell("seqtk sample -s{seed} {input.trim_reads1} {params.pairs_to_sample} "
" > {output.subsample1} \n"
"seqtk sample -s{seed} {input.trim_reads2} {params.pairs_to_sample} "
" > {output.subsample2} ")
rule zipper:
input:
SUB + str(NO_READS) + "/{sample}_{notation}{pair}_subsample.fastq"
output:
SUB + str(NO_READS) + "/{sample}_{notation}{pair}_subsample.fastq.gz"
shell:
"gzip {input}"
|
7,716 | 2b7415d86f9157ae55228efdd61c9a9e9920bc5c | __author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
    """Completion handler for chunked uploads; md5 verification is skipped."""
    do_md5_check = False

    def get_response_data(self, chunked_upload, request):
        """
        Data for the response. Should return a dictionary-like object.
        Called *only* if POST is successful.
        """
        files = {'files': {}}
        files['files']['name'] = chunked_upload.filename
        files['files']['id'] = chunked_upload.id
        # offset holds the byte count; convert to megabytes for the UI.
        files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        # Fix: dropped the dead `str = jsonpickle.encode(files)` line, which
        # shadowed the builtin and encoded a value nobody read.
        return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
    """Persist a small uploaded file (<= 260MB) to disk and return
    jquery-upload metadata as JSON; larger files use the upload method in
    the ChunkedUpload class."""
    from django.utils import timezone

    # need to make a chunked upload record to store details of the file
    if request.method == 'POST':
        c = {}
        f = request.FILES['file']
        fname = f.__str__()
        attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
        chunked_upload = ChunkedUpload(**attrs)
        # file starts empty
        chunked_upload.file.save(name='', content=ContentFile(''), save=True)
        path = chunked_upload.file
        # Fix: the context manager guarantees the handle is closed even if
        # a chunk write raises (the original leaked it on error).
        with open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        c.update(csrf(request))
        # create output structure to pass back to jquery-upload
        files = {'files': {}}
        files['files']['name'] = f._name
        files['files']['size'] = path.size / (1000 * 1000.0)
        files['files']['id'] = chunked_upload.id
        files['files']['url'] = ''
        files['files']['thumbnailUrl'] = ''
        files['files']['deleteUrl'] = ''
        files['files']['deleteType'] = 'DELETE'
        # Fix: payload variable renamed (was `str`, shadowing the builtin).
        payload = jsonpickle.encode(files)
        return HttpResponse(payload, content_type='json')
    # NOTE(review): non-POST requests fall through and return None —
    # confirm whether an HTTP 405 response was intended.
def resume_chunked(request):
    """Return the user's furthest-advanced incomplete upload for the given
    filename (so the client can resume it), or an encoded empty string."""
    file_name = request.GET.get('filename')
    user_id = request.user.id
    # retrieve incomplete file for user with this name (largest offset first)
    d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
        '-offset')[:1]
    if d:
        out = serializers.serialize('json', d)
        return HttpResponse(jsonpickle.encode(out))
    else:
        return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
    """Return all of the current user's incomplete uploads (oldest first)
    serialized as JSON, or an encoded empty string when there are none."""
    user_id = request.user.id
    d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
    if d:
        out = serializers.serialize('json', d)
        return HttpResponse(jsonpickle.encode(out))
    else:
        return HttpResponse(jsonpickle.encode(''))
def hash_upload(request):
    # utility method to create an md5 hash of a given file path
    """Compute the md5 of an uploaded file, store it on the upload record
    and in the mongo datafile record, and return the hash as JSON."""
    # open uploaded file
    file_id = request.GET['file_id']
    print('hash started ' + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    # now hash opened file
    md5 = hashlib.md5()
    with open(file_name, 'rb') as f:
        # 8 KB chunks keep memory flat for large files
        for chunk in iter(lambda: f.read(8192), b''):
            md5.update(chunk)
    file_obj.hash = md5.hexdigest()
    file_obj.save()
    output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
    # update record in mongo
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
    # NOTE(review): profile_id is read here but never passed to BrokerDA —
    # confirm whether it should be.
    profile_id = request.session['profile_id']
    component = "datafile"
    BrokerDA(target_id=str(record_object.get("_id", str())),
             component=component,
             auto_fields=auto_fields
             ).do_save_edit()
    out = json.dumps(output_dict)
    print('hash complete ' + file_id)
    return HttpResponse(out, content_type='json')
def inspect_file(request):
    # utility method to examine a file and return meta-data to the frontend
    """Inspect an uploaded file, decide its type and whether it should be
    compressed, record the datafile metadata via BrokerDA, and return
    {'file_type', 'do_compress'} as JSON."""
    output_dict = {'file_type': 'unknown', 'do_compress': False}
    # get reference to file
    file_id = request.GET['file_id']
    chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
    file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
    # size threshold to determine if a file should be compressed
    zip_threshold = 200000000  # size in bytes
    # check if file is compressed
    is_zipped = u.is_gzipped(file_name)
    if chunked_upload.offset >= zip_threshold and not is_zipped:
        output_dict['do_compress'] = True
    # check for file type
    if u.is_pdf_file(file_name):
        output_dict['file_type'] = 'pdf'
    else:
        try:
            if u.is_fastq_file(file_name):
                output_dict['file_type'] = 'fastq'
                if not is_zipped:
                    output_dict['do_compress'] = True
            elif u.is_sam_file(file_name):
                output_dict['file_type'] = 'sam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            elif u.is_bam_file(file_name):
                output_dict['file_type'] = 'bam'
                if not is_zipped:
                    output_dict['do_compress'] = False
            else:  # make file type same as extension
                # NOTE(review): rsplit('.') with no maxsplit splits on every
                # dot, so [1] is the *second* token, not the final extension
                # ('x.tar.gz' -> 'tar'); rsplit('.', 1)[1] may be intended.
                output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
        # NOTE(review): bare except hides real errors (it also absorbs the
        # IndexError raised for extension-less names) — narrow if possible.
        except:
            output_dict['file_type'] = 'unknown'
    # add datafile schema
    chunked_upload.type = output_dict['file_type']
    chunked_upload.save()
    # ...and obtain the inserted record
    profile_id = request.session['profile_id']
    component = "datafile"
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_id")] = file_id
    auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
    auto_fields[DataFile().get_qualified_field("file_location")] = file_name
    auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
    auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
    # get default type from schema
    type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
    if type:
        type = type[0]["default_value"]
        auto_fields[DataFile().get_qualified_field("type")] = type
    df = BrokerDA(context=dict(),
                  profile_id=profile_id,
                  component=component,
                  auto_fields=auto_fields,
                  visualize="last_record"
                  ).do_save_edit().get("record_object", dict())
    out = jsonpickle.encode(output_dict)
    return HttpResponse(out, content_type='json')
def zip_file(request):
    # need to get a reference to the file to zip
    """Gzip an uploaded file in place (via a temp file), replace the
    original on disk, update the upload record and the mongo datafile
    record, and return the new name/size as JSON."""
    file_id = request.GET['file_id']
    print("zip started " + file_id)
    file_obj = ChunkedUpload.objects.get(pk=file_id)
    # get the name of the file to zip and change its suffix to .gz
    output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
    output_file_name = file_obj.filename + '.gz'
    try:
        # open the file as gzip acrchive...set compression level
        # NOTE(review): try/finally with no except — if gzip.open or the
        # open() below raises, the finally block references an unbound name
        # (myzip/src) and masks the original error with a NameError.
        temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
        myzip = gzip.open(temp_name, 'wb', compresslevel=1)
        src = open(output_file_location, 'r')
        # write input file to gzip archive in n byte chunks
        # NOTE(review): the source is opened in *text* mode and re-encoded
        # as UTF-8; a non-UTF-8 (binary) upload would raise
        # UnicodeDecodeError here — confirm only text files reach this path.
        n = 100000000
        for chunk in iter(lambda: src.read(n), ''):
            myzip.write(bytes(chunk, 'UTF-8'))
    finally:
        myzip.close()
        src.close()
    print('zip complete ' + file_id)
    # now need to delete the old file and update the file record with the new file
    new_file_name = output_file_location + '.gz'
    os.rename(temp_name, new_file_name)
    os.remove(output_file_location)
    # calculate new file size
    stats = os.stat(new_file_name)
    new_file_size = stats.st_size / 1000 / 1000
    # update filename
    file_obj.filename = output_file_name
    file_obj.file.name = new_file_name
    # update file size
    file_obj.offset = stats.st_size
    file_obj.save()
    out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
    # update record in mongo
    record_object = DataFile().get_by_file_id(file_id)
    auto_fields = dict()
    auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
    auto_fields[DataFile().get_qualified_field("name")] = output_file_name
    auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
    profile_id = request.session['profile_id']
    component = "datafile"
    BrokerDA(target_id=str(record_object.get("_id", str())),
             component=component,
             auto_fields=auto_fields
             ).do_save_edit()
    out = jsonpickle.encode(out)
    return HttpResponse(out, content_type='json')
|
7,717 | 95845aeb47e0d2c579739767ece35f4134564d98 | import json
import math
import rospy
import sys
import RPi.GPIO as GPIO
from std_msgs.msg import Float32
from geometry_msgs.msg import Point32
from time import sleep
# Pulse widths (ms) for the servo rotations we use.
d_45 = 1.0
d_90 = 1.5
d_180 = 2.5
frequency = 50.0  # PWM frequency in Hz
t_per_cycle = (1.0 / frequency) * 1000.0  # one PWM period, in ms
# Convert each pulse width to a duty-cycle percentage of the 20 ms period.
duty_45 = (d_45 / t_per_cycle) * 100.0
duty_90 = (d_90 / t_per_cycle) * 100.0
duty_180 = (d_180 / t_per_cycle) * 100.0
# Gear spec for the x/y axes: 20-tooth gear of radius 2.25 cm.
radius = 2.25
cir = 2.0 * radius * math.pi  # gear circumference (cm)
d = cir / 20.0                # linear travel per tooth (cm)
cm_theta = 18.0 / d           # servo degrees per cm of travel (18 deg/tooth)
# Gear spec for the z axis: 10-tooth gear of radius 1.0 cm.
z_radius = 1.0
z_cir = 2.0 * z_radius * math.pi
z_d = z_cir / 10.0
z_cm_theta = 36.0 / d  # NOTE(review): divides by d, not z_d — looks inconsistent with the z gear spec above; confirm intended
class Servo_node:
    """ROS node driving three hobby servos (x, y, z) via Raspberry Pi PWM.

    Subscribes to per-servo angle topics (/servo_ctrl/s1..s3, Float32
    degrees) and to a Point32 position topic that is converted to servo
    angles using the gear constants defined at module level.
    """
    def __init__(self):
        """Register the node, claim the GPIO pins and home the servos."""
        rospy.init_node('servo_node', anonymous=False)
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        # Setting up for pin 12. Make sure to adjust for your own needs
        motor_x = 13
        motor_y = 12
        motor_z = 20
        GPIO.setup(motor_x, GPIO.OUT)
        GPIO.setup(motor_y, GPIO.OUT)
        GPIO.setup(motor_z, GPIO.OUT)
        # 0.75-2.75 (usable pulse-width range, ms)
        self.pwm_x = GPIO.PWM(motor_x, frequency)
        # 2-3
        self.pwm_y = GPIO.PWM(motor_y, frequency)
        # 0.8-1.8
        self.pwm_z = GPIO.PWM(motor_z, frequency)
        # Set start position to (0,0): pulse each servo briefly, then drop
        # the duty cycle to 0 so the servo stops being actively driven.
        self.pwm_z.start(duty_45)
        sleep(0.5)
        self.pwm_z.ChangeDutyCycle(0)
        self.pwm_x.start(duty_180)
        sleep(0.5)
        self.pwm_x.ChangeDutyCycle(0)
        self.pwm_y.start(duty_45)
        sleep(0.5)
        self.pwm_y.ChangeDutyCycle(0)
        # Topics take an angle (degrees) as the message payload.
        self.sub_x = rospy.Subscriber("/servo_ctrl/s1", Float32, self.set_servo_x_angle)
        self.sub_y = rospy.Subscriber("/servo_ctrl/s2", Float32, self.set_servo_y_angle)
        self.sub_z = rospy.Subscriber("/servo_ctrl/s3", Float32, self.set_servo_z_angle)
        # Topic for (x, y, z) position commands in cm.
        self.pos_sub = rospy.Subscriber("/servo_ctrl/pos", Point32, self.set_coordinate)
    def set_servo_x_angle(self, msg):
        """Drive the x servo to the angle (degrees) in msg.data, then idle it."""
        rospy.loginfo("setting servo")
        self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))# Note tha this does not correspond to angle
        sleep(1)
        self.pwm_x.ChangeDutyCycle(0)
        sleep(0.5)
    def set_servo_y_angle(self, msg):
        """Drive the y servo to the angle (degrees) in msg.data, then idle it."""
        rospy.loginfo("setting servo")
        self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle
        sleep(1)
        self.pwm_y.ChangeDutyCycle(0)
        sleep(0.5)
    def set_servo_z_angle(self, msg):
        """Drive the z servo to the angle (degrees) in msg.data, then idle it."""
        rospy.loginfo("setting servo")
        self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle
        sleep(1)
        self.pwm_z.ChangeDutyCycle(0)
        sleep(0.5)
    def set_coordinate(self, msg):
        """Convert a Point32 position (cm) into servo angles and publish them.

        A coordinate of -1 on any axis releases that servo (duty 0) instead
        of moving it; otherwise the move is applied only inside the valid
        workspace 0 <= x <= 2.5, 0 <= y <= 4.
        """
        # Conversion between coordinate and motor angles.
        rospy.loginfo("setting position")
        # Correction for motors: offsets make the touch probe sit at (0,0)
        # initially; may need to change depending on your motor.
        x_offset = 0
        y_offset = -5
        z_offset = 0
        x = msg.x
        y = msg.y
        z = msg.z
        # NOTE(review): creating new Publishers on every callback is costly;
        # consider constructing these once in __init__.
        z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)
        x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)
        y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)
        x_angle = 180 - x * cm_theta + x_offset
        y_angle = 45 + y * cm_theta + y_offset
        z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset
        if x == -1 or y == -1 or z == -1:
            if x == -1:
                self.pwm_x.ChangeDutyCycle(0)
            else:
                x_pub.publish(Float32(x_angle))
            if y == -1:
                self.pwm_y.ChangeDutyCycle(0)
            else:
                y_pub.publish(Float32(y_angle))
            if z == -1:
                self.pwm_z.ChangeDutyCycle(0)
            else:
                z_pub.publish(Float32(z_angle))
        elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:
            # z_pub.publish(Float32(45))
            x_pub.publish(Float32(x_angle))
            y_pub.publish(Float32(y_angle))
            z_pub.publish(Float32(z_angle))
    def saturate_input(self, angle):
        """Map an angle in degrees to a PWM duty cycle, clamped to [0, 100].

        NOTE(review): the line anchors 45 degrees at duty_45 but uses the
        90-to-180-degree slope; confirm this matches the servo calibration.
        """
        # Conversion from angle to duty cycle.
        print(angle)
        pw_per_deg = (duty_180 - duty_90) / 90;
        duty = pw_per_deg * (angle - 45) + duty_45
        print(duty)
        return max(min(duty,100),0)
def main_loop():
    """Idle at 10 Hz until ROS signals shutdown, keeping callbacks alive."""
    idle = rospy.Rate(10)  # 10Hz
    while not rospy.is_shutdown():
        idle.sleep()
if __name__ == "__main__":
    # Instantiate the node (registers subscribers and homes the servos),
    # then block in the 10 Hz idle loop until ROS shutdown.
    servo = Servo_node()
    main_loop()
|
7,718 | cb6ed6422a5591f1de0a947f75ad080f250e8443 |
# Advent of Code day 6: customs declaration responses.


def count_group_responses(group_responses):
    """Return ``(any_count, all_count)`` for one group's response strings.

    any_count -- number of distinct questions ANY member answered "yes" to
    all_count -- number of questions EVERY member answered "yes" to
                 (counted per character of the first member's responses,
                 matching the original script's behaviour)
    """
    # Part 1: flatten all members' answers and count unique characters.
    any_count = len({char for member in group_responses for char in member})
    # Part 2: a question counts only if every member answered it.
    all_count = 0
    for char in group_responses[0]:
        if all(char in member for member in group_responses):
            all_count += 1
    return any_count, all_count


def main():
    """Read day6_declarations.txt and print the part 1 / part 2 totals."""
    # Context manager closes the handle (the original leaked it).
    with open('day6_declarations.txt', 'r') as declarations_file:
        lines = declarations_file.readlines()

    count_any_member_has_response = 0    # total for part 1
    count_all_members_have_response = 0  # total for part 2
    group_responses = []  # responses of each member of the current group

    # Groups are separated by blank lines.  The original script detected the
    # final group with ``line == lines[-1]``, which misfires whenever an
    # earlier line duplicates the last line's text (that group would be
    # closed early); flushing any pending group after the loop avoids this.
    for line in lines:
        line = line.strip()
        if line:
            group_responses.append(line)
        elif group_responses:
            any_count, all_count = count_group_responses(group_responses)
            count_any_member_has_response += any_count
            count_all_members_have_response += all_count
            group_responses = []
    if group_responses:
        any_count, all_count = count_group_responses(group_responses)
        count_any_member_has_response += any_count
        count_all_members_have_response += all_count

    # print out final counts for parts 1 and 2
    print('TOTAL COUNT FOR ANY MEMBER HAS RESPONSE =', count_any_member_has_response)
    print('TOTAL COUNT FOR ALL MEMBER HAVE RESPONSES =', count_all_members_have_response)


if __name__ == '__main__':
    main()
|
7,719 | 25f3c9f48b779d2aec260d529529156ff3c508ca | '''
文件读写的步骤
1.打开文件
2.处理数据
3.关闭文件
1.open函数:
fileobj = open(filename, mode)
fileobj是open()函数返回的文件对象
mode第一个字母指明文件类型和操作的字符串,第二个字母是文件类型:
t(可省略)文本类型,b二进制类型。
文件打开模式:r只读(默认),w覆盖写(不存在则新创建)
a追加模式(不存在则创建)
2.read(size):从文件读取长度为size的字符串,若未给定或为负则读取所有内容
3.readline():读取整行返回字符串
4.readlines():读取所有行并返回列表
5.write(s):把字符串s的内容写入文件
'''
'''
#复制一个文件
fileobj1 = open("test1.txt", "r")
fileobj2 = open("test2.txt", "w")
s = fileobj1.read()
fileobj2.write(s)
fileobj1.close()
fileobj2.close()
'''
#多行文件读写
fileobj3 = open("lines.txt", "r")
for line in fileobj3.readlines():
print(line)
fileobj3.close() |
7,720 | 29cae66fdca65020a82212e5eabbc61eb900e543 | # test_LeapYear.py
# By Alex Graalum
import unittest
import LeapYear
class test_leapyear(unittest.TestCase):
    """Unit tests covering each branch of the Gregorian leap-year rule."""
    def test_four(self):
        """A year divisible by 4 (but not by 100) is a leap year."""
        self.assertEqual(LeapYear.leapyear(2012), True)
    def test_hundred(self):
        """A century year not divisible by 400 is NOT a leap year."""
        self.assertEqual(LeapYear.leapyear(2100), False)
    def test_fourhundred(self):
        """A year divisible by 400 is a leap year."""
        self.assertEqual(LeapYear.leapyear(2000), True)
    def test_normal(self):
        """An ordinary year not divisible by 4 is not a leap year."""
        self.assertEqual(LeapYear.leapyear(2002), False)
if __name__ == '__main__':
    unittest.main()
|
7,721 | d1077107a5cd3a9f489f74b030a698b0521841f3 | # 파이썬 딕셔너리
# 범용적으로 가장 많이 사용되는 타입
# key와 value의 대용관계 type
# 순서 X, key 중복 X, 수정 O, 삭제 O
# {}
# class란 실세계(오브젝트)의 명사,동사적인 특징들을 추상화시키는 것, 즉 프로그램 내 인스턴트(객체)를 추출하는 템플릿이다
# class는 틀이고 인스턴스는 틀에의해 만들어지는 결과물.하여 instance.class()로 표현
#temp = {}
#print(type(temp))
dic01 = {'name' : 'seop',
'age' : 48,
'address' : 'seoul',
'birth' : '730919',
'gender' : True}
#print('dic - ',dic01,type(dic01))
#print(dir(dic01)) #iter가 있으므로 순환반복가능
# key 유무를 판단하기 위해서
#print('name' in dic01)
# 요소를 추가하는 방법
#dic01['marriage'] = False
#print(dic01)
#dic01['marriage'] = True
#print(dic01)
# 데이터 확인
#print("데이터 확인 - ",dic01['birth'])
#개발자성향에 따라 데이터를 리스트하여 관리하기도함, 각각의 값들은 튜플 이용
dic02 = dict( [('name' , 'seop'),
('age' , 48),
('address' , 'seoul'),
('birth','730919'),
('gender' , True)] )
#print("tuple을 이용한 dict 생성 -",dic02)
#변수에다 값을 할당하는 방법
dic03 = dict( name = 'seop',
age = 48,
address = 'seoul',
birth = '730919',
gender = True)
#출력
#print('dic03 -',dic03['Name']) #키값을 이용, 대소문자 Name때문에 오류남
#print('dic03 -',dic03.get('Name')) #함수를 이용, 해당하는 밸류를 가져오는 것이라 Name에 담긴 값들이 없어서 None을 출력
#print('len - ', len(dic03))
# dict_keys(키), dict_values(값), dict_items(키와값)
#print('dict_keys -',list(dic03.keys())) #각각의 키들은 리스트, 루프를 돌려서 값들을 꺼내올 수도 있다, 리스트화 시킬수도 있음
#print('dict_values -', list(dic03.values())) #밸류도 키와 마찬가지로 각각의 값 리스트
#print('dict_items -',list(dic03.items()))
# for key in dic03.keys() :
# print("{0},{1}".format(key,dic03[key]))
# print("key : {0}, value : {1}".format(key,dic03.get(key)))
# for value in dic03.values() :
# print(value)
# 튜플 패킹 & 언패킹
#t = ('foo','bar','baz','qux') ##괄호형 쳐주고 선언하는 것,패킹
#print(type(t))
#(x1,x2,x3,x4) = t ##다른변수에 담을때 언패킹해서 담아준다(괄호형은 보기편하게묶은것), 언패킹할때 튜플에 있는 값들의 개수가 담을 변수의 개수에 맞게 선언이 되어있어야함
#(x1,x2,x3,x4) = ('foo','bar','baz','qux')
#print(x1,x2,x3,x4)
a, *b, c = (0,1,2,3,4,5) #언패킹할때 개수가 안맞을때 *를 사용하여 처리할 수도 있음, 보통 *이 하나만 나오는 경우가 많음
#print(a)
#print(b)
#print(c)
#for (key , value) in dic03.items() :
# print("key : {0}, value : {1}".format(key,value))
# 삭제 pop(), del
#del dic03['gender']
#print(dic03)
#print('pop -', dic03.pop('birth'))
#print('dic03 - ',dic03)
#dic03.clear()
#print('dic03 - ', dic03) |
7,722 | 138abb40fda0f19b4a74a294d5cd0dd326dc59ce | from getMerriamWebster import searchMerriamWebster
from searchWikipedia import searchWikipedia
from synonyms import searchSynonyms
class Scraping:
    """Builds candidate-word 'domains' for crossword clues via web lookups.

    For each across/down clue the domain is a space-joined string of
    candidate words gathered from Wikipedia and synonym sources.
    """
    def __init__(self, clues, answers, gridIndex):
        # clues: {"across": {num: clue_text}, "down": {num: clue_text}}
        self.clues = clues
        self.domains = {"across": {}, "down":{}}
        # answers / gridIndex: 5x5 grids, used only by the cheat() helper
        self.answers = answers
        self.gridIndex = gridIndex
    def setDomains(self):
        """Populate self.domains with scraped candidates for every clue."""
        for down in self.clues["down"]:
            self.domains["down"][down] = self.search(self.clues["down"][down])
        for across in self.clues["across"]:
            self.domains["across"][across] = self.search(self.clues["across"][across])
        #======================== CHEAT =============================
        #self.cheat()
    def getClueList(self, clue):
        """Wrap a single clue in a list (helper for future multi-clue search)."""
        clueList = [clue]
        return clueList
    def search(self, clue):
        """Return a space-joined string of candidate words for *clue*.

        Failures from individual sources are printed and swallowed so one
        bad lookup cannot abort the whole domain build.
        NOTE(review): the bare ``except:`` clauses hide real errors;
        consider catching Exception (or narrower) instead.
        """
        domain = set()
        wiki_set = set()
        synonym_set = set()
        toSearch = clue
        """
        print("Google search for:", toSearch)
        try:
            domain = domain + self.getGoogle(toSearch)
        except:
            print("An exception occurred")
        """
        print("Wikipedia search for:", toSearch)
        try:
            wiki_set = wiki_set | self.getWiki(toSearch)
        except:
            print("An exception occurred")
        print("Synonym search from Datamuse and Merriam-Webster for:", toSearch)
        try:
            synonym_set = synonym_set | self.getSynonyms(toSearch)
        except:
            print("An exception occurred")
        """
        print("Merriam Webster search for:", toSearch)
        try:
            merriam_set = merriam_set | self.getMerriam(toSearch)
        except:
            print("An exception occurred")
        """
        domain = domain.union(wiki_set, synonym_set)
        return ' '.join(str(e) for e in domain) #''.join(str(e) for e in words)
    def getGoogle(self, toSearch):
        # Placeholder: Google scraping is not implemented.
        return "toSearch"
    def getWiki(self, toSearch):
        """Delegate to the Wikipedia scraper; returns a set of words."""
        return searchWikipedia(toSearch)
    def getMerriam(self,toSearch):
        """Delegate to the Merriam-Webster scraper."""
        return searchMerriamWebster(toSearch)
    def getSynonyms(self, toSearch):
        """Delegate to the Datamuse/Merriam-Webster synonym lookup."""
        return searchSynonyms(toSearch, self.clues["across"], self.clues["down"])
    def cheat(self):
        """Append the known grid answers to each clue's domain (debug aid)."""
        for across in self.clues["across"]:
            for row in range(0,5):
                for col in range(0,5):
                    if self.gridIndex[row][col] == across:
                        answer = ""
                        for colIn in range(0,5):
                            if self.answers[row][colIn] != "-":
                                answer = answer + self.answers[row][colIn]
                        self.domains["across"][across] = self.domains["across"][across] + " " + answer
                        #print(answer)
        for down in self.clues["down"]:
            for row in range(0,5):
                for col in range(0,5):
                    if self.gridIndex[row][col] == down:
                        answer = ""
                        for rowIn in range(0,5):
                            if self.answers[rowIn][col] != "-":
                                answer = answer + self.answers[rowIn][col]
                        self.domains["down"][down] = self.domains["down"][down] + " " + answer
                        #print(answer)
"""
scraping = Scraping()
scraping.setDomains()
print(scraping.domains)
""" |
7,723 | 3375bc94d214b0b1c67986d35b0587714dd63bcd | # -*- coding: utf-8 -*-
import os
import sys, getopt
import paho.mqtt.client as mqtt
import random
import _thread
import time
import json
HOST = '0.0.0.0'
PORT = 9090
# gb_freq = 0
CONFIG_PATH = 'config/config.cfg'
ITEMS_PATH = 'config/items.cfg'
MILISECOND = 0.001
class Item(object):
    """One simulated sensor, parsed from a CSV config line of the form
    ``platform_type,sensor_name,topic,frequency``."""

    def __init__(self, string):
        """Parse *string* and populate this item's fields."""
        self.convert_string_to_item(string)

    def convert_string_to_item(self, string):
        """Split a comma-separated config line into the item's fields."""
        fields = str(string).split(',')
        self._platform_type = fields[0]
        self._sensor_name = fields[1]
        self._topic = fields[2]
        # Frequency is messages per minute, stored as an int.
        self._frequent = int(fields[3])

    def get_sensor_name(self):
        """Return the sensor's name."""
        return self._sensor_name

    def get_topic(self):
        """Return the MQTT topic this item publishes to."""
        return self._topic

    def get_frequent(self):
        """Return the publish frequency (messages per minute)."""
        return self._frequent

    def increase_frequent(self):
        """Raise the frequency by 10, echo it, and return the new value."""
        self._frequent = self._frequent + 10
        print(self._frequent)
        return self._frequent
class SimulatorEngine(object):
    """Drives the sensor simulation: loads config, connects to the MQTT
    broker and spawns one publisher thread per active sensor Item."""
    # Legacy run flag used only by the commented-out wait loop in execute().
    _bStop = 1
    def __init__(self):
        """Load broker settings and sensor items, then connect to the broker."""
        # read config
        # NOTE(review): these open() handles are never closed; fine for a
        # one-shot script but consider 'with open(...)'.
        items = [line.rstrip('\n') for line in open(CONFIG_PATH)]
        self._ip_broker = items[0]
        self._port_broker = items[1]
        self._client_name = items[2]
        self._num_of_sensor = 0
        # Flag: grow each sensor's publish frequency over time.
        if items[3] and items[3] == 'True':
            self._is_increase_freq = True
        else:
            self._is_increase_freq = False
        # Flag: add more sensor instances over time.
        if items[4] and items[4] == 'True':
            self._is_increase_instance = True
        else:
            self._is_increase_instance = False
        self.test_time = float(items[5])
        items = [Item(string=line.rstrip('\n')) for line in open(ITEMS_PATH)]
        self._items = items
        self._mqttc = mqtt.Client(self._client_name)
        self._mqttc.connect(self._ip_broker, int(self._port_broker))
        # hostname = socket.gethostname()
    def send_data(self, item):
        """Publish simulated readings for *item* forever (runs in a thread).

        The reading switches to a new random value every 60-3600 s; the
        payload is JSON for 'onem2m' items and XML otherwise, sent at
        item.get_frequent() messages per minute.
        """
        start_time = time.time()
        time_data_change_period = random.randint(60, 3600)
        time_data_change = time.time()
        data_value = random.randint(0, 100)
        print('Change data value. Period {} Value {}'.format(time_data_change_period, data_value))
        while 1:
            next_time = time.time()
            # Roll a fresh random value once the current period has elapsed.
            if next_time - time_data_change >= time_data_change_period:
                time_data_change = next_time
                time_data_change_period = random.randint(60, 3600)
                data_value = random.randint(0, 100)
                print('Change data value. Period {} Value {}'.format(time_data_change_period, data_value))
            if item._platform_type == 'onem2m':
                # message = data_value
                data = {}
                data['value'] = str(data_value)
                data['timestamp'] = "{0:.3f}".format(time.time())
                data['num_of_sensor'] = str(self._num_of_sensor)
                message = json.dumps(data)
            else:
                message = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<obj>
<int val="{value}" name="data"/>
</obj>
'''.format(value=data_value)
            print(message)
            self._mqttc.publish(topic=item.get_topic(), payload=message)
            time.sleep(60 / item.get_frequent())
            print('Topic {} -- Data {}'.format(item.get_topic(), data_value))
            # Optionally ramp the publish frequency up once per hour.
            if self._is_increase_freq:
                if next_time - start_time >= 3600:
                    start_time = next_time
                    item.increase_frequent()
            # if self._is_increase_instance:
            #     if next_time - start_time >= 3600:
            #         start_time = next_time
            #         item.increase_frequent()
    def register_sensor_with_ordinator(self):
        """Register this simulator with the coordinator service via curl."""
        os.system(
            'sensor_detail="$(/bin/hostname -i),$(hostname)" && curl -F "sensor_detail=${sensor_detail}" -F "defined_file=@openhab/demo.items" ${CO_ORDINATOR_DOMAIN}/sensor/define')
    def execute(self, num_of_item_start):
        """Start publisher threads for the next batch of (up to 5) items."""
        try:
            for item in self._items[num_of_item_start:num_of_item_start+5]:
                _thread.start_new_thread(self.send_data, (item,))
                # print(item.get_topic())
        except Exception as e:
            print(e)
        #
        # while self._bStop:
        #     time.sleep(1)
    @property
    def is_increase_instance(self):
        # True when the config asks for instance scaling over time.
        return self._is_increase_instance
    @property
    def items(self):
        # All configured sensor Items.
        return self._items
def main(argv):
    """Entry point: start the first 5 sensors immediately, then add 5 more
    every ``engine.test_time`` seconds while instance scaling is enabled.

    :param argv: command-line arguments (currently unused).
    """
    engine = SimulatorEngine()
    start_time = time.time()
    item_start = 0
    engine._num_of_sensor = 5
    engine.execute(item_start)
    while 1:
        # Yield the CPU between checks: the original busy-waited and pinned
        # a core at 100%.  One-second granularity is ample for test_time.
        time.sleep(1)
        if item_start + 10 <= len(engine.items):
            next_time = time.time()
            if engine.is_increase_instance:
                if next_time - start_time >= engine.test_time:
                    start_time = next_time
                    item_start += 5
                    engine._num_of_sensor = item_start + 5
                    engine.execute(item_start)
if __name__ == '__main__':
    # Forward the CLI args (minus the program name) to the entry point.
    main(sys.argv[1:])
|
7,724 | 1eab2ddda6fdd71db372e978caa6e7d24c7fe78e | """
Написать программу, которая принимает строку
и выводит строку без пробелов и ее длину.
Для удаления пробелов реализовать доп функцию.
""" |
7,725 | 564c613491b0d1797b216a0bd425690e9fae12bc | import json
from asgiref.sync import async_to_sync
from daphne_API.diversifier import activate_diversifier
from daphne_API.models import Design
def send_archs_back(channel_layer, channel_name, archs):
    """Push a batch of architectures back to a websocket consumer.

    Sends a 'ga.new_archs' message over the given channel; the consumer's
    corresponding handler receives ``archs`` as its payload.
    """
    async_to_sync(channel_layer.send)(channel_name,
                                      {
                                          'type': 'ga.new_archs',
                                          'archs': archs
                                      })
def send_archs_from_queue_to_main_dataset(context):
    """Move every design in the background queue into the main dataset.

    Each queued Design is detached from the active context and attached to
    the session's eosscontext.  Once at least 5 designs have accumulated in
    total, the diversifier is re-activated and the counter resets.

    :param context: session context holding ``eosscontext``/``activecontext``.
    :returns: list of ``{'id', 'inputs', 'outputs'}`` dicts for moved designs.
    """
    background_queue_qs = Design.objects.filter(activecontext_id__exact=context.eosscontext.activecontext.id)
    arch_list = []
    for design in background_queue_qs.all():
        design.activecontext = None
        design.eosscontext = context.eosscontext
        design.save()
        context.eosscontext.added_archs_count += 1
        arch_list.append({
            'id': design.id,
            'inputs': json.loads(design.inputs),
            'outputs': json.loads(design.outputs),
        })
    if arch_list:
        # Persist the updated counter once, instead of one redundant DB
        # write per design as before.
        context.eosscontext.save()
    if context.eosscontext.added_archs_count >= 5:
        context.eosscontext.added_archs_count = 0
        context.eosscontext.save()
        activate_diversifier(context.eosscontext)
    return arch_list
7,726 | caa92eb5582135f60a6034cb83d364501361d00e | #!/usr/bin/python
# -*- coding: utf-8 -*-
# you can use print for debugging purposes, e.g.
# print "this is a debug message"
def solution(A):
    """Return the smallest start index of the slice of A with minimal average.

    Any minimal-average slice contains a minimal-average sub-slice of
    length 2 or 3, so only those need checking.  Scanning both lengths
    together in increasing index order with a strict '<' comparison
    guarantees the smallest starting position wins on ties.  The original
    code scanned all pairs before all triples, so an earlier triple tying
    a later pair's minimal average was wrongly skipped (e.g. for
    [0, 3, 0, 9, 1, 1] it returned 4 instead of 0).

    O(N) time, O(1) extra space.  range() (not xrange) keeps this valid on
    both Python 2 and Python 3.
    """
    N = len(A)
    min_avg = (A[0] + A[1]) / 2.0
    min_idx = 0
    for i in range(N - 1):
        pair_avg = (A[i] + A[i + 1]) / 2.0
        if pair_avg < min_avg:
            min_avg = pair_avg
            min_idx = i
        if i < N - 2:
            triple_avg = (A[i] + A[i + 1] + A[i + 2]) / 3.0
            if triple_avg < min_avg:
                min_avg = triple_avg
                min_idx = i
    return min_idx
"""
non-empty zero-indexed array A consisting of N integers is given. A pair of integers (P, Q), such that 0 ≤ P < Q < N, is called a slice of array A (notice that the slice contains at least two elements). The average of a slice (P, Q) is the sum of A[P] + A[P + 1] + ... + A[Q] divided by the length of the slice. To be precise, the average equals (A[P] + A[P + 1] + ... + A[Q]) / (Q − P + 1).
For example, array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
contains the following example slices:
slice (1, 2), whose average is (2 + 2) / 2 = 2;
slice (3, 4), whose average is (5 + 1) / 2 = 3;
slice (1, 4), whose average is (2 + 2 + 5 + 1) / 4 = 2.5.
The goal is to find the starting position of a slice whose average is minimal.
Write a function:
def solution(A)
that, given a non-empty zero-indexed array A consisting of N integers, returns the starting position of the slice with the minimal average. If there is more than one slice with a minimal average, you should return the smallest starting position of such a slice.
For example, given array A such that:
A[0] = 4
A[1] = 2
A[2] = 2
A[3] = 5
A[4] = 1
A[5] = 5
A[6] = 8
the function should return 1, as explained above.
Assume that:
N is an integer within the range [2..100,000];
each element of array A is an integer within the range [−10,000..10,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
"""
Analysis
Detected time complexity:
O(N)
collapse all
Example tests
▶
example
example test
✔
OK
1.
0.067 s
OK
collapse all
Correctness tests
▶
double_quadruple
two or four elements
✔
OK
1.
0.066 s
OK
2.
0.067 s
OK
3.
0.067 s
OK
4.
0.066 s
OK
▶
simple1
simple test, the best slice has length 3
✔
OK
1.
0.066 s
OK
2.
0.065 s
OK
▶
simple2
simple test, the best slice has length 3
✔
OK
1.
0.067 s
OK
▶
small_random
random, length = 100
✔
OK
1.
0.067 s
OK
▶
medium_range
increasing, decreasing (legth = ~100) and small functional
✔
OK
1.
0.066 s
OK
2.
0.067 s
OK
3.
0.067 s
OK
collapse all
Performance tests
▶
medium_random
random, N = ~700
✔
OK
1.
0.066 s
OK
▶
large_ones
numbers from -1 to 1, N = ~100,000
✔
OK
1.
0.168 s
OK
2.
0.143 s
OK
▶
large_random
random, N = ~100,000
✔
OK
1.
0.178 s
OK
▶
extreme_values
all maximal values, N = ~100,000
✔
OK
1.
0.184 s
OK
2.
0.181 s
OK
3.
0.175 s
OK
▶
large_sequence
many seqeneces, N = ~100,000
✔
OK
1.
0.168 s
OK
2.
0.142 s
OK
"""
|
7,727 | 16106250548ef60b475b009116cfeb7a25101637 | import torch.nn as nn
class RNN_instruction_encoder(nn.Module):
    """Embed token-id sequences and encode them with an (optionally
    bidirectional) LSTM or GRU.

    forward() returns the per-step RNN outputs and the final hidden state.
    """

    def __init__(self, vocab_size, word_vec_dim, hidden_size,
                 n_layers, input_dropout_p=0, dropout_p=0, bidirectional=True,
                 variable_lengths=True, word2vec=None, fix_embeddings=False,
                 rnn_cell='lstm'):
        super(RNN_instruction_encoder, self).__init__()
        assert rnn_cell in ['lstm', 'gru']
        self.variable_lengths = variable_lengths
        if word2vec is None:
            # Fresh, trainable embedding table.
            self.word_vec_dim = word_vec_dim
            self.embedding = nn.Embedding(vocab_size, word_vec_dim)
        else:
            # Initialise the embedding table from pre-trained vectors.
            assert word2vec.size(0) == vocab_size
            self.word_vec_dim = word2vec.size(1)
            self.embedding = nn.Embedding(vocab_size, self.word_vec_dim)
            self.embedding.weight = nn.Parameter(word2vec)
        if fix_embeddings:
            self.embedding.weight.requires_grad = False
        # Both cell types share the same constructor signature.
        rnn_class = nn.LSTM if rnn_cell == 'lstm' else nn.GRU
        self.rnn = rnn_class(self.word_vec_dim, hidden_size, n_layers,
                             batch_first=True, bidirectional=bidirectional,
                             dropout=dropout_p)
        self.input_dropout = nn.Dropout(p=input_dropout_p)
        self.bidirectional = bidirectional
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.rnn_cell = rnn_cell

    def forward(self, input_seq, input_lengths=None):
        """Encode a batch of token-id sequences.

        input_seq: (batch, seq_len) LongTensor of token ids.
        input_lengths: per-sequence lengths; required when
            ``variable_lengths`` is enabled.
        """
        embedded = self.input_dropout(self.embedding(input_seq))
        if not self.variable_lengths:
            output, hidden = self.rnn(embedded)
            return output, hidden
        # Pack so padding is skipped by the RNN, then re-pad the output.
        packed = nn.utils.rnn.pack_padded_sequence(embedded,
                                                   input_lengths,
                                                   batch_first=True,
                                                   enforce_sorted=False)
        output, hidden = self.rnn(packed)
        output, _ = nn.utils.rnn.pad_packed_sequence(output,
                                                     batch_first=True)
        return output, hidden
7,728 | 98ddf0be2c38cd9b10dfa9cc09f53907b34c1287 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original's open(...).read() leaked it).
with open('README.rst', 'r') as readme_file:
    long_description = readme_file.read()

# Package metadata, passed straight through to setuptools.setup().
config = {
    'name': 'beziers',
    'author': 'Simon Cozens',
    'author_email': 'simon@simon-cozens.org',
    'url': 'https://github.com/simoncozens/beziers.py',
    'description': 'Bezier curve manipulation library',
    'long_description': long_description,
    'license': 'MIT',
    'version': '0.5.0',
    'install_requires': [
        'pyclipper'
    ],
    'classifiers': [
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Development Status :: 4 - Beta"
    ],
    'packages': find_packages(),
}

if __name__ == '__main__':
    setup(**config)
|
7,729 | 1bb953b665f48638691986e2fcae73b10a1c2ce0 | #!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import unittest
# Allow interactive execution from CLI, cd tests; ./test_cli.py
if __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ksconf.conf.parser import PARSECONF_LOOSE, parse_conf
from ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS
from tests.cli_helper import TestWorkDir, ksconf_cli
class CliKsconfCombineTestCase(unittest.TestCase):
def build_test01(self, twd):
twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf", r"""
[aws:config]
SHOULD_LINEMERGE = false
TRUNCATE = 8388608
TIME_PREFIX = configurationItemCaptureTime"\s*:\s*"
TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ
TZ = GMT
MAX_TIMESTAMP_LOOKAHEAD = 28
KV_MODE = json
ANNOTATE_PUNCT = false
FIELDALIAS-dest = resourceType AS dest
FIELDALIAS-object = resourceId AS object
FIELDALIAS-object_id = ARN AS object_id
EVAL-change_type = "configuration"
EVAL-dvc = "AWS Config"
EVAL-status="success"
LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action
LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category
# unify account ID field
FIELDALIAS-aws-account-id = awsAccountId as aws_account_id
FIELDALIAS-region-for-aws-config = awsRegion AS region
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
<view name="search" default="false" label="Search" />
</nav>
""")
# In the future there will be a more efficient way to handle the global 'ANNOTATE_PUCT' scenario
twd.write_file("etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf", """
[aws:config]
TZ = UTC
# Corp want's punct to be enabled globally
ANNOTATE_PUNCT = true
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf", """
[aws:config]
# Our config is bigger than yours!
TRUNCATE = 9999999
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf", """
[aws_sns_modular_alert]
is_custom = 1
label = AWS SNS Alert
description = Publish search result to AWS SNS
payload_format = json
icon_path = appIcon.png
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf", """
[aws_sns_modular_alert]
param.account = DeptAwsAccount
""")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="My custom view" />
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
<view name="search" default="false" label="Search" />
</nav>
""")
def test_combine_3dir(self):
# Note that this test tests the old shool version of '*.d' processing. But we must preserve this behavior.
# Be aware that we pass in 'default.d/*' as a string, and expand the glob vs allowing the shell to handle this
# and this is _normal_ behavior when dealing with Windows.
twd = TestWorkDir()
self.build_test01(twd)
default = twd.get_path("etc/apps/Splunk_TA_aws/default")
with ksconf_cli:
ko = ksconf_cli("combine", "--dry-run", "--target", default, default + ".d/*")
# Q: Why do we run this once, but not check anything about it? (To ensure dry-run has no side effects?)
ko = ksconf_cli("combine", "--target", default, default + ".d/*")
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
cfg = parse_conf(twd.get_path("etc/apps/Splunk_TA_aws/default/props.conf"))
self.assertIn("aws:config", cfg)
self.assertEqual(cfg["aws:config"]["ANNOTATE_PUNCT"], "true")
self.assertEqual(cfg["aws:config"]["EVAL-change_type"], '"configuration"')
self.assertEqual(cfg["aws:config"]["TRUNCATE"], '9999999')
nav_content = twd.read_file("etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml")
self.assertIn("My custom view", nav_content)
twd.write_conf("etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf", {
"aws:config": {"TIME_FORMAT": "%Y-%m-%dT%H:%M:%S.%6NZ"}
})
twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="My custom view" />
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
</nav>
""")
twd.write_file("etc/apps/Splunk_TA_aws/default/data/dead.conf", "# File to remove")
twd.write_file("etc/apps/Splunk_TA_aws/default/data/tags.conf", "# Locally created file")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt", "SAME TEXT")
twd.write_file("etc/apps/Splunk_TA_aws/default/same.txt", "SAME TEXT")
twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin", b"#BINARY \xff \x00")
twd.write_file("etc/apps/Splunk_TA_aws/default/binary.bin", b"#BINARY NEW \x00 \xff \xFB")
with ksconf_cli:
ko = ksconf_cli("combine", "--dry-run", "--target", default, default + ".d/*")
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
self.assertRegex(ko.stdout, r'[\r\n][-]\s*<view name="search"')
self.assertRegex(ko.stdout, r'[\r\n][-] ?[\r\n]') # Remove empty lines from nav
self.assertRegex(ko.stdout, r"[\r\n][+]TIME_FORMAT = [^\r\n]+%6N")
with ksconf_cli:
ko = ksconf_cli("combine", "--target", default, default + ".d/*")
def test_sort_order(self):
"Confirm that single input files are copied as-is"
twd = TestWorkDir()
default = twd.get_path("input")
target = twd.get_path("output")
unique_conf = [
"z = 1",
" b=? ",
"a = 9"]
twd.write_file("input/unique.conf",
"\n".join(unique_conf))
with ksconf_cli:
ko = ksconf_cli("combine", "--layer-method", "disable", "--banner", "",
"--target", target, default)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
data = twd.read_file("output/unique.conf").splitlines()
self.assertListEqual(unique_conf, data)
def test_combine_dird(self):
twd = TestWorkDir()
self.build_test01(twd)
default = twd.get_path("etc/apps/Splunk_TA_aws")
target = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT")
with ksconf_cli:
ko = ksconf_cli("combine", "--layer-method", "dir.d", "--dry-run", "--target", target, default)
ko = ksconf_cli("combine", "--layer-method", "dir.d", "--target", target, default)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
cfg = parse_conf(target + "/default/props.conf")
self.assertIn("aws:config", cfg)
self.assertEqual(cfg["aws:config"]["ANNOTATE_PUNCT"], "true")
self.assertEqual(cfg["aws:config"]["EVAL-change_type"], '"configuration"')
self.assertEqual(cfg["aws:config"]["TRUNCATE"], '9999999')
nav_content = twd.read_file("etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml")
self.assertIn("My custom view", nav_content)
alert_action = twd.read_conf("etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf")
self.assertIn("aws_sns_modular_alert", alert_action)
self.assertEqual(alert_action["aws_sns_modular_alert"]["param.account"], "DeptAwsAccount") # layer 10
self.assertEqual(alert_action["aws_sns_modular_alert"]["label"], "AWS SNS Alert") # layer 60
def test_keep_existing_ds_local_app(self):
twd = TestWorkDir()
src = twd.get_path("repo/apps/Splunk_TA_nix")
target = twd.get_path("etc/deployment-apps/Splunk_TA_nix")
twd.write_file("repo/apps/Splunk_TA_nix/default/app.conf", r"""
[install]
allows_disable = false
is_configured = true
state = enabled
[launcher]
author = Splunk
description = The app is Splunk
version = 7.0.0
""")
# Make partent diretories
os.makedirs(twd.get_path("etc/deployment-apps"))
# First run (creates maker file)
with ksconf_cli:
ko = ksconf_cli("combine", "--keep-existing", "local/app.conf",
"--target", target, src)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
# Local folder hasn't been created yet
self.assertFalse(os.path.isdir(twd.get_path("etc/deployment-apps/Splunk_TA_nix/local")))
# Simulate a 'splunk reload deploy-server'
twd.write_file("etc/deployment-apps/Splunk_TA_nix/local/app.conf", "# Autogenerated file")
with ksconf_cli:
ko = ksconf_cli("combine", "--keep-existing", "local/app.conf",
"--target", target, src)
self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
cfg = parse_conf(os.path.join(target, "default/app.conf"))
self.assertIn("install", cfg)
self.assertEqual(cfg["launcher"]["version"], "7.0.0")
self.assertEqual(twd.read_file("etc/deployment-apps/Splunk_TA_nix/local/app.conf"),
"# Autogenerated file")
# This time the file will be removed
ko = ksconf_cli("combine", "--target", target, src)
self.assertFalse(os.path.isfile(twd.get_path("etc/deployment-apps/Splunk_TA_nix/local/app.conf")),
"local/app.conf should have been removed.")
def test_combine_conf_spec(self):
    """Layered .conf.spec files must be merged into README/ of the target."""
    twd = TestWorkDir()
    twd.write_file("etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec", r"""
[<stanza_type1>]
important_field = <str>
* Some notes about the important field.
* Required!
disabled = <bool>
""")
    twd.write_file("etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec", r"""
[bookmark::<prefixed_stanza_type>]
resource = <url>
category = <str>
* Label for organization
disabled = <bool>
""")
    default = twd.get_path("etc/apps/Splunk_TA_aws")
    target = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT")
    with ksconf_cli:
        ko = ksconf_cli("combine", "--layer-method", "dir.d", "--target", target, default)
        self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
        # Stanzas from both layers should land in one combined spec file.
        spec_file = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec")
        spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)
        self.assertIn("bookmark::<prefixed_stanza_type>", spec)
        self.assertIn("<stanza_type1>", spec)
def test_require_arg(self):
    """Running combine without --target must fail with a helpful message."""
    with ksconf_cli:
        ko = ksconf_cli("combine", "source-dir")
        self.assertRegex(ko.stderr, "Must provide [^\r\n]+--target")
def test_missing_marker(self):
    """Combining into a pre-existing target without the marker file must abort."""
    twd = TestWorkDir()
    twd.write_file("source-dir/someapp/default/blah.conf", "[entry]\nboring=yes\n")
    # Destination already has content but no ksconf marker file.
    twd.write_file("dest-dir/someapp/default/blah.conf", "[entry]\nboring=yes\n")
    ko = ksconf_cli("combine", twd.get_path("source-dir"), "--target", twd.get_path("dest-dir"))
    self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)
    self.assertRegex(ko.stderr, r".*Marker file missing\b.*")
# Allow this test module to be run directly.
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
|
7,730 | 40744a8530df28f0bd8648900beb8a66e2d44cd0 | # Background: The Fibonacci numbers are defined by F(n) = F(n-1) + F(n-2).
# There are different conventions on whether 0 is a Fibonacci number,
# and whether counting starts at n=0 or at n=1. Here, we will assume that
# 0 is not a Fibonacci number, and that counting starts at n=0,
# so F(0)=F(1)=1, and F(2)=2. With this in mind, write the function
# nthfibonaccinumber(n) that takes a non-negative int n and returns the nth Fibonacci number.
def fun_nthfibonaccinumber(n):
    """Return the nth Fibonacci number for non-negative int n.

    Convention from the exercise: 0 is not a Fibonacci number and counting
    starts at n=0, so F(0) = F(1) = 1 and F(2) = 2.
    """
    # Fixes from the original: the local named `sum` shadowed the builtin,
    # and the special-case `if n == 0` branch was redundant - for n <= 1 the
    # loop below simply runs zero times and returns 1.
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
7,731 | 9c98ecde2e8aac00a33da7db6e5e6023519e4b84 | from django.db import models
from django.utils import timezone
from django.utils.text import slugify
from django.db.models.signals import pre_save
from NetFlix.db.models import PublishStateOptions
from NetFlix.db.receivers import publicado_stado_pre_save, slugify_pre_save
class VideoQuerySet(models.QuerySet):
    """QuerySet with a shortcut for rows currently visible to the public."""

    def publicado(self):
        """Rows in PUBLISH state whose publish time is not in the future."""
        return self.filter(
            stado=PublishStateOptions.PUBLISH,
            tiempo_publicado__lte=timezone.now(),
        )
class VideoManager(models.Manager):
    """Default manager exposing the custom VideoQuerySet helpers."""

    def get_queryset(self):
        """Use VideoQuerySet so chained calls keep the custom methods."""
        return VideoQuerySet(self.model, using=self._db)

    def publicado(self):
        """Shortcut that proxies to VideoQuerySet.publicado()."""
        return self.get_queryset().publicado()
class Video(models.Model):
    """A video whose public visibility is gated by active flag, publish
    state and publish time. Slug and publish-time bookkeeping are handled
    by the pre_save receivers wired at module bottom."""
    titulo = models.CharField(max_length=120)  # title
    descripcion = models.TextField(blank=True, null=True)  # description
    slug = models.SlugField(blank=True, null=True)  # filled by slugify_pre_save receiver
    activo = models.BooleanField(default=True)  # soft on/off switch
    video_id = models.CharField(max_length=120, unique=True)  # external player id
    timestamp = models.DateTimeField(auto_now_add=True)  # creation time
    update = models.DateTimeField(auto_now=True)  # last-modified time
    stado = models.CharField(max_length=2, choices=PublishStateOptions.choices, default=PublishStateOptions.DRAFT)  # publish state
    tiempo_publicado = models.DateTimeField(auto_now_add=False, auto_now=False, blank=True, null=True)  # set when published
    objects = VideoManager()

    def __str__(self):
        return self.titulo

    def get_video_id(self):
        # Only expose the id when the video is publicly visible.
        if not self.es_publicado:
            return None
        return self.video_id

    def get_descripcion_trailer(self):
        return self.descripcion

    @property
    def es_publicado(self):
        """True when the video is active, in PUBLISH state and its
        publish time has passed."""
        if self.activo is False:
            return False
        estado = self.stado
        if estado != PublishStateOptions.PUBLISH:
            return False
        tiempo_publicado = self.tiempo_publicado
        if tiempo_publicado is None:
            return False
        ahora = timezone.now()
        return tiempo_publicado <= ahora

    def get_playlista_ids(self):
        # Reverse FK lookup via related_name 'playlist_destacado'
        # (i.e. self.<foreignkey_obj>_set.all() with a custom related_name).
        return list(self.playlist_destacado.all().values_list('id', flat=True))

    # Superseded by the pre_save receivers connected below; kept for reference.
    #def save(self, *args, **kwargs):
    #    if self.stado == self.PublishStateOptions.PUBLISH and self.tiempo_publicado is None:
    #        print("Guardado el tiempo de publicado")
    #        self.tiempo_publicado = timezone.now()
    #    elif self.stado == self.PublishStateOptions.DRAFT:
    #        self.tiempo_publicado = None
    #    if self.slug is None:
    #        self.slug = slugify(self.titulo)
    #    super().save(*args, **kwargs)
class ProxiTodoLosVideo(Video):
    """Proxy of Video so 'all videos' can have its own admin registration."""
    class Meta:
        proxy = True
        verbose_name = "Todo los Video"
        # NOTE(review): the plural reads "all published" although this proxy
        # presumably covers *all* videos - looks like a copy/paste slip; confirm.
        verbose_name_plural="Todos los Publicados"
class VideoPublicadoProxy(Video):
    """Proxy of Video intended for published videos (presumably filtered
    in its admin/queryset elsewhere - confirm)."""
    class Meta:
        proxy =True
        verbose_name ='Video Publicado'
        verbose_name_plural = 'Videos Publicados'
# Wire the publish-state and slug receivers for the concrete model and both
# proxies. NOTE(review): each proxy is connected separately - presumably
# because signals are dispatched with the proxy class as sender when saving
# through a proxy; confirm this is intentional rather than redundant.
pre_save.connect(publicado_stado_pre_save, sender=Video)
pre_save.connect(slugify_pre_save, sender=Video)
pre_save.connect(publicado_stado_pre_save, sender=ProxiTodoLosVideo)
pre_save.connect(slugify_pre_save, sender=ProxiTodoLosVideo)
pre_save.connect(publicado_stado_pre_save, sender=VideoPublicadoProxy)
pre_save.connect(slugify_pre_save, sender=VideoPublicadoProxy)
|
7,732 | 415a6cf1c3f633a863851a4a407d416355398b39 | import torch
import numpy as np
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
class CNN(nn.Module):
    """1-D convolutional binary classifier for 4-channel sequence fragments.

    Pipeline: Conv1d -> ReLU -> MaxPool1d -> flatten -> Dropout -> Linear
    -> ReLU -> Dropout -> Linear -> ReLU -> Softmax over 2 classes.
    """

    def __init__(self,
                 fragment_length,
                 conv_layers_num,
                 conv_kernel_size,
                 pool_kernel_size,
                 fc_size,
                 conv_dilation = 1,
                 pool_dilation = 1,
                 conv_stride = 1,
                 pool_stride = 2):
        super(CNN, self).__init__()
        # Inputs are one-hot encoded over a 4-letter alphabet.
        self.input_channels = 4
        self.fragment_length = fragment_length
        self.conv_layers_num = conv_layers_num
        self.conv_kernel_size = conv_kernel_size
        self.pool_kernel_size = pool_kernel_size

        self.conv1 = nn.Conv1d(in_channels=self.input_channels,
                               out_channels=self.conv_layers_num,
                               kernel_size=self.conv_kernel_size,
                               stride=conv_stride,
                               dilation=conv_dilation)
        self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size,
                                 stride=pool_stride,
                                 dilation=pool_dilation)

        # Standard length formulas for Conv1d/MaxPool1d with zero padding:
        # L_out = (L_in - dilation*(kernel-1) - 1) / stride + 1
        conv_out_len = (self.fragment_length - conv_dilation * (self.conv_kernel_size - 1) - 1) / conv_stride + 1
        pool_out_len = (conv_out_len - pool_dilation * (self.pool_kernel_size - 1) - 1) / pool_stride + 1

        self.dropout = nn.Dropout()
        # Flattened feature count feeding the first fully-connected layer.
        self.input_fc = int(pool_out_len) * self.conv_layers_num
        self.output_fc = fc_size
        self.fc1 = nn.Linear(self.input_fc, self.output_fc)
        self.fc2 = nn.Linear(self.output_fc, 2)
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        """Return per-class probabilities for a (batch, 4, length) tensor."""
        features = self.pool(F.relu(self.conv1(x)))
        flat = features.view(-1, self.input_fc)
        hidden = F.relu(self.fc1(self.dropout(flat)))
        logits = F.relu(self.fc2(self.dropout(hidden)))
        return self.softmax(logits)
7,733 | 57564c2e94a65187bf5e033ee06926fb593e11a7 | from yapsy.IPlugin import IPlugin
import wolframalpha
import yaml
# Load API keys once at import time.
# Fixes: yaml.load() without an explicit Loader is deprecated and unsafe on
# untrusted input - safe_load() parses plain data only; the context manager
# guarantees the file handle is closed even if parsing raises.
with open("friday/plugins/KEYS") as keys_file:
    keys = yaml.safe_load(keys_file)
class Wolfram(IPlugin):
    """Plugin that answers otherwise-unrecognized queries via Wolfram|Alpha."""

    def can_perform(self, friday, request):
        """True when the parsed request's intent resolved to 'wisdom.unknown'."""
        return 'result' in request and 'resolvedQuery' in request['result']\
            and 'action' in request['result'] and request['result']['action'] == 'wisdom.unknown'

    # result = request['result']  # Assumes we're using gTTS
    # # Get the text that is supposed to be spoken aloud
    # reply = result['fulfillment']['speech']
    # # Get what the service thought you said
    # question = result['resolvedQuery']

    def perform(self, friday, request):
        """Send the resolved query to Wolfram|Alpha and return the answer text.

        NOTE(review): currently stringifies the whole pod list rather than
        extracting a single clean answer; the richer extraction logic is
        disabled in the string blocks below.
        """
        question = request['result']['resolvedQuery']
        client = wolframalpha.Client(keys['WOLFRAM'])
        res = client.query(question)
        answer = str(list(res))
        """if len(res):
            results = list(res.results)
            if len(results):
                answer = results[0].text[0]
            else:
                answer = ' '.join([each_answer.subpods[0].text for each_answer in res.pods
                                   if each_answer.subpods[0].text])
        else:
            # answer = "Sorry, Wolfram doesn't know the answer."
            answer = ""
        """
        """# Replace some of its notation so it's more easily read.
        answer = answer.replace('\n', '. ').replace('~~', ' or about ')
        # Get the result to a computation and don't bother reading the original question.
        if '=' in answer:
            answer = answer[answer.index('=') + 1:].strip()
        """
        return answer
#
# def wolfram_query(question):
# # Every service should have a general set of requirements under which
# # it is activated, this would be one of the ones that Wolfram Alpha
# # uses, it does have others as well. Consider having a single method
# # in the plugin system that returns a boolean determining whether
# # a plugin should be activated.
# if question:
#
#
# def wolfram_query_old(question):
# import wolframalpha
# # Every service should have a general set of requirements under which
# # it is activated, this would be one of the ones that Wolfram Alpha
# # uses, it does have others as well. Consider having a single method
# # in the plugin system that returns a boolean determining whether
# # a plugin should be activated.
# if question.lower().startswith('wolfram'):
# question = question[8:]
# client = wolframalpha.Client(user_info.WOLFRAM_KEY)
# res = client.query(question)
# try:
# return next(res.results).text # This really needs to be changed.
# # I shouldn't have to rely upon error catching for my flow control.
# except StopIteration:
# pass
# try:
# answer = ' '.join([each_answer.text for each_answer in res.pods if each_answer])
# except TypeError:
# answer = None
# if not answer:
# answer = "Sorry, Wolfram doesn't know the answer."
#
# # Replace some of its notation so it's more easily read.
# answer = answer.replace('\n', '; ').replace('~~', ' or about ')
# # Get the result to a computation and don't bother reading the original question.
# if '=' in answer:
# answer = answer[answer.index('=')+1:]
# return [answer, None] # Follows answer format of [text, action]
#
|
7,734 | 34c8541e640596f51a5232cba06172df5814db14 | #!/usr/bin/python
from PyMca5.PyMcaGui import PyMcaQt as qt
from RixsTool import mainWindow
# Bootstrap the Qt application and show the RIXS main window.
app = qt.QApplication([])
win = mainWindow.RIXSMainWindow()
win.show()
# Enter the Qt event loop; blocks until the window is closed.
app.exec_()
|
7,735 | c6a4d566460a06504abf7e2c54be4f2ea36e01fb | # VGGNet
import numpy as np
np.random.seed(317)
from glob import glob
from itertools import cycle
from keras.applications.vgg19 import VGG19
from keras.optimizers import Adam
from keras.models import Model
from keras.layers import Input, BatchNormalization, Flatten, Dropout, Dense
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping, Callback
from keras.losses import kullback_leibler_divergence
from math import ceil
from os import path, mkdir, listdir
from skimage.transform import resize
from scipy.misc import imread, imsave
from time import time
import argparse
import logging
import keras.backend as K
import pandas as pd
import tifffile as tif
import sys
sys.path.append('.')
from planet.utils.data_utils import tagset_to_ints, random_transforms
from planet.utils.keras_utils import HistoryPlot
from planet.utils.runtime import funcname
class VGGNet(object):
    """VGG19-based multi-label classifier for the Planet Amazon tagging task.

    Wraps network construction, generator-based training with mean-image
    preprocessing, and thresholded prediction.
    """

    def __init__(self, checkpoint_name='VGGNet'):
        # Static run configuration: geometry, training lengths, data locations.
        self.config = {
            'image_shape': [256, 256, 3],
            'input_shape': [224, 224, 3],
            'output_shape': [17, ],
            'batch_size': 60,
            'trn_steps': 680,
            'trn_nb_epochs': 200,
            'trn_transform': True,
            'trn_imgs_csv': 'data/train_v2.csv',
            'trn_imgs_dir': 'data/train-jpg',
            'tst_imgs_csv': 'data/sample_submission_v2.csv',
            'tst_imgs_dir': 'data/test-jpg'
        }

        self.checkpoint_name = checkpoint_name
        self.imgs = []
        self.lbls = []
        self.net = None
        self.rng = np.random

    @property
    def cpdir(self):
        '''Checkpoint directory for this run; created on first access.'''
        cpdir = 'checkpoints/%s_%s/' % (self.checkpoint_name, '_'.join([str(x) for x in self.config['input_shape']]))
        if not path.exists(cpdir):
            mkdir(cpdir)
        return cpdir

    def create_net(self):
        '''Build and compile the VGG19 trunk with a 17-way sigmoid head.'''
        x = inputs = Input(shape=self.config['input_shape'])
        vgg = VGG19(include_top=False, input_tensor=x)
        outputs = Flatten()(vgg.output)
        outputs = Dropout(0.1)(outputs)
        # Sigmoid, not softmax: tags are independent multi-label outputs.
        outputs = Dense(self.config['output_shape'][0], activation='sigmoid')(outputs)

        def true_pos(yt, yp):
            # Fraction of positive true labels (monitoring aid).
            return K.sum(K.round(yt)) / K.sum(K.clip(yt, 1, 1))

        def pred_pos(yt, yp):
            # Fraction of positive predicted labels (monitoring aid).
            return K.sum(K.round(yp)) / K.sum(K.clip(yt, 1, 1))

        def F2(yt, yp):
            # F-beta with beta=2 (recall-weighted) on rounded predictions.
            yt, yp = K.round(yt), K.round(yp)
            tp = K.sum(yt * yp)
            fp = K.sum(K.clip(yp - yt, 0, 1))
            fn = K.sum(K.clip(yt - yp, 0, 1))
            p = tp / (tp + fp)
            r = tp / (tp + fn)
            b = 2.0
            return (1 + b**2) * ((p * r) / (b**2 * p + r + K.epsilon()))

        self.net = Model(inputs, outputs)
        self.net.compile(optimizer=Adam(0.001), loss='binary_crossentropy',
                         metrics=['binary_accuracy', F2, true_pos, pred_pos])
        self.net.summary()
        plot_model(self.net, to_file='%s/net.png' % self.cpdir)
        return

    def train(self):
        '''Train with the augmenting batch generator and checkpoint callbacks.'''
        batch_gen = self.train_batch_gen(self.config['trn_imgs_csv'], self.config[
            'trn_imgs_dir'], self.config['trn_transform'])

        cb = [
            HistoryPlot('%s/history.png' % self.cpdir),
            CSVLogger('%s/history.csv' % self.cpdir),
            ModelCheckpoint('%s/loss.weights' % self.cpdir, monitor='loss', verbose=1,
                            save_best_only=True, mode='min', save_weights_only=True),
            ModelCheckpoint('%s/F2.weights' % self.cpdir, monitor='F2',
                            verbose=1, save_best_only=True, mode='max', save_weights_only=True),
            # Bug fix: F2 is a score to maximize, so the plateau detector must
            # run in 'max' mode (it was 'min', which reduced the LR while F2
            # was still improving). Now consistent with the F2 ModelCheckpoint
            # and EarlyStopping callbacks.
            ReduceLROnPlateau(monitor='F2', factor=0.8, patience=2, epsilon=0.005, verbose=1, mode='max'),
            EarlyStopping(monitor='F2', min_delta=0.01, patience=10, verbose=1, mode='max')
        ]

        self.net.fit_generator(batch_gen, steps_per_epoch=self.config['trn_steps'], verbose=1, callbacks=cb,
                               epochs=self.config['trn_nb_epochs'], workers=2, pickle_safe=True)
        return

    def get_mean_img(self, imgs_paths, mean_img_path):
        '''Compute the mean image from the given paths and save it to the given path.'''
        logger = logging.getLogger(funcname())
        if not path.exists(mean_img_path):
            mean_img = np.zeros(self.config['image_shape'], dtype=np.float32)
            for idx, img_path in enumerate(imgs_paths):
                mean_img += imread(img_path, mode='RGB').astype(np.float32) / len(imgs_paths)
                if idx % 1000 == 0:
                    logger.info('%d/%d' % (idx, len(imgs_paths)))
            imsave(mean_img_path, mean_img)
        return imread(mean_img_path)

    def train_batch_gen(self, imgs_csv, imgs_dir, transform):
        '''Yield (images, tags) batches forever, with optional augmentation.'''
        logger = logging.getLogger(funcname())

        # Read the CSV and extract image names and tags.
        df = pd.read_csv(imgs_csv)
        imgs_paths = ['%s/%s.jpg' % (imgs_dir, n) for n in df['image_name'].values]
        tag_sets = [set(t.strip().split(' ')) for t in df['tags'].values]

        # Compute the mean image for pre-processing.
        mean_img = self.get_mean_img(imgs_paths, '%s/mean_img_trn.jpg' % self.cpdir)
        mean_img = mean_img.astype(np.float32) / 255.
        mean_img_mean = np.mean(mean_img)
        img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean

        while True:
            imgs_batch = np.zeros([self.config['batch_size'], ] + self.config['input_shape'])
            tags_batch = np.zeros([self.config['batch_size'], ] + self.config['output_shape'])
            # NOTE(review): np.random.choice samples *with* replacement, so an
            # "epoch" can repeat/skip images - confirm whether a permutation
            # was intended before changing training behavior.
            random_idxs = cycle(np.random.choice(np.arange(len(imgs_paths)), len(imgs_paths)))
            for batch_idx in range(self.config['batch_size']):
                data_idx = next(random_idxs)
                img = imread(imgs_paths[data_idx], mode='RGB')
                img = img_preprocess(img)
                img = resize(img, self.config['input_shape'], preserve_range=True, mode='constant')
                if transform:
                    img = random_transforms(img, nb_min=0, nb_max=6)
                imgs_batch[batch_idx] = img
                tags_batch[batch_idx] = tagset_to_ints(tag_sets[data_idx])
            yield imgs_batch, tags_batch

    def predict(self, img_batch):
        '''Preprocess img_batch in place and return rounded 0/1 tag predictions.'''
        # Bug fix: listdir() returns bare file names; join them with the image
        # directory so get_mean_img can open them whenever the cached mean
        # image does not exist yet.
        trn_dir = self.config['trn_imgs_dir']
        imgs_paths = ['%s/%s' % (trn_dir, n) for n in listdir(trn_dir)]
        mean_img_path = '%s/mean_img_trn.jpg' % self.cpdir
        mean_img = self.get_mean_img(imgs_paths, mean_img_path).astype(np.float32) / 255.
        mean_img_mean = np.mean(mean_img)
        img_preprocess = lambda img: img.astype(np.float32) / 255. - mean_img_mean

        for idx in range(len(img_batch)):
            img_batch[idx] = img_preprocess(img_batch[idx])

        tags_pred = self.net.predict(img_batch)
        tags_pred = tags_pred.round().astype(np.uint8)
        return tags_pred
if __name__ == "__main__":
from planet.model_runner import model_runner
model = VGGNet()
model_runner(model)
|
7,736 | 2728c3ab26fbdbaac9c47054eafe1c114341f6f2 |
from sand_game.Environment import Environment
from sand_game.behaviours.Behaviour import Behaviour
class EphemeralBehaviour(Behaviour):
    """Removes the particle after one frame
    """
    # NOTE(review): declared without `self` and never returns the annotated
    # tuple - presumably invoked as an unbound/static callable by the engine
    # and the return value is ignored; confirm against Behaviour's contract.
    def behave(env: Environment, loc: tuple[int, int]) -> tuple[int, int]:
        # Clear the particle at its current grid location.
        env.set(loc[0], loc[1], None)
|
7,737 | a5918679b6e3a9bde54808264d9526c6a191578f | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 27 10:34:15 2021
@author: Ivan
課程教材:行銷人轉職爬蟲王實戰|5大社群平台+2大電商
版權屬於「楊超霆」所有,若有疑問,可聯絡ivanyang0606@gmail.com
第一章 爬蟲基本訓練
Html爬蟲Post教學-台灣股市資訊網
"""
import requests
from bs4 import BeautifulSoup
# Target URL: dividend-policy page for stock id 2002 on Goodinfo.
url = 'https://goodinfo.tw/StockInfo/StockDividendPolicy.asp?STOCK_ID=2002'
# A browser-like User-Agent header is required or the site rejects the request.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36' }
# Request the page (POST, per the course material this page expects).
list_req = requests.post(url, headers=headers)
# Parse the returned HTML document.
soup = BeautifulSoup(list_req.content, "html.parser")
# Extract the text of the first red-styled table cell.
soup.find('td',{'style':'color:red'}).text
|
7,738 | 31275ca9e20da9d2709ea396e55c113b3ff4f571 | from django.apps import AppConfig
class ModuloConfig(AppConfig):
    """Django application configuration for the 'modulo' app."""
    name = 'modulo'
    verbose_name = 'TUM:JungeAkademie - Modulo'

    def ready(self):
        # Start-up / initialization code: prime the recommender once Django
        # has finished loading all apps. Imported here because app registries
        # are not ready at module import time.
        from .recommender import Recommender
        Recommender.initialize()
7,739 | 1fbe269c9c09fe58b0df1ebd4354cf9dc31a2f90 | def is_palindrome_v2(word):
'''(string)->boolean
returns if word is palindrome (ignores white space)'''
if len(word) < 2:
return True
if(not word[0].isalpha() or not word[1].isalpha()):
if(word[0].isalpha()):
return is_palindrome_v2(word[:-1])
if(word[-1].isalpha()):
return is_palindrome_v2(word[1:])
else:
return is_palindrome_v2(word[2:-2])
if word[0].lower() != word[-1].lower():
return False
return is_palindrome_v2(word[1:-1])
|
7,740 | bf1221bc9768cff2edb67e0e5f5cea0ee2dd64e5 | """social_website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.auth.views import (password_reset, password_reset_done, password_reset_complete,
password_reset_confirm, password_change, password_change_done)
from django.conf import settings
from django.conf.urls.static import static
from account.views import dashboard
# URL routes. Auth flows use the function-based password views (pre-Django 2.1
# style imported above); app-specific URLs are delegated via include().
urlpatterns = [
    path('admin/', admin.site.urls),
    path('account/', include('account.urls'), name='account'),
    path('images/', include('images.urls', namespace='images')),
    path('password_reset/', password_reset, {'template_name': 'registration/password_reset.html'}, name='password_reset'),
    path('password_reset/done/', password_reset_done, name='password_reset_done'),
    path('password_reset/confirm/<str:uidb64>/<str:token>/', password_reset_confirm, name='password_reset_confirm'),
    path('password_reset/complete/', password_reset_complete, name='password_reset_complete'),
    path('password_change/', password_change, name='password_change'),
    path('password_change/done/', password_change_done, name='password_change_done'),
    path('', dashboard, name='dashboard'),
    path('social-auth/', include('social_django.urls', namespace='social')),
    path('api/accounts/', include('account.api.urls', namespace='api-accounts')),
    path('api/images/', include('images.api.urls', namespace='api-images')),
]
# Serve user-uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
7,741 | c475e095571b211693e66583637442edbf72c260 | from twitter.MyStreamListener import MyStreamListener
import tweepy
from threading import Thread
class TwitterWorker(Thread):
    """Background thread that listens to the Twitter streaming API for
    tweets containing #HACKPSUHELPLINE."""

    def __init__(self):
        Thread.__init__(self)
        # WARNING(security): hard-coded API credentials checked into source;
        # these should be revoked/rotated and loaded from the environment.
        CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU'
        CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri'
        ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu'
        ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU'
        self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
        self.api = tweepy.API(self.auth)

    def run(self):
        # Thread entry point: open the stream and block, filtering on the hashtag.
        streamListener = MyStreamListener()
        self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener)
        self.stream.filter(track=['#HACKPSUHELPLINE'])
if __name__ == '__main__':
    print("Starting Stream..")
    tw = TwitterWorker()
    # Bug fix: TwitterWorker defines no streaming() method, so the original
    # tw.streaming() raised AttributeError. Thread.start() launches the
    # worker thread, which invokes run() and begins filtering the stream.
    tw.start()
|
7,742 | b419e26cbf5bbb746f897367ddaa829773a6860c | from django.db import models
from django.utils import timezone
from accounts.models import AllUser
from profiles.models import Profile
### MODEL HOLDING MEMBER TO CLIENT RELATIONSHIPS. ###
class MemberClient(models.Model):
    """Links a member user to a client user, optionally via a profile."""
    # Bug fix: auto_now_add takes a boolean, not a datetime. The previous
    # auto_now_add=timezone.now() called the clock once at class-definition
    # time and only "worked" because any datetime is truthy; True is the
    # documented form with the intended behavior (stamp on row creation).
    created = models.DateTimeField(auto_now_add=True)
    client = models.ForeignKey(AllUser,
                               related_name='client',
                               default=None,
                               on_delete=models.CASCADE)
    member = models.ForeignKey(AllUser,
                               related_name='member',
                               default=None,
                               on_delete=models.CASCADE)
    profile = models.ForeignKey(Profile,
                                related_name='profile',
                                default=None,
                                on_delete=models.CASCADE,
                                blank=True,
                                null=True)

    def __str__(self):
        return "{0}".format(self.client)
7,743 | 6f05d1915cd2e123dd72233b59d4de43fd724035 | from . import find_resault
from . import sql |
7,744 | 9f42a9d0ca622d6c4e2cf20bc2e494262c16055b | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load the image to analyse.
imagen= cv2.imread("tomate22.jpg")
#cv2.imshow("Original", imagen)
#cv2.waitKey(0)
# Convert to grayscale.
gris = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)
#cv2.imshow("En gris", gris)
#cv2.waitKey(0)
# Apply Gaussian smoothing to reduce noise before edge detection.
gaussiana = cv2.GaussianBlur(gris, (3,3), 0)
#cv2.imshow("Gaussiano", gaussiana)
#cv2.waitKey(0)
# Detect edges with Canny; thresholds are derived from the median intensity.
sigma=0.9
v=np.median(gaussiana)
lower=int(max(0,(1.0-sigma)*v))
upper=int(min(255,(1.0+sigma)*v))
canny = cv2.Canny(gaussiana, lower, upper)
plt.subplot(121),plt.imshow(canny,cmap = 'gray')
plt.title('Canny'), plt.xticks([]), plt.yticks([])
#cv2.imshow("Canny", canny)
#cv2.waitKey(0)
# Dilate the edge map to close small gaps.
#kernel = np.ones((5,5),np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
dilation = cv2.dilate(canny,kernel,iterations = 1)
#cv2.imshow("Dilatado", dilation)
#cv2.waitKey(0)
# Find the external contours.
# NOTE(review): the 3-tuple return is the OpenCV 3.x API; OpenCV 4 returns
# (contours, hierarchy) - confirm the pinned cv2 version.
(_,contornos,_) = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(imagen,contornos,-1,(255,0,0), 2)
cv2.imshow("contornos", imagen)
cv2.waitKey(0)
# For each contour: mask it out, display it with its colour histogram, and
# ask the user to label it as tomato ('t') or not ('n').
for x in range (len(contornos)):
    # Build a filled mask for this contour.
    mask=np.zeros_like(imagen)
    out=np.zeros_like(imagen)
    cv2.drawContours(mask, [contornos[x]], 0, (255,0,0), -1) # thickness -1 fills the contour interior
    # Copy the masked pixels into `out` (pixel-by-pixel).
    for i in range (imagen.shape[0]): # all rows (.shape[0])
        for j in range (imagen.shape[1]): # all columns (.shape[1])
            if mask[i,j,0]==255:
                out[i,j]=imagen[i,j]
    cv2.imshow("contorno", out)
    # Per-channel histogram of the masked region (bin 0 excluded, range 1-256).
    color = ('b','g','r')
    for i,col in enumerate(color):
        histr = cv2.calcHist([out],[i],None,[256],[1,256])
        plt.plot(histr,color = col)
        plt.xlim([0,256])
    plt.show()
    print("Es contorno de tomate?")
    c=cv2.waitKey(0) & 0xFF
    if (c==ord("t") ):
        print("Histograma guardado como valido")
    if (c==ord("n") ):
        print("Histograma guardado como no valido")
cv2.destroyAllWindows()
|
7,745 | f81e4c9a502855dca31c6c991a08a12af1c2e2a6 | import scipy.constants as const
import scipy.optimize as opt
import numpy as np
import pum.algorithms as alg
from pum.lines import *
from pum.net import *
# Coupled-stripline coupler synthesis (Python 2 script): from the coupling
# value C (dB) and system impedance Z0, derive even/odd-mode impedances and
# the strip width/spacing for the given substrate.
mu = 1
eps = 2.56
b = 2.8 * const.milli  # ground-plane spacing
C = 13.0  # coupling, dB
Z0 = 50
f0 = 1.34 * const.giga  # design frequency
# Voltage coupling factor from the dB value.
k = 10 ** ( - np.abs(C) / 20)
print 'k = {}' .format( k)
# Even/odd-mode impedances for the desired coupling.
Z0e = Z0 * np.sqrt( ( 1 + k) / ( 1 - k))
Z0o = Z0 * np.sqrt( ( 1 - k) / ( 1 + k))
print '(Z0e, Z0o) = {}; {}' .format( Z0e, Z0o)
# Elliptic-modulus chain mapping each mode impedance to a geometry factor
# (29.976*pi ~ the free-space impedance term; n_fun/d_fun from pum.algorithms).
modke = Z0e / ( 29.976 * const.pi * np.sqrt( mu / eps))
qe = np.exp( - const.pi * modke)
ke = np.sqrt( qe) * ( ( alg.n_fun( qe) / alg.d_fun( qe)) ** 2)
modko = Z0o / ( 29.976 * const.pi * np.sqrt( mu / eps))
qo = np.exp( - const.pi * modko)
ko = np.sqrt( qo) * ( ( alg.n_fun( qo) / alg.d_fun( qo)) ** 2)
# Strip width w and gap s from the mode factors.
w = ( 2 * b / const.pi) * np.arctanh( np.sqrt( ke * ko))
s = ( 2 * b / const.pi) * np.arctanh( np.sqrt( ke / ko)) - w
# Guided wavelength; a quarter-wave section sets the coupler length.
lamb = const.c / ( np.sqrt(eps) * f0)
print 'lambda = {}; lambda/4 = {}' .format( lamb, lamb / 4)
print 'w = {} mm; s = {} mm' .format( w / const.milli, s / const.milli)
# Cross-check: recompute (Z0e, Z0o) from the synthesized geometry.
print '(Z0e, Z0o) = {}' .format( stripline_coupled( w, s, b, 0, mu, eps))
|
7,746 | e54eea2261517a2b15fde23c46b3fe75c0efec64 | # генераторы списков и словарей
# Worked examples of list/set/dict comprehensions.

# lists
my_list = [1, 2, 3, 4, 5]

# Manual loop version...
new_list = []
for i in my_list:
    new_list.append(i**2)

# ...and the equivalent comprehension.
new_list_comp = [el**2 for el in my_list]

# Bug fix: open() without close() leaked the file handle; the context
# manager guarantees it is closed once the lines are read.
with open("text.txt") as text_file:
    lines = [line.strip() for line in text_file]

# Filtering with a condition.
new_list_1 = [el for el in my_list if el % 2 == 0]

# Multiple nested for-clauses: all character combinations.
str_1 = 'abc'
str_2 = 'def'
str_3 = 'gh'
new_list_2 = [i+j+k for i in str_1 for j in str_2 for k in str_3]

# dicts and sets
my_set = {el**2 for el in range(10)}
my_dict = {el: el**2 for el in range(5)}
print(my_dict)

my_list_of_floats = [2.4324324, 5.3243234, 6.23424]
new_list_round = [round(el, 2) for el in my_list_of_floats]
print(new_list_round)
|
7,747 | cc66dcd34115e72479953ca24f4b2eaeb52cf313 | import socket
import json
import numpy as np
"""TCP client used to communicate with the Unity Application"""
class TCP:
    """TCP client used to talk to the Unity application.

    Messages are JSON objects; values are sent with send()/send2() and
    parsed responses are read with receive().
    """

    def __init__(self, sock = None):
        # Wrap an existing socket if given, otherwise create a fresh TCP one.
        self.sock = sock if sock is not None else socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, host, port):
        """Open a connection to the server at (host, port)."""
        endpoint = (host, port)
        print('connecting to {} port {}'.format(*endpoint))
        self.sock.connect(endpoint)

    def send(self, value, convergence=False):
        """Send one value (distortion gain) to the server as JSON."""
        payload = json.dumps({"gain": value, "convergence": convergence}).encode()
        print(f"Sending value {value} as data {payload}")
        self.sock.sendall(payload)

    def send2(self, radius, gain, convergence=False):
        """Send two values (distortion gain, and radius) to the server as JSON."""
        payload = json.dumps({"gain": gain, "radius": radius, "convergence": convergence}).encode()
        print(f"Sending value ({radius}, {gain}) as data {payload}")
        self.sock.sendall(payload)

    def receive(self):
        """Read up to 1 KiB from the socket and decode it as JSON."""
        raw = self.sock.recv(1024)
        print(f"Received: {raw}")
        return json.loads(raw)

    def close(self):
        """Close the underlying socket."""
        print("Closing socket")
        self.sock.close()
7,748 | 891a490410fd8c7b8879f1e71f24df2db62ff85d | import httplib
def get_status_code(host, path="/"):
try:
connect = httplib.HTTPConnection(host)
connect.request("HEAD", path)
return connect.getresponse().status
except StandardError:
return None
if __name__ == '__main__':
print get_status_code("google.com")
|
7,749 | 056235f8f65a3d6a310ee8a8742c1369b5398f28 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
One cycle policy based on Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf)
Created on Wed Mar 31 13:53:39 2021
"""
import logging
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
logging.getLogger('tensorflow').setLevel(logging.ERROR)
from tensorflow.keras.callbacks import Callback
class CosineAnnealer:
    """Anneal a value from `start` to `end` over `steps` calls along a
    half-cosine curve; step() advances one step and returns the new value."""

    def __init__(self, start, end, steps):
        self.start = start
        self.end = end
        self.steps = steps
        self.n = 0  # number of steps taken so far

    def step(self):
        """Advance one step and return the cosine-annealed value."""
        self.n += 1
        # Weight decays smoothly from 1 (n=0) to 0 (n=steps).
        weight = (np.cos(np.pi * (self.n / self.steps)) + 1) / 2.0
        return self.end + (self.start - self.end) * weight
class OneCycleScheduler(Callback):
    """ `Callback` that schedules the learning rate on a 1cycle policy as per Leslie Smith's paper(https://arxiv.org/pdf/1803.09820.pdf).
    If the model supports a momentum parameter, it will also be adapted by the schedule.
    The implementation adopts additional improvements as per the fastai library: https://docs.fast.ai/callbacks.one_cycle.html, where
    only two phases are used and the adaptation is done using cosine annealing.
    In phase 1 the LR increases from `lr_max / div_factor` to `lr_max` and momentum decreases from `mom_max` to `mom_min`.
    In the second phase the LR decreases from `lr_max` to `lr_max / (div_factor * 1e4)` and momemtum from `mom_max` to `mom_min`.
    By default the phases are not of equal length, with the phase 1 percentage controlled by the parameter `phase_1_pct`.
    """

    def __init__(self, lr_max, steps, mom_min=0.85, mom_max=0.95, phase_1_pct=0.3, div_factor=25.):
        super(OneCycleScheduler, self).__init__()
        lr_min = lr_max / div_factor          # starting LR of phase 1
        final_lr = lr_max / (div_factor * 1e4)  # terminal LR of phase 2
        phase_1_steps = steps * phase_1_pct
        phase_2_steps = steps - phase_1_steps

        self.phase_1_steps = phase_1_steps
        self.phase_2_steps = phase_2_steps
        self.phase = 0  # 0 = warm-up phase, 1 = annealing phase
        self.step = 0   # training batches seen so far
        # Per-phase (lr annealer, momentum annealer) pairs.
        self.phases = [[CosineAnnealer(lr_min, lr_max, phase_1_steps), CosineAnnealer(mom_max, mom_min, phase_1_steps)],
                       [CosineAnnealer(lr_max, final_lr, phase_2_steps), CosineAnnealer(mom_min, mom_max, phase_2_steps)]]

        # History of applied values, for plot().
        self.lrs = []
        self.moms = []

    def on_train_begin(self, logs=None):
        # Reset to the start of phase 1 and apply its initial values.
        self.phase = 0
        self.step = 0

        self.set_lr(self.lr_schedule().start)
        self.set_momentum(self.mom_schedule().start)

    def on_train_batch_begin(self, batch, logs=None):
        # Record the values actually in effect for this batch.
        self.lrs.append(self.get_lr())
        self.moms.append(self.get_momentum())

    def on_train_batch_end(self, batch, logs=None):
        self.step += 1
        # Switch to the annealing phase once the warm-up budget is spent.
        if self.step >= self.phase_1_steps:
            self.phase = 1

        self.set_lr(self.lr_schedule().step())
        self.set_momentum(self.mom_schedule().step())

    def get_lr(self):
        try:
            return tf.keras.backend.get_value(self.model.optimizer.lr)
        except AttributeError:
            # Optimizer exposes no lr variable.
            return None

    def get_momentum(self):
        try:
            return tf.keras.backend.get_value(self.model.optimizer.momentum)
        except AttributeError:
            # Optimizer without a momentum parameter (e.g. Adam).
            return None

    def set_lr(self, lr):
        try:
            tf.keras.backend.set_value(self.model.optimizer.lr, lr)
        except AttributeError:
            pass # ignore

    def set_momentum(self, mom):
        try:
            tf.keras.backend.set_value(self.model.optimizer.momentum, mom)
        except AttributeError:
            pass # ignore

    def lr_schedule(self):
        # Annealer driving the current phase's learning rate.
        return self.phases[self.phase][0]

    def mom_schedule(self):
        # Annealer driving the current phase's momentum.
        return self.phases[self.phase][1]

    def plot(self):
        # Side-by-side plots of the recorded LR and momentum traces.
        ax = plt.subplot(1, 2, 1)
        ax.plot(self.lrs)
        ax.set_title('Learning Rate')
        ax = plt.subplot(1, 2, 2)
        ax.plot(self.moms)
        ax.set_title('Momentum')
|
7,750 | 32bb6d5ad0a1398c9ab89190c087fe3916631878 | import ordenador
import pytest
import contatempo
class TestaOrdenador:
    """Pytest suite: improved bubble sort and selection sort must order
    both random and nearly-sorted 100-element lists."""

    @pytest.fixture
    def ordenad(self):
        # Fresh sorter instance per test.
        return ordenador.Ordenador()

    @pytest.fixture
    def list_quase_ord(self):
        # Nearly-sorted input (stresses the early-exit of bubble sort).
        c = contatempo.ContaTempos()
        return c.lista_quase_ordenada(100)

    @pytest.fixture
    def list_aleatoria(self):
        # Uniformly random input.
        c = contatempo.ContaTempos()
        return c.lista_aleatoria(100)

    def esta_ordenada(self, lista):
        # Helper (not a test): True iff lista is in non-decreasing order.
        for i in range(len(lista)-1):
            if lista[i] > lista[i+1]:
                return False
        return True

    def test_selecao_bolha_melhorada_aleatoria(self, ordenad, list_aleatoria):
        ordenad.selecao_bolha_melhorada(list_aleatoria)
        assert self.esta_ordenada(list_aleatoria)

    def test_selecao_direta_aleatoria(self, ordenad, list_aleatoria):
        ordenad.selecao_direta(list_aleatoria)
        assert self.esta_ordenada(list_aleatoria)

    def test_selecao_bolha_melhorada__quase_ord(self, ordenad, list_quase_ord):
        ordenad.selecao_bolha_melhorada(list_quase_ord)
        assert self.esta_ordenada(list_quase_ord)

    def test_selecao_direta_quase_ord(self, ordenad, list_quase_ord):
        ordenad.selecao_direta(list_quase_ord)
        assert self.esta_ordenada(list_quase_ord)
|
7,751 | fb4a95197882cc6fe72a5f3c2420a474d9cd97aa | # -*- coding: utf-8 -*-
import scrapy
import re
class LeedsAcUkSpider(scrapy.Spider):
    """Scrapes a University of Leeds module-catalogue page."""
    name = 'leeds_ac_uk'
    allowed_domains = ['webprod3.leeds.ac.uk']
    start_urls = ['http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105']

    def parse(self, response):
        # Fields derived from the page heading text (module code + title).
        item = {}
        item['Subject'] = response.css('div#module-programmes h2::text').get().split()[-1]
        item['Subject short'] = response.css('div#module-programmes h2::text').get().split()[0].split('3')[0]
        item['Subject code1'] = response.css('div#module-programmes h2::text').get().split()[0]
        item['Topic'] = response.css('div#module-programmes h2::text').get().split('\n')[-1]
        # NOTE(review): `item` is never yielded and `Syllabus` is unused, so
        # the spider emits nothing as written - possibly unfinished code;
        # confirm before relying on its output.
        Syllabus = response.css('div#module-programmes')
def to_bitmask(n, bits):
    """Return *n* as a list of 0/1 digits, left-padded with zeros to *bits*."""
    digits = [int(ch) for ch in bin(n)[2:]]  # strip the "0b" prefix
    padding = [0] * (bits - len(digits))
    return padding + digits
def invert_mask(mask):
    """Return a new mask with every bit flipped (truthy -> 0, falsy -> 1)."""
    return [0 if bit else 1 for bit in mask]
7,753 | 923a433a3a04a8538b43d162d17d379daab4698a | #!/usr/bin/env python3
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import asyncio
from bleak import BleakScanner
from bleak import BleakClient
from bleak import exc
TOIO_SERVICE_UUID = "10B20100-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_BATTERY_UUID = "10B20108-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_BUTTON_UUID = "10B20107-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_SOUND_UUID = "10B20104-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_MOTOR_UUID = "10B20102-5B3B-4571-9508-CF3EFCD7BBAE".lower()
cubes = []
async def sound(cube):
    """Play sound-effect 9 at full volume (0xff) on the cube."""
    payload = bytearray((0x02, 9, 0xff))  # 0x02 = "play sound effect" command
    await cube.write_gatt_char(TOIO_SOUND_UUID, payload)
async def motor(cube):
    """Drive both motors forward for a fixed duration (timed motor command)."""
    # 0x02 = timed control; per-motor triplets (id, direction, speed); last byte = duration.
    payload = bytearray((0x02, 0x01, 0x01, 0x10, 0x02, 0x01, 0x10, 0x40))
    await cube.write_gatt_char(TOIO_MOTOR_UUID, payload)
async def connect_to_cube(d):
    """Connect to BLE device *d*; if it exposes the toio core-cube service,
    record it in the global `cubes` list and run the sound and motor demos."""
    print('try to connect %s' % d.address)
    async with BleakClient(d.address) as cube:
        connected = cube.is_connected
        if not connected:
            print('%s is not connected' % d.address)
            return
        print('%s connected' % d.address)
        services = cube.services
        for service in services:
            print(service.uuid)
            if service.uuid == TOIO_SERVICE_UUID:
                # NOTE(review): the client disconnects when this `async with`
                # exits -- confirm the stored handle is still usable later.
                cubes.append(cube)
                print('toio core cube(%d): %s' % (len(cubes), connected))
                print(' Address: ', d.address)
                for char in service.characteristics:
                    print(' Characteristic: ', char)
                await sound(cube)
                await motor(cube)
async def search_cube():
    """Scan for BLE devices for 5 seconds and try to connect to each one.

    BLE-level failures are printed and scanning continues; devices that lack
    the expected attributes are skipped silently.
    """
    devices = await BleakScanner.discover(timeout=5.0)
    for i, d in enumerate(devices):
        print('device %d' % i)
        try:
            await connect_to_cube(d)
        except exc.BleakError as e:
            print(e)  # connection failure: report, keep trying other devices
        except AttributeError as e:
            pass  # not a usable device record -- ignore
async def main(argv):
    """Run the cube scan and report when nothing was found (*argv* unused)."""
    print('search toio core cube')
    await search_cube()
    if not cubes:
        print('sorry, no cubes are found')
        return 0
# Script entry point: run the scan/connect demo.
if __name__ == '__main__':
    asyncio.run(main(sys.argv))
|
7,754 | 55a061a1c0cd20e5ab7413c671bc03573de1bbdf | #!/usr/bin/python3
"""
list = list(range(97, 123)
for (i in list):
if (i % 2 == 0):
i = (i - 32)
"""
for letter in "zYxWvUtSrQpOnMlKjIhGfEdCbA":
print('{:s}'.format(letter), end = "")
|
7,755 | 4ddff57790ad191fc29fc092bcc714f0b6273100 | # _*_ coding: utf-8 _*_
# 按层打印二叉树
class TreeNode(object):
    """Binary-tree node: a value plus two (initially empty) child links."""
    def __init__(self, val):
        self.val = val
        self.left = self.right = None
class PrintTree(object):
    """Collect a binary tree's values level by level (BFS)."""

    def printTree(self, root):
        """Return a list of per-level value lists, top to bottom.

        Returns None for an empty tree (preserved from the original API).
        Uses a deque because list.pop(0) is O(n) per pop.
        """
        if not root:
            return
        from collections import deque
        # `cur_last` is the last node of the current level; `next_last` tracks
        # the rightmost child seen so far, i.e. the last node of the next level.
        next_last = cur_last = root
        queue = deque([root])
        result, level = [], []
        while queue:
            node = queue.popleft()
            level.append(node.val)
            if node.left:
                queue.append(node.left)
                next_last = node.left
            if node.right:
                queue.append(node.right)
                next_last = node.right
            # End of the current level: flush it and advance the sentinel.
            if node is cur_last:
                result.append(level)
                level = []
                cur_last = next_last
        return result
|
7,756 | d8e0198244c3df77fa0258cc97a55042e36d056f | """
默认查询所有
> db.test1000.find()
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
{ "_id" : ObjectId("5c3559af648171cce9135dd7"), "name" : "zhangdapeng1" }
{ "_id" : ObjectId("5c3559b2648171cce9135dd8"), "name" : "zhangdapeng2" }
{ "_id" : ObjectId("5c3559b4648171cce9135dd9"), "name" : "zhangdapeng3" }
查询匹配参数
> db.test1000.find({'name':'zhangdapeng'})
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
>
"""
"""
小于$lt
小于等于$lte
大于$gt
大于等于$gte
不等于$ne
查询年龄小于等于18岁的
> db.test1000.find({age:{$lte:18}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
>
查询年龄大于等于18岁的
> db.test1000.find({age:{$gte:18}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
范围 $in $nin不在某个范围类
> db.test1000.find({age:{$in:[17,18,19]}})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
逻辑查询
并且关系直接用,逗号
或关系$or
> db.test1000.find({$or:[{'age':18},{'age':19}]})
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
>
正则表达式
直接用两个/正则表达式就行/
> db.test1000.find({'name':/zhangdapeng*/})
{ "_id" : ObjectId("5c3559ab648171cce9135dd6"), "name" : "zhangdapeng" }
{ "_id" : ObjectId("5c3559af648171cce9135dd7"), "name" : "zhangdapeng1" }
{ "_id" : ObjectId("5c3559b2648171cce9135dd8"), "name" : "zhangdapeng2" }
{ "_id" : ObjectId("5c3559b4648171cce9135dd9"), "name" : "zhangdapeng3" }
{ "_id" : ObjectId("5c355a61648171cce9135dda"), "name" : "zhangdapeng3", "age" : 18 }
{ "_id" : ObjectId("5c355a65648171cce9135ddb"), "name" : "zhangdapeng3", "age" : 19 }
{ "_id" : ObjectId("5c355a69648171cce9135ddc"), "name" : "zhangdapeng3", "age" : 17 }
>
限制内容-输出控制
find().limit(数字)
find().skip(数字)
同时使用可以实现翻页
find().skip(5).limit(20)
自定义查询
db.stu.find({
$where:function(){
return this.age>30;
}
})
"""
|
7,757 | d1af148bc6b27d38052f2e57f1c610c86eccebef | #!/usr/bin/python3
import sys
import math
class parameter :
    """Plain namespace for the parsed CLI values.

    The class attributes are mutated directly by `apply_args`; the class is
    never instantiated.
    """
    opt = 0  # surface option: 1 = sphere, 2 = cylinder, 3 = cone
    xp = 0   # point the light ray passes through
    yp = 0
    zp = 0
    xv = 0   # direction vector of the light ray
    yv = 0
    zv = 0
    p = 0    # radius (sphere/cylinder) or cone half-angle in degrees
def check_args() :
    """Validate the command line: exactly 8 integer arguments, with the first
    (the surface option) in {1, 2, 3}. Exits with status 84 on any violation.

    The length is now checked before indexing argv (the original relied on a
    bare `except` catching the resulting IndexError, which also masked
    unrelated errors).
    """
    if len(sys.argv) != 9 :
        sys.exit(84)
    try :
        values = [int(arg) for arg in sys.argv[1:9]]
    except ValueError :
        sys.exit(84)
    if values[0] not in (1, 2, 3) :
        sys.exit(84)
def help() :
    """Print usage and exit 0 when invoked as `prog -h`; otherwise a no-op.

    NOTE: intentionally shadows the builtin `help` (kept for compatibility).
    """
    if len(sys.argv) != 2 or sys.argv[1] != '-h' :
        return
    print ("USAGE")
    print (" ./104intersection opt xp yp zp xv yv zv p")
    print ("DESCRIPTION")
    print (" opt surface option: 1 for a sphere, 2 for a cylinder, 3 for a cone")
    print (" (xp, yp, zp) coordinates of a point by which the light ray passes through")
    print (" (xv, yv, zv) coordinates of a vector parallel to the light ray")
    print (" p parameter: radius of the sphere, radius of the cylinder, or")
    print (" angle formed by the cone and the Z-axis")
    sys.exit(0)
def sphere() :
    """Intersect the stored line with a sphere of radius `parameter.p`
    centred at the origin and print the 0, 1 or 2 intersection points.
    Exits 84 for a non-positive radius."""
    if parameter.p <= 0 :
        sys.exit(84)
    print("Sphere of radius "+str(parameter.p))
    print("Line passing through the point ("+str(parameter.xp)+", "+str(parameter.yp)+", "+str(parameter.zp)+") and parallel to the vector ("+str(parameter.xv)+", "+str(parameter.yv)+", "+str(parameter.zv)+")")
    # Substitute P + t*V into x^2 + y^2 + z^2 = p^2 -> a*t^2 + b*t + c = 0.
    a = float(pow(parameter.xv, 2) + pow(parameter.yv, 2) + pow(parameter.zv, 2))
    b = float(parameter.xv*2*parameter.xp + parameter.yv*2*parameter.yp + parameter.zv*2*parameter.zp)
    c = float(pow(parameter.xp, 2) + pow(parameter.yp, 2) + pow(parameter.zp, 2) - pow(parameter.p,2))
    descriminant = float(pow(b, 2) - 4 * a * c)  # sic: "discriminant"
    if descriminant < 0 :
        print("No intersection point.")
    if descriminant == 0 :
        # Tangent line: single root t1 = -b / 2a.
        t1 = float(- (b / (2 * a)))
        x = float(parameter.xp + parameter.xv * t1)
        y = float(parameter.yp + parameter.yv * t1)
        z = float(parameter.zp + parameter.zv * t1)
        print("1 intersection point:")
        print('(%.3f,' %float(x), '%.3f,' %float(y), '%.3f)' %float(z))
    if descriminant > 0 :
        # Two roots; the t2 point is printed first (original output order).
        t1 = float((-b - math.sqrt(descriminant) ) / (2 * a))
        t2 = float((-b + math.sqrt(descriminant) ) / (2 * a))
        x1 = float(parameter.xp + parameter.xv * t1)
        y1 = float(parameter.yp + parameter.yv * t1)
        z1 = float(parameter.zp + parameter.zv * t1)
        x2 = float(parameter.xp + parameter.xv * t2)
        y2 = float(parameter.yp + parameter.yv * t2)
        z2 = float(parameter.zp + parameter.zv * t2)
        print("2 intersection points:")
        print('(%.3f,' %float(x2), '%.3f,' %float(y2), '%.3f)' %float(z2))
        print('(%.3f,' %float(x1), '%.3f,' %float(y1), '%.3f)' %float(z1))
def cylinder() :
    """Intersect the stored line with the infinite cylinder x^2 + y^2 = p^2
    (axis = Z) and print the intersection points. Exits 84 for p <= 0.

    BUGFIX: the quadratic coefficient `a` was computed as xv + yv instead of
    xv^2 + yv^2. Substituting P + t*V into x^2 + y^2 = p^2 gives
    (xv^2 + yv^2)*t^2 + 2*(xv*xp + yv*yp)*t + (xp^2 + yp^2 - p^2) = 0.
    """
    infiny = 0
    if parameter.p <= 0 :
        sys.exit(84)
    print("Cylinder of radius "+str(parameter.p))
    print("Line passing through the point ("+str(parameter.xp)+", "+str(parameter.yp)+", "+str(parameter.zp)+") and parallel to the vector ("+str(parameter.xv)+", "+str(parameter.yv)+", "+str(parameter.zv)+")")
    a = float(pow(parameter.xv, 2) + pow(parameter.yv, 2))
    b = float( 2 * (parameter.xv * parameter.xp + parameter.yv * parameter.yp))
    c = float(pow(parameter.xp,2) + pow(parameter.yp, 2) - pow(parameter.p, 2))
    descriminant = float(pow(b, 2) - 4 * a * c)
    if descriminant < 0 :
        print("No intersection point.")
    if descriminant == 0 :
        try:
            t1 = float(- (b / (2 * a)))
        except ZeroDivisionError :
            # a == 0: the line is parallel to the cylinder axis.
            # NOTE(review): this reports "infinite" even when such a line does
            # not lie on the surface -- confirm intended behaviour.
            t1 = 1
            infiny = 1
        x = float(parameter.xp + parameter.xv * t1)
        y = float(parameter.yp + parameter.yv * t1)
        z = float(parameter.zp + parameter.zv * t1)
        if infiny == 0 :
            print("1 intersection point:")
            print('(%.3f,' %float(x), '%.3f,' %float(y), '%.3f)' %float(z))
        else:
            print("There is an infinite number of intersection points.")
    if descriminant > 0 :
        t1 = float((-b - math.sqrt(descriminant) ) / (2 * a))
        t2 = float((-b + math.sqrt(descriminant) ) / (2 * a))
        x1 = float(parameter.xp + parameter.xv * t1)
        y1 = float(parameter.yp + parameter.yv * t1)
        z1 = float(parameter.zp + parameter.zv * t1)
        x2 = float(parameter.xp + parameter.xv * t2)
        y2 = float(parameter.yp + parameter.yv * t2)
        z2 = float(parameter.zp + parameter.zv * t2)
        print("2 intersection points:")
        print('(%.3f,' %float(x2), '%.3f,' %float(y2), '%.3f)' %float(z2))
        print('(%.3f,' %float(x1), '%.3f,' %float(y1), '%.3f)' %float(z1))
def cone() :
    """Intersect the stored line with the double cone x^2 + y^2 = tan(p)^2 * z^2
    (apex at the origin, axis = Z, half-angle p degrees) and print the points.
    Exits 84 unless 0 < p < 90.

    BUGFIXES: (1) `a` was computed as xv + yv - zv^2*tan^2 instead of
    xv^2 + yv^2 - zv^2*tan^2; (2) `infiny` was read before assignment when the
    discriminant is 0 and no division error occurred (NameError); (3) missing
    space in the "Cone with a N degree angle" message.
    """
    infiny = 0
    if parameter.p <= 0 or parameter.p >= 90 :
        sys.exit(84)
    rad = math.radians(parameter.p)
    print("Cone with a "+str(parameter.p)+" degree angle")
    print("Line passing through the point ("+str(parameter.xp)+", "+str(parameter.yp)+", "+str(parameter.zp)+") and parallel to the vector ("+str(parameter.xv)+", "+str(parameter.yv)+", "+str(parameter.zv)+")")
    a = float(pow(parameter.xv, 2) + pow(parameter.yv, 2) - pow(parameter.zv, 2) * pow(math.tan(rad), 2))
    b = float( 2 * (parameter.xv * parameter.xp + parameter.yv * parameter.yp - pow(math.tan(rad),2) * parameter.zp*parameter.zv))
    c = float(pow(parameter.xp,2) + pow(parameter.yp, 2) - pow(math.tan(rad),2) * pow(parameter.zp, 2))
    descriminant = float(pow(b, 2) - 4 * a * c)
    if descriminant < 0 :
        print("No intersection point.")
    if descriminant == 0 :
        try:
            t1 = float(- (b / (2 * a)))
        except ZeroDivisionError :
            # a == 0: the line is parallel to a generator of the cone.
            t1 = 1
            infiny = 1
        x = float(parameter.xp + parameter.xv * t1)
        y = float(parameter.yp + parameter.yv * t1)
        z = float(parameter.zp + parameter.zv * t1)
        if infiny == 0 :
            print("1 intersection point:")
            print('(%.3f,' %float(x), '%.3f,' %float(y), '%.3f)' %float(z))
        else:
            print("There is an infinite number of intersection points.")
    if descriminant > 0 :
        t1 = float((-b - math.sqrt(descriminant) ) / (2 * a))
        t2 = float((-b + math.sqrt(descriminant) ) / (2 * a))
        x1 = float(parameter.xp + parameter.xv * t1)
        y1 = float(parameter.yp + parameter.yv * t1)
        z1 = float(parameter.zp + parameter.zv * t1)
        x2 = float(parameter.xp + parameter.xv * t2)
        y2 = float(parameter.yp + parameter.yv * t2)
        z2 = float(parameter.zp + parameter.zv * t2)
        print("2 intersection points:")
        print('(%.3f,' %float(x2), '%.3f,' %float(y2), '%.3f)' %float(z2))
        print('(%.3f,' %float(x1), '%.3f,' %float(y1), '%.3f)' %float(z1))
def apply_args() :
    """Copy the eight (already validated) CLI values into `parameter`."""
    (parameter.opt, parameter.xp, parameter.yp, parameter.zp,
     parameter.xv, parameter.yv, parameter.zv, parameter.p) = (
        int(arg) for arg in sys.argv[1:9])
def main():
    """Drive the program: usage, validation, parsing, then dispatch on opt."""
    help()
    check_args()
    apply_args()
    # A null direction vector describes no line at all.
    if parameter.xv == 0 and parameter.yv == 0 and parameter.zv == 0 :
        sys.exit(84)
    surface = {1: sphere, 2: cylinder, 3: cone}.get(parameter.opt)
    if surface is not None :
        surface()
if __name__ == "__main__" :
main() |
7,758 | 0f6512bb734336a67eab2f13949dd960f5ffc1d5 | from arma_scipy.fit import fit, predict
|
7,759 | ee47b60274ed2eb53a05203e0086d7815bcaaa6e | # -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import TfidfVectorizer
import sentimentAnalysis as sA
import sys
import os
import numpy as np
from sklearn import decomposition
from gensim import corpora, models
# Optional CLI argument chooses a search keyword (default 'data').
# NOTE(review): `keyword` is never used below -- confirm whether it was meant
# to replace the hard-coded "trump"/"hillary"/"obama" queries.
if len(sys.argv) > 1:
    keyword = sys.argv[1]
else:
    keyword = 'data'
locationList = ["","Dallas","NY","SF","LA","Chicago","Washington","Atlanta"]
#Calculating the highest positive and negative comments for all locations and without any location constraint
for location in locationList:
    # Each call returns (polarity, subjectivity) aggregates for that query/location.
    resultPolarityTrump, resultSubjectivityTrump = sA.main("trump",location)
    resultPolarityHillary, resultSubjectivityHillary = sA.main("hillary",location)
    resultPolarityObama, resultSubjectivityObama = sA.main("obama",location)
    print("Trump:",resultPolarityTrump, resultSubjectivityTrump)
    print("Hillary:",resultPolarityHillary, resultSubjectivityHillary)
    print("Obama:",resultPolarityObama, resultSubjectivityObama)
    # Candidate with the highest polarity; ties fall through to "Hillary".
    if resultPolarityObama > resultPolarityTrump and resultPolarityObama > resultPolarityHillary:
        highestPol = "Obama"#resultPolarityObama
    elif resultPolarityTrump > resultPolarityObama and resultPolarityTrump > resultPolarityHillary:
        highestPol = "Trump"#resultPolarityTrump
    else:
        highestPol = "Hillary"#resultPolarityHillary
    # Candidate with the highest subjectivity; same tie behaviour.
    if resultSubjectivityObama > resultSubjectivityTrump and resultSubjectivityObama > resultSubjectivityHillary:
        highestSub = "Obama"#resultSubjectivityObama
    elif resultSubjectivityTrump > resultSubjectivityObama and resultSubjectivityTrump > resultSubjectivityHillary:
        highestSub = "Trump"#resultSubjectivityTrump
    else:
        highestSub = "Hillary"#resultSubjectivityHillary
    print("{} has highest positive comments.".format(highestPol))
    # NOTE(review): this reports the *subjectivity* winner as "highest negative
    # comments" -- subjectivity is not negativity; confirm intent.
    print("{} has highest negative comments.".format(highestSub))
#JSON Dataset that has tweets
corpus=['tweet_stream_hillary.json','tweet_stream_obama.json','tweet_stream_trump.json']
#Topic Analysis, LDA
# NOTE(review): the `corpus` list above is immediately overwritten below --
# confirm whether those JSON files were meant to be read instead.
fname=[]
corpus=[]
docs=[]
corpus_root='Corpus Data'
# Read every file under the corpus root: full text for TF-IDF, token list for gensim.
for filename in os.listdir(corpus_root):
    file = open(os.path.join(corpus_root, filename), "r")
    doc = file.read()
    words=doc.split()
    file.close()
    fname.append(filename)
    corpus.append(doc)
    docs.append(words)
# TF-IDF document-term matrix; terms must appear in at least 2 documents.
vectorizer = TfidfVectorizer(stop_words='english', min_df=2)
dtm = vectorizer.fit_transform(corpus)
vocab = vectorizer.get_feature_names()
num_topics=3
num_top_words=10
# Non-negative matrix factorisation into `num_topics` topics.
# NOTE: the statement-form prints below make this file Python 2 only.
clf = decomposition.NMF(n_components=num_topics, random_state=1)
doctopic = clf.fit_transform(dtm)
print num_topics, clf.reconstruction_err_
topic_words = []
# Keep the highest-weighted vocabulary terms for each NMF topic.
for topic in clf.components_:
    word_idx = np.argsort(topic)[::-1][0:num_top_words]
    topic_words.append([vocab[i] for i in word_idx])
for t in range(len(topic_words)):
    print "Topic {}: {}".format(t, ' '.join(topic_words[t][:15]))
# Gensim LDA trained on the TF-IDF-weighted bag-of-words corpus.
dic = corpora.Dictionary(docs)
corp = [dic.doc2bow(text) for text in docs]
tfidf = models.TfidfModel(corp)
corpus_tfidf = tfidf[corp]
model = models.ldamodel.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dic, update_every=1, passes=100)
print("LDA model")
topics_found = model.print_topics(20)
counter = 1
for t in topics_found:
    print("Topic #{} {}".format(counter, t))
    counter += 1
topics_found2 = model.print_topics(50)
counter2 = 1
for t in topics_found2:
    print("Topic #{} {}".format(counter2, t))
    counter2 += 1
|
7,760 | f507fbe7c92134c0a7149aafe7de88debebd42f5 | #파이썬 심화
#클래스 메소드, 인스턴스 메소드, 스테이틱 메소드
# 기본 인스턴스 메소드
class Student(object):
    """
    Student Class
    Author : Kim
    Date : 2020.11.07
    Description : Class, Static, Instance Method
    """
    # Class variable: multiplier applied to tuition by `raise_fee`.
    tuition_per = 1.0

    def __init__(self, id, first_name, last_name, email, grade, tuition, gpa):
        self._id = id
        self._first_name = first_name
        self._last_name = last_name
        self._email = email
        self._grade = grade
        self._tuition = tuition
        self._gpa = gpa

    # Instance Method
    def full_name(self):
        """Return "first_name last_name"."""
        return '{} {}'.format(self._first_name, self._last_name)

    # Instance Method
    def detail_info(self):
        """Return a one-line summary of every stored field."""
        return 'Student Detail Info : {},{},{},{},{},{}'.format(self._id, self.full_name(), self._email, self._grade, self._tuition, self._gpa)

    # Instance Method
    def get_fee(self):
        """Tuition before the class-wide multiplier is applied."""
        return 'Befor Tuition -> Id: {}, fee: {}'.format(self._id, self._tuition)

    # Instance Method
    def get_fee_culc(self):
        """Tuition after applying the class-wide multiplier."""
        return 'After tuition -> Id: {}, fee: {}'.format(self._id, self._tuition*Student.tuition_per)

    def __str__(self):
        return 'Student Info - > name: {} grade: {} email: {}'.format(self.full_name(), self._grade, self._email)

    # Class Method
    @classmethod
    def raise_fee(cls, per):
        """Set the tuition multiplier; factors <= 1 are rejected.

        BUGFIX: the original printed the rejection warning but applied the
        factor anyway (and printed success); now an invalid factor leaves
        `tuition_per` unchanged.
        """
        if per <= 1:
            print('Please Enter 1 or More')
            return
        cls.tuition_per = per
        print('Succed! tuiton increased')

    # Class Method
    @classmethod
    def student_const(cls, id, first_name, last_name, email, grade, tuition, gpa):
        """Alternate constructor: applies the current multiplier to tuition."""
        return cls(id, first_name, last_name, email, grade, tuition * cls.tuition_per, gpa)

    # Static Method
    @staticmethod
    def is_scholarship_st(inst):
        """Scholarship check for any object exposing _gpa and _last_name."""
        if inst._gpa >= 4.3:
            return '{} is a scholarship recipient.'.format(inst._last_name)
        return "Sorry. Not a scholarship recipient"
# Student instances
student_1 = Student(1, "Kim", 'Sarang', 'student1@naver.com', '1', 400, 3.5)
student_2 = Student(2, "Lee", 'Myungho', 'student2@daum.net', '2', 500, 4.3)
# Basic info (uses __str__)
print(student_1)
print(student_2)
print()
# Full details
print(student_1.detail_info())
print(student_2.detail_info())
# Tuition info (before the raise)
print(student_1.get_fee())
print(student_2.get_fee())
# Raising tuition WITHOUT the class method (kept for comparison):
# Student.tuition_per = 1.2
# Raising tuition WITH the class method
Student.raise_fee(1.5)
# Tuition info (after the raise)
print(student_1.get_fee_culc())
print(student_2.get_fee_culc())
# Creating instances via the class-method alternate constructor
student_3 = Student.student_const(3, 'Park', 'Minji', 'Student3@gmail.com', '3', 550, 4.5)
student_4 = Student.student_const(4, 'Cho', 'Sunghan', 'Student4@gmail.com', '4', 6000, 4.1)
# Full details
print(student_3.detail_info())
print(student_4.detail_info())
print()
# Verify the adjusted tuition on the new students
print(student_3._tuition)
print(student_4._tuition)
print()
# Scholarship check WITHOUT the static method (plain-function version)
def is_scholarship(inst):
    """Return the scholarship message for any object with _gpa/_last_name."""
    if inst._gpa < 4.3:
        return "Sorry. Not a scholarship recipient"
    return '{} is a scholarship recipient.'.format(inst._last_name)
print(is_scholarship(student_1))
print(is_scholarship(student_2))
print(is_scholarship(student_3))
print(is_scholarship(student_4))
print()
# Scholarship check WITH the static method (via the class ...)
print(Student.is_scholarship_st(student_1))
print(Student.is_scholarship_st(student_2))
print(Student.is_scholarship_st(student_3))
print(Student.is_scholarship_st(student_4))
print()
# ... and via an instance (static methods are reachable either way)
print(student_1.is_scholarship_st(student_1))
print(student_2.is_scholarship_st(student_2))
print(student_3.is_scholarship_st(student_3))
print(student_4.is_scholarship_st(student_4))
7,761 | 9dde8e5fd0e83860ee86cf5402ab6eeb5b07ab2c | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qtGSD_DESIGN.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Qt4/Qt5 compatibility shims emitted by pyuic4: fall back to an identity
# conversion / two-argument translate when the PyQt build lacks the older APIs.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # Modern PyQt has no QString; plain str passes through unchanged.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(661, 728)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(100, 100))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
MainWindow.setFont(font)
MainWindow.setAutoFillBackground(False)
MainWindow.setTabShape(QtGui.QTabWidget.Rounded)
self.centralwidget = QtGui.QWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setMinimumSize(QtCore.QSize(100, 100))
self.centralwidget.setMaximumSize(QtCore.QSize(1000, 1000))
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox_3 = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
self.groupBox_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.groupBox_3.setFont(font)
self.groupBox_3.setLayoutDirection(QtCore.Qt.LeftToRight)
self.groupBox_3.setAutoFillBackground(True)
self.groupBox_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetNoConstraint)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.MainVertLayout = QtGui.QVBoxLayout()
self.MainVertLayout.setSizeConstraint(QtGui.QLayout.SetMaximumSize)
self.MainVertLayout.setContentsMargins(-1, 10, -1, -1)
self.MainVertLayout.setSpacing(0)
self.MainVertLayout.setObjectName(_fromUtf8("MainVertLayout"))
self.gridLayout.addLayout(self.MainVertLayout, 0, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_3)
self.groupBox = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setMinimumSize(QtCore.QSize(100, 200))
font = QtGui.QFont()
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.groupBox.setFont(font)
self.groupBox.setWhatsThis(_fromUtf8(""))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.horizontalLayoutWidget_6 = QtGui.QWidget(self.groupBox)
self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(17, 101, 594, 32))
self.horizontalLayoutWidget_6.setObjectName(_fromUtf8("horizontalLayoutWidget_6"))
self.horizontalLayout_9 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_6)
self.horizontalLayout_9.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.label_7 = QtGui.QLabel(self.horizontalLayoutWidget_6)
self.label_7.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout_9.addWidget(self.label_7)
self.browserButton_7 = QtGui.QPushButton(self.horizontalLayoutWidget_6)
self.browserButton_7.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.browserButton_7.sizePolicy().hasHeightForWidth())
self.browserButton_7.setSizePolicy(sizePolicy)
self.browserButton_7.setMinimumSize(QtCore.QSize(80, 15))
self.browserButton_7.setObjectName(_fromUtf8("browserButton_7"))
self.horizontalLayout_9.addWidget(self.browserButton_7)
self.filePathEdit_7 = QtGui.QLineEdit(self.horizontalLayoutWidget_6)
self.filePathEdit_7.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.filePathEdit_7.sizePolicy().hasHeightForWidth())
self.filePathEdit_7.setSizePolicy(sizePolicy)
self.filePathEdit_7.setMinimumSize(QtCore.QSize(400, 30))
self.filePathEdit_7.setObjectName(_fromUtf8("filePathEdit_7"))
self.horizontalLayout_9.addWidget(self.filePathEdit_7)
self.horizontalLayoutWidget_9 = QtGui.QWidget(self.groupBox)
self.horizontalLayoutWidget_9.setGeometry(QtCore.QRect(17, 25, 592, 32))
self.horizontalLayoutWidget_9.setObjectName(_fromUtf8("horizontalLayoutWidget_9"))
self.horizontalLayout_10 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_9)
self.horizontalLayout_10.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.label_11 = QtGui.QLabel(self.horizontalLayoutWidget_9)
self.label_11.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_11.setFont(font)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.horizontalLayout_10.addWidget(self.label_11)
self.browserButton = QtGui.QPushButton(self.horizontalLayoutWidget_9)
self.browserButton.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.browserButton.sizePolicy().hasHeightForWidth())
self.browserButton.setSizePolicy(sizePolicy)
self.browserButton.setMinimumSize(QtCore.QSize(80, 15))
self.browserButton.setObjectName(_fromUtf8("browserButton"))
self.horizontalLayout_10.addWidget(self.browserButton)
self.filePathEdit_3 = QtGui.QLineEdit(self.horizontalLayoutWidget_9)
self.filePathEdit_3.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.filePathEdit_3.sizePolicy().hasHeightForWidth())
self.filePathEdit_3.setSizePolicy(sizePolicy)
self.filePathEdit_3.setMinimumSize(QtCore.QSize(400, 30))
self.filePathEdit_3.setObjectName(_fromUtf8("filePathEdit_3"))
self.horizontalLayout_10.addWidget(self.filePathEdit_3)
self.horizontalLayoutWidget_10 = QtGui.QWidget(self.groupBox)
self.horizontalLayoutWidget_10.setGeometry(QtCore.QRect(17, 63, 592, 32))
self.horizontalLayoutWidget_10.setObjectName(_fromUtf8("horizontalLayoutWidget_10"))
self.horizontalLayout_11 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_10)
self.horizontalLayout_11.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.label_12 = QtGui.QLabel(self.horizontalLayoutWidget_10)
self.label_12.setMinimumSize(QtCore.QSize(0, 30))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_12.setFont(font)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.horizontalLayout_11.addWidget(self.label_12)
self.browserButton_3 = QtGui.QPushButton(self.horizontalLayoutWidget_10)
self.browserButton_3.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.browserButton_3.sizePolicy().hasHeightForWidth())
self.browserButton_3.setSizePolicy(sizePolicy)
self.browserButton_3.setMinimumSize(QtCore.QSize(80, 15))
self.browserButton_3.setObjectName(_fromUtf8("browserButton_3"))
self.horizontalLayout_11.addWidget(self.browserButton_3)
self.filePathEdit_5 = QtGui.QLineEdit(self.horizontalLayoutWidget_10)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.filePathEdit_5.sizePolicy().hasHeightForWidth())
self.filePathEdit_5.setSizePolicy(sizePolicy)
self.filePathEdit_5.setMinimumSize(QtCore.QSize(400, 30))
self.filePathEdit_5.setObjectName(_fromUtf8("filePathEdit_5"))
self.horizontalLayout_11.addWidget(self.filePathEdit_5)
self.runButton = QtGui.QPushButton(self.groupBox)
self.runButton.setGeometry(QtCore.QRect(520, 140, 85, 50))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.runButton.sizePolicy().hasHeightForWidth())
self.runButton.setSizePolicy(sizePolicy)
self.runButton.setMinimumSize(QtCore.QSize(80, 50))
self.runButton.setIconSize(QtCore.QSize(24, 24))
self.runButton.setObjectName(_fromUtf8("runButton"))
self.verticalLayout.addWidget(self.groupBox)
self.groupBox_2 = QtGui.QGroupBox(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
self.groupBox_2.setSizePolicy(sizePolicy)
self.groupBox_2.setMinimumSize(QtCore.QSize(100, 225))
font = QtGui.QFont()
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
self.groupBox_2.setFont(font)
self.groupBox_2.setAutoFillBackground(True)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.horizontalLayoutWidget_4 = QtGui.QWidget(self.groupBox_2)
self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(17, 25, 591, 32))
self.horizontalLayoutWidget_4.setObjectName(_fromUtf8("horizontalLayoutWidget_4"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_4)
self.horizontalLayout_6.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.label_3 = QtGui.QLabel(self.horizontalLayoutWidget_4)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_6.addWidget(self.label_3)
self.browserButton_4 = QtGui.QPushButton(self.horizontalLayoutWidget_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.browserButton_4.sizePolicy().hasHeightForWidth())
self.browserButton_4.setSizePolicy(sizePolicy)
self.browserButton_4.setObjectName(_fromUtf8("browserButton_4"))
self.horizontalLayout_6.addWidget(self.browserButton_4)
self.loadButton = QtGui.QPushButton(self.horizontalLayoutWidget_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.loadButton.sizePolicy().hasHeightForWidth())
self.loadButton.setSizePolicy(sizePolicy)
self.loadButton.setObjectName(_fromUtf8("loadButton"))
self.horizontalLayout_6.addWidget(self.loadButton)
self.filePathEdit_4 = QtGui.QLineEdit(self.horizontalLayoutWidget_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.filePathEdit_4.sizePolicy().hasHeightForWidth())
self.filePathEdit_4.setSizePolicy(sizePolicy)
self.filePathEdit_4.setMinimumSize(QtCore.QSize(300, 30))
self.filePathEdit_4.setObjectName(_fromUtf8("filePathEdit_4"))
self.horizontalLayout_6.addWidget(self.filePathEdit_4)
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.groupBox_2)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(17, 63, 481, 29))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.horizontalLayout_2.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_4 = QtGui.QLabel(self.horizontalLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
self.label_4.setSizePolicy(sizePolicy)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_2.addWidget(self.label_4)
self.comboBox = QtGui.QComboBox(self.horizontalLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox.sizePolicy().hasHeightForWidth())
self.comboBox.setSizePolicy(sizePolicy)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.horizontalLayout_2.addWidget(self.comboBox)
self.plotButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotButton.sizePolicy().hasHeightForWidth())
self.plotButton.setSizePolicy(sizePolicy)
self.plotButton.setObjectName(_fromUtf8("plotButton"))
self.horizontalLayout_2.addWidget(self.plotButton)
self.horizontalLayoutWidget_3 = QtGui.QWidget(self.groupBox_2)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(17, 100, 481, 29))
self.horizontalLayoutWidget_3.setObjectName(_fromUtf8("horizontalLayoutWidget_3"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_3)
self.horizontalLayout_3.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_5 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_3.addWidget(self.label_5)
self.comboBox_R1 = QtGui.QComboBox(self.horizontalLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_R1.sizePolicy().hasHeightForWidth())
self.comboBox_R1.setSizePolicy(sizePolicy)
self.comboBox_R1.setMinimumSize(QtCore.QSize(0, 0))
self.comboBox_R1.setObjectName(_fromUtf8("comboBox_R1"))
self.horizontalLayout_3.addWidget(self.comboBox_R1)
self.label = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.label.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QtCore.QSize(30, 0))
self.label.setMaximumSize(QtCore.QSize(30, 29))
font = QtGui.QFont()
font.setBold(True)
font.setItalic(True)
font.setUnderline(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setAutoFillBackground(True)
self.label.setLineWidth(5)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_3.addWidget(self.label)
self.comboBox_R2 = QtGui.QComboBox(self.horizontalLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_R2.sizePolicy().hasHeightForWidth())
self.comboBox_R2.setSizePolicy(sizePolicy)
self.comboBox_R2.setObjectName(_fromUtf8("comboBox_R2"))
self.horizontalLayout_3.addWidget(self.comboBox_R2)
self.plotRangeButton = QtGui.QPushButton(self.horizontalLayoutWidget_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotRangeButton.sizePolicy().hasHeightForWidth())
self.plotRangeButton.setSizePolicy(sizePolicy)
self.plotRangeButton.setObjectName(_fromUtf8("plotRangeButton"))
self.horizontalLayout_3.addWidget(self.plotRangeButton)
self.horizontalLayoutWidget_7 = QtGui.QWidget(self.groupBox_2)
self.horizontalLayoutWidget_7.setGeometry(QtCore.QRect(17, 140, 481, 29))
self.horizontalLayoutWidget_7.setObjectName(_fromUtf8("horizontalLayoutWidget_7"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_7)
self.horizontalLayout_5.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_8 = QtGui.QLabel(self.horizontalLayoutWidget_7)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.horizontalLayout_5.addWidget(self.label_8)
self.comboBox_2 = QtGui.QComboBox(self.horizontalLayoutWidget_7)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBox_2.sizePolicy().hasHeightForWidth())
self.comboBox_2.setSizePolicy(sizePolicy)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.horizontalLayout_5.addWidget(self.comboBox_2)
self.checkBox = QtGui.QCheckBox(self.horizontalLayoutWidget_7)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.checkBox.sizePolicy().hasHeightForWidth())
self.checkBox.setSizePolicy(sizePolicy)
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.horizontalLayout_5.addWidget(self.checkBox)
self.plotImageButton = QtGui.QPushButton(self.horizontalLayoutWidget_7)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotImageButton.sizePolicy().hasHeightForWidth())
self.plotImageButton.setSizePolicy(sizePolicy)
self.plotImageButton.setMinimumSize(QtCore.QSize(80, 15))
self.plotImageButton.setObjectName(_fromUtf8("plotImageButton"))
self.horizontalLayout_5.addWidget(self.plotImageButton)
self.clearPlotButton = QtGui.QPushButton(self.groupBox_2)
self.clearPlotButton.setGeometry(QtCore.QRect(520, 140, 85, 50))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.clearPlotButton.sizePolicy().hasHeightForWidth())
self.clearPlotButton.setSizePolicy(sizePolicy)
self.clearPlotButton.setMinimumSize(QtCore.QSize(80, 50))
self.clearPlotButton.setObjectName(_fromUtf8("clearPlotButton"))
self.horizontalLayoutWidget = QtGui.QWidget(self.groupBox_2)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(17, 180, 481, 29))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_9 = QtGui.QLabel(self.horizontalLayoutWidget)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.horizontalLayout.addWidget(self.label_9)
self.pushButton = QtGui.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy)
self.pushButton.setMinimumSize(QtCore.QSize(80, 15))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout.addWidget(self.pushButton)
self.plotStripsButton = QtGui.QPushButton(self.horizontalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotStripsButton.sizePolicy().hasHeightForWidth())
self.plotStripsButton.setSizePolicy(sizePolicy)
self.plotStripsButton.setMinimumSize(QtCore.QSize(80, 15))
self.plotStripsButton.setObjectName(_fromUtf8("plotStripsButton"))
self.horizontalLayout.addWidget(self.plotStripsButton)
self.verticalLayout.addWidget(self.groupBox_2)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.menuBar = QtGui.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 661, 22))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
MainWindow.setMenuBar(self.menuBar)
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setVisible(True)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Apply display text to every widget of the main window.

    Auto-generated by pyuic from the Qt Designer .ui file — regenerate
    instead of hand-editing. All strings go through ``_translate`` so they
    can be localised.
    """
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
    self.groupBox_3.setTitle(_translate("MainWindow", "PLOT WINDOW", None))
    self.groupBox.setTitle(_translate("MainWindow", " BINARY DATA CONVERSION", None))
    # File pickers for the conversion group; default paths point at the
    # developer's local working directory.
    self.label_7.setText(_translate("MainWindow", " OUTPUT FILE ", None))
    self.browserButton_7.setText(_translate("MainWindow", "Browse", None))
    self.filePathEdit_7.setText(_translate("MainWindow", "D:\\detdev\\maia\\python\\", None))
    self.label_11.setText(_translate("MainWindow", " DECODER FILE", None))
    self.browserButton.setText(_translate("MainWindow", "Browse", None))
    self.filePathEdit_3.setText(_translate("MainWindow", "D:\\detdev\\maia\\python\\qtGSD\\gsd_parse.out", None))
    self.label_12.setText(_translate("MainWindow", " INPUT FILE ", None))
    self.browserButton_3.setText(_translate("MainWindow", "Browse", None))
    self.filePathEdit_5.setText(_translate("MainWindow", "D:\\detdev\\maia\\python\\", None))
    self.runButton.setText(_translate("MainWindow", "Run", None))
    # Plotting group: data file loader plus the various plot actions.
    self.groupBox_2.setTitle(_translate("MainWindow", " PLOT DATA", None))
    self.label_3.setText(_translate("MainWindow", " DATA FILE ", None))
    self.browserButton_4.setText(_translate("MainWindow", "Browse", None))
    self.loadButton.setText(_translate("MainWindow", "Load", None))
    self.filePathEdit_4.setText(_translate("MainWindow", "D:\\detdev\\maia\\python\\", None))
    self.label_4.setText(_translate("MainWindow", " PLOT STRIP ", None))
    self.plotButton.setText(_translate("MainWindow", "Plot", None))
    self.label_5.setText(_translate("MainWindow", " PLOT SERIES", None))
    self.label.setText(_translate("MainWindow", "to", None))
    self.plotRangeButton.setText(_translate("MainWindow", "Plot", None))
    self.label_8.setText(_translate("MainWindow", " PLOT SPECTRA AS 2-D IMAGE", None))
    self.checkBox.setText(_translate("MainWindow", "Log", None))
    self.plotImageButton.setText(_translate("MainWindow", "Plot Image", None))
    self.clearPlotButton.setText(_translate("MainWindow", "Clear", None))
    self.label_9.setText(_translate("MainWindow", " PLOT TOTAL COUNTS vs STRIP", None))
    self.pushButton.setText(_translate("MainWindow", "Update", None))
    self.plotStripsButton.setText(_translate("MainWindow", "Plot Strips", None))
    self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
    self.actionOpen.setText(_translate("MainWindow", "Open", None))
    self.actionQuit.setText(_translate("MainWindow", "Quit", None))
|
7,762 | cd175c236dd1d1c7387a21a491e80d6723f161dc | import json
import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET
import ssl
# Fetch a JSON document of comments from a user-supplied URL and print the
# sum of the 'count' field across all comments.

# SSL context that skips certificate verification (allows self-signed hosts).
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

total = list()
url = input('Enter Location: ')  # user-supplied URL
print('Retrieving: ', url)
uh = urllib.request.urlopen(url, context=ctx)
data = uh.read()
print('Retrieved', len(data), 'characters')

info = json.loads(data)
print('User count:', len(info['comments']))
for entry in info['comments']:
    total.append(int(entry['count']))
print('Sum :', sum(total))
|
7,763 | 33c0efb47e3253442b6a808c7ebffac275c19321 | from pylab import *
import pandas as pd
from matplotlib import pyplot
import pylab
from mpl_toolkits.mplot3d import Axes3D
from threading import Thread
from threading import Semaphore
from threading import Lock
from Queue import Queue
# Shared synchronisation primitives and the bounded hand-off queue between
# the per-driver producer (myFunc) and the CSV-writer thread (worker).
sam = Semaphore(1)  # currently unused — acquire/release calls are commented out below
lck = Lock()        # currently unused
q=Queue(10)
def myFunc(z):
    # Compute three per-trip distance features for driver `z` (trips 1..200),
    # z-score them across the 200 trips, and flag trips whose feature-vector
    # norm falls inside the densest gap of the cumulative histogram as
    # "normal" (prob 0) and all others as outliers (prob 1). Driver 1 writes a
    # fresh CSV; every other driver enqueues its DataFrame for the writer thread.
    #if z%2==0 and z>1:
    max_dist=[]     # furthest point from the trip origin, per trip
    cumm_dist=[]    # total path length travelled, per trip
    endpt_dist=[]   # straight-line distance of the trip's final point, per trip
    passible=True   # flips to False if any trip file fails to load/parse
    for i in range(1,201):
        try:
            vecs=pd.read_csv("/Users/vmac/Downloads/drivers/"+str(z)+"/"+str(i)+".csv")
            #print "proceed"
            #ax.plot(vecs['x'], vecs['y'], color=colors[i%6], lw=1)
            max_dist.append(0.0)
            cumm_dist.append(0.0)
            endpt_dist.append(sqrt(vecs['y'][len(vecs)-1]*vecs['y'][len(vecs)-1]+vecs['x'][len(vecs)-1]*vecs['x'][len(vecs)-1]))
            for j in range(0,len(vecs)):
                # distance of point j from the origin
                local_dist=sqrt(vecs['y'][j]*vecs['y'][j]+vecs['x'][j]*vecs['x'][j])
                if j==0:
                    incr_dist=sqrt(vecs['y'][j]*vecs['y'][j]+vecs['x'][j]*vecs['x'][j])
                else:
                    # step length between consecutive samples
                    incr_dist=sqrt((vecs['y'][j]-vecs['y'][j-1])*(vecs['y'][j]-vecs['y'][j-1])+(vecs['x'][j]-vecs['x'][j-1])*(vecs['x'][j]-vecs['x'][j-1]))
                if max_dist[i-1]<local_dist:
                    max_dist[i-1]=local_dist
                cumm_dist[i-1]+=incr_dist
        except Exception, e:
            # any unreadable trip file disables output for this driver
            passible=False
            print e
    if passible==True:
        #prob_vals = hist(max_dist, bins=10,cumulative=True,normed=True)
        # z-score each feature across the 200 trips
        mean_max_dist=mean(max_dist)
        std_max_dist=std(max_dist)
        #print std_max_dist
        max_dist=(max_dist-mean_max_dist)/std_max_dist
        mean_cumm_dist=mean(cumm_dist)
        std_cumm_dist=std(cumm_dist)
        cumm_dist=(cumm_dist-mean_cumm_dist)/std_cumm_dist
        mean_endpt_dist=mean(endpt_dist)
        std_endpt_dist=std(endpt_dist)
        endpt_dist=(endpt_dist-mean_endpt_dist)/std_endpt_dist
        # Euclidean norm of the standardized 3-feature vector per trip
        the_norms=np.sqrt(np.square(max_dist)+np.square(cumm_dist)+np.square(endpt_dist))
        #mean_norms=mean(the_norms)
        #median_norms=median(the_norms)
        #print mean(the_norms),median(the_norms)
        #fig = plt.figure()
        #ax = Axes3D(fig)
        #plt.hist(the_norms)
        #ax.scatter(max_dist, cumm_dist, endpt_dist)
        #plt.show()
        prob_vals=hist(the_norms, bins=10, cumulative=True,normed=True)
        #print max_dist
        #exit(0)
        #print prob_vals[0], prob_vals[1]
        #approach 1: calculate max distance from origin for each path, use histogram to check for anomalies gap
        #approach 2: calculate max_dist, cumulative dist, and endpoint distance,make z score,
        # use histogram to check for anomalies gap
        # Bracket the bin range where the cumulative probability plateaus
        # (successive increase < 0.1): norms inside [threshold1, threshold2]
        # are treated as the normal cluster.
        threshold1=0
        threshold2=0
        for i in range(0,len(prob_vals[0])-1):
            #print prob_vals[0][i+1]-prob_vals[0][i]
            if prob_vals[0][i+1]-prob_vals[0][i]< 0.1:
                if threshold1==0:
                    threshold1=prob_vals[1][i+1]
                    threshold2=prob_vals[1][i+1]
                else:
                    threshold2=prob_vals[1][i+1]
        if z%1==0:
            # Label each trip: 0 = inside the dense band, 1 = outlier.
            bin_classes=[]
            for i in range(0,len(max_dist)):
                if the_norms[i]>=threshold1 and the_norms[i]<=threshold2:
                    bin_classes.append({'driver_trip':str(z)+"_"+str(i+1),'prob':0})
                else:
                    bin_classes.append({'driver_trip':str(z)+"_"+str(i+1),'prob':1})
            #print bin_classes
            #print "success?"
            outpt=pd.DataFrame(data=bin_classes)
            #print "success2 "
            if z==1:
                # first driver creates the file (with header)
                outpt.to_csv(path_or_buf="/Users/vmac/PycharmProjects/kaggle-axa-driver-telematics/sampleOutz2.csv", index=False)
            else:
                # later drivers hand off to the writer thread
                q.put(outpt)
            #print "queue size ",q.qsize()
            #with open("/Users/vmac/PycharmProjects/kaggle-axa-driver-telematics/sampleOutz2.csv",'a') as f:
            #outpt.to_csv(f,header=False, index=False)
            #f.close()
            print "success ",z
    #sam.release()
def worker():
    # Daemon consumer loop: drains result DataFrames from the shared queue
    # and appends them (without headers) to the already-open CSV handle `f`.
    # Runs forever; the process exits because the thread is marked daemon.
    while True:
        item = q.get()
        item.to_csv(f, header=False, index=False)
        q.task_done()
        #print "queue size ",q.qsize()
bin_classes=[]
f=[]  # replaced with the open CSV handle once the writer thread starts
for z in range(2762,3613):#up to 3613
    #print "iteration ",z
    if z == 168:
        # NOTE(review): z never equals 168 in range(2762, 3613), so the
        # writer thread below is never started on this run — confirm intent.
        myFunc(z)
        f = open("/Users/vmac/PycharmProjects/kaggle-axa-driver-telematics/sampleOutz2.csv",'a')
        t=Thread(target=worker)
        t.daemon=True
        t.start()
    else:
        myFunc(z)
        #sam.acquire()
        #t=Thread(target=myFunc,args=(z,))
        #t.start()
7,764 | 512a13084a860e2784020664a3d5824d9dace6db | from django.db import models
import django.utils.timezone as timezone
# Create your models here.
# Create your models here.
class Categories(models.Model):
    # Article category; single-level hierarchy via the self-referential
    # `parent` FK (default 0 — presumably "top level"; confirm against usage).
    name = models.CharField(max_length=200, verbose_name = "分类名称")
    parent = models.ForeignKey('self', default=0, on_delete=models.DO_NOTHING, null = True, blank = True, verbose_name = "上级分类")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    def __str__(self):
        return self.name
    class Meta:
        verbose_name_plural = '分类管理'
class Wordroots(models.Model):
    # Scraped seed keyword ("word root") queued for collection.
    SHIRT_SIZES = (
        (0, '否'),  # 0 = not collected yet
        (1, '是'),  # 1 = collected
    )
    name = models.CharField(max_length=255, verbose_name = "词语")
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name = "是否采集")
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name = "分类名称")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '词库管理'
class Articles(models.Model):
    # Article assembled from the scraped material (Q&A, Baike, image, video)
    # for one word root, plus SEO metadata and display counters.
    wordroot = models.CharField(max_length=255, verbose_name = "词根")
    title = models.CharField(max_length=255, verbose_name = "标题")
    content = models.TextField(null = True, blank = True, verbose_name = "内容组合")
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name = "分类名称")
    image = models.ImageField(max_length=255, null = True, blank = True, verbose_name = "图片")
    video = models.FileField(max_length=255, null = True, blank = True, verbose_name = "视频")
    question = models.CharField(max_length=255, null = True, blank = True, verbose_name = "问答问题")
    answer = models.TextField(null = True, blank = True, verbose_name = "问答回答")
    baike_title = models.CharField(max_length=255, null = True, blank = True, verbose_name = "百科标题")
    baike_content = models.TextField(null = True, blank = True, verbose_name = "百科内容")
    seo_keywords = models.CharField(max_length=255, null = True, blank = True, verbose_name = "关键词")
    seo_description = models.CharField(max_length=255, null = True, blank = True, verbose_name = "描述")
    click_count = models.IntegerField(default=0, verbose_name = "点击")
    order = models.IntegerField(default=0, verbose_name = "排序")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '文章管理'
class Baikes(models.Model):
    # Baidu Baike (encyclopedia) scrape result for one word root.
    wordroot_text = models.CharField(max_length=255, verbose_name = "词根")
    wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = "词根id, 可空")
    url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "爬取链接")
    title = models.CharField(max_length=255, null = True, blank = True, verbose_name = "标题")
    content = models.TextField(null = True, blank = True, verbose_name = "内容")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '百度百科'
class Zhidaos(models.Model):
    # Baidu Zhidao (Q&A) scrape result for one word root.
    wordroot_text = models.CharField(max_length=255, verbose_name = "词根")
    wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = "词根id, 可空")
    url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "爬取链接")
    title = models.CharField(max_length=255, null = True, blank = True, verbose_name = "标题")
    content = models.TextField(null = True, blank = True, verbose_name = "内容")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '百度知道'
class Images(models.Model):
    # Baidu image-search scrape result (stores the remote image URL only).
    wordroot_text = models.CharField(max_length=255, verbose_name = "词根")
    wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = "词根id, 可空")
    url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "爬取链接")
    image = models.CharField(max_length=255, null = True, blank = True, verbose_name = "图片链接")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '百度图片'
class Youkus(models.Model):
    # Youku video scrape result (stores the share URL only).
    wordroot_text = models.CharField(max_length=255, verbose_name = "词根")
    wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = "词根id, 可空")
    url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "爬取链接")
    video = models.CharField(max_length=255, null = True, blank = True, verbose_name = "视频分享地址")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '优酷视频'
class Tags(models.Model):
    # Article tag.
    name = models.CharField(max_length=255, verbose_name = "标签名称")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = '标签管理'
class TagMaps(models.Model):
    # Many-to-many join table between Articles and Tags.
    article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)
    tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)
class ScrapyRules(models.Model):
    # Crawl target site plus its extraction rules (selector strings for the
    # search page, title, and content).
    name = models.CharField(max_length = 255, verbose_name = "爬取网站")
    url = models.CharField(max_length = 255, verbose_name = "爬取链接")
    search_rules = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "搜索爬取规则")
    title_rules = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "标题爬取规则")
    content_rules = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "内容爬取规则")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = "链接管理"
class ScrapyLogs(models.Model):
    # Per-URL crawl log entry.
    url = models.CharField(max_length = 255, verbose_name = "爬取链接")
    title = models.CharField(max_length = 255, null = True, blank = True, verbose_name = "页面标题")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = "爬虫日志"
class ScrapyTasks(models.Model):
    # Scheduled crawl task: when to run and whether to re-crawl words that
    # were already collected.
    #
    # FIX: the choice values were previously the *strings* '0'/'1'/…, which
    # do not match the IntegerField columns below (Django stores/compares
    # ints, so validation and form initial values silently mismatched).
    # `Wordroots.SHIRT_SIZES` in this file already uses integers; these now
    # agree with it.
    SHIRT_SIZES = (
        (0, '否'),
        (1, '是'),
    )
    CYCLE_TYPE = (
        (0, '按指定日期时间执行'),
        (1, '按指定时间每天执行'),
        (2, '按指定时间每周执行'),
        (3, '按指定时间每月执行'),
    )
    start_date_at = models.DateField(default = timezone.now, verbose_name = "开始时间")
    start_time_at = models.TimeField(default = timezone.now, verbose_name = "开始时间")
    task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE, verbose_name = "定时方式")
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name = "是否包含已采集")
    created_at = models.DateTimeField(default = timezone.now, verbose_name = "添加日期")
    updated_at = models.DateTimeField(default = timezone.now, verbose_name = "修改日期")
    class Meta:
        verbose_name_plural = "爬虫任务"
|
7,765 | 91f3aae4e74f371cadaf10385510bc1c80063f55 | import sys
import time
def initialize(x: int) -> list:
    """Load a whitespace-separated integer matrix from the file at ``sys.argv[x]``.

    Prints a progress dot every 100 lines and, when done, the elapsed wall
    time in whole seconds. Returns the matrix as a list of row lists.

    Fixes over the original: the meaningless ``object`` annotations are
    replaced with real types; the unused ``rows``/``cols`` locals are removed
    (``len(data[0])`` crashed on an empty file); the timer variable no longer
    claims to hold milliseconds.
    """
    data = []
    start_seconds = int(round(time.time()))  # whole seconds, not millis
    count = 0
    path = sys.argv[x]
    with open(path) as datafile:
        for line in datafile:
            count += 1
            if count % 100 == 0:
                # progress heartbeat for large files
                print(".", sep='', end='', flush=True)
            data.append([int(tok) for tok in line.split()])
    print("time took:", int(round(time.time())) - start_seconds, "seconds")
    return data
|
7,766 | dc5630e17bb6ed85157b06108250427be41416d1 |
def ddm_dd_convert(coord, direction):
    """Convert a $GPGGA coordinate from DDM (degrees + decimal minutes) to DD.

    str coord - the DDM coordinate; the last 7 characters are the minutes
                field ("mm.mmmm"), everything before them the degrees
    str direction - hemisphere letter (N, S, E, W); S and W yield a
                    negative result
    returns - decimal-degrees coordinate as a string
    """
    sign = '-' if direction in ('S', 'W') else ''
    degrees = coord[0:-7]
    minutes = float(coord[-7:])
    # round to 8 decimals and drop the leading '0' so the fraction can be
    # appended directly after the degrees digits
    fraction = str(round(minutes / 60, 8))[1:]
    return sign + degrees + fraction
def gprmc_convert(line):
    """Translate a $GPRMC sentence into a documented array.

    str line - the GPRMC line
    returns - [dd/mm/yyyy date string], or None when the fix is void
    """
    fields = line.strip().split(',')
    # status field 'V' means the receiver has no valid fix
    if fields[2] == 'V':
        return
    raw_date = fields[9]  # ddmmyy
    day = raw_date[0:2]
    month = raw_date[2:4]
    year = raw_date[4:]
    # century is hard-coded; needs updating if this survives to year 2100
    return ['{}/{}/20{}'.format(day, month, year)]
def gpvtg_convert(line):
    """Translate a $GPVTG sentence into a documented array.

    Only used as a ground-speed presence check: no fields are extracted yet.
    str line - the GPVTG line
    returns - [] when the sentence carries a non-zero track, None otherwise
    """
    fields = line.strip().split(',')
    if fields[1] == '0.00':  # stationary / no movement data
        return
    #jsondata = {'Horizontal speed': fields[7] + ' kmph or ' + fields[5] + 'knots'}
    return []
def gpgga_convert(line):
    """Translate a $GPGGA sentence into a documented array.

    str line - the GPGGA line
    returns - [latitude, longitude, fix description] in decimal degrees,
              or None when there is no fix (quality indicator 0)
    """
    gps = line.strip().split(',')
    if gps[6] == '0':  # quality 0 = invalid fix
        return
    # map the remaining quality codes to a human-readable description;
    # unknown codes fall back to an empty string, as before
    fix_names = {
        '1': 'GPS fix',
        '2': 'DGPS fix',
        '4': 'RTK Fix coordinate (centimeter precision)',
        '5': 'RTK Float (decimeter precision)',
    }
    fix = fix_names.get(gps[6], '')
    #utc = gps[1][0:2] + ':' + gps[1][2:4] + ':' + gps[1][4:6]
    lat = ddm_dd_convert(gps[2], gps[3])
    long = ddm_dd_convert(gps[4], gps[5])
    return [lat, long, fix]
def gpgsa_convert(line):
    """Translate a $GPGSA sentence into a documented array.

    str line - the GPGSA line
    returns - ['2D fix'] or ['3D fix'], or None when there is no fix
    """
    fields = line.strip().split(',')
    if fields[2] == '1':  # mode 1 = fix not available
        return
    fix = '2D fix' if fields[2] == '2' else '3D fix'
    return [fix]
7,767 | 6c0a1d4ffd64e0566be53937d9b48975f2530852 | import matplotlib.pyplot as plotOp
import numpy as np
from random import randint
import re as regexOp |
7,768 | d2af2b25a1ba2db93c977a13fe0273919bc2e6e0 | from DataStructures.BST.util import *
def storeInorder(root, inorder):
    """Append the inorder traversal of the tree rooted at *root* to *inorder*."""
    if root is not None:
        storeInorder(root.left, inorder)
        inorder.append(root.data)
        storeInorder(root.right, inorder)
def arrayToBST(arr, root):
    """Overwrite the tree's node values inorder with values consumed from *arr*.

    The array is emptied front-to-back as the traversal visits each node.
    """
    if root is None:
        return
    # left subtree first, so the smallest remaining values land there
    arrayToBST(arr, root.left)
    root.data = arr.pop(0)  # consume the next value for this node
    arrayToBST(arr, root.right)
def binaryTreeToBST(root):
    """Convert a binary tree into a BST in place, keeping the node structure.

    Collects the inorder values, sorts them, then writes them back inorder —
    a sorted inorder sequence is exactly the BST property.
    """
    if root is None:
        return
    values = []
    storeInorder(root, values)
    values.sort()
    arrayToBST(values, root)
if __name__ == '__main__':
    # Build a small unordered binary tree, convert it in place to a BST,
    # and print the inorder traversal (which should come out sorted).
    # Node/inorder come from DataStructures.BST.util.
    root = Node(10)
    root.left = Node(30)
    root.right = Node(15)
    root.left.left = Node(20)
    root.right.right = Node(5)
    binaryTreeToBST(root)
    inorder(root)
|
7,769 | 0d862715524bd35347626e7708c7c8f8b370bb3a | import sklearn
import pandas as pd
import numpy as np
from sklearn import datasets, ensemble
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import statistics as st
import itertools
from sklearn.model_selection import cross_val_score
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from statsmodels import regression as reg
import statsmodels.api as regMods
from scipy.stats import norm
from scipy.stats import gamma
from scipy.stats import expon
from scipy.stats import poisson
from scipy.stats import binom
from scipy.stats import t
import plotly.express as px
import plotly.figure_factory as ff
# Load the heart-disease dataset and make an 80/20 train/test split on the
# five predictors x1..x5 and the binary HeartDisease target.
heart=pd.read_csv(r"C:\Users\fredr\Documents\StatTool\BZAN540\Homework\HW6\HeartDisease.csv")
heart.columns
train, test = train_test_split(heart[['x1', 'x2', 'x3', 'x4', 'x5','HeartDisease']], test_size=0.2)
y_train=train['HeartDisease']
x_train=train[['x1', 'x2', 'x3', 'x4', 'x5']]
x_test=test[['x1', 'x2', 'x3', 'x4', 'x5']]
y_test=test['HeartDisease']
#boosting to predict heart disease
#make expand grid function to get all combos of the parameters
def expandgrid(*itrs):
    """R-style expand.grid: every combination of the given iterables.

    Returns a dict mapping 'Var1', 'Var2', ... to parallel lists, one entry
    per element of the Cartesian product.
    """
    combos = list(itertools.product(*itrs))
    return {'Var%d' % (i + 1): [combo[i] for combo in combos]
            for i in range(len(itrs))}
#set the range for the parameter values:
n_estimators=np.arange(300, 450, 50) #the number of trees to fit
max_depth=np.arange(3, 5, 1)
min_samples_split=np.arange(3,4,1)
learning_rate=np.arange(0.001,0.004,0.001)
# Grid of all hyper-parameter combinations: Var1=n_estimators, Var2=max_depth,
# Var3=min_samples_split, Var4=learning_rate.
a=expandgrid(n_estimators,max_depth, min_samples_split,learning_rate)
params=pd.DataFrame.from_dict(a)
len(params)
#time the code ???
#looping through the possible parameters for the model and store the estimated validation rmse
ValAcc=list(range(0,len(params)))
for i in range(0,len(params)):
    # 4-fold cross-validated accuracy for this hyper-parameter combination
    scores = cross_val_score(HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].iloc[i],
        max_depth=params['Var2'].iloc[i],
        learning_rate=params['Var4'].iloc[i],max_iter=params['Var1'].iloc[i]).fit(x_train, y_train),
        x_train, y_train, cv=4,scoring='accuracy')
    acc=st.mean(scores)
    ValAcc[i]=acc
ValAcc
max(ValAcc)
# Best parameter row plus its validation accuracy.
pars=list(params.iloc[ValAcc==max(ValAcc)].iloc[0])
pars.append(max(ValAcc))
pars
bestPos=np.array(np.where(np.array(ValAcc)==max(ValAcc))).tolist()[0][0]
#fit the best model on Train then predict on Test if mean acc close to val then fit on entire data
bestPos
bestMod=HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].iloc[bestPos],
    max_depth=params['Var2'].iloc[bestPos],
    learning_rate=params['Var4'].iloc[bestPos],max_iter=params['Var1'].iloc[bestPos]).fit(x_train, y_train)
#gets the predicted values on the test data
bestMod.predict(x_test)
len(y_test[bestMod.predict(x_test)==y_test])/len(y_test) #67% acc on test
#create a dataset with one row and each col is a ind var from model fit above, then input data per var to fill df then predict y on the values in this df
df_i=pd.DataFrame({'x1':np.mean(heart['x1']), 'x2':np.mean(heart['x2']),'x3':np.mean(heart['x3']),'x4':np.mean(heart['x4']),'x5':np.mean(heart['x5'])},index=[0])
if(bestMod.predict(df_i)==0):
    print('Predicted: No Heart Disease')
else:
    print('Predicted: Has Heart Disease')
#plot two densities centered on the mean of the var and the selected value of the var for all vars
#start with treating each var as a normal distro then plot a density curve where the
#mean is the mean of the var and another curve on same plot where the mean is the selected value from the input of a normal distro set sd to the sd of the var
#for both in the plots, generate random vars the size of the data, except for history heart disease treat as beta with p=actuap prob for var and p=random value that is
#greater than .5
#generates random values from a normal distro with mean=loc and sd=scale
norm.rvs(size=10000,loc=3,scale=8)
#x1:
x1=190
mean=np.mean(heart['x1'])
sd=np.std(heart['x1'])
meanx1_2=x1
# Two synthetic samples with the same spread: one centred on the data mean,
# one on the user-selected x1 value, overlaid as density plots.
xActual=norm.rvs(size=len(heart),loc=mean,scale=sd)
xInput=norm.rvs(size=len(heart),loc=meanx1_2,scale=sd)
group_labels = ['actual','center_selected']
hist_data=[xActual,xInput]
fig = ff.create_distplot(hist_data,group_labels)
fig.show()
7,770 | 38504dae7b010c2df8c16b752c2179b6b3561c0e | # day one question 1 solution
# find product of two numbers in input.txt list that sum to 2020
# pull everything out of input file
# Read one integer per line from the puzzle input.
with open('input.txt', 'r') as file:
    nums = [int(line) for line in file]

# Find the product of the two entries that sum to 2020.
#
# Single pass with a set of previously-seen values: O(n) instead of the
# original O(n^2) nested scan.  Checking only against *previously seen*
# values also fixes a latent bug where a lone entry equal to 1010 could
# pair with itself in the nested-loop version.
product = 0
for ini in nums:
    target = 2020 - ini
    if target in (seen := locals().get('_seen', set())):
        product = ini * target
        break
    seen.add(ini)
    _seen = seen
print(product) |
7,771 | f8972067fa88e7e74e05cdcc7bdec184116dec4a | import os
import random
import argparse
from vapory import *
from data import colors, object_types
class Torus(POVRayElement):
    """POV-Ray ``torus`` primitive.

    NOTE(review): vapory appears to derive the scene-description keyword
    from the class name, so an empty body suffices — confirm against the
    vapory docs.  Instantiated below as
    ``Torus(major_radius, minor_radius, 'translate', location, *attrs)``.
    """
def render_scene(filename, object_type, color, location, rotation):
    """Render a single object on a checkered plane to ``filename``.

    :param filename: output image path passed to ``scene.render``.
    :param object_type: one of the keys in ``object_types`` (asserted).
    :param color: key into the ``colors`` mapping (asserted).
    :param location: two-element [x, z] ground position; the y (height)
        component is inserted per object type below.
    :param rotation: rotation around the y axis, in degrees.
    """
    assert (object_type in object_types)
    assert (color in colors)
    color = colors[color]
    size = 2
    radius = size/2
    # Shared surface attributes: pigment, ambient finish and y-rotation.
    attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7), 'rotate', (0, rotation, 0)
    if object_type == 'box':
        # Lift the box so it sits on the plane rather than intersecting it.
        location.insert(1, size/2)
        obj = Box([x - size/2 for x in location], [x + size/2 for x in location], *attributes)
    if object_type == 'sphere':
        location.insert(1, radius)
        obj = Sphere(location, radius, *attributes)
    if object_type == 'torus':
        location.insert(1, radius/2)
        obj = Torus(radius, radius/2, 'translate', location, *attributes)
    if object_type == 'ellipsoid':
        # An ellipsoid is a sphere scaled non-uniformly along the axes.
        location.insert(1, radius)
        obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)
    if object_type == 'cylinder':
        location.insert(1, 0)
        location2 = list(location)
        location2[1] = size*2
        obj = Cylinder(location, location2, radius, *attributes)
    camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])
    light = LightSource([0, 10, 0], 'color', [1, 1, 1])
    # Checkerboard ground plane in two muted blues.
    chessboard = Plane([0, 1, 0], 0, 'hollow',
        Texture(Pigment('checker',
            'color', [.47, .6, .74],
            'color', [.34, 0.48, 0.6]),
            'scale', 4), Finish('ambient', 0.5))
    scene = Scene(camera, objects=[light, obj, chessboard])
    scene.render(filename, width=128, height=128, antialiasing=1.0)
# Render n_samples images per (color, object_type) combination into assets/.
parser = argparse.ArgumentParser()
parser.add_argument('--n_samples', type=int, default=100)
parser.add_argument('--seed', type=int, default=2018)
args = parser.parse_args()
# Seed once so the random locations/rotations are reproducible per seed.
random.seed(args.seed)
os.makedirs('assets', exist_ok=True)
print("Rendering scenes...")
for color in colors:
    for object_type in object_types:
        for i in range(args.n_samples):
            filename = 'assets/%s-%s-%d' % (color, object_type, i)
            # Skip files rendered by a previous run (resume support).
            if os.path.exists(filename):
                print("%s exists, skipping" % filename)
                continue
            location = [random.uniform(-3, 3), random.uniform(-3, 3)]
            rotation = random.uniform(0, 360)
            render_scene(filename, object_type, color, location, rotation)
print("Finished")
|
7,772 | 305133d4840741bd5c318a99a96660d8988dd61a | # Copyright (C) 2020 Claudio Marques - All Rights Reserved
# ---- Output locations -----------------------------------------------------
# '{toReplace}' is substituted by the writer (e.g. with a chunk/part index).
dataset_path = "data/output/dataset{toReplace}.csv"
dataset_path_final = "data/output/final/datasetFinal.csv"
log_path = "data/logs/output_append.log"
# NOTE(review): presumably the worker-thread count used by the extractor —
# confirm at the call site.
numberOfThreads = 45
# ---- Malign input ----------------------------------------------------------
inputFileMalign = "data/input/malign/all.log"
outputFileMalign = "data/output/fileMalign.csv"
sampleMalign = 300
# ---- Benign inputs, one group per DNS record type --------------------------
inputFileBenignAAAA = "data/input/benign/aaaa/all.log"
outputFileBenignAAA = "data/output/fileBenignAAAA.csv"
sampleAAAA = 100
inputFileBenignCNAME = "data/input/benign/cname/all.log"
outputFileBenignCNAME = "data/output/fileBenignCNAME.csv"
sampleCNAME = 100
inputFileBenignMX = "data/input/benign/mx/all.log"
outputFileBenignMX = "data/output/fileBenignMX.csv"
sampleMX = 100
# Alexa top-1m list used for the DomainInAlexaDB feature.
alexaDbPath = "utils/Database/AlexaDB/top-1m.csv"
# TCP ports probed per domain (CommonPorts feature).
ports = [80, 443, 21, 22, 23, 25, 53, 110, 143, 161, 445, 465, 587, 993, 995, 3306, 3389, 7547, 8080, 8888]
# CSV header and the matching %-style row format; the two must stay in sync
# column for column.
fileHeader = "Domain,DNSRecordType,MXDnsResponse,TXTDnsResponse,HasSPFInfo,HasDkimInfo,HasDmarcInfo,Ip,DomainInAlexaDB,CommonPorts,CountryCode,RegisteredCountry,CreationDate," \
    "LastUpdateDate,ASN,HttpResponseCode,RegisteredOrg,SubdomainNumber,Entropy,EntropyOfSubDomains,StrangeCharacters," \
    "TLD,IpReputation,DomainReputation," \
    "ConsoantRatio,NumericRatio,SpecialCharRatio,VowelRatio,ConsoantSequence,VowelSequence,NumericSequence,SpecialCharSequence,DomainLength,Class"
headerRegex = "%s,%s,%d,%d,%d,%d,%d,%s,%d,%d,%s,%s,%d," \
    "%d,%d,%d,%s,%d,%d,%d,%d," \
    "%s,%d,%d," \
    "%0.1f,%0.1f,%0.1f,%0.1f,%d,%d,%d,%d,%d,%d\n"
# Enumeration engines passed to sublist3r.
sublist3rEngines = "bing,passivedns"
|
class Action(dict):
    """A dict-backed game action.

    Always stores the 'action' and 'player' keys; 'target' is stored only
    when a target was actually supplied, so consumers can detect targeted
    actions with ``'target' in action``.
    """

    def __init__(self, action, player=None, target=None):
        self['action'] = action
        self['player'] = player
        # `is not None` (identity) instead of the original `!= None`:
        # keeps falsy-but-valid targets such as 0 or '' and avoids
        # surprises from custom __eq__ implementations.
        if target is not None:
            self['target'] = target
|
7,774 | 8475792cc2d55f030f0bd9e7d0240e3b59ed996b | import os
import numpy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plotObject(obj):
    """Scatter-plot a voxel grid: positive cells red, negative cells blue."""
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    # Occupied (positive) voxels: larger red markers, drawn first.
    px, py, pz = numpy.nonzero(obj > 0)
    axes.scatter(px, py, pz, c='r', s=10)
    # Negative voxels: small blue markers.
    nx, ny, nz = numpy.nonzero(obj < 0)
    axes.scatter(nx, ny, nz, c='b', s=1)
    plt.show()
class GridData:
    """Reads fixed-size voxel-grid samples (30x30x30 int8) plus labels.

    The data file is a raw concatenation of 27000-byte int8 cubes; the
    label file holds one ``<int_label> <name>`` line per sample.
    """

    def __init__(self, datafile, labelfile):
        self.samples = []
        self.labels = []
        self.label_names = []
        self.data_size = 30
        self.source = datafile
        sample_size = self.data_size ** 3
        file_size = os.path.getsize(datafile)
        # Integer division: on Python 3, `/` yields a float, which would
        # break range() below.
        self.num_samples = file_size // sample_size
        # Context managers: the original leaked both file handles.
        with open(datafile, 'rb') as f, open(labelfile, 'r') as f2:
            for i in range(self.num_samples):
                arr = numpy.fromfile(f, dtype=numpy.int8, count=sample_size)
                matrix = arr.reshape((self.data_size, self.data_size, self.data_size))
                self.samples.append(matrix.transpose())
                l = f2.readline().split()
                self.labels.append(int(l[0]))
                self.label_names.append(l[1])

    def __str__(self):
        return "<%s %d samples (%dx%dx%d)>" % (self.source, self.num_samples, self.data_size, self.data_size, self.data_size)

    def __repr__(self):
        return str(self)
if __name__=="__main__":
    # Demo: load matching partial/complete scans (same label file) and show
    # each pair of voxel grids side by side, one figure at a time.
    partial_view_file = 'partial_view_single.data'
    complete_view_file = 'complete_view_single.data'
    label_file = 'labels_single.data'
    partial_views = GridData(partial_view_file,label_file)
    complete_views = GridData(complete_view_file,label_file)
    print(partial_views)
    print(complete_views)
    for i in range(partial_views.num_samples):
        # plt.show() blocks, so figures appear sequentially.
        plotObject(partial_views.samples[i])
        plotObject(complete_views.samples[i])
|
7,775 | dcb12e282962c63f8e7de5d29c4c81ad177a387e | import heapq
class Solution:
    """Order the distinct values of a list by frequency."""

    def sortElemsByFrequency(self, arr):
        """Return distinct values of ``arr``, most frequent first.

        Ties are broken by the larger value first, matching the original
        heap-based ordering ((count, value) descending).  An empty or
        missing input now yields [] instead of the previous implicit None.
        """
        if not arr:
            return []
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        # sorted() replaces the heap + O(n^2) res.insert(0, ...) pattern.
        return sorted(counts, key=lambda v: (counts[v], v), reverse=True)
# Demo: expected output [8, 5, 2, 6] — 8 occurs 3x; 5 and 2 tie at 2x and
# are ordered by value; 6 occurs once.
sol = Solution()
res = sol.sortElemsByFrequency([2, 5, 2, 8, 5, 6, 8, 8])
print(res)
|
7,776 | 7451b09c54734fb02167d43b96df972420d86853 | import sys
from domain import *
from fuzzy_set import *
from parser import *
class FuzzyControler(object):
    """Fuzzy boat controller: derives a rudder angle and an acceleration
    from sensor readings using two fuzzy rule sets and center-of-area
    defuzzification.
    """

    def __init__(self, angle_rules, acc_rules, domains_angle, domains_acc):
        """Store the rule sets/domains and precompute the power-of-two
        interval boundaries [0, 1, 2, 4, ..., 2048] used for quantization.
        """
        self.angle_rules = angle_rules
        self.acc_rules = acc_rules
        self.domains_angle = domains_angle
        self.domains_acc = domains_acc
        self.intervals = []
        self.intervals.append(0)
        i = 1
        while i <= 2048:
            self.intervals.append(i)
            i *= 2
    def calculateNewAccAndAngle(self, L, D, LK, DK, V, S):
        """Compute (acceleration, angle) from the raw sensor values.

        L/D: left/right distance, LK/DK: diagonal distances, V: speed,
        S: direction flag.  Inputs are quantized before rule evaluation.
        """
        left = (L, LK, S)
        right = (D, DK, S)
        left = self.transformToInterval(left)
        right = self.transformToInterval(right)
        left_acc = (left[0], right[0], V)
        right_acc = (left[1], right[1], V)
        angle = self.calcAngle(left, right, self.angle_rules, self.domains_angle)
        acc = self.calcAcc(left_acc, right_acc, self.acc_rules, self.domains_acc)
        # Debug trace on stderr so it does not pollute the stdout protocol.
        sys.stderr.write(str(angle) + " " + str(acc) + "\n")
        return acc, angle
    def calcAngle(self, left, right, angle_rules, domains):
        """Defuzzified rudder angle: max membership over all rules per
        domain element, then center of area (Mamdani max aggregation).
        """
        # print domains
        angle_domain = domains["angle_domain"]
        cardinality = angle_domain.getCardinality()
        new_memberships = []
        domain_elements = angle_domain.getElements()
        # print "here"
        for i in range(cardinality):
            #
            y = domain_elements[i]
            left_elem = left + (y,)
            right_elem = right + (y,)
            # sys.stderr.write(str(left_elem) + "\n")
            # sys.stderr.write(str(right_elem) + "\n")
            # print right_elem
            max_ = 0
            for rule in angle_rules:
                # print "here"
                # print rule.name
                # RULE_LEFT* rules evaluate the left-sensor tuple, all
                # others the right one.
                if rule.name.startswith("RULE_LEFT"):
                    # print "here"
                    min_ = rule.getMembershipFor(left_elem)
                else:
                    min_ = rule.getMembershipFor(right_elem)
                # print min_
                if min_ > max_:
                    max_ = min_
            new_memberships.append(max_)
        # print self.centerOfArea(new_memberships, domain_elements)
        result = int(self.centerOfArea(new_memberships, domain_elements))
        return result
    def calcAcc(self, left, right, acc_rules, domains):
        """Defuzzified acceleration; same aggregation as calcAngle but over
        the acceleration domain and RULE_LD*-prefixed rules.
        """
        # print domains
        acc_domain = domains["acc_domain"]
        cardinality = acc_domain.getCardinality()
        new_memberships = []
        domain_elements = acc_domain.getElements()
        for i in range(cardinality):
            #
            y = domain_elements[i]
            left_elem = left + (y,)
            right_elem = right + (y,)
            max_ = 0
            for rule in acc_rules:
                # print "here"
                # print rule.name
                if rule.name.startswith("RULE_LD"):
                    # print "here"
                    min_ = rule.getMembershipFor(left_elem)
                else:
                    # Debug trace for the non-LD rules only.
                    sys.stderr.write(str(right_elem) + "\n")
                    min_ = rule.getMembershipFor(right_elem)
                # print min_
                if min_ > max_:
                    max_ = min_
            new_memberships.append(max_)
        # print self.centerOfArea(new_memberships, domain_elements)
        result = int(self.centerOfArea(new_memberships, domain_elements))
        return result
    def centerOfArea(self, memberships, elements):
        """Center-of-area defuzzification: weighted mean of the domain
        elements; 0 when every membership is zero.
        """
        # print memberships, elements
        # print len(memberships)
        result = 0
        numerator = 0
        denominator = 0
        for i in range(len(memberships)):
            numerator += memberships[i] * elements[i]
            denominator += memberships[i]
        if denominator == 0:
            return 0
        result = float(numerator) / denominator
        return result
    def transformToInterval(self, elem):
        """Quantize each component down to the floor of its power-of-two
        bucket in self.intervals (values >= 2048 are left unchanged).
        """
        val = list(elem)
        for i in range(len(elem)):
            for j in range(1, len(self.intervals)):
                if elem[i] < self.intervals[j] and elem[i] >= self.intervals[j-1]:
                    val[i] = self.intervals[j-1]
        return tuple(val)
def main():
    """Parse the two rule files given on the command line, then run the
    control loop: read sensor lines from stdin until the sentinel 'KRAJ',
    writing 'acceleration angle' to stdout per line.

    NOTE(review): this file is Python 2 (see the print statement below).
    """
    domains_angle = {}
    domains_acc = {}
    sets_angle = {}
    sets_acc = {}
    # Fuzzy operator configuration: Zadeh s/t-norms, max-min implication.
    operators = {}
    operators["+"] = ("ZadehS",)
    operators["*"] = ("ZadehT",)
    operators["!"] = ("ZadehNot",)
    operators["->"] = ("'max-min'",)
    parser = Parser(sys.argv[1], domains_angle, sets_angle, operators)
    parser.parse()
    sets_angle = parser.rules
    parser = Parser(sys.argv[2], domains_acc, sets_acc, operators)
    parser.parse()
    sets_acc = parser.rules
    controler = FuzzyControler(sets_angle, sets_acc, domains_angle, domains_acc)
    while True:
        # print "here"
        line = sys.stdin.readline()
        if line == "KRAJ\n":
            break
        # Extract the six integer sensor readings from the line.
        L,D,LK,DK,V,S = [int(s) for s in line.split() if s.isdigit()]
        akcel, kormilo = controler.calculateNewAccAndAngle(L, D, LK, DK, V, S)
        print akcel, kormilo
        # Flush so the driving simulator sees the answer immediately.
        sys.stdout.flush()
if __name__ == "__main__":
    main()
|
7,777 | a9eb2b3f26396918c792de3f126e51bde334b709 | #!/usr/bin/env python3
# given a set A and n other sets.
# find whether set A is a strict superset of each of the n sets
# print True if yes, otherwise False
# A is a strict superset of every candidate set iff every comparison holds.
A = set(map(int, input().split()))
n = int(input())
checks = [A > set(map(int, input().split())) for _ in range(n)]
print(all(checks))
|
7,778 | 084c9ad83091f6f96d19c0f0c28520ccda93bbaf | import base64
import bleach
import errno
import fcntl
import gzip
import hashlib
import importlib
import inspect
import magic
import mimetypes
import morepath
import operator
import os.path
import re
import shutil
import sqlalchemy
import urllib.request
from markupsafe import Markup
from collections.abc import Iterable
from contextlib import contextmanager
from cProfile import Profile
from functools import reduce
from importlib import import_module
from io import BytesIO, StringIO
from itertools import groupby, islice
from onegov.core import log
from onegov.core.cache import lru_cache
from onegov.core.custom import json
from onegov.core.errors import AlreadyLockedError
from purl import URL
from threading import Thread
from time import perf_counter
from unidecode import unidecode
from uuid import UUID, uuid4
from webob import static
from yubico_client import Yubico
from yubico_client.yubico_exceptions import SignatureVerificationError
from yubico_client.yubico_exceptions import StatusCodeError
from typing import overload, Any, TypeVar, TYPE_CHECKING
if TYPE_CHECKING:
from _typeshed import SupportsRichComparison
from collections.abc import Callable, Collection, Iterator
from fs.base import FS, SubFS
from re import Match
from sqlalchemy import Column
from sqlalchemy.orm import Session
from types import ModuleType
from webob import Response
from .request import CoreRequest
from .types import FileDict, LaxFileDict
# Generic type variables used by the typed helpers below.
_T = TypeVar('_T')
_KT = TypeVar('_KT')
# Characters not allowed in a URL slug; see http://stackoverflow.com/a/13500078
_unwanted_url_chars = re.compile(r'[\.\(\)\\/\s<>\[\]{},:;?!@&=+$#@%|\*"\'`]+')
_double_dash = re.compile(r'[-]+')
# Trailing '-<digits>' suffix handled by increment_name().
_number_suffix = re.compile(r'-([0-9]+)$')
_repeated_spaces = re.compile(r'\s\s+')
# UUID with or without dashes (each '-?' is optional).
_uuid = re.compile(
    r'^[a-f0-9]{8}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[a-f0-9]{4}-?[a-f0-9]{12}$')
# only temporary until bleach has a release > 1.4.1 -
_email_regex = re.compile((
    r"([a-z0-9!#$%&'*+\/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+\/=?^_`"
    r"{|}~-]+)*(@|\sat\s)(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?(\.|"
    r"\sdot\s))+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)"
))
# detects multiple successive newlines
_multiple_newlines = re.compile(r'\n{2,}', re.MULTILINE)
# detect starting strings of phone inside a link
_phone_inside_a_tags = r'(\">|href=\"tel:)?'
# regex pattern for swiss phone numbers
_phone_ch_country_code = r"(\+41|0041|0[0-9]{2})"
_phone_ch = re.compile(_phone_ch_country_code + r'([ \r\f\t\d]+)')
# Adds a regex group to capture if a leading a tag is present or if the
# number is part of the href attributes
_phone_ch_html_safe = re.compile(
    _phone_inside_a_tags + _phone_ch_country_code + r'([ \r\f\t\d]+)')
# Yubikey modhex alphabet and the valid OTP shape (12-44 modhex chars).
ALPHABET = 'cbdefghijklnrtuv'
ALPHABET_RE = re.compile(r'^[cbdefghijklnrtuv]{12,44}$')
@contextmanager
def local_lock(namespace: str, key: str) -> 'Iterator[None]':
    """ Locks the given namespace/key combination on the current system,
    automatically freeing it after the with statement has been completed or
    once the process is killed.

    :raises AlreadyLockedError: if another process currently holds the lock.

    Usage::

        with local_lock('namespace', 'key'):
            pass
    """
    # Flatten the pair into a safe /tmp filename ('/' would create subdirs).
    name = f'{namespace}-{key}'.replace('/', '-')
    with open(f'/tmp/{name}', 'w+') as f:
        try:
            # Non-blocking exclusive flock; raises BlockingIOError if held.
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yield
            # NOTE(review): if the body raises, this explicit unlock is
            # skipped — the lock is presumably still released when the file
            # handle closes; confirm this is intentional.
            fcntl.flock(f, fcntl.LOCK_UN)
        except BlockingIOError as exception:
            raise AlreadyLockedError from exception
def normalize_for_url(text: str) -> str:
    """ Takes the given text and makes it fit to be used for an url.

    That means replacing spaces and other unwanted characters with '-',
    lowercasing everything and turning unicode characters into their closest
    ascii equivalent using Unidecode.

    See https://pypi.python.org/pypi/Unidecode

    """
    # German is our main language, so we are extra considerate about it:
    # unidecode turns 'ü' into 'u' (and 'Ü' into 'U'), but the German
    # convention is 'ue'.  The uppercase umlauts must be handled here too,
    # because lowercasing only happens *after* unidecode has run.
    for umlaut, replacement in (
        ('ü', 'ue'), ('Ü', 'ue'),
        ('ä', 'ae'), ('Ä', 'ae'),
        ('ö', 'oe'), ('Ö', 'oe'),
    ):
        text = text.replace(umlaut, replacement)
    clean = _unwanted_url_chars.sub('-', unidecode(text).strip(' ').lower())
    clean = _double_dash.sub('-', clean)
    clean = clean.strip('-')
    return clean
def increment_name(name: str) -> str:
    """ Adds (or bumps) a numeric '-N' suffix on the given name.

    For example::

        foo => foo-1
        foo-1 => foo-2

    """
    suffix_match = re.search(r'-([0-9]+)$', name)
    if suffix_match is None:
        return f'{name}-1'
    digits = suffix_match.group(1)
    # Strip exactly the digits (the dash stays) and append the next number.
    return f'{name[:-len(digits)]}{int(digits) + 1}'
def remove_repeated_spaces(text: str) -> str:
    """ Collapses any run of 2+ whitespace characters into one space
    ('a  b' -> 'a b').
    """
    return re.sub(r'\s\s+', ' ', text)
@contextmanager
def profile(filename: str) -> 'Iterator[None]':
    """ Profiles the wrapped code and stores the result in the profiles folder
    with the given filename.

    NOTE(review): the relative 'profiles/' directory must already exist, or
    dump_stats will fail.  There is also no try/finally: if the body raises,
    the profiler stays enabled and no stats are written — confirm intended.
    """
    profiler = Profile()
    profiler.enable()
    yield
    profiler.disable()
    profiler.create_stats()
    profiler.dump_stats('profiles/{}'.format(filename))
@contextmanager
def timing(name: str | None = None) -> 'Iterator[None]':
    """ Prints the wall-clock time in ms the with-block took to run,
    prefixed with the given name if one was provided.
    """
    started = perf_counter()
    yield
    elapsed_ms = 1000.0 * (perf_counter() - started)
    prefix = f'{name}: ' if name else ''
    print(f'{prefix}{elapsed_ms:.0f} ms')
@lru_cache(maxsize=32)
def module_path_root(module: 'ModuleType | str') -> str:
    """ Returns the directory that contains the given module's source file.
    Accepts either an imported module or a dotted module name; results are
    cached.
    """
    mod = importlib.import_module(module) if isinstance(module, str) else module
    assert mod is not None
    return os.path.dirname(inspect.getfile(mod))
def module_path(module: 'ModuleType | str', subpath: str) -> str:
    """ Returns a subdirectory in the given python module.

    :mod:
        A python module (actual module or string)

    :subpath:
        Subpath below that python module. Leading slashes ('/') are ignored.

    :raises AssertionError: if the resulting path escapes the module's
        directory (e.g. via '..' components in subpath).
    """
    parent = module_path_root(module)
    path = os.path.join(parent, subpath.strip('/'))
    # always be paranoid with path manipulation: the assert blocks
    # directory traversal out of the module root
    assert is_subpath(parent, path)
    return path
def touch(file_path: str) -> None:
    """ Touches the file on the given path, creating it if it is missing. """
    try:
        os.utime(file_path, None)
    except OSError:
        # File does not exist (or utime failed): create it via append-open.
        # OSError instead of the original blanket `except Exception`, and a
        # context manager so the handle is always released.
        with open(file_path, 'a'):
            pass
class Bunch:
    """ A simple but handy "collector of a bunch of named stuff" class.

    See `<https://code.activestate.com/recipes/\
    52308-the-simple-but-handy-collector-of-a-bunch-of-named/>`_.

    For example::

        point = Bunch(x=1, y=2)
        assert point.x == 1
        assert point.y == 2

        point.z = 3
        assert point.z == 3

    Allows the creation of simple nested bunches, for example::

        request = Bunch(**{'app.settings.org.my_setting': True})
        assert request.app.settings.org.my_setting is True

    Dotted keys that share a prefix are merged into one nested Bunch::

        b = Bunch(**{'a.x': 1, 'a.y': 2})
        assert b.a.x == 1 and b.a.y == 2

    """
    def __init__(self, **kwargs: Any):
        self.__dict__.update(
            (key, value)
            for key, value in kwargs.items()
            if '.' not in key
        )
        # Group dotted keys by their first segment so that several keys
        # sharing a prefix merge into ONE nested Bunch; the previous
        # implementation created a fresh Bunch per key, silently
        # overwriting the earlier siblings.
        nested: dict[str, dict[str, Any]] = {}
        for key, value in kwargs.items():
            if '.' in key:
                name, _, rest = key.partition('.')
                nested.setdefault(name, {})[rest] = value
        for name, values in nested.items():
            # Recursion handles deeper levels ('a.b.c' -> Bunch(**{'b.c': v}))
            setattr(self, name, Bunch(**values))

    if TYPE_CHECKING:
        # let mypy know that any attribute access could be valid
        def __getattr__(self, name: str) -> Any: ...
        def __setattr__(self, name: str, value: Any) -> None: ...
        def __delattr__(self, name: str) -> None: ...

    def __eq__(self, other: object) -> bool:
        # Exact type match: subclasses never compare equal to Bunch.
        if type(other) is type(self):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other: object) -> bool:
        return not self.__eq__(other)
def render_file(file_path: str, request: 'CoreRequest') -> 'Response':
    """ Takes the given file_path (content) and renders it to the browser.
    The file must exist on the local system and be readable by the current
    process.
    """
    # Cache key for the content-type lookup below (non-cryptographic use).
    def hash_path(path: str) -> str:
        return hashlib.new(  # nosec:B324
            'sha1',
            path.encode('utf-8'),
            usedforsecurity=False
        ).hexdigest()

    # this is a very cachable result - though it's possible that a file
    # changes it's content type, it should usually not, especially since
    # we emphasize the use of random filenames
    @request.app.cache.cache_on_arguments(to_str=hash_path)
    def get_content_type(file_path: str) -> str:
        # Guess from the extension first; fall back to sniffing the bytes.
        content_type = mimetypes.guess_type(file_path)[0]

        if not content_type:
            content_type = magic.from_file(file_path, mime=True)

        return content_type

    return request.get_response(
        static.FileApp(file_path, content_type=get_content_type(file_path)))
def hash_dictionary(dictionary: dict[str, Any]) -> str:
    """ Computes a SHA-1 hash for the given dictionary. The dictionary
    is expected to only contain values that can be serialized by json.
    That includes int, decimal, string, boolean.

    Note that this function is not meant to be used for hashing secrets. Do
    not include data in this dictionary that is secret!

    """
    # sort_keys keeps the hash stable regardless of key insertion order
    dict_as_string = json.dumps(dictionary, sort_keys=True).encode('utf-8')
    return hashlib.new(  # nosec:B324
        'sha1',
        dict_as_string,
        usedforsecurity=False
    ).hexdigest()
@overload
def groupbylist(
    iterable: 'Iterable[_T]',
    key: None = ...
) -> 'list[tuple[_T, list[_T]]]': ...


@overload
def groupbylist(
    iterable: 'Iterable[_T]',
    key: 'Callable[[_T], _KT]'
) -> 'list[tuple[_KT, list[_T]]]': ...


def groupbylist(
    iterable: 'Iterable[_T]',
    key: 'Callable[[_T], Any] | None' = None
) -> 'list[tuple[Any, list[_T]]]':
    """ Like :func:`itertools.groupby`, except that both the result and
    each group are materialized as lists instead of lazy generators.
    """
    grouped = []
    for group_key, members in groupby(iterable, key=key):
        grouped.append((group_key, list(members)))
    return grouped
def linkify_phone(text: str) -> str:
    """ Takes a string and replaces valid phone numbers with html links. If a
    phone number is matched, it will be replaced by the result of a callback
    function, that does further checks on the regex match. If these checks do
    not pass, the matched number will remain unchanged.

    """

    def strip_whitespace(number: str) -> str:
        return re.sub(r'\s', '', number)

    # Valid Swiss number lengths (without whitespace) per prefix style:
    # '00...' international (13), '0...' national (10), '+...' (12).
    def is_valid_length(number: str) -> bool:
        if number.startswith('+00'):
            return False
        if number.startswith('00'):
            return len(number) == 13
        elif number.startswith('0'):
            return len(number) == 10
        elif number.startswith('+'):
            return len(number) == 12

        return False

    def handle_match(match: 'Match[str]') -> str:
        # Group 1 is non-empty when the number is already inside an <a> tag
        # or a tel: href — leave those untouched.
        inside_html = match.group(1)
        number = f'{match.group(2)}{match.group(3)}'
        assert not number.endswith('\n')

        if inside_html:
            return match.group(0)
        if is_valid_length(strip_whitespace(number)):
            number = remove_repeated_spaces(number).strip()
            return f'<a href="tel:{number}">{number}</a> '

        return match.group(0)

    return _phone_ch_html_safe.sub(handle_match, text)
# FIXME: A lot of these methods should be using MarkupSafe
def linkify(text: str, escape: bool = True) -> str:
    """ Takes plain text and injects html links for urls and email addresses.

    By default the text is html escaped before it is linkified. This accounts
    for the fact that we usually use this for text blocks that we mean to
    extend with email addresses and urls.

    If html is already possible, why linkify it?

    Note: We need to clean the html after we've created it (linkify
    parses escaped html and turns it into real html). As a consequence it
    is possible to have html urls in the text that won't be escaped.

    """
    if not text:
        return text

    long_top_level_domains = ['.agency']

    # bleach.linkify supports only a fairly limited amount of tlds
    if any(domain in text for domain in long_top_level_domains):
        # NOTE(review): this branch wraps the ENTIRE text in a single
        # anchor — it assumes the text is nothing but the address/url.
        if '@' in text:
            linkified = str(
                Markup('<a href="mailto:{text}">{text}</a>').format(
                    text=text
                )
            )
        else:
            linkified = str(
                Markup('<a href="{text}">{text}</a>').format(text=text)
            )
    else:
        linkified = linkify_phone(bleach.linkify(text, parse_email=True))

    if not escape:
        return linkified

    # Whitelist-clean: only <a> tags with href/rel and safe protocols
    # survive the sanitization.
    return bleach.clean(
        linkified,
        tags=['a'],
        attributes={'a': ['href', 'rel']},
        protocols=['http', 'https', 'mailto', 'tel']
    )
def paragraphify(text: str) -> str:
    """ Groups a newline-separated text into html paragraphs.

    A single newline between two lines becomes a <br>; two or more
    consecutive newlines start a new <p>, with the extra newlines dropped.
    """
    if text:
        text = text.replace('\r', '').strip('\n')
    if not text:
        return ''

    paragraphs = re.split(r'\n{2,}', text, flags=re.MULTILINE)
    return ''.join(
        '<p>{}</p>'.format(paragraph.replace('\n', '<br>'))
        for paragraph in paragraphs
    )
def to_html_ul(
    value: str,
    convert_dashes: bool = True,
    with_title: bool = False
) -> str:
    """ Linkify and convert to text to one or multiple ul's or paragraphs.

    :param value: the newline-separated input text.
    :param convert_dashes: when True, lines starting with '-' become <li>
        items grouped into <ul class="bulleted"> blocks; when False the
        whole text becomes a single <p> with <br> line breaks.
    :param with_title: when True, the first line is emitted as a
        <span class="title"> paragraph.
    """
    if not value:
        return ''

    value = value.replace('\r', '').strip('\n')
    # A blank line becomes a single-space marker line, used below to force
    # a new paragraph/list.
    value = value.replace('\n\n', '\n \n')

    if not convert_dashes:
        return '<p>{}</p>'.format(
            '<br>'.join(linkify(value).splitlines())
        )

    elements = []
    temp: list[str] = []

    def ul(inner: str) -> str:
        return f'<ul class="bulleted">{inner}</ul>'

    def li(inner: str) -> str:
        return f'<li>{inner}</li>'

    def p(inner: str) -> str:
        return f'<p>{inner}</p>'

    was_list = False

    for i, line in enumerate(value.splitlines()):
        if not line:
            continue

        line = linkify(line)
        is_list = line.startswith('-')
        # The ' ' marker inserted above signals a paragraph/list boundary.
        new_p_or_ul = True if line == ' ' else False
        line = line.lstrip('-').strip()

        if with_title:
            elements.append(p(f'<span class="title">{line}</span>'))
            with_title = False
        else:
            # Flush the accumulated lines whenever the mode flips between
            # list and paragraph, or an explicit boundary was hit.
            if new_p_or_ul or (was_list != is_list and i > 0):
                elements.append(
                    ul(''.join(temp)) if was_list else p('<br>'.join(temp))
                )
                temp = []
                was_list = False

            if not new_p_or_ul:
                temp.append((li(line) if is_list else line))

            new_p_or_ul = False

        was_list = is_list

    # Flush whatever remains after the loop.
    if temp:
        elements.append(
            ul(''.join(temp)) if was_list else p('<br>'.join(temp))
        )

    return ''.join(elements)
def ensure_scheme(url: str, default: str = 'http') -> str:
    """ Makes sure that the given url has a scheme in front, if none
    was provided.

    :param url: the url to check; returned unchanged when falsy or when it
        already carries a scheme.
    :param default: the scheme to prepend when none is present.
    """

    if not url:
        return url

    # purl (or to be precise urlparse) will parse empty host names ('abc.xyz')
    # wrongly, assuming the abc.xyz is a path. by adding a double slash if
    # there isn't one already, we can circumvent that problem
    if '//' not in url:
        url = '//' + url

    _url = URL(url)

    if _url.scheme():
        return url

    return _url.scheme(default).as_string()
def is_uuid(value: str | UUID) -> bool:
""" Returns true if the given value is a uuid. The value may be a string
or of type UUID. If it's a string, the uuid is checked with a regex.
"""
if isinstance(value, str):
return _uuid.match(str(value)) and True or False
return isinstance(value, UUID)
def is_non_string_iterable(obj: object) -> bool:
    """ Returns True if the given obj is iterable, but not a str/bytes. """
    if isinstance(obj, (str, bytes)):
        return False
    return isinstance(obj, Iterable)
def relative_url(absolute_url: str | None) -> str:
    """ Removes everything in front of the path, including scheme, host,
    username, password and port.

    NOTE(review): relies on purl's private ``URL._mutate`` — there appears
    to be no public API for clearing all of these at once.
    """
    url = URL._mutate(
        URL(absolute_url),
        scheme=None,
        username=None,
        password=None,
        host=None,
        port=None
    )

    return url.as_string()
def is_subpath(directory: str, path: str) -> bool:
    """ Returns True if the given path lies inside the given directory. """
    # Normalize both sides and force a trailing separator on the directory,
    # so '/a/bc' does not count as being inside '/a/b'.
    root = os.path.join(os.path.realpath(directory), '')
    return os.path.realpath(path).startswith(root)
@overload
def is_sorted(
    iterable: 'Iterable[SupportsRichComparison]',
    key: 'Callable[[SupportsRichComparison], SupportsRichComparison]' = ...,
    reverse: bool = ...
) -> bool: ...


@overload
def is_sorted(
    iterable: 'Iterable[_T]',
    key: 'Callable[[_T], SupportsRichComparison]',
    reverse: bool = ...
) -> bool: ...


# FIXME: Do we really want to allow any Iterable? This seems like a bad
#        idea to me... Iterators will be consumed and the Iterable might
#        be infinite. This seems like it should be a Container instead,
#        then we also don't need to use tee or list to make a copy
def is_sorted(
    iterable: 'Iterable[Any]',
    key: 'Callable[[Any], SupportsRichComparison]' = lambda i: i,
    reverse: bool = False
) -> bool:
    """ Returns True if the iterable is already in sorted order. """
    # Materialize once, sort a copy, and compare element *identity*
    # pairwise: sorting is stable, so an already-sorted input yields the
    # same objects in the same positions.
    items = list(iterable)
    ordered = sorted(items, key=key, reverse=reverse)
    return all(a is b for a, b in zip(items, ordered))
def morepath_modules(cls: type[morepath.App]) -> 'Iterator[str]':
    """ Returns all morepath modules which should be scanned for the given
    morepath application class.

    We can't reliably know the actual morepath modules that
    need to be scanned, which is why we assume that each module has
    one namespace (like 'more.transaction' or 'onegov.core').

    """
    for base in cls.__mro__:
        if not issubclass(base, morepath.App):
            continue

        # morepath.App itself needs no scanning
        if base is morepath.App:
            continue

        # Reduce e.g. 'onegov.core.framework' to the two-level namespace
        # 'onegov.core'.
        module = '.'.join(base.__module__.split('.')[:2])

        # skip test modules
        if module.startswith('test'):
            continue

        yield module
def scan_morepath_modules(cls: type[morepath.App]) -> None:
    """ Tries to scan all the morepath modules required for the given
    application class. This is not guaranteed to stay reliable as there is
    no sure way to discover all modules required by the application class.

    """
    # sorted() keeps the scan order deterministic across runs
    for module in sorted(morepath_modules(cls)):
        morepath.scan(import_module(module))
def get_unique_hstore_keys(
    session: 'Session',
    column: 'Column[dict[str, Any]]'
) -> set[str]:
    """ Returns a set of keys found in an hstore column over all records
    of its table.

    """
    # skeys() explodes each hstore row into its keys; the type:ignore is
    # needed because `column.keys()` is not part of Column's typed API —
    # NOTE(review): confirm this call is intentional and not a typo.
    base = session.query(column.keys()).with_entities(  # type:ignore
        sqlalchemy.func.skeys(column).label('keys'))

    # DISTINCT aggregate of all exploded keys into a single array.
    query = sqlalchemy.select(
        [sqlalchemy.func.array_agg(sqlalchemy.column('keys'))],
        distinct=True
    ).select_from(base.subquery())

    keys = session.execute(query).scalar()
    # scalar() may be None when the table is empty
    return set(keys) if keys else set()
def makeopendir(fs: 'FS', directory: str) -> 'SubFS[FS]':
    """ Creates and opens the given directory in the given PyFilesystem.

    NOTE(review): the check-then-create is not atomic; concurrent callers
    could race between isdir() and makedir().
    """
    if not fs.isdir(directory):
        fs.makedir(directory)

    return fs.opendir(directory)
def append_query_param(url: str, key: str, value: str) -> str:
    """ Appends a single query parameter to an url. This is faster than
    using Purl, if and only if we only add one query param.

    Also this function assumes that the value is already url encoded.

    """
    # A conditional expression instead of the legacy `and/or` trick, which
    # silently misbehaves whenever the middle operand is falsy.
    separator = '&' if '?' in url else '?'
    return f'{url}{separator}{key}={value}'
class PostThread(Thread):
    """ POSTs the given data with the headers to the URL.

    Example::

        data = {'a': 1, 'b': 2}
        data = json.dumps(data).encode('utf-8')

        PostThread(
            'https://example.com/post',
            data,
            (
                ('Content-Type', 'application/json; charset=utf-8'),
                ('Content-Length', len(data))
            )
        ).start()

    This only works for external URLs! If posting to server itself is
    needed, use a process instead of the thread!

    """

    def __init__(
        self,
        url: str,
        data: bytes,
        headers: 'Collection[tuple[str, str]]',
        timeout: float = 30  # seconds
    ):
        Thread.__init__(self)
        self.url = url
        self.data = data
        self.headers = headers
        self.timeout = timeout

    def run(self) -> None:
        """Performs the POST; failures are logged, never raised (the thread
        is fire-and-forget)."""
        try:
            # Validate URL protocol before opening it, since it's possible to
            # open ftp:// and file:// as well.
            if not self.url.lower().startswith('http'):
                raise ValueError from None

            request = urllib.request.Request(self.url)
            for header in self.headers:
                request.add_header(header[0], header[1])
            urllib.request.urlopen(  # nosec B310
                request, self.data, self.timeout
            )
        except Exception as e:
            log.error(
                'Error while sending a POST request to {}: {}'.format(
                    self.url, str(e)
                )
            )
def toggle(collection: 'set[_T]', item: '_T | None') -> 'set[_T]':
    """ Returns a new set where the item has been toggled: removed when
    present, added when absent.  A None item leaves the set untouched.
    """
    if item is None:
        return collection
    # Symmetric difference removes a present item and adds a missing one.
    return collection ^ {item}
def binary_to_dictionary(
    binary: bytes,
    filename: str | None = None
) -> 'FileDict':
    """ Takes raw binary filedata and stores it in a dictionary together
    with metadata information.

    The data is compressed before it is stored in the dictionary. Use
    :func:`dictionary_to_binary` to get the original binary data back.

    """

    assert isinstance(binary, bytes)

    # Sniff the mimetype from the content itself (libmagic).
    mimetype = magic.from_buffer(binary, mime=True)

    # according to https://tools.ietf.org/html/rfc7111, text/csv should be used
    if mimetype == 'application/csv':
        mimetype = 'text/csv'

    gzipdata = BytesIO()

    with gzip.GzipFile(fileobj=gzipdata, mode='wb') as f:
        f.write(binary)

    return {
        # gzip + base64 so the payload is JSON-serializable text
        'data': base64.b64encode(gzipdata.getvalue()).decode('ascii'),
        'filename': filename,
        'mimetype': mimetype,
        # size of the ORIGINAL (uncompressed) data
        'size': len(binary)
    }
def dictionary_to_binary(dictionary: 'LaxFileDict') -> bytes:
    """ Reverses :func:`binary_to_dictionary`, returning the original
    uncompressed bytes.
    """
    compressed = base64.b64decode(dictionary['data'])
    buffer = BytesIO(compressed)
    with gzip.GzipFile(fileobj=buffer, mode='r') as gz:
        return gz.read()
@overload
def safe_format(
    format: str,
    dictionary: dict[str, str | int | float],
    types: None = ...,
    adapt: 'Callable[[str], str] | None' = ...,
    raise_on_missing: bool = ...
) -> str: ...


@overload
def safe_format(
    format: str,
    dictionary: dict[str, _T],
    types: set[type[_T]] = ...,
    adapt: 'Callable[[str], str] | None' = ...,
    raise_on_missing: bool = ...
) -> str: ...


def safe_format(
    format: str,
    dictionary: dict[str, Any],
    types: set[type[Any]] | None = None,
    adapt: 'Callable[[str], str] | None' = None,
    raise_on_missing: bool = False
) -> str:
    """ Takes a user-supplied string with format blocks and returns a string
    where those blocks are replaced by values in a dictionary.

    For example::

        >>> safe_format('[user] has logged in', {'user': 'admin'})
        'admin has logged in'

    :param format:
        The format to use. Square brackets denote dictionary keys. To
        literally print square bracktes, mask them by doubling ('[[' -> '[')

    :param dictionary:
        The dictionary holding the variables to use. If the key is not found
        in the dictionary, the bracket is replaced with an empty string.

    :param types:
        A set of types supported by the dictionary. Limiting this to safe
        types like builtins (str, int, float) ensure that no values are
        accidentally leaked through faulty __str__ representations.

        Note that inheritance is ignored. Supported types need to be
        whitelisted explicitly.

    :param adapt:
        An optional callable that receives the key before it is used. Returns
        the same key or an altered version.

    :param raise_on_missing:
        True if missing keys should result in a runtime error (defaults to
        False).

    This is strictly meant for formats provided by users. Python's string
    formatting options are clearly superior to this, however it is less
    secure!

    """
    types = types or {int, str, float}
    output = StringIO()
    buffer = StringIO()

    # `opened` tracks bracket nesting: +1 for every '[', -1 for every ']'.
    # A value of 1 means we are inside a key, +/-2 means a doubled
    # (escaped) bracket, 0 means plain text.
    opened = 0

    for ix, char in enumerate(format):
        if char == '[':
            opened += 1

        if char == ']':
            opened -= 1

        # inside a single open bracket: accumulate the key name
        if opened == 1 and char != '[' and char != ']':
            print(char, file=buffer, end='')
            continue

        # a doubled bracket ('[[' or ']]') emits one literal bracket
        if opened == 2 or opened == -2:
            if buffer.tell():
                raise RuntimeError("Unexpected bracket inside bracket found")

            print(char, file=output, end='')
            opened = 0
            continue

        # a key was just closed: resolve it against the dictionary
        if buffer.tell():
            k = adapt(buffer.getvalue()) if adapt else buffer.getvalue()

            if raise_on_missing and k not in dictionary:
                raise RuntimeError("Key '{}' is unknown".format(k))

            v = dictionary.get(k, '')
            t = type(v)

            # exact type check on purpose - inheritance is ignored
            if t not in types:
                raise RuntimeError("Invalid type for '{}': {}".format(k, t))

            print(v, file=output, end='')
            buffer = StringIO()

        if char != '[' and char != ']':
            print(char, file=output, end='')

    if opened != 0:
        raise RuntimeError("Uneven number of brackets in '{}'".format(format))

    return output.getvalue()
def safe_format_keys(
    format: str,
    adapt: 'Callable[[str], str] | None' = None
) -> list[str]:
    """ Takes a :func:`safe_format` string and returns the found keys. """

    found: list[str] = []

    def record(key: str) -> str:
        # piggy-back on safe_format's adapt hook to collect every key
        adapted = adapt(key) if adapt else key
        found.append(adapted)
        return adapted

    safe_format(format, {}, adapt=record)

    return found
def is_valid_yubikey(
    client_id: str,
    secret_key: str,
    expected_yubikey_id: str,
    yubikey: str
) -> bool:
    """ Asks the yubico validation servers if the given yubikey OTP is valid.

    :client_id:
        The yubico API client id.

    :secret_key:
        The yubico API secret key.

    :expected_yubikey_id:
        The expected yubikey id. The yubikey id is defined as the first twelve
        characters of any yubikey value. Each user should have a yubikey
        associated with it's account. If the yubikey value comes from a
        different key, the key is invalid.

    :yubikey:
        The actual yubikey value that should be verified.

    :return: True if yubico confirmed the validity of the key.

    """
    assert client_id and secret_key and expected_yubikey_id and yubikey
    assert len(expected_yubikey_id) == 12

    # a key from a different device can be rejected without a roundtrip
    # to the validation server, as the first twelve chars identify the key
    if not yubikey.startswith(expected_yubikey_id):
        # FIXME: Are we leaking information with this early out?
        return False

    try:
        return Yubico(client_id, secret_key).verify(yubikey)
    except StatusCodeError as exception:
        # a replayed OTP is not an API failure, just an invalid key
        if exception.status_code != 'REPLAYED_OTP':
            raise exception

        return False
    except SignatureVerificationError:
        return False
def is_valid_yubikey_format(otp: str) -> bool:
    """ Returns True if the given OTP has the correct format. Does not actually
    contact Yubico, so this function may return true, for some invalid keys.

    """
    # bool() replaces the dated `and True or False` idiom; the regex match
    # object is truthy exactly when the OTP matches the modhex alphabet
    return bool(ALPHABET_RE.match(otp))
def yubikey_otp_to_serial(otp: str) -> int | None:
    """ Takes a Yubikey OTP and calculates the serial number of the key.

    The serial key is printed on the yubikey, in decimal and as a QR code.

    Example:

        >>> yubikey_otp_to_serial(
            'ccccccdefghdefghdefghdefghdefghdefghdefghklv')
        2311522

    Adapted from Java:

    https://github.com/Yubico/yubikey-salesforce-client/blob/
    e38e46ee90296a852374a8b744555e99d16b6ca7/src/classes/Modhex.cls

    If the key cannot be calculated, None is returned. This can happen if
    they key is malformed.

    """
    if not is_valid_yubikey_format(otp):
        return None

    token = 'cccc' + otp[:12]

    toggle = False
    keep = 0

    bytesarray = []

    # modhex decoding: every two characters form one byte (high nibble
    # from the first character, low nibble from the second)
    for char in token:
        n = ALPHABET.index(char)

        toggle = not toggle

        if toggle:
            keep = n
        else:
            bytesarray.append((keep << 4) | n)

    value = 0

    # in Java, shifts on integers are masked with 0x1f using AND
    # https://docs.oracle.com/javase/specs/jls/se8/html/jls-15.html#jls-15.19
    mask_value = 0x1f

    # accumulate the serial from the first eight decoded bytes, faithfully
    # reproducing the masked-shift behaviour of the Java reference code
    for i in range(0, 8):
        shift = (4 - 1 - i) * 8
        value += (bytesarray[i] & 255) << (shift & mask_value)

    return value
def yubikey_public_id(otp: str) -> str:
    """ Returns the yubikey identity given a token. """

    # the public identity is always the first twelve characters of the OTP
    return otp[0:12]
def dict_path(dictionary: dict[str, _T], path: str) -> _T:
    """ Gets the value of the given dictionary at the given path. For example:

        >>> data = {'foo': {'bar': True}}
        >>> dict_path(data, 'foo.bar')
        True

    """
    if not dictionary:
        raise KeyError()

    # descend one dot-separated path segment at a time
    node: Any = dictionary
    for segment in path.split('.'):
        node = node[segment]

    return node  # type:ignore
def safe_move(src: str, dst: str) -> None:
    """ Rename a file from ``src`` to ``dst``.

    * Moves must be atomic. ``shutil.move()`` is not atomic.

    * Moves must work across filesystems. Often temp directories and the
      cache directories live on different filesystems. ``os.rename()`` can
      throw errors if run across filesystems.

    So we try ``os.rename()``, but if we detect a cross-filesystem copy, we
    switch to a copy-then-atomic-rename dance instead.

    Via https://alexwlchan.net/2019/03/atomic-cross-filesystem-moves-in-python

    """
    try:
        os.rename(src, dst)
        return
    except OSError as err:
        # anything but a cross-device link error is a real failure
        if err.errno != errno.EXDEV:
            raise

    # We crossed a filesystem boundary. Copy `src` next to `dst` under a
    # unique temporary name first - this initial copy may not be atomic,
    # and the random UUID keeps concurrent copiers into `dst` from
    # overlapping in their tmp files.
    tmp_dst = "%s.%s.tmp" % (dst, uuid4())
    shutil.copyfile(src, tmp_dst)

    # The final rename happens within one filesystem, so it is atomic;
    # then the source can be cleaned up.
    os.rename(tmp_dst, dst)
    os.unlink(src)
@overload
def batched(
    iterable: Iterable[_T],
    batch_size: int,
    container_factory: 'type[tuple]' = ...  # type:ignore[type-arg]
) -> 'Iterator[tuple[_T, ...]]': ...


@overload
def batched(
    iterable: Iterable[_T],
    batch_size: int,
    container_factory: 'type[list]'  # type:ignore[type-arg]
) -> 'Iterator[list[_T]]': ...


# NOTE: If there were higher order TypeVars, we could properly infer
# the type of the Container, for now we just add overloads for
# two of the most common container_factories
@overload
def batched(
    iterable: Iterable[_T],
    batch_size: int,
    container_factory: 'Callable[[Iterator[_T]], Collection[_T]]'
) -> 'Iterator[Collection[_T]]': ...


def batched(
    iterable: Iterable[_T],
    batch_size: int,
    container_factory: 'Callable[[Iterator[_T]], Collection[_T]]' = tuple
) -> 'Iterator[Collection[_T]]':
    """ Splits an iterable into batches of batch_size and puts them
    inside a given collection (tuple by default).

    The container_factory is necessary in order to consume the iterator
    returned by islice. Otherwise this function would never return.

    """
    iterator = iter(iterable)
    while True:
        batch = container_factory(islice(iterator, batch_size))

        # an empty batch means the iterator is exhausted; the idiomatic
        # truthiness test replaces the former `len(batch) == 0`
        if not batch:
            return

        yield batch
|
7,779 | b9386cf8c17b28fd1fea6e587ca4401de247cbea | #!/usr1/local/bin/python
import os, sys, re, shutil, random
from tempfile import *
# program location
prog_dir = '/home/jpei/test_promals3d_package/bar/promals_package/bin/'
# program names
promals_web = prog_dir + "progress_for_web.py"
csv_cutoff_g = 5
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
def run_promals():
    """Parse the command line, run the promals_c aligner and then build the
    colored html alignment via the web-progress script (Python 2 script)."""
    csv_cutoff = csv_cutoff_g

    # check and parse the command line
    cmd_line = sys.argv
    if len(cmd_line) <= 1:
        promals_help()
        sys.exit(1)
    elif not os.path.isfile(cmd_line[1]):
        print >> sys.stderr, "Error reading input file:", cmd_line[1]
        promals_help()
        sys.exit(1)
    else:
        # probe that the input file's directory is writable by creating
        # (and immediately removing) a file with a random 40 char name
        randomstring = ""
        infile = os.path.abspath(cmd_line[1])
        infiledir = os.path.split(infile)[0]
        for x in random.sample(alphabet,40):
            randomstring+=x
        ranfile = "%s/%s" %(infiledir, randomstring)
        try:
            fp = open(ranfile, "w")
        except:
            print >> sys.stderr, "Error:"
            print >> sys.stderr, " The directory containing your input file is not writable:", infiledir
            print >> sys.stderr, " Input file should be in a writable directory"
            sys.exit(1)
        fp.close()
        os.system("rm -f %s" %ranfile)

    cmd_line1 = []
    outputfile = ""
    blast_dir = ""
    resnum = 1
    caa_freq = 0.8

    # rebuild the argument list: argv[0] becomes the promals_c binary,
    # file arguments become absolute paths, and the wrapper-only options
    # (-csv_index, -resnum, -caa_freq) are remembered for the html step
    for i in range(len(cmd_line)):
        arg = cmd_line[i]
        if i == 0: arg = prog_dir + 'promals_c'
        # change inputfile name to full path name
        if i == 1:
            arg = os.path.abspath(arg)
            inputfile = arg
        # change outfile name to full path name
        if arg == '-outfile':
            if i+1 < len(cmd_line):
                cmd_line[i+1] = os.path.abspath(cmd_line[i+1])
                outputfile = cmd_line[i+1]
        # change blast_dir name to full path name
        if arg == '-blast_dir':
            if i+1 < len(cmd_line):
                cmd_line[i+1] = os.path.abspath(cmd_line[i+1])
        #if arg == '-ssw': arg = '-ss_weight'
        #if arg == '-aaw': arg = '-score_weight'
        #if arg == '-max_homologs': arg = '-max_num_sequences'
        #if arg == '-iter_num': arg = '-iter_number'
        if arg == '-csv_index':
            if i+1 < len(cmd_line):
                csv_cutoff = int(cmd_line[i+1])
                # out-of-range indices silently fall back to the default
                if (csv_cutoff<0) or (csv_cutoff>9):
                    csv_cutoff = 5
        if arg == "-resnum":
            resnum = int(cmd_line[i+1])
        if arg == "-caa_freq":
            caa_freq = float(sys.argv[i+1])
        cmd_line1.append(arg)

    # derive default output/blast locations from the input file name
    if not outputfile:
        if re.search("\.fa$", inputfile):
            outputfile = re.sub("\.fa$", "", inputfile) + ".promals.aln"
        else: outputfile = inputfile + ".promals.aln"
    if not blast_dir:
        blast_dir = "%s_blast" %inputfile

    # assemble the promals_c shell command, stripping the wrapper-only
    # options which promals_c itself does not understand
    promals_c = ' '.join(cmd_line1)
    promals_c = re.sub("\s+-resnum\s+\S+", " ", promals_c)
    promals_c = re.sub("\s+-caa_freq\s+\S+", " ", promals_c)
    promals_c = re.sub("\s+-csv_index\s+\S+", " ", promals_c)
    if "-blast_dir" not in promals_c:
        promals_c += " -blast_dir %s " %blast_dir
    outputlogfile = inputfile+".prmls.oUTpUT"
    promals_c = promals_c + " > " + outputlogfile
    print "promals command:"
    print promals_c
    print
    sys.stdout.flush()

    # run programs in a temporary directory to avoid .ncbirc problem
    cwd = os.getcwd()
    tmpdir = mkdtemp()
    os.chdir(tmpdir)
    os.system("cp %s.ncbirc ." %prog_dir)
    s1 = os.system(promals_c)
    if s1 == 0:
        print "output alignment file is:", outputfile
        print "blast intermediate files are in:", blast_dir
        print
    else:
        print "Error running promals - check log file for details:", outputlogfile
        print

    # second stage: render the colored html alignment from the result
    print "html file command:"
    print "python %s %s %s -cutoff %d -resnum %d -caa_freq %f" %(promals_web, outputfile, outputlogfile, csv_cutoff, resnum, caa_freq)
    print
    sys.stdout.flush()
    s2 = os.system("python %s %s %s -cutoff %d -resnum %d -caa_freq %f 2>/dev/null" %(promals_web, outputfile, outputlogfile, csv_cutoff, resnum, caa_freq) )
    if s2 == 0:
        print "output html alignment file is:", outputfile + ".html"
        print
    else:
        print "Error generating html file"
        print
    os.chdir(cwd)
    shutil.rmtree(tmpdir)
def promals_help():
    """Print the usage and options text for the promals wrapper (Python 2)."""
    # NOTE: this text is emitted verbatim to the user - keep it unchanged
    help_content = '''
promals with 3D information
command:
        promals input_file [options] > input_file.log
        python promals input_file [options] > input_file.log
input:
        input_file needs to be FASTA format
output:
        Two alignment files will be generated. One is in CLUSTAL
        format alignment (file name can be specified by option -outfile).
        The other file is an html file of colored alignment.
Options:
  For alignment strategies:
    -id_thr [0, 1]      Identity threshold that determined the partition of
                        fast and slow alignment processes. If two groups of
                        sequences has average identity above this threshold,
                        align them in a fast way. Otherwise, use slower but
                        more accurate way (by profile-profile alignment with
                        predicted secondary structures and available 3D
                        constraints). Default: 0.6 (corresponding to 60% identity)
  For using 3D information:
    -dali [0 or 1]      Use DaliLite structural alignment (1) or not use
                        fast alignment (0) ("DaliLite" executable needs to
                        be present in bin/ directory). Default: 0 (it is
                        relatively slow to run DaliLite)
    -fast [0 or 1]      Use fast structural alignment (1) or not use fast
                        alignment (0) ("fast" executable needs to be present
                        in bin/ directory). Default: 1
    -tmalign [0 or 1]   Use TMalign structural alignment (1) or not use fast
                        TMalign alignment (0) ("TMalign" executable needs to
                        be present in bin/ directory). Default: 1
    -struct_weight [0, inf[  Weight of structural constraints relative to sequence
                        constraints. Default: 1.5
  For profile scoring:
    -ss_weight [0,inf[  Weight of predicted secondary structure in profile-profile
                        scoring. Default: 0.2
    -score_weight [0,inf[    Weight of amino acids in profile-profile scoring.
                        Default: 0.8
  For running PSI-BLAST to get sequence profile:
    -iter_number <int>  Number of PSI-BLAST iterations for profile generation.
                        Default: 3
    -evalue [0, inf[    PSI-BLAST evalue cutoff for inclusion. Default: 0.001
    -low_id_thr [0,1]   Remove PSI-BLAST hits with identity to the query less than
                        this value. Default: 0.2
    -blast_dir <file>   Directory of running PSI-BLAST and store other intermediate
                        results.
    -clean_blast_before [0 or 1]   Remove any file in the directory that stores
                        intermediate results (specified by -blast_dir option) before
                        running PSI-BLAST. Default: 0.
    -clean_blast_after [0 or 1]    Remove any file in the PSI-BLAST directory after running
                        PSI-BLAST. Default: 0
  For output:
    -outfile <file>     The name of output alignment file.
    -blocksize <int>    Number of letters in clustal-format alignment blocks.
                        Default: 70
    -resnum [0 or 1]    In colored html alignment, show residue numbers for
                        alignment blocks. Default: 1
    -caa_freq [0, 1]    In colored html alignment, show amino acid consensus
                        symbol if the fraction of a class of residues is higher
                        than this threshold. Default: 0.8
'''
    print help_content
# allow use both as a script and as an importable module
if __name__ == '__main__':
    run_promals()
|
7,780 | 4c5416582afb3cfeb56259954cda2701ea26f8cd | # -*- coding: utf-8 -*-
"""
helpers
~~~~~~~
Implements various helper functions.
:copyright: (c) 2016 by Patrick Spencer.
:license: Apache 2.0, see LICENSE for more details.
"""
from datetime import datetime, timedelta
import calendar
def month_bounds(year, month):
    """
    Return a tuple of datetime objects (month_start, month_end) for the
    given year and month.

    Parameters may be strings (e.g. "2016", "02") or integers - they are
    coerced with ``int()``, which handles leading zeros either way.

    :param year: four digit year, e.g. "2016"
    :param month: month number, e.g. 2 for February, 11 for November
    """
    year = int(year)
    month = int(month)

    # construct the first day directly instead of formatting a string and
    # re-parsing it with strptime (same result, no roundtrip)
    month_start = datetime(year, month, 1)

    # calendar.monthrange returns (weekday_of_first_day, days_in_month);
    # only the day count is needed here
    days_in_month = calendar.monthrange(year, month)[1]
    month_end = month_start + timedelta(days=days_in_month - 1)

    return (month_start, month_end)
|
7,781 | 730aaa0404a0c776ce4d3a351f292f90768b6867 | import re
def parse_rule(rule):
    """Parse one "X bags contain ..." rule line.

    Returns (color, [(inner_color, count_str), ...]); the list is empty
    for bags that contain no other bags.
    """
    # pattern: "<count> <color> bag(s)" within each contained-bag clause
    elem_regex = re.compile("(\d+) (.*) bags?.*")

    # drop the trailing newline character
    rule = rule[:-1]

    color, inside = rule.split(" bags contain")

    contents = []
    for clause in inside.split(","):
        found = elem_regex.search(clause)
        if found:
            contents.append((found.group(2), found.group(1)))

    return color, contents
def get_neighbours(graph, v):
    # drop the counts, keep only the neighbouring colors of vertex v
    return [neighbour for neighbour, _count in graph[v]]
def dfs_counting(graph, v):
    # a bag counts itself plus, for every contained bag type, the declared
    # count times the recursive size of that bag
    total = 1
    for colour, count in graph[v]:
        total += int(count) * dfs_counting(graph, colour)
    return total
# driver: build the color -> contents graph from input.txt, then count
# the bags nested inside one "shiny gold" bag (AoC 2020 day 7, part 2)
f = open('input.txt')
rules = f.readlines()
graph = {}
for rule in rules:
    color, elements = parse_rule(rule)
    graph[color] = elements
print(dfs_counting(graph, 'shiny gold')-1) #we are not counting the shiny gold one, so we substract 1
7,782 | c99878dbd5610c8a58f00912e111b1eef9d3893e | from os import path
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.pipeline import Pipeline
from sta211.datasets import load_train_dataset, load_test_dataset, find_best_train_dataset
from sklearn.model_selection import GridSearchCV
from sta211.selection import get_naive_bayes, get_mlp, get_svm, get_gradient_boosting, get_random_forest, get_best_hyper_parameters, get_extra_trees, get_adaboost, get_voting_classifier
from multiprocessing import cpu_count
# use all but one CPU core for the grid search
n_jobs = max(1, cpu_count()-1)
test_size = 0.20
X, y, quantitatives = load_train_dataset()

# Manual aggregation
pipe, search_grid = get_voting_classifier()
# pipes, search_grid = get_svm()
# pipe = Pipeline(pipes)

# 5-fold stratified shuffle split; refit the best estimator on the full set
cv = StratifiedShuffleSplit(test_size=test_size, random_state=0, n_splits=5)
grid = GridSearchCV(pipe, search_grid, cv=cv, n_jobs=n_jobs, return_train_score=True, refit=True, scoring="accuracy")
grid.fit(X, y)

# print one semicolon-separated line per configuration:
# params;train;test;+/- two std deviations
parameters = get_best_hyper_parameters(grid)
print("Result for {} configurations".format(len(parameters)))
for p in parameters:
    print("{};{:.2f}%;{:.4f}%;±{:.4f}%".format(
        ", ".join(map(lambda k: "{}={}".format(k.split("__")[1], p["params"][k]), p["params"].keys())),
        100.0 * p["mean_train_score"],
        100.0 * p["mean_test_score"],
        200.0 * p["std_test_score"]
    ))
    # print("Results: Train: {:.2f}%, Test: {:.2f}% std:{:.4f} for {}".format(100 * p["mean_train_score"], 100 * p["mean_test_score"], p["std_test_score"], p["params"]))

# write one prediction per line into predictions.csv next to this script
prediction_file = "{}/predictions.csv".format(path.dirname(path.abspath(__file__)))
pred = grid.predict(load_test_dataset())
f = open(prediction_file, "w")
f.write("\n".join(map(lambda o: str(o), pred)))
f.close()
7,783 | 187c2a56ba9360b89c8ded09861091e2deedf32e | import os, sys, shutil
import fnmatch, logging, zipfile
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)
def scan_files(dir, pattern):
    """Recursively collect all files under `dir` whose basename matches
    the fnmatch-style `pattern`; returns full paths in walk order."""
    matches = []
    for root, _dirs, names in os.walk(dir):
        matches.extend(
            os.path.join(root, name)
            for name in names
            if fnmatch.fnmatch(name, pattern)
        )
    return matches
# ensure the output directory for the final archive exists
if (not os.path.exists('dist')):
    os.makedirs('dist')

currentDir = os.getcwd() # save current dir
os.chdir('..\\..') # go to root of simulation
distPath = os.path.join(currentDir, 'bundle') # where to put files

# (source dir, filename pattern, target subdir, keep relative path?)
scanData = [
    ['WSN\\simulations', '*.ned', '', True],
    ['WSN\\simulations', '*.xml', '', True],
    ['WSN\\simulations', '*.exe', '', True],
    ['WSN\\simulations', '*.ini', '', True],
    ['WSN\\src', '*.ned', '', True],
    ['WSN\\src', '*.dll', '', True],
    ['MiXiM\\src', '*.ned', '', True],
    ['MiXiM\\src', '*.dll', '', True],
    ['MiXiM\\src\\base', '*.dll', 'lib', False],
    ['MiXiM\\src\\modules', '*.dll', 'lib', False],
    [os.path.join(currentDir, 'lib'), '*.dll', 'lib', False],
]

# remove old bundle
if (os.path.exists(distPath)):
    shutil.rmtree(distPath)

# copy neccessary files
for data in scanData:
    for file in scan_files(data[0], data[1]):
        # either mirror the source tree or flatten to the basename
        if (data[3]):
            newSubPath = file
        else:
            newSubPath = os.path.basename(file)
        newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))
        newDir = os.path.dirname(newPath)
        if (not os.path.exists(newDir)):
            os.makedirs(newDir)
        logging.info('Copying %s to %s' % (file, newPath))
        shutil.copyfile(file, newPath)

# zip everything under the staging directory into dist/bundle.zip
logging.info("Creating archive")
bundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', "bundle.zip"), 'w', zipfile.ZIP_DEFLATED)
for root, subFolders, files in os.walk(distPath):
    for file in files:
        # make path relative to distPath
        newPath = os.path.join(root, file).replace(distPath, '')
        # add files to zip
        bundleZip.write(os.path.join(root, file), newPath)
bundleZip.close()
logging.info("Done")
os.chdir(currentDir) # go back
7,784 | a63e5186c0eb8b5ae8510b473168db3461166513 | from django.views.generic import (ListView, DetailView, CreateView,
DeleteView, UpdateView, TemplateView)
from django.views.generic.edit import ModelFormMixin
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from .models import Movie, Actor
from .forms import MovieForm
from django.http import Http404
def my_print(*args, **kwargs):
    # NOTE(review): despite the name, this raises Http404 with the given
    # arguments - presumably a quick debugging aid; confirm before relying
    # on it for actual output.
    raise Http404(*args, **kwargs)
class BaseModelApi(TemplateView, ModelFormMixin):
    """Skeleton REST-style view: derives template names and the form class
    from ``self.model``. Most HTTP verb handlers are unimplemented stubs."""

    def get_template_names(self):
        # all form-submitting verbs share the `<model>/<model>_form.html`
        # template; other verbs map to `<model>/<model>_<verb>.html`
        prefix = self.request.method
        if prefix in ['PUT', 'PATCH', 'POST']:
            prefix = 'form'
        name = self.model
        return [f'{name}/{name}_{prefix}.html']

    # stub verb handlers - no behaviour implemented yet
    def get(self, request):
        pass

    def post(self, request):
        pass

    def put(self, request):
        pass

    def patch(self, request):
        pass

    def delete(self, request):
        pass

    def dispatch(self, request):
        pass

    def get_context_data(self):
        pass

    def get_form(self):
        pass

    def get_form_class(self):
        # NOTE(review): eval() of a derived name is only safe while
        # self.model is a trusted, code-controlled value; an explicit
        # name -> form registry would be preferable.
        name = f'{self.model}'.title()
        # prefix = f'{self.request.method}'.title()
        self.form_class = eval(f'{name}Form')
        return self.form_class
class MoviesView(ListView):
    # list all movies; the template sees them as `movies`
    model = Movie
    context_object_name = 'movies'
class MovieView(DetailView):
    # single movie detail page; the template sees it as `movie`
    model = Movie
    context_object_name = 'movie'
class ActorView(DetailView):
    # single actor detail page; the template sees it as `actor`
    model = Actor
    context_object_name = 'actor'
# creation requires an authenticated user
@method_decorator(login_required, name='dispatch')
class MovieCreateView(CreateView):
    form_class = MovieForm
    template_name = 'movies/movie_form.html'
    success_url = reverse_lazy('movie_all')
# editing requires an authenticated user
@method_decorator(login_required, name='dispatch')
class MovieUpdateView(UpdateView):
    model = Movie
    form_class = MovieForm
    template_name = 'movies/movie_form.html'
    success_url = reverse_lazy('movie_all')
# deletion requires an authenticated user
@method_decorator(login_required, name='dispatch')
class MovieDelete(DeleteView):
    model = Movie
    success_url = reverse_lazy('movie_all')
7,785 | 7f63097265b1058785e90441f85b7f0088946717 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration for guac_auth: drops the
    # connection group `type` field, clears the unique-together constraints
    # on the permission tables and removes their `permission` fields.

    dependencies = [
        ('guac_auth', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='guacamoleconnectiongroup',
            name='type',
        ),
        migrations.AlterUniqueTogether(
            name='guacamoleconnectiongrouppermission',
            unique_together=set([]),
        ),
        migrations.AlterUniqueTogether(
            name='guacamoleconnectionpermission',
            unique_together=set([]),
        ),
        migrations.AlterUniqueTogether(
            name='guacamolesystempermission',
            unique_together=set([]),
        ),
        migrations.AlterUniqueTogether(
            name='guacamoleuserpermission',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='guacamoleconnectiongrouppermission',
            name='permission',
        ),
        migrations.RemoveField(
            model_name='guacamoleconnectionpermission',
            name='permission',
        ),
        migrations.RemoveField(
            model_name='guacamolesystempermission',
            name='permission',
        ),
        migrations.RemoveField(
            model_name='guacamoleuserpermission',
            name='permission',
        ),
    ]
7,786 | 053fa80c80d40cd28acb7d6a8bf1b2c30be9b36e | from PIL import Image, ImageDraw, ImageFont
import sys
### Create 1024,1024 pixel image with a white background.
img = Image.new("RGB", (1024, 1024), color = (255,255,255))
### Take text to be drawn on the image from the command terminal.
text = sys.argv[1]
### Chose favourite font and set size of the font.
fnt = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", 150, encoding="unic")
d = ImageDraw.Draw(img)
d.text(xy=(320,420), text = text , font = fnt, fill=(0,0,0))
### Save image as .png file.
img.save(text+'.png')
|
7,787 | 4f9729e396e01cb3d6c9011f79a1ebe618a8e762 | import os
import subprocess
import discord
import asyncio
import traceback
import sys
import ast
from discord.ext import commands
# Import Cogs
from cogs.misc import Miscellaneous
from cogs.serversettings import ServerSettings
from cogs.mod import Moderator
from cogs.automod import AutoMod
from cogs.google import Google
# Minigame/Fun Cogs
from cogs.fun import Fun
#from cogs.hangman import Hangman
#from cogs.rps import RockPaperScissors
from otherscipts.helpers import update_presence
from otherscipts.data import Data
TOKEN = os.getenv('SPARTA_TOKEN')
intents = discord.Intents.default()
intents.members = True
def get_prefix(client, message):
    """Return the command prefix for the message's guild, creating the
    guild's settings entry on first contact."""
    guild_id = str(message.guild.id)
    if guild_id not in Data.server_data:
        Data.server_data[guild_id] = Data.create_new_data()
    return Data.server_data[guild_id]["prefix"]
PREFIX = get_prefix

# bot instance; the default help command is disabled and commands are
# matched case-insensitively
bot = commands.Bot(
    command_prefix=PREFIX,
    description="I am Sparta Bot, a bot for the Official Sparta Gaming Discord server.",
    intents=intents,
    help_command=None,
    case_insensitive=True
)
THEME_COLOR = discord.Colour.blue()

# Add Cogs
bot.add_cog(Miscellaneous(bot, THEME_COLOR))
bot.add_cog(ServerSettings(bot, THEME_COLOR))
bot.add_cog(Moderator(bot, THEME_COLOR))
bot.add_cog(AutoMod(bot, THEME_COLOR))
bot.add_cog(Fun(bot, THEME_COLOR))
bot.add_cog(Google(bot, THEME_COLOR))
#bot.add_cog(Hangman(bot, THEME_COLOR))
#bot.add_cog(RockPaperScissors(bot, THEME_COLOR))

# tracks the author of the previous message (used by the AFK handling)
previous_msg_sender_id = None
@bot.event
async def on_ready():
    """Start the background tasks once the gateway connection is ready."""
    bot.loop.create_task(Data.auto_update_data())
    bot.loop.create_task(update_presence(bot, PREFIX))
    print("Bot is ready...")
@bot.event
async def on_guild_join(guild):
    # log guild joins to a hard-coded log channel (id 773580297954394162)
    log_channel = bot.get_channel(773580297954394162)
    await log_channel.send(f"Joined - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_guild_remove(guild):
    # log guild departures to the same hard-coded log channel
    log_channel = bot.get_channel(773580297954394162)
    await log_channel.send(f"Left - {guild.name}\nServer ID - {guild.id}\nOwner - {guild.owner}")
@bot.event
async def on_member_join(member):
    """Greet a new member: apply the configured join role and send the
    welcome message to the configured (or auto-detected) channel."""
    guild: discord.Guild = member.guild
    channels = guild.channels

    # lazily create the guild's settings entry on first contact
    if str(guild.id) not in Data.server_data:
        Data.server_data[str(guild.id)] = Data.create_new_data()
    data = Data.server_data[str(guild.id)]

    print(f"{member} has joined {guild} server...")

    join_role = guild.get_role(data["join_role"])
    if join_role is not None:
        await member.add_roles(join_role)

    # Welcome Message: fall back to a default, then substitute [mention]
    if data["welcome_msg"] is None:
        server_wlcm_msg = f"Welcome, {member.mention}, to the Official **{guild.name}** Server"
    else:
        server_wlcm_msg = data["welcome_msg"]

    server_wlcm_msg = server_wlcm_msg.replace(
        "[mention]", f"{member.mention}")

    # Welcome Channel: the configured channel, or the first channel whose
    # name contains "welcome"
    wel_channel = None
    if data["welcome_channel"] is None:
        for channel in channels:
            if str(channel).find("welcome") != -1:
                wel_channel = channel
                break
    else:
        wel_channel = guild.get_channel(int(data["welcome_channel"]))

    try:
        await wel_channel.send(server_wlcm_msg)
    except AttributeError:
        # wel_channel stayed None - nothing configured or found
        print("DEBUG: No welcome channel has been set or found.")
# Remove welcome channel
@bot.command(name="remove_welcome", aliases=['rwel', 'remwel'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_welcome(ctx, *, channel):
    """Update this server's welcome channel entry.

    NOTE(review): despite the command name, this stores the supplied
    `channel` argument rather than clearing the setting - confirm whether
    callers are expected to pass a sentinel value here.
    """
    if str(ctx.guild.id) not in Data.server_data:
        Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
    Data.server_data[str(ctx.guild.id)]["welcome_channel"] = channel
    await ctx.send("This server's welcome channel has been removed")
@bot.event
async def on_member_remove(member):
    """Announce a member leaving in the configured (or auto-detected)
    leave channel."""
    guild = member.guild
    channels = guild.channels

    # lazily create the guild's settings entry on first contact
    if str(guild.id) not in Data.server_data:
        Data.server_data[str(guild.id)] = Data.create_new_data()
    data = Data.server_data[str(guild.id)]

    print(f"{member} has left the {guild.name}...")

    # Leave Message: fall back to a default, then substitute [member]
    if data["leave_msg"] is None:
        server_leave_msg = f"Goodbye, **{str(member)}**, thank you for staying at **{guild.name}** Server"
    else:
        server_leave_msg = data["leave_msg"]

    server_leave_msg = server_leave_msg.replace("[member]", f"{member}")

    # Leave Channel: the configured channel, or the first channel whose
    # name contains "bye" or "leave"
    lv_channel = None
    if data["leave_channel"] is None:
        for channel in channels:
            if str(channel).find("bye") != -1 or str(channel).find("leave") != -1:
                lv_channel = channel
                break
    else:
        lv_channel = guild.get_channel(int(data["leave_channel"]))

    try:
        await lv_channel.send(server_leave_msg)
    except AttributeError:
        # lv_channel stayed None - nothing configured or found
        print("DEBUG: No leave channel has been set or found.")
# Remove leave channel
@bot.command(name="remove_leave", aliases=['rleave', 'remleave'])
@commands.has_guild_permissions(manage_guild=True)
async def remove_leave(ctx, *, channel):
    """Update this server's leave channel entry.

    The callback was previously named `remove_welcome`, shadowing the
    welcome command's callback at module level - renamed to match the
    registered command name.

    NOTE(review): like its welcome counterpart, this *stores* the supplied
    argument rather than clearing the setting - confirm intended semantics.
    """
    if str(ctx.guild.id) not in Data.server_data:
        Data.server_data[str(ctx.guild.id)] = Data.create_new_data()
    Data.server_data[str(ctx.guild.id)]["leave_channel"] = channel
    await ctx.send("This server's leave channel has been Removed")
@bot.event
async def on_command_error(ctx, error):
    """Global command error handler.

    Unwraps the original exception, swallows unknown commands, echoes
    expected command errors back to the user and logs anything unexpected.
    """
    # CommandInvokeError wraps the real exception in `.original`
    try:
        error = error.original
    except Exception:
        pass

    if type(error) is discord.ext.commands.errors.CommandNotFound:
        # unknown commands are ignored entirely
        return

    # expected, user-facing command errors: fall through and just send
    # str(error) below. Exact type checks (not isinstance) are kept on
    # purpose to preserve the original matching behaviour.
    expected = (
        discord.ext.commands.errors.BadArgument,
        discord.ext.commands.errors.MissingRequiredArgument,
        discord.ext.commands.errors.NoPrivateMessage,
        discord.ext.commands.errors.MissingPermissions,
        discord.ext.commands.errors.NotOwner,
        discord.ext.commands.errors.CommandOnCooldown,
        discord.ext.commands.errors.ChannelNotFound,
        discord.ext.commands.errors.BadUnionArgument,
        discord.ext.commands.errors.BotMissingPermissions,
    )

    if type(error) in expected:
        pass
    elif type(error) is discord.errors.Forbidden:
        error = "I don't have permission to do that!"
    else:
        # unexpected error: dump the full traceback for the dev
        print(f"Error {type(error)}: {error}")
        traceback.print_exception(
            type(error), error, error.__traceback__, file=sys.stderr
        )

        # NOTE(review): this embed is built but never sent - ctx.send below
        # only sends the plain error text; confirm whether the embed was
        # meant to be attached.
        embed = discord.Embed(
            title='Error!',
            description='An unexpected error ocurred.\
            Please report this to the dev.',
        )
        embed.add_field(
            name='Error Message:',
            value=f"{type(error)}:\n{error}",
            inline=False
        )

    await ctx.send(f"{error}")
# LABEL: Programming Commands
def insert_returns(body):
    """Rewrite a parsed statement list so its trailing expression becomes
    a return, recursing into trailing if/with blocks."""
    last = body[-1]

    # a bare trailing expression statement becomes `return <expr>`
    if isinstance(last, ast.Expr):
        body[-1] = ast.Return(last.value)
        ast.fix_missing_locations(body[-1])

    # descend into both branches of a trailing if statement
    if isinstance(last, ast.If):
        insert_returns(last.body)
        insert_returns(last.orelse)

    # descend into the body of a trailing with block
    if isinstance(last, ast.With):
        insert_returns(last.body)
@bot.command(name='eval')
async def eval_fn(ctx, *, cmd):
    """Evaluates input.
    Input is interpreted as newline seperated statements.
    If the last statement is an expression, that is the return value.
    Usable globals:
      - `bot`: the bot instance
      - `discord`: the discord module
      - `commands`: the discord.ext.commands module
      - `ctx`: the invokation context
      - `__import__`: the builtin `__import__` function
    Such that `>eval 1 + 1` gives `2` as the result.
    The following invokation will cause the bot to send the text '9'
    to the channel of invokation and return '3' as the result of evaluating
    >eval ```
    a = 1 + 2
    b = a * 2
    await ctx.send(a + b)
    a
    ```
    """
    # SECURITY: this executes arbitrary code - access is restricted to the
    # two hard-coded owner account ids below
    if ctx.message.author.id not in [400857098121904149, 733532987794128897]:
        await ctx.send("You are not authorized to run this command")
        return

    fn_name = "_eval_expr"
    cmd = cmd.strip("` ")

    # add a layer of indentation
    cmd = "\n".join(f" {i}" for i in cmd.splitlines())

    # wrap in async def body
    body = f"async def {fn_name}():\n{cmd}"
    parsed = ast.parse(body)
    body = parsed.body[0].body

    # make the trailing expression the coroutine's return value
    insert_returns(body)

    env = {
        'bot': ctx.bot,
        'discord': discord,
        'commands': commands,
        'ctx': ctx,
        '__import__': __import__
    }
    # compile and define the wrapper, then await it to get the result
    exec(compile(parsed, filename="<ast>", mode="exec"), env)
    result = (await eval(f"{fn_name}()", env))
    await ctx.send(result)
# LABEL: Debugging Commands
@bot.command(name="data")
async def data(ctx):
    """Owner-only: upload the raw data.json settings file to the channel."""
    is_owner = await bot.is_owner(ctx.author)
    if is_owner or ctx.author.id == 733532987794128897: # for real sparta
        data_file = discord.File("data.json")
        await ctx.send(file=data_file)
@bot.event
async def on_message(message: discord.Message):
    """Central message hook: commands, prefix echo, AFK tracking and channel moderation."""
    global previous_msg_sender_id
    # Ignore other bots (including ourselves) entirely.
    if message.author.bot:
        return
    author: discord.Member = message.author
    channel: discord.TextChannel = message.channel
    guild: discord.Guild = message.guild
    # print(str(author), ": ", message.content)
    # Let the commands extension run first so prefixed commands still work.
    await bot.process_commands(message)
    # Lazily create per-guild storage on first message from that guild.
    if str(guild.id) not in Data.server_data:
        Data.server_data[str(guild.id)] = Data.create_new_data()
    data = Data.server_data[str(guild.id)]
    # A bare bot mention (with or without the nickname '!') replies with the prefix.
    if message.content.replace('!', '') == bot.user.mention:
        pre = data["prefix"]
        await channel.send(f"The prefix in this server is `{pre}`")
    # AFK handling: clear the author's own AFK on their *second* consecutive
    # message (previous_msg_sender_id check), and announce anyone AFK who
    # gets mentioned.
    for afk_user_entry in data["afks"]:
        afk_user_id = int(afk_user_entry["user"])
        afk_reason = afk_user_entry["reason"]
        afk_user = guild.get_member(afk_user_id)
        if afk_user.id == author.id and afk_user_id == previous_msg_sender_id:
            Data.server_data[str(guild.id)]["afks"].remove(afk_user_entry)
            await channel.send(f"**{afk_user}** is no longer AFK.")
        elif afk_user in message.mentions:
            await channel.send(f"**{afk_user}** is currently AFK because **{afk_reason}**.")
    # "Press F to pay respects" easter egg, if enabled for this guild.
    if data["pay_respects"] and message.content.strip().lower() == "f":
        await channel.send(f"**{author.display_name}** has paid their respects...")
    # Link/attachment moderation: applies only when the feature is active,
    # the author and channel are not whitelisted, and the author is not an admin.
    if data["active"] and str(author.id) not in data["users"]:
        if not str(channel.id) in data["channels"]:
            perms = author.permissions_in(channel)
            if not perms.administrator:
                if "http://" in message.content or "https://" in message.content:
                    if len(data["urls"]) > 0:
                        # Only URLs outside the allow-list are purged.
                        for url in data["urls"]:
                            if not url in message.content:
                                await channel.purge(limit=1)
                                msg1 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
                                await asyncio.sleep(2)
                                await msg1.delete()
                    else:
                        # No allow-list configured: every link is purged.
                        await channel.purge(limit=1)
                        msg2 = await channel.send(f"{author.mention}, you are not allowed to send links in this channel.")
                        await asyncio.sleep(3)
                        await msg2.delete()
                elif len(message.attachments) > 0:
                    await channel.purge(limit=1)
                    msg3 = await channel.send(f"{author.mention}, you are not allowed to send attachments in this channel.")
                    await asyncio.sleep(3)
                    await msg3.delete()
    # Remembered so the AFK-clearing logic above fires on the next message.
    previous_msg_sender_id = author.id
# Start the bot; blocks until the process is stopped.
bot.run(TOKEN)
|
7,788 | 2440f5bc774f2e2f746a246cbb2e305965c9e576 | from __future__ import absolute_import
from django.conf.urls import patterns, url
from sentry_plugins.jira_ac.views import JiraConfigView, \
JiraDescriptorView, JiraInstalledCallback, JiraUIWidgetView
# URL routes for the JIRA Atlassian Connect plugin endpoints.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 — this module targets an older Django; confirm before upgrading.
urlpatterns = patterns(
    '',
    url(r'^plugin$', JiraUIWidgetView.as_view()),
    url(r'^config$', JiraConfigView.as_view()),
    url(r'^atlassian-connect\.json$', JiraDescriptorView.as_view()),
    url(r'^installed$', JiraInstalledCallback.as_view()),
)
|
7,789 | 937a101cf5c7e943fc62d18b77357eea151fdfaf | cardlist = []
card = []
# Build the full 52-card deck in fixed suit order (Spades, Hearts, Clubs,
# Diamonds), each suit holding ranks 1..13 as strings.
cardlist = [[suit, str(rank)]
            for suit in ("S", "H", "C", "D")
            for rank in range(1, 14)]
# Read how many cards are already held, then each held card as "<suit> <rank>".
count = int(input())
for _ in range(count):
    card.append(input().split())
# Remove every held card from the deck.
for held in card:
    cardlist.remove(held)
# Print the remaining cards in deck order.
for suit, rank in cardlist:
    print("{0} {1}".format(suit, rank))
7,790 | 856a27e953a6b4e1f81d02e00717a8f95a7dea5f | import cv2
# open webcam (웹캠 열기)
# Open the default webcam (device 0).
webcam = cv2.VideoCapture(0)
if not webcam.isOpened():
    print("Could not open webcam")
    exit()
sample_num = 0    # frames seen since the last saved capture
captured_num = 0  # total images written so far; used in the filename
# Main capture loop: show every frame, persist every 4th one.
while webcam.isOpened():
    # read frame from webcam
    status, frame = webcam.read()
    sample_num = sample_num + 1
    if not status:
        break
    # display output
    cv2.imshow("captured frames", frame)
    # Save one frame out of every four to ./images/imgN.jpg.
    if sample_num == 4:
        captured_num = captured_num + 1
        cv2.imwrite('./images/img'+str(captured_num)+'.jpg', frame)
        sample_num = 0
    # press "Q" to stop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# release resources
webcam.release()
cv2.destroyAllWindows()
7,791 | 2060f0af351c1487f8aa45943dbaa050f4291c58 | from typing import Any, Callable, Generator, List, Optional
import pytest
from _pytest import nodes
from _pytest.config import hookimpl
from _pytest.python import Function, PyCollector # type: ignore
from hypothesis.errors import InvalidArgument # pylint: disable=ungrouped-imports
from .._hypothesis import create_test
from ..exceptions import InvalidSchema
from ..models import Endpoint
from ..utils import is_schemathesis_test
@hookimpl(hookwrapper=True)  # type:ignore # pragma: no mutate
def pytest_pycollect_makeitem(collector: nodes.Collector, name: str, obj: Any) -> Optional["SchemathesisCase"]:
    """Switch to a different collector if the test is marked as parametrized by schemathesis."""
    # Hookwrapper: let the default collection run first, then inspect/override it.
    outcome = yield
    if is_schemathesis_test(obj):
        # Replace the default item with our schema-driven collector.
        outcome.force_result(SchemathesisCase(obj, name, collector))
    else:
        # Re-raise any exception captured during default collection.
        outcome.get_result()
class SchemathesisCase(PyCollector):
    """Pytest collector that expands one schemathesis-marked test into one item per endpoint."""

    def __init__(self, test_function: Callable, *args: Any, **kwargs: Any) -> None:
        self.test_function = test_function
        # The schema object attached by the schemathesis parametrize decorator.
        self.schemathesis_case = test_function._schemathesis_test  # type: ignore
        super().__init__(*args, **kwargs)

    def _get_test_name(self, endpoint: Endpoint) -> str:
        """Build the display name, e.g. ``test_api[GET:/users]``."""
        return f"{self.name}[{endpoint.method}:{endpoint.path}]"

    def _gen_items(self, endpoint: Endpoint) -> Generator[Function, None, None]:
        """Generate all items for the given endpoint.

        Could produce more than one test item if
        parametrization is applied via ``pytest.mark.parametrize`` or ``pytest_generate_tests``.
        """
        try:
            hypothesis_item = create_test(endpoint, self.test_function)
        except InvalidSchema:
            # Defer the failure to run time so collection itself succeeds.
            hypothesis_item = lambda: pytest.fail("Invalid schema for endpoint")
        items = self.ihook.pytest_pycollect_makeitem(
            collector=self.parent, name=self._get_test_name(endpoint), obj=hypothesis_item
        )
        for item in items:
            # Force each collected item to execute our hypothesis-wrapped test.
            item.obj = hypothesis_item
            yield item

    def collect(self) -> List[Function]:  # type: ignore
        """Generate different test items for all endpoints available in the given schema."""
        try:
            return [
                item for endpoint in self.schemathesis_case.get_all_endpoints() for item in self._gen_items(endpoint)
            ]
        except Exception:
            # Surface any schema/collection problem as a collection failure.
            pytest.fail("Error during collection")
@hookimpl(hookwrapper=True)  # pragma: no mutate
def pytest_pyfunc_call(pyfuncitem):  # type:ignore
    """It is possible to have a Hypothesis exception in runtime.

    For example - kwargs validation is failed for some strategy.
    Convert such ``InvalidArgument`` errors into ordinary test failures.
    """
    outcome = yield
    try:
        outcome.get_result()
    except InvalidArgument as exc:
        # Report Hypothesis misconfiguration as a test failure, not an internal error.
        pytest.fail(exc.args[0])
|
7,792 | 0110d26e17a5402c22f519d0aeb2aacca3279d00 | import datetime
# Capture the current local timestamp once, then display it.
current_moment = datetime.datetime.now()
print("Time now : ", current_moment)
7,793 | 3583ce664bc9f42ef8f751de8642997819e08e31 | # -*- coding: utf-8 -*-
import scrapy
import re
import time
import random
import hashlib
from quotetutorial.items import ScrapyItem
import requests
import os
import io
import sys
# 科学技术部
class MostSpider(scrapy.Spider):
    """Scrape notice/announcement listings from the Ministry of Science and Technology site.

    Walks the paginated index at ``/tztg/``, follows each article link, and
    yields ``ScrapyItem`` records with title, date, content and downloaded
    attachments.
    """
    index = 12  # numeric site id used as the first segment of item ids
    name = 'most'
    allowed_domains = ['www.most.gov.cn']
    start_urls = [
        'http://www.most.gov.cn/tztg/index.htm']
    category_index = {'tztg': '1'}
    category_desc = {'tztg': '通知公告-通知通告'}
    url_descs = ['通知公告-通知通告']

    def parse(self, response):
        """Parse an index page: schedule pagination (from a start URL) and article links."""
        print("##########", response.url)
        if response.status == 200:
            id_prefix = ''
            cate = ''
            if response.url in self.start_urls:
                # First page of a category: derive the id prefix and schedule
                # the remaining pages (total page count is embedded in JS).
                cate_index = self.start_urls.index(response.url)
                id_prefix = str(self.index) + '-' + str(cate_index + 1)
                cate = self.url_descs[cate_index]
                total_count_desc = re.findall('countPage =(.*?)//共多少页', response.text)
                print('*********', total_count_desc)
                if total_count_desc:
                    for page_num in range(1, int(total_count_desc[0].strip())):
                        url = response.url.replace('index.htm', 'index_%s.htm' % page_num)
                        yield scrapy.Request(url, meta={'id_prefix': id_prefix, 'category': cate},
                                            callback=self.parse)
                        # Polite random delay between page requests.
                        time.sleep(random.randint(1, 6))
            else:
                # Subsequent pages carry the prefix/category in request meta.
                id_prefix = response.meta['id_prefix']
                cate = response.meta['category']
            # Resolve each article link (relative hrefs starting with "./" or "../")
            # against the current page URL, then schedule content parsing.
            links = response.css("td.STYLE30")
            for link in links:
                href = link.css('a::attr("href")').extract_first()
                url = ""
                if href.find('/') == 1:
                    url = response.url[0:response.url.rfind('/')] + link.css('a::attr("href")').extract_first()[1:]
                elif href.find('/') == 2:
                    url = response.url[0:response.url[0:response.url.rfind('/')].rfind('/')] + href[2:]
                title = link.css('a::text').extract_first().strip()
                print(url, title)
                if url:
                    yield scrapy.Request(url, meta={'id_prefix': id_prefix,
                                                    'category': cate,
                                                    'title': title}, callback=self.parse_content)
                    time.sleep(random.randint(1, 6))

    def parse_content(self, response):
        """Parse one article page into a ScrapyItem and download its attachments."""
        print("^^^^^^", response.url)
        if response.status == 200:
            # Item id = prefix + md5 of the article URL, so ids are stable per URL.
            md5 = hashlib.md5()
            md5.update(response.url.encode(encoding='utf-8'))
            item = ScrapyItem()
            id_prefix = response.meta['id_prefix']
            item['id'] = id_prefix + "-" + md5.hexdigest()
            category = response.meta['category']
            item['category'] = category
            item['title'] = response.meta['title']
            item['published_date'] = ''
            # Publication date appears as "日期:YYYY年MM月DD日"; normalize to ISO.
            date_desc = response.css('div.gray12.lh22::text').extract_first()
            if date_desc:
                print("##########", date_desc)
                date = re.findall('日期:(.*?)$', date_desc)
                if date:
                    date_str = date[0].strip()
                    timeStruct = time.strptime(date_str, "%Y年%m月%d日")
                    strTime = time.strftime("%Y-%m-%d", timeStruct)
                    item['published_date'] = strTime
            item['source'] = '科技部'
            # Body text: concatenate paragraph text and strip layout whitespace
            # (ideographic space, tabs, CR/LF, non-breaking space).
            item['content'] = ''
            conten_p = response.css('div.trshui13.lh22 p::text').extract()
            if conten_p:
                content_desc = ''.join(conten_p)
                item['content'] = content_desc.replace(u'\u3000', '').replace(u'\t', '').replace(u'\r', '').replace(
                    u'\xa0',
                    '').replace(
                    u'\n',
                    '').strip()
            item['view_count'] = '0'
            item['url'] = response.url
            # Collect and download attachments by file extension.
            attach_path_arra = []
            attach_arra = []
            atta_arra = response.css('a[href*=".xls"]') + response.css('a[href*=".doc"]') + response.css(
                'a[href*=".pdf"]') + response.css('a[href*=".zip"]') + response.css('a[href*=".rar"]')
            for attch in atta_arra:
                save_path = ''
                attch_url = response.url[0:response.url.rfind("/")] + attch.css('::attr("href")').extract_first()[1:]
                attach_path_arra.append(attch_url)
                attch_name = attch.css('::text').extract_first()
                if not attch_name:
                    # Fall back to the URL basename when the link has no text.
                    attch_name = attch_url[attch_url.rfind("/") + 1:]
                attach_arra.append(attch_name)
                if attch_name.rfind('.') == -1:
                    # Link text lacks an extension: borrow it from the URL.
                    save_path = save_path + attch_name + attch_url[attch_url.rfind('.'):]
                else:
                    save_path = save_path + attch_name
                self.download_file(attch_url, save_path)
            item['attchment_path'] = ','.join(attach_path_arra)
            item['attchment'] = ','.join(attach_arra)
            # print(item)
            yield item

    def download_file(self, url, local_path):
        """Download ``url`` to ``local_path`` unless the file already exists."""
        if os.path.exists(local_path):
            print("the %s exist" % local_path)
        else:
            print("start download the file", url)
            # NOTE(review): content is written regardless of HTTP status — a
            # 404 page would be saved as the attachment; confirm intent.
            r = requests.get(url)
            print('down loading status ', r.status_code)
            with open(local_path, "wb") as code:
                code.write(r.content)
|
7,794 | 5711613df0bda10512466f147febcffacfe1607b | # [BEGIN IMPORTS]
from mainhandler import MainHandler
from sec.data import *
# [END IMPORTS]
class UpVoteHandler (MainHandler):
    """Handles an up-vote request on a blog post."""

    def get(self):
        user = self.get_user()
        if not user:
            # Anonymous visitors are bounced to the front page.
            self.redirect('/')
            return
        post_id = self.request.get('post_id')
        post = PostData.get_by_id(int(post_id))
        voter_list = post.voter_list
        if post.author == user:
            # Authors may not vote on their own posts.
            self.render('mainpage.html', error="cant vote for self")
        elif user in voter_list:
            # Each user gets exactly one vote per post.
            self.render('mainpage.html', error="cant vote twice")
        else:
            post.upscore += 1
            voter_list.append(user)
            post.put()
            self.redirect('/blog/' + post_id)
class DownVoteHandler (MainHandler):
    """Handles a down-vote request on a blog post."""

    def get(self):
        user = self.get_user()
        if not user:
            # Anonymous visitors are bounced to the front page.
            self.redirect('/')
            return
        post_id = self.request.get('post_id')
        post = PostData.get_by_id(int(post_id))
        voter_list = post.voter_list
        if post.author == user:
            # Authors may not vote on their own posts.
            self.render('mainpage.html', error="cant vote for self")
        elif user in voter_list:
            # Each user gets exactly one vote per post.
            self.render('mainpage.html', error="cant vote twice")
        else:
            post.downscore += 1
            voter_list.append(user)
            post.put()
            self.redirect('/blog/' + post_id)
|
7,795 | c58bfa620df9f1b1f31c83a76d0d8a4576cbd535 | #!/usr/bin/env python
from scapy.all import *
from optparse import OptionParser
import socket
import struct
class MagicARP:
    """Answers every sniffed ARP request with a fabricated (but stable) MAC address.

    Python 2 / scapy tool: each distinct queried IP gets one random MAC,
    remembered in ``macrecs`` so repeated queries resolve consistently.
    """

    def __init__(self, iface):
        self.iface = iface   # interface to send replies on
        self.macrecs = {}    # queried IP -> fabricated MAC, stable across queries

    def magic_arp(self, pkt):
        """Sniff callback: reply to ARP who-has (op == 1) packets."""
        # only look for queries
        if ARP in pkt and pkt[ARP].op == 1:
            # Get a random MAC address and remember it
            mac = get_random_mac()
            self.macrecs.setdefault(pkt[ARP].pdst, mac) # The 'setdefault' method will set the value only if it hasn't already been set
            # create a response packet
            # This is all done with scapy functions/objects
            print "ARP: Resolved %s to %s" % (pkt[ARP].pdst, self.macrecs[pkt[ARP].pdst])
            # Craft an is-at (op=2) reply addressed back to the querier,
            # claiming the fabricated MAC for the requested IP.
            sendp( Ether(src=self.macrecs[pkt[ARP].pdst], dst = pkt[Ether].src, type = 2054) /
                   ARP(hwtype = 1, ptype=0x800, hwlen=6, plen=4, op=2,
                       hwsrc=self.macrecs[pkt[ARP].pdst],
                       hwdst=pkt[Ether].src,
                       psrc=pkt[ARP].pdst,
                       pdst=pkt[ARP].psrc),
                   iface = self.iface)
def get_random_mac():
    """Return a random MAC address in the D-Link OUI range (00:05:5D).

    Each of the three host octets is formatted as a zero-padded two-digit
    hex byte. The previous ``hex(n)[2:]`` formatting dropped the leading
    zero for values below 0x10, producing malformed addresses such as
    ``00:05:5D:5:a:3``.
    """
    # use the Dlink range
    octets = ["00", "05", "5D"]
    for _ in range(3):
        octets.append("%02x" % random.randrange(0, 256))
    return ":".join(octets)
def main():
    """Parse the interface option and start sniffing ARP traffic."""
    clparser = OptionParser()
    clparser.add_option("-i", "--interface", help="Interface to listen and send pkts on", action="store", type="string", dest="iface")
    (options, args) = clparser.parse_args()
    # instantiate new class
    new_magic = MagicARP(options.iface)
    # Sniff indefinitely, filtering for ARP only; each packet is handed to
    # magic_arp, which fabricates and sends the reply. store=0 avoids
    # accumulating packets in memory.
    sniff(prn=new_magic.magic_arp, filter="arp", store=0, iface=options.iface)

if __name__ == "__main__":
    main()
|
7,796 | e31f1e24c319f338d728661dfd50e758526112d6 | import pygame
from settings import *
import random
class Cell:
    """One tile of the minesweeper grid: state, flood-fill reveal and drawing."""

    def __init__(self, game, x, y, bombs):
        self.game = game
        self.x = x                      # pixel position on screen
        self.y = y
        self.i = x // TILESIZE          # grid coordinates derived from pixels
        self.j = y // TILESIZE
        self.revelada = False           # whether the cell has been revealed
        self.bomba = False              # whether the cell holds a bomb
        self.bombas_total = bombs
        self.bombs_around = 0           # neighbour bomb count (-1 for bomb cells)
        self.flag_enabled = False

    def reveal(self):
        """Reveal this cell; flood-fill empty areas and end the game on a bomb."""
        if not self.game.is_game_over:
            self.revelada = True
            if self.bombs_around == 0:
                self.flood()
            if self.bomba:
                self.game.is_game_over = True
                self.game.score = 0
                EFFECT.play()

    def check_neighbours(self, grid):
        """
        This function will count how many bombs there is around a particular cell
        """
        if self.bomba:
            # Bomb cells are marked -1 so draw_number never labels them.
            self.bombs_around = -1
            return
        total = 0
        # Scan the 3x3 neighbourhood (includes self, which is not a bomb here).
        for x in range(-1, 2):
            for y in range(-1, 2):
                i = self.i + x
                j = self.j + y
                if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
                    neighbor = grid[i][j]
                    if neighbor.bomba:
                        total += 1
        self.bombs_around = total

    def flood(self):
        """Recursively reveal unrevealed, unflagged neighbours (classic flood fill)."""
        for x in range(-1, 2):
            for y in range(-1, 2):
                i = self.i + x
                j = self.j + y
                if i > -1 and i < len(self.game.grid) and j > -1 and j < len(self.game.grid[1]):
                    neighbor = self.game.grid[i][j]
                    if not neighbor.revelada and not neighbor.flag_enabled and not self.game.is_game_over:
                        neighbor.reveal()

    def enable_flag(self):
        """Toggle the flag on this cell; flagging a bomb scores a point."""
        self.flag_enabled = not self.flag_enabled
        if self.bomba:  # TODO: and self.flag_enabled
            self.game.score += 1
        # TODO: else: self.game.score -= 1
        # all the spots revealed shouldn't be a bomb

    def draw_number(self):
        """
        This function will draw the numbers according to the total of bombs around the cell.
        Also it will give colors to some numbers
        """
        # NOTE(review): the font is loaded from disk on every call (per cell,
        # per frame) — caching it at module or class level would be cheaper.
        text_color = (0, 0, 0)
        if self.bombs_around == 1:
            text_color = (0, 0, 150)
        if self.bombs_around == 2:
            text_color = (0, 150, 0)
        if self.bombs_around == 3:
            text_color = (150, 0, 0)
        if self.bombs_around == 4:
            text_color = (133, 39, 138)
        if self.bombs_around == 5:
            text_color = (128, 0, 0)
        if self.bombs_around == 6:
            text_color = (175, 238, 238)
        if self.bombs_around == 7:
            text_color = (0, 0, 0)
        if self.bombs_around == 8:
            text_color = (33, 161, 166)
        font = pygame.font.Font("fonts/JetBrainsMono-Bold.ttf", 24)
        if self.bombs_around > 0 and self.revelada:
            text = font.render(
                str(self.bombs_around), False, text_color)
            self.game.screen.blit(text, (self.x + 12, self.y))

    def set_bomb(self):
        """
        This function will turn this cell into a cell with a bomb
        (just to keep organized)
        """
        self.bomba = True

    def draw_cell(self):
        """Draw the cell background, revealed state (bomb or empty) and flag."""
        pygame.draw.rect(
            self.game.screen, WHITE, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))
        if self.revelada:
            if self.bomba:
                pygame.draw.rect(
                    self.game.screen, RED, (self.x + 10, self.y + 10, TILESIZE - 23, TILESIZE - 23))
            else:
                pygame.draw.rect(
                    self.game.screen, GRAY, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))
        if self.flag_enabled and not self.revelada:
            self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)

    def get_mouse_pos(self):
        """Return the mouse position converted to [column, row] grid coordinates."""
        mouse = pygame.mouse.get_pos()
        return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
|
7,797 | 679d4b224733dbe264caeeda4e228edd090ea9de | # coding=UTF-8
'''
Created on Jul 21, 2013
@author: jin
'''
from django import template
register = template.Library()
@register.filter
def get_list_number(value, num):
    """Django template filter: return element ``num`` of sequence ``value``."""
    result=value[num]
    return result
# register.filter('get_list_num', get_list_num)
'''
test
'''
# Python 2 module: quick manual check when run directly.
if __name__=='__main__':
    print get_list_number([True,False,False],1)
|
7,798 | 3035ac8044b5629d0b5de7934e46890ad36ed551 | from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import gensim
# Word tokenizer: splits on any non-word character.
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = get_stop_words('en')
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
# create sample documents
doc_a = "Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother."
doc_b = "My mother spends a lot of time driving my brother around to baseball practice."
doc_c = "Some health experts suggest that driving may cause increased tension and blood pressure."
doc_d = "I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better."
doc_e = "Health professionals say that brocolli is good for your health."
rev1 = "I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great"
rev2 = "UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits."
rev3 ="The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly."
# compile sample documents into a list
#doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e]
doc_set = [rev1,rev2,rev3]
# list for tokenized documents in loop
texts = []
# loop through document list
for i in doc_set:
# clean and tokenize document string
raw = i.lower()
tokens = tokenizer.tokenize(raw)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem tokens
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
# add tokens to list
texts.append(stemmed_tokens)
# turn our tokenized documents into a id <-> term dictionary
dictionary = corpora.Dictionary(texts)
# convert tokenized documents into a document-term matrix
corpus = [dictionary.doc2bow(text) for text in texts]
# generate LDA model
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=dictionary, passes=20)
print("LDA............")
topics = ldamodel.print_topics(num_topics=3, num_words=5)
for topic in topics:
print(type(topic))
print(topic)
print("LSA.................")
#id2word = gensim.corpora.Dictionary.load_from_text("c:\lda_test.txt")
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)
from nltk.corpus import sentiwordnet as swn
topics = lsi.print_topics(5)
for topic in topics:
print(topic[1])
print(swn.senti_synsets(topic[1]))
print("----------------------------------------")
#print(list(swn.senti_synsets('slow')))
happy = swn.senti_synsets('happy')
print(happy.neg_score())
all = swn.all_senti_synsets()
#print(all) |
7,799 | 4745c00ca0f3ca4316117228a9d44bdb5df02877 |
def lucas():
    """Yield the Lucas numbers indefinitely: 2, 1, 3, 4, 7, 11, ..."""
    prev, curr = 2, 1
    while True:
        yield prev
        prev, curr = curr, prev + curr
# Print the first ten Lucas numbers, one "index: value" pair per line.
generator = lucas()
for index, value in zip(range(10), generator):
    print('{}: {}'.format(index, value))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.