index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
21,100 | d7db45c3a070d5a7f2ebfe3101fd5400f73d737c | __author__ = 'Stefanos I. Tsaklidis'
__version__ = "0.0.2"
|
21,101 | 7568e5a30b7024361b796e2836d7d57075a9ad02 | #!/usr/bin/env python
import rospy
from quad_arm_trajectory_tracking.msg import FlatState
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import Vector3
from std_msgs.msg import Float64
from gazebo_aerial_manipulation_plugin.msg import RPYPose
import numpy as np
from tf import transformations
class Filter():
    """Estimates base acceleration and yaw rate by finite-differencing the
    incoming velocity/pose streams, averaged over a 100-sample sliding window.
    """

    def __init__(self):
        # Subscribe to twist/pose streams; publish derived acceleration and yaw rate.
        self.twistSub = rospy.Subscriber('/base_twist', TwistStamped, self.callback, queue_size=1)
        self.poseSub = rospy.Subscriber('/base_pose', RPYPose, self.pose_callback, queue_size=1)
        self.accn_pub = rospy.Publisher('/base_accn', Vector3, queue_size=1)
        self.yr_pub = rospy.Publisher('/yaw_rate', Float64, queue_size=1)
        # Sliding windows of the most recent 101 messages (100 differences).
        self.measurements = list()
        self.yaw_measurements = list()

    def callback(self, msg):
        """Buffer twist messages; once 101 are held, publish the window-average
        finite-difference linear acceleration on /base_accn."""
        self.measurements.append(msg)
        if len(self.measurements) < 101:
            return
        bx = np.zeros(100)
        by = np.zeros(100)
        bz = np.zeros(100)
        for t in range(1, 101):
            dt = (self.measurements[t].header.stamp - self.measurements[t-1].header.stamp).to_sec()
            # print "dt = ", dt
            # NOTE(review): the measured dt above is discarded and a fixed
            # 10 ms step is assumed instead -- confirm this matches the
            # actual publish rate of /base_twist.
            dt = 1e-2
            bx[t-1] = (self.measurements[t].twist.linear.x - self.measurements[t-1].twist.linear.x)/dt
            by[t-1] = (self.measurements[t].twist.linear.y - self.measurements[t-1].twist.linear.y)/dt
            bz[t-1] = (self.measurements[t].twist.linear.z - self.measurements[t-1].twist.linear.z)/dt
        accn = Vector3()
        accn.x = np.average(bx);
        accn.y = np.average(by);
        accn.z = np.average(bz);
        self.accn_pub.publish(accn)
        # Drop the oldest sample so the window slides by one message.
        self.measurements.pop(0)

    def pose_callback(self, msg):
        """Buffer RPY pose messages; once 101 are held, publish the
        window-average finite-difference yaw rate on /yaw_rate."""
        self.yaw_measurements.append(msg)
        if len(self.yaw_measurements) < 101:
            return
        y = np.zeros(100)
        for t in range(1, 101):
            dt = (self.yaw_measurements[t].header.stamp - self.yaw_measurements[t-1].header.stamp).to_sec()
            # print "dt = ", dt
            # NOTE(review): fixed 10 ms step assumed, as in callback().
            dt = 1e-2
            y[t-1] = (self.yaw_measurements[t].rpy.z - self.yaw_measurements[t-1].rpy.z)/dt
        yr = Float64()
        yr.data = np.average(y)
        self.yr_pub.publish(yr)
        self.yaw_measurements.pop(0)
if __name__ == '__main__':
    rospy.init_node('filter')
    joy_transport = Filter()
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        # Fixed: the original used the Python-2-only print statement;
        # the call form works on both Python 2 and 3. The dead `pass`
        # after it is removed.
        print("Received Interrupt")
|
21,102 | d10339208e81b4eb9c1d6acd0eac39881d812c78 | #from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.contrib.auth import get_user_model
from django import forms
from django.views import View
from utils.validator import unique_team_id
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth import authenticate, login
from django.views.decorators.csrf import csrf_protect
import json
from env.environ import CATEGORY
from .apps import get_latest_attack
from .models import SqliFilter, XssFilter
Team = get_user_model()
class HomeView(LoginRequiredMixin, View):
    """Return the logged-in team's name, score and balance as JSON."""

    def get(self, request):
        # request.user is a Team (custom user model); 'money' maps to balance.
        return JsonResponse({
            'name': request.user.username,
            'score': request.user.score,
            'money': request.user.balance,
        })
class RegisterForm(forms.Form):
    """Validates registration payloads: unique team id, >=8-char password,
    well-formed email."""
    username = forms.CharField(validators=[unique_team_id])
    password = forms.CharField(min_length=8)
    email = forms.EmailField()
class RegisterView(View):
    """Create a new team account from a JSON body and attach its filters."""

    def post(self, request):
        # Body is raw JSON, not form-encoded; decode then validate.
        form = RegisterForm(json.loads(request.body.decode("utf-8")))
        if not form.is_valid():
            return HttpResponse(status=400)
        team = Team.objects.create_user(
            username=form.cleaned_data['username'],
            password=form.cleaned_data['password'],
            email=form.cleaned_data['email'],
        )
        # Each new team starts with one SQLi filter and one XSS filter.
        SqliFilter.objects.create(owner=team)
        XssFilter.objects.create(owner=team)
        return HttpResponse(status=201)
class LoginForm(forms.Form):
    """Validates login payloads (username + >=8-char password)."""
    username = forms.CharField()
    password = forms.CharField(min_length=8)
class LoginView(View):
    """Authenticate a team from a JSON body and open a session.

    Responds 400 on a malformed payload, 401 on bad credentials, and
    200 with the session id on success.
    """

    def post(self, request):
        payload = json.loads(request.body.decode("utf-8"))
        form = LoginForm(payload)
        if not form.is_valid():
            return HttpResponse(status=400)
        credentials = form.cleaned_data
        team = authenticate(
            username=credentials['username'],
            password=credentials['password'],
        )
        if team is None:
            return HttpResponse(status=401)
        login(request, team)
        return JsonResponse({'sessionid': request.session.session_key}, status=200)
class PingView(LoginRequiredMixin, View):
    """Session/health check: responds 200 with {'ok': True} when logged in."""

    def get(self, request):
        return JsonResponse({'ok':True}, status=200)
class DashboardView(View):
    """Public scoreboard: every non-admin team with its score and the
    latest attack per challenge category."""

    def get(self, request):
        ret = []
        teams = Team.objects.all().exclude(is_superuser=True)
        for team in teams:
            # Build the per-category attack map once per team. The original
            # recomputed the identical dict comprehension len(CATEGORY)
            # times inside a loop whose variable the comprehension also
            # shadowed.
            attacks = dict([(cate[1], get_latest_attack(team, cate[0])) for cate in CATEGORY])
            tmp = {
                "teamname": team.username,
                "score": team.score,
                "attacks": attacks,
            }
            ret.append(tmp)
        # safe=False permits serializing a top-level list.
        return JsonResponse(ret, status=200, safe=False)
|
21,103 | f37303c63b791ded528f5076b0655182c81695e6 | #coding: utf-8
"""
User Directory test for auth:ldap, org:ldap.
Copyright (C) 2009-2011 EdenWall Technologies
Written by Julien Miotte <jmiotte AT edenwall.com>
$Id$
"""
from templateUserDirectory import templateOrgLDAP, templateAuthLDAP
from nuconf.common.user_dir import SameAsOrgAuth, LDAPOrg
class TestUserDirectoryLDAPLDAP1(templateAuthLDAP,
                                 templateOrgLDAP):
    """
    Test case : LDAP Auth connected to LDAP,
    LDAP Org (same dir as Auth)
    """
    # Auth reuses the same directory as the Org backend.
    authConf = SameAsOrgAuth()
    user_we_can_test = "Administrateur"
    users_password = "cornelius"
    # NOTE(review): the bind DN/password and server address below are
    # hard-coded test-lab values; do not reuse outside the test network.
    orgConf = LDAPOrg(
        uri = "ldap://172.17.2.1",
        dn_users = "ou=Users,dc=inl,dc=fr",
        dn_groups = "ou=Groups,dc=inl,dc=fr",
        user = "cn=admin,dc=inl,dc=fr",
        password = "INLbabar286",
        custom_or_nupki = "SSL_DISABLED",
        reqcert = "allow",
        server_cert_set = False
    )
    group_of_the_user = "testeurs"
if __name__ == "__main__":
choice = raw_input("Calling this file directly will cause application of\n"
"the test configuration without reverting it.\n"
"You should call it with py.test.\nContinue anyway ? "
"[yN] : ")
if 'y' in choice.strip():
one_shot = TestUserDirectoryLDAPLDAP1()
one_shot.setup_class()
|
21,104 | 13c4eb3f3c587eec6f084db6ff4e3a309b28d46d | # https://leetcode.com/problems/palindrome-number/
# 9. Palindrome Number
# History:
# Google
# 1.
# Mar 13, 2020
# Determine whether an integer is a palindrome. An integer is a palindrome when it reads the same
# backward as forward.
#
# Example 1:
#
# Input: 121
# Output: true
# Example 2:
#
# Input: -121
# Output: false
# Explanation: From left to right, it reads -121. From right to left, it becomes 121-. Therefore
# it is not a palindrome.
# Example 3:
#
# Input: 10
# Output: false
# Explanation: Reads 01 from right to left. Therefore it is not a palindrome.
# Follow up:
#
# Could you solve it without converting the integer to a string?
class Solution(object):
    def isPalindrome(self, x):
        """Return True iff the decimal digits of x read the same forwards
        and backwards, without converting x to a string.

        :type x: int
        :rtype: bool
        """
        # Negative numbers are never palindromes ("-121" vs "121-").
        if x < 0:
            return False
        # ranger = largest power of 10 <= x, used to peel the leading digit.
        # Bug fix: integer division (//) is required throughout -- the
        # original used "/", which yields floats on Python 3 and breaks
        # both loops and the digit comparison.
        ranger = 1
        while x // ranger >= 10:
            ranger *= 10
        while x > 0:
            left = x // ranger
            right = x % 10
            if left != right:
                return False
            # Strip the leading and trailing digits; ranger shrinks by two
            # decimal places to match.
            x = (x % ranger) // 10
            ranger //= 100
        return True
|
21,105 | e0b98693d54e413260eee90691315858465a6505 | def simple_equation(a, b, c): #เธฃเธฑเธเธเนเธฒเธเธฑเธงเนเธฅเธ a, b, c
for i in ['+', '-', '*', '//']: #เธเนเธฒ i เนเธ List
sum1 = str(a) + i + str(b) #เธเธณ str(a) เธชเธธเนเธกเนเธเธฃเธทเนเธญเธเธซเธกเธฒเธข + - * / เธเธฑเธ str(b)
if eval(sum1) == c: #เธเนเธฒเนเธเธฅเธ sum1 เนเธซเนเนเธเนเธ str เนเธฅเธฐเธเนเธญเธเนเธเนเธฒเธเธฑเธ str(c)
eq = sum1 + "=" + str(c) #เนเธซเนเธเธณ sum1 เธกเธฒเธเธงเธเนเธเนเธฒเธเธฑเธ str(c)
return eq.replace('//', '/') #เธฅเธเธเนเธฒ '//' เนเธฅเธฐ '/' เธชเธฒเธกเธฒเธฃเธเนเธเนเนเธเธเธเธฑเธเนเธเน
return "" #เธชเนเธเธเนเธฒเธญเธญเธ
# Demo: expected output is "1+2=3", "2+2=4", "6/2=3".
print(simple_equation(1, 2, 3))
print(simple_equation(2, 2, 4))
print(simple_equation(6, 2, 3))
21,106 | e78af35c0808368dbaecbe55a99bd25ff12fefdb | import RPi.GPIO as GPIO
import time
from AlphaBot2 import AlphaBot2
TRIG = 22
ECHO = 27
Ab = AlphaBot2()
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(TRIG,GPIO.OUT,initial=GPIO.LOW)
GPIO.setup(ECHO,GPIO.IN)
def Distance():
    """Trigger one ultrasonic ping and return the measured distance in cm.

    Sends a ~15 us pulse on TRIG, then times the ECHO high pulse; distance
    is (echo duration) * 34000 cm/s / 2 (sound travels out and back).
    NOTE(review): the busy-wait loops have no timeout -- a missed echo
    hangs this function.
    """
    GPIO.output(TRIG,GPIO.HIGH)
    time.sleep(0.000015)
    GPIO.output(TRIG,GPIO.LOW)
    while not GPIO.input(ECHO):
        pass
    t1 = time.time()
    while GPIO.input(ECHO):
        pass
    t2 = time.time()
    return (t2-t1)*34000/2
print("Ultrasonic_Obstacle_Avoidance")
try:
while True:
Dist = Distance()
print("Distance = %0.2f cm"%Dist)
if Dist <= 20:
Ab.stop()
time.sleep(0.02)
Ab.left()
time.sleep(0.02)
Ab.stop()
#else:
#Ab.forward()
#time.sleep(0.02)
except KeyboardInterrupt:
GPIO.cleanup();
|
21,107 | 6747f921b2321ca9c9e3b7af86ed96bc9c9fefa6 | from django.contrib import admin
from projects.models import Blogs, Project, Technologies
from import_export.admin import ImportExportModelAdmin
# Register your models here.
# admin.site.register(Project)
# admin.site.register(Blogs)
# admin.site.register(Technologies)
@admin.register(Project)
class ProjectAdmin(ImportExportModelAdmin):
    """Admin for Project with import/export (CSV, XLSX, ...) support."""
    pass
@admin.register(Technologies)
class TechnologieAdmin(ImportExportModelAdmin):
    """Admin for Technologies with import/export support."""
    pass
@admin.register(Blogs)
class BlogAdmin(ImportExportModelAdmin):
    """Admin for Blogs with import/export support."""
    pass
|
21,108 | 759a7240949fa773e71952c650ba05cc1f929dcf | # Generated by Django 2.2.4 on 2020-01-08 15:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the DanhMucSanPham (product category) model and
    gives CongTy.congty_banner a default banner image."""

    dependencies = [
        ('website', '0014_congty_congty_banner'),
    ]

    operations = [
        migrations.CreateModel(
            name='DanhMucSanPham',
            fields=[
                ('danh_muc_id', models.AutoField(primary_key=True, serialize=False)),
                ('ten', models.CharField(max_length=200, verbose_name='Tên danh mục')),
                ('slug', models.SlugField(default=1, verbose_name='Đường dẫn rút gọn')),
            ],
            options={
                'verbose_name_plural': 'Danh mục sản phẩm',
            },
        ),
        migrations.AlterField(
            model_name='congty',
            name='congty_banner',
            field=models.ImageField(default='banner/top_banner.png', upload_to='banner', verbose_name='Banner'),
        ),
    ]
|
21,109 | 5812cc31f4dbd366085c43c2689082232956dae8 | import csv
import pyperclip
import os
import sys
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
# Credentials and driver path come from the environment; abort early when
# any of them is missing.
FACEBOOK_ID = os.environ.get('FACEBOOK_ID', None)
FACEBOOK_PASSWORD = os.environ.get('FACEBOOK_PASSWORD', None)
CHROME_DRIVER_PATH = os.environ.get('CHROME_DRIVER_PATH', None)
if (FACEBOOK_ID == None) or (FACEBOOK_PASSWORD == None) or (CHROME_DRIVER_PATH == None):
    print(">>> Error, missing environmental variable config.")
    sys.exit(1)
driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)
driver.get('https://tabelog.com/')
# Login with Facebook account
driver.find_element_by_class_name('js-open-login-modal').click()
driver.find_element_by_class_name('p-login-panel__btn--facebook').click()
driver.find_element_by_id('email').send_keys(FACEBOOK_ID)
driver.find_element_by_id('pass').send_keys(FACEBOOK_PASSWORD)
driver.find_element_by_id('loginbutton').click()
# Open the saved-restaurants ("hozon") page.
driver.find_element_by_class_name('p-user-menu__target--hozon').click()
# Walk every result page, copying each restaurant's info via the site's
# clipboard button and collecting rows for the CSV.
restaurants = []
while True:
    WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, 'js-copy-restaurant-info-to-clipboard')))
    clipboard_buttons = driver.find_elements_by_class_name('js-copy-restaurant-info-to-clipboard')
    for button in clipboard_buttons:
        button.click()
        data = pyperclip.paste().split('\n')
        # Pad entries missing a field so every row has >= 4 columns.
        # NOTE(review): assumes the clipboard text is newline-separated
        # fields with the address in the first three -- confirm against
        # the site's current format.
        if len(data) < 4:
            data.insert(1, '')
        map_url = 'https://www.google.co.jp/maps/search/' + ' '.join(data[0:3])
        data.append(map_url)
        restaurants.append(data)
    # Advance until the "next page" arrow no longer exists.
    try:
        driver.find_element_by_class_name('c-pagination__arrow--next').click()
    except NoSuchElementException:
        break
# utf_8_sig writes a BOM so Excel opens the CSV with the right encoding.
with open('result.csv', 'wt', encoding='utf_8_sig') as f:
    writer = csv.writer(f, quoting=csv.QUOTE_ALL)
    writer.writerows(restaurants)
print('>>> Done.')
driver.quit()
|
21,110 | 17c9a3355c0cd087e32f769320fc685e2ee841d9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-26 00:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the queue-routing fields from the analyzer,
    detector and indexer models."""

    dependencies = [
        ('dvaapp', '0002_detector_queue'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='analyzer',
            name='queue',
        ),
        migrations.RemoveField(
            model_name='detector',
            name='queue',
        ),
        migrations.RemoveField(
            model_name='indexer',
            name='indexer_queue',
        ),
        migrations.RemoveField(
            model_name='indexer',
            name='retriever_queue',
        ),
    ]
|
21,111 | 9fa4f4d8ddcc934a2b4a90f97b0b99ce9ee56a33 | """
Name: Web_Crawler.py
Description: Flask server that handles routing and socket functions
References:
http://flask.pocoo.org/
https://flask-socketio.readthedocs.io/en/latest/
http://www.gevent.org/gevent.monkey.html
http://www.shanelynn.ie/asynchronous-updates-to-a-webpage-with-flask-and-socket-io/
http://stackoverflow.com/questions/22238090/validating-urls-in-python
https://github.com/miguelgrinberg/Flask-SocketIO/issues/371
"""
import random
import string
import json
import socket
import sys
import validators
import ClientSocket
import Crawler
from flask import Flask, render_template, request
from flask_assets import Environment, Bundle
from flask_compress import Compress
from flask_socketio import SocketIO, emit
from gevent import monkey
# Monkey patch replaces class in the standard socket module so they can work with gevent
# http://www.gevent.org/intro.html#beyond-sockets
# NOTE(review): gevent recommends calling monkey.patch_all() before any
# other imports; here it runs after them -- confirm nothing has already
# bound the unpatched socket classes.
monkey.patch_all()
app = Flask(__name__)
# cache static items
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1440000
assets = Environment(app)
# Bundle the individual minified scripts into one packed asset.
js = Bundle('js/cookieParser.min.js', 'js/makeLog.min.js', 'js/nodeChart.min.js', 'js/notifications.min.js',
            'js/submitForm.min.js', 'js/updateProgressBar.min.js', 'js/updateTextArea.min.js', 'js/socketIOconn.min.js',
            output='gen/packed.js')
assets.register('js_all', js)
# make a random secret thats between 10 and 20 chars long
# http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
# NOTE(review): `random` is not cryptographically secure; the `secrets`
# module is the appropriate source for a session secret.
app.config['SECRET_KEY'] = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
                                   for _ in range(random.randrange(10, 20, 1)))
# Wrap the flask app with the flask socket io
io = SocketIO(app, engineio_logger=True, ping_timeout=7200)
# compress responses with gzip
Compress(app)
# set time out (sec)
socket.setdefaulttimeout(10)
# set recursion limit
# sys.setrecursionlimit(175)
@app.after_request
def add_header(response):
    """Attach a Cache-Control max-age to every outgoing response."""
    response.cache_control.max_age = 3000
    return response
@app.route('/', methods=['GET'])
def index():
    """Index Page"""
    return render_template("index.html")
@app.route("/cookie_handler", methods=['GET'])
def cookie_handler():
"""
Cookie Handling & From Validation
"""
# make the response object
response = app.make_response('')
search_json = {}
url = request.args['url']
type = request.args['type']
num = int(request.args['number'])
keyword = request.args['keyword']
# Validate Inputs
if not validators.url(url):
response = app.make_response('Bad URL')
response.status_code = 400
return response
# search type
if type not in ['Depth', 'Breadth']:
response = app.make_response('Bad Search Type')
response.status_code = 400
return response
# amount of pages
if num < 1 or num > 125:
response = app.make_response('Invalid Pick A Number Between 1-125')
response.status_code = 400
return response
# keyword
if keyword == '':
response = app.make_response('Invalid Keyword')
response.status_code = 400
return response
# Set response code for success
response.status_code = 200
# Make the json with the search data
search_json['url'] = url
search_json['type'] = type
search_json['num'] = num
search_json['keyword'] = keyword
# add search to cookie
if request.cookies.get('past_searches'):
past_searches = json.loads(request.cookies.get('past_searches'))
# loop though the cookie to see if the search has been done
past_search_check = False
for s in past_searches['searches']:
if (s['url'] == url) & (s['keyword'] == keyword):
past_search_check = True
break
searches = past_searches['searches']
# If it hasnt been searched before add it to the cookie
if not past_search_check:
searches.append(search_json)
response.set_cookie('past_searches', json.dumps({'searches': searches}))
# if there is no cookie/no prior searches make the cookie with the search info
else:
response.set_cookie('past_searches', json.dumps({'searches': [search_json]}))
return response
@app.errorhandler(404)
def page_not_found(error):
    """Plain-text 404 handler."""
    return '404 - This Page Does Not Exist', 404
@app.errorhandler(500)
def internal_server_error(error):
    """Plain-text 500 handler.

    Renamed from `page_not_found`: the original reused the 404 handler's
    name, redefining it at module level (flake8 F811). Flask had already
    registered both callables, but the shadowing hid the 404 function.
    """
    return '500 - Internal Server Error', 500
# Socket IO Listeners
@io.on('connect')
def connected():
    """Handle Socket Connection: acknowledge the new client."""
    emit('conn response', {'msg': 'Connected'})
@io.on('random tree')
def handle_numbers(obj=None):
    """Start a crawl for the requesting socket client.

    `obj` carries url/number/keyword/type from the browser; a per-client
    socket wrapper routes progress events back to request.sid.
    """
    client = ClientSocket.Socket(io, request.sid)
    # https://github.com/miguelgrinberg/Flask-SocketIO/issues/371
    if obj['type'] == 'Breadth':
        crawler = Crawler.Breadth(obj['url'], int(obj['number']), str(obj['keyword']), client)
        crawler.search('socket')
    elif obj['type'] == 'Depth':
        crawler = Crawler.Depth(obj['url'], int(obj['number']), str(obj['keyword']), client)
        crawler.search('socket')
    else:
        client.emit("Error", "Bad Data")
if __name__ == "__main__":
io.run(app, '0.0.0.0', 5000, debug=False)
|
21,112 | e0e54d3456f19653b4eac902702487f81eb27d78 | # -*- coding:utf-8 -*-
from Registration import Registration
from database import Database
import network
import messages
class DownloadRegistration(Registration) :
    """Handles a file-download request: authenticates the user, performs
    the download through the database layer, and acks over the socket."""

    def __init__(self, key, users) :
        Registration.__init__(self, key, users)
        self.database = Database()

    def run(self, body, socket) :
        """Process one download request `body` and reply on `socket`."""
        messages.begin_registration(self.key, socket)
        file_name = body["content"]["file_name"]
        download_type = body["content"]["download_type"]
        username = body["content"]["username"]
        password = body["content"]["password"]
        # Bug fix: file_download must be bound on every path -- the
        # original left it undefined when authentication failed, so the
        # ack_construct call below raised NameError instead of sending
        # the error ack to the client.
        file_download = None
        if username not in self.users or self.users[username] != password:
            status = "0000"
            content = "ERROR: The server had problems with the user identification, please try log in again."
        else :
            status = "1014"
            file_download = self.database.download_file(download_type, file_name, username)
            content = "SUCCESS: The file was downloaded successfully."
            if not file_download :
                status = "0000"
                content = "ERROR: The typed file doesn't exist in your account."
        msg = self.ack_construct(file_download, status, self.response_type)
        network.send(socket, msg)
        messages.end_registration(self.key, socket, content)
|
21,113 | 37f3fe0bbf1c991413bd4a188124434ebd518f0f | from django.shortcuts import render
from fixlyft.models import PickUP as SchedulePickUP
from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic import UpdateView
# Create your views here.
@staff_member_required
def all_pickups(request):
    """Staff-only list of all pickups not yet marked completed."""
    pickups = SchedulePickUP.objects.filter(completed=False)
    context = {
        'pickups': pickups
    }
    return render(request, 'staffs/pickup.html', context)
class UpdatePickup(UpdateView):
    """Staff form to toggle a pickup's `completed` flag."""
    model = SchedulePickUP
    fields = ['completed',]
    template_name = 'staffs/update.html'
21,114 | d71224d5968907125663487fba25eaa4b458e7cf | #!python
import csv
import sys
from collections import defaultdict
def main(args):
    """CLI entry point: parse arguments and run the repeater.

    args -- argv-style argument list (without the program name).
    """
    import argparse
    parser = argparse.ArgumentParser(description='repeat the task of filling out known campaign tags from last month')
    parser.add_argument('primer', type=argparse.FileType('rb'),
                        help="csv file containing last month's data to train the repeater")
    parser.add_argument('input', type=argparse.FileType('rb'),
                        help="guess values for this file")
    parser.add_argument('--output', type=argparse.FileType('wb'),
                        help="output the results to this file")
    opts = parser.parse_args(args)
    # In case output is not given, use stdout.
    # Bug fix: argparse always creates the `output` attribute (None when
    # the flag is omitted), so the original `"output" in vars(opts)` test
    # was always true and stdout was never selected -- repeat_files then
    # crashed on a None file object.
    output = opts.output if opts.output is not None else sys.stdout
    repeat_files(opts.primer, opts.input, output)
def repeat_files(primer, input, out):
    """Read both CSVs, fill the input's campaign-type column from the
    primer's tag->type mapping, and write the completed rows to `out`.

    NOTE(review): files are opened in binary mode by main(), which suits
    the Python 2 csv module; Python 3's csv requires text mode.
    """
    # TODO: Make this match the csv output from OpenOffice
    primer_reader = csv.reader(primer, escapechar='\\')
    input_reader = csv.reader(input, escapechar='\\')
    # Campaign type (column #0) depends on campaign tag (column #1)
    completed = repeat_tables(primer_reader, input_reader, { 0 : 1 })
    output_writer = csv.writer(out, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL)
    for row in completed:
        output_writer.writerow(row)
def repeat_tables(primer, table, dependencies, ignore_header=True):
    """
    Build a knowledge map from the previous table and use that to fill in any
    empty values in the given table.

    Preconditions:
    - Both tables have the same schema
    - There are no duplicate values for a given column to map

    Note: `ignore_header` is accepted for interface compatibility but is
    currently unused.
    """
    knowledge_map = learn(primer, dependencies)
    filled_rows = []
    for record in table:
        # Rows are completed in place (the output row aliases the input row).
        for target_col, source_col in dependencies.items():
            key = record[source_col]
            # Only fill cells that are empty and whose key was learned.
            if record[target_col] == "" and key in knowledge_map[target_col]:
                record[target_col] = knowledge_map[target_col][key]
        filled_rows.append(record)
    return filled_rows
def learn(primer, dependencies):
    """Constructs a knowledge map from a given table.

    The keys of the map are the column indexes of dependent columns, and
    the values are dicts mapping an independent-column value to the
    dependent-column value last seen with it (later rows overwrite
    earlier relationships).
    """
    knowledge_map = defaultdict(dict)
    for record in primer:
        for target_col, source_col in dependencies.items():
            # Map the independent value to the dependent value. Missing
            # target-column entries appear automatically via defaultdict,
            # and a repeated independent value overwrites the earlier one.
            knowledge_map[target_col][record[source_col]] = record[target_col]
    return knowledge_map
if __name__ == '__main__':
    # Forward command-line arguments (minus the program name) to main().
    main(sys.argv[1:])
|
21,115 | b8e1cabce0e9fa340a1e8260cc6355dfd230e3a4 |
# Slicing out Introns

# The DNA sequence being analysed.
sequence = 'ATCGATCGATCGATCGACTGACTAGTCATAGCTATGCATGTAGCTACTCGATCGATCGATCGATCGATCG' \
           'ATCGATCGATCGATCATGCTATCATCGATCGATATCGATGCATCGACTACTAT'

# Extract the two exons; slice start is inclusive, end exclusive.
exon1 = sequence[:65]
exon2 = sequence[90:]

# Print the spliced coding sequence.
print('The coding sequence is.')
print(exon1 + exon2)

# Percentage of the full sequence that is coding DNA.
codingpercent = (len(exon1) + len(exon2)) / len(sequence) * 100
print('\nThe coding part of the DNA makes up {0}% of the total sequence.\n'.format(round(codingpercent, 2)))

# Show the intron (the stretch between the two exons) in lowercase,
# with the coding parts left capitalised.
intron = sequence[65:90].lower()
print(exon1 + intron + exon2)
|
21,116 | af9f04ac4c3a150a68487a63de644c717a8cb428 | #
# @lc app=leetcode id=433 lang=python3
#
# [433] Minimum Genetic Mutation
#
# https://leetcode.com/problems/minimum-genetic-mutation/description/
#
# algorithms
# Medium (39.60%)
# Likes: 342
# Dislikes: 42
# Total Accepted: 27.3K
# Total Submissions: 67.7K
# Testcase Example: '"AACCGGTT"\n"AACCGGTA"\n["AACCGGTA"]'
#
# A gene string can be represented by an 8-character long string, with choices
# from "A", "C", "G", "T".
#
# Suppose we need to investigate about a mutation (mutation from "start" to
# "end"), where ONE mutation is defined as ONE single character changed in the
# gene string.
#
# For example, "AACCGGTT" -> "AACCGGTA" is 1 mutation.
#
# Also, there is a given gene "bank", which records all the valid gene
# mutations. A gene must be in the bank to make it a valid gene string.
#
# Now, given 3 things - start, end, bank, your task is to determine what is the
# minimum number of mutations needed to mutate from "start" to "end". If there
# is no such a mutation, return -1.
#
# Note:
#
#
# Starting point is assumed to be valid, so it might not be included in the
# bank.
# If multiple mutations are needed, all mutations during in the sequence must
# be valid.
# You may assume start and end string is not the same.
# Example 1:
# start: "AACCGGTT"
# end: "AACCGGTA"
# bank: ["AACCGGTA"]
#
# return: 1
# Example 2:
# start: "AACCGGTT"
# end: "AAACGGTA"
# bank: ["AACCGGTA", "AACCGCTA", "AAACGGTA"]
#
# return: 2
# Example 3:
# start: "AAAAACCC"
# end: "AACCCCCC"
# bank: ["AAAACCCC", "AAACCCCC", "AACCCCCC"]
#
# return: 3
# @lc code=start
from queue import Queue
from typing import List
class Solution:
    def minMutation(self, start: str, end: str, bank: List[str]) -> int:
        """Return the minimum number of single-character mutations needed
        to turn `start` into `end`, where every intermediate gene must be
        in `bank`; -1 if unreachable.

        Layered BFS: each queue entry holds one whole BFS level, and `res`
        counts levels (mutations) expanded so far.
        Fix: the original annotated `bank: [str]` and `-> []`, which are
        list objects, not type hints; they are now proper List[str] hints.
        """
        bank = set(bank)  # O(1) membership tests
        q = Queue()
        q.put([start])
        seen = set()
        res = -1
        while not q.empty():
            res += 1
            curr = q.get()
            next_ = []
            for c in curr:
                if c == end:
                    return res
                seen.add(c)
                next_ += self.generateNext(c, seen, bank)
            if next_:
                q.put(next_)
        return -1

    def generateNext(self, gene, seen, bank) -> List[str]:
        """Return all one-mutation neighbours of `gene` that are in `bank`
        and have not been visited yet."""
        res = []
        for i, c in enumerate(gene):
            for g in 'ACGT':
                if g != c:
                    tmp = gene[:i] + g + gene[i+1:]
                    if tmp not in seen and tmp in bank:
                        res.append(tmp)
        return res
# @lc code=end
if __name__ == '__main__':
    # Smoke-run the three LeetCode examples (expected answers: 1, 3, 2).
    s = Solution()
    s.minMutation("AACCGGTT","AACCGGTA",["AACCGGTA"])
    s.minMutation("AAAAACCC","AACCCCCC",["AAAACCCC", "AAACCCCC", "AACCCCCC"])
    s.minMutation("AACCGGTT","AAACGGTA",["AACCGGTA", "AACCGCTA", "AAACGGTA"])
|
21,117 | 3de89bcc17cfe0b147a9a5b778442a21abb8b484 | print("I am alexa. What is your name")
name = input()
print("Hi" , name)
|
21,118 | ce24ae0d54e6ecde3b8f463203a201e686552b53 | """CSC148 Assignment 2
=== CSC148 Winter 2020 ===
Department of Computer Science,
University of Toronto
This code is provided solely for the personal and private use of
students taking the CSC148 course at the University of Toronto.
Copying for purposes other than this use is expressly prohibited.
All forms of distribution of this code, whether as given or with
any changes, are expressly prohibited.
Authors: Diane Horton, David Liu, Mario Badr, Sophia Huynh, Misha Schwartz,
and Jaisie Sin
All of the files in this directory and all subdirectories are:
Copyright (c) Diane Horton, David Liu, Mario Badr, Sophia Huynh,
Misha Schwartz, and Jaisie Sin
=== Module Description ===
This file contains the hierarchy of Goal classes.
"""
from __future__ import annotations
import random
from typing import List, Tuple
from block import Block
from settings import colour_name, COLOUR_LIST
def generate_goals(num_goals: int) -> List[Goal]:
    """Return a randomly generated list of goals with length num_goals.

    All elements of the list must be the same type of goal, but each goal
    must have a different randomly generated colour from COLOUR_LIST. No two
    goals can have the same colour.

    Precondition:
    - num_goals <= len(COLOUR_LIST)
    """
    # A single coin flip decides the goal type for the whole list.
    num = random.random()
    colours = _get_goal_colours()
    goal_type = PerimeterGoal if num < 0.5 else BlobGoal
    return [goal_type(colours[i]) for i in range(num_goals)]
def _get_goal_colours() -> List[Tuple[int, int, int]]:
    """Return the game colours in a freshly shuffled order.

    (The previous docstring contained doctest-style examples, but the
    output is random, so they could never pass; they have been removed.)
    """
    # Copy first so the module-level COLOUR_LIST is never reordered.
    colour_lst = COLOUR_LIST[:]
    random.shuffle(colour_lst)
    return colour_lst
def _flatten(block: Block) -> List[List[Tuple[int, int, int]]]:
    """Return a two-dimensional list representing <block> as rows and columns of
    unit cells.

    Return a list of lists L, where,
    for 0 <= i, j < 2^{max_depth - self.level}
    - L[i] represents column i and
    - L[i][j] represents the unit cell at column i and row j.

    Each unit cell is represented by a tuple of 3 ints, which is the colour
    of the block at the cell location[i][j]

    L[0][0] represents the unit cell in the upper left corner of the Block.
    """
    if block.level == block.max_depth:
        # Unit cell: a 1x1 grid of its own colour.
        return [[block.colour]]
    elif not block.children:
        # Undivided block above unit size: uniform dim x dim grid.
        dim = int(2 ** (block.max_depth - block.level))
        return [[block.colour] * dim for _ in range(dim)]
    else:
        # Assemble columns left-to-right: the left half of the grid comes
        # from children 1 (top) and 2 (bottom), the right half from
        # children 0 (top) and 3 (bottom) -- i.e. this assumes child order
        # 0=upper-right, 1=upper-left, 2=lower-left, 3=lower-right (the
        # Blocky convention; confirm against block.py).
        grid = []
        child_grid_0 = _flatten(block.children[0])
        child_grid_1 = _flatten(block.children[1])
        child_grid_2 = _flatten(block.children[2])
        child_grid_3 = _flatten(block.children[3])
        dim = 2 * len(child_grid_1)
        i = 0
        while i < dim:
            if i < dim / 2:
                column = child_grid_1[i][:] + child_grid_2[i][:]
                grid.append(column)
            else:
                j = int(i - dim/2)
                column = child_grid_0[j][:] + child_grid_3[j][:]
                grid.append(column)
            i += 1
        return grid
class Goal:
    """A player goal in the game of Blocky.

    This is an abstract class. Only child classes should be instantiated.

    === Attributes ===
    colour:
        The target colour for this goal, that is the colour to which
        this goal applies.
    """
    colour: Tuple[int, int, int]

    def __init__(self, target_colour: Tuple[int, int, int]) -> None:
        """Initialize this goal to have the given target colour.
        """
        self.colour = target_colour

    def score(self, board: Block) -> int:
        """Return the current score for this goal on the given board.

        The score is always greater than or equal to 0.
        """
        raise NotImplementedError

    def description(self) -> str:
        """Return a description of this goal.
        """
        raise NotImplementedError
class PerimeterGoal(Goal):
    """A goal rewarding target-coloured unit cells on the board's
    perimeter; corner cells count twice (once for each edge they touch).

    === Attributes ===
    colour:
        The target colour for this goal, that is the colour to which
        this goal applies.
    """

    def score(self, board: Block) -> int:
        """Return how many perimeter unit cells match the target colour,
        with corners contributing to two edges."""
        grid = _flatten(board)
        edges = [
            grid[0],                    # leftmost column
            grid[-1],                   # rightmost column
            [col[0] for col in grid],   # top row
            [col[-1] for col in grid],  # bottom row
        ]
        return sum(edge.count(self.colour) for edge in edges)

    def description(self) -> str:
        """Returns the description for the PerimeterGoal class"""
        return f'Maximize number of {colour_name(self.colour)} unit cells on ' \
               f'the perimeter, corner cells count double'
class BlobGoal(Goal):
    """A goal rewarding the largest connected blob (edge adjacency only)
    of the target colour.

    === Attributes ===
    colour:
        The target colour for this goal, that is the colour to which
        this goal applies.
    """

    def score(self, board: Block) -> int:
        """Return the size of the largest connected blob of this goal's
        target colour on <board>."""
        board = _flatten(board)
        dim = len(board)
        # Parallel visited grid: -1 unvisited, 0 visited/wrong colour,
        # 1 visited/target colour.
        visited = [[-1] * dim for _ in range(dim)]
        max_score = 0
        for i in range(dim):
            for j in range(dim):
                blob_at_ij = self._undiscovered_blob_size((i, j), board,
                                                          visited)
                max_score = max(max_score, blob_at_ij)
        return max_score

    def _undiscovered_blob_size(self, pos: Tuple[int, int],
                                board: List[List[Tuple[int, int, int]]],
                                visited: List[List[int]]) -> int:
        """Return the size of the largest connected blob that (a) is of this
        Goal's target colour, (b) includes the cell at <pos>, and (c) involves
        only cells that have never been visited.

        If <pos> is out of bounds for <board>, return 0.

        <board> is the flattened board on which to search for the blob.
        <visited> is a parallel structure that, in each cell, contains:
            -1 if this cell has never been visited
            0 if this cell has been visited and discovered
              not to be of the target colour
            1 if this cell has been visited and discovered
              to be of the target colour

        Update <visited> so that all cells that are visited are marked with
        either 0 or 1.
        """
        x_0 = pos[0]
        y_0 = pos[1]
        # Bug fix: negative coordinates must also be rejected. Python's
        # negative indexing made (-1, y) silently wrap to the last column,
        # which could join blobs across opposite edges of the board and
        # corrupt the visited grid.
        if x_0 < 0 or y_0 < 0 or x_0 > len(board) - 1 or y_0 > len(board) - 1:
            return 0
        if visited[x_0][y_0] != -1:
            return 0
        if board[x_0][y_0] != self.colour:
            visited[x_0][y_0] = 0
            return 0
        else:
            visited[x_0][y_0] = 1
            # Flood-fill the four edge-adjacent neighbours.
            size0 = self._undiscovered_blob_size((x_0, y_0 + 1), board, visited)
            size1 = self._undiscovered_blob_size((x_0, y_0 - 1), board, visited)
            size2 = self._undiscovered_blob_size((x_0 + 1, y_0), board, visited)
            size3 = self._undiscovered_blob_size((x_0 - 1, y_0), board, visited)
            return size0 + size1 + size2 + size3 + 1

    def description(self) -> str:
        """Returns the description for the BlobGoal class"""
        return f"Maximize number of {colour_name(self.colour)} unit cells " \
               f"that form a blob by touching sides. " \
               f"Touching corners doesn't count"
if __name__ == '__main__':
    import python_ta
    # Run the course style checker, whitelisting this module's imports.
    python_ta.check_all(config={
        'allowed-import-modules': [
            'doctest', 'python_ta', 'random', 'typing', 'block', 'settings',
            'math', '__future__'
        ],
        'max-attributes': 15
    })
|
21,119 | 65c0d281061373998cb3e619377198715345b07a | # code
import sys
import azure.cognitiveservices.speech as speechsdk
from azure.cognitiveservices.speech import AudioDataStream, SpeechConfig, SpeechSynthesizer, SpeechSynthesisOutputFormat
from azure.cognitiveservices.speech.audio import AudioOutputConfig
import time
from random import random
name = sys.argv[1]  # candidate's name from the first CLI argument (stray ';' removed)
def welcome_message(name):
    """Synthesize a spoken welcome for <name> into welcome.wav via Azure TTS.

    SECURITY NOTE(review): the subscription key below is committed to source
    control — it should be rotated and loaded from an environment variable.
    """
    speech_config = speechsdk.SpeechConfig(subscription="b58d19e457574aa39bc0f8b9b763cd55", region="australiaeast")
    audio_config = AudioOutputConfig(filename="C:/Users/Pranav Patel/Documents/schabu/back_end/python/welcome.wav")
    synthesizer = SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
    # "Recruitment" typo fixed.  "Schubu" vs the "schabu" folder spelling is
    # left as written — presumably the product name; confirm with the author.
    text = "Hello " + name + "! Welcome to Schubu Recruitment Process. Please Click on the Start button to begin the interview process."
    # speak_text_async returns a future; .get() blocks until synthesis
    # finishes so the process does not exit before the .wav is written.
    synthesizer.speak_text_async(text).get()
    print(text)
welcome_message(name)  # entry point: speak the greeting for the CLI-supplied name
21,120 | 467a36c9996369d72c2be2fd55dfec30e8dd36f6 | import random
# Dice-sum guessing game (comments added in English; code unchanged).
d1=random.randint(1,10)
d2=random.randint(1,10)
sd=d1+d2   # hidden target: sum of two d10 rolls (2..20)
d=10       # player's money ("dinheiros")
g=0        # winnings
a=int(input('Me diga um numero de 1 a 20: '))
a2=int(input('Me diga um numero maior ou igual ao numero anterior: '))
# Hint about where the hidden sum lies relative to the interval [a, a2].
# NOTE(review): the 'menor'/'maior' labels look inverted relative to the
# comparisons (a<sd prints "smaller") — verify the intended wording.
if a<sd:
    print('Soma menor')
elif a2>sd:
    print('Soma maior')
else:
    print('Soma no meio')
print('Voce tem {0} dinheiros'.format(d))
c=int(input('Quantos chutes quer comprar, cada chute custa 1 dinheiro: '))
d=d-c      # pay 1 dinheiro per purchased guess
c2=1
while c>0:
    # NOTE(review): 'c2=+1' assigns +1 every pass (it is NOT 'c2 += 1'), so
    # each guess costs exactly 1 — which matches the prompt, but the spelling
    # is almost certainly accidental; 'c = c - 1' was probably intended.
    c2=+1
    c=c-c2
    c1=int(input('Diga seu chute: '))
    g=d+d*5
    if c1==sd:
        print('Vocรช acertou e ganhou {0} dinheiros'.format(g))
        break
    elif c==0:
        print('Vocรช acabou o jogo com {0} dinheiros'.format(d))
    else:
        # NOTE(review): this extra prompt discards its answer — c1 is
        # overwritten at the top of the next iteration, so the player is
        # asked twice per round.  Confirm intent before fixing.
        c1=int(input('Diga seu chute: '))
21,121 | 292f95bf744274f10de6ac19f53956a3045bbf30 | import numpy as np
from abc import ABC, abstractmethod
class AbstractEnvRunner(ABC):
    """Base class for rollout collectors: holds the (possibly vectorized)
    environment, the model, and the current observation / done state."""

    def __init__(self, *, env, model, nsteps):
        self.env = env
        self.model = model
        # Vectorized envs expose num_envs; fall back to a single env.
        n_envs = getattr(env, 'num_envs', 1)
        self.nenv = n_envs
        obs_space = env.observation_space
        self.batch_ob_shape = (n_envs * nsteps,) + obs_space.shape
        self.obs = np.zeros((n_envs,) + obs_space.shape,
                            dtype=obs_space.dtype.name)
        self.obs[:] = env.reset()
        self.nsteps = nsteps
        self.dones = [False] * n_envs

    @abstractmethod
    def run(self):
        """Collect one batch of experience; implemented by subclasses."""
        raise NotImplementedError
class Runner(AbstractEnvRunner):
    """Collects `nsteps` of experience from the env and computes GAE(lambda)
    advantages / returns for PPO-style updates."""

    def __init__(self,env,model,nsteps,gamma,lam):
        super().__init__(env=env,model=model,nsteps=nsteps)
        self.lam=lam      # GAE lambda (bias/variance trade-off)
        self.gamma=gamma  # discount factor

    def run(self):
        """Roll out nsteps, then return flattened (obs, returns, dones,
        actions, values, neglogps) minibatch arrays."""
        mb_obs,mb_actions,mb_rewards,mb_values,mb_dones,mb_neglogps=[],[],[],[],[],[]
        for i in range(self.nsteps):
            actions,values,neglogps=self.model.step(self.obs)
            mb_obs.append(self.obs.copy())
            mb_actions.append(actions)
            mb_values.append(values)
            mb_neglogps.append(neglogps)
            mb_dones.append(self.dones)
            self.obs[:],rewards,self.dones,infos=self.env.step(actions)#step
            mb_rewards.append(rewards)
        mb_obs=np.asarray(mb_obs,dtype=self.obs.dtype)
        mb_rewards=np.asarray(mb_rewards,dtype=np.float32)
        mb_actions=np.asarray(mb_actions)
        mb_values=np.asarray(mb_values,dtype=np.float32)
        mb_neglogps=np.asarray(mb_neglogps,dtype=np.float32)
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24
        # (AttributeError); the builtin bool is the supported dtype spelling.
        mb_dones=np.asarray(mb_dones,dtype=bool)
        last_values=self.model.value(self.obs)
        mb_returns=np.zeros_like(mb_rewards)
        mb_advs=np.zeros_like(mb_rewards)
        lastgaelam=0
        # Backward GAE recursion: A_t = delta_t + gamma*lam*(1-done)*A_{t+1}.
        # NOTE(review): '1.0-self.dones' assumes env.step returns dones as an
        # ndarray (a plain Python list would raise here) — confirm the env.
        for t in reversed(range(self.nsteps)):
            if t==self.nsteps-1:
                next_none_terminal=1.0-self.dones
                next_values=last_values
            else:
                next_none_terminal=1.0-mb_dones[t+1]
                next_values=mb_values[t+1]
            delta=mb_rewards[t]+self.gamma*next_values*next_none_terminal-mb_values[t]
            mb_advs[t]=lastgaelam=delta+self.gamma*self.lam*next_none_terminal*lastgaelam
        mb_returns=mb_advs+mb_values
        # flatten merges the (time, env) axes into a single batch axis.
        return (*map (flatten,(mb_obs,mb_returns,mb_dones,mb_actions,mb_values,mb_neglogps)),)
def flatten(attr):
    """Collapse the leading (time, env) axes of a rollout array into one
    batch axis, keeping each environment's trajectory contiguous."""
    n_steps, n_envs = attr.shape[0], attr.shape[1]
    return attr.swapaxes(0, 1).reshape(n_steps * n_envs, *attr.shape[2:])
|
21,122 | 266744840b8e450a52e5644f6abc1fbe594ec07c |
from flask import Flask
# from flask_mail import Mail
from flask_script import Manager
from app.models import db
from app.views import blue
app = Flask(__name__)
# mail = Mail(app)
# mail.init_app(app)
app.register_blueprint(blueprint=blue, url_prefix='/app')
# Initialise the database configuration.
# BUG FIX: the original (mojibake) Chinese comment here had its continuation
# spill onto a bare line with no '#', which was a SyntaxError at import time.
# URI format: mysql+pymysql://root:123456@127.0.0.1:3306/flask8
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@127.0.0.1:3306/flask8'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
manage = Manager(app)

if __name__ == '__main__':
    # app.run(host='', port='')
    manage.run()
|
21,123 | 08bd49caa4f2f6b15bf8b9fe8e7e679e7cd49e06 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import sys
import RPi.GPIO as GPIO
from mfrc522 import SimpleMFRC522
from test_to_server import send_data
from pi_control import *
import mfrc522
# RFID attendance reader: poll the MFRC522 for a card, post the card id to
# the server, and play a sound depending on the server's reply.
GPIO.setwarnings(False)
print(mfrc522)
try:
    reader = SimpleMFRC522()
except Exception as e:
    # NOTE(review): if construction fails, the error is only printed and
    # `reader` stays undefined — the loop below would then raise NameError.
    print(e)
try:
    try:
        while True:
            print('Waiting for your card...')
            card_id= reader.read_id()  # blocks until a card is presented
            if card_id:
                res = send_data(card_id=card_id)
                print(res,type(res))
                # Server reply codes select the feedback sound; 'aaaaa' /
                # 'bbbbb' semantics are defined server-side — TODO confirm.
                if(res=='aaaaa'):
                    print("sss")
                    sound_1()
                elif res =='bbbbb':
                    sound_3()
                else:
                    sound_2()
                print(card_id)
            time.sleep(0.5)  # debounce between reads
    except Exception as e:
        # NOTE(review): any error ends the polling loop silently after the print.
        print(e)
        pass
except KeyboardInterrupt as e:
    # Release GPIO pins on Ctrl-C before exiting.
    GPIO.cleanup()
    exit(1)
|
21,124 | 3439fb85de393d562f54eb55055289c8021e92c2 | #! /usr/bin/env python
#
def datenum_values ( n_data ):

#*****************************************************************************80
#
## DATENUM_VALUES returns the MATLAB DATENUM for various dates.
#
#  Discussion:
#
#    The CE (Common Era) hybrid Julian/Gregorian calendar is used; the day
#    after 04 October 1582 was 15 October 1582.  Years BC/BCE are indicated
#    by non-positive year values.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    13 December 2017
#
#  Author:
#
#    John Burkardt
#
#  Reference:
#
#    Edward Reingold, Nachum Dershowitz,
#    Calendrical Calculations: The Millennium Edition,
#    Cambridge University Press, 2001, ISBN: 0 521 77752 6.
#
#  Parameters:
#
#    Input/output, integer N_DATA.  Set N_DATA to 0 before the first call.
#    Each call increments N_DATA by 1 and returns the corresponding row;
#    when the data is exhausted the output N_DATA is 0 again.
#
#    Output, integer Y, M, D, the Common Era date.
#
#    Output, integer DATENUM, the MATLAB datenum value.
#
  # Each row: ( year, month, day, MATLAB datenum ).
  table = [
    (    0, 1,  1,      1 ),
    (    1, 1,  1,    367 ),
    (  100, 1,  1,  36526 ),
    ( 1000, 1,  1, 365244 ),
    ( 1939, 8, 17, 708434 ),
    ( 1944, 9,  9, 710284 ),
    ( 1952, 3, 10, 713023 ),
    ( 1966, 5, 12, 718199 ),
    ( 1980, 1,  6, 723186 ),
    ( 1996, 2, 25, 729080 ),
    ( 2000, 1,  1, 730486 ) ]

  if ( n_data < 0 ):
    n_data = 0

  if ( len ( table ) <= n_data ):
    # Cursor exhausted: reset and return zeros, as callers expect.
    n_data = 0
    y, m, d, date_num = 0, 0, 0, 0
  else:
    y, m, d, date_num = table[n_data]
    n_data = n_data + 1

  return n_data, y, m, d, date_num
def datenum_values_test ( ):

#*****************************************************************************80
#
## DATENUM_VALUES_TEST exercises DATENUM_VALUES by printing every stored row.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    13 December 2017
#
#  Author:
#
#    John Burkardt
#
  import platform

  print ( '' )
  print ( 'DATENUM_VALUES_TEST:' )
  print ( '  Python version: %s' % ( platform.python_version ( ) ) )
  print ( '  DATENUM_VALUES stores values of' )
  print ( '  the MATLAB datenum for a given Y/M/D date' )
  print ( '' )
  print ( '     Y     M     D     DateNum' )
  print ( '' )

  # Drive the N_DATA cursor until it wraps back to zero.
  n_data = 0
  while True:
    n_data, y, m, d, date_num = datenum_values ( n_data )
    if n_data == 0:
      break
    print ( '  %6d %6d %6d  %6d' % ( y, m, d, date_num ) )
#
#  Terminate.
#
  print ( '' )
  print ( 'DATENUM_VALUES_TEST:' )
  print ( '  Normal end of execution.' )
  return
if ( __name__ == '__main__' ):
  # Standalone run: bracket the test with timestamps (timestamp.py ships
  # alongside this file in the same collection).
  from timestamp import timestamp
  timestamp ( )
  datenum_values_test ( )
  timestamp ( )
|
21,125 | ed0e57ed41003c8d5d381af10a14a5422af9a547 | """
Program that reads two numbers from the user and adds them together.
(Docstring translated from Norwegian.)
"""
tall1 = float(input("Skriv inn et tall: "))       # first number (prompt kept in Norwegian)
tall2 = float(input("Skriv inn et tall til: "))   # second number
print(tall1, tall2)
sum_to_tall = tall1 + tall2
print(type(tall1))  # NOTE(review): debug print of the type — likely leftover
print(sum_to_tall)
|
21,126 | be3f73748a9a69587b6f485ad9b8615eb7bfc7ea | # -*- coding: utf-8 -*-
"""
Author:
Xuxin Zhang,xuxinz@qq.com
Reference: Chae D K , Kang J S , Kim S W , et al.
CFGAN: A Generic Collaborative Filtering Framework based on Generative Adversarial Networks[C]// the 27th ACM International Conference. ACM, 2018.
Baseline: https://github.com/1051003502/CFGAN
"""
import random
import re
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.autograd import Variable
from sklearn import preprocessing
import data
import cfgan
import warnings
warnings.filterwarnings("ignore")
def UseInfoPreprocessing(UseInfo):
    """Encode the MovieLens user table into numeric features.

    One-hot encodes gender, factorizes occupation and zipcode prefixes into
    integer codes, and standard-scales age.  Returns the transformed frame.

    NOTE(review): assumes columns 'useGender', 'useOccupation', 'useZipcode',
    'useAge' exist and that every occupation contains at least one ASCII
    letter (.search(...).group() would raise otherwise) — confirm against
    data.loadUseInfo.  Drops are in-place, so statement order matters.
    """
    useGender_dummies = pd.get_dummies(UseInfo['useGender'])
    UseInfo = UseInfo.join(useGender_dummies)
    UseInfo.drop(['useGender'], axis=1, inplace=True)
    # create feature for the alphabetical part of the Occupation
    UseInfo['useOccupationLetter'] = UseInfo['useOccupation'].map(lambda x: re.compile("([a-zA-Z]+)").search(x).group())
    # convert the distinct Occupation letters with incremental integer values
    UseInfo['useOccupationLetter'] = pd.factorize(UseInfo['useOccupationLetter'])[0]
    UseInfo[['useOccupation','useOccupationLetter']].head()  # NOTE(review): no-op — result discarded
    UseInfo.drop(['useOccupation'], axis=1, inplace=True)
    UseInfo['useZipcodeLetter'] = UseInfo['useZipcode'].str.split().str[0]
    # Non-numeric zipcodes are bucketed under the sentinel "99999".
    UseInfo['useZipcodeLetter'] = UseInfo['useZipcodeLetter'].apply(lambda x: "99999" if not(x.isnumeric()) else x)
    UseInfo['useZipcodeLetter'] = pd.factorize(UseInfo['useZipcodeLetter'])[0]
    UseInfo[['useZipcode','useZipcodeLetter']].head()  # NOTE(review): no-op — result discarded
    UseInfo.drop(['useZipcode'], axis=1, inplace=True)
    # StandardScaler will subtract the mean from each value then scale to unit variance.
    scaler = preprocessing.StandardScaler()
    UseInfo['useAge_scaled'] = scaler.fit_transform(UseInfo['useAge'].values.reshape(-1,1))
    UseInfo.drop(['useAge'], axis=1, inplace=True)
    return UseInfo
def select_negative_items(realData, num_pm, num_zr):
    '''
    Sample two disjoint sets of negative (un-purchased) items per user.

    realData : per-user indicator matrix; nonzero entries mark purchased items
    num_pm : number of negative items (partial-masking) sampled per iteration
    num_zr : number of negative items (zero-reconstruction regularization)
             sampled per iteration

    Returns (n_items_pm, n_items_zr), two 0/1 matrices the same shape as
    realData whose rows are disjoint from each other and from the positives.

    BUG FIX: the item count was hard-coded as 1683 (the MovieLens-100k size),
    which crashed random.sample() on any other dataset; the actual number of
    columns of realData is used instead.  If a user has fewer negatives than
    num_pm + num_zr, the slices simply truncate.
    '''
    data = np.array(realData)
    n_items_pm = np.zeros_like(data)
    n_items_zr = np.zeros_like(data)
    for i in range(data.shape[0]):
        purchased = set(np.where(data[i] != 0)[0])
        # All item indices the user has NOT purchased, in random order.
        negatives = [j for j in range(data.shape[1]) if j not in purchased]
        random.shuffle(negatives)
        n_items_pm[i][negatives[:num_pm]] = 1
        n_items_zr[i][negatives[num_pm:num_pm + num_zr]] = 1
    return n_items_pm, n_items_zr
def computeTopN(groundTruth, result, topN):
    """Return precision@topN: the fraction of the topN highest-scored item
    indices in <result> that appear in <groundTruth>."""
    scores = result.tolist()
    # Indices ranked by descending score; Python's stable sort keeps ties
    # in ascending-index order, matching the original (value, index) sort.
    ranked = sorted(range(len(scores)), key=scores.__getitem__, reverse=True)
    hits = sum(1 for idx in ranked[:topN] if idx in groundTruth)
    return hits / topN
def main(userCount,itemCount,testSet,trainVector,trainMaskVector,\
         UseInfo_pre,topN,epochCount,pro_ZR,pro_PM,alpha):
    """Train the CFGAN generator/discriminator and track precision@topN.

    Returns an (epochs+1, 2) array of (epoch, precision) rows, seeded with a
    zero row.  pro_ZR / pro_PM are the negative-sample counts for the
    zero-reconstruction and partial-masking schemes; alpha weighs the ZR
    regularizer in the generator loss (per the CFGAN paper).
    """
    info_shape = UseInfo_pre.shape[1]
    UseInfo_pre = UseInfo_pre.values
    # Row 0 is a dummy user so user ids can index directly (ids start at 1).
    # NOTE(review): the [0,0,0,0,0] pad hard-codes info_shape == 5 — confirm.
    UseInfo_pre = np.insert(UseInfo_pre,0,[0,0,0,0,0],axis=0)
    UseInfo_pre = torch.tensor(UseInfo_pre.astype(np.float32))
    result_precision=np.zeros((1,2))
    # Build the generator and discriminator
    G=cfgan.generator(itemCount, info_shape)
    D=cfgan.discriminator(itemCount, info_shape)
    regularization = nn.MSELoss()
    d_optimizer = torch.optim.Adam(D.parameters(), lr=0.0001)
    g_optimizer = torch.optim.Adam(G.parameters(), lr=0.0001)
    G_step=5
    D_step=2
    batchSize_G = 32
    batchSize_D = 32
    for epoch in range(epochCount):
        # ---------------------
        #  Train Generator
        # ---------------------
        for step in range(G_step):
            # Select a random batch of purchased vectors (eu == realData here).
            leftIndex = random.randint(1, userCount - batchSize_G - 1)
            realData = Variable(copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_G]))
            eu = Variable(copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_G]))
            useInfo_batch = Variable(copy.deepcopy(UseInfo_pre[leftIndex:leftIndex + batchSize_G]))
            # Select a random batch of negative items for every user.
            n_items_pm,n_items_zr = select_negative_items(realData,pro_PM,pro_ZR)
            ku_zp = Variable(torch.tensor(n_items_pm + n_items_zr))
            realData_zp = Variable(torch.ones_like(realData)) * eu + Variable(torch.zeros_like(realData)) * ku_zp
            # Generate a batch of new purchased vectors, masked to eu + ku_zp.
            fakeData=G(realData,useInfo_batch)
            fakeData_ZP = fakeData * (eu + ku_zp)
            fakeData_result=D(fakeData_ZP,useInfo_batch)
            # NOTE(review): the adversarial term is computed in numpy on a
            # detached tensor, so it is a constant — only the alpha*MSE
            # regularizer actually backpropagates into G here.  Confirm
            # against the baseline implementation.
            g_loss = np.mean(np.log(1.-fakeData_result.detach().numpy()+10e-5)) + alpha*regularization(fakeData_ZP,realData_zp)
            g_optimizer.zero_grad()
            g_loss.backward(retain_graph=True)
            g_optimizer.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        for step in range(D_step):
            # Select a random batch of purchased vectors.
            # NOTE(review): eu/useInfo_batch slice with batchSize_G (works
            # only because batchSize_G == batchSize_D), and realData_zp is
            # reused from the last G step — likely copy/paste artifacts.
            leftIndex=random.randint(1,userCount-batchSize_D-1)
            realData=Variable(copy.deepcopy(trainVector[leftIndex:leftIndex+batchSize_D]))
            eu = Variable(copy.deepcopy(trainVector[leftIndex:leftIndex + batchSize_G]))
            useInfo_batch = Variable(copy.deepcopy(UseInfo_pre[leftIndex:leftIndex + batchSize_G]))
            # Select a random batch of negative items for every user.
            n_items_pm, _ = select_negative_items(realData,pro_PM,pro_ZR)
            ku = Variable(torch.tensor(n_items_pm))
            # Generate a batch of new purchased vectors.
            fakeData=G(realData,useInfo_batch)
            fakeData_ZP = fakeData * (eu + ku)
            # Train the discriminator.
            fakeData_result=D(fakeData_ZP,useInfo_batch)
            realData_result=D(realData,useInfo_batch)
            # NOTE(review): both log terms are detached numpy constants and
            # the regularizer is multiplied by 0, so this backward() carries
            # zero gradient into D — verify before relying on these results.
            d_loss = -np.mean(np.log(realData_result.detach().numpy()+10e-5) +
                              np.log(1. - fakeData_result.detach().numpy()+10e-5)) + 0*regularization(fakeData_ZP,realData_zp)
            d_optimizer.zero_grad()
            d_loss.backward(retain_graph=True)
            d_optimizer.step()
        # Evaluate precision@topN over the test users every epoch.
        if( epoch%1==0):
            n_user=len(testSet)
            index=0
            precisions=0
            for testUser in testSet.keys():
                data = Variable(copy.deepcopy(trainVector[testUser]))
                useInfo_index = Variable(copy.deepcopy(torch.tensor(np.expand_dims(UseInfo_pre[index], axis=0))))
                # Exclude the purchased vector that has occurred in the
                # training set (the mask pushes trained items out of top-N).
                # NOTE(review): 1683 is hard-coded here (MovieLens item count).
                result = G(data.reshape(1,1683),useInfo_index) + Variable(copy.deepcopy(trainMaskVector[index]))
                result = result.reshape(1683)
                precision = computeTopN(testSet[testUser], result, topN)
                precisions+=precision
                index+=1
            precisions = precisions/n_user
            result_precision=np.concatenate((result_precision,np.array([[epoch,precisions]])),axis = 0)
            print('Epoch[{}/{}],d_loss:{:.6f},g_loss:{:.6f},precision:{}'.format(epoch, epochCount,
                                                                                 d_loss.item(),
                                                                                 g_loss.item(),
                                                                                 precisions))
    return result_precision
def result_plt(result_precision):
    """Plot precision@N versus epoch for a CFGAN run.

    result_precision: array of shape (n, 2) holding (epoch, precision) rows.
    Blocks on plt.show().
    """
    plt.figure()
    plt.title("the precision of CFGAN")
    plt.xlabel('epoch')
    plt.plot(result_precision[:,0], result_precision[:,1], "r-*",label='precision' )
    plt.ylim([0, 0.6])
    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Hyper-parameters (names follow the CFGAN paper: ZR = zero
    # reconstruction, PM = partial masking).
    topN=5
    epochs = 1000
    pro_ZR = 50
    pro_PM = 50
    alpha = 0.1
    # MovieLens-100k user side information and the u1 train/test fold.
    UseInfo = data.loadUseInfo("data/ml-100k/u.user" , "|")
    # ItemInfo = data.loadItemInfo("data/ml-100k/u.item" , "|")
    UseInfo_pre = UseInfoPreprocessing(UseInfo)
    UseInfo_pre.drop(['userId'], axis=1, inplace=True)
    trainSet,train_use,train_item= data.loadTrainingData("data/ml-100k/u1.base", "\t")
    testSet,test_use,test_item = data.loadTestData("data/ml-100k/u1.test", "\t")
    userCount = max(train_use,test_use)
    itemCount = max(train_item,test_item)
    userList_test = list(testSet.keys())
    trainVector, trainMaskVector, batchCount = data.to_Vectors(trainSet, userCount, \
                                                               itemCount, userList_test, "userBased")
    result_precision = main(userCount,itemCount,testSet,\
                            trainVector,trainMaskVector,UseInfo_pre,topN,epochs,pro_ZR,pro_PM,alpha)
    # Drop the zero seed row before plotting.
    result_precision = result_precision[1:,]
    result_plt(result_precision)
|
21,127 | 49269dafca6a11add0c8e5bc80ddbc02d20f789f | """
Basic usage example.
"""
from ndist import distros
if __name__ == '__main__':
    # Demo 1: sample a 5x5 grid from a Normal(100, 10).
    dist = distros.Normal(loc=100, scale=10)
    samples = dist.sample((5, 5))
    print("\nNormal(100, 10)")
    print(samples)
    print(samples.mean(), samples.std())
    # Demo 2: Uniform on [0, 100).
    dist = distros.Uniform(low=0, high=100)
    samples = dist.sample((5, 5))
    print("\nUniform(0, 100)")
    print(samples)
    print(samples.min(), samples.max())
    # Demo 3: power-law distribution with exponent 8.
    dist = distros.Power(a=8)
    samples = dist.sample((5, 5))
    print("\nPower(8)")
    print(samples)
    print(samples.mean(), samples.std())
    # Demo 4: scalar arithmetic on a distribution (shift by 5).
    dist = distros.Normal(loc=100, scale=10) + 5
    samples = dist.sample(10**5)
    print("\nNormal(100, 10) + 5")
    print(samples.mean(), samples.std())
    # Demo 5: in-place scaling of a distribution.
    dist = distros.Normal(loc=100, scale=10)
    dist *=5
    samples = dist.sample((5, 5))
    print("\nNormal(100, 10) * 5")
    print(samples)
    print(samples.mean(), samples.std())
    # Demo 6: summing two independent distributions.
    dist = distros.Power(a=8) + distros.Uniform(low=0, high=100)
    samples = dist.sample(10**5)
    print("\nPower(8) + Uniform(0, 100)")
    print(samples.mean(), samples.std())
|
21,128 | 28f536b96d58ae9b0f872664ee1b60efd9c31e24 | '''
Copyright 2019 ARM Ltd. and University of Cyprus
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from abc import ABC, abstractmethod
from xml.dom import minidom
from paramiko import SSHClient, client
import getpass
import paramiko
import socket
import platform
import visa
import os
class Measurement(ABC):
    '''
    Abstract base class for fitness measurements driven by an XML config file.

    Subclasses implement measure(); by convention the first element of the
    returned list is the fitness value used by defaultFitness.py.
    '''

    def __init__(self, confFile):
        '''Parse confFile; most fields are populated later by init().'''
        self.confFile=confFile
        self.xmldoc = minidom.parse(confFile)
        # Expected to be initialized in init() (call it after the constructor).
        self.targetRunDir= None
        self.targetHostname= None
        self.targetSSHusername= None
        self.targetSSHpassword = None
        self.coresToUse=None
        self.sourceFilePath=None  # set via setSourceFilePath()
        super().__init__()  # abstract class init

    def init(self):
        '''Read the common SSH/target settings from the config.  Subclasses
        may override to add parameters.  Prompts interactively when the SSH
        password is configured as an empty string.'''
        self.targetRunDir= self.tryGetStringValue('targetRunDir')
        self.targetHostname= self.tryGetStringValue('targetHostname')
        self.targetSSHusername= self.tryGetStringValue('targetSSHusername')
        self.targetSSHpassword = self.tryGetStringValue('targetSSHpassword')
        if "" == self.targetSSHpassword:
            self.targetSSHpassword = getpass.getpass(prompt="Please input target password: ", stream=None)
        coresToUseString=self.tryGetStringValue('coresToUse')
        self.coresToUse=[int(core) for core in coresToUseString.split(" ")]

    def setSourceFilePath(self,sourceFilePath):
        '''Set the path of the source file to copy/measure.  Call before
        measurement (or once per GA run when the path does not change).'''
        self.sourceFilePath=sourceFilePath

    # Helper accessors: on failure they print a warning and return None
    # instead of terminating, because some configs legitimately omit keys
    # (e.g. bare-metal runs do not use the SSH parameters).
    def tryGetStringValue(self,key):
        '''Return the 'value' attribute of the first <key> element, or None.'''
        try:
            return self.xmldoc.getElementsByTagName(key)[0].attributes['value'].value
        except Exception:
            print("Warning failed to read "+str(key))

    def tryGetIntValue(self,key):
        '''Return int('value') of the first <key> element, or None.'''
        try:
            return int(self.xmldoc.getElementsByTagName(key)[0].attributes['value'].value)
        except Exception:
            print("Warning failed to read "+str(key))

    def tryGetFloatValue(self,key):
        '''Return float('value') of the first <key> element, or None.'''
        try:
            return float(self.xmldoc.getElementsByTagName(key)[0].attributes['value'].value)
        except Exception:
            print("Warning failed to read "+str(key))

    @abstractmethod
    def measure(self):
        '''Return a list of results; index 0 is the fitness by convention.'''
        pass

    def executeSSHcommand(self,command,continousAttempt=True,max_tries=10,sudo=False):
        '''Run <command> on the target over SSH and return stdout lines.
        Retries up to max_tries when continousAttempt is set.'''
        tries=0
        while True:
            try:
                ssh = SSHClient()
                ssh.set_missing_host_key_policy(client.AutoAddPolicy())
                ssh.connect(self.targetHostname, username=self.targetSSHusername, password=self.targetSSHpassword)
                stdin, stdout, stderr = ssh.exec_command(command)
                if sudo:
                    # Feed the password to the remote sudo prompt.
                    stdin.write(self.targetSSHpassword + "\n")
                    stdin.flush()
                lines = stdout.readlines()
                ssh.close()
                return lines
            # except Exception (not bare except) so Ctrl-C is not swallowed
            # by the retry loop.
            except Exception:
                if continousAttempt and tries<max_tries:
                    tries=tries+1
                    continue
                # BUG FIX: `raise("...")` raised TypeError (strings are not
                # exceptions); raise a real exception carrying the message.
                raise RuntimeError("Unable to execute command "+str(command))

    def executeSSHcommandNonBlocking(self,command,continousAttempt=True,max_tries=10):
        '''Run <command> on the target over SSH without waiting for output.'''
        tries=0
        while True:
            try:
                ssh = SSHClient()
                ssh.set_missing_host_key_policy(client.AutoAddPolicy())
                ssh.connect(self.targetHostname, username=self.targetSSHusername, password=self.targetSSHpassword)
                ssh.exec_command(command)
                ssh.close()
                return
            except Exception:
                if continousAttempt and tries<max_tries:
                    tries=tries+1
                    continue
                # BUG FIX: was `raise("...")` — see executeSSHcommand.
                raise RuntimeError("Unable to execute command "+str(command))

    def copyFileOverFTP(self,continousAttempt=True):
        '''Copy the configured source file to <targetRunDir>/main.s via SFTP.
        NOTE(review): with continousAttempt=True this retries forever on a
        persistent failure — consider bounding it like executeSSHcommand.'''
        while True:
            try:
                ssh = SSHClient()
                ssh.set_missing_host_key_policy(client.AutoAddPolicy())
                ssh.connect(self.targetHostname, username=self.targetSSHusername, password=self.targetSSHpassword)
                sftp=ssh.open_sftp()
                sftp.put(self.sourceFilePath,self.targetRunDir+"/main.s")
                sftp.close()
                ssh.close()
                break
            except Exception:
                if continousAttempt:
                    continue
                # BUG FIX: was `raise("...")` — see executeSSHcommand.
                raise RuntimeError("Unable to copy file")

    def ping (self,host):
        """
        Returns True if host responds to a ping request.

        SECURITY NOTE(review): <host> is interpolated into a shell command
        via os.system — only call with trusted hostnames, or switch to
        subprocess.run([...], shell=False).
        """
        # Ping parameters as function of OS
        ping_str = "-n 1" if platform.system().lower()=="windows" else "-c 1"
        # Ping
        return os.system("ping " + ping_str + " " + host) == 0
|
21,129 | b8ff9fd52412a8459c3cab7220d9aa6454b2c05a | """
# Employee info
class Employee:
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
class Solution:
    def getImportance(self, employees, id):
        """
        :type employees: Employee
        :type id: int
        :rtype: int

        Return the total importance of employee <id> plus all transitive
        subordinates.

        Improvement: builds an id -> employee map once and walks the
        reporting tree iteratively.  The original re-scanned the whole
        employee list on every recursive call (O(n) per node, O(n*m) total)
        and could hit the recursion limit on long management chains; this is
        O(n) total and iterative.  As before, an unknown id raises
        (KeyError here, UnboundLocalError in the original).
        """
        by_id = {e.id: e for e in employees}
        total = 0
        pending = [id]  # ids whose importance is still to be added
        while pending:
            emp = by_id[pending.pop()]
            total += emp.importance
            pending.extend(emp.subordinates)
        return total
|
21,130 | d9a1b3a63cd4ac23541ac0625e39fc66dc9aaf67 | from django.shortcuts import render
from django.http import HttpResponse
from django.http import QueryDict,Http404
from django.shortcuts import get_list_or_404,get_object_or_404
from hello.models import User
import traceback
def useradd(request):
    """Add a user.

    (Docstring translated from Chinese:)
    request.POST.get    -- suited to fetching individual fields one by one
    request.POST.dict() -- suited to processing the whole form at once
    Form(request.POST)  -- suited to form-validation scenarios

    BUG FIX: the original mojibake Chinese comments in the body had
    continuation characters spill onto bare lines with no '#', which made
    the module a SyntaxError; they are restored as English comments below.
    """
    msg = {}
    if request.method == "POST":
        try:
            print(request.POST)
            # Option 1 (kept for reference): fetch each value individually,
            # build a User instance field by field, then save it.
            # name=request.POST.get('name',"")
            # password=request.POST.get('password',"")
            # sex=request.POST.get('sex',"")
            # u=User(); u.name=name; u.password=password; u.sex=int(sex); u.save()
            #
            # Option 2 (kept for reference): read the fields explicitly, map
            # the sex label to an integer, then User.objects.create(**data).
            #
            # Current approach: convert the whole POST to a dict and insert
            # in one go.
            data1=request.POST.dict()
            print(data1)
            User.objects.create(**data1)
            msg = {"code": 0, "result": "ๆทปๅ ็จๆทๆๅ"}
        except Exception:
            msg = {"code": 1, "errmsg": "ๆทปๅ ็จๆทๅคฑ่ดฅ: %s" % traceback.format_exc()}
    return render(request, "hello/useradd.html", {"msg": msg})
def userlist(request):
    """
    User list & keyword search (docstring translated from Chinese):
    filters users by a case-insensitive name substring when ?keyword= is set.
    """
    keyword = request.GET.get("keyword", "")
    print(keyword)
    queryset = User.objects.all()
    if keyword:
        queryset = queryset.filter(name__icontains=keyword)
    print(queryset)
    context = {"users": queryset, "keyword": keyword}
    return render(request, 'hello/userlist.html', context)
def usermod(request,**kwargs):
    """
    Update a user (docstring translated from Chinese):
    1. Load the record by primary key and render it to the front end.
    2. Apply the modified data submitted back to the back end.
    """
    msg = {}
    print(kwargs)
    pk = kwargs.get("pk")
    # 404 when the user does not exist.
    user=get_object_or_404(User,pk=pk)
    if request.method == "POST":
        try:
            data=request.POST.dict()
            print(data)
            User.objects.filter(pk=pk).update(**data)
            msg={"code":0,"result":"ๆดๆฐ็จๆทๆๅ"}
        except:
            msg={"code":1,"errmsg":"ๆดๆฐ็จๆทๅคฑ่ดฅ๏ผ%s" %traceback.format_exc()}
    return render(request,"hello/usermod.html",{"user":user,"msg":msg})
def userdel(request,**kwargs):
    """
    Delete a user (docstring translated from Chinese).

    BUG FIX: the original mojibake comment before the lookup spilled a
    continuation character onto a bare line (SyntaxError); restored below.
    """
    msg = {}
    pk=kwargs.get("pk")
    try:
        # Fetch the current record so the confirmation page can show it.
        user=User.objects.get(pk=pk)
    except User.DoesNotExist:
        raise Http404
    if request.method == "POST":
        try:
            User.objects.get(pk=pk).delete()
            msg={"code":0,"result": "ๅ ้ค็จๆทๆๅ"}
        except Exception:
            msg={"code":1,"errmsg":"ๅ ้ค็จๆทๅคฑ่ดฅ๏ผ%s" %traceback.format_exc()}
    return render(request,"hello/userdel.html",{"user":user,"msg":msg})
|
21,131 | 40fa2a80ff890ef396546845042962d7b34bd0cb |
import RAnd_num
def guess():
    '''
    Play one round: generate a random number 1..15 and give the user up to
    five attempts, with higher/lower hints after each wrong guess.

    BUG FIXES: in the original, the greater/smaller branches always matched
    first, so the "attempts left" and final "correct answer" messages were
    unreachable; the manual `i = i + 1` on the for-loop variable was a no-op
    and has been removed.
    '''
    num = RAnd_num.randint(1, 15)
    print("Guess the number from 1 to 15\n Total number of attempts 5")
    print('Enter the Number you guess')
    for i in range(1, 6):
        num2 = int(input())
        if num2 == num:
            print('Congratulation you guess correctly in %d attempt' % i)
            break
        if num2 > num:
            print('Enter number is greater\n Try again')
        else:
            print('Enter number is smaller\n Try again')
        if i == 5:
            # Last attempt used up: reveal the answer.
            print("The correct answer is:%d" % num)
        else:
            print('Guess again\n %d attempt left' % (5 - i))
# Keep playing rounds until the user declines.
while True:
    guess()
    print("Press Y to play again\nOR\nPress any key ")
    k = input()
    # BUG FIX: the original called guess() again here AND then looped back,
    # so answering 'Y' played two rounds back to back.
    if k != 'Y' and k != 'y':
        break
|
21,132 | 9fb02ff332594c7102234622fd8512aae5f12263 | from django.db import models
# Create your models here.
class KeyValue(models.Model):
    # Simple string key/value store; the key doubles as the primary key.
    key = models.CharField(max_length=255,primary_key=True)
    # Value may be absent (NULL) or left blank in forms.
    value = models.CharField(max_length=255,null=True,blank=True)
class Sensor(models.Model):
    """A physical sensor device and how its raw readings are scaled."""
    # Lifecycle states; stored as the integer, displayed as the label.
    DEVICE_STATE = (
        (1, 'unconfigured'),
        (10, 'configured'),
        (20, 'not connected'),
    )
    DEVICE_TYPE = (
        (1, 'temperature'),
    )
    # Multiplicative factor applied to raw readings.
    # NOTE(review): float choice values on a FloatField work, but exact
    # float equality is relied on for choice validation — confirm intended.
    CONSOLIDATION_TYPE = (
        (1, 'raw'),
        (0.01, 'percent (div by 100)'),
        (0.001, 'promille (div by 1000)')
    )
    # External device identifier used as the primary key.
    id = models.CharField(max_length=15,primary_key=True)
    name = models.CharField(max_length=25,blank=False)
    state = models.IntegerField(choices=DEVICE_STATE,blank=False)
    type = models.IntegerField(choices=DEVICE_TYPE,blank=False)
    consolidation = models.FloatField(choices=CONSOLIDATION_TYPE, blank=False,default=1)
    # Valid reading range after consolidation.
    minvalue = models.IntegerField(default=0)
    maxvalue = models.IntegerField(default=1000)
21,133 | fce0529c7a4502863a9e1e11f86c8342849714ec |
from pico2d import*
class UI:
    """Heads-up display: renders the score and elapsed time via pico2d."""

    def __init__(self):
        # Initial score shown on screen; font for the HUD text.
        self.score = 30
        self.font = load_font("ConsolaMalgun.TTF", 40)

    def update(self, frame_time):
        # NOTE(review): frame_time is accepted but unused — the HUD clock
        # comes from pico2d's get_time() instead.  Confirm the intent.
        self.time = get_time()

    def draw(self):
        # NOTE(review): draw() reads self.time, so update() must run at least
        # once before the first draw() or this raises AttributeError.
        print('์ ์ : %d' % self.score)
        self.font.draw(400,550, "์ ์:%d ์๊ฐ:%f" %(self.score, self.time))
        print('time:%f' % self.time)
        pass
def test_ui():
    """Smoke-test the HUD: animate the score 0..99 and redraw each frame."""
    open_canvas()
    ui = UI()
    for i in range(100):
        ui.score = i
        # BUG FIX: UI.update(self, frame_time) requires a frame_time
        # argument; the original bare ui.update() raised TypeError on the
        # first iteration.  0.01 matches the per-frame delay below.
        ui.update(0.01)
        clear_canvas()
        ui.draw()
        update_canvas()
        delay(0.01)
    delay(2)
    # (The redundant second close_canvas() call was removed.)
    close_canvas()
if __name__ == "__main__":
test_ui()
|
21,134 | d52426e9bd624be2d719f704b92efbe98308e796 | #ะฟะพะปัะทะพะฒะฐัะตะปั ะฒัะฑะธัะฐะตั ะฒะฐะปััั
# The user chooses the source and target currencies and an amount
# (comments translated from Russian; prompts kept in Russian).
currency = input("ะัะฑะตัะธัะต ะฒะฐะปััั: ััะฑะปะธ, ะดะพะปะปะฐัั ะธะปะธ ะตะฒัะพ: ")
change = input("ะัะฑะตัะธัะต ะฒะฐะปััั, ะฒ ะบะพัะพััั ะฝะฐะดะพ ะฟะตัะตะฒะตััะธ: ััะฑะปะธ, ะดะพะปะปะฐัั ะธะปะธ ะตะฒัะพ: ")
money = int(input("ะะฒะตะดะธัะต ะบะพะปะธัะตััะฒะพ ะดะตะฝะตะณ: "))
# Currency conversion with hard-coded exchange rates.
# NOTE(review): unsupported inputs and same-currency pairs print nothing —
# there is no final else branch.  Rates are fixed snapshots; confirm whether
# they should be updated or fetched dynamically.
if currency == "ััะฑะปะธ" and change == "ะตะฒัะพ":
    print(money, "ััะฑะปะตะน = ", money*0.011,"ะตะฒัะพ")
elif currency == "ััะฑะปะธ" and change == "ะดะพะปะปะฐัั":
    print(money, "ััะฑะปะตะน = ", money * 0.014, "ะดะพะปะปะฐัะพะฒ")
elif currency == "ะตะฒัะพ" and change == "ะดะพะปะปะฐัั":
    print(money, "ะตะฒัะพ = ", money * 1.19, "ะดะพะปะปะฐัะพะฒ")
elif currency == "ะตะฒัะพ" and change == "ััะฑะปะธ":
    print(money, "ะตะฒัะพ = ", money * 87.7, "ััะฑะปะตะน")
elif currency == "ะดะพะปะปะฐัั" and change == "ััะฑะปะธ":
    print(money, "ะดะพะปะปะฐัะพะฒ = ", money * 73.74, "ััะฑะปะตะน")
elif currency == "ะดะพะปะปะฐัั" and change == "ะตะฒัะพ":
    print(money, "ะดะพะปะปะฐัะพะฒ = ", money * 0.84, "ะตะฒัะพ")
21,135 | 485fe3458a3093a84464c2439c3c5d318809c5eb | from __future__ import unicode_literals
__version__ = "6.25.6"
|
21,136 | 11f2e8cd9896d87510e01714d39433666f8d1bba | import tushare as ts
# Fetch K-line (candlestick) data for stock 300448 and print it.
aa = ts.get_k_data('300448')
# BUG FIX: the Python-2 print statement (`print aa`) is a SyntaxError on
# Python 3; the function form works on both 2 and 3.
print(aa)
21,137 | b57b922a5a988e88be6831e99812c177a62bf0e9 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Tests for DBObject.
"""
__author__ = 'andy@emailscrubbed.com (Andy Kimball)'
import json
import mock
import time
import unittest
from cStringIO import StringIO
from functools import partial
from tornado import httpclient
from viewfinder.backend.base.testing import MockAsyncHTTPClient
from viewfinder.backend.db import dynamodb_client, vf_schema
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.post import Post
from viewfinder.backend.db.test.base_test import DBBaseTestCase
class DBObjectTestCase(DBBaseTestCase):
@unittest.skip("needs aws credentials")
def testRangeQuery(self):
"""Test DBRangeObject.RangeQuery."""
def _MakeResponse(max_index, request):
# Enforce maximum limit of 2.
request_dict = json.loads(request.body)
limit = min(request_dict.get('Limit', 2), 2)
is_count = request_dict.get('Count')
if 'ExclusiveStartKey' in request_dict:
start_index = int(request_dict['ExclusiveStartKey']['RangeKeyElement']['S']['S'][-1]) + 1
else:
start_index = 0
count = min(max_index - start_index, limit)
items = []
for i in xrange(start_index, start_index + count):
items.append({'ei': {'S': 'e0'},
'sk': {'S': 'p%d' % i}})
response_dict = {'Count': count,
'ConsumedCapacityUnits': 0.5}
if not is_count:
response_dict['Items'] = items
if start_index + count < max_index:
response_dict['LastEvaluatedKey'] = {'HashKeyElement': {'S': items[-1]['ei']},
'RangeKeyElement': {'S': items[-1]['sk']}}
return httpclient.HTTPResponse(request, 200,
headers={'Content-Type': 'application/json'},
buffer=StringIO(json.dumps(response_dict)))
# Get session token from Amazon (no need to mock that).
client = dynamodb_client.DynamoDBClient(schema=vf_schema.SCHEMA)
self._RunAsync(client.GetItem, vf_schema.TEST_RENAME, DBKey('1', 1), attributes=None, must_exist=False)
with mock.patch('tornado.httpclient.AsyncHTTPClient', MockAsyncHTTPClient()) as mock_client:
mock_client.map(r'https://dynamodb.us-east-1.amazonaws.com', partial(_MakeResponse, 5))
# Limit = None.
posts = self._RunAsync(Post.RangeQuery, client, 'e0', None, None, None)
self.assertEqual(len(posts), 2)
# Limit = 2.
posts = self._RunAsync(Post.RangeQuery, client, 'e0', None, 2, None)
self.assertEqual(len(posts), 2)
# Limit = 5.
posts = self._RunAsync(Post.RangeQuery, client, 'e0', None, 5, None)
self.assertEqual(len(posts), 5)
# Limit = 7.
posts = self._RunAsync(Post.RangeQuery, client, 'e0', None, 7, None)
self.assertEqual(len(posts), 5)
# Limit = None, count = True.
count = self._RunAsync(Post.RangeQuery, client, 'e0', None, None, None, count=True)
self.assertEqual(count, 2)
# Limit = 2, count = True.
count = self._RunAsync(Post.RangeQuery, client, 'e0', None, 2, None, count=True)
self.assertEqual(count, 2)
# Limit = 5, count = True.
count = self._RunAsync(Post.RangeQuery, client, 'e0', None, 5, None, count=True)
self.assertEqual(count, 5)
# Limit = 7, count = True.
count = self._RunAsync(Post.RangeQuery, client, 'e0', None, 7, None, count=True)
self.assertEqual(count, 5)
def testBatchQuery(self):
"""Test DBObject.BatchQuery."""
# Create some data to query.
keys = []
for i in xrange(3):
photo_id = Photo.ConstructPhotoId(time.time(), self._mobile_dev.device_id, 1)
episode_id = Episode.ConstructEpisodeId(time.time(), self._mobile_dev.device_id, 1)
ph_dict = {'photo_id': photo_id,
'user_id': self._user.user_id,
'episode_id': episode_id}
self._RunAsync(Photo.CreateNew, self._client, **ph_dict)
keys.append(DBKey(photo_id, None))
# Add a key that will not be found.
keys.append(DBKey('unk-photo', None))
photos = self._RunAsync(Photo.BatchQuery, self._client, keys, None, must_exist=False)
self.assertEqual(len(photos), 4)
for i in xrange(3):
self.assertEqual(photos[i].GetKey(), keys[i])
self.assertIsNone(photos[3])
|
21,138 | 1e3f072e49da39cf3b9e4482258342b8a147217e | """Allow superusers to impersonate other users as a debugging aid."""
from django import http
from portfoliyo import model
SESSION_KEY = '_impersonate_user_id'
class ImpersonationMiddleware(object):
    """
    Allow superusers to impersonate other users as a debug aid.

    Append ?impersonate=email@example.com to begin impersonating a user. The
    impersonation will persist in your session until you append
    ?impersonate=stop to a URL.
    """
    def process_request(self, request):
        # Default: not impersonating; only superusers may ever impersonate.
        request.impersonating = False
        if not request.user.is_superuser:
            return None
        if 'impersonate' in request.GET:
            email = request.GET['impersonate']
            if email == 'stop':
                # End an active impersonation and continue as the real user.
                if SESSION_KEY in request.session:
                    del request.session[SESSION_KEY]
                return None
            try:
                impersonate = model.User.objects.get(email=email)
            except model.User.DoesNotExist:
                return http.HttpResponseBadRequest(
                    "Cannot impersonate %s; user not found." % email)
            # Persist the target user's pk so impersonation survives requests.
            request.session[SESSION_KEY] = impersonate.pk
        elif SESSION_KEY in request.session:
            # Resume an impersonation previously stored in the session.
            try:
                impersonate = model.User.objects.get(
                    pk=request.session[SESSION_KEY])
            except model.User.DoesNotExist:
                # Stale session entry (user was deleted); drop it.
                del request.session[SESSION_KEY]
                return None
        else:
            return None
        # Swap in the impersonated user; keep the real one on request.real_user.
        request.real_user = request.user
        request.impersonating = True
        request.user = impersonate
|
21,139 | 50bc6e8d0bf345c9464b5e359fe542450cedd01e | from speechrecproj.data import *
signals = tf.placeholder(tf.float32, [None, 16000])
def main():
sample_manager = SamplesManager('data')
print(len(sample_manager.files_labels))
print(sample_manager.files_labels[0])
print(Label.all_labels)
sample_manager.files_labels[0].get_wav()
tfreader = TFRecordReader(filename='data/train.tfrecord', validation_set_size=6000, batch_size=600)
wavs, labels = tfreader.next_training_batch()
speechrecproj.experiment.hyper_parameter_search.hyper_parameter_search()
# with tf.Session() as sess:
# result = sess.run(wavs)
# print(result)
# with tf.Session() as sess:
# result = sess.run(wavs)
# print(result)
# experiment.hyper_parameter_search.hyper_parameter_search(trainset, valset)
if __name__ == '__main__':
main()
|
21,140 | 2fb899f8821be45225bde4893b5d761818e573bd | # CANTO Kevin 15608801
avec = open("avec.txt", "w")
sans = open("sans.txt", "w")
for mot in open("mots.txt") :
if "s" in mot :
avec.write(mot)
else :
sans.write(mot)
avec.close()
sans.close()
for mot in open("avec.txt") :
print mot,
for mot in open("sans.txt") :
print mot,
|
21,141 | 1ad5a82af79df065fd92a5b2b4be0d476cb20c28 | # ๅตๅฅๅฝๆฐ
def func():
x = 1
y = 2
m= 3
n = 4
def sum(a, b): # ๅ
้จๅฝๆฐ
return a + b
def sub(a, b): # ๅ
้จๅฝๆฐ
return a - b
return sum(x, y) * sub(m, n)
print (func())
|
21,142 | 31d7e63b1b8e6c68a977db4b65e91d59a42cee67 | from OWDTestToolkit.global_imports import *
class main(GaiaTestCase):
def import_GmailLogin(self, p_name, p_pass, p_clickSignIn=True):
#
# Presses the Settings button, then Gmail, then logs in using
# p_name and p_pass (to begin the process of importing contacts).
# <br>
# If p_clickSignIn is set to True then this method will also click
# the Sign in button (defaults to true).
# <br>
# Returns False if the login failed, else True.
#
self.UTILS.logResult("info", "Logging in with '%s'/'%s'." % (p_name, p_pass))
x = self.UTILS.getElement(DOM.Contacts.settings_button, "Settings button")
x.tap()
#
# Press the Gmail button.
#
x = self.UTILS.getElement(DOM.Contacts.gmail_button, "Gmail button")
x.tap()
#
# Sometimes the device remembers your login from before (even if the device is
# reset and all data cleared), so check for that.
#
self.marionette.switch_to_frame()
try:
self.wait_for_element_present("xpath", "//iframe[contains(@%s, '%s')]" % \
(DOM.Contacts.gmail_frame[0], DOM.Contacts.gmail_frame[1]),
timeout=5)
#
# Switch to the gmail login frame.
#
self.UTILS.switchToFrame(*DOM.Contacts.gmail_frame)
time.sleep(2)
self.UTILS.waitForNotElements(DOM.Contacts.import_throbber, "Animated 'loading' indicator")
#
# PERMISSIONS (sometimes appears).
# Seems to happen a few times, so loop through 5 just in case ...
#
for i in range(1,5):
try:
self.wait_for_element_displayed(*DOM.Contacts.gmail_permission_accept, timeout=2)
x = self.marionette.find_element(*DOM.Contacts.gmail_permission_accept)
x.tap()
time.sleep(2)
self.UTILS.waitForNotElements(DOM.Contacts.import_throbber, "Animated 'loading' indicator")
except:
break
#
# Send the login information.
#
x = self.UTILS.getElement(DOM.Contacts.gmail_username, "Email field")
x.send_keys(p_name)
x = self.UTILS.getElement(DOM.Contacts.gmail_password, "Password field")
x.send_keys(p_pass)
if p_clickSignIn:
x = self.UTILS.getElement(DOM.Contacts.gmail_signIn_button, "Sign In button")
x.tap()
#
# Check to see if sigin failed. If it did then stay here.
#
try:
self.wait_for_element_displayed(*DOM.Contacts.gmail_login_error_msg, timeout=2)
x = self.UTILS.screenShotOnErr()
self.UTILS.logResult("info", "<b>Login failed!</b> Screenshot and details:", x)
return False
except:
pass
else:
return True
except:
pass
time.sleep(5)
#
# Journey back to the import iframe.
#
self.UTILS.switchToFrame(*DOM.Contacts.frame_locator)
self.wait_for_element_present("xpath","//iframe[contains(@%s, '%s')]" % \
(DOM.Contacts.gmail_import_frame[0],DOM.Contacts.gmail_import_frame[1]),
timeout=30)
self.UTILS.switchToFrame(*DOM.Contacts.gmail_import_frame, p_viaRootFrame=False)
self.UTILS.waitForElements(DOM.Contacts.import_conts_list, "Contacts list", False, 2)
return True |
21,143 | a6aecc1f49c356825293290513a59e4e8a79399b | import requests
from bs4 import BeautifulSoup
# Fetch the Taiwanese e-invoice lottery page and print each winning-number span.
response = requests.get('http://invoice.etax.nat.gov.tw')
page = BeautifulSoup(response.text, "html.parser")
winning_spans = page.find_all('span', {'class': 't18Red'})
for span in winning_spans:
    print(span.string)
21,144 | 40cf277cc3a89eece20f14696c5a9edb3f354eac | ## Tuple's ##
# 1) Define a Tuple
# 2) Indexing in Tuple's
# 3) Difference between the List and Tuple
# 1) Define a Tuple
prime_numbers = (2,3,5,7,11)
type(prime_numbers)
perfect_squares = [1,4,9,16,25,36]
type(perfect_squares)
len(prime_numbers)
len(perfect_squares)
my_tuple = ("Hieee", 100, 12.47)
my_tuple
type(my_tuple)
# 2) Indexing in Tuple's
my_tuple[0]
my_tuple[1]
my_tuple[0:2]
my_tuple[-1]
my_tuple.count(100)
# 3) Difference between the List and Tuple
l = ["a", "b", "c", "d", "e"]
t= ("a", "b", "c", "d", "e")
type(l)
type(t)
l[0] = "New Element"
l
t[0] = "New Element" # tuple is immutable sequence of objects
|
21,145 | 84b3f699b49f472f6192ff398b2804ebb83eb41e | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 18 15:20:42 2018
Author: Qie He
"""
import numpy as np
def int2bin(k, n):
    """Return the n-bit binary expansion of a non-negative integer k.

    The result is a numpy int array of length n, most significant bit first.
    Bug fix: the original halved k with int(k/2), which goes through float
    and silently corrupts the expansion for k >= 2**53; use floor division.
    NOTE(review): as before, k >= 2**n wraps into negative indices — callers
    here only pass k < 2**n.
    """
    binary_expansion = np.zeros(n, dtype=int)
    position = n - 1
    while k > 0:
        if k % 2 == 1:
            binary_expansion[position] = 1
        k //= 2  # exact integer halving (was int(k/2))
        position -= 1
    return binary_expansion
def getx(v, lb, ub, i, B):
    """Build a candidate solution with every variable but one pinned at a bound.

    For each j != i, x[j] is lb[j] when v[j] == 0 and ub[j] when v[j] == 1;
    x[i] absorbs the remainder so the components sum to B.  Returns the vector
    when x[i] lands inside [lb[i], ub[i]], otherwise an empty array.
    """
    x = lb + (ub - lb) * v
    # The free coordinate takes whatever is left of the budget B.
    others_total = x.sum() - x[i]
    x[i] = B - others_total
    if lb[i] <= x[i] <= ub[i]:
        return x
    return np.array([])
def max_sum_xlogx(n, B, lb, ub):
    """
    Find the optimal solution of max x1*log(x1) + ... + xn*log(xn)
    subject to constraints x1 + ... xn = B, lb_i <= xi <= ub_i.

    Parameters
    ----------
    n: int
        Number of variables.
    B: float
        Total number of resources.
    lb: array of floats
        Lower bounds of variables.
    ub: array of floats
        Upper bounds of variables.

    Returns
    -------
    opt_obj: float
        Optimal objective value.
    opt_sol: an array of floats
        Optimal solution.
    """
    # Initialize the optimal solution and optimal objective
    # -n/e is a valid lower bound: each term x*log(x) has minimum -1/e.
    opt_obj = - n/np.e
    opt_sol = np.array([])
    # First select a variable whose value may not be at bound, indexed by idx-var_interior
    # NOTE(review): relies on at most one variable being strictly interior at
    # the optimum of this box-constrained problem -- confirm derivation.
    for idx_var_interior in range(n):
        # Enumerate all 2^(n-1) lower/upper assignments of the other variables.
        for idx in range(pow(2, n-1)):
            idx_binary_expansion = int2bin(idx, n-1)
            # Insert element 0 into position idx_var_interior
            idx_binary_expansion = np.insert(idx_binary_expansion, idx_var_interior, 0)
            # Compute the solution x with all (but one) variables at bounds
            x = getx(idx_binary_expansion, lb, ub, idx_var_interior, B)
            if x.size > 0:
                # Feasible candidate: evaluate sum x*log(x) and keep the best.
                obj = np.multiply(x, np.log(x)).sum()
                if obj >= opt_obj:
                    opt_obj = obj
                    opt_sol = x
    return [opt_obj, opt_sol]
# Additional test data
# lowerbound = np.array([0.5, 0.47, 0.45, 0.43, 0.41, 0.39, 0.37])
# upperbound = np.array([0.66, 0.70, 0.71, 0.73, 0.84, 0.88, 0.91])
# res_bound = 4.2
#num_var = len(lowerbound)
#num_var = 10
#a1 = np.random.rand(num_var)
#a2 = np.random.rand(num_var)
#lowerbound = np.minimum(a1, a2)
#upperbound = np.maximum(a1, a2)
#res_bound = lowerbound.sum() + np.random.rand()*(upperbound.sum() - lowerbound.sum()) # total resources available
#
#[opt_obj, opt_sol] = max_sum_xlogx(num_var, res_bound, lowerbound, upperbound)
#print("The optimal solution is:\n", opt_sol)
#print("The optimal objective is:\n", opt_obj) |
21,146 | daab472149a7f20fca9b1598a735a508a61afdc6 | from datetime import date
# Read a date (year, month, day) from stdin, one component per line.
Y = int(input())
M = int(input())
D = int(input())
f_date = date(Y, M, D)
# Reference date is hard-coded; delta is the span from the input date to it.
l_date = date(2021, 9, 13)
delta = l_date - f_date
print(delta)
21,147 | 76dd1e01ee410b0656d578ce94203f1b1cd92d8c | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
# @Time : 2019/9/4 13:38
# @Author : zhaoss
# @FileName: multi_figure.py
# @Email : zhaoshaoshuai@hnnydsj.com
Description:
Parameters
"""
import os
import sys
import glob
import time
import fnmatch
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Demo: plot y = x**2 on a labelled figure (y1 is computed but unused)."""
    fig1 = plt.figure('test')
    # Define the x axis
    x = np.linspace(-5, 5, 200)
    # Generate the first y series (not plotted)
    y1 = x * 2 + 1
    # Generate the second y series
    y2 = x ** 2
    plt.xlabel('This is x axis')
    plt.ylabel('This is y axis')
    plt.plot(x, y2)
    plt.show()
    return None
if __name__ == '__main__':
    # Bug fix: the end time was captured *before* main() ran, so the printed
    # duration was always ~0.  Also, time.clock() was removed in Python 3.8;
    # time.perf_counter() is the supported monotonic wall-clock timer.
    start_time = time.perf_counter()
    main()
    end_time = time.perf_counter()
    print("time: %.4f secs." % (end_time - start_time))
|
21,148 | 4a0f76ca1172616c63c477c8c50750b41b67fd7e | x=int(input("enter"))
y=int(input("enter"))
z=x+y
k=z+80
print(float(k)) # type casting#float
print(z)
print(k)
print("thank for first success python program")
input("press enter to exit")
|
21,149 | 0ae8571268b23a764e66216b142c9778a2f084c9 | class Solution(object):
def gcd(self, a, b):
if a == 0:
return b
else:
return self.gcd(b % a, a)
def nthMagicalNumber(self, N, A, B):
gcd = self.gcd(A, B)
lcm, lower, higher = (A*B)/gcd, min(A, B), max(A, B)
mod = 10**9 + 7
left, right = 1, N
while left <= right:
mid = (left + right)/2
min_val = lower*mid
alt_val = int(min_val/higher)*higher
x1, y1, z1 = int(min_val/A), int(min_val/B), int(min_val/lcm)
x2, y2, z2 = int(alt_val/A), int(alt_val/B), int(alt_val/lcm)
a, b = x1 + y1 - z1, x2 + y2 - z2
if a == N or b == N:
return min_val%mod if a == N else alt_val%mod
elif a < N:
left = mid + 1
else:
right = mid - 1
min_val = lower*left
alt_val = int(min_val/higher)*higher
return min(min_val, alt_val)%mod |
21,150 | 441cc748a92567067576bb583b4bb64c2f99af8b | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 12:49:47 2020
!!!!!!IMPORTANT!!!!!!!!
Each part seperated from each other. Please run cell by cell
@author: Abdullah Hamza ลahin
"""
import numpy as np
from matplotlib import pyplot as plt
def calculateEmpiricalRisk(w0, w, x, y):
    """Mean squared error of the linear model y_hat = w*x + w0 over (x, y)."""
    squared_errors = [(y[i] - (w * x[i] + w0)) ** 2 for i in range(len(y))]
    return sum(squared_errors) / float(len(y))
def gradientDescent(w0, w, X, Y, learningRate):
    """One full-batch gradient-descent step for the model y_hat = w*x + w0.

    X and Y are (m, 1) numpy arrays of samples; returns [new_w0, new_w].
    """
    m = len(Y)
    grad_w0 = 0
    grad_w = 0
    for sample_x, sample_y in zip(X, Y):
        x = sample_x.reshape(1, 1)
        y = sample_y.reshape(1, 1)
        residual = y - ((w * x) + w0)
        # d/dw0 and d/dw of the mean squared error (factor 2/m each).
        grad_w0 += -(2 / m) * residual
        grad_w += -(2 / m) * x * residual
    return [w0 - (learningRate * grad_w0), w - (learningRate * grad_w)]
X = np.array([31,33,31,49,53,69,101,99,143,132,109])
X = X.reshape(X.shape[0],1)
Y = np.array([705,540,650,840,890,850,1200,1150,1700,900,1550])
Y = Y.reshape(Y.shape[0],1)
#%%
#Step size of 10^(-5)
iteration = 40
lr = 1*10**(-5)
W = np.array([100])
W0 = 100
risk = []
for i in range(iteration):
W0, W = gradientDescent(W0, W, X, Y, lr)
risk.append(calculateEmpiricalRisk(W0, W, X, Y)[0])
if i % 5 == 0:
plt.plot(X, Y, "ro")
plt.axis([0, 160, 0, 1800])
plt.plot(X, np.dot(X,W)+W0)
plt.show()
plt.close()
plt.plot(range(iteration), risk)
#%%
#Step size of 10^(-4)
iteration = 15
lr = 1*10**(-4)
W = np.array([100])
W0 = 100
risk = []
for i in range(iteration):
W0, W = gradientDescent(W0, W, X, Y, lr)
risk.append(calculateEmpiricalRisk(W0, W, X, Y)[0])
if i % 1 == 0:
plt.plot(X, Y, "ro")
plt.axis([0, 160, 0, 1800])
plt.plot(X, np.dot(X,W)+W0)
plt.show()
plt.close()
print("Empirical Risk for iterariton ",i,":",calculateEmpiricalRisk(W0, W, X, Y))
plt.plot(range(iteration), risk)
|
21,151 | 9ea20f6c82956b473b8bd9fb7619a1b50bd8de74 | import unittest
from start_screens import img_gen as img
from . import map_generator as gen
from . import spread_players as s
class MapCase(unittest.TestCase):
def test_map_overview_accuracy(self):
""" Test if image of map overview properly represents generated map matrix. """
params = [10000, 5, 10, 15]
height = 100
width = 200
world_map = gen.generate_map(height=height, width=width, params=params)
image = img.get_map_overview(world_map)
pixels = image.load()
for x in range(width):
for y in range(height):
color = tuple(img.get_color(world_map[x][y]))
self.assertEqual(pixels[x, y], color)
def test_spreading_players(self):
""" Test if players are properly spread across. """
params = [3, 4, 11, 20]
w = gen.generate_map(height=50, width=80, params=params)
coords = s.spread_across_the_map(w, 4)
for c in coords:
x = c[0]
y = c[1]
self.assertNotEqual(w[x][y], 0)
self.assertNotEqual(w[x][y], 3) # uncomment the block to see an overview
# w[x][y] = 4
# image = img.get_map_overview(w)
# image2 = img.get_resized_map_overview(image, 781, 521)
# image2.show()
if __name__ == '__main__':
unittest.main()
|
21,152 | 65fb8a0515511524c7209c51353627eb99899a2d | from __future__ import print_function
import os
import os.path
import sys
from pipeline import Pipeline, MultiPipelineExecutor
# - Pipelines:
# - deepdetect + basicdetect + facerecognize
# - deepdetect only
# - deepdetect + basicdetect
# - basicdetect only
# - basicdetect + facerecognize
# - facetrain
#
# User inputs:
# - for detection/recognition:
# - Input directory containing photos and videos
# - Output directory for reports
# - Pipeline file
#
# - for deep detection training
# - TODO
#
# - for face recognition training
# - input images directory
# - size of training images
def detect(input_path, output_directory, pipeline_file):
# if input_path is just a single file, we don't need all the multicore
# setup.
if os.path.isfile(input_path):
pipeline = Pipeline(pipeline_file, os.path.dirname(input_path), output_directory)
pipeline.execute(input_path)
elif os.path.isdir(input_path):
multiexecutor = MultiPipelineExecutor()
multiexecutor.execute(pipeline_file, input_path, output_directory)
else:
print("Input is not an image file or directory:", input_path)
if __name__ == '__main__':
detect(sys.argv[1], sys.argv[2], sys.argv[3])
|
21,153 | 331e46efe23339d12fb209cd9c856502301695fa | import database
from pages import common
from queries import battle_q, city_q
from functions import battle_f
from classes import battle
from functions import path_f
import math
page_data = {
"Admin": True,
"Redirect": "list_battles",
}
def main(cursor):
name = common.get_val("name", "")
campaign = int(common.get_val("campaign", 0))
start = int(common.get_val("start", 0))
duration = int(common.get_val("duration", 0))
btype = int(common.get_val("type", 0))
location = common.get_val("location", "")
city = int(common.get_val("city", 0))
# Get location
result = battle.battle_coords.search(location)
if result != None:
x = int(result.groups()[0])
y = int(result.groups()[1])
else:
if city > 0:
the_city = city_q.get_one_city(cursor, city)
x, y = the_city.x, the_city.y
else:
x, y = 0, 0
# If no name is supplied then it may be from a city
if name == '':
if city > 0:
name = the_city.name
else:
page_data['Redirect'] = 'setup_campaign&campaign={0}'.format(campaign)
return ""
# If there is no last battle then we can't use auto pather to work out start time
last_battle = battle_q.get_last_battle_from_campaign(cursor, campaign)
if start < 0 and last_battle == None:
start = 0
# If negative start time then work out travel time
if start < 0:
waypoints = ((last_battle.x, last_battle.y), (x, y))
b_path = path_f.path(cursor, waypoints, move_speed="Marching", move_type="Medium foot")
start = math.ceil(b_path.time_cost) + last_battle.start + last_battle.duration
database.query(cursor,
battle_f.new_battle(name, campaign, start, duration, x, y, btype=btype, city=city))
# page_data['Redirect'] = 'list_battles&campaign={0}'.format(campaign)
page_data['Redirect'] = 'setup_campaign&campaign={0}'.format(campaign)
return "" |
21,154 | ea9548fa45d6126073524a4e7df1787f9a9c6e0a | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 06 15:41:58 2011
@author: David Schuster
"""
import numpy as np
import math as math
import matplotlib.pyplot as plt
import scipy
import scipy.fftpack
import cmath
import numpy
from scipy import optimize
# def set_fit_plotting(pkg='matplotlib'):
# global plt
# plt={'guiqwt':plt1,'matplotlib':plt2}[pkg]
def argselectdomain(xdata, domain):
    """Return the (start, stop) indices in sorted xdata bracketing domain=(lo, hi)."""
    start, stop = np.searchsorted(xdata, domain)
    return (start, stop)
def selectdomain(xdata, ydata, domain):
    """Slice xdata/ydata down to the index range bracketing domain=(lo, hi)."""
    start, stop = np.searchsorted(xdata, domain)
    return xdata[start:stop], ydata[start:stop]
def zipsort(xdata, ydata):
    """Sort xdata ascending and reorder ydata's rows with the same permutation."""
    order = np.argsort(xdata)
    return np.take(xdata, order), np.take(ydata, order, axis=0)
"""Wraplter around scipy.optimize.leastsq"""
def fitgeneral(xdata, ydata, fitfunc, fitparams, domain=None, showfit=False, showstartfit=False, showdata=True,
               label="", mark_data='bo', mark_fit='r-'):
    """Least-squares fit of fitfunc(p, x) to (xdata, ydata) via optimize.leastsq.

    fitparams is the initial guess; domain optionally restricts the fit to an
    (xmin, xmax) window; the show* flags control matplotlib plotting of data,
    start guess and best fit.  Returns the best-fit parameter array.
    """
    # sort data (leastsq itself doesn't care, but plotting/domain-select do)
    order = np.argsort(xdata)
    xdata = xdata[order]
    ydata = ydata[order]
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    # print 'minimum', np.min(fitdatay)
    # ymin=np.min(fitdatay)
    # Residual vector; leastsq squares and sums it internally.
    errfunc = lambda p, x, y: (fitfunc(p,x) - y) #there shouldn't be **2 # Distance to the target function
    startparams=fitparams # Initial guess for the parameters
    bestfitparams, success = optimize.leastsq(errfunc, startparams[:], args=(fitdatax,fitdatay))
    if showfit:
        if showdata:
            plt.plot(fitdatax,fitdatay,mark_data,label=label+" data")
        if showstartfit:
            plt.plot(fitdatax,fitfunc(startparams,fitdatax),label=label+" startfit")
        plt.plot(fitdatax,fitfunc(bestfitparams,fitdatax),mark_fit,label=label+" fit")
        if label!='': plt.legend()
    # NOTE(review): err is the *signed* residual sum (not RMS) and is unused.
    err=math.fsum(errfunc(bestfitparams,fitdatax,fitdatay))
    #print 'the best fit has an RMS of {0}'.format(err)
    # plt.t
    # plt.figtext()
    return bestfitparams
def lorfunc(p, x):
    """Lorentzian with offset: p = [offset, amplitude, center, hwhm]."""
    offset, amplitude, center, hwhm = p[0], p[1], p[2], p[3]
    return offset + amplitude / (1 + (x - center) ** 2 / hwhm ** 2)
def fitlor(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label="",debug=False):
"""fit lorentzian:
returns [offset,amplitude,center,hwhm]"""
if domain is not None:
fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
else:
fitdatax=xdata
fitdatay=ydata
if fitparams is None:
fitparams=[0,0,0,0]
fitparams[0]=(fitdatay[0]+fitdatay[-1])/2.
fitparams[1]=max(fitdatay)-min(fitdatay)
fitparams[2]=fitdatax[np.argmax(fitdatay)]
fitparams[3]=(max(fitdatax)-min(fitdatax))/10.
if debug==True: print(fitparams)
p1 = fitgeneral(fitdatax, fitdatay, lorfunc, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
label=label)
p1[3]=abs(p1[3])
return p1
def harmfunc(p, x):
    """Driven-oscillator response: p = [offset, strength, resonance, damping]."""
    offset, strength, resonance, damping = p[0], p[1], p[2], p[3]
    denom = ((x ** 2 - resonance ** 2) ** 2 + damping ** 2 * x ** 2) ** 0.5
    return offset + strength / denom
def fitharm(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label="",debug=False):
"""fit lorentzian:
returns [offset,amplitude,center,hwhm]"""
if domain is not None:
fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
else:
fitdatax=xdata
fitdatay=ydata
if fitparams is None:
fitparams=[0,0,0,0]
fitparams[0]=(fitdatay[0]+fitdatay[-1])/2.
fitparams[3] = (max(fitdatax)-min(fitdatax))/50.
fitparams[2]=fitdatax[np.argmax(fitdatay)]
fitparams[1]=(max(fitdatay)-min(fitdatay))*fitparams[3]*fitparams[2]*4*(3.14)**2
# fitparams[3]=(max(fitdatax)-min(fitdatax))/50.
# fitparams[3] = 2.88606749e+05
if debug==True: print(fitparams)
p1 = fitgeneral(fitdatax,fitdatay,harmfunc,fitparams,domain=None,showfit=showfit,showstartfit=showstartfit,label=label)
p1[3]=abs(p1[3])
p1[2] = abs(p1[2])
return p1
def print_cavity_Q(fit):
    """Print and return the loaded Q (center / (2*hwhm)) of a fitlor result."""
    loaded_q = fit[2] / 2 / fit[3]
    print(loaded_q)
    return loaded_q
def gaussfunc(p, x):
    """Gaussian with offset: p = [offset, amplitude, center, sigma]."""
    offset, amplitude, center, sigma = p[0], p[1], p[2], p[3]
    exponent = -1. / 2. * (x - center) ** 2 / sigma ** 2
    return offset + amplitude * math.e ** exponent
def expfunc(p, x):
    """Exponential decay: p = [offset, amplitude, t0, tau]."""
    offset, amplitude, t0, tau = p[0], p[1], p[2], p[3]
    return offset + amplitude * math.e ** (-(x - t0) / tau)
def pulse_errfunc(p, x):
    """Pulse-error accumulation: p = [baseline, error_per_pulse]."""
    baseline, error_rate = p[0], p[1]
    return baseline + 0.5 * (1 - (1 - error_rate) ** x)
def fitexp(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
"""Fit exponential decay (p[0]+p[1]*exp(-(x-p[2])/p[3]))"""
if domain is not None:
fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
else:
fitdatax=xdata
fitdatay=ydata
if fitparams is None:
fitparams=[0.,0.,0.,0.]
fitparams[0]=fitdatay[-1]
fitparams[1]=fitdatay[0]-fitdatay[-1]
fitparams[1]=fitdatay[0]-fitdatay[-1]
fitparams[2]=fitdatax[0]
fitparams[3]=(fitdatax[-1]-fitdatax[0])/5.
#print fitparams
p1 = fitgeneral(fitdatax, fitdatay, expfunc, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
label=label)
return p1
# Test fit code
def fitpulse_err(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
"""Fit pulse err decay (p[0]+p[1]*(1-p[2])^x)"""
if domain is not None:
fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
else:
fitdatax=xdata
fitdatay=ydata
if fitparams is None:
fitparams=[0.,0.]
fitparams[0]=fitdatay[-1]
fitparams[1]=fitdatay[0]-fitdatay[-1]
fitparams[1]=fitdatay[0]-fitdatay[-1]
#print fitparams
p1 = fitgeneral(fitdatax, fitdatay, pulse_errfunc, fitparams, domain=None, showfit=showfit,
showstartfit=showstartfit, label=label)
return p1
def gaussfunc_nooffset(p, x):
    """Gaussian without offset: p = [amplitude, center, sigma]."""
    amplitude, center, sigma = p[0], p[1], p[2]
    exponent = -1. / 2. * (x - center) ** 2 / sigma ** 2
    return amplitude * math.e ** exponent
def fitgauss(xdata,ydata,fitparams=None,no_offset=False,domain=None,showfit=False,showstartfit=False,label=""):
"""
no_offset = True: p[1] exp(- (x-p[2])**2/p[3]**2/2)
no_offset = False: p[0]+p[1] exp(- (x-p[2])**2/p[3]**2/2)
"""
if domain is not None:
fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
else:
fitdatax=xdata
fitdatay=ydata
if fitparams is None:
fitparams=[0,0,0,0]
fitparams[0]=(fitdatay[0]+fitdatay[-1])/2.
fitparams[1]=max(fitdatay)-min(fitdatay)
fitparams[2]=fitdatax[np.argmax(fitdatay)]
fitparams[3]=(max(fitdatax)-min(fitdatax))/3.
if no_offset:
fitfunc = gaussfunc_nooffset
fitparams = fitparams[1:]
else:
fitfunc = gaussfunc
p1 = fitgeneral(fitdatax,fitdatay,fitfunc,fitparams,domain=None,showfit=showfit,showstartfit=showstartfit,label=label)
return p1
def decaysin(p, x):
    """Exponentially decaying sinusoid.

    p = [amplitude, frequency, phase_deg, decay_time, offset, t0].
    """
    amp, freq, phase_deg, tau, offset, t0 = p[0], p[1], p[2], p[3], p[4], p[5]
    oscillation = np.sin(2. * np.pi * freq * x + phase_deg * np.pi / 180.)
    envelope = np.e ** (-1. * (x - t0) / tau)
    return amp * oscillation * envelope + offset
def fitdecaysin(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, label=""):
    """Fit a decaying sinusoid p[0]*sin(2*pi*p[1]*x + p[2]*pi/180)*exp(-(x-x0)/p[3]) + p[4].

    When fitparams is None, initial guesses are derived from the FFT of ydata
    (dominant frequency and phase) plus the data's span and mean.  Returns the
    best-fit parameter array [amp, freq, phase_deg, tau, offset].
    """
    if domain is not None:
        fitdatax, fitdatay = selectdomain(xdata, ydata, domain)
    else:
        fitdatax = xdata
        fitdatay = ydata
    if fitparams is None:
        # Bug fix: scipy.fft is a module (not callable) in SciPy >= 1.4;
        # call the scipy.fftpack.fft function instead.
        FFT = scipy.fftpack.fft(fitdatay)
        fft_freqs = scipy.fftpack.fftfreq(len(fitdatay), fitdatax[1] - fitdatax[0])
        # Skip the DC/low-frequency bins when picking the dominant tone.
        max_ind = np.argmax(abs(FFT[4:int(len(fitdatay) / 2)])) + 4
        fft_val = FFT[max_ind]
        fitparams = [0, 0, 0, 0, 0]
        fitparams[4] = np.mean(fitdatay)
        fitparams[0] = (max(fitdatay) - min(fitdatay)) / 2.
        fitparams[1] = fft_freqs[max_ind]
        fitparams[2] = (cmath.phase(fft_val) - np.pi / 2.) * 180. / np.pi
        fitparams[3] = (max(fitdatax) - min(fitdatax))
    # The decay envelope is referenced to the first x point rather than a free t0.
    decaysin3 = lambda p, x: p[0] * np.sin(2. * np.pi * p[1] * x + p[2] * np.pi / 180.) * np.e ** (
        -1. * (x - fitdatax[0]) / p[3]) + p[4]
    return fitgeneral(fitdatax, fitdatay, decaysin3, fitparams, domain=None, showfit=showfit,
                      showstartfit=showstartfit, label=label)
def fitdecaydoublesin(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, label=""):
    """Fit two sinusoids sharing one exponential envelope:

    p[0]*(sin(2*pi*p[1]*x + p[2]*pi/180) + p[5]*sin(2*pi*p[6]*x + p[7]*pi/180))
        * exp(-(x - x0)/p[3]) + p[4]

    When fitparams is None the guesses come from the FFT of ydata; the second
    tone starts slightly detuned from the dominant one.
    """
    if domain is not None:
        fitdatax, fitdatay = selectdomain(xdata, ydata, domain)
    else:
        fitdatax = xdata
        fitdatay = ydata
    if fitparams is None:
        # Bug fixes: scipy.fft is a module (not callable) in SciPy >= 1.4,
        # and the float slice index len(...)/2. raises TypeError on Python 3.
        FFT = scipy.fftpack.fft(fitdatay)
        fft_freqs = scipy.fftpack.fftfreq(len(fitdatay), fitdatax[1] - fitdatax[0])
        max_ind = np.argmax(abs(FFT[4:int(len(fitdatay) / 2)])) + 4
        fft_val = FFT[max_ind]
        fitparams = [0, 0, 0, 0, 0, 0, 0, 0]
        fitparams[4] = np.mean(fitdatay)
        fitparams[0] = (max(fitdatay) - min(fitdatay)) / 2.
        fitparams[1] = fft_freqs[max_ind]
        fitparams[6] = fft_freqs[max_ind] - 0.001  # second tone: slightly detuned
        fitparams[2] = (cmath.phase(fft_val) - np.pi / 2.) * 180. / np.pi
        fitparams[3] = (max(fitdatax) - min(fitdatax))
        # NOTE(review): p[5] is the *relative* amplitude of the second tone;
        # its phase p[7] is left at 0 in the initial guess.
        fitparams[5] = fitparams[0]
    decaydoublesin3 = lambda p, x: p[0] * (np.sin(2. * np.pi * p[1] * x + p[2] * np.pi / 180.) + p[5] * np.sin(
        2. * np.pi * p[6] * x + p[7] * np.pi / 180.)) * np.e ** (-1. * (x - fitdatax[0]) / p[3]) + p[4]
    return fitgeneral(fitdatax, fitdatay, decaydoublesin3, fitparams, domain=None, showfit=showfit,
                      showstartfit=showstartfit, label=label)
def fitsin(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, label=""):
    """Fit a sine p[0]*sin(2*pi*p[1]*x + p[2]*pi/180) + p[3].

    When fitparams is None the initial amplitude/frequency/phase come from the
    FFT of ydata and the offset from its mean.  Returns the best-fit array
    [amp, freq, phase_deg, offset].
    """
    if domain is not None:
        fitdatax, fitdatay = selectdomain(xdata, ydata, domain)
    else:
        fitdatax = xdata
        fitdatay = ydata
    if fitparams is None:
        # Bug fixes: scipy.fft is a module (not callable) in SciPy >= 1.4,
        # and the float slice index len(...)/2. raises TypeError on Python 3.
        FFT = scipy.fftpack.fft(fitdatay)
        fft_freqs = scipy.fftpack.fftfreq(len(fitdatay), fitdatax[1] - fitdatax[0])
        max_ind = np.argmax(abs(FFT[4:int(len(fitdatay) / 2)])) + 4
        fft_val = FFT[max_ind]
        fitparams = [0, 0, 0, 0]
        fitparams[3] = np.mean(fitdatay)
        fitparams[0] = (max(fitdatay) - min(fitdatay)) / 2.
        fitparams[1] = fft_freqs[max_ind]
        fitparams[2] = (cmath.phase(fft_val) - np.pi / 2.) * 180. / np.pi
    sin2 = lambda p, x: p[0] * np.sin(2. * np.pi * p[1] * x + p[2] * np.pi / 180.) + p[3]
    return fitgeneral(fitdatax, fitdatay, sin2, fitparams, domain=None, showfit=showfit,
                      showstartfit=showstartfit, label=label)
def hangerfunc_old(p, x):
    """Symmetric hanger transmission; p = [f0, Q, S21Min, Tmax]."""
    f0, q, s21min, tmax = p[0], p[1], p[2], p[3]
    detuning_term = 4. * ((x - f0) * q / f0) ** 2.
    return (detuning_term + s21min ** 2.) / (1. + detuning_term) * tmax
def hangerqs_old(fitparams):
    """Convert a fithanger_old result [f0, Q, S21Min, Tmax] into (Qi, Qc)."""
    q, s21min = fitparams[1], fitparams[2]
    return abs(q / s21min), abs(q) / (1 - abs(s21min))
def fithanger_old (xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
"""Fit's Hanger Transmission (S21) data without taking into account asymmetry
returns p=[f0,Q,S21Min,Tmax]
"""
if domain is not None:
fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
else:
fitdatax=xdata
fitdatay=ydata
if fitparams is None:
fitparams=[0,0,0,0]
peakloc=np.argmin(fitdatay)
ymax=(fitdatay[0]+fitdatay[-1])/2.
fitparams[0]=fitdatax[peakloc]
fitparams[1]=abs(fitdatax[peakloc]/((max(fitdatax)-min(fitdatax))/3.))
if fitdatay[peakloc] > 0:
fitparams[2] = (fitdatay[peakloc] / ymax) ** 0.5
else:
fitparams[2] = 0.001
fitparams[3]=ymax
return fitgeneral(fitdatax, fitdatay, hangerfunc_old, fitparams, domain=None, showfit=showfit,
showstartfit=showstartfit, label=label)
def hangerfunc(p, x):
    """Asymmetric hanger transmission; p = [f0, Qi, Qc, df, scale]."""
    f0, Qi, Qc, df, scale = p
    a = (x - (f0 + df)) / (f0 + df)
    b = 2 * df / f0
    Q0 = 1. / (1. / Qi + 1. / Qc)
    numerator = -2. * Q0 * Qc + Qc ** 2. + Q0 ** 2. * (1. + Qc ** 2. * (2. * a + b) ** 2.)
    denominator = Qc ** 2 * (1. + 4. * Q0 ** 2. * a ** 2.)
    return scale * numerator / denominator
def hangerfunc_new(p, x):
    """Hanger transmission in dB with Qc fixed at 4000; p = [f0, Qi, df, scale]."""
    f0, Qi, df, scale = p
    a = (x - (f0 + df)) / (f0 + df)
    b = 2 * df / f0
    Qc = 4000.
    numerator = Qc ** 2. + Qi ** 2. * Qc ** 2. * (2. * a + b) ** 2.
    denominator = (Qc + Qi) ** 2 + 4. * Qi ** 2. * Qc ** 2. * a ** 2.
    return 10 * np.log10(scale * numerator / denominator)
def hangerfunc_new_withQc(p, x):
    """Hanger transmission in dB with Qc as a free parameter; p = [f0, Qi, Qc, df, scale]."""
    f0, Qi, Qc, df, scale = p
    a = (x - (f0 + df)) / (f0 + df)
    b = 2 * df / f0
    numerator = Qc ** 2. + Qi ** 2. * Qc ** 2. * (2. * a + b) ** 2.
    denominator = (Qc + Qi) ** 2 + 4. * Qi ** 2. * Qc ** 2. * a ** 2.
    return 10 * np.log10(scale * numerator / denominator)
def hangerfunctilt(p, x):
    """Hanger with an exponential baseline tilt; p = [f0, Qi, Qc, df, slope, offset].

    x must be iterable (the baseline is evaluated per point).
    """
    f0, Qi, Qc, df, slope, offset = p
    a = (x - (f0 + df)) / (f0 + df)
    b = 2 * df / f0
    Q0 = 1. / (1. / Qi + 1. / Qc)
    # Per-point exponential baseline (kept as a list, as the original did).
    baseline = [math.exp(slope * xi + offset) for xi in x]
    numerator = -2. * Q0 * Qc + Qc ** 2. + Q0 ** 2. * (1. + Qc ** 2. * (2. * a + b) ** 2.)
    denominator = Qc ** 2 * (1. + 4. * Q0 ** 2. * a ** 2.)
    return baseline * numerator / denominator
def fithanger_new(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, printresult=False,
                  label="", mark_data='bo', mark_fit='r-'):
    """Fit Hanger Transmission (S21) data taking into account asymmetry.
    Qc is assumed constant and is hard-coded inside hangerfunc_new, so only
    four parameters are fitted.
    returns p=[f0,Qi,df,scale]
    Uses hangerfunc_new.
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Heuristic seeds: resonance at the minimum, Q from 1/5 of the span.
        peakloc=np.argmin(fitdatay)
        ymax=(fitdatay[0]+fitdatay[-1])/2.
        ymin=fitdatay[peakloc]
        f0=fitdatax[peakloc]
        Q0=abs(fitdatax[peakloc]/((max(fitdatax)-min(fitdatax))/5.))
        scale= ymax-ymin
        Qi=2*Q0
        fitparams=[f0,abs(Qi),0.,scale]
    fitresult = fitgeneral(fitdatax, fitdatay, hangerfunc_new, fitparams, domain=None, showfit=showfit,
                           showstartfit=showstartfit, label=label, mark_data=mark_data, mark_fit=mark_fit)
    fitresult[1]=abs(fitresult[1])
    # Result layout is [f0, Qi, df, scale]; the previous printout mislabelled
    # fitresult[2] as "Qc" and fitresult[3] as "df".
    if printresult: print('-- Fit Result --\nf0: {0}\nQi: {1}\ndf: {2}\nscale: {3}'.format(fitresult[0], fitresult[1],
                                                                                          fitresult[2], fitresult[3]))
    return fitresult
def fithanger_new_withQc(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False,
                         printresult=False, label="", mark_data='bo', mark_fit='r-'):
    """Fit Hanger Transmission (S21) data taking into account asymmetry.
    use the same parameters as old one 'fithanger', but a different interpretation of the fit formula
    fitparams = [f0, Qi, Qc, df, scale] (seeded automatically when None)
    returns p=[f0,Qi,Qc,df,scale]
    Uses hangerfunc_new_withQc.
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Heuristic seeds: resonance at the minimum, Q from 1/5 of the span,
        # Qi twice the rough Q, Qc equal to it, amplitude from the dip depth.
        peakloc=np.argmin(fitdatay)
        ymax=(fitdatay[0]+fitdatay[-1])/2.
        ymin=fitdatay[peakloc]
        f0=fitdatax[peakloc]
        Q0=abs(fitdatax[peakloc]/((max(fitdatax)-min(fitdatax))/5.))
        scale= ymax-ymin
        Qi=2*Q0
        Qc=Q0
        fitparams=[f0,abs(Qi),abs(Qc),0.,scale]
    fitresult = fitgeneral(fitdatax, fitdatay, hangerfunc_new_withQc, fitparams, domain=None, showfit=showfit,
                           showstartfit=showstartfit, label=label, mark_data=mark_data, mark_fit=mark_fit)
    # Q values are only meaningful positive; fold sign ambiguity of the fit.
    fitresult[1]=abs(fitresult[1])
    fitresult[2]=abs(fitresult[2])
    if printresult: print('-- Fit Result --\nf0: {0}\nQi: {1}\nQc: {2}\ndf: {3}\nscale: {4}'.format(fitresult[0],
                                                                                                   fitresult[1],
                                                                                                   fitresult[2],
                                                                                                   fitresult[3],
                                                                                                   fitresult[4]))
    return fitresult
def fithanger(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, printresult=False, label="",
              mark_data='bo', mark_fit='r-'):
    """Fit Hanger Transmission (S21) data taking into account asymmetry.
    fitparams = [f0, Qi, Qc, df, scale] (seeded automatically when None)
    returns p=[f0,Qi,Qc,df,scale]
    Uses hangerfunc.
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Heuristic seeds; Qi/Qc are derived from the baseline level so that
        # the modelled dip depth roughly matches the data.
        peakloc=np.argmin(fitdatay)
        ymax=(fitdatay[0]+fitdatay[-1])/2.
        ymin=fitdatay[peakloc]
        f0=fitdatax[peakloc]
        Q0=abs(fitdatax[peakloc]/((max(fitdatax)-min(fitdatax))/3.))
        scale= ymax
        Qi=Q0*(1.+ymax)
        Qc=Qi/(ymax)
        fitparams=[f0,abs(Qi),abs(Qc),0.,scale]
    fitresult = fitgeneral(fitdatax, fitdatay, hangerfunc, fitparams, domain=None, showfit=showfit,
                           showstartfit=showstartfit, label=label, mark_data=mark_data, mark_fit=mark_fit)
    if printresult: print('-- Fit Result --\nf0: {0}\nQi: {1}\nQc: {2}\ndf: {3}\nScale: {4}'.format(fitresult[0],
                                                                                                    fitresult[1],
                                                                                                    fitresult[2],
                                                                                                    fitresult[3],
                                                                                                    fitresult[4]))
    return fitresult
def fithangertilt(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, printresult=False,
                  label="", mark_data='bo', mark_fit='r-'):
    """Fit Hanger Transmission (S21) data with an exponentially tilted baseline.
    fitparams = [f0, Qi, Qc, df, slope, offset] (seeded automatically when None)
    returns p=[f0, Qi, Qc, df, slope, offset]
    Uses hangerfunctilt instead of hangerfunc.
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        peakloc=np.argmin(fitdatay)
        ymax=(fitdatay[0]+fitdatay[-1])/2.
        ymin=fitdatay[peakloc]
        f0=fitdatax[peakloc]
        Q0=abs(fitdatax[peakloc]/((max(fitdatax)-min(fitdatax))/3.))
        Qi=Q0*(1.+ymax)
        Qc=Qi/ymax
        # NOTE(review): `scale` is computed but not passed to the model --
        # hangerfunctilt takes six parameters and has no amplitude term.
        scale= ymax-ymin
        slope = (fitdatay[-1]-fitdatay[0])/(fitdatax[-1]-fitdatax[0])
        offset= ymin-slope*f0
        fitparams=[f0,Qi,Qc,0.0001,slope, offset]
    fitresult = fitgeneral(fitdatax, fitdatay, hangerfunctilt, fitparams, domain=None, showfit=showfit,
                           showstartfit=showstartfit, label=label)
    if printresult: print('-- Fit Result --\nf0: {0}\nQi: {1}\nQc: {2}\ndf: {3}\nslope: {4}\noffset: {5}\n'.format(
        fitresult[0], fitresult[1], fitresult[2], fitresult[3], fitresult[4], fitresult[5]))
    return fitresult
def polynomial(p,x):
    """Degree-9 polynomial about the centre p[-1].

    Evaluates sum(p[k] * (x - p[-1])**k for k in 0..9); p[-1] is the centre.
    NOTE: this name is rebound by an identical definition later in the module.
    """
    dx = x - p[-1]
    return sum(p[k] * dx ** k for k in range(10))
def SNT_func(p, v):
    """Shot Noise Thermometer model.

    p = [Tn, GB, T, voff]: noise temperature, gain-bandwidth product,
    electron temperature and voltage offset; v is the bias voltage.
    """
    noise_temp, gain_bw, temp, v_offset = p
    qe, kb = (1.6e-19, 1.38e-23)
    bias = qe * (v - v_offset)
    shot_term = .5 * (bias / kb) / np.tanh(bias / (2 * kb * temp))
    return gain_bw * kb * (noise_temp + shot_term)
def fit_SNT(xdata, ydata, fitparams=None, domain=None, showfit=False, showstartfit=False, label='', debug=False):
    """fit Shot Noise Thermometer curve:
    returns [Tn,GainBW,T,voffset]

    Seeds (when fitparams is None) come from a linear fit to the high-bias
    tail (last third of the data), where the curve is asymptotically linear.
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    # Get starting parameters
    if fitparams is None:
        # fit high bias region with linear fit
        # `//`: len(...)/3 is a float in Python 3 and cannot be used as a
        # slice index (TypeError); integer division restores the intent.
        edge_index = len(xdata) // 3
        edge_x = xdata[-edge_index:]
        A = np.array([edge_x, np.ones(len(edge_x))])
        w = np.linalg.lstsq(A.T, ydata[-edge_index:])[0]
        qe, kb = (1.6e-19, 1.38e-23)
        GB_guess = w[0] / (.5 * qe)
        Tn_guess = w[1] / (kb * GB_guess)
        T_guess = abs((min(ydata) - w[1]) / (kb * GB_guess))
        voff_guess = 0.002
        fitparams = (Tn_guess, GB_guess, T_guess, voff_guess)
    if debug:
        print(fitparams)
    p1 = fitgeneral(fitdatax, fitdatay, SNT_func, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def polynomial2(p,x):
    """Quadratic about the centre p[-1]: p[0] + p[1]*dx + p[2]*dx**2."""
    dx = x - p[-1]
    return p[0] + p[1] * dx + p[2] * dx ** 2
def polynomial(p, x):
    """Degree-9 polynomial about the centre p[-1].

    Accumulates p[k] * (x - p[-1])**k for k = 0..9 in ascending order.
    (This rebinds the identically-behaving `polynomial` defined earlier.)
    """
    dx = x - p[-1]
    total = p[0]
    for k in range(1, 10):
        total += p[k] * dx ** k
    return total
def linear(p,x):
    """Straight line: intercept p[0] plus slope p[1] times x."""
    intercept, slope = p[0], p[1]
    return intercept + slope * x
def fitlinear(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
    """Fit a straight line p[0] + p[1]*x; returns [intercept, slope].

    (The previous docstring described a decaying sine and was wrong.)
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        fitparams=[1,1]
        # NOTE(review): bare `array` is not defined in this view -- presumably
        # a star-import (e.g. numpy) earlier in the file; a later duplicate of
        # this function uses fitdatay[0] directly. Confirm.
        fitparams[0] = array(fitdatay)[0]
        # slope seed from the data endpoints
        fitparams[1] = (float(fitdatay[-1])-float(fitdatay[0]))/( float(fitdatax[-1])-float(fitdatax[0]))
    p1 = fitgeneral(fitdatax, fitdatay, linear, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def fitbackground(xdata,ydata,fitparams=None, showfit=False,showstartfit=False,label=""):
    """Fit a degree-9 background polynomial (the `polynomial` model).

    returns p=[c0..c9, center] -- the coefficients plus the expansion centre.
    (The previous docstring was copied from the hanger fit and was wrong.)
    """
    fitdatax=xdata
    fitdatay=ydata
    if fitparams is None:
        # default seed: flat -6 background centred near 6.9 GHz
        fitparams=[-6,0,0,0,0,0,0,0,0,0,6.9e+9]
    return fitgeneral(fitdatax, fitdatay, polynomial, fitparams, domain=None, showfit=showfit,
                      showstartfit=showstartfit, label=label)
def _datacheck_peakdetect(x_axis, y_axis):
if x_axis is None:
x_axis = list(range(len(y_axis)))
if len(y_axis) != len(x_axis):
raise ValueError
#needs to be a numpy array
y_axis = np.array(y_axis)
x_axis = np.array(x_axis)
return x_axis, y_axis
def peakdetect(y_axis, x_axis = None, lookahead = 300, delta=0):
    """
    Converted from/based on a MATLAB script at:
    http://billauer.co.il/peakdet.html

    function for detecting local maximas and minmias in a signal.
    Discovers peaks by searching for values which are surrounded by lower
    or larger values for maximas and minimas respectively

    keyword arguments:
    y_axis -- A list containg the signal over which to find peaks
    x_axis -- (optional) A x-axis whose values correspond to the y_axis list
        and is used in the return to specify the postion of the peaks. If
        omitted an index of the y_axis is used. (default: None)
    lookahead -- (optional) distance to look ahead from a peak candidate to
        determine if it is the actual peak (default: 300)
        '(sample / period) / f' where '4 >= f >= 1.25' might be a good value
    delta -- (optional) this specifies a minimum difference between a peak and
        the following points, before a peak may be considered a peak. Useful
        to hinder the function from picking up false peaks towards to end of
        the signal. To work well delta should be set to delta >= RMSnoise * 5.
        (default: 0)

    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tupple
        of: (position, peak_value)
        to unpack one of the lists into x, y coordinates do: x, y = zip(*tab)
    """
    max_peaks = []
    min_peaks = []
    dump = []   #Used to pop the first hit which almost always is false

    # check input data (same contract as _datacheck_peakdetect, inlined so
    # the function is self-contained)
    if x_axis is None:
        x_axis = list(range(len(y_axis)))
    if len(y_axis) != len(x_axis):
        raise ValueError
    y_axis = np.array(y_axis)
    x_axis = np.array(x_axis)

    # store data length for later use
    length = len(y_axis)

    #perform some checks
    if lookahead < 1:
        raise ValueError("Lookahead must be '1' or above in value")
    if not (np.isscalar(delta) and delta >= 0):
        raise ValueError("delta must be a positive number")

    #maxima and minima candidates are temporarily stored in mx and mn.
    # np.inf (lowercase): the np.Inf alias was removed in NumPy 2.0.
    mn, mx = np.inf, -np.inf

    #Only detect peak if there is 'lookahead' amount of points after it
    for index, (x, y) in enumerate(zip(x_axis[:-lookahead],
                                       y_axis[:-lookahead])):
        if y > mx:
            mx = y
            mxpos = x
        if y < mn:
            mn = y
            mnpos = x

        ####look for max####
        if y < mx-delta and mx != np.inf:
            #Maxima peak candidate found
            #look ahead in signal to ensure that this is a peak and not jitter
            if y_axis[index:index+lookahead].max() < mx:
                max_peaks.append([mxpos, mx])
                dump.append(True)
                #set algorithm to only find minima now
                mx = np.inf
                mn = np.inf
                if index+lookahead >= length:
                    #end is within lookahead no more peaks can be found
                    break
                continue

        ####look for min####
        if y > mn+delta and mn != -np.inf:
            #Minima peak candidate found
            #look ahead in signal to ensure that this is a peak and not jitter
            if y_axis[index:index+lookahead].min() > mn:
                min_peaks.append([mnpos, mn])
                dump.append(False)
                #set algorithm to only find maxima now
                mn = -np.inf
                mx = -np.inf
                if index+lookahead >= length:
                    #end is within lookahead no more peaks can be found
                    break

    #Remove the false hit on the first value of the y_axis
    try:
        if dump[0]:
            max_peaks.pop(0)
        else:
            min_peaks.pop(0)
        del dump
    except IndexError:
        #no peaks were found, should the function return empty lists?
        pass

    return [max_peaks, min_peaks]
if __name__ =='__main__':
    # Self-test: synthesize noisy data and exercise the fitting helpers.
    plt.figure(1)
    xdata=np.linspace(-15,25,1000)
    params=[1.,20.,5.,2.]
    ydata=gaussfunc(params,xdata)-1+2*np.random.rand(len(xdata))
    plt.subplot(1,2,1)
    p1=fitgauss(xdata,ydata,showfit=True)
    plt.subplot(1,2,2)
    p2=fitlor(xdata,ydata,showfit=True)
    noise=0.
    plt.figure(2)
    # hangerfunc unpacks exactly five parameters [f0, Qi, Qc, df, scale]; the
    # previous 7-element test vector made hangerfunc() raise a ValueError and
    # the printouts then indexed fit[5]/fit[6], which do not exist.
    params2=[7.8,200,200.,0.005,1.]
    print('{0}\n--------------Test Parameter---------- \nf0: {1}\nQi: {2}\nQc: {3}\ndf: {4}\nScale: {5}\n'.format\
              ('',params2[0],params2[1],params2[2],params2[3],params2[4]))
    xdata2=np.linspace(7,9,1000)
    ydata2=hangerfunc(params2,xdata2)-noise/2.+noise*np.random.rand(len(xdata2))
    fit=fithanger(xdata2,ydata2,showfit=True,showstartfit=True)
    print('{0}\n--------------Best Fit---------- \nf0: {1}\nQi: {2}\nQc: {3}\ndf: {4}\nScale: {5}\n'.format\
              ('hanger',fit[0],fit[1],fit[2],fit[3],fit[4]))
    plt.show()
def doubleexpfunc(p, x):
    """Sum of two exponential decays sharing the onset p[2].

    p = [offset, amp1, t0, tau1, amp2]; the second decay has a fixed
    time constant of 10000 (same units as x).
    """
    offset, amp1, t0, tau1, amp2 = p[0], p[1], p[2], p[3], p[4]
    dt = x - t0
    return offset + amp1 * math.e ** (-dt / tau1) + amp2 * math.e ** (-dt / 10000)
# Only one offset
def decayrabifunc1(p, x):
    """Exponentially decaying Rabi oscillation (cos^2 form).

    p = [offset, amplitude, t0, decay time, Rabi frequency, phase origin].
    """
    offset, amp, t0, tau, freq, t_phase = p
    envelope = math.e ** (-(x - t0) / tau)
    oscillation = np.cos(np.pi * freq * (x - t_phase)) ** 2
    return offset + amp * envelope * oscillation
# Only one offset
def fitdecayrabi(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
    """Fit a decaying Rabi oscillation (decayrabifunc1):
    p[0] + p[1]*exp(-(x-p[2])/p[3]) * cos(pi*p[4]*(x-p[5]))**2
    Returns p=[offset, amplitude, t0, decay time, frequency, phase origin].
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Seed the oscillation frequency from the dominant FFT bin (skip DC).
        # scipy.fft is a module (not callable) in SciPy >= 1.4; use the
        # scipy.fftpack function matching the fftfreq call below.
        FFT=scipy.fftpack.fft(fitdatay)
        fft_freqs=scipy.fftpack.fftfreq(len(fitdatay),fitdatax[1]-fitdatax[0])
        max_ind=np.argmax(abs(FFT[2:int(len(fitdatay)/2.)]))+2
        fft_val=FFT[max_ind]
        fitparams=[0,0,0,0,0,0]
        fitparams[0]= min(fitdatay)
        fitparams[1]= max(fitdatay) - min(fitdatay)
        fitparams[2]= 0
        fitparams[3]= (max(fitdatax)-min(fitdatax))
        fitparams[4]= fft_freqs[max_ind]
    p1 = fitgeneral(fitdatax,fitdatay,decayrabifunc1,fitparams,domain=None,showfit=showfit,showstartfit=showstartfit,label=label)
    return p1
def fitdoubleexp(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
    """Fit a double exponential decay (doubleexpfunc):
    p[0] + p[1]*exp(-(x-p[2])/p[3]) + p[4]*exp(-(x-p[2])/10000)
    Returns p=[offset, amp1, t0, tau1, amp2, (unused)].
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Sparse seeding: only amplitude and decay time are estimated; the
        # remaining entries start at 0 (commented lines kept from the author).
        fitparams=[0.,0.,0.,0.,0.,0.]
        fitparams[1]= max(fitdatay) - fitdatay[0]
        fitparams[3]=(fitdatax[-1]-fitdatax[0])/5
        fitparams[5]=(fitdatax[-1]-fitdatax[0])/5
    p1 = fitgeneral(fitdatax,fitdatay,doubleexpfunc,fitparams,domain=None,showfit=showfit,showstartfit=showstartfit,label=label)
    return p1
def fitsin(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
    """Fit a plain sine: p[0]*sin(2*pi*p[1]*x + p[2]*pi/180) + p[3].
    Returns p=[amplitude, frequency (1/x units), phase (deg), offset].
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Seed frequency/phase from the dominant FFT component (skip low bins).
        # scipy.fft is a module (not callable) in SciPy >= 1.4; use the
        # scipy.fftpack function matching the fftfreq call below.
        FFT=scipy.fftpack.fft(fitdatay)
        fft_freqs=scipy.fftpack.fftfreq(len(fitdatay),fitdatax[1]-fitdatax[0])
        max_ind=np.argmax(abs(FFT[4:int(len(fitdatay)/2.)]))+4
        fft_val=FFT[max_ind]
        fitparams=[0,0,0,0]
        fitparams[0]=(max(fitdatay)-min(fitdatay))/2.
        fitparams[1]=fft_freqs[max_ind]
        fitparams[2]=(cmath.phase(fft_val)-np.pi/2.)*180./np.pi
        # Offset seed: the data mean. (An earlier revision overwrote this
        # with the x-span -- a copy-paste from a decay-time seed -- which is
        # not a sensible starting offset for p[3].)
        fitparams[3]=np.mean(fitdatay)
    sin3 = lambda p, x: p[0] * np.sin(2. * np.pi * p[1] * x + p[2] * np.pi / 180.) + p[3]
    p1 = fitgeneral(fitdatax, fitdatay, sin3, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def linear(p,x):
    """Line model y = p[0] + p[1]*x (duplicate rebinding kept for compatibility)."""
    return p[1] * x + p[0]
def fitlinear(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label=""):
    """Fit a straight line p[0] + p[1]*x; returns [intercept, slope].

    (Duplicate of the earlier fitlinear; this rebinding uses fitdatay[0]
    directly for the intercept seed. The previous docstring described a
    decaying sine and was wrong.)
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        fitparams=[1,1]
        fitparams[0] = fitdatay[0]
        # slope seed from the data endpoints
        fitparams[1] = (float(fitdatay[-1])-float(fitdatay[0]))/( float(fitdatax[-1])-float(fitdatax[0]))
    p1 = fitgeneral(fitdatax, fitdatay, linear, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def rabisatfunc(p, x):
    """Rabi saturation curve: offset p[0] plus x^2 / (2*x^2 + 1/p[1])."""
    offset, sat = p[0], p[1]
    drive = x ** 2
    return offset + drive / (2 * drive + 1 / sat)
def fitrabisatfunc(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label="",debug=False):
    """Fit the Rabi saturation curve (rabisatfunc):
    p[0] + x^2/(2*x^2 + 1/p[1]); returns [offset, saturation parameter].
    (The previous docstring described a lorentzian and was wrong.)
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # fixed default seed: zero offset, unit saturation parameter
        fitparams=[0,1]
    if debug==True: print(fitparams)
    p1 = fitgeneral(fitdatax, fitdatay, rabisatfunc, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def rabiwidth(p, x):
    """Rabi linewidth model: sqrt(p[1]^2 * x^2 + p[0]^2).

    NOTE(review): bare `sqrt` is not imported in this view -- presumably a
    star-import (e.g. numpy) earlier in the file; confirm.
    """
    return sqrt(p[1]**2*(x**2) + p[0]**2)
def fitrabiwidth(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label="",debug=False):
    """Fit the Rabi linewidth model (rabiwidth):
    sqrt(p[1]^2 * x^2 + p[0]^2); returns [zero-drive width, slope].
    (The previous docstring described a lorentzian and was wrong.)
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # slope seed from the endpoint secant; offset seed from the first point
        fitparams=[sqrt((ydata[-1]-ydata[0])/(xdata[-1]-xdata[0])),ydata[0]]
    if debug==True: print(fitparams)
    p1 = fitgeneral(fitdatax, fitdatay, rabiwidth, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def poly(p, x):
    """Quadratic in (x - p[-1]) WITHOUT a constant term.

    NOTE(review): p[0] is never used even though fitpoly seeds it with
    ydata[0]; an offset term `p[0] +` may have been intended -- confirm
    before relying on the fitted p[0].
    """
    return p[1]*(x-p[-1])+p[2]*(x-p[-1])**2
def fitpoly(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label="",debug=False):
    """Fit the centred quadratic `poly` model; returns [p0, p1, p2, center].

    (The previous docstring described a lorentzian and was wrong. Note that
    the `poly` model ignores p[0], so the seeded ydata[0] is inert.)
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # seeds: offset, endpoint secant slope, no curvature, centre at x[0]
        fitparams=[ydata[0],(ydata[-1]-ydata[0])/(xdata[-1]-xdata[0]),0,xdata[0]]
    if debug==True: print(fitparams)
    p1 = fitgeneral(fitdatax, fitdatay, poly, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    return p1
def dispersiveshift(p, x):
    """Sum of two Lorentzians on a common offset.

    p = [offset, amp1, center1, hwhm1, amp2, center2, hwhm2];
    each peak is amp / (1 + (x - center)^2 / hwhm^2).
    """
    baseline = p[0]
    peak_a = p[1] / (1 + (x - p[2]) ** 2 / p[3] ** 2)
    peak_b = p[4] / (1 + (x - p[5]) ** 2 / p[6] ** 2)
    return baseline + peak_a + peak_b
def fitdispersiveshift(xdata,ydata,fitparams=None,domain=None,showfit=False,showstartfit=False,label="",debug=False):
    """Fit a double-Lorentzian (dispersiveshift):
    returns [offset, amp1, center1, hwhm1, amp2, center2, hwhm2].
    (The previous docstring described a single lorentzian and was wrong.)
    """
    if domain is not None:
        fitdatax,fitdatay = selectdomain(xdata,ydata,domain)
    else:
        fitdatax=xdata
        fitdatay=ydata
    if fitparams is None:
        # Both peaks are seeded at the global maximum; the second amplitude
        # starts at 0 so the optimizer can grow it if a second peak exists.
        fitparams=[0,0,0,0,0,0,0]
        fitparams[0]=(fitdatay[0]+fitdatay[-1])/2.
        fitparams[1]=max(fitdatay)-min(fitdatay)
        fitparams[2]=fitdatax[np.argmax(fitdatay)]
        fitparams[3]=(max(fitdatax)-min(fitdatax))/10.
        fitparams[4]=0
        fitparams[5]=fitdatax[np.argmax(fitdatay)]
        fitparams[6]=(max(fitdatax)-min(fitdatax))/10.
    if debug==True: print(fitparams)
    p1 = fitgeneral(fitdatax, fitdatay, dispersiveshift, fitparams, domain=None, showfit=showfit, showstartfit=showstartfit,
                    label=label)
    # hwhm is sign-ambiguous in the model; report it positive
    p1[3]=abs(p1[3])
return p1 |
21,155 | 5fb8cd364e6b53da9076e1193f1f831fb25e93bf | try:
num = int(input("Enter: "))
print(float(10)/float(num))
except:
print 'Enter other number'
|
21,156 | c8c8632bbf3d0e9118e88720adca6a2249f2eb2a | def processInstruction(inst, idx, acc):
opCode, value = inst.split()
if opCode == 'nop':
return idx + 1, acc;
elif opCode == 'jmp':
return idx + int(value), acc
elif opCode == 'acc':
return idx + 1, acc + int(value)
# Advent of Code day 8: run the handheld's boot program and detect the loop.
with open('Input') as inFile:
    lines = inFile.read().splitlines()

# Part 1: execute until an instruction is about to run a second time.
alreadyVisited = []
currentIndex = 0
acc = 0
while True:
    if currentIndex in alreadyVisited:
        print('Hit an already run instruction')
        break
    else:
        alreadyVisited.append(currentIndex)
    currentIndex, acc = processInstruction(lines[currentIndex], currentIndex, acc)
print('Part 1:', acc)

# Part 2: toggle one jmp<->nop at a time until the program runs off the end.
for changedIndex in range(len(lines)):
    tempInstructions = lines[:]
    if tempInstructions[changedIndex].startswith('acc'):
        # acc instructions are never the corrupted one; skip
        continue
    elif tempInstructions[changedIndex].startswith('jmp'):
        tempInstructions[changedIndex] = tempInstructions[changedIndex].replace('jmp', 'nop')
    else:
        tempInstructions[changedIndex] = tempInstructions[changedIndex].replace('nop', 'jmp')
    alreadyVisited = []
    currentIndex = 0
    acc = 0
    while True:
        if currentIndex in alreadyVisited:
            # looped again -> this toggle was not the fix; try the next one
            break
        else:
            alreadyVisited.append(currentIndex)
        if currentIndex == len(tempInstructions):
            # fell off the end of the program: terminated normally
            print('Part 2:', acc)
            exit()
        currentIndex, acc = processInstruction(tempInstructions[currentIndex], currentIndex, acc)
|
21,157 | f884bb95caeac75d8f38edd51236ee409632b025 | import tweepy
import csv
import json
from textblob import TextBlob
import sys
# Twitter API credentials
# Load Twitter API credentials from a local JSON file (kept out of the repo).
with open('twitter_credentials.json') as cred_data:
    info = json.load(cred_data)
    consumer_key = info['CONSUMER_KEY']
    consumer_secret = info['CONSUMER_SECRET']
    access_key = info['ACCESS_KEY']
    access_secret = info['ACCESS_SECRET']

# Create the api endpoint
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
api = tweepy.API(auth)

# Mention the maximum number of tweets that you want to be extracted.
num_tweets = \
    int(input('Enter the number of tweets that you want to extract- '))

# Mention the hashtag that you want to look out for
hashtag = input('Enter the hashtag you want to scrape- ')
sent = "null"
# NOTE(review): Cursor(api.search, ...) is the Tweepy v3 API; confirm the
# pinned tweepy version (v4 renamed this to search_tweets).
for tweet in tweepy.Cursor(api.search, q='#' + hashtag,
                           count=100).items(num_tweets):
    # The CSV is reopened in append mode for every tweet; one TextBlob
    # polarity label (Positive/Negative/Neutral) is written per line.
    with open('tweets_' + hashtag + '.csv', 'a') as \
            f:
        tweetline = str(tweet.text.encode('utf-8'))
        analysis = TextBlob(tweetline)
        if analysis.sentiment[0]>0:
            sent = "Positive"
            f.write("%s\n" % str(sent))
        elif analysis.sentiment[0]<0:
            sent = "Negative"
            f.write("%s\n" % str(sent))
        else:
            sent = "Neutral"
            f.write("%s\n" % str(sent))

print ('Extracted ' + str(num_tweets) \
       + ' tweets with hashtag #' + hashtag)
|
21,158 | 87e36654207572e382c966e0f7ffb08058efbf73 | import os
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple
from matplotlib_venn import venn2
from ngslite import read_genbank, get_files, Chromosome
def get_codon_arr(chromosome: 'Chromosome') -> np.ndarray:
    """
    On the forward strand, mark the first base of each codon as 1
    Because only the first base is labeled 1, overlapping codons of the 3 frames can be distinguished
    """
    seq_len = len(chromosome.sequence)
    # `np.int` was removed in NumPy 1.24; the builtin int maps to the same
    # default integer dtype.
    arr = np.zeros((seq_len - 2,), dtype=int)
    for f in chromosome.features:
        if f.type != 'CDS':
            continue
        if f.strand == '-':
            # reverse-strand CDSs are handled by the caller after .reverse()
            continue
        protein_len = (f.end - f.start) // 3
        for aa in range(protein_len):
            pos = f.start + (aa * 3) - 1  # -1 to 0-based
            arr[pos] = 1
    return arr
def compare_chromosomes(
        chr1: Chromosome,
        chr2: Chromosome) -> Tuple[int, int, int]:
    """
    Returns:
        left: count of codons only appearing in chr1
        right: count of codons only appearing in chr2
        inner: count of codons in both chr1 and chr2
    """
    assert len(chr1.sequence) == len(chr2.sequence)

    left, right, inner = 0, 0, 0

    # forward strand: compare codon-start masks elementwise
    arr1 = get_codon_arr(chromosome=chr1)
    arr2 = get_codon_arr(chromosome=chr2)

    left += np.sum((arr1 - arr2) == 1)
    right += np.sum((arr2 - arr1) == 1)
    inner += np.sum((arr1 + arr2) == 2)

    # reverse strand: reverse() mutates the chromosome objects in place so the
    # '-'-strand CDSs become forward features; callers get back reversed objects
    chr1.reverse()
    chr2.reverse()

    arr1 = get_codon_arr(chromosome=chr1)
    arr2 = get_codon_arr(chromosome=chr2)

    left += np.sum((arr1 - arr2) == 1)
    right += np.sum((arr2 - arr1) == 1)
    inner += np.sum((arr1 + arr2) == 2)

    return left, right, inner
def compare_gbks(
        gbk1: str,
        gbk2: str) -> Tuple[int, int, int]:
    """Sum codon-overlap counts (left-only, right-only, both) over all
    chromosome pairs of two genbank files, paired positionally."""
    left, inner, right = 0, 0, 0
    chromosomes1 = read_genbank(gbk1)
    chromosomes2 = read_genbank(gbk2)
    for chr1, chr2 in zip(chromosomes1, chromosomes2):
        l, r, i = compare_chromosomes(chr1=chr1, chr2=chr2)
        left += l
        right += r
        inner += i
    return left, right, inner
def plot_venn(
        title: str,
        left: int,
        right: int,
        inner: int,
        png: str):
    """Draw a two-set Venn diagram of codon counts and save it to `png`.

    Each subset label shows the raw count plus its share of the total.
    """
    fig = plt.figure(figsize=(3, 3), dpi=600)
    n = right + left + inner
    venn2(
        subsets=(left, right, inner),
        set_labels=('', ''),
        # guard n == 0 so the percentage label cannot divide by zero
        subset_label_formatter=lambda x: f"{x}\n({(x / n):.2%})" if n else str(x)
    )
    plt.title(title)
    # savefig has no `fmt` keyword -- the correct name is `format`
    # (and it is inferred from the .png suffix anyway)
    fig.savefig(png, format='png')
    plt.close()
def main():
    """Compare reference vs re-annotated genbank files for 15 bacterial
    genomes and write one codon-overlap Venn diagram per species."""
    # RefSeq accession -> species name, used for plot titles/filenames
    seqname_to_species = {
        'NC_000913': 'Escherichia coli',
        'NC_002505': 'Vibrio cholerae',
        'NC_002516': 'Pseudomonas aeruginosa',
        'NC_003098': 'Streptococcus pneumoniae',
        'NC_004668': 'Enterococcus faecalis',
        'NC_000915': 'Helicobacter pylori',
        'NC_000964': 'Bacillus subtilis',
        'NC_009089': 'Clostridioides difficile',
        'NC_010729': 'Porphyromonas gingivalis',
        'NC_007795': 'Staphylococcus aureus',
        'NC_000962': 'Mycobacterium tuberculosis',
        'NC_003198': 'Salmonella enterica',
        'NC_003888': 'Streptomyces coelicolor',
        'NC_016845': 'Klebsiella pneumoniae',
        'NZ_CP009257': 'Acinetobacter baumannii',
    }

    # gbk1s/gbk2s are paired positionally; both listings must sort identically
    gbk1s = get_files(
        source='../data',
        endswith='gbff',
        isfullpath=True)

    gbk2s = get_files(
        source='../experiment_006/outdir',
        endswith='gbff',
        isfullpath=True)

    os.makedirs('outdir', exist_ok=True)

    for gbk1, gbk2 in zip(gbk1s, gbk2s):
        left, right, inner = compare_gbks(gbk1=gbk1, gbk2=gbk2)
        # title the plot after the first chromosome's accession
        seqname = read_genbank(gbk1)[0].seqname
        title = seqname_to_species[seqname]
        plot_venn(
            title=title,
            left=left,
            right=right,
            inner=inner,
            png=f'outdir/{title}.png')
|
21,159 | 1e769183c46b16e703ca3f737d2f6f8a91ed601c | import komand
import time
from .schema import NewAdvisoryInput, NewAdvisoryOutput
# Custom imports below
import requests
import datetime
_API_HOST = "https://access.redhat.com/labs/securitydataapi"
class NewAdvisory(komand.Trigger):
    """Trigger that polls the Red Hat Security Data API every 30 s and emits
    each advisory (CVRF) published since the previous poll."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name="new_advisory",
            description="Trigger on new advisory",
            input=NewAdvisoryInput(),
            output=NewAdvisoryOutput(),
        )
        # high-water mark: only advisories published after this are reported
        self.after = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # whether to embed the full CVRF document in each emitted event
        self.include_cvrf = False

    def get_cvrf(self, id_):
        """Fetch the full CVRF document for an advisory id; returns None on a
        falsy id, a non-200 response, or an empty body."""
        if not id_:
            return
        query = "%s/cvrf/%s.json" % (_API_HOST, id_)
        r = requests.get(query)
        if r.status_code == 200:
            results = r.json()
            if results:
                self.logger.info("got cvrf: %s", results)
                return results.get("cvrfdoc")

    def list_advisories(self, start=""):
        """List CVRF advisories published after `start`; [] on error."""
        query = "%s/cvrf.json" % (_API_HOST)
        r = requests.get(query, {"after": start})
        if r.status_code != 200:
            self.logger.error(
                "ERROR: Invalid request; returned {} for the following " "query:\n{}".format(r.status_code, query)
            )
            return []
        results = r.json()
        return results or []

    def find_source_reference(self, refs):
        """Return the URL of the 'Self' reference from a CVRF reference list,
        or '' when absent."""
        refs = refs or []
        for r in refs:
            if r.get("type") == "Self":
                return r["url"]
        return ""

    def process_advisory(self, a):
        """
        Normalize some of the incoming data
        """
        self.logger.debug("got event %s", a)
        # normalise API field names and enrich from the CVRF document
        a["rhsa"] = a.pop("RHSA") or ""
        a["cves"] = a.pop("CVEs") or []
        source = self.get_cvrf(a.get("rhsa")) or {}
        if source.get("document_title"):
            a["title"] = source["document_title"]
        if source.get("document_type"):
            a["type"] = source["document_type"]
        if source.get("document_publisher"):
            a["publisher"] = source["document_publisher"]
        if source.get("document_notes") and source["document_notes"].get("note"):
            a["notes"] = source["document_notes"]["note"] or []
            a["notes"] = "\n".join(a["notes"])
        if source.get("document_references") and source["document_references"].get("reference"):
            a["references"] = source["document_references"]["reference"]
            a["url"] = self.find_source_reference(source["document_references"]["reference"])
        if self.include_cvrf:
            a["source"] = source
        return a

    def process(self):
        """One poll cycle: emit every advisory since `self.after`, then
        advance the high-water mark."""
        self.logger.info("processing from: %s", self.after)
        advisories = self.list_advisories(self.after)
        self.after = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        for a in advisories:
            a = self.process_advisory(a)
            self.send(a)

    def run(self, params={}):
        """Run the trigger"""
        self.after = params.get("after") or datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.include_cvrf = params.get("include_cvrf")
        # send a test event
        while True:
            self.process()
            time.sleep(30)

    def test(self):
        """Test the trigger by returning an advisory"""
        advisories = self.list_advisories("2016-10-1")
        self.after = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.include_cvrf = True
        for a in advisories:
            return self.process_advisory(a)
|
21,160 | 41d21f80d8ff4e8b2689ef3956443f83a76088b8 | import os
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from flask import Flask, flash, jsonify, redirect, render_template, request, session, url_for
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Client-side sessions require a secret key
if not os.environ.get("SECRET_KEY"):
raise RuntimeError("SECRET_KEY not set")
app.secret_key = os.environ.get("SECRET_KEY")
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
    """Disable client/proxy caching on every response."""
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    response.headers["Expires"] = 0
    response.headers["Pragma"] = "no-cache"
    return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Connect to the Heroku Postgres database
if not os.environ.get("DATABASE_URL"):
raise RuntimeError("DATABASE_URL not set")
db = create_engine(os.environ.get("DATABASE_URL"))
# Make sure IEX API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/", methods=["GET", "POST"])
@login_required
def index():
    """Portfolio overview: the user's cash plus current stock holdings.
    POSTs from the page's quick buy/sell buttons redirect to those routes."""
    if request.method == "POST":
        if request.form.get("buy"):
            return redirect(url_for("buy", symbol=request.form["buy"]))
        else:
            return redirect(url_for("sell", symbol=request.form["sell"]))

    cash = db.execute(text("SELECT * FROM users WHERE id = :id"),
                      id=session["user_id"]).fetchone()["cash"]

    # Coerce decimal.Decimal into float (Postgres numeric is decimal.Decimal)
    # https://groups.google.com/d/msg/sqlalchemy/0qXMYJvq8SA/oqtvMD9Uw-kJ
    total = float(cash)

    rows = db.execute(text(
        "SELECT symbol, sum(shares) as shares FROM transactions "
        "WHERE user_id=:id GROUP BY symbol"),
        id=session["user_id"])
    stocks = []
    for row in rows:
        # Cast as a workaround for Postgres (stores sum(bigint) => numeric):
        # https://www.postgresql.org/docs/current/functions-aggregate.html#FUNCTIONS-AGGREGATE-TABLE
        # And, of course, SQL Alchemy returns numeric types as decimal.Decimal:
        # https://docs.sqlalchemy.org/en/13/core/type_basics.html#sqlalchemy.types.Numeric
        shares = int(row["shares"])
        # positions that were fully sold net to zero shares; hide them
        if shares == 0:
            continue
        quote = lookup(row["symbol"])
        share_total = shares * quote["price"]
        stocks.append({"symbol": row["symbol"],
                       "shares": shares,
                       "price": usd(quote["price"]),
                       "total": usd(share_total)})
        total += share_total

    return render_template(
        "index.html", stocks=stocks, cash=usd(cash), total=usd(total))
@app.route("/account", methods=["GET", "POST"])
@login_required
def account():
    """Modify account settings"""
    if request.method == "GET":
        username = db.execute(text("SELECT * FROM users WHERE id = :id"),
                              id=session["user_id"]).fetchone()["username"]
        return render_template("account.html", username=username)

    # Process username change.
    if "submit_username" in request.form:
        if not request.form.get("username"):
            return apology("missing new username")
        elif not request.form.get("password"):
            return apology("missing password")

        # Query database for new username
        row = db.execute(text("SELECT * FROM users WHERE username = :u"),
                         u=request.form.get("username")).fetchone()
        if row:
            return apology("username already exists")

        # re-authenticate before changing the username
        hash = db.execute(text("SELECT * FROM users WHERE id = :id"),
                          id=session["user_id"]).fetchone()["hash"]
        if not check_password_hash(hash, request.form.get("password")):
            return apology("invalid password", 403)

        db.execute(text("UPDATE users SET username=:u WHERE id=:id"),
                   u=request.form.get("username"),
                   id=session["user_id"])
        flash("Updated username!")

    # Process password change.
    else:
        if not request.form.get("password"):
            return apology("missing current password")
        elif not request.form.get("new_password"):
            return apology("missing new password")
        elif request.form.get("new_password") != request.form.get("confirmation"):
            return apology("password confirmation must match", 403)
        elif request.form.get("password") == request.form.get("new_password"):
            return apology("new password same as old", 403)

        # re-authenticate with the current password before changing it
        hash = db.execute(text("SELECT * FROM users WHERE id = :id"),
                          id=session["user_id"]).fetchone()["hash"]
        if not check_password_hash(hash, request.form.get("password")):
            return apology("invalid password", 403)

        db.execute(text(
            "UPDATE users SET hash=:h WHERE id=:id"),
            h=generate_password_hash(request.form.get("new_password")),
            id=session["user_id"])
        flash("Updated password!")

    return redirect("/account")
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == "GET":
return render_template("buy.html", symbol=request.args.get("symbol"))
if not request.form.get("symbol"):
return apology("missing symbol", 400)
elif not request.form.get("shares"):
return apology("missing shares", 400)
quote = lookup(request.form.get("symbol"))
if not quote:
return apology("invalid symbol", 400)
cash = db.execute(text("SELECT * FROM users WHERE id = :id"),
id=session["user_id"]).fetchone()["cash"]
purchase_price = int(request.form.get("shares")) * quote["price"]
# Cast decimal.Decimal (from Postgres numeric) to float.
if purchase_price > float(cash):
return apology("can't afford", 400)
db.execute(text(
"INSERT INTO transactions (user_id, symbol, shares, price) "
"VALUES (:u, :sy, :sh, :p)"),
u=session["user_id"],
sy=request.form.get("symbol"),
sh=request.form.get("shares"),
p=quote["price"])
db.execute(text("UPDATE users SET cash=cash-:c WHERE id=:id"),
c=purchase_price,
id=session["user_id"])
flash("Bought!")
return redirect("/")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
rows = db.execute(text(
"SELECT symbol, shares, price, time FROM transactions "
"WHERE user_id=:id"),
id=session["user_id"])
transactions = []
for row in rows:
transaction = dict(row)
transaction["price"] = usd(transaction["price"])
transactions.append(transaction)
return render_template("history.html", transactions=transactions)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via GET (as by clicking a link or via redirect)
if request.method == "GET":
return render_template("login.html")
# User reached route via POST (as by submitting a form via POST)
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
row = db.execute(text("SELECT * FROM users WHERE username = :username"),
username=request.form.get("username")).fetchone()
# Ensure username exists
if row is None:
return apology("invalid username")
# Ensure password is correct
if not check_password_hash(row["hash"], request.form.get("password")):
return apology("invalid password", 403)
# Remember which user has logged in
session["user_id"] = row["id"]
# Redirect user to home page
return redirect("/")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to homepage
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
if request.method == "GET":
return render_template("quote.html")
if not request.form.get("symbol"):
return apology("missing symbol", 400)
quote = lookup(request.form.get("symbol"))
if not quote:
return apology("invalid symbol", 400)
return render_template("quoted.html",
name=quote["name"],
symbol=quote["symbol"],
price=usd(quote["price"]))
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
if request.method == "GET":
return render_template("register.html")
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure username is not already taken
row = db.execute(text("SELECT * FROM users WHERE username = :username"),
username=request.form.get("username")).fetchone()
if row:
return apology("username already exists", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Ensure password confirmation matches
elif request.form.get("password") != request.form.get("confirmation"):
return apology("password confirmation must match", 403)
# Add user and automatically log in.
result = db.execute(text(
"INSERT INTO users (username, hash) VALUES (:u, :h)"),
u=request.form.get("username"),
h=generate_password_hash(request.form.get("password")))
# TODO: Use result.inserted_primary_key after converting to SQLAlchemy ORM.
if db.url.get_backend_name() in ["postgres", "postgresql"]:
id = db.execute("SELECT LASTVAL()").first()[0]
else:
id = result.lastrowid if result.rowcount == 1 else None
session["user_id"] = id
flash("Registered!")
return redirect("/")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == "GET":
rows = db.execute(text(
"SELECT symbol, sum(shares) as shares FROM transactions "
"WHERE user_id=:id GROUP BY symbol"),
id=session["user_id"])
symbols = [row["symbol"] for row in rows if row["shares"]]
return render_template("sell.html", symbols=symbols,
symbol=request.args.get("symbol"))
if not request.form.get("symbol"):
return apology("missing symbol", 400)
elif not request.form.get("shares"):
return apology("missing shares", 400)
owned_shares = db.execute(text(
"SELECT sum(shares) as shares FROM transactions "
"WHERE user_id=:id AND symbol=:symbol"),
id=session["user_id"],
symbol=request.form.get("symbol")).fetchone()["shares"]
requested_shares = int(request.form.get("shares"))
if requested_shares > owned_shares:
return apology("too many shares", 400)
quote = lookup(request.form.get("symbol"))
db.execute(text(
"INSERT INTO transactions (user_id, symbol, shares, price) "
"VALUES (:u, :sy, :sh, :p)"),
u=session["user_id"],
sy=request.form.get("symbol"),
sh=-requested_shares,
p=quote["price"])
sell_price = int(request.form.get("shares")) * quote["price"]
db.execute(text("UPDATE users SET cash=cash+:c WHERE id=:id"),
c=sell_price,
id=session["user_id"])
flash("Sold!")
return redirect("/")
def errorhandler(e):
    """Render an apology page for any error raised by a view."""
    err = e if isinstance(e, HTTPException) else InternalServerError()
    return apology(err.name, err.code)
# Listen for errors
# Register errorhandler() for every standard HTTP error code so all
# failures render the same apology page.
for code in default_exceptions:
    app.errorhandler(code)(errorhandler)
|
21,161 | 841565970ce16c144706c3dd46886f58896b9953 | import json
# Demonstrate serializing a Python dict to a JSON string.
pythonValue = dict(
    isCat=True,
    miceCaught=0,
    name='Zophie',
    felineIQ=None,
)
jsonData = json.dumps(pythonValue)
print(jsonData)
|
21,162 | fb0d4befff19664b6d0a5292705d9484b67aa587 | import logging
from django.dispatch import receiver
from django.db.models.signals import post_save, post_delete
from django.core.exceptions import ObjectDoesNotExist
from nadine.models import Payment, BillLineItem
logger = logging.getLogger(__name__)
@receiver(post_save, sender=BillLineItem)
def lineitem_post_save(**kwargs):
    """Keep the parent UserBill's cached totals in sync after a line item save."""
    saved_item = kwargs['instance']
    saved_item.bill.update_cached_totals()
@receiver(post_delete, sender=BillLineItem)
def lineitem_post_delete(**kwargs):
    """
    Update cached totals on UserBill after a line item is deleted.

    The bill may already be gone (cascade delete), in which case there is
    nothing to update.
    """
    lineitem = kwargs['instance']
    try:
        bill = lineitem.bill
        bill.update_cached_totals()
    except ObjectDoesNotExist:
        # BUG FIX: Logger.warn() is the deprecated alias of warning().
        logger.warning("Deleting a BillLineItem that does not have a Bill!")
@receiver(post_save, sender=Payment)
def payment_post_save(**kwargs):
    """Keep the parent UserBill's cached totals in sync after a payment save."""
    kwargs['instance'].bill.update_cached_totals()
@receiver(post_delete, sender=Payment)
def payment_post_delete(**kwargs):
    """Keep the parent UserBill's cached totals in sync after a payment delete."""
    deleted_payment = kwargs['instance']
    deleted_payment.bill.update_cached_totals()
|
21,163 | d0babe52a25e868375b40bec251a3d70feaeec48 | """
@author: achm
Calculate the weight of the Final ensemble of strong and weak model via keras
"""
import numpy as np
import pandas as pd
from sklearn.manifold import TSNE
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.advanced_activations import PReLU
from keras.layers.convolutional import Convolution1D
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.embeddings import Embedding
import xgboost as xgb
import sys
import cPickle
import copy
import glob
# Load data
print("Load the training/test data using pandas")
training = pd.read_csv("../input/training.csv")
# Label training rows as class 1 for the train-vs-test discriminator below.
training['ensemble_weight'] = 1
# Drop target/leak columns so training and testing feature spaces line up.
training.drop('min_ANNmuon', axis=1, inplace=True)
training.drop('mass', axis=1, inplace=True)
training.drop('production', axis=1, inplace=True)
training.drop('signal', axis=1, inplace=True)
# Number of training epochs; also part of the cache-file names.
param_epoch = 300
# For each of the 5 test folds: load a cached Keras discriminator if present,
# otherwise train one to separate training rows (label 1) from test rows
# (label 0), then use P(row looks like training data) as the per-row ensemble
# weight. (Python 2 script: print statements and cPickle.)
for i in range(0,5):
    print "### %i ###" %i
    try:
        fh = open("./model/keras_%i_epoch_%i" %(i,param_epoch), "rb")
        deep_model = cPickle.load(fh)
        fh.close()
    except:
        scaler = StandardScaler()
        np.random.seed(6174)
        print "No prebuild model..."
        testing = pd.read_csv("./input/testing_%i.csv" %i)
        testing['ensemble_weight'] = 0
        #scaler = StandardScaler()
        result = pd.concat([training, testing])
        y = result["ensemble_weight"]
        # Drop Unnesscary features
        result.drop('ensemble_weight', axis=1, inplace=True)
        result.drop('id', axis=1, inplace=True)
        deep_model = copy.deepcopy(Sequential())
        deep_model.add(Dense(result.shape[1], 512, init = "glorot_normal"))
        deep_model.add(Activation('tanh'))
        deep_model.add(Dropout(0.5))
        deep_model.add(Dense(512, 256, init = "glorot_normal"))
        deep_model.add(Activation('relu'))
        deep_model.add(Dropout(0.4))
        deep_model.add(Dense(256, 128, init = "glorot_normal"))
        deep_model.add(Activation('tanh'))
        deep_model.add(Dropout(0.3))
        deep_model.add(Dense(128, 64, init = "glorot_normal"))
        deep_model.add(Activation('relu'))
        deep_model.add(Dropout(0.2))
        deep_model.add(Dense(64, 2, init = "glorot_normal"))
        deep_model.add(Activation('softmax'))
        deep_model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        deep_model.fit(scaler.fit_transform(np.array(result)), np_utils.to_categorical(y),
              batch_size=256, nb_epoch=param_epoch, verbose=2, show_accuracy=True)
        # save model
        temp_file_name = "./model/keras_%i_epoch_%i" %(i,param_epoch)
        fh = open(temp_file_name, "wb")
        cPickle.dump(deep_model,fh)
        fh.close()
        # save scalar
        temp_file_name = "./model/keras_scalar_%i_epoch_%i" %(i,param_epoch)
        fh = open(temp_file_name, "wb")
        cPickle.dump(scaler,fh)
        fh.close()
    # Reload the fitted scaler from disk (covers the cached-model path too).
    fh = open("./model/keras_scalar_%i_epoch_%i" %(i,param_epoch), "rb")
    scaler = cPickle.load(fh)
    fh.close()
    # Make Prediction
    testing_eval = pd.read_csv("./input/testing_eval_%i.csv" %i)
    #################### FIX #########################
    ids = testing_eval['id']
    testing_eval.drop('id', axis=1, inplace=True)
    ##################################################
    # Softmax column 1 = probability the row resembles training data.
    ensemble_weight = deep_model.predict(scaler.transform(testing_eval), batch_size=256)[:, 1]
    # Generate ensemble weight
    with open('./output/ensemble_weight_%i_epoch_%i.csv' %(i,param_epoch), 'w') as f:
        f.write('id,weight\n')
        for ID, p in zip(ids, ensemble_weight):
            f.write('%s,%.8f\n' % (ID, p))
# Combine
print("Load ensemble weighting")
# Concatenate the five per-fold weight files into one combined CSV.
ensemble_weight_0 = pd.read_csv("./output/ensemble_weight_0_epoch_%i.csv" %param_epoch)
ensemble_weight_1 = pd.read_csv("./output/ensemble_weight_1_epoch_%i.csv" %param_epoch)
ensemble_weight_2 = pd.read_csv("./output/ensemble_weight_2_epoch_%i.csv" %param_epoch)
ensemble_weight_3 = pd.read_csv("./output/ensemble_weight_3_epoch_%i.csv" %param_epoch)
ensemble_weight_4 = pd.read_csv("./output/ensemble_weight_4_epoch_%i.csv" %param_epoch)
ensemble_weight = pd.concat([ensemble_weight_0, ensemble_weight_1, ensemble_weight_2, ensemble_weight_3, ensemble_weight_4])
ensemble_weight.to_csv("./output/ensemble_weight_epoch_%i.csv" %param_epoch, index=False)
|
21,164 | 780c803c13785c22901b49187778642bf0c2f231 | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
import selectors
import socket
import threading
class Server(threading.Thread):
    """Selector-based TCP echo server that starts itself on construction.

    BUG FIXES vs. original: the module never imported `selectors` and
    referenced an undefined global `BACKLOG`, so the server crashed with a
    NameError before serving anything.
    """

    # Listen backlog (the original referenced an undefined global BACKLOG).
    BACKLOG = 128

    def __init__(self, http_port=8080, buffer_size=4096):
        print('Server.__init__()')
        self.http_port = http_port
        self.buffer_size = buffer_size
        self.selector = selectors.DefaultSelector()
        threading.Thread.__init__(self)
        self.start()

    def run(self):
        """Bind, listen, and dispatch selector events forever."""
        print('Server.run()')
        self.sock = socket.socket()
        self.sock.bind(('', self.http_port))
        self.sock.listen(self.BACKLOG)
        self.sock.setblocking(False)
        self.selector.register(self.sock, selectors.EVENT_READ, {'callback': self.accept, 'msg': None})
        print('listening', self.http_port)  # fixed typo 'listenig'
        while True:
            events = self.selector.select()
            for key, mask in events:
                text_mask = 'ready for '
                text_mask += 'write ' if (mask & selectors.EVENT_WRITE > 0) else ''
                text_mask += 'read ' if (mask & selectors.EVENT_READ > 0) else ''
                callback = key.data.get('callback')
                print({'event': callback, 'mask': text_mask})
                callback(key.fileobj, mask, key.data)

    def accept(self, sock, mask, data):
        """Accept a new client connection and watch it for reads."""
        cliSock, addr = sock.accept()  # should be ready
        cliSock.setblocking(False)
        print('accepted', cliSock, 'from', addr)
        self.selector.register(cliSock, selectors.EVENT_READ, {'callback': self.read})

    def read(self, cliSock, mask, data):
        """Read from a client; echo the payload, or close on EOF.

        (The received buffer no longer shadows the `data` parameter.)
        """
        payload = cliSock.recv(self.buffer_size)  # should be ready
        if payload:
            print('echoing', repr(payload), 'to', cliSock)
            self.write_asycn(cliSock, payload)  # hope it won't block
        else:
            print('closing', cliSock)
            self.selector.unregister(cliSock)
            cliSock.shutdown(socket.SHUT_RDWR)
            cliSock.close()

    def write_asycn(self, clientSock, msg):
        """Queue `msg` to be sent when the socket becomes writable.

        NOTE(review): name kept as-is ('asycn' is a typo) since external
        callers may rely on it. After this call, read events on the socket
        also dispatch to write() until re-registered — confirm intent.
        """
        self.selector.modify(clientSock, selectors.EVENT_WRITE | selectors.EVENT_READ, {'callback': self.write, 'msg': msg})

    def write(self, cliSock, mask, data):
        """Send the queued message; re-queue any unsent remainder."""
        msg = data.get('msg')
        if len(msg) == 0:
            return
        try:
            sent = cliSock.send(msg)
        except Exception as error:
            print(error)
            cliSock.shutdown(socket.SHUT_RD)
        else:
            if sent < len(msg):
                self.write_asycn(cliSock, msg[sent:])
|
21,165 | 79ccd11b54838be160e2e20926feac568b140f68 | # -*- coding: utf-8 -*-
import os
from datetime import datetime
import numpy as np
import shutil
from flask import Flask, render_template, redirect, url_for, request, session, send_from_directory, Response
from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import SubmitField
import xlsxwriter
import xlrd
from xlutils.copy import copy
from flask import jsonify
import openpyxl
import json
from util.AHP import AHP
from flask_sqlalchemy import SQLAlchemy
import config
from flask.json import JSONEncoder as _JSONEncoder
class JSONEncoder(_JSONEncoder):
    """Flask JSON encoder: datetimes -> unix timestamps, dict-like models -> dicts."""

    def default(self, o):
        # datetimes serialize as integer unix timestamps
        if isinstance(o, datetime):
            return int(o.timestamp())
        # objects exposing keys()/__getitem__ (the User/Task models) -> dicts
        if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
            return dict(o)
        # BUG FIX: was `raise None`, which itself raises
        # "TypeError: exceptions must derive from BaseException"; delegate to
        # the base class so unserializable objects raise a proper TypeError.
        return super().default(o)
app = Flask(__name__)
app.json_encoder = JSONEncoder
app.config.from_object(config)
# NOTE(review): hard-coded secret key — should come from config/env in production.
app.config['SECRET_KEY'] = 'I have a dream'
# Windows-specific absolute path where uploaded photos and task folders live.
address = 'C:\\Users\\Administrator\\Desktop\\images\\static\\'
app.config['UPLOADED_PHOTOS_DEST'] = address
# Allow request bodies up to 200 MB (bulk photo uploads).
app.config['MAX_CONTENT_LENGTH'] = 200 * 1024 * 1024
db = SQLAlchemy(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
patch_request_class(app, size=None)  # set maximum file size, default is 16MB
class UploadForm(FlaskForm):
    """Photo upload form accepting image files only."""
    # BUG FIX: the FileAllowed message literal was corrupted and split across
    # two lines (a syntax error); reconstructed as a single literal
    # ("only photo formats allowed" / submit label "upload" in Chinese).
    photo = FileField(validators=[FileAllowed(photos, u'只能是照片格式!'),
                                  FileRequired(u'Choose a file!')])
    submit = SubmitField(u'上传')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/index', methods=['GET', 'POST'])
def upload_file():
    """Receive uploaded photos: create/update the Task row, save the files,
    and append their names to the folder's names.txt."""
    folder_name = request.form.get('folderName')
    folder = address + folder_name
    uploads = request.files.getlist('photo')
    task = Task.query.filter_by(folder_name=folder_name).first()
    if task is None:
        task = Task(folder_name=folder_name, size=len(uploads), status='0', place='1-2', create_time=datetime.now())
        # persist the new task
        db.session.add(task)
        db.session.commit()
    else:
        # accumulate the photo count on the existing task (size is a string column)
        task.size = str(int(task.size) + len(uploads))
        db.session.commit()
    if not os.path.exists(folder):
        os.makedirs(folder)
    # BUG FIX: the names file was opened without ever being closed (handle
    # leak, `file` also shadowed the builtin); use a context manager.
    with open(folder + '\\names.txt', 'a') as names_file:
        for upload in uploads:
            name = upload.filename
            names_file.write(name + '\n')
            photos.save(upload, folder=folder, name=name)
    task = Task.query.filter_by(folder_name=folder_name).first()
    return jsonify(task)
if __name__ == '__main__':
    # NOTE(review): this guard sits in the middle of the module, so every
    # route defined below this point is only registered after app.run()
    # returns — i.e. never while the server is running. Move it to file end.
    app.run(host='0.0.0.0', port=8080, debug=True)
    # app.run(debug=True)
@app.route('/page_list', methods=['GET', 'POST'])
def page_list():
    """Return the number of images in the caller's active (status=2) task folder."""
    user_id = request.headers.get('Authorization', None)
    task = Task.query.filter_by(user_id=user_id, status=2).first()
    folder = address + task.folder_name
    if not os.path.exists(folder):
        return jsonify(0)
    # three bookkeeping files (names.txt, data.xlsx, result.xlsx) are not images
    return jsonify(len(os.listdir(folder)) - 3)
def create_excel(size, folder_name):
    """Create the pairwise-comparison workbook (data.xlsx) and an empty result.xlsx."""
    # Create a new Excel file (translated from the original Chinese comment).
    wb = openpyxl.Workbook()
    ws1 = wb.active
    # Fill the diagonal with 1 (an item compared with itself scores 1).
    # NOTE(review): only size-1 diagonal cells are written — looks like an
    # off-by-one (the last row/column stays empty); confirm intent.
    for i in range(size - 1):
        ws1.cell(row=i+1, column=i+1, value=1)
    wb.save((folder_name + '\\data.xlsx'))
    workbook = xlsxwriter.Workbook(folder_name + '\\result.xlsx')
    workbook.close()
@app.route('/submit', methods=['GET', 'POST'])
def submit():
    """Mark the caller's active task finished and compute AHP weights from data.xlsx."""
    user_id = request.headers.get('Authorization', None)
    task = Task.query.filter_by(user_id=user_id, status=2).first()
    task.status = 3
    db.session.commit()
    folder_name = address + task.folder_name
    sheet = xlrd.open_workbook(folder_name + "\\data.xlsx").sheets()[0]
    matrix = []
    for r in range(sheet.nrows):
        # unfilled cells come back as strings; treat them as 0
        matrix.append([0 if isinstance(v, str) else v for v in sheet.row_values(r)])
    M = np.array(matrix)
    obj = AHP(M)
    evec = obj.get_evec(obj.supp_mat(M))
    obj.save_result(evec, folder_name)
    return jsonify("success")
@app.route('/update_excel/<row>/<line>/<value>', methods=['GET', 'POST'])
def update_excel(row, line, value):
    """Write one judgement value into data.xlsx and remember the user's position."""
    user_id = request.headers.get('Authorization', None)
    task = Task.query.filter_by(user_id=user_id, status=2).first()
    # remember where the user is in the comparison grid
    task.place = str(row) + '-' + str(line)
    db.session.commit()
    folder_name = address + task.folder_name
    r = int(row) - 1
    c = int(line) - 1
    # xlrd can only read; copy() produces a writable xlwt workbook
    writable = copy(xlrd.open_workbook(folder_name + '\\data.xlsx'))
    sheet = writable.get_sheet(0)
    sheet.write(r, c, int(value))
    writable.save(folder_name + '\\data.xlsx')
    return jsonify("success")
@app.route('/open/<filename>', methods=['GET', 'POST'])
def open_file(filename):
    """Return the URL of the <filename>-th image of the caller's active task.

    `filename` is a 1-based line index into the task folder's names.txt.
    """
    user_id = request.headers.get('Authorization', None)
    task = Task.query.filter_by(user_id=user_id, status=2).first()
    folder_name = address + task.folder_name
    line = getline(folder_name + "\\names.txt", int(filename))
    name = line.replace("\n", "")
    # HACK: reconfigures the global upload set per request so photos.url()
    # resolves against this task's folder. Not thread-safe — concurrent
    # requests for different tasks race on the shared app config.
    global app
    app.config['UPLOADED_PHOTOS_DEST'] = folder_name
    global photos
    photos = UploadSet('photos', IMAGES)
    configure_uploads(app, photos)
    file_url = photos.url(name)
    return jsonify(file_url)
@app.route('/delete/<filename>')
def delete_file(filename):
    """Delete an uploaded file and re-render the management page."""
    file_path = photos.path(filename)
    os.remove(file_path)
    # BUG FIX: `files_list` was referenced without ever being defined
    # (NameError on every request); rebuild it from the upload folder.
    files_list = os.listdir(app.config['UPLOADED_PHOTOS_DEST'])
    return render_template('manage.html', files_list=files_list)
@app.route('/download/<folder_name>/<filename>', methods=['GET', 'POST'])
def download(folder_name, filename):
    """Send <filename> from the given task folder as an attachment.

    (Removed a large block of commented-out, duplicated AHP computation code
    that already lives in submit().)
    """
    folder_name = address + folder_name
    return send_from_directory(folder_name, filename=filename, as_attachment=True)
@app.route('/getTaskBean', methods=['GET'])
def get_task_bean():
    """Return the caller's active (status=2) task as JSON."""
    uid = request.headers.get('Authorization', None)
    return jsonify(Task.query.filter_by(user_id=uid, status=2).first())
def getline(the_file_path, line_number):
    """Return the 1-based `line_number`-th line of a text file, or '' if out of range.

    BUG FIX: the original opened the file without ever closing it (handle
    leak) and used the deprecated 'rU' mode; use a context manager instead.
    """
    if line_number < 1:
        return ''
    with open(the_file_path) as fh:
        for current, line in enumerate(fh, start=1):
            if current == line_number:
                return line
    return ''
@app.route('/getValue/<row>/<line>', methods=['GET', 'POST'])
def get_excel(row, line):
    """Read one judgement value from the active task's data.xlsx."""
    user_id = request.headers.get('Authorization', None)
    task = Task.query.filter_by(user_id=user_id, status=2).first()
    folder_name = address + task.folder_name
    # convert the 1-based grid coordinates to 0-based sheet indices
    r, c = int(row) - 1, int(line) - 1
    sheet = xlrd.open_workbook(folder_name + '\\data.xlsx').sheet_by_index(0)
    return jsonify(sheet.cell_value(r, c))
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user from a JSON body {username, password}.

    SECURITY NOTE: passwords are stored and compared in plain text; they
    should be hashed (e.g. werkzeug.security check_password_hash).
    BUG FIX: the mojibake'd response strings were reconstructed as the
    original Chinese ("login succeeded"/"login failed"); `.all()` + len()
    was replaced by a single `.first()` lookup.
    """
    payload = json.loads(request.get_data().decode("utf-8"))
    username = payload.get("username")
    password = payload.get("password")
    user = User.query.filter_by(username=username, password=password).first()
    if user:
        return jsonify({'status': 'ok', 'info': '%s登录成功' % username, 'session': user.id, 'role': user.role})
    return jsonify({'status': 'no', 'info': '登录失败'})
@app.route('/registry', methods=['POST'])
def registry():
    # Register a new user from a JSON body {username, password}.
    # NOTE(review): the password is stored in plain text; on success this
    # returns 'session': username, whereas login() returns the user id —
    # confirm which one the client actually expects.
    data = request.get_data()
    json_data = json.loads(data.decode("utf-8"))
    username = json_data.get("username")
    password = json_data.get("password")
    users = User.query.filter_by(username=username).all()
    if len(users) > 0:
        return jsonify({'status':'no','info':'%sๆณจๅๅคฑ่ดฅ'%username})
    else:
        user = User(username=username, password=password, role=1)
        # persist the new user (translated from the original Chinese comment)
        db.session.add(user)
        db.session.commit()
        return jsonify({'status':'ok','info':'%sๆณจๅๆๅ'%username,'session':username,'role':1})
@app.route('/getTask', methods=['GET'])
def get_task():
    """List all tasks, newest first."""
    return jsonify(Task.query.order_by(Task.create_time.desc()).all())
@app.route('/getUsers', methods=['GET'])
def get_users():
    """List every registered user."""
    return jsonify(User.query.all())
@app.route('/deleteTask/<task_id>', methods=['GET'])
def delete_task(task_id):
    """Delete a task row and its photo folder on disk."""
    task = Task.query.filter_by(id=task_id).first()
    folder_name = address + task.folder_name
    # ROBUSTNESS FIX: if the folder was already gone, rmtree raised and left
    # the task row undeletable; ignore a missing directory.
    shutil.rmtree(path=folder_name, ignore_errors=True)
    Task.query.filter_by(id=task_id).delete()
    db.session.commit()
    return jsonify('success')
@app.route('/updateTask', methods=['POST'])
def update_task():
    """Assign a task to a user / change its status; respond with the user list."""
    payload = json.loads(request.get_data().decode("utf-8"))
    task_id = payload.get("id")
    user_id = payload.get("user_id")
    status = payload.get("status")
    folder_name = payload.get("folder_name")
    if int(status) == 2:
        # task moves into "in progress": build its comparison workbook
        folder = address + str(folder_name)
        create_excel(len(os.listdir(folder)), folder)
    task = Task.query.filter_by(id=task_id).first()
    task.user_id = user_id
    task.status = status
    db.session.commit()
    return jsonify(User.query.all())
class User(db.Model):
    # Account record; keys()/__getitem__ make instances dict()-convertible
    # so the custom JSONEncoder can serialize them.
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(100), nullable=False)
    # NOTE(review): password is stored in plain text — should be hashed.
    password = db.Column(db.String(100), nullable=False)
    role = db.Column(db.String(100), nullable=False)
    def keys(self):
        # fields exposed when the object is converted with dict()
        return ['id', 'username', 'password', 'role']
    def __getitem__(self, item):
        return getattr(self, item)
class Task(db.Model):
    # An annotation task over one photo folder; keys()/__getitem__ make
    # instances dict()-convertible for the custom JSONEncoder.
    __tablename__ = 'task'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.String(100), nullable=False)
    folder_name = db.Column(db.String(100), nullable=False)
    # status lifecycle seen in the routes: '0' new, 2 in progress, 3 done
    status = db.Column(db.String(100), nullable=False)
    # number of photos, stored as a string
    size = db.Column(db.String(100), nullable=False)
    # "row-line" position the user last edited in the comparison grid
    place = db.Column(db.String(100), nullable=False)
    create_time = db.Column(db.DateTime, nullable=False)  # creation time
    def keys(self):
        return ['id', 'user_id', 'folder_name', 'status', 'size', 'place', 'create_time']
    def __getitem__(self, item):
        return getattr(self, item)
|
21,166 | 50092359ef4194ea11bc459d61db311816ed9711 | class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
n = nums.count(val)
while n > 0:
nums.remove(val)
n -= 1 |
21,167 | d4a44a1b3f675a3c09dcb05bbab6faf32c1c986d | import sys
from zeep import Client
def calc_distance(source_file):
    """Sum the mile distances in column 2 of `source_file`, convert the total
    to kilometers via the webservicex SOAP length service, and print both.

    Each line is expected as "<label> <miles>", where miles may contain
    comma thousands separators. Requires network access.
    """
    client = Client('http://www.webservicex.net/length.asmx?WSDL')
    distance_miles = 0
    with open(source_file) as f:
        for line in f:
            distance_miles += float(line.split()[1].replace(',', ''))
    result_km = client.service.ChangeLengthUnit(
        distance_miles,
        'Miles',
        'Kilometers'
    )
    result_km = round(result_km, 2)
    # BUG FIX: both print literals were mis-encoded and split across lines
    # (a syntax error); reconstructed as single-line Russian strings
    # ("Distance in miles:" / "Distance in kilometers:").
    print('Расстояние в милях:', distance_miles)
    print('Расстояние в километрах:', result_km)
if __name__ == '__main__':
    # First CLI argument is the path to the data file.
    if len(sys.argv) > 1:
        calc_distance(sys.argv[1])
    else:
        # BUG FIX: reconstructed the mis-encoded Russian literal
        # ("Path to the data file was not given as a parameter").
        print('Не указан путь к файлу с данными в параметре')
21,168 | a0565d7fe15fb6c427bcd1fe0afe80ccb2b057c4 |
import os
import pandas as pd
import dask.dataframe as dd
main_path = r'D:\data\AirOnTime\AirOnTimeCSV'
# Lazily load every monthly airOT*.csv, keeping only the columns needed to
# compute per-airport daily mean departure delay.
ddf = dd.read_csv(
    os.path.join(main_path, 'airOT*.csv'),
    encoding='latin-1',
    usecols =['YEAR', 'MONTH', 'DAY_OF_MONTH', 'ORIGIN', 'DEP_DELAY']
)
# Mean departure delay per (date, origin airport); reset_index flattens the key.
mean_dep_delay_ddf = ddf.groupby(['YEAR', 'MONTH', 'DAY_OF_MONTH','ORIGIN'])[['DEP_DELAY']].mean().reset_index()
# compute() materializes the lazy dask graph into an in-memory pandas DataFrame.
mean_dep_delay_df = mean_dep_delay_ddf.compute()
# NOTE(review): '<your-path>' is a placeholder — must be filled in before running.
mean_dep_delay_df.to_csv(r'D:\<your-path>\Chapter08\mean_dep_delay_df.csv', index=False)
|
21,169 | c964d77b348d866b420968955452254b070a4466 |
import re
import os
import json
import functools
from collections import namedtuple, defaultdict
from lxml import etree
from cltk.tokenize.sentence import TokenizeSentence
def _fetch_latin_models():
    """Download the cltk Latin models required by TokenizeSentence."""
    print("Fetching cltk tokenizers...")
    # imported lazily: only needed when the models are missing
    from cltk.corpus.utils.importer import CorpusImporter
    CorpusImporter('latin').import_corpus('latin_models_cltk')
ROOT_FOLDER = './data/'
DOC = namedtuple('DOC', ['author', 'title', 'sentences', 'nb_words'])
# Build the Latin sentence tokenizer, downloading the cltk models on demand.
# BUG FIX: on failure the original only fetched the models and never retried
# the construction, leaving CLTK_TOK undefined (NameError at first use);
# retry after fetching, and catch Exception instead of a bare except.
try:
    CLTK_TOK = TokenizeSentence('latin')
except Exception:
    _fetch_latin_models()
    CLTK_TOK = TokenizeSentence('latin')
def detokenizer(tokens):
    """Join word/punctuation tokens back into a sentence string.

    No space is inserted before closing punctuation (; . , : ? ! )) and
    none after an opening parenthesis; all other tokens are space-separated.
    Expects a non-empty token sequence.
    """
    no_space_before = {';', '.', ',', ':', '?', '!', ')'}
    no_space_after = {'('}
    result = tokens[0]
    for tok in tokens[1:]:
        if tok in no_space_before or result[-1] in no_space_after:
            result += tok
        else:
            result += ' ' + tok
    return result
def packhum_sentence_tokenizer(doc):
    """Strip packhum verse numbering from `doc` and split it into sentences."""
    # remove verse line-markers (bare numbers, optionally dotted: "12", "12.3")
    doc = re.sub(r"[0-9]+(\.)?([0-9]+)?", "", doc)
    # normalize whitespace: collapse runs of newline+indent to one newline
    doc = re.sub(r"(\n[ ]+)+", "\n", doc)
    return CLTK_TOK.tokenize_sentences(doc)
def _reader_gen(doc_func, root, include, exclude, subpath, min_sent_len):
    """Yield a DOC tuple per author file under root/subpath.

    doc_func parses one open file into (title, nb_words, sentences).
    Author names derive from file names (underscores -> spaces); `include`
    and `exclude` filter them.
    """
    found = set()
    for f in os.listdir(os.path.join(root, subpath)):
        author = f.split('.')[0].replace('_', ' ')
        if (exclude and author not in exclude) or \
           (include and author in include) or \
           (not include and not exclude):
            # BUG FIX: files were opened in 'r+' (read/write) although only
            # read; a plain read-only context manager suffices.
            with open(os.path.join(root, subpath, f)) as inf:
                title, nb_words, sentences = doc_func(inf, min_sent_len)
            yield DOC(author=author, title=title,
                      nb_words=nb_words, sentences=sentences)
            found.add(author)
    # check whether all include authors have been found
    # (BUG FIX: message read "Didn't found author %s")
    for author in include:
        assert author in found, "Author %s not found" % author
def _packhum_func(inf, min_sent_len):
    """Parse one packhum JSON work file into (title, nb_words, sentences)."""
    work = json.load(inf)
    # NOTE(review): the 'author' field is used as the title — looks like a
    # mix-up (DOC.author is already set by the caller from the file name);
    # confirm whether the JSON schema actually lacks a 'title' key.
    title = work['author']
    # keep only sentences longer than min_sent_len words
    sentences = [s for page in work['pages']
                 for s in packhum_sentence_tokenizer(page['text'])
                 if len(s.split()) > min_sent_len]
    nb_words = sum(len(s.split()) for s in sentences)
    return title, nb_words, sentences
def packhum_reader(root=ROOT_FOLDER, include=(), exclude=(),
                   subpath='packhum/merged', min_sent_len=5):
    """
    Yield DOC tuples for the packhum corpus.

    Parameters
    ===========
    root : str, top folder of the processed data
    exclude : tuple of str, authors to skip when reading. Note that
        author names are determined by the file name substituting
        underscores `_` with blankspaces ` `.
    include : tuple of str, authors to include when reading.
    min_sent_len : int, sentences with this many words or fewer are dropped.
    """
    return _reader_gen(
        _packhum_func, root, include, exclude, subpath, min_sent_len)
def pl_sentence_tokenizer(doc, min_sent_len=5):
    """
    Transform .vrt files into list of sentences.
    Original sentences are markup <s></s>. However, it seems that
    in some documents sentences have been automatically terminated with ".",
    and the actual final punctuation has been segmented to its own <s>
    element, e.g. a sentence ending "Domine SUB domina / . SENT ." followed
    by an <s> containing only ". SENT .". Therefore, when such single-token
    <s> elements are frequent, we remove each sentence's last token and merge
    every single-token sentence into the preceding <s> element.
    """
    sents, out, eos_only = [], [], 0
    for s in doc:
        lines = s.text.strip().split('\n')
        # first tab-separated field of each .vrt line is the surface token
        sent = [line.split('\t')[0] for line in lines]
        if len(sent) == 1:
            eos_only += 1
        sents.append(sent)
    if eos_only > (len(sents) / 3): # take a 1/3 ratio as threshold
        for idx, s in enumerate(sents[:-1]):
            if len(sents[idx]) > 1 and len(sents[idx]) > min_sent_len:
                if len(sents[idx + 1]) == 1:
                    # assume doubled eos only if followed by eos only sent
                    out.append(sents[idx][:-1] + sents[idx + 1])
                else:
                    out.append(sents[idx])
        # the last sentence has no follower; keep it if long enough
        if len(sents[-1]) > 1 and len(sents[-1]) > min_sent_len:
            out.append(sents[-1])
    else:
        out = sents
    return [detokenizer(s) for s in out]
def _pl_func(inf, min_sent_len):
    """Parse one Patrologia .vrt XML file into (title, nb_words, sentences)."""
    s = inf.read()
    tree_root = etree.fromstring(
        # get rid of rogue xml
        s.replace('<unknown>', 'unknown').encode('utf-8'))
    title = tree_root.attrib['titre']
    sentences = pl_sentence_tokenizer(tree_root, min_sent_len=min_sent_len)
    # BUG FIX: nb_words was first read from the 'nb_tokens' attribute and
    # then immediately overwritten below — the attribute read was dead code.
    nb_words = sum(len(s.split()) for s in sentences)
    return title, nb_words, sentences
def patrologia_reader(root=ROOT_FOLDER, include=(), exclude=(),
                      subpath='pl', min_sent_len=5):
    """
    Yield DOC tuples for the Patrologia Latina corpus.

    Parameters
    ===========
    root : str, top folder of the processed data
    exclude : tuple of str, authors to skip when reading. Note that
        author names are determined by the file name substituting
        underscores `_` with blankspaces ` `.
    include : tuple of str, authors to include when reading.
    min_sent_len : int, sentences with this many words or fewer are dropped.
    """
    return _reader_gen(
        _pl_func, root, include, exclude, subpath, min_sent_len)
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--min_sent_len', default=5, type=int)
    args = parser.parse_args()

    def word_counts(docs):
        # Aggregate word counts per author, then per title.
        out = defaultdict(lambda: defaultdict(int))
        for doc in docs:
            out[doc.author][doc.title] += int(doc.nb_words)
        return out

    def print_wc(wc, pad=10):
        # Print a padded CSV summary: author, mean words/doc, words, docs.
        max_author = max(len(a) for a in wc)
        # BUG FIX: the header row pointlessly ran 'author'.replace('.', ' ')
        # — a no-op on a literal with no dots; dropped.
        print('{author},{mean_words},{words},{docs}'.format(
            author='author'.ljust(max_author, ' '),
            mean_words='mean_words'.ljust(pad, ' '),
            words='words'.ljust(pad, ' '),
            docs='docs'.ljust(pad, ' ')))
        for author in wc:
            words = sum(wc[author][d] for d in wc[author])
            docs = len(wc[author])
            print('{author},{mean_words},{words},{docs}'.format(
                author=author.replace('.', ' ').ljust(max_author, ' '),
                mean_words=str(words/docs).ljust(pad, ' '),
                words=str(words).ljust(pad, ' '),
                docs=str(docs).ljust(pad, ' ')))

    print_wc(word_counts(patrologia_reader(min_sent_len=args.min_sent_len)))
    print_wc(word_counts(packhum_reader(min_sent_len=args.min_sent_len)))
|
21,170 | aff4d7c417bd281824c78b7fdba2c8aecad8a21e | #! /usr/bin/env python
import sys
file = sys.argv[1]
start = int(sys.argv[2])
end = int(sys.argv[3])
size = 0
segment = ''
# Walk the FASTA file: `size` accumulates the length of sequence lines seen
# so far. NOTE(review): len(line) includes the trailing newline, so the
# cumulative size is inflated by one per line — confirm whether intended.
for line in open(file, 'r'):
    if not line.startswith('>'):
        size += len(line)
    else:
        name = line
        # a header reached while the cumulative size lies in [start, end]
        # is collected into `segment` (Python 2 script: bare print below)
        if size >= start and size <= end:
            segment += line
print name, segment
|
21,171 | fe75389256b433fc94adeb15737134c9bb1e0e9b | from django.db import models
from django.urls import reverse
from django.contrib.auth import get_user_model
class Realtor(models.Model):
    """A property listing posted by an owner."""

    # NOTE(review): Django field `choices` expects a sequence of
    # (value, label) pairs; these dicts are also never wired to any field —
    # confirm intent before converting them.
    bedroom_choices = {
        '1': 1,
        '2': 2,
        '3': 3,
        '4': 4,
        '5': 5,
    }
    price_choices = {
        '100000': '$100,000',
        '200000': '$200,000',
        '300000': '$300,000',
        '400000': '$400,000',
    }
    # BUG FIX: the Ernakulam entry had the separator comma inside the string
    # literal ('Ernakulam,'), so the label carried a trailing comma.
    District_choices = {
        'TVM': 'Thiruvananthapuram',
        'ALP': 'Alappuzha',
        'KTM': 'Kottayam',
        'IDK': 'Idukki',
        'ERKLM': 'Ernakulam',
    }

    class Meta:
        verbose_name = 'Realtor'

    owner_name = models.CharField(max_length=200)
    photo = models.ImageField(upload_to='photos/%Y/%m/%d/')
    description = models.TextField(blank=True)
    phone = models.CharField(max_length=20)
    email = models.CharField(max_length=50)
    district = models.TextField(blank=True)
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)

    def __str__(self):
        return self.owner_name

    def get_absolute_url(self):
        # NOTE(review): reverses the 'subject' URL name — verify that route exists.
        return reverse('subject', args=[str(self.id)])
# for comments
class Comment(models.Model):
    # NOTE(review): the FK to the Realtor listing is confusingly named
    # `comment`; renaming would require a migration.
    comment= models.ForeignKey('realtors.Realtor', on_delete=models.CASCADE, related_name='comments')
    user = models.CharField(max_length=200)
    text = models.TextField()
    commented_date = models.DateTimeField(auto_now_add=True, editable=False, null=False, blank=False)
|
21,172 | 72616740ad0657d2d9654a4484dca40336675a07 | # -*- coding: utf-8 -*-
import pygame, math, sys, random
from pygame.locals import *

# Window size and target frames per second for the main loop.
SCREEN_SIZE = (1000,500)
FRAME_RATE = 60

screen = pygame.display.set_mode(SCREEN_SIZE)
clock = pygame.time.Clock()

# Skeleton game loop: process events, then clear and redraw each frame.
while True :
    clock.tick(FRAME_RATE)
    key = pygame.key.get_pressed()  # mapping of which keys are currently held down
    for event in pygame.event.get() :
        if event.type == KEYDOWN :
            if event.key == K_ESCAPE :
                # Escape quits the program.
                sys.exit(0)
        elif event.type == MOUSEBUTTONDOWN :
            pos = event.pos  # mouse cursor position
            if event.button == 1:
                # TODO: handle left mouse button press (the `pass` may be removed)
                pass
            elif event.button == 3:
                # TODO: handle right mouse button press
                pass
    screen.fill((0,0,0))
    # TODO: render here, e.g. with screen.blit(...)
    pygame.display.flip()
21,173 | ce1861986cdba5bad946e05131121f09303f9a9d | import os
from numpy import *
from matplotlib.pyplot import *
from netCDF4 import Dataset
from pylab import *
from mpl_util import LevelColormap
import pyroms
import pyroms_toolbox
from mpl_toolkits.basemap import Basemap, shiftgrid
import mpl_toolkits.basemap as mp
from bathy_smoother import *
import mpl_util
import laplace_filter
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@imr.no'
__created__ = datetime.datetime(2015, 3, 12)
__modified__ = datetime.datetime(2015, 3, 12)
__version__ = "1.0"
__status__ = "Development, 10.7.2013"
def getSelectedStations():
    """Return the preset station locations in the MyOcean grid.

    The values are grid indices, returned as two parallel lists
    (x positions, y positions).
    """
    stations = [(655, 124), (504, 444), (339, 112), (158, 209)]
    xpos = [x for x, _ in stations]
    ypos = [y for _, y in stations]
    return xpos, ypos
def ingrid(lon, lat, lon_bd,lat_bd):
    # Select the (lon, lat) points that fall inside the polygon outlined
    # by (lon_bd, lat_bd).
    # NOTE(review): `mpl` is never imported in this file, so calling this
    # raises NameError; additionally matplotlib.mlab.inside_poly was
    # removed in matplotlib >= 2.2. Reimplement with
    # matplotlib.path.Path(...).contains_points(...) before use.
    return mpl.mlab.inside_poly(zip(lon, lat), zip(lon_bd, lat_bd))
def getPolygon(grid_lon, grid_lat):
    """Trace the outer boundary of a 2-D curvilinear grid.

    Walks the first column, last row, last column (reversed) and first
    row (reversed) so the vertices form a closed circuit. Returns a
    float64 array of shape (2, N): row 0 longitudes, row 1 latitudes.
    """
    lon_bd = np.concatenate((grid_lon[:, 0], grid_lon[-1, :], grid_lon[::-1, -1], grid_lon[0, ::-1]))
    lat_bd = np.concatenate((grid_lat[:, 0], grid_lat[-1, :], grid_lat[::-1, -1], grid_lat[0, ::-1]))

    # Vectorized row assignment replaces the original element-by-element
    # copy loop, which also used the Python-2-only `xrange`.
    polygon_data = np.empty((2, len(lon_bd)))
    polygon_data[0] = lon_bd
    polygon_data[1] = lat_bd
    return polygon_data
def drawGridBoundaries(ax, map, polygon_data, mycolor):
    """Draw the grid outline stored in polygon_data (2, N) on the map.

    `map` is a Basemap instance used to project lon/lat to map x/y (the
    parameter name is kept for caller compatibility even though it
    shadows the builtin).
    """
    # print() with a single parenthesized argument works on both
    # Python 2 and 3 (the original used the py2-only print statement).
    print("Plotting grid boundaries")
    polygon_data_xy = []
    for i in range(len(polygon_data[0, :])):  # range, not py2-only xrange
        # Project each boundary vertex from lon/lat to map coordinates.
        myx, myy = map(polygon_data[0, i], polygon_data[1, i])
        polygon_data_xy.append([myx, myy])
    patch = Polygon(array(polygon_data_xy), facecolor='none',
                    edgecolor=mycolor, linewidth=4)
    ax.add_patch(patch)
def getGrid(filename):
    """Read rho-point lon/lat and bathymetry from a ROMS grid netCDF file.

    Returns (grid_lon, grid_lat, grid_h) as in-memory arrays.
    """
    cdf = Dataset(filename)
    grid_lon = cdf.variables["lon_rho"][:]
    grid_lat = cdf.variables["lat_rho"][:]
    # Read the values (note the [:]) before closing: the original kept
    # only the netCDF Variable object, which becomes unusable once the
    # dataset is closed.
    grid_h = cdf.variables["h"][:]
    print("Grid dimensions: %s and %s" % (grid_lon.shape, grid_lat.shape))
    cdf.close()

    grid_lon_min = grid_lon.min()
    grid_lon_max = grid_lon.max()
    grid_lat_min = grid_lat.min()
    grid_lat_max = grid_lat.max()

    print("Grid domain longitude from %s to %s" % (grid_lon_min, grid_lon_max))
    print("Grid domain latitude from %s to %s" % (grid_lat_min, grid_lat_max))
    print("----\n")

    return grid_lon, grid_lat, grid_h
def createNiceMap(grd,h,polygon_data_KINO,polygon_data_NS8KM,plotSelectedStations):
    # Plot bathymetry `h` on a transverse-Mercator Basemap covering the
    # model domain, overlay the NS8KM (red) and KINO1600M (magenta) grid
    # outlines, optionally mark the preset stations, then save and show.
    fig=plt.figure(figsize=(12,12))
    ax=fig.add_subplot(111)
    # Depth contour levels (meters) for the filled bathymetry.
    levels = [10,25,50,100,250,500,1000,2500,5000]
    mymap = Basemap(llcrnrlon=-18.0,
                    llcrnrlat=46.0,
                    urcrnrlon=25.5,
                    urcrnrlat=67.5,
                    resolution='i',projection='tmerc',lon_0=0,lat_0=50,area_thresh=50.)
    mymap.drawcoastlines()
    mymap.drawcountries()
    mymap.fillcontinents(color='grey')
    # Meridians every 10 deg (labelled at bottom), parallels every 4 deg
    # (labelled at left).
    mymap.drawmeridians(np.arange(grd.hgrid.lon_rho.min(),grd.hgrid.lon_rho.max(),10),labels=[0,0,0,1])
    mymap.drawparallels(np.arange(grd.hgrid.lat_rho.min(),grd.hgrid.lat_rho.max(),4),labels=[1,0,0,0])
    # Project rho-point lon/lat onto map coordinates.
    x,y = mymap(grd.hgrid.lon_rho,grd.hgrid.lat_rho)
    print np.min(grd.hgrid.lon_rho),np.max(grd.hgrid.lon_rho)
    print np.min(grd.hgrid.lat_rho),np.max(grd.hgrid.lat_rho)
    # NOTE(review): contourf's `extend` accepts 'neither'/'min'/'max'/'both';
    # 'upper' looks wrong — confirm against the matplotlib version in use.
    CS1 = mymap.contourf(x,y,h,levels,
                         cmap=mpl_util.LevelColormap(levels,cmap=cm.Blues),
                         extend='upper',
                         alpha=1.0,
                         origin='lower',
                         rasterized=True)
    CS1.axis='tight'
    """Draw grid boundaries"""
    drawGridBoundaries(ax,mymap,polygon_data_NS8KM,mycolor="red")
    drawGridBoundaries(ax,mymap,polygon_data_KINO,mycolor="magenta")
    if (plotSelectedStations):
        # Mark the preset station indices (see getSelectedStations).
        xpos, ypos = getSelectedStations()
        xloc,yloc = mymap(grd.hgrid.lon_rho[ypos,xpos],grd.hgrid.lat_rho[ypos,xpos])
        mymap.plot(xloc,yloc,marker="o", color="red", markersize=10, linewidth=0)
    plotfile='figures/map_NS8KM_and_KINO1600M.pdf'
    # NOTE(review): savefig's `dpi` is numeric; newer matplotlib rejects
    # the string '200' — confirm.
    plt.savefig(plotfile, dpi='200')
    plt.show()
""" MAIN """
"""Get the grid file defined in /Users/trond/GMT/pyroms-master/pyroms/pyroms/gridid.txt"""
grd = pyroms.grid.get_ROMS_grid('KINO1600M')
polygon_data_NS8KM = getPolygon(grd.hgrid.lon_rho,grd.hgrid.lat_rho)
"""Read the grid info from the grid file"""
filename="kino_1600m_18062015.nc"
lon_rho,lat_rho,grid_h = getGrid(filename)
plotSelectedStations=True
"""Calculate the x,y grid coordinates"""
(Mp,Lp)=lon_rho.shape
X=np.arange(0,Mp,1)
Y=np.arange(0,Lp,1)
roms_Xgrid,roms_Ygrid=np.meshgrid(Y,X)
"""Loop over all times and store to file or make map"""
polygon_data_KINO = getPolygon(lon_rho,lat_rho)
""" Plot the interpolated bathymetry and the land mask"""
#show()
createNiceMap(grd,grd.vgrid.h,polygon_data_KINO,polygon_data_NS8KM,plotSelectedStations) |
21,174 | e6ae4ad05788e02b4e525673936b72c1b3a6d146 | from lightbulb import Bot
from core.config import load_config
from logging import getLogger
class Elderberry(Bot):
    """Discord bot built on lightbulb's ``Bot``, configured from the
    project configuration file."""

    def __init__(self):
        # Load the configuration up front and keep it on the instance so
        # other components can read it.
        self.config = load_config()
        # Logger under hikari's 'hikari.bot' name; handlers/formatting are
        # presumably configured elsewhere — TODO confirm.
        self.logger = getLogger('hikari.bot')
        super().__init__(
            token=self.config['tokens']['discord'],
            prefix=self.config['prefix']
        )
21,175 | 668f260f7479ea241bcbcb30b149a855414224a7 | #! /usr/bin/env python
# The line above tells some systems (e.g. Linux/Apple shells) what program to
# use to execute this script.
##############################################################################
# You don't need to understand most of this yet- you can just skip to the #
# large comment section below if this is all a bit daunting! #
##############################################################################
# Import the libraries we need
import sys

from music21 import (
    chord,
    clef,
    environment,
    metadata,
    meter,
    note,
    stream,
    tie,
)
# Tell music21 what to use to play midi and display score
environment.set('midiPath', '/usr/bin/timidity')
environment.set('musicxmlPath', '/usr/bin/musescore')
##############################################################################
# LESSON 3 STARTS HERE #
#############################################################################
right_hand = stream.Part()
left_hand = stream.Part()
bar1_right = stream.Measure()
bar1_right.clef = clef.TrebleClef()
bar1_right.timeSignature = meter.TimeSignature('3/4')
bar1_right.append(note.Note('c4'))
right_hand.append(bar1_right)
bar1_left = stream.Measure()
bar1_left.clef = clef.BassClef()
bar1_left.timeSignature = meter.TimeSignature('3/4')
bar1_left.append(note.Rest(quarterLength=1))
left_hand.append(bar1_left)
bar2_right = stream.Measure()
tie_start = note.Note('c4')
tie_start.setTie(tie.Tie('start'))
bar2_right.append(tie_start)
tie_mid = note.Note('e4')
tie_mid.setTie(tie.Tie('continue'))
bar2_right.append(tie_mid)
tie_end = note.Note('g4')
tie_end.setTie(tie.Tie('stop'))
bar2_right.append(tie_end)
right_hand.append(bar2_right)
bar2_left = stream.Measure()
bar2_left.append(note.Rest(quarterLength=3))
left_hand.append(bar2_left)
bar3_right = stream.Measure()
bar3_right.append(note.Note('g4', quarterLength=2))
bar3_right.append(chord.Chord([
note.Note('e5'),
note.Note('g5'),
]))
right_hand.append(bar3_right)
bar3_left = stream.Measure()
bar3_left.append(note.Note('c3'))
bar3_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar3_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar3_left)
bar4_right = stream.Measure()
bar4_right.append(chord.Chord([
note.Note('e5', quarterLength=2),
note.Note('g5', quarterLength=2),
]))
bar4_right.append(chord.Chord([
note.Note('c5'),
note.Note('e5'),
]))
right_hand.append(bar4_right)
bar4_left = stream.Measure()
bar4_left.append(note.Note('c3'))
bar4_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar4_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar4_left)
bar5_right = stream.Measure()
bar5_right.append(chord.Chord([
note.Note('c5', quarterLength=2),
note.Note('e5', quarterLength=2),
]))
bar5_right.append(note.Note('c4'))
right_hand.append(bar5_right)
bar5_left = stream.Measure()
bar5_left.append(note.Note('c3'))
bar5_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar5_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar5_left)
bar6_right = stream.Measure()
bar6_right.append(note.Note('c4'))
bar6_right.append(note.Note('e4'))
bar6_right.append(note.Note('g4'))
right_hand.append(bar6_right)
bar6_left = stream.Measure()
bar6_left.append(note.Note('c3'))
bar6_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar6_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar6_left)
bar7_right = stream.Measure()
bar7_right.append(note.Note('g4', quarterLength=2))
bar7_right.append(chord.Chord([
note.Note('f5'),
note.Note('g5'),
]))
right_hand.append(bar7_right)
bar7_left = stream.Measure()
bar7_left.append(note.Note('d3'))
bar7_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar7_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar7_left)
bar8_right = stream.Measure()
bar8_right.append(chord.Chord([
note.Note('f5', quarterLength=2),
note.Note('g5', quarterLength=2),
]))
bar8_right.append(chord.Chord([
note.Note('b4'),
note.Note('f5'),
]))
right_hand.append(bar8_right)
bar8_left = stream.Measure()
bar8_left.append(note.Note('d3'))
bar8_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar8_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar8_left)
bar9_right = stream.Measure()
bar9_right.append(chord.Chord([
note.Note('b4', quarterLength=2),
note.Note('f5', quarterLength=2),
]))
bar9_right.append(note.Note('b3'))
right_hand.append(bar9_right)
bar9_left = stream.Measure()
bar9_left.append(note.Note('d3'))
bar9_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar9_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar9_left)
bar10_right = stream.Measure()
bar10_right.append(note.Note('b3'))
bar10_right.append(note.Note('d4'))
bar10_right.append(note.Note('a4'))
right_hand.append(bar10_right)
bar10_left = stream.Measure()
bar10_left.append(note.Note('d3'))
bar10_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar10_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar10_left)
bar11_right = stream.Measure()
bar11_right.append(note.Note('a4', quarterLength=2))
bar11_right.append(chord.Chord([
note.Note('f5'),
note.Note('a5'),
]))
right_hand.append(bar11_right)
bar11_left = stream.Measure()
bar11_left.append(note.Note('d3'))
bar11_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar11_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar11_left)
bar12_right = stream.Measure()
bar12_right.append(chord.Chord([
note.Note('f5', quarterLength=2),
note.Note('a5', quarterLength=2),
]))
bar12_right.append(chord.Chord([
note.Note('b4'),
note.Note('f5'),
]))
right_hand.append(bar12_right)
bar12_left = stream.Measure()
bar12_left.append(note.Note('d3'))
bar12_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar12_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar12_left)
bar13_right = stream.Measure()
bar13_right.append(chord.Chord([
note.Note('b4', quarterLength=2),
note.Note('f5', quarterLength=2),
]))
bar13_right.append(note.Note('b3'))
right_hand.append(bar13_right)
bar13_left = stream.Measure()
bar13_left.append(note.Note('d3'))
bar13_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar13_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar13_left)
bar14_right = stream.Measure()
bar14_right.append(note.Note('b3'))
bar14_right.append(note.Note('d4'))
bar14_right.append(note.Note('a4'))
right_hand.append(bar14_right)
bar14_left = stream.Measure()
bar14_left.append(note.Note('d3'))
bar14_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar14_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar14_left)
bar15_right = stream.Measure()
bar15_right.append(note.Note('a4', quarterLength=2))
bar15_right.append(chord.Chord([
note.Note('e5'),
note.Note('a5'),
]))
right_hand.append(bar15_right)
bar15_left = stream.Measure()
bar15_left.append(note.Note('c3'))
bar15_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar15_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar15_left)
bar16_right = stream.Measure()
bar16_right.append(chord.Chord([
note.Note('e5', quarterLength=2),
note.Note('a5', quarterLength=2),
]))
bar16_right.append(chord.Chord([
note.Note('c5'),
note.Note('e5'),
]))
right_hand.append(bar16_right)
bar16_left = stream.Measure()
bar16_left.append(note.Note('c3'))
bar16_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar16_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar16_left)
bar17_right = stream.Measure()
bar17_right.append(chord.Chord([
note.Note('c5', quarterLength=2),
note.Note('e5', quarterLength=2),
]))
bar17_right.append(note.Note('c4'))
right_hand.append(bar17_right)
bar17_left = stream.Measure()
bar17_left.append(note.Note('c3'))
bar17_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar17_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar17_left)
bar18_right = stream.Measure()
bar18_right.append(note.Note('c4'))
bar18_right.append(note.Note('e4'))
bar18_right.append(note.Note('g4'))
right_hand.append(bar18_right)
bar18_left = stream.Measure()
bar18_left.append(note.Note('c3'))
bar18_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar18_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar18_left)
bar19_right = stream.Measure()
bar19_right.append(note.Note('c5', quarterLength=2))
bar19_right.append(chord.Chord([
note.Note('g5'),
note.Note('c6'),
]))
right_hand.append(bar19_right)
bar19_left = stream.Measure()
bar19_left.append(note.Note('e3'))
bar19_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
bar19_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
left_hand.append(bar19_left)
bar20_right = stream.Measure()
bar20_right.append(chord.Chord([
note.Note('g5', quarterLength=2),
note.Note('c6', quarterLength=2),
]))
bar20_right.append(chord.Chord([
note.Note('e5'),
note.Note('g5'),
]))
right_hand.append(bar20_right)
bar20_left = stream.Measure()
bar20_left.append(note.Note('e3'))
bar20_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
bar20_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
left_hand.append(bar20_left)
bar21_right = stream.Measure()
bar21_right.append(chord.Chord([
note.Note('e5', quarterLength=2),
note.Note('g5', quarterLength=2),
]))
bar21_right.append(note.Note('c4'))
right_hand.append(bar21_right)
bar21_left = stream.Measure()
bar21_left.append(note.Note('e3'))
bar21_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
bar21_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
left_hand.append(bar21_left)
bar22_right = stream.Measure()
bar22_right.append(note.Note('c4'))
bar22_right.append(note.Note('e4'))
bar22_right.append(note.Note('g4'))
right_hand.append(bar22_right)
bar22_left = stream.Measure()
bar22_left.append(note.Note('e3'))
bar22_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
bar22_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
left_hand.append(bar22_left)
bar23_right = stream.Measure()
bar23_right.append(note.Note('c5', quarterLength=2))
bar23_right.append(chord.Chord([
note.Note('a5'),
note.Note('c6'),
]))
right_hand.append(bar23_right)
bar23_left = stream.Measure()
bar23_left.append(note.Note('f3'))
bar23_left.append(chord.Chord([
note.Note('a3'),
note.Note('d4'),
]))
bar23_left.append(chord.Chord([
note.Note('a3'),
note.Note('d4'),
]))
left_hand.append(bar23_left)
bar24_right = stream.Measure()
bar24_right.append(chord.Chord([
note.Note('a5', quarterLength=2),
note.Note('c6', quarterLength=2),
]))
bar24_right.append(chord.Chord([
note.Note('f5'),
note.Note('a5'),
]))
right_hand.append(bar24_right)
bar24_left = stream.Measure()
bar24_left.append(note.Note('f3'))
bar24_left.append(chord.Chord([
note.Note('a3'),
note.Note('d4'),
]))
bar24_left.append(chord.Chord([
note.Note('a3'),
note.Note('d4'),
]))
left_hand.append(bar24_left)
bar25_right = stream.Measure()
bar25_right.append(chord.Chord([
note.Note('f5', quarterLength=2),
note.Note('a5', quarterLength=2),
]))
bar25_right.append(note.Note('d4'))
right_hand.append(bar25_right)
bar25_left = stream.Measure()
bar25_left.append(chord.Chord([
note.Note('f3', quarterLength=3),
note.Note('a3', quarterLength=3),
note.Note('d4', quarterLength=3),
]))
left_hand.append(bar25_left)
bar26_right = stream.Measure()
bar26_right.append(note.Note('d4'))
bar26_right.append(note.Note('f4'))
bar26_right.append(note.Note('a4'))
right_hand.append(bar26_right)
bar26_left = stream.Measure()
bar26_left.append(note.Note('a3'))
bar26_left.append(note.Note('f3'))
bar26_left.append(note.Note('d3'))
left_hand.append(bar26_left)
bar27_right = stream.Measure()
bar27_right.append(note.Note('a4', quarterLength=3))
right_hand.append(bar27_right)
bar27_left = stream.Measure()
bar27_left.append(note.Note('d3'))
bar27_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar27_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar27_left)
bar28_right = stream.Measure()
bar28_right.append(note.Note('a4'))
# Fixed: music21 pitch names put the accidental before the octave
# ('f#4'); the original 'f4#' raises when the Note is created.
bar28_right.append(note.Note('f#4'))
bar28_right.append(note.Note('g4'))
right_hand.append(bar28_right)
bar28_left = stream.Measure()
bar28_left.append(note.Note('b2'))
bar28_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
bar28_left.append(chord.Chord([
note.Note('f3'),
note.Note('g3'),
]))
left_hand.append(bar28_left)
bar29_right = stream.Measure()
bar29_right.append(note.Note('e5', quarterLength=3))
right_hand.append(bar29_right)
bar29_left = stream.Measure()
bar29_left.append(note.Note('c3'))
bar29_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
bar29_left.append(chord.Chord([
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar29_left)
bar30_right = stream.Measure()
bar30_right.append(note.Note('e5'))
bar30_right.append(note.Note('c5'))
bar30_right.append(note.Note('e4'))
right_hand.append(bar30_right)
bar30_left = stream.Measure()
bar30_left.append(note.Note('e3'))
bar30_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
bar30_left.append(chord.Chord([
note.Note('g3'),
note.Note('c4'),
]))
left_hand.append(bar30_left)
bar31_right = stream.Measure()
bar31_right.append(note.Note('e4', quarterLength=2))
bar31_right.append(note.Note('d4'))
right_hand.append(bar31_right)
bar31_left = stream.Measure()
bar31_left.append(chord.Chord([
note.Note('f3', quarterLength=3),
note.Note('a3', quarterLength=3),
note.Note('c4', quarterLength=3),
]))
left_hand.append(bar31_left)
bar32_right = stream.Measure()
bar32_right.append(note.Note('a4', quarterLength=2))
bar32_right.append(note.Note('g4'))
right_hand.append(bar32_right)
bar32_left = stream.Measure()
bar32_left.append(chord.Chord([
note.Note('d3', quarterLength=3),
note.Note('g3', quarterLength=3),
note.Note('b3', quarterLength=3),
]))
left_hand.append(bar32_left)
bar33_right = stream.Measure()
bar33_right.append(note.Note('c4', quarterLength=1.5))
bar33_right.append(note.Note('c5', quarterLength=0.5))
bar33_right.append(note.Note('c5'))
right_hand.append(bar33_right)
bar33_left = stream.Measure()
bar33_left.append(chord.Chord([
note.Note('c3', quarterLength=2),
note.Note('e3', quarterLength=2),
note.Note('g3', quarterLength=2),
]))
bar33_left.append(chord.Chord([
note.Note('c3'),
note.Note('e3'),
note.Note('g3'),
]))
left_hand.append(bar33_left)
bar34_right = stream.Measure()
bar34_right.append(note.Note('c5'))
bar34_right.append(note.Rest(quarterLength=1))
right_hand.append(bar34_right)
bar34_left = stream.Measure()
bar34_left.append(chord.Chord([
note.Note('c3'),
note.Note('e3'),
note.Note('g3'),
]))
bar34_left.append(note.Rest(quarterLength=1))
left_hand.append(bar34_left)
tune = stream.Score()
# Insert both parts at offset 0 so they sound simultaneously.
tune.insert(0, right_hand)
tune.insert(0, left_hand)
# Add a title
tune.metadata = metadata.Metadata(title='Python TTTGLS: Lesson 3 part 1')
##########################################################################
# LESSON 3 (PART 1)ENDS HERE #
##########################################################################
# Only run this if the script is executed directly, not imported
if __name__ == '__main__':
    # No arguments: print the usage hints, then quit without doing anything.
    if len(sys.argv) == 1:
        usage = (
            'add a "score" argument to see the score.',
            'add a "text" argument to see the python objects.',
            'add a "midi" argument to hear it.',
            'e.g. To hear the tune: {command} midi'.format(command=sys.argv[0]),
        )
        for hint in usage:
            print(hint)
        sys.exit()
    # 'midi': play the tune through the environment-configured midi player.
    if 'midi' in sys.argv:
        tune.show('midi')
    # 'text': dump the score as a tree of music21 python objects.
    if 'text' in sys.argv:
        tune.show('text')
    # 'score': render the engraved score (show's default format).
    if 'score' in sys.argv:
        tune.show()
|
21,176 | f1d8be32c6899ee14e255a3056a87898c414bf5f | from django.shortcuts import render,redirect,reverse
import stripe
# Create your views here.
def home(request):
    # Landing page carrying the Stripe checkout form.
    return render(request,'index.html')
def charge(request):
    """Handle the checkout POST: create a Stripe customer, charge the
    submitted amount (converted to cents), then redirect to the success
    page with the amount in dollars."""
    print(request.POST)
    # SECURITY(review): the secret API key must come from settings or an
    # environment variable, never a source-code literal.
    stripe.api_key = "#secret key is here"
    # NOTE(review): this customer is not linked to the charge below (no
    # `customer=` argument) — confirm whether that is intended.
    stripe.Customer.create(
        name=request.POST.get('name'),
        email=request.POST.get('email'),
        description="My First Test Customer (created for API docs)",
    )
    amount=int(request.POST.get('amount'))*100  # Stripe amounts are in cents
    stripe.Charge.create(
        amount=amount,
        currency="usd",
        source="tok_visa",  # Stripe test-card token
        description="My First Test Charge (created for API docs)",
    )
    amount_in_D=amount//100  # back to whole dollars for display
    # NOTE(review): the URL pattern name 'sucess' (sic) must match urls.py.
    return redirect(reverse('sucess',args=[amount_in_D]))
def success(request, args):
    """Render the payment-success page showing the charged amount."""
    return render(request, 'payment-success.html', {'amount': args})
21,177 | a86e0b43ec9441d03d51d2e23bd6522221c6913b | from Rede import Rede
from InterfaceGrafica import *
global jogo
run = True
rede = Rede()
jogador = int(rede.get_objeto_socket())
run=True
run_paridade=True
run_numero=False
run_espera=False
paridade=0
n_pontos=10
texto_usuario = ''
try:
jogo = rede.send("conectar")
except:
run = False
print("Nao foi possivel criar o jogo")
while(run):
clock.tick(20)
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
if run_paridade:
run_numero=True
elif run_numero:
run_espera=True
elif event.key == pygame.K_BACKSPACE:
texto_usuario=texto_usuario[0:-1]
else:
if len(texto_usuario)<=10 and event.key != pygame.K_RETURN and run_espera==False:
texto_usuario += event.unicode
if event.type == pygame.MOUSEBUTTONDOWN:
posicao = pygame.mouse.get_pos()
if continuar.clicar(posicao):
if run_paridade:
run_numero=True
elif run_numero:
run_espera=True
if(run_paridade):
redesenhar_tela_paridade(jogador + 1,texto_usuario)
if run_numero or run_espera:
redesenhar_tela_numero(jogador+1,texto_usuario,paridade,run_espera,n_pontos)
if run_espera:
n_pontos+=1
if n_pontos==40:
n_pontos=10
if jogo.ambos_jogaram():
if (jogo.determina_vencedor() == 1 and jogador == 1) or (jogo.determina_vencedor() == 0 and jogador == 0):
vitoria=True
else:
vitoria=False
jogo_amigo=jogo.jogada[jogador][1]
jogo_adversario=jogo.jogada[not jogador][1]
redesenhar_tela_resultado(vitoria,jogo_amigo,jogo_adversario)
run_paridade = True
run_numero = False
run_espera = False
n_pontos = 10
texto_usuario = ''
try:
jogo = rede.send("zerar")
except:
run = False
print("Nao foi possivel achar o jogo apos ambos jogarem")
break
if (jogo.p1jogou==0 and jogador==0) or (jogo.p2jogou==0 and jogador==1):
if(run_paridade==True and run_numero==True):
paridade = texto_usuario
paridade=paridade.upper()
paridade=paridade.replace(" ","")
if paridade=="PAR":
paridade=0
if paridade=="IMPAR":
paridade=1
texto_usuario=''
run_paridade=False
if(run_numero==True and run_espera==True):
valor = texto_usuario
dado=[paridade,valor]
if jogador == 0:
if not jogo.p1jogou:
jogo = rede.send(dado)
if jogador == 1:
if not jogo.p2jogou:
jogo = rede.send(dado)
run_numero=False
jogo=rede.send("atualizar") |
21,178 | 96987115c6b1ee7fe3f697d9f456eea84aeea77c | import csv
import csv


def copy_first_cell(src='teste1.csv', dst='gravar.txt', delimiter=';'):
    """Read *src* as a delimiter-separated CSV and write its first cell,
    followed by the delimiter, to *dst*.

    Returns the full table as a list of rows (each a list of strings),
    mirroring the `lista` matrix the original script built.
    """
    with open(src, newline='') as csvfile:
        rows = list(csv.reader(csvfile, delimiter=delimiter))
    # `with` guarantees the output file is flushed and closed — the
    # original never closed it, risking lost buffered data.
    with open(dst, 'w') as out:
        out.write(rows[0][0])
        out.write(delimiter)
    return rows


if __name__ == '__main__':
    copy_first_cell()
21,179 | 9d35feab9526596b4519bb96177800a2e5571a4c | import uiScriptLocale
# Declarative UI layout consumed by the game's python UI loader:
# a movable 220x120 "add stats" dialog with a value slot, a slider and
# Accept/Cancel buttons.
window = {
    "name" : "RefineDialog",
    "style" : ("movable", "float",),

    "x" : 0,
    "y" : 0,

    "width" : 220,
    "height" : 120,

    "children" :
    (
        {
            "name" : "Board",
            "type" : "board_with_titlebar",
            "style" : ("attach",),

            "x" : 0,
            "y" : 0,

            "width" : 220,
            "height" : 120,

            "title" : uiScriptLocale.ADD_STATS,

            "children" :
            (
                # Read-only slot showing the current stat value.
                {
                    "name" : "StatsSlot",
                    "type" : "slotbar",

                    "x" : 20,
                    "y" : 35,
                    "width" : 180,
                    "height" : 20,
                    #"horizontal_align" : "center",

                    "children" :
                    (
                        {
                            "name" : "StatsValue",
                            "type" : "text",

                            "x" : 0,
                            "y" : 0,

                            "text" : "",

                            "all_align" : "center",
                            "text_horizontal_align" : "center",
                        },
                    ),
                },
                # Slider used to pick how many points to add.
                {
                    "name" : "slider",
                    "type" : "sliderbar",

                    "x" : 0,
                    "y" : 66,
                    "horizontal_align" : "center",
                },
                {
                    "name" : "AcceptButton",
                    "type" : "button",

                    "x" : -35,
                    "y" : 35,

                    "text" : uiScriptLocale.ADD,

                    "horizontal_align" : "center",
                    "vertical_align" : "bottom",

                    "default_image" : "d:/ymir work/ui/public/Middle_Button_01.sub",
                    "over_image" : "d:/ymir work/ui/public/Middle_Button_02.sub",
                    "down_image" : "d:/ymir work/ui/public/Middle_Button_03.sub",
                },
                {
                    "name" : "CancelButton",
                    "type" : "button",

                    "x" : 35,
                    "y" : 35,

                    "text" : uiScriptLocale.CANCEL,

                    "horizontal_align" : "center",
                    "vertical_align" : "bottom",

                    "default_image" : "d:/ymir work/ui/public/Middle_Button_01.sub",
                    "over_image" : "d:/ymir work/ui/public/Middle_Button_02.sub",
                    "down_image" : "d:/ymir work/ui/public/Middle_Button_03.sub",
                },
            ),
        },
    ),
}
21,180 | 57bd3466b119d20ab69ae9673949ef7f0bd4a0a6 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Main_Form.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.setEnabled(True)
Form.resize(1465, 787)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
Form.setFont(font)
self.Exit = QtWidgets.QPushButton(Form)
self.Exit.setGeometry(QtCore.QRect(1090, 750, 93, 31))
self.Exit.setObjectName("Exit")
self.Import_DUT = QtWidgets.QPushButton(Form)
self.Import_DUT.setGeometry(QtCore.QRect(20, 760, 141, 28))
self.Import_DUT.setObjectName("Import_DUT")
self.textEdit = QtWidgets.QTextEdit(Form)
self.textEdit.setGeometry(QtCore.QRect(570, 700, 611, 31))
self.textEdit.setObjectName("textEdit")
self.sn_list = QtWidgets.QListView(Form)
self.sn_list.setGeometry(QtCore.QRect(10, 60, 181, 631))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
self.sn_list.setFont(font)
self.sn_list.setObjectName("sn_list")
self.select = QtWidgets.QPushButton(Form)
self.select.setGeometry(QtCore.QRect(570, 750, 221, 31))
self.select.setObjectName("select")
self.textEdit_2 = QtWidgets.QTextEdit(Form)
self.textEdit_2.setGeometry(QtCore.QRect(190, 60, 491, 631))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.textEdit_2.setFont(font)
self.textEdit_2.setObjectName("textEdit_2")
self.Query_Database = QtWidgets.QPushButton(Form)
self.Query_Database.setGeometry(QtCore.QRect(20, 710, 141, 28))
self.Query_Database.setObjectName("Query_Database")
self.test_time_list = QtWidgets.QListView(Form)
self.test_time_list.setGeometry(QtCore.QRect(190, 60, 151, 631))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
self.test_time_list.setFont(font)
self.test_time_list.setObjectName("test_time_list")
self.Result_List = QtWidgets.QListView(Form)
self.Result_List.setGeometry(QtCore.QRect(340, 60, 91, 631))
self.Result_List.setObjectName("Result_List")
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(20, 40, 72, 15))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Form)
self.label_2.setGeometry(QtCore.QRect(190, 40, 91, 16))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(Form)
self.label_3.setGeometry(QtCore.QRect(340, 40, 81, 16))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(Form)
self.label_4.setGeometry(QtCore.QRect(440, 40, 81, 16))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.Test_Case_List = QtWidgets.QListView(Form)
self.Test_Case_List.setGeometry(QtCore.QRect(430, 60, 256, 631))
self.Test_Case_List.setObjectName("Test_Case_List")
self.tableView = QtWidgets.QTableView(Form)
self.tableView.setGeometry(QtCore.QRect(890, 60, 571, 631))
self.tableView.setObjectName("tableView")
self.Plot_Series = QtWidgets.QPushButton(Form)
self.Plot_Series.setGeometry(QtCore.QRect(190, 710, 93, 28))
self.Plot_Series.setObjectName("Plot_Series")
self.Plot_Nor_Distribution = QtWidgets.QPushButton(Form)
self.Plot_Nor_Distribution.setGeometry(QtCore.QRect(190, 750, 221, 28))
self.Plot_Nor_Distribution.setObjectName("Plot_Nor_Distribution")
self.Test_Config_List = QtWidgets.QListView(Form)
self.Test_Config_List.setGeometry(QtCore.QRect(680, 60, 121, 631))
self.Test_Config_List.setObjectName("Test_Config_List")
self.label_5 = QtWidgets.QLabel(Form)
self.label_5.setGeometry(QtCore.QRect(690, 40, 111, 16))
font = QtGui.QFont()
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.Case_Result_List = QtWidgets.QListView(Form)
self.Case_Result_List.setGeometry(QtCore.QRect(800, 60, 91, 631))
self.Case_Result_List.setObjectName("Case_Result_List")
self.label_6 = QtWidgets.QLabel(Form)
self.label_6.setGeometry(QtCore.QRect(800, 40, 101, 16))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.Plot_Histogram = QtWidgets.QPushButton(Form)
self.Plot_Histogram.setGeometry(QtCore.QRect(290, 710, 121, 28))
self.Plot_Histogram.setObjectName("Plot_Histogram")
self.label_7 = QtWidgets.QLabel(Form)
self.label_7.setGeometry(QtCore.QRect(900, 40, 151, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.ShowData = QtWidgets.QPushButton(Form)
self.ShowData.setGeometry(QtCore.QRect(440, 730, 93, 28))
self.ShowData.setObjectName("ShowData")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "ASIC Parameters Test Result Analysis"))
self.Exit.setText(_translate("Form", "Exit"))
self.Import_DUT.setText(_translate("Form", "Import Database"))
self.select.setText(_translate("Form", "Select the import folder"))
self.Query_Database.setText(_translate("Form", "Query_Database"))
self.label.setText(_translate("Form", "S/N"))
self.label_2.setText(_translate("Form", "Test Time"))
self.label_3.setText(_translate("Form", "Test Result"))
self.label_4.setText(_translate("Form", "Test Cae"))
self.Plot_Series.setText(_translate("Form", "Plot_Series"))
self.Plot_Nor_Distribution.setText(_translate("Form", "Plot_Normal_Distribution"))
self.label_5.setText(_translate("Form", "Test Config"))
self.label_6.setText(_translate("Form", "Case Result"))
self.Plot_Histogram.setText(_translate("Form", "Plot_Histogram"))
self.label_7.setText(_translate("Form", "Detailed Data"))
self.ShowData.setText(_translate("Form", "ShowData"))
|
21,181 | 4dcc784415d63c3e89d80245b621987a5f53ba3c | import cv2
from imageio.core.util import Image
# Pencil-sketch effect: grayscale -> invert -> Gaussian blur -> invert back,
# then divide the grayscale by the blurred inverse (a "color dodge" blend).
image = cv2.imread("py.png")
greyimg = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
convert = cv2.bitwise_not(greyimg)
# Large 35x35 kernel -> soft, wide sketch strokes.
blur = cv2.GaussianBlur(convert,(35,35),0)
convertblur = cv2.bitwise_not(blur)
# scale=230 (instead of the usual 255) leaves the sketch slightly darker.
sketch = cv2.divide(greyimg, convertblur,scale=230.0)
cv2.imwrite("output.png",sketch)
|
21,182 | d3aba1f8785af8bdf630221c10b3d1275fe71d82 | # encoding: utf-8
from odoo import models, fields, api, _
import odoo.addons.decimal_precision as dp
from odoo.addons.kzm_base.controllers.tools import remove_accent
class hr_cotisation(models.Model):
_name = 'hr.cotisation'
@api.onchange('name')
def _onchange_name(self) :
if self.name and not self.code:
self.code = remove_accent(self.name.strip().replace(' ','_')).upper()
sequence = fields.Integer(string=u'Sรฉquence', default=0)
code = fields.Char(string=u'Code', size=64, required=True)
name = fields.Char(string=u'Nom', size=64, required=True)
rate_salariale = fields.Float(
string=u'Taux salariale',
digits=dp.get_precision('Account'), required=True, default=0)
rate_patronale = fields.Float(
string=u'Taux patronale',
digits=dp.get_precision('Account'), required=True, default=0)
plafond_salariale = fields.Float(
string=u'Plafond salarial',
digits=dp.get_precision('Account'),
required=True,
default=0)
plafond_patronale = fields.Float(
string=u'Plafond patronal',
digits=dp.get_precision('Account'),
required=True,
default=0)
contribution_id = fields.Many2one(
'hr.contribution.register', string=u'Contribution',)
group_id = fields.Many2one('hr.cotisation.group', string=u'Grouper dans le bulletin', required=True, )
ledger_id = fields.Many2one('hr.cotisation.ledger', string=u'Grouper dans le livre d epaie', required=True, )
show_on_payslip = fields.Selection([
('never', 'Jamais'),
('ifnotnull', 'Si diffรฉrent du zรฉro'),
('always', 'Toujours'),
], string=u'Affichage sur les bulletins', required=True, default='ifnotnull')
analytic_account_id = fields.Many2one(
'account.analytic.account', string=u'Compte analytique',)
account_tax_id = fields.Many2one('account.tax', string=u'Code TVA',)
account_debit = fields.Many2one(
'account.account', string=u'Compte du dรฉbit')
account_credit = fields.Many2one(
'account.account', string=u'Compte du crรฉdit')
type_cotisation = fields.Selection([
('cnss', 'CNSS'),
('cimr', 'CIMR'),
('assurance', 'Assurance retraite'),
('autre', 'Autre'),
], string=u'Type de cotisation', required=True, )
@api.one
@api.depends("rate_salariale")
def _compute_rate_inline(self):
main = int(self.rate_salariale)
precision = int((self.rate_salariale - main)*100)
self.rate_salariale_inline = str(main).rjust(2,'0')+str(precision).rjust(2,'0')
rate_salariale_inline = fields.Char(string=u'Rate inline', size = 4 , compute='_compute_rate_inline', store=True, )
@api.model
def create(self, vals):
cotisation_id = super(hr_cotisation, self).create(vals)
self.env['hr.axe'].create({'cotisation_id': cotisation_id.id})
return cotisation_id
@api.multi
def write(self, vals):
res = super(hr_cotisation, self).write(vals)
for cotisation in self:
axes = self.env['hr.axe'].search(
[('cotisation_id', '=', cotisation.id)])
if axes:
for axe in axes:
axe.write({'cotisation_id': cotisation.id})
else:
axes.create({'cotisation_id': cotisation.id})
return res
@api.multi
def unlink(self):
for cotisation in self:
axes = self.env['hr.axe'].search(
[('cotisation_id', '=', cotisation.id)])
if axes:
for axe in axes:
axe.unlink()
return super(hr_cotisation, self).unlink()
class hr_cotisation_group(models.Model):
    # Grouping label used to bucket contribution lines on the payslip
    # (referenced by hr.cotisation.group_id).
    _name = 'hr.cotisation.group'
    _description = 'Groupe'
    name = fields.Char(string=u'Nom', size = 64 , required=True, )
    code = fields.Char(string=u'Code', size = 64 , required=True, )
    # Display/sort order in list views.
    sequence = fields.Integer(string=u'Séquence', default=0)
class hr_cotisation_ledger(models.Model):
    # Grouping label for the payroll ledger ("livre de paie"),
    # referenced by hr.cotisation.ledger_id.
    _name = 'hr.cotisation.ledger'
    _description = 'Livre de paie'
    name = fields.Char(string=u'Nom', size = 64 , required=True, )
    code = fields.Char(string=u'Code', size = 64 , required=True, )
    # Display/sort order in list views.
    sequence = fields.Integer(string=u'Séquence', default=0)
    # Visibility rule on the ledger: never / only when non-zero / always.
    show_on_ledger = fields.Selection([
        ('never', 'Jamais'),
        ('ifnotnull', 'Si différent du zéro'),
        ('always', 'Toujours'),
    ], string=u'Affichage dans le livre de paie', required=True, default='ifnotnull')
|
21,183 | 3b8ef7dd7f79fdec8b0c886391fd70ea515ddedc | """
โโโโโโโ โโโโโโโโโโโโโโโ โโโโ โโโโโโ โโโโโโโโโโโโโโโโ
โโโโโโโโ โโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโ โโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโ โโโโโ โโโโโโ
โโโ โโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโ โโโโโ โโโโโโ
โโโโโโโโโโโโโโโ โโโ โโโ โโโโโโ โโโโโโโโโโโโโโโโโโโ
โโโโโโโ โโโโโโ โโโ โโโโโโ โโโโโโโโโโโโโโโโโโโ
"""
from . import encoder
from .maze import Maze
from .surface import GIFSurface
from .animation import Animation
from . import algorithms
from .gentext import generate_text_mask
|
21,184 | 614e86f66abf2c21e96eeb376c57539e1dd4e29d | class Worker(object):
def __init__(self):
gevent.spawn(self.controller)
pass
def controller(self):
while True:
print('[Worker]')
gevent.sleep(1)
def ping(self):
print('[Worker] Ping from Master')
def do_work(self, nums):
nums = [int(n) for n in nums]
gevent.sleep(2)
return str(sum(nums))
# Script entry point: serve a Worker over zerorpc on the ip:port passed as
# argv[1], then announce ourselves to the master before blocking in run().
if __name__ == '__main__':
    s = zerorpc.Server(Worker())
    ip_port = sys.argv[1]
    ip = ip_port.split(':')[0]
    port = ip_port.split(':')[1]
    s.bind('tcp://'+ip+':'+port)
    c = zerorpc.Client()
    # NOTE(review): `master_addr` is not defined in this chunk -- presumably a
    # module-level constant defined elsewhere in the file; confirm it exists.
    c.connect(master_addr)
    c.register(ip,port)
    s.run()
|
21,185 | 5615b71d8e8c98319f263d7e486b417bf61edbe5 | import torch
import torch.nn as nn
from graph.weights_initializer import weights_init
from graph.encodingBlock import TimePitchModule, PitchTimeModule, PoolingModule, ResidualModule
class PhraseEncoder(nn.Module):
    """Encode a phrase tensor into a 1152-dim latent vector.

    ``layers`` lists the channel widths of the successive stages,
    e.g. [64, 128, 256, 512, 1024]: each stage is a ResidualModule at the
    current width followed by a PoolingModule widening to the next one.
    """

    def __init__(self, layers):  # [64, 128, 256, 512, 1024]
        super(PhraseEncoder, self).__init__()
        self.time_pitch = TimePitchModule()
        self.pitch_time = PitchTimeModule()
        blocks = []
        for width_in, width_out in zip(layers[:-1], layers[1:]):
            blocks.append(ResidualModule(width_in))
            blocks.append(PoolingModule(width_in, width_out))
        self.layers = nn.ModuleList(blocks)
        self.avg = nn.AvgPool2d(kernel_size=(12, 2))
        self.linear = nn.Linear(1024, 1152, bias=False)
        self.apply(weights_init)

    def forward(self, x):
        # Two parallel front-ends, concatenated along the channel axis.
        features = torch.cat((self.pitch_time(x), self.time_pitch(x)), dim=1)
        for block in self.layers:
            features = block(features)
        features = self.avg(features)
        features = features.view(-1, 1024)
        return self.linear(features)
class PhraseModel(nn.Module):
    """Thin wrapper exposing the phrase encoder as a standalone model."""

    def __init__(self, layers):
        super().__init__()
        self.phrase_encoder = PhraseEncoder(layers)
        self.apply(weights_init)

    def forward(self, phrase):
        # Delegate straight to the encoder; its latent code is the output.
        return self.phrase_encoder(phrase)
21,186 | 8e9e3c3a76dc27c2796072800416d46d849d68af | import os, sys
import pandas as pd
class Converter(object):
    """Convert an ``.xlsx`` workbook into a ``.csv`` file next to it."""

    def __init__(self, filePath=None):
        """
        :param filePath: Path to the input .xlsx file
        :creates the path to the .csv file with the same name as the input file
        :raises Exception: if the file exists but is not an .xlsx workbook
        :return: None
        """
        # Guard the default/None argument: os.path.exists(None) would
        # otherwise die with an opaque TypeError instead of a clear message.
        if not filePath:
            sys.exit("Not existing file")
        self.xmlFile = filePath
        if os.path.exists(filePath):
            name, ext = os.path.splitext(filePath)
            # Compare case-insensitively so .XLSX / .Xlsx are accepted too.
            if ext.lower() != ".xlsx":
                raise Exception("Wrong input file")
            self.csvFile = name + ".csv"
        else:
            sys.exit("Not existing file")

    def XlsxToCsv(self):
        """
        Generate the .csv file at the same location as the .xlsx file.
        Skips the conversion when a non-empty .csv already exists.
        :return: None
        """
        print("Converting XLSX file to CSV")
        if os.path.exists(self.csvFile) and os.path.getsize(self.csvFile) > 0:
            print(self.csvFile + " already exists")
        else:
            try:
                data_xls = pd.read_excel(self.xmlFile)
                data_xls.to_csv(self.csvFile, encoding='utf-8', index = False, header=True)
            except Exception as e:
                print(e)
                sys.exit("Could not convert xlsx to csv file")
21,187 | 6862a97bfe9686a53496ee26f74c3d9725f2796e | """Research Center Jรผlich - EIT40 system importer (2010 version)
"""
import datetime
import numpy as np
import pandas as pd
def _average_swapped_current_injections(df):
AB = df[['a', 'b']].values
# get unique injections
abu = np.unique(
AB.flatten().view(AB.dtype.descr * 2)
).view(AB.dtype).reshape(-1, 2)
# find swapped pairs
pairs = []
alone = []
abul = [x.tolist() for x in abu]
for ab in abul:
swap = list(reversed(ab))
if swap in abul:
pair = (ab, swap)
pair_r = (swap, ab)
if pair not in pairs and pair_r not in pairs:
pairs.append(pair)
else:
alone.append(ab)
# check that all pairs got assigned
if len(pairs) * 2 + len(alone) != len(abul):
print('len(pairs) * 2 == {0}'.format(len(pairs) * 2))
print(len(abul))
raise Exception(
'total numbers of unswapped-swapped matching do not match!'
)
if len(pairs) > 0 and len(alone) > 0:
print(
'WARNING: Found both swapped configurations and non-swapped ones!'
)
delete_slices = []
# these are the columns that we work on (and that are retained)
columns = [
'frequency', 'a', 'b', 'p',
'Z1', 'Z2', 'Z3',
'Il1', 'Il2', 'Il3',
'Is1', 'Is2', 'Is3',
'Zg1', 'Zg2', 'Zg3',
'datetime',
]
dtypes = {col: df.dtypes[col] for col in columns}
X = df[columns].values
for pair in pairs:
index_a = np.where(
(X[:, 1] == pair[0][0]) & (X[:, 2] == pair[0][1])
)[0]
index_b = np.where(
(X[:, 1] == pair[1][0]) & (X[:, 2] == pair[1][1])
)[0]
# normal injection
A = X[index_a, :]
# swapped injection
B = X[index_b, :]
# make sure we have the same ordering in P, frequency
diff = A[:, [0, 3]] - B[:, [0, 3]]
if not np.all(diff) == 0:
raise Exception('Wrong ordering')
# compute the averages in A
# the minus stems from the swapped current electrodes
X[index_a, 4:10] = (A[:, 4:10] - B[:, 4:10]) / 2.0
X[index_a, 10:16] = (A[:, 10:16] + B[:, 10:16]) / 2.0
# delete the second pair
delete_slices.append(
index_b
)
if len(delete_slices) == 0:
X_clean = X
else:
X_clean = np.delete(X, np.hstack(delete_slices), axis=0)
df_clean = pd.DataFrame(X_clean, columns=columns)
# for col in columns:
# # df_clean[col] = df_clean[col].astype(dtypes[col])
df_clean = df_clean.astype(dtype=dtypes)
return df_clean
def _extract_md(mat, **kwargs):
return None
def _extract_emd(mat, **kwargs):
emd = mat['EMD'].squeeze()
# Labview epoch
epoch = datetime.datetime(1904, 1, 1)
def convert_epoch(x):
timestamp = epoch + datetime.timedelta(seconds=x.astype(float))
return timestamp
dfl = []
# loop over frequencies
for f_id in range(0, emd.size):
# print('Frequency: ', emd[f_id]['fm'])
fdata = emd[f_id]
# fdata_md = md[f_id]
timestamp = np.atleast_2d(
[convert_epoch(x) for x in fdata['Time'].squeeze()]
).T
# import IPython
# IPython.embed()
df = pd.DataFrame(
np.hstack((
timestamp,
fdata['ni'],
fdata['nu'][:, np.newaxis],
fdata['Z3'],
fdata['Is3'],
fdata['Il3'],
fdata['Zg3'],
)),
)
df.columns = (
'datetime',
'a',
'b',
'p',
'Z1',
'Z2',
'Z3',
'Is1',
'Is2',
'Is3',
'Il1',
'Il2',
'Il3',
'Zg1',
'Zg2',
'Zg3',
)
df['frequency'] = np.ones(df.shape[0]) * fdata['fm']
# cast to correct type
df['datetime'] = pd.to_datetime(df['datetime'])
df['a'] = df['a'].astype(int)
df['b'] = df['b'].astype(int)
df['p'] = df['p'].astype(int)
df['Z1'] = df['Z1'].astype(complex)
df['Z2'] = df['Z2'].astype(complex)
df['Z3'] = df['Z3'].astype(complex)
df['Zg1'] = df['Zg1'].astype(complex)
df['Zg2'] = df['Zg2'].astype(complex)
df['Zg3'] = df['Zg3'].astype(complex)
df['Is1'] = df['Is1'].astype(complex)
df['Is2'] = df['Is2'].astype(complex)
df['Is3'] = df['Is3'].astype(complex)
df['Il1'] = df['Il1'].astype(complex)
df['Il2'] = df['Il2'].astype(complex)
df['Il3'] = df['Il3'].astype(complex)
dfl.append(df)
if len(dfl) == 0:
return None
df = pd.concat(dfl)
# average swapped current injections here!
df = _average_swapped_current_injections(df)
# sort current injections
condition = df['a'] > df['b']
df.loc[condition, ['a', 'b']] = df.loc[condition, ['b', 'a']].values
# for some reason we lose the integer casting of a and b here
df['a'] = df['a'].astype(int)
df['b'] = df['b'].astype(int)
# change sign because we changed A and B
df.loc[condition, ['Z1', 'Z2', 'Z3']] *= -1
# average of Z1-Z3
df['Zt'] = np.mean(df[['Z1', 'Z2', 'Z3']].values, axis=1)
# we need to keep the sign of the real part
sign_re = np.real(df['Zt']) / np.abs(np.real(df['Zt']))
df['r'] = np.abs(df['Zt']) * sign_re
# df['Zt_std'] = np.std(df[['Z1', 'Z2', 'Z3']].values, axis=1)
df['Is'] = np.mean(df[['Is1', 'Is2', 'Is3']].values, axis=1)
df['Il'] = np.mean(df[['Il1', 'Il2', 'Il3']].values, axis=1)
df['Zg'] = np.mean(df[['Zg1', 'Zg2', 'Zg3']].values, axis=1)
# "standard" injected current, in [mA]
df['Iab'] = np.abs(df['Is']) * 1e3
df['Iab'] = df['Iab'].astype(float)
# df['Is_std'] = np.std(df[['Is1', 'Is2', 'Is3']].values, axis=1)
# take absolute value and convert to mA
df['Ileakage'] = np.abs(df['Il']) * 1e3
df['Ileakage'] = df['Ileakage'].astype(float)
return df
|
21,188 | 9450f3518f9ced932b96bac6503517c36b93d3bc | import pandas as pd
# Create an empty DataFrame (no rows, no columns) and print its repr.
df=pd.DataFrame()
print(df)
21,189 | d9677a1301063b04eb2185f297704a8fdb011c1f | from sanic import Sanic, response as res
from sanic.exceptions import NotFound
import time
from matcher import match_rank_articles, match_rank_articles2
from summary import preprocessing_article, get_summary
from pdf_to_string import pdf_to_string_process
# instantiate the app object
app = Sanic("app") # __name__
@app.post('/nlpPost')
async def nlpPost(req):
    """Rank the supplied database articles against a search text.

    Expects JSON with 'dataBaseArticles', 'searchText' and 'typeOfSearch'
    (1 or 2 selects the matcher); returns {'result': ...}.
    """
    started = time.time()
    payload = req.json
    article_list = payload['dataBaseArticles']
    searchText = payload['searchText']
    typeOfSearch = payload['typeOfSearch']
    result = ''
    if typeOfSearch == 1:
        result = match_rank_articles(searchText, article_list)
    elif typeOfSearch == 2:
        result = match_rank_articles2(searchText, article_list)
    finished = time.time()
    print(f"Total time in python: {finished - started:.2f} s")
    return res.json({'result': result})
@app.post('/articlePost')
async def articlePost(req):
    # Enrich an uploaded article record: extract its text from the PDF at
    # article['path'], attach a token tree and a summary
    # (get_summary's second arg is 10 -- presumably a length cap; confirm
    # against the summary module).
    start = time.time()
    article = req.json
    article_text = pdf_to_string_process(article['path'])
    article['text'] = article_text
    article['tokentree'] = preprocessing_article(article_text)
    article['summary'] = get_summary(article_text, 10)
    end = time.time()
    print(f"Total time in python create new article: {end - start:.2f} s")
    return res.json(article)
# start the server
if __name__ == "__main__":
    # Run the Sanic dev server on port 5000.
    app.run(port=5000)
21,190 | 578aacb6d174a7999189ce70b786949573538966 | #/usr/bin/env python3.6
# -*- coding:utf-8 -*-
__author__ = 'Jagger'
'''
ๆ1ใ2ใ3ใ4ไธชๆฐๅญ๏ผ่ฝ็ปๆๅคๅฐไธชไบไธ็ธๅไธๆ ้ๅคๆฐๅญ็ไธไฝๆฐ๏ผ้ฝๆฏๅคๅฐ๏ผ
'''
def func():
    """Return every three-digit number whose digits come from 1-4 and are
    pairwise distinct, in ascending nested-loop order."""
    return [
        h * 100 + t * 10 + u
        for h in range(1, 5)
        for t in range(1, 5)
        for u in range(1, 5)
        if h != t and t != u and h != u
    ]
# Script entry point: print all qualifying three-digit numbers.
if __name__ == '__main__' :
    print("result: ")
    print(func())
|
21,191 | 1161fc2497e186a7f8b3bb357fb17b6c29f9ae74 | import pandas as pd
import numpy as np
'''
์์ธ์ ์ฝ๋ก๋19 ๋ฐ์ดํฐ ์์ง ๋ฐ ๋ถ์
2. '์ฐ๋ฒ' ๊ธฐ์ค์ผ๋ก ์ค๋ฆ์ฐจ์ ์ ๋ ฌ
3. ํ์ง์ผ์ ๋น๋์ -> ์ด๋ ๋ ์ง์ ๊ฐ์ฅ ๋ง์ด ํ์ง์ด ๋์๋์ง ํ์ธ ๊ฐ๋ฅ
4. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ถ๊ฐ -> 2020-11-10 ํ์
5. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์' ์ปฌ๋ผ ์ถ๊ฐ
6. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์ฃผ' ์ปฌ๋ผ ์ถ๊ฐ
7. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์-์ผ' ์ปฌ๋ผ ์ถ๊ฐ -> 11-10 ํ์
8. ํ์ง์ ์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ์ถ๋ ฅ
9. ํ์ง์์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ๋ฐ์์ด๋ ฅ
10. ์ผ์๋ณ ํ์ง์์ ์ ๊ทธ๋ํ๋ก ์๊ฐํ
11. ์-์ผ๋ณ ํ์ง์์ ์ ๊ทธ๋ํ๋ก ์๊ฐํ + ๊ฐฏ์๊ฐ ์๊ฐํ
12. ์๋ณ ํ์ง์์ ๋ง๋๊ทธ๋ํ๋ก ์๊ฐํ + ๊ฐฏ์๊ฐ ์๊ฐํ
13. ์-์ผ๋ณ ํ์ง์์์ค์์ ์ต๊ทผ ๋ฐ์ดํฐ 50๊ฐ๋ง ์ ๊ทธ๋ํ๋ก ์๊ฐํ + ๊ฐฏ์๊ฐ ์๊ฐํ
14. "์ฃผ"๋ณ ํ์ง์์ ๋ง๋๊ทธ๋ํ๋ก ์๊ฐํ
'''
#์ ์ฅ๋ ํ์ผ ์ฝ๊ธฐ
df = pd.read_csv("seoul_corona19_11_10_.csv", encoding="utf-8") #ํ๊ธ์ฒ๋ฆฌ
#2. '์ฐ๋ฒ' ๊ธฐ์ค์ผ๋ก ์ค๋ฆ์ฐจ์ ์ ๋ ฌ
df = df.sort_values(by="์ฐ๋ฒ", ascending=False)
print("2. '์ฐ๋ฒ' ๊ธฐ์ค์ผ๋ก ์ค๋ฆ์ฐจ์ ์ ๋ ฌ \n", df.head())
#3. ํ์ง์ผ์ ๋น๋์ -> ์ด๋ ๋ ์ง์ ๊ฐ์ฅ ๋ง์ด ํ์ง์ด ๋์๋์ง ํ์ธ ๊ฐ๋ฅ
# value_counts() : ๋น๋์๊ฐ์ ๋ด๋ฆผ์ฐจ์์ผ๋ก ์ ๋ ฌํด์ ๋ฐํ
print("3. ํ์ง์ผ์ ๋น๋์ \n", df["ํ์ง์ผ"].value_counts())
#4.'ํ์ง์ผ์' ์ปฌ๋ผ ์ถ๊ฐ -> 2020-11-10 ํ์
'''
๊ธฐ์กด์ ํ์ง์ผ์ ๋ฌธ์์ด๊ธฐ ๋๋ฌธ์ ๋ ์ง๋ก ๋ณ๊ฒฝํด์ผ ๋๋ค.
๊ฐ. 11.10 -> 11-10๋ก ๋ณ๊ฒฝ
๋. 11-10 -> 2020-11-10๋ก ๋ณ๊ฒฝ (๋ฌธ์์ด ์ฐ๊ฒฐ)
๋ค. 2020-11-10 ๋ฌธ์์ด์ ๋ ์ง๋ก ๋ณ๊ฒฝ (pd.to_datetime() ํจ์)
๋ผ. df["ํ์ง์ผ์"] = ๋ ์ง
'''
df["ํ์ง์ผ์"] = pd.to_datetime("2020-"+df["ํ์ง์ผ"].str.replace(".", "-"))
print("4. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ถ๊ฐ \n", df.head())
'''
๋ฐฉ๋ฒ2
temp = df["ํ์ง์ผ"].str.replace(".", "-")
temp = temp.apply(lambda n:n[:-1])
df["ํ์ง์ผ์"] = pd.to_datetime("2020-"+temp)
print("4. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ถ๊ฐ \n", df.head())
'''
#5. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์' ์ปฌ๋ผ ์ถ๊ฐ
df["์"] = df["ํ์ง์ผ์"].dt.month
print("5. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์' ์ปฌ๋ผ ์ถ๊ฐ \n", df.head())
#6. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์ฃผ' ์ปฌ๋ผ ์ถ๊ฐ
df["์ฃผ"] = df["ํ์ง์ผ์"].dt.isocalendar().week
print("6. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์ฃผ' ์ปฌ๋ผ ์ถ๊ฐ \n", df.head())
#7. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์-์ผ' ์ปฌ๋ผ ์ถ๊ฐ -> 11-10 ํ์
'''
๋ ์ง ๋ฐ์ดํฐ 'ํ์ง์ผ์' -> ๋ฌธ์ ๋ฐ์ดํฐ๋ก ๋ณ๊ฒฝํ๊ณ
๋ณ๊ฒฝ๋ ๋ฌธ์๋ฐ์ดํฐ์์ ์ฌ๋ผ์ด์ฑ (Series์ ์ ์ฉํ๊ธฐ ๋๋ฌธ์ ์ฌ๋ผ์ด์ฑ ๊ธฐ๋ฅ์ ํจ์ + apply ํจ์)
ex. 2020-11-10 -> 11-10
'''
df["์-์ผ"] = None
df["์-์ผ"] = df["ํ์ง์ผ์"].astype(str).apply(lambda n:n[-5:])
print("7. 'ํ์ง์ผ์' ์ปฌ๋ผ ์ด์ฉํ์ฌ '์-์ผ' ์ปฌ๋ผ ์ถ๊ฐ \n", df.head())
# 8. ํ์ง์ ์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ์ถ๋ ฅ
'''
'์-์ผ' ์ปฌ๋ผ๊ฐ์ ๋น๋์ ์ด์ฉ
'''
day_count = df["์-์ผ"].value_counts()
print(day_count)
max_day = day_count[day_count == day_count.max()]
print("8. ํ์ง์์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ์ถ๋ ฅ \n", max_day)
print("8. ํ์ง์์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ์ถ๋ ฅ \n", max_day.index[0])
max_day2 = df["์-์ผ"].value_counts().index[0]
print("8. ํ์ง์์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ์ถ๋ ฅ \n", max_day2)
# 9. ํ์ง์์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ๋ฐ์์ด๋ ฅ
max_day_df = df[df["์-์ผ"] == max_day.index[0]]
print("9. ํ์ง์์๊ฐ ๊ฐ์ฅ ๋ง์ ๋ ๋ฐ์์ด๋ ฅ \n", max_day_df)
'''
์๊ฐํ ๋ฐฉ๋ฒ
1) matplotlib ๋ผ์ด๋ธ๋ฌ๋ฆฌ ์ฌ์ฉ
https://matplotlib.org/
2) matplotlib + seaborn ๋ผ์ด๋ธ๋ฌ๋ฆฌ ์ฌ์ฉ
3) matplotlib + pandas ์ฌ์ฉ ( pandas์ matplotlib ๊ธฐ๋ฅ ํฌํจ )
https://pandas.pydata.org/
pip install matplotlib
pip install seaborn
'''
import matplotlib.pyplot as plt
plt.rc("font", family="AppleGothic") # ํ๊ธ์ฒ๋ฆฌ
# 10. ํ์ง์ผ์๋ณ ํ์ง์์ ์ ๊ทธ๋ํ๋ก ์๊ฐํ
# ์ธ๋ฑ์ค(๋ ์ง)==> x ์ถ์ผ๋ก ๊ฐ: y์ถ์ผ๋ก ์ค์
result = df["ํ์ง์ผ์"].value_counts().sort_index()
print(result)
result.plot(title="์ผ์๋ณ ํ์ง์์", figsize=(9,6))
plt.axhline(100, color="r", linestyle="--")
plt.show()
help(plt.axhline)
# 11. ์-์ผ๋ณ ํ์ง์์ ์ ๊ทธ๋ํ๋ก ์๊ฐํ + ๊ฐฏ์๊ฐ ์๊ฐํ
result = df["์-์ผ"].value_counts().sort_index()
print(result)
g=result.plot(title="์๋ณ ํ์ง์์", figsize=(20,8))
# print(help(g.text))
############################
for i in range(len(result)):
day_count = result.iloc[i]
# print("๊ฐฏ์:", day_count)
if day_count > 100:
g.text(x=i, y=day_count, s=day_count, fontsize=14, color="r")
#################################
plt.axhline(100, color="r", linestyle="--")
plt.show()
# 12. ์๋ณ ํ์ง์์ ๋ง๋๊ทธ๋ํ๋ก ์๊ฐํ + ๊ฐฏ์๊ฐ ์๊ฐํ
result = df["์"].value_counts().sort_index()
print(result)
g=result.plot.bar(title="์๋ณ ํ์ง์์", figsize=(10,8))
for i in range(len(result)):
day_count = result.iloc[i]
g.text(x=i, y=day_count, s=day_count, fontsize=14, color="r")
plt.axhline(1500, color="r", linestyle="--")
plt.show()
# 13. ์-์ผ๋ณ ํ์ง์์์ค์์ ์ต๊ทผ ๋ฐ์ดํฐ 50๊ฐ๋ง ์ ๊ทธ๋ํ๋ก ์๊ฐํ + ๊ฐฏ์๊ฐ ์๊ฐํ
result = df["์-์ผ"].value_counts().sort_index()
print(result)
result = result[-50:] # 50๊ฐ๋ง ์ฌ๋ผ์ด์ฑ
g = result.plot(title="์๋ณ ํ์ง์์", figsize=(20, 8))
# print(help(g.text))
############################
for i in range(len(result)):
day_count = result.iloc[i]
g.text(x=i, y=day_count, s=day_count, fontsize=14, color="r")
#################################
plt.axhline(35, color="r", linestyle="--")
plt.show()
# 14. "์ฃผ"๋ณ ํ์ง์์ ๋ง๋๊ทธ๋ํ๋ก ์๊ฐํ
result = df["์ฃผ"].value_counts().sort_index()
print(result)
g = result.plot.bar(title="์ฃผ๋ณ ํ์ง์์", figsize=(10, 8))
for i in range(len(result)):
day_count = result.iloc[i]
g.text(x=i, y=day_count, s=day_count, fontsize=14, color="r")
plt.axhline(400, color="r", linestyle="--")
plt.show()
|
21,192 | 63350bcf49c8bc0c68ec5b1905bd7f08a53f796d | # Generated by Django 3.2.5 on 2021-07-18 03:51
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefine two Insurance char fields as
    # blank/nullable with an empty-string default.
    dependencies = [
        ('api', '0002_insurance_date_of_purchase'),
    ]
    operations = [
        migrations.AlterField(
            model_name='insurance',
            name='fuel',
            field=models.CharField(blank=True, default='', max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='insurance',
            # (sic) field name is spelled 'veichel_segment' in the model.
            name='veichel_segment',
            field=models.CharField(blank=True, default='', max_length=100, null=True),
        ),
    ]
|
21,193 | 14f7b78cc20a6c4c8fa02ac0cdd0f6425260ec3b | """
Validate and submit requests made in LCOGT_make_requests.py
"""
###########
# imports #
###########
import pickle, requests, socket
from parse import search
import os
from glob import glob
import numpy as np, pandas as pd
from astropy.time import Time
import astropy.units as u
HOMEDIR = os.path.expanduser('~')
API_FILE = os.path.join(HOMEDIR, '.lcogt_api_token')
if not os.path.exists(API_FILE):
raise NotImplementedError('where to get API file?')
with open(API_FILE, 'r') as f:
l = f.readlines()
token = str(l[0].replace('\n',''))
from cdips_followup import __path__
DATADIR = os.path.join(os.path.dirname(__path__[0]), 'data')
RESULTSDIR = os.path.join(os.path.dirname(__path__[0]), 'results')
#############
# functions #
#############
def validate_single_request(requestgroup, max_duration_error=15,
raise_error=True):
"""
Submit the RequestGroup through the "validate" API, cf.
https://developers.lco.global/#validate-a-requestgroup
max_duration_error: in minutes, is the maximum allowable difference between
the start & end times of the request, and the _billed duration_ of the
request. By design in the API, the billed duration is always shorter than
the (end-start) time. I allotted 1 hour on either side for scheduling, so a
bit of slack on either is fine.
"""
is_modified = False
response = requests.post(
'https://observe.lco.global/api/requestgroups/validate/',
headers={'Authorization': 'Token {}'.format(token)},
json=requestgroup
)
# Make sure the API call was successful
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
print('API call failed: {}'.format(response.content))
raise exc
requestgroup_dict = response.json()
# If you get an error because your incorrectly estimated the number of
# exposures, correct it here.
if len(requestgroup_dict['errors']) >= 1:
if 'non_field_errors' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['non_field_errors']))
print(42*'-')
return np.nan, np.nan
if 'requests' in requestgroup_dict['errors']:
print(42*'-')
print('GOT ERROR: {}'.
format(requestgroup_dict['errors']['requests']))
print(42*'-')
try:
errmsg = (
requestgroup_dict['errors']['requests'][0]['non_field_errors'][0]
)
except:
return np.nan, np.nan
if 'the target is visible for a maximum of' in errmsg:
# get the strings of durations, and decrement the requested number
# of exposures by the right multiple!
sr = search("According{}maximum of {} hours "
"within{}your request {} hours. Consider{}",
errmsg)
max_dur = float(sr[1])
req_dur = float(sr[3])
if req_dur == max_dur:
# {:.1f} formatted strings. genius ._.
req_dur += 0.01
if not req_dur > max_dur:
errmsg = (
'ERR! max dur: {}, req dur: {}'.format(max_dur, req_dur)
)
raise ValueError(errmsg)
diff_dur_sec = (req_dur - max_dur)*60*60
# previously, guessed
#
# expcount = np.floor(
# (endtime-starttime).to(u.hr)
# /
# (exptime*u.second + read_time_per_exposure).to(u.hr)
# )
#
# that produced the difference above...
exptime_sec = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_time']
)
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
read_time_per_exposure = 30*u.second # from Bayliss' completed runs
n_exposures_diff = int(
np.ceil(diff_dur_sec/
(exptime_sec + read_time_per_exposure.value)
)
)
new_expcount = expcount - n_exposures_diff
print(42*'-')
print('WRN!: max durn: {} hr, req durn: {} hr. had {} exposures, decrement to {}'.
format(max_dur, req_dur, expcount, new_expcount))
print(42*'-')
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count'] = new_expcount
is_modified = True
return requestgroup, is_modified
else:
if raise_error:
raise NotImplementedError('got new API error: {}'.format(errmsg))
else:
print('WRN!: Got API error: {}'.format(errmsg))
print(requestgroup)
return np.nan, np.nan
billed_durn = (
requestgroup_dict['request_durations']['requests'][0]['duration']
)
start = Time(requestgroup['requests'][0]['windows'][0]['start'])
end = Time(requestgroup['requests'][0]['windows'][0]['end'])
window_durn = (end - start).value*24*60*60
expcount = (
requestgroup['requests'][0]['configurations'][0]['instrument_configs'][0]['exposure_count']
)
if (window_durn - billed_durn)/60 > max_duration_error:
errmsg = (
'ERROR! got a window of {:.2f} min; but tried to bill {:.2f} min.'.
format(window_durn/60, billed_durn/60)
)
print(42*'-')
print(errmsg)
print(42*'-')
#import IPython; IPython.embed()
#raise AssertionError(errmsg) #FIXME
return np.nan, np.nan
else:
print(42*'-')
print('ACCEPTED! window durn: {:.2f} min, billed {:.2f} min. had {:d} exposures'.
format(window_durn/60, billed_durn/60, expcount))
print(42*'-')
return requestgroup, is_modified
def submit_single_request(requestgroup):
    """POST a fully formed RequestGroup to the LCOGT observation portal.

    Prints the response body and re-raises requests.exceptions.HTTPError
    if the API rejects the submission; on success prints the portal URL
    of the newly created request group.
    """
    # Submit the fully formed RequestGroup
    response = requests.post(
        'https://observe.lco.global/api/requestgroups/',
        headers={'Authorization': 'Token {}'.format(token)},
        json=requestgroup
    )
    # Make sure the API call was successful
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as exc:
        print('API call failed: {}'.format(response.content))
        raise exc
    # The API returns the newly submitted requestgroup as json
    requestgroup_dict = response.json()
    # Print out the url on the portal where we can view the submitted request
    print('View the observing request: '
          'https://observe.lco.global/requestgroups/{}/'.
          format(requestgroup_dict['id']))
def submit_all_requests(savstr, validate_all=1, submit_all=0,
max_N_transit_per_object=3, max_duration_error=15,
semesterstr='20A'):
"""
savstr: used for directory management
validate_all: if true, first validates observation requests to ensure that
they can be submitted
submit_all: actually submits them
max_N_transit_per_object:
max_duration_error: in minutes, maximum acceptable difference between
_desired_ observation window, and the window that the LCOGT system accepts.
"""
if submit_all:
assert validate_all
if not 'ephemupdate' in savstr:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_observability/'.format(semesterstr))
)
else:
resultsdir = (
os.path.join(RESULTSDIR,'LCOGT_{}_updated_requests/'.format(semesterstr))
)
pkl_savpath = (
os.path.join(resultsdir, '{}.pkl'.format(savstr))
)
mult_savpath = (
os.path.join(resultsdir, '{}_summary.csv'.format(savstr))
)
with open(pkl_savpath, 'rb') as f:
r = pickle.load(f)
df = pd.read_csv(mult_savpath)
if submit_all:
print('ATTEMPTING TO SUBMIT THE FOLLOWING')
df['submit_durn'] = (
df['sched_duration'] *
np.minimum(df['n_requests'], max_N_transit_per_object)
)
print(df)
print(42*'=')
print('\nTotal time: {:.1f} hr\n'.format(np.sum(df['submit_durn'])))
print(42*'=')
#
# sort all the available transit windows for each target by time. submit
# the earliest `max_N_transit_per_object' (e.g., 2 transits).
#
starts = []
for _r in r:
starts.append(
[ Time(__r['requests'][0]['windows'][0]['start']) for __r in _r ]
)
time_sort_inds = []
for start in starts:
time_sort_inds.append(
np.argsort(start)
)
#
# iterate over available requests for each target that met the magnitude
# and depth cuts
#
for _r, ind in zip(r, time_sort_inds):
_requests_sorted = np.array(_r)[ind]
_requests_to_submit = _requests_sorted[:max_N_transit_per_object]
for requestgroup in _requests_to_submit:
if validate_all:
if not submit_all:
print(requestgroup)
requestgroup, is_modified = (
validate_single_request(
requestgroup, max_duration_error=max_duration_error
)
)
n_iter = 0
if is_modified and np.isfinite(is_modified):
while is_modified:
if n_iter >= 10:
raise AssertionError('too many iterations')
requestgroup, is_modified = (
validate_single_request(
requestgroup,
max_duration_error=max_duration_error
)
)
if not isinstance(requestgroup, dict):
if not np.isfinite(requestgroup):
break
n_iter += 1
if submit_all:
if isinstance(requestgroup, dict):
print('SUBMITTING...')
print(requestgroup)
submit_single_request(requestgroup)
else:
print('vvv DID NOT SUBMIT B/C FAILED TO VALIDATE vvv')
print(requestgroup)
print('^^^ DID NOT SUBMIT B/C FAILED TO VALIDATE ^^^')
if __name__=="__main__":
validate_all = 1
submit_all = 1
max_N_transit_per_object = 2
max_duration_error = 20
eventclass = 'OIBEO'
savstr = 'request_19B_59859387_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'request_19B_2m_faint_{}'.format(eventclass)
# eventclass = 'OIBE'
# savstr = 'bright_shallow_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'midpartials_19B_{}'.format(eventclass)
# eventclass = 'OIB'
# savstr = 'toppartials_19B_{}'.format(eventclass)
# max_duration_error = 15
# savstr = 'request_TIC29786532_19B'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint_v2'
# max_N_transit_per_object = 2
# savstr = 'request_19B_2m_faint'
# max_N_transit_per_object = 4 # actually 3, b/c one fails
# savstr = 'all_requests_19B_easyones'
# max_N_transit_per_object = 3
submit_all_requests(savstr, validate_all=validate_all,
submit_all=submit_all,
max_N_transit_per_object=max_N_transit_per_object,
max_duration_error=max_duration_error)
|
21,194 | 287e2ec94d389d9533ff1010d689cc9b445b5ea7 | from django.urls import reverse
from rest_framework.status import HTTP_200_OK
from parkings.models import EnforcementDomain, PermitArea
from ..enforcement.test_check_parking import create_area_geom
list_url = reverse('operator:v1:permitarea-list')
def test_endpoint_returns_list_of_permitareas(operator_api_client, operator):
    # One permit area owned by the operator should come back serialized
    # with exactly the fields {code, domain, name}.
    domain = EnforcementDomain.objects.create(code='ESP', name='Espoo')
    PermitArea.objects.create(
        name='AreaOne', geom=create_area_geom(),
        identifier='A',
        permitted_user=operator.user,
        domain=domain
    )
    response = operator_api_client.get(list_url)
    json_response = response.json()
    assert response.status_code == HTTP_200_OK
    expected_keys = {'code', 'domain', 'name'}
    assert json_response['count'] == 1
    assert set(json_response['results'][0]) == expected_keys
    assert json_response['results'][0]['domain'] == domain.code
    assert json_response['results'][0]['code'] == 'A'
    assert json_response['results'][0]['name'] == 'AreaOne'
def test_endpoint_returns_only_the_list_of_permitted_permitareas(
    operator_api_client, operator, staff_user
):
    # Two areas exist, but only the one whose permitted_user is the
    # requesting operator may be listed by the endpoint.
    domain = EnforcementDomain.objects.create(code='ESP', name='Espoo')
    PermitArea.objects.create(
        name='AreaOne', geom=create_area_geom(),
        identifier='A',
        permitted_user=operator.user,
        domain=domain
    )
    PermitArea.objects.create(
        name='AreaTwo', geom=create_area_geom(),
        identifier='B',
        permitted_user=staff_user,
        domain=domain
    )
    response = operator_api_client.get(list_url)
    json_response = response.json()
    assert response.status_code == HTTP_200_OK
    assert json_response['count'] == 1
    assert json_response['results'][0]['code'] == 'A'
    # Sanity check: both rows really are in the database.
    assert PermitArea.objects.count() == 2
|
21,195 | 52e26c9f67b2639b4382ce860f170ab6e829d849 | """
EZLink application note for transmitting data
Hardie Pienaar
Feb 2017
"""
import RFMLib as rfm
import time
import numpy as np
def setup():
    """Initialise the library, power up the ISM chip and load the RF config.

    Configures the radio for 915 MHz, 9.6 kbps GFSK with +/-45 kHz
    deviation, a 0x2DD4 sync word and CRC-16 (IBM) packet handling.
    """
    rfm.setup()

    # Reading both interrupt status registers clears any pending interrupts.
    rfm.read_register(0x03)  # Interrupt Status 1
    rfm.read_register(0x04)  # Interrupt Status 2

    # (register, value) pairs, written in this exact order.
    rf_config = [
        # --- RF parameters: centre frequency 915 MHz ---
        (0x75, 0x75),  # Frequency Band Select
        (0x76, 0xBB),  # Nominal Carrier Frequency 1
        (0x77, 0x80),  # Nominal Carrier Frequency 0
        # --- TX data rate 9.6 kbps ---
        (0x6E, 0x4E),  # TX Data Rate 1
        (0x6F, 0xA5),  # TX Data Rate 0
        (0x70, 0x2C),  # Modulation Mode Control 1
        # --- TX deviation +/-45 kHz ---
        (0x72, 0x48),  # Frequency Deviation
        # --- Packet configuration ---
        (0x34, 0x09),  # Preamble Length
        # Disable header bytes, variable packet length (taken from the
        # received packet's length field), two-byte sync word.
        (0x33, 0x02),  # Header Control 2
        # Sync word pattern 0x2DD4.
        (0x36, 0x2D),  # Sync Word 3
        (0x37, 0xD4),  # Sync Word 2
        # Enable the TX packet handler and CRC-16 (IBM) check.
        (0x30, 0x0D),  # Data Access Control
        # Enable FIFO mode and GFSK modulation.
        (0x71, 0x63),  # Modulation Mode Control 2
        # --- VCO and PLL trimming ---
        (0x54, 0x7F),  # VCO Current Trimming
        (0x59, 0x40),  # Divider Current Trimming
    ]
    for register, value in rf_config:
        rfm.write_register(register, value)
def send_bytes(msg):
    """Transmit *msg* as one packet and block until it has been sent.

    The packet-length field is set to ``len(msg)``, so variable-length
    packets are supported (limited by the chip's TX FIFO size).
    """
    # Set the length of the payload to len(msg) bytes.
    rfm.write_register(0x3E, len(msg))
    # Fill the payload into the transmit FIFO, one byte at a time.
    for ch in msg:
        rfm.write_register(0x7F, ord(ch))

    # Disable all interrupts and enable the packet-sent interrupt only;
    # it is used to detect the end of the transmission.
    rfm.write_register(0x05, 0x04)  # Interrupt Enable 1: packet-sent only
    rfm.write_register(0x06, 0x00)  # Interrupt Enable 2: all disabled
    # Read the interrupt status registers to clear pending interrupts,
    # making the nIRQ pin go back to high.
    rfm.read_register(0x03)  # Interrupt Status 1
    rfm.read_register(0x04)  # Interrupt Status 2

    # Enable the transmitter: the radio forms the packet and sends it
    # automatically.
    rfm.write_register(0x07, 0x09)  # Operating Function Control 1

    # Wait for the packet-sent interrupt; we only need to poll nIRQ.
    while rfm.check_irq():
        time.sleep(0.001)
    # Read the interrupt status registers again to release the flags.
    rfm.read_register(0x03)  # Interrupt Status 1
    rfm.read_register(0x04)  # Interrupt Status 2
def close():
    """Release GPIO resources and turn off the ISM chip via the library."""
    rfm.close()
|
21,196 | 85f0a122216b769e09dadf0217797d5a258ce848 | #simple pascal grammar
'''
Program: program variable; Block
Block:Declarations Compound_statement
Declarations: (Variable_declaration ;)+
| empty
Variable_declaration: id (, id)* : Type_spec
Type_spec: integer | real
Term: factor([mul | div ] factor) *
Factor:
''' |
21,197 | 4134f6ec4336251a5c765c5ddf88bbd1ad817277 |
from twisted.web.resource import Resource
import json
import jwt
import tasks
class Service(Resource):
isLeaf = True
def render_GET(self, request):
print request.getAllHeaders()
content = {"available commands": ["start-backups", "list-jobs"]};
return json.dumps(content);
def render_POST(self, request):
jwtUser = self.getUser(request.getHeader('x-jwt-assertion'))
print "User as passed by the gateway: " + jwtUser
postData = request.content.read()
print str(postData)
receivedJSON = json.loads(postData)
command = receivedJSON['command'];
user = receivedJSON['user'];
if ((user + '@carbon.super') != jwtUser):
print 'Provided user name does not match authenticated user name.'
return 'Provided user name does not match authenticated user name.'
if (command == 'start-backups'):
return self.startBackups(receivedJSON)
elif (command == 'list-jobs'):
return self.listJobs(receivedJSON)
elif (command == 'add-user'):
return 'Please add users through the WSO2 gateway.'
elif (command == 'auth-test'):
return 'Authentication Successful'
elif (command == 'restore-new-machine'):
return self.restoreNewMachine(receivedJSON)
elif (command == 'list-available'):
return self.listAvailable(receivedJSON)
else: return "Invalid command"
def getUser(self, jwtString):
return jwt.decode(jwtString, verify=False)['http://wso2.org/claims/enduser']
# Backup operations
def startBackups(self, receivedJSON):
userName = receivedJSON['user']
ipAddress = receivedJSON['ip']
tasks.startBackups(userName, ipAddress)
from subprocess import call
status = call("./scripts/setup-backups.sh -m " + ipAddress, shell=True)
print "Status: " + str(status)
return "Status: " + str(status)
def listJobs(self, receivedJSON):
userName = receivedJSON['user']
return tasks.listJobs(userName)
def listAvailable(self, receivedJSON):
userName = receivedJSON['user']
return str(tasks.listRestoreOptions(userName))
# Restore operations
def restoreNewMachine(self, receivedJSON):
userName = receivedJSON['user']
ipAddress = receivedJSON['ip']
restoreOption = receivedJSON['restoreOption']
authorizedUser = tasks.authJobAccess(userName, restoreOption)
if (not authorizedUser):
return "You do not have permission to access this backup."
from subprocess import call
status = call("./scripts/restore-new-machine.sh -j " + restoreOption + " -m " + ipAddress, shell=True)
print "Status: " + str(status)
return "Status: " + str(status)
# User operations
def addUser(receivedJSON):
userName = receivedJSON['user']
print 'User \'' + userName + '\' should get created at this point.'
return tasks.reg_usr(userName, password)
|
21,198 | d35cb2aa311caba7b9baf329f22e186859477ef5 | import _wingpio as gpio
import time
# Pin numbers for the two indicator LEDs and the sensor line.
led_pin_one = 5
led_pin_two = 6
sensor_pin = 4
# NOTE(review): these two values are never read again below -- they look
# like leftovers; verify before relying on them.
pinOneValue = gpio.HIGH
pinTwoValue = gpio.LOW
# Drive both LED pins as outputs, pull-ups off, starting HIGH.
gpio.setup(led_pin_one, gpio.OUT, gpio.PUD_OFF, gpio.HIGH)
gpio.setup(led_pin_two, gpio.OUT, gpio.PUD_OFF, gpio.HIGH)
def work(threshold=10000, settle_time=0.1):
    """Take one sensor reading and update the two LEDs accordingly.

    The sensor pin is driven LOW to discharge the line, then switched to
    an input; the number of polling iterations until it reads HIGH is a
    proxy for the charge time.  Counts above *threshold* light LED one,
    otherwise LED two.  Defaults preserve the original behaviour.

    Returns the raw iteration count.
    """
    count = 0
    # Discharge the sensor line and give it time to settle.
    gpio.setup(sensor_pin, gpio.OUT)
    gpio.output(sensor_pin, gpio.LOW)
    time.sleep(settle_time)
    # Let the line float as an input and count until it reads HIGH.
    # NOTE(review): this loop never terminates if the line stays LOW.
    gpio.setup(sensor_pin, gpio.IN)
    while gpio.input(sensor_pin) == gpio.LOW:
        count += 1
    if count > threshold:
        gpio.output(led_pin_one, gpio.HIGH)
        gpio.output(led_pin_two, gpio.LOW)
    else:
        gpio.output(led_pin_one, gpio.LOW)
        gpio.output(led_pin_two, gpio.HIGH)
    return count
try:
    # Poll the sensor forever; Ctrl-C exits cleanly via KeyboardInterrupt.
    while True:
        work()
except KeyboardInterrupt:
    pass
finally:
    # Always release the GPIO pins, even after an unexpected error.
    gpio.cleanup()
21,199 | 84600dc5ab23a00714fcff40f52fc4fc43eaaf02 | #/usr/bin/env python
'''ๆฎ้ไธคไธช้กบๅบๆง่ก็ๅฝๆฐ'''
from time import ctime,sleep
def loop0():
print 'start loop0 at :' ,ctime()
sleep(4)
print 'loop0 done!'
def loop1():
print 'start loop1 at :',ctime()
sleep(2)
print 'loop1 done!'
def main():
print 'start at :', ctime()
loop0()
loop1()
print 'all DONE at :',ctime()
if __name__ == '__main__':
main() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.