index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,400 | 829910af55ca84838537a2e1fa697713c7a6c6ca | # This program just for testing push from Mac.
def subset2(num):
    """Print the power set (every subset) of *num*.

    The actual enumeration is delegated to subset2_helper; this wrapper
    only owns the result list and prints it.
    """
    collected = []
    subset2_helper(num, [], collected, 0)
    print(collected)
def subset2_helper(num, mid_result, result, position):
    """Depth-first enumeration of all subsets of num[position:].

    `mid_result` is the current subset prefix; a snapshot of it is
    recorded on every call, then each remaining element is tried in turn
    (append, recurse past it, backtrack).
    """
    result.append(list(mid_result))
    for idx in range(position, len(num)):
        mid_result.append(num[idx])
        subset2_helper(num, mid_result, result, idx + 1)
        del mid_result[-1]  # backtrack
if __name__ == '__main__':
subset2([1, 2, 3]) |
8,401 | 98b27c268fe1f47a899269e988ddf798faf827df | #part-handler
# vi: syntax=python ts=4
#
# Copyright (C) 2012 Silpion IT-Solutions GmbH
#
# Author: Malte Stretz <stretz@silpion.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import StringIO
import tarfile
CACHE_DIR = '/var/cache/cloud'
def list_types():
    """Return the list of MIME types this part handler accepts."""
    return ['application/tar']
def handle_part(data, ctype, filename, payload):
    # Extract an 'application/tar' payload into a per-part cache directory
    # named after the part's filename with its extension stripped.
    # `data` (the cloud-init handler context) is unused here.
    if ctype == ('application/tar'):
        # e.g. "foo.tar" -> /var/cache/cloud/foo
        dir = "%s/%s" % (CACHE_DIR, os.path.splitext(filename)[0])  # NOTE(review): `dir` shadows the builtin
        if not os.path.exists(dir):
            os.makedirs(dir)
        # The payload is already in memory; wrap it so tarfile can read it.
        # (StringIO module => this is Python 2 code.)
        buf = StringIO.StringIO(payload)
        tar = tarfile.open(name=filename, fileobj=buf)
        # SECURITY NOTE(review): extractall() does not sanitise member paths;
        # an archive containing "../" members can write outside `dir`.
        tar.extractall(dir)
        tar.close()
        buf.close()
|
8,402 | 321dc411b003949a6744216a13c59c70d919a675 | #@@range_begin(list1) # ←この行は無視してください。本文に引用するためのものです。
#ファイル名 Chapter07/0703person.py
# __metaclass__ = type #← python 2を使っている場合は行頭の「#」を取る
class Person:
    """A person identified by a name, able to introduce itself."""

    def set_name(self, name):
        """Store *name* on this instance."""
        self.name = name

    def get_name(self):
        """Return the stored name."""
        return self.name

    def greet(self):
        """Print a self-introduction (text is in Japanese)."""
        print(f"こんにちは。私は{self.name}です。")
#@@range_end(list1) # ←この行は無視してください。本文に引用するためのものです。
# Demo script
#@@range_begin(list2)  # (book excerpt marker -- please ignore this line)
foo = Person()
bar = Person()
foo.set_name('ルーク・スカイウォーカー')  # main character of "Star Wars"
bar.set_name('アナキン・スカイウォーカー')  # Luke's father
foo.greet()  # -> こんにちは。私はルーク・スカイウォーカーです。
bar.greet()  # -> こんにちは。私はアナキン・スカイウォーカーです。
#@@range_end(list2)  # (book excerpt marker -- please ignore this line)
#@@range_begin(list3)  # (book excerpt marker -- please ignore this line)
print(foo.name)  # attributes are public -> ルーク・スカイウォーカー
# Assigning the attribute directly bypasses set_name().
bar.name = 'ヨーダ'
bar.greet()  # -> こんにちは。私はヨーダです。
#@@range_end(list3)  # (book excerpt marker -- please ignore this line)
|
8,403 | c1fd6e940b3b15ae01a102b3c0aba9bd327c77b2 | import numpy as np
def layer_forward(x, w):
    """Skeleton forward pass for a generic layer.

    input:
      - inputs (x): (N, d_1, ..., d_k)
      - weights (w): (D, M)

    Returns a placeholder (empty) output plus the cache consumed by
    layer_backward: (x, w, z, output), where z is the (unset)
    intermediate value.
    """
    intermediate = None  # placeholder for z, to be computed by a real layer
    out = []
    return out, (x, w, intermediate, out)
def layer_backward(d_output, cache):
    """Skeleton backward pass.

    Receives the derivative of the loss w.r.t. the outputs plus the
    forward cache, unpacks the cache, and returns placeholder gradients
    w.r.t. inputs and weights.
    """
    x, w, z, output = cache
    d_x = d_w = None  # real gradients would be computed here
    return d_x, d_w
def affine_forward(x, w, b):
    """A simple linear feedforward (affine) layer.

    input:
      - inputs (x): (N, d_1, ..., d_k)
      - weights (w): (D, M)
      - bias (b): (M,)
    return:
      - output: (N, M)
      - cache: (x, w, b) for the backward pass
    """
    batch = x.shape[0]
    flat = x.reshape(batch, -1)   # flatten each sample into a row: (N, D)
    out = flat @ w + b            # (N, M)
    return out, (x, w, b)
def affine_backward(d_output, cache):
    """Backward pass of the affine layer.

    input:
      - upstream derivative (d_output): (N, M)
      - cache: (x, w, b) saved by affine_forward
    return:
      - gradients (d_x, d_w, d_b): ((N, d_1, ..., d_k), (D, M), (M,))
    """
    x, w, b = cache
    batch = d_output.shape[0]
    flat = x.reshape(batch, -1)                  # (N, D)
    d_x = (d_output @ w.T).reshape(x.shape)      # back to input shape
    d_w = flat.T @ d_output                      # (D, M)
    d_b = d_output.sum(axis=0)                   # (M,)
    return d_x, d_w, d_b
def relu_forward(x):
    """Element-wise ReLU.

    input:
      - inputs (x): (N, d_1, ..., d_k)
    return:
      - output: same shape, max(x, 0) element-wise
      - cache: the raw input, kept for the backward pass
    """
    activated = np.fmax(x, 0)  # fmax kept deliberately (ignores NaN, unlike maximum)
    return activated, x
def relu_backward(d_output, cache):
    """Backward pass for ReLU.

    input:
      - upstream derivative (d_output): (N, d_1, ..., d_k)
      - cache: the raw input x saved by relu_forward
    return:
      - d_x: upstream gradient where x was positive, zero elsewhere
    """
    x = cache
    return np.where(x > 0, d_output, 0)
|
8,404 | 3d1f7794763b058cc22c543709a97cb021d0fd23 | import pygame
pygame.init()
class Tiles:
    """Tile constants, pre-loaded textures and collision helpers.

    NOTE(review): the two functions below take no `self`/`cls` and are only
    usable through the class object (e.g. Tiles.Blocked_At(...)); they behave
    as implicit static methods.
    """
    # Edge length of one tile in pixels.
    Size = 32
    # Grid positions that block movement; filled in elsewhere
    # (presumably by the map loader -- TODO confirm).
    Blocked = []
    # Texture_Tags keys whose tiles count as impassable.
    Blocked_Types = ["5", "6", "7", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "25", "27", "28", "29"]
    def Blocked_At(pos):
        # True when `pos` (any 2-sequence) is a blocked grid position.
        if list(pos) in Tiles.Blocked:
            return True
        else:
            return False
    def Load_Texture(file, Size):
        # Load an image file and scale it onto a Size x Size surface with alpha.
        # NOTE(review): parameter `Size` shadows the class attribute of the
        # same name, and `file` shadows the builtin.
        bitmap = pygame.image.load(file)
        bitmap = pygame.transform.scale(bitmap, (Size, Size))
        surface = pygame.Surface((Size, Size), pygame.HWSURFACE|pygame.SRCALPHA)
        surface.blit(bitmap, (0, 0))
        return surface
    # Pre-loaded textures referenced by Texture_Tags below.
    # NOTE(review): this first attribute reuses the class name --
    # Tiles.Tiles is the hospital-floor texture, not the class itself.
    Tiles = Load_Texture("Graphics\\hospitalTile.png", Size)
    Stone = Load_Texture("Graphics\\stone.png", Size)
    Grass = Load_Texture("Graphics\\grass.png", Size)
    Water = Load_Texture("Graphics\\water.png", Size)
    # Oversized sprites span multiple tiles (4x, 6x, 8x the base size).
    Machine = Load_Texture("Graphics\\hospitalTileMachine.png", Size*4)
    WifeY = Load_Texture("Graphics\\young_bed.png", Size*4)
    WifeO = Load_Texture("Graphics\\old_bed.png", Size*4)
    DoorOpen = Load_Texture("Graphics\\door_open.png", Size)
    DoorClosed = Load_Texture("Graphics\\door_closed.png", Size)
    WallLeft = Load_Texture("Graphics\\leftwall.png", Size)
    WallUpper = Load_Texture("Graphics\\upperwall.png", Size)
    WallRight = Load_Texture("Graphics\\rightwall.png", Size)
    WallLower = Load_Texture("Graphics\\lowerwall.png", Size)
    UpperLeftCorner = Load_Texture("Graphics\\left_upperCorner.png", Size)
    UpperRightCorner = Load_Texture("Graphics\\right_upperCorner.png", Size)
    LowerRightCorner = Load_Texture("Graphics\\right_lowerCorner.png", Size)
    LowerLeftCorner = Load_Texture("Graphics\\left_lowerCorner.png", Size)
    TowerWall = Load_Texture("Graphics\\Towerwall.png", Size)
    TowerWallLeft = Load_Texture("Graphics\\TowerwallLeft.png", Size)
    TowerWallRight = Load_Texture("Graphics\\TowerwallRight.png", Size)
    WoodFloor = Load_Texture("Graphics\\woodfloor.png", Size)
    Balloons = Load_Texture("Graphics\\balloons.png", Size)
    Yellow = Load_Texture("Graphics\\yellow.png", Size)
    White = Load_Texture("Graphics\\white.png", Size)
    Tombstone = Load_Texture("Graphics\\tombstone.png", Size)
    Sand = Load_Texture("Graphics\\sand.png", Size)
    Plane = Load_Texture("Graphics\\plane.png", Size * 8)
    Car1 = Load_Texture("Graphics\\car3.png", Size * 6)
    Car2 = Load_Texture("Graphics\\car4.png", Size * 6)
    # Map tile-id strings (as used in the map data) to their textures.
    Texture_Tags = {"1" : Grass,
                    "2" : Stone,
                    "3" : Water,
                    "4" : Tiles,
                    "5" : Machine,
                    "6" : WifeY,
                    "7" : WifeO,
                    "8" : DoorOpen,
                    "9" : DoorClosed,
                    "10" : WallLeft,
                    "11" : WallUpper,
                    "12" : WallRight,
                    "13" : WallLower,
                    "14" : UpperLeftCorner,
                    "15" : UpperRightCorner,
                    "16" : LowerRightCorner,
                    "17" : LowerLeftCorner,
                    "18" : TowerWall,
                    "19" : TowerWallLeft,
                    "20" : TowerWallRight,
                    "21" : WoodFloor,
                    "22" : Balloons,
                    "23" : Yellow,
                    "24" : White,
                    "25" : Tombstone,
                    "26" : Sand,
                    "27" : Plane,
                    "28" : Car1,
                    "29" : Car2}
8,405 | db8c2f6f5da0b52c268634043e1132984f610eed | import MySQLdb
import MySQLdb.cursors
from flask import _app_ctx_stack, current_app
class MySQL(object):
    """Flask extension that provides one MySQLdb connection per app context."""
    def __init__(self, app=None):
        self.app = app
        # Support both immediate and deferred (factory-pattern) initialisation.
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Initialize the `app` for use with this
        :class:`~flask_mysqldb.MySQL` class.
        This is called automatically if `app` is passed to
        :meth:`~MySQL.__init__`.
        :param flask.Flask app: the application to configure for use with
        this :class:`~flask_mysqldb.MySQL` class.
        """
        # Register defaults so `connect` can read every key unconditionally.
        app.config.setdefault('MYSQL_HOST', 'localhost')
        app.config.setdefault('MYSQL_USER', None)
        app.config.setdefault('MYSQL_PASSWORD', None)
        app.config.setdefault('MYSQL_DB', None)
        app.config.setdefault('MYSQL_PORT', 3306)
        app.config.setdefault('MYSQL_UNIX_SOCKET', None)
        app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)
        app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)
        app.config.setdefault('MYSQL_USE_UNICODE', True)
        app.config.setdefault('MYSQL_CHARSET', 'utf8')
        app.config.setdefault('MYSQL_SQL_MODE', None)
        app.config.setdefault('MYSQL_CURSORCLASS', None)
        # Close the cached per-context connection when the app context ends.
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)
    @property
    def connect(self):
        # Build the MySQLdb.connect kwargs from the current app's MYSQL_*
        # config; falsy values (None, '', 0, False) are simply omitted.
        # NOTE: this is a property with a side effect -- each access opens
        # a brand-new connection.
        kwargs = {}
        if current_app.config['MYSQL_HOST']:
            kwargs['host'] = current_app.config['MYSQL_HOST']
        if current_app.config['MYSQL_USER']:
            kwargs['user'] = current_app.config['MYSQL_USER']
        if current_app.config['MYSQL_PASSWORD']:
            kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']
        if current_app.config['MYSQL_DB']:
            kwargs['db'] = current_app.config['MYSQL_DB']
        if current_app.config['MYSQL_PORT']:
            kwargs['port'] = current_app.config['MYSQL_PORT']
        if current_app.config['MYSQL_UNIX_SOCKET']:
            kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']
        if current_app.config['MYSQL_CONNECT_TIMEOUT']:
            kwargs['connect_timeout'] = \
                current_app.config['MYSQL_CONNECT_TIMEOUT']
        if current_app.config['MYSQL_READ_DEFAULT_FILE']:
            kwargs['read_default_file'] = \
                current_app.config['MYSQL_READ_DEFAULT_FILE']
        if current_app.config['MYSQL_USE_UNICODE']:
            kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']
        if current_app.config['MYSQL_CHARSET']:
            kwargs['charset'] = current_app.config['MYSQL_CHARSET']
        if current_app.config['MYSQL_SQL_MODE']:
            kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']
        if current_app.config['MYSQL_CURSORCLASS']:
            # Config stores the cursor class by name; resolve it here.
            kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.config['MYSQL_CURSORCLASS'])
        return MySQLdb.connect(**kwargs)
    @property
    def connection(self):
        """Attempts to connect to the MySQL server.
        :return: Bound MySQL connection object if successful or ``None`` if
        unsuccessful.
        """
        ctx = _app_ctx_stack.top
        if ctx is not None:
            # Cache exactly one connection on the app context; reuse it
            # for subsequent accesses within the same context.
            if not hasattr(ctx, 'mysql_db'):
                ctx.mysql_db = self.connect
            return ctx.mysql_db
    def teardown(self, exception):
        # Flask app-context teardown hook: close the cached connection
        # if one was ever opened in this context.
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'mysql_db'):
            ctx.mysql_db.close()
|
8,406 | e3e50df47ef074f13382e249832c065ebdce18a6 | a = []
# Read 2*n integers, one per line, into `a` (n itself comes from the
# first input line).
for i in range((2 * int(input()))):
    a.append(int(input()))
# NOTE(review): the original file's indentation was lost; this layout
# (everything below outside the loop) is the most plausible reading.
if 1 in a:
    # Index of the (first) maximum element.
    c = a.index(max(a))
    # NOTE(review): a[c + 1] raises IndexError when the maximum is the
    # last element -- presumably the input format guarantees it is not.
    if a[c + 1] == 1:
        print(c)
    else:
        # Drop the first maximum and retry with the next-largest value.
        del a[c]
        s = a.index(max(a))
        if a[s + 1] == 1:
            print(s)
        else:
            print('-1')
# NOTE(review): nothing at all is printed when no 1 occurs in the input.
|
8,407 | 14971842092c7aa41477f28cec87628a73a8ffd6 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import json
def get_webcasts(year):
    """Scrape the SANS webcast archive page for *year*.

    Returns a list of dicts with title, date, sponsor and speaker strings.
    NOTE(review): tightly coupled to the page markup of the time (table
    and cell class names); any site redesign silently breaks the selectors.
    """
    url = "https://www.sans.org/webcasts/archive/" + str(year)
    page = urlopen(url)
    soup = BeautifulSoup(page, 'html.parser')
    table = soup.find('table', {"class": "table table-bordered table-striped"})
    webcasts = []
    for row in table.find_all('tr'):
        title_content = row.find('td', {"class": "table_data table_data_title"})
        # Header/spacer rows have no title cell; skip them.
        if title_content is None:
            continue
        title_anchor = title_content.find('a')
        title_link = title_anchor.get("href")  # NOTE(review): collected but never used
        title = title_anchor.string
        date = row.find('td', {"class": "table_data table_data_date"})
        sponsor = row.find('td', {"class": "table_data table_data_sponsor"})
        speaker = row.find('td', {"class": "table_data table_data_speaker"})
        # .string is None when a cell is absent or contains nested markup.
        webcast = {"title": title, "date": date.string, "sponsor": sponsor.string,
                   "speaker": speaker.string}
        webcasts.append(webcast)
    return webcasts
# Build {year: [webcast dicts]} for 2013-2018 and dump it as JSON to stdout.
result = {}
for year in range(2013, 2019):
    webcasts = get_webcasts(year)
    result[str(year)] = webcasts
print(json.dumps(result))
|
8,408 | 2809ed3a5ea1e527609e169bca1440e0db2761b9 | import time
import pytest
from pytest_bdd import scenarios, given, when, then
from conf import Constants
from page_components.page import PageComponent
from page_components.overall import OverallPage
# Scenarios
scenarios('overall_rating.feature', features_base_dir=Constants.FEATURE_FILES_BASE_DIR)
# Fixtures
@pytest.fixture
def home_page(getBrowser):
    # BUG(review): `HomePage` is never imported in this module -- using this
    # fixture raises NameError.  The import was probably dropped.
    aHome = HomePage(getBrowser)
    return aHome
@pytest.fixture
def overall_page(getBrowser):
    # Page object for the overall-ratings page.
    aOverall = OverallPage(getBrowser)
    return aOverall
@pytest.fixture
def page_component(getBrowser):
    # Generic page chrome (pagination arrows, page-number edit box, ...).
    aPage = PageComponent(getBrowser)
    return aPage
# private method
# Given Steps
@given('Go to overall page')
def goTo_overall(getBrowser):
    # Navigate straight to the overall-ratings URL.
    getBrowser.get(Constants.get_overall_url())
    time.sleep(1)  # NOTE(review): fixed sleeps are flaky; prefer explicit waits
# When Steps
@when('the user sort the list by rank (click it)')
def sort_by_rank(overall_page):
    overall_page.sort_by_rank()
    time.sleep(1)
@when('the user click the viewmore button to see the <num> model')
def viewmore(overall_page, num):
    overall_page.use_viewMore(num)
@when('the user check the overall list')
def view_overall_list():
    # Intentionally empty: the verification happens in the Then step.
    pass
@when('the user click right arrow to see next page')
def goTo_next_page(page_component):
    page_component.click_right_arrow()
@when('the user click left arrow to previous page')
def goTo_prev_page(page_component):
    page_component.click_left_arrow()
@when('the user input <num> in the edit control')
def input_page_num(page_component, num):
    page_component.edit_page(num)
# Then Steps
@then('rating list showed correctly')
def check_list_successfully(overall_page):
    # First row is expected to be Lamborghini in the seeded test data.
    make = overall_page.get_value_make(1)
    assert make == 'Lamborghini'
@then('rating show according the rank ascending')
def check_rank_sort(overall_page):
    # After sorting, ranks must appear as 1, 2, ... from the top.
    first_rank = overall_page.get_value_rank(1)
    second_rank = overall_page.get_value_rank(2)
    assert first_rank == '1'
    assert second_rank == '2'
@then('go to a model page')
def check_goTo_model_page(getBrowser):
    assert getBrowser.current_url.startswith(Constants.get_buggycar_host() + "model/")
@then('go to the next page')
def check_goTo_next_page(page_component):
    actual_msg = page_component.get_pag_msg()
    assert actual_msg == '« » page 2 of 5'
@then('go to the previous page')
def check_goTo_prev_page(page_component):
    actual_msg = page_component.get_pag_msg()
    assert actual_msg == '« » page 1 of 5'
@then('go to the <num> page')
def check_goTo_num_page(page_component, num):
    actual_msg = page_component.get_pag_msg()
    assert actual_msg == '« » page ' + num + ' of 5'
|
8,409 | a3bcd383656284a2236e79b5d5d7acdfe433a13b | # list audio files
import glob
def listFiles(path):
    """Return all .wav files matching the glob pattern *path* + '*.wav'.

    Callers pass a directory prefix ending in '/' (e.g. 'test/').
    """
    pattern = path + '*.wav'
    return glob.glob(pattern)
import random
def getNextFile(files):
    """Pick one file uniformly at random from *files*."""
    chosen = random.choice(files)
    return chosen
import pyaudio
import wave
CHUNK = 1024
def getRandomFile(folder = 'test/'):
    """Return the name of a randomly chosen .wav file from *folder*."""
    candidates = listFiles(folder)
    return getNextFile(candidates)
def play(filename):
    """Play a .wav file through the default output device, blocking
    until playback finishes.

    Fix over the original: the wave file was never closed and PyAudio
    was not terminated if an error occurred mid-playback; both are now
    guaranteed by try/finally.
    """
    # open file
    f = wave.open(filename, 'rb')
    p = pyaudio.PyAudio()
    try:
        # open stream matching the file's sample width / channels / rate
        stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
                        channels=f.getnchannels(),
                        rate=f.getframerate(),
                        output=True)
        try:
            # stream the file in CHUNK-frame pieces
            data = f.readframes(CHUNK)
            while data:
                stream.write(data)
                data = f.readframes(CHUNK)
        finally:
            # stop stream
            stream.stop_stream()
            stream.close()
    finally:
        # release the file handle and the PyAudio engine in every case
        f.close()
        p.terminate()
|
8,410 | 26bb5dc2679a4375d0950667ed02369df10857a8 | import numpy as np
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.decorators import login_required
from django.core.mail import EmailMessage
from django.shortcuts import render, redirect
from django.template.loader import get_template
from dashboard.notebook.creditcard import credit_model
from dashboard.notebook.bank import bank_model
from dashboard.notebook.mobile_data import mobile_model
from dashboard.notebook.graphs import result
from dashboard.notebook.mobile_analytics import mobile_result
from dashboard.notebook.creditcard_analytics import creditcard_result
from .forms import ContactForm, UserLoginForm
# view for index page
def index(request):
return render(request, 'index.html')
# view for about page
def about(request):
return render(request, 'about.html')
### contact view
def contact(request):
form_class = ContactForm
# new logic!
if request.method == 'POST':
form = form_class(data=request.POST)
if form.is_valid():
contact_name = request.POST.get('contact_name', '')
contact_email = request.POST.get('contact_email', '')
form_content = request.POST.get('content', '')
form_content = request.POST.get('content', '')
# Email the profile with the
# contact information
template = get_template('contact_template.txt')
context = {
'contact_name': contact_name,
'contact_email': contact_email,
'form_content': form_content,
}
content = template.render(context)
email = EmailMessage(
"New contact form submission",
content,
"FDS" + '',
['b200jst@gmail.com'],
headers={'Reply-To': contact_email}
)
email.send()
return redirect('/success')
return render(request, 'contact.html', {
'form': form_class,
})
# success page
def success(request):
return render(request, 'success.html')
# login page
def login_view(request):
    """Render the login form; on valid submission, authenticate and
    redirect to ?next= (or the home page)."""
    # Optional post-login redirect target, e.g. /login/?next=/services/
    next = request.GET.get('next')
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get('username')
        password = form.cleaned_data.get('password')
        user = authenticate(username=username, password=password)
        # NOTE(review): authenticate() returns None for bad credentials and
        # login(request, None) then raises -- presumably UserLoginForm already
        # validates the credentials in clean(); confirm before relying on it.
        login(request, user)
        if next:
            return redirect(next)
        return redirect("/")
    return render(request, 'login.html',{"form":form})
# logout view
@login_required(login_url='/login/')
def logout_view(request):
logout(request)
return render(request, "index.html")
# service view
@login_required(login_url='/login/')
def services(request):
return render(request, 'services.html')
# bank fraud page
@login_required(login_url='/login/')
def bank(request):
return render(request, 'bank.html')
# creditcard fraud page
@login_required(login_url='/login/')
def creditcard(request):
return render(request, 'creditcard.html')
# mobile transaction
@login_required(login_url='/login/')
def mobilefraud(request):
return render(request, 'mobile.html')
#banking services
@login_required(login_url='/login/')
def bankresult(request):
    """Predict whether a bank transaction is fraudulent from the submitted
    form and render the result page.

    Improvements over the original: the long if/elif encoding chains are
    dict lookups, and an unrecognised categorical value now encodes as
    None instead of raising UnboundLocalError.  Debug prints removed.
    """
    # Categorical-to-integer code tables; the codes must match the encoding
    # used when bank_model() was trained.
    job_codes = {
        "Unemployed": 1, "Management": 2, "Services": 3, "Blue-Collar": 4,
        "Entrepreneur": 5, "Admin": 6, "Unknown": 7, "Self-employed": 8,
        "Student": 9, "House maid": 10, "Technician": 11, "Retired": 12,
    }
    marital_codes = {"Single": 1, "Divorced": 2, "Married": 3}
    education_codes = {"Unknown": 1, "Primary": 2, "Secondary": 3, "Graduate": 4}
    yes_no_codes = {"Yes": 1, "No": 2}
    # NOTE(review): the original chain mapped "Failure" twice (to 1 and,
    # unreachably, to 2) and spells "Successs" with three s's; the reachable
    # behaviour is preserved here.  The second branch was probably meant to
    # be "Other" -> 2 -- confirm against the training pipeline.
    poutcome_codes = {"Unknown": 3, "Failure": 1, "Successs": 4}

    age = request.POST.get("age")
    new_job = job_codes.get(request.POST.get("job"))
    new_marital = marital_codes.get(request.POST.get("marital"))
    new_education = education_codes.get(request.POST.get("education"))
    balance = request.POST.get("balance")
    new_housing = yes_no_codes.get(request.POST.get("housing"))
    new_loan = yes_no_codes.get(request.POST.get("loan"))
    duration = int(request.POST.get("duration"))
    campaign = int(request.POST.get('campaign'))
    pdays = int(request.POST.get('pdays'))
    previous = int(request.POST.get('previous'))
    new_poutcome = poutcome_codes.get(request.POST.get("poutcome"))

    # Feature vector in the column order the model expects.
    bank_data = np.array([age, new_job, new_marital, new_education, balance,
                          new_housing, new_loan, duration, campaign, pdays,
                          previous, new_poutcome])
    clf = bank_model()
    c = clf.predict([bank_data])
    # The model encodes class 1 as "legitimate".
    if c == [1]:
        response = 'Not Fraud'
    else:
        response = 'Fraud'
    # Hard-coded accuracy from the offline evaluation of bank_model().
    accuracy = 0.8962983425414365
    return render(request, 'bank/result.html', {"result": response, 'accuracy':accuracy})
# analytics
# def analysis(request):
# return render(request, 'analysis.html', {'accuracy': accuracy})
# credit card services
@login_required(login_url='/login/')
def creditresult(request):
    """Predict credit-card fraud from the submitted form and render the
    result; non-POST requests are bounced back to the form page.

    BUG FIX: the original tested `education == "Divorced"` when encoding
    the *marriage* field, so "Divorced" was never encoded as 3; it is now
    looked up against the marriage value.  The if/elif encoding chains are
    dict lookups and debug prints were removed.
    """
    if request.method == "POST":
        limit_balance = request.POST.get("limit_balance")
        # Binary sex encoding: anything other than "Male" maps to 2,
        # matching the original else branch.
        sex = request.POST.get("sex")
        new_sex = 1 if sex == "Male" else 2
        education_codes = {"Primary": 1, "Secondary": 2, "Graduate": 3}
        new_education = education_codes.get(request.POST.get("education"))
        marriage_codes = {"Single": 1, "Married": 2, "Divorced": 3}
        new_marriage = marriage_codes.get(request.POST.get("marriage"))
        age = request.POST.get("age")
        # Six repayment-status, bill-amount and payment-amount columns each.
        pay = [int(request.POST.get("pay_%d" % i)) for i in range(1, 7)]
        bill = [int(request.POST.get("Bill_Amt_%d" % i)) for i in range(1, 7)]
        paid = [int(request.POST.get("Pay_Amt_%d" % i)) for i in range(1, 7)]
        # Feature vector in the column order the model expects.
        credit_data = np.array([limit_balance, new_sex, new_education,
                                new_marriage, age] + pay + bill + paid)
        clf = credit_model()
        c = clf.predict([credit_data])
        # The model encodes class 0 as "legitimate".
        if c == [0]:
            response = 'Not a Fraud'
        else:
            response = 'fraud'
        return render(request, 'creditcard/result.html', {"result": response})
    else:
        return redirect('/creditcard',request)
# mobile fraud services
@login_required(login_url='/login/')
def mobileresult(request):
    """Predict mobile-transaction fraud from the submitted form and render
    the result page.

    Improvement over the original: the transaction-type if/elif chain is a
    dict lookup, so an unrecognised type encodes as None instead of raising
    UnboundLocalError.  Debug prints removed.
    """
    # Codes must match the encoding used when mobile_model() was trained.
    type_codes = {"Payment": 1, "Transfer": 4, "Cash-out": 5, "Debit": 2}
    step = request.POST.get("step")
    new_type = type_codes.get(request.POST.get("type"))
    amount = request.POST.get("amount")
    nameOrig = request.POST.get("nameOrig")
    oldbalanceOrg = request.POST.get("oldbalanceOrg")
    newbalanceOrig = request.POST.get("newbalanceOrig")
    nameDest = request.POST.get("nameDest")
    oldbalanceDest = request.POST.get("oldbalanceDest")
    newbalanceDest = request.POST.get("newbalanceDest")
    # Fixed flag value, matching the original behaviour.
    isFlaggedFraud = 1
    # Feature vector in the column order the model expects.
    mobile_data = np.array([step, new_type, amount, nameOrig, oldbalanceOrg,
                            newbalanceOrig, nameDest, oldbalanceDest,
                            newbalanceDest, isFlaggedFraud])
    clf = mobile_model()
    c = clf.predict([mobile_data])
    # The model encodes class 0 as "legitimate".
    if c == [0]:
        response = 'Not Fraud'
    else:
        response = 'Fraud'
    return render(request, 'mobile/result.html', {"result": response})
# analytics page
def analytics(request):
return render(request, 'analytics.html', {'analytics':result, "mobile_analytics": mobile_result, "creditcard_analytics": creditcard_result}) |
8,411 | d1ed43bab6171c876b2ad9ef9db834ab8f9026d5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from trest.utils import utime
from trest.logger import SysLogger
from trest.config import settings
from trest.exception import JsonError
from applications.common.models.user import User
class UserService(object):
    """CRUD helpers around the User model (all methods are static)."""
    @staticmethod
    def page_list(where, page, per_page):
        """Return one page of user records.

        Arguments:
            where dict -- filter conditions (only 'status' is honoured)
            page int -- current page number
            per_page int -- records per page
        return:
            Paginate object | None
        """
        query = User.Q
        # With an explicit status filter use it; otherwise hide
        # soft-deleted rows (status == -1).
        if 'status' in where.keys():
            query = query.filter(User.status == where['status'])
        else:
            query = query.filter(User.status != -1)
        pagelist_obj = query.paginate(page=page, per_page=per_page)
        return pagelist_obj
    @staticmethod
    def get(id):
        """Fetch a single record by primary key.

        Arguments:
            id int -- primary key
        return:
            User model instance | None
        """
        if not id:
            raise JsonError('ID不能为空')
        obj = User.Q.filter(User.id == id).first()
        return obj
    @staticmethod
    def update(id, param):
        """Update one record.

        Arguments:
            id int -- primary key
            param dict -- column/value pairs to write
        return:
            True | raises JsonError
        """
        # Keep only keys that are real User columns.
        columns = [i for (i, _) in User.__table__.columns.items()]
        param = {k:v for k,v in param.items() if k in columns}
        # Stamp the modification time (millisecond precision).
        if 'updated_at' in columns:
            param['updated_at'] = utime.timestamp(3)
        if not id:
            raise JsonError('ID 不能为空')
        try:
            User.Update.filter(User.id == id).update(param)
            User.session.commit()
            return True
        except Exception as e:
            # Roll back and surface a generic error; details go to the log.
            User.session.rollback()
            SysLogger.error(e)
            raise JsonError('update error')
    @staticmethod
    def insert(param):
        """Insert a new record.

        Arguments:
            param dict -- column/value pairs for the new row
        return:
            True | raises JsonError
        """
        # Keep only keys that are real User columns.
        columns = [i for (i, _) in User.__table__.columns.items()]
        param = {k:v for k,v in param.items() if k in columns}
        # Stamp the creation time (millisecond precision).
        if 'created_at' in columns:
            param['created_at'] = utime.timestamp(3)
        try:
            obj = User(**param)
            User.session.add(obj)
            User.session.commit()
            return True
        except Exception as e:
            # Roll back and surface a generic error; details go to the log.
            User.session.rollback()
            SysLogger.error(e)
            raise JsonError('insert error')
|
8,412 | 4cbb78234ef6e63b856099060ecaeea1779d6ac5 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, masonarmani38@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class LogisticsPlanningTool(Document):
    """Frappe DocType controller for the Logistics Planning Tool."""

    def autoname(self):
        """Derive the document name from its fields:
        customer-territory-date when a customer is set, otherwise
        territory-date."""
        parts = [self.territory, self.schedule_delivery_date]
        if self.customer:
            parts.insert(0, self.customer)
        self.name = "-".join("{0}".format(p) for p in parts)
@frappe.whitelist(True)
def get_atls(ps, pe, territory=None, customer=None, include_pending=None):
    """Return Authority-to-Load records that are not yet fully delivered.

    Arguments:
        ps, pe -- period start / end dates for the delivery-date window
        territory -- restrict to one territory (ignored for the root
                     territory "Nigeria")
        customer -- restrict to one customer
        include_pending -- when truthy, skip the delivery-date window filter

    SECURITY FIX: the original interpolated territory/customer/dates
    straight into the SQL string (injection risk for whitelisted input);
    all values are now passed as bound query parameters.
    """
    conds = ""
    values = {"ps": ps, "pe": pe}
    if territory and str(territory) != str("Nigeria"):
        conds += " AND territory = %(territory)s "
        values["territory"] = territory
    if customer:
        conds += " AND customer = %(customer)s "
        values["customer"] = customer
    if not include_pending:
        conds += " AND delivery_date BETWEEN DATE(%(ps)s) AND DATE(%(pe)s) "
    # Only the pre-built condition fragment (fixed text, no user data) is
    # formatted into the statement; values go through the driver.
    return frappe.db.sql(
        "SELECT name as authority_to_load, IFNULL(delivery_date, transaction_date) as delivery_date , customer, territory from `tabAuthority to Load` WHERE name NOT IN (SELECT l.name FROM `tabLogistics Planning Tool` l INNER JOIN `tabLogistics Planning Tool Detail` c ON(l.name=c.parent) WHERE c.status != 'Delivered') %s ORDER BY territory " % (
            conds), values, as_dict=1)
|
8,413 | fe581ca8176fed01309f0d852f72564863aa0895 | import json
from aioredis import Redis
from aiologger.loggers.json import ExtendedLogRecord
from aiologger.handlers.base import Handler
from app.core import config
class RedisHandler(Handler):
    """aiologger handler that appends JSON-serialised log records to a
    Redis list (RPUSH) under a per-application key."""
    def __init__(
        self,
        redis_client,
        key=f"{config.APP_NAME}-log",
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        # Redis list key the serialised records are pushed onto.
        self.key = key
        self.redis_client: Redis = redis_client
    @property
    def initialized(self):
        # Usable as long as the client connection has not been closed.
        return not self.redis_client.closed
    async def emit(self, record: ExtendedLogRecord) -> None:
        # RPUSH keeps records in chronological order.
        await self.redis_client.rpush(self.key, self.format(record))
    async def close(self) -> None:
        # Close the connection and wait until it is fully released.
        self.redis_client.close()
        await self.redis_client.wait_closed()
    @staticmethod
    def format(record: ExtendedLogRecord):
        """Serialise a log record to a JSON string (non-ASCII preserved).

        Any `extra` keys on the record are merged into the top level and
        may therefore override the standard fields.
        """
        o = {
            "msg": record.get_message(),
            "logged_at": record.created,
            "line_number": record.lineno,
            "file": record.pathname,
            "function": record.funcName,
            "level": record.levelname,
            "module": record.module,
            "kwargs": record.args,
            **record.extra,
        }
        return json.dumps(o, ensure_ascii=False)
|
8,414 | 6b4af452778bdf13ac18e8d260cf1c9176ca95e0 | __author__ = 'Vicio'
from Conexion.conexion import Conexion
class ConexionList():
    """Data-access helper that owns a single database connection."""

    def __init__(self):
        # One shared connection for every query issued by this object.
        self.conexion = Conexion()

    def selectClientes(self):
        """Fetch clients (not implemented yet)."""
        pass

    def selectProveedores(self):
        """Fetch suppliers (not implemented yet)."""
        pass
|
8,415 | 5fd54de3b2f9c2e18a283d016fc16e0e622dc6a0 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 20 09:54:08 2017
@author: chuang
"""
import os
import pickle
#from collections import Counter
#import user_replace
import jieba
import re
from multiprocessing import Pool
#%%
# parameters for processing the dataset
DATA_PATH = '../data/weibo_single/raw'
USER_DICT = './userdict.txt'
PROCESSED_PATH = '../data/weibo_single/processed'
ENCODING = 'utf-8'
jieba.load_userdict(USER_DICT)
DELETE = ['\[.*?\]','\u200b']
MULTI = True
#%%
def replace_tokens(text, replace_dict=None):
    """Strip unwanted spans (bracketed emoticons, zero-width characters,
    per the module-level DELETE patterns) from *text*.

    `replace_dict` is currently unused; it is kept for interface
    compatibility with earlier revisions.
    """
    unwanted = re.compile("|".join(DELETE))
    return unwanted.sub('', text)
def read_txt(file_path, encoding):
    # Read one raw Weibo dialogue file and return a list of conversations,
    # where a conversation is a list of utterance strings.  Conversations
    # are separated by blank lines, utterances by single newlines.
    # NOTE(review): original indentation was lost; only the read is assumed
    # to be inside the `with` block.
    with open(os.path.join(DATA_PATH, file_path), 'r', encoding=encoding, errors='replace') as f:
        text = f.read()
    text = replace_tokens(text)  # strip emoticons/zero-width chars (,user_replace.replace_dict)
    convs = text.split('\n\n')
    lines = [c.split('\n') for c in convs]
    lines = [[i.strip() for i in c if i != ''] for c in lines]  ## get rid of empty sentences
    lines = [c for c in lines if len(c) > 1]  # need at least one (context, reply) pair
    return lines
def context_answers(convos):
    """Split conversations into aligned (context, answer) turn pairs.

    Every consecutive pair of utterances in a conversation yields one
    training sample: the earlier line as context, the later as answer.
    """
    context, answers = [], []
    for convo in convos:
        for earlier, later in zip(convo, convo[1:]):
            context.append(earlier)
            answers.append(later)
    assert len(context) == len(answers)
    return context, answers
def _basic_tokenizer(line, normalize_digits=False):
    """Tokenize one line with jieba (stripped and lower-cased first).

    When *normalize_digits* is set, every token containing a digit is
    replaced by the placeholder token '_数字_'.
    """
    tokens = list(jieba.cut(line.strip().lower()))
    if not normalize_digits:
        return tokens
    digit_re = re.compile(r"\d+")
    return [tok if digit_re.search(tok) is None else '_数字_'
            for tok in tokens]
def _tokenized_data(context,answers):
train_enc_tokens = [_basic_tokenizer(t) for t in context]
print('Train_enc_token done.')
train_dec_tokens = [_basic_tokenizer(t) for t in answers]
print('Train_dec_token done.')
return train_enc_tokens, train_dec_tokens
def _filter(ask_sent,answer_sent):
if len(ask_sent)<3 or len(answer_sent)<2:
return False
if "@" in ask_sent or "@" in answer_sent:
return False
return True
def filter_data(context, answers):
    """Drop (context, answer) pairs whose joined sentences are too short
    or contain an '@' mention (see _filter).

    `context` and `answers` are parallel lists of token lists; the
    surviving pairs are returned as two new parallel lists.
    """
    kept_context, kept_answers = [], []
    for idx, tokens_c in enumerate(context):
        tokens_a = answers[idx]
        if _filter(" ".join(tokens_c), " ".join(tokens_a)):
            kept_context.append(tokens_c)
            kept_answers.append(tokens_a)
    return kept_context, kept_answers
def save_tokenized_data(train_enc_tokens, train_dec_tokens, save_file_name):
    """Pickle (enc_tokens, dec_tokens, [], []) to PROCESSED_PATH/save_file_name.

    The two trailing empty lists keep the 4-tuple layout the downstream
    loader expects (presumably reserved for a test split -- TODO confirm).

    Fix over the original: the file handle was opened inline and never
    closed; `with` now guarantees it is closed even if pickling fails.
    """
    save_file_path = os.path.join(PROCESSED_PATH, save_file_name)
    with open(save_file_path, 'wb') as fh:
        pickle.dump((train_enc_tokens, train_dec_tokens, [], []), fh)
    print('Data saved')
#%%
if __name__ == "__main__":
data_files = os.listdir(DATA_PATH) ## just do two files for now, too many data
#%%
asks,ans = [],[]
for idx,file_path in enumerate(data_files):
#file_path = 'multi_1_4.data'
convos = read_txt(file_path,ENCODING)
context,answers = context_answers(convos)
asks.extend(context)
ans.extend(answers)
print('finish {}'.format(file_path))
print('Total length {}'.format(len(asks)))
#%%
if MULTI:
print('tokanizing, multi process')
cores = os.cpu_count()-2
p = Pool(cores)
context = p.map(_basic_tokenizer,asks)
print('Finish tokenizing ask sentences')
answers = p.map(_basic_tokenizer,ans)
print('Finish tokenizing answer sentences')
p.close()
p.join()
else:
context,answers = _tokenized_data(asks,ans)
print("Total lentgh after tokenization: {}".format(len(context)))
#%%
context,answers = filter_data(context,answers)
print("Total lentgh after filtering: {}".format(len(context)))
#%%
## save into pickles
save_tokenized_data(context,answers,'processed_tokens.p')
#%%
#print(context[:50])
#print(answers[:50])
|
8,416 | a8df6b575afbf6db415e0676a796623f2a9b7a70 | import unittest
from app.party import Party
from app.guest import Guest
from app.shoppingList import ShoppingList
def test_aPartywithNoGuestsShouldHaveNoPartyGuests():
party = Party()
assert 0 == party.numberOfGuests()
def test_aPartywithOneGuestShouldHaveOnePartyGuest():
party = Party()
lisa = Guest("Lisa", 'female')
party.attendedBy(lisa)
assert 1 == party.numberOfGuests()
def test_aPartywithThreeGuestsShouldHaveThreeGuests():
party = Party()
lisa = Guest("Lisa", 'female')
rob = Guest("Rob", 'male')
susan = Guest("susan", 'female')
party.attendedBy(lisa)
party.attendedBy(rob)
party.attendedBy(susan)
assert 3 == party.numberOfGuests()
def test_aGuestShouldBeAbleToLeaveAParty():
    """leftBy() removes exactly one guest from the party."""
    party = Party()
    lisa = Guest("Lisa", 'female')
    rob = Guest("Rob", 'male')
    susan = Guest("susan", 'female')
    party.attendedBy(lisa)
    party.attendedBy(rob)
    party.attendedBy(susan)
    party.leftBy(rob)
    assert 2 == party.numberOfGuests()
def test_aPartyShouldHaveALocation():
    """setLocation()/getLocation() round-trip the party venue."""
    party = Party()
    party.setLocation("my House")
    assert "my House" == party.getLocation()
def test_aGuestShouldRevealHerName():
    """hasName() returns the name passed to the Guest constructor."""
    guest1 = Guest("Lisa", "female")
    assert "Lisa" == guest1.hasName()
def test_weShouldKnowWhoIsAtTheParty():
    """getAttendees() lists guest names in arrival order."""
    party = Party()
    lisa = Guest("Lisa", 'female')
    rob = Guest("Rob", 'male')
    susan = Guest("susan", 'female')
    party.attendedBy(lisa)
    party.attendedBy(rob)
    party.attendedBy(susan)
    assert ["Lisa", "Rob", "susan"] == party.getAttendees()
def test_weShouldBeAbleToCreateAnEmptyShoppingList():
    """A new ShoppingList has no items."""
    shoppingList = ShoppingList()
    assert shoppingList.getItems() == []
def test_weShouldBeAbleToAddItemsToShoppingList():
    """add() appends an item to the shopping list."""
    shoppingList = ShoppingList()
    shoppingList.add("milk")
    assert shoppingList.getItems() == ["milk"]
def test_createShoppingListBasedOnParty():
    """baseOn(party) derives items sized for guests plus host (3 guests -> "for 4")."""
    shoppingList = ShoppingList()
    party = Party()
    lisa = Guest("Lisa", 'female')
    rob = Guest("Rob", 'male')
    susan = Guest("susan", 'female')
    party.attendedBy(lisa)
    party.attendedBy(rob)
    party.attendedBy(susan)
    shoppingList.baseOn(party)
    assert shoppingList.getItems() == ["wine for 4", "food for 4"]
|
8,417 | 83c3193ea40c9328d16fb91774762a76352d8e09 | import dash_html_components as html
import dash_core_components as dcc
# Dash page layout: a pie chart and a bar chart of the property-type counts,
# side by side in one row. The counts are hard-coded snapshot values.
layout = html.Div([
    html.Div([
        html.Div([
            html.H6('Répartition des biens'),
            dcc.Graph(
                id = "pieGraph",
                figure = {
                    "data": [{
                        "values": [2878001,2342181,1773296,521395],
                        "labels": [ 'Maison', 'Appartement', 'Dependance','local_indistriel' ],
                        "name": "Biens",
                        "hoverinfo":"label+name+percent",
                        "hole": .7,
                        "type": "pie",
                        "marker": {'colors':['#3b7548','#ea1313','#ffd700','#FF00FF']}
                    }],
                    "layout": {
                        "width": "2000",
                        "annotations": [{
                            "font": {
                                "size": 20
                            },
                            "showarrow": False,
                            "text": "",
                            "x": 0.2,
                            "y": 0.2
                        }],
                        "showlegend": False
                    }
                }
            )
        ], className="six columns"),
        html.Div([
            html.H6('Effectif des biens'),
            dcc.Graph(
                id = "3",
                figure ={
                    "data": [{
                        'x':[ 'Maison', 'Appartement', 'Dependance','local_indistriel' ],
                        'y':[2878001,2342181,1773296,521395],
                        'name':'Bar biens',
                        'type':'bar',
                        'marker' :dict(color=['#3b7548','#ea1313','#ffd700','#FF00FF']),
                    }],
                    "layout": {
                        # The original defined "xaxis"/"yaxis" twice in this
                        # dict; the second literal silently replaced the first,
                        # discarding the tickfont settings. Merged into one
                        # entry per axis.
                        "xaxis": {"title": "Type", "tickfont": dict(color='black')},
                        "yaxis": {"title": "Nombre", "tickfont": dict(color='black')},
                        "width": "2000",
                        "annotations": [{
                            "font": {"size": 20},
                            "showarrow": False,
                            "text": "",
                            "x": 0.2,
                            "y": 0.2
                        }],
                        "showlegend": False
                    }
                }
            )
        ], className="six columns"),
    ], className="row", style={"margin": "1% 3%"})
])
8,418 | c945dc4df68fe110e8b38713fb77e2dce9efad8d | # vim: set et ts=4 sw=4 fileencoding=utf-8:
'''
tests.integration.test_pipeline
===============================
'''
import unittest
import yaml
import subprocess
import time
import pickle
from datetime import datetime
from amqp.exceptions import ChannelError
from yalp.config import settings
@unittest.skip('need to make this less brittle')
class TestSerialization(unittest.TestCase):
    '''
    Test that serialization via celery does not break.

    Integration test: requires a local RabbitMQ broker and spawns the
    yalp-parsers worker as a subprocess. Skipped by default because of
    that environmental dependency.
    '''
    def setUp(self):
        # Minimal passthrough parser configuration for the worker under test.
        settings.parsers = [{
            'passthrough': {}
        }]
        try:
            import socket
            import amqp
            # Default connection parameters: localhost broker.
            self.connection = amqp.Connection()
            self.channel = self.connection.channel()
        except socket.error:
            # No broker available -> skip instead of failing the suite.
            from nose.plugins.skip import SkipTest
            raise SkipTest('Unable to connect to rabbitmq')
        self.now = datetime.now()
        # Sample event; date_time is a datetime object on purpose, to
        # exercise non-JSON-trivial serialization through celery.
        self.event = {
            'host': 'test_host',
            'message': 'test message',
            'date_time': self.now,
        }
        # Write the worker config to a fixed temp path and launch the worker.
        with open('/tmp/test_serial.yml', 'w') as config_file:
            config = {
                'parsers': [{
                    'passthrough': {}
                }],
                'parser_workers': 1
            }
            yaml.dump(config, config_file)
        self.parser_process = subprocess.Popen(
            'scripts/yalp-parsers -c /tmp/test_serial.yml',
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    def tearDown(self):
        # Clean up broker queues and tear down the worker subprocess.
        self.channel.queue_delete(queue=settings.parser_queue)
        self.channel.queue_delete(queue='outputs')
        self.channel.close()
        self.connection.close()
        self.parser_process.kill()
    def test_default_serializer(self):
        """An event round-trips through the parser queue intact."""
        from yalp.pipeline import tasks
        tasks.process_message.apply_async(
            args=[self.event],
            queue=settings.parser_queue,
            serializer=settings.celery_serializer,
        )
        # Poll until the worker has published the parsed event; basic_get
        # raises ChannelError while the queue does not exist yet.
        while True:
            try:
                message = self.channel.basic_get(queue='outputs')
                break
            except ChannelError:
                time.sleep(0.1)
        self.assertIsNotNone(message)
        # NOTE(review): assumes the output payload is pickle-serialized —
        # confirm against the outputter configuration.
        event = pickle.loads(message.body)['message']
        self.assertEqual('test message', event['message'])
        self.assertEqual(self.now, event['date_time'])
|
8,419 | 301a6ec56bd265ff63a924ecd64d6708cb6b139c | import random
import profile_handler
import re
class RollBot():
    """A class that handles the bulk of dice-rolling functionality.

    State is kept on the instance between calls: the parsed pieces of the
    most recent roll request, the individual die results, their sum, and an
    ``error`` string. Errors never raise out of roll_input(); callers are
    expected to check ``self.error`` after each call.
    """
    def __init__(self):
        """initializes the attributes of the class"""
        # this is where the procesed user input gets stored for easy readbacks
        self.input_last_roll = ''
        # an empty list to store the results of the roll in
        self.last_roll = []
        # The sum of all the roles inside the last_roll list
        self.result = 0
        # Last error message; empty string means the last call succeeded.
        self.error = ''
        self.number_of_dice = ''
        self.size_of_dice = ''
        self.modifier = ''
        self.modifier_number = ''
        self.sort = False
        self.adv = False
        self.hidden = False
        # a flag to save the number of the dropped roll on an adv roll
        self.dropped_roll = ''
        # flags for rolling stats for a char
        self.d_stats = {}
        self.dropped_d_stats = {}
        self.result_d_stats = {}
        # flag for the art/meme/hidden rolls dictionary
        self.art_dict = {}
        self.meme_dict = {}
        self.hidden_rolls = {}
    def roll_input(self, user_input, optional_input):
        """Parse a dice expression like '2d20+3' plus an option keyword.

        user_input: dice expression matched against the module-level
            ``dice_handler`` regex ('(N)d(S)(+/-M)').
        optional_input: '' or one of 'adv'/'dadv'/'sort'/'hide'.
        On failure sets ``self.error`` instead of raising.
        """
        # Resets the status of everything before a new roll
        self.last_roll = []
        self.result = 0
        self.hidden = False
        # An empty error flag to easily throw errors back through discord
        self.error = ''
        self.number_of_dice = ''
        self.size_of_dice = ''
        # The modifier is either a + or a -, stored for easy acces
        self.modifier = ''
        self.modifier_number = ''
        self.dropped_roll = ''
        # this code is run in try to catch atribute errors due to a wrong input
        try:
            # All parts filtered by regex get stored in an object,
            # that then gets split
            split_input = dice_handler.match(user_input)
            # sets the number of dice in the class for use in other functions
            self.number_of_dice = split_input.group(1)
            # sets the size of the dice in the class for use in other functions
            self.size_of_dice = split_input.group(3)
            # sets the +/- in the class for use in other functions
            self.modifier = split_input.group(5)
            # sets the number of the mod in the class to use in other functions
            self.modifier_number = split_input.group(6)
            # An if statements that alows typing 1 for rolling 1 dice to be
            # optional.
            if self.number_of_dice == '':
                self.number_of_dice = 1
            # Makes sure atleast 1 dice is rolled
            # NOTE(review): this sets error but does not return, so the
            # roll still proceeds — confirm whether that is intended.
            if self.number_of_dice == '0':
                self.error = "You can't roll 0 dice"
            # Sets a cap of 200 dice being rolled
            if int(self.number_of_dice) > 200:
                self.error = \
                    'No! Thats to many dice I do not have that many!!!'
                return
            # Meant to catch errors where a none size dice managed to sneak
            # Through.
            # NOTE(review): when size_of_dice is None this only sets error
            # without returning; int(None) two lines below raises TypeError,
            # which is not in the except clauses — verify upstream regex
            # always provides a size here.
            if self.size_of_dice == '0' or self.size_of_dice is None:
                self.error = "Please define the dice size."
            # Sets a cap on how large of a dice you can roll.
            if int(self.size_of_dice) > 50000:
                self.error = "Dice too big!" + \
                             " That has gotta be fake nothing goes this high"
                return
            # Checks wether no modifier was entered or if it was incorrectly
            # entered by checking the lenght of the input vs what came through.
            if self.modifier is None and \
                    len(str(user_input)) > \
                    len(str(self.number_of_dice) +
                        str(self.size_of_dice) + 'D'):
                self.error = " Incorrect modifier. Please use + or -"
                return
            # Sets modifier to +0 if no +/- is entered.
            if self.modifier == '' or self.modifier is None:
                self.modifier = '+'
                self.modifier_number = '0'
            # Sets modifier to +0 if no number for it was entered
            if self.modifier_number == '' or self.modifier_number is None:
                self.modifier = '+'
                self.modifier_number = '0'
            # The full input of the user in 1 flag to print back to the user
            # at the end.
            self.input_last_roll = \
                ' `Rolled ' + \
                str(self.number_of_dice) + \
                'd' + \
                str(self.size_of_dice) + \
                str(self.modifier) + \
                str(self.modifier_number) + \
                ':` '
            if optional_input.lower() == 'adv':
                self.adv = 'adv'
                self.handle_adv()
            # Checks if user asked for disadvantage on a roll and hands it off
            elif optional_input.lower() == 'dadv':
                self.adv = 'dadv'
                self.handle_adv()
            # Checks if user asked for a sorted roll
            elif optional_input.lower() == 'sort':
                # Rolls the dice like normal but sorts the flag after.
                self.sort = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
            elif optional_input.lower() == 'hide':
                # Rolls the dice like normal but does not show the result in channel.
                self.hidden = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
            elif optional_input.lower() != '':
                self.error = str(optional_input) + \
                    " is not a valid option. Please try (sort/adv/dadv/hide)"
            else:
                # If everything passed the checks hand offs the proccesed input
                # to the randomizing and calculating functions.
                self.roll_dice(self.number_of_dice, self.size_of_dice)
        # Catches and attribute error on a wrong input and notifies the user.
        except AttributeError:
            self.error = \
                " Invalid input please follow this format (1)d20(+/-(5))"
        except ValueError:
            self.error = \
                " Invalid input, please Make sure dice size is bigger than 0"
    def roll_dice(self, number_of_dice, size_of_dice):
        """Roll ``number_of_dice`` dice of ``size_of_dice`` sides.

        Results are stored in ``self.last_roll`` as strings; sorted first
        when ``self.sort`` is set (the flag is reset afterwards).
        """
        # makes a list of random numbers based on the information
        # that was put in
        dice = []
        for roll in range(int(number_of_dice)):
            roll = random.randint(1, int(size_of_dice))
            dice.append(roll)
        # Checks wether the result needs to be sorted or not
        if self.sort is True:
            dice.sort()
            # Turns ints into strings after sorting
            converted_dice = []
            for i in range(len(dice)):
                roll_to_convert = dice[i]
                roll_to_convert = str(roll_to_convert)
                converted_dice.append(roll_to_convert)
            # Sets the last roll flag and returns to sort flag to false
            self.last_roll = converted_dice
            self.sort = False
        # Sets the last roll flag for easy cross function use.
        else:
            # Turns Ints into strings incase it had to be sorted
            converted_dice = []
            for i in range(len(dice)):
                roll_to_convert = dice[i]
                roll_to_convert = str(roll_to_convert)
                converted_dice.append(roll_to_convert)
            self.last_roll = converted_dice
    def calculate_roll(self):
        """Sum ``self.last_roll`` plus the signed modifier into ``self.result``."""
        # Takes all the numbers from the last roll and adds them up.
        for i in self.last_roll:
            self.result = int(self.result) + int(i)
        # The modifier string ('+'/'-') concatenated with its number parses
        # directly as a signed int.
        self.result = self.result + int(self.modifier + self.modifier_number)
    def handle_adv(self):
        """Roll with advantage/disadvantage: roll 2 dice, drop the low/high one."""
        # This part handles advantage so it takes the highest of the 2 numbers
        # and then drops the lowest number
        if self.adv == 'adv':
            # Checks wether the number that was input is not 1 or 2.
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) != '1':
                self.error = 'Can only roll advantage with 2 dice, ya dummy!'
            # Checks if number of dice was left blank so automatically set to 1
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) == '1':
                self.number_of_dice = 2
            # Checks if the number of dice is 2 before moving on
            if str(self.number_of_dice) == '2':
                self.sort = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
                # Stores the dropped roll before deleting it from last_roll
                self.dropped_roll = self.last_roll[0]
                del self.last_roll[0]
            # Returns flag to default state
            self.adv = False
        # This part handles disadvantage so it takes the lowest of the 2
        # numbers and then drops the highest number
        if self.adv == 'dadv':
            # Checks wether the number that was input is not 1 or 2.
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) != '1':
                self.error = \
                    'Can only roll disadvantage with 2 dice, ya dummy!'
            # Checks if number of dice was left blank so automatically set to 1
            if str(self.number_of_dice) != '2' and \
                    str(self.number_of_dice) == '1':
                self.number_of_dice = 2
            # Checks if the number of dice is 2 before moving on
            if str(self.number_of_dice) == '2':
                self.sort = True
                self.roll_dice(self.number_of_dice, self.size_of_dice)
                # Stores the dropped roll before deleting it from last_roll
                self.dropped_roll = self.last_roll[1]
                del self.last_roll[1]
            # Returns flag to default state
            self.adv = False
    def roll_stats(self):
        """Roll 6 ability scores (4d6 drop lowest each), filling the *_d_stats dicts."""
        for stat in range(6):
            self.roll_input('4d6', 'sort')
            # drop the lowest die (first element after the sorted roll)
            self.dropped_d_stats[stat] = self.last_roll[0]
            del self.last_roll[0]
            self.calculate_roll()
            self.result_d_stats[stat] = self.result
            self.d_stats[stat] = self.last_roll
# The regex that looks through the input for key information:
# group 1 = dice count (optional), group 3 = dice size,
# group 5/6 = '+'/'-' modifier sign and its value (both optional).
dice_handler = re.compile(r'(\d*)([dD])(\d*)(([+-])(\d*))?')
# Shared profile handler instance used by the bot.
ph = profile_handler.PHandler()
|
8,420 | a14a6c015ed3063015973b5376a1351a70808dc0 | casosteste = int(input())
# For each test case: read N instructions; LEFT counts -1, RIGHT counts +1,
# and "SAME AS k" copies the value of the k-th instruction (1-based).
# The printed result is the net sum of all instructions.
for testes in range(casosteste):
    num_instru = int(input())
    lista = []
    for intru in range(num_instru):
        p = input().upper()
        if p == 'LEFT':
            lista.append(-1)
        elif p == 'RIGHT':
            lista.append(+1)
        elif p.startswith('SAME AS'):
            # "SAME AS k": position 7 onwards holds k; convert to 0-based.
            i = int(p[7:]) - 1
            lista.append(lista[i])
    resultado = int(sum(lista))
    print(resultado)
|
8,421 | 6eb8172e7e26ad6ec9cb0d30c5a0613ce79296e6 | import pickle
import saveClass as sc
import libcell as lb
import numpy as np
import struct
import os
# def save_Ldend(Ldends, bfname):
# # create a binary file
# bfname='Dend_length.bin'
# binfile = file(bfname, 'wb')
# # and write out two integers with the row and column dimension
# header = struct.pack('2I', Ldends.shape[0], Ldends.shape[1])
# binfile.write(header)
# # then loop over columns and write each
# for i in range(Ldends.shape[1]):
# ddata = struct.pack('%id' % Ldends.shape[0], *Ldends[:,i])
# binfile.write(ddata)
# binfile.close()
def save_ave_replay(aveData, nIter, nStart, bfname):
    """Average dendritic replay traces across trials and write them as binary.

    aveData: per-trial sequence; each trial holds 4 dendrite traces of
        nStart * 1501 samples. For every 1501-sample sweep the window
        [550, 1000) is averaged, then trials are averaged, giving a
        (4, nStart) matrix.
    bfname: output path. File layout: two uint32 (rows, cols) followed by
        the matrix column by column as float64 (readable from R via readBin).
    """
    vd = np.zeros((nIter, 4, nStart))
    for i_trial in range(nIter):
        vv = aveData[i_trial]
        for i_dendrite in range(4):
            vvv = vv[i_dendrite]
            mv = np.reshape(vvv, (nStart, 1501))
            # mean over the 550-1000 sample window of every sweep
            vd[i_trial, i_dendrite, :] = np.mean(mv[:, 550:1000], 1)
    mvd = np.mean(vd, 0)
    # The original used the Python-2-only builtin `file(...)`, which raises
    # NameError on Python 3; open() in a context manager replaces it.
    with open(bfname, 'wb') as binfile:
        # header: row and column dimensions as two unsigned ints
        binfile.write(struct.pack('2I', mvd.shape[0], mvd.shape[1]))
        # then loop over columns and write each as packed doubles
        for i in range(mvd.shape[1]):
            binfile.write(struct.pack('%id' % mvd.shape[0], *mvd[:, i]))
def save_ave_place(aveData, nIter, bfname):
    """Average place-field traces across trials and write them as binary.

    aveData: per-trial sequence; each trial holds 4 dendrite traces with at
        least 50000 samples. Each trace is cut into 20 bins of 2500 samples,
        each bin averaged, then trials averaged, giving a (4, 20) matrix.
    bfname: output path. File layout: two uint32 (rows, cols) followed by
        the matrix column by column as float64.
    """
    vd = np.zeros((nIter, 4, 20))
    for i_trial in range(nIter):
        vv = aveData[i_trial]
        for i_dendrite in range(4):
            vvv = vv[i_dendrite]
            mv = np.reshape(vvv[0:50000], (20, 2500))
            vd[i_trial, i_dendrite, :] = np.mean(mv, 1)
    mvd = np.mean(vd, 0)
    print(bfname)
    # The original used the Python-2-only builtin `file(...)`, which raises
    # NameError on Python 3; open() in a context manager replaces it.
    with open(bfname, 'wb') as binfile:
        # header: row and column dimensions as two unsigned ints
        binfile.write(struct.pack('2I', mvd.shape[0], mvd.shape[1]))
        # then loop over columns and write each as packed doubles
        for i in range(mvd.shape[1]):
            binfile.write(struct.pack('%id' % mvd.shape[0], *mvd[:, i]))
def _sim_filename(data):
    """Build the output file stem encoding the stimulus/model parameters."""
    if data.stimType == 'DStim':
        filename = 'T' + str(data.TSTOP) + '_dend' + str(data.iclampLoc[2]) + '_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1] - data.iRange[0])
    elif data.stimType == 'SStim':
        filename = 'T' + str(data.TSTOP) + '_soma_N' + str(len(data.iRange)) + '_I' + str(data.iRange[0]) + '_dI' + str(data.iRange[1] - data.iRange[0])
    else:
        filename = 'T' + str(data.TSTOP) + '_Ne' + str(data.Ensyn) + '_gA' + str(round(data.Agmax, 2)) + '_tauA' + str(data.Atau2)
    if data.NMDA:
        filename = filename + '_gN' + str(round(data.Ngmax, 2))
    if data.GABA:
        filename = filename + '_Ni' + str(data.Insyn) + '_gG' + str(round(data.Igmax, 2))
        if data.GABA_B:
            filename = filename + '_gB' + str(round(data.Bgmax, 2))
    if data.modulateNa:
        filename = filename + '_noDendNa'
    if data.stimType == 'nIter':
        filename = filename + '_tInt' + str(data.tInterval) + 'ms_' + data.locBias + '_' + data.direction
    if (data.stimType == 'place') + (data.stimType == 'poisson') + (data.stimType == 'replay'):
        filename = filename + "_Er" + str(data.Erate) + '_Ir' + str(data.Irate) + '_' + data.placeType + '_rep' + str(data.nIter)
    filename = filename + '_stimseed' + str(data.stimseed)
    if data.modulateK == True:
        filename = filename + '_K0'
    if data.modulateK_local == True:
        filename = filename + '_KL0'
    if data.modulateK_parents == True:
        filename = filename + '_KP0'
    if data.modulateRmRa == True:
        filename = filename + '_RmRa'
    if data.modulateRmRaSeg == True:
        filename = filename + '_RmRaSeg'
    if data.randomW == True:
        filename = filename + '_randW'
    return filename


def save_sim(data, out_binary=False, out_vdend=False, out_pickle=False, outdir='data', dt_save=1):
    """Persist simulation results under ``outdir``.

    Depending on the flags this writes:
      * out_pickle: a pickle containing [data, modelData],
      * out_binary: the somatic voltage matrix (vdata_*.npy), down-sampled
        from data.dt to dt_save,
      * out_vdend: dendritic voltages, synapse locations and the input
        spike trains as .npy files.
    The file stem encodes the stimulus parameters (see _sim_filename).
    Dead commented-out binary writers from the original were removed;
    the .npy outputs are the live format.
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Model properties are collected for the pickle payload.
    modelData = sc.emptyObject()
    lb.props(modelData)
    filename = _sim_filename(data)
    if out_pickle:
        fname = './' + outdir + '/' + filename + '.pkl'
        # Context manager guarantees the handle is closed even on error.
        with open(fname, 'wb') as f:
            pickle.dump([data, modelData], f)
    if out_binary:
        # Somatic response, down-sampled in time for later analysis.
        mat = np.array(data.vdata)
        L = mat.shape[1]
        dt_ratio = int(round(dt_save / data.dt))
        mat = mat[:, 0:L:dt_ratio]
        np.save("./" + outdir + "/vdata_" + filename + ".npy", mat)
    if out_vdend:
        # Dendritic responses of all repetitions, stacked side by side.
        nRep = len(data.vDdata)
        mat = np.array(data.vDdata[0])
        for i in range(1, nRep):
            mat = np.hstack((mat, data.vDdata[i]))
        L = mat.shape[1]
        dt_ratio = int(round(dt_save / data.dt))
        mat = mat[:, 0:L:dt_ratio]
        np.save("./" + outdir + "/vDdata_" + filename + ".npy", mat)
        # Synapse locations: excitatory always, inhibitory only with GABA.
        # The original referenced Elocs/Ilocs unconditionally after the
        # if/else and raised NameError whenever data.GABA was False.
        Elocs = np.array(data.Elocs)
        np.save("./" + outdir + "/Elocs_" + filename + ".npy", Elocs)
        if data.GABA:
            Ilocs = np.array(data.Ilocs)
            np.save("./" + outdir + "/Ilocs_" + filename + ".npy", Ilocs)
        # Input spike trains, if any were recorded.
        if len(data.stim) > 0:
            np.save("./" + outdir + "/stim_" + filename + ".npy", data.stim)
|
8,422 | deb0cd745eae97a6dbabdfab37e1c6d75e5372f0 | import numpy
from math import cos, sin, radians, tan
class Window:
    """2-D viewing window over the world.

    Supports panning, zooming and rotation, and converts world coordinates
    into the window's normalised coordinate system (SCN) via a 3x3
    homogeneous transform (row-vector convention: point @ matrix).
    """
    # constructor
    def __init__(self, world, xyw_min=None, xyw_max=None):
        self.world = world
        # case where no corners were given: default 200x200 window
        if xyw_min is None or xyw_max is None:
            self.xyw_min = (-100, -100)
            self.xyw_max = (100, 100)
        # case where corners were given: validate them
        else:
            if not isinstance(xyw_min, tuple) or len(xyw_min) != 2:
                raise Exception('O param xyw_min deve ser uma tupla de 2 valores.')
            try:
                self.xyw_min = (float(xyw_min[0]), float(xyw_min[1]))
            except Exception:
                raise Exception('As coordenadas xyw_min devem ser pares de números.')
            if not isinstance(xyw_max, tuple) or len(xyw_max) != 2:
                raise Exception('O param xyw_max deve ser uma tupla de 2 valores.')
            try:
                self.xyw_max = (float(xyw_max[0]), float(xyw_max[1]))
            except Exception:
                raise Exception('As coordenadas xyw_max devem ser pares de números.')
        # The four window corners: 1 = bottom-left, 2 = bottom-right,
        # 3 = top-left, 4 = top-right.
        self.xyw_1 = self.xyw_min
        self.xyw_2 = (self.xyw_max[0], self.xyw_min[1])
        self.xyw_3 = (self.xyw_min[0], self.xyw_max[1])
        self.xyw_4 = self.xyw_max
        # original window centre (can be used to re-centre the view later)
        self.center = self.calcCenter()
        # current centre (used by subsequent centre-relative transforms)
        self.newCenter = self.center
        # pan step in world units; scaled together with zoom
        self.fatorMovimento = 10
        self.window_scn = numpy.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        # initialise the window's SCN matrix; degrees tracks accumulated rotation
        self.degrees = 0
        self.scn()
    def set_xyw_min(self, xmin, ymin):
        self.xyw_min = (xmin, ymin)
    def set_xyw_max(self, xmax, ymax):
        self.xyw_max = (xmax, ymax)
    # returns the (x, y) coordinates of the window centre
    def calcCenter(self) -> (float, float):
        return (self.xyw_min[0] + self.xyw_max[0]) / 2, (self.xyw_min[1] + self.xyw_max[1]) / 2
    # returns the bottom-left and top-right corner coordinates
    def getCoords(self) -> (float, float, float, float):
        return self.xyw_min[0], self.xyw_min[1], self.xyw_max[0], self.xyw_max[1]
    # returns the window's width and height (valid under rotation,
    # computed from corner distances rather than coordinate differences)
    def getWindowDimensions(self) -> (float, float):
        xyw1 = numpy.array([self.xyw_1[0], self.xyw_1[1]])
        xyw2 = numpy.array([self.xyw_2[0], self.xyw_2[1]])
        xyw3 = numpy.array([self.xyw_3[0], self.xyw_3[1]])
        return numpy.linalg.norm(xyw2 - xyw1), numpy.linalg.norm(xyw3 - xyw1)
    # translates the window "up" from the user's point of view
    # (the pan direction is rotated by the window's current angle)
    def moveUp(self):
        rad_angle = numpy.radians(self.degrees)
        sin = numpy.sin(rad_angle)
        cos = numpy.cos(rad_angle)
        self._translate(dx=self.fatorMovimento * sin, dy=self.fatorMovimento * cos)
    # translates the window "down" from the user's point of view
    def moveDown(self):
        rad_angle = numpy.radians(self.degrees)
        sin = numpy.sin(rad_angle)
        cos = numpy.cos(rad_angle)
        self._translate(dx=(-1) * self.fatorMovimento * sin, dy=(-1) * self.fatorMovimento * cos)
    # translates the window "right" from the user's point of view
    def moveRight(self):
        rad_angle = numpy.radians(180 - self.degrees)
        sin = numpy.sin(rad_angle)
        cos = numpy.cos(rad_angle)
        self._translate(dx=(-1) * self.fatorMovimento * cos, dy=(-1) * self.fatorMovimento * sin)
    # translates the window "left" from the user's point of view
    def moveLeft(self):
        rad_angle = numpy.radians(180 - self.degrees)
        sin = numpy.sin(rad_angle)
        cos = numpy.cos(rad_angle)
        self._translate(dx=self.fatorMovimento * cos, dy=self.fatorMovimento * sin)
    # performs the actual window translation
    def _translate(self, dx=0, dy=0):
        # homogeneous coordinates of the four corners
        window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],
                                     [self.xyw_2[0], self.xyw_2[1], 1],
                                     [self.xyw_3[0], self.xyw_3[1], 1],
                                     [self.xyw_4[0], self.xyw_4[1], 1]])
        # translation matrix for an arbitrary (dx, dy)
        translate_matrix = numpy.array([[1, 0, 0], [0, 1, 0], [dx, dy, 1]])
        # update the window corners
        xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, translate_matrix)
        self.xyw_1 = (xyw_1[0], xyw_1[1])
        self.xyw_2 = (xyw_2[0], xyw_2[1])
        self.xyw_3 = (xyw_3[0], xyw_3[1])
        self.xyw_4 = (xyw_4[0], xyw_4[1])
        self.xyw_min = self.xyw_1
        self.xyw_max = self.xyw_4
        # update the centre
        self.newCenter = self.calcCenter()
        # update the SCN matrix
        self.scn()
    # shrinks the window (content appears larger)
    def zoomIn(self):
        self._scale(scale=0.9)
        self.fatorMovimento = self.fatorMovimento * 0.9
    # enlarges the window (content appears smaller)
    def zoomOut(self):
        self._scale(scale=1.1)
        self.fatorMovimento = self.fatorMovimento * 1.1
    # scales the window about its own centre
    def _scale(self, scale=1):
        # window centre
        cx, cy = self.newCenter
        # homogeneous corner coordinates
        window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],
                                     [self.xyw_2[0], self.xyw_2[1], 1],
                                     [self.xyw_3[0], self.xyw_3[1], 1],
                                     [self.xyw_4[0], self.xyw_4[1], 1]])
        # move the centre to the origin
        translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])
        # uniform scaling
        scale_matrix = numpy.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])
        # move the centre back
        translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])
        # compose all three transforms into one matrix
        transformations = numpy.matmul(translate_matrix_1, scale_matrix)
        transformations = numpy.matmul(transformations, translate_matrix_2)
        # apply the transforms
        xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, transformations)
        # update the corners
        self.xyw_1 = (xyw_1[0], xyw_1[1])
        self.xyw_2 = (xyw_2[0], xyw_2[1])
        self.xyw_3 = (xyw_3[0], xyw_3[1])
        self.xyw_4 = (xyw_4[0], xyw_4[1])
        self.xyw_min = self.xyw_1
        self.xyw_max = self.xyw_4
        # update the centre
        self.newCenter = self.calcCenter()
        # update the SCN matrix
        self.scn()
    # rotates the window clockwise
    def rotateRight(self, angle):
        # clockwise = counter-clockwise by (360 - angle)
        self._rotate(360 - angle)
    # rotates the window counter-clockwise
    def rotateLeft(self, angle):
        self._rotate(angle)
    # rotates the window about its own centre
    def _rotate(self, angle=0):
        self.degrees = (self.degrees + angle) % 360
        # window centre
        cx, cy = self.newCenter
        # homogeneous corner coordinates
        window_coords = numpy.array([[self.xyw_1[0], self.xyw_1[1], 1],
                                     [self.xyw_2[0], self.xyw_2[1], 1],
                                     [self.xyw_3[0], self.xyw_3[1], 1],
                                     [self.xyw_4[0], self.xyw_4[1], 1]])
        # move the centre to the origin
        translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])
        # rotation about the origin
        radians = numpy.radians(angle)
        sin = numpy.sin(radians)
        cos = numpy.cos(radians)
        rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])
        # move the centre back
        translate_matrix_2 = numpy.array([[1, 0, 0], [0, 1, 0], [cx, cy, 1]])
        # compose the rotation transform
        transformations = numpy.matmul(translate_matrix_1, rotate_matrix)
        transformations = numpy.matmul(transformations, translate_matrix_2)
        # apply the transforms
        xyw_1, xyw_2, xyw_3, xyw_4 = numpy.matmul(window_coords, transformations)
        # update the corners
        self.xyw_1 = (xyw_1[0], xyw_1[1])
        self.xyw_2 = (xyw_2[0], xyw_2[1])
        self.xyw_3 = (xyw_3[0], xyw_3[1])
        self.xyw_4 = (xyw_4[0], xyw_4[1])
        self.xyw_min = self.xyw_1
        self.xyw_max = self.xyw_4
        # update the centre
        self.newCenter = self.calcCenter()
        # update the SCN matrix
        self.scn()
    # computes the window's coordinate-system transform (SCN):
    # translate centre to origin, undo the window rotation, then scale
    # so that the window spans [-1, 1] on both axes
    def scn(self):
        # window centre
        cx, cy = self.newCenter
        # move the centre to the origin
        translate_matrix_1 = numpy.array([[1, 0, 0], [0, 1, 0], [(-1) * cx, (-1) * cy, 1]])
        # use the INVERSE of the window's current rotation
        radians = numpy.radians((-1) * self.degrees)
        sin = numpy.sin(radians)
        cos = numpy.cos(radians)
        # rotate
        rotate_matrix = numpy.array([[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]])
        length, height = self.getWindowDimensions()
        sx = 1 / (length / 2)
        sy = 1 / (height / 2)
        # scale to normalised device range
        scale_matrix = numpy.array([[sx, 0, 0], [0, sy, 0], [0, 0, 1]])
        # compose the SCN conversion matrix
        scn = numpy.matmul(translate_matrix_1, rotate_matrix)
        self.window_scn = numpy.matmul(scn, scale_matrix)
    # applies the window's SCN transform to an arbitrary world point
    def applySCN(self, x, y):
        point_coords = numpy.array([x, y, 1])
        final_coords = numpy.matmul(point_coords, self.window_scn)
        return final_coords[0], final_coords[1]
|
8,423 | dbea2b1555368460b7d14369d2dfe4f0a01f9e4f | # Generated by Django 3.1.6 on 2021-04-03 20:16
import django.contrib.postgres.fields
from django.db import migrations, models
import enrolments.validators
class Migration(migrations.Migration):
    """Add Enrolment.students: an array of positive-int student ids.

    Defaults to an empty list; each id is validated to belong to the
    enrolment's family by enrolments.validators.validate_student_ids_in_family.
    (Auto-generated migration — do not edit operations by hand.)
    """

    dependencies = [
        ("enrolments", "0007_merge_20210320_1853"),
    ]
    operations = [
        migrations.AddField(
            model_name="enrolment",
            name="students",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.PositiveIntegerField(),
                default=list,
                size=None,
                validators=[enrolments.validators.validate_student_ids_in_family],
            ),
        ),
    ]
|
8,424 | de3a96d46b7eaf198b33efe78b21ef0207dcc609 | from .base import Sort
|
8,425 | 5119b1b6817e002c870b4d6a19fe9aee661fff7e | import unittest
from unflatten import _path_tuples_with_values_to_dict_tree, dot_colon_join, dot_colon_split
from unflatten import _recognize_lists
from unflatten import _tree_to_path_tuples_with_values
from unflatten import brackets_join
from unflatten import flatten
from unflatten import unflatten
class BracketsReduceTestCase(unittest.TestCase):
    """brackets_join renders a key path as name[part][part]... notation."""
    def test_one_element(self):
        self.assertEqual(brackets_join(['aa']), 'aa')
    def test_simple(self):
        self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')
class TreeToPathTuplesWithValuesTestCase(unittest.TestCase):
    """_tree_to_path_tuples_with_values flattens a nested structure into (path, value) pairs."""
    def test_simple(self):
        self.assertSequenceEqual(
            list(_tree_to_path_tuples_with_values(
                {'a': ['b',
                       {'e': 1}]})),
            [(('a', 0), 'b'),
             (('a', 1, 'e'), 1)])
class PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):
    """_path_tuples_with_values_to_dict_tree rebuilds nested dicts (int keys stay dicts)."""
    def test_simple(self):
        self.assertDictEqual(
            _path_tuples_with_values_to_dict_tree(
                [(('a', 0), 'b'),
                 (('a', 1, 'e'), 1)]),
            {'a': {0: 'b',
                   1: {'e': 1}}})
class RecognizeListsTestCase(unittest.TestCase):
    """_recognize_lists converts dicts keyed 0..n-1 into real lists, recursively."""
    def test_simple(self):
        self.assertListEqual(
            _recognize_lists(
                {0: 'a',
                 1: {'b': -1,
                     'c': {0: 'd',
                           1: -2}}}),
            ['a',
             {'b': -1,
              'c': ['d',
                    -2]}])
    def test_again(self):
        # end-to-end: unflatten applied to an int-keyed dict tree
        self.assertDictEqual(
            unflatten(
                {'a': 1,
                 'b': {0: 'c',
                       1: {0: 'd',
                           1: {'e': {'f': -1,
                                     'g': 'h'}}}}}),
            {'a': 1,
             'b': ['c',
                   ['d',
                    {'e': {'f': -1,
                           'g': 'h'}}]]})
class FlattenTestCase(unittest.TestCase):
    """flatten turns nested dicts/lists into one-level keys (brackets by default)."""
    def test_simple(self):
        self.assertDictEqual(
            flatten(
                {'a': 1,
                 'b': ['c',
                       ['d',
                        {'e': {'f': -1,
                               'g': 'h'}}]]}),
            {'a': 1,
             'b[0]': 'c',
             'b[1][0]': 'd',
             'b[1][1][e][f]': -1,
             'b[1][1][e][g]': 'h'})
    def test_dot_colon(self):
        # alternative key style: ':' for list indices, '.' for dict keys
        self.assertDictEqual(
            flatten(
                {'a': 1,
                 'b': ['c',
                       ['d',
                        {'e': {'f': -1,
                               'g': 'h'}}]]},
                join=dot_colon_join),
            {'a': 1,
             'b:0': 'c',
             'b:1:0': 'd',
             'b:1:1.e.f': -1,
             'b:1:1.e.g': 'h'})
class UnflattenTestCase(unittest.TestCase):
    """unflatten is the inverse of flatten for both key styles."""
    def test_simple(self):
        self.assertDictEqual(
            unflatten(
                {'a': 1,
                 'b[0]': 'c',
                 'b[1][0]': 'd',
                 'b[1][1][e][f]': -1,
                 'b[1][1][e][g]': 'h'}),
            {'a': 1,
             'b': ['c',
                   ['d',
                    {'e': {'f': -1,
                           'g': 'h'}}]]})
    def test_dot_colon(self):
        # alternative key style: ':' for list indices, '.' for dict keys
        self.assertDictEqual(
            unflatten(
                {'a': 1,
                 'b:0': 'c',
                 'b:1:0': 'd',
                 'b:1:1.e.f': -1,
                 'b:1:1.e.g': 'h'},
                split=dot_colon_split),
            {'a': 1,
             'b': ['c',
                   ['d',
                    {'e': {'f': -1,
                           'g': 'h'}}]]})
class DotColonJoinTestCase(unittest.TestCase):
    """dot_colon_join renders path tuples with ':' before ints and '.' before strings."""
    def test_simple(self):
        self.assertSequenceEqual(dot_colon_join(('a',)), 'a')
        self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')
        self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')
        self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')
        self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')
class DotColonSplitTestCase(unittest.TestCase):
    """dot_colon_split parses the ':'/'.'-style keys back into path tuples."""
    def test_simple(self):
        self.assertTupleEqual(dot_colon_split('a'), ('a',))
        self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))
        self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))
        self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))
        self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))
|
8,426 | 542602a42eb873508ce2ec39d0856f10cc1e04ff | '''
Created on 2021. 4. 8.
@author: user
'''
import matplotlib.pyplot as plt
import numpy as np
plt.rc("font", family="Malgun Gothic")
def scoreBarChart(names, score):
    """Render a bar chart of one score per name and show it (blocking)."""
    plt.bar(names, score)
    plt.show()
def multiBarChart(names, score):
    """Overlay the score series with demo line plots, annotate the mean,
    label the axes (Korean), and display the figure (blocking).
    """
    # Main series: red circles with a dashed line.
    plt.plot(names, score, "ro--")
    # Extra demo series showing assorted marker/line style strings.
    plt.plot([1, 2, 3], [70, 80, 90], "bo:")
    plt.plot([1, 1, 1], [10, 20, 30], "r>--", [4, 4, 4], [40, 50, 60], "y*-.")
    # Annotate the mean of the main score series near the top of the chart.
    plt.text(3, 96, "평균 : {}".format(np.mean(score)))
    plt.grid(True)
    plt.xlabel("이름")
    plt.ylabel("점수")
    plt.title("국어 점수")
    plt.show()
# Script entry point: plot the five sample students' Korean-language scores.
# scoreBarChart is an alternative single-chart view, kept commented out.
if __name__=="__main__":
    names = ["홍길동", "이순신", "강감찬", "김유신", "임꺽정"]
    score = [89, 86, 97, 77, 92]
    #scoreBarChart(names, score)
    multiBarChart(names, score)
8,427 | 3065c87f79433e9fbbd2ff45c2915dfd5b1fa7cc | # 玩家(攻击力)攻击敌人(血量)敌人受伤(减血)可能死亡(播放动画)
# 敌人攻击玩家 玩家受伤(减血 碎屏) 可能死亡(游戏结束)
# class Player:
# def __init__(self,name,hp,atk):
# self.name = name
# self.hp = hp
# self.atk = atk
#
# @property
# def hp(self):
# return self.__hp
# @hp.setter
# def hp(self,value):
# if 0<=value<=100:
# self.__hp = value
# else:
# raise ValueError('血量不在区间内')
#
# @property
# def atk(self):
# return self.__atk
#
# @atk.setter
# def atk(self, value):
# if 0 <= value <= 50:
# self.__atk = value
# else:
# raise ValueError('攻击力不在区间内')
#
#
# class Enemy:
# def __init__(self, e_name, e_hp, e_atk):
# self.e_name = e_name
# self.e_hp = e_hp
# self.e_atk = e_atk
#
# @property
# def e_hp(self):
# return self.__e_hp
#
# @e_hp.setter
# def e_hp(self, value):
# if 0 <= value <= 100:
# self.__e_hp = value
# else:
# raise ValueError('血量不在区间内')
#
# @property
# def e_atk(self):
# return self.__e_atk
#
# @e_atk.setter
# def e_atk(self, value):
# if 0 <= value <= 20:
# self.__e_atk = value
# else:
# raise ValueError('攻击力不在区间内')
#
#
#
# p1 = Player('悟空',100,20)
# e1 = Enemy('妖怪',40,10)
#
# #1.玩家(攻击力)攻击敌人(血量)敌人受伤(减血)可能死亡(播放动画)
# print('1.玩家攻击敌人:')
# def p_atk_e():
# count = 0
# while True:
# e1.e_hp -= p1.atk
# count += 1
# if e1.e_hp >0:
# print('玩家攻击%d次,敌人血量减少到%d' %
# (count,e1.e_hp))
# elif e1.e_hp == 0:
# print('玩家攻击%d次,敌人死亡,播放动画' % count)
# break
#
# p_atk_e()
#
# # 2.敌人攻击玩家 玩家受伤(减血 碎屏) 可能死亡(游戏结束)
# print('2.敌人攻击玩家:')
# def e_atk_p():
# count = 0
# while True:
# p1.hp -= e1.e_atk
# count += 1
# if p1.hp >0:
# print('敌人攻击%d次,玩家血量减少到%d' %
# (count,p1.hp))
# elif p1.hp == 0:
# print('敌人攻击%d次,玩家死亡,游戏结束' % count)
# break
# e_atk_p()
#玩家类
class Player:
    """Player combatant: a hit-point pool plus an attack value."""

    def __init__(self, hp=100, atk=100):
        # Defaults: 100 HP, 100 attack power.
        self.hp = hp
        self.atk = atk

    def attack(self, enemy):
        """Announce the strike, then deal this player's attack to *enemy*."""
        print('电脑:玩家攻击敌人')
        enemy.damage(self.atk)

    def damage(self, value):
        """Absorb *value* damage; a taunt is printed once HP reaches <= 0."""
        print('玩家:我去')
        self.hp = self.hp - value
        if self.hp > 0:
            return
        print('敌人:你真菜')
#敌人类
class Enemy:
def __init__(self,hp = 100,atk = 99):
self.hp = hp
self.atk = atk
def damage(self,value):
print('敌人:啊')
#玩家减血
self.hp -= value
#可能死亡
if self.hp <= 0:
print('电脑:敌人死亡,播放动画')
def attack(self,player):
print('电脑:敌人攻击玩家')
player.damage(self.atk)
# Demo battle: the player one-shots the enemy (100 atk vs 100 HP), then the
# enemy strikes twice (99 atk each), dropping the player to -98 HP and
# triggering both death messages.
p01 = Player()
e01 = Enemy()
p01.attack(e01)
e01.attack(p01)
e01.attack(p01)
|
8,428 | f3dad6a474d5882beaac7d98f8f60c347730ee55 | #!/usr/bin/env python3
import argparse
import logging
import tango
def delete_devices():
    """Print the class and server lists from the Tango database.

    Despite its name, the deletion calls below are commented out, so this
    currently only lists what is registered.
    """
    db = tango.Database()
    print('class list = ', db.get_class_list('*'))
    print('server list = ', db.get_server_list('*'))
    # for index in range(num_devices):
    #     name = 'low_sdp/elt/test_device_{:05d}'.format(index)
    #     db.delete_server('TestDevice/test1')
    #     db.delete_device('tango/test1/000')
def delete_server():
    """Delete a device server from the Tango database.

    NOTE(review): the server name is an empty string, so this looks like an
    unfinished stub (and it is never called) — confirm the intended server
    instance before wiring it up.
    """
    db = tango.Database()
    db.delete_server('')
# Entry point: only the listing routine runs; delete_server() is unused.
if __name__ == '__main__':
    delete_devices()
|
8,429 | 7ac15f422ca2cd0d30e936b7dd17c96e1f3abff0 | """
Carl Bunge
Washington State University
June 2018
Adapted from @author: Luka Denies from TU Delft.
Changelog:
11/2017 - Integration of CoolProp
06/2018 - Update to OpenFOAM-5.x (Mass-based thermodynamics (for example: cpMcv to CpMCv))
03/2019 - Update to include parahydrogen properties from Refprop
"""
import CoolProp.CoolProp as CP
import numpy as np
import matplotlib.pyplot as plt
#Fluid for thermodynamic properties (rho, Cp, CpMcv, H, S, c, E, thermal conductivity)
CP.set_reference_state('parahydrogen','NBP')
fluid_thermo ='parahydrogen'
#Fluid for transport model (viscosity)
CP.set_reference_state('hydrogen','NBP')
fluid_transport = 'hydrogen'
#****************************************************************************************
#Temperature limits (set within subcritical region for saturation tables)
T0 = 15 #Temperature start (K)
TMax = 32 #Temperature end (K)
#Pressure limits
p0 = 0.1e5 #Pa
pMax = 5.5e5 #Pa
#****************************************************************************************
Tcrit = CP.PropsSI("Tcrit",fluid_thermo)
Ts = []
ps = []
pRange = []
rho = []
mu = []
mu_l = []
mu_v = []
kappa = []
kappa_l = []
kappa_v = []
Cp = []
Cp_l = []
Cp_v = []
H = []
H_l = []
H_v = []
CpMCv = []
CpMCv_l = []
CpMCv_v = []
E = []
E_l = []
E_v = []
S = []
S_l = []
S_v = []
c = []
c_l = []
c_v = []
pSat = []
i = 0
j = 0
p = p0
T = T0
#Build (p, T) tables
while p<pMax:
pRange.append(p)
TRange = []
T = T0
rho.append([0])
Cp.append([0])
Cp_l.append([0])
Cp_v.append([0])
mu.append([0])
mu_l.append([0])
mu_v.append([0])
kappa.append[0])
kappa_l.append([0])
kappa_v.append([0])
CpMCv.append([0])
CpMCv_l.append([0])
CpMCv_v.append([0])
H.append([0])
H_l.append([0])
H_v.append([0])
E.append([0])
E_l.append([0])
E_v.append([0])
S.append([0])
S_l.append([0])
S_v.append([0])
c.append([0])
c_l.append([0])
c_v.append([0])
pSat.append([0])
rho[i][0] = rhoCur = CP.PropsSI('D','T',T,'P',p,fluid_thermo)
CpCur = CP.PropsSI('C','D',rhoCur,'T',T,fluid_thermo)
Cp[i][0] = CpCur
Cp_l[i][0] = CP.PropsSI('C','T',T,'Q',0,fluid_thermo)
Cp_v[i][0] = CP.PropsSI('C','T',T,'Q',1,fluid_thermo)
mu_l[i][0] = CP.PropsSI('V','T',T,'Q',0,fluid_transport)
mu_v[i][0] = CP.PropsSI('V','T',T,'Q',1,fluid_transport)
mu[i][0] = CP.PropsSI('V','D',rhoCur,'T',T,fluid_transport)
kappa_l[i][0] = CP.PropsSI('L','T',T,'Q',0,'REFPROP::parahydrogen')
kappa_v[i][0] = CP.PropsSI('L','T',T,'Q',1,'REFPROP::parahydrogen')
kappa[i][0] = CP.PropsSI('L','D',rhoCur,'T',T,'REFPROP::parahydrogen')
CpMCv_l[i][0] = CP.PropsSI('O','T',T,'Q',0,fluid_thermo)
CpMCv_v[i][0] = CP.PropsSI('O','T',T,'Q',1,fluid_thermo)
CpMCv[i][0] = CpCur-CP.PropsSI('O','D',rhoCur,'T',T,fluid_thermo)
H_l[i][0] = CP.PropsSI('H','T',T,'Q',0,fluid_thermo)
H_v[i][0] = CP.PropsSI('H','T',T,'Q',1,fluid_thermo)
H[i][0] = CP.PropsSI('H','D',rhoCur,'T',T,fluid_thermo)
E_l[i][0] = CP.PropsSI('U','T',T,'Q',0,fluid_thermo)
E_v[i][0] = CP.PropsSI('U','T',T,'Q',1,fluid_thermo)
E[i][0] = CP.PropsSI('U','D',rhoCur,'T',T,fluid_thermo)
S_l[i][0] = CP.PropsSI('S','T',T,'Q',0,fluid_thermo)
S_v[i][0] = CP.PropsSI('S','T',T,'Q',1,fluid_thermo)
S[i][0] = CP.PropsSI('S','D',rhoCur,'T',T,fluid_thermo)
c_l[i][0] = CP.PropsSI('A','T',T,'Q',0,fluid_thermo)
c_v[i][0] = CP.PropsSI('A','T',T,'Q',1,fluid_thermo)
c[i][0] = CP.PropsSI('A','D',rhoCur,'T',T,fluid_thermo)
pSat[i][0] = CP.PropsSI('P','T',T,'Q',0,fluid_thermo)
TRange.append(T)
while T<TMax:
j += 1
dT = 1 # Tstep [K] **************************************************************
T += dT
rhoCur = CP.PropsSI('D','T',T,'P',p,fluid_thermo)
rho[i].append(rhoCur)
CpCur = CP.PropsSI('C','D',rhoCur,'T',T,fluid_thermo)
CpCur_l = CP.PropsSI('C','T',T,'Q',0,fluid_thermo)
CpCur_v = CP.PropsSI('C','T',T,'Q',1,fluid_thermo))
Cp_l[i].append(CP.PropsSI('C','T',T,'Q',0,fluid_thermo))
Cp_v[i].append(CP.PropsSI('C','T',T,'Q',1,fluid_thermo))
Cp[i].append(CpCur)
mu_l[i].append(CP.PropsSI('V','T',T,'Q',0,fluid_transport))
mu_v[i].append(CP.PropsSI('V','T',T,'Q',1,fluid_transport))
mu[i].append(CP.PropsSI('V','D',rhoCur,'T',T,fluid_transport))
kappa_l[i].append(CP.PropsSI('L','T',T,'Q',0,'REFPROP::parahydrogen'))
kappa_v[i].append(CP.PropsSI('L','T',T,'Q',1,'REFPROP::parahydrogen'))
kappa[i].append(CP.PropsSI('L','D',rhoCur,'T',T,'REFPROP::parahydrogen'))
CpMCv_l[i].append((CP.PropsSI('C','T',T,'Q',0,fluid_thermo))-(CP.PropsSI('O','T',T,'Q',0,fluid_thermo)))
CpMCv_v[i].append((CP.PropsSI('C','T',T,'Q',1,fluid_thermo))-(CP.PropsSI('O','T',T,'Q',1,fluid_thermo)))
CpMCv[i].append((CpCur-CP.PropsSI('O','D',rhoCur,'T',T,fluid_thermo)))
H_l[i].append(CP.PropsSI('H','T',T,'Q',0,fluid_thermo))
H_v[i].append(CP.PropsSI('H','T',T,'Q',1,fluid_thermo))
H[i].append(CP.PropsSI('H','D',rhoCur,'T',T,fluid_thermo))
E_l[i].append(CP.PropsSI('U','T',T,'Q',0,fluid_thermo))
E_v[i].append(CP.PropsSI('U','T',T,'Q',1,fluid_thermo))
E[i].append(CP.PropsSI('U','D',rhoCur,'T',T,fluid_thermo))
S_l[i].append(CP.PropsSI('S','T',T,'Q',0,fluid_thermo))
S_v[i].append(CP.PropsSI('S','T',T,'Q',1,fluid_thermo))
S[i].append(CP.PropsSI('S','D',rhoCur,'T',T,fluid_thermo))
c_l[i].append(CP.PropsSI('A','T',T,'Q',0,fluid_thermo))
c_v[i].append(CP.PropsSI('A','T',T,'Q',1,fluid_thermo))
c[i].append(CP.PropsSI('A','D',rhoCur,'T',T,fluid_thermo))
pSat[i].append(CP.PropsSI('P','T',T,'Q',0,fluid_thermo))
TRange.append(T)
i += 1
ps.append([p]*len(TRange))
rhoPseudoCrit = CP.PropsSI('D','T',Tcrit,'P',p,fluid_thermo)
dp = 0.5e5 # Pstep [Pa] ****************************************************************
p += dp
print p
Ts.append(TRange)
print "Calculations done, now writing"
# ---------------------------------------------------------------------------
# Write one whitespace-separated table file per property.  Each file holds
# "\t<value> <T> <p>\n" rows for the full (p, T) grid (pSat omits the
# pressure column).  The original code repeated this loop ~25 times inline;
# it is factored into a single helper.  This also fixes a crash: the leading
# newline for the "Cp_l" file was written to the not-yet-defined name
# `CpFile` (NameError); the intended target was `Cp_lFile`.
# ---------------------------------------------------------------------------
def write_table(file_name, table, with_pressure=True, leading_newline=True):
    # One row per (pressure i, temperature j) grid point, tab-indented.
    # Reads the module-level grids pRange and Ts built above.
    out = open(file_name, "w")
    if leading_newline:
        out.write("\n")
    for i, p in enumerate(pRange):
        if with_pressure:
            rows = ["\t" + str(table[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n"
                    for j in range(len(Ts[i]))]
        else:
            rows = ["\t" + str(table[i][j]) + " " + str(Ts[i][j]) + "\n"
                    for j in range(len(Ts[i]))]
        out.write("".join(rows))
    out.close()

# Saturation pressure: "<pSat> <T>" rows only, no leading blank line.
write_table("pSat", pSat, with_pressure=False, leading_newline=False)

# Viscosities: no leading blank line (matches the original output exactly).
write_table("mu_l", mu_l, leading_newline=False)
write_table("mu_v", mu_v, leading_newline=False)
write_table("mu", mu, leading_newline=False)

# All remaining tables start with a blank line.
write_table("rho", rho)
write_table("Cp_l", Cp_l)
write_table("Cp_v", Cp_v)
write_table("Cp", Cp)
write_table("kappa_l", kappa_l)
write_table("kappa_v", kappa_v)
write_table("kappa", kappa)
write_table("CpMCv_l", CpMCv_l)
write_table("CpMCv_v", CpMCv_v)
write_table("CpMCv", CpMCv)
write_table("H_l", H_l)
write_table("H_v", H_v)
write_table("H", H)
write_table("E_l", E_l)
write_table("E_v", E_v)
write_table("E", E)
write_table("S_l", S_l)
write_table("S_v", S_v)
write_table("S", S)
write_table("c_l", c_l)
write_table("c_v", c_v)
write_table("c", c)
#Previous dT method to save computational time:
#dT = drho/CP.PropsSI('d(D)/d(P)|T','D',rhoCur,'T',T,fluid_thermo)*CP.PropsSI('d(P)/d(T)|D','D',rhoCur,'T',T,fluid_thermo)
#Previous dP method to save computational time:
#drho/CP.PropsSI('d(D)/d(P)|T','D',rhoPseudoCrit,'T',Tcrit,fluid_thermo)
|
8,430 | 591ac07e735e08bcafa8274eb1a1547a01261f55 | #!/usr/local/bin/python3
from sys import stdin
import argparse
# Default values
# Default values consumed by the table builder below.
alignment = 'l'
border = 'none'
stretch_factor = '1.0'
toprule = ''
# Default options
custom_header = False
standalone = False
stretch = False
booktabs = False
# Parsing command-line options
parser = argparse.ArgumentParser('<stdin> | csv2table')
parser.add_argument('-a', action='store_true', help='Create standalone tex document')
parser.add_argument('-b', action='store_true', help='Use booktab rules')
parser.add_argument('-box', action='store_true', help='Draw box border')
parser.add_argument('-c', action='store_true', help='Align all elements center')
parser.add_argument('-d', default=',', help='Table column delimiter')
parser.add_argument('-f', default='10', help='Font size in standalone document')
parser.add_argument('-grid', action='store_true', help='Draw grid border')
parser.add_argument('-i', type=int, default=4, help='Number of spaces to indent elements')
parser.add_argument('-l', action='store_true', help='Align all elements left')
parser.add_argument('-r', action='store_true', help='Align all elements right')
parser.add_argument('-s', default='1.0', help='Table stretch factor')
parser.add_argument('-t', default='nil', help='Custom table layout string')
args = parser.parse_args()
# Fold the parsed flags back into the module-level configuration.
# Note the precedence: -r wins over -l wins over -c when several are given.
if args.a:
    standalone = True
if args.b:
    booktabs = True
if args.box:
    border = 'box'
if args.c:
    alignment = 'c'
delimiter = args.d
font = args.f
if args.grid:
    border = 'grid'
# Indent string: args.i spaces.
indent = ''.join([' '] * args.i)
if args.l:
    alignment = 'l'
if args.r:
    alignment = 'r'
if args.s != '1.0':
    stretch = True
    stretch_factor = args.s
# 'nil' is the sentinel for "no custom layout string".
if args.t != 'nil':
    custom_header = args.t
# Returning proper rule types
def rule(type):
    """Return the LaTeX horizontal-rule command for the given position.

    With booktabs enabled, 'top'/'mid'/'bottom' map to the corresponding
    booktabs rules; otherwise every position renders as plain \\hline.
    """
    if not booktabs:
        return '\\hline'
    return {'top': '\\toprule',
            'mid': '\\midrule',
            'bottom': '\\bottomrule'}.get(type)
# Returning proper header string
def make_header(alignment, border, custom, xs):
    """Build the tabular column-specification string.

    A truthy *custom* layout string wins outright.  Otherwise one column per
    element of *xs* is emitted with the requested *alignment*, decorated with
    vertical rules according to *border*: 'box' wraps the whole spec in
    pipes, 'grid' also separates every column, anything else adds none.
    """
    if custom:
        return custom
    fields = [alignment] * len(xs)
    if border == 'box':
        return '| ' + ' '.join(fields) + ' |'
    if border == 'grid':
        return '| ' + ' | '.join(fields) + ' |'
    return ' '.join(fields)
# Placing standalone header
# Emit the LaTeX document preamble when a standalone .tex file was requested.
if standalone:
    print('\\documentclass[a4paper,{}pt]{{article}}'.format(font))
    if booktabs:
        print('\\usepackage{booktabs}')
    print('\\begin{document}')
    print('\\pagenumbering{gobble}')
# Main parser
# Main parser: stream CSV rows from stdin and emit the LaTeX tabular body.
# Special lines: '!...' is passed through literally, '#...' is a comment,
# '---' marks top/bottom rules and '--' a mid rule.
for line in stdin:
    line = line.rstrip('\n')
    # ! Text is passed literally
    # FIX: use startswith() instead of line[0] — indexing raised IndexError
    # on an empty input line.  Empty lines are now explicitly skipped by the
    # `line and` guard below.
    if line.startswith('!'):
        print(line[1:])
    # # Text is commented out
    elif line and not line.startswith('#') and not line.isspace():
        xs = line.split(delimiter)
        # The first data line opens the tabular environment (a leading '---'
        # instead requests a top rule).
        if not first_line_read:
            if line == '---':
                toprule = '\n' + indent + rule('top')
            else:
                header = make_header(alignment, border, custom_header, xs)
                if stretch:
                    print('\\bgroup')
                    print('\\def\\arraystretch{{{}}}%'.format(stretch_factor))
                print('\\begin{{tabular}}{{{}}}'.format(header), toprule)
            first_line_read = True
        if line == '---':
            if first_line_read:
                print(rule('bottom'))
        elif line == '--':
            print(rule('mid'))
        else:
            # In grid mode every data row after the first is preceded by a rule.
            if previous_line_read and border == 'grid':
                print(rule('mid'))
            print(indent,' & '.join(xs), '\\\\')
        previous_line_read = True
# Close the tabular environment and any wrappers opened above.
print('\\end{tabular}')
if stretch:
    print('\\egroup')
if standalone:
    print('\\end{document}')
|
8,431 | 7eb8fe491a88bcfadf2a38eaa158b74b21514a1c | ###########Seq_Profile_Blast_Parser_tool################################
import csv
import time
import re
import os
import sys
from collections import Counter
import operator
from fractions import *
import glob
import ntpath
from collections import defaultdict
# First line of config.txt is expected to be "key=value"; keep the value.
# NOTE(review): `path` is never used below — presumably consumed by a later
# module; confirm before removing.
path = open('config.txt').read().splitlines()[0].split('=')[-1]
rootDir = '.'
blast_files = []
curdir = os.getcwd()
# Parent directory of the working directory; RESULT/ output goes there.
curdir_up = '/'.join(curdir.split('/')[:-1])
# Collect every BLAST report whose file name starts with "S.A".
for dirName, subdirList, fileList in os.walk(rootDir, topdown = False):
    for fname in fileList:
        if fname.startswith("S.A"):
            fname = os.path.join(dirName, fname)
            blast_files.append(fname)
print 'Module1'
print ' step 1.1 : Parsing the input Blastp files'
# Parse each BLAST report: split it into per-query sections, record query IDs
# and lengths, then collect one row per hit:
# [qid, hit, score, e-value, %identity, %positives, %gaps, gap_count,
#  alignment_length, %query_coverage].  Rows are written tab-separated to
# RESULT/MODULE1/P1/.
for blastfiles in blast_files[:]:
    # Nucleotide-style reports (file name without 'Prot'): keep only
    # Staphylococcus hits.
    if 'Prot' not in blastfiles:
        qids=[]
        query_lengths = []
        counter = 0
        seqsas = []
        file1 = open(blastfiles,'r').read()
        queries = file1.split('Query=')
        datas = queries[1:]
        # First pass: query ID (first token) and length per query section.
        for item in datas[:]:
            lines = item.split('\n')
            qid = item.split()[0]
            qids.append(qid)
            for line in lines[:]:
                if line.startswith('Length='):
                    query_lengths.append(int(line.split('=')[-1]))
                    break
        # Second pass: walk hit blocks; `record` gates on the organism seen
        # in the most recent '>' description line.
        for i,data in enumerate(datas[:]):
            lines = data.split('\n')
            record = False
            for line in lines[:]:
                if line.startswith(">") :
                    tmp = line.split(">")
                    tmp_name = tmp[1]
                    # NOTE(review): tmp_name1 is unused, and
                    # ''.join(tmp_name[0:-1]) is just tmp_name[:-1].
                    tmp_name1 = tmp_name.split("[")
                    tmp_hit = ''.join(tmp_name[0:-1])
                    if 'Staphylococcus' in line:
                        record = True
                    else:
                        record = False
                if line.startswith(" Score") and record:
                    tmp = line.strip().split()
                    tmp_score_s = tmp[2]
                    tmp_score = float(tmp_score_s)
                    tmp_evalue = float(tmp[7].replace(",",""))
                    seqsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])
                if line.startswith(" Identities")and counter <len(seqsas) and record:
                    # Fields look like "123/456 (27%)"; strip the punctuation
                    # to recover the bare percentages and counts.
                    tmp = line.strip().split()
                    tmp_id = tmp[3]
                    tmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')
                    ids = int(tmp_ids)
                    tmp_po = tmp[7]
                    tmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')
                    pos = int(tmp_pos)
                    tmp_gap = tmp[11]
                    tmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')
                    gaps_percent = int(tmp_gaps)
                    gap_number = int(tmp[10].split('/')[0])
                    alignment_length = int(tmp[10].split('/')[-1])
                    # Query coverage: ungapped alignment length over query length.
                    coverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)
                    seqsas[counter].append(ids)
                    seqsas[counter].append(pos)
                    seqsas[counter].append(gaps_percent)
                    seqsas[counter].append(gap_number)
                    seqsas[counter].append(alignment_length)
                    seqsas[counter].append(coverage_percent)
                    counter+=1
        path1 = '%s/RESULT/MODULE1/P1' % curdir_up
        if not os.path.exists(path1):
            os.makedirs(path1)
        file_name = ntpath.basename('blast_out1%s' % blastfiles) + '.txt'
        with open(os.path.join(path1,file_name),'w') as out1:
            for item in seqsas[:]:
                item = '\t'.join([str(x) for x in item])
                out1.write('%s\n' %item)
            out1.close()
    else:
        # Protein reports: keep every hit; the hit ID is the token before '|'.
        strsas = []
        qids=[]
        query_lengths = []
        counter = 0
        file2 = open(blastfiles,'r').read()
        queries = file2.split('Query=')
        datas = queries[1:]
        for item in datas[:]:
            lines = item.split('\n')
            qid = item.split()[0]
            qids.append(qid)
            for line in lines[:]:
                if line.startswith('Length='):
                    query_lengths.append(int(line.split('=')[-1]))
                    break
        for i,data in enumerate(datas[:]):
            lines = data.split('\n')
            record = False
            for line in lines[:]:
                if line.startswith(">") :
                    tmp = line.split(">")
                    tmp_name = tmp[1]
                    tmp_hit = tmp_name.split("|")[0]
                if line.startswith(" Score") :
                    tmp = line.strip().split()
                    tmp_score_s = tmp[2]
                    tmp_score = float(tmp_score_s)
                    tmp_evalue = float(tmp[7].replace(",",""))
                    strsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])
                if line.startswith(" Identities") and counter < len(strsas):
                    tmp = line.strip().split()
                    tmp_id = tmp[3]
                    tmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')
                    ids = int(tmp_ids)
                    tmp_po = tmp[7]
                    tmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')
                    pos = int(tmp_pos)
                    tmp_gap = tmp[11]
                    tmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')
                    gaps_percent = int(tmp_gaps)
                    # NOTE(review): gap_number_1 (a Fraction) is never used.
                    gap_number_1 = Fraction(tmp[10])
                    gap_number = int(tmp[10].split('/')[0])
                    alignment_length = int(tmp[10].split('/')[-1])
                    coverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)
                    strsas[counter].append(ids)
                    strsas[counter].append(pos)
                    strsas[counter].append(gaps_percent)
                    strsas[counter].append(gap_number)
                    strsas[counter].append(alignment_length)
                    strsas[counter].append(coverage_percent)
                    counter +=1
        path1 = '%s/RESULT/MODULE1/P1' %curdir_up
        if not os.path.exists(path1):
            os.makedirs(path1)
        prot_file_name = ntpath.basename('prot_blast_out1%s' % blastfiles) + '.txt'
        with open(os.path.join(path1,prot_file_name),'w') as out2:
            for item in strsas[:]:
                item = '\t'.join([str(x) for x in item])
                out2.write('%s\n' %item)
            out2.close()
def parser2():
    """Filter the P1 hit tables and write the survivors to P2.

    A hit row is kept when e-value <= 1e-10, identity >= 35% and query
    coverage >= 75%.  Rows shorter than 10 fields print 'not match'.
    """
    os.chdir('%s/RESULT/MODULE1/P1' %curdir_up)
    for file1 in glob.glob('*.txt'):
        file_s = open(file1).readlines()
        prepsas = []
        for item in file_s[:]:
            item = item.strip().split('\t')
            hit = item[1]
            e = float(item[3])
            ids = int(item[4])
            cov = float(item[9])
            if e <=1e-10 and ids >= 35 and cov >= 75:
                prepsas.append(item)
            if len(item) < 10:
                print 'not match'
            prot_file_name_s = str(file1)
        path2 = '%s/RESULT/MODULE1/P2' %curdir_up
        if not os.path.exists(path2):
            os.makedirs(path2)
        # Output file keeps the input file's name under P2.
        with open(os.path.join(path2,prot_file_name_s),'w') as prepsas1:
            for hits in prepsas[:]:
                hits = '\t'.join([str(x) for x in hits])
                prepsas1.write('%s\n' %hits)
            prepsas1.close()
def parser3():
    """Group the filtered P2 rows by query ID and write one line per query
    to P3: the query ID followed by tab-separated, comma-joined hit records.

    NOTE(review): the `len(d[key]) <= 250` guard admits up to 251 hits per
    query (append happens after the check) — confirm the intended cap.
    """
    os.chdir('%s/RESULT/MODULE1/P2' %curdir_up)
    for file2 in glob.glob('*.txt'):
        file3 =open(file2).readlines()
        d = {}
        for filters in file3[:]:
            key, value = filters.strip("\n").split("\t")[0],filters.strip("\n").split("\t")[1:]
            key = key.strip('\t')
            value = [str(x)[0:]for x in value]
            if key not in d:
                d[key] = [value]
            elif key in d and len(d[key]) <= 250:
                d[key].append(value)
        prot_file_name_s = str(file2)
        path2 = '%s/RESULT/MODULE1/P3' %curdir_up
        if not os.path.exists(path2):
            os.makedirs(path2)
        with open(os.path.join(path2,prot_file_name_s),'w') as fp:
            # Python 2 idiom: d.keys() returns a list here.
            for item in d.keys()[:]:
                line = item
                hits = d[item]
                for hit in hits:
                    hit2 = ','.join(hit)
                    line += '\t%s' % hit2
                fp.write("%s\n" % line)
# Run the two post-processing stages in order (P1 -> P2 -> P3).
parser2()
parser3()
|
8,432 | 3adb50a6375a73f786369dd22712a657b66f758e | #!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import os
import time
import glob
import torch
import random
import signal
import argparse
from models.trainer import build_trainer
from models import data_loader, model_builder
from models.pytorch_pretrained_bert.modeling import BertConfig
from utils import distributed
from utils.logging import logger, init_logger
def str2bool(v):
    """Parse a truthy/falsy string into a bool (argparse `type=` helper).

    Accepts yes/true/t/y/1 and no/false/f/n/0 case-insensitively; anything
    else raises argparse.ArgumentTypeError.
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
class MultiRunning(object):
    """Drive multi-GPU training by spawning one child process per GPU and
    funnelling child tracebacks back to the parent through an error queue."""

    def __init__(self, args, device_id):
        """
        :param args: parser.parse_args()
        :param device_id: initial device index (overwritten per spawned GPU)
        """
        self.args = args
        self.device_id = device_id

    def multi_card_run(self):
        """ Spawns 1 process per GPU """
        init_logger()
        nb_gpu = self.args.world_size
        mp = torch.multiprocessing.get_context('spawn')
        # Create a thread to listen for errors in the child processes.
        error_queue = mp.SimpleQueue()
        error_handler = ErrorHandler(error_queue)
        # Train with multiprocessing.
        process = []
        for i in range(nb_gpu):
            self.device_id = i
            # BUG FIX: multi_card_train(self, error_queue) accepts only the
            # error queue; the original also passed self.args and
            # self.device_id, which raised TypeError in every child process.
            # args/device_id already travel with the pickled bound method.
            process.append(mp.Process(target=self.multi_card_train,
                                      args=(error_queue,), daemon=True))
            process[i].start()
            logger.info(" Starting process pid: %d " % process[i].pid)
            error_handler.add_child(process[i].pid)
        for p in process:
            p.join()

    def multi_card_train(self, error_queue):
        """Child-process entry: init the distributed backend and train."""
        setattr(self.args, 'gpu_ranks', [int(i) for i in self.args.gpu_ranks])
        try:
            gpu_rank = distributed.multi_init(self.device_id, self.args.world_size, self.args.gpu_ranks)
            print('gpu_rank %d' % gpu_rank)
            if gpu_rank != self.args.gpu_ranks[self.device_id]:
                raise AssertionError("An error occurred in Distributed initialization")
            runner = Running(self.args, self.device_id)
            runner.train()
        except KeyboardInterrupt:
            pass  # killed by parent, do nothing
        except Exception:
            # propagate exception to parent process, keeping original traceback
            import traceback
            error_queue.put((self.args.gpu_ranks[self.device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """ init error handler """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread blocks on the queue so it cannot outlive the parent.
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        # NOTE(review): SIGUSR1 is POSIX-only; this class cannot work on
        # Windows as written.
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """ error handler """
        self.children_pids.append(pid)

    def error_listener(self):
        """ error listener """
        # Re-queue the traceback so signal_handler can read it, then wake the
        # main thread via SIGUSR1 (signal handlers run on the main thread).
        (rank, original_trace) = self.error_queue.get()
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """ signal handler """
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        # Re-raise the child's traceback in the parent.
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval',
'rnn_size']
self.device = "cpu" if self.args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_iter(self):
return data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'train', shuffle=True),
self.args.batch_size, self.device, shuffle=True, is_test=False)
def train(self):
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'valid', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def wait_and_validate(self):
time_step = 0
if self.args.test_all:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = self.validate(step=step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if i - max_step > 10:
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info(f'PPL {str(xent_lst)}')
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
self.test(step)
else:
while True:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if os.path.getsize(cp) <= 0:
time.sleep(60)
continue
if time_of_cp > time_step:
time_step = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
self.validate(step)
self.test(step)
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if time_of_cp > time_step:
continue
else:
time.sleep(300)
def test(self, step=None):
    """Evaluate the checkpoint named in args.test_from on the test split.

    If `step` is not given it is parsed from the checkpoint filename
    (model_step_<N>.pt), falling back to 0 on a malformed name.
    """
    if not step:
        try:
            step = int(self.args.test_from.split('.')[-2].split('_')[-1])
        except IndexError:
            step = 0
    logger.info(f'Loading checkpoint from {self.args.test_from}')
    checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    # Restore architecture flags so the rebuilt model matches the weights.
    for k in opt:
        if k in self.model_flags:
            setattr(self.args, k, opt[k])
    config = BertConfig.from_json_file(self.args.bert_config_path)
    model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
    model.load_cp(checkpoint)
    model.eval()
    # logger.info(model)
    trainer = build_trainer(self.args, self.device_id, model, None)
    test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
                                       self.args.batch_size, self.device, shuffle=False, is_test=True)
    trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
    """Export feature vectors for the test split using args.test_from.

    Identical setup to test() — load checkpoint, restore flags, rebuild the
    model — but calls trainer.gen_features_vector() instead of evaluating.
    """
    if not step:
        try:
            step = int(self.args.test_from.split('.')[-2].split('_')[-1])
        except IndexError:
            step = 0
    logger.info(f'Loading checkpoint from {self.args.test_from}')
    checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    # Restore architecture flags so the rebuilt model matches the weights.
    for k in opt:
        if k in self.model_flags:
            setattr(self.args, k, opt[k])
    config = BertConfig.from_json_file(self.args.bert_config_path)
    model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
    model.load_cp(checkpoint)
    model.eval()
    # logger.info(model)
    trainer = build_trainer(self.args, self.device_id, model, None)
    test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
                                       self.args.batch_size, self.device, shuffle=False, is_test=True)
    trainer.gen_features_vector(test_iter, step)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='transformer', type=str,
choices=['classifier', 'transformer', 'rnn', 'baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train', 'validate', 'test', 'vector'])
parser.add_argument("-data_name", default='chinese_summary', help='vy_text')
parser.add_argument("-bert_data_path", default='./data/bert_data/', help='./data/bert_data/')
parser.add_argument("-model_path", default='./models/models_check_points/')
parser.add_argument("-result_path", default='./results/')
parser.add_argument("-temp_dir", default='./temp/')
parser.add_argument("-bert_pretrained_model_path", default='./models/pytorch_pretrained_bert/bert_pretrain/')
parser.add_argument("-bert_config_path", default='./models/pytorch_pretrained_bert/bert_pretrain/bert_config.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=2048, type=int)
parser.add_argument("-heads", default=8, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optimizer", default='adam', type=str)
parser.add_argument("-lr", default=2e-3, type=float, help='learning rate')
parser.add_argument("-beta1", default=0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='noam', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5000, type=int)
parser.add_argument("-accum_count", default=2, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=50, type=int)
parser.add_argument("-train_steps", default=50000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('-visible_gpus', default='0', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='./logs/project.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-test_from", default='./models/models_check_points/model_step_50000.pt')
parser.add_argument("-train_from", default='', help='./models/models_check_points/model_step_45000.pt')
parser.add_argument("-validate_from", default='../models/models_check_points/model_step_50000.pt')
parser.add_argument("-report_rouge", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-shuffle_data", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-vy_predict", type=str2bool, nargs='?', const=False, default=True)
_args = parser.parse_args()
# -gpu_ranks arrives as a comma-separated string; convert to a list of ints.
gpu_ranks: str = str(_args.gpu_ranks)
_args.gpu_ranks = [int(i) for i in gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = _args.visible_gpus
init_logger(_args.log_file)
# -visible_gpus '-1' selects CPU-only execution.
_device = "cpu" if _args.visible_gpus == '-1' else "cuda"
_device_id = 0 if _device == "cuda" else -1
runner = Running(args=_args, device_id=_device_id)
multi_runner = MultiRunning(args=_args, device_id=_device_id)
if _args.world_size > 1:
    multi_runner.multi_card_run()
elif _args.mode == 'train':
    runner.train()
elif _args.mode == 'validate':
    runner.wait_and_validate()
elif _args.mode == 'test':
    runner.test()
# NOTE(review): the two branches below are unreachable — the -mode argument
# restricts choices to ['train', 'validate', 'test', 'vector'], so 'lead'
# and 'oracle' can never be parsed. Confirm whether they should be added
# to the choices list or removed.
elif _args.mode == 'lead':
    runner.baseline(cal_lead=True)
elif _args.mode == 'oracle':
    runner.baseline(cal_oracle=True)
elif _args.mode == 'vector':
    runner.gen_features_vector()
|
8,433 | bdf2c35c12820dd31bd242ce1b6dae7271ceb2b7 | class TimeEntry:
def __init__(self,date,duration,togglproject = 'default toggl', tdproject = 'default td', togglID = 'NULL', tdID = 'Null' ):
self.duration = duration
self.date = date
self.togglProject = togglproject
self.tdProject = tdproject
self.togglID = togglID
self.tdID = tdID
|
8,434 | 14bf4befdce4270b4514b4e643964182f9c49ff4 | import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sea
import sklearn
import glob
import pydub
from pydub import AudioSegment
import time
import librosa
import noisereduce as nr
from scipy.io import wavfile
import IPython
import sounddevice as sd
from pysndfx import AudioEffectsChain
import python_speech_features
import sox
import math
#y,sr=librosa.load(r"C:\Users\pranj\OneDrive\Desktop\Project\72843_lonemonk_approx-800-laughter-only-1.wav")
# Load one IEMOCAP utterance; librosa returns the signal and its sample rate.
my,sr=librosa.load(r"C:\Users\pranj\Downloads\IEMOCAP_full_release_withoutVideos\IEMOCAP_full_release\Session1\sentences\wav\Ses01F_impro01\Ses01F_impro01_F000.wav")
# NOTE(review): the clip is used as its own noise profile, and
# reduced_noise is never used afterwards — confirm whether the denoised
# signal was meant to be played instead of `my`.
reduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,prop_decrease=0.8)
print(IPython.display.Audio(data=my, rate=sr))
# Play the original signal and block until playback finishes.
sd.play(my, sr)
status = sd.wait()
8,435 | e81da535408cc36655328b37ca99b4f775f3a78e | from gerador_senha import gerar_senha
gerar_senha() |
8,436 | a7f2791e359b848a217beadc77fc983d971ef8b0 | from django.urls import path
from . import views as user_views
from produtos import views as prod_views
from django.contrib.auth import views as auth_views
app_name = 'user'
# URL routes for the user panel and its product management views.
urlpatterns = [
    path('detalhes/', user_views.painel, name="painel"),
    path('produto/ajax/delete_prod/', prod_views.deleteProd, name="deleteProd"),
    path('produto/', user_views.painelProdutos, name="painel_produtos"),
    path('<int:id_produto>', prod_views.detalheProduto, name="detalhe_prod"),
    # NOTE(review): the name "deleteProd" is registered twice (here and two
    # entries above); reverse('user:deleteProd') resolves to this later one.
    path('ajax/delete_prod/', prod_views.deleteProd, name='deleteProd'),
]
8,437 | e7b30353fd25beb9d5cdeee688e4ffa6955d4221 | {
'variables': {
'node_shared_openssl%': 'true'
},
'targets': [
{
'target_name': 'keypair',
'sources': [
'secp256k1/keypair.cc'
],
'conditions': [
# For Windows, require either a 32-bit or 64-bit
# separately-compiled OpenSSL library.
# Currently set up to use with the following OpenSSL distro:
#
# http://slproweb.com/products/Win32OpenSSL.html
[
'OS=="win"',
{
'conditions':
[
[
'target_arch=="x64"',
{
'variables': {
'openssl_root%': 'C:/OpenSSL-Win64'
},
}, {
'variables': {
'openssl_root%': 'C:/OpenSSL-Win32'
}
}
]
],
'libraries': [
'-l<(openssl_root)/lib/libeay32.lib',
],
'include_dirs': [
'<(openssl_root)/include',
],
},
# Otherwise, if not Windows, link against the exposed OpenSSL
# in Node.
{
"conditions": [
['node_shared_openssl=="false"', {
# so when "node_shared_openssl" is "false", then OpenSSL has been
# bundled into the node executable. So we need to include the same
# header files that were used when building node.
'include_dirs': [
'<(node_root_dir)/deps/openssl/openssl/include'
],
"conditions" : [
["target_arch=='ia32'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/piii" ]
}],
["target_arch=='x64'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/k8" ]
}],
["target_arch=='arm'", {
"include_dirs": [ "<(node_root_dir)/deps/openssl/config/arm" ]
}]
]
}]
]}
]]
}
]
}
|
8,438 | c0f3a957613a4f4e04aeb3eb2e3fa4053bd0122c | import pandas as pd
import numpy as np
import logging
import sklearn
from joblib import load
import sys
import warnings
import os
if not sys.warnoptions:
warnings.simplefilter("ignore")
class model:
    """Two-stage network-intrusion classifier.

    A binary decision tree first flags each flow as attack (1) or benign;
    flagged flows are then labelled with an attack family by a second
    classifier (self.attack_model).
    """

    def __init__(self):
        # from number to labels; 4 maps to 0, the "benign" marker used by predict()
        self.number_to_label = {1 : "Bot",2 : 'DoS attack',3 : 'Brute Force', 5 : 'DDoS attacks',4 : 0}
        # load the pretrained models
        try:
            self.model = load('./decision_tree_model.joblib')
            self.attack_model = load('./attack_model.joblib')
        except Exception:
            # error if model can't be found in the path
            # (narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed)
            logging.error("Model can\'t be found in the main directory")
            logging.error("please fix the problem and restart the server")
        # load the feature lists for the preprocessing step
        try:
            self.all_features = open("./all_features.txt", "r").readline().split(',')
            self.features = open("./features.txt", "r").read().splitlines()
        except Exception:
            # error if features file can't be found in the path
            logging.error("features.txt can\'t be found in the main directory")
            logging.error("please fix the problem and restart the server")

    def preprocess(self, data):
        """Select the model's feature columns, drop bad rows, return floats.

        Rows containing NaN or +/-inf in any selected column are removed.
        """
        # select only the columns that work best with the pretrained model
        data = data[self.features]
        # remove infinite and null values
        data = data.replace([np.inf, -np.inf], np.nan)
        data = data.dropna()
        # change the type of the data to float
        data = data.astype("float")
        # return the data as a numpy array
        return data.to_numpy()

    def load_data_csv(self, path='./data_examples/example.csv'):
        """Load a CSV of flows, keeping the 'Label' column (if any) for scoring."""
        self.data = pd.read_csv(path)
        # for evaluation tasks, we will save the label
        if ('Label' in self.data.columns):
            self.label = self.data['Label'].to_numpy()
            # BUG FIX: this message used to be logged in the else branch,
            # i.e. exactly when the data was NOT labeled.
            logging.info('This data is labeled')
        else:
            self.label = None
        self.data = self.preprocess(self.data)

    def load_data(self, rows):
        """Load and preprocess 'bpoint'-separated rows of comma-joined values."""
        self.data = pd.DataFrame([x.strip(',').split(',') for x in rows.strip('bpoint').split('bpoint')], columns=self.all_features)
        self.data = self.preprocess(self.data)

    def predict(self):
        """Return one entry per row: 0 for benign, else the attack label."""
        results = []
        # predict the class of the flow
        self.prediction = self.model.predict(self.data).astype('int32')
        # in case of one row prediction
        if (self.prediction.shape[0] == 1):
            if (self.prediction.item() == 1):
                results.append(self.number_to_label[self.attack_model.predict(self.data[0, :].reshape(1, -1)).item()])
            else:
                results.append(0)
        else:
            for i in range(self.prediction.shape[0]):
                if (self.prediction[i] == 1):
                    results.append(self.number_to_label[self.attack_model.predict(self.data[i, :].reshape(1, -1)).item()])
                else:
                    results.append(0)
        return results

    def accuracy(self):
        """Return accuracy of the last predict() run, or None if unlabeled."""
        # calculate accuracy in case of label availability
        if (self.label is None):
            logging.error("Score can't be calculated, No label provided")
            # fixed typo in the hint: the column must be named 'Label' (was "Lebel")
            logging.error("be sure to name your label column with 'Label'")
            return None
        else:
            from sklearn.metrics import accuracy_score
            accuracy = accuracy_score(self.label, self.prediction)
            return accuracy
"""
m = model()
m.load_data(sys.argv[1])
prediction = m.predict()
"""
|
8,439 | 0f916a1f638bf149f6992355cf8f33f74bc9bdb1 | {
"targets": [
{
"target_name": "force-layout",
"sources": [ "src/main.cc", "src/layout.cc", "src/quadTree.cc" ],
'conditions': [
['OS=="win"', {
'cflags': [
'/WX', "/std:latest", "/m"
],
}, { # OS != "win"
'cflags': [
"-std=c++11", "-fpermissive", "-fexceptions"
],
}],
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ]
}
]
}
|
8,440 | 89256a38208be92f87115b110edc986cebc95306 | class Solution:
def containsDuplicate(self, nums) -> bool:
d = {} # store the elements which already exist
for elem in nums:
if elem in d:
return True
else:
d[elem] = 1
return False
print(Solution().containsDuplicate([0])) |
8,441 | 7b18c967cf50d87b089dc22f3fbe6d40d708483f | import osfrom setuptools import setup
def read(fname):
    """Return the entire text content of the file *fname*.

    (Reformatted: the original source had this definition collapsed onto a
    single, syntactically invalid line.)
    """
    with open(fname) as fhandle:
        return fhandle.read()
def readMD(fname):
    """Read a Markdown file, converting it to reST via pandoc when available.

    If the PANDOC_PATH environment variable is set, the file is converted
    with the pandoc bindings; otherwise the raw text is returned unchanged.
    (Reformatted: the original source had this definition collapsed onto a
    single, syntactically invalid line.)
    """
    # Utility function to read the README file.
    full_fname = os.path.join(os.path.dirname(__file__), fname)
    if 'PANDOC_PATH' in os.environ:
        import pandoc
        pandoc.core.PANDOC_PATH = os.environ['PANDOC_PATH']
        doc = pandoc.Document()
        with open(full_fname) as fhandle:
            doc.markdown = fhandle.read()
        return doc.rst
    else:
        return read(fname)
# Package metadata and setup() invocation (reformatted: the original source
# had these statements collapsed onto a single, syntactically invalid line).
version = '2.0.5'
# One requirement per non-empty line of requirements.txt.
required = [req.strip() for req in read('requirements.txt').splitlines() if req.strip()]
setup(
    name='CacheMan',
    version=version,
    author='Matthew Seal',
    author_email='mseal@opengov.com',
    description='A dependent cache manager',
    long_description=readMD('README.md'),
    install_requires=required,
    license='New BSD',
    packages=['cacheman'],
    test_suite='tests',
    zip_safe=False,
    url='https://github.com/OpenGov/py_cache_manager',
    download_url='https://github.com/OpenGov/py_cache_manager/tarball/v' + version,
    keywords=['tables', 'data', 'analysis', 'extraction'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Topic :: Utilities',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2 :: Only'
    ])
8,442 | fdbb64159b72bf902efc3aa2eaa534e199dccf84 | if input is not None:
element = S(input)
if newChild is not None:
newChild = S(newChild)
element.replaceChild(existingChild, newChild)
|
8,443 | 2342a651ec45623b887c4bc1168adb0731ba5ff6 | # encoding: utf-8
import paramiko
import select
import os
import sys
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
host = "47.107.229.100"
user = "root"
pwd = "aliyun1996874353...A"
class SSH:
    """Thin wrapper around paramiko for remote commands and SFTP transfers."""

    def __init__(self, host, user, pwd, port=22):
        self.host = host
        self.user = user
        self.pwd = pwd
        self.port = port
        self.client = paramiko.SSHClient()
        # Auto-accept unknown host keys (convenient, but skips verification).
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.client.connect(host, username=user, password=pwd, port=port)

    def exec_cmd(self, cmd):
        """Run *cmd* remotely and print its stdout (or stderr on failure)."""
        stdin, stdout, stderr = self.client.exec_command(cmd)
        res, err = stdout.read(), stderr.read()
        result = res if res else err  # prefer stdout, fall back to stderr
        print("##" + result.decode(encoding="utf-8").replace('\n', '', -1) + "##")

    def put_file(self, local_file, service_file):
        """Upload *local_file* to *service_file* on the remote host."""
        tran = paramiko.Transport(self.host, self.port)
        tran.connect(username=self.user, password=self.pwd)
        sftp = paramiko.SFTPClient.from_transport(tran)
        sftp.put(local_file, service_file)
        tran.close()

    def get_file(self, service_file, local_file):
        """Download *service_file* from the remote host to *local_file*.

        BUG FIX: the Transport returned by get_transport() was previously
        discarded and the SSHClient itself was passed to from_transport(),
        which cannot work — SFTPClient.from_transport() needs a Transport.
        """
        tran = self.client.get_transport()
        sftp = paramiko.SFTPClient.from_transport(tran)
        sftp.get(service_file, local_file)

    def c_connect(self):
        # NOTE(review): the opened channel is never used or returned —
        # confirm whether this method is still needed.
        channel = self.client.open_session()

    def close_ssh(self):
        """Close the underlying SSH connection."""
        self.client.close()
def test():
    """Interactive remote-shell demo built directly on a paramiko Transport.

    Bridges the local terminal and a remote shell: keystrokes from stdin
    are forwarded to the channel, channel output is echoed to stdout.
    Uses the module-level host/user/pwd credentials.
    """
    import paramiko
    import os
    import select
    import sys
    # Create the SSH transport (socket level) and start the client.
    trans = paramiko.Transport((host, 22))
    trans.start_client()
    # If authenticating with an RSA key, use this instead:
    '''
    default_key_file = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
    prikey = paramiko.RSAKey.from_private_key_file(default_key_file)
    trans.auth_publickey(username='super', key=prikey)
    '''
    # Authenticate with username and password.
    trans.auth_password(username=user, password=pwd)
    # Open a session channel.
    channel = trans.open_session()
    # Request a pseudo-terminal.
    channel.get_pty()
    # Start an interactive shell, as if logging in with an ssh client.
    channel.invoke_shell()
    # Multiplex with select(): when the user types, sys.stdin becomes
    # readable and the input is forwarded over the channel; when the server
    # answers, the channel becomes readable and the output is printed.
    # The channel send/receive loop is just socket I/O underneath.
    while True:
        readlist, writelist, errlist = select.select([channel, sys.stdin, ], [], [])  # wait for either side
        if sys.stdin in readlist:  # the user typed something
            input_cmd = sys.stdin.read(1)  # read one character
            channel.sendall(input_cmd)  # forward it to the server
        if channel in readlist:  # the server produced output
            result = channel.recv(1024)  # empty read means the remote closed
            if len(result) == 0:
                print("\r\n**** EOF **** \r\n")
                break  # connection ended, leave the loop
            sys.stdout.write(result.decode())
            sys.stdout.flush()
    # Close the channel...
    channel.close()  # ...and then the transport.
    trans.close()
if __name__ == '__main__':
# put_file()
ssh = SSH(host, user, pwd)
# ssh.put_file("easyops.sh", "/tmp/easyops.sh")
# ssh.exec_cmd("ls /tmp")
# ssh.exec_cmd("sh /tmp/easyops.sh")
# ssh.c_connect()
test() |
8,444 | 7ae6ed8797d6ee02effd04750e243c5a59840177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/2/18 22:27
# @Author : name
# @File : 01.requests第一血.py
import requests
if __name__ == "__main__":
# step1:指定url
url = r'https://www.sogou.com/'
# step2:发起请求
reponse = requests.get(url = url)
# setp3:获取响应数据 text返回的是字符串形式的响应数据
page_text = reponse.text
print(page_text)
# step4:持久化存储
with open('./sogou.html', 'w', encoding='utf-8') as fp:
fp.write(page_text)
print('爬取数据结束!')
|
8,445 | dc4de382ab16f036c6174e711f5c9fe52868ccc9 | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .tasks import read_all_models, update_a_model, delete_a_model, read_a_model, create_a_model
from .forms import MyModelForm
def home(request):
    """Render the index page with all models sorted by title (A to Z)."""
    content = read_all_models()
    # Sort case-insensitively by title-casing each title before comparing.
    sorted_list = sorted(content, key=lambda k: k['title'].title(), reverse = False)
    return render(request,'myapp/index.html',{'sorted_list':sorted_list})
def create(request):
    """Show the create form (GET) or create a model from POSTed fields."""
    if request.method == 'POST':
        title = request.POST.get('title')
        desc = request.POST.get('desc')
        d = {}
        d['title'] = title
        d['desc'] = desc
        # create_a_model returns truthy on success.
        if create_a_model(d):
            return redirect('myapp:home')
        else:
            return HttpResponse("Error Occured")
    return render(request,'myapp/create.html')
def delete(request,pk):
    """Delete the model identified by *pk* and return to the home page."""
    if delete_a_model(pk):
        return redirect('myapp:home')
    else:
        return HttpResponse("Error Occured")
def search(request):
    """Show the search form (GET) or look up a model by primary key (POST)."""
    if request.method == 'POST':
        pk = request.POST.get('pk')
        try:
            content = read_a_model(pk)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any lookup failure shows "not found".
            return render(request,'myapp/search.html',{'error':"No Result Found!"})
        return render(request,'myapp/search_results.html',{'content':content})
    return render(request,'myapp/search.html')
def update(request,pk):
    """Edit form for model *pk* (GET); POST saves the new title/description."""
    if request.method == 'POST':
        title = request.POST.get('title')
        desc = request.POST.get('desc')
        d = {}
        d['title'] = title
        d['desc'] = desc
        if update_a_model(pk,d):
            return redirect('myapp:home')
        else:
            return HttpResponse("Error Occured")
    content = read_a_model(pk)
    return render(request,'myapp/update.html',{'content':content})
|
8,446 | 7e33c6ada3d141ba8067dbf88c2e85a91802a067 | # coding:utf-8
__author__ = 'yinzishao'
# dic ={}
class operation():
    # Abstract base for binary operations; subclasses read the numberA and
    # numberB attributes that the caller assigns after construction.
    def GetResult(self):
        pass
class operationAdd(operation):
    # Addition: numberA + numberB.
    def GetResult(self):
        return self.numberA + self.numberB
class operationDev(operation):
    # Division: numberA / numberB, returning 0 on division by zero.
    # NOTE: Python 2 syntax (`except Exception,e`, print statement).
    def GetResult(self):
        # if(self.numberB!=0):
        #     return self.numberA /self.numberB
        # else:
        #     raise "divisor must not be 0"
        try :
            return self.numberA /self.numberB
        except Exception,e:
            print "error:divided by zero"
            return 0
class operationMul(operation):
    # Multiplication: numberA * numberB.
    def GetResult(self):
        return self.numberA*self.numberB
class operationSub(operation):
    # Subtraction: numberA - numberB.
    def GetResult(self):
        return self.numberA-self.numberB
class operationFac():
    """Simple factory mapping an operator symbol to an operation instance."""
    dic = {}

    def __init__(self):
        # BUG FIX: "*" used to map to operationDev() (division) instead of
        # operationMul(), so every multiplication actually divided.
        self.dic = {"+": operationAdd(), "-": operationSub(), "/": operationDev(), "*": operationMul()}

    def creatOpe(self, sign):
        """Return the operation for *sign*, or the string "faise" if unknown."""
        if sign in self.dic:
            return self.dic[sign]
        else:
            # TODO(review): "faise" looks like a typo for "false"; consider
            # returning None or raising instead of a sentinel string.
            return "faise"
if __name__ =="__main__":
fuhao = raw_input("operator:")
nA= input("a:")
nB= input("b:")
a =operationFac().creatOpe(fuhao)
a.numberA=nA
a.numberB=nB
print a.GetResult()
# dic ={"+":operationAdd(),"-":operationSub(),"/":operationDev(),"*":operationDev()}
# print dic
|
8,447 | 7817a42e5aee1786cfb3e8018bd7ca0a5e74749d | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-05-29 04:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Supplier.description optional (blank/null, max 500 chars)."""

    dependencies = [
        ('nomenclature', '0002_saloon_default'),
    ]

    operations = [
        migrations.AlterField(
            model_name='supplier',
            name='description',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='Описание'),
        ),
    ]
|
8,448 | 696b9db78cc7f6002eb39b640e0e5b2b53e52e91 | from import_.Import import Import
from classifier.Classifier import Classifier
from export.Export import Export
from preprocessing.PreProcess import PreProcess
def main():
    """End-to-end pipeline: import a cats CSV, preprocess it, predict
    lifespan with the saved XGBoost model, and export the results."""
    date_column = "date of last vet visit"
    target = "age at death"
    export_file_dir = "./output/"
    export_model_dir = "./model/xgb_model.dat"
    # IMPORT
    import_ = Import()
    print("""
To predict how long cats will live (in years) please enter the file path
for the cats csv file for example: ./input/cats_pred.csv
""")
    cats = import_.import_df("predict")
    # Untouched copy so the exported file keeps the original columns.
    cats_copy = cats.copy()
    # PRE-PROCESSING
    pre_process = PreProcess()
    print("Pre-processing Imported Data..")
    # process date to keep year only
    print("Processing date column to keep year only")
    pre_process.strip_year(cats, date_column)
    # Storing numerical columns in the background
    pre_process.get_numerical_cols(cats)
    # Convert all columns to float data type
    print("Convert all columns to float data type")
    pre_process.convert_to_float(cats)
    # Replace NaN values with Median
    print("Replacing all NaN values with median")
    cats = pre_process.replace_nan(cats)
    # Normalise dataset
    print("Normalising dataset")
    cats = pre_process.normalise(cats)
    print("""
Cats dataset
{0}
""".format(cats.head()))
    # PREDICTION
    print("Prediction Starting")
    cats_pred = Classifier.predict(export_model_dir, cats)
    # EXPORTING
    print("Prediction Finished")
    Export.export_pred_file(cats_copy, cats_pred, target, export_file_dir)
if __name__ == "__main__":
main()
|
8,449 | b381d1110e6a7570cd872d689a43aba2d2580a23 | year = int(input('西暦>'))
# Gregorian leap-year rule: divisible by 4 but not by 100, or divisible
# by 400. Years divisible by 400 fail the first test (they are also
# divisible by 100), so the elif catches them. Redundant `pass`
# statements after each print were removed.
if year % 4 == 0 and year % 100 != 0:
    print('閏年')
elif year % 400 == 0:
    print('閏年')
else:
    print('平年')
|
8,450 | 56b5faf925d9a1bfaef348caeb35a7d3c323d57f | from django.db.models import Q
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import BasePermission
from relations.models import Relation
from .. import models
class ConversationAccessPermission(BasePermission):
    """Object permission: only the two participants or a superuser may
    access a conversation."""
    message = 'You cant see others conversations!'
    def has_object_permission(self, request, view, obj):
        return obj.start_user == request.user or request.user.is_superuser or obj.end_user == request.user
class SendMessagePermission(BasePermission):
    """Allow sending only when a Relation (follow) exists between the
    requester and the other conversation participant, in either direction."""
    message = 'You can only send message to flowers and followings!'
    def has_permission(self, request, view):
        conversation = get_object_or_404(models.Conversation, slug=request.data.get('conversation_slug'))
        # The "other" participant is whichever end of the conversation is
        # not the requester.
        user = conversation.start_user
        if request.user == conversation.start_user:
            user = conversation.end_user
        # A relation in either direction grants permission.
        return Relation.objects.filter(
            Q(start_user=request.user, end_user=user) |
            Q(start_user=user, end_user=request.user)
        ).exists()
class MessageOwnerPermission(BasePermission):
    """Object permission: only the message's author or a superuser may
    modify it."""
    message = 'You cant modify your messages only!'
    def has_object_permission(self, request, view, obj):
        return obj.user == request.user or request.user.is_superuser
|
8,451 | 27f162f2e350fdb284740bd67f4293535f0ab593 | import os, sys
import json
import paramiko
"""
Copies the credentials.json file locally from robot
"""
def copy_credentials_file(hostname, username, password, src_path, dst_path):
    """SFTP-copy credentials.json from the robot at *hostname* to *dst_path*."""
    # create ssh connection
    ssh_client = paramiko.SSHClient()
    # Auto-accept unknown host keys (skips host-key verification).
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_client.connect(hostname=hostname, username=username, password=password)
    # ftp file from robot to local path
    ftp_client = ssh_client.open_sftp()
    ftp_client.get(src_path, dst_path)
    ftp_client.close()
"""
Creates a default config file for AWS
(aws_config.json)
"""
def create_default_config(path):
    """Write the default AWS request configuration to *path* as JSON."""
    defaults = {
        'method': 'GET',
        'service': 'ec2',
        'host': 'ec2.amazonaws.com',
        'region': 'us-east-1',
        'endpoint': 'https://ec2.amazonaws.com',
    }
    with open(path, 'w+') as file:
        json.dump(defaults, file)
"""
Checks for the aws_config.json file,
creates the file and populates with default values
if not found.
"""
def check_aws_config():
    """Ensure ~/jibo/HubTest/config/aws_config.json exists; return its path.

    Creates the file with default values on first use.
    """
    config_path = os.path.expanduser('~/jibo/HubTest/config/aws_config.json')
    if not os.path.exists(config_path):
        print("\nCreating default AWS config...")
        create_default_config(config_path)
        print("Done.\n")
    return config_path
"""
Checks for the credentials.json file,
creates the file and populates with values from
robot if not found.
"""
def check_credentials():
    """Ensure the local credentials.json exists; fetch it from the robot
    (using the connection details in login.json) if it does not.

    Returns the local path to credentials.json.
    """
    login_file = os.path.expanduser('~/jibo/HubTest/config/login.json')
    login_data = load_json(login_file)
    robot_name = login_data['robot_name']
    username = login_data['username']
    password = login_data['password']
    src_path = '/var/jibo/credentials.json'
    dst_path = os.path.expanduser('~/jibo/HubTest/config/credentials.json')
    if not os.path.exists(dst_path):
        print("\nGrabbing AWS credentials from robot...")
        copy_credentials_file(robot_name, username, password, src_path, dst_path)
        print("Done.\n")
    return dst_path
"""
Reads and returns contents of JSON file
"""
def load_json(path):
    """Parse the JSON file at *path* and return the resulting object."""
    with open(path, 'r') as file:
        return json.load(file)
|
8,452 | ec4725b5b60d10e86b29aab3723917ace5cf52f6 | print("gist test file4") |
8,453 | e9af8f7830be7db3ca57b0a24de48ef7fcb08d6c | from psycopg2 import extras as ex
import psycopg2 as pg
import json
import datetime
import os
from functools import reduce
data_list = [{'projectName': '伊犁哈萨克自治州友谊医院开发区分院保洁服务项目', 'pingmu': '服务', 'purUnit': '新疆伊犁哈萨克自治州友谊医院', 'adminiArea': '新疆维吾尔自治区', 'bulletTime': '2020年09月02日 19:20', 'obtBidTime': '2020年09月02日至2020年09月09日每日上午:00:00 至 12:00\xa0\xa0下午:12:00 至 23:59(北京时间,法定节假日除外)', 'bidDocPrice': '¥500', 'obtBidLoc': '伊宁市经济合作区福安·西城国际1416室', 'staBidTime': '', 'staLoc': '伊宁市海棠路3号州财政局办公楼附楼1层州政府采购中心 一楼招标厅', 'budget': '¥807.000000万元(人民币)', 'proContact': '胡川', 'proPhone': '18690293446', 'purAddress': '伊宁市斯大林街92号', 'purUnitPhone': '0999-8024023', 'agentName': '新疆诚成工程项目管理有限公司', 'agentAddress': '详见公告正文', 'agentPhone': '18690293446'}
, {'projectName': '旅顺口医疗区医用氧气管道检修采购项目', 'pingmu': '服务/维修和保养服务/其他维修和保养服务', 'purUnit': '中国人民解放军联勤保障部队第九六七医院', 'adminiArea': '大连市', 'bulletTime': '2020年09月02日 19:52', 'obtBidTime': '2020年09月02日至2020年09月07日每日上午:8:30 至 11:30\xa0\xa0下午:13:00 至 16:30(北京时间,法定节假日除外)', 'budget': '¥0.000000万元(人民币)', 'proContact': '廖大成,尹辉', 'proPhone': '0411-80841295 0411-80841296', 'purAddress': '辽宁省大连市西岗区胜利路80号', 'purUnitPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'agentName': '中国人民解放军联勤保障部队第九六七医院', 'agentAddress': '辽宁省大连市西岗区胜利路80号', 'agentPhone': '廖大成,尹辉 0411-80841295 0411-80841296', 'appendix': '{"2.报价书氧气管道检修.docx": "http://www.ccgp.gov.cn/oss/download?uuid=88FCEC822374C5002F6DD48B15DC44", "3.货物指标及要求氧气管道检修.docx": "http://www.ccgp.gov.cn/oss/download?uuid=2773DFCD00839B5E034DA43339EDF1"}'}
]
dict_tmp={}
values_list = []
result = []
def processJson(dic):
    """Parse an appendix JSON string mapping file names to download URLs.

    Returns a list of {"file_name": ..., "urls": ...} dicts, one per entry.

    BUG FIX: the old version appended into the module-level `result` list,
    so every call also returned the attachments of all earlier records —
    each record's fj_json then contained other records' files.
    """
    dicobj = json.loads(dic)
    entries = []
    for k, v in dicobj.items():
        entries.append({"file_name": k, "urls": v})
    return entries
def procesV():
    """Collect rows for insertion from the module-level data_list.

    For every record with a non-empty "appendix" field, parse the appendix
    JSON and append a (name, attachments-json) tuple to the module-level
    values_list.
    """
    for i in data_list:
        if "appendix" in i.keys():
            appendix = i["appendix"]
            if appendix != "":
                fj = processJson(i["appendix"])
                print(fj)
                # ensure_ascii=False keeps the Chinese file names readable.
                fjs = json.dumps(fj,ensure_ascii=False)
                values_list.append(("testtest",fjs))
def prosql():
    """Bulk-insert the collected rows into ho_sysnc_third_customer_data.

    NOTE(review): database credentials are hard-coded below — move them to
    configuration/environment before this leaves a test setup.
    """
    # With execute_values the whole VALUES clause is a single %s placeholder.
    hostname = '172.18.11.26'
    username = 'postgres'
    password = 'postgres_cnhis@#$'
    database = 'ai'
    conn = pg.connect(database=database, user=username, password=password, host=hostname, port="5432")
    cursor = conn.cursor()
    procesV()
    sql = '''insert into ho_sysnc_third_customer_data("purchased_project_name","fj_json")
values %s
'''
    # page_size is the maximum number of rows per statement, so at most
    # len(argslist)/page_size + 1 statements are executed.
    ex.execute_values(cursor, sql, values_list, page_size=10000)
    conn.commit()
    # NOTE(review): cursor.close() after conn.close() is effectively a
    # no-op order — harmless, but inverted.
    conn.close()
    cursor.close()
if __name__ =='__main__':
prosql()
# procesV()
|
8,454 | 12442e4debc7fbf102ab88b42464f4ca8eb91351 | #!/usr/bin/python
# coding=utf8
# author: Sun yang
import running
if __name__ == '__main__':
    # Entry point: delegate to the running module's go() routine.
    running.go()
8,455 | 516ea681a55255e4c98e7106393180f9ad2e0250 | # csv URL
url = "https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv"
# read csv from URL
import pandas as pd
import geopandas as gpd
import numpy as np
df=pd.read_csv(url,sep=";")
df.to_csv("/var/www/FlaskApp/FlaskApp/data/covid_data.csv",sep=";",index=False)
# transforming timestamps to proper DateTime format
import datetime as dt
from datetime import datetime
import time
# Parse "MeldeDatum" strings (e.g. "01.02.2021 10:00:00") by stripping the
# '.' and ':' separators and parsing the remaining digit groups.
timestamps = []
for i in df["MeldeDatum"]:
    i = i.replace(".","")
    i = i.replace(":","")
    timestamps.append(dt.datetime.strptime(i, "%d%m%Y %H%M%S"))
df["MeldeDatum"] = timestamps
# The raw text date column is superseded by the parsed timestamps.
df = df.drop(["Meldedat"], axis=1)
# get List of State Names
states = list(df["Bundesland"].unique())
# append total hospitalizations (normal beds FZHosp + ICU FZICU) to DF
l_temp = []
for a,b in zip(df["FZHosp"],df["FZICU"]):
    l_temp.append(a+b)
df["Hospitalizations_total"] = l_temp
# append total ICU capacity (occupied FZICU + free FZICUFree) to DF
l_temp = []
for a,b in zip(df["FZICU"],df["FZICUFree"]):
    l_temp.append(a+b)
df["ICU_capacity"] = l_temp
# append ICU occupancy percentages to DF (0.0 when capacity is zero)
l_temp = []
for a,b in zip(df["FZICU"],df["ICU_capacity"]):
    try:
        l_temp.append(100.0 * float(a)/float(b))
    except ZeroDivisionError:
        l_temp.append(0.0)
df["ICU_perc"] = l_temp
# create list of dataframes by Bundesland
ls_df = []
for i in states:
temp = df[df["Bundesland"]==i]
ls_df.append(temp)
# importing adm0 and adm1 shapefilesas geopandas dataframes
adm1 = gpd.read_file("/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp")
adm0 = gpd.read_file("/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp")
#writing to json
#adm1.to_file("data/austria_adm1.geojson", driver="GeoJSON")
#adm0.to_file("data/austria_adm0.geojson", driver="GeoJSON")
# save CSV after manipulating & rounding
df = df.round(1)
df.to_csv("/var/www/FlaskApp/FlaskApp/data/ICU_data.csv")
# create most recent DF for map
most_recent_date = df['MeldeDatum'].max()
df2 = df.loc[df['MeldeDatum'] == most_recent_date]
df2.to_pickle("/var/www/FlaskApp/FlaskApp/data/df2.pkl")
# join geometries with most recent data per state
df_map =gpd.read_file("/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson")
df_map["Bundesland"] = df_map["NAME_1"]
df_map = pd.merge(df2,df_map,on="Bundesland")
df_map = gpd.GeoDataFrame(df_map, geometry="geometry")
df_map.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_map.pkl")
# drop unused columns and save file in data folder
df_map.drop(["BundeslandID","GID_0","NAME_0","NAME_1","GID_1","VARNAME_1","NL_NAME_1","TYPE_1","ENGTYPE_1","CC_1","HASC_1","test_value"],axis=1).to_csv("/var/www/FlaskApp/FlaskApp/data/df_map.csv",index=False)
"""
CREATE DFs FOR UPDATE GRAPHS
"""
# One time-series frame per metric: rows are reporting dates, columns are the
# country-wide "Alle" series plus one column per Bundesland.  The original
# spelled the identical 12-line dict literal out three times; a single helper
# removes the copy-paste (same column order, same pickle files).
_GRAPH_STATES = ["Alle", "Burgenland", "Kärnten", "Niederösterreich",
                 "Oberösterreich", "Salzburg", "Steiermark", "Tirol",
                 "Vorarlberg", "Wien"]

def _state_metric_df(metric):
    """Return a DataFrame with MeldeDatum plus one *metric* column per state.

    Assumes every Bundesland has the same number of rows in `df` (one per
    reporting date) -- the same assumption the original column-wise
    construction relied on.
    """
    data = {"MeldeDatum": np.asarray(df.loc[df["Bundesland"] == "Alle"]["MeldeDatum"])}
    for state in _GRAPH_STATES:
        data[state] = np.asarray(df.loc[df["Bundesland"] == state][metric])
    return pd.DataFrame(data)

df_perc = _state_metric_df("ICU_perc")            # occupancy percentage
df_perc.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_perc.pkl")
df_FZICU = _state_metric_df("FZICU")              # occupied ICU beds
df_FZICU.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl")
df_ICU_cap = _state_metric_df("ICU_capacity")     # total ICU beds
df_ICU_cap.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl")
# Writing to logfile (append one success line per run)
file_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
file_object.write('Success: '+date_time+"\n")
file_object.close()
"""
DB CONNECTOR
"""
# DB create string from csv for COVID data
# NOTE(review): these INSERT statements are built by naive string
# concatenation.  The values come from our own CSV export above, but any
# quote character in the data would break the statement -- parameterised
# queries (cursor.executemany) would be safer.
import csv
with open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:
    instr = ""
    reader = csv.reader(f,delimiter=";")
    #print(reader)
    next(reader) # Skip the header row.
    for row in reader:
        instr=instr+("INSERT INTO icu_data VALUES ('"+str(row[0])+"','"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(row[6])+"','"+str(row[7])+"','"+str(row[8])+"');" )
# DB create string from csv for MAP data
import csv
import sys
csv.field_size_limit(sys.maxsize)  # the geometry column can exceed the default field limit
with open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:
    instr_map = ""
    reader = csv.reader(f,delimiter=",")
    #print(reader)
    next(reader) # Skip the header row.
    for row in reader:
        instr_map=instr_map+("INSERT INTO icu_map VALUES ('"+str(row[0])+"','"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(row[6])+"','"+str(row[7])+"','"+str(row[8])+"','"+str(row[9])+"','"+str(row[10])+"');" )
""" connecting to DB, parsing SQL statements """
def csv_parser(statement):
    """Execute *statement* against the ICU PostgreSQL database.

    Returns a list of result rows (currently always empty, because the
    fetch code is commented out); errors are printed rather than raised.
    """
    import psycopg2
    return_ls = []
    # Defined up front so the `finally` block cannot hit a NameError: the
    # original referenced `connection`/`cursor` unconditionally and crashed
    # with NameError whenever connect() itself failed.
    connection = None
    cursor = None
    try:
        # NOTE(review): credentials are hard-coded; move them to config/env.
        connection = psycopg2.connect(user="icu_bot",
                                      password="5B2xwP8h4Ln4Y8Xs",
                                      host="85.214.150.208",
                                      port="5432",
                                      database="ICU")
        cursor = connection.cursor()
        cursor.execute(statement)
        connection.commit()
        #print("Selecting rows from mobile table using cursor.fetchall")
        #mobile_records = cursor.fetchall()
        #print("Print each row and it's columns values")
        #for row in mobile_records:
        #    return_ls.append(list(row))
    except (Exception, psycopg2.Error) as error:
        print ("Error while fetching data from PostgreSQL: ", error)
    finally:
        # closing database connection -- only what was actually opened
        if cursor is not None:
            cursor.close()
        if connection is not None:
            connection.close()
        #print("PostgreSQL connection is closed")
    return return_ls
# update database in postgis: wipe the tables and re-insert the snapshot
csv_parser("DELETE FROM icu_data")
csv_parser(instr)
# Update map data in server
csv_parser("DELETE FROM icu_map")
csv_parser(instr_map)
"""
GeoServer Connector
"""
# Refresh the cached GeoJSON from the geoserver WFS endpoint; best-effort.
try:
    df_geojson = pd.read_json("https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson")
    df_geojson.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl")
except Exception:
    # narrowed from a bare `except:`, which also swallowed SystemExit
    # and KeyboardInterrupt
    print("an exception occured connecting to the geoserver")
|
8,456 | 53380810a3d9787fe7c373cf1829f2d849a91c3c | import csv
from matplotlib import pyplot as plt
from datetime import datetime
# Input files: all-platform sales and Bandcamp-only sales.
# NOTE(review): file_two is never read below -- presumably reserved for the
# TASK items at the bottom of the file.
file_one = 'data/dwifh_all_sales.csv'
file_two = 'data/dwifh_bc_sales.csv'
# create code to automatically build a dictionary for each album?
with open(file_one) as fo:
    reader = csv.reader(fo)
    header = next(reader)
    album = {}
    # per-row series collected for the "Harm's Way" album only
    dates, cd_income, dd_income, total_profit, artist_payout = [], [], [], [], []
    for row in reader:
        if row[2].strip() == 'Harm\'s Way':
            # column layout: 0=date, 4=CD income, 5=digital-download income,
            # 7=total profit, 8=artist payout -- TODO confirm against the file
            dates.append(float(row[0].strip()))
            cd_income.append(int(float(row[4].strip())))
            dd_income.append(int(float(row[5].strip())))
            total_profit.append(int(float(row[7].strip())))
            artist_payout.append(int(float(row[8].strip())))
        else:
            pass
album_alltime_profit = sum(total_profit)
artist_alltime_payout = sum(artist_payout)
# complete the dictionary for this album
album['title'] = 'Harm\'s Way'
album['period of sales'] = dates
album['cd_income_data'] = cd_income
album['dd_income_data'] = dd_income
album['all_time_profit'] = album_alltime_profit
album['all_time_payout'] = artist_alltime_payout
for key, value in album.items():
    print(f'{key}: {value}')
# Plot CD vs digital-download income over the sales period.
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.plot(album['period of sales'], album['dd_income_data'], c='red')
ax.plot(album['period of sales'], album['cd_income_data'], c = 'blue')
plt.title('{} Sales - All Time'.format(album['title']))
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel('CD (blue) and DD (red)', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
#plt.show()
# TASK:
# 1. get the names of the albums from the .csv file and store
# them in a list. make sure there are no duplicates.
# parse the csv file and create a dictionary for each album,
# assigning it the name taken from the name list.
# use: for album in album_list: so the process is done once
# for each album name.
# the dict created for each album contains all the data pulled
# from the csv file. create the dict, then append it to
# a list of dicts. this list will, when done, contain four
# dictionaries, one for each album.
# but since it's done in a loop, all four dicts get created
# automatically, but they contain different data, respective to
# each album.
|
8,457 | 0a5e30483c1fde10410c442a1ccd1f79bfb329c8 | import pandas as pd
import glob
import string
import os
ALLOWED_CHARS = string.ascii_letters + "-,. \"()'"
def concat_all_data(path : str = 'Data/*.csv', save_path : str = 'Data/final.csv'):
    """Concatenate every CSV matching *path* and write the result to *save_path*."""
    frames = [pd.read_csv(csv_path) for csv_path in glob.glob(path)]
    pd.concat(frames).to_csv(save_path)
def clean_csv(path : str, save_pth : str):
    """Read the CSV at *path*, drop duplicate and invalid name rows, write to *save_pth*."""
    df = pd.read_csv(path)
    df = remove_dups_df(df)          # drops ALL rows whose name appears more than once
    df = remove_invalid_rows_df(df)  # keeps only names made entirely of ALLOWED_CHARS
    df.to_csv(save_pth)
def remove_dups_df(df : pd.DataFrame):
    """Sort *df* by name and drop every row whose name occurs more than once.

    Mutates *df* in place (matching the original) and returns the same object.
    """
    df.sort_values(by="name", inplace=True)
    df.drop_duplicates(subset=["name"], keep=False, inplace=True)
    return df
def remove_invalid_rows_df(df : pd.DataFrame):
    """Return the rows of *df* whose name consists entirely of ALLOWED_CHARS."""
    is_valid = df['name'].apply(lambda name: all(ch in ALLOWED_CHARS for ch in name))
    return df[is_valid]
# --- Build fbnames.csv from the raw "count name" text file ------------------
df = pd.DataFrame(columns=['count', 'name'])
f = open("fbnames.txt", "r")
count = 0
save_every = 2000  # checkpoint the CSV to disk every N input lines
for line in f:
    count += 1
    split = line.split()
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # kept here to match the file's pandas era.
    df = df.append({'count': split[0], 'name': split[1].capitalize()}, ignore_index=True)
    if count % save_every == 0:
        df.to_csv("fbnames.csv")
f.close()
df.to_csv("fbnames.csv")

# --- Aggregate the per-state SSA name files into namesbystates.csv ----------
files = os.listdir("namesbystate/")
df = pd.DataFrame(columns=['count', 'name'])
save_every = 2000
for file in files:
    # os.path.join instead of the original f"namesbystate\{file}", whose
    # backslash separator only worked on Windows.
    f = open(os.path.join("namesbystate", file), "r")
    count = 0
    for line in f:
        count += 1
        split = line.split(",")
        df = df.append({"count": int(split[4]), "name": split[3]}, ignore_index=True)
        # The original tested `save_every % count == 0` (inverted), and also
        # reassigned df to the grouped frame, corrupting further appends;
        # now we only *write* a grouped snapshot every save_every lines.
        if count % save_every == 0:
            df.groupby(['name']).sum().to_csv("namesbystates.csv")
    f.close()
# Final aggregate: the original computed this groupby and discarded the result.
df.groupby(['name']).sum().to_csv("namesbystates.csv")
def fibonacci(num):
    """Return the num-th Fibonacci number (fib(1) == fib(2) == 1).

    Non-positive `num` returns 0.  The original returned 0 for num == 1,
    which is inconsistent with the sequence it produces for num >= 2.
    """
    if num <= 0:
        return 0
    prev, curr = 0, 1
    for _ in range(num - 1):
        prev, curr = curr, prev + curr
    return curr


# Demo output, kept from the original script (comment translated from Korean:
# "the code below is just for printing a test value").
print(fibonacci(3))
|
8,459 | 676aec735dd7441b0c481956ad18b012b8d98ea4 | # Question : determine whether given number is power of 2
# logic : every no. of the form 2^i has bit represetntaion of the form :
# 2 -> 10 1->01
# 4 -> 100 3->011
# 8 -> 1000 7->0111
# 16 -> 10000 15->01111
# 32 -> 100000 31->011111
# ... and so on
# Thus there is a pattern here, ever predecessor of power of 2 has all 0 bits flipped and so as 1 bit itself
# Complexity : using bit manipulation it can be done in O(1) time
def is_power(n):
    """Return 'power of 2' if n is a positive power of two, else 'not power of 2'.

    Uses the classic bit trick: a power of two has exactly one set bit, so
    n & (n - 1) clears it to zero.  O(1) time.
    """
    if n <= 0:
        # 0 and negatives are never powers of two.  The original returned
        # the inconsistent string 'not power of two' for n == 0.
        return 'not power of 2'
    if n & (n - 1) == 0:
        return 'power of 2'
    return 'not power of 2'


if __name__ == "__main__":
    input_number = int(input('enter the number : '))
    print(is_power(input_number))
|
8,460 | 50b630b762251f8646044b234ac4b82b8e4b645b | import asyncio
import logging
import os.path
from serial_asyncio import open_serial_connection
from typing import NewType, cast
# Type annotations and converters
AsciiBytes = NewType('AsciiBytes', bytes)


def to_ascii(s: str) -> AsciiBytes:
    """Encode *s* as ASCII, ensuring it ends with a single trailing newline.

    The original indexed s[-1], which raised IndexError for the empty
    string; an empty message now encodes to just b'\\n'.
    """
    if not s.endswith('\n'):
        s += '\n'
    return cast(AsciiBytes, s.encode(encoding='ascii'))
class USBHandler:
    """Reads from and writes to the underlying MDB USB board.

    Users can either obtain an asyncio.Queue that the handler will push
    messages to using listen(), or it can ask for a one-time read using read().
    For sending messages, if no reply is expected or there is a poller waiting
    for any response, send() can be used, otherwise sendread() will send the
    message and wait for a one-time reply. Having a listener and waiting for a
    single message at the same time is an error. See the Sniffer class for an
    example of both usages."""

    def __init__(self):
        self.initialized = False
        self.run_task = None
        # waiters: message-type char -> Future for a single pending read()
        self.waiters = {}
        # queues: message-type char -> Queue for a persistent listen()
        self.queues = {}
        self.logger = logging.getLogger('.'.join((__name__,
                                                  self.__class__.__name__)))

    async def initialize(self, device_path: str) -> None:
        """Open the serial connection to the MDB board at *device_path*."""
        assert os.path.exists(device_path)
        self.logger.info("Initializing USBReader.")
        self.logger.debug("Opening serial connection to device at %s",
                          device_path)
        self.serial_reader, self.serial_writer = \
            await open_serial_connection(url=device_path, baudrate=115200)
        self.initialized = True
        self.logger.debug("Connected to serial device at %s.", device_path)

    async def _run(self) -> None:
        # Dispatch loop: route each CRLF-terminated line to the waiter or
        # queue registered for its first character.
        while True:
            message = await self.serial_reader.readuntil(separator=b'\r\n')
            stripped_message = message.decode(encoding='ascii').rstrip('\n\r')
            self.logger.debug("Read '%s' from MDB board.", stripped_message)
            message_type = stripped_message[0]
            if message_type in self.waiters:
                self.waiters[message_type].set_result(stripped_message)
                del self.waiters[message_type]
                # Lets the waiter run.
                await asyncio.sleep(0)
            elif message_type in self.queues:
                try:
                    self.queues[message_type].put_nowait(stripped_message)
                except asyncio.QueueFull:
                    # Don't block the dispatch loop on a full queue.
                    self.logger.warning('Queue for message type %s is full. '
                                        'Scheduling the put in another task.',
                                        message_type)
                    asyncio.create_task(
                        self.queues[message_type].put(stripped_message))
            else:
                self.logger.error("Unhandled message: %s", stripped_message)

    async def run(self) -> None:
        """Run the dispatch loop until cancelled (requires initialize())."""
        assert self.initialized
        self.logger.info('Starting runner.')
        self.run_task = asyncio.create_task(self._run())
        try:
            await self.run_task
        except asyncio.CancelledError:
            self.logger.info('Runner cancelled.')

    async def send(self, message: AsciiBytes, _drain=True) -> None:
        """Write *message* to the board; drains the buffer unless _drain=False."""
        assert self.initialized
        self.logger.info("Sending message to MDB board: %s", message)
        self.serial_writer.write(message)
        if _drain:
            await self.serial_writer.drain()
        self.logger.info("Sent message to MDB board: %s", message)

    def _read_internal(self, prefix: str) -> asyncio.Future:
        # Register a one-shot Future for the given message-type character.
        assert len(prefix) == 1
        if prefix in self.queues or prefix in self.waiters:
            raise RuntimeError(f"Tried to wait for message type {prefix}"
                               " when there was already a queue listening to "
                               "all messages")
        fut = asyncio.get_running_loop().create_future()
        self.waiters[prefix] = fut
        return fut

    async def sendread(self, message: AsciiBytes, prefix: str) -> str:
        """Send *message* and wait for a single reply starting with *prefix*."""
        # Register the waiter before draining so the reply can't be missed.
        await self.send(message, _drain=False)
        fut = self._read_internal(prefix)
        self.logger.info("Waiting for a single message of type: %s", prefix)
        try:
            await self.serial_writer.drain()
            self.logger.info("Sent message to MDB board: %s", message)
            await fut
        except asyncio.CancelledError as e:
            # Unregister the waiter so a later message doesn't hit a
            # cancelled future.
            self.logger.warning("Got cancelled while sending message %r or "
                                "waiting on prefix %s", message, prefix,
                                exc_info=e)
            del self.waiters[prefix]
            raise
        self.logger.info("Got message: %s", fut.result())
        return fut.result()

    async def read(self, prefix: str) -> str:
        """Wait for a single message starting with *prefix* and return it."""
        fut = self._read_internal(prefix)
        self.logger.info("Waiting for a single message of type: %s", prefix)
        try:
            await fut
        except asyncio.CancelledError as e:
            self.logger.warning("Got cancelled while waiting for message on "
                                "%s", prefix, exc_info=e)
            del self.waiters[prefix]
            raise
        self.logger.info("Got message: %s", fut.result())
        return fut.result()

    def listen(self, prefix: str) -> asyncio.Queue:
        """Return a Queue that will receive every message starting with *prefix*."""
        assert len(prefix) == 1
        if prefix in self.waiters or prefix in self.queues:
            raise RuntimeError("Tried to get a queue for message type "
                               f"{prefix} when there was already someone"
                               "waiting on it.")
        self.queues[prefix] = asyncio.Queue()
        self.logger.info("Polling for messages of type: %s", prefix)
        return self.queues[prefix]

    def unlisten(self, prefix: str) -> None:
        """Stops pushing messages with this prefix character to a Queue."""
        assert len(prefix) == 1
        del self.queues[prefix]
        self.logger.info("No longer polling for message type: %s", prefix)

    async def shutdown(self):
        """Cancel the runner and pending waiters, then close the serial port."""
        if not self.initialized:
            return
        self.logger.info("Shutting down.")
        if self.run_task:
            self.run_task.cancel()
            self.run_task = None
        for fut in self.waiters.values():
            fut.cancel()
        self.serial_writer.close()
        await self.serial_writer.wait_closed()
        self.logger.info("Shutdown complete.")
        self.initialized = False
# __all__ must contain *names* (strings), not the objects themselves; the
# original tuple of objects broke `from <module> import *`.
__all__ = ('USBHandler', 'to_ascii')
|
8,461 | 3a9987ac326131878b80cb819e3d06ce2f4cb054 | # -*- coding: utf-8 -*-
from .log_config import LogBase
import os
__all__ = ['MyLog']
class MyLog(LogBase):
    """Level-separated logging with automatic rotation and gzip compression
    (producing files such as ``2019-11-11.info.log.gz``).

    Parameters (forwarded to :class:`LogBase`):
        log_path:    directory the log files are written to (default ``./logs/``)
        logger_name: name of the logger object
        info_name / error_name / warning_name / debug_name:
                     file names of the per-level logs
        interval:    compression/rotation interval in days (default 7)
        detail:      bool, whether records include full detail
        debug / info / error / warning:
                     bools selecting which levels are recorded
                     (debug defaults to off, the others to on)

    Instance methods:
        get_logger() --> logger

    Usage::

        # record all four levels
        logger = MyLog(debug=True).get_logger()
        logger.info('info')
        logger.debug('debug')
        logger.error('error')
        logger.warning('warning')

        # record error logs only
        logger = MyLog(info=False, warning=False).get_logger()

    Note:
        Only one MyLog instance exists at a time; the attributes of the
        *first* instance created win.  For example::

            mylog = MyLog('./logs/logs/')
            mylog2 = MyLog()
            logger = mylog.get_logger()
            logger2 = mylog2.get_logger()
            logger.info('info')
            logger2 = MyLog('./logs/logs2/').get_logger()
            logger2.info('info2')

        Both loggers use the attributes of the first instance, so all
        records end up under ``./logs/logs/``.
    """
    def __init__(self, log_path: str = './logs/', **kwargs):
        # Validate and normalise the directory path, creating it if needed.
        self.type_need(log_path, str)
        if not log_path.endswith('/'):
            log_path += '/'
        if not os.path.exists(log_path):
            os.makedirs(log_path)
        super(MyLog, self).__init__(dir_path=log_path, **kwargs)

    def get_logger(self):
        """Return the configured logger instance."""
        return self._get_logger()

    @staticmethod
    def type_need(parm, type_):
        """Raise TypeError unless *parm* is an instance of *type_*."""
        if not isinstance(parm, type_):
            raise TypeError(f'expect {type_},but got {type(parm)}')
|
8,462 | 8d6c58e9ef4e14a089a7eb33a92214d081ed7692 | import splunk.admin as admin
import splunk.entity as en
class ConfigApp(admin.MConfigHandler):
    """Splunk setup-endpoint handler for the app's `appsetup.conf` settings."""

    def setup(self):
        """Declare which optional arguments the EDIT action accepts."""
        if self.requestedAction == admin.ACTION_EDIT:
            for myarg in ['api_key']:
                self.supportedArgs.addOptArg(myarg)

    def handleList(self, confInfo):
        """Populate *confInfo* with every stanza/key from appsetup.conf."""
        confDict = self.readConf("appsetup")
        if confDict is not None:  # `None != confDict` in the original
            for stanza, settings in confDict.items():
                for key, val in settings.items():
                    # Normalise an unset api_key to an empty string so the
                    # setup UI shows a blank field instead of "None".
                    if key in ['api_key'] and val in [None, '']:
                        val = ''
                    confInfo[stanza].append(key, val)

    def handleEdit(self, confInfo):
        """Persist the submitted settings into the app_config stanza."""
        # (the original bound self.callerArgs.id/args to unused locals)
        self.writeConf('appsetup', 'app_config', self.callerArgs.data)
admin.init(ConfigApp, admin.CONTEXT_NONE)
|
8,463 | 1c171c67ca5ef0e9b5f2941eec7a625a8823271f | import sys
def isPalin(s):
    """Return True if string *s* reads the same forwards and backwards."""
    # `//` keeps the half-length an int under both Python 2 and 3; the
    # original used `/`, which yields a float (TypeError in range) on Py3.
    for i in range(len(s) // 2):
        if s[i] != s[-(i + 1)]:
            return False
    return True
def main():
    """Print the largest palindromic product of two numbers in [900, 1000)."""
    curr_large = 0
    for i in range(900, 1000):       # xrange -> range: valid on Py2 and Py3
        for j in range(900, 1000):
            prod = i * j
            # Turns out list comprehension is more succinct, but I
            # leave the traditional up method anyway
            if str(prod) == str(prod)[::-1] and prod > curr_large:
                curr_large = prod
    print(curr_large)                # print() call: valid on Py2 and Py3
if __name__ == '__main__':
main()
|
8,464 | d6e06a78c9a5d8184e5adf9b99cc6030c3434558 | # Generated by Django 2.2.2 on 2019-07-09 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-07-09).

    Creates the ExampleModel table (with an optional image upload field)
    and drops the `photo` column from the blog Post model.  Do not edit
    by hand once applied.
    """

    dependencies = [
        ('blog', '0015_auto_20190709_1543'),
    ]

    operations = [
        migrations.CreateModel(
            name='ExampleModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('model_pic', models.ImageField(null=True, upload_to='image/')),
            ],
        ),
        migrations.RemoveField(
            model_name='post',
            name='photo',
        ),
    ]
|
8,465 | cef904b70eb9a997c3c48884ee34665a77e18897 | # -*- coding: utf-8 -*-
'''
LE JEU DE LA VIE
Mini projet numéro 2 de NSI
Modélisation Objet :
Q1) On peut dégager, au premier abord : une classe cellule (avec un attribut état et un autre voisins) et une classe grille (avec un attribut ordonnée et un autre abscisse). En effet, ce sont les deux éléments du jeu.
Q2) On pourrait donner une méthode pour changer l’état de la cellule, une autre pour obtenir son état. Une autre pour définir les voisins et encore une pour les obtenir. Avec ces méthodes, on pourra modifier l’état des cellules et calculer celui-ci en fonction de ses voisins.
Pour ce qui est de la classe grille, on pourrait coder une méthode pour obtenir les coordonnées, un autre pour les modifier. Ainsi, on pourra placer les cellules dans la grille là où il n’y en a pas forcément.
Q3) Il sera plus simple de représenter la notion de voisinage dans la cellule avec la classe grille. En effet, c’est elle qui contient les coordonnées.
Quand au calcul de celle-ci, elle sera plus simple dans la classe cellule car c’est cette classe qui contient l’état.
Q4) Une cellule qui n’est pas au bord a 8 voisins. En effet, le voisinage de Moore compte les diagonales.
Une cellule sur le côté mais pas dans un coin a 5 voisins.
Dans un coin, elle en a 3.
Q5) Pour la case en haut à droite, on pourrait considérer comme voisin de droite la case tout en haut à gauche. Pour le voisin du haut, on peut considérer la case de même abscisse mais d’ordonnée 0 (celle tout en bas). En fait, on prendrait la case d’abscisse ou d’ordonnée « opposée » comme suivante. Cela afin de ne pas avoir que 3 cases prises en compte lors du calcul de l’état en fonction des voisins.
Q8) Cela peut être utile pour vérifier facilement l’état d’une cellule. Ainsi, on peut l’interpréter dans une autre fonction ou même créer facilement une liste qui serait utilisée comme « historique » de la cellule.
'''
from random import randint
from time import sleep
from tkinter import filedialog
from tkinter import *
class Cellule:
    """One cell of the Game of Life.

    Holds the current state, the state computed for the next generation,
    and the list of neighbour states (booleans) used for that computation.
    """

    def __init__(self):
        """Initialise a dead cell with no known neighbours."""
        self.__actuel = False   # current state (True = alive)
        self.__futur = False    # state for the next generation
        self.__voisins = None   # list of neighbour alive-states (booleans)

    def est_vivant(self):
        """Return the current state of the cell (True if alive)."""
        return self.__actuel

    def set_voisins(self, L):
        """Store the neighbour-state list; silently ignores non-list input."""
        if isinstance(L, list):  # isinstance instead of `type(L) == list`
            self.__voisins = L

    def get_voisins(self):
        """Return the neighbour-state list."""
        return self.__voisins

    def naitre(self):
        """Mark the cell as alive in the next generation."""
        self.__futur = True

    def mourir(self):
        """Mark the cell as dead in the next generation."""
        self.__futur = False

    def basculer(self):
        """Commit the next-generation state as the current state."""
        self.__actuel = self.__futur

    def __str__(self):
        """Render the cell: a sprout if alive, a skull if dead."""
        return "🌱" if self.__actuel else "💀"

    def calcule_etat_futur(self):
        """Apply Conway's rules (Moore neighbourhood) to compute the future state.

        A dead cell with exactly 3 live neighbours is born; a live cell
        survives with 2 or 3 live neighbours; every other cell dies.  The
        original spelled this as an if/elif cascade containing the tautology
        `acc != 2 or acc != 3`, which the elif ordering happened to render
        harmless; the rule below is equivalent and explicit.
        """
        vivants = sum(1 for v in self.__voisins if v)
        if vivants == 3 or (self.__actuel and vivants == 2):
            self.naitre()
        else:
            self.mourir()
class Grille:
    """Toroidal Game-of-Life board: a hauteur x largeur matrix of Cellule
    objects with wrap-around (Moore) neighbourhoods."""

    def __init__(self):
        """Create an empty board with the default 30x20 dimensions."""
        self.largeur = 20   # number of columns (second index)
        self.hauteur = 30   # number of rows (first index)
        self.matrice = []   # matrice[row][col] -> Cellule

    def clear_matrice(self):
        """Drop every cell, leaving an empty board."""
        self.matrice = []

    def set_largeur(self, x):
        """Set the board width; silently ignores non-int values."""
        if isinstance(x, int):
            self.largeur = x

    def set_hauteur(self, x):
        """Set the board height; silently ignores non-int values."""
        if isinstance(x, int):
            self.hauteur = x

    def dansgrille(self, i, j):
        """Return True if (i, j) is a valid (row, column) position."""
        return 0 <= i <= self.hauteur - 1 and 0 <= j <= self.largeur - 1

    def setXY(self, i, j, valeur):
        """Store *valeur* at (i, j); returns an error string if out of range."""
        if self.dansgrille(i, j):
            self.matrice[i][j] = valeur
        else:
            return 'out of range, not added'

    def getXY(self, i, j):
        """Return the cell at (i, j), or None if the position is out of range."""
        if self.dansgrille(i, j):
            return self.matrice[i][j]

    def get_largeur(self):
        """Return the board width (number of columns)."""
        return self.largeur

    def get_hauteur(self):
        """Return the board height (number of rows)."""
        return self.hauteur

    @staticmethod
    def est_voisins(i, j, x, y, instance):
        """Return True if (x, y) is one of the 8 toroidal neighbours of (i, j).

        Fixed w.r.t. the original: the wrap-around branches used plain `if`
        followed by `if`/`else`, so the `else` overwrote the wrapped index
        (wrap-around never applied), and the row index was wrapped on the
        board *width*.  Rows now wrap on the height and columns on the
        width, matching get8voisins.
        """
        if i == x and j == y:  # a cell is not its own neighbour
            return False
        for b in range(-1, 2):
            # wrapped row index of the candidate neighbour
            if b + i < 0:
                abx = instance.get_hauteur() + b
            elif b + i > instance.get_hauteur() - 1:
                abx = 0
            else:
                abx = b + i
            for c in range(-1, 2):
                # wrapped column index of the candidate neighbour
                if j + c < 0:
                    ordo = instance.get_largeur() + c
                elif j + c > instance.get_largeur() - 1:
                    ordo = 0
                else:
                    ordo = c + j
                if x == abx and y == ordo:
                    return True
        return False

    def get8voisins(self, i, j):
        """Return the list of the 8 neighbours' alive-states (booleans) of
        (i, j), or None if the position is outside the board."""
        if not self.dansgrille(i, j):
            return None
        L_voisins = []
        for b in range(-1, 2):
            # row index, wrapped on the board height
            if b + i < 0:
                abx = self.get_hauteur() + b
            elif b + i > self.get_hauteur() - 1:
                abx = 0
            else:
                abx = b + i
            for c in range(-1, 2):
                # column index, wrapped on the board width
                if j + c < 0:
                    ordo = self.get_largeur() + c
                elif j + c > self.get_largeur() - 1:
                    ordo = 0
                else:
                    ordo = c + j
                if abx != i or ordo != j:  # skip the cell itself
                    data = self.getXY(abx, ordo)
                    L_voisins.append(data.est_vivant())
        return L_voisins

    def __str__(self):
        """Print the board in the terminal, one list of glyphs per row."""
        for i in range(len(self.matrice)):
            display = []
            for j in range(len(self.matrice[i])):
                display.append(self.matrice[i][j].__str__())
            print(display)
        print('\n')

    def getallstate(self):
        """Return the whole board as one multi-line string of cell glyphs."""
        allin = ""
        for i in range(len(self.matrice)):
            display = ""
            for j in range(len(self.matrice[i])):
                display = display + str(self.matrice[i][j].__str__())
            allin = allin + display + '\n'
        return allin

    def remplir_alea(self, pourcent):
        """Fill the board with cells, *pourcent* percent of which (randomly
        placed) start alive.  Returns False if pourcent is not in (0, 100]."""
        if not 0 < int(pourcent) <= 100:
            return False
        cases = self.largeur * self.hauteur
        nombre = int(cases * (pourcent / 100))  # how many cells start alive
        L_vivant = []
        # draw distinct random (row, col) positions until we have enough
        while len(L_vivant) != nombre:
            y = randint(0, self.largeur - 1)
            x = randint(0, self.hauteur - 1)
            if (x, y) not in L_vivant:
                L_vivant.append((x, y))
        for i in range(0, self.hauteur):
            self.matrice.append([])  # start a new row
            for b in range(0, self.largeur):
                self.matrice[i].append(Cellule())
                if (i, b) in L_vivant:
                    cellule = self.getXY(i, b)
                    cellule.naitre()    # schedule the birth...
                    cellule.basculer()  # ...and commit it immediately
                    self.setXY(i, b, cellule)

    def Jeu(self):
        """Compute the future state of every cell from its neighbourhood."""
        for i in range(0, self.hauteur):
            for b in range(0, self.largeur):
                cellule = self.getXY(i, b)
                cellule.set_voisins(self.get8voisins(i, b))
                cellule.calcule_etat_futur()
                self.setXY(i, b, cellule)

    def actualise(self):
        """Commit the future state of every cell (advance one generation)."""
        for i in range(0, self.hauteur):
            for b in range(0, self.largeur):
                cellule = self.getXY(i, b)
                cellule.basculer()
                self.setXY(i, b, cellule)
def bakbak(*args):
    """Game screen: build the board from the chosen settings and animate it,
    one generation every two seconds."""
    prépartie.pack_forget()  # hide the settings screen
    plateau.clear_matrice()  # reset the board
    plateau.set_largeur(int(numberofcol.get()))    # board width from the spinbox
    plateau.set_hauteur(int(numberoflign.get()))   # board height from the spinbox
    tours=int(numberofturn.get())  # number of generations to play
    partie=Frame(fenetre,bg='#85c17e')
    plateau.remplir_alea(int(pourcent.get()))  # seed the board randomly
    touracc=StringVar()
    printer=Label(partie, textvariable=touracc,bg="#85c17e",font=('Time News Roman', 19), fg="white")  # dynamic label for the turn counter
    printer.pack(padx=10,pady=10)
    actuel=StringVar()
    actuel.set('')
    printer1=Label(partie, textvariable=actuel,bg="#85c17e",font=('Time News Roman', 20))  # dynamic label for the board itself
    printer1.pack()
    def update(n=1,chain=''):
        """Recursive tk callback: play generation *n*, accumulate the full
        game history in *chain*, and reschedule itself every 2000 ms."""
        ch=''
        if n<tours+1:  # more generations left to play
            touracc.set("Tour: "+str(n))  # show the turn number
            plateau.Jeu()  # compute the next generation
            actuel.set(plateau.getallstate())  # render the board for this turn
            plateau.actualise()  # commit the generation
            ch="Tour: "+str(n)+"\n"+plateau.getallstate()+'\n'  # record this turn
            chain=chain+ch  # append it to the history
            partie.after(2000, update, n+1,chain)  # wait 2 s, then next turn
        else:  # all requested generations have been played
            def save():
                """Save every step of the game as a .txt file."""
                # NOTE(review): filetypes entries are normally 2-tuples such
                # as ("txt files", "*.txt") -- confirm this dialog behaves.
                f = filedialog.asksaveasfilename(initialdir = "/",title = "Select file",filetypes = (("txt files"),("all files","*.*")))
                if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
                    return
                with open(f,'w',encoding='utf-8') as result: # write the file as UTF-8
                    result.write(chain)
                    result.close()
            Button(partie,text="Recommencer",command=restart,bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=5) # button: back to the settings screen
            Button(partie,text='Sauvegarder la partie',command=save,bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=5) # button: save the game history
    def restart():
        # destroy the game screen and show the settings screen again
        partie.destroy()
        prépartie.pack()
    update()
    Button(partie,text="Quitter le jeu (fermera complètement la fenêtre)",command=quit,bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10) # button: quit (closes the whole window)
    partie.pack()
fenetre=Tk()
fenetre.title("Le jeu de la vie")
fenetre['bg']='#85c17e' #crée une couleur de fond verte
fenetre.geometry("1000x500") #choisis les dimensions de la fenêtre
plateau=Grille()
#titre de présentation
title= Label(fenetre, text="Le jeu de la vie", bg="#85c17e", font=('Courier New', 30), fg="white")
title.pack(padx=10)
'''Prépartie'''
prépartie=Frame(fenetre,borderwidth=2,relief='ridge',bg="#85c17e") #crée un cadre pour contenir tout les cadres de preparties
choix=Frame(prépartie,bg="#85c17e")#frame pour contenir le choix du pourcent
titlepourcent=Label(choix, text="Entrez le pourcentage de cellules vivantes souhaitées au démarrage ",bg="#85c17e",font=('Noto Serif', 11))
titlepourcent.pack(side=LEFT,padx=10)
'''pourcent'''
value = DoubleVar()
pourcent=Scale(choix, variable=value,cursor='dot',orient=HORIZONTAL,bg="#85c17e",troughcolor='white')#reglètte pour choisir le pourcentage
pourcent.pack(side=LEFT,padx=5,pady=10)
'''nombre de tours'''
nombretour=Frame(prépartie,bg="#85c17e")#contient le choix du nombre de tour
Label(nombretour, text="Entrez le nombre de tour souhaités",bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)
numberofturn = Spinbox(nombretour, from_=1, to=100,bg="#85c17e",buttonbackground='white',cursor='dot') #spinbox pour choisir le nombre de tours
numberofturn.pack()
'''hauteur et largeur'''
nombrecolonnes=Frame(prépartie,bg="#85c17e",pady=10)#contient le choix du nombre de colonnes
Label(nombrecolonnes, text="Entrez le nombre de colonnes souhaitées",bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)
numberofcol = Spinbox(nombrecolonnes, from_=1, to=100,bg="#85c17e",buttonbackground='white',cursor='dot')#spinbox pour choisir le nombre de colonnes
numberofcol.pack()
nombrelignes=Frame(prépartie,bg="#85c17e",pady=10)#contient le choix du nombre de colonnes
Label(nombrelignes, text="Entrez le nombre de lignes souhaitées",bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)
numberoflign = Spinbox(nombrelignes, from_=1, to=100,bg="#85c17e",buttonbackground='white',cursor='dot')#spinbox pour choisir le nombre de lignes
numberoflign.pack()
'''validation'''
validate= Frame(prépartie,bg="#B76E79")
Button(validate,text="Valider et lancer le jeu",command=bakbak,bg="#85c17e", activebackground='white',cursor='star',font=('Noto Serif', 11)).pack(side=LEFT,padx=10,pady=15) #bouton qui permet de valider les paramètres séléctionnés
prépartie.pack()
choix.pack()
nombretour.pack()
nombrecolonnes.pack()
nombrelignes.pack()
validate.pack()
fenetre.mainloop()
|
8,466 | 24ed29dfaaf7ce508b2d80740bad1304b291c596 | # Generated by Django 3.1.6 on 2021-04-22 07:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header): alters projects.Project.pin_id.
    dependencies = [
        ('projects', '0004_project_is_featured'),
    ]
    operations = [
        # Make pin_id a nullable, unique CharField capped at 20 characters.
        # null=True + unique=True allows many NULL rows but at most one of
        # each non-NULL value (standard SQL UNIQUE semantics).
        migrations.AlterField(
            model_name='project',
            name='pin_id',
            field=models.CharField(max_length=20, null=True, unique=True),
        ),
    ]
|
8,467 | 58058065ac78ffbf7550416b751e1440976c7898 | # -*- coding: utf-8 -*-
import urllib
from pingpp import http_client, util
class WxpubOauth:
    """
    OAuth2.0 helper for WeChat Official Accounts: after the user authorizes,
    obtain the unique identifier (openid) of the authorizing user.

    Every method here is optional; developers may implement the flow
    themselves as needed.
    See http://mp.weixin.qq.com/wiki/17/c0f37d5704f0b64713d5d2c37b468d75.html

    NOTE(review): this module targets Python 2 (``urllib.urlencode``); the
    stray debug ``print query_str`` statement has been removed -- it leaked
    the OAuth redirect query (including state) to stdout and made the file
    unparsable under Python 3.
    """
    @staticmethod
    def get_openid(app_id, app_secret, code):
        """
        Fetch the authorizing user's unique openid.
        :param app_id: Official Account app id
        :param app_secret: Official Account app secret (keep confidential)
        :param code: authorization code obtained via create_oauth_url_for_code
        :return: openid usable for in-page WeChat payment, or None on failure
        """
        url = WxpubOauth.create_oauth_url_for_openid(app_id, app_secret, code)
        client = http_client.new_default_http_client()
        rbody, rcode = client.request('GET', url, {})
        if rcode == 200:
            data = util.json.loads(rbody)
            return data['openid']
        return None
    @staticmethod
    def create_oauth_url_for_code(app_id, redirect_url, more_info=False):
        """
        Build the URL used to obtain the authorization code; visiting it
        authenticates the user and redirects to redirect_url with a ``code``
        query parameter appended.
        :param app_id: Official Account app id
        :param redirect_url: callback URL after authorization; its domain must
            be registered in the Official Account platform
            (Platform => Developer Center => web authorization => edit)
        :param more_info: False -> silent redirect, yields only the openid;
            True -> shows the consent page, additionally exposing nickname,
            gender and location via the openid
        :return: URL that yields the authorization code
        """
        data = dict()
        data['appid'] = app_id
        data['redirect_uri'] = redirect_url
        data['response_type'] = 'code'
        data['scope'] = 'snsapi_userinfo' if more_info else 'snsapi_base'
        data['state'] = 'STATE#wechat_redirect'
        query_str = urllib.urlencode(data)
        return "https://open.weixin.qq.com/connect/oauth2/authorize?" + query_str
    @staticmethod
    def create_oauth_url_for_openid(app_id, app_secret, code):
        """
        Build the URL that exchanges the authorization code for the openid.
        :param app_id: Official Account app id
        :param app_secret: Official Account app secret (keep confidential)
        :param code: authorization code obtained via create_oauth_url_for_code
        :return: URL that yields the openid
        """
        data = dict()
        data['appid'] = app_id
        data['secret'] = app_secret
        data['code'] = code
        data['grant_type'] = 'authorization_code'
        query_str = urllib.urlencode(data)
        return "https://api.weixin.qq.com/sns/oauth2/access_token?" + query_str
8,468 | 388e43850a2e114cfe7869293ee814831a088b3e | from django.conf.urls import url
from myapp import views
urlpatterns = [
    # Site root -> homepage view.
    url(r'^$', views.homepage, name='homepage'),
    # NOTE(review): no trailing '$' anchor, so every path starting with
    # 'search/' matches; route name 'article_detail' does not match the view
    # name 'my_search_view' -- confirm both are intentional.
    url(r'^search/', views.my_search_view, name = 'article_detail')
] |
8,469 | 75393d39b147097a7ac1d82938ac102491ea9441 | # drop data to file filter
import tarr.compiler_base
def format_data(data):
    """Render one record as '<id>: <payload>' for the drop file."""
    return '%s: %s' % (data.id, data.payload)
class WRITE_TO_FILE(tarr.compiler_base.Instruction):
    """Pipeline instruction that appends one formatted line per data item
    to ``filename`` and passes the data through unchanged."""
    @property
    def __name__(self):
        return 'POINT OF INTEREST - WRITE("{}")'.format(self.filename)
    def __init__(self, filename, formatter=format_data):
        # formatter maps a data item to the line written (without newline)
        self.format = formatter
        self.filename = filename
    def run(self, runner, data):
        # NOTE: we need to do writing in UNBUFFERED mode (buffering=0)
        # as potentially there are other processes writing to the same file
        # *NOW*
        # Unbuffered append mode requires a binary file object, so the
        # formatted line must be encoded explicitly -- the previous code
        # passed a str and raised TypeError on Python 3.
        with open(self.filename, 'ab', buffering=0) as f:
            f.write((self.format(data) + '\n').encode('utf-8'))
        return data
    def clone(self):
        # Fresh instance with the same configuration.
        return self.__class__(filename=self.filename, formatter=self.format)
|
8,470 | 167bd2c405171443c11fbd13575f8c7b20877289 | import logging
import terrestrial.config as config
logger = logging.getLogger(f'{__name__}.common')
def health():
    """Liveness probe endpoint: report that the service is up."""
    body, status_code = 'OK', 200
    return body, status_code
def verify_token(token):
    """
    Verifies the token from the Authorization header.

    Fails closed: when no API token is configured the check is rejected.
    Previously the function still compared ``token == config.API_TOKEN``,
    so an unauthenticated request carrying no token (token is None) compared
    equal to the missing configuration and was accepted -- an auth bypass
    (the log message already stated that auth should fail in this case).
    """
    if config.API_TOKEN is None:
        logger.error(
            'API token is not configured, auth will fail!')
        return False
    return token == config.API_TOKEN
|
8,471 | 78d59e903fecd211aa975ae4c8dc01b17c8fad44 | import io
import socket
import ssl
from ..exceptions import ProxySchemeUnsupported
from ..packages import six
SSL_BLOCKSIZE = 16384
class SSLTransport:
    """
    The SSLTransport wraps an existing socket and establishes an SSL connection.
    Contrary to Python's implementation of SSLSocket, it allows you to chain
    multiple TLS connections together. It's particularly useful if you need to
    implement TLS within TLS.
    The class supports most of the socket API operations.
    """
    @staticmethod
    def _validate_ssl_context_for_tls_in_tls(ssl_context):
        """
        Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
        for TLS in TLS.
        The only requirement is that the ssl_context provides the 'wrap_bio'
        methods.
        """
        if not hasattr(ssl_context, "wrap_bio"):
            if six.PY2:
                raise ProxySchemeUnsupported(
                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
                    "supported on Python 2"
                )
            else:
                raise ProxySchemeUnsupported(
                    "TLS in TLS requires SSLContext.wrap_bio() which isn't "
                    "available on non-native SSLContext"
                )
    def __init__(
        self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
    ):
        """
        Create an SSLTransport around socket using the provided ssl_context.
        """
        # Two in-memory BIOs shuttle ciphertext between the SSLObject and the
        # wrapped socket; this decoupling is what allows the "socket" itself
        # to be another TLS channel (TLS in TLS).
        self.incoming = ssl.MemoryBIO()
        self.outgoing = ssl.MemoryBIO()
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self.socket = socket
        self.sslobj = ssl_context.wrap_bio(
            self.incoming, self.outgoing, server_hostname=server_hostname
        )
        # Perform initial handshake.
        self._ssl_io_loop(self.sslobj.do_handshake)
    def __enter__(self):
        return self
    def __exit__(self, *_):
        self.close()
    def fileno(self):
        """Return the file descriptor of the wrapped socket."""
        return self.socket.fileno()
    def read(self, len=1024, buffer=None):
        """Read up to ``len`` decrypted bytes (into ``buffer`` if given)."""
        return self._wrap_ssl_read(len, buffer)
    def recv(self, len=1024, flags=0):
        """socket.recv() equivalent; flags are not supported over TLS."""
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to recv")
        return self._wrap_ssl_read(len)
    def recv_into(self, buffer, nbytes=None, flags=0):
        """socket.recv_into() equivalent; reads decrypted data into buffer."""
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to recv_into")
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        return self.read(nbytes, buffer)
    def sendall(self, data, flags=0):
        """Send all of ``data``, looping over send() until done."""
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to sendall")
        count = 0
        with memoryview(data) as view, view.cast("B") as byte_view:
            amount = len(byte_view)
            while count < amount:
                v = self.send(byte_view[count:])
                count += v
    def send(self, data, flags=0):
        """Encrypt and send ``data``; returns the number of bytes consumed."""
        if flags != 0:
            raise ValueError("non-zero flags not allowed in calls to send")
        response = self._ssl_io_loop(self.sslobj.write, data)
        return response
    def makefile(
        self, mode="r", buffering=None, encoding=None, errors=None, newline=None
    ):
        """
        Python's httpclient uses makefile and buffered io when reading HTTP
        messages and we need to support it.
        This is unfortunately a copy and paste of socket.py makefile with small
        changes to point to the socket directly.
        """
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
        writing = "w" in mode
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        # SocketIO calls back into this object's recv_into/send, keeping all
        # traffic on the TLS channel.
        raw = socket.SocketIO(self, rawmode)
        self.socket._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
    def unwrap(self):
        """Perform the TLS closing handshake (sends close_notify)."""
        self._ssl_io_loop(self.sslobj.unwrap)
    def close(self):
        """Close the underlying socket."""
        self.socket.close()
    # --- Thin pass-throughs to the wrapped SSLObject / socket ---
    def getpeercert(self, binary_form=False):
        return self.sslobj.getpeercert(binary_form)
    def version(self):
        return self.sslobj.version()
    def cipher(self):
        return self.sslobj.cipher()
    def selected_alpn_protocol(self):
        return self.sslobj.selected_alpn_protocol()
    def selected_npn_protocol(self):
        return self.sslobj.selected_npn_protocol()
    def shared_ciphers(self):
        return self.sslobj.shared_ciphers()
    def compression(self):
        return self.sslobj.compression()
    def settimeout(self, value):
        self.socket.settimeout(value)
    def gettimeout(self):
        return self.socket.gettimeout()
    def _decref_socketios(self):
        self.socket._decref_socketios()
    def _wrap_ssl_read(self, len, buffer=None):
        """Read via the SSL object, optionally treating an unexpected EOF
        (SSL_ERROR_EOF) as a clean end-of-stream when suppress_ragged_eofs."""
        try:
            return self._ssl_io_loop(self.sslobj.read, len, buffer)
        except ssl.SSLError as e:
            if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
                return 0  # eof, return 0.
            else:
                raise
    def _ssl_io_loop(self, func, *args):
        """Performs an I/O loop between incoming/outgoing and the socket."""
        should_loop = True
        ret = None
        while should_loop:
            errno = None
            try:
                ret = func(*args)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
                    # WANT_READ, and WANT_WRITE are expected, others are not.
                    raise e
                errno = e.errno
            # Always flush whatever ciphertext the SSL object produced,
            # even on WANT_READ/WANT_WRITE (e.g. handshake records).
            buf = self.outgoing.read()
            self.socket.sendall(buf)
            if errno is None:
                should_loop = False
            elif errno == ssl.SSL_ERROR_WANT_READ:
                buf = self.socket.recv(SSL_BLOCKSIZE)
                if buf:
                    self.incoming.write(buf)
                else:
                    # Peer closed: signal EOF so the SSL object can finish.
                    self.incoming.write_eof()
        return ret
|
8,472 | e47223622a2718830d830dbb779800659d659ae3 | import cv2
import numpy as np
from pycocotools.coco import maskUtils
# from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
# from dataset.base_dataflow import Meta
from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
from dataset.base_dataflow import Meta
def read_img(components):
    """
    Loads the image from components[0] (the image path) and stores it plus
    its dimensions back into the components list.
    Fixes: the file handle was never closed (now a ``with`` block) and
    ``np.fromstring`` is deprecated for binary input -- ``np.frombuffer``
    is the supported zero-copy equivalent.
    :param components: components (path at 0; height/width written to 1/2,
        decoded BGR image to 10)
    :return: updated components
    """
    with open(components[0], 'rb') as img_file:
        img_buf = img_file.read()
    if not img_buf:
        raise Exception('image not read, path=%s' % components[0])
    arr = np.frombuffer(img_buf, np.uint8)
    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    components[1], components[2] = img.shape[:2]
    components[10] = img
    return components
def gen_mask(components):
    """
    Generate masks based on the coco mask polygons.
    :param components: components (RLE segments at 7, height/width at 1/2;
        the combined miss-mask is written to index 11)
    :return: updated components
    """
    masks_segments = components[7]
    hh = components[1]
    ww = components[2]
    if masks_segments:
        # Start from all-ones ("keep everything") and punch out each segment.
        mask_miss = np.ones((hh, ww), dtype=np.uint8)
        for seg in masks_segments:
            # maskUtils.decode yields a binary mask for the RLE segment;
            # invert it so masked-out regions become 0, then AND-accumulate.
            bin_mask = maskUtils.decode(seg)
            bin_mask = np.logical_not(bin_mask)
            mask_miss = np.bitwise_and(mask_miss, bin_mask)
        components[11] = mask_miss
    return components
# components == df
# seems params' type is list
def augment(components, augmentors,use_o=False):
    """
    Augmenting of images.

    Odd-indexed samples (idx % 2 == 1) are treated as "original" samples and
    receive only augmentors[4] (so augmentors must have at least 5 entries);
    all other samples go through the full augmentor chain.
    NOTE(review): use_o is accepted but never read -- confirm intent.
    :param components: components (see index unpacking below)
    :return: [augmented_image, augmented_joints]
    """
    img_path = components[0]
    height = components[1]
    width = components[2]
    center = components[3]
    bbox = components[4]
    area = components[5]
    num_keypoints = components[6]
    masks_segments = components[7]
    scale = components[8]
    all_joints = components[9]
    img = components[10]
    mask = components[11]
    aug_center = components[12]
    aug_joints = components[13]
    idx = components[14]
    # Rebuild a Meta object so the augmentors can mutate it freely.
    meta = Meta(img_path, height, width, center, bbox,
                area, scale, num_keypoints)
    meta.masks_segments = masks_segments
    meta.all_joints = all_joints
    meta.img = img
    meta.mask = mask
    meta.aug_center = aug_center
    meta.aug_joints = aug_joints
    # Joints are converted to flat point lists for coordinate transforms.
    aug_center = meta.center.copy()
    aug_joints = joints_to_point8(meta.all_joints)
    if idx % 2 == 1:
        # "Original" branch: apply only augmentors[4] on a private Meta copy.
        # print(f"ori: {idx//2}, {idx}")
        o_meta= Meta(img_path, height, width, center, bbox,
                     area, scale, num_keypoints)
        o_meta.all_joints=all_joints
        o_meta.img=img
        o_meta.mask=mask
        o_meta.aug_center=aug_center
        o_meta.aug_joints=aug_joints
        o_aug_center=o_meta.center.copy()
        o_aug_joints=joints_to_point8(o_meta.all_joints)
        o_trans=augmentors[4].get_transform(AugImgMetadata(
            img=o_meta.img,
            mask = o_meta.mask,
            center=o_aug_center,
            scale=o_meta.scale
        ))
        o_img,o_mask=o_trans.apply_image(o_meta)
        o_aug_joints = o_trans.apply_coords(o_aug_joints)
        # o_aug_center = o_trans.apply_coords(o_aug_center)
        # o_meta.img=o_img
        # o_meta.mask=mask
        o_meta.aug_joints=point8_to_joints(o_aug_joints)
        # o_meta.aug_center=o_aug_center
        return [o_img,o_meta.aug_joints]
    else:
        # Full augmentation chain; each transform sees the previous result.
        for aug in augmentors:
            transformation = aug.get_transform(
                AugImgMetadata(img=meta.img,
                               mask=meta.mask,
                               center=aug_center,
                               scale=meta.scale))
            im, mask = transformation.apply_image(meta)
            # augment joints
            aug_joints = transformation.apply_coords(aug_joints)
            # after flipping horizontaly the left side joints and right side joints are also
            # flipped so we need to recover their orginal orientation.
            if isinstance(transformation, FlipTransform):
                aug_joints = transformation.recover_left_right(aug_joints)
            # augment center position
            aug_center = transformation.apply_coords(aug_center)
            meta.img = im
            meta.mask = mask
            meta.aug_joints = point8_to_joints(aug_joints)
            meta.aug_center = aug_center
        back_img=meta.img
        back_aug_joints = meta.aug_joints
        # del meta
        # return [[back_img,back_aug_joints],
        #        [o_meta.img,o_meta.aug_joints]]
        return [back_img,back_aug_joints]
def apply_mask(components):
    """
    Multiply each colour channel of the image by the mask (when present),
    then paint the zeroed-out pixels mid-grey (128). Mutates the image
    stored in the components list in place.
    :param components: components (image at index 10, mask at index 11)
    :return: the same components list
    """
    image, mask = components[10], components[11]
    if mask is None:
        return components
    for channel in range(3):
        image[:, :, channel] = image[:, :, channel] * mask
    image[image == 0] = 128
    return components
def create_all_mask(mask, num, stride):
    """
    Helper function to create a stack of scaled down mask.
    :param mask: mask image
    :param num: number of layers
    :param stride: parameter used to scale down the mask image because it has
        the same size as orginal image. We need the size of network output.
    :return: (H/stride, W/stride, num) array with the resized mask repeated
        ``num`` times along the channel axis
    """
    scale_factor = 1.0 / stride
    # cv2.resize with fx/fy scales relative to the input size; INTER_CUBIC
    # interpolation means the resized mask may contain non-binary values.
    small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)
    # Add a channel axis, then repeat it once per output layer.
    small_mask = small_mask[:, :, np.newaxis]
    return np.repeat(small_mask, num, axis=2)
|
8,473 | 501d50fa933f55c178b4b2eba6cfc5b85592beaa | #!/usr/bin/env python3
#
# compare-sorts.py
# Copyright (c) 2017 Dylan Brown. All rights reserved.
#
# Use Python 3. Run from within the scripts/ directory.
import os
import sys
import re
import subprocess
# Ensure we don't silently fail by running Python 2.
assert sys.version_info[0] >= 3, "This script requires Python 3.x"
assert os.getcwd().split("/")[-1] == "algorithms-sedgewick-wayne", \
"This script must be run from the project's root directory."
# Number of iterations to average over.
N = 25
# Data file to sort.
# DATA = "./algs4-data/words3.txt"
DATA = "./algs4-data/medTale.txt"
def main():
    """Run each sort executable N times over DATA, average the reported
    elapsed time (ns) and print the speedup relative to selection sort."""
    sorts = ["selection-sort",
             "insertion-sort",
             "shell-sort"]
    # Baseline; selection-sort must run first (it is first in `sorts`).
    sel_sort_time = None
    for sort in sorts:
        exe_path = "./build/{}".format(sort)
        if not os.path.isfile(exe_path):
            raise OSError("The executable {} does not exist.".format(exe_path))
        accumulated_time = 0
        for _ in range(N):
            # Run without a shell: the argument list avoids quoting pitfalls
            # and removes the injection surface of shell=True.
            output = subprocess.check_output([exe_path, DATA]).decode()
            # Use a raw string for the regex ("\d" is an invalid escape in a
            # plain string literal) and extract the first integer: the
            # elapsed time in nanoseconds.
            accumulated_time += int(re.findall(r"\d+", output)[0])
        average_time = accumulated_time / N
        if "selection-sort" == sort:
            print("{:>14} took {:>8} ns on average.".format(sort, int(average_time)))
            sel_sort_time = average_time
        else:
            print("{:>14} took {:>8} ns on average, "
                  "a {:4.1f}x speedup over selection sort.".format(sort,
                                                                   int(average_time),
                                                                   sel_sort_time / average_time))
if __name__ == "__main__":
main()
|
8,474 | 3aee336956ac6f962c34f51a27dc4abebf2cc7c8 | """
You are given pre-order traversal with a slight modification.
It includes null pointers when a particular node has nil left/right child.
Reconstruct the binary tree with this information.
Ex. [H, B, F, None, None, E, A, None, None, None, C, None, D, None, G, I, None, None, None]
H
/ \
B C
/ \ \
F E D
/ \
A G
/
I
"""
# time: O(n)
def contruct_tree(pre_order, index=0):
    """Recursively rebuild the tree from a pre-order listing that contains
    None markers for nil children.

    Returns ``(node, last_consumed_index)`` so the caller can resume after
    the subtree. Fixes two defects in the previous version: it called an
    undefined name ``construct`` (NameError on first recursion) and it
    incremented ``index`` before the first read, skipping element 0.
    """
    if index >= len(pre_order):
        raise IndexError('pre-order sequence exhausted at index %d' % index)
    root = pre_order[index]
    if root is None:
        return (None, index)
    node = BST(root)  # NOTE(review): BST node class must be defined elsewhere
    node.left, index = contruct_tree(pre_order, index + 1)
    node.right, index = contruct_tree(pre_order, index + 1)
    return (node, index)
# my solution without recursion
def contruct_tree(pre_order):
    """Iteratively rebuild the tree from a pre-order listing with None
    markers for nil children.

    Fixes in this version: the loop variable ``i`` was never incremented
    (infinite loop), the code referenced an undefined name ``L`` instead of
    ``pre_order``, ``cur``/``curr`` was a typo, raw values were stored where
    child *nodes* were intended, and None markers were not handled.
    """
    if not pre_order or pre_order[0] is None:
        return None
    tree = BST(pre_order[0])  # NOTE(review): BST node class defined elsewhere
    curr = tree
    stack = []
    i = 0
    while i < len(pre_order) - 1:
        value = pre_order[i + 1]
        child = BST(value) if value is not None else None
        if curr is not None:
            # Descend left first (pre-order), remembering the parent so we
            # can come back for its right child.
            curr.left = child
            stack.append(curr)
            curr = curr.left
        else:
            # Left subtree finished: attach the right child of the most
            # recent parent still awaiting one.
            curr = stack.pop()
            curr.right = child
            curr = curr.right
        i += 1
    return tree
|
8,475 | 9e525eccbf10a710d6f37c903370cc10f7d2c62b | # -*- coding: utf-8 -*-
# Author:sen
# Date:2020/4/2 14:15
class TreeNode:
    """A single binary-search-tree node: a value plus left/right links."""

    def __init__(self, val):
        self.val = val
        self.left = self.right = None
def find(root, val):
    """Return the node holding val in the BST rooted at root, or None.

    Iterative walk: go left for smaller keys, right for larger ones.
    """
    node = root
    while node is not None:
        if val < node.val:
            node = node.left
        elif val > node.val:
            node = node.right
        else:
            return node
    return None
def find_min(root):
    """Return the leftmost (smallest) node of the subtree, or None if empty."""
    node = root
    while node is not None and node.left is not None:
        node = node.left
    return node
def find_max(root):
    """Return the rightmost (largest) node of the subtree, or None if empty."""
    node = root
    while node is not None and node.right is not None:
        node = node.right
    return node
def insert(root, val):
    """Insert val into the BST rooted at root; return the subtree's root.

    Duplicates are ignored, preserving the no-duplicate-keys invariant.
    """
    if root is None:
        return TreeNode(val)
    if val < root.val:
        root.left = insert(root.left, val)
    elif val > root.val:
        root.right = insert(root.right, val)
    # val == root.val: already present, nothing to do
    return root
def delete(root, val):
    """Remove val from the BST rooted at root; return the new subtree root."""
    if not root:
        return None
    elif val < root.val:
        root.left = delete(root.left, val)  # rebind to the left subtree's new root
    elif val > root.val:
        root.right = delete(root.right, val)
    else:  # found the node: perform the removal
        if root.left and root.right:  # two children
            # Replace the value with the in-order successor (smallest key in
            # the right subtree), then delete that successor node instead.
            tmp = find_min(root.right)
            root.val = tmp.val
            root.right = delete(root.right, tmp.val)
        else:  # zero or one child: splice the (possibly None) child upward
            root = root.left if root.left else root.right
    return root
def height(root):
    """Height in edges: -1 for an empty tree, 0 for a single leaf."""
    if root is None:
        return -1
    left_height = height(root.left)
    right_height = height(root.right)
    return max(left_height, right_height) + 1
if __name__ == '__main__':
    # Smoke test: inserting a sorted sequence must yield the same sequence
    # back from an in-order traversal (BST invariant).
    vals = [1, 2, 3, 4, 5, 6, 7, 8]
    root = None
    from DataStructure.tree import in_order
    for v in vals:
        root = insert(root, v)
    tree_in_order = in_order(root)
    assert vals == tree_in_order, "构建树出错"
    # vals.append(9)
    # root = insert(root, 9)
    # tree_in_order = in_order(root)
    # assert vals == tree_in_order, "插入出错"
    #
    # vals.remove(6)
    # root = delete(root, 6)
    # tree_in_order = in_order(root)
    # assert vals == tree_in_order, "删除出错"
    # Sorted input degenerates the tree into a linked list, so the height
    # printed here is len(vals) - 1.
    print(height(root))
|
8,476 | 3875d85bef37900f9066c108dc720b364cbafffa | import os
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Build import cythonize
# Paths are resolved relative to this setup script.
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.join(CUR_DIR, 'src')
TPM_DIR = os.path.join(SRC_DIR, 'tpm')  # NOTE(review): unused below -- confirm
include_dirs = [SRC_DIR]
# Cython source compiled into the pytpm._tpm extension module.
src_files = ["pytpm/_tpm.pyx"]
# TPM library and path to the library.
library_dirs = [os.path.expanduser("~/lib/tpm")]
libraries = ['tpm']
ext_modules = [
    Extension(
        "pytpm._tpm", src_files,
        include_dirs=include_dirs,
        library_dirs=library_dirs,
        libraries=libraries
    )
]
setup(
    name='pytpm',
    packages=['pytpm'],
    package_dir={'pytpm': 'pytpm'},
    # Ship the Cython sources/declarations alongside the built package.
    package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']},
    ext_modules=cythonize(ext_modules)
)
8,477 | f2bb00d06023ef7b3ea3dc33f7ec00d1f48d46ae | from openerp import models, fields, api, _
class priority_customer(models.Model):
    """Extend res.partner with priority-partner bookkeeping fields."""
    _inherit = 'res.partner'
    # Flag marking a partner as a priority partner.
    is_priority = fields.Boolean("Is Priority Partner:?")
    # Date the partner was registered.
    registration_date = fields.Date("Registration Date:")
    # Free-form liability card identifier.
    liability_card_number = fields.Char("Liability Card Number:")
|
8,478 | b3fb210bcdec2ed552c37c6221c1f0f0419d7469 | import boto3
from time import sleep
# One-shot script: look up the 'MinecraftInstance' CloudFormation stack and
# request its deletion (delete_stack only *starts* deletion; it does not wait).
cfn = boto3.client('cloudformation')
try:
    # Get base stack outputs.
    stack_id = cfn.describe_stacks(StackName='MinecraftInstance')['Stacks'][0]['StackId']
    cfn.delete_stack(StackName=stack_id)
    print(f"Deleting Stack: {stack_id}")
except Exception as e:
    # Broad catch is deliberate for a CLI script: report and exit non-zero.
    print('Something went wrong! Make sure to manually delete the stack!')
    print(e)
    exit(-1)
|
8,479 | cce645073ba117b9e297dfccf5a39710b0c6cd14 | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'raek'
web = '910d59f0-30bd-495b-a54c-bf5addc81a8a'
app = '21ec74fb-e941-43be-8772-a2f8dc6ccc4f' |
8,480 | 32b22cccac75c87b8638c76c0c6d27db0de4d750 | # joiner = '+'
# seq = ["Sushil","Bahadur","KC"]
# txt = joiner.join(seq)
# txt
# txt = " Sam "
# ljus = txt.ljust(7,"*")
# ljus
# txtstrip = txt.strip().strip('S')
# txtstrip
# txt = "This is my world."
# txtSplit = txt.split(maxsplit=1)
# txtSplit
# name = input("Enter your full name")
# name = name.strip()
# txt = name.split()
# print("First Name:",txt[0])
# print("Last Name:",txt[1])
# txt = "Amet sint ipsum aliquip ea velit minim.\n \
# Consequat esse do laboris nisi proident nisi tempor magna.\n \
# Occaecat occaecat id qui veniam deserunt ullamco laborum consequat sint ullamco.\n \
# Eu Lorem nisi mollit pariatur commodo minim eu reprehenderit magna ipsum consequat."
# print(txt)
# newData = txt.splitlines()
# newData
# Sequence
#2. List
# Scratch exercises exploring list indexing, slicing and mutation; the bare
# expressions (e.g. `list4`) only display values in a REPL/notebook session.
list1 = ["Sam", "Rocky", 1989, 1890]
print(type(list1))
print(list1[0])
print(list1[len(list1)-1])
# Lists are mutable: replace the first element in place.
list1[0] = 6781
print(list1)
list4 = list1[2:4]
list4
list4 = list1[::-1]
list4
list4 = list1[::2]
list4
list4 = list1[2:0:-1]
list4
# Concatenation and repetition create new lists.
list4 = list1+['Hello',2]
list4
list4 = list1*2
list4
list1.append("Sam")
list1
# remove() deletes by value, del deletes by index.
list1.remove(6781)
list1
del list1[2]
list1
|
8,481 | 8faaf9eb2e78b7921dd1cac4772e2415671201c7 | # -*- coding: utf-8 -*-
# Copyright 2013, Achim Köhler
# All rights reserved, see accompanied file license.txt for details.
# $REV$
import argparse
import traylauncher
if __name__ == "__main__":
args = argparse.Namespace()
args.notray = False
traylauncher.start(args) |
8,482 | d332ddd6c66bb22d60190ab8f94931eac6fd2394 | # Bisection recursion algo for sqrt of 2
def bisectionSqrt(x, epsilon = 0.01, low = None, high = None):
    """
    Performs a recursive bisection search to find the
    square root of x, within epsilon (tolerance on midPoint**2 - x).

    Fix: for 0 < x < 1 the square root exceeds x, so the initial upper
    bound must be at least 1.0. The old code searched [0, x] and bailed
    out via ``midPoint > x``, returning a wrong value (e.g. for x=0.25).
    """
    if low is None:
        low = 0.0
    if high is None:
        # sqrt(x) <= max(x, 1) for all x >= 0, so this bound always works.
        high = max(x, 1.0)
    midPoint = (high + low) / 2.0
    if abs(midPoint ** 2 - x) < epsilon:
        # Close enough: midPoint squared is within epsilon of x.
        return midPoint
    elif midPoint ** 2 < x:
        # Guess too small: recurse on the upper half.
        return bisectionSqrt(x, epsilon, midPoint, high)
    else:
        # Guess too big: recurse on the lower half.
        return bisectionSqrt(x, epsilon, low, midPoint)
print "bisectionSqrt(25): ", bisectionSqrt(25) |
8,483 | e6fa1202d829fb553423998cdbad13684405437c | # adventofcode.com
# day19
from collections import defaultdict
INPUTFILE = 'input/input19'
TEST = False
TESTCASE = ('HOH', ['H => HO\n', 'H => OH\n', 'O => HH\n'], ['OHOH', 'HOOH', 'HHHH', 'HOHO'])
def find_idx(string, substring):
    """Generate every index at which substring occurs in string.

    Thin generator wrapper around str.find(); overlapping matches are
    reported because the scan resumes one character past each hit.
    """
    pos = string.find(substring)
    while pos >= 0:
        yield pos
        pos = string.find(substring, pos + 1)
def replace_in_string(string, length, substring, idx):
    """Return a copy of string where the ``length`` characters starting at
    ``idx`` are replaced by ``substring`` (which may differ in length)."""
    head = string[:idx]
    tail = string[idx + length:]
    return head + substring + tail
if __name__ == '__main__':
    # Map each source atom/token to the list of strings it may expand to.
    subs = defaultdict(list)
    if TEST:
        inputstring = TESTCASE[0]
        lines = TESTCASE[1]
        test = TESTCASE[2]
    else:
        with open(INPUTFILE, 'r') as f:
            lines = f.readlines()
        # Last line of the input file is the molecule; the rest are rules.
        inputstring = lines.pop()
        test = False
    # Parse "X => Y" replacement rules, skipping the blank separator line.
    for line in lines:
        if line != '\n':
            f, t = line.rstrip('\n').split(' => ')
            subs[f].append(t)
    # Part A: apply every rule at every possible position once; the answer
    # is the number of *distinct* resulting molecules.
    solution = []
    for key, sublist in subs.items():
        for sub in sublist:
            for idx in find_idx(inputstring, key):
                solution.append(replace_in_string(inputstring, len(key), sub, idx))
    print "length : ", len(set(solution))
    if test:
        assert set(test) == set(solution), 'Testcase failure!'
    # part B
    # Cheated! #Atoms - 2*(#Rn) - 2*(#Y) - 1
    # https://www.reddit.com/r/adventofcode/comments/3xflz8/day_19_solutions/cy4etju
    # Each atom starts with an uppercase letter, hence the isupper count.
    print 'part B'
    print sum(map(str.isupper,inputstring)) - 2*inputstring.count('Rn') - 2*inputstring.count('Y') - 1
|
8,484 | fa46bd784dcfeee4f9012ffb6ab6731d2764c9fa | # 来源知乎 https://zhuanlan.zhihu.com/p/51987247
# 博客 https://www.cnblogs.com/yangecnu/p/Introduce-Binary-Search-Tree.html
"""
二叉查找树 (Binary Search Tree, BST)
特点 : left < root < right
若任意节点的左子树不空,则左子树上所有结点的 值均小于它的根结点的值;
若任意节点的右子树不空,则右子树上所有结点的值均大于它的根结点的值;
任意节点的左、右子树也分别为二叉查找树;
没有键值相等的节点(no duplicate nodes)。
缺点: 不平衡 所以引入平衡二叉树(常用实现方法有红黑树、AVL、替罪羊树、Treap、伸展树等)
本代码实现了 BST
查找 : 任意值 / 最大值 / 最小值 (查找所需最大次数等于高度)
插入 (递归 迭代) : 插入结果一定是插成叶节点了
删除 (递归 迭代): 当删除的节点没有子节点时 当删除的节点只有1个子节点时 当删除的节点有2个子节点时
"""
import logging
import functools
import time
logging.basicConfig(
level=logging.ERROR,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Node():
    """A binary-tree node: a payload plus left/right child references,
    exposed through data/left/right properties."""
    def __init__(self, data=None):
        self._data = data
        self._left = None
        self._right = None
    def __str__(self):
        return 'Node:<data:%s>, <left:%s>, <right:%s>' % (
            str(self._data), str(self._left), str(self._right))
    def _get_data(self):
        return self._data
    def _set_data(self, value):
        self._data = value
    data = property(_get_data, _set_data)
    def _get_left(self):
        return self._left
    def _set_left(self, value):
        self._left = value
    left = property(_get_left, _set_left)
    def _get_right(self):
        return self._right
    def _set_right(self, value):
        self._right = value
    right = property(_get_right, _set_right)
def check_null(func):
    """Decorator guarding BinarySearchTree methods against an empty tree.

    On an empty tree, the insert implementations (_insert/_insert2) create
    the root node from their first positional argument; every other wrapped
    method prints a warning and returns None instead of running.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kw):
        if self.__bool__():  # check if the BinarySearchTree() object is None
            return func(self, *args, **kw)
        else:
            if func.__name__ in ['_insert', '_insert2']:
                # First insertion into an empty tree: args[0] is the value.
                self._root = Node(args[0])
            else:
                print('The tree is empty')
    return wrapper
# class Ad():
# def nam(self):
# pass
#
# print(Ad().nam.__name__)
# # nam
class BinarySearchTree():
"""
如果非空,那么左子树的所有节点都小于根节点,右子树的所有节点都大于根节点,数为二叉搜索树。
左右子树都为二叉搜索树。
"""
def __init__(self):
self._root = None
def __str__(self):
""" yield 迭代器 """
tree2list = [x.data for x in self._generate_node()]
return 'the BinarySearchTree is %s' % tree2list
def __bool__(self):
if self._root is not None:
return True
else:
return False
@staticmethod
def _redirect(pre_node, is_left, target): # staticmethod no need pass self 与类对象无关
"""
将target节点赋值成 pre_node的is_left/right子节点
:param is_left: 将target赋成父节点 pre_node 的 left 还是 right 子节点
"""
if is_left:
pre_node.left = target
else:
pre_node.right = target
def _generate_node(self):
queue = [self._root]
while queue:
node = queue.pop(0)
yield node
queue.extend([x for x in (node.left, node.right) if x != None])
# (node.left, node.right) is tuple
@check_null
def _metal_find(self, value, node, alert=True):
"""
内部接口: 实现了基本的查找功能,并且实现了跟踪父节点和判断是否为左右子节点的功能
思 路: 比较简单
:param value:
:param node:
:param alert:
:return: node, _pre_node, is_left
找到的node, 该节点的父节点_pre_node, 该节点是_pre_node的左还是右节点bool(is_left)
"""
# if you want the pre_node and is_left get the specific value, let the node=root
is_left, _pre_node = None, None
while node and value != node.data:
# _pre_node 作用跟踪父节点
_pre_node = node
if value < node.data:
node = node.left
# is_left 作用跟踪是否为左子节点
is_left = True
elif value > node.data:
node = node.right
is_left = False
# while 循环完没找到,则node is None
# while 循环完找到的话,则node is not None 跳过if,return 找到的node
if alert and node is None: # alert and (node is None)
print('There is no node<%s>' % value)
return node, _pre_node, is_left
def find(self, value):
"""暴露给外面的接口,按值查找,返回节点"""
# *_ 除第一个外的其他返回值
result, *_ = self._metal_find(value, self._root)
return result
@check_null
def _insert(self, value, node): # node 实际往往是root
"""
recursive insert method
:param node: 树中存在的某个节点
:return: node: 插入的节点node 这样其实插入的node(value) 是叶节点
"""
# _insert函数最终结果是
# 1 找到value==node.data的节点即已有这个节点,执行print(),再返回这个节点
# 2 node is None,然后将此节点新建出来,执行node = Node(value)
if node is None:
node = Node(value)
else:
if value < node.data:
# _insert()返回待插入的节点 当前节点的左子节点 指向待插入的节点
node.left = self._insert(value, node.left)
elif value > node.data:
# _insert()返回待插入的节点 当前节点的右子节点 指向待插入的节点
node.right = self._insert(value, node.right)
else:
print('have the same value')
return node # 注意将node返回
@check_null
def _insert2(self, value):
"""
Iterative insert method
先 _metal_find() 迭代找到 value, 找到 value说明已存在,没找到 _redirect() 新建节点
"""
result, pre_node, is_left = self._metal_find(value, self._root, False) # 查找
if result is None: # 没找到通过self._redirect() 赋值
self._redirect(pre_node, is_left, Node(value))
else: # 找到说明已经存在
print('already have the value')
# 默认走循环的实现, 递归的程序栈很容易爆掉,并且test_insert()测试了下循环比递归快很多
def insert(self, value, isrecursion=False):
if isrecursion:
self._insert(value, self._root)
else:
self._insert2(value)
@check_null
def _find_extremum(self, node, by='max'):
"""
找 max min 节点
:return node:
"""
if by == 'max':
while node.right:
node = node.right
elif by == 'min':
while node.left:
node = node.left
return node
def findmax(self):
return self._find_extremum(self._root)
def findmin(self):
return self._find_extremum(self._root, by='min')
@check_null
def _delete(self, value, node):
""" recursion delete
step1: 通过value 与 node.data比较来找到要删除的节点
step2: 要删除的节点又有三种situations
situation1: 要删除的节点 是叶节点,没有子节点。
situation2: 要删除的节点 只有一个子节点。
situation3: 要删除的节点 有两个子节点。
:return: 删除完value以后的新的node
"""
if not node:
print('can\'t find')
else: # step1
# If the key to be deleted is smaller than the root's
# key then it lies in left subtree
if value < node.data:
node.left = self._delete(value, node.left)
# If the kye to be delete is greater than the root's key
# then it lies in right subtree
elif value > node.data:
node.right = self._delete(value, node.right)
# If key is same as root's key, then this is the node
# to be deleted
else: # step2
# Node with two children: Get the inorder successor 中序继承者
# 最后node.left = self._delete(tmp.data, node.left)其实转化成了
# 后边 Node with only one child or no child 的情形
### 可以找左子树的最大值或者右子树的最小值作为successor
### 而左子树的最大值或者右子树的最小值必然只有一个或零个节点
### 所以转化成了前边 Node with only one child or no child 的情形
if node.left and node.right:
# find the largest in the left subtree as successor
tmp = self._find_extremum(node.left) # default by max
# Copy the inorder successor's content to this node
node.data = tmp.data
# Delete the inorder successor
node.left = self._delete(tmp.data, node.left)
# Node with only one child or no child
else:
if node.left is None:
node = node.right
else:
node = node.left
return node # 最后层层返回
@check_null
def _delete2(self, value, node):
    """Iterative-style delete (recursion only for the successor removal).

    First: locate the node `result` holding `value`.
    Then: find and delete result's successor, and copy the successor's
    data into `result`.

    The two-children case in detail:
        1 find the node `result` holding `value`; it has two children
        2 take the max of result's left subtree as `tmp`; it has 0 or 1 child
        3 delete `tmp` from result's subtree (this hits the 0-or-1-child path)
        4 ...
    """
    # First: locate the node to delete (also gets parent and side)
    result, pre_node, is_left = self._metal_find(value, node)
    if result is None:
        return
    # Two children
    if result.left and result.right:
        tmp = self._find_extremum(result.left)  # Then: find result's successor
        self._delete2(tmp.data, result)  # Then: delete the successor; this recursion takes the "one child or none" branch below
        result.data = tmp.data  # copy the successor's data into the deleted slot
    # One child or none
    else:
        if result.left is None:
            # print('---')
            # print(id(result),id(result.right)) # 46446408 1352705168
            result = result.right
            # print(id(result)) # 1352705168
        else:
            result = result.left
    # Re-attach: make `result` the is_left child of pre_node
    self._redirect(pre_node, is_left, result)  # assign pre_node's child pointer
def delete(self, value, isrecursion=False):
    """Remove `value` from the tree (iterative implementation by default)."""
    if not isrecursion:
        return self._delete2(value, self._root)
    return self._delete(value, self._root)
def test_insert(value):
    """Benchmark recursive vs. iterative insertion of `value` sequential keys."""
    def _test(value, control=False):
        tree = BinarySearchTree()
        start = time.time()
        for key in range(value):
            tree.insert(key, isrecursion=control)
        end = time.time()
        print('the isrecursion control=%s, the time is: %s' % (control, end - start))
    # run the iterative path first, then the recursive one
    _test(value)
    _test(value, control=True)
def main():
    """Small demo: build a tree, search, insert, delete, printing each state."""
    # test_insert(100)
    tree = BinarySearchTree()
    for value in (7, 2, 9, 1, 4, 8, 10):
        tree.insert(value)
    print(tree)
    print(tree.find(4))
    tree.insert(3)
    print(tree)
    tree.delete(2)
    print(tree)
if __name__ == '__main__':
    main()
|
8,485 | 464be943f4fe34dda826ebada9e128f1d7d671ac | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Product page used as the fixture URL for the smoke test below.
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
def test_guest_should_see_button_add_to_basket(browser):
    """A guest visitor must see a clickable 'add to basket' button.

    `browser` is a selenium WebDriver instance (pytest fixture).
    """
    browser.get(link)
    # CSS classes of the add-to-basket button, for the By.CLASS_NAME lookup
    btn_add = "btn.btn-lg.btn-primary.btn-add-to-basket"
    # Wait up to 5 s for the button to become clickable;
    # WebDriverWait raises TimeoutException if it never does.
    found_button = WebDriverWait(browser, 5).until(
        EC.element_to_be_clickable((By.CLASS_NAME, btn_add))
    )
    assert found_button != False, 'Do not found the button of add to basket' |
8,486 | 41f71589d3fb9f5df218d8ffa0f608a890c73ad2 | # coding: utf-8
# 2021/5/29 @ tongshiwei
import logging
def get_logger():
    """Create (or fetch) the package logger "EduNLP".

    The logger emits INFO-level records to stderr through a StreamHandler
    and does not propagate to the root logger.

    BUG FIX: the original attached a new StreamHandler on *every* call, so
    calling get_logger() twice duplicated every log line; guard on the
    handler list so repeated calls are idempotent.

    :return: the configured logging.Logger instance
    """
    _logger = logging.getLogger("EduNLP")
    _logger.setLevel(logging.INFO)
    _logger.propagate = False
    if not _logger.handlers:  # avoid duplicate handlers on repeated calls
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))
        ch.setLevel(logging.INFO)
        _logger.addHandler(ch)
    return _logger
logger = get_logger()
|
8,487 | e2489f9d3041c45129fdd71da6652a6093c96d2d | #!/usr/bin/env python
# coding=utf-8
import sys,os
dir = '/home/ellen/yjoqm/fdfs_client/pic'
def scp_file(filename):
    """Copy `filename` from the remote pic directory into the cwd via scp.

    NOTE(review): `filename` is interpolated into a shell command without
    quoting, so a crafted argument could inject shell commands. Acceptable
    for a personal helper, but sanitize/quote if input is ever untrusted.
    """
    cmd = 'scp ellen@61.147.182.142:/home/ellen/yjoqm/fdfs_client/pic/%s .' %filename
    os.system(cmd)
def main(args):
args = sys.argv[1]
scp_file(args)
print 'done~~~~'
if __name__ == '__main__':
args = sys.argv
if len(args) < 1:
print 'usage: python scp_file xxxx'
sys.exit(2)
main(args)
|
8,488 | 155b243ad7d93bcf2b74cd5b2bd3409ab7ec7473 | from numpy import*
# Read the pattern size, then print `size` rows where stars shrink from the
# outside while the two mirrored 'o' cores grow toward the middle.
size = int(input('numero: '))
outer, inner = '*', 'o'
for row in range(size):
    wing = outer * (size - row)
    core = inner * row
    print(wing + core + core + wing)
|
8,489 | e90fb3b6009dd4fb780649c04398b361fa1ae195 | from collections import defaultdict
from django.shortcuts import render
from django.views.decorators.cache import cache_control
from peterbecom.plog.models import BlogItem, Category
from peterbecom.plog.utils import utc_now
from peterbecom.plog.views import json_view
# Cache lifetime shared by all views below: 30 days, in seconds.
ONE_MONTH = 60 * 60 * 24 * 30
@cache_control(public=True, max_age=ONE_MONTH)
def index(request):
    """Landing page listing the ajax-or-not demo variants."""
    return render(request, "ajaxornot/index.html")
def get_data(max_length=1000, pub_date_format=None, offset=0):
    """Return published blog posts as plain dicts for the demo templates.

    :param max_length: slice *stop* for the queryset (not a count)
    :param pub_date_format: optional callable applied to each pub_date
    :param offset: slice start, used by the paginated variants
    """
    # Map category id -> name once, then group category names per blog item.
    category_names = {c.id: c.name for c in Category.objects.all()}
    categories = defaultdict(list)
    for link in BlogItem.categories.through.objects.all():
        categories[link.blogitem_id].append(category_names[link.category_id])
    qs = BlogItem.objects.filter(pub_date__lt=utc_now()).order_by("-pub_date")
    items = []
    for post in qs[offset:max_length]:
        when = post.pub_date
        if pub_date_format:
            when = pub_date_format(when)
        items.append(
            {
                "title": post.title,
                "slug": post.oid,
                "pub_date": when,
                "keywords": [k for k in post.proper_keywords if k][:3],
                "categories": categories[post.id][:3],
            }
        )
    return items
@cache_control(public=True, max_age=ONE_MONTH)
def view1(request):
    """Server-rendered: full item list inlined into the page."""
    context = {"items": get_data()}
    return render(request, "ajaxornot/view1.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view2(request):
    """Shell page; the table HTML is fetched separately (view2_table)."""
    return render(request, "ajaxornot/view2.html")
@cache_control(public=True, max_age=ONE_MONTH)
def view2_table(request):
    """HTML fragment with the full table, loaded by view2 via AJAX."""
    context = {"items": get_data()}
    return render(request, "ajaxornot/view2_table.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view3(request):
    """Shell page; data arrives as JSON from view3_data."""
    return render(request, "ajaxornot/view3.html")
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view3_data(request):
    """JSON payload for view3, with pub_date pre-formatted as 'Month Year'."""
    return {"items": get_data(pub_date_format=lambda x: x.strftime("%B %Y"))}
@cache_control(public=True, max_age=ONE_MONTH)
def view4(request):
    """Server-rendered with pre-formatted dates."""
    data = get_data(pub_date_format=lambda x: x.strftime("%B %Y"))
    context = {"items": data}
    return render(request, "ajaxornot/view4.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5(request):
    """First 25 rows server-rendered; the rest loaded via view5_table."""
    context = {"items": get_data(max_length=25)}
    return render(request, "ajaxornot/view5.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view5_table(request):
    """HTML <tr> fragment with the remaining rows (after the first 25)."""
    context = {"items": get_data(offset=25)}
    return render(request, "ajaxornot/view5_trs.html", context)
@cache_control(public=True, max_age=ONE_MONTH)
def view6(request):
    """Shell page; data arrives as JSON from view6_data."""
    return render(request, "ajaxornot/view6.html")
@cache_control(public=True, max_age=ONE_MONTH)
@json_view
def view6_data(request):
    """JSON payload for view6, with pub_date pre-formatted as 'Month Year'."""
    return {"items": get_data(pub_date_format=lambda x: x.strftime("%B %Y"))}
@cache_control(public=True, max_age=ONE_MONTH)
def view7a(request):
    """Static comparison page, variant A."""
    return render(request, "ajaxornot/view7a.html")
@cache_control(public=True, max_age=ONE_MONTH)
def view7b(request):
    """Static comparison page, variant B."""
    return render(request, "ajaxornot/view7b.html")
|
8,490 | aee009b37b99bf44e27c608470c43834a58e0cc7 | # coding: UTF-8
from PIL import ImageFont,Image,ImageDraw
def min_element(table_d, ignoring_index=None):
    """Return (i, j, distance) for the smallest entry of the distance table.

    :param table_d: dict mapping (i, j) index pairs to distances
    :param ignoring_index: optional collection of indices to skip entirely
    :return: tuple (min_i, min_j, min_distance); (None, None, None) if no
             valid entry exists

    BUG FIX: the old version seeded the search with indices (0, 0) and the
    table's *maximum* value using a strict `>` comparison, so whenever every
    remaining candidate equalled that maximum it returned the bogus pair
    (0, 0). Seed from the first valid entry instead.
    """
    min_i = min_j = min_e = None
    for key, dist in table_d.items():
        # skip pairs touching an already-merged index
        if ignoring_index is not None:
            i, j = key
            if i in ignoring_index or j in ignoring_index:
                continue
        if min_e is None or dist < min_e:
            min_e = dist
            min_i, min_j = key
    return (min_i, min_j, min_e)
def to_dict(table):
    """Convert a square distance matrix into a symmetric (i, j) -> value dict.

    Diagonal entries are omitted; each off-diagonal value is stored under
    both (row, col) and (col, row).
    """
    pairwise = dict()
    for row in range(len(table)):
        for col in range(row):
            value = table[row][col]
            pairwise[(row, col)] = value
            pairwise[(col, row)] = value
    return pairwise
def next_key(d, original_length, ignoring_keys=[], attension_values=[]):
    """Pick the next cluster key to draw.

    A key is drawable when both of its children are either already-drawn
    cluster keys (in `ignoring_keys`) or original leaves (< original_length).
    Keys touching `attension_values` are preferred; otherwise the first
    drawable key is returned (None when nothing qualifies).
    """
    if not ignoring_keys:
        return min(d.keys())
    fallback = None
    for key, (v_1, v_2) in d.items():
        if key in ignoring_keys:
            continue
        first_ok = v_1 in ignoring_keys or v_1 < original_length
        second_ok = v_2 in ignoring_keys or v_2 < original_length
        if not (first_ok and second_ok):
            continue
        if fallback is None:
            fallback = key
        if v_1 in attension_values or v_2 in attension_values:
            return key
    return fallback
def main():
    """Run UPGMA clustering on the matrix in ./sample and draw the tree to out.png."""
    # in "sample" file
    #
    # 0 0.1 0.12 0.21
    # 0.1 0 0.04 0.13
    # 0.12 0.04 0 0.11
    # 0.21 0.13 0.11 0
    with open("sample","r") as f:
        lines = f.readlines()
    table = []
    for l in lines:
        row = [float(i) for i in l.split(" ")]
        table.append(row)
    table_d = to_dict(table)
    num_of_element = len(table)
    cluster = dict()
    cluster_num = dict()
    ignoring_index = []
    original_length = len(table)
    while True:
        # pick the smallest distance among pairs not in ignoring_index
        min_i,min_j,_ = min_element(table_d,ignoring_index)
        # both merged indices are ignored from now on
        ignoring_index.append(min_i)
        ignoring_index.append(min_j)
        new_cluster = num_of_element # treat the merged pair i & j as one new element
        cluster[new_cluster] = (min_i,min_j)
        cluster_num[new_cluster] = 0
        cluster_elements = 2
        if min_i in cluster_num.keys():
            cluster_num[new_cluster] += cluster_num[min_i]
            cluster_elements -= 1
        if min_j in cluster_num.keys():
            cluster_num[new_cluster] += cluster_num[min_j]
            cluster_elements -= 1
        cluster_num[new_cluster] += cluster_elements
        print(cluster_num)
        if max(cluster_num.values()) == original_length:
            print(cluster)
            print(cluster_num)
            print(table_d)
            print("UPGMA is end")
            break
        # number of original elements owned by each merged cluster
        weight_i = 1
        weight_j = 1
        if min_i in cluster_num.keys():
            weight_i = cluster_num[min_i]
        if min_j in cluster_num.keys():
            weight_j = cluster_num[min_j]
        for itr in range(num_of_element):
            if itr in ignoring_index:
                continue
            # update the distance table with the size-weighted (UPGMA) average
            table_d[(itr,new_cluster)] = (table_d[(itr,min_i)]*weight_i + table_d[(itr,min_j)]*weight_j) / float(weight_i + weight_j)
            table_d[(new_cluster,itr)] = (table_d[(itr,min_i)]*weight_i + table_d[(itr,min_j)]*weight_j) / float(weight_i + weight_j)
        num_of_element += 1
        if len(ignoring_index) - num_of_element == 1:
            # Once the remaining elements are two, the distance is obvious.
            break
    # image manipulation
    # ref: https://ailog.site/2020/03/09/0309/
    # from here on: draw the phylogenetic tree
    # a blank canvas image is assumed to exist already
    img = Image.open('base.png')
    width,height = img.size
    draw = ImageDraw.Draw(img)
    # padding
    top_padding = int(height*0.01)
    bottom_padding = int(height*0.01)
    right_padding = int(width*0.01)
    left_padding = int(width*0.01)
    # vertical space reserved for the leaf labels
    label_height = 64
    # height available for the tree itself
    main_frame_height = height - top_padding - bottom_padding - label_height
    # scale factor mapping (distance / 2) onto the tree height
    height_scaler = main_frame_height / float(max(table_d.values()) / 2 )
    # horizontal spacing between labels
    interval = int((width - right_padding - left_padding) / (original_length+1))
    font = ImageFont.truetype("arial.ttf", 32) # font size is 32
    ignoring_keys = []
    attension_values = []
    painted_number = 0
    cluster_x = dict()
    cluster_y = dict()
    cluster_stack = dict()
    for i in range(original_length):
        cluster_y[i] = top_padding + main_frame_height
        cluster_stack[i] = 0.
    while True:
        key = next_key(cluster,original_length,ignoring_keys,attension_values)
        if key in attension_values:
            attension_values.remove(key)
        if key is None:
            break
        i,j = cluster[key]
        if not i in cluster_x.keys():
            cluster_x[i] = left_padding + interval * (painted_number + 1)
            painted_number += 1
        if not j in cluster_x.keys():
            cluster_x[j] = left_padding + interval * (painted_number + 1)
            painted_number += 1
        cluster_x[key] = int((cluster_x[i] + cluster_x[j]) / 2)
        edge_height = int((table_d[(i,j)] * height_scaler / 2))
        cluster_y[key] = top_padding + main_frame_height - edge_height
        if not key in cluster_stack.keys():
            cluster_stack[key] = table_d[(i,j)] / 2
        draw.line((cluster_x[i], cluster_y[i], cluster_x[i], cluster_y[key]), fill=(0, 0, 0), width=10)
        draw.line((cluster_x[j], cluster_y[j], cluster_x[j], cluster_y[key]), fill=(0, 0, 0), width=10)
        draw.line((cluster_x[i], cluster_y[key], cluster_x[j], cluster_y[key]), fill=(0, 0, 0), width=10)
        round_num = 3
        # branch-length label for child i
        value = round(table_d[(i,j)] / 2 - cluster_stack[i], round_num)
        value_text = str(value)
        size = font.getsize(value_text)
        value_x = cluster_x[i] - int(size[0]*1.05)
        value_y = int((cluster_y[i] + cluster_y[key]) / 2)
        draw.text((value_x, value_y), value_text, font=font, fill='#0000ff')
        # branch-length label for child j
        value = round(table_d[(i,j)] / 2 - cluster_stack[j], round_num)
        value_text = str(value)
        size = font.getsize(value_text)
        value_x = cluster_x[j] - int(size[0]*1.05)
        value_y = int((cluster_y[j] + cluster_y[key]) / 2)
        draw.text((value_x, value_y), value_text, font=font, fill='#0000ff')
        ignoring_keys.append(key)
        attension_values.append(key)
    font = ImageFont.truetype("arial.ttf", 64) # font size is 64
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    for i in range(original_length):
        # a label dictionary here would allow arbitrary (non-letter) labels
        text = alphabet[i]
        size = font.getsize(text)
        left_x = cluster_x[i] - (size[0] / 2)
        print(left_x)
        top_y = top_padding + main_frame_height
        # draw the leaf label; fill '#000000' is black
        draw.text((left_x, top_y), text, font=font, fill='#000000')
    # save the file
    img.save('out.png', 'PNG', quality=100, optimize=True)
    input("push enter")
if __name__ == "__main__":
    main()
8,491 | 9d1b795b561a26ae28e82833485ca6034438e78b | #!/usr/bin/env python
'''
Created on 2011-08-27
@author: xion
Setup script for the seejoo project.
'''
import ast
import os
from setuptools import find_packages, setup
def read_tags(filename):
    """Read the "magic tag" dunder assignments defined in a Python file.

    Parses the file with `ast` (never executes it) and collects every
    top-level-style assignment of the form ``__name__ = <literal>``.

    :param filename: Python filename to read the tags from
    :return: dict mapping tag name (without underscores) to its value
    """
    with open(filename) as f:
        tree = ast.parse(f.read(), filename)
    tags = {}
    for node in ast.walk(tree):
        if type(node) is not ast.Assign:
            continue
        target = node.targets[0]
        if type(target) is not ast.Name:
            continue
        ident = target.id
        # only dunder names like __version__ are considered tags
        if ident.startswith('__') and ident.endswith('__'):
            tags[ident[2:-2]] = ast.literal_eval(node.value)
    return tags
# Package metadata (version, author, license) is read from the package
# itself so it is defined in exactly one place.
tags = read_tags(os.path.join('seejoo', '__init__.py'))
setup(
    name="seejoo",
    version=tags['version'],
    description='Extensible IRC bot for geek-centered channels',
    long_description=open("README.markdown").read(),
    author=tags['author'],
    url='http://github.com/Xion/seejoo',
    license=tags['license'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: No Input/Output (Daemon)',
        'Framework :: Twisted',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Communications :: Chat :: Internet Relay Chat',
        'Topic :: Utilities',
    ],
    # dependencies are maintained in requirements.txt, one per line
    install_requires=open('requirements.txt').readlines(),
    packages=find_packages(exclude=['tests']),
    entry_points={
        'console_scripts': ['seejoo=seejoo.main:main']
    },
)
|
8,492 | 66b7d928bc2c98a12f7adb8a375ced21edce8333 | # Imports
import os
import time
import math
import random
from lib import *
def MT19937_keystream_generator(seed: int) -> bytes:
    """
    Generate keystream for MT19937: yields the PRNG's 32-bit outputs
    one byte at a time (big-endian), indefinitely.
    """
    # Verify that the seed is atmost 16 bit long.
    # NOTE(review): log2(seed) <= 16 also admits seed == 2**16, which is a
    # 17-bit value; `seed < 2**16` would match the stated intent exactly.
    assert math.log2(seed) <= 16
    prng = MT19937(seed)
    while True:
        number = prng.extract_number()
        # emit the 32-bit word as four big-endian bytes
        yield from number.to_bytes(4, "big")
def MT19937_CTR(string: str, seed: int) -> bytes:
    """
    Encrypts a plaintext with MT19937 CTR Mode.

    XOR-based, so the same call both encrypts and decrypts.
    NOTE(review): despite the annotation, `string` is used as a bytes-like
    object (it is zipped byte-wise with the keystream).
    """
    # Verify that the seed is an integer.
    assert isinstance(seed, int)
    keystream = MT19937_keystream_generator(seed)
    if len(string) == 0:
        return b""
    else:
        # XOR each plaintext byte with the next keystream byte
        return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])
def main():
    """Demo: encrypt a padded plaintext with MT19937-CTR, then brute-force the seed."""
    plaintext = "Hello World!"
    # append random characters before plainttext
    string = b""
    for _ in range(random.randint(0, 10)):
        i = random.randint(33, 126)
        string += chr(i).encode()
    string += plaintext.encode()
    seed = random.randint(1, 2**16)
    print("> Seed value coded to be", seed)
    cipher_bytes = MT19937_CTR(string, seed)
    deciphered_bytes = MT19937_CTR(cipher_bytes, seed)
    # verify if it can be decrypted
    assert string == deciphered_bytes
    # The seed space is only 16 bits, so exhaustive search is feasible:
    # try every candidate seed until decryption reproduces the plaintext.
    for seed in range(1, 2**16):
        deciphered_bytes = MT19937_CTR(cipher_bytes, seed)
        try:
            assert string == deciphered_bytes
            print("> Brute force successful.\nSeed:", seed)
            break
        except AssertionError:
            continue
    return
if __name__=="__main__":
    main()
|
8,493 | 5f8a9d82a3245671b438475d1fac7be4db769fbe | from Monument import Monument, Dataset
import importer_utils as utils
import importer as importer
class RoRo(Monument):
    """Romanian (ro/ro) heritage-monument row mapped onto Wikidata statements."""
    def set_adm_location(self):
        """Set the administrative location from the county ISO code."""
        counties = self.data_files["counties"]
        self.set_from_dict_match(counties, "iso_code",
                                 "judetul_iso", "located_adm")
    def set_location(self):
        """
        Set Location property from article linked in localitate.
        Run this after set_adm_location. localitate can
        contain several links (we take the 1st which seems to
        be the most granular one) and a mix of administrative
        types. Compare with admin location so that they're not
        the same.
        """
        if self.has_non_empty_attribute("localitate"):
            loc_item = None
            if utils.count_wikilinks(self.localitate) > 0:
                loc_link = utils.get_wikilinks(self.localitate)[0]
                loc_item = utils.q_from_wikipedia("ro", loc_link.title)
                adm_item = self.get_statement_values("located_adm")
                # only add when distinct from the administrative location
                if loc_item and loc_item != adm_item[0]:
                    self.add_statement("location", loc_item)
            if not loc_item:
                self.add_to_report("localitate", self.localitate, "location")
    def set_heritage_id(self):
        """Store the national monument code as the heritage identifier."""
        self.add_statement("romanian_monument_id", self.cod)
    def update_descriptions(self):
        """Build the English description, naming the county when resolvable."""
        adm_code = self.judetul_iso
        counties = self.data_files["counties"]
        county_item = utils.get_item_from_dict_by_key(dict_name=counties,
                                                      search_term=adm_code,
                                                      return_content_of="itemLabel",
                                                      search_in="iso_code")
        # fall back to the bare country when the county lookup is ambiguous
        if len(county_item) == 1:
            place_name = "{}, Romania".format(county_item[0])
        else:
            place_name = "Romania"
        desc = "heritage site in {}".format(place_name)
        self.add_description("en", desc)
        self.add_disambiguator(str(self.cod))
    def set_address(self):
        """Map `adresa` to a street address or, failing that, to directions."""
        street_patterns = ("piața", "str.", "bd.")
        if self.has_non_empty_attribute("adresa"):
            adr_lower = self.adresa.lower()
            adr_nice = utils.remove_markup(self.adresa)
            # looks like a street address -> located_street, suffixed with town
            if any(pattern in adr_lower for pattern in street_patterns):
                if self.has_non_empty_attribute("localitate"):
                    town = utils.remove_markup(self.localitate)
                    adr_nice = "{}, {}".format(adr_nice, town)
                self.add_statement("located_street", adr_nice)
            else:
                # free-form text -> monolingual Romanian "directions"
                directions = utils.package_monolingual(adr_nice, 'ro')
                self.add_statement("directions", directions)
    def update_labels(self):
        """Use the cleaned-up monument name as the Romanian label."""
        romanian = utils.remove_markup(self.denumire)
        self.add_label("ro", romanian)
    def __init__(self, db_row_dict, mapping, data_files, existing, repository):
        """Populate all statements/labels for one database row."""
        Monument.__init__(self, db_row_dict, mapping,
                          data_files, existing, repository)
        self.set_monuments_all_id("cod")
        self.set_changed()
        self.set_wlm_source()
        self.set_heritage_id()
        self.set_heritage()
        self.set_country()
        self.set_adm_location()
        self.set_address()
        self.set_location()
        self.set_coords()
        self.set_commonscat()
        self.set_image("imagine")
        self.update_labels()
        self.update_descriptions()
        self.set_wd_item(self.find_matching_wikidata(mapping))
if __name__ == "__main__":
    """Command line entry point for importer."""
    args = importer.handle_args()
    dataset = Dataset("ro", "ro", RoRo)
    dataset.data_files = {"counties": "romania_counties.json"}
    importer.main(args, dataset)
|
8,494 | 9683c7df01eda0d97615fb3e8f9496ecc95d1d32 | # -*- coding: utf-8 -*-
"""
Created on Wed May 15 17:05:30 2019
@author: qinzhen
"""
import numpy as np
# =============================================================================
# Q5
# =============================================================================
#### Part 1: compute the MLE trigram probabilities
File = "ner_proc.counts"
q2 = {}
q3 = {}
with open(File) as f:
    for i in f.readlines():
        # split the record into fields: count, type, then the n-gram tags
        data = i.split()
        # dispatch on the n-gram type
        if data[1] == "2-GRAM":
            q2[(data[2], data[3])] = float(data[0])
            q3[(data[2], data[3])] = {}
        elif data[1] == "3-GRAM":
            q3[(data[2], data[3])][(data[2], data[3], data[4])] = float(data[0])
# MLE estimate: q(w | u, v) = count(u, v, w) / count(u, v)
q = {}
for i in q3:
    for j in q3[i]:
        q[j] = q3[i][j] / q2[i]
'''
#计算对数概率
for j in q:
    res = ' '.join(j) + " : " + str(math.log(q[j]))
    print(res)
'''
#### Part 2
def Viterbi(sentence, q, e):
    """Viterbi decoding for a trigram HMM tagger.

    :param sentence: list of words
    :param q: trigram transition probabilities q(v | w, u)
    :param e: emission probabilities, keyed by word then (tag, word)
    :return: (tag_sequence, probability)

    NOTE(review): relies on module-level Count_x / Count_y for rare-word
    handling and the tag set; these are not defined in this file — confirm
    they are provided before calling.
    """
    # K_0 = *
    # tag set
    K = list(Count_y.keys())
    # dynamic-programming table
    Pi = {}
    # backpointer table
    bp = {}
    # number of words
    n = len(sentence)
    for i in range(n + 1):
        Pi[i-1] = {}
        bp[i-1] = {}
    # initialisation: before the sentence, both previous tags are '*'
    Pi[-1][("*", "*")] = 1
    # iterate over the words of the sentence
    for k in range(n):
        # candidate tag sets for positions k-2, k-1, k
        K0 = K
        K1 = K
        K2 = K
        if k == 0:
            K0 = ["*"]
            K1 = ["*"]
        elif k == 1:
            K0 = ["*"]
        '''
        elif k == n-1:
            K2 = K + ["STOP"]
        '''
        # main DP loop
        for u in K1:
            for v in K2:
                p = 0
                w_arg = ""
                key = sentence[k]
                # low-frequency words are mapped to the _RARE_ token
                if key not in Count_x or Count_x[key] < 5:
                    key = "_RARE_"
                for w in K0:
                    if (w, u) in Pi[k-1] and (w, u, v) in q and (v, key) in e[key]:
                        p1 = Pi[k-1][(w, u)] * q[(w, u, v)] * e[key][(v, key)]
                        if p1 > p:
                            p = p1
                            w_arg = w
                Pi[k][(u, v)] = p
                bp[k][(u, v)] = w_arg
    # recover the last two tags via the STOP transition
    y0 = ""
    y1 = ""
    pmax = 0
    for u in K:
        for v in K:
            if (u, v) in Pi[n-1] and (u, v, "STOP") in q:
                p = Pi[n-1][(u, v)] * q[(u, v, "STOP")]
                if p > pmax:
                    pmax = p
                    y0 = u
                    y1 = v
    tag = [y1, y0]
    for k in range(n-3, -1, -1):
        y = bp[k+2][(y0, y1)]
        tag.append(y)
        # shift the two-tag window backwards
        y1 = y0
        y0 = y
    # reverse into sentence order, dropping the leading '*' padding slots
    tag = tag[::-1][2:]
    return tag, pmax
# Tag every sentence and persist "tags probability" lines to disk.
res_viterbi = []
for sentence in Sentence:
    # print(sentence)
    tag, p = Viterbi(sentence, q, e_proc)
    res_viterbi.append(" ".join(tag) + " " + str(p) + "\n")
File = "ner_dev_viterbi.dat"
with open(File, "w+") as f:
    # BUG FIX: the loop previously iterated over the undefined name `res`
    # (a NameError at runtime); the results list built above is `res_viterbi`.
    for line in res_viterbi:
        f.write(line)
8,495 | 1280ab66b817011e22e560a78104bbc4340989e7 | planet_list = ["Mercury", "Mars"]
# Extend the outer planets in one go, splice the two inner planets into
# position, then add and immediately drop Pluto (demoted in 2006).
planet_list.extend(["Jupiter", "Saturn", "Uranus", "Neptune"])
planet_list[1:1] = ["Venus", "Earth"]
planet_list.append("Pluto")
planet_list.pop()
print(planet_list)
8,496 | d7b830890400203ee45c9ec59611c0b20ab6bfc7 | import xadmin
from xadmin import views
from .models import EmailVerifyRecord, Banner
class BaseMyAdminView(object):
    '''
    Global xadmin view options.

    enable_themes: allow switching the admin theme
    use_bootswatch: enable the online Bootswatch theme catalogue
    '''
    enable_themes = True
    use_bootswatch = True
class GlobalSettings(object):
    '''
    Global chrome for the xadmin backend.

    site_title: title shown in the top-left corner
    site_footer: footer text at the bottom of every page
    menu_style: style of the left-hand menu
    '''
    site_title = "学习网后台管理系统"
    site_footer = "学习网"
    menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
    """xadmin list/search/filter options for the EmailVerifyRecord model."""
    list_display = ['email', 'code', 'send_type', 'send_time']
    search_fields = ['email', 'code', 'send_type']
    list_filter = ['email', 'code', 'send_type', 'send_time']
class BannerAdmin(object):
    """xadmin list/search/filter options for the Banner model.

    BUG FIX: the list-columns attribute was misspelled `list_disply`,
    so xadmin silently ignored it and fell back to its default columns.
    """
    list_display = ['title', 'image', 'url', 'index', 'add_time']
    search_fields = ['title', 'image', 'url', 'index']
    list_filter = ['title', 'image', 'url', 'index', 'add_time']
# Register the models and global view/option classes with xadmin.
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseMyAdminView)
xadmin.site.register(views.CommAdminView, GlobalSettings)
8,497 | 35e61add90b5c12f94d5f8071f00d98316461dd6 | #!/usr/bin/python
from cagd.polyline import polyline
from cagd.spline import spline, knots
from cagd.vec import vec2
import cagd.scene_2d as scene_2d
from math import sin,cos,pi, sqrt
#returns a list of num_samples points that are uniformly distributed on the unit circle
def unit_circle_points(num_samples):
    step = 2 * pi / num_samples
    points = []
    for k in range(num_samples):
        angle = step * k
        points.append(vec2(cos(angle), sin(angle)))
    return points
# Deviation of a spline from the unit circle centred at the origin:
# the mean Euclidean distance of its control points from the centre,
# minus the ideal radius 1.0.
def calculate_circle_deviation(spline):
    center_x, center_y = 0.0, 0.0
    ideal_radius = 1.0
    total = 0.0
    for point in spline.control_points:
        total += sqrt((point.x - center_x) ** 2 + (point.y - center_y) ** 2)
    mean_radius = total / len(spline.control_points)
    return mean_radius - ideal_radius
#interpolate 6 points with a periodic spline to create the number "8"
pts = [vec2( 0, 2.5), vec2(-1, 1), vec2( 1,-1), vec2( 0,-2.5), vec2(-1,-1), vec2(1,1)]
s = spline.interpolate_cubic_periodic(pts)
p = s.get_polyline_from_control_points()
p.set_color("blue")
# set up the scene and draw both the spline and its control polyline
sc = scene_2d.scene()
sc.set_resolution(900)
sc.add_element(s)
sc.add_element(p)
#generate a spline that approximates the unit circle
n = 100
circle_pts = unit_circle_points(n)
circle = spline.interpolate_cubic_periodic(circle_pts)
p_circle = circle.get_polyline_from_control_points()
#sc.add_element(circle)
#sc.add_element(p_circle)
p_circle.set_color("blue")
# report how far the approximation deviates from a true unit circle
error = calculate_circle_deviation(circle)
print("The error is: " + str(error))
sc.write_image()
sc.show()
|
8,498 | 1253e052865860a6895f91204a70152745b04652 | # -*- coding: utf-8 -*-
from math import acos, pi, sqrt
from decimal import Decimal, getcontext
getcontext().prec = 30
class Vector(object):
    """Arbitrary-dimension vector backed by a tuple of Decimals.

    BUG FIXES relative to the previous revision:
    * component_orthogonal_to called the non-existent self.project();
      the projection helper is named component_project_to.
    * __setitem__ assigned into the coordinates tuple (always TypeError);
      it now rebuilds the tuple with the new value.
    * cross raised NameError for dimensions > 3; it now raises ValueError.
    """

    NO_NONZERO_ELTS_FOUND_MSG = 'No nonzero elements found'

    def __init__(self, coordinates):
        try:
            if not coordinates:
                raise ValueError
            self.coordinates = tuple([Decimal(x) for x in coordinates])
            self.dimension = len(coordinates)
        except ValueError:
            raise ValueError('The coordinates must be nonempty')
        except TypeError:
            raise TypeError('The coordinates must be an iterable')

    def __str__(self):
        return 'Vector: {}'.format(self.coordinates)

    def __eq__(self, v):
        return self.coordinates == v.coordinates

    def iszero(self, tolerance=1e-10):
        """True when the magnitude is below `tolerance`."""
        return self.magnitude() < tolerance

    def plus(self, v):
        """Component-wise sum with another Vector of the same dimension."""
        if isinstance(v, Vector):
            if self.dimension == v.dimension:
                return Vector([x + y for x, y in zip(self.coordinates, v.coordinates)])
            else:
                raise ValueError('dimension not match.')
        else:
            raise TypeError('not a Vector.')

    def minus(self, v):
        """Component-wise difference with another Vector of the same dimension."""
        if isinstance(v, Vector):
            if self.dimension == v.dimension:
                return Vector([x - y for x, y in zip(self.coordinates, v.coordinates)])
            else:
                raise ValueError('dimension not match.')
        else:
            raise TypeError('not a Vector.')

    def time_scalar(self, scalar):
        """Return self scaled by a numeric `scalar`."""
        try:
            return Vector([Decimal(scalar) * x for x in self.coordinates])
        except Exception:
            raise TypeError('{0} is not a number'.format(scalar))

    def magnitude(self):
        """Euclidean norm as a Decimal."""
        return Decimal(sqrt(sum([x**2 for x in self.coordinates])))

    def normalize(self):
        """Return the unit vector in the same direction; error on zero vectors."""
        if self.iszero():
            raise ValueError("Can't normalize a zero vector.")
        else:
            return self.time_scalar(Decimal(1.0) / self.magnitude())

    def dot(self, v):
        """Dot product with another Vector of the same dimension."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        else:
            if self.dimension != v.dimension:
                raise ValueError('dimension not match.')
            else:
                return sum([x * y for x, y in zip(self.coordinates, v.coordinates)])

    def angle_with(self, v, in_degree=False, tolerance=1e-10):
        """Angle between self and `v`, in radians (or degrees if requested)."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.dimension != v.dimension:
            raise ValueError('dimension not match.')
        d = self.dot(v) / (self.magnitude() * v.magnitude())
        # clamp rounding noise so acos never receives a value outside [-1, 1]
        if abs(abs(d) - 1) < tolerance:
            d = 1 if d > 0 else -1
        elif abs(d) < tolerance:
            d = 0
        if in_degree:
            return acos(d) / pi * 180
        else:
            return acos(d)

    def is_parallel_to(self, v):
        """Parallel test via normalized sum/difference (zero vectors count as parallel)."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.iszero() or v.iszero():
            return True
        v1 = self.normalize()
        v2 = v.normalize()
        return (v1.minus(v2).iszero() or
                v1.plus(v2).iszero())

    def is_parallel_to2(self, v):
        """Parallel test via scaling one vector onto the other."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        if self.iszero() or v.iszero():
            return True
        n = Vector.first_nonzero_index(self.coordinates)
        if (v.coordinates[n] == 0):
            return False
        # scale the smaller component up to avoid dividing by a tiny value
        if abs(self.coordinates[n]) <= abs(v.coordinates[n]):
            return self.time_scalar(v.coordinates[n] / self.coordinates[n]).minus(v).iszero()
        else:
            return v.time_scalar(self.coordinates[n] / v.coordinates[n]).minus(self).iszero()

    def is_parallel_to3(self, v):
        """Parallel test via the angle (0 or pi)."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return (self.iszero() or
                v.iszero() or
                self.angle_with(v) == 0 or
                self.angle_with(v) == pi)

    def is_orthogonal_to(self, v, tolerance=1e-10):
        """True when the dot product is (numerically) zero."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return abs(self.dot(v)) < tolerance

    def component_project_to(self, v):
        """Projection of self onto the direction of `v`."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return v.normalize().time_scalar(self.dot(v.normalize()))

    def component_orthogonal_to(self, v):
        """Component of self orthogonal to `v` (self minus its projection on v)."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        # BUG FIX: previously called the non-existent self.project(v)
        return self.minus(self.component_project_to(v))

    def cross(self, v):
        """Cross product; 2D vectors are treated as 3D with z = 0."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        r = []
        if ((self.dimension != v.dimension) or
                (self.dimension == 1) or
                (v.dimension == 1)):
            raise ValueError('dimensions not match')
        # BUG FIX: dimensions above 3 previously fell through to a NameError
        if self.dimension > 3:
            raise ValueError('cross product is only defined for 2D/3D vectors')
        if (self.dimension == v.dimension == 2):
            z1 = z2 = Decimal(0.0)
        if (self.dimension == v.dimension == 3):
            z1 = self.coordinates[2]
            z2 = v.coordinates[2]
        r.append(self.coordinates[1] * z2 - v.coordinates[1] * z1)
        r.append(v.coordinates[0] * z1 - self.coordinates[0] * z2)
        r.append(self.coordinates[0] * v.coordinates[1] - v.coordinates[0] * self.coordinates[1])
        return Vector(r)

    def parallelogram_area(self, v):
        """Area of the parallelogram spanned by self and `v`."""
        if not isinstance(v, Vector):
            raise TypeError('not a Vector')
        return self.cross(v).magnitude()

    @staticmethod
    def first_nonzero_index(iterable):
        """Index of the first element that is not numerically zero."""
        for k, item in enumerate(iterable):
            if not MyDecimal(item).is_near_zero():
                return k
        raise Exception(Vector.NO_NONZERO_ELTS_FOUND_MSG)

    def __getitem__(self, i):
        return self.coordinates[i]

    def __setitem__(self, i, x):
        # BUG FIX: tuples are immutable, so the old in-place assignment
        # always raised TypeError; rebuild the tuple with the new value.
        coords = list(self.coordinates)
        coords[i] = Decimal(x)
        self.coordinates = tuple(coords)
class MyDecimal(Decimal):
    """Decimal subclass with a convenience near-zero predicate."""

    def is_near_zero(self, eps=1e-10):
        """True when the absolute value is below `eps`."""
        return abs(self) < eps
8,499 | 15a7f6a63536ed24b6cf17395643476c689ec99b | N, M, T = map(int, input().split())
# (a, b) pairs: arrival/departure times of each cafe on the way home.
AB = [list(map(int, input().split())) for i in range(M)]
now_time = 0
battery = N
ans = 'Yes'
for a, b in AB:
    # battery drained while travelling to this cafe
    battery -= a-now_time
    if battery <= 0:
        ans = 'No'
        break
    # recharge at the cafe, capped at the maximum capacity N
    battery += b-a
    battery = min(battery, N)
    # current time becomes the moment we leave the cafe
    now_time = b
# drain from leaving the last cafe until arriving home at time T
battery -= T-now_time
if battery <= 0:
    ans = 'No'
print(ans)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.