index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
994,000 | df10aa796406b3d5f71fb1f6b28f21551d418e79 | import os.path
import json_lines
import numpy as np
import re
from underthesea import word_tokenize
# PATH_QUESTION_ANSWER = '/Users/tienthanh/Projects/ML/datapool/tgdd_qa/QA-example.jl'
PATH_QUESTION_ANSWER = '/Users/tienthanh/Projects/ML/datapool/tgdd_qa/iphone-6-32gb-gold.jl'
PATH_TO_STOPWORDS = '/Users/tienthanh/Projects/ML/QAAutomation/vietnamese-stopwords-dash.txt'
def get_qa_by_id(arr_id_cmt):
    """Yield QA records from the JSON-lines file whose 'id_cmt' is in arr_id_cmt."""
    with open(PATH_QUESTION_ANSWER) as fh:
        for record in json_lines.reader(fh):
            if record['id_cmt'] in arr_id_cmt:
                yield record
def union_multi_arr(*args):
    """Return the set union of all given iterables (empty set for no args)."""
    merged = set()
    for iterable in args:
        merged |= set(iterable)
    return merged
def save_result_to_file(query, results_id):
    """Ensure the 'search_result' output directory exists.

    NOTE(review): `query` and `results_id` are currently unused — the function
    only prepares the output directory; confirm whether writing the results
    themselves was left unimplemented.
    """
    # makedirs(exist_ok=True) replaces the exists()+mkdir() pair, which was
    # racy (TOCTOU) and would crash if the directory appeared in between.
    os.makedirs('search_result', exist_ok=True)
def is_valid_qa(qa):
    """Return True when the QA dict has non-None id and non-empty question/answer."""
    question, answer, comment_id = qa['question'], qa['answer'], qa['id_cmt']
    if question is None or answer is None or comment_id is None:
        return False
    return len(question) > 0 and len(answer) > 0
def customize_string(string):
    """Normalize a sentence and run Vietnamese word tokenization on it.

    Lowercases, maps punctuation and NBSP to spaces, joins hyphenated tokens
    with underscores, collapses whitespace, then returns the tokenized text.
    """
    # Fix: removed the unused `replacer_arr` local that duplicated (part of)
    # the replacement list below without ever being read.
    string = string.lower().replace('\xa0', ' ')\
        .replace('.', ' ').replace(',', ' ')\
        .replace('?', ' ').replace('!', ' ')\
        .replace('/', ' ').replace('-', '_') \
        .replace(':', ' ') \
        .strip()
    # Raw string avoids the invalid-escape-sequence DeprecationWarning of '\s'.
    string = re.sub(r'\s+', ' ', string).strip()
    return word_tokenize(string, format="text")
def get_stopwords():
    """Load the stop-word list, one word per line, from PATH_TO_STOPWORDS."""
    with open(PATH_TO_STOPWORDS, 'r') as fh:
        content = fh.read()
    return content.splitlines()
def remove_stopword(string):
    """Remove stop-words from a space-separated (tokenized) string.

    BUG FIX: the previous implementation used str.replace() per stop-word,
    which (a) deleted stop-words occurring as *substrings* of longer tokens
    and (b) shadowed the builtin `str` as a loop variable. Filtering whole
    tokens fixes both; a set gives O(1) membership tests.
    """
    stopwords = set(get_stopwords())
    kept_tokens = [token for token in string.split(' ') if token not in stopwords]
    return re.sub(r'\s+', ' ', ' '.join(kept_tokens)).strip()
def customize_and_remove_stopword(string):
    """Normalize + tokenize the string, then strip stop-words from it."""
    return remove_stopword(customize_string(string))
def caculate_AP(arr):
    """Average precision of a binary relevance list (1 = relevant, 0 = not).

    Returns 0 when the list contains no relevant items. (Function name kept
    as-is — including the typo — for backward compatibility with callers.)
    """
    total_relevant = sum(1 for v in arr if v == 1)
    if total_relevant == 0:
        return 0
    hits = 0
    precision_sum = 0
    for position, v in enumerate(arr, start=1):
        if v == 1:
            hits += 1
            # Precision@position, accumulated only at relevant positions.
            precision_sum += hits / position
    return precision_sum / total_relevant
|
994,001 | 4d62e30b611b5c9b520ecea8d9145746f4f6cd6a | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import build_assigner, build_sampler
from mmtrack.models.track_heads import QuasiDenseEmbedHead
def test_quasi_dense_embed_head():
    """Smoke-test QuasiDenseEmbedHead: forward, match, get_targets and loss."""
    # Head config: 4 convs + 1 fc producing 256-d track embeddings, with
    # the main multi-positive CE track loss and an auxiliary L2 loss.
    cfg = mmcv.Config(
        dict(
            num_convs=4,
            num_fcs=1,
            embed_channels=256,
            norm_cfg=dict(type='GN', num_groups=32),
            loss_track=dict(type='MultiPosCrossEntropyLoss', loss_weight=0.25),
            loss_track_aux=dict(
                type='L2Loss',
                neg_pos_ub=3,
                pos_margin=0,
                neg_margin=0.1,
                hard_mining=True,
                loss_weight=1.0)))
    self = QuasiDenseEmbedHead(**cfg)
    # Key proposals 0 and 1 match reference ground truths 0 and 1.
    gt_match_indices = [torch.tensor([0, 1])]
    proposal_list = [
        torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874],
                      [23.6667, 23.8757, 228.6326, 153.8874]])
    ]
    gt_bboxes = [
        torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874],
                      [23.6667, 23.8757, 228.6326, 153.8874]])
    ]
    gt_labels = [torch.LongTensor([1, 1])]
    feats = torch.rand(2, 256, 7, 7)
    key_sampling_results = _dummy_bbox_sampling(feats, proposal_list,
                                                gt_bboxes, gt_labels)
    # Reference frame deliberately reuses the key frame's samples/embeddings.
    ref_sampling_results = key_sampling_results
    key_embeds = self.forward(feats)
    ref_embeds = key_embeds
    match_feats = self.match(key_embeds, ref_embeds, key_sampling_results,
                             ref_sampling_results)
    asso_targets = self.get_targets(gt_match_indices, key_sampling_results,
                                    ref_sampling_results)
    loss_track = self.loss(*match_feats, *asso_targets)
    # NOTE(review): the message says "zero" but the check is non-negative —
    # message likely should read "track loss should be non-negative".
    assert loss_track['loss_track'] >= 0, 'track loss should be zero'
    assert loss_track['loss_track_aux'] > 0, 'aux loss should be non-zero'
def _dummy_bbox_sampling(feats, proposal_list, gt_bboxes, gt_labels):
    """Create sample results that can be passed to Head.get_targets.

    Runs a MaxIoU assignment followed by a CombinedSampler per image and
    returns one SamplingResult per image.
    """
    num_imgs = len(proposal_list)
    # IoU-based assignment of proposals to ground-truth boxes.
    assign_config = dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.5,
        ignore_iof_thr=-1)
    # Balanced positives + random negatives; GT boxes added as proposals.
    sampler_config = dict(
        type='CombinedSampler',
        num=4,
        pos_fraction=0.5,
        neg_pos_ub=3,
        add_gt_as_proposals=True,
        pos_sampler=dict(type='InstanceBalancedPosSampler'),
        neg_sampler=dict(type='RandomSampler'))
    bbox_assigner = build_assigner(assign_config)
    bbox_sampler = build_sampler(sampler_config)
    # No ignored regions in this dummy setup.
    gt_bboxes_ignore = [None for _ in range(num_imgs)]
    sampling_results = []
    for i in range(num_imgs):
        assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
                                             gt_bboxes_ignore[i], gt_labels[i])
        sampling_result = bbox_sampler.sample(
            assign_result,
            proposal_list[i],
            gt_bboxes[i],
            gt_labels[i],
            feats=feats)
        sampling_results.append(sampling_result)
    return sampling_results
|
994,002 | a1e4dc712978d2b8d1620bd15b8647ad7057a27e | import mxnet as mx
from mxnet import gluon
from adversarial.blocks.resnext import ResNeXt
from adversarial.blocks.mdn import SELU, DenseNet
class JNet(gluon.nn.HybridBlock):
    """Two-stream classifier: separate ResNeXt towers for particle-flow
    candidates and secondary vertices, concatenated and passed through
    SELU-activated dense layers to a log-softmax over `classes` outputs."""
    def __init__(self, classes, **kwargs):
        super(JNet, self).__init__(**kwargs)
        with self.name_scope():
            # Feature extractors for the two input streams.
            self._pfcand = ResNeXt(bottle_neck=False, layers=[2, 2, 2], channels=[32, 64, 64, 128], prefix='part_')
            self._sv = ResNeXt(bottle_neck=False, layers=[2, 2], channels=[32, 32, 64], prefix='sv_')
            # Classification head.
            self._fc1 = gluon.nn.Dense(512, weight_initializer=mx.init.Xavier())
            self._fc2 = gluon.nn.Dense(256, weight_initializer=mx.init.Xavier())
            self._fc3 = gluon.nn.Dense(256, weight_initializer=mx.init.Xavier())
            self._selu = SELU()
            self._fc_out = gluon.nn.Dense(classes, weight_initializer=mx.init.Xavier())
    def hybrid_forward(self, F, pfcand, sv):
        """Return (concatenated tower features, log-softmax class scores)."""
        pfcand = self._pfcand(pfcand)
        sv = self._sv(sv)
        concat = F.concat(pfcand, sv)
        fc1 = self._selu(self._fc1(concat))
        fc2 = self._selu(self._fc2(fc1))
        fc3 = self._selu(self._fc3(fc2))
        fc_out = self._fc_out(fc3)
        log_softmax = F.log_softmax(fc_out)
        return concat, log_softmax
def make_symD(num_classes, **kwargs):
    """Build the discriminator network and its symbolic outputs.

    Returns (net, log-softmax symbol, softmax symbol). The probability
    symbol is recovered from the log-softmax via exp.
    """
    pfcand = mx.sym.var('part')
    sv = mx.sym.var('sv')
    netD = JNet(num_classes)
    netD.hybridize()
    # JNet returns (features, log_softmax); only the scores are symbolized.
    _, symD = netD(pfcand, sv)
    softmax = mx.sym.exp(data=symD, name='softmax')
    return netD, symD, softmax
def get_net(num_classes, use_softmax=False, **kwargs):
    """Build the discriminator and adversary networks plus their symbols.

    Requires kwargs['adv_mass_nbins'] for the adversary's output size;
    optionally also returns the softmax symbol when use_softmax is True.
    """
    netD, symD, softmax = make_symD(num_classes)
    netD.hybridize()
    netAdv = DenseNet(num_layers=3, hidden_units=256, num_classes=kwargs['adv_mass_nbins'])  # Not converging w/ DropOut???
    netAdv.hybridize()
    # Adversary consumes the discriminator's score vector.
    symAdv = netAdv(mx.sym.var('scores'))
    if use_softmax:
        return netD, netAdv, symD, symAdv, softmax
    else:
        return netD, netAdv, symD, symAdv
def get_loss(**kwargs):
    """Return hybridized cross-entropy losses for discriminator and adversary."""
    def _make_ce_loss():
        # Both networks output log-probabilities, hence from_logits=True.
        ce = gluon.loss.SoftmaxCrossEntropyLoss(from_logits=True)
        ce.hybridize()
        return ce
    return _make_ce_loss(), _make_ce_loss()
|
994,003 | d4619d438ebf30ec79a3795cb9c5718a49fba26a | import time
import Adafruit_PCA9685
esc = Adafruit_PCA9685.PCA9685()

esc_pwm_freq = 50  # PWM frequency (Hz) used to communicate with the ESCs
# One PWM period in microseconds, divided by the PCA9685's 12-bit resolution,
# gives the duration of a single tick in microseconds (~4.88 us at 50 Hz).
tick_length = (1000000 / esc_pwm_freq) / 4096
# BUG FIX: converting a pulse width in us to ticks requires DIVIDING by the
# tick length (us per tick). The previous code multiplied, yielding ~4882,
# which is outside the controller's 0-4095 tick range.
esc_min = 1000 / tick_length  # 1000 us pulse expressed in ticks (~205)
esc_max = 2000 / tick_length  # 2000 us pulse expressed in ticks (~410)
print('throttle off signal: ', esc_min)
|
994,004 | 51c135a08dea27df80618196ccaed27902703ccf | from django.shortcuts import render, render_to_response, get_object_or_404, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.http import HttpResponse
from fullcalendar.util import events_to_json, calendar_options
from django.views.generic.edit import FormMixin
from django.views.generic.list import ListView
from django.utils import timezone
from django.utils.decorators import method_decorator
from easy_pdf.views import PDFTemplateView
from .forms import eventForm, hardwareForm, contactForm, airbillForm, poolForm
from .models import event, hardware, contact, airbill, pool, assignment
## TODO - Update window.open to use URL reverse introspection (Do not hard code), and remove new window
OPTIONS = """{ timeFormat: "H:mm",
customButtons: {
NewEvent: {
text: 'New',
click: function() {
window.open('/events/new/');
return false;
}
}
},
header: {
left: 'prev,next today NewEvent',
center: 'title',
right: 'month, basicWeek, basicDay',
},
allDaySlot: true,
firstDay: 0,
weekMode: 'liquid',
slotMinutes: 15,
defaultEventMinutes: 30,
minTime: 8,
maxTime: 20,
editable: false,
weekNumbers: true,
weekNumberTitle: "Week",
dayClick: function(date, allDay, jsEvent, view) {
if (allDay) {
$('#calendar').fullCalendar('gotoDate', date)
$('#calendar').fullCalendar('changeView', 'basicDay')
}
},
}"""
def home_redirect(request):
    """Redirect any legacy entry URL back to the site root."""
    return redirect('/')
@login_required
def home(request):
    """Render the authenticated landing page."""
    return render(request, "home.html", {})
###############################################
@login_required
def calendar(request):
    """Render the FullCalendar page; events are fetched from `all_events`."""
    # Relative URL the calendar polls for its event feed.
    event_url = 'all_events/'
    return render(request, 'events/calendar.html', {'calendar_config_options': calendar_options(event_url, OPTIONS)})
# Create your views here.
@login_required
def all_events(request):
    """Return every event serialized as FullCalendar-compatible JSON."""
    events = event.objects.all()
    return HttpResponse(events_to_json(events), content_type='application/json')
##TODO add ability to delete events
@login_required
def new_event(request):
    """Create a new event from a submitted eventForm; re-render with errors otherwise."""
    title = 'New Event'
    # Bound on POST, unbound otherwise; the redundant re-instantiation of the
    # form inside the POST branch was removed (same bound form either way).
    form = eventForm(request.POST or None)
    if request.POST and form.is_valid():
        form.save()
    context = {
        "title": title,
        "form": form
    }
    # BUG FIX: the template path used a backslash ("events\event.html"),
    # which is not a valid Django template path separator off Windows.
    return render(request, "events/event.html", context)
@login_required
def edit_event(request, uuid=None):
    """Edit an existing event identified by its `evID` UUID.

    NOTE(review): if `uuid` is falsy, `thisEvent` (and on GET `form`) is never
    bound and the context construction below raises NameError — confirm the
    URLconf always supplies a uuid.
    """
    title = 'Edit Event'
    if uuid:
        thisEvent = get_object_or_404(event, evID=uuid)
        ## TODO fix M2M save 'Cannot set values on a ManyToManyField which specifies an intermediary model.'
        ## http://stackoverflow.com/questions/387686/what-are-the-steps-to-make-a-modelform-work-with-a-manytomany-relationship-with
        if request.POST:
            form = eventForm(request.POST, instance=thisEvent)
            if form.is_valid():
                form.save()
        else:
            form = eventForm(instance=thisEvent)
        print thisEvent  # debug output (Python 2 print statement)
    context = {
        "title": title,
        "form": form,
        "evID": thisEvent.evID
    }
    return render(request, "events/event.html", context)
class packing_pdfView(PDFTemplateView):
    """PDF packing list for a single event, looked up by the `uuid` URL kwarg."""
    template_name = "pdf/pdf_packing.html"
    def get_context_data(self, **kwargs):
        context = super(packing_pdfView, self).get_context_data(**kwargs)
        uuid = self.kwargs['uuid']
        ev = get_object_or_404(event, evID=uuid)
        context["event"] = ev
        # context["contact"] =
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for all HTTP methods on this view.
        return super(packing_pdfView, self).dispatch(request, *args, **kwargs)
class srf_pdfView(PDFTemplateView):
    """PDF service request form (SRF) for a single event (`uuid` URL kwarg)."""
    template_name = "pdf/pdf_srf.html"
    def get_context_data(self, **kwargs):
        context = super(srf_pdfView, self).get_context_data(**kwargs)
        uuid = self.kwargs['uuid']
        print uuid  # debug output (Python 2 print statement)
        ev = get_object_or_404(event, evID=uuid)
        print ev  # debug output
        context["event"] = ev
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require authentication for all HTTP methods on this view.
        return super(srf_pdfView, self).dispatch(request, *args, **kwargs)
class checkin_hardware(ListView):
    """Paginated list of hardware assignments for one event (`uuid` kwarg)."""
    model = assignment
    template_name = 'events/checkin_hardware.html'
    paginate_by = settings.NUM_PER_PAGE
    def get_context_data(self, **kwargs):
        context = super(checkin_hardware, self).get_context_data(**kwargs)
        uuid = self.kwargs['uuid']
        obj_y = assignment.objects.filter(eventID=uuid)
        print obj_y.count()  # debug output (Python 2 print statement)
        paginator = Paginator(obj_y, self.paginate_by)
        page = self.request.GET.get('page')
        try:
            obj_z = paginator.page(page)
        except PageNotAnInteger:
            # Missing/non-numeric ?page= falls back to the first page.
            obj_z = paginator.page(1)
        except EmptyPage:
            # Out-of-range page is clamped to the last page.
            obj_z = paginator.page(paginator.num_pages)
        print obj_z.object_list  # debug output
        context['page_items'] = obj_z
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(checkin_hardware, self).dispatch(request, *args, **kwargs)
###############################################
@login_required
def new_hardware(request):
    """Create a hardware record from a submitted hardwareForm."""
    form = hardwareForm(request.POST or None)
    if request.POST:
        form = hardwareForm(request.POST)
        if form.is_valid():
            form.save()
    return render(request, "hardware/hardware.html", {
        "title": 'New Hardware',
        "form": form,
    })
@login_required
def edit_hardware(request, uuid=None):
    """Edit a hardware record identified by its `hwID` UUID.

    NOTE(review): if `uuid` is falsy, `form` is never bound and the context
    construction raises NameError — confirm the URLconf always supplies one.
    """
    title = 'Edit Hardware'
    if uuid:
        thisObj = get_object_or_404(hardware, hwID=uuid)
        if request.POST:
            form = hardwareForm(request.POST, instance=thisObj)
            if form.is_valid():
                form.save()
        else:
            form = hardwareForm(instance=thisObj)
        print thisObj  # debug output (Python 2 print statement)
    context = {
        "title": title,
        "form": form
    }
    return render(request, "hardware/hardware.html", context)
# @login_required
class list_hardware(ListView):
    """Paginated listing of all hardware records."""
    model = hardware
    template_name = 'hardware/hwIndex.html'
    paginate_by = settings.NUM_PER_PAGE
    def get_context_data(self, **kwargs):
        context = super(list_hardware, self).get_context_data(**kwargs)
        obj_y = hardware.objects.all()
        print obj_y.count()  # debug output (Python 2 print statement)
        paginator = Paginator(obj_y, self.paginate_by)
        page = self.request.GET.get('page')
        try:
            obj_z = paginator.page(page)
        except PageNotAnInteger:
            # Missing/non-numeric ?page= falls back to the first page.
            obj_z = paginator.page(1)
        except EmptyPage:
            # Out-of-range page is clamped to the last page.
            obj_z = paginator.page(paginator.num_pages)
        print obj_z.object_list  # debug output
        context['page_items'] = obj_z
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(list_hardware, self).dispatch(request, *args, **kwargs)
###############################################
@login_required
def new_contact(request):
    """Create a contact record from a submitted contactForm."""
    bound_form = contactForm(request.POST or None)
    if request.POST:
        bound_form = contactForm(request.POST)
        if bound_form.is_valid():
            bound_form.save()
    return render(request, "contact/contact.html", {
        "title": 'New Contact',
        "form": bound_form,
    })
@login_required
def edit_contact(request, uuid=None):
    """Edit a contact record identified by its `ctID` UUID.

    NOTE(review): if `uuid` is falsy, `form` is never bound and the context
    construction raises NameError — confirm the URLconf always supplies one.
    """
    title = 'Edit Contact'
    if uuid:
        thisObj = get_object_or_404(contact, ctID=uuid)
        if request.POST:
            form = contactForm(request.POST, instance=thisObj)
            if form.is_valid():
                form.save()
        else:
            form = contactForm(instance=thisObj)
        print thisObj  # debug output (Python 2 print statement)
    context = {
        "title": title,
        "form": form
    }
    return render(request, "contact/contact.html", context)
##TODO add title context to view
class list_contact(ListView):
    """Paginated listing of all contact records."""
    model = contact
    template_name = 'contact/ctIndex.html'
    paginate_by = settings.NUM_PER_PAGE
    def get_context_data(self, **kwargs):
        context = super(list_contact, self).get_context_data(**kwargs)
        obj_y = contact.objects.all()
        #print obj_y.count()
        paginator = Paginator(obj_y, self.paginate_by)
        page = self.request.GET.get('page')
        try:
            obj_z = paginator.page(page)
        except PageNotAnInteger:
            # Missing/non-numeric ?page= falls back to the first page.
            obj_z = paginator.page(1)
        except EmptyPage:
            # Out-of-range page is clamped to the last page.
            obj_z = paginator.page(paginator.num_pages)
        #print obj_z.object_list
        context['page_items'] = obj_z
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(list_contact, self).dispatch(request, *args, **kwargs)
###############################################
@login_required
def new_airbill(request):
    """Create an airbill record from a submitted airbillForm."""
    title = 'New Airbill'
    form = airbillForm(request.POST or None)
    if request.POST:
        form = airbillForm(request.POST)
        if form.is_valid():
            form.save()
    context = {
        "title": title,
        "form": form
    }
    return render(request, "airbill/airbill.html", context)
@login_required
def edit_airbill(request, uuid=None):
    """Edit an airbill record identified by its `abID` UUID.

    NOTE(review): if `uuid` is falsy, `form` is never bound and the context
    construction raises NameError — confirm the URLconf always supplies one.
    """
    title = 'Edit Airbill'
    if uuid:
        thisObj = get_object_or_404(airbill, abID=uuid)
        if request.POST:
            form = airbillForm(request.POST, instance=thisObj)
            if form.is_valid():
                form.save()
        else:
            form = airbillForm(instance=thisObj)
        print thisObj  # debug output (Python 2 print statement)
    context = {
        "title": title,
        "form": form
    }
    return render(request, "airbill/airbill.html", context)
class list_airbill(ListView):
    """Paginated listing of all airbill records."""
    model = airbill
    template_name = 'airbill/abIndex.html'
    paginate_by = settings.NUM_PER_PAGE
    def get_context_data(self, **kwargs):
        context = super(list_airbill, self).get_context_data(**kwargs)
        obj_y = airbill.objects.all()
        #print obj_y.count()
        paginator = Paginator(obj_y, self.paginate_by)
        page = self.request.GET.get('page')
        try:
            obj_z = paginator.page(page)
        except PageNotAnInteger:
            # Missing/non-numeric ?page= falls back to the first page.
            obj_z = paginator.page(1)
        except EmptyPage:
            # Out-of-range page is clamped to the last page.
            obj_z = paginator.page(paginator.num_pages)
        #print obj_z.object_list
        context['page_items'] = obj_z
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(list_airbill, self).dispatch(request, *args, **kwargs)
###############################################
@login_required
def new_pool(request):
    """Create a pool record from a submitted poolForm."""
    title = 'New Pool'
    form = poolForm(request.POST or None)
    if request.POST:
        form = poolForm(request.POST)
        if form.is_valid():
            form.save()
    context = {
        "title": title,
        "form": form
    }
    # NOTE(review): renders the *airbill* template — looks like a copy/paste
    # leftover; confirm whether a pool/pool.html template was intended.
    return render(request, "airbill/airbill.html", context)
@login_required
def edit_pool(request, uuid=None):
    """Edit a pool record identified by its `poolID` UUID.

    NOTE(review): if `uuid` is falsy, `form` is never bound and the context
    construction raises NameError — confirm the URLconf always supplies one.
    """
    title = 'Edit Pool'
    if uuid:
        thisObj = get_object_or_404(pool, poolID=uuid)
        if request.POST:
            form = poolForm(request.POST, instance=thisObj)
            if form.is_valid():
                form.save()
        else:
            form = poolForm(instance=thisObj)
        print thisObj  # debug output (Python 2 print statement)
    context = {
        "title": title,
        "form": form
    }
    # NOTE(review): renders the *airbill* template — likely a copy/paste
    # leftover; confirm whether a pool template was intended.
    return render(request, "airbill/airbill.html", context)
class list_pool(ListView):
    """Paginated listing of all pool records."""
    model = pool
    template_name = 'pool/poolIndex.html'
    paginate_by = settings.NUM_PER_PAGE
    def get_context_data(self, **kwargs):
        context = super(list_pool, self).get_context_data(**kwargs)
        obj_y = pool.objects.all()
        #print obj_y.count()
        paginator = Paginator(obj_y, self.paginate_by)
        page = self.request.GET.get('page')
        try:
            obj_z = paginator.page(page)
        except PageNotAnInteger:
            # Missing/non-numeric ?page= falls back to the first page.
            obj_z = paginator.page(1)
        except EmptyPage:
            # Out-of-range page is clamped to the last page.
            obj_z = paginator.page(paginator.num_pages)
        #print obj_z.object_list
        context['page_items'] = obj_z
        return context
    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(list_pool, self).dispatch(request, *args, **kwargs)
###############################################
##TODO Remove Example view and Templates
class HelloPDFView(PDFTemplateView):
    """Example PDF view (marked TODO for removal along with its templates)."""
    template_name = "pdf/hello.html"
    def get_context_data(self, **kwargs):
        # BUG FIX: the method previously returned None (no return statement),
        # which breaks template rendering; return the superclass context.
        self.evID = self.kwargs['evid']
        return super(HelloPDFView, self).get_context_data(**kwargs)
994,005 | 252c1a80587d0a3df923b12328faa4d3978ddf12 | import sqlite3
import pandas as pd
import FinanceDataReader as fdr
import numpy as np
import datetime
from scipy import stats
def get_date(start, terms):
    """Return the last `terms` business days (as date objects) ending at `start`,
    localized to Asia/Seoul."""
    business_days = pd.bdate_range(end=start, periods=terms, tz='Asia/Seoul')
    return business_days.date
# Load the KOSPI listing and drop rows with no fundamental data at all.
meta = fdr.StockListing('KOSPI')
meta.dropna(how='all', subset=meta.columns[3:], inplace=True)

# Repaired extraction-garbled Korean comments (they were split mid-line and
# broke the file); thresholds are percentages.
# "Average" thresholds: a day counts as volatile when the open/close swing,
# high/low swing, or day-to-day change exceeds these.
avg_thres_st_ed = 3
avg_thres_h_l = 5
avg_thres_dtd = 3
# "Danger" thresholds (negative): large single-day drops weighted separately.
dager_thres_st_ed = -7
dager_thres_h_l = -7
dager_thres_dtd = -7
# Per-stock result containers: score, profitability, volatility series.
names = {}
codes = meta['Symbol'].unique()
stocks_P = {}
stocks_V = {}
# Window lengths (trading days): analysis lookback and number of days shown.
ana_period = 120
show_period = 30
for code in codes:
    # Resolve display name and load the full daily price history for the symbol.
    name = meta.loc[meta['Symbol'] == code, "Name"].values[0]
    finance = fdr.DataReader(code)
    # Treat zero prices as missing data. (np.NaN -> np.nan: the upper-case
    # alias was removed in NumPy 2.0; behavior is identical.)
    finance.replace(0, np.nan, inplace=True)
    # One profitability / volatility value per shown day.
    profits = []
    variances = []
    for n in range(show_period):
        # Rolling window of `ana_period` trading days ending n days ago.
        subset = finance[::-1].iloc[n:n + ana_period].copy()
        # Profitability: month-over-month percent change of the mean open
        # price, summed and scaled.
        subset['YM'] = subset.index.year.astype(str) + subset.index.month.astype(str)
        temp = subset.groupby('YM').mean()['Open']
        temp = (temp - temp.shift(1)) / temp * 100
        profit = temp.values[1:].sum() * 20
        # Open-vs-close swings: count of days above the average threshold
        # plus a weighted sum of days below the danger threshold.
        day_percent_st_ed = (subset['Open'] - subset['Close']) / subset['Open']
        day_count_st_ed = (day_percent_st_ed.abs() * 100 > avg_thres_st_ed).sum()
        cond = day_percent_st_ed * 100 < dager_thres_st_ed
        day_count_st_ed_dag = day_percent_st_ed[cond].abs().sum() * 100
        # High-vs-low swings, same scheme.
        day_percent_h_l = (subset['High'] - subset['Low']) / subset['Open']
        day_count_h_l = (day_percent_h_l.abs() * 100 > avg_thres_h_l).sum()
        cond = day_percent_h_l * 100 < dager_thres_h_l
        day_count_h_l_dag = day_percent_h_l[cond].abs().sum() * 100
        # Day-to-day close change, same scheme.
        day_to_day_ed = subset['Change']
        day_to_day_count = (day_to_day_ed.abs() * 100 > avg_thres_dtd).sum()
        cond = day_to_day_ed * 100 < dager_thres_dtd
        day_to_day_dag = day_to_day_ed[cond].abs().sum() * 100
        totalCount = day_count_st_ed + day_count_st_ed_dag + day_count_h_l + day_count_h_l_dag + day_to_day_count + day_to_day_dag
        # Store this window's results.
        profits.append(profit)
        variances.append(totalCount)
    # Normalize both series (z-score) and combine: reward profit, punish volatility.
    profits = stats.zscore(profits)
    variances = stats.zscore(variances)
    score = profits - variances
    # Record the per-stock score and component series.
    names[name] = score
    stocks_P[name] = profits
    stocks_V[name] = variances
    print(name, score)
# Scores table: one row per business day (most recent last), one column per stock.
push = pd.DataFrame(names, index=get_date(datetime.datetime.now(), 30)[::-1])
goods = []
bads = []
recos = []
for i in range(len(push)):
    # Mean score of the top-10 / next-50 stocks for the day, plus a
    # comma-separated list of the top-10 names as the recommendation.
    good = push.iloc[i, :].sort_values(ascending=False)[:10].mean()
    bad = push.iloc[i, :].sort_values(ascending=False)[10:10 + 50].mean()
    reco = push.iloc[i, :].sort_values(ascending=False)[:10].index.to_list()
    reco = ",".join(reco)
    goods.append(good)
    bads.append(bad)
    recos.append(reco)
# Table feeding the main chart (repaired garbled Korean comment).
push2 = pd.DataFrame({"Good": goods, "Bad": bads, "Recommend": recos})
push2.index = push.index
push2 = push2.reset_index()
push2.index.name = "id"
# Long-format table feeding the per-stock subplots (repaired garbled comment):
# one row per (stock, date) with score, profitability and volatility.
push3 = push.unstack()
push3_pv = pd.concat(
    [
        pd.DataFrame(stocks_P, index=get_date(datetime.datetime.now(), 30)[::-1]).unstack(),
        pd.DataFrame(stocks_V, index=get_date(datetime.datetime.now(), 30)[::-1]).unstack(),
    ],
    axis=1,
)
push3 = pd.concat([push3, push3_pv], axis=1)
push3.reset_index(inplace=True)
push3.columns = ['Name', 'Date', 'Score', 'Profit', 'Variances']
push3.index.name = "id"
print(push3)
# Push both tables into the application's SQLite database.
db_path = "capstone/db.sqlite3"
con = sqlite3.connect(db_path)
push2.to_sql("Fullplot", con, if_exists="replace")
push3.to_sql("Subplots", con, if_exists="replace")
994,006 | 2b7a1a77e9ca93d10cdf786f8203e6aa5b31fca2 | # -*- coding: utf-8 -*-
"""
This file gives the dipole radiation (E and B field) in the far field, the full radiation (near field + far field) and the near field radiation only
@author: manu
"""
from __future__ import division
from numpy import *
from numpy.random import *
from pylab import *
#from entropy import *
c=299792458.  # speed of light in vacuum (m/s)
mu0=4*pi*1e-7  # vacuum permeability (H/m)
eps0=1./(mu0*c**2)  # vacuum permittivity derived from c and mu0 (F/m)
def Hertz_dipole_ff (r, p, R, phi, f, t=0, epsr=1.):
  """
  Calculate E and B field strength radaited by hertzian dipole(s) in the far field.
  p: array of dipole moments [[px0,py0,pz0],[px1,py1,pz1],...[pxn,pyn,pzn]]
  R: array of dipole positions [[X0,Y0,Z0],[X1,Y1,Z1],...[Xn,Yn,Zn]]
  r: observation point [x,y,z]
  f: array of frequencies [f0,f1,...]
  t: time
  phi: array with dipole phase angles (0..2pi) [phi0,phi1,...,phin]
  return: fields values at observation point r at time t for every frequency in f. E and B are (3 components,number of frequencies) arrays.
  """
  nf = len(f)
  rprime = r-R  # r'=r-R, observation point relative to each dipole
  # Two branches: a single dipole (1-D p) vs. an array of dipoles (2-D p);
  # the math is identical but the indexing/summation differs.
  if ndim(p) < 2:
    magrprime = sqrt(sum((rprime)**2))
    # |r'| broadcast against the frequency axis.
    magrprimep = tile(magrprime, (len(f),1)).T
    phip = tile(phi, (len(f),1))
    w = 2*pi*f  # \omega
    k = w/c  # wave number
    krp = k*magrprimep  # k|r'|
    rprime_cross_p = cross(rprime, p)  # r' x p
    rp_c_p_c_rp = cross(rprime_cross_p, rprime)  # (r' x p) x r'
    # Common far-field phase/amplitude factor exp(j(wt - k|r'| + phi))/(4*pi*eps).
    expfac = exp(1j*(w*t-krp+phip.T))/(4*pi*eps0*epsr)
    # Far-field E ~ w^2/(c^2 |r'|^3) * ((r' x p) x r'); one row per component.
    Ex = (w**2/(c**2*magrprimep**3) * expfac)* (tile(rp_c_p_c_rp[0],(nf,1))).T
    Ey = (w**2/(c**2*magrprimep**3) * expfac)* (tile(rp_c_p_c_rp[1],(nf,1))).T
    Ez = (w**2/(c**2*magrprimep**3) * expfac)* (tile(rp_c_p_c_rp[2],(nf,1))).T
    # Far-field B ~ w^2/(c^3 |r'|^2) * (r' x p).
    Bx = expfac/(magrprimep**2*c**3)*(w**2*tile(rprime_cross_p[0],(nf,1)).T)
    By = expfac/(magrprimep**2*c**3)*(w**2*tile(rprime_cross_p[1],(nf,1)).T)
    Bz = expfac/(magrprimep**2*c**3)*(w**2*tile(rprime_cross_p[2],(nf,1)).T)
    E = vstack((Ex,Ey,Ez))
    B = vstack((Bx,By,Bz))
  else:
    magrprime = sqrt(sum((rprime)**2,axis=1))  # |r'| per dipole
    magrprimep = tile(magrprime, (len(f),1)).T
    phip = tile(phi, (len(f),1))
    # Frequencies broadcast against the dipole axis.
    fp = tile(f,(len(magrprime),1))
    w = 2*pi*fp  # \omega
    k = w/c  # wave number
    krp = k*magrprimep  # k|r'|
    rprime_cross_p = cross(rprime, p)  # r' x p
    rp_c_p_c_rp = cross(rprime_cross_p, rprime)  # (r' x p) x r'
    expfac = exp(1j*(w*t-krp+phip.T))/(4*pi*eps0*epsr)
    Ex = (w**2/(c**2*magrprimep**3) * expfac)* (tile(rp_c_p_c_rp[:,0],(nf,1))).T
    Ey = (w**2/(c**2*magrprimep**3) * expfac)* (tile(rp_c_p_c_rp[:,1],(nf,1))).T
    Ez = (w**2/(c**2*magrprimep**3) * expfac)* (tile(rp_c_p_c_rp[:,2],(nf,1))).T
    Bx = expfac/(magrprimep**2*c**3)*(w**2*tile(rprime_cross_p[:,0],(nf,1)).T)
    By = expfac/(magrprimep**2*c**3)*(w**2*tile(rprime_cross_p[:,1],(nf,1)).T)
    Bz = expfac/(magrprimep**2*c**3)*(w**2*tile(rprime_cross_p[:,2],(nf,1)).T)
    # Coherent sum over all dipoles for each component.
    E = vstack((sum(Ex,axis=0),sum(Ey,axis=0),sum(Ez,axis=0)))
    B = vstack((sum(Bx,axis=0),sum(By,axis=0),sum(Bz,axis=0)))
  return E,B
#observation points on a circle
radius=1
nphi=360
phi=linspace(2*pi/nphi,2*pi,nphi)
x=radius*cos(phi)
y=radius*sin(phi)
z=0*ones(len(phi))
# Log-spaced frequency sweep from 1 MHz to 10 GHz.
f0=1e6
f1=10e9
nf=30
freq=10**(linspace(log10(f0),log10(f1),nf))
#random dipoles that radiates plane waves from their distance...
n_dip=100  #number of Herztian dipoles
#dipole positions: uniformly distributed directions on a sphere of radius `distance`
distance=1000
phi_posdip=2*pi*random(n_dip)
th_posdip=arccos(2*random(n_dip)-1)  # arccos(2u-1) gives uniform polar angle
R=(array([distance*sin(th_posdip)*cos(phi_posdip),distance*sin(th_posdip)*sin(phi_posdip),distance*cos(th_posdip)])).T
#dipole moments: random magnitude and uniformly random orientation
pmax=1e-7  #maximum dipole moment p
r_dip=pmax*random(n_dip)
phi_dip=2*pi*random(n_dip)
th_dip=arccos(2*random(n_dip)-1)
p=(array([r_dip*sin(th_dip)*cos(phi_dip),r_dip*sin(th_dip)*sin(phi_dip),r_dip*cos(th_dip)])).T
#dipole phases
phases_dip=2*pi*random(n_dip)
# Poynting-vector magnitude per observation point and frequency.
P=zeros((nphi,nf))
print("Computing the power received along the circle...")
for i in range(nphi):
  r=(array([x[i],y[i],z[i]])).T
  E,B=Hertz_dipole_ff (r, p, R, phases_dip, freq, t=0, epsr=1.)
  P[i,:]=sqrt(sum((0.5*abs(cross(E.T,(B.T))))**2,axis=1))
  #P[i,j,:]=.5*sum(abs(E)**2,axis=0) # gives the average value of the power over a period, if you want the radiated power at time t=0, please consider using 0.5*sum(real(E)**2,axis=0)
  print('%2.1f/100'%((i+1)/nphi*100))
# Entropy of the normalized angular power distribution per frequency.
ent=zeros(nf)
for i in range(nf):
  # NOTE(review): `entropy` is undefined here — its import ("from entropy
  # import *") is commented out at the top of the file, so this raises
  # NameError as written; restore that import before running.
  ent[i]=entropy(P[:,i]/P[:,i].mean(),10)
semilogx(freq,ent,'.')
grid()
xlabel('$f$')
ylabel("Entropy")
#show()
#savefig("Entropy.pdf",bbox="tight")
class Solution(object):
    def isPalindrome(self, x):
        """
        :type x: int
        :rtype: bool

        An integer is a palindrome when its decimal representation reads the
        same forwards and backwards (negatives never are, due to the sign).
        """
        digits = str(x)
        return digits == digits[::-1]
994,008 | 7ae569675306430771f3b4b6813e04cfdeeb882e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import click
import logging
from .grafana_dashboard import (
convert_dashboards,
info_dashboards,
test_dashboards,
metrics_dashboards,
metrics_all,
)
from .prometheus_rule import convert_rules, metrics_rules, info_rules
from .utils import is_debug_active
# Root logger: timestamped, level-tagged messages to stderr at INFO by
# default; `set_logger` may later raise/lower the level per the debug flag.
logging.basicConfig(
    format="%(asctime)s [%(levelname)-5.5s] %(message)s",
    level=logging.INFO,
    handlers=[logging.StreamHandler()],
)
def set_logger():
    """Switch the root logger between DEBUG and INFO based on the debug flag."""
    if is_debug_active():
        logging.getLogger().setLevel(logging.DEBUG)
        logging.info("Setting logging to DEBUG level...")
        return
    logging.getLogger().setLevel(logging.INFO)
    logging.info("Setting logging to INFO level...")
@click.command()
@click.option(
    "--source-path",
    default="./source",
    help="Path to search for the source JSON dashboards.",
)
@click.option(
    "--build-path",
    default="",
    help="Path to save converted JSONNET dashboards, none to print to console.",
)
@click.option(
    "--format",
    default="grafonnet",
    help="Format of the dashboard: `grafonnet` or `grafana-builder`.",
)
@click.option(
    "--layout",
    default="rows",
    help="Format of the dashboard: `normal` (scheme 14) , `grid` (scheme 16).",
)
def dashboard_convert(source_path, build_path, format, layout):
    """Convert JSON dashboards to JSONNET format."""
    # NOTE: this info line is emitted before set_logger() adjusts the level.
    logging.info(
        "Searching path `{}` for JSON dashboards to convert ...".format(source_path)
    )
    set_logger()
    convert_dashboards(source_path, build_path, format, layout)
@click.command()
@click.option(
    "--path", default="./data", help="Path to search for the source JSON dashboards."
)
@click.option(
    "--scheme",
    default="16",
    help="Scheme version of the dashboard: `16` is the current.",
)
@click.option(
    "--layout",
    default="rows",
    help="Format of the dashboard: `normal` (scheme 14) , `grid` (scheme 16).",
)
def dashboard_test(path, scheme, layout):
    """Test JSONNET formatted dashboards.

    NOTE(review): the --scheme and --layout options are accepted but never
    passed to test_dashboards — confirm whether they should be forwarded.
    """
    logging.info("Searching path `{}` for JSON dashboards...".format(path))
    set_logger()
    test_dashboards(path)
@click.command()
@click.option(
    "--path", default="./data", help="Path to search for the source JSON dashboards."
)
def dashboard_info(path):
    """Get info from Grafana JSON dashboards."""
    logging.info(
        "Searching path `{}` for JSON dashboards...".format(path)
    )
    # Printed (not logged) so it can be piped; also the only command here
    # that does not call set_logger().
    print(info_dashboards(path))
@click.command()
@click.option(
    "--path", default="./data", help="Path to search for the JSON dashboards."
)
def dashboard_metrics(path):
    """Get metric names from Grafana JSON dashboard targets."""
    logging.info("Searching path `{}` for JSON dashboards...".format(path))
    set_logger()
    metrics_dashboards(path)
@click.command()
@click.option(
    "--dashboard-path",
    default="./data/grafana",
    help="Path to search for the Grafana JSON dashboards.",
)
@click.option(
    "--rules-path",
    default="./data/prometheus",
    help="Path to search for the Prometheus YAML rules.",
)
@click.option("--output", default="console", help="Type of output [console/json]")
def all_metrics(dashboard_path, rules_path, output):
    """Get metric names from Grafana JSON dashboard targets and Prometheus rules."""
    logging.info(
        "Searching path `{}` for JSON dashboards for metrics ...".format(dashboard_path)
    )
    logging.info(
        "Searching path `{}` for YAML rules for metrics ...".format(rules_path)
    )
    set_logger()
    metrics_all(dashboard_path, rules_path, output)
@click.command()
@click.option(
    "--path", default="./data", help="Path to search for the YAML rule definions."
)
def rule_info(path):
    """Get detailed info from Prometheus rule targets."""
    logging.info(
        "Searching path `{}` for YAML rule definitions for detailed info ...".format(
            path
        )
    )
    set_logger()
    info_rules(path)
@click.command()
@click.option(
    "--path", default="./data", help="Path to search for the YAML rule definions."
)
def rule_metrics(path):
    """Get metric names from Prometheus rule targets."""
    logging.info(
        "Searching path `{}` for YAML rule definitions for metrics ...".format(path)
    )
    set_logger()
    metrics_rules(path)
@click.command()
@click.option(
    "--source-path",
    default="./source",
    help="Path to search for the source YAML rule files.",
)
@click.option(
    "--build-path",
    default="",
    help="Path to save converted JSONNET rules, none to print to console.",
)
def rule_convert(source_path, build_path):
    """Convert Prometheus rule definitions to JSONNET format."""
    logging.info(
        "Searching path `{}` for YAML rule definitions to convert ...".format(
            source_path
        )
    )
    set_logger()
    convert_rules(source_path, build_path)
|
994,009 | b2130579faa8779509b3dccb2b535c3be30e4c3d | from base_model import cpu_count,cpu_person |
994,010 | cf6d8a95fc85904407e721b06f02a621e664dbc8 | from temperatures import data_colector
from temperatures import data_adjuster
import numpy as np
import pandas as pd
import seaborn as sns
from tkinter import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import datetime
#data_colector()
data,norma = data_adjuster()
class mainWindow(Frame):
    """Main Tk window of the weather station: plot buttons for measured vs.
    internet temperatures and labels showing 1-4 day forecasts.

    Depends on the module-level `norma` DataFrame and the `data_plotter`
    class defined below.
    """
    def __init__(self, master=None):
        Frame.__init__(self, master)
        # Header label (Slovak UI text kept as-is).
        self.label = Label(master, text='Graf namerana teplota vs. internetova teplota', font='Verdana 10 bold')
        self.label.place(x=10, y=10)
        self.window_setting()
        self.button()
        self.labels()
        self.frames()
    def window_setting(self):
        self.master.title("Weather station")  # window title
        self.master.geometry("{}x{}".format(360, 220))  # window size
        self.pack(fill=BOTH, expand=1)  # apply the settings
    def button(self):
        # Plot buttons for the last 5/10/15 hours and the forecast comparison.
        butt1 = Button(self, text="5 hodin!", command=self.create_figure1)
        butt1.place(x=20, y=40)
        butt2 = Button(self, text="10 hodin!", command=self.create_figure2)
        butt2.place(x=150, y=40)
        butt3 = Button(self, text="15 hodin!", command=self.create_figure3)
        butt3.place(x=280, y=40)
        butt4 = Button(self, text="Teplota vs. Predpovede (1-5 dni)", command=self.create_figure_real_for)
        butt4.place(x=80, y=80)
    def labels(self):
        # Static captions for the 1-4 day forecast values.
        self.label1 = Label(self, text='Predpoved o 1 den: ', font='Verdana 10 bold')
        self.label2 = Label(self, text='Predpoved o 2 den: ', font='Verdana 10 bold')
        self.label3 = Label(self, text='Predpoved o 3 den: ', font='Verdana 10 bold')
        self.label4 = Label(self, text='Predpoved o 4 den: ', font='Verdana 10 bold')
        self.label1.place(y=120, x=20)
        self.label2.place(y=140, x=20)
        self.label3.place(y=160, x=20)
        self.label4.place(y=180, x=20)
    def frames(self):
        # Forecast values in degrees Celsius, one label per day ahead.
        text1 = data_plotter(norma, 1, 1).future_forecast()
        text2 = data_plotter(norma, 2, 2).future_forecast()
        text3 = data_plotter(norma, 2, 3).future_forecast()
        text4 = data_plotter(norma, 2, 4).future_forecast()
        self.hodnota1 = Label(self, text=str(text1) + "\N{DEGREE SIGN}C", font='Verdana 10')
        self.hodnota2 = Label(self, text=str(text2) + "\N{DEGREE SIGN}C", font='Verdana 10')
        self.hodnota3 = Label(self, text=str(text3) + "\N{DEGREE SIGN}C", font='Verdana 10')
        self.hodnota4 = Label(self, text=str(text4) + "\N{DEGREE SIGN}C", font='Verdana 10')
        position_x = 200
        self.hodnota1.place(y=120, x=position_x)
        self.hodnota2.place(y=140, x=position_x)
        self.hodnota3.place(y=160, x=position_x)
        self.hodnota4.place(y=180, x=position_x)
    def create_figure1(self):
        # Measured vs. internet temperature over the last 5 hours.
        self.figure1 = data_plotter(norma, 5, 5)
        self.figure1.plot_net_real()
    def create_figure2(self):
        # Last 10 hours.
        self.figure1 = data_plotter(norma, 10, 10)
        self.figure1.plot_net_real()
    def create_figure3(self):
        # Last 20 hours (button label says 15 — NOTE(review): confirm intent).
        self.figure1 = data_plotter(norma, 20, 20)
        self.figure1.plot_net_real()
    def create_figure_real_for(self):
        # Measured temperature vs. 1-5 day forecasts.
        self.figure2 = data_plotter(norma, 20, 20)
        self.figure2.real_forecast()
class data_plotter():
    """Plotting and forecast lookups backed by the normalized temperature frame."""

    def __init__(self, norma, mod, forecast):
        # Normalized data frame, look-back window in hours, forecast horizon in days.
        self.n = norma
        self.mod = mod
        self.forecast = forecast

    def plot_net_real(self):
        """Line plot of measured vs. internet temperature over the last `mod` hours."""
        now = datetime.datetime.now()
        since = now - datetime.timedelta(hours=self.mod)
        recent = self.n.reset_index()
        recent = recent[recent['index'] > since]
        sns.set_theme()
        sns.lineplot(x='index', y='Namerana teplota', data=recent)
        sns.lineplot(x='index', y='Aktualna teplota', data=recent)
        plt.xlabel('Cas')
        plt.ylabel('Teplota C\N{DEGREE SIGN}')
        plt.show()

    def real_forecast(self):
        """Plot the measured temperature against the day-ahead forecast columns
        for the last 10 hours."""
        now = datetime.datetime.now()
        since = now - datetime.timedelta(hours=10)
        sns.set_theme()
        window = self.n.drop(['Aktualna teplota'], axis=1)
        window = window.fillna(value=np.nan)
        window = window.loc[since:now]
        sns.set_theme()
        sns.lineplot(data=window)
        plt.xlabel('Cas')
        plt.ylabel('Teplota C\N{DEGREE SIGN}')
        plt.show()

    def future_forecast(self):
        """Return the forecast value `forecast` days ahead, with the timestamp
        rounded down to the nearest 10 minutes."""
        target = datetime.datetime.now() + datetime.timedelta(days=self.forecast)
        target = target - datetime.timedelta(minutes=target.minute % 10,
                                             seconds=target.second,
                                             microseconds=target.microsecond)
        column = str(self.forecast) + '_day_forecast'
        return self.n.loc[target, [column]].item()
# Build the Tk root window, attach the UI, and enter the event loop.
root = Tk()
win = mainWindow(root)
root.mainloop()
#
#
# data = data_adjuster()
# figure = data_plotter(data)
#
# figure.plot_net_real()
|
994,011 | 43dac023d0bf6d1747414002a69bba3a776680d0 | from django.contrib import admin
from django.contrib.admin import DateFieldListFilter
from mptt.admin import DraggableMPTTAdmin, TreeRelatedFieldListFilter
from .models import (
Card,
Category,
Comment,
DelieverChoice,
PaymentChoice,
ProductBrand,
ProductCountry,
Rate,
ShopComment,
ShopRate,
Shop,
OrderItem,
Address,
Coupon,
Order,
Characteristic,
Schedule,
CardCharacteristic,
MainSlider,
)
@admin.register(MainSlider)
class MainSliderAdmin(admin.ModelAdmin):
    """Admin for main-page slider images.

    Renamed from ScheduleAdmin: the original name collided with the Schedule
    admin class defined immediately below, which shadowed this one in the
    module namespace.
    """
    list_display = 'pk', 'slider_image'
    list_display_links = 'pk',
@admin.register(Schedule)
class ScheduleAdmin(admin.ModelAdmin):
    # Admin list view for weekly schedule entries.
    list_display = 'day', 'work_time',
    list_display_links = 'day',
    search_fields = 'day',
@admin.register(CardCharacteristic)
class CardCharacteristicAdmin(admin.ModelAdmin):
    # Admin for per-card characteristic values.
    list_display = 'pk', 'characteristic', 'value',
    list_display_links = 'pk',
@admin.register(Shop)
class ShopAdmin(admin.ModelAdmin):
    # Shop admin with image preview and owner search.
    list_display = 'owner', 'name', 'admin_preview'
    list_display_links = 'name',
    filter_horizontal = 'schedule',
    search_fields = 'owner__username', 'owner__fio'
@admin.register(ProductBrand, ProductCountry)
class ProductParamAmin(admin.ModelAdmin):
    # Shared admin for simple title-only product parameter models.
    list_display = 'title',
    list_display_links = 'title',
    search_fields = 'title',
@admin.register(Comment, ShopComment)
class CommentAdmin(admin.ModelAdmin):
    # Shared admin for product and shop comments, filterable by date.
    list_display = (
        'user',
        'comment',
        'datetime',
    )
    search_fields = ('user__username', 'user__fio',)
    list_filter = (
        ('datetime', DateFieldListFilter),
    )
@admin.register(Characteristic)
class CharacteristicAdmin(admin.ModelAdmin):
    # Admin for category-scoped characteristic definitions.
    list_display = (
        'pk',
        'category',
        'name',
    )
    list_display_links = 'name',
    search_fields = ('name', )
    list_filter = (
        'category',
    )
@admin.register(Rate, ShopRate)
class RateAdmin(admin.ModelAdmin):
    # Shared admin for product and shop ratings.
    list_display = (
        'user',
        'rate',
    )
    search_fields = ('user__username', 'user__fio',)
@admin.register(Category)
class CategoryAdmin(DraggableMPTTAdmin):
    """Draggable tree admin for product categories."""
    list_display = ('tree_actions', 'name', 'display', 'admin_preview')
    list_display_links = ('name',)
    # Bug fix: ('display') is just a parenthesized string, not a tuple —
    # a one-element tuple needs a trailing comma.
    filter_fields = ('display',)
    search_fields = ('name',)
    list_filter = (
        ('parent', TreeRelatedFieldListFilter),
    )
@admin.register(Card)
class CardAdmin(admin.ModelAdmin):
    # Product-card admin: image preview, seller search, category tree filter.
    list_display = (
        'pk',
        'title',
        'admin_preview',
        'seller',
        'present',
        'price'
    )
    list_display_links = 'title',
    search_fields = ('seller__username', 'seller__fio',)
    list_filter = (
        ('category', TreeRelatedFieldListFilter),
        'payment_methods',
        'deliver_methods'
    )
    filter_horizontal = (
        'attachments',
        'payment_methods',
        'deliver_methods',
        'characteristics',
    )
@admin.register(PaymentChoice, DelieverChoice)
class InfoAdmin(admin.ModelAdmin):
    # Shared admin for name-only choice models (payment / delivery options).
    list_display = 'name',
    list_display_links = 'name',
    search_fields = 'name',
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    # Order admin: state flags are filterable, orders searchable by ref code.
    list_display = [
        'ordered',
        'being_delivered',
        'received',
        'refund_requested',
        'refund_granted',
        'address',
        'coupon',
    ]
    list_display_links = [
        'address',
        'coupon'
    ]
    list_filter = ['ordered',
                   'being_delivered',
                   'received',
                   'refund_requested',
                   'refund_granted']
    search_fields = [
        'ref_code'
    ]
@admin.register(Address)
class AddressAdmin(admin.ModelAdmin):
    # Address admin searchable by street/apartment.
    list_display = [
        'street_address',
        'apartment_address',
    ]
    search_fields = ['street_address', 'apartment_address']
# Plain registrations without custom admin classes.
admin.site.register(OrderItem)
admin.site.register(Coupon)
|
F_n = {0: 0, 1: 1}  # memo table seeded with the base cases n = 0 and n = 1


def fibbonacci(n):
    """Return the n-th Fibonacci number, memoizing results in F_n.

    Bug fix: the original recursed once per uncached index, so any n beyond
    the interpreter recursion limit (~1000) raised RecursionError. Filling
    the memo table iteratively keeps the memoization but removes the limit.
    """
    if n in F_n:
        return F_n[n]
    # Extend the table from the largest cached index up to n.
    for i in range(max(F_n) + 1, n + 1):
        F_n[i] = F_n[i - 1] + F_n[i - 2]
    return F_n[n]
# Read n from stdin and print the n-th Fibonacci number.
n = int(input())
print(fibbonacci(n))
|
994,013 | 281bf2d213e367b46119ebdba32dfe24e18958b1 | from .OdbMeshNode import OdbMeshNode
from .OdbSet import OdbSet
class OdbPretensionSection:
    """The pretension section object is used to define an assembly load. It associates a
    pretension node with a pretension section.
    Attributes
    ----------
    node: OdbSet
        An :py:class:`~abaqus.Odb.OdbSet.OdbSet` object specifying the node set containing the pretension node.
    element: OdbSet
        An :py:class:`~abaqus.Odb.OdbSet.OdbSet` object specifying the element set that defines the pretension section.
    surface: OdbSet
        An :py:class:`~abaqus.Odb.OdbSet.OdbSet` object specifying the surface set that defines the pretension section.
    normal: float
        A tuple of Floats specifying the components of the normal to the pretension section.
    Notes
    -----
    This object can be accessed by:
    .. code-block:: python
        import odbAccess
        session.odbs[name].rootAssembly.pretensionSections[i]
    """
    # An OdbSet object specifying the node set containing the pretension node.
    # NOTE(review): tuple[OdbMeshNode]() calls the parameterized generic alias,
    # which produces an empty tuple (requires Python 3.9+).
    node: OdbSet = OdbSet("set", tuple[OdbMeshNode]())
    # An OdbSet object specifying the element set that defines the pretension section.
    element: OdbSet = OdbSet("set", tuple[OdbMeshNode]())
    # An OdbSet object specifying the surface set that defines the pretension section.
    surface: OdbSet = OdbSet("set", tuple[OdbMeshNode]())
    # A tuple of Floats specifying the components of the normal to the pretension section.
    # NOTE(review): annotated as float but documented as a tuple of floats — confirm.
    normal: float = None
|
994,014 | 53352bd2c96253f93b9e06f01b159b883db9345d | '''
This script can be run directly in an rq notebook; no backtest is required.
It analyses the returns of the continuous main index future over the most
recent 80 trading days, split into [1, 2, 4, 8, 16] groups,
and plots each result separately.
'''
# Daily close prices for the continuous main index future, 2016-01 .. 2017-06.
close_data = get_price('IF88', frequency='1d', start_date='2016-01-01', end_date='2017-06-30')['close']
print('len(close_data): ', len(close_data))
# Percentage returns (x100) of the close series.
revenue_close_data = 100 * close_data.pct_change().dropna()
print('len(revenue_close_data): ', len(revenue_close_data))
# Collected Hurst-exponent estimates, one per sliding window.
Hurst = []
def test_calculate_hurst():
    """Estimate the Hurst exponent of an 80-day return window via R/S analysis.

    Reads module globals set by finish_hurst(): `piece_revenue_close_data`
    (the 80-day return window) and `list_num` (the group counts
    [1, 2, 4, 8, 16]). Appends each estimate to the global `Hurst` list.

    Bug fix: two comment lines were garbled by a bad text conversion and left
    bare (non-comment) fragments in the body, which made the original a
    SyntaxError; the comments have been repaired and translated to English.
    Returns (beta, Hurst) where beta is the regression slope (the H estimate).
    """
    # Accumulate every H estimate across calls.
    global Hurst
    # Returns over the most recent 80 trading days.
    return_close = piece_revenue_close_data
    # Average R/S value for each grouping.
    list_ars_data = []
    # Run the rescaled-range procedure for every group count.
    for obj_list_num in list_num:
        # R/S value of each segment under this grouping.
        list_rs_data = []
        # Split the 80 returns into obj_list_num equal segments.
        for i in range(0, obj_list_num):
            # Length of each segment.
            list_size = int(80 / obj_list_num)
            # Segment start (counted back from the end of the series).
            start = -(list_size * (i + 1)) -1
            # Segment end.
            end = -(list_size * i) - 1
            # Slice out the segment.
            data = return_close[start: end]
            # Segment mean.
            mean_data = np.mean(data)
            # Deviations from the mean.
            dev_data = data - mean_data
            # Cumulative sum of the deviations.
            cumsum_dev_data = dev_data.cumsum()
            # Range (max minus min) of the cumulative deviations.
            diff_data = max(cumsum_dev_data) - min(cumsum_dev_data)
            # Segment standard deviation.
            std_data = np.std(data)
            # R/S statistic for this segment.
            rs_data = np.array(diff_data) / std_data
            # Keep the segment's R/S value.
            list_rs_data.append(rs_data)
        # Average the per-segment R/S values into the ARS for this grouping.
        ars_data = np.mean(list_rs_data)
        list_ars_data.append(ars_data)
    # log10 of the ARS values.
    lg_list_ars_data = np.log10(list_ars_data)
    # log10 of the segment sizes, as column vectors for sklearn.
    lg_list_size = np.log10([80, 40, 20, 10, 5])
    lg_list_size = [[i] for i in lg_list_size]
    # Regress log(ARS) on log(size); the slope is the Hurst exponent.
    regr = linear_model.LinearRegression()
    regr.fit(lg_list_size, lg_list_ars_data)
    # Hurst exponent
    beta = regr.coef_
    Hurst.append(float(beta))
    return beta, Hurst
def finish_hurst():
    """Slide an 80-day window over the return series and estimate H for each window."""
    # These globals are consumed by test_calculate_hurst().
    global piece_revenue_close_data
    global list_num
    list_num = [1, 2, 4, 8, 16]
    # One estimate per window position.
    for i in range(0, len(revenue_close_data) - 80):
        start_num = i
        end_num = (i + 1) + 80
        piece_revenue_close_data = revenue_close_data[start_num: end_num]
        #print('start: ', i)
        test_calculate_hurst()
import numpy as np
from sklearn import linear_model
# Run the sliding-window Hurst estimation (fills the global Hurst list).
finish_hurst()
import matplotlib.pyplot as plt
import talib
# Smooth the Hurst series, returns and closes with short/long EMAs.
Hurst_ema_3 = talib.EMA(np.array(Hurst), 3)
Hurst_ema_20 = talib.EMA(np.array(Hurst), 20)
Revenue_close_data_ema_3 = talib.EMA(revenue_close_data[80:].values, 3)
Revenue_close_data_ema_20 = talib.EMA(revenue_close_data[80:].values, 20)
Close_data_ema_3 = talib.EMA(close_data[81:].values, 3)
Close_data_ema_20 = talib.EMA(close_data[81:].values, 20)
# Three panels: close prices, returns, and the Hurst exponent series.
fig = plt.figure(figsize=(22, 8))
ax1 = fig.add_subplot(2, 2, 3)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 1)
plt.subplots_adjust(hspace=0.5)
ax1.plot(Hurst_ema_3, label='Hurst_ema_3', alpha=0.9)
ax1.plot(Hurst_ema_20, label='Hurst_ema_20', alpha=0.9)
ax1.set_title('Hurst Exponent')
ax1.set_xlabel('trading date, starting from 2016-05-06')
ax1.set_ylabel('value')
ax1.legend(loc='best')
ax2.plot(Revenue_close_data_ema_3, label='Revenue_close_data_ema_3', alpha=0.9)
ax2.plot(Revenue_close_data_ema_20, label='Revenue_close_data_ema_20', alpha=0.9)
ax2.set_title('Revenue')
ax2.set_xlabel('trading date, starting from 2016-05-06')
ax2.set_ylabel('value(%)')
ax2.legend(loc='best')
ax3.plot(Close_data_ema_3, label='Close_data_ema_3', alpha=0.9)
ax3.plot(Close_data_ema_20, label='Close_data_ema_20', alpha=0.9)
ax3.set_title('Close')
ax3.set_xlabel('trading date, starting from 2016-05-06')
ax3.set_ylabel('value')
ax3.legend(loc='best')
plt.grid(True)
plt.show()
|
994,015 | bf458bafa6e152944f2e8904aab279ab6ea58000 | import sys
# Redirect stdin so input() reads the local test file.
sys.stdin = open("input.txt")
import math
class Tree:
    """1-based complete binary tree where values accumulate upward into parents."""

    def __init__(self, M, N):
        """Allocate M+1 slots (index 0 unused) and read N `index value` pairs from stdin."""
        self.tree = [0] * (M + 1)
        self.N = N
        for _ in range(N):
            idx, val = map(int, input().split())
            self.tree[idx] = val

    def calculate(self, index):
        """Walk nodes from `index` down to 2, adding each node into its parent
        (parent of node k is k // 2)."""
        for node in range(index, 1, -1):
            self.tree[node // 2] += self.tree[node]

    def getValue(self, L):
        """Return the accumulated value stored at node L."""
        return self.tree[L]
# One test case per line: M = highest node index, N = leaf assignments,
# L = node whose accumulated value is reported.
T = int(input())
for t in range(1, T + 1):
    M, N, L = map(int, input().split())
    tree = Tree(M, N)
    # Accumulate all values toward the root, then answer the query.
    tree.calculate(M)
    print(f"#{t} {tree.getValue(L)}")
994,016 | e3de185be1bcd10e5aeb9e8aee8261a8dcc020e7 | import urllib.request;
import random;
file_garena='https://docs.google.com/spreadsheets/d/1Nk-nFm34WZffVyv0LRvgxAl68QkF9l8rMEPAmAHXGRs/edit?usp=sharing'
def dowloadfile(url):
    """Download `url` and save its text content to goog.csv, one line per row.

    Bug fix: the original converted the response bytes with str(), which
    embeds the b'...' repr and leaves newlines as literal '\\n' escape
    sequences (hence splitting on "\\\\n"). The bytes are now decoded
    properly, and the file handle is managed with a context manager.
    """
    response = urllib.request.urlopen(url)
    text = response.read().decode('utf-8')
    with open(r'goog.csv', 'w') as fx:
        for line in text.split("\n"):
            fx.write(line + "\n")
dowloadfile(file_garena) |
994,017 | 59a2894dd5fb635ae33d1ae6c0f0d8721df873a2 | import os
import os.path
import sys
import requests
# File extensions that Cloudflare caches by default — only changes to these
# need a cache purge.
cached_extensions = [
    "bmp",
    "ejs",
    "jpeg",
    "pdf",
    "ps",
    "ttf",
    "class",
    "eot",
    "jpg",
    "pict",
    "svg",
    "webp",
    "css",
    "eps",
    "js",
    "pls",
    "svgz",
    "woff",
    "csv",
    "gif",
    "mid",
    "png",
    "swf",
    "woff2",
    "doc",
    "ico",
    "midi",
    "ppt",
    "tif",
    "xls",
    "docx",
    "jar",
    "otf",
    "pptx",
    "tiff",
    "xlsx",
]
# API credentials come from the environment (raises KeyError if unset).
headers = {
    "Authorization": "Bearer " + os.environ["CLOUDFLARE_API_KEY"],
    "Content-Type": "application/json",
}
# Cache-purge endpoint for the configured zone.
endpoint = (
    "https://api.cloudflare.com/client/v4/zones/"
    + os.environ["CLOUDFLARE_ZONE_ID"]
    + "/purge_cache"
)
# Batched purge payloads; Cloudflare accepts at most 30 URLs per request.
payloads = []
current_file_list = []
total_files = 0
# Read changed file paths from stdin, one per line.
for line in sys.stdin:
    line = line.strip()
    # Skip files Cloudflare would not have cached, and paths that don't exist.
    if os.path.splitext(line)[1][1:] not in cached_extensions or not os.path.isfile(line):
        continue
    print("Updated: "+line)
    # Map the local .public/ path to its public URL path.
    line = line.replace(".public/", "")
    if line[0] == "/":
        line = line[1:]
    current_file_list.append("https://spnati.net/" + line)
    if len(current_file_list) == 30:
        payloads.append({"files": current_file_list.copy()})
        current_file_list = []
    total_files += 1
# Flush the final partial batch.
if len(current_file_list) > 0:
    payloads.append({"files": current_file_list.copy()})
request_success = 0
request_total = 0
files_purged = 0
# Fire one purge request per batch and tally the outcomes.
for i, payload in enumerate(payloads):
    r = requests.post(endpoint, json=payload, headers=headers)
    resp = r.json()
    request_total += 1
    if r.status_code == 200:
        request_success += 1
        files_purged += len(payload["files"])
    # NOTE(review): assumes every response carries an "errors" list — confirm
    # against the Cloudflare API; a missing key would raise KeyError here.
    for err in resp["errors"]:
        print(
            "! Error during request #{}: Code {} - {}".format(
                i + 1, err["code"], err["message"]
            )
        )
# total_files > 0 is implied whenever request_total > 0, so the division below is safe.
if request_total > 0:
    print("{:d} / {:d} requests succeeded.".format(request_success, request_total))
    print(
        "Successfully purged {:d} / {:d} ({:.0%}) files from Cloudflare cache.".format(
            files_purged, total_files, files_purged / total_files
        )
    )
|
994,018 | dad29a1d9354272cfec14b75b67fec8bf65fa771 | from functools import wraps
from django.conf import settings
try:
from google.appengine.api import taskqueue
except ImportError:
taskqueue = None
_webhook_url = '/notifications/notify/'
def do_maybe_notification(func):
    """Wrap a method that returns a serialized list of splits and send updates
    to the notification task queue.
    """
    @wraps(func)
    def send_notification(*args, **kwargs):
        resp = func(*args, **kwargs)
        notifications_enabled = settings.ENABLE_NOTIFICATIONS and taskqueue is not None
        if notifications_enabled:
            # A single serialized split is normalized to a one-element list.
            splits = resp.data
            if not isinstance(splits, list):
                splits = [splits]
            for split in splits:
                taskqueue.add(url=_webhook_url, params={'split': split['id']})
        return resp
    return send_notification
|
994,019 | ae50a585bfbe482e08bfe8d6e986e9af012a9a71 | import numpy as np
import loadFittingDataP2 as load_data
import matplotlib.pyplot as plt
import q1,q2,q3
def lsq_error(f_params, w):
    """Sum of squared residuals for a degree-M polynomial fit with weights w.

    f_params is the triple [X, y, M].
    """
    X, y, M = f_params[0], f_params[1], f_params[2]
    phi_X = polynomial_basis(X, M)
    residuals = y - np.dot(phi_X, np.transpose(w))
    return np.sum(residuals ** 2)
def sum_of_squares_error(f_params, w, ii=None):
    """Sum-of-squares error and its analytic gradient for a polynomial fit.

    f_params is [X, y, M]; w is the weight vector of length M+1. `ii` is kept
    for backward compatibility with existing callers but is unused.
    Returns (error, gradient).

    Cleanup: the original also computed a finite-difference gradient via
    q2.gradient_approximation and its norm against the analytic gradient on
    every call, then discarded both — that dead (and slow) verification work
    has been removed along with the stale commented-out SGD experiments.
    """
    X = f_params[0]
    y = f_params[1]
    M = f_params[2]
    phi_X = polynomial_basis(X, M)
    error = lsq_error(f_params, w)
    # Analytic gradient of ||y - phi_X w||^2 with respect to w.
    grad = -2*np.dot(phi_X.T, y) + 2*np.dot(np.dot(phi_X.T, phi_X), w)
    return error, grad
def polynomial_basis(X, M):
    """Design matrix whose columns are X**0 .. X**M; a scalar X yields one row."""
    rows = 1 if np.ndim(X) == 0 else np.shape(X)[0]
    features = np.empty([rows, M + 1])
    for power in range(M + 1):
        features[:, power] = X ** power
    return features
def cosine_basis(X, M):
    """Design matrix with a constant column followed by cos(pi*i*X) for i = 1..M."""
    n_samples = np.shape(X)[0]
    features = np.empty([n_samples, M + 1])
    features[:, 0] = np.ones([n_samples])
    for freq in range(1, M + 1):
        features[:, freq] = np.cos(np.pi * X * freq)
    return features
def max_likelihood(X, y, M, f_basis, regularizer=0):
    """Closed-form (ridge) least-squares weights for the basis expansion f_basis(X, M)."""
    phi = f_basis(X, M)
    gram = regularizer * np.eye(M + 1) + np.dot(np.transpose(phi), phi)
    return np.dot(np.dot(np.linalg.inv(gram), np.transpose(phi)), y)
def main():
    """Fit the data with polynomial/cosine least squares plus batch and
    stochastic gradient descent, and plot all fits for several complexities M.

    Cleanup: ~80 lines of stale commented-out experiments were removed; all
    executable statements are unchanged.
    """
    X, y = load_data.getData()
    # Dense grid for plotting, plus the true generating function.
    plot_X = np.linspace(0,1,100)
    true_y = np.cos(np.pi*plot_X)+np.cos(2*np.pi*plot_X)
    fig = plt.figure()
    for i, M in enumerate([1,2,4,8]):
        cosine_plot_X = cosine_basis(plot_X, M)
        polynomial_plot_X = polynomial_basis(plot_X, M)
        # Closed-form maximum-likelihood fits in both bases.
        f_basis = polynomial_basis
        w_polynomial = max_likelihood(X, y, M, f_basis)
        y_polynomial = np.dot(w_polynomial, np.transpose(polynomial_plot_X))
        f_basis = cosine_basis
        w_cosine = max_likelihood(X, y, M, f_basis, regularizer=0)
        y_cosine = np.dot(w_cosine, np.transpose(cosine_plot_X))
        # Batch GD
        f = sum_of_squares_error
        f_params = [X, y, M]
        learning_rate = 0.01
        convergence_limit = 1e-3
        initial_guess = np.zeros([M+1])
        guesses = [2,-10,10]
        for ii in range(min(M,3)):
            initial_guess[ii] = guesses[ii]
        max_iters = 100
        w_bgds, fx, dx, num_iters = q1.gradient_descent(f,f_params,learning_rate,convergence_limit,initial_guess,max_iters)
        w_bgd = w_bgds[-1]
        y_bgd = np.dot(w_bgd, np.transpose(polynomial_plot_X))
        # SGD
        f = sum_of_squares_error
        f_params = [X, y, M]
        convergence_limit = 1e-8
        initial_guess = np.zeros([M+1])
        guesses = [2,-10,10]
        for ii in range(min(M,3)):
            initial_guess[ii] = guesses[ii]
        max_iters = 10000
        t0 = 1e8
        k = 0.6
        w_sgds, fx, num_iters = q3.sgd(f,f_params,convergence_limit,initial_guess,max_iters,t0,k)
        w_sgd = w_sgds[-1]
        y_sgd = np.dot(w_sgd, np.transpose(polynomial_plot_X))
        # One subplot per model complexity.
        p = plt.subplot(1,4,i+1)
        plt.plot(X,y,'co', markersize=10,markerfacecolor='none')
        plt.plot(plot_X, true_y, 'g-')
        plt.plot(plot_X, y_polynomial, '-',color='orange')
        plt.plot(plot_X, y_cosine, 'b-')
        plt.plot(plot_X, y_bgd, 'm-')
        plt.plot(plot_X, y_sgd, '-', color='black')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('Linear Regression (M=%i)'%M)
        plt.legend(['Data','True','Polynomial','Cosine','GD','SGD'])
    plt.show()
    return

if __name__ == '__main__':
    main()
equationa = "(2 + 2) * 6 + 3"
equationa = equationa.replace(' ','')


def _tokenize(expr):
    """Split a +-*/() expression string into float and operator tokens."""
    tokens = []
    num = ''
    for ch in expr:
        if ch.isdigit() or ch == '.':
            num += ch
        else:
            if num:
                tokens.append(float(num))
                num = ''
            tokens.append(ch)
    if num:
        tokens.append(float(num))
    return tokens


def _eval_flat(tokens):
    """Evaluate a parenthesis-free token list: * and / first, then + and -."""
    # First pass folds * and / into their left operand.
    folded = [tokens[0]]
    k = 1
    while k < len(tokens):
        op, val = tokens[k], tokens[k + 1]
        if op == '*':
            folded[-1] = folded[-1] * val
        elif op == '/':
            folded[-1] = folded[-1] / val
        else:
            folded.append(op)
            folded.append(val)
        k += 2
    # Second pass applies + and - left to right.
    total = folded[0]
    for k in range(1, len(folded), 2):
        total = total + folded[k + 1] if folded[k] == '+' else total - folded[k + 1]
    return total


def evaluate(expr):
    """Evaluate an arithmetic expression with + - * / and parentheses.

    Bug fix: the original tried to assign into string indices (a TypeError for
    any * / - inside parentheses), handled only single digits, and never
    evaluated operators outside the parentheses, so it printed "4*6+3"
    instead of the result. Innermost parentheses are now reduced first, then
    the flat expression is evaluated with normal precedence.
    NOTE(review): negative intermediate results inside parentheses are not
    supported (the leading '-' would be read as an operator).
    """
    expr = expr.replace(' ', '')
    while '(' in expr:
        close = expr.find(')')
        open_ = expr.rfind('(', 0, close)
        inner = _eval_flat(_tokenize(expr[open_ + 1:close]))
        expr = expr[:open_] + repr(inner) + expr[close + 1:]
    return _eval_flat(_tokenize(expr))


answer = evaluate(equationa)
print(answer)
|
994,021 | 34d07d3b8961ab9024dd95e4bb3cac0458e521c4 | #!/usr/bin/python3
"""Module lists all states that match name argument"""
if __name__ == "__main__":
import MySQLdb
from sys import argv
# default values user="root", passwd="", db="hbtn_0e_0_usa"
db = MySQLdb.connect(host="localhost",
port=3306,
user=argv[1],
passwd=argv[2],
db=argv[3])
cur = db.cursor()
# %s placeholder fills values more safely
state_lookup = str(argv[4])
SQLcommand = """SELECT * FROM states
WHERE states.name=%s
ORDER BY states.id ASC"""
cur.execute(SQLcommand, (state_lookup,))
for row in cur.fetchall():
print(row)
db.close()
|
994,022 | dbff87f7922bf38e5839d306e47966bbc2f2b9f6 | # python์์ ์ฌ์ฉํ๋ ๋ณ์๋ ๊ฐ์ฒด๋ฅผ ๊ฐ๋ฆฌํค๋ ๊ฒ์ด๋ผ๊ณ ๋ ๋งํ ์ ์๋ค.
a = [1,2,3]
# ์์ a๋ฆฌ์คํธ ์๋ฃํ์ [1,2,3]์ ๊ฐ์ ๊ฐ์ง๋ ๋ฆฌ์คํธ ์๋ฃํ์ด ์๋์ผ๋ก ๋ฉ๋ชจ๋ฆฌ์ ์์ฑ๋๊ณ
# ๋ณ์ a ๋ [1,2,3]๋ฆฌ์คํธ๊ฐ ์ ์ฅ๋ ๋ฉ๋ชจ๋ฆฌ์ ์ฃผ์๋ฅผ ๊ฐ๋ฆฌํค๊ฒ ๋๋ค.
# ๋ณ์ a๊ฐ ๊ฐ๋ฆฌํค๋ ์ฃผ์๊ฐ
print(id(a))
# ๋ฆฌ์คํธ๋ฅผ ๋ณต์ฌ ํ ๋
# a ๋ณ์๊ฐ ๊ฐ์ง๊ณ ์๋ ์ฃผ์๊ฐ์ ๋ณ์ b ์ ๋์
# ๋ ๋ณ์๊ฐ ์ฐธ์กฐํ๋ ๊ฐ์ฒด์ ์ฃผ์๊ฐ์ ์ผ์นํ๊ธฐ ๋๋ฌธ์ ์๋ก ๋ณ์์ ๊ฐ์ ์์ ์ญ์ ํ ์ ์๋ค.
a = [1,2,3]
b = a
a[1] = 4
print(b)
# :์ด์ฉํ๊ธฐ
# :๋ฅผ ์ด์ฉํ๋ ๊ฒฝ์ฐ a ๋ฆฌ์คํธ์ ๊ฐ์ ๋ฐ๊พธ๋๋ผ๋ b ๋ฆฌ์คํธ์๋ ์ํฅ์ ๋ผ์น์ง ์๋๋ค.
a = [1,2,3]
b = a[:]
a[1] = 4
print(b)
# copy ๋ชจ๋ ์ด์ฉ
# copy ํจ์๋ฅผ ์ฐ๊ธฐ ์ํด import ํด์ค
from copy import copy
# copyํจ์๋ฅผ ์ฌ์ฉํ๋ ๊ฒฝ์ฐ b = a[:]์ ๊ฐ์ด ๊ฐ์ ์ง์ ๋์
ํ๋ค.
b = copy(a)
# a ์ b๋ฅผ ๋น๊ตํด๋ ์ฃผ์๊ฐ์ด ๋ค๋ฅด๋ค๋ ๊ฒ์ ์์์๋ค.
print(a is b)
# ๋ณ์๋ฅผ ๋ง๋๋ ๋ฐฉ๋ฒ
# ํํ๋ก a,b์ ๊ฐ์ ๋์
a,b = ('python','life')
# python
print(a)
# life
print(b)
# ํํ์ ๊ดํธ ์๋ต์ด ๊ฐ๋ฅ
(a,b) = 'python','life'
# python
print(a)
# life
print(b)
# ๋ฆฌ์คํธ ๋ณ์ ๋ง๋ค๊ธฐ
[a,b] = ['python','life']
print(a)
print(b)
# ์ฌ๋ฌ๊ฐ์ ๋ณ์์ ๊ฐ์๊ฐ ๋์
ํ๊ธฐ
a = b = 'python'
print(a)
print(b)
# ๊ฐ ์์ ๋กญ๊ฒ ๋ฐ๊พธ๊ธฐ
a = 3
b = 5
print(a)
print(b)
a,b = b,a
print(a)
print(b) |
994,023 | c58885e1397ed34fdef82ba8b485aff6d014ba55 | ###########################
# Time Series Learning
# this code is train and test tsl model.
# select tsl model.
# train :
# train model and save variables of model.
# test :
# load model and evaluate.
#
# data : hospital admit patients
# input : previous EMR data
# output : predicted next EMR data
#
# by Donghoon Oh
###########################
import os
os.environ["CUDA_VISIBLE_DEVICES"]='0'
import numpy as np
import tensorflow as tf
import time
import datetime
from data_preprocessing import read_data, padding, split_data, feature_scaler
from utils import *
from model.tsl.tsl import TSL_model
# flags
from tensorflow.python.platform import flags
# Command-line configuration for training/testing the TSL model.
flags.DEFINE_string('mode', 'train', 'select train or test')
flags.DEFINE_string('model', 'A', 'select hospital model A, B, C')
flags.DEFINE_string('rnn_type', 'uni', 'select bi-directional or uni-directional')
flags.DEFINE_integer('num_layer', 3, 'set the layers of rnn')
flags.DEFINE_integer('batch_size', 256, 'set the batch size')
flags.DEFINE_integer('hidden_size', 512, 'set the hidden size of rnn cell')
flags.DEFINE_integer('epoch', 30, 'set the number of epochs')
flags.DEFINE_float('learning_rate', 0.001, 'set the learning rate')
flags.DEFINE_string('log_dir', '../log/tsl', 'set the log directory')
flags.DEFINE_float('keep_prob', 0.8, 'set the dropout')
FLAGS = flags.FLAGS
# set arguments
mode = FLAGS.mode
model = FLAGS.model
rnn_type = FLAGS.rnn_type
num_layer = FLAGS.num_layer
batch_size = FLAGS.batch_size
hidden_size = FLAGS.hidden_size
learning_rate = FLAGS.learning_rate
# Test mode forces a single epoch and disables dropout.
if mode == 'test':
    epoch = 1
    keep_prob = 1.0
else:
    epoch = FLAGS.epoch
    keep_prob = FLAGS.keep_prob
# set path of log directory: <log_dir>/<rnn_type>/<model>/{save,result,logging}
log_dir = os.path.join(FLAGS.log_dir, rnn_type)
save_dir = os.path.join(log_dir, model,'save')
result_dir = os.path.join(log_dir, model, 'result')
logging_dir = os.path.join(log_dir, model, 'logging')
check_path_exists([log_dir, save_dir, result_dir, logging_dir])
checkpoint_path = os.path.join(save_dir, 'tsl.ckpt')
# Timestamped per-run log file, e.g. 2020-01-01_12:00:00_train_tsl_A.txt
logfile = os.path.join(logging_dir, str(datetime.datetime.strftime(datetime.datetime.now(),
        '%Y-%m-%d_%H:%M:%S') + '_'+mode+'_tsl_'+model+'.txt').replace(' ', '').replace('/', ''))
# Train
class Runner(object):
    """Drives training/testing of the TSL model using the module-level flag values."""
    # set configs
    def _default_configs(self):
        """Collect the flag-derived hyperparameters into a single dict."""
        return {'mode' : mode,
                'model' : model,
                'rnn_type' : rnn_type,
                'num_layer' : num_layer,
                'batch_size': batch_size,
                'hidden_size': hidden_size,
                'epoch' : epoch,
                'learning_rate': learning_rate,
                'keep_prob' : keep_prob
                }
    def load_data(self):
        """Read, pad and split the EMR data; return the split matching `mode`."""
        # data preprocessing
        '''
        input : (batch, max_step, input_features)
        target : (batch, target_features)
        '''
        # read the data set
        input_set, target_set = read_data('fill')
        # padding
        pad_input_set, seq_len = padding(input_set)
        print(pad_input_set[0])
        # split data set for model
        input_train, input_test, target_train, target_test, seq_train, seq_test = split_data(
                pad_input_set, target_set, seq_len, model)
        if mode == 'train':
            return input_train, target_train, seq_train
        elif mode == 'test':
            return input_test, target_test, seq_test
    # train
    def train(self, args, model, input_train, target_train, seq_train):
        """Train `model` with mini-batches, logging loss/RMSE and checkpointing each epoch."""
        with tf.Session(graph=model.graph, config=get_tf_config()) as sess:
            # initialization
            sess.run(model.initial_op)
            for each_epoch in range(epoch):
                start = time.time()
                print('\n[Epoch :{}]\n'.format(each_epoch+1))
                logging(model=model, logfile=logfile, each_epoch=each_epoch, mode='epoch')
                # mini batch
                batch_epoch = int(input_train.shape[0]/batch_size)
                batch_loss = np.zeros(batch_epoch)
                rmse_list = []
                for b in range(batch_epoch):
                    batch_inputs, batch_targets, batch_seq_len = next_batch(
                            batch_size, [input_train, target_train, seq_train])
                    feed = {model.inputs:batch_inputs,
                            model.targets:batch_targets,
                            model.seq_len:batch_seq_len}
                    _, l, r, p = sess.run([model.optimizer,model.loss, model.rmse,
                                            model.predict],
                                            feed_dict=feed)
                    batch_loss[b] = l
                    # Per-feature RMSE computed on the host for logging.
                    batch_rmse = RMSE(p, batch_targets, 'RMSE')
                    r = mean_rmse(batch_rmse)
                    rmse_list.append(batch_rmse)
                    if b%10 == 0:
                        print('batch: {}/{}, loss={:.3f}, rmse={:.3f}'.format(b+1, batch_epoch, l,r))
                        logging(model, logfile, batch=b, batch_epoch=batch_epoch, loss=l, rmse=r, mode='batch')
                loss = np.sum(batch_loss)/batch_epoch
                rmse = np.asarray(rmse_list).mean(axis=0)
                rmse_mean = mean_rmse(rmse)
                delta_time = time.time()-start
                print('\n==> Epoch: {}/{}, loss={:.4f}, rmse={:.4f}, epoch time : {}\n'\
                        .format(each_epoch+1, epoch, loss, rmse_mean, delta_time))
                logging(model, logfile, each_epoch, epoch,
                        loss=loss, rmse=rmse_mean, delta_time=delta_time, mode='train')
                # save model by epoch
                model.saver.save(sess, checkpoint_path)
                print('Prediction : ')
                print(p[0:2])
                print('target : ')
                print(batch_targets[0:2])
    def test(self, args, model, input_test, target_test, seq_test):
        """Restore the latest checkpoint and evaluate RMSE/NRMSE over the test split."""
        with tf.Session(graph=model.graph, config=get_tf_config()) as sess:
            # initialization
            sess.run(model.initial_op)
            epoch = 1
            # load check point
            model.saver.restore(sess, checkpoint_path)
            for each_epoch in range(epoch):
                start = time.time()
                print('\n[Epoch :{}]\n'.format(each_epoch+1))
                logging(model=model, logfile=logfile, each_epoch=each_epoch, mode='epoch')
                # mini batch (sequential slices — no shuffling at test time)
                batch_epoch = int(input_test.shape[0]/batch_size)
                batch_loss = np.zeros(batch_epoch)
                rmse_list = []
                nrmse_list = []
                for b in range(batch_epoch):
                    batch_inputs = input_test[b*batch_size : (b+1)*batch_size]
                    batch_targets = target_test[b*batch_size : (b+1)*batch_size]
                    batch_seq_len = seq_test[b*batch_size : (b+1)*batch_size]
                    feed = {model.inputs:batch_inputs,
                            model.targets:batch_targets,
                            model.seq_len:batch_seq_len}
                    l, p, t = sess.run([model.loss,
                                        model.predict,model.targets],
                                        feed_dict=feed)
                    batch_loss[b] = l
                    batch_rmse = RMSE(p, batch_targets, 'RMSE')
                    batch_nrmse = RMSE(p, batch_targets, 'NRMSE')
                    r = mean_rmse(batch_rmse)
                    nr = mean_rmse(batch_nrmse)
                    rmse_list.append(batch_rmse)
                    nrmse_list.append(batch_nrmse)
                    if b%10 == 0:
                        print('batch: {}/{}, loss={:.4f}, rmse={:.4f}'.format(b+1, batch_epoch, l,r))
                        logging(model, logfile, batch=b, batch_epoch=batch_epoch, loss=l, rmse=r, mode='batch')
                loss = np.sum(batch_loss)/batch_epoch
                rmse = np.asarray(rmse_list).mean(axis=0)
                rmse_mean = mean_rmse(rmse)
                nrmse = np.asarray(nrmse_list).mean(axis=0)
                delta_time = time.time()-start
                print('\n==> Epoch: {}/{}, loss={:.4f}, rmse={:.4f}, epoch time : {}\n'\
                        .format(each_epoch+1, epoch, loss, rmse_mean, delta_time))
                logging(model, logfile, each_epoch, epoch,
                        loss=loss, rmse=rmse_mean, delta_time=delta_time, mode='train')
                # Append the per-feature RMSE / normalized RMSE to the log file.
                with open(logfile, 'a') as myfile:
                    myfile.write('\nRMSE : \n')
                    for i,e in enumerate(rmse):
                        if not i == 0:
                            myfile.write(', '.format(e))
                        myfile.write('{:.4f}'.format(e))
                    myfile.write('\nNormalize RMSE : \n')
                    for i,e in enumerate(nrmse):
                        if not i == 0:
                            myfile.write(', '.format(e))
                        myfile.write('{:.4f}'.format(e))
                print('rmse : ')
                print(rmse)
                print('normalize rmse : ')
                print(nrmse)
                # save model by epoch
                print('Prediction : ')
                print('target : ')
    # main
    def run(self):
        """Build configuration, load data, construct the TSL model and dispatch train/test."""
        # set args
        args_dict = self._default_configs()
        args = dotdict(args_dict)
        # step 1
        # load data
        input_set, target_set, seq_len_set = self.load_data()
        print('[model data set]')
        print('shape of input : {}'.format(input_set.shape))
        print('shape of target : {}'.format(target_set.shape))
        # data parameters
        num_steps = input_set.shape[1]
        dim_inputs = input_set.shape[2]
        dim_targets = target_set.shape[1]
        # load TSL model
        tsl_model = TSL_model(args, num_steps, dim_inputs, dim_targets)
        # count the num of parameters
        num_params = count_params(tsl_model, mode='trainable')
        all_num_params = count_params(tsl_model, mode='all')
        tsl_model.config['trainable params'] = num_params
        tsl_model.config['all params'] = all_num_params
        print('\n[model information]\n')
        print(tsl_model.config)
        # [step 3]
        # learning
        logging(model=tsl_model, logfile=logfile, mode='config')
        if mode == 'train':
            self.train(args, tsl_model, input_set, target_set, seq_len_set)
        elif mode == 'test':
            self.test(args, tsl_model, input_set, target_set, seq_len_set)

if __name__ == '__main__':
    runner = Runner()
    runner.run()
|
994,024 | e8b6718f19be532b98435b092f2507b9db975a06 | # -*- coding: utf-8 -*-
import numpy
from .types import check_numpy_array
# Cartesian unit basis vectors (plain tuples).
X = (1.0,0.0,0.0)
Y = (0.0,1.0,0.0)
Z = (0.0,0.0,1.0)
# Unit diagonal / antidiagonal directions in the xy-plane.  Dividing a tuple
# by the numpy scalar sqrt(2) produces a numpy array, so these two are
# ndarrays, not tuples.
Dxy = (1.0,1.0,0.0)/numpy.sqrt(2)
Axy = (-1.0,1.0,0.0)/numpy.sqrt(2)
def normalize2(vec, norm=1.0):
    """Rescale *vec* so that its Euclidean length equals *norm*."""
    arr = check_numpy_array(vec)
    length = numpy.sqrt(numpy.dot(arr, arr))
    return (arr / length) * norm
def norm(vec):
    """Return the Euclidean norm of *vec* (sqrt of its dot product with itself)."""
    return numpy.sqrt(numpy.dot(vec, vec))
|
994,025 | 6ad348348af6c72a890a2df2251496ec1c1f5aff | """Definir un conjunto con nรบmeros enteros entre 0 y 9. Luego solicitar valores al
usuario y eliminarlos del conjunto mediante el mรฉtodo remove, mostrando el con-
tenido del conjunto luego de cada eliminaciรณn. Finalizar el proceso al ingresar -1.
Utilizar manejo de excepciones para evitar errores al intentar quitar elementos
inexistentes."""
|
994,026 | 6d0ea74e20d5dd0046a61cc75fac6c9e4c07a74e | #!/usr/bin/python3
# Read the file contents and echo them back as an HTML heading (CGI response).
with open('data.txt', 'r') as file:
    # Fixed: `file.read(text)` passed an undefined name as a size argument;
    # read() returns the contents, it does not fill a variable.
    text = file.read()
print("Content-type:text/html\r\n\r\n")
# Fixed: the original `"<h1>%s</h1>" %` had no right operand (syntax error).
print("<h1>%s</h1>" % text)
994,027 | 6b512de4bca5e9ebbb6cc163aa394266f016ca50 | from pathlib import Path
import bw_processing as bwp
import numpy as np
from fs.base import FS
from fs.osfs import OSFS
from fs.zipfs import ZipFS
from .errors import InconsistentGlobalIndex
def get_seed(seed=None):
    """Get valid Numpy random seed value"""
    # Map any acceptable `seed` to a value in [0, 2**31 - 1) by drawing once
    # from a generator seeded with it (deterministic for a fixed seed).
    # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM
    rng = np.random.RandomState(seed)
    return rng.randint(0, 2147483647)
def consistent_global_index(packages, matrix="characterization_matrix"):
    """Return the single shared ``global_index`` across ``packages`` (or None).

    Raises ``InconsistentGlobalIndex`` when the packages disagree.
    """
    global_list = []
    for package in packages:
        indices = package.filter_by_attribute("matrix", matrix).filter_by_attribute("kind", "indices")
        for resource in indices.resources:
            global_list.append(resource.get("global_index"))
    if len(set(global_list)) > 1:
        raise InconsistentGlobalIndex(
            f"Multiple global index values found: {global_list}. If multiple LCIA datapackages are present, they must use the same value for ``GLO``, the global location, in order for filtering for site-generic LCIA to work correctly."
        )
    return global_list[0] if global_list else None
def wrap_functional_unit(dct):
    """Transform functional units for effective logging.
    Turns ``Activity`` objects into their keys."""
    entries = []
    for key, amount in dct.items():
        if isinstance(key, int):
            entry = {"id": key, "amount": amount}
        else:
            try:
                # (database, code) tuple-like keys
                entry = {"database": key[0], "code": key[1], "amount": amount}
            except TypeError:
                # Not subscriptable — log the raw key as-is.
                entry = {"key": key, "amount": amount}
        entries.append(entry)
    return entries
def get_datapackage(obj):
    """Coerce ``obj`` (datapackage, filesystem, Path, or string) into a loaded datapackage."""
    if isinstance(obj, bwp.DatapackageBase):
        return obj
    if isinstance(obj, FS):
        return bwp.load_datapackage(obj)
    if isinstance(obj, Path):
        if obj.suffix.lower() == ".zip":
            return bwp.load_datapackage(ZipFS(obj))
        if obj.is_dir():
            return bwp.load_datapackage(OSFS(obj))
    elif isinstance(obj, str):
        if obj.lower().endswith(".zip") and Path(obj).is_file():
            return bwp.load_datapackage(ZipFS(Path(obj)))
        if Path(obj).is_dir():
            return bwp.load_datapackage(OSFS(Path(obj)))
    raise TypeError(
        "Unknown input type for loading datapackage: {}: {}".format(type(obj), obj)
    )
|
994,028 | 96061e923189ca97d6369aefb78d6e157fa38574 | import pygame,time,random
BLACK = (0,0,0)
GREEN = (34, 139, 34)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
CYAN = (0, 255, 255)
BLUE = (64, 224, 208)
pygame.init()
width, height = 800, 500
win=pygame.display.set_mode((width, height))
pygame.display.set_caption('Flappy bird')
clock = pygame.time.Clock()
img = pygame.image.load(r'C:\Users\THU TRANG\Downloads\project Flappy bird\bird.jpg')
img_width=img.get_size()[0]
img_height=img.get_size()[1]
def show_score(curent_score):
    """Render the current score in the top-left corner of the window."""
    label = pygame.font.SysFont('consolas', 20).render('Score:' + str(curent_score), True, WHITE)
    win.blit(label, [3, 3])
def blocks(x_block, y_block, block_width, block_height, gap):
    """Draw one obstacle: an upper pipe and a lower pipe separated by `gap`."""
    upper = [x_block, y_block, block_width, block_height]
    lower = [x_block, y_block + block_height + gap, block_width, height]
    pygame.draw.rect(win, GREEN, upper)
    pygame.draw.rect(win, GREEN, lower)
def maketextobject(text, font):
    """Return a rendered text surface together with its bounding rect."""
    surface = font.render(text, True, WHITE)
    return surface, surface.get_rect()
def replay_or_quit():
    """Poll keyboard/close events once; quit on window close, otherwise
    return the key of a KEYUP event (or None if nothing relevant happened)."""
    for event in pygame.event.get([pygame.KEYDOWN,pygame.KEYUP,pygame.QUIT]):
        if event.type==pygame.QUIT:
            pygame.quit()
            quit()
        elif event.type==pygame.KEYDOWN:
            # Ignore presses; the key is reported on release (KEYUP) below.
            continue
        # Reached only for KEYUP events: report which key was released.
        return event.key
    return None
def msg_win(text):
    """Show a large centered message, wait for a keypress, then restart.

    NOTE(review): this calls main() at the end, so every game-over nests one
    more stack frame — fine for casual play, but the stack never unwinds.
    """
    smallText = pygame.font.SysFont('consolas',20)
    largeText =pygame.font.SysFont('consolas',130)
    titletextSurf, titleTextRect = maketextobject(text, largeText)
    titleTextRect.center = width / 2, height / 2
    win.blit(titletextSurf, titleTextRect)
    typtextSurf, typTextRect = maketextobject('Press any key to continue', smallText)
    typTextRect.center = width / 2, ((height / 2) + 100)
    win.blit(typtextSurf, typTextRect)
    pygame.display.update()
    # Brief pause so an in-game keypress doesn't immediately dismiss the screen.
    time.sleep(1)
    # Busy-wait until replay_or_quit() reports a key release.
    while replay_or_quit() is None:
        clock.tick()
    main()
def game_over():
    """Display the game-over screen (which in turn restarts the game)."""
    msg_win('Game over')
def bird(x,y,image):
    """Draw the bird sprite at window coordinates (x, y)."""
    win.blit(image,(x,y))
def main():
    """Run one game session: move the bird, scroll pipes, track the score."""
    # Bird position and vertical velocity.
    x=150
    y=200
    y_move=0
    # Pipe geometry: starts off-screen right, random upper-pipe height.
    x_block=width
    y_block=0
    block_width=50
    # NOTE(review): height/2 is a float; random.randint requires ints on
    # Python 3.12+ — consider height // 2.
    block_height=random.randint(0,height/2)
    gap=img_height*5
    block_move=5
    score =0
    gameover =False
    while not gameover:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameover = True
            # Holding UP makes the bird rise; releasing makes it fall.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    y_move = -5
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_UP:
                    y_move = 5
        y = y + y_move
        win.fill(BLUE)
        bird(x, y, img)
        show_score(score)
        # Difficulty ramps with score: faster pipes, narrower gap.
        if 3 <= score < 5:
            block_move = 6
            gap = img_height * 4
        if 5 <= score < 8:
            block_move = 7
            gap = img_height * 3.3
        if 8 <= score < 14:
            block_move = 8
            gap = img_height * 3
        if score >= 14:
            block_move = 8
            gap = img_height * 2.5
        blocks(x_block, y_block, block_width, block_height, gap)
        x_block -= block_move
        # Floor/ceiling collision ends the round.
        if y >height - img_height or y < 0:
            game_over()
        # Pipe fully off-screen left: respawn at the right with a new height.
        if x_block < (-1 * block_width):
            x_block = width
            block_height = random.randint(0, height / 2)
        # Pipe collision while the bird horizontally overlaps the pipe column.
        if x + img_width > x_block and x < x_block + block_width:
            if y < block_height or y + img_height > block_height + gap:
                game_over()
        # Passed the pipe: award a point.
        if x > x_block + block_width and x < x_block + block_width + img_width / 5:
            score += 1
        pygame.display.update()
        clock.tick(80)
# Start the game, then shut pygame down cleanly when the main loop exits.
main()
pygame.quit()
quit()
|
994,029 | aceaac25e51ceff859ba5bfa5b94ca9c27f41d79 | """
regex1 ๆญฃๅ่กจ่พพๅผ
"""
import re

s = "ไปๅนดๆฏ2019ๅนด12ๆ10ๆฅ๏ผ2019ๅนดๅ็็ฎๆ ๅฎ็ฐไบๅ" \
    "ไฟๆ95ๆค็ๆฟๆ่ฟ่ฎฐๅพๅ"
# Raw strings for regex patterns (non-raw "\d" is a deprecated escape).
pattern = r"\d+"

# Get an iterator over every match in the string.
# (The original comments here were line-broken mid-character by a bad
# extraction, which left bare names like `ๅ` on their own lines — NameErrors.)
result = re.finditer(pattern, s)
# for i in result:
#     # Each item is a Match object, e.g.
#     # <_sre.SRE_Match object; span=(3, 7), match='2019'>
#     print(i.group())  # 2019 ...

# Full match of the entire string:
# obj = re.fullmatch(r".+", s)
# print(obj)

# Match anchored at the start of the string.
obj = re.match(r"\w+", s)
print(obj.group())

# First occurrence anywhere in the string.
obj = re.search(r"\d+", s)
print(obj.group())
|
994,030 | 3b2228da7b7f4b11ecba8000bd10f288dbad0f3a | from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import gettext_lazy as _
from django import forms
class UserLoginForm(AuthenticationForm):
    """Login form with Bootstrap-styled widgets and localized error messages."""

    def __init__(self, *args, **kwargs):
        super(UserLoginForm, self).__init__(*args, **kwargs)

    username = forms.CharField(widget=forms.TextInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'ะะพะณะธะฝ',
            'id': 'inputLogin',
        }))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={
            'class': 'form-control',
            'placeholder': 'ะะฐัะพะปั',
            'id': 'inputPassword',
        }))
    error_messages = {
        # Fixed: this literal arrived line-broken mid-word ("ะั / ะพะด"),
        # which is a syntax error; restored as a single string.
        'invalid_login': _(
            "ะัะพะด ะฒ ะะถะตะดะฝะตะฒะฝะธะบ ะฝะตะดะพะฟัััะธะผ"
        ),
        'inactive': _("This account is inactive."),
    }
|
994,031 | a3a7e96cd8c29ac220284891fd6b81a4a19752e9 | # 967. Numbers With Same Consecutive Differences
# Medium
# 1464
# 146
# Add to List
# Share
# Return all non-negative integers of length n such that the absolute difference between every two consecutive digits is k.
# Note that every number in the answer must not have leading zeros. For example, 01 has one leading zero and is invalid.
# You may return the answer in any order.
# Example 1:
# Input: n = 3, k = 7
# Output: [181,292,707,818,929]
# Explanation: Note that 070 is not a valid number, because it has leading zeroes.
# Example 2:
# Input: n = 2, k = 1
# Output: [10,12,21,23,32,34,43,45,54,56,65,67,76,78,87,89,98]
# Constraints:
# 2 <= n <= 9
# 0 <= k <= 9
# This solution works:
class Solution:
    def numsSameConsecDiff(self, n: int, k: int) -> List[int]:
        """Return every n-digit number (no leading zero) whose adjacent
        digits always differ by exactly k (LeetCode 967)."""
        results = []

        def extend(remaining, value):
            # `remaining` digits left to place; `value` is the prefix so far.
            if remaining == 0:
                results.append(value)
                return
            for digit in range(10):
                if remaining == n:
                    # First digit: anything except a leading zero.
                    if digit != 0:
                        extend(remaining - 1, digit)
                elif abs(value % 10 - digit) == k:
                    extend(remaining - 1, value * 10 + digit)

        extend(n, 0)
        return results
994,032 | b413e782ae4dad5b9ceae777d5c8ee4413ecd007 | import urllib.request
import random
import time
def hello():
    """Smoke-test helper: print a greeting."""
    print("hello")
def capture(path):
    """Fetch one frame from the camera's HTTP endpoint and save it to `path`.

    The random query parameter acts as a cache-buster so every request hits
    the camera again.  NOTE(review): the camera IP is hard-coded — confirm it
    matches the deployment network.
    """
    seed = random.random()
    # print(seed)
    urllib.request.urlretrieve("http://10.146.19.124/capture?t=" + str(seed),
                               path)
if __name__ == "__main__":
n = 15
print("Start captureing " + str(n) + " 1.9MP images...")
for x in range(0, n):
print("Start Capturing")
path = "fetch/img" + str(x) + ".jpg"
capture(path)
print("Capturing Done.")
end = time.clock()
print("exe time: " + str(end))
print("sleep for 1 secs")
time.sleep(1)
print("All Done")
|
994,033 | b4f9c70337cc0b05dd674d1d1756b3f41723626c | class Solution:
def myPow(self, x: float, n: int) -> float:
episilon = 0.00000000001
if x<episilon and x>-episilon and n<0:
return 0.0
s = 1 if n>0 else -1
result = self.helper(x,n*s)
if s==1:
return result
return 1/result
def helper(self,x,n):
if n==0:
return 1
if n==1:
return x
result = self.helper(x,n>>1)
if n&1:
return result*result*x
else:
return result*result
|
994,034 | 91747a55751cc33590389603e303cdf900cf5677 |
import sys
import time
import numpy
import statistics
"""
Each sudoku board is represented as a dictionary with string keys and
int values.
e.g. my_board['A1'] = 8
"""
ROW = "ABCDEFGHI"
COL = "123456789"
order_of_selection = [1,2,3,4,5,6,7,8,9]
def print_board(board):
    """Helper function to print board in a square."""
    print("-----------------")
    for r in ROW:
        # One row per line, each cell followed by a space.
        print(" ".join(str(board[r + c]) for c in COL) + " ")
def board_to_string(board):
    """Helper function to convert board dictionary to string for writing."""
    # Row-major scan: A1..A9, B1..B9, ..., I9.
    return ''.join(str(board[r + c]) for r in ROW for c in COL)
def backtracking(board):
    """Takes a board and returns solved board.

    Recursive CSP search: pick the empty cell with minimum remaining values
    (MRV), try each legal digit, and backtrack on dead ends.  Returns the
    solved board dict, or the string "Failure" when no assignment works.
    """
    if assignmentComplete(board) == True:
        solved_board = board
        return solved_board
    else:
        # MRV heuristic: most-constrained empty cell and its candidate digits.
        var, domains = select_MRV(board)
        domain = domains[var]
        # NOTE(review): `new_domain` merely aliases `domain` — the forward
        # checking described in the comments (see FC below) is never applied.
        new_domain = domain
        # Try each candidate value for the chosen cell.
        for value in new_domain:
            if check_valid_insert(board, var, value) == True:
                board[var] = value
                result = backtracking(board)
                if result != "Failure":
                    return result
                # Undo the assignment before trying the next candidate.
                board[var] = 0
        return "Failure"
def FC(var, board, domain):
    """Forward checking: prune values from `domain` that would leave a peer
    cell (same column, row, or 3x3 box) with no legal value.

    NOTE(review): this helper is never called by backtracking() in this
    module.  `board2 = board` aliases (does not copy) the board, so the trial
    assignment below mutates the caller's board, and `domain` is modified
    while being iterated — both look like latent bugs; confirm before reuse.
    The print calls appear to be leftover debugging output.
    """
    x = [1,2,3,4,5,6,7,8,9]
    board2 = board
    for d in domain:
        # Tentatively place d, then look for a peer left without options.
        board2[var] = d
        print_board(board2)
        # --- peers in the same column ---
        column_value = var[1]
        for j in range(len(ROW)):
            key2 = ROW[j]+column_value
            print(key2)
            if board2[key2] == 0:
                count = 0
                for value in domain:
                    if(not check_valid_insert(board2, var, value)):
                        count = count + 1
                if count == len(domain):
                    domain.remove(d)
                    print("activated")
        print("AAAAAA")
        # --- peers in the same row ---
        row_value = var[0]
        for j in range(len(ROW)):
            key2 = row_value+COL[j]
            print(key2)
            if board2[key2] == 0:
                count1 = 0
                for value in x:
                    if(not check_valid_insert(board2, var, value)):
                        count1 = count1 + 1
                if count1 == 9:
                    domain.remove(d)
                    print("activated 2")
        # --- peers in the same 3x3 box ---
        square1 = ["A1","B1","C1","A2","B2","C2","A3","B3","C3"]
        square2 = ["A4","B4","C4","A5","B5","C5","A6","B6","C6"]
        square3 = ["A7","B7","C7","A8","B8","C8","A9","B9","C9"]
        square4 = ["D1","E1","F1","D2","E2","F2","D3","E3","F3"]
        square5 = ["D4","E4","F4","D5","E5","F5","D6","E6","F6"]
        square6 = ["D7","E7","F7","D8","E8","F8","D9","E9","F9"]
        square7 = ["G1","H1","I1","G2","H2","I2","G3","H3","I3"]
        square8 = ["G4","H4","I4","G5","H5","I5","G6","H6","I6"]
        square9 = ["G7","H7","I7","G8","H8","I8","G9","H9","I9"]
        matrix = [square1,square2,square3,square4,square5,square6,square7,square8,square9]
        for square in matrix:
            if var in square:
                for key2 in square:
                    if board2[key2] == 0:
                        count2 = 0
                        for value in x:
                            if(not check_valid_insert(board2, var, value)):
                                count2 = count2 + 1
                        if count2 == 9:
                            domain.remove(d)
                            print("activated 3")
    return domain
def select_MRV(board):
    """Pick the empty cell with the fewest legal values (minimum remaining values).

    Returns (key_of_that_cell, {empty_cell: list_of_legal_values}).
    """
    ledger = dict()
    domains = dict()
    for cell in board:
        if board[cell] != 0:
            continue
        legal = [v for v in range(1, 10) if check_valid_insert(board, cell, v)]
        domains[cell] = legal
        ledger[cell] = len(legal)
    # Ties resolve to the first-inserted cell, matching dict iteration order.
    return min(ledger, key=ledger.get), domains
def check_valid_insert(board, key, value):
    """True when `value` may go at `key` without clashing with its column, row, or box."""
    return (check_columns_valid(board, key, value)
            and check_rows_valid(board, key, value)
            and squares_valid(board, key, value))
def check_columns_valid(board, key, value):
    """True if `value` does not already appear in the column containing `key`."""
    col = key[1]
    return all(board[r + col] != value for r in ROW)
def check_rows_valid(board, key, value):
    """True if `value` does not already appear in the row containing `key`."""
    row = key[0]
    return all(board[row + c] != value for c in COL)
def squares_valid(board, key, value):
    """True if `value` does not already appear in the 3x3 box containing `key`."""
    rows = "ABCDEFGHI"
    cols = "123456789"
    # Top-left corner of the 3x3 box that holds `key`.
    band = (rows.index(key[0]) // 3) * 3
    stack = (cols.index(key[1]) // 3) * 3
    for r in rows[band:band + 3]:
        for c in cols[stack:stack + 3]:
            if board[r + c] == value:
                return False
    return True
def assignmentComplete(board):
    """True when no cell on the board is still 0 (i.e. fully assigned)."""
    return all(int(board[key]) != 0 for key in board)
def allDif(integer, lst):
    """True if `integer` is not already present in `lst`."""
    return integer not in lst
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # Read an individual 81-character board from the command line.
        sudoku = sys.argv[1]
        if len(sudoku) != 81:
            print("Error reading the sudoku string %s" % sys.argv[1])
        else:
            board = { ROW[r] + COL[c]: int(sudoku[9*r+c])
                      for r in range(9) for c in range(9)}
            print_board(board)
            start_time = time.time()
            solved_board = backtracking(board)
            end_time = time.time()
            print_board(solved_board)
            # `with` ensures the output file is flushed and closed
            # (the original never closed it).
            with open('output.txt', "w") as outfile:
                outfile.write(board_to_string(solved_board))
                outfile.write('\n')
    else:
        # Batch mode: read boards, one per line, from the source file.
        src_filename = 'sudokus_start.txt'
        print("trying")
        try:
            with open(src_filename, "r") as srcfile:
                sudoku_list = srcfile.read()
        except OSError:  # narrowed from a bare except: only I/O failures
            print("Error reading the sudoku file %s" % src_filename)
            sys.exit()
        times = []
        with open('output.txt', "w") as outfile:
            # Solve each board using backtracking.
            for line in sudoku_list.split("\n"):
                if len(line) < 9:
                    continue
                # Parse boards to dict representation, scanning L to R, up to down.
                board = { ROW[r] + COL[c]: int(line[9*r+c])
                          for r in range(9) for c in range(9)}
                print_board(board)
                start_time = time.time()
                solved_board = backtracking(board)
                end_time = time.time()
                times.append(end_time-start_time)
                print_board(solved_board)
                outfile.write(board_to_string(solved_board))
                outfile.write('\n')
        print("Finishing all boards in file.")
        # Guard against an empty/one-board file: min/max need data and
        # statistics.stdev raises with fewer than two samples.
        if times:
            print("Min:"+str(min(times)))
            # Fixed: this line was labelled "Min:" while printing the max.
            print("Max:"+str(max(times)))
            if len(times) > 1:
                print("Std_dev:"+str(statistics.stdev(times)))
            print("Mean:"+str(statistics.mean(times)))
|
994,035 | 0e5338ac6aa795a1c7666e850766f6e1af7fe1d4 | # https://leetcode.com/problems/sort-list/
from typing import List
class ListNode:
    """Singly-linked list node (LeetCode-style): a value plus a next pointer."""
    def __init__(self, x):
        self.val = x  # payload value
        self.next = None  # following node, or None at the tail
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        """Sort a singly linked list ascending by node value (merge sort).

        Replaces the previous stub, which returned the list unchanged.
        O(n log n) time, O(log n) recursion depth; stable merge.
        """
        if head is None or head.next is None:
            return head
        # Find the middle with slow/fast pointers and cut the list in two.
        slow, fast = head, head.next
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
        second = slow.next
        slow.next = None
        left = self.sortList(head)
        right = self.sortList(second)
        # Merge the two sorted halves.  The dummy head is built with the
        # node's own class so any ListNode-compatible type works.
        dummy = tail = type(left)(0)
        while left is not None and right is not None:
            if left.val <= right.val:
                tail.next, left = left, left.next
            else:
                tail.next, right = right, right.next
            tail = tail.next
        tail.next = left if left is not None else right
        return dummy.next
# Build the sample list 1→2→…→7 (already sorted) and run the sorter.
root = ListNode(1)
root.next = ListNode(2)
root.next.next = ListNode(3)
root.next.next.next = ListNode(4)
root.next.next.next.next = ListNode(5)
root.next.next.next.next.next = ListNode(6)
root.next.next.next.next.next.next = ListNode(7)
ans = Solution().sortList(root)
# NOTE(review): prints the head node's repr, not the list values — walk the
# .next chain to inspect the result.
print(ans)
|
994,036 | 0a6142bc468c0946de4cd034ea2748ffa53e5a4e | from django.urls import path, re_path
from .views import Home_List
# URL namespace for this app (enables reverse('home:home')).
app_name = 'home'
urlpatterns = [
    # Site root, served by the Home_List class-based view.
    path('', Home_List.as_view(), name='home'),
]
994,037 | 22384b1ede0cd5111bef06d06053fdbf9f7c7f1b | # from dailycodingproblem.com
#
# Daily Challenge #1085
# Given a string of digits, generate all possible valid IP address combinations.
# IP addresses must follow the format A.B.C.D, where A, B, C, and D are numbers between 0 and 255. Zero-prefixed numbers,
# such as 01 and 065, are not allowed, except for 0 itself.
#
# For example, given "2542540123", you should return ['254.25.40.123', '254.254.0.123'].
|
994,038 | 7445109bfe9000a2d2680b2b9da59ffe0e2c1b9e | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.devtools.clouderrorreporting.v1beta1 import error_stats_service_pb2 as google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2
class ErrorStatsServiceStub(object):
  """An API for retrieving and managing error statistics as well as data for
  individual events.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Generated code: each attribute is a callable for one unary-unary RPC,
    # wired to the protobuf (de)serializers for its request/response types.
    self.ListGroupStats = channel.unary_unary(
        '/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListGroupStats',
        request_serializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListGroupStatsRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListGroupStatsResponse.FromString,
        )
    self.ListEvents = channel.unary_unary(
        '/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/ListEvents',
        request_serializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListEventsRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListEventsResponse.FromString,
        )
    self.DeleteEvents = channel.unary_unary(
        '/google.devtools.clouderrorreporting.v1beta1.ErrorStatsService/DeleteEvents',
        request_serializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.DeleteEventsRequest.SerializeToString,
        response_deserializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.DeleteEventsResponse.FromString,
        )
class ErrorStatsServiceServicer(object):
  """An API for retrieving and managing error statistics as well as data for
  individual events.

  Generated base class: subclass and override the methods below to implement
  the server side of the service.
  """

  def ListGroupStats(self, request, context):
    """Lists the specified groups.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ListEvents(self, request, context):
    """Lists the specified events.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def DeleteEvents(self, request, context):
    """Deletes all error events of a given project.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_ErrorStatsServiceServicer_to_server(servicer, server):
  # Generated registration helper: maps each RPC name to the servicer's
  # handler and registers the whole service with the gRPC server.
  rpc_method_handlers = {
      'ListGroupStats': grpc.unary_unary_rpc_method_handler(
          servicer.ListGroupStats,
          request_deserializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListGroupStatsRequest.FromString,
          response_serializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListGroupStatsResponse.SerializeToString,
      ),
      'ListEvents': grpc.unary_unary_rpc_method_handler(
          servicer.ListEvents,
          request_deserializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListEventsRequest.FromString,
          response_serializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.ListEventsResponse.SerializeToString,
      ),
      'DeleteEvents': grpc.unary_unary_rpc_method_handler(
          servicer.DeleteEvents,
          request_deserializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.DeleteEventsRequest.FromString,
          response_serializer=google_dot_devtools_dot_clouderrorreporting_dot_v1beta1_dot_error__stats__service__pb2.DeleteEventsResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.devtools.clouderrorreporting.v1beta1.ErrorStatsService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
|
994,039 | c1c38598afe1f1427adb0c37af0b9265133aa4db | from django.db import models
from lego.apps.podcasts.permissions import PodcastPermissionHandler
from lego.utils.models import BasisModel
class Podcast(BasisModel):
    """A podcast entry: an external source reference plus people credits."""
    # Where the podcast lives (URL or feed reference).
    source = models.CharField(max_length=500)
    description = models.TextField()
    # Users credited as authors / users being thanked for the episode.
    authors = models.ManyToManyField("users.User", related_name="authors", blank=True)
    thanks = models.ManyToManyField("users.User", related_name="thanks", blank=True)
    def __str__(self):
        return self.source
    class Meta:
        # Delegate object-level permissions to the podcast-specific handler.
        permission_handler = PodcastPermissionHandler()
|
994,040 | 5a62f28e616d29581d3c511572dd7a05cab888f9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Create data module."""
import pandas as pd
def create_dataframe():
    """Create the dataframe for the dashboard."""
    # First row of data/data.csv supplies the column headers.
    return pd.read_csv('data/data.csv', header=0)
def get_data():
    """Load the work-order extract from data/data2.csv with explicit dtypes,
    parse its date columns, and fill missing values.

    NOTE(review): the date parsing and fillna here duplicate part of
    format_dataframe(); consider consolidating.
    """
    df = pd.read_csv('data/data2.csv',
                     dtype={'Manufacturer': 'str',
                            'Asset Alias': 'str',
                            'EAM Department Name': 'str',
                            'EAM Department': 'str',
                            'Rate Class Descr': 'str',
                            'Component Code': 'str',
                            'Component Description': 'str',
                            'Work Order Crew Code': 'str',
                            'Work Order Crew': 'str',
                            'Work Order Number': 'str',
                            'Work Order Status Code': 'str',
                            'Work Type': 'str',
                            'WO Actual Finish Date': 'str',
                            'WO Actual Finish Year': 'str',
                            'Work Order Reported Date': 'str',
                            'Work Order Lead Craft Person': 'str',
                            'Work Order Maintenance Planner': 'str',
                            'Work Order Description': 'str',
                            'Actual Labor Cost': 'float',
                            'Actual Material Cost': 'float',
                            'Actual Service Cost': 'float',
                            'Total Work Order Cost': 'float',
                            'Overtime Hours': 'float',
                            'Regular Hours': 'float'}
                     )
    # Dates arrive as ISO strings; convert to real timestamps.
    df['WO Actual Finish Date'] = pd.to_datetime(df['WO Actual Finish Date'], format='%Y-%m-%d')
    df['Work Order Reported Date'] = pd.to_datetime(df['Work Order Reported Date'], format='%Y-%m-%d')
    df.fillna('None Specified', inplace=True)
    return df
def format_dataframe(df):
    """Edit data types and formatting for primary OBIEE dataframe
    :param df: The primary dataframe returned from OBIEE
    :return: The formatted dataframe
    """
    # Parse the two ISO date columns into timestamps.
    for col in ('WO Actual Finish Date', 'Work Order Reported Date'):
        df[col] = pd.to_datetime(df[col], format='%Y-%m-%d')
    df['WO Actual Finish Year'] = df['WO Actual Finish Year'].astype('int')
    # Deal with missing values.
    df.fillna('None Specified', inplace=True)
    # Derive a YYYY-MM bucket column for monthly grouping.
    df['Actual Finish YYYY-MM'] = df['WO Actual Finish Date'].apply(lambda ts: ts.strftime('%Y-%m'))
    return df
|
994,041 | 8de7cca58070ba401136ea98a08345cd672f2281 | """
Similar to decorators, context-managers are tools that wrap up the code
While decorators wrap up defined classes or functions, context-managers wrap up
arbitrary, free-form blocks of code
(i) exit is guaranteed even if internal code raises an exception
"""
# context-manager behaviour written out manually with try/finally
try:
    my_file = open('/path/to/filename', 'r')
    # Fixed: the original read from `myfile`, an undefined name.
    contents = my_file.read()
finally:
    # makes sure that whatever happens, the file will be closed
    my_file.close()
# context-manager syntax using keyword "with":
# __enter__ acquires the resource, __exit__ releases it even on exceptions.
with open('path/to/filename', 'r') as thisfile:
    contents = thisfile.read()
'''
The above expression is expected to return an object with two special methods,
__enter__ and __exit__.
(i) Like decorators, context-managers are used to avoid repetitive code.
(ii) Like decorators, context-managers are another way to take bits of
functionality that require reuse across an application, and compartmentalize
them in an effective and portable way.
'''
|
994,042 | b7be1925c220ca21dead4eac10b020ff9e2b5334 | import datetime
import gzip
import json
import logging
import shutil
import unittest
import os
from log_analyzer import load_config, get_last_log_file, render, calculate_metrics, openfile, \
extract_date_frome_file_name, create_parser, parse_report
logging.disable(logging.CRITICAL)
class TestLogAnalyzer(unittest.TestCase):
    def setUp(self):
        """Create a clean tests/temp working directory before each test."""
        super(TestLogAnalyzer, self).setUp()
        self.abs_path = os.getcwd()
        self.path_to_temp = os.path.join(self.abs_path, 'tests', 'temp')
        # Remove leftovers from an aborted previous run before recreating.
        if os.path.exists(self.path_to_temp):
            shutil.rmtree(self.path_to_temp)
        os.makedirs(self.path_to_temp)
    def tearDown(self):
        """Delete the per-test temp directory and everything in it."""
        shutil.rmtree(self.path_to_temp)
    def _generate_plain_sample(self, file_name="nginx-access-ui.log-20170630"):
        """Write a fixed nginx access-log fixture into the temp dir and return its path."""
        content = """1.196.116.32 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/banner/25019354 HTTP/1.1" 200 927 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752759" "dc7161be3" 0.390
1.99.174.176 3b81f63526fa8 - [29/Jun/2017:03:50:22 +0300] "GET /api/1/photogenic_banners/list/?server_name=WIN7RB4 HTTP/1.1" 200 12 "-" "Python-urllib/2.7" "-" "1498697422-32900793-4708-9752770" "-" 0.133
1.169.137.128 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/banner/16852664 HTTP/1.1" 200 19415 "-" "Slotovod" "-" "1498697422-2118016444-4708-9752769" "712e90144abee9" 0.199
1.199.4.96 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/slot/4705/groups HTTP/1.1" 200 2613 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-3800516057-4708-9752745" "2a828197ae235b0b3cb" 0.704
1.168.65.96 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/internal/banner/24294027/info HTTP/1.1" 200 407 "-" "-" "-" "1498697422-2539198130-4709-9928846" "89f7f1be37d" 0.146
1.169.137.128 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/group/1769230/banners HTTP/1.1" 200 1020 "-" "Configovod" "-" "1498697422-2118016444-4708-9752747" "712e90144abee9" 0.628
1.194.135.240 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/group/7786679/statistic/sites/?date_type=day&date_from=2017-06-28&date_to=2017-06-28 HTTP/1.1" 200 22 "-" "python-requests/2.13.0" "-" "1498697422-3979856266-4708-9752772" "8a7741a54297568b" 0.067
1.169.137.128 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/banner/1717161 HTTP/1.1" 200 2116 "-" "Slotovod" "-" "1498697422-2118016444-4708-9752771" "712e90144abee9" 0.138
1.166.85.48 - - [29/Jun/2017:03:50:22 +0300] "GET /export/appinstall_raw/2017-06-29/ HTTP/1.0" 200 28358 "-" "Mozilla/5.0 (Windows; U; Windows NT 6.0; ru; rv:1.9.0.12) Gecko/2009070611 Firefox/3.0.12 (.NET CLR 3.5.30729)" "-" "-" "-" 0.003
1.199.4.96 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/slot/4822/groups HTTP/1.1" 200 22 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-3800516057-4708-9752773" "2a828197ae235b0b3cb" 0.157
1.196.116.32 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/banner/24987703 HTTP/1.1" 200 883 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752753" "dc7161be3" 0.726
1.166.85.48 - - [29/Jun/2017:03:50:22 +0300] "GET /export/appinstall_raw/2017-06-30/ HTTP/1.0" 404 162 "-" "Mozilla/5.0 (Windows; U; Windows NT 6.0; ru; rv:1.9.0.12) Gecko/2009070611 Firefox/3.0.12 (.NET CLR 3.5.30729)" "-" "-" "-" 0.001
1.196.116.32 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/banner/25020545 HTTP/1.1" 200 969 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752761" "dc7161be3" 0.738
1.169.137.128 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/7763463 HTTP/1.1" 200 1018 "-" "Configovod" "-" "1498697422-2118016444-4708-9752774" "712e90144abee9" 0.181
1.169.137.128 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/16168711 HTTP/1.1" 200 16478 "-" "Slotovod" "-" "1498697422-2118016444-4708-9752775" "712e90144abee9" 0.190
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/25023278 HTTP/1.1" 200 924 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752762" "dc7161be3" 0.841
1.194.135.240 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/group/7786682/statistic/sites/?date_type=day&date_from=2017-06-28&date_to=2017-06-28 HTTP/1.1" 200 22 "-" "python-requests/2.13.0" "-" "1498697423-3979856266-4708-9752778" "8a7741a54297568b" 0.068
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/25013431 HTTP/1.1" 200 948 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752758" "dc7161be3" 0.917
1.168.65.96 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/internal/banner/24288647/info HTTP/1.1" 200 351 "-" "-" "-" "1498697423-2539198130-4708-9752780" "89f7f1be37d" 0.072
1.169.137.128 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/21456892 HTTP/1.1" 200 70795 "-" "Slotovod" "-" "1498697423-2118016444-4708-9752779" "712e90144abee9" 0.158
1.168.65.96 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/internal/banner/24197629/info HTTP/1.1" 200 293 "-" "-" "-" "1498697423-2539198130-4708-9752783" "89f7f1be37d" 0.058
1.194.135.240 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/group/7786683/statistic/sites/?date_type=day&date_from=2017-06-28&date_to=2017-06-28 HTTP/1.1" 200 22 "-" "python-requests/2.13.0" "-" "1498697423-3979856266-4708-9752782" "8a7741a54297568b" 0.061
1.169.137.128 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/16803530 HTTP/1.1" 200 6766 "-" "Slotovod" "-" "1498697423-2118016444-4708-9752781" "712e90144abee9" 0.156
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/24913311 HTTP/1.1" 200 897 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752748" "dc7161be3" 1.243
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/25019908 HTTP/1.1" 200 989 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752760" "dc7161be3" 1.321
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/24998073 HTTP/1.1" 200 983 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752755" "dc7161be3" 1.403
1.194.135.240 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/group/7786984/statistic/sites/?date_type=day&date_from=2017-06-28&date_to=2017-06-28 HTTP/1.1" 200 110 "-" "python-requests/2.13.0" "-" "1498697423-3979856266-4708-9752784" "8a7741a54297568b" 0.056
1.169.137.128 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/group/1823183/banners HTTP/1.1" 200 1002 "-" "Configovod" "-" "1498697423-2118016444-4708-9752777" "712e90144abee9" 0.680
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/25047606 HTTP/1.1" 200 959 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752766" "dc7161be3" 1.490
1.195.208.16 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/test/auth/ HTTP/1.0" 401 55 "https://rb.mail.ru/api/v2/test/auth/" "MR HTTP Monitor" "-" "1498697423-1957913694-4708-9752786" "-" 0.003
1.195.208.16 - - [29/Jun/2017:03:50:23 +0300] "GET /accounts/login/ HTTP/1.0" 200 9982 "https://rb.mail.ru/accounts/login/" "MR HTTP Monitor" "-" "1498697423-1957913694-4708-9752785" "-" 0.035
1.195.208.16 - - [29/Jun/2017:03:50:23 +0300] "POST /api/v2/target/12988/list?status=1 HTTP/1.0" 200 2 "https://rb.mail.ru/api/v2/target/12988/list?status=1" "MR HTTP Monitor" "-" "1498697423-1957913694-4708-9752787" "-" 0.003
1.141.250.208 - - [29/Jun/2017:03:50:23 +0300] "GET /export/appinstall_raw/2017-06-29/ HTTP/1.0" 200 28358 "-" "Mozilla/5.0 (Windows; U; Windows NT 6.0; ru; rv:1.9.0.12) Gecko/2009070611 Firefox/3.0.12 (.NET CLR 3.5.30729)" "-" "-" "-" 0.002
1.141.250.208 - - [29/Jun/2017:03:50:23 +0300] "GET /export/appinstall_raw/2017-06-30/ HTTP/1.0" 404 162 "-" "Mozilla/5.0 (Windows; U; Windows NT 6.0; ru; rv:1.9.0.12) Gecko/2009070611 Firefox/3.0.12 (.NET CLR 3.5.30729)" "-" "-" "-" 0.001
1.169.137.128 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/7957213 HTTP/1.1" 200 1000 "-" "Configovod" "-" "1498697423-2118016444-4708-9752789" "712e90144abee9" 0.145
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/25032604 HTTP/1.1" 200 919 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752763" "dc7161be3" 1.665
1.200.76.128 f032b48fb33e1e692 - [29/Jun/2017:03:50:23 +0300] "GET /api/1/banners/?campaign=7789704 HTTP/1.1" 200 604049 "-" "-" "-" "1498697421-4102637017-4708-9752733" "-" 2.577
1.196.116.32 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/banner/25040266 HTTP/1.1" 200 984 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752765" "dc7161be3" 1.680
1.168.65.96 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/internal/banner/24273184/info HTTP/1.1" 200 396 "-" "-" "-" "1498697423-2539198130-4707-9827576" "89f7f1be37d" 0.063
1.194.135.240 - - [29/Jun/2017:03:50:23 +0300] "GET /api/v2/group/7808057/statistic/sites/?date_type=day&date_from=2017-06-28&date_to=2017-06-28 HTTP/1.1" 200 22 "-" "python-requests/2.13.0" "-" "1498697423-3979856266-4708-9752792" "8a7741a54297568b" 0.063
1.168.229.112 545a7b821307935d - [29/Jun/2017:03:50:24 +0300] "GET /agency/banners_stats/?date1=26-06-2017&date2=28-06-2017&date_type=day&do=1&rt=campaign&oi=5370438&as_json=1 HTTP/1.1" 200 316 "-" "python-requests/2.13.0" "-" "1498697417-743364018-4708-9752674" "-" 6.828
1.199.168.112 2a828197ae235b0b3cb - [29/Jun/2017:03:50:24 +0300] "GET /api/1/banners/?campaign=1236490 HTTP/1.1" 200 13945 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697423-2760328665-4708-9752788" "-" 0.314
1.200.76.128 f032b48fb33e1e692 - [29/Jun/2017:03:50:24 +0300] "GET /api/1/campaigns/?id=7789709 HTTP/1.1" 200 608 "-" "-" "-" "1498697423-4102637017-4708-9752791" "-" 0.146"""
        path_to_file = os.path.join(self.path_to_temp, file_name)
        with open(path_to_file, "w") as file:
            file.write(content)
        return path_to_file
def _generate_gz_sample(self, file_name="nginx-access-ui.log-20170630", is_remove_plain=False):
    """Write the plain sample log, gzip it, and return the path of the .gz file.

    When *is_remove_plain* is True the intermediate plain file is deleted.
    """
    path_to_plain_file = self._generate_plain_sample(file_name)
    path_to_gz_file = f'{path_to_plain_file}.gz'
    # Stream-copy the plain log into a gzip container.
    with open(path_to_plain_file, 'rb') as f_in, gzip.open(path_to_gz_file, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    if is_remove_plain:
        os.remove(path_to_plain_file)
    return path_to_gz_file
def _generate_config_file(self, file_name="config.json", config=None):
    """Serialize *config* (or a sensible default) as JSON into the temp dir.

    Returns the path of the written config file.
    """
    if config is None:
        # Default mirrors the three keys the application expects.
        config = {"REPORT_SIZE": 1000, "REPORT_DIR": "./reports", "LOG_DIR": "./log"}
    path_to_config_file = os.path.join(self.path_to_temp, file_name)
    with open(path_to_config_file, 'w') as f_out:
        json.dump(config, f_out)
    return path_to_config_file
def _generate_table_json(self, count_rows=10):
table = []
for number_row in range(0, count_rows):
table.append({'url': '/api/v2/internal/html5/phantomjs/queue/?wait=1m',
'count': 2767,
'count_perc': 0.106,
'time_avg': 62.995,
'time_max': 9843.569,
'time_med': 60.073,
'time_perc': 9.043,
'time_sum': 174306.352})
return json.dumps(table)
def test_load_normal_config(self):
    # A well-formed JSON config file loads into a dict.
    path_to_config_file = self._generate_config_file()
    config = load_config(path_to_config_file)
    self.assertIsInstance(config, dict)

def test_load_does_not_exist_config(self):
    # A missing config file is fatal: load_config exits the program.
    with self.assertRaises(SystemExit) as exc:
        load_config("some_wrong_path_to_config.json")
    self.assertIsInstance(exc.exception, SystemExit)

def test_load_broken_json_config(self):
    # Overwrite the generated config with non-JSON text; load must exit.
    path_to_config_file = self._generate_config_file(config="wrong config")
    with open(path_to_config_file, 'w') as f_out:
        f_out.write("some text, which broke json-format")
    with self.assertRaises(SystemExit) as exc:
        load_config(path_to_config_file)
    self.assertIsInstance(exc.exception, SystemExit)

def test_validation_loaded_config(self):
    # The loaded config carries exactly the three expected keys.
    path_to_config_file = self._generate_config_file()
    config = load_config(path_to_config_file)
    self.assertTrue("REPORT_SIZE" in config)
    self.assertTrue("REPORT_DIR" in config)
    self.assertTrue("LOG_DIR" in config)
    self.assertEqual(len(config), 3)
def test_take_last_log_file_plain(self):
    # Three plain logs for consecutive dates: the newest date must win.
    template = "nginx-access-ui.log-201706"
    log_name_list = [f"{template}{str(day).zfill(2)}" for day in range(1, 4)]
    log_file_path_list = [self._generate_plain_sample(log_name) for log_name in log_name_list]
    path_to_last_log_file = get_last_log_file(self.path_to_temp)
    self.assertTrue(path_to_last_log_file in log_file_path_list)
    self.assertEqual(path_to_last_log_file, log_file_path_list[-1])

def test_take_last_log_file_gz(self):
    # Same scenario, but with gzip-compressed logs only.
    template = "nginx-access-ui.log-201706"
    log_name_list = [f"{template}{str(day).zfill(2)}" for day in range(1, 4)]
    log_file_path_list = [self._generate_gz_sample(log_name, is_remove_plain=True) for log_name in log_name_list]
    path_to_last_log_file = get_last_log_file(self.path_to_temp)
    self.assertTrue(path_to_last_log_file in log_file_path_list)
    self.assertEqual(path_to_last_log_file, log_file_path_list[-1])

def test_take_last_log_file_gz_and_plain_mixed(self):
    # Older logs are .gz (days 1-3), newer are plain (days 4-5): the newest
    # plain log must be picked regardless of extension.
    template = "nginx-access-ui.log-201706"
    log_name_list = [f"{template}{str(day).zfill(2)}" for day in range(1, 4)]
    gz_list = [self._generate_gz_sample(log_name, is_remove_plain=True) for log_name in log_name_list]
    log_name_list = [f"{template}{str(day).zfill(2)}" for day in range(4, 6)]
    plain_list = [self._generate_plain_sample(log_name) for log_name in log_name_list]
    path_to_last_log_file = get_last_log_file(self.path_to_temp)
    self.assertTrue(path_to_last_log_file in plain_list)
    self.assertTrue(path_to_last_log_file not in gz_list)
    self.assertEqual(path_to_last_log_file, plain_list[-1])

def test_take_last_log_wrong_format_date(self):
    # A log whose date part is not YYYYMMDD causes a fatal exit.
    self._generate_plain_sample("nginx-access-ui.log-01052017")
    with self.assertRaises(SystemExit) as exc:
        get_last_log_file(self.path_to_temp)
def test_render_if_template_does_not_exist(self):
    # Rendering with a bad template path must exit, not silently continue.
    table_json = self._generate_table_json()
    report_name = 'test_report.html'
    report_dir = os.path.join(self.path_to_temp, 'reports', )
    with self.assertRaises(SystemExit) as exc:
        render(table_json, report_name, report_dir, path_to_template='./some_wrong_path/report.html')

def test_render_if_table_json_is_empty(self):
    # An empty table still produces a report file.
    report_name = 'test_report.html'
    report_dir = os.path.join(self.path_to_temp, 'reports', )
    path_to_template = os.path.join(self.abs_path, 'templates', 'report.html')
    render('', report_name, report_dir, path_to_template)
    self.assertTrue(os.path.exists(os.path.join(report_dir, report_name)))

def test_render_if_report_dir_not_exist(self):
    # render() is expected to create the report directory on demand.
    report_dir = os.path.join(self.path_to_temp, 'reports', )
    self.assertFalse(os.path.exists(report_dir))
    path_to_template = os.path.join(self.abs_path, 'templates', 'report.html')
    render(self._generate_table_json(1), 'test_report.html', report_dir, path_to_template)
    self.assertTrue(os.path.exists(report_dir))
def test_calculate_report_if_repot_size_default(self):
    # With the default size the metrics table is a non-empty list.
    path_to_file = self._generate_plain_sample("nginx-access-ui.log-20170630")
    table_dict = parse_report(path_to_file)
    table = calculate_metrics(table_dict)
    self.assertIsInstance(table, list)
    self.assertTrue(len(table) > 0)

def test_calculate_report_if_repot_size_equal_ten(self):
    # size= caps the number of rows in the resulting table.
    path_to_file = self._generate_plain_sample("nginx-access-ui.log-20170630")
    report_size = 10
    table_dict = parse_report(path_to_file)
    table = calculate_metrics(table_dict, size=report_size)
    self.assertTrue(len(table) == report_size)

def test_extract_date_frome_normal_file_name(self):
    # A correctly named log yields a datetime.date.
    name = 'nginx-access-ui.log-20170630'
    self.assertIsInstance(extract_date_frome_file_name(name), datetime.date)

def test_extract_date_frome_wrong_file_name(self):
    # A name without the '-ui' marker is ignored (returns None), not an error.
    name = 'nginx-access.log-30062017'
    self.assertIsNone(extract_date_frome_file_name(name))

def test_extract_date_frome_wrong_fomat_date_in_file_name(self):
    # A matching name with an unparsable date part is fatal.
    name = 'nginx-access-ui.log-30062017'
    with self.assertRaises(SystemExit):
        extract_date_frome_file_name(name)
def test_openfile_if_plain(self):
    # Plain logs open in text mode and yield str lines.
    path_to_file = self._generate_plain_sample("nginx-access-ui.log-20170630")
    with openfile(path_to_file) as out_f:
        self.assertIsInstance(out_f.readline(), str)

def test_openfile_if_gzip(self):
    # .gz logs opened with 'rb' yield bytes lines.
    path_to_file = self._generate_gz_sample("nginx-access-ui.log-20170630", is_remove_plain=True)
    with openfile(path_to_file, 'rb') as out_f:
        self.assertIsInstance(out_f.readline(), bytes)

def test_parse_argument_send_config(self):
    # --config overrides the config file name.
    parser = create_parser()
    conf_name = 'config1.json'
    parsed = parser.parse_args(['--config', conf_name])
    self.assertEqual(parsed.config, conf_name)

def test_parse_argument_config_default(self):
    # Without --config the parser falls back to 'config.json'.
    parser = create_parser()
    conf_name = 'config.json'
    parsed = parser.parse_args()
    self.assertEqual(parsed.config, conf_name)


if __name__ == '__main__':
    unittest.main()
|
994,043 | 288c75b6772f8085f86c089749377b7967943abd | import unittest
import caffe
import numpy as np
import cv2
class ImgFlatten(caffe.Layer):
    """Caffe Python layer that unfolds each (sample, channel) plane's
    sliding-window patches into one flat 2-D image via ImgLoader.

    Note: this file uses Python 2 syntax (print statement).
    """

    def setup(self, bottom, top):
        assert len(bottom) == 1, 'requires a single layer.bottom'
        assert len(top) == 1, 'requires a single layer.top'
        # NOTE(review): eval() on param_str executes arbitrary code from the
        # prototxt; acceptable only for trusted configurations.
        params = eval(self.param_str)
        # store input as class variables
        self.batch_size = bottom[0].data.shape[0]
        self.num_output = bottom[0].data.shape[1]
        # Create a batch loader to load the images.
        self.batch_loader = ImgLoader(params,bottom[0].data.shape)
        # Output keeps batch/channel dims; spatial dims come from the loader.
        top[0].reshape(self.batch_size,self.num_output,self.batch_loader.height,self.batch_loader.width)
        print "FlattenImageLayerInfo", top[0].shape

    def reshape(self, bottom, top):
        # Output shape is fixed in setup(); nothing to do per batch.
        pass

    def forward(self, bottom, top):
        # Flatten every (sample, channel) plane independently.
        for h in range(self.batch_size):
            for d in range(self.num_output):
                top[0].data[h,d,...]=self.batch_loader.load_image(bottom[0].data[h,d,...])

    def backward(self, top, propagate_down, bottom):
        # No gradient is propagated through this layer.
        pass
class ImgLoader(object):
    """Unfolds kernel_size x kernel_size sliding-window patches of a
    vertically stacked image pair into one flat 2-D image.

    The top half of the output is filled from img rows [0, side), the bottom
    half from img rows starting at ``input_shape[-1]`` (the lower image of
    the stack).
    """

    def __init__(self, params, shape):
        # Only the last dimension of the blob shape (the image side) is used.
        self.input_shape = shape
        self.kernel_size = params['kernel_size']
        self.stride = params['stride']
        # Number of window positions along one axis.
        self.row_cnt = int((shape[-1] - self.kernel_size) / self.stride + 1)
        self.width = self.kernel_size * self.row_cnt
        # Two stacked halves -> output is twice as tall as it is wide.
        self.height = 2 * self.width

    def load_image(self, img):
        """Return a (height, width) array of tiled patches extracted from *img*."""
        k = self.kernel_size
        s = self.stride
        side = self.input_shape[-1]
        flat_img = np.zeros((self.height, self.width))
        for row in range(2 * self.row_cnt):
            # Rows past row_cnt read from the lower stacked image (offset by side).
            src_top = (row % self.row_cnt) * s + (side if row >= self.row_cnt else 0)
            for col in range(self.row_cnt):
                src_left = col * s
                flat_img[row * k:(row + 1) * k, col * k:(col + 1) * k] = \
                    img[src_top:src_top + k, src_left:src_left + k]
        return flat_img
class TestLayerWithParam(unittest.TestCase):
    """Smoke test: build the net from the prototxt and run one forward pass."""

    def setUp(self):
        # Training prototxt expected to reference the ImgFlatten python layer.
        net_file = "./pixelate/config/04182100/train.prototxt"
        self.net = caffe.Net(net_file, caffe.TRAIN)

    def test_flatten(self):
        # Passes as long as forward() completes without raising.
        self.net.forward()


if __name__=='__main__':
    unittest.main()
|
994,044 | eb61f5c5f5de3e17b90c58ddec5bac1043ef1861 | from apistar import http, exceptions
from users.models import User
from project.settings import NO_AUTH_ENDPOINTS, ORIGIN
class Cors():
    """apistar response hook that stamps a CORS header on every response."""

    def on_response(self, response: http.Response):
        # Allow the configured front-end ORIGIN to read responses cross-origin.
        response.headers['Access-Control-Allow-Origin'] = ORIGIN
class MustBeAuthenticated():
    """apistar request hook that rejects anonymous access to protected paths."""

    def on_request(self, path: http.Path, user: User=None) -> None:
        # Compare paths with all slashes stripped so '/login/' matches 'login'.
        allowed = [endpoint.replace('/', '') for endpoint in NO_AUTH_ENDPOINTS]
        if user is None and path.replace('/', '') not in allowed:
            raise exceptions.HTTPException('Unauthorized', 401)
|
994,045 | cf0dbb0ed7197cde7611cf51805ee61fb14246b0 | from tkinter import *
from tkinter import filedialog
import os
from PIL import ImageTk, Image
import time
class UI:
    """Tkinter photo viewer: pick a folder, step through its images with
    back/next buttons, or run an auto-advancing slideshow on a timer."""

    def __init__(self,window):
        self.window = window
        self.window.minsize(width=600, height=500)
        self.window.wm_title("Photo Viewer")
        self.folderPath = StringVar()  # folder currently being browsed
        self.imagePath = ''            # full path of the image on display
        self.counter = 0               # index of the current file in imageList
        # Placeholder entries until a real folder is selected.
        self.imageList=['item','item2','item3']
        self.timeit = False            # True while the slideshow timer runs
        self.interval = 500            # slideshow period in milliseconds
        self.getFolderBtn = Button(self.window, text = "Select a Folder", width = 12, command=self.getFolder)
        self.getFolderBtn.grid(row=1, column=3)
        self.backBtn = Button(self.window, text='back', width=6, command=lambda: self.goToPreviousImage("back"))
        self.backBtn.grid(row=2, column=1)
        self.nextBtn = Button(self.window, text='next', width=6, command=lambda: self.goToNextImage("next"))
        self.nextBtn.grid(row=2, column=5)
        self.displayCountdown = Label(self.window, text='', width =12)
        self.displayCountdown.grid(row=3,column=1)
        self.thirtySecTimer = Button(self.window, text='30 seconds', width=12, command=lambda: self.start(30000))
        self.thirtySecTimer.grid(row=3,column=2)
        self.sixtySecTimer = Button(self.window, text='60 seconds', width=12, command=lambda: self.start(60000))
        self.sixtySecTimer.grid(row=3,column=3)
        # NOTE(review): this assignment shadows the pause() method with the
        # Button widget; the button still works because the bound method was
        # captured above, but self.pause() can no longer be called in code.
        self.pause = Button(self.window, text='Stop', width=12, command=self.pause)
        self.pause.grid(row=3,column=4)

    def getFolder(self):
        # Ask the user for a directory and display its first entry.
        filename = filedialog.askdirectory()
        self.folderPath.set(filename)
        self.imageList = os.listdir(str(self.folderPath.get()))
        self.openImage()

    def openImage(self):
        # self.img is reused: it holds a file name here and is replaced by a
        # Label widget below once an image is rendered.
        self.img = self.imageList[self.counter]
        if self.img.lower().endswith(('.png', '.jpg', '.jpeg')):
            self.imagePath = (self.folderPath.get()+'/'+str(self.img))
            load = Image.open(self.imagePath)
            self.resizeImg(load.size[0],load.size[1])
            # NOTE(review): Image.ANTIALIAS is deprecated/removed in newer
            # Pillow releases (Image.LANCZOS replaces it) — confirm version.
            resized = load.resize((self.newWidth, self.newHeight),Image.ANTIALIAS)
            render = ImageTk.PhotoImage(resized)
            self.img = Label(self.window, width=800, height=600,image=render)
            # Keep a reference so the PhotoImage is not garbage-collected.
            self.img.image = render
            self.img.grid(row=2,column=2, columnspan=3)
        else:
            # Not an image file: keep moving in the direction of travel.
            # NOTE(review): self.buttonPress is only set by the nav methods;
            # hitting this branch before any navigation raises AttributeError.
            if self.buttonPress == "back":
                self.goToPreviousImage("back")
            else:
                self.goToNextImage("next")

    def resizeImg(self,width,height):
        # Scale to fit an 800x600 viewport while preserving aspect ratio.
        dividedByWidth = 800/width
        dividedByHeight = 600/height
        if dividedByWidth <= dividedByHeight:
            dividedBy = dividedByWidth
        else:
            dividedBy = dividedByHeight
        self.newWidth = int(width * dividedBy)
        self.newHeight = int(height * dividedBy)

    def goToNextImage (self, btnPress):
        self.buttonPress = "next"
        # self.img is a Label once an image has been shown; destroy it so the
        # next image replaces it instead of stacking widgets.
        if not isinstance(self.img, str):
            self.img.destroy()
        if self.counter < len(self.imageList)-1:
            self.counter += 1
            self.openImage()
        elif self.counter == len(self.imageList)-1:
            # Already at the last file: stay there and re-open it.
            self.counter = len(self.imageList)-1
            self.openImage()

    def goToPreviousImage (self, btnPress):
        self.buttonPress = btnPress
        if not isinstance(self.img, str):
            self.img.destroy()
        if self.counter > 0:
            self.counter -= 1
            self.openImage()
        elif self.counter == 0:
            # Already at the first file: stay there and re-open it.
            self.counter = 0
            self.openImage()

    def start(self, setTime):
        # Begin the slideshow with the given period (in ms).
        self.interval = setTime
        self.timeit = True
        self.setCountdown()

    def pause(self):
        # Stop the slideshow; the countdown label will show 'stopped'.
        self.interval = 0
        self.timeit = False
        self.setCountdown()

    def setCountdown(self):
        # Advance the slideshow and re-arm itself via Tk's after().
        if self.counter == len(self.imageList)-1:
            # Wrap around: goToNextImage will then step from -1 to 0.
            self.counter = -1
        if self.timeit:
            self.goToNextImage('next')
            self.displayTime(self.interval)
            root.after(self.interval, self.setCountdown)

    def displayTime(self, time):
        # Tick the remaining time down in the label once per second.
        # Note: 'root' is the module-level Tk instance, and the parameter
        # 'time' shadows the stdlib module name inside this method.
        if time >0:
            if self.timeit:
                currentTime = time
                self.displayCountdown["text"]=('Timer: '+str(currentTime/1000))
                root.after(1000, self.displayTime, currentTime-1000)
            elif self.timeit:
                # NOTE(review): unreachable — same condition as the branch above.
                currentTime = self.interval
                self.displayCountdown["text"]=('Timer: '+str(currentTime/1000))
                root.after(1000, self.displayTime, currentTime-1000)
            else:
                self.displayCountdown["text"]='stopped'
        else:
            self.displayCountdown["text"]=('Timer: '+str(self.interval/1000))
# Script entry point: build the Tk root window, attach the UI, and block in
# the Tk event loop until the window is closed.
root = Tk()
app = UI(root)
root.mainloop()
|
994,046 | abc295b4721ca0633984e56811e8c6de8edd082d | # Virtualized High Performance Computing Toolkit
#
# Copyright (c) 2018-2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the
# "License"). You may not use this product except in compliance with the
# Apache 2.0 License. This product may include a number of subcomponents with
# separate copyright notices and license terms. Your use of these
# subcomponents is subject to the terms and conditions of the subcomponent's
# license, as noted in the LICENSE file.
# SPDX-License-Identifier: Apache-2.0
# coding=utf-8
import os
from pyVmomi import vim
from pyVmomi import vmodl
from vhpc_toolkit import log
from vhpc_toolkit.get_objs import GetDatacenter
from vhpc_toolkit.get_objs import GetHost
from vhpc_toolkit.get_objs import GetVM
from vhpc_toolkit.wait import GetWait
class ConfigVM(object):
"""
A class for configuring VM properties.
Methods under this class will call ReconfigVM_Task (a method of
VirtualMachine object) and return a Task
object, with which to monitor the status of operation.
API References:
https://pubs.vmware.com/vi3/sdk/ReferenceGuide
https://github.com/vmware/pyvmomi
"""
def __init__(self, vm_obj):
    """
    Args:
        vm_obj (vim.VirtualMachine): the VM whose configuration is changed
    """
    self.vm_obj = vm_obj
    # Logger named after the class so log lines identify their origin.
    self.logger = log.my_logger(name=self.__class__.__name__)

def memory(self, memory_mb):
    """ Configure memory size for a VM

    Args:
        memory_mb (int): memory size in MB to be configured

    Returns:
        Task: reconfiguration task to monitor
    """
    config_spec = vim.vm.ConfigSpec()
    config_spec.memoryMB = memory_mb
    return self.vm_obj.ReconfigVM_Task(config_spec)

def cpus(self, num_of_cpus):
    """ Configure number of CPUs for a VM

    Args:
        num_of_cpus (int): number of virtual CPUs to be configured

    Returns:
        Task: reconfiguration task to monitor
    """
    config_spec = vim.vm.ConfigSpec()
    config_spec.numCPUs = num_of_cpus
    return self.vm_obj.ReconfigVM_Task(config_spec)
def cpu_shares(self, shares):
    """ Configure CPU shares for a VM

    Args:
        shares (int): CPU shares to be configured (non-negative)

    Returns:
        Task
    """
    assert shares >= 0
    config_spec = vim.vm.ConfigSpec()
    shares_alloc = vim.ResourceAllocationInfo()
    # "custom" share level is required when an explicit count is supplied.
    shares_alloc.shares = vim.SharesInfo(level="custom", shares=shares)
    config_spec.cpuAllocation = shares_alloc
    return self.vm_obj.ReconfigVM_Task(config_spec)

def cores_per_socket(self, cores_per_socket):
    """Configure cores per socket for a VM

    Args:
        cores_per_socket (int): number of cores per socket to be configured

    Returns:
        Task
    """
    assert cores_per_socket >= 0
    config_spec = vim.vm.ConfigSpec()
    config_spec.numCoresPerSocket = cores_per_socket
    return self.vm_obj.ReconfigVM_Task(config_spec)

def memory_shares(self, shares):
    """ Configure memory shares for a VM

    Args:
        shares (int): memory shares to be configured (non-negative)

    Returns:
        Task
    """
    assert shares >= 0
    config_spec = vim.vm.ConfigSpec()
    shares_alloc = vim.ResourceAllocationInfo()
    # "custom" share level is required when an explicit count is supplied.
    shares_alloc.shares = vim.SharesInfo(level="custom", shares=shares)
    config_spec.memoryAllocation = shares_alloc
    return self.vm_obj.ReconfigVM_Task(config_spec)
def memory_reservation(self, reser=0):
    """ Configure memory reservation for a VM

    Args:
        reser (int): 0 (clear reservation) or
                     non-0 (reserve all memory that is configured)

    Returns:
        Task
    """
    config_spec = vim.vm.ConfigSpec()
    mem_alloc = vim.ResourceAllocationInfo()
    if reser:
        # Reserve exactly the VM's currently configured memory, and keep the
        # reservation pinned to the maximum if memory size changes later.
        mem_alloc.reservation = self.vm_obj.config.hardware.memoryMB
        config_spec.memoryReservationLockedToMax = True
    else:
        mem_alloc.reservation = 0
        config_spec.memoryReservationLockedToMax = False
    config_spec.memoryAllocation = mem_alloc
    return self.vm_obj.ReconfigVM_Task(config_spec)

def cpu_reservation(self, host_cpu_mhz=None, reser=0):
    """ Configure CPU reservation for a VM

    Args:
        host_cpu_mhz (int): host CPU clock in MHz; must be provided when
                            reser is non-0
        reser (int): 0 (clear reservation) or
                     non-0 (reserve all vCPUs that are configured)

    Returns:
        Task
    """
    config_spec = vim.vm.ConfigSpec()
    cpu_alloc = vim.ResourceAllocationInfo()
    if reser:
        assert host_cpu_mhz is not None
        # Reservation is expressed in MHz: one full host core per vCPU.
        vm_cpu = self.vm_obj.config.hardware.numCPU
        cpu_alloc.reservation = int(vm_cpu * host_cpu_mhz)
    else:
        cpu_alloc.reservation = 0
    config_spec.cpuAllocation = cpu_alloc
    return self.vm_obj.ReconfigVM_Task(config_spec)
def cpu_hotadd(self, enable_hotadd=True):
    """ Enable/disable CPU hot-add for a VM

    Args:
        enable_hotadd (bool): True to allow adding vCPUs while powered on

    Returns:
        Task
    """
    config_spec = vim.vm.ConfigSpec()
    config_spec.cpuHotAddEnabled = enable_hotadd
    return self.vm_obj.ReconfigVM_Task(config_spec)

def mem_hotadd(self, enable_hotadd=True):
    """ Enable/disable memory hot-add for a VM

    Args:
        enable_hotadd (bool): True to allow adding memory while powered on

    Returns:
        Task
    """
    config_spec = vim.vm.ConfigSpec()
    config_spec.memoryHotAddEnabled = enable_hotadd
    return self.vm_obj.ReconfigVM_Task(config_spec)

def power_on(self):
    """ Power on VM

    Returns:
        Task
    """
    return self.vm_obj.PowerOn()

def power_off(self):
    """ Power off VM (hard power-off, not a guest OS shutdown)

    Returns:
        Task
    """
    return self.vm_obj.PowerOff()
def latency(self, level):
    """ Configure Latency Sensitivity for a VM

    Args:
        level (str): the Latency Sensitivity Level,
                     available: 'high' and 'normal'

    Returns:
        Task

    Raises:
        SystemExit: if *level* is not one of the supported values
    """
    latency_levels = ["high", "normal"]
    if level not in latency_levels:
        self.logger.error(
            "Wrong Latency Sensitivity level. "
            "Available: {0}".format(str(latency_levels).strip("[]"))
        )
        raise SystemExit
    else:
        config_spec = vim.vm.ConfigSpec()
        lat_sens = vim.LatencySensitivity()
        lat_sens.level = level
        config_spec.latencySensitivity = lat_sens
        return self.vm_obj.ReconfigVM_Task(config_spec)

def destroy(self):
    """ Destroy a VM

    Returns:
        Task
    """
    return self.vm_obj.Destroy()
def add_network_adapter(self, network_obj):
    """ Add a network adapter for a VM

    The device spec uses vim.vm.device.VirtualVmxnet3() by default,
    which is recommended for best performance.

    Args:
        network_obj (vim.Network): network object accessible
                    by either hosts or virtual machines

    Returns:
        Task

    References:
        pyvmomi/docs/vim/Network.rst
        pyvmomi/docs/vim/vm/device/VirtualDeviceSpec.rst
    """
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    nic_spec.device = vim.vm.device.VirtualVmxnet3()
    nic_spec.device.wakeOnLanEnabled = True
    nic_spec.device.addressType = "assigned"
    nic_spec.device.deviceInfo = vim.Description()
    nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
    nic_spec.device.backing.network = network_obj
    nic_spec.device.backing.deviceName = network_obj.name
    nic_spec.device.backing.useAutoDetect = False
    # Connect at power-on and immediately; allow the guest to toggle the link.
    nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic_spec.device.connectable.startConnected = True
    nic_spec.device.connectable.connected = True
    nic_spec.device.connectable.allowGuestControl = True
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [nic_spec]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)

def remove_network_adapter(self, network_obj):
    """ Remove a network adapter for a VM

    Args:
        network_obj: the adapter to remove.
            NOTE(review): the value is assigned to nic_spec.device, so it
            looks like this must be the virtual NIC *device* object rather
            than a vim.Network as the original docstring claimed — confirm
            with callers.

    Returns:
        Task

    References:
        pyvmomi/docs/vim/Network.rst
        pyvmomi/docs/vim/vm/device/VirtualDeviceSpec.rst
    """
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
    nic_spec.device = network_obj
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [nic_spec]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def add_sriov_adapter(self, network_obj, pf_obj, dvs_obj):
    """ Add a network adapter with SR-IOV adapter type for a VM

    Adding SR-IOV adapter requires a back-up physical adapter
    (the physical function, PF).

    Args:
        dvs_obj (vim.dvs.VmwareDistributedVirtualSwitch):
                distributed virtual switch object type; when falsy, a
                standard-switch network backing is used instead
        network_obj (vim.Network): network object accessible
                    by either hosts or virtual machines
        pf_obj (vim.host.PciDevice): a PCI object type describes info
               about of a single PCI device for
               backing up SR-IOV configuration

    Returns:
        Task

    References:
        pyvmomi/docs/vim/Network.rst
        pyvmomi/docs/vim/vm/device/VirtualDeviceSpec.rst
        pyvmomi/docs/vim/host/PciDevice.rst
    """
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    nic_spec.device = vim.vm.device.VirtualSriovEthernetCard()
    nic_spec.device.wakeOnLanEnabled = True
    nic_spec.device.addressType = "assigned"
    nic_spec.device.deviceInfo = vim.Description()
    if dvs_obj:
        # Distributed switch: back the NIC by a DVS port connection.
        nic_spec.device.backing = (
            vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
        )
        nic_spec.device.backing.port = vim.dvs.PortConnection(
            switchUuid=dvs_obj.summary.uuid, portgroupKey=network_obj.config.key
        )
    else:
        # Standard switch: back the NIC directly by the network object.
        nic_spec.device.backing = (
            vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
        )
        nic_spec.device.backing.network = network_obj
        nic_spec.device.backing.deviceName = network_obj.name
        nic_spec.device.backing.useAutoDetect = False
    nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic_spec.device.connectable.startConnected = True
    nic_spec.device.connectable.connected = True
    nic_spec.device.connectable.allowGuestControl = True
    nic_spec.device.sriovBacking = (
        vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
    )
    nic_spec.device.allowGuestOSMtuChange = False
    # Low 16 bits of the PF's device ID as a hex string. format(..., "x")
    # fixes the previous hex(...).lstrip("0x"), which strips the *characters*
    # "0" and "x" (not a prefix) and e.g. yields "" for a device ID of 0.
    device_id = format(pf_obj.deviceId % 2 ** 16, "x")
    sys_id = GetVM(self.vm_obj).pci_id_sys_id_sriov()
    backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
        deviceId=device_id,
        id=pf_obj.id,
        systemId=sys_id[pf_obj.id],
        vendorId=pf_obj.vendorId,
        deviceName=pf_obj.deviceName,
    )
    nic_spec.device.sriovBacking.physicalFunctionBacking = backing
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [nic_spec]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def remove_sriov_adapter(self, network_obj):
    """ Remove a SR-IOV network adapter for a VM.

    Same as removing a regular network adapter.

    Args:
        network_obj (vim.Network): network object accessible
                    by either hosts or virtual machines

    Returns:
        Task
    """
    # Delegates to the generic NIC removal path; SR-IOV needs no extra steps.
    task = self.remove_network_adapter(network_obj)
    return task
def add_pvrdma(self, dvs_obj, network_obj, label="pvRDMA Network Adapter"):
    """ Add a network adapter with pvrdma adapter type for a VM

    Adding pvrdma adapter requires a port group from a DVS, which
    has uplinks mapped from host RDMA NICS.

    Args:
        dvs_obj (vim.dvs.VmwareDistributedVirtualSwitch):
                distributed virtual switch object type
        network_obj (vim.Network): network object accessible
                    by either hosts or virtual machines
        label (str): adapter label

    Returns:
        Task

    API References:
        pyvmomi/docs/vim/Network.rst
        pyvmomi/docs/vim/vm/device/VirtualDeviceSpec.rst
        pyvmomi/docs/vim/dvs/VmwareDistributedVirtualSwitch/ConfigInfo.rst

    References:
        For more pvrdma configuration details,
        please refer to VMware Docs for PVRDMA Support and Configuration
    """
    nic_spec = vim.vm.device.VirtualDeviceSpec()
    nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    # Paravirtual RDMA adapter device type.
    nic_spec.device = vim.vm.device.VirtualVmxnet3Vrdma()
    nic_spec.device.deviceInfo = vim.Description(label=label)
    nic_spec.device.addressType = "generated"
    # pvRDMA is always backed by a distributed virtual port (no standard
    # switch fallback, unlike add_network_adapter/add_sriov_adapter).
    nic_spec.device.backing = (
        vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
    )
    nic_spec.device.backing.port = vim.dvs.PortConnection(
        switchUuid=dvs_obj.summary.uuid, portgroupKey=network_obj.config.key
    )
    nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
    nic_spec.device.connectable.startConnected = True
    nic_spec.device.connectable.connected = True
    nic_spec.device.connectable.allowGuestControl = True
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [nic_spec]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def remove_pvrdma(self, network_obj):
    """ Remove a PVRDMA network adapter for a VM. Basically same as
    removing a regular network adapter

    Args:
        network_obj (vim.Network): network object accessible
                    by either hosts or virtual machines

    Returns:
        Task
    """
    # Fix: the task was previously dropped (method returned None despite its
    # docstring), so callers could not wait on removal completion. Propagate
    # it like remove_sriov_adapter does.
    return self.remove_network_adapter(network_obj)
def config_networking(
    self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname
):
    """ Configure network properties for a VM via guest customization

    Args:
        network_obj (vim.Network): network object accessible
                    by either hosts or virtual machines.
        ip (str): static IP address (IPv4 format) or None (DHCP)
        netmask (str): Should in IPv4 address format
        gateway (str): Should in IPv4 address format
        domain (str): DNS domain assigned to the adapter
        dns (str or list of str): DNS server(s)
        guest_hostname (str): if None, the default is VM name

    Returns:
        Task: a guest customization task (Customize, not ReconfigVM_Task)

    References:
        pyvmomi/docs/vim/Network.rst
        pyvmomi/docs/vim/vm/customization/Specification.rst
    """
    global_ip = vim.vm.customization.GlobalIPSettings()
    adapter_map = vim.vm.customization.AdapterMapping()
    adapter_map.adapter = vim.vm.customization.IPSettings()
    # Match the customization spec to the adapter by MAC address.
    # NOTE(review): this reads .macAddress off the argument, which suggests
    # callers pass a NIC device here rather than a vim.Network — confirm.
    adapter_map.macAddress = network_obj.macAddress
    if ip:
        adapter_map.adapter.ip = vim.vm.customization.FixedIp()
        adapter_map.adapter.ip.ipAddress = ip
    else:
        # No static IP given: fall back to DHCP.
        adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
    adapter_map.adapter.subnetMask = netmask
    adapter_map.adapter.gateway = gateway
    global_ip.dnsServerList = dns
    adapter_map.adapter.dnsDomain = domain
    # LinuxPrep identity: this customization targets Linux guests.
    ident = vim.vm.customization.LinuxPrep()
    ident.hostName = vim.vm.customization.FixedName()
    if guest_hostname:
        ident.hostName.name = guest_hostname
    else:
        ident.hostName.name = self.vm_obj.name
    custom_spec = vim.vm.customization.Specification()
    custom_spec.nicSettingMap = [adapter_map]
    custom_spec.identity = ident
    custom_spec.globalIPSettings = global_ip
    return self.vm_obj.Customize(spec=custom_spec)
def enable_fork_parent(self):
    """ Enable fork parent for a VM

    Returns:
        None
    """
    self.vm_obj.EnableForkParent()

def disable_fork_parent(self):
    """ Disable fork parent for a VM

    Returns:
        None: the underlying task handle is not propagated
        (the original docstring claimed Task, but nothing is returned)
    """
    self.vm_obj.DisableForkParent()
def full_clone(
    self,
    dest_vm_name,
    host_obj,
    datastore_obj,
    vm_folder_obj,
    resource_pool_obj,
    cpu,
    mem,
):
    """ Clone a VM via full clone

    Args:
        dest_vm_name (str): Name of destination VM
        host_obj (vim.HostSystem): VM host destination
        datastore_obj (vim.Datastore): VM datastore destination
        vm_folder_obj (vim.Folder): VM folder destination
        resource_pool_obj (vim.ResourcePool): Resource Pool destination
        cpu (int): number of CPUs; falsy skips hardware customization
        mem (int): memory size in MB; falsy skips hardware customization

    Returns:
        Task
    """
    self.logger.info("Full cloning VM %s to %s" % (self.vm_obj.name, dest_vm_name))
    relocation_spec = vim.vm.RelocateSpec()
    relocation_spec.pool = resource_pool_obj
    relocation_spec.datastore = datastore_obj
    relocation_spec.host = host_obj
    clone_spec = vim.vm.CloneSpec()
    clone_spec.location = relocation_spec
    if cpu or mem:
        # NOTE(review): when only one of cpu/mem is truthy the other is still
        # assigned its falsy value here — confirm pyVmomi treats a None
        # property as "leave unchanged".
        config_spec = vim.vm.ConfigSpec()
        config_spec.numCPUs = cpu
        config_spec.memoryMB = mem
        clone_spec.config = config_spec
    else:
        self.logger.debug("No hardware customization for the cloned VM")
    task = self.vm_obj.Clone(
        folder=vm_folder_obj, name=dest_vm_name, spec=clone_spec
    )
    return task
def linked_clone(
    self, dest_vm, host_obj, folder_obj, resource_pool_obj, cpu, mem, power_on=True
):
    """ Clone a VM via linked clone (delta disks backed by a snapshot)

    Args:
        dest_vm (str): Name of destination VM
        host_obj (vim.HostSystem): VM host destination
        folder_obj (vim.Folder): VM folder destination
        resource_pool_obj (vim.ResourcePool): Resource Pool destination
        cpu (int): number of CPUs; falsy skips hardware customization
        mem (int): memory size in MB; falsy skips hardware customization
        power_on (bool): whether enable power on after cloning

    Returns:
        Task
    """
    self.logger.info(
        "Linked cloning VM {0} to {1}".format(self.vm_obj.name, dest_vm)
    )
    relocation_spec = vim.vm.RelocateSpec()
    relocation_spec.pool = resource_pool_obj
    relocation_spec.host = host_obj
    # Child disk backing is what makes this a linked (not full) clone.
    relocation_spec.diskMoveType = "createNewChildDiskBacking"
    clone_spec = vim.vm.CloneSpec()
    clone_spec.location = relocation_spec
    if len(self.vm_obj.rootSnapshot) < 1:
        # Linked clones need a snapshot to share disks with; create one
        # synchronously (waits for the task) if the source has none.
        self.logger.info(
            "Creating a snapshot for VM for "
            "linked clone {0}".format(self.vm_obj.name)
        )
        task = self.vm_obj.CreateSnapshot_Task(
            name="snapshot0", memory=False, quiesce=False
        )
        GetWait().wait_for_tasks(
            [task],
            task_name="Take snapshot for "
            "template VM for "
            "enabling linked clone",
        )
    clone_spec.powerOn = power_on
    clone_spec.template = False
    # Clone off the first root snapshot of the source VM.
    clone_spec.snapshot = self.vm_obj.snapshot.rootSnapshotList[0].snapshot
    if cpu or mem:
        config_spec = vim.vm.ConfigSpec()
        config_spec.numCPUs = cpu
        config_spec.memoryMB = mem
        clone_spec.config = config_spec
    else:
        self.logger.debug("No hardware customization for the cloned VM")
    task = self.vm_obj.Clone(folder=folder_obj, name=dest_vm, spec=clone_spec)
    return task
@staticmethod
def _find_nearest_power_of_two(x):
""" find nearest of power of two for a given int number
Args:
x (int)
Returns:
int: nearest power of two
"""
return 1 << (x - 1).bit_length()
def add_pci(self, pci, host_obj, vm_update, vm_status, mmio_size):
    """ Add a PCI device for a VM.

    If a PCI device has large BARs, it requires 64bit MMIO
    support and large enough MMIO mapping space, so the two pciPassthru
    extra-config entries are always added and UEFI installation is
    checked (warning only; the device is added regardless).

    Args:
        pci (str): pci ID of the PCI device
        host_obj (vim.HostSystem): Host obj to locate the PCI device
        vm_update (ConfigVM): VM update obj used to add the extra entries
        vm_status (GetVM): VM status obj
        mmio_size (int): 64-bit MMIO space in GB; defaults to 256 when None

    Returns:
        list: a list of Task objects

    References:
        https://kb.vmware.com/s/article/2142307
    """
    self.logger.info("Adding PCI device {0} for {1}".format(pci, self.vm_obj.name))
    extra_config_key1 = "pciPassthru.64bitMMIOSizeGB"
    extra_config_key2 = "pciPassthru.use64bitMMIO"
    if mmio_size is None:
        mmio_size = 256
    tasks = []
    pci_obj = GetHost(host_obj).pci_obj(pci)
    # Low 16 bits of the device ID as a hex string. format(..., "x") fixes
    # the previous hex(...).lstrip("0x"), which strips the *characters*
    # "0" and "x" (not a prefix) and e.g. yields "" for a device ID of 0.
    device_id = format(pci_obj.deviceId % 2 ** 16, "x")
    if not vm_status.uefi():
        self.logger.warning(
            "VM {0} is not installed with UEFI. "
            "If PCI device has large BARs, "
            "UEFI installation is required.".format(self.vm_obj.name)
        )
    else:
        self.logger.info(
            "Good. VM {0} has UEFI " "installation.".format(self.vm_obj.name)
        )
    sys_id = vm_status.pci_id_sys_id_passthru()
    backing = vim.VirtualPCIPassthroughDeviceBackingInfo(
        deviceId=device_id,
        id=pci_obj.id,
        systemId=sys_id[pci_obj.id],
        vendorId=pci_obj.vendorId,
        deviceName=pci_obj.deviceName,
    )
    backing_obj = vim.VirtualPCIPassthrough(backing=backing)
    dev_config_spec = vim.VirtualDeviceConfigSpec(device=backing_obj)
    dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [dev_config_spec]
    tasks.append(self.vm_obj.ReconfigVM_Task(spec=config_spec))
    tasks.append(vm_update.add_extra(extra_config_key1, str(mmio_size)))
    tasks.append(vm_update.add_extra(extra_config_key2, "TRUE"))
    return tasks
def remove_pci(self, pci, vm_status):
    """ Remove a PCI device from a VM

    Args:
        pci (str): pci ID of the PCI device
        vm_status (GetVM): the VM status obj used to look up the device

    Returns:
        Task
    """
    self.logger.info(
        "Removing PCI {0} from VM " "{1}".format(pci, self.vm_obj.name)
    )
    pci_obj = vm_status.pci_obj(pci)
    dev_config_spec = vim.VirtualDeviceConfigSpec()
    dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
    dev_config_spec.device = pci_obj
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [dev_config_spec]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def add_extra(self, entry, value):
    """ Add an extra advanced vmx entry for a VM

    If the key already exists in the VM's extraConfig, the reconfigure
    updates its value in place (hence "Adding/Updating" in the log line).

    Args:
        entry (str): extra config key
        value (str): extra config value
    Returns:
        Task
    """
    config_spec = vim.vm.ConfigSpec()
    self.logger.info("Adding/Updating extra config: {0} = {1}".format(entry, value))
    # extraConfig entries are simple key/value OptionValue pairs.
    opt = vim.option.OptionValue()
    opt.key = entry
    opt.value = value
    config_spec.extraConfig = [opt]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def remove_extra(self, entry):
    """ Remove an extra advanced vmx entry from a VM

    NOTE(review): this blanks the value rather than deleting the key;
    whether vCenter drops an empty-valued key appears version-dependent —
    confirm against the target vSphere release.

    Args:
        entry (str): extra config key
    Returns:
        Task
    """
    config_spec = vim.vm.ConfigSpec()
    self.logger.info("Removing extra config {0}".format(entry))
    opt = vim.option.OptionValue()
    opt.key = entry
    opt.value = ""
    config_spec.extraConfig = [opt]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def add_vgpu(self, vgpu_profile):
    """ Attach a vGPU profile to this VM
    Args:
        vgpu_profile (str): the name of vGPU profile to be added into a VM
    Returns:
        Task
    """
    self.logger.info(
        "Adding vGPU {0} for VM {1}".format(vgpu_profile, self.vm_obj.name)
    )
    # A vGPU is presented as a PCI passthrough device with a vmiop backing.
    vgpu_device = vim.VirtualPCIPassthrough(
        backing=vim.VirtualPCIPassthroughVmiopBackingInfo(vgpu=vgpu_profile)
    )
    device_spec = vim.VirtualDeviceConfigSpec(device=vgpu_device)
    device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
    reconfig_spec = vim.vm.ConfigSpec(deviceChange=[device_spec])
    return self.vm_obj.ReconfigVM_Task(spec=reconfig_spec)
def remove_vgpu(self, vgpu_profile):
    """ Remove a vGPU profile for a VM
    Args:
        vgpu_profile (str): the name of vGPU profile to be removed from a VM
    Returns:
        Task
    """
    self.logger.info(
        "Removing vGPU %s from VM %s" % (vgpu_profile, self.vm_obj.name)
    )
    # Resolve the live device object for this profile so the remove spec
    # references the exact device key currently attached to the VM.
    vm_status = GetVM(self.vm_obj)
    vgpu_obj = vm_status.vgpu_obj(vgpu_profile)
    dev_config_spec = vim.VirtualDeviceConfigSpec()
    dev_config_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
    dev_config_spec.device = vgpu_obj
    config_spec = vim.vm.ConfigSpec()
    config_spec.deviceChange = [dev_config_spec]
    return self.vm_obj.ReconfigVM_Task(spec=config_spec)
def execute_script(self, process_manager, script, username, password):
    """ Execute a post script for a VM
    First copy local script content to remote VM
    Then execute the script in remote VM
    Only works for Linux system

    Bug fix: the script file handle returned by open() was never closed;
    it is now read inside a ``with`` block.

    Args:
        process_manager (GuestProcessManager): A singleton managed object
                        that provides methods for guest process operations.
                        Retrieved from service content.
        script (str): the script to be executed. Should be in Path
        username (str): username for authentication
        password (str): password for authentication
    Returns:
        tuple: pid, auth, self.vm_obj
                where pid (long) is the pid of the program started
                auth (vim.vm.guest.NamePasswordAuthentication) contains
                guest OS authentication info
                self.vm_obj (vim.VirtualMachine): the VM object
    References:
        pyvmomi/docs/vim/vm/guest/GuestOperationsManager.rst
    """
    auth = vim.vm.guest.NamePasswordAuthentication()
    auth.username = username
    auth.password = password
    try:
        # Quote the script body for the guest shell; single quotes inside
        # the script are escaped so echo reproduces them verbatim.
        with open(script) as script_file:
            copy_content = (
                "'"
                + script_file.read().replace("'", '"\'"')
                + "' >> "
                + os.path.basename(script)
            )
        # Step 1: echo the quoted content into a file in the guest
        # (the >> redirection is interpreted by the guest shell).
        program_spec = vim.vm.guest.ProcessManager.ProgramSpec()
        program_spec.programPath = "/bin/echo"
        program_spec.arguments = copy_content
        pid = process_manager.StartProgramInGuest(self.vm_obj, auth, program_spec)
        assert pid > 0
        # Step 2: run the copied script with sh, teeing output to a log file.
        program_spec.programPath = "/bin/sh"
        log_file = "/var/log/vhpc_toolkit.log"
        execute_content = os.path.basename(script) + " 2>&1 | tee " + log_file
        program_spec.arguments = execute_content
        pid = process_manager.StartProgramInGuest(self.vm_obj, auth, program_spec)
        assert pid > 0
        self.logger.info(
            "Script {0} is being executed in VM {1} guest OS "
            "and PID is {2}".format(os.path.basename(script), self.vm_obj.name, pid)
        )
    except IOError:
        self.logger.error("Can not open script {0}".format(script))
        raise SystemExit
    except AssertionError:
        self.logger.error("Script is not launched successfully.")
        raise SystemExit
    except vim.fault.InvalidGuestLogin as e:
        self.logger.error(e.msg)
        raise SystemExit
    else:
        return pid, auth, self.vm_obj
class ConfigHost(object):
    """
    A class for configuring host properties (standard virtual switches and
    their port groups) via the host's networkSystem manager.
    """
    def __init__(self, host_obj):
        """
        Args:
            host_obj (vim.HostSystem): The HostSystem managed object type
        References:
            pyvmomi/docs/vim/HostSystem.rst
        """
        self.host_obj = host_obj
        self.logger = log.my_logger(name=self.__class__.__name__)
    def create_svs(self, svs_name, vmnic, num_ports=8):
        """ Create a standard virtual switch
        It calls AddVirtualSwitch method from HostNetworkSystem. It
        doesn't return a Task to track
        Args:
            svs_name (str): The name of SVS to be created.
            vmnic (str): The name of physical adapter to create the SVS
            num_ports (int): number of ports for the SVS
        Returns:
            None
        References:
            pyvmomi/docs/vim/host/NetworkSystem.rst
        """
        svs = vim.host.VirtualSwitch.Specification()
        svs.numPorts = num_ports
        # Bond the switch to the given physical NIC.
        svs.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[vmnic])
        host_network_obj = self.host_obj.configManager.networkSystem
        host_network_obj.AddVirtualSwitch(vswitchName=svs_name, spec=svs)
    def destroy_svs(self, svs_name):
        """ Destroy a standard virtual switch
        Args:
            svs_name (str): The name of SVS to be destroyed
        Returns:
            None
        References:
            pyvmomi/docs/vim/host/NetworkSystem.rst
        """
        host_network_obj = self.host_obj.configManager.networkSystem
        host_network_obj.RemoveVirtualSwitch(vswitchName=svs_name)
    def create_pg_in_svs(self, svs_name, pg_name, vlan_id=0):
        """ Create a Port Group within standard virtual switch
        Args:
            svs_name (str): The name of SVS to create a port group
            pg_name (str): The name of port group to be created
            vlan_id (int): The VLAN ID for ports using this port group.
        Returns:
            None
        References:
            pyvmomi/docs/vim/host/NetworkSystem.rst
            pyvmomi/docs/vim/host/PortGroup.rst
        """
        pg_spec = vim.host.PortGroup.Specification()
        pg_spec.name = pg_name
        pg_spec.vlanId = vlan_id
        pg_spec.vswitchName = svs_name
        # Permissive security policy except MAC-address changes;
        # presumably required by the toolkit's workloads — confirm.
        security_policy = vim.host.NetworkPolicy.SecurityPolicy()
        security_policy.allowPromiscuous = True
        security_policy.forgedTransmits = True
        security_policy.macChanges = False
        pg_spec.policy = vim.host.NetworkPolicy(security=security_policy)
        host_network_obj = self.host_obj.configManager.networkSystem
        host_network_obj.AddPortGroup(portgrp=pg_spec)
    def destroy_pg(self, pg_name):
        """ Destroy a Port Group from a Host
        Args:
            pg_name (str): The name of port group to be destroyed
        Returns:
            None
        API References:
            pyvmomi/docs/vim/host/NetworkSystem.rst
            pyvmomi/docs/vim/host/PortGroup.rst
        """
        host_network_obj = self.host_obj.configManager.networkSystem
        host_network_obj.RemovePortGroup(pgName=pg_name)
class ConfigDatacenter(object):
    """
    A class for configuring datacenter properties
    """
    def __init__(self, datacenter_obj):
        """
        Args:
            datacenter_obj (vim.Datacenter): the Datacenter managed object type
        API References:
            pyvmomi/docs/vim/Datacenter.rst
        """
        self.datacenter_obj = datacenter_obj
        self.logger = log.my_logger(name=self.__class__.__name__)
    def create_dvs(self, host_vmnics, dvs_name, num_uplinks=4):
        """ Create a distributed virtual switch within the datacenter
        Args:
            host_vmnics (dict): A dictionary storing {host_obj: vmnics}
                                where host_obj is vim.HostSystem type and
                                vmnics is a list of str for the names of
                                physical adapters.
            dvs_name (str): The name of the DVS to be created
            num_uplinks (int): Number of active uplinks
        Returns:
            Task (or None when a DVS with this name already exists or after
            early exit)
        References:
            pyvmomi/docs/vim/host/NetworkSystem.rst
            pyvmomi/docs/vim/host/PortGroup.rst
        """
        # Idempotence guard: do nothing if the DVS already exists.
        for network_obj in GetDatacenter(self.datacenter_obj).network_resources():
            if network_obj.name == dvs_name:
                self.logger.info("DVS {0} already exists".format(dvs_name))
                return
        # Build one HostMember config per connected host, backing the DVS
        # with the requested physical NICs of that host.
        host_cfgs = []
        for host_obj, vmnics in host_vmnics.items():
            uplinks = []
            if host_obj.runtime.connectionState != "connected":
                self.logger.error(
                    "Host {0} is not connected. Skipped".format(host_obj.name)
                )
                continue
            host_cfg = vim.dvs.HostMember.ConfigSpec()
            host_cfg.operation = vim.ConfigSpecOperation.add
            host_cfg.host = host_obj
            host_cfg.backing = vim.dvs.HostMember.PnicBacking()
            # Match requested vmnic names against the host's actual pnics.
            for pnic in GetHost(host_obj).pnics():
                for vmnic in vmnics:
                    if pnic.device == vmnic:
                        pnic_spec = vim.dvs.HostMember.PnicSpec()
                        pnic_spec.pnicDevice = pnic.device
                        uplinks.append(pnic_spec)
            host_cfg.backing.pnicSpec = uplinks
            host_cfgs.append(host_cfg)
        # Name the uplink ports uplink1..uplinkN and use explicit failover
        # order with all of them active.
        uplink_port_policy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
        uplnk_port_order = []
        for i in range(num_uplinks):
            name = "uplink%d" % (i + 1)
            uplink_port_policy.uplinkPortName.append(name)
            uplnk_port_order.append(name)
        string_policy = vim.StringPolicy()
        string_policy.value = "failover_explicit"
        uplink_port_order_policy = (
            vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortOrderPolicy()
        )
        # activeUplinkPort: list of active uplink ports used for load balancing
        uplink_port_order_policy.activeUplinkPort = uplnk_port_order
        team = vim.dvs.VmwareDistributedVirtualSwitch.UplinkPortTeamingPolicy()
        team.policy = string_policy
        team.uplinkPortOrder = uplink_port_order_policy
        port_config_policy = (
            vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
        )
        port_config_policy.uplinkTeamingPolicy = team
        dvs_config_spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
        dvs_config_spec.name = dvs_name
        dvs_config_spec.host = host_cfgs
        dvs_config_spec.defaultPortConfig = port_config_policy
        dvs_config_spec.lacpApiVersion = (
            vim.dvs.VmwareDistributedVirtualSwitch.LacpApiVersion.multipleLag
        )
        dvs_config_spec.numStandalonePorts = num_uplinks
        dvs_create_spec = vim.DistributedVirtualSwitch.CreateSpec(
            configSpec=dvs_config_spec
        )
        task = self.datacenter_obj.networkFolder.CreateDVS_Task(dvs_create_spec)
        return task
class ConfigDVS(object):
    """
    A class for configuring distributed virtual switch (DVS) properties
    """
    def __init__(self, dvs_obj):
        """
        Args:
            dvs_obj (vim.dvs.VmwareDistributedVirtualSwitch):
                    distributed virtual switch object
        References:
            pyvmomi/docs/vim/DistributedVirtualSwitch.rst
        """
        self.dvs_obj = dvs_obj
    def create_pg_in_dvs(self, dvs_pg_name, num_ports=1):
        """ Create a port group in the DVS.
        Args:
            dvs_pg_name (str): the port group name to be created
            num_ports (int): number of ports in this port group
        Returns:
            Task
        """
        dpg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
        dpg_spec.name = dvs_pg_name
        dpg_policy = vim.dvs.DistributedVirtualPortgroup.PortgroupPolicy()
        dpg_spec.policy = dpg_policy
        dpg_spec.numPorts = num_ports
        # earlyBinding: a port is assigned when a VM connects, not at power-on.
        dpg_spec.type = "earlyBinding"
        task = self.dvs_obj.AddDVPortgroup_Task(spec=[dpg_spec])
        return task
    def destroy_dvs(self):
        """ Destroy the DVS
        Returns:
            Task
        """
        task = self.dvs_obj.Destroy_Task()
        return task
class ConfigCluster(object):
    """Configure cluster-level properties.

    The DRS toggles are currently placeholders awaiting implementation.
    """

    def __init__(self, cluster_obj):
        """
        Args:
            cluster_obj [vim.Cluster]: a Cluster object
        """
        self.cluster_obj = cluster_obj

    def enable_drs(self):
        """Enable DRS for the cluster.

        Todo:
            To be implemented
        """
        pass

    def disable_drs(self):
        """Disable DRS for the cluster.

        Todo:
            To be implemented
        """
        pass
|
994,047 | 82f5d6173f19684f129fe7978a9bab17ef275e6f | # Instructions
# You are going to write a List Comprehension to create a new list called squared_numbers.
# This new list should contain every number in the list numbers but each number should be squared.
# e.g. `4 * 4 = 16`
# 4 squared equals 16.
# DO NOT modify the List numbers directly. Try to use List Comprehension instead of a Loop.
# Example Output
# [1, 1, 4, 9, 25, 64, 169, 441, 1156, 3025]
# Hint
# Use the keyword method for starting the List comprehension and fill in the relevant parts.
# Make sure the squared_numbers is printed into the console for the code checking to work.
numbers = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
# Source list above is left untouched, as the exercise requires.
# Square every element using a one-line list comprehension.
squared_numbers = [n * n for n in numbers]
# Printed so automated code checking can see the result.
print(squared_numbers)
|
994,048 | 6459163799934a6b8b51251b4ec295fbb5db0f57 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# script_utils.py ---
#
# @Filename: script_utils.py
# @Description:
# @Author: Weijie Li
# @Created time: 2018-03-18 17:35:59
# @Last Modified by:
# @Last Modified time:
import src.messages.messages_pb2 as pb
def script_to_verify_key(vk):
    """Build a pay-to-key script: push the verify key, then CHECK_SIG."""
    return pb.Script(body=[pb.ScriptUnit(type=pb.SCRIPT_DATA, data=vk),
                           pb.ScriptUnit(type=pb.SCRIPT_CHECK_SIG)])
def script_to_member(member):
    """Build a pay-to-key script locked to *member*'s verify key."""
    return script_to_verify_key(member.verify_key_str)
def script_to_sig(member, data):
    """Build an unlocking script holding *member*'s signature over *data*."""
    signature = member.sign(data)
    return pb.Script(body= [pb.ScriptUnit(type=pb.SCRIPT_DATA, data=signature)])
994,049 | f74ef18ae6e42e936397fe78d793e220f10c7310 |
def suma ():
    """Demo of the ``global`` keyword: adds 3 to the module-level ``x``."""
    global x
    Y = 3
    x = Y + x
    # Message (Spanish): "the x of the suma function is:"
    print ("la x de la funcion suma es_: ", x)
def main ():
    """Initialize global ``x`` to 18, let suma() add 3, then print it."""
    global x
    x = 8
    x = x + 10
    suma()
    # Message (Spanish): "the x of main is:" — prints 21 after suma().
    print("la x del main es: ", x)
main()
994,050 | 29c2860fe65a0bb6ad57ade7c5ea1a1b6f32e4d5 | import timeit
import unittest
import random
import numpy as np
def read_data(filename):
    """Read whitespace-separated integers from *filename* into a flat list."""
    numbers = []
    with open(filename) as handle:
        for row in handle:
            numbers += [int(token) for token in row.split()]
    return numbers
def selection_sort(num_list):
    """Sort *num_list* in place in ascending order using selection sort.

    Bug fix: the original seeded ``largest = 0`` (a value, not an index),
    which misplaces elements whenever the list contains values <= 0, and
    duplicated the index-0 comparison after the inner scan. This version
    tracks the index of the maximum directly.

    Args:
        num_list (list): comparable elements; mutated in place.

    Returns:
        list: the same list, sorted ascending.
    """
    # Shrink the unsorted prefix from the right: after each pass the
    # largest remaining element sits at index ``end``.
    for end in range(len(num_list) - 1, 0, -1):
        largest_idx = 0
        for j in range(1, end + 1):
            if num_list[j] > num_list[largest_idx]:
                largest_idx = j
        num_list[largest_idx], num_list[end] = num_list[end], num_list[largest_idx]
    return num_list
##################
### Radix sort ###
##################
def radix_sort(num_list):
    """Sort non-negative integers ascending via LSD radix sort (base 10).

    Bug fix: the original used Python-2-style ``/`` division; under
    Python 3 that is float division, so the loop condition never became 0
    and counting_sort indexed lists with floats (TypeError). Both now use
    floor division ``//``.

    Args:
        num_list (list[int]): non-negative integers; mutated in place.

    Returns:
        list[int]: the same list, sorted ascending.
    """
    max_val = max(num_list)
    exp = 1
    # One stable pass per decimal digit, least-significant first.
    while max_val // exp > 0:
        counting_sort(num_list, exp)
        exp *= 10
    return num_list
def counting_sort(num_list, exp1):
    """Stable counting sort of *num_list* by the digit selected by *exp1*.

    Args:
        num_list (list[int]): values to reorder; mutated in place.
        exp1 (int): power of ten selecting the digit (1, 10, 100, ...).
    """
    b_array = [0] * len(num_list)
    c_array = 10 * [0]
    # Histogram of the current digit.
    for n in num_list:
        c_array[(n // exp1) % 10] += 1
    # Prefix sums: c_array[d] becomes one past the last slot for digit d.
    for i in range(1, 10):
        c_array[i] += c_array[i - 1]
    # Fill the output back-to-front so equal digits keep their order (stability).
    i = len(num_list) - 1
    while i >= 0:
        digit = (num_list[i] // exp1) % 10
        b_array[c_array[digit] - 1] = num_list[i]
        c_array[digit] -= 1
        i -= 1
    for i in range(0, len(num_list)):
        num_list[i] = b_array[i]
################
### Heapsort ###
################
def build_heap(A):
    """Rearrange list *A* into a max-heap in place (bottom-up heapify)."""
    # Start at the last internal node and sift each node down toward index 0.
    i = (len(A)-1)//2
    while i >= 0:
        heapify(A,i)
        i -=1
def heapify(array,i):
    """Sift element *i* down so the subtree rooted there is a max-heap.

    Returns:
        the (mutated) array, or 0 when *i* is past the last internal node.
        NOTE(review): the mixed return type (list vs 0) is relied on by
        heap_sort's slice-assignment — keep in sync if changed.
    """
    if i > (len(array)-1)//2:
        return 0
    l = 2*i+1  # left child (0-based indexing)
    r = 2*i+2  # right child
    if l <= len(array)-1 and array[l] > array[i]:
        largest = l
    else:
        largest = i
    if r <= len(array)-1 and array[r] > array[largest]:
        largest = r
    if largest != i:
        # Swap the larger child up, then continue sifting down from there.
        temp = array[i]
        array[i] = array[largest]
        array[largest] = temp
        heapify(array,largest)
    return array
def heap_sort(num_list):
    """Sort *num_list* ascending in place using heapsort (max-heap)."""
    build_heap(num_list)
    i = len(num_list) -1
    while i >= 1:
        # Move the current maximum (root) into its final slot at index i.
        temp = num_list[i]
        num_list[i] = num_list[0]
        num_list[0] = temp
        # NOTE(review): heapify runs on a slice copy which is then assigned
        # back — correct, but copies O(n) per iteration.
        num_list[:i] = heapify(num_list[:i],0)
        i -=1
    return num_list
#########################
### Quicksort + rand ###
#########################
def quick_sort_partition_rand(num_list,i,j):
    """Swap a random element of num_list[i..j] to position i, then partition.

    Randomizing the pivot guards against worst-case O(n^2) on sorted input.
    """
    pivot_index = random.randint(i,j)
    temp = num_list[i]
    num_list[i] = num_list[pivot_index]
    num_list[pivot_index] = temp
    return quick_sort_partition(num_list, i, j)
def quick_sort_helper_rand(num_list,i,j):
    """Recursively quicksort num_list[i..j] using a random pivot."""
    if i<j:
        p = quick_sort_partition_rand(num_list,i,j)
        quick_sort_helper_rand(num_list,i,p-1)
        quick_sort_helper_rand(num_list,p+1,j)
def quick_sort_rand(num_list):
    """Sort *num_list* in place (ascending) via randomized quicksort."""
    i = 0
    j = len(num_list) - 1
    quick_sort_helper_rand(num_list,i,j)
    return num_list
def quick_sort_partition(num_list,i,j):
    """Partition num_list[i..j] around num_list[i] (Hoare-style scan).

    Elements <= pivot end up left of the returned index, >= pivot right of
    it, and the pivot is placed at the returned index j.
    """
    pivot = num_list[i]
    first = i
    i = i+1
    done = False
    while not done:
        # Advance i over elements belonging to the left partition.
        while num_list[i] <= pivot and i <= j and i < len(num_list)-1:
            i += 1
        # Retreat j over elements belonging to the right partition.
        while num_list[j] >= pivot and j >= i and j >first -1:
            j -= 1
        # Pointers crossed, or scans hit either end of the range: done.
        if j < i:
            done = True
        elif j == len(num_list) -1 and i == len(num_list) -1:
            done = True
        elif j == first and i == first :
            done = True
        else:
            # Out-of-place pair found: swap them and keep scanning.
            left_s = num_list[i]
            right_s = num_list[j]
            num_list[i] = right_s
            num_list[j] = left_s
    # Put the pivot into its final position.
    num_list[first] = num_list[j]
    num_list[j] = pivot
    return j
def quick_sort_helper(num_list,i,j):
    """Recursively quicksort num_list[i..j] (first element as pivot).

    The branch ordering recurses into the larger side first; the result is
    the same either way — presumably an (ineffective) stack-depth tweak.
    """
    if i<j:
        p = quick_sort_partition(num_list,i,j)
        if j - p+1 < p-1 - i:
            quick_sort_helper(num_list,p+1,j)
            quick_sort_helper(num_list,i,p-1)
        elif j - p+1 > p-1 - i:
            quick_sort_helper(num_list,i,p-1)
            quick_sort_helper(num_list,p+1,j)
        else:
            quick_sort_helper(num_list,p+1,j)
            quick_sort_helper(num_list,i,p-1)
def quick_sort(num_list):
    """Sort *num_list* in place (ascending) via quicksort, first-element pivot."""
    i = 0
    j = len(num_list) - 1
    quick_sort_helper(num_list,i,j)
    return num_list
##################
### Merge Sort ###
##################
def merge_sort(num_list):
    """Sort *num_list* in place (ascending) via recursive merge sort.

    Returns:
        the same list, sorted.
    """
    if len(num_list) > 1:
        mid = len(num_list) // 2
        left = num_list[:mid]
        right = num_list[mid:]
        merge_sort(left)
        merge_sort(right)
        # Merge the two sorted halves back into num_list.
        li = ri = wi = 0
        while li < len(left) and ri < len(right):
            if left[li] < right[ri]:
                num_list[wi] = left[li]
                li += 1
            else:
                num_list[wi] = right[ri]
                ri += 1
            wi += 1
        # Drain whichever half still has elements left.
        while li < len(left):
            num_list[wi] = left[li]
            li += 1
            wi += 1
        while ri < len(right):
            num_list[wi] = right[ri]
            ri += 1
            wi += 1
    return num_list
class MyTest(unittest.TestCase):
    """Sanity tests for every sorter; requires the ./hw1_data fixture files."""
    def test_sorting(self):
        # Each sorter mutates its input, so the fixture is re-read per call.
        num_list = read_data("./hw1_data/testing_file.txt")
        self.assertEqual(merge_sort(num_list), [1,2,3,4,5,6,7,7,8,9])
        num_list = read_data("./hw1_data/testing_file.txt")
        self.assertEqual(selection_sort(num_list), [1,2,3,4,5,6,7,7,8,9])
        num_list = read_data("./hw1_data/testing_file.txt")
        self.assertEqual(quick_sort(num_list), [1,2,3,4,5,6,7,7,8,9])
        num_list = read_data("./hw1_data/testing_file.txt")
        self.assertEqual(quick_sort_rand(num_list), [1,2,3,4,5,6,7,7,8,9])
        #num_list = read_data("./hw1_data/testing_file.txt")
        #self.assertEqual(counting_sort(num_list), [1,2,3,4,5,6,7,7,8,9])
        num_list = read_data("./hw1_data/testing_file2.txt")
        self.assertEqual(radix_sort(num_list), [3,12,34,404,450,500])
        num_list = read_data("./hw1_data/testing_file.txt")
        self.assertEqual(heap_sort(num_list), [1,2,3,4,5,6,7,7,8,9])
def main_testing():
    """Run the unittest suite defined in this module."""
    unittest.main()
def main(args):
    """Time the chosen sorting algorithm over several runs; print the average.

    Bug fix: the ``'selection'`` branch previously called ``merge_sort``;
    the algorithm name now dispatches to the matching function via a dict,
    which also removes the repeated timing boilerplate.

    Args:
        args: argparse namespace with ``filename`` (input data path),
            ``algorithm`` (one of the keys below) and ``num_runs``.
    """
    algorithms = {
        'merge': merge_sort,
        'selection': selection_sort,
        'quick_rand': quick_sort_rand,
        'quick': quick_sort,
        'radix': radix_sort,
        'heap': heap_sort,
    }
    sort_func = algorithms[args.algorithm]
    num_list = read_data(args.filename)
    run_time_list = []
    for _ in range(0, int(args.num_runs)):
        # NOTE: the list stays sorted after the first run, so subsequent
        # runs time the algorithm on sorted input (same as the original).
        start = timeit.default_timer()
        sort_func(num_list)
        stop = timeit.default_timer()
        run_time_list.append(stop - start)
    print("The average run time for %s sort is: " % args.algorithm, np.average(np.asarray(run_time_list)))
if __name__ == "__main__":
    # CLI entry point; swap main(args) for main_testing() to run the tests.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', action='store', help = 'path to .txt input file' )
    parser.add_argument('--algorithm', action= 'store', choices = ['selection', 'merge', 'quick_rand', 'quick','radix', 'heap'])
    parser.add_argument('--num_runs', action= 'store')
    args = parser.parse_args()
    main(args)
|
994,051 | 03d36284928326e7690ecb111a06d5f9b960b05c | #!/usr/local/bin/python3.7
#-*- coding: utf-8 -*-
'''
Created on 2019-05-02
fv.Start -- shortdesc
fv.Start is a description
It defines classes_and_methods
@author: Tux48
@version: 0.1
@copyright: 2019 organization_name. All rights reserved.
@deffield updated: Updated
'''
from flask import Flask
from com.financial.av.views.AvView import fv
from com.financial.av.views.IndexView import index
from com.financial.av.views.StockView import stock

# Flask application entry point: mount each feature's blueprint under its
# own URL prefix.
app = Flask( __name__ )
app.register_blueprint( fv, url_prefix= "/fv" )
app.register_blueprint( stock, url_prefix= "/stock" )
app.register_blueprint( index, url_prefix= "/index" )

if __name__ == '__main__':
    # Development server only — debug=True must not ship to production.
    app.run( host = "127.0.0.1", port = "8080", debug = True )
994,052 | 91b948fd3223267cbb0d031c8ea75d3e146015cb | from django.urls import path
from rest_framework.routers import SimpleRouter
from src.recommendation import views
# DRF router: one CRUD route set per viewset, basename given explicitly.
router = SimpleRouter()
router.register(r'productprice', views.ProductPriceViewSet, 'ProductPrice')
router.register(r'category', views.CategoryViewSet, 'Category')
router.register(r'productimage', views.ProductImageViewSet, 'ProductImage')
router.register(r'productfeaturebullet', views.ProductFeatureBulletViewSet, 'ProductFeatureBullet')
router.register(r'productreviewmetadata', views.ProductReviewMetaDataViewSet, 'ProductReviewMetaData')
router.register(r'productvariant', views.ProductVariantViewSet, 'ProductVariant')
router.register(r'product', views.ProductViewSet, 'Product')
router.register(r'productreview', views.ProductReviewViewSet, 'ProductReview')
urlpatterns = router.urls
# Non-router endpoints appended after the generated CRUD routes.
urlpatterns += [
    path('recommended-products', views.RecommendedProductApiView.as_view()),
    path('productreview-asin/<str:asin>', views.ProductReviewFromProductIdApiView.as_view())
]
994,053 | d1305075228241cbbe1889cdf05c474ef56ddece | print("\033[1;47mOlรก, mundo!\033[m")
|
994,054 | 4ac41580d967e71664bfb5a9368a982c3b371061 |
from xai.brain.wordbase.adjectives._backup import _BACKUP
#class header
class _BACKUPS(_BACKUP, ):
    """Word entry for the adjective "backups"; thin subclass of _BACKUP."""
    def __init__(self,):
        _BACKUP.__init__(self)
        self.name = "BACKUPS"  # canonical upper-case word form
        self.specie = 'adjectives'  # part-of-speech bucket
        self.basic = "backup"  # lemma / base form
        self.jsondata = {}  # placeholder for word metadata
994,055 | 0938826cc0f56b199f7fa027572cf303af4860eb | from django.shortcuts import (
render, redirect, reverse, HttpResponse, get_object_or_404)
from django.conf import settings
from django.contrib import messages
from products.models import Product
def view_basket(request):
    """ View that renders the shopping basket contents page

    Exposes the configured discount percentage to the template; basket
    items themselves come from the session via a context processor —
    presumably, confirm against the project's settings.
    """
    context = {
        'discount_percentage': settings.DISCOUNT_PERCENTAGE,
    }
    return render(request, 'basket/basket.html', context)
def add_product_to_basket(request, item_id):
    """ Add a quantity of the specified product to the shopping basket

    The basket lives in the session as {item_id: quantity}. Existing
    entries are incremented; new entries are created. Redirects back to
    the URL supplied in the POST data.
    """
    product = get_object_or_404(Product, pk=item_id)
    quantity = int(request.POST.get('quantity'))
    redirect_url = request.POST.get('redirect_url')
    basket = request.session.get('basket', {})
    if item_id in list(basket.keys()):
        basket[item_id] += quantity
        messages.success(request, f'{product.name} quantity updated to \
            {basket[item_id]}')
    else:
        basket[item_id] = quantity
        messages.success(request, f'{product.name} added to basket')
    # Write the modified basket back so the session persists the change.
    request.session['basket'] = basket
    return redirect(redirect_url)
def adjust_basket(request, item_id):
    """Adjust the quantity of the specified product to the specified amount

    Unlike add_product_to_basket, the POSTed quantity replaces (does not
    increment) the stored value, then the basket page is re-rendered.
    """
    product = get_object_or_404(Product, pk=item_id)
    quantity = int(request.POST.get('quantity'))
    basket = request.session.get('basket', {})
    basket[item_id] = quantity
    messages.success(request, f'{product.name} quantity updated to \
        {basket[item_id]}')
    request.session['basket'] = basket
    return redirect(reverse('view_basket'))
def remove_from_basket(request, item_id):
    """Remove the item from the shopping basket

    Returns bare 200/500 responses because this endpoint is called via
    AJAX rather than a full-page form post — presumably; confirm against
    the basket template's JS.
    """
    try:
        product = get_object_or_404(Product, pk=item_id)
        basket = request.session.get('basket', {})
        basket.pop(item_id)
        messages.success(request, f'{product.name} removed from basket')
        request.session['basket'] = basket
        return HttpResponse(status=200)
    # NOTE(review): broad Exception catch also swallows the Http404 from
    # get_object_or_404, reporting it as a 500.
    except Exception as e:
        messages.error(request, f'Error removing item: {e}')
        return HttpResponse(status=500)
994,056 | 8d07f7d9eb8f638f46a0afe1ef082c85ca46f079 | from car_controller.car_controller_ import CarController # pragma: no cover
import time
import numpy as np
import cv2
from matplotlib import pyplot as plt
THRESHOLD = 100 # less means more stricts
EROSION = 2 # Iteration. More means thicker
DILATION = 2 # Iteration. More means more is removed
MIN_MATCH_COUNT = 8
BOXBLUR = 7
SURF = True
SAVE_PICTURES = False
def thresholding(img):
    """Binarize *img*: pixels above THRESHOLD become 255, the rest 0."""
    _ret, binary = cv2.threshold(img, THRESHOLD, 255, cv2.THRESH_BINARY)
    if SAVE_PICTURES:
        cv2.imwrite('threshold.jpg', binary)
    return binary
def erosion(img):
    """Erode *img* with a 2x2 kernel for EROSION iterations."""
    eroded = cv2.erode(img, np.ones((2, 2), np.uint8), iterations=EROSION)
    if SAVE_PICTURES:
        cv2.imwrite("erosion.jpg", eroded)
    return eroded
def dilation(img):
    """Dilate *img* with a 2x2 kernel for DILATION iterations."""
    dilated = cv2.dilate(img, np.ones((2, 2), np.uint8), iterations=DILATION)
    if SAVE_PICTURES:
        cv2.imwrite("dilation.jpg", dilated)
    return dilated
def blur(img=None, threshold=BOXBLUR, t=cv2.BORDER_DEFAULT):
    """Gaussian-blur *img* with a threshold x threshold kernel."""
    smoothed = cv2.GaussianBlur(img, (threshold, threshold), t)
    if SAVE_PICTURES:
        cv2.imwrite("blurred.jpg", smoothed)
    return smoothed
def find_spot(img=None):
    """Locate the parking-spot template ('box.png') in *img* via feature matching.

    Bug fixes: the keyword passed to cv2.drawMatches was misspelled
    ``matchesMas`` (raising TypeError on every call), and ``good`` was
    initialized twice.

    Args:
        img: grayscale frame to search (numpy array).

    Returns:
        tuple: (match visualization image, (x, y) parking coordinates or None)
    """
    parking_coordinates = None
    box = cv2.imread('box.png', 0)  # queryImage
    box = blur(box, 3, 0)  # 5 here 5 other both 0 works ok
    # Initiate SURF detector (SIFT fallback when SURF is disabled)
    surf = cv2.xfeatures2d.SURF_create(2000*2) if SURF else cv2.xfeatures2d.SIFT_create()
    # find the keypoints and descriptors
    kp1, des1 = surf.detectAndCompute(box, None)
    kp2, des2 = surf.detectAndCompute(img, None)
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    # store all the good matches as per Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts,
                                     dst_pts,
                                     method=cv2.RANSAC,
                                     ransacReprojThreshold=5,
                                     maxIters=2000,
                                     confidence=0.999)
        matchesmask = mask.ravel().tolist()
        h, w = box.shape
        pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
        # Template-space point that projects to the parking target location.
        pts2 = np.float32([[20, 12.5]]).reshape(-1, 1, 2)
        if M is not None and pts is not None:
            dst = cv2.perspectiveTransform(pts, M)
            dst2 = cv2.perspectiveTransform(pts2, M)
            img = cv2.polylines(img, [np.int32(dst)], True, 50, 3, cv2.LINE_AA)
            img = cv2.circle(img, (dst2[0][0][0], dst2[0][0][1]), 5, (127, 0, 0), 3)
            parking_coordinates = (dst2[0][0][0], dst2[0][0][1])
            cv2.imwrite("coordinates.png", img)
        else:
            print("Got a bad frame")
    else:
        matchesmask = None
    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesmask,  # draw only inliers
                       flags=2)
    img3 = cv2.drawMatches(box, kp1, img, kp2, good, None, **draw_params)
    if SAVE_PICTURES:
        cv2.imwrite("identified.jpg", img3)
    return img3, parking_coordinates
|
994,057 | f9f29e26a02d619bebb51d6d742b135a1fc05af2 | import csv
import sys
import numpy as np
import tensorflow.compat.v1 as tf # type: ignore
import Jann.utils as utils
tf.disable_v2_behavior()
def process_pairs_data(args):
    """Main run function to process the pairs data.

    Extracts (input, response) pairs from the lines of ``args.infile``,
    samples ``args.num_lines`` of them without replacement, and writes
    them to ``args.outfile`` as delimited rows.

    Returns:
        bool: True on completion.
    """
    tf.logging.info('Select and save {} random pairs...'.format(
        args.num_lines))
    tf.logging.info('Input file: {}'.format(args.infile))
    # load the lines
    lines, _ = utils.load_data(args.infile, dest_type='list', delimiter='\n')
    tf.logging.info("Loaded {} lines: {}".format(len(lines), args.infile))
    with open(args.outfile, 'w', encoding='iso-8859-1') as outputfile:
        writer = csv.writer(outputfile, delimiter=args.delimiter)
        collected_pairs = utils.extract_pairs_from_lines(lines)
        # Sample without replacement so no pair is written twice.
        random_idxs = np.random.choice(
            len(collected_pairs), args.num_lines, replace=False)
        for random_id in random_idxs:
            pair = collected_pairs[random_id]
            writer.writerow(pair)
    tf.logging.info('Wrote {} pairs to {}.'.format(
        args.num_lines, args.outfile))
    return True
if __name__ == "__main__":
    # Parse the arguments and exit with the run function's status.
    args = utils.parse_arguments(sys.argv[1:])
    sys.exit(process_pairs_data(args))
|
994,058 | 47cb96c6eaae08acfc854bb16fcc42e1a226bed0 | from pathlib import Path
from datetime import datetime
from unittest import mock
import numpy as np
import astropy.units as u
from astropy.time import Time
from sunpy.net import attrs as a
from radiospectra.spectrogram2 import Spectrogram
from radiospectra.spectrogram2.sources import SWAVESSpectrogram
@mock.patch('radiospectra.spectrogram2.spectrogram.SpectrogramFactory._read_dat')
def test_swaves_lfr(read_dat):
    """Factory builds a SWAVESSpectrogram with correct LFR metadata
    from a (mocked) STEREO-A SWAVES low-frequency-receiver .dat file."""
    meta = {
        'instrument': 'swaves',
        'observatory': 'STEREO A',
        'product': 'average',
        'start_time': Time('2020-11-28 00:00:00'),
        'end_time': Time('2020-11-28 23:59:00'),
        'wavelength': a.Wavelength(2.6 * u.kHz, 153.4 * u.kHz),
        'detector': 'lfr',
        'freqs': [2.6, 2.8, 3.1, 3.4, 3.7, 4., 4.4, 4.8, 5.2, 5.7, 6.2, 6.8, 7.4, 8.1, 8.8, 9.6,
                  10.4, 11.4, 12.4, 13.6, 14.8, 16.1, 17.6, 19.2, 20.9, 22.8, 24.9, 27.1, 29.6,
                  32.2, 35.2, 38.3, 41.8, 45.6, 49.7, 54.2, 59.1, 64.5, 70.3, 76.7, 83.6, 91.2,
                  99.4, 108.4, 118.3, 129., 140.6, 153.4] * u.kHz,
        'times': np.arange(1440) * u.min
    }
    # 48 frequency channels x 1440 one-minute samples (one day).
    array = np.zeros((48, 1440))
    read_dat.return_value = (meta, array)
    file = Path('fake.dat')
    spec = Spectrogram(file)
    assert isinstance(spec, SWAVESSpectrogram)
    assert spec.observatory == 'STEREO A'
    assert spec.instrument == 'SWAVES'
    assert spec.detector == 'LFR'
    assert spec.start_time.datetime == datetime(2020, 11, 28, 0, 0)
    assert spec.end_time.datetime == datetime(2020, 11, 28, 23, 59)
    assert spec.wavelength.min == 2.6 * u.kHz
    assert spec.wavelength.max == 153.4 * u.kHz
@mock.patch('radiospectra.spectrogram2.spectrogram.SpectrogramFactory._read_dat')
def test_swaves_hfr(read_dat):
    """Factory builds a SWAVESSpectrogram with correct HFR metadata
    from a (mocked) STEREO-A SWAVES high-frequency-receiver .dat file."""
    meta = {
        'instrument': 'swaves',
        'observatory': 'STEREO A',
        'product': 'average',
        'start_time': Time('2020-11-28 00:00:00'),
        'end_time': Time('2020-11-28 23:59:00'),
        'wavelength': a.Wavelength(125.0 * u.kHz, 16025.0 * u.kHz),
        'detector': 'hfr',
        'freqs': np.linspace(125, 16025, 319) * u.kHz,
        'times': np.arange(1440) * u.min,
    }
    # 319 frequency channels x 1440 one-minute samples (one day).
    array = np.zeros((319, 1440))
    read_dat.return_value = (meta, array)
    file = Path('fake.dat')
    spec = Spectrogram(file)
    assert isinstance(spec, SWAVESSpectrogram)
    assert spec.observatory == 'STEREO A'
    assert spec.instrument == 'SWAVES'
    assert spec.detector == 'HFR'
    assert spec.start_time.datetime == datetime(2020, 11, 28, 0, 0)
    assert spec.end_time.datetime == datetime(2020, 11, 28, 23, 59)
    assert spec.wavelength.min == 125 * u.kHz
    assert spec.wavelength.max == 16025 * u.kHz
|
994,059 | 4e45581b5ab09c14cc944fced3f8074f6017398f | import pygame
import random
import os
pygame.init()
pygame.display.set_caption("EndToper's cross-zero")
WHITE = (255,255,255)
BLACK = (0,0,0)
#calculation of relative coordinates (window half-size)
width, height = pygame.display.Info().current_w, pygame.display.Info().current_h
WIDTH, HEIGHT = width, height
width, height = round(width/2), round(height/2)
#calculation of relative coordinates (grid fit)
stroks,stolbs = WIDTH // 90, HEIGHT // 90
ostatok_w, ostatok_h = round((width % 90)), round((height % 90))
width - ostatok_w, height - ostatok_h
#variable setup. FPS = frames per second, mode - screen mode, run - game circle, clock - for FPS, player - who's gonna go, wins - list with lists with win combinations
FPS = 60
mode = 'first'
run = True
game_run = 0
clock = pygame.time.Clock()
player = 1
wins = None
#images
image_adress = os.path.join('cross_zero_images','circle1.png')
circle_image = pygame.image.load(image_adress)
image_adress_2 = os.path.join('cross_zero_images','cross.png')
cross_image = pygame.image.load(image_adress_2)
#sanity-check prints for the sizing math
print(width, height)
print(stroks,stolbs)
print(ostatok_w, ostatok_h)
print(width - ostatok_w, height - ostatok_h )
print(wins)
screen = pygame.display.set_mode((width - ostatok_w, height - ostatok_h))
class sq(pygame.sprite.Sprite):
    """One board cell: a clickable 80x80 square sprite.

    ``type`` encodes the cell owner: 0 empty, 1 circle, 2 cross.
    NOTE(review): source indentation was mangled; the nesting of the
    trailing ``win_score`` resets below is reconstructed — verify against
    the original file before relying on the win-detection details.
    """
    def __init__(self,x,y,color,x2,y2,ctype):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.Surface((80, 80))
        self.rect = self.image.get_rect()
        self.rect.center = (x,y)  # sprite center in pixels
        self.pos = (x2,y2)  # top-left pixel position used by draw()
        self.color = color
        self.image.fill(color)
        self.image_image = self.image  # currently displayed surface
        self.type = ctype  # 0 empty, 1 circle, 2 cross
    def update(self,pos):
        """Place the current player's mark at grid cell *pos* and check for a win."""
        global player,poses, game_run
        # Claim the cell for the current player; the second branch cannot
        # re-fire because self.type is no longer 0 after the first.
        if player == 1 and self.type == 0:
            self.image_image = circle_image
            player = 2
            self.type = 1
            poses[(pos)] = 1
        if player == 2 and self.type == 0:
            self.image_image = cross_image
            player = 1
            self.type = 2
            poses[(pos)] = 2
        win_score = 0
        x = pos[0]
        y = pos[1]
        # Eight rays of four cells from (x, y): right, left, down, up and
        # the four diagonals; opposite rays share an axis (pairs 0/1, 2/3, ...).
        wins = [[(x+1,y),(x+2,y),(x+3,y),(x+4,y)],[(x-1,y),(x-2,y),(x-3,y),(x-4,y)],[(x,y+1),(x,y+2),(x,y+3),(x,y+4)],[(x,y-1),(x,y-2),(x,y-3),(x,y-4)],[(x-1,y-1),(x-2,y-2),(x-3,y-3),(x-4,y-4)],[(x+1,y+1),(x+2,y+2),(x+3,y+3),(x+4,y+4)],[(x-1,y+1),(x-2,y+2),(x-3,y+3),(x-4,y+4)],[(x+1,y-1),(x+2,y-2),(x+3,y-3),(x+4,y-4)]]
        # check whether circles have won
        if self.type == 1:
            for i in range(len(wins)):
                for j in range(len(wins[i])):
                    if wins[i][j][0] > 0 and wins[i][j][1] > 0 and wins[i][j][0] < stroks and wins[i][j][1] < stolbs:
                        if poses[wins[i][j]] == 1:
                            win_score = win_score + 1
                            if win_score >= 4:
                                if mode == 'first':
                                    width, height = round(WIDTH/2), round(HEIGHT/2)
                                    pygame.display.set_mode((width - ostatok_w, height - ostatok_h ))
                                    game_run = 1
                                if mode == 'second':
                                    pygame.display.set_mode((WIDTH, HEIGHT),pygame.FULLSCREEN)
                                    game_run = 3
                                win_score = 0
                # Reset the streak after each axis pair (rays 0/1, 2/3, ...).
                if i == 1 or i == 3 or i == 5 or i == 7:
                    win_score = 0
            win_score = 0
        # check whether crosses have won
        if self.type == 2:
            for i in range(len(wins)):
                for j in range(len(wins[i])):
                    if wins[i][j][0] > 0 and wins[i][j][1] > 0 and wins[i][j][0] < stroks and wins[i][j][1] < stolbs:
                        if poses[wins[i][j]] == 2:
                            win_score = win_score + 1
                            if win_score >= 4:
                                if mode == 'first':
                                    width, height = round(WIDTH/2), round(HEIGHT/2)
                                    pygame.display.set_mode((width - ostatok_w, height - ostatok_h ))
                                    game_run = 2
                                if mode == 'second':
                                    pygame.display.set_mode((WIDTH, HEIGHT),pygame.FULLSCREEN)
                                    game_run = 4
                                win_score = 0
                if i == 1 or i == 3 or i == 5 or i == 7:
                    win_score = 0
            win_score = 0
    def draw(self):
        """Blit the cell's current surface at its board position."""
        screen.blit(self.image_image,(self.pos))
    def reset(self):
        """Clear the cell back to its empty (unowned) state."""
        self.image_image = self.image
        self.type = 0
# Build the board: one `sq` sprite per cell, walking column by column.
# qw counts cells within a column, qw2 counts columns (0-based),
# qw3 is the 1-based column counter used as the first key component.
all_sprites = pygame.sprite.Group()
squares = []
poses = {}       # grid key -> 0 empty / 1 circle / 2 cross
poses_mass = []  # grid key of squares[i], parallel to `squares`
qw = 0
qw2 = 0
qw3 = 1
for i in range(1,stroks*stolbs+1):
    # Cell centre (x, y) feeds the collision rect; top-left-ish (x2, y2)
    # is where the 80x80 image is blitted inside the 90 px cell.
    x = 45+90*qw2
    y = 45+90*qw
    x2 = 5+90*qw2
    y2 = 5+90*qw
    qw = qw + 1
    if qw == stolbs:
        # Column finished: move to the next one.
        qw = 0
        qw2 = qw2 + 1
        qw3 = qw3 + 1
    color = WHITE
    q = sq(x,y,color,x2,y2,0)
    all_sprites.add(q)
    squares.append(q)
    # NOTE(review): the key is recorded *after* qw/qw3 were advanced —
    # confirm it lines up with the neighbour arithmetic in sq.update.
    poses_mass.append((qw3,qw))
    poses[(qw3,qw)] = 0
# Main game loop: draw the board, dispatch events, show win screens.
while run:
    screen.fill(BLACK)
    clock.tick(FPS)
    # Custom per-sprite draw (not Group.draw): each cell blits its own face.
    for i2 in range(len(squares)):
        squares[i2].draw()
    for i in pygame.event.get():
        if i.type == pygame.QUIT:
            run = False
        if i.type == pygame.KEYDOWN:
            if i.key == pygame.K_ESCAPE:
                run = False
            elif i.key == pygame.K_F5 and mode == 'first' and game_run == 0:
                # F5 toggles windowed <-> fullscreen while a round is active.
                pygame.display.set_mode((WIDTH, HEIGHT),pygame.FULLSCREEN)
                mode = 'second'
                pygame.display.update()
            elif i.key == pygame.K_F5 and mode == 'second' and game_run == 0:
                width, height = round(WIDTH/2), round(HEIGHT/2)
                pygame.display.set_mode((width - ostatok_w, height - ostatok_h ))
                mode = 'first'
                pygame.display.update()
        elif i.type == pygame.MOUSEBUTTONDOWN:
            if game_run == 0:
                # Left or right click claims the cell under the cursor.
                for i2 in range(len(squares)):
                    if squares[i2].rect.collidepoint(pygame.mouse.get_pos()) and i.button == 1 or squares[i2].rect.collidepoint(pygame.mouse.get_pos()) and i.button == 3:
                        squares[i2].update(poses_mass[i2])
            else:
                # A round just ended: any click clears the board and state.
                # NOTE(review): this inner loop reuses `i`, clobbering the
                # event variable of the outer loop — works, but fragile.
                for i2 in range(len(squares)):
                    squares[i2].reset()
                game_run = 0
                for i in poses.keys():
                    poses[i] = 0
    # game_run 1/2: windowed win screens; 3/4: fullscreen win screens.
    # (The strings below are stored mojibake-encoded; kept byte-identical.)
    if game_run == 1:
        screen.fill(WHITE)
        fontObj = pygame.font.Font('freesansbold.ttf', 25)
        textSurfaceObj = fontObj.render(f'ะัะนะณัะฐะปะธ ะะพะปะธะบะธ', False, BLACK, WHITE)
        textRectObj = textSurfaceObj.get_rect()
        textRectObj.center = (width/2,height/2)
        screen.blit(textSurfaceObj, textRectObj)
    if game_run == 2:
        screen.fill(WHITE)
        fontObj = pygame.font.Font('freesansbold.ttf', 25)
        textSurfaceObj = fontObj.render(f'ะัะนะณัะฐะปะธ ะัะตััะธะบะธ', False, BLACK, WHITE)
        textRectObj = textSurfaceObj.get_rect()
        textRectObj.center = (width/2,height/2)
        screen.blit(textSurfaceObj, textRectObj)
    if game_run == 3:
        screen.fill(WHITE)
        fontObj = pygame.font.Font('freesansbold.ttf', 100)
        textSurfaceObj = fontObj.render(f'ะัะนะณัะฐะปะธ ะะพะปะธะบะธ', False, BLACK, WHITE)
        textRectObj = textSurfaceObj.get_rect()
        textRectObj.center = (WIDTH/2,HEIGHT/2)
        screen.blit(textSurfaceObj, textRectObj)
    if game_run == 4:
        screen.fill(WHITE)
        fontObj = pygame.font.Font('freesansbold.ttf', 100)
        textSurfaceObj = fontObj.render(f'ะัะนะณัะฐะปะธ ะัะตััะธะบะธ', False, BLACK, WHITE)
        textRectObj = textSurfaceObj.get_rect()
        textRectObj.center = (WIDTH/2,HEIGHT/2)
        screen.blit(textSurfaceObj, textRectObj)
    pygame.display.flip()
pygame.quit() |
994,060 | 018617327f0487a44a50856b03503948f574bef5 | import numpy as np
import dezero
from dezero import cuda, utils
from dezero.core import Function, Variable, as_variable, as_array
# =============================================================================
# Basic functions: sin / cos / tanh / exp / log
# =============================================================================
class Sin(Function):
    """Elementwise sine."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.sin(x)
        return y
    def backward(self, gy):
        x, = self.inputs
        gx = gy * cos(x)  # d sin(x)/dx = cos(x)
        return gx
def sin(x):
    """Differentiable elementwise sin."""
    return Sin()(x)
class Cos(Function):
    """Elementwise cosine."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.cos(x)
        return y
    def backward(self, gy):
        x, = self.inputs
        gx = gy * -sin(x)  # d cos(x)/dx = -sin(x)
        return gx
def cos(x):
    """Differentiable elementwise cos."""
    return Cos()(x)
class Tanh(Function):
    """Elementwise hyperbolic tangent."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.tanh(x)
        return y
    def backward(self, gy):
        # d tanh(x)/dx = 1 - tanh(x)^2, expressed via the cached output.
        y = self.outputs[0]()  # weakref
        gx = gy * (1 - y * y)
        return gx
def tanh(x):
    """Differentiable elementwise tanh."""
    return Tanh()(x)
class Exp(Function):
    """Elementwise exponential."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.exp(x)
        return y
    def backward(self, gy):
        # d exp(x)/dx = exp(x) = y, reused from the forward output.
        y = self.outputs[0]()  # weakref
        gx = gy * y
        return gx
def exp(x):
    """Differentiable elementwise exp."""
    return Exp()(x)
class Log(Function):
    """Elementwise natural logarithm."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.log(x)
        return y
    def backward(self, gy):
        x, = self.inputs
        gx = gy / x  # d log(x)/dx = 1/x
        return gx
def log(x):
    """Differentiable elementwise natural log."""
    return Log()(x)
# =============================================================================
# Tensor operations: reshape / transpose / get_item / expand_dims / flatten
# =============================================================================
class Reshape(Function):
    """Change the shape of an array without touching its data."""
    def __init__(self, shape):
        self.shape = shape
    def forward(self, x):
        self.x_shape = x.shape  # remembered for the backward reshape
        y = x.reshape(self.shape)
        return y
    def backward(self, gy):
        return reshape(gy, self.x_shape)
def reshape(x, shape):
    """Reshape x; no-op (as Variable) when the shape already matches."""
    if x.shape == shape:
        return as_variable(x)
    return Reshape(shape)(x)
class Transpose(Function):
    """Permute array axes (full reversal when axes is None)."""
    def __init__(self, axes=None):
        self.axes = axes
    def forward(self, x):
        y = x.transpose(self.axes)
        return y
    def backward(self, gy):
        if self.axes is None:
            return transpose(gy)
        # Invert the permutation (argsort of normalized axes) so the
        # gradient returns to the original axis order.
        axes_len = len(self.axes)
        inv_axes = tuple(np.argsort([ax % axes_len for ax in self.axes]))
        return transpose(gy, inv_axes)
def transpose(x, axes=None):
    """Differentiable transpose of x (reverse all axes when axes is None)."""
    return Transpose(axes)(x)
class GetItem(Function):
    """Differentiable indexing/slicing: y = x[slices]."""
    def __init__(self, slices):
        self.slices = slices
    def forward(self, x):
        y = x[self.slices]
        return y
    def backward(self, gy):
        x, = self.inputs
        f = GetItemGrad(self.slices, x.shape)
        return f(gy)
class GetItemGrad(Function):
    """Backward of GetItem: scatter-add gy into zeros of the input shape."""
    def __init__(self, slices, in_shape):
        self.slices = slices
        self.in_shape = in_shape
    def forward(self, gy):
        xp = dezero.cuda.get_array_module(gy)
        gx = xp.zeros(self.in_shape, dtype=gy.dtype)
        if xp is np:
            # add.at accumulates duplicate indices, unlike plain assignment
            np.add.at(gx, self.slices, gy)
        else:
            xp.scatter_add(gx, self.slices, gy)
        return gx
    def backward(self, ggx):
        return get_item(ggx, self.slices)
def get_item(x, slices):
    """Differentiable x[slices]."""
    f = GetItem(slices)
    return f(x)
def expand_dims(x, axis):
    """Insert a length-1 axis at the given position (like np.expand_dims)."""
    x = as_variable(x)
    shape = list(x.shape)
    shape.insert(axis, 1)
    return reshape(x, tuple(shape))
def flatten(x):
    """Flattens the input. Does not affect the batch size."""
    return reshape(x, (x.shape[0], -1))
# =============================================================================
# sum / sum_to / broadcast_to / average / matmul / linear
# =============================================================================
class Sum(Function):
    """Sum over the given axis (like ndarray.sum)."""
    def __init__(self, axis, keepdims):
        self.axis = axis
        self.keepdims = keepdims
    def forward(self, x):
        self.x_shape = x.shape
        y = x.sum(axis=self.axis, keepdims=self.keepdims)
        return y
    def backward(self, gy):
        # Restore the reduced axes so gy can broadcast back to x's shape.
        gy = utils.reshape_sum_backward(gy, self.x_shape, self.axis,
                                        self.keepdims)
        gx = broadcast_to(gy, self.x_shape)
        return gx
def sum(x, axis=None, keepdims=False):
    """Differentiable sum (intentionally shadows the builtin in this module)."""
    return Sum(axis, keepdims)(x)
class SumTo(Function):
    """Sum elements down to the given (broadcast-compatible) shape."""
    def __init__(self, shape):
        self.shape = shape
    def forward(self, x):
        self.x_shape = x.shape
        y = utils.sum_to(x, self.shape)
        return y
    def backward(self, gy):
        gx = broadcast_to(gy, self.x_shape)
        return gx
def sum_to(x, shape):
    """Sum x down to shape; no-op (as Variable) when shapes already match."""
    if x.shape == shape:
        return as_variable(x)
    return SumTo(shape)(x)
class BroadcastTo(Function):
    """Broadcast x to a larger shape; backward sums the gradient back down."""
    def __init__(self, shape):
        self.shape = shape
    def forward(self, x):
        self.x_shape = x.shape
        xp = dezero.cuda.get_array_module(x)
        y = xp.broadcast_to(x, self.shape)
        return y
    def backward(self, gy):
        gx = sum_to(gy, self.x_shape)
        return gx
def broadcast_to(x, shape):
    """Broadcast x to shape; no-op (as Variable) when shapes already match."""
    if x.shape == shape:
        return as_variable(x)
    return BroadcastTo(shape)(x)
def average(x, axis=None, keepdims=False):
    """Mean over the given axis.

    y.data.size / x.data.size equals 1 / (number of elements reduced),
    so this is sum * 1/n without building an extra division node.
    """
    x = as_variable(x)
    y = sum(x, axis, keepdims)
    return y * (y.data.size / x.data.size)
mean = average  # alias
class MatMul(Function):
    """Matrix product y = x @ W."""
    def forward(self, x, W):
        y = x.dot(W)
        return y
    def backward(self, gy):
        x, W = self.inputs
        gx = matmul(gy, W.T)
        gW = matmul(x.T, gy)
        return gx, gW
def matmul(x, W):
    """Differentiable matrix multiplication."""
    return MatMul()(x, W)
class Linear(Function):
    """Affine transform y = x @ W + b (b optional)."""
    def forward(self, x, W, b):
        y = x.dot(W)
        if b is not None:
            y += b
        return y
    def backward(self, gy):
        x, W, b = self.inputs
        # b is a Variable wrapping None when no bias was supplied.
        gb = None if b.data is None else sum_to(gy, b.shape)
        gx = matmul(gy, W.T)
        gW = matmul(x.T, gy)
        return gx, gW, gb
def linear(x, W, b=None):
    """Differentiable affine transform."""
    return Linear()(x, W, b)
def linear_simple(x, W, b=None):
    """Affine transform composed from matmul/add (reference implementation)."""
    t = matmul(x, W)
    if b is None:
        return t
    y = t + b
    t.data = None  # Release t.data (ndarray) for memory efficiency
    return y
# =============================================================================
# activation function: sigmoid / relu / softmax / log_softmax / leaky_relu
# =============================================================================
def sigmoid_simple(x):
    """Sigmoid built from primitive ops (reference implementation)."""
    x = as_variable(x)
    y = 1 / (1 + exp(-x))
    return y
class Sigmoid(Function):
    """Logistic sigmoid with a tanh-based, numerically stabler forward."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        # y = 1 / (1 + xp.exp(-x))
        y = xp.tanh(x * 0.5) * 0.5 + 0.5  # Better implementation
        return y
    def backward(self, gy):
        # d sigmoid/dx = y * (1 - y), reusing the cached output.
        y = self.outputs[0]()
        gx = gy * y * (1 - y)
        return gx
def sigmoid(x):
    """Differentiable elementwise logistic sigmoid."""
    return Sigmoid()(x)
class ReLU(Function):
    """Rectified linear unit: max(x, 0)."""
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.maximum(x, 0.0)
        return y
    def backward(self, gy):
        x, = self.inputs
        mask = x.data > 0  # gradient passes only where the input was positive
        gx = gy * mask
        return gx
def relu(x):
    """Differentiable elementwise ReLU."""
    return ReLU()(x)
def softmax_simple(x, axis=1):
    """Softmax from primitive ops (no max-shift, so large x can overflow)."""
    x = as_variable(x)
    y = exp(x)
    sum_y = sum(y, axis=axis, keepdims=True)
    return y / sum_y
class Softmax(Function):
    """Numerically stable softmax along the given axis."""
    def __init__(self, axis=1):
        self.axis = axis
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = x - x.max(axis=self.axis, keepdims=True)  # shift for stability
        y = xp.exp(y)
        y /= y.sum(axis=self.axis, keepdims=True)
        return y
    def backward(self, gy):
        # Jacobian-vector product: y * (gy - sum(y * gy)).
        y = self.outputs[0]()
        gx = y * gy
        sumdx = gx.sum(axis=self.axis, keepdims=True)
        gx -= y * sumdx
        return gx
def softmax(x, axis=1):
    """Softmax along axis (default: class axis of (N, C) input)."""
    return Softmax(axis)(x)
class LogSoftmax(Function):
    """log(softmax(x)) computed via logsumexp for numerical stability."""
    def __init__(self, axis=1):
        self.axis = axis
    def forward(self, x):
        log_z = utils.logsumexp(x, self.axis)
        y = x - log_z
        return y
    def backward(self, gy):
        y = self.outputs[0]()
        gx = gy - exp(y) * gy.sum(axis=self.axis, keepdims=True)
        return gx
def log_softmax(x, axis=1):
    """Differentiable log-softmax along axis."""
    return LogSoftmax(axis)(x)
class LeakyReLU(Function):
    """ReLU variant keeping a small slope for non-positive inputs."""
    def __init__(self, slope):
        self.slope = slope
    def forward(self, x):
        y = x.copy()
        y[x <= 0] *= self.slope
        return y
    def backward(self, gy):
        x, = self.inputs
        # mask is 1 where x > 0 and `slope` elsewhere.
        mask = (x.data > 0).astype(gy.dtype)
        mask[mask <= 0] = self.slope
        gx = gy * mask
        return gx
def leaky_relu(x, slope=0.2):
    """Differentiable leaky ReLU with the given negative-side slope."""
    return LeakyReLU(slope)(x)
# =============================================================================
# loss function: mean_squared_error / softmax_cross_entropy / sigmoid_cross_entropy / binary_cross_entropy
# =============================================================================
def mean_squared_error_simple(x0, x1):
    """MSE from primitive ops (reference implementation)."""
    x0, x1 = as_variable(x0), as_variable(x1)
    diff = x0 - x1
    y = sum(diff ** 2) / len(diff)
    return y
class MeanSquaredError(Function):
    """Mean squared error averaged over the leading axis."""
    def forward(self, x0, x1):
        diff = x0 - x1
        y = (diff ** 2).sum() / len(diff)
        return y
    def backward(self, gy):
        x0, x1 = self.inputs
        diff = x0 - x1
        gx0 = gy * diff * (2. / len(diff))
        gx1 = -gx0
        return gx0, gx1
def mean_squared_error(x0, x1):
    """Differentiable mean squared error between x0 and x1."""
    return MeanSquaredError()(x0, x1)
def softmax_cross_entropy_simple(x, t):
    """Softmax cross-entropy from primitive ops; t holds integer class ids."""
    x, t = as_variable(x), as_variable(t)
    N = x.shape[0]
    p = softmax(x)
    p = clip(p, 1e-15, 1.0)  # To avoid log(0)
    log_p = log(p)
    tlog_p = log_p[np.arange(N), t.data]
    y = -1 * sum(tlog_p) / N
    return y
class SoftmaxCrossEntropy(Function):
    """Fused, numerically stable softmax + cross-entropy over (N, C) logits."""
    def forward(self, x, t):
        N = x.shape[0]
        log_z = utils.logsumexp(x, axis=1)
        log_p = x - log_z
        log_p = log_p[np.arange(N), t.ravel()]  # pick the target class per row
        y = -log_p.sum() / np.float32(N)
        return y
    def backward(self, gy):
        x, t = self.inputs
        N, CLS_NUM = x.shape
        gy *= 1/N
        y = softmax(x)
        # convert to one-hot
        xp = cuda.get_array_module(t.data)
        t_onehot = xp.eye(CLS_NUM, dtype=t.dtype)[t.data]
        # gradient of fused softmax-CE: (softmax(x) - onehot(t)) / N
        y = (y - t_onehot) * gy
        return y
def softmax_cross_entropy(x, t):
    """Differentiable softmax cross-entropy; t holds integer class ids."""
    return SoftmaxCrossEntropy()(x, t)
def sigmoid_cross_entropy(x, t):
    """Binary cross-entropy on logits x against targets t, averaged over N.

    t is reshaped to x's shape when the ranks differ.
    """
    if x.ndim != t.ndim:
        t = t.reshape(*x.shape)
    x, t = as_variable(x), as_variable(t)
    N = len(x)
    p = sigmoid(x)
    # BUG FIX: clip both tails. The original clipped only the lower bound
    # (1e-15, 1.0), so a saturated sigmoid (p == 1.0) made log(1 - p)
    # evaluate to -inf and poisoned the loss.
    p = clip(p, 1e-15, 1 - 1e-15)
    tlog_p = t * log(p) + (1 - t) * log(1 - p)
    y = -1 * sum(tlog_p) / N
    return y
def binary_cross_entropy(p, t):
    """Binary cross-entropy on probabilities p against targets t.

    p is pre-clipped into [1e-15, 0.999] so both log terms stay finite;
    t is reshaped to p's shape when the ranks differ.
    """
    if p.ndim != t.ndim:
        t = t.reshape(*p.shape)
    n = len(t)
    p = clip(p, 1e-15, 0.999)
    log_likelihood = t * log(p) + (1 - t) * log(1 - p)
    return -1 * sum(log_likelihood) / n
# =============================================================================
# accuracy / dropout / batch_norm / embed_id
# =============================================================================
def accuracy(y, t):
    """
    [WAR] This function is not differentiable.

    Fraction of rows whose argmax class matches t, returned as a Variable.
    """
    y, t = as_variable(y), as_variable(t)
    pred = y.data.argmax(axis=1).reshape(t.shape)
    result = (pred == t.data)
    acc = result.mean()
    return Variable(as_array(acc))
def dropout(x, dropout_ratio=0.5):
    """Inverted dropout: active only in training mode, identity otherwise."""
    x = as_variable(x)
    if dezero.Config.train:
        xp = cuda.get_array_module(x)
        mask = xp.random.rand(*x.shape) > dropout_ratio
        # Scale kept units so the expected activation matches eval mode.
        scale = xp.array(1.0 - dropout_ratio).astype(x.dtype)
        y = x * mask / scale
        return y
    else:
        return x
class BatchNorm(Function):
    """Batch normalization over (N, C) or NCHW (N, C, H, W) inputs.

    Holds the running mean/var arrays (updated in-place during training
    via an exponential moving average) and normalizes with them at
    inference time.
    """
    def __init__(self, mean, var, decay, eps):
        self.avg_mean = mean
        self.avg_var = var
        self.decay = decay
        self.eps = eps
        self.inv_std = None  # cached by the training forward for backward()
    def forward(self, x, gamma, beta):
        assert x.ndim == 2 or x.ndim == 4
        x_ndim = x.ndim
        if x_ndim == 4:
            N, C, H, W = x.shape
            # (N, C, H, W) -> (N*H*W, C)
            x = x.transpose(0, 2, 3, 1).reshape(-1, C)
        xp = cuda.get_array_module(x)
        if dezero.Config.train:
            mean = x.mean(axis=0)
            var = x.var(axis=0)
            inv_std = 1 / xp.sqrt(var + self.eps)
            xc = (x - mean) * inv_std
            # m = samples per channel; adjust converts the biased batch
            # variance into an unbiased running estimate.
            m = x.size // gamma.size
            s = m - 1. if m - 1. > 1. else 1.
            adjust = m / s  # unbiased estimation
            self.avg_mean *= self.decay
            self.avg_mean += (1 - self.decay) * mean
            self.avg_var *= self.decay
            self.avg_var += (1 - self.decay) * adjust * var
            self.inv_std = inv_std
        else:
            inv_std = 1 / xp.sqrt(self.avg_var + self.eps)
            xc = (x - self.avg_mean) * inv_std
        y = gamma * xc + beta
        if x_ndim == 4:
            # (N*H*W, C) -> (N, C, H, W)
            y = y.reshape(N, H, W, C).transpose(0, 3, 1, 2)
        return y
    def backward(self, gy):
        gy_ndim = gy.ndim
        if gy_ndim == 4:
            N, C, H, W = gy.shape
            gy = gy.transpose(0, 2, 3, 1).reshape(-1, C)
        x, gamma, beta = self.inputs
        batch_size = len(gy)
        if x.ndim == 4:
            N, C, H, W = x.shape
            x = x.transpose(0, 2, 3, 1).reshape(-1, C)
        # Recompute the batch mean and normalized input (same batch as forward).
        mean = x.sum(axis=0) / batch_size
        xc = (x - mean) * self.inv_std
        gbeta = sum(gy, axis=0)
        ggamma = sum(xc * gy, axis=0)
        gx = gy - gbeta / batch_size - xc * ggamma / batch_size
        gx *= gamma * self.inv_std
        if gy_ndim == 4:
            gx = gx.reshape(N, H, W, C).transpose(0, 3, 1, 2)
        return gx, ggamma, gbeta
def batch_norm(x, gamma, beta, mean, var, decay=0.9, eps=2e-5):
    """Apply batch normalization to x using running statistics mean/var.

    Thin functional wrapper around the BatchNorm Function defined above.
    """
    return BatchNorm(mean, var, decay, eps)(x, gamma, beta)
# Backward-compatible alias: the function was originally published under
# this misspelled name, so existing callers keep working.
batch_nrom = batch_norm
def embed_id(x, W):
    """Pick rows of the embedding matrix W by the integer ids in x."""
    return W[x]
# =============================================================================
# max / min / clip
# =============================================================================
class Max(Function):
    """Maximum over the given axis; gradient flows only to max positions."""
    def __init__(self, axis=None, keepdims=False):
        self.axis = axis
        self.keepdims = keepdims
    def forward(self, x):
        y = x.max(axis=self.axis, keepdims=self.keepdims)
        return y
    def backward(self, gy):
        x = self.inputs[0]
        y = self.outputs[0]()  # weakref
        # Bring gy/y to a broadcastable shape, then mask to where x == max.
        shape = utils.max_backward_shape(x, self.axis)
        gy = reshape(gy, shape)
        y = reshape(y, shape)
        cond = (x.data == y.data)
        gy = broadcast_to(gy, cond.shape)
        return gy * cond
class Min(Max):
    """Minimum over the given axis (reuses Max's backward masking)."""
    def forward(self, x):
        y = x.min(axis=self.axis, keepdims=self.keepdims)
        return y
def max(x, axis=None, keepdims=False):
    """Differentiable max (intentionally shadows the builtin in this module)."""
    return Max(axis, keepdims)(x)
def min(x, axis=None, keepdims=False):
    """Differentiable min (intentionally shadows the builtin in this module)."""
    return Min(axis, keepdims)(x)
class Clip(Function):
    """Clamp values into [x_min, x_max]; gradient is 1 inside, 0 outside."""
    def __init__(self, x_min, x_max):
        self.x_min = x_min
        self.x_max = x_max
    def forward(self, x):
        xp = cuda.get_array_module(x)
        y = xp.clip(x, self.x_min, self.x_max)
        return y
    def backward(self, gy):
        x, = self.inputs
        mask = (x.data >= self.x_min) * (x.data <= self.x_max)
        gx = gy * mask
        return gx
def clip(x, x_min, x_max):
    """Differentiable clamp into [x_min, x_max]."""
    return Clip(x_min, x_max)(x)
# =============================================================================
# conv2d / col2im / im2col / basic_math
# =============================================================================
from dezero.functions_conv import conv2d
from dezero.functions_conv import deconv2d
from dezero.functions_conv import conv2d_simple
from dezero.functions_conv import im2col
from dezero.functions_conv import col2im
from dezero.functions_conv import pooling_simple
from dezero.functions_conv import pooling
from dezero.functions_conv import average_pooling
from dezero.core import add
from dezero.core import sub
from dezero.core import rsub
from dezero.core import mul
from dezero.core import div
from dezero.core import neg
from dezero.core import pow |
994,061 | d6274ba174cc492f58d8d1ae9957e498897f7a59 | '''
Wrapping params from app cell
Useful because it can be hard to predict how the app cell will pass things in,
and to retrieve params in different forms (e.g., CLI vs. prose)
'''
import json
from ..util.debug import dprint
from .globals import Var
class Params:
    '''
    Provides interface for:
    -----------------------
    # Flattened params
    * `[]` access to required params (e.g., essential UPAs, workspace stuff)
    * `get`-like access to default-backed params ( e.g., 3rd-party tool params with defaults)
    * RDP Clsf params in prose mode
    * RDP Clsf params in CLI args mode

    You can leave off non-default params, like you would with underlying CLI tools
    However, if you do pass something in for a param, it has to be valid.
    Use `params.getd` for default-backed params

    Also the parameter groups are an effect of the app cell ui,
    and params will be flattened right away
    '''
    # Defaults for tunable RDP Classifier params (see `getd`).
    DEFAULTS = {
        'conf': 0.8,
        'gene': 'silva_138_ssu',
    }
    # always required, whether front or back end call
    REQUIRED = [
        'amp_mat_upa',
        'output_name',
        'workspace_id', # for saving obj
    ]
    # Every name accepted, before or after flattening ('rdp_clsf' is the
    # UI parameter group that gets flattened away).
    ALL = [
        'amp_mat_upa',
        'output_name',
        'rdp_clsf',
        'conf',
        'gene',
        'workspace_id',
        'workspace_name',
    ]
    # Genes backed by custom-trained classifier data (need a propfile).
    CUSTOM = ['silva_138_ssu', 'silva_138_ssu_v4']
    def __init__(self, params):
        ## Validate params and flattened params
        self._validate(params)
        ## Flatten
        params = self.flatten(params)
        self.params = params
    def _validate(self, params):
        # Required keys must be present; every key (nested or flattened)
        # must be known.
        for p in self.REQUIRED:
            if p not in params:
                raise Exception('Missing required `%s`' % p)
        for p in params:
            if p not in self.ALL:
                raise Exception('Unknown `%s`' % p)
        for p in self.flatten(params):
            if p not in self.ALL:
                raise Exception('Unknown `%s`' % p)
    def is_custom(self) -> bool:
        # True when the selected gene uses custom-trained classifier data.
        return self.getd('gene') in self.CUSTOM
    def get_prose_args(self, quote_str=False) -> dict:
        '''
        For printing all RDP Clsf params to user in a pretty way
        '''
        quote = lambda s: '"%s"' % s
        d = {
            'conf': '%g' % self.getd('conf'),
            'gene': self.getd('gene'),
        }
        if quote_str:
            d['gene'] = quote(d['gene'])
        return d
    @property
    def cli_args(self) -> list:
        '''
        Non-default RDP Classifier `classify` CLI args
        '''
        # Only emit flags that differ from the tool's own defaults;
        # custom genes point at a training propfile instead of --gene.
        cli_args = []
        if self.getd('conf') != self.DEFAULTS['conf']:
            cli_args += ['--conf', str(self.getd('conf'))]
        if self.getd('gene') in self.CUSTOM:
            cli_args += ['--train_propfile', Var.propfile[self.getd('gene')]]
        else:
            cli_args += ['--gene', self.getd('gene')]
        return cli_args
    def __getitem__(self, key):
        '''
        For required params (e.g., input UPAs, workspace stuff)
        Should not use this for default-backed params
        as those can be left off params
        so use `getd` for those
        '''
        if key not in self.REQUIRED:
            raise Exception()
        return self.params[key]
    def getd(self, key):
        '''
        For default-backed params (e.g., tunable numbers)
        Like `get`
        Return the user-supplied value, or the default value if none was supplied
        '''
        if key not in self.DEFAULTS:
            raise Exception('`params.getd(x)` only applicable to params with defaults')
        return self.params.get(key, self.DEFAULTS[key])
    def __repr__(self) -> str:
        return 'Wrapper for params:\n%s' % (json.dumps(self.params, indent=4))
    @staticmethod
    def flatten(d):
        '''
        At most 1 level nesting

        Returns a copy of d with each nested dict's items hoisted to the
        top level (the nested key itself is removed).
        '''
        d1 = d.copy()
        for k, v in d.items():
            if isinstance(v, dict):
                for k1, v1 in d1.pop(k).items():
                    d1[k1] = v1
        return d1
|
994,062 | 68a24863bf99f332eb82cb363c3e2acd71e3b685 | import os
import sys
import transaction
import hashlib
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models.meta import Base
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models import User, CourseElement, Article, Media, Subscription, Payment
def usage(argv):
    """Print command-line usage for this script and exit with status 1."""
    cmd = os.path.basename(argv[0])
    print('usage: %s <config_uri> [var=value]\n'
          '(example: "%s development.ini")' % (cmd, cmd))
    sys.exit(1)
def main(argv=sys.argv):
    """Create the DB schema and seed an initial admin/author user.

    argv: [script, config_uri, var=value...]; exits via usage() when the
    config-file argument is missing.
    """
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri, options=options)
    engine = get_engine(settings)
    Base.metadata.create_all(engine)
    session_factory = get_session_factory(engine)
    with transaction.manager:
        dbsession = get_tm_session(session_factory, transaction.manager)
        key_string = "Dishnek5"
        salt = "ololo"
        # BUG FIX: hashlib.md5 requires bytes on Python 3; the original
        # passed a str and raised TypeError. Encode the salted password.
        # SECURITY NOTE(review): salted MD5 is a weak password hash —
        # prefer passlib/bcrypt if the login path can be migrated.
        hashstr = hashlib.md5((salt + key_string).encode('utf-8')).hexdigest()
        user = User(name='annndrey', login='gontchar@gmail.com',
                    password=hashstr,
                    is_admin=1,
                    is_author=1)
        dbsession.add(user)
|
994,063 | 5d2f2b0232842b2cf0ccd8a22c40a1298adb21e0 | import requests
import math
import datetime
from passlib.hash import bcrypt
#BASE_URL = 'http://54.159.70.183/'
BASE_URL = 'http://127.0.0.1:8888/'
MAININCA_DATA_ENDPOINT = 'api/save_main_inca/'
GET_MEASUREMENT_PROM = 'api/measurementPromedio/'
ACTIVE_QHAWAX_ENDPOINT = 'api/get_all_active_qhawax/'
SAVE_GAS_INCA_ENDPOINT = 'api/saveGasInca/'
def init_inca_gas_processed(valueH2S,valueCO,valueNO2,valuePM10,valuePM25,valueSO2, valueO3, calInca):
inca_json = {}
starting_hour = datetime.datetime.now() - datetime.timedelta(hours=5)
inca_json['timestamp'] = str(starting_hour.replace(minute=0, second=0, microsecond=0))
inca_json['CO'] = valueCO
inca_json['CO2'] = 0.0
inca_json['H2S'] = valueH2S
inca_json['NO'] = 0.0
inca_json['NO2'] = valueNO2
inca_json['O3'] = valueO3
inca_json['PM1'] = 0.0
inca_json['PM25'] = valuePM25
inca_json['PM10'] = valuePM10
inca_json['SO2'] = valueSO2
inca_json['main_inca'] = calInca
return inca_json
def _inca_qualification(val, bad_limit):
    """Map a pollutant value to its INCA qualification score.

    Shared band logic for every pollutant — the six valida* functions
    below differed only in the upper bound of the "bad" band, so the
    duplicated if-chains are collapsed here:
      [0, 50]           -> 50   (good)
      (50, 100]         -> 100  (moderate)
      (100, bad_limit]  -> 500  (bad)
      (bad_limit, inf)  -> 600  (care threshold)
      negative          -> -1   (invalid reading)
    """
    if 0 <= val <= 50:
        return 50
    if 50 < val <= 100:
        return 100
    if 100 < val <= bad_limit:
        return 500
    if val > bad_limit:
        return 600
    return -1
def validaH2S(val):
    """INCA qualification for H2S (bad band ends at 1000)."""
    return _inca_qualification(val, 1000)
def validaCO_NO2(val):
    """INCA qualification for CO and NO2 (bad band ends at 150)."""
    return _inca_qualification(val, 150)
def validaSO2(val):
    """INCA qualification for SO2 (bad band ends at 625)."""
    return _inca_qualification(val, 625)
def validaPM10(val):
    """INCA qualification for PM10 (bad band ends at 167)."""
    return _inca_qualification(val, 167)
def validaPM25(val):
    """INCA qualification for PM2.5 (bad band ends at 500)."""
    return _inca_qualification(val, 500)
def validaO3(val):
    """INCA qualification for O3 (bad band ends at 175)."""
    return _inca_qualification(val, 175)
# Conversion factors from raw averages to "% of the standard" per gas.
# NOTE(review): 0.0409 * molar_mass looks like a ppb -> ug/m3 conversion
# and the divisor the air-quality standard for each gas — confirm the
# standards against the measurement units the API returns.
factor_final_CO = (0.0409 * 28.01 * 100)/10000
factor_final_NO2 = (0.0409 * 46.0055 * 100)/200
factor_final_PM10 = 100/150
factor_final_PM25 = 100/25
factor_final_SO2 = (0.0409 * 64.066 * 100)/20
factor_final_O3 = (0.0409 * 48* 100)/120
factor_final_H2S = (0.0409 * 34.1*100)/150
# Request all qhawax
response = requests.get(BASE_URL + ACTIVE_QHAWAX_ENDPOINT)
qhawax_names = [qhawax['name'] for qhawax in response.json()]
for qhawax_name in qhawax_names:
    try:
        # Rolling averages per sensor; the averaging window (hours)
        # differs per gas, matching each gas's standard.
        responseCO = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'CO', 'hoursSensor': 8})
        responseNO2 = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'NO2', 'hoursSensor': 1})
        responsePM10 = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'PM10', 'hoursSensor': 24})
        responsePM25 = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'PM25', 'hoursSensor': 24})
        responseSO2 = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'SO2', 'hoursSensor': 24})
        responseO3 = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'O3', 'hoursSensor': 8})
        responseH2S = requests.get(BASE_URL + GET_MEASUREMENT_PROM, params={'name': qhawax_name, 'sensor': 'H2S', 'hoursSensor': 24})
        # Normalize each average to % of its standard, floored to an int.
        valueH2S = math.floor(float(responseH2S.text) * factor_final_H2S)
        valueCO = math.floor(float(responseCO.text) * factor_final_CO)
        valueNO2 = math.floor(float(responseNO2.text) * factor_final_NO2)
        valuePM10 = math.floor(float(responsePM10.text) * factor_final_PM10)
        valuePM25 = math.floor(float(responsePM25.text) * factor_final_PM25)
        valueSO2 = math.floor(float(responseSO2.text) * factor_final_SO2)
        valueO3 = math.floor(float(responseO3.text) * factor_final_O3)
        # Overall INCA = the worst (max) qualification across all gases.
        aux = 0
        calInca = 0
        aux = validaH2S(valueH2S)
        if aux > calInca:
            calInca = aux
        aux = validaCO_NO2(valueCO)
        if aux > calInca:
            calInca = aux
        aux = validaCO_NO2(valueNO2)
        if aux > calInca:
            calInca = aux
        aux = validaPM10(valuePM10)
        if aux > calInca:
            calInca = aux
        aux = validaPM25(valuePM25)
        if aux > calInca:
            calInca = aux
        aux = validaSO2(valueSO2)
        if aux > calInca:
            calInca = aux
        aux = validaO3(valueO3)
        if aux > calInca:
            calInca = aux
        name_qhawax = qhawax_name
        inca_gas_processed = init_inca_gas_processed(valueH2S,valueCO,valueNO2,valuePM10,valuePM25,valueSO2, valueO3,calInca)
        inca_gas_processed['ID'] = qhawax_name
        # Persist both the per-gas breakdown and the headline INCA value.
        response = requests.post(BASE_URL + SAVE_GAS_INCA_ENDPOINT, json=inca_gas_processed)
        response = requests.post(BASE_URL + MAININCA_DATA_ENDPOINT, json ={'name': name_qhawax, 'value_inca': calInca})
    # A failing station should not abort the whole sweep; log and continue.
    except Exception as e:
print(e) |
994,064 | 7080acda444997c23370cccaf18981c750b5bceb | from django.apps import AppConfig
class ssp(AppConfig):
    """Django app config for the `ssp` app."""
    name = 'ssp'
    def ready(self):
        # Import for side effects: registers the app's signal handlers.
        import ssp.signals
|
994,065 | 3277f256185b8d50a8fcc3d5607055bd7f05597b | #Cheryl Zogg HW3 - Problem 2
class Node:
    """Abstract node of a decision tree: carries a name, a cost and a utility."""
    def __init__(self, name, cost, utility):
        """
        :param name: label of this node
        :param cost: immediate cost incurred at this node
        :param utility: immediate utility gained at this node
        """
        self.name = name
        self.cost = cost
        self.utility = utility
    def get_expected_cost(self):
        raise NotImplementedError("This is an abstract method and needs to be implemented in derived classes.")
    def get_expected_utility(self):
        raise NotImplementedError("This is an abstract method and needs to be implemented in derived classes.")
class ChanceNode(Node):
    """Node whose outcome is probabilistic over its successor nodes."""
    def __init__(self, name, cost, utility, future_nodes, probs):
        """
        :param future_nodes: successor nodes reachable from this node
        :param probs: probability of each successor (parallel to future_nodes)
        """
        Node.__init__(self, name, cost, utility)
        self.futureNodes = future_nodes
        self.probs = probs
    def get_expected_cost(self):
        """Immediate cost plus the probability-weighted cost of successors."""
        total = self.cost
        for idx, successor in enumerate(self.futureNodes):
            total += self.probs[idx] * successor.get_expected_cost()
        return total
    def get_expected_utility(self):
        """Immediate utility plus the probability-weighted utility of successors."""
        total = self.utility
        for idx, successor in enumerate(self.futureNodes):
            total += self.probs[idx] * successor.get_expected_utility()
        return total
class TerminalNode(Node):
    """Leaf node: expected values are just its own cost/utility."""
    def __init__(self, name, cost, utility):
        Node.__init__(self, name, cost, utility)
    def get_expected_cost(self):
        return self.cost
    def get_expected_utility(self):
        return self.utility
class DecisionNode(Node):
    """Node where the decision maker chooses among successor nodes."""
    def __init__(self, name, cost, utility, future_nodes):
        Node.__init__(self, name, cost, utility)
        self.futureNode = future_nodes
    def get_expected_costs(self):
        """Return {successor name: expected cost} for every available choice."""
        return {choice.name: choice.get_expected_cost() for choice in self.futureNode}
    def get_expected_utility(self):
        """Return {successor name: expected utility} for every available choice."""
        return {choice.name: choice.get_expected_utility() for choice in self.futureNode}
#######################
#create the terminal nodes
T1 = TerminalNode(name='T1', cost=10, utility=0.9)
T2 = TerminalNode(name='T2', cost=20, utility=0.8)
T3 = TerminalNode(name='T3', cost=30, utility=0.7)
T4 = TerminalNode(name='T4', cost=40, utility=0.6)
T5 = TerminalNode(name='T5', cost=50, utility=0.5)
#create the chance nodes
# C2 must be built before C1 because C1 references it as a successor.
C2 = ChanceNode(name='C2', cost=35, utility=0, future_nodes=[T1,T2], probs=[0.7,0.3])
C1 = ChanceNode(name='C1', cost=25, utility=0, future_nodes=[C2,T3], probs=[0.2,0.8])
C3 = ChanceNode(name='C3', cost=45, utility=0, future_nodes=[T4,T5], probs=[0.1,0.9])
#create the decision node
D1 = DecisionNode(name='D1', cost=0, utility=0, future_nodes=[C1,C3])
#print the expect cost and utility of C1 and C3
print(D1.get_expected_costs(), D1.get_expected_utility()) |
994,066 | 06bf731c66f3497088f27b85c69f58f68ec10002 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : ๆ้จ
@Contact : 1121352970@qq.com
@Software: PyCharm
@File : errors.py
@Time : 2019-08-05 17:25
@Desc :
"""
from flask import jsonify
from werkzeug.http import HTTP_STATUS_CODES
def error_response(status_code, message=None):
    """Build a JSON Flask error response for the given HTTP status code.

    The body always carries an 'error' field (the standard reason phrase,
    or 'Unknown error' for unrecognized codes); a truthy `message` is
    included under 'message'.
    """
    reason = HTTP_STATUS_CODES.get(status_code, 'Unknown error')
    body = {'error': reason}
    if message:
        body['message'] = message
    resp = jsonify(body)
    resp.status_code = status_code
    return resp
def bad_request(message):
    """Shorthand for a 400 Bad Request error response."""
    return error_response(400, message)
|
994,067 | 3e10fb47e15765269f273bf0098dc7c047faac89 | import urllib
from sp_api.api.products.products import Products
def test_pricing_for_sku():
    # Smoke test: calls the SP-API pricing endpoint with an empty SKU list
    # and prints the raw response (no assertions; needs live credentials).
    print(Products().get_product_pricing_for_skus([
    ]))
|
994,068 | ab6209c4d74d4cf9497f94529b9c471857bfe7ff | import numpy as np
import jieba
from gensim.models import word2vec
from keras.utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, Dense, TimeDistributed, Dropout, LSTM, GRU, Bidirectional, Flatten
import sys
from keras.models import load_model
import csv
# Train a bidirectional-GRU sentiment classifier on jieba-segmented text
# using pre-trained word2vec embeddings.
# argv[1]: training sentences csv (utf8), argv[2]: labels csv (big5),
# argv[4]: jieba user dictionary.
jieba.load_userdict(sys.argv[4])
word2vec_model = word2vec.Word2Vec.load("word2vec.model")

# Build the embedding matrix and the word -> row-index dictionary.
# Row 0 is reserved for out-of-vocabulary words (all zeros).
embedding_matrix = np.zeros((len(word2vec_model.wv.vocab.items()) + 1, word2vec_model.vector_size))
dictionary = {}
vocab = []
for word, _ in word2vec_model.wv.vocab.items():
    vocab.append((word, word2vec_model.wv[word]))
# build dictionary
for i, info in enumerate(vocab):
    word, vec = info
    dictionary[word] = i + 1
    embedding_matrix[i + 1] = vec

# Read sentences and integer labels.
file = open(sys.argv[1], 'r', encoding='utf8')
file2 = open(sys.argv[2], 'r', encoding='big5').read().split('\n')[1:-1]
train_xdata = []
train_ydata = []
for i in file:
    split = i.split(",")[1]
    split = list(jieba.cut(split))
    train_xdata.append(split[:-1])  # delete "\n"
for i in file2:
    train_ydata.append(int(i.split(",")[1]))
file.close()
train_xdata = train_xdata[1:]  # drop the csv header row

# Map each word to its embedding index; 0 for unknown words.
# (dict.get replaces the old per-word try/except -- same behaviour, no
# exception machinery inside the hot loop.)
train_x = []
for sentence in train_xdata:
    train_x.append([dictionary.get(word, 0) for word in sentence])
train_x = np.array(train_x)
padding_length = 200
train_x = pad_sequences(train_x, maxlen=padding_length, padding='post')

# One-hot encode the binary labels into shape (N, 1, 2).
# FIX: sample count derived from the data instead of the hard-coded 120000,
# so the script no longer breaks (or silently truncates) on other datasets.
num_samples = len(train_ydata)
train_y = np.zeros((num_samples, 1, 2))
for i in range(num_samples):
    if train_ydata[i] == 0:
        train_y[i][0][0] = 1
    else:
        train_y[i][0][1] = 1

model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1],
                    weights=[embedding_matrix], trainable=False))
model.add(Bidirectional(GRU(256, return_sequences=True)))
model.add(TimeDistributed(Dense(256, activation='relu')))
model.add(Dropout(0.1))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_crossentropy', 'accuracy'])
history = model.fit(x=train_x, y=train_y, batch_size=100, epochs=5, validation_split=0.1)
'''
plt.plot(history.history["acc"])
plt.plot(history.history["val_acc"])
plt.title("Model loss")
plt.xlabel("epoch")
plt.ylabel("acc")
plt.legend(['train','validation'])
plt.savefig('Loss.png')
plt.show()
'''
model.save('my_model.h5')
del model
994,069 | eefaccb2c42b6c8e5109d74e0eb8c0a1f2b9944c |
"""
A bunch of diffeomorphisms of the real line.
"""
import numpy as np
from contracts import contract
from .. import GenericScalar
@contract(x='array', alpha='>0')
def power(x, alpha=2.0):
    """Odd power map: raise the magnitude to *alpha*, preserving sign."""
    magnitude = np.power(np.abs(x), alpha)
    return np.sign(x) * magnitude
#
# @contract(x='array', alpha='>0')
# def powerinv(x, alpha=2):
# return power(x, 1.0 / alpha)
def to01(f):
    """Adapt *f*, defined on [-1, 1], to act on [0, 1] instead."""
    def wrap(x, *args, **kwargs):
        # affinely map [0,1] -> [-1,1], apply f, then map back
        shifted = 2 * x - 1
        mapped = f(shifted, *args, **kwargs)
        return (mapped + 1) / 2
    return wrap
# Variant of `power` acting on [0, 1] instead of [-1, 1].
power01 = to01(power)
# power01inv = to01(powerinv)
#
# def nlf1(x):
# return np.sin(np.pi * x / 2)
class Power(GenericScalar):
    """Scalar diffeomorphism of [0, 1] induced by the odd power map
    x -> sign(x) * |x|**alpha on [-1, 1]."""

    @contract(alpha='>0')
    def __init__(self, alpha=2):
        self.alpha = alpha

    @contract(x='array')
    def transform01(self, x):
        """Apply the power map, rescaled to act on [0, 1]."""
        return power01(x, self.alpha)

    def inverse(self):
        """The inverse diffeomorphism is the power map with exponent 1/alpha."""
        return Power(1.0 / self.alpha)
|
994,070 | 41777a74ad00e6f5c1f985a0fcd8f3dd3b19d65b | import csv
import re
import sys
def parse_args():
    '''Parse command-line arguments for the tool.

    Returns:
        option: Option for the tool (e.g. '-help', '-name', '-gpa')
        pattern: Pattern for the option ('' for -help/-h)
        filename: The csv file path ('' for -help/-h)
    '''
    argv = sys.argv
    if len(argv) < 2:
        print('Error: Argument missing, check usage using \'-help\'')
        sys.exit(2)
    option = argv[1]
    # help takes no further arguments
    if option in ('-help', '-h'):
        return option, "", ""
    if len(argv) != 4:
        print('Error: Argument missing or exceeding, check usage using \'-help\'')
        sys.exit(2)
    return option, argv[2], argv[3]
def read_csv(filename):
    '''Read all records from a csv file.

    Args:
        filename: Path for a csv file
    Returns:
        rows: A list of records (each a list of string fields)

    Exits with status 2 if the file cannot be read.
    '''
    try:
        with open(filename, 'r') as csvfile:
            return [record for record in csv.reader(csvfile)]
    except IOError:
        print('Cannot read file from %s' % filename)
        sys.exit(2)
def print_records(rows):
    """Print a header followed by the first four fields of each record,
    tab separated."""
    print('Matching Results of Student Records:')
    for record in rows:
        print('\t'.join(str(record[i]) for i in range(4)))
def match_name(pattern, rows):
    """Match the name with pattern and print the matching records.

    A record matches when its first OR last name contains the
    (case-insensitive) regex pattern.

    Args:
        pattern: A regular expression string
        rows: A list of records
    Returns: None
    """
    # compile once instead of re-building the regex for every row
    needle = re.compile(pattern.lower())
    matching = [row for row in rows
                if needle.search(row[0].lower()) is not None
                or needle.search(row[1].lower()) is not None]
    print_records(matching)
def match_email(pattern, rows):
    """Match the email with pattern and print the matching records.

    A record matches when its email field contains the (case-insensitive)
    regex pattern.

    Args:
        pattern: A regular expression string
        rows: A list of records
    Returns: None
    """
    # compile once instead of re-building the regex for every row
    needle = re.compile(pattern.lower())
    matching = [row for row in rows if needle.search(row[2].lower()) is not None]
    print_records(matching)
def match_gpa(pattern, rows):
    """Match the gpa with pattern and print the matching records.

    Args:
        pattern: A string of a float number, may have + or - at the end:
            '+' means gpa >= number, '-' means gpa <= number, otherwise
            an exact match is required.
        rows: A list of records
    Returns: None

    Exits with status 2 when the pattern is not a recognizable number.
    """
    # FIX: an empty pattern used to raise IndexError on pattern[-1];
    # treat it as an unrecognizable number instead.
    if not pattern:
        print('Error: cannot recognize number: %s' % pattern)
        sys.exit(2)
    exp = '='
    if pattern[-1] in ('+', '-'):
        exp = pattern[-1]
        pattern = pattern[:-1]
    try:
        line = float(pattern)
    except ValueError:  # FIX: was a bare except that also hid real bugs
        print('Error: cannot recognize number: %s' % pattern)
        sys.exit(2)
    matching = []
    for row in rows:
        gpa = float(row[3])
        # keep rows satisfying the selected comparison
        if (exp == '+' and gpa >= line) or \
                (exp == '-' and gpa <= line) or \
                (exp == '=' and gpa == line):
            matching.append(row)
    # print the matched records
    print_records(matching)
if __name__ == "__main__":
    # Entry point: dispatch to the matcher selected on the command line.
    option, pattern, filename = parse_args()
    help_info = """
    Usage: tool [-help][-name <pattern> <path>][-email <pattern> <path>][-gpa <pattern> <path>]
    """
    if option in ('-help', '-h'):
        print(help_info)
    else:
        # every non-help option needs the csv loaded first
        rows = read_csv(filename)
        if option in ('-name', '-n'):
            match_name(pattern, rows)
        elif option in ('-email', '-e'):
            match_email(pattern, rows)
        elif option in ('-gpa', '-g'):
            match_gpa(pattern, rows)
        else:
            print('Unknown option: %s, check usage using \'-help\' or \'-h\'' % option)
|
994,071 | b5109695da0369d5f1679596acfd12515029e994 | # ------------------------------------------------------------------------------
# Project Euler - Problem 044 - Pentagon numbers
# ------------------------------------------------------------------------------
# Problem Link: https://projecteuler.net/problem=044
# ------------------------------------------------------------------------------
# Author: Paul Frisby
# Email: mail@paulfrisby.com
# Github: https://github.com/paulfrisby/
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Problem Definition
# ------------------------------------------------------------------------------
"""
Pentagonal numbers are generated by the formula, Pn = n(3n - 1)/2. The first ten
pentagonal numbers are:
1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference,
70 - 22 = 48, is not pentagonal.
Find the pair of pentagonal numbers, Pj and Pk, for which their sum and
difference are pentagonal and D = |Pk - Pj| is minimised; what is the value of
D?
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Pseudocode
# ------------------------------------------------------------------------------
"""
function to find out if a number, x, is pentagonal:
use quadratic formula to work out
since x = n(3n-1)/2, 3/2 n^2 - 1/2 n - x = 0, therefore taking only the
upper root:
n = -b + sqrt(b^2 - 4ac) / 2a
= (-(-1/2) + sqrt((-1/2)^2 - 4*(3/2)*(-x))) / (2*(3/2))
= (1/2 + sqrt(1/4 + 6*x)) / 3
if n is an integer, x is a pentagonal number
minimumD = infinity
bestPj = 0
bestPk = 0
for each pentagonal number in range:
for each larger pentagonal number in range:
if sum & difference of these are also pentagonal:
if difference less than minimumD:
minimumD = difference between 2 pentagonal numbers
bestPj = pentagonal number 1
bestPk = pentagonal number 2
print bestPk - bestPj = minimumD
"""
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Main Code
# ------------------------------------------------------------------------------
from math import sqrt
# returns nth pentagonal number
def nthPentagonal(n):
    """Return the nth pentagonal number, P(n) = n*(3n - 1)/2.

    FIX: uses integer floor division. n*(3n - 1) is always even, so the
    result is exact for arbitrarily large n; the previous int(x/2) went
    through float division and lost precision once the product exceeded
    2**53.
    """
    return n * (3 * n - 1) // 2
# returns true if input number is pentagonal
def isPentagonal(n):
    """Return True if n is a pentagonal number, exactly.

    Inverting P(k) = k(3k - 1)/2 gives k = (1 + sqrt(1 + 24n)) / 6, so n
    is pentagonal iff 24n + 1 is a perfect square whose root r satisfies
    (r + 1) % 6 == 0.

    FIX: uses math.isqrt for an exact integer square-root test; the
    previous float-sqrt version could mis-classify large n due to
    floating-point rounding (and raised ValueError on negative input).
    """
    from math import isqrt
    if n < 1:
        return False
    disc = 24 * n + 1
    root = isqrt(disc)
    return root * root == disc and (root + 1) % 6 == 0
# initialising to infinity so any initially found value will be below it
# this removes need to guess an upper limit
minimumDiff = float('inf')

# to store the 2 pentagonal numbers with the desired property
bestPj = 0
bestPk = 0

# Brute-force over ordered pairs (j, k) with j < k <= 5000, keeping the
# pair whose pentagonal difference is smallest.
# NOTE(review): the 5000 bound is empirical -- the scan does not prove the
# minimum lies inside it.
for j in range(1, 5000):
    Pj = nthPentagonal(j)
    for k in range(j+1, 5001):
        Pk = nthPentagonal(k)
        pSum = Pj + Pk
        pDiff = Pk - Pj
        # keep the pair only if BOTH sum and difference are pentagonal
        if isPentagonal(pDiff) and isPentagonal(pSum):
            if pDiff < minimumDiff:
                minimumDiff = pDiff
                bestPj = Pj
                bestPk = Pk

print (f'{bestPk} - {bestPj} = {minimumDiff}')
|
994,072 | e299878ac00b2bbcb1328345290d5c10d6bf419b | #coding : utf8
#Author : taosenlin
#Time : 2020/3/30 18:10
# Seleniumไธญ็xpathๅฎไฝ
# XPathๅณไธบXML่ทฏๅพ่ฏญ่จ๏ผๅฎๆฏไธ็ง็จๆฅ็กฎๅฎXML1ๆๆกฃไธญๆ้จๅไฝ็ฝฎ็่ฏญ่จใ
# ไธใxpathๅฑๆงๅฎไฝ
# 1ใxpath้่ฟๅ
็ด ็idใnameใclass่ฟไบๅฑๆงๅฎไฝ
# //*[@id='kw'] //*[@name='wd'] //*[@class='s_ipt']
# ไบใxpathๅ
ถไปๅฑๆง
# 1ใๅฆๆไธไธชๅ
็ด idใnameใclassๅฑๆง้ฝๆฒกๆ๏ผ่ฟๆถๅไนๅฏไปฅ้่ฟๅ
ถไปๅฑๆงๅฎไฝๅฐ
# //*[@autocomplete='off']
# ไธใxpathๆ ็ญพ
# 1ใๆๆถๅๅไธไธชๅฑๆง๏ผๅๅ็ๆฏ่พๅค๏ผ่ฟๆถๅๅฏไปฅ้่ฟๆ ็ญพ็ญ้ไธ๏ผๅฎไฝๆดๅ็กฎ
# 2ใๅฆๆไธๆณๅถๅฎๆ ็ญพๅ็งฐ๏ผๅฏไปฅ็จ * ๅท่กจ็คบไปปๆๆ ็ญพ
# 3ใๅฆๆๆณๅถๅฎๅ
ทไฝๆไธชๆ ็ญพ๏ผๅฐฑๅฏไปฅ็ดๆฅๅๆ ็ญพๅ็งฐ
# //input[@autocomplete='off']
# //input[@id='kw']
# //input[@name='wd']
# ๅใxpathๅฑ็บง
# 1ใๅฆๆไธไธชๅ
็ด ๏ผๅฎ็ๅฑๆงไธๆฏๅพๆๆพ๏ผๆ ๆณ็ดๆฅๅฎไฝๅฐ๏ผ่ฟๆถๆไปฌๅฏไปฅๅ
ๅฎไฝๅ
ถ็ถๅ
็ด
# 2ใๆพๅฐๅ
ถ็ถๅ
็ด ๏ผๅๆพไธไธชๅฑ็บงๅฐฑ่ฝๅฎไฝๅฐ
# <form id="form"
# <span id="s_kw_wrap"
# <span class="soutu-btn"
# <input id="kw" class="s_ipt"
# 3ใๅฆไธๅพๆ็คบ๏ผ่ฆๅฎไฝ็ๆฏinput่ฟไธชๆ ็ญพ๏ผๅฎ็็ถๅ
็ด ็ id=s_kw_wrap
# 4ใๅฆๆๅฎ็ถๅ
็ด ็ๅฑๆงไนไธๆฏๅพๆๆพ๏ผๅฏไปฅๅๅพไธๅฑๆพ็ถๅ
็ด ็็ถๅ
็ด
# 5ใไบๆฏๅฐฑๅฏไปฅ้่ฟๅฑ็บงๅ
ณ็ณปๅฎไฝๅฐ็ฎๆ ๅ
็ด
# ้่ฟๅฎไฝๅฎ็ถๅ
็ด ๆฅๅฎไฝinput่พๅ
ฅๆก
# //span[@id='s_kw_wrap']/input
# ้่ฟๅฎไฝๅฎ็ถๅ
็ด ็็ถๅ
็ด ๆฅๅฎไฝinput่พๅ
ฅๆก
# //form[@id='form']/span/input
# ไบใxpath็ดขๅผ
# 1ใๅฆๆไธไธชๅ
็ด ๅฎ็ๅ
ๅผๅ
็ด ่ทๅฎ็ๆ ็ญพไธๆ ท๏ผ่ฟๆถๆ ๆณ้่ฟๅฑ็บงๅฎไฝๅฐใ
# 2ใๆญคๆถๅฏไปฅๆ นๆฎๅฎๅจhtmlไธญ็ๆ่กไฝ็ฝฎๅฎไฝ
# <td id="se-setting-3"
# <select id="nr" name="NR"
# <option selected=""
# <option value="20"
# <option value="50"
# 3ใๅฆไธๅพๆ็คบ๏ผ็จxpathๅฎไฝไธไธชๅ
็ด (่ฟ้็ดขๅผๆฏไป1ๅผๅง็ฎ่ตท็๏ผ่ทpython็็ดขๅผไธไธๆ ท)
#็จxpathๅฎไฝ็ฌฌไธไธช
# //select[@id='nr']/option[1]
# ็จxpathๅฎไฝ็ฌฌไบไธช
# //select[@id='nr']/option[2]
# ็จxpathๅฎไฝ็ฌฌไธไธช
# //select[@id='nr']/option[3]
# ๅ
ญใxpath้ป่พ่ฟ็ฎ
# 1ใxpath่ฟๆไธไธชๆฏ่พๅผบ็ๅ่ฝ๏ผๅฏไปฅๅคไธชๅฑๆง้ป่พ่ฟ็ฎ๏ผๆฏๆ ไธ(and)ใๆ(or)ใ้(not)
# 2ใไธ่ฌ็จ็ๆฏ่พๅค็ๆฏ and ่ฟ็ฎ๏ผๅๆถๆปก่ถณไธคไธชๅฑๆง
# //*[@id='kw' and @autocomplete='off']
# ไธใxpathๆจก็ณๅน้
# 1ใxpath่ฟๆไธไธช้ๅธธๅผบๅคง็ๅ่ฝ๏ผๆจก็ณๅน้
# 2ใๆๆกๆจก็ณๅน้
ๅ่ฝ๏ผๅบๆฌไธๆฒกๆๅฎไฝไธๅฐ็
# 3ใๆฏๅฆๅฎไฝ็พๅบฆ้กต้ข็่ถ
้พๆฅ "hao123" ,ๅฏไปฅ้่ฟby_link,ไนๅฏไปฅ้่ฟ
# by_partial_link ๆจก็ณๅน้
ๅฎไฝๅฐใxpathไนๆๅๆ ท็ๅ่ฝใ
# xpathๆจก็ณๅน้
ๅ่ฝ
# //*[contains(text(),'hao123')]
# xpathไนๅฏไปฅๆจก็ณๅน้
ๆไธชๅฑๆง
# //*[contains(@id,'kw')]
# xpathๅฏไปฅๆจก็ณๅน้
ไปฅไปไนๅผๅคด
# //*[starts-with(@id,'s_kw_')]
# xpathๅฏไปฅๆจก็ณๅน้
ไปฅไปไน็ปๅฐพ
# //*[ends-with(@id,'kw_wrap')]
# xpath่ฟๆฏๆๆๅผบ็ๆญฃๅ่กจ่พพๅผ
# //*[matches(text(),'hao123')]  (note: matches()/ends-with() are XPath 2.0+; Selenium's engine implements XPath 1.0, which does not support them)
# ๅ
ซใ็ถๅญใๅ
ๅผใ็ธ้ป่็นๅฎไฝ
# 1ใ็ฑ็ถ่็นๅฎไฝๅญ่็น
# ๆ นๆฎB่็นๅฎไฝๆ id็ๅญ่็น
# (1)ไธฒ่ๆฅๆพ
# driver.find_element_by_id('B').find_element_by_tag_name('div')
# (2)xpath็ถๅญๅ
ณ็ณปๅฏปๆพ
# //div[@id='B']/div
# (3)css็ถๅญๅ
ณ็ณปๅฏปๆพ
# div#B>div
# (4)css nth-child
# div#B div:nth-child(1)
# (5)css nth-of-type
# div#B div:nth-of-type(1)
# (6)xpath่ฝด child
# //div[@id='B']/child::div
# 2ใ็ฑๅญ่็นๅฎไฝ็ถ่็น
# ๆ นๆฎC่็นๅฎไฝๅ
ถไธคๅฑ็ถ่็น็div
# (1)xpath: '.' ไปฃ่กจๅฝๅ่็น; '..' ไปฃ่กจ็ถ่็น
# //div[@id='C']/../..
# (2)xpath่ฝด parent
# //div[@id='C']/parent::*/parent::div
# 3ใ็ฑๅผๅผ่็นๅฎไฝๅฅๅฅ่็น
# ๆ นๆฎD่็นๅฎไฝๅ
ถๅฅๅฅ่็น
# (1)xpath้่ฟ็ถ่็น่ทๅๅ
ถๅฅๅฅ่็น
# //div[@id='D']/../div[1]
# (2)xpath่ฝด preceding-sibling
# //div[@id='D']/preceding-sibling::div[1]
# 4ใ็ฑๅฅๅฅ่็นๅฎไฝๅผๅผ่็น
# ๆ นๆฎD่็นๅฎไฝๅ
ถๅผๅผ่็น
# (1)xpath้่ฟ็ถ่็น่ทๅๅ
ถๅผๅผ่็น
# //div[@id='D']/../div[3]
# (2)xpath่ฝด following-sibling
# //div[@id='D']/following-sibling::div[1]
# (3)xpath่ฝด following
# //div[@id='D']/following::*
# (4)css + ( + ่กจ็คบ็ดง่ทๅจๅฝๅ่็นไนๅ็div่็น)
# div#D + div
# (5)css ~ ( ~ ่กจ็คบๅฝๅ่็นไนๅ็div่็น๏ผๅฆๆ็จfind_elements๏ผๅๅฏ่ทๅๅฐไธ็ปdiv่็น)
# div#D ~ div
|
994,073 | 792a8704693972606447d94ca9908bb2e1a73bb8 | #main.py
from order import Order

# Instantiate the ordering workflow and run the interactive drink order.
o = Order()
o.order_drink()
994,074 | 0ca6563fbc35bb4f9ee51b7256182062a143628f | #!/usr/local/bin/python
from pprint import pprint as pp
import json
'''
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred natural numbers and the square of the sum.
'''
# Build the sequences 1..100, 1..10 and 1..1000.
# NOTE(review): in Python 3 map() returns a one-shot iterator, and
# meredith() iterates its argument twice; this file is Python 2 (see the
# print statement below), where map() returns a list, so it works there.
hundred = map(lambda x: x + 1, range(100))
ten = map(lambda x: x + 1, range(10))
thousand = map(lambda x: x + 1, range(1000))  # unused
def meredith(set):
    """Return {'sum': sum of squares, 'square': square of the sum} of *set*."""
    return {
        'sum': sum(value * value for value in set),
        'square': sum(set) ** 2,
    }
def sum_of_squares(set):
    """Sum of the squares of every element in *set*."""
    return sum(i * i for i in set)
def square_of_sum(set):
    """Square of the sum of every element in *set*."""
    total = sum(set)
    return total * total
# pp(hundred)
# Compute both aggregates for 1..100 and their difference (the answer).
set_results = meredith(hundred)
set_results['result'] = set_results['square'] - set_results['sum']
print json.dumps(set_results)  # Python 2 print statement
994,075 | 7fb7487ffbbae894319dc696a749c8d52f3b72db | # Copyright ยฉ 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module holds data for request tracker."""
from __future__ import annotations
from enum import auto
from typing import List
from legal_api.utils.base import BaseEnum
from legal_api.utils.datetime import datetime
from legal_api.utils.legislation_datetime import LegislationDatetime
from .db import db
class RequestTracker(db.Model):  # pylint: disable=too-many-instance-attributes
    """This class manages the request tracker.

    Each row records one request sent to an external service (currently
    the BN Hub), its payload/response, and whether it has been processed.
    """

    class ServiceName(BaseEnum):
        """Render an Enum of the service name."""

        BN_HUB = auto()

    class RequestType(BaseEnum):
        """Render an Enum of the request type."""

        INFORM_CRA = auto()
        GET_BN = auto()
        CHANGE_DELIVERY_ADDRESS = auto()
        CHANGE_MAILING_ADDRESS = auto()
        CHANGE_NAME = auto()
        CHANGE_STATUS = auto()
        CHANGE_PARTY = auto()

    __tablename__ = 'request_tracker'

    id = db.Column(db.Integer, primary_key=True)
    request_type = db.Column('request_type', db.Enum(RequestType), nullable=False)
    # True once the external service has acknowledged/handled the request
    is_processed = db.Column('is_processed', db.Boolean, default=False)
    request_object = db.Column(db.Text)
    response_object = db.Column(db.Text)
    retry_number = db.Column('retry_number', db.Integer, default=0, nullable=False)
    service_name = db.Column('service_name', db.Enum(ServiceName), nullable=False)
    creation_date = db.Column('creation_date', db.DateTime(timezone=True), default=datetime.utcnow)
    last_modified = db.Column('last_modified', db.DateTime(timezone=True), default=datetime.utcnow)
    is_admin = db.Column('is_admin', db.Boolean, default=False)
    message_id = db.Column('message_id', db.String(60))

    # parent keys
    business_id = db.Column('business_id', db.Integer, db.ForeignKey('businesses.id'), index=True)
    filing_id = db.Column('filing_id', db.Integer, db.ForeignKey('filings.id'), index=True)

    @property
    def json(self) -> dict:
        """Return the request tracker as a json object."""
        return {
            'id': self.id,
            'requestType': self.request_type.name,
            'isProcessed': self.is_processed,
            'serviceName': self.service_name.name,
            'isAdmin': self.is_admin,
            'creationDate': LegislationDatetime.as_legislation_timezone(self.creation_date).isoformat()
        }

    def save(self):
        """Save the object to the database immediately."""
        db.session.add(self)
        db.session.commit()

    @classmethod
    def find_by_id(cls, request_tracker_id: int) -> RequestTracker:
        """Return the request tracker matching the id, or None."""
        request_tracker = None
        if request_tracker_id:
            request_tracker = cls.query.filter_by(id=request_tracker_id).one_or_none()
        return request_tracker

    @classmethod
    def find_by(cls,  # pylint: disable=too-many-arguments
                business_id: int,
                service_name: ServiceName,
                request_type: RequestType = None,
                filing_id: int = None,
                is_admin: bool = None,
                message_id: str = None) -> List[RequestTracker]:
        """Return the request trackers matching the given filters, ordered by id.

        business_id and service_name are mandatory filters; the remaining
        arguments are applied only when truthy.
        """
        query = db.session.query(RequestTracker). \
            filter(RequestTracker.business_id == business_id). \
            filter(RequestTracker.service_name == service_name)

        if request_type:
            query = query.filter(RequestTracker.request_type == request_type)
        if filing_id:
            query = query.filter(RequestTracker.filing_id == filing_id)
        if is_admin:
            query = query.filter(RequestTracker.is_admin == is_admin)
        if message_id:
            query = query.filter(RequestTracker.message_id == message_id)

        request_trackers = query.order_by(RequestTracker.id).all()
        return request_trackers
|
994,076 | b7d4a2e44592e3e170459f8f025c1b91e2544730 | # The pipeline for constructing an MSM
from msmbuilder.io import load_generic, load_trajs, save_trajs, save_generic
from msmbuilder.msm import MarkovStateModel
from msmbuilder.lumping import PCCAPlus
import numpy as np
import seaborn as sns
import matplotlib
matplotlib.use('Agg')
from matplotlib.pylab import plt
from matplotlib.ticker import FormatStrFormatter
sns.set_style("white")
def print_timescales(timescales):
    """Pretty-print implied timescales.

    NOTE(review): unimplemented stub -- currently a no-op; the main block
    below computes `timescales` but never calls this.
    """
    pass
if __name__ == "__main__":
    # Load the MSMs (one per lag time) and the 200-cluster trajectories.
    all_msms = load_generic('rmsd_msms.pickl')
    meta, ctraj_dict = load_trajs('ctraj-200')
    # NOTE(review): long_ctrajs is computed but never used below.
    long_ctrajs = [np.squeeze(traj) for traj in ctraj_dict.values() if traj.shape[0] > 1000]
    ps_to_ns = 1000  # convert timescales from ps to ns
    n_ts = 10        # number of leading implied timescales to keep
    timescales = []
    lags = []
    for msm in all_msms:
        timescales.append(msm.timescales_[:n_ts])
        lags.append(msm.get_params()['lag_time'])
    lags = np.array(lags)
    timescales = np.array(timescales).T/ps_to_ns
    # Select the MSM built at lag time 2000 (ps) for lumping.
    msm = all_msms[np.extract(lags == 2000,np.arange(len(lags)))[0]]
    m = 2
    # 1 timescales means 2 states
    pcca = PCCAPlus.from_msm(msm, n_macrostates=m)
    # microstate -> macrostate assignment
    # NOTE(review): assumes PCCAPlus exposes the mapping as `.x` -- confirm
    # against the msmbuilder version in use.
    pcca_mapping = pcca.x
    print(len(pcca_mapping))
    # Plot the right eigenvectors, colouring clusters by PCCA macrostate.
    plt.clf()
    sns.color_palette('colorblind', m)
    with sns.plotting_context("notebook", font_scale=1.5):
        vec = msm.right_eigenvectors_
        n_states = vec.shape[0] # may be less than 200 as T may be non-ergodic.
        fig, axes = plt.subplots(nrows=m, sharex=True)
        for i in range(m):
            for j in range(m):
                mask = pcca_mapping==j
                axes[i].bar(np.arange(n_states)[mask], vec[mask,i],label='PCCA State {}'.format(j), align='center')
            axes[i].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
            axes[i].legend()
            axes[i].set_ylabel('Cluster projection')
        plt.xlabel('Cluster')
        plt.savefig('figures/rmsd_msm_right_eigenvectors-pcca.png', transparent=True)
    # Same plot for the left eigenvectors.
    plt.clf()
    sns.color_palette('colorblind', m)
    with sns.plotting_context("notebook", font_scale=1.5):
        vec = msm.left_eigenvectors_
        n_states = vec.shape[0] # may be less than 200 as T may be non-ergodic.
        fig, axes = plt.subplots(nrows=m, sharex=True)
        for i in range(m):
            for j in range(m):
                mask = pcca_mapping==j
                axes[i].bar(np.arange(n_states)[mask], vec[mask,i],label='PCCA State {}'.format(j), align='center')
            axes[i].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
            axes[i].legend()
            axes[i].set_ylabel('Cluster projection')
        plt.xlabel('Cluster')
        plt.savefig('figures/rmsd_msm_left_eigenvectors-pcca.png', transparent=True)
    # Transforms: project every cluster trajectory onto MSM / PCCA states
    # ('fill' keeps trajectory length by filling unmapped frames).
    msm_traj = {}
    pcca_traj = {}
    for k, v in ctraj_dict.items():
        print(k)
        msm_traj[k] = msm.partial_transform(np.squeeze(v),mode='fill')
        pcca_traj[k] = pcca.partial_transform(np.squeeze(v), mode='fill')
    save_trajs(msm_traj, 'msm-traj-200', meta)
    save_generic(msm, 'msm-200.pickl')
    save_trajs(pcca_traj, 'pcca-2-traj', meta)
    save_generic(pcca, 'pcca-2.pickl')
994,077 | def51cdaf5c7f57f6566bb556b21fe067a208323 | ########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import pytest
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource as resource
pytestmark = pytest.mark.group_deployments
@pytest.mark.usefixtures('testmockoperations_plugin')
@pytest.mark.usefixtures('cloudmock_plugin')
class TestDeploymentNodes(AgentlessTestCase):
    """REST behaviour of nodes/node-instances for deployed blueprints.

    Refactored: the deploy-then-fetch-first-instance boilerplate that was
    duplicated across the test methods now lives in private helpers.
    """

    def _deploy(self, dsl):
        """Deploy the blueprint at resource path *dsl*; return the deployment."""
        deployment, _ = self.deploy_application(resource(dsl))
        return deployment

    def _first_instance(self, deployment):
        """Return the first node instance of *deployment*, freshly fetched."""
        node_id = self.client.node_instances.list(
            deployment_id=deployment.id)[0].id
        return self.client.node_instances.get(node_id)

    def test_get_deployment_nodes(self):
        deployment = self._deploy("dsl/deployment_nodes_three_nodes.yaml")
        deployment_id = deployment.id

        def assert_node_state(node_id_infix, nodes):
            self.assertTrue(
                any(
                    node_id_infix in n.id and n.state == 'started'
                    for n in nodes),
                'Failed finding node {0} state'.format(node_id_infix))

        def assert_node_states():
            nodes = self.client.node_instances.list(
                deployment_id=deployment_id)
            self.assertEqual(3, len(nodes))
            assert_node_state('containing_node', nodes)
            assert_node_state('contained_in_node1', nodes)
            assert_node_state('contained_in_node2', nodes)

        # State propagation is asynchronous; poll until it settles.
        self.do_assertions(assert_node_states, timeout=30)

    def test_partial_update_node_instance(self):
        node_instance = self._first_instance(self._deploy("dsl/set_property.yaml"))
        node_id = node_instance.id

        # Initial assertions
        self.assertEqual('started', node_instance.state)
        self.assertIsNotNone(node_instance.version)
        self.assertEqual(3, len(node_instance.runtime_properties))

        # Updating only the state
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version,
            state='new_state')
        # Verifying the node's state has changed
        self.assertEqual('new_state', node_instance.state)
        # Verifying the node's runtime properties remained without a change
        self.assertEqual(3, len(node_instance.runtime_properties))

        # Updating only the runtime properties
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version,
            runtime_properties={'new_key': 'new_value'})
        # Verifying the node's state remained the same despite the update to
        # the runtime_properties
        self.assertEqual('new_state', node_instance.state)
        # Verifying the new property is in the node's runtime properties
        self.assertTrue('new_key' in node_instance.runtime_properties)
        self.assertEqual('new_value',
                         node_instance.runtime_properties['new_key'])
        # Verifying the older runtime properties no longer exist
        self.assertEqual(1, len(node_instance.runtime_properties))

        # Updating both state and runtime properties (updating an existing
        # key in runtime properties)
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version,
            runtime_properties={'new_key': 'another_value'},
            state='final_state')
        # Verifying state has updated
        self.assertEqual('final_state', node_instance.state)
        # Verifying the update to the runtime properties
        self.assertEqual(1, len(node_instance.runtime_properties))
        self.assertEqual('another_value',
                         node_instance.runtime_properties['new_key'])

        # Updating neither state nor runtime properties (empty update)
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version)
        # Verifying state hasn't changed
        self.assertEqual('final_state', node_instance.state)
        # Verifying the runtime properties haven't changed
        self.assertEqual(1, len(node_instance.runtime_properties))
        self.assertEqual('another_value',
                         node_instance.runtime_properties['new_key'])

    def test_update_node_instance_runtime_properties(self):
        node_instance = self._first_instance(self._deploy('dsl/set_property.yaml'))
        node_id = node_instance.id

        # Initial assertions
        self.assertIsNotNone(node_instance.version)
        self.assertEqual(3, len(node_instance.runtime_properties))

        # Updating the runtime properties with a new key
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version,
            runtime_properties={'new_key': 'new_value'})
        # Verifying the new property is in the node's runtime properties
        self.assertTrue('new_key' in node_instance.runtime_properties)
        self.assertEqual('new_value',
                         node_instance.runtime_properties['new_key'])
        # Verifying the older runtime properties no longer exist
        self.assertEqual(1, len(node_instance.runtime_properties))

        # Updating an existing key in runtime properties
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version,
            runtime_properties={'new_key': 'another_value'})
        # Verifying the update to the runtime properties
        self.assertEqual(1, len(node_instance.runtime_properties))
        self.assertEqual('another_value',
                         node_instance.runtime_properties['new_key'])

        # Cleaning up the runtime properties
        node_instance = self.client.node_instances.update(
            node_id,
            version=node_instance.version,
            runtime_properties={})
        # Verifying the node no longer has any runtime properties
        self.assertEqual(0, len(node_instance.runtime_properties))
|
994,078 | 1c4fd49f0a52d4909634c24eb323c2d4d9235bb7 | # coding: utf-8
class ISessionSource(object):
    """
    Interface for objects that extract a session id from some source
    (query string, headers, cookies, etc.).
    """

    def get_session_id(self):
        """
        Return a session id.

        :return:
        """
        raise NotImplementedError()
class ISessionBackend(object):
    """
    Interface describing how session data is stored and retrieved
    (database, file, etc.).
    """

    def get_session_data(self, session_id):
        """
        Get session data from some backend (database, file, etc).
        If there is no session with session_id return None
        :param session_id:
        :return: session data or None
        """
        raise NotImplementedError()

    def save_session_data(self, session_id, data):
        """
        Save session data to some backend (database, file, etc).
        :param session_id:
        :param data:
        """
        raise NotImplementedError()
|
994,079 | 84e2acad84092b49bc008019617668552ab50723 | import tweepy
import os
import json
import time
from twitter_util.VideoMedia import VideoMedia
from media_util import is_a_video, get_post_data, is_an_image
class TwitterUtil:
    """Thin wrapper around tweepy for posting media tweets and managing
    follower relationships for a configured account."""

    def __init__(self, config):
        """Authenticate against Twitter.

        config keys: consumer_key, consumer_secret, access_token,
        access_token_secret, template (status format string with three
        positional placeholders: title, author, post_url).
        """
        auth = tweepy.OAuthHandler(config["consumer_key"], config["consumer_secret"])
        auth.set_access_token(config["access_token"], config["access_token_secret"])
        self.api = tweepy.API(auth)
        self.auth = auth.apply_auth()
        self.template = config["template"]

    def test_tweet(self, post):
        """Post a plain text status (connectivity smoke test)."""
        self.api.update_status(post)

    def tweet_post(self, path):
        """Upload the media found at *path* and tweet it with a formatted status."""
        filename, metadata = get_post_data(path)
        if is_a_video(filename):
            media = VideoMedia(filename, self.auth)
            media.upload_video()
        elif is_an_image(filename):
            # BUG FIX: was `elif is_an_image:` -- the bare function object is
            # always truthy, so every non-video file was uploaded as an image.
            media = self.api.media_upload(filename)
        else:
            # Fail fast instead of hitting an unbound `media` below.
            raise ValueError("unsupported media type: %s" % filename)
        status = self.template.format(metadata["title"], metadata["author"], metadata["post_url"])
        self.api.update_status(status=status, media_ids=[media.media_id])

    def get_followers_of(self, user, cursor):
        """Return one page of follower ids plus the next-page cursor.

        Rate limit: ~1 list per hour.
        """
        followers, cursor = self.api.followers_ids(screen_name=user, cursor=cursor)
        result = {"followers": followers, "cursor": cursor[1]}
        return result

    def follow(self, user_id):
        # Rate limits: ~1 follow per 3 min 45 s; max ~400 follows/day and
        # 5000 total. Unfollow passes should run at most every 13 days;
        # here they are run weekly.
        self.api.create_friendship(user_id)

    def unfollow(self, user_id):
        self.api.destroy_friendship(user_id)

    def check_if_follows_me(self, user_id):
        friendship = self.api.show_friendship(user_id)
        # BUG FIX: the fetched relationship was computed and discarded;
        # return it so callers can actually check it.
        return friendship
|
994,080 | 38c69294f2b3526078033496fa2599ebc7a3a746 | #!/usr/bin/env python
#_*_coding:utf-8_*_
#print "ๆถ่ดนๅ็บง้ฉฑๅจไธญๅฝ็ปๆต"
#user_input1 = input("input your name:")
#print ("user input msg:", user_input1 )
#name="daidai"
#name=input("input your girl frend name:")
#print(name)
import os

# Run `df -h` via a shell and capture everything it printed.
# NOTE(review): os.popen is a legacy API; prefer subprocess.run for new code.
cmd = os.popen('df -h').read()
print(cmd)

import sys

# Show the module search path (environment sanity check).
print(sys.path)
|
994,081 | 18db004a8a80d90a4a53a6f3617fca8ce36cc3e4 | from distutils.core import setup
import py2exe
# Build a windowed (no-console) Windows executable from __init__.py;
# zipfile=None bundles the library archive into the exe itself.
setup(windows=["__init__.py"],
      zipfile=None)
|
994,082 | 963eeecdaf3f5ab0b06a795fcb230d24ad34d164 | import textwrap
import io
from cssypy.visitors import flatteners, formatters
from cssypy import parsers, errors
from cssypy import csstokens as tokens
from cssypy.nodes.util import dump
from cssypy.nodes import *
from .. import base
class Flattener_TestCase(base.TestCaseBase):
def stylesheet_to_string(self, stylesheet):
stream = io.StringIO()
formatter = formatters.CSSFormatterVisitor(stream)
formatter.visit(stylesheet)
return stream.getvalue()
def test_simple(self):
src = u'a {}'
parser = parsers.Parser(src)
self.assertTrue(parser.match(tokens.START))
stylesheet = parser.stylesheet()
flattener = flatteners.RulesetFlattener()
stylesheet = flattener.visit(stylesheet)
expected = \
Stylesheet(
charset=None,
imports=[],
statements=[
RuleSet(
selectors=[
Selector(
children=[
SimpleSelectorSequence(
head=TypeSelector(name=u'a'),
tail=[]
)
]
),
],
statements=[]
),
]
)
self.assertEqual(dump(expected), dump(stylesheet))
def test_two_level(self):
src = u'a { b {} } '
expect = u'''\
a {}
a b {}
'''
expect = textwrap.dedent(expect)
parser = parsers.Parser(src)
self.assertTrue(parser.match(tokens.START))
stylesheet = parser.stylesheet()
flattener = flatteners.RulesetFlattener()
stylesheet = flattener.visit(stylesheet)
result = self.stylesheet_to_string(stylesheet)
self.assertEqual(expect, result)
def test_four_level(self):
src = u'a { b { c { d {} } } } '
expect = u'''\
a {}
a b {}
a b c {}
a b c d {}
'''
expect = textwrap.dedent(expect)
parser = parsers.Parser(src)
self.assertTrue(parser.match(tokens.START))
stylesheet = parser.stylesheet()
flattener = flatteners.RulesetFlattener()
stylesheet = flattener.visit(stylesheet)
result = self.stylesheet_to_string(stylesheet)
self.assertEqual(expect, result)
def test_comma_separated1(self):
src = u'''\
a, b {
c, d {}
}
'''
expect = u'''\
a, b {}
a c, b c, a d, b d {}
'''
expect = textwrap.dedent(expect)
parser = parsers.Parser(src)
self.assertTrue(parser.match(tokens.START))
stylesheet = parser.stylesheet()
flattener = flatteners.RulesetFlattener()
stylesheet = flattener.visit(stylesheet)
result = self.stylesheet_to_string(stylesheet)
self.assertEqual(expect, result)
def test_comma_separated2(self):
src = u'''\
a b, c {
d, e f {}
}
'''
expect = u'''\
a b, c {}
a b d, c d, a b e f, c e f {}
'''
expect = textwrap.dedent(expect)
parser = parsers.Parser(src)
self.assertTrue(parser.match(tokens.START))
stylesheet = parser.stylesheet()
flattener = flatteners.RulesetFlattener()
stylesheet = flattener.visit(stylesheet)
result = self.stylesheet_to_string(stylesheet)
self.assertEqual(expect, result)
def test_ancestor_selector1(self):
src = u'''\
a {
&>b {}
}
'''
expect = u'''\
a {}
a > b {}
'''
expect = textwrap.dedent(expect)
parser = parsers.Parser(src)
self.assertTrue(parser.match(tokens.START))
stylesheet = parser.stylesheet()
flattener = flatteners.RulesetFlattener()
stylesheet = flattener.visit(stylesheet)
result = self.stylesheet_to_string(stylesheet)
self.assertEqual(expect, result)
def test_ancestor_selector2(self):
    """'&' in mid-selector position is replaced by the parent selector."""
    source = u'''\
.a {
b & c {}
}
'''
    expected = textwrap.dedent(u'''\
.a {}
b .a c {}
''')
    parser = parsers.Parser(source)
    self.assertTrue(parser.match(tokens.START))
    flattened = flatteners.RulesetFlattener().visit(parser.stylesheet())
    self.assertEqual(expected, self.stylesheet_to_string(flattened))
def test_ancestor_selector3(self):
    """'&' fused with class/pseudo suffixes concatenates without a space."""
    source = u'''\
a {
&.b:c {}
}
'''
    expected = textwrap.dedent(u'''\
a {}
a.b:c {}
''')
    parser = parsers.Parser(source)
    self.assertTrue(parser.match(tokens.START))
    flattened = flatteners.RulesetFlattener().visit(parser.stylesheet())
    self.assertEqual(expected, self.stylesheet_to_string(flattened))
|
994,083 | f0674999501ad84d9d663b56fc160bcae2a77c9c | import keyboard # using module keyboard
import time
import pyautogui

# Auto-clicker: pressing INS starts rapid clicking; pressing END stops.
while True:  # main polling loop
    try:  # tolerate errors from the key polling so stray keys don't crash us
        if keyboard.is_pressed('INS'):  # start clicking on INS
            while True:
                print('You Pressed A Key!')
                pyautogui.click()
                time.sleep(0.02)  # ~50 clicks/second
                # BUG FIX: the inner loop never exited, so the END handler
                # below was unreachable once clicking had started.  Poll END
                # here too so the user can actually stop the clicker.
                if keyboard.is_pressed('END'):
                    break
        if keyboard.is_pressed('END'):
            print('Pause')
            break
    except Exception:
        # any failure in key polling terminates the program
        print('STOP!')
        break
994,084 | 7e1e050758aa29cfaf18e989a2786e074ecf9930 | """
Palindrome ("kaibun") checker exercise.

Reads a sentence and decides whether it reads the same forwards and
backwards.  The original docstring (problem statement, a list of sample
palindromes, and a work log) was written in Japanese and is mojibake-garbled
in this copy; summarised in English:

* Task: read a string and decide whether it is a palindrome.
* Approach: reverse the string and compare with the original.
* Two reversal techniques are demonstrated: slicing with ``[::-1]``
  and ``"".join(reversed(list(text)))``.
"""
# NOTE(review): the prompt and result strings below are themselves
# mojibake-encoded Japanese; they are program output and are therefore
# left byte-for-byte untouched.
text=input("ๆ็ซ ใๆใฃใฆใใ ใใใ๏ผ๏ผ")
# store the reversed sentence in kaibun (slice technique)
kaibun=text[::-1]
print(kaibun)
# the same reversal, via reversed() + join
kaibun2="".join(reversed(list(text)))
print(kaibun2)
# a palindrome is a string equal to its own reverse
if text==kaibun2:
    print("ใ",text,"ใใฏๅๆใจ่จใใพใใ")
else:
    print("ใ",text,"ใใฏๅๆใจใฏ่จใใพใใใ")
|
994,085 | 3700cd973a25f4d5cb5319e789b0a20cd9be9476 | import json
import re
import glob
from mapping import *
# brand name -> accumulated list of normalised ("pattern") product names
_BRANDS = ('samsung', 'nokia', 'apple', 'oppo', 'xiaomi', 'lg', 'huawei', 'vivo')
dict_data = {brand: [] for brand in _BRANDS}
def get_arr_path(path_folder):
    """Return the paths of all ``.json`` files directly inside *path_folder*."""
    return glob.glob(path_folder + '/*.json')
def get_name_source(path):
    """Accumulate normalised product names from *path* into the global ``dict_data``.

    Reads the product dump with ``read_file`` and normalises each product
    name with ``get_pattern_name`` (both from ``mapping``), appending the
    result to the list for the product's brand.

    NOTE(review): 'samsung' names are collected but not returned, while
    'lg' and 'huawei' are returned but never appended here — confirm this
    asymmetry is intentional.  The returned values are references to the
    shared lists inside ``dict_data``, not copies.
    """
    data_source = read_file(path)
    arr_name = []  # NOTE(review): never used
    for data in data_source:
        name = data['name']
        if name != None:
            brand = data['brand']
            pattern_name = get_pattern_name(name, brand)
            # dispatch on brand; unknown brands are silently ignored
            if brand == 'samsung':
                dict_data['samsung'].append(pattern_name)
            elif brand == 'nokia':
                dict_data['nokia'].append(pattern_name)
            elif brand == 'apple':
                dict_data['apple'].append(pattern_name)
            elif brand == 'oppo':
                dict_data['oppo'].append(pattern_name)
            elif brand == 'xiaomi':
                dict_data['xiaomi'].append(pattern_name)
            elif brand == 'vivo':
                dict_data['vivo'].append(pattern_name)
            else:
                pass
    return dict_data['nokia'], dict_data['apple'], dict_data['oppo'], dict_data['xiaomi'],dict_data['lg'], dict_data['huawei'], dict_data['vivo']
# dict_data['nokia'], dict_data['apple'], dict_data['oppo'], dict_data['xiaomi'],dict_data['lg'], dict_data['huawei'], dict_data['vivo'] = get_name_source('/home/ngocmai/CRAWL/aliscrapy/aliscrapy/data_b4/lazada.json')
# print(dict_data['apple'])
# candidate memory configurations used to expand each pattern name below
set_ram = ['512mb', '256mb', '1gb', '2gb', '3gb', '4gb', '6gb', '8gb']
set_rom = ['32gb', '64gb', '128gb', '256gb']
arr_brand = ['samsung', 'nokia', 'apple', 'oppo', 'vivo', 'xiaomi', 'lg', 'huawei']
source = ['lazada', 'fptshop', 'the']
over_arr_name = []
arr_path = get_arr_path('/home/ngocmai/CRAWL/aliscrapy/aliscrapy/data_b4')
# Accumulate and de-duplicate pattern names across every dump file.
# NOTE(review): get_name_source returns the very lists stored in dict_data,
# so each `dict_data[b] += dict_data[b]` just doubles a list in place before
# the set() de-duplication — redundant but harmless; confirm intent.
for path in arr_path:
    dict_data['nokia'], dict_data['apple'], dict_data['oppo'], dict_data['xiaomi'],dict_data['lg'], dict_data['huawei'], dict_data['vivo'] = get_name_source(path)
    dict_data['nokia'] += dict_data['nokia']
    dict_data['nokia'] = list(set(dict_data['nokia']))
    dict_data['apple'] += dict_data['apple']
    dict_data['apple'] = list(set(dict_data['apple']))
    dict_data['oppo'] += dict_data['oppo']
    dict_data['oppo'] = list(set(dict_data['oppo']))
    dict_data['xiaomi'] += dict_data['xiaomi']
    dict_data['xiaomi'] = list(set(dict_data['xiaomi']))
    dict_data['lg'] += dict_data['lg']
    dict_data['lg'] = list(set(dict_data['lg']))
    dict_data['huawei'] += dict_data['huawei']
    dict_data['huawei'] = list(set(dict_data['huawei']))
    dict_data['vivo'] += dict_data['vivo']
    dict_data['vivo'] = list(set(dict_data['vivo']))
# for brand in arr_brand:
#     dict_data[brand] += dict_data[brand]
#     dict_data[brand] = list(set(dict_data[brand]))
# print(lensamsung(dict_data['samsung']))
# each brand: expand every pattern name with rom/ram combinations
pattern_dict = {}
for brand in arr_brand:
    pattern_dict[brand] = []
    for name in dict_data[brand]:
        product = {}
        product['pattern_name'] = name
        # NOTE(review): `product` is a single shared dict — every append below
        # stores the SAME object, so after the loops all appended entries hold
        # the last rom/ram pair.  If one entry per combination was intended,
        # a per-append copy (product.copy()) is needed.
        for rom in set_rom:
            product['rom'] = rom
            for ram in set_ram:
                product['ram'] = ram
                pattern_dict[brand].append(product)
# print(len(pattern_dict['nokia']))
pattern_lazada = mapping('aliscrapy/data_b4/tgdd.json')
# # print(pattern_lazada)
# partern_tgdd = mapping('aliscrapy/data_b4/tgdd.json')
# # print(partern_tgdd)
def compare(dict_product_a, dict_product_b):
    """Print a notice when two products share a pattern name (and, further, a rom)."""
    if dict_product_a['pattern_name'] != dict_product_b['pattern_name']:
        return
    print("trung ten", dict_product_a, "\n", dict_product_b)
    if dict_product_a['rom'] == dict_product_b['rom']:
        print("trung", dict_product_a, "\n", dict_product_b)
# Cross-check every normalised samsung product from the tgdd dump against
# every generated samsung pattern candidate (prints matches only).
for product_a in pattern_lazada['samsung']:
    for product_b in pattern_dict['samsung']:
        compare(product_a, product_b)
|
994,086 | d0b946c27b793097a73f9feff4ada3a79f4d78ee | from django.dispatch import receiver
from allauth.account.signals import user_signed_up
from allauth.socialaccount.signals import social_account_added, social_account_updated, social_account_removed
from allauth.socialaccount.models import SocialAccount, SocialToken
from .models import Profile
import re
@receiver(user_signed_up)
def signed_up(user, **kwargs):
    """On signup via Google, link the matching iniad.org Profile to the new user.

    Only acts for Google accounts in the ``iniad.org`` hosted domain whose
    email matches the student-id pattern; the user is saved in every case.
    """
    social_info = SocialAccount.objects.filter(user=user)[0]
    # kept although unused: [0] raises IndexError when no token exists,
    # which surfaces mis-configured social logins early
    social_token = SocialToken.objects.filter(account=social_info)[0]
    if social_info.provider == "google":
        # BUG FIX: personal Google accounts have no "hd" key in extra_data,
        # so extra_data["hd"] raised KeyError; .get() makes the check safe.
        if social_info.extra_data.get("hd") == "iniad.org":
            match = re.match(r"s1f10(([0-9]{2})[0-9]{4})[0-9]{1}@iniad\.org", social_info.extra_data["email"])
            if match:
                try:
                    profile = Profile.objects.get(email=social_info.extra_data["email"])
                    profile.user = user
                    profile.save()
                except Profile.DoesNotExist:
                    # no pre-provisioned profile for this address
                    user.profile = None
    print(user)  # NOTE(review): debug print — consider logging instead
    user.save()
@receiver(social_account_added)
def account_added(request, sociallogin, **kwargs):
    """When a social account is linked, attach the matching iniad.org Profile.

    Mirrors :func:`signed_up` but receives the account via *sociallogin*.
    """
    user = request.user
    social_info = sociallogin.account
    social_token = sociallogin.token
    if social_info.provider == "google":
        # BUG FIX: personal Google accounts have no "hd" key in extra_data,
        # so extra_data["hd"] raised KeyError; .get() makes the check safe.
        if social_info.extra_data.get("hd") == "iniad.org":
            match = re.match(r"s1f10(([0-9]{2})[0-9]{4})[0-9]{1}@iniad\.org", social_info.extra_data["email"])
            if match:
                try:
                    profile = Profile.objects.get(email=social_info.extra_data["email"])
                    profile.user = user
                    profile.save()
                except Profile.DoesNotExist:
                    # no pre-provisioned profile for this address
                    user.profile = None
    print(user)  # NOTE(review): debug print — consider logging instead
    user.save()
994,087 | aa1bbedee76caa7627712ea60c662f8be24f87a1 | from django.conf.urls import url
from django.contrib import admin
from chatbot.views import ChatBot, backup_chatbot
# Chatbot endpoints.  The regexes are prefix matches (no trailing anchor),
# so any path beginning with these words routes to the corresponding view.
urlpatterns = [
    url(r'^subscribe', ChatBot.subscribe),
    url(r'^unsubscribe', ChatBot.unsubscribe),
    url(r'^backup', backup_chatbot),
]
|
994,088 | be8cba5ef5e3f7d152302304c7946d368029aa08 | from pyqtgraph import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
import matplotlib.pyplot as plt
import sys
import ipdb
class Workersignal(QtCore.QObject):
    """Signal container for Worker (QRunnable itself cannot declare signals)."""
    finished = QtCore.pyqtSignal()    # NOTE(review): declared but never emitted
    error = QtCore.pyqtSignal(tuple)  # (code, exception value, formatted traceback)
    result = QtCore.pyqtSignal(tuple) # payload returned by the worker fn
    #prorgress = QtCore.pyqtSignal(tuple)
class Worker(QtCore.QRunnable):
    """QRunnable that executes ``fn(*args, **kwargs)`` and reports via signals."""

    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = Workersignal()
        #self.kwargs['progress_callback'] = [0,0]

    @QtCore.pyqtSlot()
    def run(self):
        """Run the wrapped callable; emit ``result`` on success, ``error`` on failure."""
        import traceback  # BUG FIX: traceback was used below but never imported
        try:
            result = self.fn(*self.args, **self.kwargs)
        except Exception:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            # BUG FIX: original emitted on the non-existent attribute
            # ``self.signal``; the container is ``self.signals``.
            self.signals.error.emit((0, value, traceback.format_exc()))
        else:
            self.signals.result.emit(result)
class map_sim(QtGui.QMainWindow):
    """Animated map: streams simulated (x, y, value) samples into an image.

    img_size=[xlen, ylen] is the image size in pixels.
    real_size=[x_size, y_size] are the real-world extents mapped onto the
    image; coordinates live in ((-x_size/2, x_size/2), (-y_size/2, y_size/2)).
    """

    def __init__(self, img_size=(1000, 1000), real_size=(22, 22)):
        # defaults changed from lists to tuples: never mutated, and tuple
        # defaults avoid the shared-mutable-default pitfall
        super(map_sim, self).__init__()
        # --- simulation variables ---
        undersamp = 2  # keep every 2nd sample of the recorded position traces
        self.index = 0
        self.x_pos = np.loadtxt('x_val')[::undersamp]
        self.y_pos = np.loadtxt('y_val')[::undersamp]
        # --- image geometry ---
        self.img_size = img_size
        self.real_size = real_size
        self.img = np.zeros(img_size)
        self.dx = 1.*real_size[0]/img_size[0]  # real units per pixel (x)
        self.dy = 1.*real_size[1]/img_size[1]  # real units per pixel (y)
        # --- plot window ---
        self.win = pg.GraphicsWindow()
        self.win.setWindowTitle('scatt anim')
        self.win.show()
        self.plot1 = self.win.addPlot()
        self.plot1.disableAutoRange()
        self.plot1.setRange(xRange=[0, self.img_size[0]], yRange=[0, self.img_size[1]])
        self.img1 = pg.ImageItem()
        self.plot1.addItem(self.img1)
        self.lut = plt.get_cmap('viridis').colors
        # single worker thread so samples are written in order
        self.threadpool = QtCore.QThreadPool(parent=self.win)
        self.threadpool.setMaxThreadCount(1)
        self.update()

    def img_loc(self, x_pos, y_pos):
        """Map a real-world (x, y) position to integer image indices."""
        x_ind = int((x_pos + 1.*self.real_size[0]/2)/self.dx)
        # BUG FIX: original computed ``y_pos + 1. + self.real_size[1]/2``,
        # shifting y by one real-world unit; symmetric with x it must scale
        # with ``1.*`` instead of adding ``1.``.
        y_ind = int((y_pos + 1.*self.real_size[1]/2)/self.dy)
        return (x_ind, y_ind)

    def update(self):
        """Queue the next simulated sample on the worker pool and re-arm the timer."""
        if self.index > 16000:
            self.index = 0
        self.index = self.index + 1
        QtGui.QApplication.processEvents()
        worker = Worker(self.ex_function, self.index)
        worker.signals.result.connect(self.plot_data)
        self.threadpool.start(worker)
        timer = QtCore.QTimer(parent=self.win)
        timer.timeout.connect(self.update)
        timer.setSingleShot(True)
        timer.start(1)

    def ex_function(self, *args):
        """Simulated measurement: position from the file traces, random value."""
        data_x = self.x_pos[args[0]]
        data_y = self.y_pos[args[0]]
        val = np.random.random()*100
        return ([data_x, data_y], val)

    def plot_data(self, *args):
        """Write one ([x, y], value) sample into the image and refresh the display."""
        [x, y] = self.img_loc(args[0][0][0], args[0][0][1])
        self.img[x, y] = args[0][1]
        self.img1.setImage(self.img, lut=self.lut)
if __name__ == '__main__':
    # start the Qt event loop and run the animation until the window closes
    app = QtGui.QApplication([])
    win = map_sim()
    app.exec_()
|
994,089 | 936ed60576bc7343fb9daefaa638ace5d25ab5cb | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.tensorflow.org/get_started/mnist/beginners
# https://codelabs.developers.google.com/codelabs/cloud-tensorflow-mnist/
#
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
tf.set_random_seed(0)
# data
mnist = read_data_sets("../MNIST-data/", one_hot=True)
print("MNIST data ready for analysis!\n") # get data ready
batch_size = 100 # how many imgs in each batch?
# model
# neural network with 6 layers of 400,200,100,50,25,10 neurons, respectively
# this is similar with mnist_1layer.py, only that a few sigmoid layers added
# UPDATE:
# 1 use `relu` as transfer function, instead of sigmoid
# 2 learning rate decay
# 3 neuron dropouts
nn0,nn1,nn2,nn3,nn4,nn5,nn6 = 784,400,200,100,50,25,10 # neuroNumber each layer
x = tf.placeholder(tf.float32, [None, nn0]) # for inputing imgs, be of batchSize
transFunc = tf.nn.relu # tf.nn.sigmoid
# Probability of keeping a node during dropout
# =1 at test time (no dropout) and ~0.7 at training time.
pkeep = tf.placeholder(tf.float32)
# each hidden layer: truncated-normal weights, small positive biases (good
# for relu), transfer function, then dropout on the activations
W1 = tf.Variable(tf.truncated_normal([nn0, nn1], stddev=0.1)) #784=28*28, inputs
b1 = tf.Variable(tf.zeros([nn1])+0.1)
y1 = transFunc(tf.matmul(x, W1) + b1)
y1d = tf.nn.dropout(y1, pkeep)
W2 = tf.Variable(tf.truncated_normal([nn1, nn2], stddev=0.1))
b2 = tf.Variable(tf.zeros([nn2])+0.1)
y2 = transFunc(tf.matmul(y1d, W2) + b2)
y2d = tf.nn.dropout(y2, pkeep)
W3 = tf.Variable(tf.truncated_normal([nn2, nn3], stddev=0.1))
b3 = tf.Variable(tf.zeros([nn3])+0.1)
y3 = transFunc(tf.matmul(y2d, W3) + b3)
y3d = tf.nn.dropout(y3, pkeep)
W4 = tf.Variable(tf.truncated_normal([nn3, nn4], stddev=0.1))
b4 = tf.Variable(tf.zeros([nn4])+0.1)
y4 = transFunc(tf.matmul(y3d, W4) + b4)
y4d = tf.nn.dropout(y4, pkeep)
W5 = tf.Variable(tf.truncated_normal([nn4, nn5], stddev=0.1))
b5 = tf.Variable(tf.zeros([nn5])+0.1)
y5 = transFunc(tf.matmul(y4d, W5) + b5)
y5d = tf.nn.dropout(y5, pkeep)
# output layer: raw logits kept separate for the numerically stable loss
W6 = tf.Variable(tf.truncated_normal([nn5, nn6], stddev=0.1))
b6 = tf.Variable(tf.zeros([nn6])+0.1)
yLogits = tf.matmul(y5d, W6) + b6
y = tf.nn.softmax(yLogits)
y_ = tf.placeholder(tf.float32, [None, nn6]) # will be loaded in sess.run()
# loss
# cross-entropy loss function (= -sum(Y_i * log(Yi)) )
# normalised for batches of 100 images
# use softmax_cross_entropy_with_logits to avoid numerical stability problems,
# ie, log(0) is NaN
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits\
    (logits=yLogits, labels=y_))*100
# training
# learning rate decay
max_learning_rate = 0.003
min_learning_rate = 0.0001
decay_speed = 2000.0 # 0.003-0.0001-2000=>0.9826 done in 5000 iterations
# variable learning rate
lr = tf.placeholder(tf.float32)
# the optimizer
train_stepper = tf.train.AdamOptimizer(lr).minimize(loss)
# initializer
init = tf.global_variables_initializer() # note the version problem
# evaluation
# arg_max : the entry with the highest probability is our prediction
# NOTE(review): tf.arg_max is deprecated in favour of tf.argmax
if_prediction_correct = tf.equal(tf.arg_max(y, 1), tf.arg_max(y_, 1)) # T,F,T...
accuracy = tf.reduce_mean(tf.cast(if_prediction_correct, tf.float32)) # 1,0,1...
import math  # BUG FIX: math.exp is used below but math was never imported

with tf.Session() as sess:
    sess.run(init)
    # training
    for i in range(10000): # train_step_number
        # exponentially decay the learning rate towards min_learning_rate
        lerate = min_learning_rate + \
            (max_learning_rate-min_learning_rate) * math.exp(-i/decay_speed)
        batch_xs, batch_ys = mnist.train.next_batch(batch_size) # load & train:
        sess.run(train_stepper, {x:batch_xs, y_:batch_ys, pkeep:0.7, lr:lerate})
        if (i % 1000) == 0: print(i)
    # evaluate once on the full test set with dropout disabled (pkeep=1)
    print("Accuarcy on Test-dataset: ", sess.run(accuracy, \
        feed_dict={x:mnist.test.images, y_:mnist.test.labels, pkeep:1}))
    print("\nDone.") # all done
|
994,090 | 0b363fd11a45acb0b0db9765fb2ce5d36972be15 | # -*- coding: future_fstrings -*-
# Copyright 2018 Brandon Shelley. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, print_function, absolute_import
from builtins import *
import os
import time
from multiprocessing import Process, Pipe
import pytest
import fylmlib.config as config
import fylmlib.operations as ops
import conftest
import make
# Canonical source/destination fixture paths used by every test below.
src = os.path.join(
    conftest.films_src_path,
    'Rogue.One.A.Star.Wars.Story.2016.PROPER.1080p.BluRay.DTS.x264-DON/Rogue.One.A.Star.Wars.Story.2016.PROPER.1080p.BluRay.DTS.x264-DON.mkv')
dst = os.path.join(
    conftest.films_dst_paths['1080p'],
    'Rogue One - A Star Wars Story (2016)/Rogue One - A Star Wars Story (2016) Bluray-1080p Proper.mkv')
# Mock file sizes: a "small" and a "big" variant to drive upgrade decisions.
sm_size = 5354 * make.mb
big_size = 7354 * make.mb
def async_safe_copy(conn, *args):
    """Run ops.fileops.safe_move(*args) in a child process, sending the result over *conn*."""
    outcome = ops.fileops.safe_move(*args)
    conn.send(outcome)
    conn.close()
# @pytest.mark.skip()
class TestMove(object):
    """Exercises ``ops.fileops.safe_move`` across copy/overwrite/test-mode configs.

    Each test resets the sandbox directories, creates mock source/destination
    files of controlled sizes, pins the relevant config flags (asserting each
    assignment took effect), then checks safe_move's return value and which
    files remain on disk.
    """

    def test_basic_move(self):
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        # Reset config
        conftest._setup()
        config.interactive = False
        assert(config.interactive is False)
        make.make_mock_file(src, big_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        move = ops.fileops.safe_move(src, dst)
        assert(move is True)
        assert(not os.path.exists(src))
        assert( os.path.exists(dst))

    def test_dst_exists_upgrade_smaller(self):
        # a bigger source should replace a smaller destination
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, big_size)
        make.make_mock_file(dst, sm_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        config.duplicates.force_overwrite = False
        assert(config.duplicates.force_overwrite is False)
        # Pass ok_to_upgrade here forcibly, because app logic doesn't determine upgrade eligibility
        move = ops.fileops.safe_move(src, dst, True)
        assert(move is True)
        assert(not os.path.exists(src))
        assert( os.path.exists(dst))

    def test_dst_exists_dont_replace_bigger_overwrite_off(self):
        # a smaller source must NOT replace a bigger destination by default
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, sm_size)
        make.make_mock_file(dst, big_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        config.duplicates.force_overwrite = False
        assert(config.duplicates.force_overwrite is False)
        move = ops.fileops.safe_move(src, dst)
        assert(move is False)
        assert(os.path.exists(src))
        assert(os.path.exists(dst))

    def test_dst_exists_replace_bigger_overwrite_on(self):
        # force_overwrite=True lets even a smaller source replace the destination
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, sm_size)
        make.make_mock_file(dst, big_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        config.duplicates.force_overwrite = True
        assert(config.duplicates.force_overwrite is True)
        move = ops.fileops.safe_move(src, dst)
        assert(move is True)
        assert(not os.path.exists(src))
        assert(os.path.exists(dst))

    def test_dst_exists_dont_replace_identical_overwrite_off(self):
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, big_size)
        make.make_mock_file(dst, big_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        config.duplicates.force_overwrite = False
        assert(config.duplicates.force_overwrite is False)
        move = ops.fileops.safe_move(src, dst)
        assert(move is False)
        assert(os.path.exists(src))
        assert(os.path.exists(dst))

    def test_dst_exists_replace_identical_overwrite_on(self):
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, big_size)
        make.make_mock_file(dst, big_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        config.duplicates.force_overwrite = True
        assert(config.duplicates.force_overwrite is True)
        move = ops.fileops.safe_move(src, dst)
        assert(move is True)
        assert(not os.path.exists(src))
        assert( os.path.exists(dst))

    def test_test_enabled(self):
        # test mode reports success but must not touch the filesystem
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, big_size)
        assert(os.path.exists(src))
        assert(not os.path.exists(dst))
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = True
        assert(config.test is True)
        move = ops.fileops.safe_move(src, dst)
        assert(move is True)
        assert(os.path.exists(src))
        assert(not os.path.exists(dst))

    def test_src_and_dst_are_same_dir(self):
        # moving a file onto itself is a no-op failure
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        make.make_mock_file(src, big_size)
        config.safe_copy = False
        assert(config.safe_copy is False)
        config.test = False
        assert(config.test is False)
        move = ops.fileops.safe_move(src, src)
        assert(move is False)
        assert(os.path.exists(src))

    def test_safe_copy(self):
        # safe copy writes to "<dst>.partial~" first; verify the partial
        # exists mid-copy by running the move in a separate process
        big_size = 75 * make.mb
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        # Need to make sure this file is sufficiently big
        make.make_mock_file(src, big_size * (1024 if os.environ.get('TRAVIS') is not None else 1))
        config.test = False
        assert(config.test is False)
        config.safe_copy = True
        assert(config.safe_copy is True)
        parent_conn, child_conn = Pipe()
        p = Process(target=async_safe_copy, args=(child_conn, src, dst,))
        p.start()
        # This is a bit of a hack, but this test requires the file to sufficiently large enough
        # to check that the partial exists before the thread finishes, but it also can't start
        # too soon, so we sleep for 0.1s.
        time.sleep(0.1)
        assert(os.path.exists(f'{dst}.partial~'))
        copy = parent_conn.recv()
        p.join()
        assert(copy is True)
        assert(not os.path.exists(src))
        assert(os.path.exists(dst))
        # Disable safe copy
        config.safe_copy = False
        assert(config.safe_copy is False)

    @pytest.mark.xfail(raises=OSError)
    def test_not_path_exists(self):
        # moving a non-existent source must raise OSError
        conftest.cleanup_all()
        conftest.make_empty_dirs()
        src = '_DOES_NOT_EXIST_'
        dst = '_DOES_NOT_MATTER_'
        move = ops.fileops.safe_move(src, dst)
        assert(move is False)
|
994,091 | c0e6137b0442190edd871e16a12773e813928252 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 11:04:38 2017
@author: HonkyT
"""
#from IPython import get_ipython
#get_ipython().magic('reset -sf')
#
#from numpy import fft # om man skriver sรฅhรคr begรถver man itne anvรคnda np.
# men eftersom jag behรถver fler saker frรฅn nupy och jag redan skrivit np pรฅ alla gรถr jag inte det.
import numpy as np
from numpy import fft
from scipy import signal
from scipy.signal import fftconvolve
import matplotlib.pyplot as plt
from create3Dgaussian import create3Dgaussian #import all functions
from scipy import misc
from math import ceil
def crystal(shape=(201, 201, 201), block=((60, 140), (80, 120), (90, 110))):
    """Simulate the far-field diffraction pattern of a cuboid crystal.

    A binary cuboid spanning the per-axis index ranges in *block* is placed
    in a zero volume of size *shape*; the diffraction pattern is |FFT|^2,
    fft-shifted so the DC term sits at the centre of the volume.

    The defaults reproduce the original hard-coded 201^3 simulation, so
    existing no-argument callers are unaffected.
    """
    crystal3D = np.zeros(shape, dtype=np.int32)
    (r0, r1), (c0, c1), (t0, t1) = block
    # vectorised equivalent of the original step-1 triple loop
    crystal3D[r0:r1, c0:c1, t0:t1] = 1
    # (the original also pre-allocated a complex volume that was immediately
    # overwritten; that dead allocation is removed here)
    crystal3D_fourier = fft.fftshift(fft.fftn(crystal3D))
    diffPattern3D = abs(crystal3D_fourier) ** 2
    return diffPattern3D
# build the simulated diffraction pattern once at import time
diffPattern3D = crystal()
def shrinkwrap3D(diffPattern3D):
    """Phase retrieval (HIO + Shrinkwrap) on a 3-D diffraction pattern.

    Iteratively recovers the real-space object ``g`` from the measured
    Fourier modulus ``sqrt(diffPattern3D)``, updating the support mask every
    20th iteration by thresholding ``|g|`` convolved with a slowly shrinking
    Gaussian.  Returns the reconstructed complex object ``g`` and shows a
    central cut of it.  (Comments translated to English from the original
    Swedish notes.)
    """
    absF = np.zeros((diffPattern3D.shape[0], diffPattern3D.shape[1], diffPattern3D.shape[2]),dtype=np.complex64)
    #gprime = np.zeros((625, ysize, xsize),dtype=np.complex64)
    #G = np.zeros((625, ysize, xsize),dtype=np.complex64)
    #Gprime = np.zeros((625, ysize, xsize),dtype=np.complex64)
    # measured Fourier modulus |F| (the pre-allocation above is overwritten)
    absF = pow(diffPattern3D,0.5)
    # parameters
    beta = 0.9 # HIO feedback parameter
    thresholdMask1 = 0.04 # Initial threshold for mask
    thresholdMask2 = 0.2 # threshold for mask
    sigma = 3 # initial value of gaussian width
    # NOTE(review): only axis 0 is used as the Gaussian centre for all three
    # axes — confirm this is fine for non-cubic volumes (possible rounding too)
    c = absF.shape[0]/2 # center of Gaussian
    n = 39 #number of iterations
    # initialize vector for error calculation
    #err2 = np.zeros(shape=(1,n))
    #g = np.zeros((absF.shape[0], absF.shape[1], absF.shape[2]),dtype=np.complex64)
    # g represents the unknown object. Create initial guess for g using
    # the autocorrelation function of the object. Start by Fourier
    # transform the diffraction pattern:
    g = np.complex64(fft.fftshift(fft.ifftn(absF)))
    # create the autocorrelation function of g
    # NOTE(review): normalisation divides by the first two axis lengths only —
    # confirm intent for 3-D volumes
    g = abs(fft.fftshift(fft.ifftn(fft.fftn(g)*np.conj(fft.fftn(g)))))/(g.shape[0]*g.shape[1])
    # create a logical matrix that is the Shrinkwrap support
    support = g > thresholdMask1*g.max()
    # define iteration counter
    k = 0
    # Start of iterations
    while k<n:
        # every 20th iteration, update the support
        if k%20==0:
            # call function create3Dgaussian
            gauss3D = create3Dgaussian(sigma,c,absF.shape[0],absF.shape[1],absF.shape[2])
            # smooth the magnitude of the current object estimate with the Gaussian
            support = fftconvolve(gauss3D,abs(g),'same')
            # Create a logical matrix that is the Shrinkwrap support
            support = support > thresholdMask2*support.max()
            # reduce sigma with 1% every 20th iteration until a limit of 1.5
            if sigma >= 1.5:
                sigma = sigma*0.99
        # STEP 1: Fourier transform of g(x), G(u) :
        G = np.complex64(fft.fftshift(fft.fftn(g)))
        # STEP 2: make |G| confirm with |F|
        Gprime = np.complex64(absF*np.exp(1j*np.angle(G)))
        # STEP 3: inverse Fourier transform
        gprime = fft.ifftn(fft.ifftshift(Gprime))
        # STEP 4: See to that gprime satisfies its constraints:
        # create inverse of the support (computational aid)
        support_inv = np.logical_not(support)
        # HIO update: keep gprime inside the support, feed back outside it
        gprime = gprime*support + g*support_inv - beta*gprime*support_inv
        # (an equivalent element-wise loop formulation was kept commented in
        # the original; the vectorised line above performs the same update)
        # overwrite g with result from iteration
        g = np.complex64(gprime)
        # set all negative values of g to zero
        g[g<0] = 0
        #err2[0][k] = sum(sum( (abs(abs(fft.fftshift(fft.fftn(((g*np.conj(g))))))) - absF)**2)) / sum(sum(absF**2))
        np.disp(k)
        k = k+1
    plt.figure()
    plt.imshow(abs(g[100,:,:]), cmap='gray')
    plt.title('reconstructed object [100,:,:]')
    return g
def plotReconstruction():
    """Show four orthogonal cuts through the reconstructed object |g|."""
    plt.figure()
    panels = [
        (221, abs(g[:, :, 100]), 'xyplane cut of 3Dcrystal z=100'),
        (222, abs(g[100, :, :]), 'xz cut. y=100'),
        (223, abs(g[:, 100, :]), 'yz Dcrystal x=100'),
        (224, abs(g[:, :, 100]), 'xyplane z=zmiddle'),
    ]
    for position, cut, caption in panels:
        plt.subplot(position)
        plt.imshow(cut, cmap='gray')
        plt.title(caption)
        plt.xlabel(' x')
        plt.ylabel(' y')
        plt.axis('off')
#plotReconstruction()
def plotOriginal():
    """Show four orthogonal cuts through the original binary crystal."""
    plt.figure()
    panels = [
        (221, crystal3D[:, :, 100], 'xyplane cut of 3Dcrystal z=100'),
        (222, crystal3D[100, :, :], 'xz cut. y=100'),
        (223, crystal3D[:, 100, :], 'yz Dcrystal x=100'),
        (224, crystal3D[:, :, int(0.5*crystal3D.shape[2])], 'xyplane z=zmiddle'),
    ]
    for position, cut, caption in panels:
        plt.subplot(position)
        plt.imshow(cut, cmap='gray')
        plt.title(caption)
        plt.xlabel(' x')
        plt.ylabel(' y')
        plt.axis('off')
#plotOriginal()
#plt.figure()
#plt.imshow(crystal3D, cmap='gray')
#plt.title('original object')
#############Test att anvรคnda bild som รคr skjuvad coord
#theta = 0*np.pi / 180 #rad
#r3 = 1 + 1/np.cos(theta) #antal pixlar som motsvarar 1 pixel i xled i xz systemet
#r1 = 1 + 1/ np.sin(theta)
#
#image_skewed = np.zeros((image.shape[0], ceil((image.shape[1]/np.cos(theta)))+ 50 ))
#
#for i in range(0,image.shape[0]):
# for j in range(0,image.shape[1]):
# xs = ceil(j / (1 + np.tan(theta)**2) + i*np.tan(theta)/ ( 1 + np.tan(theta)**2) )
# #np.disp(xs)
# ys = i
# image_skewed[ys,xs] = image[i,j]
# #image_skewed =
#
## cut ot hte skewed image so that it is ccentered
#image_skewed = image_skewed[:,0:120] # Hamnar rรคtt om man klipper den rรคtt
#image_skewed = image_skewed/image_skewed.max()
#fft_image_skewed = abs(np.fft.fftshift(np.fft.fft2(image_skewed)))
#absF = fft_image_skewed
#fattar inte hur man skriver det som function pรฅ ett smidigt sรคtt.
# gรฅr ju inte att kolla pรฅ variablerna om man kรถr som funktion
#def Shrinkwrap(diffPattern):
# the intensity of the diffraction pattern is F^2
|
994,092 | 53b9d39b2c599bff4b2bf289d4fc5ff9e91dfa51 | from django.shortcuts import render
import django.contrib.auth.views as authviews
from django.conf import settings
from urllib.parse import quote
def login(request):
    """Render the login page listing the configured OAuth providers.

    Providers are sorted by key for a stable display order; the optional
    ``next`` query parameter is URL-quoted for safe re-embedding.
    """
    providers = sorted(settings.OAUTH.items())
    context = {
        'oauth_providers': list(providers),
        'next': quote(request.GET.get('next', '')),
    }
    return render(request, 'oauthlogin/login.html', context)
def logout(request):
    """Log the current user out and redirect to the login page (next URL '/')."""
    return authviews.logout_then_login(request, login_url='/')
|
994,093 | 8e3f87b38c380608e4eb26dcb21ce0bf72ac4b81 | import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
# Demonstrate elementwise arithmetic on the GPU with pycuda.gpuarray.
a_gpu = gpuarray.to_gpu(np.random.randn(5, 5).astype(np.float32))
a_doubled = (2 * a_gpu).get()
print("ORIGINAL")
# BUG FIX: the original printed the doubled host copy under "ORIGINAL" and
# the untouched GPU array under "DOUBLED" — the labels and values were swapped.
print(a_gpu)
print("DOUBLED MATRIX AFTER PyCUDA EXECUTION USING GPUARRAY CALL")
print(a_doubled)
994,094 | f9e8e68356260e2b928bff58e7c0eb7f9687f15a | # Benchmarking Suite
# Copyright 2014-2017 Engineering Ingegneria Informatica S.p.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Developed in the ARTIST EU project (www.artist-project.eu) and in the
# CloudPerfect EU project (https://cloudperfect.eu/)
import configparser
import json
import os
import sys
import uuid
from abc import ABC, abstractmethod
from benchsuite.core.model.exception import ControllerConfigurationException
from benchsuite.core.model.execution import ExecutionEnvironmentRequest, ExecutionEnvironment
class ServiceProvider(ABC):
    """Abstract base class for benchmark target service providers."""

    @abstractmethod
    def __init__(self, name, service_type):
        # every provider instance gets a fresh random identity
        self.id = str(uuid.uuid4())
        self.name = name
        self.service_type = service_type

    @abstractmethod
    def get_execution_environment(self, request: ExecutionEnvironmentRequest) -> ExecutionEnvironment:
        """Return an ExecutionEnvironment for *request*."""
        pass

    @abstractmethod
    def destroy_service(self):
        """Tear down whatever this provider created for the service."""
        pass

    @staticmethod
    @abstractmethod
    def load_from_config_file(config, service_type):
        """Build a provider instance from a parsed configuration."""
        pass

    @abstractmethod
    def get_provider_properties_dict(self):
        """This is used to create the dictionary to store in the MongoDB"""
        pass
def load_service_provider_from_config_file(config_file, service_type=None) -> ServiceProvider:
    """Load a ServiceProvider from a JSON or INI configuration file.

    The file is first interpreted as JSON; if that fails with a ValueError,
    it is re-read as plain INI text.
    """
    if not os.path.isfile(config_file):
        raise ControllerConfigurationException(
            'Config file {0} does not exist'.format(config_file))
    config = configparser.ConfigParser()
    try:
        with open(config_file) as f:
            config.read_dict(json.load(f))
    except ValueError:
        # not JSON — fall back to INI parsing on a fresh parser
        config = configparser.ConfigParser()
        config.read(config_file)
    return load_provider_from_config(config, service_type)
def load_provider_from_config_string(config_string, service_type=None):
    """Load a ServiceProvider from a configuration given as a string.

    The string is first interpreted as JSON; if that fails it is parsed as
    INI text.
    """
    try:
        config = configparser.ConfigParser()
        # BUG FIX: json.load expects a file object; for a str it raised
        # AttributeError, which is not a ValueError and therefore skipped the
        # INI fallback entirely.  json.loads parses the string directly (its
        # JSONDecodeError subclasses ValueError, so the fallback still works).
        config.read_dict(json.loads(config_string))
    except ValueError:
        config = configparser.ConfigParser()
        config.read_string(config_string)
    return load_provider_from_config(config, service_type)
def load_provider_from_config(config, service_type=None):
    """Instantiate the provider class named in ``config['provider']['class']``.

    When *service_type* is omitted, it is inferred from the single section of
    the configuration other than 'DEFAULT' and 'provider'; more than one such
    section is an error.
    """
    module_name, class_name = config['provider']['class'].rsplit('.', 1)
    __import__(module_name)
    clazz = getattr(sys.modules[module_name], class_name)
    if not service_type:
        # if there is only one section (other then "provider") in the configuration, use that as service provider,
        # otherwise rise an excepction
        candidates = [s for s in list(config.keys()) if s != 'DEFAULT' and s != 'provider']
        if len(candidates) > 1:
            raise ControllerConfigurationException('Service Type not specified and multiple sections found in the configuration')
        else:
            service_type = candidates[0]
    return clazz.load_from_config_file(config, service_type)
import abjad
import baca
from abjadext import rmakers
from mraz import library
#########################################################################################
########################################### 04 ##########################################
#########################################################################################
def make_empty_score(first_measure_number, previous_persistent_indicators):
score = library.make_empty_score()
accumulator = library.Accumulator(score)
section_4 = library.moment_4()
section_5 = library.moment_5()
@baca.call
def block():
collections = section_4.stage_1.rh[:1]
assert library.foo(collections) == ["PC<3, 1, 0, 10>"]
tuplet = baca.from_collection(collections[0], [2, -4, 2, -4, 4], 16, 2)
# 1
accumulator(
library.rh_v3,
[tuplet],
tsd=4,
)
baca.dynamic(baca.select.phead(tuplet, 0), "ff")
baca.register(tuplet, -12, 12)
baca.stem_up(baca.select.pleaves(tuplet))
baca.text_script_up(tuplet)
baca.tuplet_bracket_up(tuplet)
baca.tuplet_bracket_staff_padding(tuplet, 3)
baca.label_figure(tuplet, "4.1.R.1", accumulator)
@baca.call
def block():
collections = section_4.stage_6.rh[1:2]
assert library.foo(collections) == ["{-3, 7, 8, 11, 13, 17, 27, 36}"]
tuplet = baca.from_collection(collections[0], [12], 16)
# 2
accumulator(
library.rh_v2,
[tuplet],
tsd=4,
)
baca.dynamic(baca.select.phead(tuplet, 0), "ff")
baca.marcato(baca.select.pheads(tuplet))
baca.script_up(tuplet)
baca.up_arpeggio(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.6.R.2", accumulator, abjad.UP)
@baca.call
def block():
collections = section_4.stage_1.rh[1:2]
assert library.foo(collections) == ["PC<3, 1, 0, 10>"]
tuplet = baca.from_collection(collections[0], [-4, 2, -4, 4], 16, -4)
# 3
accumulator(
library.rh_v3,
[tuplet],
tsd=4,
)
baca.register(tuplet, 12, -12)
baca.stem_up(baca.select.pleaves(tuplet))
baca.tuplet_bracket_up(tuplet)
baca.tuplet_bracket_staff_padding(tuplet, 4)
baca.label_figure(tuplet, "4.1.R.2", accumulator, abjad.UP)
@baca.call
def block():
collections = section_5.stage_2.lh[:1]
assert library.foo(collections) == ["PC<11, 3, 1>"]
tuplet = baca.from_collection(collections[0], [3, -3], 16)
# 4
accumulator(
library.lh_v4,
[tuplet],
tsd=4,
)
baca.dynamic(baca.select.phead(tuplet, 0), "p")
baca.slur(baca.select.pleaves(tuplet))
baca.register(tuplet, 6, -24)
baca.slur_up(tuplet)
baca.label_figure(tuplet, "5.2.L.1", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2.", hide=True)
# 5
accumulator(
library.rh_v3,
[tuplet],
do_not_increment=True,
tsd=4,
)
baca.label_figure(tuplet, "S.1", accumulator, abjad.DOWN, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_1.rh[2:3]
assert library.foo(collections) == ["PC<3, 1, 0, 10>"]
tuplet = baca.from_collection(collections[0], [2, -4, 2, -4, 4], 16, 2)
# 6
accumulator(
library.rh_v3,
[tuplet],
tsd=4,
)
baca.register(tuplet, -6)
baca.stem_up(baca.select.pleaves(tuplet))
baca.tuplet_bracket_up(tuplet)
baca.label_figure(tuplet, "4.1.R.3", accumulator, abjad.UP)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2.", hide=True)
# 7
accumulator(
library.rh_v3,
[tuplet],
do_not_increment=True,
tsd=4,
)
baca.label_figure(tuplet, "S.2", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_1.lh[:1]
assert library.foo(collections) == ["PC<8, 2, 4>"]
tuplet = baca.from_collection(collections[0], [3, -1], 16)
tuplet.insert(0, "r2.")
# 8
accumulator(
library.lh_v5,
[tuplet],
anchor=baca.anchor_to_figure("4.1.R.1"),
hide_time_signature=True,
)
baca.dynamic(baca.select.phead(tuplet, 0), "f")
baca.register(tuplet, -6, 6)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.1.L.1", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2.", hide=True)
# 9
accumulator(
library.lh_v5,
[tuplet],
anchor=baca.anchor_to_figure("4.6.R.2"),
do_not_increment=True,
hide_time_signature=True,
)
baca.label_figure(tuplet, "S.3", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_1.lh[1:2]
assert library.foo(collections) == ["PC<8, 2, 4>"]
tuplet = baca.from_collection(collections[0], [3, -1, 3, -1, -4, 3, -1], 16)
tuplet.insert(0, "r4")
tuplet.append("r4")
# 10
accumulator(
library.lh_v5,
[tuplet],
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.register(tuplet, -6, 6)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.1.L.3", accumulator, abjad.DOWN)
@baca.call
def block():
collections = section_4.stage_2.lh[:1]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [3], 16)
tuplet.insert(0, "r16")
tuplet.append("r2")
# 11
accumulator(
library.lh_v5,
[tuplet],
anchor=baca.anchor_to_figure("S.1"),
hide_time_signature=True,
)
baca.bass_to_octave(tuplet, 3)
baca.dynamic(baca.select.phead(tuplet, 0), "ff")
baca.marcato(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.2.L.1", accumulator)
@baca.call
def block():
collections = section_4.stage_1.lh[2:3]
assert library.foo(collections) == ["PC<8, 2, 4>"]
tuplet = baca.from_collection(collections[0], [3, -1, -4, 3, -1, 3, -1], 16)
tuplet[0:0] = "r4 r4"
# 12
accumulator(
library.lh_v5,
[tuplet],
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.dynamic(baca.select.phead(tuplet, 0), "f")
baca.register(tuplet, -6, 6)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.1.L.2", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2.", hide=True)
# 13
accumulator(
library.lh_v5,
[tuplet],
do_not_increment=True,
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.label_figure(tuplet, "S.5", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_2.lh[1:2]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [3], 16)
tuplet.insert(0, "r16")
tuplet.append("r2")
# 14
accumulator(
library.lh_v5,
[tuplet],
)
baca.bass_to_octave(tuplet, 3)
baca.dynamic(baca.select.phead(tuplet, 0), "ff")
baca.label_figure(tuplet, "4.2.L.2", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[2:3]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [1], 16)
tuplet.extend("r8. r1")
# 15
accumulator(
library.lh_v5,
[tuplet],
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.label_figure(tuplet, "4.2.L.3", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[3:4]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [3], 16)
tuplet.insert(0, "r16")
tuplet.append("r2")
container = abjad.Container([tuplet])
baca.nest([tuplet], "+1/4")
# 16
accumulator(
library.lh_v5,
container,
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.tuplet_bracket_up(tuplet)
baca.label_figure(tuplet, "4.2.L.4", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[4:5]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [1], 16)
tuplet.extend("r8. r1")
# 17
accumulator(
library.lh_v5,
[tuplet],
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.label_figure(tuplet, "4.2.L.5", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[5:6]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [3], 16)
tuplet.insert(0, "r16")
tuplet.append("r2")
# 18
accumulator(
library.lh_v5,
[tuplet],
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.label_figure(tuplet, "4.2.L.6", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[6:7]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [1], 16)
tuplet.extend("r8. r1")
# 19
accumulator(
library.lh_v5,
[tuplet],
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.label_figure(tuplet, "4.2.L.7", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[7:8]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [3], 16)
tuplet.insert(0, "r16")
tuplet.append("r2")
container = abjad.Container([tuplet])
baca.nest([tuplet], "+1/4")
# 20
accumulator(
library.lh_v5,
container,
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.tuplet_bracket_up(tuplet)
baca.label_figure(tuplet, "4.2.L.8", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[8:9]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [1], 16)
tuplet.extend("r8. r1")
# 21
accumulator(
library.lh_v5,
[tuplet],
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.label_figure(tuplet, "4.2.L.9", accumulator)
@baca.call
def block():
collections = section_4.stage_2.lh[9:10]
assert library.foo(collections) == ["{7, 11, 17, 18, 21}"]
tuplet = baca.from_collection(collections[0], [3], 16)
tuplet.insert(0, "r16")
tuplet.append("r2")
container = abjad.Container([tuplet])
baca.nest([tuplet], "+1/4")
# 22
accumulator(
library.lh_v5,
container,
tsd=4,
)
baca.bass_to_octave(tuplet, 3)
baca.tuplet_bracket_up(tuplet)
baca.label_figure(tuplet, "4.2.L.10", accumulator)
segments = section_4.stage_2.lh[10:11]
segment = segments[0]
segment = baca.pcollections.space_down(segment, bass=7, semitones=3, soprano=9)
segment = baca.pcollections.bass_to_octave(segment, 2)
chord_1_upper, chord_1_lower = baca.pcollections.split(segment, pitch=-1)
@baca.call
def block():
collections = chord_1_lower
assert library.foo(collections) == "<-4, -7, -8, -17>"
collections = [tuple(chord_1_lower)]
tuplet = baca.from_collection(collections[0], [1], 16)
tuplet.extend("r8.")
# 23
accumulator(
library.lh_v5,
[tuplet],
tsd=4,
)
baca.cross_staff(baca.select.phead(tuplet, 0))
baca.dynamic(baca.select.phead(tuplet, 0), "mp")
# TODO: tag colored score only:
# baca.stem_color("darkmagenta", context="PianoStaff")
baca.stem_up(baca.select.pleaves(tuplet))
baca.label_figure(tuplet, "4.2.L.11.L", accumulator, abjad.DOWN)
@baca.call
def block():
collections = chord_1_upper
assert library.foo(collections) == "<9, 6, 2, -1>"
collections = [tuple(chord_1_upper)]
tuplet = baca.from_collection(collections[0], [1], 16)
tuplet.extend("r8.")
# 24
accumulator(
library.rh_v5,
[tuplet],
anchor=baca.anchor(library.lh_v5, lambda _: abjad.select.chord(_, -1)),
hide_time_signature=True,
)
baca.label_figure(tuplet, "4.2.L.11.U", accumulator, abjad.UP)
@baca.call
def block():
collections = [(-35, -23)]
tuplet = baca.from_collection(collections[0], [8], 16)
tuplet.insert(0, "s4")
container = abjad.Container([tuplet])
baca.nest([tuplet], "+1/4")
# 25
accumulator(
library.lh_resonance,
container,
anchor=baca.anchor(library.lh_v5, lambda _: baca.select.rest(_, 24)),
do_not_increment=True,
hide_time_signature=True,
)
baca.label_figure(tuplet, "R.1", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [(-35, -23)]
tuplet = baca.from_collection(collections[0], [4], 16)
# 26
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.label_figure(tuplet, "R.2", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [(-35, -23)]
tuplet = baca.from_collection(collections[0], [16], 16)
# 27
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.label_figure(tuplet, "R.3", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [(-33, -21)]
tuplet = baca.from_collection(collections[0], [8], 16)
tuplet.insert(0, "s4")
container = abjad.Container([tuplet])
baca.nest([tuplet], "+1/4")
# 28
accumulator(
library.lh_resonance,
container,
anchor=baca.anchor(library.lh_v5, lambda _: baca.select.rest(_, 32)),
do_not_increment=True,
hide_time_signature=True,
)
baca.label_figure(tuplet, "R.4", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [(-33, -21)]
tuplet = baca.from_collection(collections[0], [4], 16)
# 29
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.label_figure(tuplet, "R.5", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [(-33, -21)]
tuplet = baca.from_collection(collections[0], [16], 16)
# 30
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.label_figure(tuplet, "R.6", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_4.lh[:2]
assert library.foo(collections) == ["{0, 10}", "{2, 5}"]
tuplets = []
for collection in collections:
tuplet = baca.from_collection(collection, [2], 16)
tuplets.append(tuplet)
tuplets[0].insert(0, "r4")
tuplets[-1].append("r4")
container = abjad.Container(tuplets)
baca.nest(tuplets, "+2/16")
# 31
accumulator(
library.lh_v5,
container,
anchor=baca.resume_after(library.lh_v5),
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
rmakers.beam_groups(groups)
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.register(tuplets, -12, 0)
baca.label_figure(tuplets, "4.4.L.1-2", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r1", hide=True)
# 32
accumulator(
library.lh_v5,
[tuplet],
do_not_increment=True,
tsd=4,
)
baca.label_figure(tuplet, "S.6", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_4.lh[2:3]
assert library.foo(collections) == ["{0, 4, 8}"]
tuplet = baca.from_collection(collections[0], [6], 16)
tuplet.insert(0, "r8")
container = abjad.Container([tuplet])
# 33
accumulator(
library.lh_v5,
container,
tsd=4,
)
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.register(tuplet, -12, 0)
baca.label_figure(tuplet, "4.4.L.3", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r4", hide=True)
# 34
accumulator(
library.rh_v1,
[tuplet],
do_not_increment=True,
tsd=4,
)
baca.rest_transparent(abjad.select.rests(tuplet))
baca.label_figure(tuplet, "S.7", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_5.stage_2.rh[12:13]
assert library.foo(collections) == ["PC<8, 3, 5, 6>"]
tuplet = baca.from_collection(collections[0], [2], 16)
# 35
accumulator(
library.rh_v3,
[tuplet],
tsd=4,
)
rmakers.beam([tuplet])
baca.slur(tuplet)
baca.register(tuplet, 24)
baca.stem_down(baca.select.pleaves(tuplet))
baca.tuplet_bracket_down(tuplet)
baca.label_figure(tuplet, "5.2.R.13", accumulator)
@baca.call
def block():
collections = section_4.stage_4.lh[3:6]
assert library.foo(collections) == ["{10}", "{2, 5}", "{4, 8}"]
tuplets = []
for collection in collections:
tuplet = baca.from_collection(collection, [2], 16)
tuplets.append(tuplet)
tuplets[0].insert(0, "r4")
tuplets[-1].append("r4")
baca.register(tuplets, 0, -12)
container = abjad.Container(tuplets)
baca.nest(tuplets, "+2/16")
imbrications = baca.imbricate(
container,
library.lh_v5_i,
[10],
)
# 36
accumulator(
library.lh_v5,
container,
imbrications=imbrications,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
rmakers.beam_groups(groups)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True),
baca.extend_beam(abjad.select.leaf(imbrication, -1)),
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.label_figure(tuplets, "4.4.L.4-6", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2", hide=True)
# 37
accumulator(
library.lh_v5,
[tuplet],
do_not_increment=True,
tsd=4,
)
baca.label_figure(tuplet, "S.8", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_4.lh[6:9]
assert library.foo(collections) == ["{0, 5, 10}", "{2, 4, 8}", "{0, 10}"]
tuplets = []
for collection in collections:
tuplet = baca.from_collection(collection, [2], 16)
tuplets.append(tuplet)
tuplets[0].insert(0, "r4")
tuplets[-1].append("r4")
baca.register(tuplets, -12, 0)
container = abjad.Container(tuplets)
baca.nest(tuplets, "+2/16")
imbrications = baca.imbricate(
container,
library.lh_v5_i,
[-2, 2],
)
# 38
accumulator(
library.lh_v5,
container,
imbrications=imbrications,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
rmakers.beam_groups(groups)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True),
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.label_figure(tuplets, "4.4.L.7-9", accumulator, abjad.DOWN)
@baca.call
def block():
collections = section_4.stage_4.lh[9:11]
assert library.foo(collections) == ["{2, 5}", "{0, 4, 8}"]
tuplets = []
for collection in collections:
tuplet = baca.from_collection(collection, [2], 16)
tuplets.append(tuplet)
tuplets[0].insert(0, "r4")
tuplets[-1].append("r8")
baca.register(tuplets, 0, -12)
container = abjad.Container(tuplets)
baca.nest(tuplets, "+2/16")
imbrications = baca.imbricate(
container,
library.lh_v5_i,
[5],
)
# 39
accumulator(
library.lh_v5,
container,
imbrications=imbrications,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
rmakers.beam_groups(groups)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True),
baca.extend_beam(abjad.select.leaf(imbrication, -1)),
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.label_figure(tuplets, "4.4.L.10-11", accumulator, abjad.DOWN)
@baca.call
def block():
collections = section_4.stage_4.lh[11:12]
assert library.foo(collections) == ["{10}"]
tuplet = baca.from_collection(collections[0], [2], 16)
tuplet.insert(0, "r8")
tuplet.append("r4.")
container = abjad.Container([tuplet])
# 40
accumulator(
library.lh_v5,
container,
tsd=4,
)
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.register(tuplet, -12, 0)
baca.label_figure(tuplet, "4.4.L.12", accumulator, abjad.DOWN)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2", hide=True)
# 41
accumulator(
library.lh_v5,
[tuplet],
do_not_increment=True,
tsd=4,
)
baca.label_figure(tuplet, "S.9", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_4.lh[12:14]
assert library.foo(collections) == ["{2, 5}", "{4, 8}"]
tuplets = []
for collection in collections:
tuplet = baca.from_collection(collection, [2], 16)
tuplets.append(tuplet)
tuplets[0].insert(0, "r4")
tuplets[-1].append("r8")
baca.register(tuplets, 0, -12)
container = abjad.Container(tuplets)
baca.nest(tuplets, "+2/16")
imbrications = baca.imbricate(
container,
library.lh_v5_i,
[5],
)
# 42
accumulator(
library.lh_v5,
container,
imbrications=imbrications,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
rmakers.beam_groups(groups)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True),
baca.extend_beam(abjad.select.leaf(imbrication, -1)),
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.label_figure(tuplets, "4.4.L.13-14", accumulator, abjad.DOWN)
@baca.call
def block():
collections = section_4.stage_4.lh[14:15]
assert library.foo(collections) == ["{0, 5, 10}"]
tuplet = baca.from_collection(collections[0], [6], 16)
tuplet.insert(0, "r8")
container = abjad.Container([tuplet])
# 43
accumulator(
library.lh_v5,
container,
tsd=4,
)
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.register(tuplet, -12, 0)
baca.label_figure(tuplet, "4.4.L.15", accumulator, abjad.DOWN)
@baca.call
def block():
collections = section_4.stage_4.lh[15:16]
assert library.foo(collections) == ["{2, 4, 8}"]
tuplet = baca.from_collection(collections[0], [2], 16)
tuplet.insert(0, "r8")
tuplet.append("r4.")
baca.register(tuplet, 0, -12)
container = abjad.Container([tuplet])
imbrications = baca.imbricate(
container,
library.lh_v5_i,
[8],
)
# 44
accumulator(
library.lh_v5,
container,
imbrications=imbrications,
tsd=4,
)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True),
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.label_figure(tuplet, "4.4.L.16", accumulator, abjad.DOWN)
@baca.call
def block():
collections = section_4.stage_4.rh[:1]
assert library.foo(collections) == ["PC<2, 8, 3, 9, 2, 5, 11, 4>"]
tuplet = baca.from_collection(collections[0], [2], 16, 4)
tuplet.insert(0, "r8")
tuplet.append("r4.")
baca.register(tuplet, 36, 22)
container = abjad.Container([tuplet])
imbrications = baca.imbricate(
container,
library.rh_v2,
[3, 2, 5],
hocket=True,
by_pitch_class=True,
)
# 45 (produced m 31 @ 5/8)
accumulator(
library.rh_v1,
container,
anchor=baca.anchor_to_figure("4.4.L.1-2"),
hide_time_signature=True,
imbrications=imbrications,
)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True)
baca.dynamic(baca.select.phead(imbrication, 0), "mp")
baca.register(imbrication, 22, 36)
baca.staccato(baca.select.pheads(imbrication))
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.dynamic(baca.select.phead(tuplet, 0), "mf")
baca.dynamic_text_x_offset(baca.select.pleaf(tuplet, 0), -4)
baca.dynamic_text_extra_offset(baca.select.pleaf(tuplet, 0), (0, -8))
baca.dynamic_up(abjad.select.leaf(tuplet, 0))
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.text_script_color(baca.select.pleaf(tuplet, 0), "#black")
baca.text_script_up(tuplet)
baca.label_figure(container, "4.4.R.1", accumulator)
@baca.call
def block():
tuplet = abjad.Tuplet((1, 1), "r2", hide=True)
# 46
accumulator(
library.rh_v1,
[tuplet],
do_not_increment=True,
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
baca.label_figure(tuplet, "S.10", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_4.rh[1:2]
assert library.foo(collections) == ["PC<10, 5, 6, 0, 7, 1, 6, 9>"]
tuplet = baca.from_collection(collections[0], [2], 16, 8)
rmakers.denominator(tuplet, (1, 4))
baca.register(tuplet, 36, 22)
container = abjad.Container([tuplet])
imbrications = baca.imbricate(
container,
library.rh_v2,
[5, 6, 6],
by_pitch_class=True,
hocket=True,
)
# 47
accumulator(
library.rh_v1,
container,
anchor=baca.anchor_to_figure("4.4.L.4-6"),
hide_time_signature=True,
imbrications=imbrications,
)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True)
baca.register(imbrication, 22, 36)
baca.staccato(baca.select.pheads(imbrication))
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.label_figure(tuplet, "4.4.R.2", accumulator, abjad.UP)
@baca.call
def block():
collections = section_4.stage_4.rh[2:3]
assert library.foo(collections) == [
"PC<3, 8, 2, 9, 10, 4, 11, 5, 10, 1, 7, 0, 6, 1>"
]
tuplet = baca.from_collection(collections[0], [2], 16, 10)
baca.register(tuplet, 36, 22)
container = abjad.Container([tuplet])
imbrications = baca.imbricate(
container,
library.rh_v2,
[9, 10, 10, 1, 0],
by_pitch_class=True,
hocket=True,
)
# 48
accumulator(
library.rh_v1,
container,
anchor=baca.anchor(library.lh_v5, lambda _: baca.select.rest(_, -8)),
hide_time_signature=True,
imbrications=imbrications,
)
for imbrication in imbrications.values():
rmakers.unbeam(imbrication)
groups = rmakers.nongrace_leaves_in_each_tuplet(imbrication)
rmakers.beam_groups(groups, beam_rests=True)
baca.register(imbrication, 22, 36)
baca.staccato(baca.select.pheads(imbrication))
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
for ntrun in baca.select.ntruns(container):
baca.slur(ntrun)
baca.slur_down(abjad.select.leaf(tuplet, 0))
baca.label_figure(tuplet, "4.4.R.3", accumulator, abjad.UP)
resonance = tuple("e, fs, gs, as, b,".split())
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [4], 16)
# 49 (m 32 @ 1/4)
accumulator(
library.lh_resonance,
[tuplet],
anchor=baca.resume(),
check=True,
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.7", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [28], 16)
# 50 (m 33 @ 7/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.8", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [24], 16)
# 51 (m 34 @ 6/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.9", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [16], 16)
# 52 (m 35 @ 4/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.10", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [16], 16)
# 53 (m 36 @ 4/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.11", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [24], 16)
# 54 (m 37 @ 6/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.12", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [16], 16)
# 55 (m 38 @ 4/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.13", accumulator, do_not_increment=True)
@baca.call
def block():
collections = [resonance]
tuplet = baca.from_collection(collections[0], [8], 16)
# 56 (m 39 @ 2/4)
accumulator(
library.lh_resonance,
[tuplet],
do_not_increment=True,
tsd=4,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.label_figure(tuplet, "R.14", accumulator, do_not_increment=True)
@baca.call
def block():
collections = section_4.stage_5.rh[:1]
assert library.foo(collections) == ["PC<3>"]
tuplet = baca.from_collection(collections[0], [28], 16)
# 57
accumulator(
library.rh_v1,
[tuplet],
anchor=baca.anchor_to_figure("R.8"),
hide_time_signature=True,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.dynamic(baca.select.phead(tuplet, 0), "f")
baca.register(tuplet, 36)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.5.R.1", accumulator, abjad.UP)
@baca.call
def block():
collections = section_4.stage_5.rh[1:2]
assert library.foo(collections) == ["PC<5>"]
tuplet = baca.from_collection(collections[0], [24], 16)
# 58
accumulator(
library.rh_v1,
[tuplet],
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.register(tuplet, 36)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.5.R.2", accumulator, abjad.UP)
@baca.call
def block():
collections = section_4.stage_5.rh[2:3]
assert library.foo(collections) == ["PC<10>"]
tuplet = baca.from_collection(collections[0], [16], 16)
# 59
accumulator(
library.rh_v1,
[tuplet],
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.register(tuplet, 36)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.5.R.3", accumulator, abjad.UP)
@baca.call
def block():
collections = section_4.stage_5.rh[3:4]
assert library.foo(collections) == ["PC<3>"]
tuplet = baca.from_collection(collections[0], [16], 16)
# 60
accumulator(
library.rh_v1,
[tuplet],
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.register(tuplet, 36)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.5.R.4", accumulator)
@baca.call
def block():
collections = section_4.stage_5.rh[4:5]
assert library.foo(collections) == ["PC<5>"]
tuplet = baca.from_collection(collections[0], [16], 16)
# 61
accumulator(
library.rh_v1,
[tuplet],
anchor=baca.anchor_to_figure("R.13"),
hide_time_signature=True,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.register(tuplet, 36)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.5.R.5", accumulator)
@baca.call
def block():
collections = section_4.stage_5.rh[5:6]
assert library.foo(collections) == ["PC<10>"]
tuplet = baca.from_collection(collections[0], [8], 16)
# 62 (fills in m 39 with Bb)
accumulator(
library.rh_v1,
[tuplet],
hide_time_signature=True,
replace_after_last_nonskip_in_same_voice=True,
)
groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
rmakers.beam_groups(groups)
baca.register(tuplet, 36)
baca.tenuto(baca.select.pheads(tuplet))
baca.label_figure(tuplet, "4.5.R.6", accumulator)
    @baca.call
    def block():
        # Figure 63: four left-hand collections as sixteenth-note tuplets,
        # anchored to figure 4.5.R.3.
        collections = section_4.stage_5.lh[:4]
        assert library.foo(collections) == [
            "PC<11, 6, 7, 9, 1>",
            "PC<10, 1, 8, 9, 11>",
            "PC<3, 0, 10, 11, 1>",
            "PC<5, 2, 0, 1>",
        ]
        tuplets = []
        for collection in collections:
            tuplet = baca.from_collection(collection, [1], 16, abjad.Duration(1, 4))
            tuplets.append(tuplet)
        container = abjad.Container(tuplets)
        # 63
        accumulator(
            library.rh_v2,
            container,
            anchor=baca.anchor_to_figure("4.5.R.3"),
            hide_time_signature=True,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
        rmakers.beam_groups(groups)
        baca.dls_staff_padding(tuplets, 8)
        # Hairpin spans all but the final leaf; slur each nontrivial run.
        baca.hairpin(baca.select.leaves(tuplets)[:-1], "f < ff")
        for ntrun in baca.select.ntruns(container):
            baca.slur(ntrun)
        baca.register(tuplets, 10, 36)
        baca.slur_up(tuplets)
        baca.label_figure(tuplets, "4.5.L.1-4", accumulator)

    @baca.call
    def block():
        # Figure 64: next four left-hand collections; same treatment as
        # figure 63 plus upward text scripts.
        collections = section_4.stage_5.lh[4:8]
        assert library.foo(collections) == [
            "PC<3, 7, 4, 2>",
            "PC<3, 5, 9, 6, 4>",
            "PC<5, 7, 11, 8>",
            "PC<6, 7, 9, 1, 10>",
        ]
        tuplets = []
        for collection in collections:
            tuplet = baca.from_collection(collection, [1], 16, abjad.Duration(1, 4))
            tuplets.append(tuplet)
        container = abjad.Container(tuplets)
        # 64
        accumulator(
            library.rh_v2,
            container,
            hide_time_signature=True,
            replace_after_last_nonskip_in_same_voice=True,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
        rmakers.beam_groups(groups)
        baca.dls_staff_padding(tuplets, 8)
        baca.hairpin(baca.select.leaves(tuplets)[:-1], "f < ff")
        for ntrun in baca.select.ntruns(container):
            baca.slur(ntrun)
        baca.register(tuplets, 10, 36)
        baca.slur_up(tuplets)
        baca.text_script_up(tuplets)
        baca.label_figure(tuplets, "4.5.L.5-8", accumulator)
    @baca.call
    def block():
        # Figure 65: single chordal tuplet anchored to figure R.12, with
        # marcato and up-arpeggio on every pitched head.
        collections = section_4.stage_6.rh[1:2]
        assert library.foo(collections) == ["{-3, 7, 8, 11, 13, 17, 27, 36}"]
        tuplet = baca.from_collection(collections[0], [24], 16)
        # 65
        accumulator(
            library.rh_v2,
            [tuplet],
            anchor=baca.anchor_to_figure("R.12"),
            hide_time_signature=True,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
        rmakers.beam_groups(groups)
        baca.marcato(baca.select.pheads(tuplet))
        baca.script_up(tuplet)
        baca.up_arpeggio(baca.select.pheads(tuplet))
        baca.label_figure(tuplet, "4.6.R.2'", accumulator, abjad.UP)

    @baca.call
    def block():
        # Figure 66: four more left-hand collections (figures 4.5.L.9-12);
        # same treatment as figure 64.
        collections = section_4.stage_5.lh[8:12]
        assert library.foo(collections) == [
            "PC<1, 8, 9, 11, 3, 0>",
            "PC<3, 10, 11, 1, 5>",
            "PC<2, 5, 0, 1, 3>",
            "PC<7, 4, 2, 3, 5>",
        ]
        tuplets = []
        for collection in collections:
            tuplet = baca.from_collection(collection, [1], 16, abjad.Duration(1, 4))
            tuplets.append(tuplet)
        container = abjad.Container(tuplets)
        # 66
        accumulator(
            library.rh_v2,
            container,
            hide_time_signature=True,
            replace_after_last_nonskip_in_same_voice=True,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
        rmakers.beam_groups(groups)
        baca.dls_staff_padding(tuplets, 8)
        baca.hairpin(baca.select.leaves(tuplets)[:-1], "f < ff")
        for ntrun in baca.select.ntruns(container):
            baca.slur(ntrun)
        baca.register(tuplets, 10, 36)
        baca.slur_up(tuplets)
        baca.text_script_up(tuplets)
        baca.label_figure(tuplets, "4.5.L.9-12", accumulator)

    @baca.call
    def block():
        # Figure 67: last two left-hand collections of the stage
        # (fills in second voice in m 39).
        collections = section_4.stage_5.lh[12:14]
        assert library.foo(collections) == ["PC<9, 6, 4, 5>", "PC<7, 11, 8, 6>"]
        tuplets = []
        for collection in collections:
            tuplet = baca.from_collection(collection, [1], 16, abjad.Duration(1, 4))
            tuplets.append(tuplet)
        container = abjad.Container(tuplets)
        # 67 (fills in second voice in m 39)
        accumulator(
            library.rh_v2,
            container,
            hide_time_signature=True,
            replace_after_last_nonskip_in_same_voice=True,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplets)
        rmakers.beam_groups(groups)
        baca.dls_staff_padding(tuplets, 8)
        baca.hairpin(baca.select.leaves(tuplets)[:-1], "f < ff")
        for ntrun in baca.select.ntruns(container):
            baca.slur(ntrun)
        baca.register(tuplets, 10, 36)
        baca.slur_up(tuplets)
        baca.text_script_up(tuplets)
        baca.label_figure(tuplets, "4.5.L.13-14", accumulator)
    @baca.call
    def block():
        # Figure 68: chordal tuplet resuming after the previous material
        # (makes m 40 @ 4/4).
        collections = section_4.stage_6.rh[:1]
        assert library.foo(collections) == ["{17, 27, 36, 40, 42, 46}"]
        tuplet = baca.from_collection(collections[0], [16], 16)
        # 68 (make m 40 @ 4/4)
        accumulator(
            library.rh_v2,
            [tuplet],
            anchor=baca.resume(),
            tsd=4,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
        rmakers.beam_groups(groups)
        baca.up_arpeggio(baca.select.pheads(tuplet))
        baca.marcato(baca.select.pheads(tuplet))
        baca.script_up(tuplet)
        baca.label_figure(tuplet, "4.6.R.3", accumulator, abjad.UP)

    @baca.call
    def block():
        # Figure 69: same chord set as figure 65 at sixteenth spacing
        # (makes m 41 @ 4/4).
        collections = section_4.stage_6.rh[1:2]
        assert library.foo(collections) == ["{-3, 7, 8, 11, 13, 17, 27, 36}"]
        tuplet = baca.from_collection(collections[0], [16], 16)
        # 69 (makes m 41 @ 4/4)
        accumulator(
            library.rh_v2,
            [tuplet],
            tsd=4,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
        rmakers.beam_groups(groups)
        baca.up_arpeggio(baca.select.pheads(tuplet))
        baca.marcato(baca.select.pheads(tuplet))
        baca.script_up(tuplet)
        baca.label_figure(tuplet, "4.6.R.4", accumulator, abjad.UP)

    @baca.call
    def block():
        # Figure 70: next chord collection (makes m 42 @ 4/4).
        collections = section_4.stage_6.rh[2:3]
        assert library.foo(collections) == ["{4, 6, 10, 21, 31, 32, 35, 37}"]
        tuplet = baca.from_collection(collections[0], [16], 16)
        # 70 (makes m 42 @ 4/4)
        accumulator(
            library.rh_v2,
            [tuplet],
            tsd=4,
        )
        groups = rmakers.nongrace_leaves_in_each_tuplet(tuplet)
        rmakers.beam_groups(groups)
        baca.up_arpeggio(baca.select.pheads(tuplet))
        baca.marcato(baca.select.pheads(tuplet))
        baca.script_up(tuplet)
        baca.label_figure(tuplet, "4.6.R.5", accumulator, abjad.UP)

    @baca.call
    def block():
        # Figure 71: hidden whole-rest tuplet carrying sustain-pedal
        # indications and transparent rests (fills mm 41-43 with rests).
        tuplet = abjad.Tuplet((1, 1), "r1 r1 r1", hide=True)
        # TODO: debug: figure out why bracket extends indefinitely to right
        container = abjad.Container([tuplet])
        # 71 (fills mm 41-43 with rests)
        accumulator(
            library.lh_v2,
            container,
            anchor=baca.anchor_to_figure("4.6.R.3"),
            do_not_increment=True,
            hide_time_signature=True,
        )
        # Pedal each logical tie except the last over its rightward leaves.
        for lt in baca.select.lts(container)[:-1]:
            lt = baca.select.rleaves(lt)
            baca.sustain_pedal(lt)
        baca.sustain_pedal_staff_padding(tuplet, 4)
        baca.rest_transparent(abjad.select.rests(tuplet))
        baca.label_figure(tuplet, "S.11", accumulator, do_not_increment=True)
    @baca.call
    def block():
        # Figure 72: right-hand figure resuming the music flow
        # (makes m 43 @ 6/4).
        collections = section_4.stage_1.rh[1:2]
        assert library.foo(collections) == ["PC<3, 1, 0, 10>"]
        tuplet = baca.from_collection(collections[0], [-4, 2, -4, 4], 16, -4)
        # 72 (makes m 43 @ 6/4)
        accumulator(
            library.rh_v3,
            [tuplet],
            anchor=baca.resume(),
            tsd=4,
        )
        baca.dynamic(baca.select.phead(tuplet, 0), "mp")
        baca.register(tuplet, 12, -12)
        baca.stem_up(baca.select.pleaves(tuplet))
        baca.text_script_up(tuplet)
        baca.tuplet_bracket_staff_padding(tuplet, 4)
        baca.tuplet_bracket_up(tuplet)
        baca.label_figure(tuplet, "4.1.R.2'", accumulator)

    @baca.call
    def block():
        # Figure 73: left-hand figure padded with quarter rests on both
        # sides, anchored to figure 4.1.R.2'.
        collections = section_4.stage_1.lh[1:2]
        assert library.foo(collections) == ["PC<8, 2, 4>"]
        tuplet = baca.from_collection(collections[0], [3, -1, 3, -1, -4, 3, -1], 16)
        tuplet.insert(0, "r4")
        tuplet.append("r4")
        # 73
        accumulator(
            library.lh_v5,
            [tuplet],
            anchor=baca.anchor_to_figure("4.1.R.2'"),
            hide_time_signature=True,
        )
        baca.dynamic(baca.select.phead(tuplet, 0), "p")
        baca.register(tuplet, -6, 6)
        baca.tenuto(baca.select.pheads(tuplet))
        baca.label_figure(tuplet, "4.1.L.2'", accumulator, abjad.DOWN)

    @baca.call
    def block():
        # Figure 74: opening right-hand figure of section 5 at fff
        # (makes m 44 @ 16/4).
        collections = section_5.stage_1.rh[:1]
        assert library.foo(collections) == ["PC<0, 2, 3, 5>"]
        tuplet = baca.from_collection(collections[0], [2, -14], 16)
        # 74 (makes m 44 @ 16/4)
        accumulator(
            library.rh_v2,
            [tuplet],
            tsd=4,
        )
        baca.accent(baca.select.pheads(tuplet))
        baca.dynamic(baca.select.phead(tuplet, 0), "fff")
        baca.dynamic_up(abjad.select.leaf(tuplet, 0))
        baca.register(tuplet, 20, 36)
        baca.rest_up(abjad.select.rests(tuplet))
        baca.script_up(tuplet)
        baca.stem_up(baca.select.pleaves(tuplet))
        baca.text_script_up(tuplet)
        baca.label_figure(tuplet, "5.1.R.1", accumulator)

    @baca.call
    def block():
        # Figure 75: left-hand counterpart with ottava, anchored to 5.1.R.1.
        collections = section_5.stage_1.lh[:1]
        assert library.foo(collections) == ["PC<9, 1, 11, 8, 7>"]
        tuplet = baca.from_collection(
            collections[0], [4, -4], 16, abjad.Duration(16, 4)
        )
        rmakers.denominator(tuplet, abjad.Duration(1, 1))
        # 75
        accumulator(
            library.rh_v3,
            [tuplet],
            anchor=baca.anchor_to_figure("5.1.R.1"),
            hide_time_signature=True,
        )
        baca.dynamic(baca.select.phead(tuplet, 0), "mf")
        baca.ottava(baca.select.tleaves(tuplet))
        baca.register(tuplet, 10, 26)
        baca.rest_down(abjad.select.rests(tuplet))
        baca.stem_down(baca.select.pleaves(tuplet))
        baca.tuplet_bracket_down(tuplet)
        baca.tuplet_bracket_staff_padding(tuplet, 3)
        baca.label_figure(tuplet, "5.1.L.1", accumulator)
    # Tail of the enclosing score maker: cache voices, wrap the accumulated
    # time signatures, and configure the global contexts before returning.
    voices = baca.section.cache_voices(accumulator._score, library.voice_abbreviations)
    time_signatures = baca.section.wrap(accumulator.time_signatures)
    baca.section.set_up_score(
        accumulator._score,
        time_signatures(),
        append_anchor_skip=True,
        always_make_global_rests=True,
        first_measure_number=first_measure_number,
        manifests=library.manifests,
        score_persistent_indicators=previous_persistent_indicators["Score"],
    )
    # Hide trivial (1:1) tuplet brackets everywhere in the score.
    rmakers.hide_trivial(accumulator._score)
    return accumulator._score, voices, time_signatures
def GLOBALS(skips, rests):
    """Attach metronome marks and the global fermata to the global contexts."""
    # (skip index, metronome-mark key or accelerando/ritardando indicator).
    marks = [
        (0, "84"),
        (0, baca.Accelerando()),
        (4, "112"),
        (5, "84"),
        (5, baca.Accelerando()),
        (14, "112"),
        (19, "112"),
        (19, baca.Ritardando()),
        (22, "84"),
        (23, "112"),
        (23, baca.Ritardando()),
        (27, "84"),
        (29, "112"),
        (29, baca.Ritardando()),
        (31, "84"),
        (32, "112"),
        (32, baca.Ritardando()),
        (36, "84"),
    ]
    for position, mark in marks:
        baca.metronome_mark(skips[position], mark, manifests=library.manifests)
    # Measure 21 (zero-indexed 20) carries the lone global fermata.
    baca.global_fermata(rests[21 - 1], "fermata")
def postprocess(cache):
    """Apply voice-by-voice overrides and articulations to cached leaves."""
    # Right hand, voice 1: stems/brackets up; ottava two leaf spans.
    m = cache[library.rh_v1]
    with baca.scope(m.leaves()) as o:
        baca.stem_up(o.pleaves())
        baca.tuplet_bracket_staff_padding(o, 8)
        baca.tuplet_bracket_up(o)
    for item in [(1, 36), (38, 39)]:
        with baca.scope(m.get(item)) as o:
            baca.ottava(o.tleaves())
    with baca.scope(cache[library.rh_v3].leaves()) as o:
        baca.tenuto(o.pheads())
    # Left hand, voice 5: dynamics below; range-specific articulations.
    m = cache[library.lh_v5]
    with baca.scope(m.leaves()) as o:
        baca.dynamic_down(o.leaf(0))
    with baca.scope(m.get(7, 16)) as o:
        baca.marcato(o.pheads())
        baca.rest_up(o.rests())
    with baca.scope(m.get(18, 44)) as o:
        baca.stem_down(o.pleaves())
        baca.tuplet_bracket_staff_padding(o, 2)
        baca.tuplet_bracket_down(o)
    with baca.scope(cache[library.lh_v5_i].leaves()) as o:
        baca.script_up(o)
        baca.staccato(o.pheads())
        baca.stem_up(o.pleaves())
    # Resonance voice: replace ties with repeat ties inside each equal-pitch
    # run, then blank accidentals/dots/stems in selected spans.
    m = cache[library.lh_resonance]
    with baca.scope(m.leaves()) as o:
        baca.untie(o)
        for qrun in baca.select.qruns(o):
            baca.repeat_tie(qrun[1:])
    for item in [11, 15, (33, 39)]:
        with baca.scope(m.get(item)) as o:
            baca.accidental_stencil_false(o)
            baca.dots_stencil_false(o)
            baca.stem_stencil_false(o)
    with baca.scope(m[32]) as o:
        baca.accidental_x_extent_false(o)
@baca.build.timed("make_score")
def make_score(first_measure_number, previous_persistent_indicators):
    """Build the empty score, attach globals, reapply indicators, postprocess."""
    score, voices, time_signatures = make_empty_score(
        first_measure_number, previous_persistent_indicators
    )
    GLOBALS(score["Skips"], score["Rests"])
    voices = baca.section.cache_voices(score, library.voice_abbreviations)
    voice_names = baca.section.get_voice_names(score)
    # Only music-bearing voices receive reapplied persistent indicators.
    music_voice_markers = ("RHVoice", "LHVoice", "InsertVoice", "ResonanceVoice")
    music_voice_names = [
        name
        for name in voice_names
        if any(marker in name for marker in music_voice_markers)
    ]
    baca.section.reapply_persistent_indicators(
        [voices(name) for name in music_voice_names],
        previous_persistent_indicators,
        manifests=library.manifests,
    )
    cache = baca.section.cache_leaves(
        score,
        len(time_signatures()),
        library.voice_abbreviations,
    )
    postprocess(cache)
    return score
def main():
    """Section entry point: build the score and persist the LilyPond file."""
    environment = baca.build.read_environment(__file__, baca.build.argv())
    # make_score() declares two parameters; the trailing timing argument is
    # presumably consumed by the baca.build.timed wrapper — TODO confirm.
    score = make_score(
        environment.first_measure_number,
        environment.previous_metadata["persistent_indicators"],
        environment.timing,
    )
    metadata = baca.section.postprocess_score(
        score,
        environment,
        library.manifests,
        always_make_global_rests=True,
        do_not_check_wellformedness=True,
        do_not_require_short_instrument_names=True,
        error_on_not_yet_pitched=True,
    )
    # Toggle which annotation tags are active in the generated output.
    baca.tags.deactivate(score, baca.tags.REPEAT_PITCH_CLASS_COLORING)
    baca.tags.activate(score, baca.tags.LOCAL_MEASURE_NUMBER, baca.tags.FIGURE_LABEL)
    lilypond_file = baca.lilypond.file(
        score,
        include_layout_ly=True,
        includes=["../stylesheet.ily"],
    )
    baca.build.persist_lilypond_file(
        environment.arguments,
        environment.section_directory,
        environment.timing,
        lilypond_file,
        metadata,
    )
# Run the section build only when executed as a script.
if __name__ == "__main__":
    main()
|
994,096 | 6a8e39889a7058bb2b927367c772ddbd205a67f8 | #check if its palindrome or not
# Read a string and report whether it reads the same forwards and backwards.
string = raw_input("enter any string:")
is_palindrome = string == string[::-1]
if is_palindrome:
    print ("the string is a palindrome")
else:
    print ("the string is not a palindrome")
|
994,097 | bb6d2b1c12b23ab41b925fdfb7c40268db4c64f1 | #!/usr/bin/python3
"""
Module to execute the function that print a text..
"""
def text_indentation(text):
    """
    Function that prints a text with 2 new lines
    after each of these characters: ., ? and :
    """
    # Guard clause: reject anything that is not exactly a str.
    if type(text) is not str:
        raise TypeError("text must be a string")
    for char in text:
        if char in ".?:":
            # print() appends its own newline, yielding two blank lines.
            print(char + "\n")
        else:
            print(char, end="")
|
994,098 | 5fe8346fa41fbae4e99185928c659da3b3e53638 | # MIT License
#
# Copyright (c) 2021 Yusuf Cihan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Any, List, Optional, Union, Dict
from pyconduit.category import ConduitCategory
from pyconduit.category import ConduitBlock as conduitblock
from pyconduit.step import ConduitVariable
from pyconduit.other import EMPTY
# Lists
# Contains blocks to interact with lists.
class Lists(ConduitCategory):
    """
    Contains blocks to interact with lists.
    """

    @conduitblock.make
    def create(*, value1 : Any = EMPTY, value2 : Any = EMPTY) -> List[Any]:
        """
        Creates a new list with items. If items are not provided, then returns an empty list.

        Args:
            value1:
                A value that will be added to list.
            value2:
                A value that will be added to list.
        """
        # EMPTY is the "not provided" sentinel; only real values are kept.
        return [x for x in [value1, value2] if x != EMPTY]

    @conduitblock.make
    def count(*, list : Union[List[Any], ConduitVariable]) -> int:
        """
        Counts the elements in a list.

        Args:
            list:
                The list that will be used in the operation.
        """
        return len(list)

    @conduitblock.make
    def append(*, item : Any, list : Union[List[Any], ConduitVariable]) -> None:
        """
        Add item to the end of the list.

        Args:
            item:
                The item that will be added to list.
            list:
                The list that will be used in the operation.
        """
        list.append(item)

    @conduitblock.make
    def clear(*, list : Union[List[Any], ConduitVariable]) -> None:
        """
        Remove all items from list.

        Args:
            list:
                The list that will be used in the operation.
        """
        list.clear()

    @conduitblock.make
    def extend(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> None:
        """
        Extend list by appending elements from the `list2`. `list2` will not be modified but `list1` will be.

        Args:
            list1:
                First list.
            list2:
                Second list.
        """
        list1.extend(list2)

    @conduitblock.make
    def sort(*, list : Union[List[Any], ConduitVariable], reverse : bool = False) -> None:
        """
        Sort list in place.

        Args:
            list:
                The list that will be used in the operation.
            reverse:
                If `True`, the sorted list is reversed (or sorted in Descending order)
        """
        list.sort(key = None, reverse = reverse)

    @conduitblock.make
    def insert(*, list : Union[List[Any], ConduitVariable], index : int, item : Any) -> None:
        """
        Insert object before index.

        Args:
            list:
                The list that will be used in the operation.
            index:
                The index that will item placed in.
            item:
                The item that will be added to list.
        """
        list.insert(index, item)

    @conduitblock.make
    def copy(*, list : Union[List[Any], ConduitVariable]) -> List[Any]:
        """
        Return a shallow copy of the list.

        Args:
            list:
                The list that will be used in the operation.
        """
        return list.copy()

    @conduitblock.make
    def remove(*, item : Any, list : Union[List[Any], ConduitVariable]) -> None:
        """
        Remove a item from list.

        Args:
            item:
                The item that will be removed.
            list:
                The list that will be used in the operation.
        """
        list.remove(item)

    @conduitblock.make
    def pop(*, list : Union[List[Any], ConduitVariable], index : Optional[int] = None) -> Any:
        """
        Remove and return item at index (default last).

        Args:
            list:
                The list that will be used in the operation.
            index:
                The index of item that will be removed.
        """
        # BUG FIX: list.pop(None) raises TypeError, so only forward an
        # explicit index; bare pop() removes the last item.
        if index is None:
            return list.pop()
        return list.pop(index)

    @conduitblock.make
    def reverse(*, list : Union[List[Any], ConduitVariable]) -> None:
        """
        Reverses the list in place.

        Args:
            list:
                The list that will be used in the operation.
        """
        list.reverse()

    @conduitblock.make
    def count_item(*, item : Any, list : Union[List[Any], ConduitVariable]) -> int:
        """
        Return number of occurrences of value.

        Args:
            item:
                The item that will be counted.
            list:
                The list that will be used in the operation.
        """
        # Annotation fixed: list.count() returns an int, not a list.
        return list.count(item)

    @conduitblock.make
    def merge(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> List[Any]:
        """
        Merge the two lists.

        Args:
            list1:
                First list.
            list2:
                Second list.
        """
        return [*list1, *list2]

    @conduitblock.make
    def flatten(*, values : Union[List[Any], ConduitVariable]) -> List[Any]:
        """
        Moves the inner lists' items to the root of the list. Only one depth is supported.

        Args:
            values:
                The list that will be used in the operation.
        """
        items = []
        for item in values:
            if isinstance(item, list):
                items.extend(item)
            else:
                items.append(item)
        return items

    @conduitblock.make
    def get(*, list : Union[List[Any], ConduitVariable], index : int) -> Any:
        """
        Return the item in the position.

        Args:
            list:
                The list that will be used in the operation.
            index:
                The index of item.
        """
        return list[index]

    @conduitblock.make
    def index(*, list : Union[List[Any], ConduitVariable], item : Any) -> int:
        """
        Return the index of the item. Returns -1 if item is not found.

        Args:
            list:
                The list that will be used in the operation.
            item:
                The item that its index will be returned.
        """
        return -1 if item not in list else list.index(item)
994,099 | 6e08a9b2b0e4de2908c5ef4aa35bbc82b5fe80f7 | # -*- coding:utf8 -*-
import getpass
import sys
from optparse import OptionParser
from pexpect.exceptions import TIMEOUT
from api.api import Api
from api.inputClient import InputClient
from api.outputClient import OutputClient
from constants.ParamsException import ParamsException
from service.color import redStr, greenStr
def main():
    """Command-line entry point for the SSH-bookmark manager (Python 2).

    With no options: show stored connections (or fuzzy-match a positional
    host argument) and log in to the selection.  Options: -l list, -a add,
    -i connect by host, -u update, -d delete by host, -D delete by index.
    Exactly one option may be supplied per invocation.
    """
    inputClient = InputClient()
    api = Api()
    usage = "usage: %prog [options] [arg]"
    parser = OptionParser(usage)
    parser.add_option('-l', '--list', action='store_true', dest="iflist", help=u"list all ssh information")
    parser.add_option('-a', '--add', action='store', dest="ifadd",
                      help=u"Add ssh information.<name>@<ip>[:<ssh port>][@<alias>]. Example: root:1.1.1.1:1010-home1 or root:1.1.1.2")
    parser.add_option('-i', '--host', action="store", dest="host", help=u"Connect remote with the host ip")
    parser.add_option('-d', action='store', dest='del_host', help=u"Remove ssh information")
    parser.add_option('-D', '--delete-by-index', action='store_true', dest='del_by_index',
                      help=u"Remove ssh information by index id")
    parser.add_option('-u', '--update', action="store_true", dest="ifupdate")
    (option, args) = parser.parse_args()
    # NOTE(review): eval() of the optparse Values repr rebuilds the option
    # dict; input is our own repr so not attacker-controlled, but
    # vars(option) would avoid eval entirely.
    opt_dict = eval(str(option))
    opt_values = opt_dict.values()
    # Number of options the user actually supplied (non-None destinations).
    param_len = len(opt_values) - opt_values.count(None)
    output = OutputClient()
    output.set_header(['Index','UserName', 'Host', 'Port', 'Alias'], [7, 17, 17, 10, 30])
    if param_len > 1:
        raise ParamsException('Param Error')
    elif param_len == 0:
        try:
            if len(args) > 0:
                # Positional argument: exact lookup first, then fuzzy match.
                host = args[0]
                ssh_conn = api.get_ssh_connect(host)
                if ssh_conn is not None:
                    api.login(ssh_conn.get('host'), ssh_conn.get('name'), ssh_conn.get('passwd'), ssh_conn.get('port'))
                else:
                    ssh_conns = api.login_fuzzy(host)
                    con_len = len(ssh_conns)
                    if con_len == 0:
                        sys.stdout.write(redStr('No Matched Host\n'))
                        return
                    elif con_len == 1:
                        # Single fuzzy match: log straight in.
                        api.login(ssh_conns[0].get('host'), ssh_conns[0].get('name'), ssh_conns[0].get('passwd'), ssh_conns[0].get('port'))
                        return
                    else:
                        # Several matches: show the table and let the user pick.
                        output.set_values(ssh_conns)
                        sys.stdout.write(output.show())
                        sys.stdout.write('\n')
                        ssh_conn = output.select()
                        api.login(ssh_conn.get('host'), ssh_conn.get('name'), ssh_conn.get('passwd'), ssh_conn.get('port'))
                        return
                return
            else:
                # No arguments at all: list everything and prompt a selection.
                ssh_conns = api.list_ssh_connects()
                output.set_values(ssh_conns)
                sys.stdout.write(output.show())
                sys.stdout.write('\n')
                if len(ssh_conns) > 0:
                    ssh_conn = output.select()
                    api.login(ssh_conn.get('host'), ssh_conn.get('name'), ssh_conn.get('passwd'), ssh_conn.get('port'))
                return
        except ParamsException as e:
            sys.stdout.write(e.msg)
            sys.stdout.write('\n')
        except TIMEOUT:
            sys.stdout.write("Connection Timeout!\n")
        except Exception as e:
            sys.stdout.write(e.message)
            sys.stdout.write('\n')
    else:
        iflist = option.iflist
        add = option.ifadd
        host = option.host
        ifupdate = option.ifupdate
        del_host = option.del_host
        del_by_index = option.del_by_index
        try:
            if iflist:
                ssh_conns = api.list_ssh_connects()
                output.set_values(ssh_conns)
                sys.stdout.write(output.show())
                sys.stdout.write('\n')
                return
            if add:
                # Parse <name>@<ip>[:<port>][@<alias>].
                s_l = add.split('@')
                if len(s_l) < 2:
                    sys.stdout.write(
                        redStr('The data format is not correct. Example: <name>@<ip>[:<ssh port>][@<alias>]'))
                    return
                username = s_l[0]
                port = 22
                alias = ''
                if len(s_l) == 3:
                    alias = s_l[2]
                ip_port_arr = s_l[1].split(':')
                host = ip_port_arr[0]
                try:
                    if len(ip_port_arr) == 2:
                        port = int(ip_port_arr[1])
                # Was `except Exception, e:` — removed-in-Python-3 syntax and
                # inconsistent with every other handler in this function.
                except Exception as e:
                    sys.stdout.write(
                        redStr('The data format is not correct. Example: <name>@<ip>[:<ssh port>][@<alias>]'))
                    return
                password = getpass.getpass('Input Your Password:')
                api.add_ssh_connect(host, username, password, port, alias)
                return
            if host:
                ssh_conn = api.get_ssh_connect(host)
                if ssh_conn is None:
                    raise Exception("Error: Host %s is not exist!" % host)
                else:
                    api.login(ssh_conn.get('host'), ssh_conn.get('name'), ssh_conn.get('passwd'), ssh_conn.get('port'))
            if ifupdate:
                ssh_conns = api.list_ssh_connects()
                output.set_values(ssh_conns)
                sys.stdout.write(output.show())
                sys.stdout.write('\n')
                if len(ssh_conns) > 0:
                    ssh_conn = output.select_to_update()
                    username = inputClient.input_username()
                    password = inputClient.input_password()
                    api.update_ssh_connect(ssh_conn.get('host'), username, password)
                    sys.stdout.write(greenStr('Update Successfully!\n'))
            if del_host:
                ssh_conn = api.get_ssh_connect(del_host)
                if ssh_conn is None:
                    # BUG FIX: the message previously interpolated `host`
                    # (always None in this branch) instead of `del_host`.
                    raise Exception("Error: Host %s is not exist!" % del_host)
                api.del_ssh_connect(del_host)
                sys.stdout.write(greenStr('Delete Successfully!'))
            if del_by_index:
                ssh_conns = api.list_ssh_connects()
                output.set_values(ssh_conns)
                sys.stdout.write(output.show())
                sys.stdout.write('\n')
                if len(ssh_conns) > 0:
                    ssh_conn = output.select_to_del()
                    api.del_ssh_connect(ssh_conn.get('host'))
                    sys.stdout.write(greenStr('Delete Successfully!\n'))
            return
        except ParamsException as e:
            sys.stdout.write(e.msg)
            sys.stdout.write('\n')
        except TIMEOUT:
            sys.stdout.write("Connection Timeout!\n")
        except Exception as e:
            sys.stdout.write(e.message)
            sys.stdout.write('\n')
def intur_hander(signal, frame):
    """Handle SIGINT by terminating the process with exit status 0."""
    # sys.exit(0) is exactly `raise SystemExit(0)`.
    raise SystemExit(0)
if __name__ == "__main__":
    # Install a SIGINT handler so Ctrl-C exits quietly (status 0) instead
    # of printing a KeyboardInterrupt traceback, then run the CLI.
    import signal
    signal.signal(signal.SIGINT, intur_hander)
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.